Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/Kconfig52
-rw-r--r--drivers/gpu/drm/Makefile14
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c325
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c452
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c251
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c249
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c454
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c147
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c244
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c141
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c348
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c159
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c710
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c252
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c219
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c347
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c215
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c211
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h (renamed from drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h)25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c442
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c211
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c1206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c883
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c895
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h351
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c525
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c438
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c395
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c188
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c498
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/arct_reg_init.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v1_0.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v1_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_i2c.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c476
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c605
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2608
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c978
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c408
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c830
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c586
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c827
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c641
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c)23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c271
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c1602
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c384
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c)38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c305
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c273
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c465
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c534
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.h (renamed from drivers/gpu/drm/i915/intel_guc_fw.h)25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c1005
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c732
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h)28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c472
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c373
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.h52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c700
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c855
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c1799
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h1454
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm1993
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm395
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm547
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c80
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c332
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c173
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c96
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c348
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c103
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c52
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c)149
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c)41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c146
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c49
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h9
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig31
-rw-r--r--drivers/gpu/drm/amd/display/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1409
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h89
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c236
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h67
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c77
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c386
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h69
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c98
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c29
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c535
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c205
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c159
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile36
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c148
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c758
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c765
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c838
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c139
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c1538
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c296
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c460
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h171
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c297
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h189
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h172
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h84
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c1028
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h (renamed from drivers/gpu/drm/i915/i915_gem_render_state.h)28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c109
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h172
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c1084
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h95
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c1626
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c133
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h189
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c142
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c1393
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c657
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h146
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c960
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c142
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c468
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c1938
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c5155
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c1701
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c6134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c1821
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h143
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c253
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c388
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile16
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c240
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h (renamed from drivers/gpu/drm/i915/intel_guc_ads.h)31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c324
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h (renamed from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h139
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h330
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h156
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c374
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c14
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h289
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h48
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h41
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h63
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h154
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h523
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h69
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h64
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile27
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c219
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h186
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c64
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h41
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c109
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h124
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c524
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h40
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h9
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h18
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h24
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c708
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h14
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c363
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c509
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h587
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c529
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c319
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c886
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c679
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c631
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c281
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h193
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c832
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h466
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h298
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c211
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c193
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h8
-rw-r--r--drivers/gpu/drm/amd/include/arct_ip_offset.h1650
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_offset.h56
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_sh_mask.h73
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h9
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h13875
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h56646
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h26
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h647
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h3912
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h565
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h3430
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h39
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h187
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h264
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h748
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h122
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h3933
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h7753
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h45012
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_offset.h336
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_sh_mask.h866
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h49
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h)18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h1051
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h3002
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h184
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h407
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h91
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h91
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h991
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h3609
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h99
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h42
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h19
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h21
-rw-r--r--drivers/gpu/drm/amd/include/navi12_ip_offset.h1119
-rw-r--r--drivers/gpu/drm/amd/include/navi14_ip_offset.h1119
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h1398
-rw-r--r--drivers/gpu/drm/amd/include/soc15_ih_clientid.h11
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h8
-rw-r--r--drivers/gpu/drm/amd/include/vega10_enum.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c105
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1646
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c2315
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.h72
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c196
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c222
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c91
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c68
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c231
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c264
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c157
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h604
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h119
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h16
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h912
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h222
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_types.h268
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h164
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h48
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h95
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h106
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c1144
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.h25
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c931
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.h53
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h213
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c876
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c526
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c28
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c29
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c447
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c36
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c21
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c1
-rw-r--r--drivers/gpu/drm/arc/arcpgu_regs.h2
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_product.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c285
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c121
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h25
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c66
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h10
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c197
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c154
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h55
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c78
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_event.c158
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c19
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c42
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c19
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h27
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c88
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c14
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c12
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c13
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c11
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c29
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h7
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c19
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c11
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c6
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h10
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c10
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c8
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h5
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c3
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c5
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c24
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h3
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c8
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h5
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c3
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c83
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h107
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c346
-rw-r--r--drivers/gpu/drm/ast/ast_main.c110
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c972
-rw-r--r--drivers/gpu/drm/ast/ast_post.c7
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c8
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c30
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c45
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h20
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c19
-rw-r--r--drivers/gpu/drm/bochs/Kconfig2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h7
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c24
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c34
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c3
-rw-r--r--drivers/gpu/drm/bridge/Kconfig29
-rw-r--r--drivers/gpu/drm/bridge/Makefile6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c18
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.h710
-rw-r--r--drivers/gpu/drm/bridge/analogix/Kconfig23
-rw-r--r--drivers/gpu/drm/bridge/analogix/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c817
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c (renamed from drivers/gpu/drm/bridge/analogix-anx78xx.c)258
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h249
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c165
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h256
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h234
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c298
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h2
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c9
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c14
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c151
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c154
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c2
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c4
-rw-r--r--drivers/gpu/drm/bridge/panel.c90
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c4
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c45
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c37
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c11
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c22
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c13
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c81
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c289
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.h52
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c97
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c748
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c49
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c11
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c8
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h247
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c49
-rw-r--r--drivers/gpu/drm/drm_atomic.c43
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c202
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c80
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c27
-rw-r--r--drivers/gpu/drm/drm_blend.c7
-rw-r--r--drivers/gpu/drm/drm_bridge.c280
-rw-r--r--drivers/gpu/drm/drm_cache.c14
-rw-r--r--drivers/gpu/drm/drm_client.c11
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c75
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c40
-rw-r--r--drivers/gpu/drm/drm_connector.c253
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c57
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h3
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c8
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c32
-rw-r--r--drivers/gpu/drm/drm_dma.c2
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c6
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c29
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c253
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c2475
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology_internal.h24
-rw-r--r--drivers/gpu/drm/drm_drv.c36
-rw-r--r--drivers/gpu/drm/drm_dsc.c23
-rw-r--r--drivers/gpu/drm/drm_edid.c498
-rw-r--r--drivers/gpu/drm/drm_edid_load.c2
-rw-r--r--drivers/gpu/drm/drm_encoder.c16
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c1
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c269
-rw-r--r--drivers/gpu/drm/drm_file.c53
-rw-r--r--drivers/gpu/drm/drm_fourcc.c8
-rw-r--r--drivers/gpu/drm/drm_gem.c74
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c79
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c103
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c84
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c862
-rw-r--r--drivers/gpu/drm/drm_hdcp.c77
-rw-r--r--drivers/gpu/drm/drm_internal.h22
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c146
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c2
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c2
-rw-r--r--drivers/gpu/drm/drm_lock.c5
-rw-r--r--drivers/gpu/drm/drm_memory.c3
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c (renamed from drivers/gpu/drm/tinydrm/mipi-dbi.c)514
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c64
-rw-r--r--drivers/gpu/drm/drm_mm.c38
-rw-r--r--drivers/gpu/drm/drm_mode_config.c30
-rw-r--r--drivers/gpu/drm/drm_mode_object.c18
-rw-r--r--drivers/gpu/drm/drm_modes.c260
-rw-r--r--drivers/gpu/drm/drm_of.c121
-rw-r--r--drivers/gpu/drm/drm_panel.c193
-rw-r--r--drivers/gpu/drm/drm_pci.c17
-rw-r--r--drivers/gpu/drm/drm_prime.c866
-rw-r--r--drivers/gpu/drm/drm_print.c70
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c8
-rw-r--r--drivers/gpu/drm/drm_property.c2
-rw-r--r--drivers/gpu/drm/drm_rect.c42
-rw-r--r--drivers/gpu/drm/drm_scatter.c2
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c77
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c3
-rw-r--r--drivers/gpu/drm/drm_syncobj.c139
-rw-r--r--drivers/gpu/drm/drm_sysfs.c43
-rw-r--r--drivers/gpu/drm/drm_trace.h14
-rw-r--r--drivers/gpu/drm/drm_vblank.c85
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/drm_vram_helper_common.c8
-rw-r--r--drivers/gpu/drm/drm_vram_mm_helper.c297
-rw-r--r--drivers/gpu/drm/drm_writeback.c23
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c101
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c58
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c114
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h38
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c67
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.h4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c82
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c67
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c163
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h16
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c167
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.h20
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c288
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c335
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h114
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c48
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c4
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c12
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c9
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c12
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c19
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c10
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c3
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c135
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h15
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c48
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h6
-rw-r--r--drivers/gpu/drm/gma500/gtt.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c23
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c4
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c46
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c23
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c88
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig5
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c24
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c41
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h31
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c238
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c123
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig10
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c359
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c258
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h48
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c5
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c12
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c12
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c21
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c9
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h2
-rw-r--r--drivers/gpu/drm/i915/.gitignore1
-rw-r--r--drivers/gpu/drm/i915/Kconfig18
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug164
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile49
-rw-r--r--drivers/gpu/drm/i915/Kconfig.unstable29
-rw-r--r--drivers/gpu/drm/i915/Makefile150
-rw-r--r--drivers/gpu/drm/i915/Makefile.header-test22
-rw-r--r--drivers/gpu/drm/i915/display/Makefile2
-rw-r--r--drivers/gpu/drm/i915/display/Makefile.header-test16
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c533
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c158
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c221
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c145
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c620
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c1262
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c763
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c195
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c88
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c1632
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c6704
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h314
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c1220
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h70
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h (renamed from drivers/gpu/drm/i915/intel_drv.h)362
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c1319
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c364
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1121
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c339
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c245
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c323
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c262
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h100
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c299
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c526
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c192
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c622
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c369
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c1228
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c563
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h123
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c379
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h18
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c191
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c28
-rw-r--r--drivers/gpu/drm/i915/gem/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gem/Makefile.header-test16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c125
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c65
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c1100
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h74
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h54
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c46
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c211
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c642
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_fence.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c661
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c248
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h144
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c377
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.h25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h70
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c126
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c201
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c177
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c92
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c202
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.h31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c324
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c43
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c91
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c31
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c14
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c730
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c68
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c224
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c1013
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c107
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c927
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c494
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c136
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h15
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c38
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c16
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gt/Makefile.header-test16
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.c36
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.h14
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.c42
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.h39
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.c601
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.h14
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c483
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.h76
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen6.c)0
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen7.c)0
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c724
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.h13
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen8.c)0
-rw-r--r--drivers/gpu/drm/i915/gt/gen9_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen9.c)0
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c116
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c285
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h119
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h332
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c854
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c242
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c213
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.c190
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool_types.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h268
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c299
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c1486
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h84
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c658
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h71
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c456
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.h44
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c311
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h59
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c109
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c225
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.h32
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h119
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c598
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h587
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hangcheck.c347
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c161
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c3463
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h46
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h67
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c426
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c218
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c776
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.h28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6_types.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c (renamed from drivers/gpu/drm/i915/i915_gem_render_state.c)103
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.h (renamed from drivers/gpu/drm/i915/intel_renderstate.h)25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c875
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h83
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c318
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h131
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c (renamed from drivers/gpu/drm/i915/gt/intel_ringbuffer.c)873
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_types.h51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1903
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c (renamed from drivers/gpu/drm/i915/i915_timeline.c)370
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h94
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h100
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c369
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c161
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c443
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.h14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c386
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c374
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c84
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c79
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c703
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c80
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.h14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c2925
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c419
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c203
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.h13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c133
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c (renamed from drivers/gpu/drm/i915/selftests/i915_timeline.c)257
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c466
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.c (renamed from drivers/gpu/drm/i915/selftests/mock_timeline.c)10
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.h17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c (renamed from drivers/gpu/drm/i915/intel_guc.c)502
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h (renamed from drivers/gpu/drm/i915/intel_guc.h)122
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c (renamed from drivers/gpu/drm/i915/intel_guc_ads.c)60
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c (renamed from drivers/gpu/drm/i915/intel_guc_ct.c)347
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h77
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c166
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h (renamed from drivers/gpu/drm/i915/intel_guc_fwif.h)107
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c (renamed from drivers/gpu/drm/i915/intel_guc_log.c)132
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h (renamed from drivers/gpu/drm/i915/intel_guc_log.h)28
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h (renamed from drivers/gpu/drm/i915/intel_guc_reg.h)65
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c682
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c219
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h54
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c43
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h (renamed from drivers/gpu/drm/i915/intel_huc_fw.h)5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c620
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h89
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c604
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h240
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h77
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c182
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c73
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h13
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c52
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c55
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c58
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h15
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c132
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_active.c819
-rw-r--r--drivers/gpu/drm/i915/i915_active.h367
-rw-r--r--drivers/gpu/drm/i915/i915_active_types.h43
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c431
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.h128
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c605
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1312
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1145
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1246
-rw-r--r--drivers/gpu/drm/i915/i915_fixed.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c969
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c140
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c104
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c233
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c3875
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h690
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c173
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c54
-rw-r--r--drivers/gpu/drm/i915/i915_globals.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c1717
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h406
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c2091
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h98
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c77
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.h34
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c74
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_icl.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.h15
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.h15
-rw-r--r--drivers/gpu/drm/i915/i915_params.c17
-rw-r--r--drivers/gpu/drm/i915/i915_params.h7
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c376
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2448
-rw-r--r--drivers/gpu/drm/i915/i915_perf.h60
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h434
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c560
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h10
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h20
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h7
-rw-r--r--drivers/gpu/drm/i915/i915_query.c318
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1374
-rw-r--r--drivers/gpu/drm/i915/i915_request.c775
-rw-r--r--drivers/gpu/drm/i915/i915_request.h175
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.h8
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c95
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h19
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h10
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h33
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c14
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c69
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c99
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.h44
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c67
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c191
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.h14
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h94
-rw-r--r--drivers/gpu/drm/i915/i915_timeline_types.h67
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h70
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c107
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h73
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c68
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h7
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c838
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h248
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h294
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c11
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c314
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h23
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.h104
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c308
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c1458
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h89
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c7
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.h7
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c182
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h65
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c215
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c302
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h143
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c215
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h79
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c4013
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h33
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.c138
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.h16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c33
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c561
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h64
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c357
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h155
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c673
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h74
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c101
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h143
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.c268
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.h18
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bdw.c (renamed from drivers/gpu/drm/i915/i915_oa_bdw.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bdw.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bxt.c (renamed from drivers/gpu/drm/i915/i915_oa_bxt.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bxt.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_cflgt2.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_cflgt3.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_chv.c (renamed from drivers/gpu/drm/i915/i915_oa_chv.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_chv.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cnl.c (renamed from drivers/gpu/drm/i915/i915_oa_cnl.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cnl.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_glk.c (renamed from drivers/gpu/drm/i915/i915_oa_glk.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_glk.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_hsw.c (renamed from drivers/gpu/drm/i915/i915_oa_hsw.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_hsw.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_icl.c (renamed from drivers/gpu/drm/i915/i915_oa_icl.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_icl.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_kblgt2.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_kblgt3.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt2.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt3.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt4.c)35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.c121
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c226
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c724
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c64
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c171
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c505
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h23
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c217
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf_selftests.h19
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c684
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c121
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c29
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_atomic.c47
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_atomic.h41
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_mmap.c39
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_mmap.h19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c38
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.h10
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c74
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.h9
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_wedge_me.h58
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c359
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c621
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c56
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_drm.c73
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_drm.h18
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c88
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c60
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.h21
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_timeline.h15
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h6
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c16
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c13
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c41
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c16
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c8
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c5
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c20
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c121
-rw-r--r--drivers/gpu/drm/lima/Kconfig3
-rw-r--r--drivers/gpu/drm/lima/Makefile4
-rw-r--r--drivers/gpu/drm/lima/lima_device.c46
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c42
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c199
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h32
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c47
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c1
-rw-r--r--drivers/gpu/drm/lima/lima_object.c122
-rw-r--r--drivers/gpu/drm/lima/lima_object.h36
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c46
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h2
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c87
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h4
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c57
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c32
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c496
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi_regs.h22
-rw-r--r--drivers/gpu/drm/mediatek/Makefile5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c177
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c45
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c338
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c128
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c239
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h93
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c180
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c119
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c78
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c265
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c30
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c338
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.h49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c288
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c149
-rw-r--r--drivers/gpu/drm/meson/Makefile1
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c116
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c143
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h47
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c136
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.h12
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.c389
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.h28
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c15
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c259
-rw-r--r--drivers/gpu/drm/meson/meson_rdma.c135
-rw-r--r--drivers/gpu/drm/meson/meson_rdma.h21
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h248
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c83
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h4
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c181
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h2
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c72
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c180
-rw-r--r--drivers/gpu/drm/meson/meson_viu.h19
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c42
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.h3
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c13
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c7
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h27
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c3
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c12
-rw-r--r--drivers/gpu/drm/mga/mga_state.c8
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c4
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig10
-rw-r--r--drivers/gpu/drm/mgag200/Makefile2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c329
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c109
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h81
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c315
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c123
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c76
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig3
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c36
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c33
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c90
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h52
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c34
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c81
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c76
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h16
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c106
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c37
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c130
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c300
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c91
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c120
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c241
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c95
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c173
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c65
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c23
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c61
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c242
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c83
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c52
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c73
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c64
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c20
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c60
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c10
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c51
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c236
-rw-r--r--drivers/gpu/drm/msm/msm_atomic_trace.h110
-rw-r--r--drivers/gpu/drm/msm/msm_atomic_tracepoints.c3
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c11
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c90
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h9
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c2
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c32
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c29
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h7
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c8
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c6
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h108
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c23
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c36
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c64
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.h4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c29
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c54
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c76
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base917c.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c430
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c58
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly917e.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c132
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c72
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c83
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/acr.h152
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/flcn.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/fw.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/ls.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/pmu.h98
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/sec2.h60
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0008.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h77
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h126
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c74
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h116
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c456
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c119
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c188
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h786
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h786
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c311
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c130
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c (renamed from drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c312
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c577
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c436
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c264
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c411
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c470
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c134
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c281
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c253
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c1241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c229
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c418
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c168
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c262
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c252
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h161
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig44
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile6
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c251
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c271
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c262
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c755
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c390
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c513
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c50
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c129
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c29
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c13
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c143
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c31
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c9
-rw-r--r--drivers/gpu/drm/panel/Kconfig107
-rw-r--r--drivers/gpu/drm/panel/Makefile12
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c978
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c21
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c58
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c34
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c48
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c16
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c46
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c531
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c243
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c19
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c71
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c254
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c358
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c32
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c16
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c40
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c67
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c28
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c667
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c31
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c111
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c36
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c9
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c9
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c57
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c37
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c225
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c40
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c637
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c28
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c53
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c550
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c707
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c391
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c515
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c31
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c9
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c398
-rw-r--r--drivers/gpu/drm/panfrost/Makefile1
-rw-r--r--drivers/gpu/drm/panfrost/TODO15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c161
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c36
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h45
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c296
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c243
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h64
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c114
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c113
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c501
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c50
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.h2
-rw-r--r--drivers/gpu/drm/pl111/pl111_debugfs.c4
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c56
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h11
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c19
-rw-r--r--drivers/gpu/drm/pl111/pl111_nomadik.h3
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c9
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.h3
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c1
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c61
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h17
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c52
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c25
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c80
-rw-r--r--drivers/gpu/drm/r128/Makefile2
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c (renamed from drivers/gpu/drm/ati_pcigart.c)15
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.h31
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h3
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c3
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c5
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c5
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/cik.c116
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c4
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/ni.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/r100.c22
-rw-r--r--drivers/gpu/drm/radeon/r200.c6
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c16
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c178
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c302
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c22
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/rv770.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c109
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c3
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig8
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.c217
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.h58
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c81
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c41
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c95
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c363
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig9
-rw-r--r--drivers/gpu/drm/rockchip/Makefile3
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c116
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c29
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h5
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c19
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c184
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c7
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c9
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c18
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c83
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c282
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.h22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c283
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c505
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c59
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c100
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c10
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c117
-rw-r--r--drivers/gpu/drm/selftests/Makefile3
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h12
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h6
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c252
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c242
-rw-r--r--drivers/gpu/drm/selftests/test-drm_framebuffer.c9
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c14
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h9
-rw-r--r--drivers/gpu/drm/selftests/test-drm_rect.c223
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c9
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.h1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_regs.h3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c6
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c13
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c7
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c37
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c28
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c4
-rw-r--r--drivers/gpu/drm/stm/drv.c5
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c28
-rw-r--r--drivers/gpu/drm/stm/ltdc.c65
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c25
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c30
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c45
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c90
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c157
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c59
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c22
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c22
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c11
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c198
-rw-r--r--drivers/gpu/drm/tegra/dc.h2
-rw-r--r--drivers/gpu/drm/tegra/dp.c876
-rw-r--r--drivers/gpu/drm/tegra/dp.h177
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c215
-rw-r--r--drivers/gpu/drm/tegra/drm.c481
-rw-r--r--drivers/gpu/drm/tegra/drm.h18
-rw-r--r--drivers/gpu/drm/tegra/dsi.c185
-rw-r--r--drivers/gpu/drm/tegra/falcon.c64
-rw-r--r--drivers/gpu/drm/tegra/falcon.h16
-rw-r--r--drivers/gpu/drm/tegra/fb.c12
-rw-r--r--drivers/gpu/drm/tegra/gem.c187
-rw-r--r--drivers/gpu/drm/tegra/gem.h6
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c17
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c16
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c130
-rw-r--r--drivers/gpu/drm/tegra/hub.c210
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/output.c46
-rw-r--r--drivers/gpu/drm/tegra/plane.c122
-rw-r--r--drivers/gpu/drm/tegra/plane.h8
-rw-r--r--drivers/gpu/drm/tegra/sor.c1864
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tegra/vic.c148
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c46
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c38
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h33
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c94
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.h1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c20
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c385
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.h15
-rw-r--r--drivers/gpu/drm/tiny/Kconfig (renamed from drivers/gpu/drm/tinydrm/Kconfig)65
-rw-r--r--drivers/gpu/drm/tiny/Makefile (renamed from drivers/gpu/drm/tinydrm/Makefile)6
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c804
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c (renamed from drivers/gpu/drm/tinydrm/hx8357d.c)64
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c (renamed from drivers/gpu/drm/tinydrm/ili9225.c)185
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c (renamed from drivers/gpu/drm/tinydrm/ili9341.c)86
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c (renamed from drivers/gpu/drm/tinydrm/mi0283qt.c)93
-rw-r--r--drivers/gpu/drm/tiny/repaper.c (renamed from drivers/gpu/drm/tinydrm/repaper.c)61
-rw-r--r--drivers/gpu/drm/tiny/st7586.c (renamed from drivers/gpu/drm/tinydrm/st7586.c)134
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c (renamed from drivers/gpu/drm/tinydrm/st7735r.c)81
-rw-r--r--drivers/gpu/drm/tinydrm/core/Makefile4
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c207
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c179
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c379
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c40
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c301
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c77
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c7
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c8
-rw-r--r--drivers/gpu/drm/tve200/tve200_drm.h15
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c14
-rw-r--r--drivers/gpu/drm/udl/Kconfig6
-rw-r--r--drivers/gpu/drm/udl/Makefile2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c33
-rw-r--r--drivers/gpu/drm/udl/udl_connector.h2
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c254
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c54
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h92
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c70
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c528
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c258
-rw-r--r--drivers/gpu/drm/udl/udl_main.c15
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c384
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c16
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c19
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h13
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c76
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig2
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c32
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h39
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c149
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c119
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c138
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_prime.c56
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c3
-rw-r--r--drivers/gpu/drm/vc4/Kconfig8
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h20
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c56
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c30
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c4
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c28
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h1
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c40
-rw-r--r--drivers/gpu/drm/via/via_dma.c43
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c53
-rw-r--r--drivers/gpu/drm/via/via_drv.c7
-rw-r--r--drivers/gpu/drm/via/via_drv.h75
-rw-r--r--drivers/gpu/drm/via/via_irq.c54
-rw-r--r--drivers/gpu/drm/via/via_map.c7
-rw-r--r--drivers/gpu/drm/via/via_mm.c7
-rw-r--r--drivers/gpu/drm/via/via_verifier.c22
-rw-r--r--drivers/gpu/drm/via/via_video.c5
-rw-r--r--drivers/gpu/drm/virtio/Kconfig2
-rw-r--r--drivers/gpu/drm/virtio/Makefile2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c33
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h169
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c191
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c268
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c28
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c264
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c154
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c39
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c304
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c272
-rw-r--r--drivers/gpu/drm/vkms/Makefile2
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c249
-rw-r--r--drivers/gpu/drm/vkms/vkms_crc.c272
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c109
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c67
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h50
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c28
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c46
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h233
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_lock.c100
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_lock.h32
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c280
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h181
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c74
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c41
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c130
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c488
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c254
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c427
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c77
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h21
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c28
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h11
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_cfg.c4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.h7
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_evtchnl.c4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c11
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.h7
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c25
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c8
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c8
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c2
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c4
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c10
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c5
2444 files changed, 390646 insertions, 96464 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3c88420e3497..d0aa6cff2e02 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -24,6 +24,10 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+config DRM_MIPI_DBI
+ tristate
+ depends on DRM
+
config DRM_MIPI_DSI
bool
depends on DRM
@@ -50,6 +54,9 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_EXPORT_FOR_TESTS
+ bool
+
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -57,6 +64,7 @@ config DRM_DEBUG_SELFTEST
select PRIME_NUMBERS
select DRM_LIB_RANDOM
select DRM_KMS_HELPER
+ select DRM_EXPORT_FOR_TESTS if m
default n
help
This option provides kernel modules that can be used to run
@@ -89,6 +97,21 @@ config DRM_KMS_FB_HELPER
help
FBDEV helpers for KMS drivers.
+config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ bool "Enable refcount backtrace history in the DP MST helpers"
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ depends on DRM_KMS_HELPER
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debug tracing for topology refs in DRM's DP MST helpers. A
+ history of each topology reference/dereference will be printed to the
+ kernel log once a port or branch device's topology refcount reaches 0.
+
+ This has the potential to use a lot of memory and print some very
+ large kernel messages. If in doubt, say "N".
+
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
@@ -145,6 +168,7 @@ config DRM_LOAD_EDID_FIRMWARE
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ depends on DRM
select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
@@ -161,13 +185,26 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_TTM_DMA_PAGE_POOL
+ bool
+ depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
+ default y
+ help
+ Choose this if you need the TTM dma page pool
+
config DRM_VRAM_HELPER
tristate
depends on DRM
- select DRM_TTM
help
Helpers for VRAM memory management
+config DRM_TTM_HELPER
+ tristate
+ depends on DRM
+ select DRM_TTM
+ help
+ Helpers for ttm-based gem objects
+
config DRM_GEM_CMA_HELPER
bool
depends on DRM
@@ -222,9 +259,9 @@ config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_SCHED
- select DRM_TTM
+ select DRM_TTM
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -253,6 +290,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select CRC32
default n
help
Virtual Kernel Mode-Setting (VKMS) is used for testing or for
@@ -261,9 +299,6 @@ config DRM_VKMS
If M is selected the module will be called vkms.
-config DRM_ATI_PCIGART
- bool
-
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
@@ -336,7 +371,7 @@ source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
-source "drivers/gpu/drm/tinydrm/Kconfig"
+source "drivers/gpu/drm/tiny/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
@@ -360,7 +395,6 @@ menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
select DRM_VM
- select DRM_ATI_PCIGART if PCI
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
@@ -393,7 +427,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
- # !PREEMPT because of missing ioctl locking
+ # !PREEMPTION because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9f0d2ee35794..6493088a0fdd 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_memory.o drm_drv.o drm_pci.o \
+ drm_memory.o drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -25,18 +25,20 @@ drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
-drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o \
- drm_vram_mm_helper.o
+ drm_vram_helper_common.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
+drm_ttm_helper-y := drm_gem_ttm_helper.o
+obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
+
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
@@ -55,6 +57,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-y += arm/
@@ -62,7 +65,6 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
obj-$(CONFIG_DRM_MGA) += mga/
@@ -111,7 +113,7 @@ obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
-obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
+obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index d968c2471412..13340f353ea8 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,12 +1,12 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
- bool "Enable AMD Audio CoProcessor IP support"
- depends on DRM_AMDGPU
- select MFD_CORE
- select PM_GENERIC_DOMAINS if PM
- help
+ bool "Enable AMD Audio CoProcessor IP support"
+ depends on DRM_AMDGPU
+ select MFD_CORE
+ select PM_GENERIC_DOMAINS if PM
+ help
Choose this option to enable ACP IP support for AMD SOCs.
This adds the ACP (Audio CoProcessor) IP driver and wires
it up into the amdgpu driver. The ACP block provides the DMA
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index f6e5c0282fc1..9375e7f12420 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
@@ -27,7 +27,9 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on HMM_MIRROR
+ depends on MMU
+ select HMM_MIRROR
+ select MMU_NOTIFIER
help
This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
isn't already selected to enabled full userptr support.
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 56e084367b93..c2bbcdd9c875 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_discovery.o
+ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+ amdgpu_umc.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -66,7 +67,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
- vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o
+ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
+ arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
# add DF block
amdgpu-y += \
@@ -77,9 +79,13 @@ amdgpu-y += \
amdgpu-y += \
gmc_v7_0.o \
gmc_v8_0.o \
- gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o \
+ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o
+# add UMC block
+amdgpu-y += \
+ umc_v6_1.o umc_v6_0.o
+
# add IH block
amdgpu-y += \
amdgpu_irq.o \
@@ -95,7 +101,8 @@ amdgpu-y += \
amdgpu_psp.o \
psp_v3_1.o \
psp_v10_0.o \
- psp_v11_0.o
+ psp_v11_0.o \
+ psp_v12_0.o
# add SMC block
amdgpu-y += \
@@ -113,6 +120,7 @@ amdgpu-y += \
amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o \
+ gfx_v9_4.o \
gfx_v10_0.o
# add async DMA block
@@ -140,14 +148,20 @@ amdgpu-y += \
vce_v3_0.o \
vce_v4_0.o
-# add VCN block
+# add VCN and JPEG block
amdgpu-y += \
amdgpu_vcn.o \
vcn_v1_0.o \
- vcn_v2_0.o
+ vcn_v2_0.o \
+ vcn_v2_5.o \
+ amdgpu_jpeg.o \
+ jpeg_v1_0.o \
+ jpeg_v2_0.o \
+ jpeg_v2_5.o
# add ATHUB block
amdgpu-y += \
+ athub_v1_0.o \
athub_v2_0.o
# add amdkfd interfaces
@@ -162,6 +176,7 @@ amdgpu-y += \
amdgpu_amdkfd_gpuvm.o \
amdgpu_amdkfd_gfx_v8.o \
amdgpu_amdkfd_gfx_v9.o \
+ amdgpu_amdkfd_arcturus.o \
amdgpu_amdkfd_gfx_v10.o
ifneq ($(CONFIG_DRM_AMDGPU_CIK),)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8199d201b43a..da3bcff61b97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -69,10 +69,12 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_jpeg.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -86,6 +88,9 @@
#include "amdgpu_smu.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
+#include "amdgpu_umc.h"
+#include "amdgpu_mmhub.h"
+#include "amdgpu_df.h"
#define MAX_GPU_INSTANCE 16
@@ -104,6 +109,8 @@ struct amdgpu_mgpu_info
uint32_t num_apu;
};
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
+
/*
* Modules parameters.
*/
@@ -120,6 +127,7 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
@@ -133,6 +141,7 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
@@ -144,11 +153,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
+extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
@@ -165,6 +170,12 @@ extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+#else
+static const int sched_policy = KFD_SCHED_POLICY_HWS;
+#endif
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -281,6 +292,9 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
+#define HW_REV(_Major, _Minor, _Rev) \
+ ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
+
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
@@ -423,7 +437,6 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -475,7 +488,6 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
uint64_t bytes_moved_vis;
- struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
@@ -532,6 +544,14 @@ struct amdgpu_allowed_register_entry {
bool grbm_indexed;
};
+enum amd_reset_method {
+ AMD_RESET_METHOD_LEGACY = 0,
+ AMD_RESET_METHOD_MODE0,
+ AMD_RESET_METHOD_MODE1,
+ AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_BACO
+};
+
/*
* ASIC specific functions.
*/
@@ -543,6 +563,7 @@ struct amdgpu_asic_funcs {
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
int (*reset)(struct amdgpu_device *adev);
+ enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
/* MM block clocks */
@@ -569,6 +590,8 @@ struct amdgpu_asic_funcs {
bool (*need_reset_on_init)(struct amdgpu_device *adev);
/* PCIe replay counter */
uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
+ /* device supports BACO */
+ bool (*supports_baco)(struct amdgpu_device *adev);
};
/*
@@ -613,6 +636,10 @@ struct amdgpu_fw_vram_usage {
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
+
+ /* GDDR6 training support flag.
+ */
+ bool mem_train_support;
};
/*
@@ -627,91 +654,29 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
+typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
+typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
+
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
-};
-
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
};
-struct amdgpu_nbio_funcs {
- const struct nbio_hdp_flush_reg *hdp_flush_reg;
- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
- u32 (*get_rev_id)(struct amdgpu_device *adev);
- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
- u32 (*get_memsize)(struct amdgpu_device *adev);
- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index, int doorbell_size);
- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index);
- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*ih_doorbell_range)(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*ih_control)(struct amdgpu_device *adev);
- void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
- void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_df_funcs {
- void (*sw_init)(struct amdgpu_device *adev);
- void (*enable_broadcast_mode)(struct amdgpu_device *adev,
- bool enable);
- u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
- u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
- bool enable);
- int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
- int is_enable);
- int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
- int is_disable);
- void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
- uint64_t *count);
-};
/* Define the HW IP blocks will be used in driver , add more if necessary */
enum amd_hw_ip_block_type {
GC_HWIP = 1,
HDP_HWIP,
SDMA0_HWIP,
SDMA1_HWIP,
+ SDMA2_HWIP,
+ SDMA3_HWIP,
+ SDMA4_HWIP,
+ SDMA5_HWIP,
+ SDMA6_HWIP,
+ SDMA7_HWIP,
MMHUB_HWIP,
ATHUB_HWIP,
NBIO_HWIP,
@@ -719,6 +684,7 @@ enum amd_hw_ip_block_type {
MP1_HWIP,
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
+ JPEG_HWIP = VCN_HWIP,
VCE_HWIP,
DF_HWIP,
DCE_HWIP,
@@ -728,10 +694,12 @@ enum amd_hw_ip_block_type {
NBIF_HWIP,
THM_HWIP,
CLK_HWIP,
+ UMC_HWIP,
+ RSMU_HWIP,
MAX_HWIP
};
-#define HWIP_MAX_INSTANCE 6
+#define HWIP_MAX_INSTANCE 8
struct amd_powerplay {
void *pp_handle;
@@ -758,7 +726,6 @@ struct amdgpu_device {
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
- bool need_dma32;
bool need_swiotlb;
bool accel_working;
struct notifier_block acpi_nb;
@@ -783,6 +750,7 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
+ struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -803,6 +771,8 @@ struct amdgpu_device {
amdgpu_wreg_t pcie_wreg;
amdgpu_rreg_t pciep_rreg;
amdgpu_wreg_t pciep_wreg;
+ amdgpu_rreg64_t pcie_rreg64;
+ amdgpu_wreg64_t pcie_wreg64;
/* protects concurrent UVD register access */
spinlock_t uvd_ctx_idx_lock;
amdgpu_rreg_t uvd_ctx_rreg;
@@ -836,6 +806,7 @@ struct amdgpu_device {
dma_addr_t dummy_page_addr;
struct amdgpu_vm_manager vm_manager;
struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
+ unsigned num_vmhubs;
/* memory management */
struct amdgpu_mman mman;
@@ -888,6 +859,12 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -903,6 +880,9 @@ struct amdgpu_device {
/* vcn */
struct amdgpu_vcn vcn;
+ /* jpeg */
+ struct amdgpu_jpeg jpeg;
+
/* firmwares */
struct amdgpu_firmware firmware;
@@ -915,6 +895,9 @@ struct amdgpu_device {
/* KFD */
struct amdgpu_kfd_dev kfd;
+ /* UMC */
+ struct amdgpu_umc umc;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -925,6 +908,9 @@ struct amdgpu_device {
bool enable_mes;
struct amdgpu_mes mes;
+ /* df */
+ struct amdgpu_df df;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -938,9 +924,6 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_nbio_funcs *nbio_funcs;
- const struct amdgpu_df_funcs *df_funcs;
-
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -965,14 +948,15 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
bool in_gpu_reset;
+ enum pp_mp1_state mp1_state;
struct mutex lock_reset;
struct amdgpu_doorbell_index doorbell_index;
+ struct mutex notifier_lock;
+
int asic_reset_res;
struct work_struct xgmi_reset_work;
- bool in_baco_reset;
-
long gfx_timeout;
long sdma_timeout;
long video_timeout;
@@ -980,6 +964,14 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
+
+ /* device pstate */
+ int pstate;
+ /* enable runtime pm on the device */
+ bool runpm;
+
+ bool pm_sysfs_en;
+ bool ucode_sysfs_en;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -994,6 +986,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
@@ -1015,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
+#define AMDGPU_REGS_KIQ (1<<2)
#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
+#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
@@ -1033,6 +1031,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
+#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
+#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -1093,6 +1093,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
*/
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
+#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
@@ -1110,6 +1111,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
+#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+
+#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1125,9 +1129,12 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
-bool amdgpu_device_is_px(struct drm_device *dev);
+bool amdgpu_device_supports_boco(struct drm_device *dev);
+bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_device_baco_enter(struct drm_device *dev);
+int amdgpu_device_baco_exit(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -1165,8 +1172,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index eba42c752bca..12247a32f9ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
u32 val = 0;
u32 count = 0;
struct device *dev;
- struct i2s_platform_data *i2s_pdata;
+ struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
- if (adev->acp.acp_cell == NULL)
- return -ENOMEM;
+ if (adev->acp.acp_cell == NULL) {
+ r = -ENOMEM;
+ goto failure;
+ }
adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
- kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
- return r;
+ goto failure;
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ goto failure;
}
}
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
+
+failure:
+ kfree(i2s_pdata);
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_cell);
+ kfree(adev->acp.acp_genpd);
+ return r;
}
/**
@@ -517,7 +527,7 @@ static int acp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = state == AMD_PG_STATE_GATE ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9fa4f25a3745..8609287620ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -63,40 +63,10 @@ void amdgpu_amdkfd_fini(void)
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
- const struct kfd2kgd_calls *kfd2kgd;
-
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
- break;
-#endif
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
- break;
- case CHIP_NAVI10:
- kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
- break;
- default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
- return;
- }
+ bool vf = amdgpu_sriov_vf(adev);
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->pdev, adev->asic_type, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -160,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
- /* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.sched.ready)
- clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
- adev->gfx.kiq.ring.me - 1,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue),
- gpu_resources.queue_bitmap);
-
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant
*/
@@ -197,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
}
}
@@ -651,11 +613,9 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->switch_power_profile)
- amdgpu_dpm_switch_power_profile(adev,
- PP_SMC_POWER_PROFILE_COMPUTE,
- !idle);
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE,
+ !idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
@@ -668,6 +628,38 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ if (adev->family == AMDGPU_FAMILY_AI) {
+ int i;
+
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+ } else {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
+ }
+
+ return 0;
+}
+
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint32_t flush_type = 0;
+ bool all_hub = false;
+
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20)
+ flush_type = 2;
+
+ if (adev->family == AMDGPU_FAMILY_AI)
+ all_hub = true;
+
+ return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+}
+
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -700,33 +692,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
return 0;
}
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
-{
- return NULL;
-}
-
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g)
+ unsigned int asic_type, bool vf)
{
return NULL;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6076d19e442..47b0f2957d1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -57,7 +57,7 @@ struct kgd_mem {
unsigned int mapped_to_gpu_memory;
uint64_t va;
- uint32_t mapping_flags;
+ uint32_t alloc_flags;
atomic_t invalid;
struct amdkfd_process_info *process_info;
@@ -136,11 +136,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
@@ -178,10 +175,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
+/* Read user wptr from a specified user address space with page fault
+ * disabled. The memory must be pinned and mapped to the hardware when
+ * this is called in hqd_load functions, so it should never fault in
+ * the first place. This resolves a circular lock dependency involving
+ * four locks, including the DQM lock and mmap_sem.
+ */
#define read_user_wptr(mmptr, wptr, dst) \
({ \
bool valid = false; \
if ((mmptr) && (wptr)) { \
+ pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
} else if (current->mm == NULL) { \
@@ -189,6 +193,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
valid = !get_user((dst), (wptr)); \
unuse_mm(mmptr); \
} \
+ pagefault_enable(); \
} \
valid; \
})
@@ -239,8 +244,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g);
+ unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd);
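
The two TLB-flush helpers declared above encode a small per-ASIC policy: the heavier flush type is selected only for XGMI-connected Vega20, and all VM hubs are flushed only on the AMDGPU_FAMILY_AI family. The stand-alone sketch below models just that selection in plain C; the enum values and the device fields are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's ASIC identifiers. */
enum { CHIP_OTHER = 0, CHIP_VEGA20 = 1 };
enum { AMDGPU_FAMILY_OTHER = 0, AMDGPU_FAMILY_AI = 1 };

struct fake_adev {
	int asic_type;
	int family;
	unsigned int xgmi_nodes;	/* stands in for gmc.xgmi.num_physical_nodes */
};

/* Mirrors the policy in amdgpu_amdkfd_flush_gpu_tlb_pasid(): flush_type 2
 * only for XGMI-connected Vega20, all_hub only on the AMDGPU_FAMILY_AI family.
 */
static void pick_flush_args(const struct fake_adev *adev,
			    uint32_t *flush_type, bool *all_hub)
{
	*flush_type = 0;
	*all_hub = false;

	if (adev->xgmi_nodes && adev->asic_type == CHIP_VEGA20)
		*flush_type = 2;
	if (adev->family == AMDGPU_FAMILY_AI)
		*all_hub = true;
}

int main(void)
{
	struct fake_adev vega20 = { CHIP_VEGA20, AMDGPU_FAMILY_AI, 2 };
	uint32_t flush_type;
	bool all_hub;

	pick_flush_args(&vega20, &flush_type, &all_hub);
	printf("flush_type=%u all_hub=%d\n", flush_type, all_hub);
	return 0;
}
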
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
new file mode 100644
index 000000000000..4bcc175a149d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/mmu_context.h>
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "sdma0/sdma0_4_2_2_offset.h"
+#include "sdma0/sdma0_4_2_2_sh_mask.h"
+#include "sdma1/sdma1_4_2_2_offset.h"
+#include "sdma1/sdma1_4_2_2_sh_mask.h"
+#include "sdma2/sdma2_4_2_2_offset.h"
+#include "sdma2/sdma2_4_2_2_sh_mask.h"
+#include "sdma3/sdma3_4_2_2_offset.h"
+#include "sdma3/sdma3_4_2_2_sh_mask.h"
+#include "sdma4/sdma4_4_2_2_offset.h"
+#include "sdma4/sdma4_4_2_2_sh_mask.h"
+#include "sdma5/sdma5_4_2_2_offset.h"
+#include "sdma5/sdma5_4_2_2_sh_mask.h"
+#include "sdma6/sdma6_4_2_2_offset.h"
+#include "sdma6/sdma6_4_2_2_sh_mask.h"
+#include "sdma7/sdma7_4_2_2_offset.h"
+#include "sdma7/sdma7_4_2_2_sh_mask.h"
+#include "v9_structs.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
+
+#define HQD_N_REGS 56
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+ return (struct amdgpu_device *)kgd;
+}
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v9_sdma_mqd *)mqd;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ /* fall through */
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
+ break;
+ case 2:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ break;
+ case 3:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
+ break;
+ case 4:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
+ mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
+ break;
+ case 5:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
+ mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
+ break;
+ case 6:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
+ mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
+ break;
+ case 7:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
+ mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, sdma_rlc_reg_offset);
+
+ return sdma_rlc_reg_offset;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+10)
+
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
+ reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
+ reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int utimeout)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+ vmid);
+ return;
+ }
+
+ mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+const struct kfd2kgd_calls arcturus_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_gfx_v9_address_watch_disable,
+ .address_watch_execute = kgd_gfx_v9_address_watch_execute,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = kgd_gfx_v9_get_tile_config,
+ .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
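
The Arcturus helper above computes each queue's RLC register offset as an engine-specific base plus a fixed per-queue stride. The sketch below reproduces only the shape of that arithmetic with made-up base and stride values; the real values come from SOC15_REG_OFFSET() and the mmSDMAn_RLC0_RB_CNTL register headers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-engine bases and queue stride, for illustration only. */
static const uint32_t engine_base[8] = {
	0x1260, 0x1a60, 0x2260, 0x2a60, 0x3260, 0x3a60, 0x4260, 0x4a60
};
#define QUEUE_STRIDE 0x230	/* stands in for RLC1_RB_CNTL - RLC0_RB_CNTL */

static uint32_t rlc_reg_offset(unsigned int engine_id, unsigned int queue_id)
{
	/* Fall back to engine 0 for an out-of-range id, as the driver does
	 * (the driver additionally warns).
	 */
	if (engine_id >= 8)
		engine_id = 0;
	return engine_base[engine_id] + queue_id * QUEUE_STRIDE;
}

int main(void)
{
	printf("SDMA3 RLC2 offset: 0x%x\n", rlc_reg_offset(3, 2));
	return 0;
}
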
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 0723f800e815..a7b17c8deb00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,19 +19,9 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
@@ -43,6 +33,7 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
+#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -51,63 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-#if 0
-static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
-#endif
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -140,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -204,13 +107,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
lock_srbm(kgd, mec, pipe, queue_id, 0);
}
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
- unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
- queue_id) & 31;
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
- return ((uint32_t)1) << bit;
+ return 1ull << bit;
}
static void release_queue(struct kgd_dev *kgd)
@@ -251,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
ATC_VMID0_PASID_MAPPING__VALID_MASK;
pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
- /*
- * need to do this twice, once for gfx and once for mmhub
- * for ATC add 16 to VMID for mmhub, for IH different registers.
- * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
- */
pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
@@ -307,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
/* On gfx10, mmSDMA1_xxx registers are defined NOT based
@@ -323,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -370,21 +268,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
acquire_queue(kgd, pipe_id, queue_id);
- /* HIQ is set during driver init period with vmid set to 0*/
- if (m->cp_hqd_vmid == 0) {
- uint32_t value, mec, pipe;
-
- mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
- pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
- pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
- mec, pipe, queue_id);
- value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
- value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
- ((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
- }
-
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -434,9 +317,10 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
lower_32_bits((uint64_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uint64_t)wptr));
- pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id));
+ pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
- get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -452,6 +336,59 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
+static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct v10_compute_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ release_queue(kgd);
+
+ return r;
+}
+
static int kgd_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
@@ -489,72 +426,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -564,28 +496,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
- pr_debug("sdma base addr %x\n", sdma_base_addr);
-
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -619,14 +549,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -747,157 +677,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-}
-
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- uint32_t req = (1 << vmid) |
- (0 << GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) |/* legacy */
- GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK |
- GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK |
- GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK |
- GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK |
- GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;
-
- mutex_lock(&adev->srbm_mutex);
-
- /* Use light weight invalidation.
- *
- * TODO 1: agree on the right set of invalidation registers for
- * KFD use. Use the last one for now. Invalidate only GCHUB as
- * SDMA is now moved to GCHUB
- *
- * TODO 2: support range-based invalidation, requires kfg2kgd
- * interface change
- */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ), req);
-
- while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- mutex_unlock(&adev->srbm_mutex);
-}
-
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
-{
- signed long r;
- uint32_t seq;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
- amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
- amdgpu_ring_write(ring,
- PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
- PACKET3_INVALIDATE_TLBS_PASID(pasid));
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
-
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
-}
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- if (amdgpu_emu_mode == 0 && ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid);
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
- if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- write_vmid_invalidate_request(kgd, vmid);
- break;
- }
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return 0;
- }
-
- write_vmid_invalidate_request(kgd, vmid);
- return 0;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -950,7 +775,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -958,18 +782,30 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ /* SDMA is on gfxhub as well for Navi1* series */
+ gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
+
+const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hiq_mqd_load = kgd_hiq_mqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
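
One behavioural change in the gfx v10 file is that get_queue_mask() now computes a 64-bit mask from the absolute queue bit instead of wrapping the bit index at 32 with "& 31". A minimal model of the old and new computations, assuming an 8-queues-per-pipe layout purely for the example:

#include <stdint.h>
#include <stdio.h>

#define QUEUES_PER_PIPE 8	/* assumed value of adev->gfx.mec.num_queue_per_pipe */

/* Old behaviour: the bit index wrapped at 32, so high pipe/queue pairs aliased. */
static uint32_t queue_mask_32(uint32_t pipe, uint32_t queue)
{
	unsigned int bit = (pipe * QUEUES_PER_PIPE + queue) & 31;
	return (uint32_t)1 << bit;
}

/* New behaviour: full 64-bit mask, no wrap-around. */
static uint64_t queue_mask_64(uint32_t pipe, uint32_t queue)
{
	unsigned int bit = pipe * QUEUES_PER_PIPE + queue;
	return 1ull << bit;
}

int main(void)
{
	/* pipe 4, queue 1 -> bit 33: wrapped to bit 1 by the old code. */
	printf("old: 0x%x  new: 0x%llx\n",
	       queue_mask_32(4, 1),
	       (unsigned long long)queue_mask_64(4, 1));
	return 0;
}
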
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5f459bf5f622..8f052e98a3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdma_rlc_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdma_rlc_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
@@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -801,45 +696,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
lower_32_bits(page_table_base));
}
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- unsigned int tmp;
-
- if (adev->in_gpu_reset)
- return -EIO;
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
- (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid\n");
- return 0;
- }
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- return 0;
-}
-
/**
* read_vmid_from_vmfault_reg - read vmid from register
*
@@ -855,3 +711,26 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+
+const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+};
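
Across the gfx v7/v8/v9/v10 back ends this series folds the separate "mapping valid" and "mapping pasid" queries into a single get_atc_vmid_pasid_mapping_info() call that returns the valid bit and writes the PASID through an out parameter. The fragment below shows the intended calling pattern against a stubbed register read; the field masks are placeholders for the ATC_VMID0_PASID_MAPPING fields.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder masks standing in for ATC_VMID0_PASID_MAPPING__PASID_MASK
 * and __VALID_MASK.
 */
#define PASID_MASK 0x0000ffffu
#define VALID_MASK 0x80000000u

/* Stub for RREG32(ATC_VMID0_PASID_MAPPING + vmid). */
static uint32_t read_mapping_reg(uint8_t vmid)
{
	return VALID_MASK | (0x1234u + vmid);
}

static bool get_atc_vmid_pasid_mapping_info(uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value = read_mapping_reg(vmid);

	*p_pasid = value & PASID_MASK;
	return !!(value & VALID_MASK);
}

int main(void)
{
	uint16_t pasid;

	/* One call replaces the old valid/pasid getter pair. */
	if (get_atc_vmid_pasid_mapping_info(3, &pasid))
		printf("vmid 3 maps pasid 0x%x\n", (unsigned int)pasid);
	return 0;
}
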
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6d2f61449606..19a10db93d68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,9 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -44,62 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdmax_rlcx_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -760,41 +657,25 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
lower_32_bits(page_table_base));
}
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- unsigned int tmp;
-
- if (adev->in_gpu_reset)
- return -EIO;
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
- (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return -EINVAL;
- }
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- return 0;
-}
+const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 85395f2d83a6..8562afe5b761 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,17 +19,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -49,75 +42,17 @@
#include "gfxhub_v1_0.h"
-#define V9_PIPE_PER_MEC (4)
-#define V9_QUEUES_PER_PIPE_MEC (8)
-
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
RESET_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -135,39 +70,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -201,13 +103,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
lock_srbm(kgd, mec, pipe, queue_id, 0);
}
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
- unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
- queue_id) & 31;
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
- return ((uint32_t)1) << bit;
+ return 1ull << bit;
}
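/* Editor's illustration, not part of this patch: assuming 8 queues per pipe,
 * pipe 3 / queue 2 maps to bit 26 and pipe 4 / queue 0 to bit 32. The old
 * 32-bit variant masked the bit index with "& 31", so bit 32 aliased back to
 * bit 0; returning a 64-bit mask removes that wrap-around.
 */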
static void release_queue(struct kgd_dev *kgd)
@@ -215,7 +117,7 @@ static void release_queue(struct kgd_dev *kgd)
unlock_srbm(kgd);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
@@ -232,7 +134,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -293,7 +195,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
* but still works
*/
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
@@ -313,22 +215,21 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
};
- uint32_t retval;
-
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -343,7 +244,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm)
@@ -357,21 +258,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
acquire_queue(kgd, pipe_id, queue_id);
- /* HIQ is set during driver init period with vmid set to 0*/
- if (m->cp_hqd_vmid == 0) {
- uint32_t value, mec, pipe;
-
- mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
- pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
- pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
- mec, pipe, queue_id);
- value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
- value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
- ((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
- }
-
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -422,7 +308,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
- get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -438,7 +324,60 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct v9_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ release_queue(kgd);
+
+ return r;
+}
+
+int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
@@ -475,71 +414,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -549,7 +484,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -559,15 +495,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -575,7 +511,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -601,14 +537,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -616,7 +552,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
@@ -671,155 +607,60 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
- uint32_t flush_type)
+int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
- signed long r;
- uint32_t seq;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
- amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
- amdgpu_ring_write(ring,
- PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
- PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
- PACKET3_INVALIDATE_TLBS_PASID(pasid) |
- PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
-
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- uint32_t flush_type = 0;
-
- if (adev->in_gpu_reset)
- return -EIO;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
- if (ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
- if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- flush_type);
- break;
- }
- }
- }
-
return 0;
}
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return 0;
- }
-
- /* Use legacy mode tlb invalidation.
- *
- * Currently on Raven the code below is broken for anything but
- * legacy mode due to a MMHUB power gating problem. A workaround
- * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
- * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
- * bit.
- *
- * TODO 1: agree on the right set of invalidation registers for
- * KFD use. Use the last one for now. Invalidate both GC and
- * MMHUB.
- *
- * TODO 2: support range-based invalidation, requires kfg2kgd
- * interface change
- */
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
- return 0;
-}
-
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
-{
- return 0;
-}
-
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -828,7 +669,7 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
@@ -853,24 +694,15 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid)
-{
- /* No longer needed on GFXv9. The scratch base address is
- * passed to the shader by the CP. It's the user mode driver's
- * responsibility.
- */
-}
-
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -880,11 +712,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
+
+const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_gfx_v9_address_watch_disable,
+ .address_watch_execute = kgd_gfx_v9_address_watch_execute,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = kgd_gfx_v9_get_tile_config,
+ .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
new file mode 100644
index 000000000000..63d3e6683dfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+
+void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases);
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off);
+int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id);
+int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd);
+int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo);
+int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd);
+uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset);
+
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid);
+int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6a5c96e519b1..fa8ac9d19a7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -19,9 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
@@ -33,11 +30,6 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
-/* Special VM and GART address alignment needed for VI pre-Fiji due to
- * a HW bug.
- */
-#define VI_BO_SIZE_ALIGN (0x8000)
-
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -93,7 +85,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Currently, limits are
- * System (TTM + userptr) memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 15/16th System RAM
* TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
@@ -106,18 +98,31 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
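/* Editor's worked example, not part of this patch: assuming si_meminfo
 * reports 64 GB of system RAM,
 *   max_system_mem_limit = 64G - (64G >> 4)        = 60 GB  (15/16 of RAM)
 *   max_ttm_mem_limit    = (64G >> 1) - (64G >> 3) = 24 GB  (3/8 of RAM)
 */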
+/* Estimate page table size needed to represent a given memory size
+ *
+ * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
+ * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
+ * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
+ * for 2MB pages for TLB efficiency. However, small allocations and
+ * fragmented system memory still need some 4KB pages. We choose a
+ * compromise that should work in most cases without reserving too
+ * much memory for page tables unnecessarily (factor 16K, >> 14).
+ */
+#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
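/* Editor's worked example, not part of this patch: for 16 GB of total memory,
 *   ESTIMATE_PT_SIZE(16ULL << 30) = (16ULL << 30) >> 14 = 1 MB
 * is reserved for page tables, versus 32 MB at the pure-4KB-page rate (>> 9)
 * and 64 KB at the pure-2MB-page rate (>> 18).
 */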
+
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
+ uint64_t reserved_for_pt =
+ ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
- uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -218,14 +223,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence *ef)
{
- struct reservation_object *resv = bo->tbo.resv;
- struct reservation_object_list *old, *new;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ struct dma_resv_list *old, *new;
unsigned int i, j, k;
if (!ef)
return -EINVAL;
- old = reservation_object_get_list(resv);
+ old = dma_resv_get_list(resv);
if (!old)
return 0;
@@ -241,7 +246,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct dma_fence *f;
f = rcu_dereference_protected(old->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
@@ -263,7 +268,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct dma_fence *f;
f = rcu_dereference_protected(new->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
dma_fence_put(f);
}
kfree_rcu(old, rcu);
@@ -349,11 +354,44 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_update_directories(adev, vm);
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
return ret;
- return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
+ return amdgpu_sync_fence(sync, vm->last_update, false);
+}
+
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ uint32_t mapping_flags;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev)
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else {
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
+ default:
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+
+ return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
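/* Editor's note, not part of this patch: on Arcturus the mapping above gives
 * local VRAM MTYPE_CC (coherent) or MTYPE_RW, peer-GPU VRAM MTYPE_UC, and
 * GTT/system memory MTYPE_UC or MTYPE_NC, which is also the default used on
 * other ASICs.
 */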
/* add_bo_to_vm - Add a BO to a VM
@@ -404,8 +442,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
}
bo_va_entry->va = va;
- bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
- mem->mapping_flags);
+ bo_va_entry->pte_flags = get_pte_flags(adev, mem);
bo_va_entry->kgd_dev = (void *)adev;
list_add(&bo_va_entry->bo_list, list_bo_va);
@@ -481,8 +518,7 @@ static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
*
* Returns 0 for success, negative errno for errors.
*/
-static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
- uint64_t user_addr)
+static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
@@ -586,7 +622,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@@ -659,7 +695,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@@ -714,7 +750,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+ amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
return 0;
}
@@ -733,7 +769,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -812,7 +848,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
struct amdgpu_bo *pd = peer_vm->root.base.bo;
ret = amdgpu_sync_resv(NULL,
- sync, pd->tbo.resv,
+ sync, pd->tbo.base.resv,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
return ret;
@@ -887,7 +923,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
- ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
+ ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.base.bo,
@@ -1079,10 +1115,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
- int byte_align;
u32 domain, alloc_domain;
u64 alloc_flags;
- uint32_t mapping_flags;
int ret;
/*
@@ -1090,7 +1124,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
*/
if (flags & ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
@@ -1103,7 +1137,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
- user_addr = *offset;
+ user_addr = untagged_addr(*offset);
} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
@@ -1135,25 +1169,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if ((*mem)->aql_queue)
size = size >> 1;
- /* Workaround for TLB bug on older VI chips */
- byte_align = (adev->family == AMDGPU_FAMILY_VI &&
- adev->asic_type != CHIP_FIJI &&
- adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12 &&
- adev->asic_type != CHIP_VEGAM) ?
- VI_BO_SIZE_ALIGN : 1;
-
- mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (flags & ALLOC_MEM_FLAGS_WRITABLE)
- mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
- mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- if (flags & ALLOC_MEM_FLAGS_COHERENT)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- (*mem)->mapping_flags = mapping_flags;
+ (*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
@@ -1168,7 +1184,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
memset(&bp, 0, sizeof(bp));
bp.size = size;
- bp.byte_align = byte_align;
+ bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
@@ -1195,7 +1211,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- ret = init_user_pages(*mem, current->mm, user_addr);
+ ret = init_user_pages(*mem, user_addr);
if (ret)
goto allocate_init_user_pages_failed;
}
@@ -1626,9 +1642,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->mapping_flags =
- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+ (*mem)->alloc_flags =
+ ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
+ ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -1657,10 +1674,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
struct mm_struct *mm)
{
struct amdkfd_process_info *process_info = mem->process_info;
- int invalid, evicted_bos;
+ int evicted_bos;
int r = 0;
- invalid = atomic_inc_return(&mem->invalid);
+ atomic_inc(&mem->invalid);
evicted_bos = atomic_inc_return(&process_info->evicted_bos);
if (evicted_bos == 1) {
/* First eviction, stop the queues */
@@ -1739,6 +1756,10 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
return ret;
}
+ /*
+ * FIXME: Cannot ignore the return code, must hold
+ * notifier_lock
+ */
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
/* Mark the BO as valid unless it was invalidated
@@ -1797,8 +1818,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
- true);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -1996,7 +2016,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save, true);
+ false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
@@ -2028,7 +2048,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
- ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
@@ -2109,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
return -ENOMEM;
mutex_init(&(*mem)->lock);
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
(*mem)->process_info = process_info;
@@ -2133,7 +2154,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
* Add process eviction fence to bo so they can
* evict each other.
*/
- ret = reservation_object_reserve_shared(gws_bo->tbo.resv, 1);
+ ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1c9d40f97a9b..fdd52d86a4d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
-
- con_obj_id =
+ uint8_t con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
>> OBJECT_ID_SHIFT;
- con_obj_num =
- (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
- >> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* Skip TV/CV support */
if ((le16_to_cpu(path->usDeviceTag) ==
@@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
@@ -2038,6 +2022,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_info(adev);
+ if (ret) {
+ DRM_ERROR("Failed to get mem train fb location.\n");
+ return ret;
+ }
} else {
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index daf687428cdb..58f9d8c3a17a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -27,6 +27,7 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
+#include "soc15_hw_ip.h"
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
@@ -120,65 +121,14 @@ union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
};
-/*
- * Return vram width from integrated system info table, if available,
- * or 0 if not.
- */
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
- u16 data_offset, size;
- union igp_info *igp_info;
- union vram_info *vram_info;
- u32 mem_channel_number;
- u32 mem_channel_width;
- u8 frev, crev;
- if (adev->flags & AMD_IS_APU)
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- integratedsysteminfo);
- else
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- vram_info);
-
- /* get any igp specific overrides */
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- return mem_channel_number * 64;
- default:
- return 0;
- }
- } else {
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 3:
- mem_channel_number = vram_info->v23.vram_module[0].channel_num;
- mem_channel_width = vram_info->v23.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- case 4:
- mem_channel_number = vram_info->v24.vram_module[0].channel_num;
- mem_channel_width = vram_info->v24.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- default:
- return 0;
- }
- }
- }
-
- return 0;
-}
+union vram_module {
+ struct atom_vram_module_v9 v9;
+ struct atom_vram_module_v10 v10;
+};
-static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
- int atom_mem_type)
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
{
int vram_type;
@@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
return vram_type;
}
-/*
- * Return vram type from either integrated system info table
- * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
- */
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+
+
+int
+amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
+ int index, i = 0;
u16 data_offset, size;
union igp_info *igp_info;
union vram_info *vram_info;
+ union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
if (adev->flags & AMD_IS_APU)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
else
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
@@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
- return 0;
+ return -EINVAL;
}
} else {
vram_info = (union vram_info *)
(mode_info->atom_context->bios + data_offset);
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
switch (crev) {
case 3:
- mem_type = vram_info->v23.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
case 4:
- mem_type = vram_info->v24.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
- return 0;
+ return -EINVAL;
}
}
+
}
return 0;
@@ -464,3 +463,108 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
}
return -EINVAL;
}
+
+/*
+ * Check if VBIOS supports GDDR6 training data save/restore
+ */
+static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+{
+ uint16_t data_offset;
+ int index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+ NULL, NULL, &data_offset)) {
+ struct atom_firmware_info_v3_1 *firmware_info =
+ (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+ data_offset);
+
+ DRM_DEBUG("atom firmware capability:0x%08x.\n",
+ le32_to_cpu(firmware_info->firmware_capability));
+
+ if (le32_to_cpu(firmware_info->firmware_capability) &
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
+ return true;
+ }
+
+ return false;
+}
+
+static int gddr6_mem_train_support(struct amdgpu_device *adev)
+{
+ int ret;
+ uint32_t major, minor, revision, hw_v;
+
+ if (gddr6_mem_train_vbios_support(adev)) {
+ amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
+ hw_v = HW_REV(major, minor, revision);
+ /*
+		 * Treat revision 0 as a special case: the MP0 and MMHUB registers are
+		 * missing on some Navi10 A0 parts, so the driver cannot discover the
+		 * hwip information and none of these functions will be initialized.
+		 * This should not cause any problems.
+ */
+ switch (hw_v) {
+ case HW_REV(11, 0, 0):
+ case HW_REV(11, 0, 5):
+ ret = 1;
+ break;
+ default:
+			DRM_ERROR("memory training is supported by vbios but not by psp hw(%08x)!\n",
+				  hw_v);
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = 0;
+ hw_v = -1;
+ }
+
+
+ DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
+ return ret;
+}
+
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ int index;
+ uint8_t frev, crev;
+ uint16_t data_offset, size;
+ int ret;
+
+ adev->fw_vram_usage.mem_train_support = false;
+
+ if (adev->asic_type != CHIP_NAVI10 &&
+ adev->asic_type != CHIP_NAVI14)
+ return 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = gddr6_mem_train_support(adev);
+ if (ret == -1)
+ return -EINVAL;
+ else if (ret == 0)
+ return 0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ vram_usagebyfirmware);
+ ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset);
+ if (ret == 0) {
+ DRM_ERROR("parse data header failed.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
+ " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
+ /* only support 2.1+ */
+ if (((uint16_t)frev << 8 | crev) < 0x0201) {
+ DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
+ return -EINVAL;
+ }
+
+ adev->fw_vram_usage.mem_train_support = true;
+ return 0;
+}
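
For reference, the 2.1 gate in amdgpu_atomfirmware_get_mem_train_info() packs the table's format and content revisions into one 16-bit value before comparing against 0x0201. A minimal standalone sketch of that check; the helper name below is illustrative, not part of the driver:

/* Illustrative only: mirrors the ((frev << 8) | crev) >= 0x0201 comparison above. */
#include <stdbool.h>
#include <stdint.h>

static bool atom_table_at_least(uint8_t frev, uint8_t crev,
				uint8_t min_frev, uint8_t min_crev)
{
	uint16_t have = ((uint16_t)frev << 8) | crev;
	uint16_t want = ((uint16_t)min_frev << 8) | min_crev;

	return have >= want;	/* e.g. atom_table_at_least(frev, crev, 2, 1) */
}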
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 5ec6f92f353c..434fe2fa0089 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -29,8 +29,9 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 9b384a94d2f3..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 649e68c4479b..d1495e1c9289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int i, r;
start_jiffies = jiffies;
@@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (r)
goto exit_do_move;
- dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
- if (fence)
- dma_fence_put(fence);
return r;
}
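
The benchmark hunk above moves dma_fence_put() to immediately after dma_fence_wait(), so the reference is dropped on both the success and error paths; that is what allows the conditional put at the exit label to be removed. A minimal sketch of the resulting pattern, with an illustrative helper name:

/* Illustrative: drop the fence reference regardless of the wait result. */
#include <linux/dma-fence.h>

static int wait_and_release(struct dma_fence *fence)
{
	int r;

	r = dma_fence_wait(fence, false);	/* 0 on success, negative on error */
	dma_fence_put(fence);			/* always balance the reference */
	return r;
}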
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 7bcf86c61999..85b0515c0fdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
return 0;
error_free:
- while (i--) {
+ for (i = 0; i < last_entry; ++i) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+ amdgpu_bo_unref(&bo);
+ }
+ for (i = first_userptr; i < num_entries; ++i) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
amdgpu_bo_unref(&bo);
@@ -270,7 +275,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = amdgpu_bo_create_list_entry_array(&args->in, &info);
if (r)
- goto error_free;
+ return r;
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
@@ -283,8 +288,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
- amdgpu_bo_list_put(list);
- return r;
+ goto error_put_list;
}
handle = r;
@@ -306,9 +310,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&fpriv->bo_list_lock);
if (IS_ERR(old)) {
- amdgpu_bo_list_put(list);
r = PTR_ERR(old);
- goto error_free;
+ goto error_put_list;
}
amdgpu_bo_list_put(old);
@@ -325,8 +328,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
+error_put_list:
+ amdgpu_bo_list_put(list);
+
error_free:
- if (info)
- kvfree(info);
+ kvfree(info);
return r;
}
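
The bo_list hunks consolidate the error handling into goto labels and rely on kvfree(NULL) being a no-op. A rough sketch of the resulting unwind shape, with stub helpers standing in for the amdgpu calls (all names below are hypothetical):

/* Illustrative only; make_thing()/put_thing()/register_thing() are stand-ins. */
#include <linux/errno.h>
#include <linux/slab.h>

struct thing;
struct thing *make_thing(void);
void put_thing(struct thing *t);
int register_thing(struct thing *t);

static int setup_thing(void)
{
	void *info = kvmalloc(64, GFP_KERNEL);
	struct thing *t;
	int r;

	if (!info)
		return -ENOMEM;

	t = make_thing();
	if (!t) {
		r = -ENOMEM;
		goto error_free;
	}

	r = register_thing(t);
	if (r)
		goto error_put_thing;

	kvfree(info);
	return 0;

error_put_thing:
	put_thing(t);		/* mirrors amdgpu_bo_list_put(list) */
error_free:
	kvfree(info);		/* kvfree(NULL) is safe, hence no NULL check */
	return r;
}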
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 73b2ede773d3..a62cbc8199de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -217,11 +217,10 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -236,9 +235,8 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -347,10 +345,9 @@ static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1022,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
*/
if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *list_amdgpu_connector;
- list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
if (connector == list_connector)
continue;
list_amdgpu_connector = to_amdgpu_connector(list_connector);
@@ -1040,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
}
}
@@ -1065,9 +1067,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1117,9 +1118,8 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1271,9 +1271,8 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1292,10 +1291,9 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1501,10 +1499,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
@@ -1514,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
return;
/* see if we already added it */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
+ drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1532,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
}
+ drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -1574,17 +1577,21 @@ amdgpu_connector_add(struct amdgpu_device *adev,
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_dp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
connector->interlace_allowed = true;
@@ -1602,8 +1609,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_dp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1644,8 +1653,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_edp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_edp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1659,13 +1670,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_vga_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
@@ -1679,13 +1695,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_vga_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
@@ -1704,13 +1725,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dvi_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
@@ -1754,13 +1780,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dvi_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
1);
@@ -1796,15 +1827,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
@@ -1838,15 +1874,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_edp_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1859,13 +1900,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_lvds_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
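
The connector hunks above replace open-coded walks of mode_config.connector_list with the drm_connector_list_iter API, which handles its own locking and must always be closed with the _end() call. A minimal usage sketch; the function name is illustrative:

/* Illustrative: iterate connectors with the list-iter API used above. */
#include <drm/drm_connector.h>
#include <drm/drm_device.h>

static int count_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int n = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		n++;
	drm_connector_list_iter_end(&iter);	/* must always pair with _begin() */

	return n;
}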
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e4094f842e7..a52a084158b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
+#include "amdgpu_ras.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -402,7 +403,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .resv = bo->tbo.resv,
+ .resv = bo->tbo.base.resv,
.flags = 0
};
uint32_t domain;
@@ -449,75 +450,12 @@ retry:
return r;
}
-/* Last resort, try to evict something from the current working set */
-static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *validated)
-{
- uint32_t domain = validated->allowed_domains;
- struct ttm_operation_ctx ctx = { true, false };
- int r;
-
- if (!p->evictable)
- return false;
-
- for (;&p->evictable->tv.head != &p->validated;
- p->evictable = list_prev_entry(p->evictable, tv.head)) {
-
- struct amdgpu_bo_list_entry *candidate = p->evictable;
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool update_bytes_moved_vis;
- uint32_t other;
-
- /* If we reached our current BO we can forget it */
- if (bo == validated)
- break;
-
- /* We can't move pinned BOs here */
- if (bo->pin_count)
- continue;
-
- other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this BO is in one of the domains we need space for */
- if (!(other & domain))
- continue;
-
- /* Check if we can move this BO somewhere else */
- other = bo->allowed_domains & ~domain;
- if (!other)
- continue;
-
- /* Good we can try to move this BO somewhere else */
- update_bytes_moved_vis =
- !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_bo_placement_from_domain(bo, other);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- p->bytes_moved += ctx.bytes_moved;
- if (update_bytes_moved_vis)
- p->bytes_moved_vis += ctx.bytes_moved;
-
- if (unlikely(r))
- break;
-
- p->evictable = list_prev_entry(p->evictable, tv.head);
- list_move(&candidate->tv.head, &p->validated);
-
- return true;
- }
-
- return false;
-}
-
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -536,7 +474,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
- bool binding_userptr = false;
struct mm_struct *usermm;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,20 +490,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
- binding_userptr = true;
}
- if (p->evictable == lobj)
- p->evictable = NULL;
-
r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (binding_userptr) {
- kvfree(lobj->user_pages);
- lobj->user_pages = NULL;
- }
+ kvfree(lobj->user_pages);
+ lobj->user_pages = NULL;
}
return 0;
}
@@ -607,8 +538,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->tv.num_shared = 2;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -650,7 +579,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, false);
+ &duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -661,9 +590,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- p->evictable = list_last_entry(&p->validated,
- struct amdgpu_bo_list_entry,
- tv.head);
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
@@ -730,7 +656,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- struct reservation_object *resv = bo->tbo.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
amdgpu_bo_explicit_sync(bo));
@@ -869,29 +795,23 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync,
- fpriv->prt_va->last_pt_update, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
- struct dma_fence *f;
-
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct dma_fence *f;
-
/* ignore duplicates */
bo = ttm_to_amdgpu_bo(e->tv.bo);
if (!bo)
@@ -905,8 +825,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -915,11 +834,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
if (r)
return r;
@@ -990,6 +909,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (parser->entity && parser->entity != entity)
return -EINVAL;
+ /* Return if there is no run queue associated with this entity.
+ * This can happen when the HW IP is disabled. */
+ if (entity->rq == NULL)
+ return -EINVAL;
+
parser->entity = entity;
ring = to_amdgpu_ring(entity->rq->sched);
@@ -1061,7 +985,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ r = amdgpu_sync_fence(&p->job->sync, fence, true);
dma_fence_put(fence);
if (r)
return r;
@@ -1083,7 +1007,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
return r;
}
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ r = amdgpu_sync_fence(&p->job->sync, fence, true);
dma_fence_put(fence);
return r;
@@ -1143,6 +1067,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
+ if (p->post_deps)
+ return -EINVAL;
+
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
@@ -1166,8 +1093,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk
- *chunk)
+ struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
@@ -1177,6 +1103,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ if (p->post_deps)
+ return -EINVAL;
+
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
@@ -1286,11 +1215,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
- /* No memory allocation is allowed while holding the mn lock.
- * p->mn is hold until amdgpu_cs_submit is finished and fence is added
- * to BOs.
+ /* No memory allocation is allowed while holding the notifier lock.
+ * The lock is held until amdgpu_cs_submit is finished and fence is
+ * added to BOs.
*/
- amdgpu_mn_lock(p->mn);
+ mutex_lock(&p->adev->notifier_lock);
/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
* -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
@@ -1305,7 +1234,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
goto error_abort;
}
- job->owner = p->filp;
p->fence = dma_fence_get(&job->base.s_fence->finished);
amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
@@ -1333,13 +1261,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
- amdgpu_mn_unlock(p->mn);
+ mutex_unlock(&p->adev->notifier_lock);
return 0;
error_abort:
drm_sched_job_cleanup(&job->base);
- amdgpu_mn_unlock(p->mn);
+ mutex_unlock(&p->adev->notifier_lock);
error_unlock:
amdgpu_job_free(job);
@@ -1354,6 +1282,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
bool reserved_buffers = false;
int i, r;
+ if (amdgpu_ras_intr_triggered())
+ return -EHWPOISON;
+
if (!adev->accel_working)
return -EBUSY;
@@ -1727,7 +1658,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
*map = mapping;
/* Double check that the BO is reserved by this CS */
- if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+ if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
return -EINVAL;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 35a8d3c96fc9..08047bc4d588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..94a6c42f29ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
-static int amdgput_ctx_total_num_entities(void)
-{
- unsigned i, num_entities = 0;
-
- for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
- num_entities += amdgpu_ctx_num_entities[i];
-
- return num_entities;
-}
-
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum drm_sched_priority priority)
{
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
/* NORMAL and below are accessible by everyone */
if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
@@ -68,47 +61,94 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_ctx_entity *entity;
+ struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
+ unsigned num_scheds = 0;
+ enum drm_sched_priority priority;
+ int r;
+
+ entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
+ GFP_KERNEL);
+ if (!entity)
+ return -ENOMEM;
+
+ entity->sequence = 1;
+ priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+ switch (hw_ip) {
+ case AMDGPU_HW_IP_GFX:
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ scheds = adev->gfx.compute_sched;
+ num_scheds = adev->gfx.num_compute_sched;
+ break;
+ case AMDGPU_HW_IP_DMA:
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ scheds = adev->vcn.vcn_dec_sched;
+ num_scheds = adev->vcn.num_vcn_dec_sched;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ scheds = adev->vcn.vcn_enc_sched;
+ num_scheds = adev->vcn.num_vcn_enc_sched;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
+ break;
+ }
+
+ r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+ &ctx->guilty);
+ if (r)
+ goto error_free_entity;
+
+ ctx->entities[hw_ip][ring] = entity;
+ return 0;
+
+error_free_entity:
+ kfree(entity);
+
+ return r;
+}
+
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
- unsigned num_entities = amdgput_ctx_total_num_entities();
- unsigned i, j;
int r;
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
- return -EINVAL;
-
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
memset(ctx, 0, sizeof(*ctx));
- ctx->adev = adev;
-
- ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
- sizeof(struct dma_fence*), GFP_KERNEL);
- if (!ctx->fences)
- return -ENOMEM;
-
- ctx->entities[0] = kcalloc(num_entities,
- sizeof(struct amdgpu_ctx_entity),
- GFP_KERNEL);
- if (!ctx->entities[0]) {
- r = -ENOMEM;
- goto error_free_fences;
- }
-
- for (i = 0; i < num_entities; ++i) {
- struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
- entity->sequence = 1;
- entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
- }
- for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
- ctx->entities[i] = ctx->entities[i - 1] +
- amdgpu_ctx_num_entities[i - 1];
+ ctx->adev = adev;
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
@@ -120,104 +160,49 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
- for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
- struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
- struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
- unsigned num_rings;
- unsigned num_rqs = 0;
-
- switch (i) {
- case AMDGPU_HW_IP_GFX:
- rings[0] = &adev->gfx.gfx_ring[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- for (j = 0; j < adev->gfx.num_compute_rings; ++j)
- rings[j] = &adev->gfx.compute_ring[j];
- num_rings = adev->gfx.num_compute_rings;
- break;
- case AMDGPU_HW_IP_DMA:
- for (j = 0; j < adev->sdma.num_instances; ++j)
- rings[j] = &adev->sdma.instance[j].ring;
- num_rings = adev->sdma.num_instances;
- break;
- case AMDGPU_HW_IP_UVD:
- rings[0] = &adev->uvd.inst[0].ring;
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- rings[0] = &adev->vce.ring[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- rings[0] = &adev->uvd.inst[0].ring_enc[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- rings[0] = &adev->vcn.ring_dec;
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- rings[0] = &adev->vcn.ring_enc[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- rings[0] = &adev->vcn.ring_jpeg;
- num_rings = 1;
- break;
- }
+ return 0;
- for (j = 0; j < num_rings; ++j) {
- if (!rings[j]->adev)
- continue;
+}
- rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
- }
+static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
- for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
- r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rqs, &ctx->guilty);
- if (r)
- goto error_cleanup_entities;
- }
+ int i;
- return 0;
+ if (!entity)
+ return;
-error_cleanup_entities:
- for (i = 0; i < num_entities; ++i)
- drm_sched_entity_destroy(&ctx->entities[0][i].entity);
- kfree(ctx->entities[0]);
+ for (i = 0; i < amdgpu_sched_jobs; ++i)
+ dma_fence_put(entity->fences[i]);
-error_free_fences:
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
+ kfree(entity);
}
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
- unsigned num_entities = amdgput_ctx_total_num_entities();
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
if (!adev)
return;
- for (i = 0; i < num_entities; ++i)
- for (j = 0; j < amdgpu_sched_jobs; ++j)
- dma_fence_put(ctx->entities[0][i].fences[j]);
- kfree(ctx->fences);
- kfree(ctx->entities[0]);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
+ amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+ ctx->entities[i][j] = NULL;
+ }
+ }
mutex_destroy(&ctx->lock);
-
kfree(ctx);
}
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
+ int r;
+
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
return -EINVAL;
@@ -234,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
return -EINVAL;
}
- *entity = &ctx->entities[hw_ip][ring].entity;
+ if (ctx->entities[hw_ip][ring] == NULL) {
+ r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
+ if (r)
+ return r;
+ }
+
+ *entity = &ctx->entities[hw_ip][ring]->entity;
return 0;
}
@@ -274,17 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
- unsigned num_entities;
- u32 i;
+ u32 i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ if (!ctx->entities[i][j])
+ continue;
- num_entities = 0;
- for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
- num_entities += amdgpu_ctx_num_entities[i];
-
- for (i = 0; i < num_entities; i++)
- drm_sched_entity_destroy(&ctx->entities[0][i].entity);
+ drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
+ }
+ }
amdgpu_ctx_fini(ref);
}
@@ -344,7 +335,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
- uint32_t ras_counter;
+ unsigned long ras_counter;
if (!fpriv)
return -EINVAL;
@@ -514,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
- unsigned num_entities = amdgput_ctx_total_num_entities();
enum drm_sched_priority ctx_prio;
- unsigned i;
+ unsigned i, j;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
- for (i = 0; i < num_entities; i++) {
- struct drm_sched_entity *entity = &ctx->entities[0][i].entity;
+ if (!ctx->entities[i][j])
+ continue;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_set_priority(entity, ctx_prio);
+ }
}
}
@@ -534,21 +529,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
- unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
- struct dma_fence *other = centity->fences[idx];
+ struct dma_fence *other;
+ unsigned idx;
+ long r;
- if (other) {
- signed long r;
- r = dma_fence_wait(other, true);
- if (r < 0) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ spin_lock(&ctx->ring_lock);
+ idx = centity->sequence & (amdgpu_sched_jobs - 1);
+ other = dma_fence_get(centity->fences[idx]);
+ spin_unlock(&ctx->ring_lock);
- return r;
- }
- }
+ if (!other)
+ return 0;
- return 0;
+ r = dma_fence_wait(other, true);
+ if (r < 0 && r != -ERESTARTSYS)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
+ dma_fence_put(other);
+ return r;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
@@ -559,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
- unsigned num_entities = amdgput_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id, i;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- for (i = 0; i < num_entities; i++) {
- struct drm_sched_entity *entity;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
- entity = &ctx->entities[0][i].entity;
- timeout = drm_sched_entity_flush(entity, timeout);
+ entity = &ctx->entities[i][j]->entity;
+ timeout = drm_sched_entity_flush(entity, timeout);
+ }
}
}
mutex_unlock(&mgr->lock);
@@ -581,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
- unsigned num_entities = amdgput_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id, i;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
@@ -594,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
}
- for (i = 0; i < num_entities; i++)
- drm_sched_entity_fini(&ctx->entities[0][i].entity);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
+
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_fini(entity);
+ }
+ }
}
}
@@ -617,3 +627,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
+
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+ adev->gfx.num_gfx_sched++;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
+ adev->gfx.num_compute_sched++;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+ adev->sdma.num_sdma_sched++;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+ &adev->vcn.inst[i].ring_dec.sched;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+ adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+ &adev->vcn.inst[i].ring_enc[j].sched;
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+ adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+ &adev->jpeg.inst[i].ring_dec.sched;
+ }
+}
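
amdgpu_ctx_init_entity() above sizes the entity with offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), i.e. one allocation covering the header plus a trailing fence array. A standalone sketch of that flexible-array allocation; the struct and helper names are illustrative:

/* Illustrative: header plus N trailing pointers in a single allocation. */
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example_entity {
	u64 sequence;
	struct dma_fence *fences[];	/* flexible array member, sized at alloc time */
};

static struct example_entity *example_entity_alloc(unsigned int nfences)
{
	/* offsetof(..., fences[n]) == sizeof(header) + n * sizeof(fences[0]) */
	return kzalloc(offsetof(struct example_entity, fences[nfences]),
		       GFP_KERNEL);
}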
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 5f1b54c9bcdb..de490f183af2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -29,10 +29,12 @@ struct drm_device;
struct drm_file;
struct amdgpu_fpriv;
+#define AMDGPU_MAX_ENTITY_NUM 4
+
struct amdgpu_ctx_entity {
uint64_t sequence;
- struct dma_fence **fences;
struct drm_sched_entity entity;
+ struct dma_fence *fences[];
};
struct amdgpu_ctx {
@@ -42,15 +44,14 @@ struct amdgpu_ctx {
unsigned reset_counter_query;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
- struct dma_fence **fences;
- struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM];
+ struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
enum drm_sched_priority init_priority;
enum drm_sched_priority override_priority;
struct mutex lock;
atomic_t guilty;
- uint32_t ras_counter_ce;
- uint32_t ras_counter_ue;
+ unsigned long ras_counter_ce;
+ unsigned long ras_counter_ue;
};
struct amdgpu_ctx_mgr {
@@ -87,4 +88,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
+
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5652cc72ed3a..f24ed9a1a3e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -26,6 +26,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_debugfs.h>
@@ -129,7 +130,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
sh_bank = 0xFFFFFFFF;
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
- use_bank = 1;
+ use_bank = true;
} else if (*pos & (1ULL << 61)) {
me = (*pos & GENMASK_ULL(33, 24)) >> 24;
@@ -137,17 +138,24 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
- use_ring = 1;
+ use_ring = true;
} else {
- use_bank = use_ring = 0;
+ use_bank = use_ring = false;
}
*pos &= (1UL << 22) - 1;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
@@ -193,6 +201,9 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -237,13 +248,20 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -251,6 +269,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -276,12 +297,19 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_PCIE(*pos >> 2, value);
@@ -291,6 +319,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -316,13 +347,20 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -330,6 +368,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -355,12 +396,19 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_DIDT(*pos >> 2, value);
@@ -370,6 +418,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -395,13 +446,20 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -409,6 +467,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -434,12 +495,19 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_SMC(*pos, value);
@@ -449,6 +517,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -572,7 +643,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
idx = *pos >> 2;
valuesize = sizeof(values);
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -633,6 +713,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -644,6 +728,9 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (!x)
return -EINVAL;
@@ -711,6 +798,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (!data)
return -ENOMEM;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -726,6 +817,9 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
while (size) {
uint32_t value;
@@ -859,6 +953,13 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
int r = 0, i;
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
+
+ /* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* hold on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -884,6 +985,11 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
kthread_unpark(ring->sched.thread);
}
+ mutex_unlock(&adev->lock_reset);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -902,8 +1008,17 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -912,8 +1027,17 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -1036,6 +1160,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
if (!fences)
return -ENOMEM;
+ /* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* stop the scheduler */
kthread_park(ring->sched.thread);
@@ -1075,10 +1202,11 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
+ mutex_unlock(&adev->lock_reset);
+
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (fences)
- kfree(fences);
+ kfree(fences);
return 0;
}
@@ -1090,8 +1218,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev->ddev->primary->debugfs_root,
- (void *)adev, &fops_ib_preempt);
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_ib_preempt);
if (!(adev->debugfs_preempt)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO;
@@ -1103,8 +1231,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
{
- if (adev->debugfs_preempt)
- debugfs_remove(adev->debugfs_preempt);
+ debugfs_remove(adev->debugfs_preempt);
}
#else
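
Every debugfs entry point above gains the same runtime-PM bracket: take a reference before touching registers, then mark-busy and put with autosuspend on every exit path. Reduced to its essentials (the function name is illustrative, and the error path mirrors the hunks above):

/* Illustrative: the get/mark_last_busy/put_autosuspend bracket used above. */
#include <linux/pm_runtime.h>

static int do_hw_access(struct device *dev)
{
	int r;

	r = pm_runtime_get_sync(dev);		/* resume the device if needed */
	if (r < 0)
		return r;

	/* ... register access goes here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference */
	return 0;
}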
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5a7f893cf724..39cd545976b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,16 +65,23 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include <linux/suspend.h>
+#include <drm/task_barrier.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static const char *amdgpu_asic_name[] = {
+const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
@@ -98,7 +105,11 @@ static const char *amdgpu_asic_name[] = {
"VEGA12",
"VEGA20",
"RAVEN",
+ "ARCTURUS",
+ "RENOIR",
"NAVI10",
+ "NAVI14",
+ "NAVI12",
"LAST",
};
@@ -127,14 +138,14 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
- * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
+ * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with HG/PX power control,
* otherwise return false.
*/
-bool amdgpu_device_is_px(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -143,6 +154,51 @@ bool amdgpu_device_is_px(struct drm_device *dev)
return false;
}
+/**
+ * amdgpu_device_supports_baco - Does the device support BACO
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device supports BACO,
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_baco(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ return amdgpu_asic_supports_baco(adev);
+}
+
+/**
+ * VRAM access helper functions.
+ *
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write)
+{
+ uint64_t last;
+ unsigned long flags;
+
+ last = size - 4;
+ for (last += pos; pos <= last; pos += 4) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (write)
+ WREG32_NO_KIQ(mmMM_DATA, *buf++);
+ else
+ *buf++ = RREG32_NO_KIQ(mmMM_DATA);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+}
+
/*
* MMIO register access helper functions.
*/
@@ -160,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_virt_kiq_rreg(adev, reg);
+ if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ return amdgpu_kiq_rreg(adev, reg);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -238,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_virt_kiq_wreg(adev, reg, v);
+ if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ return amdgpu_kiq_wreg(adev, reg, v);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -413,6 +469,40 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
}
/**
+ * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
+ *
+ * @adev: amdgpu device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function. Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
+{
+ DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ BUG();
+ return 0;
+}
+
+/**
+ * amdgpu_invalid_wreg64 - dummy reg write function
+ *
+ * @adev: amdgpu device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function. Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
+{
+ DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
+ BUG();
+}
+
+/**
* amdgpu_block_invalid_rreg - dummy reg read function
*
* @adev: amdgpu device pointer
@@ -895,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
struct sysinfo si;
- bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+ bool is_os_64 = (sizeof(void *) == 8);
uint64_t total_memory;
uint64_t dram_size_seven_GB = 0x1B8000000;
uint64_t dram_size_three_GB = 0xB8000000;
@@ -942,8 +1032,6 @@ def_value:
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
- int ret = 0;
-
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -981,15 +1069,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_device_check_block_size(adev);
- ret = amdgpu_device_get_job_timeout_settings(adev);
- if (ret) {
- dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return ret;
- }
-
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- return ret;
+ return 0;
}
/**
@@ -1004,8 +1086,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ int r;
- if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -1013,7 +1096,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_device_resume(dev, true, true);
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ r = pci_enable_device(dev->pdev);
+ if (r)
+ DRM_WARN("pci_enable_device failed (%d)\n", r);
+ amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
@@ -1021,7 +1109,11 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
pr_info("amdgpu: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_device_suspend(dev, true, true);
+ amdgpu_device_suspend(dev, true);
+ pci_save_state(dev->pdev);
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1384,9 +1476,21 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
else
chip_name = "raven";
break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
+ case CHIP_RENOIR:
+ chip_name = "renoir";
+ break;
case CHIP_NAVI10:
chip_name = "navi10";
break;
+ case CHIP_NAVI14:
+ chip_name = "navi14";
+ break;
+ case CHIP_NAVI12:
+ chip_name = "navi12";
+ break;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -1415,6 +1519,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ goto parse_soc_bounding_box;
+
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1442,14 +1549,18 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->gfx.config.num_packer_per_sc =
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+
+parse_soc_bounding_box:
+ /*
+ * soc bounding box info is not integrated in the discovery table,
+ * so we always need to parse it from the gpu info firmware.
+ */
if (hdr->version_minor == 2) {
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
}
-#endif
break;
}
default:
@@ -1529,7 +1640,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
- if (adev->asic_type == CHIP_RAVEN)
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
+ if (adev->asic_type == CHIP_RAVEN ||
+ adev->asic_type == CHIP_RENOIR)
adev->family = AMDGPU_FAMILY_RV;
else
adev->family = AMDGPU_FAMILY_AI;
@@ -1539,6 +1653,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return r;
break;
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
adev->family = AMDGPU_FAMILY_NV;
r = nv_set_ip_blocks(adev);
@@ -1554,19 +1670,19 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ amdgpu_discovery_get_gfx_info(adev);
+
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return -EAGAIN;
-
- /* query the reg access mode at the very beginning */
- amdgpu_virt_init_reg_access_mode(adev);
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1665,29 +1781,36 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_VEGA10) {
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
- if (adev->in_gpu_reset || adev->in_suspend) {
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
- break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
+ continue;
+
+ /* no need to do the fw loading again if already done */
+ if (adev->ip_blocks[i].status.hw == true)
+ break;
+
+ if (adev->in_gpu_reset || adev->in_suspend) {
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- } else {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
- if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
+ return r;
+ }
+ } else {
+ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (r) {
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
}
- adev->ip_blocks[i].status.hw = true;
}
+
+ adev->ip_blocks[i].status.hw = true;
+ break;
}
}
- r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
+
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
+ r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
return r;
}
@@ -1754,6 +1877,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
@@ -1777,16 +1903,26 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
+ /*
+ * retired pages will be loaded from eeprom and reserved here,
+ * it should be called after amdgpu_device_ip_hw_init_phase2 since
+ * for some ASICs the RAS EEPROM code relies on the SMU being fully
+ * functional for I2C communication, which is only true at this point.
+ * recovery_init may fail, but it can free all resources allocated by
+ * itself and its failure should not stop the amdgpu init process.
+ *
+ * Note: theoretically, this should be called before all vram allocations
+ * to protect retired pages from being reused.
+ */
+ amdgpu_ras_recovery_init(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
init_failed:
- if (amdgpu_sriov_vf(adev)) {
- if (!r)
- amdgpu_virt_init_data_exchange(adev);
+ if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, true);
- }
return r;
}
@@ -1825,6 +1961,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* amdgpu_device_set_cg_state - set clockgating for amdgpu device
*
* @adev: amdgpu_device pointer
+ * @state: clockgating state (gate or ungate)
*
* The list of all the hardware IPs that make up the asic is walked and the
* set_clockgating_state callbacks are run.
@@ -1849,6 +1986,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1879,6 +2017,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
@@ -1944,6 +2083,7 @@ out:
*/
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
+ struct amdgpu_gpu_instance *gpu_instance;
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1969,8 +2109,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
- /* set to low pstate by default */
- amdgpu_xgmi_set_pstate(adev, 0);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * Reset the device p-state to low, as it was booted with high.
+ *
+ * This should be performed only after all devices from the same
+ * hive have been initialized.
+ *
+ * However, the number of devices in the hive is not known in
+ * advance, as they are counted one by one during device init.
+ *
+ * So we wait until all XGMI interlinked devices are initialized.
+ * This may add some delay, as those devices may come from
+ * different hives. But that should be OK.
+ */
+ if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev->flags & AMD_IS_APU)
+ continue;
+
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ if (r) {
+ DRM_ERROR("pstate setting failed (%d).\n", r);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+ }
return 0;
}
@@ -2128,7 +2299,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
if (r) {
DRM_ERROR("suspend of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
+ return r;
}
+ adev->ip_blocks[i].status.hw = false;
}
}
@@ -2156,6 +2329,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
+ /* PSP lost connection when err_event_athub occurs */
+ if (amdgpu_ras_intr_triggered() &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
+ }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -2163,6 +2342,18 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
DRM_ERROR("suspend of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
+ adev->ip_blocks[i].status.hw = false;
+ /* handle putting the SMC in the appropriate state */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
+ }
+ }
+
+ adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -2215,6 +2406,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
for (j = 0; j < adev->num_ip_blocks; j++) {
block = &adev->ip_blocks[j];
+ block->status.hw = false;
if (block->version->type != ip_order[i] ||
!block->status.valid)
continue;
@@ -2223,6 +2415,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
+ block->status.hw = true;
}
}
@@ -2239,7 +2432,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_IP_BLOCK_TYPE_VCE
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_VCN
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2250,13 +2444,19 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
block = &adev->ip_blocks[j];
if (block->version->type != ip_order[i] ||
- !block->status.valid)
+ !block->status.valid ||
+ block->status.hw)
continue;
- r = block->version->funcs->hw_init(adev);
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
+ r = block->version->funcs->resume(adev);
+ else
+ r = block->version->funcs->hw_init(adev);
+
DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
+ block->status.hw = true;
}
}
@@ -2280,17 +2480,19 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
DRM_ERROR("resume of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ adev->ip_blocks[i].status.hw = true;
}
}
@@ -2315,7 +2517,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
@@ -2328,6 +2530,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -2421,15 +2624,19 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ case CHIP_RENOIR:
#endif
return amdgpu_dc != 0;
#endif
default:
+ if (amdgpu_dc > 0)
+ DRM_INFO("Display Core has been requested via kernel parameter "
+ "but isn't supported by ASIC, ignoring\n");
return false;
}
}
@@ -2454,13 +2661,110 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+ /* It's a bug to not have a hive within this function */
+ if (WARN_ON(!hive))
+ return;
+
+ /*
+ * Use task barrier to synchronize all xgmi reset works across the
+ * hive. task_barrier_enter and task_barrier_exit will block
+ * until all the threads running the xgmi reset works reach
+ * those points. task_barrier_full will do both blocks.
+ */
+ if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+
+ task_barrier_enter(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+
+ if (adev->asic_reset_res)
+ goto fail;
- adev->asic_reset_res = amdgpu_asic_reset(adev);
+ task_barrier_exit(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+ } else {
+
+ task_barrier_full(&hive->tb);
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ }
+
+fail:
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev->ddev->unique);
}
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+ * By default the timeout for non-compute jobs is 10000 ms
+ * and no timeout is enforced on compute jobs.
+ * In SR-IOV or passthrough mode, the timeout for compute
+ * jobs is also 10000 ms by default.
+ */
+ adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ else
+ adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * There is only one value specified and
+ * it should apply to all non-compute jobs.
+ */
+ if (index == 1) {
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ }
+ }
+
+ return ret;
+}
/**
* amdgpu_device_init - initialize the driver
@@ -2480,7 +2784,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
int r, i;
- bool runtime = false;
+ bool boco = false;
u32 max_MBps;
adev->shutdown = false;
@@ -2488,7 +2792,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMD_ASIC_MASK;
+
+ if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+ adev->asic_type = amdgpu_force_asic_type;
+ else
+ adev->asic_type = flags & AMD_ASIC_MASK;
+
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
adev->usec_timeout *= 2;
@@ -2498,7 +2807,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
- adev->vm_manager.vm_pte_num_rqs = 0;
+ adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -2509,6 +2818,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->pcie_wreg = &amdgpu_invalid_wreg;
adev->pciep_rreg = &amdgpu_invalid_rreg;
adev->pciep_wreg = &amdgpu_invalid_wreg;
+ adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
+ adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -2536,8 +2847,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
- mutex_init(&adev->virt.dpm_mutex);
mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
r = amdgpu_device_check_arguments(adev);
if (r)
@@ -2629,6 +2940,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ r = amdgpu_device_get_job_timeout_settings(adev);
+ if (r) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return r;
+ }
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -2637,12 +2954,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* ignore it */
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_device_is_px(ddev))
- runtime = true;
- if (!pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_device_supports_boco(ddev))
+ boco = true;
+ if (amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ !pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, runtime);
- if (runtime)
+ &amdgpu_switcheroo_ops, boco);
+ if (boco)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
if (amdgpu_emu_mode == 1) {
@@ -2729,11 +3049,17 @@ fence_driver_init:
}
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- if (amdgpu_virt_request_full_gpu(adev, false))
- amdgpu_virt_release_full_gpu(adev, false);
goto failed;
}
+ DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_cu_per_sh,
+ adev->gfx.cu_info.number);
+
+ amdgpu_ctx_init_sched(adev);
+
adev->accel_working = true;
amdgpu_vm_check_compute_bug(adev);
@@ -2748,16 +3074,19 @@ fence_driver_init:
amdgpu_fbdev_init(adev);
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
- amdgpu_pm_virt_sysfs_init(adev);
-
r = amdgpu_pm_sysfs_init(adev);
- if (r)
+ if (r) {
+ adev->pm_sysfs_en = false;
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+ } else
+ adev->pm_sysfs_en = true;
r = amdgpu_ucode_sysfs_init(adev);
- if (r)
+ if (r) {
+ adev->ucode_sysfs_en = false;
DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
r = amdgpu_debugfs_gem_init(adev);
if (r)
@@ -2788,6 +3117,13 @@ fence_driver_init:
DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
}
+ /*
+ * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+ * Otherwise the mgpu fan boost feature will be skipped because the
+ * gpu instance count would be too low.
+ */
+ amdgpu_register_gpu_instance(adev);
+
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
@@ -2819,7 +3155,7 @@ fence_driver_init:
failed:
amdgpu_vf_error_trans_all(adev);
- if (runtime)
+ if (boco)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
return r;
@@ -2838,7 +3174,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int r;
DRM_INFO("amdgpu: finishing device.\n");
+ flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -2848,7 +3186,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
drm_atomic_helper_shutdown(adev->ddev);
}
amdgpu_fence_driver_fini(adev);
- amdgpu_pm_sysfs_fini(adev);
+ if (adev->pm_sysfs_en)
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
@@ -2856,7 +3195,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
- cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -2866,9 +3204,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (!pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ !pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
- if (adev->flags & AMD_IS_PX)
+ if (amdgpu_device_supports_boco(adev->ddev))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
@@ -2877,12 +3218,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
- amdgpu_pm_virt_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
- amdgpu_ucode_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
amdgpu_debugfs_preempt_cleanup(adev);
@@ -2905,11 +3245,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -2932,9 +3273,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- }
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2985,17 +3328,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- pci_save_state(dev->pdev);
- if (suspend) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- } else {
- r = amdgpu_asic_reset(adev);
- if (r)
- DRM_ERROR("amdgpu asic reset failed\n");
- }
-
return 0;
}
@@ -3010,9 +3342,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
int r = 0;
@@ -3020,14 +3353,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- r = pci_enable_device(dev->pdev);
- if (r)
- return r;
- }
-
/* post card */
if (amdgpu_device_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -3083,9 +3408,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* turn on display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
drm_modeset_unlock_all(dev);
}
amdgpu_fbdev_set_suspend(adev, 0);
@@ -3362,13 +3691,12 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_amdkfd_pre_reset(adev);
-
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
goto error;
+ amdgpu_virt_init_data_exchange(adev);
/* we need recover gart prior to run SMC/CP/SDMA resume */
amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
@@ -3386,10 +3714,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_amdkfd_post_reset(adev);
error:
- amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
- atomic_inc(&adev->vram_lost_counter);
+ amdgpu_inc_vram_lost(adev);
r = amdgpu_device_recover_vram(adev);
}
@@ -3431,6 +3758,12 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_VEGA20:
case CHIP_VEGA10:
case CHIP_VEGA12:
+ case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
break;
default:
goto disabled;
@@ -3507,7 +3840,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY;
} else
r = amdgpu_asic_reset(tmp_adev);
@@ -3519,7 +3852,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
- /* For XGMI wait for all PSP resets to complete before proceed */
+ /* For XGMI wait for all resets to complete before proceeding */
if (!r) {
list_for_each_entry(tmp_adev, device_list_handle,
gmc.xgmi.head) {
@@ -3530,14 +3863,11 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
-
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- amdgpu_ras_reserve_bad_pages(tmp_adev);
- }
}
}
+ if (!r && amdgpu_ras_intr_triggered())
+ amdgpu_ras_intr_cleared();
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
@@ -3554,7 +3884,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
- atomic_inc(&tmp_adev->vram_lost_counter);
+ amdgpu_inc_vram_lost(tmp_adev);
}
r = amdgpu_gtt_mgr_recover(
@@ -3626,25 +3956,30 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
+ adev->in_gpu_reset = true;
+ switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_MODE1:
+ adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
+ break;
+ case AMD_RESET_METHOD_MODE2:
+ adev->mp1_state = PP_MP1_STATE_RESET;
+ break;
+ default:
+ adev->mp1_state = PP_MP1_STATE_NONE;
+ break;
+ }
return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
- adev->in_gpu_reset = 0;
+ adev->mp1_state = PP_MP1_STATE_NONE;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
-
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -3664,11 +3999,28 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+ bool use_baco =
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
+ true : false;
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read the log and see why the system rebooted.
+ */
+ if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
+
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
- dev_info(adev->dev, "GPU reset begin!\n");
+ dev_info(adev->dev, "GPU %s begin!\n",
+ (in_ras_intr && !use_baco) ? "jobs stop":"reset");
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -3684,20 +4036,27 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (hive && !mutex_trylock(&hive->reset_lock)) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
- job->base.id, hive->hive_id);
+ job ? job->base.id : -1, hive->hive_id);
return 0;
}
/* Start with adev pre asic reset first for soft reset check.*/
if (!amdgpu_device_lock_adev(adev, !hive)) {
DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
- job->base.id);
+ job ? job->base.id : -1);
return 0;
}
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Build list of devices to reset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (!hive) {
+ /*unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_device_unlock_adev(adev);
return -ENODEV;
}
@@ -3713,17 +4072,23 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
- /*
- * Mark these ASICs to be reseted as untracked first
- * And add them back after reset completed
- */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
- amdgpu_unregister_gpu_instance(tmp_adev);
-
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev != adev) {
+ amdgpu_device_lock_adev(tmp_adev, false);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+ }
+
+ /*
+ * Mark these ASICs to be reset as untracked first,
+ * and add them back after the reset completes.
+ */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
/* disable ras on ALL IPs */
- if (amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!(in_ras_intr && !use_baco) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3732,11 +4097,17 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
- drm_sched_stop(&ring->sched, &job->base);
+ drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+ if (in_ras_intr && !use_baco)
+ amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
}
+ if (in_ras_intr && !use_baco)
+ goto skip_sched_resume;
+
/*
* Must check guilty signal here since after this point all old
* HW fences are force signaled.
@@ -3747,9 +4118,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dma_fence_is_signaled(job->base.s_fence->parent))
job_signaled = true;
- if (!amdgpu_device_ip_need_full_reset(adev))
- device_list_handle = &device_list;
-
if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -3757,9 +4125,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* Guilty job will be freed after this*/
- r = amdgpu_device_pre_asic_reset(adev,
- job,
- &need_full_reset);
+ r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
if (r) {
/*TODO Should we stop ?*/
DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -3773,7 +4139,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (tmp_adev == adev)
continue;
- amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -3801,6 +4166,7 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -3822,12 +4188,18 @@ skip_hw_reset:
if (r) {
/* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
}
+ }
+skip_sched_resume:
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /*unlock kfd: SRIOV would do it separately */
+ if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
@@ -3975,3 +4347,35 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
+int amdgpu_device_baco_enter(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_device_supports_baco(adev->ddev))
+ return -ENOTSUPP;
+
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
+
+ return amdgpu_dpm_baco_enter(adev);
+}
+
+int amdgpu_device_baco_exit(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (!amdgpu_device_supports_baco(adev->ddev))
+ return -ENOTSUPP;
+
+ ret = amdgpu_dpm_baco_exit(adev);
+ if (ret)
+ return ret;
+
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+
+ return 0;
+}
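For context, a minimal sketch of how a caller might use the new amdgpu_device_vram_access() helper added above. The example_dump_vram() function and the 256-byte size are hypothetical and not part of this patch; the helper itself only assumes a dword-aligned offset and a buffer of at least @size bytes.

/* Hypothetical caller (not part of this patch): read 256 bytes of VRAM
 * starting at a dword-aligned offset into a local buffer. The helper
 * walks VRAM one dword at a time through MM_INDEX/MM_INDEX_HI/MM_DATA,
 * so @pos and @size are expected to be multiples of 4.
 */
static void example_dump_vram(struct amdgpu_device *adev, loff_t pos)
{
	uint32_t buf[64]; /* 256 bytes */

	amdgpu_device_vram_access(adev, pos, buf, sizeof(buf), false);
	/* buf now holds the VRAM contents; a write-back would pass true. */
}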
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
new file mode 100644
index 000000000000..057f6ea645d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DF_H__
+#define __AMDGPU_DF_H__
+
+struct amdgpu_df_hash_status {
+ bool hash_64k;
+ bool hash_2m;
+ bool hash_1g;
+};
+
+struct amdgpu_df_funcs {
+ void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
+ void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+ bool enable);
+ u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+ u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+ bool enable);
+ int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+ int is_enable);
+ int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+ int is_disable);
+ void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+ uint64_t *count);
+ uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
+ void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
+ uint32_t ficadl_val, uint32_t ficadh_val);
+ uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
+ uint32_t df_inst);
+ uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_df {
+ struct amdgpu_df_hash_status hash_status;
+ const struct amdgpu_df_funcs *funcs;
+};
+
+#endif /* __AMDGPU_DF_H__ */
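As a rough illustration of how this callback table is meant to be consumed (an assumption about how the rest of the series wires it up, not something defined in this header), DF IP code populates a struct amdgpu_df_funcs instance and callers go through the function pointers:

/* Hypothetical caller (not part of this patch): query the number of
 * DRAM channels through the df callback table. Assumes the device
 * carries a struct amdgpu_df member named "df", as introduced
 * elsewhere in this series.
 */
static u32 example_fb_channels(struct amdgpu_device *adev)
{
	if (adev->df.funcs && adev->df.funcs->get_fb_channel_number)
		return adev->df.funcs->get_fb_channel_number(adev);
	return 0;
}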
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1481899f86c1..f95092741c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
- uint32_t *p = (uint32_t *)binary;
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - BINARY_MAX_SIZE;
- unsigned long flags;
-
- while (pos < vram_size) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
- *p++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- pos += 4;
- }
+ uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
return 0;
}
@@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
+ adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
if (!adev->discovery)
return -ENOMEM;
@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor)
+ int *major, int *minor, int *revision)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
*major = ip->major;
if (minor)
*minor = ip->minor;
+ if (revision)
+ *revision = ip->revision;
return 0;
}
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 85b8c4d4d576..ba78e15d9b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,11 +24,13 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#define DISCOVERY_TMR_SIZE (64 << 10)
+
int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor);
+ int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
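A minimal sketch of the extended lookup with the new revision out-parameter, assuming the GC_HWID constant from the existing discovery/hw_id headers; the caller name is hypothetical, and passing NULL for any out-parameter that is not needed remains valid:

/* Hypothetical caller (not part of this patch): query the GC IP version,
 * including the revision field added by this change.
 */
static void example_query_gc_version(struct amdgpu_device *adev)
{
	int major, minor, revision;

	if (!amdgpu_discovery_get_ip_version(adev, GC_HWID,
					     &major, &minor, &revision))
		DRM_INFO("GC v%d.%d.%d\n", major, minor, revision);
}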
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 535650967b1a..6d520a3eec40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -191,7 +191,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
}
if (!adev->enable_virtual_display) {
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+ r = amdgpu_bo_pin(new_abo,
+ amdgpu_display_supported_domains(adev, new_abo->flags));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
@@ -204,7 +205,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
+ r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
@@ -369,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
+ drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
@@ -437,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
}
i++;
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -495,15 +499,37 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
-uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ uint64_t bo_flags)
{
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
#if defined(CONFIG_DRM_AMD_DC)
- if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
- adev->flags & AMD_IS_APU &&
- amdgpu_device_asic_has_dc_support(adev->asic_type))
- domain |= AMDGPU_GEM_DOMAIN_GTT;
+ /*
+ * If amdgpu_bo_support_uswc returns false it means that USWC mappings
+ * are not supported for this board. But this mapping is required
+ * to avoid a hang caused by placing the scanout BO in GTT on certain
+ * APUs. So force the BO placement to VRAM in case this architecture
+ * will not allow USWC mappings.
+ * Also, don't allow the GTT domain if the BO doesn't have the USWC flag set.
+ */
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ amdgpu_bo_support_uswc(bo_flags) &&
+ amdgpu_device_asic_has_dc_support(adev->asic_type)) {
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ case CHIP_RAVEN:
+ /* enable S/G on PCO and RV2 */
+ if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ default:
+ break;
+ }
+ }
#endif
return domain;
@@ -674,7 +700,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_encoder *amdgpu_encoder;
struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
@@ -686,7 +711,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
continue;
amdgpu_encoder = to_amdgpu_encoder(encoder);
connector = amdgpu_get_connector_for_encoder(encoder);
- amdgpu_connector = to_amdgpu_connector(connector);
/* set scaling */
if (amdgpu_encoder->rmx_type == RMX_OFF)
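A short sketch of the new amdgpu_display_supported_domains() signature, mirroring the page-flip call site changed above; the example_pin_scanout() wrapper is hypothetical and not part of this patch:

/* Hypothetical caller (not part of this patch): pin a scanout BO in one
 * of the domains now derived from both the ASIC and the BO's flags.
 */
static int example_pin_scanout(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);

	return amdgpu_bo_pin(bo, domain);
}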
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 06b922fe0d42..3620b24785e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -38,7 +38,8 @@
int amdgpu_display_freesync_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
void amdgpu_display_update_priority(struct amdgpu_device *adev);
-uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ uint64_t bo_flags);
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 489041df1f45..a59cd47aa6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -34,27 +34,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
- * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
- * implementation
- * @obj: GEM buffer object (BO)
- *
- * Returns:
- * A scatter/gather table for the pinned pages of the BO's memory.
- */
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int npages = bo->tbo.num_pages;
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-}
-
-/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
*
@@ -137,23 +122,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
}
static int
-__reservation_object_make_exclusive(struct reservation_object *obj)
+__dma_resv_make_exclusive(struct dma_resv *obj)
{
struct dma_fence **fences;
unsigned int count;
int r;
- if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+ if (!dma_resv_get_list(obj)) /* no shared fences to convert */
return 0;
- r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
if (r)
return r;
if (count == 0) {
/* Now that was unexpected. */
} else if (count == 1) {
- reservation_object_add_excl_fence(obj, fences[0]);
+ dma_resv_add_excl_fence(obj, fences[0]);
dma_fence_put(fences[0]);
kfree(fences);
} else {
@@ -165,7 +150,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
if (!array)
goto err_fences_put;
- reservation_object_add_excl_fence(obj, &array->base);
+ dma_resv_add_excl_fence(obj, &array->base);
dma_fence_put(&array->base);
}
@@ -179,106 +164,126 @@ err_fences_put:
}
/**
- * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
- * @dma_buf: Shared DMA buffer
- * @attach: DMA-buf attachment
+ * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
- * Makes sure that the shared DMA buffer can be accessed by the target device.
- * For now, simply pins it to the GTT domain, where it should be accessible by
- * all DMA devices.
+ * @dmabuf: DMA-buf where we attach to
+ * @attach: attachment to add
*
- * Returns:
- * 0 on success or a negative error code on failure.
+ * Add the attachment as user to the exported DMA-buf.
*/
-static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- long r;
+ int r;
- r = drm_gem_map_attach(dma_buf, attach);
- if (r)
- return r;
+ if (attach->dev->driver == adev->dev->driver)
+ return 0;
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
- goto error_detach;
-
-
- if (attach->dev->driver != adev->dev->driver) {
- /*
- * We only create shared fences for internal use, but importers
- * of the dmabuf rely on exclusive fences for implicitly
- * tracking write hazards. As any of the current fences may
- * correspond to a write, we need to convert all existing
- * fences on the reservation object into a single exclusive
- * fence.
- */
- r = __reservation_object_make_exclusive(bo->tbo.resv);
- if (r)
- goto error_unreserve;
- }
+ return r;
- /* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ /*
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
+ */
+ r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
- goto error_unreserve;
-
- if (attach->dev->driver != adev->dev->driver)
- bo->prime_shared_count++;
+ return r;
-error_unreserve:
+ bo->prime_shared_count++;
amdgpu_bo_unreserve(bo);
+ return 0;
+}
-error_detach:
- if (r)
- drm_gem_map_detach(dma_buf, attach);
- return r;
+/**
+ * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
+ *
+ * @dmabuf: DMA-buf where we remove the attachment from
+ * @attach: the attachment to remove
+ *
+ * Called when an attachment is removed from the DMA-buf.
+ */
+static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
+ bo->prime_shared_count--;
}
/**
- * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
+ * @dir: DMA direction
*
- * This is called when a shared DMA buffer no longer needs to be accessible by
- * another device. For now, simply unpins the buffer from GTT.
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * sg_table filled with the DMA addresses to use or ERR_PTR with a negative error
+ * code.
*/
-static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
+ struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int ret = 0;
+ struct sg_table *sgt;
+ long r;
- ret = amdgpu_bo_reserve(bo, true);
- if (unlikely(ret != 0))
- goto error;
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ return ERR_PTR(r);
- amdgpu_bo_unpin(bo);
- if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
- bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
-error:
- drm_gem_map_detach(dma_buf, attach);
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
+
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
}
/**
- * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
- * @obj: GEM BO
+ * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
+ * @attach: DMA-buf attachment
+ * @sgt: sg_table to unmap
+ * @dir: DMA direction
*
- * Returns:
- * The BO's reservation object.
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * another device. For now, simply unpins the buffer from GTT.
*/
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- return bo->tbo.resv;
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ amdgpu_bo_unpin(bo);
}
/**
@@ -299,7 +304,7 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { true, false };
- u32 domain = amdgpu_display_supported_domains(adev);
+ u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
int ret;
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
@@ -322,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_dma_buf_map_attach,
- .detach = amdgpu_dma_buf_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .dynamic_mapping = true,
+ .attach = amdgpu_dma_buf_attach,
+ .detach = amdgpu_dma_buf_detach,
+ .map_dma_buf = amdgpu_dma_buf_map,
+ .unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
@@ -335,18 +341,15 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
/**
* amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
- * @dev: DRM device
* @gobj: GEM BO
* @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
*
- * The main work is done by the &drm_gem_prime_export helper, which in turn
- * uses &amdgpu_gem_prime_res_obj.
+ * The main work is done by the &drm_gem_prime_export helper.
*
* Returns:
* Shared DMA buffer representing the GEM BO from the given device.
*/
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
@@ -356,63 +359,56 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- buf = drm_gem_prime_export(dev, gobj, flags);
- if (!IS_ERR(buf)) {
- buf->file->f_mapping = dev->anon_inode->i_mapping;
+ buf = drm_gem_prime_export(gobj, flags);
+ if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
- }
return buf;
}
/**
- * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
- * implementation
+ * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
+ *
* @dev: DRM device
- * @attach: DMA-buf attachment
- * @sg: Scatter/gather table
+ * @dma_buf: DMA-buf
*
- * Imports shared DMA buffer memory exported by another device.
+ * Creates an empty SG BO for DMA-buf import.
*
* Returns:
* A new GEM BO of the given DRM device, representing the memory
* described by the given DMA-buf attachment and scatter/gather table.
*/
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+static struct drm_gem_object *
+amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
- struct reservation_object *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
memset(&bp, 0, sizeof(bp));
- bp.size = attach->dmabuf->size;
+ bp.size = dma_buf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
bp.type = ttm_bo_type_sg;
bp.resv = resv;
- ww_mutex_lock(&resv->lock, NULL);
+ dma_resv_lock(resv, NULL);
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret)
goto error;
- bo->tbo.sg = sg;
- bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
- ww_mutex_unlock(&resv->lock);
- return &bo->gem_base;
+ dma_resv_unlock(resv);
+ return &bo->tbo.base;
error:
- ww_mutex_unlock(&resv->lock);
+ dma_resv_unlock(resv);
return ERR_PTR(ret);
}
@@ -421,15 +417,15 @@ error:
* @dev: DRM device
* @dma_buf: Shared DMA buffer
*
- * The main work is done by the &drm_gem_prime_import helper, which in turn
- * uses &amdgpu_gem_prime_import_sg_table.
+ * Import a dma_buf into the driver and potentially create a new GEM object.
*
* Returns:
* GEM BO representing the shared DMA buffer for the given device.
*/
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
+ struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
@@ -444,5 +440,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, dma_buf);
+ obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ if (IS_ERR(attach)) {
+ drm_gem_object_put(obj);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
}
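For context on how the dynamic attachment created in amdgpu_gem_prime_import() is used later: with dynamic_mapping set in the ops above, the importer is expected to map the attachment itself while holding the exporter's reservation lock, which is what ultimately invokes amdgpu_dma_buf_map(). A hedged sketch follows; the helper name and call site are hypothetical and not part of this patch.

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/* Hypothetical caller: map an imported BO's attachment to get its sg_table. */
static struct sg_table *example_map_import(struct drm_gem_object *obj,
					   enum dma_data_direction dir)
{
	struct dma_buf_attachment *attach = obj->import_attach;
	struct sg_table *sgt;

	/* Dynamic attachments are mapped under the exporter's resv lock. */
	dma_resv_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, dir);
	dma_resv_unlock(attach->dmabuf->resv);

	return sgt;
}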
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index c7056cbe8685..ec447a7b6b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -25,17 +25,10 @@
#include <drm/drm_gem.h>
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index 790263dcc064..3fa18003d4d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -130,13 +130,18 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
AMDGPU_VEGA20_DOORBELL_IH = 0x178,
/* MMSCH: 392~407
* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
+ * VCN engine's doorbell is 32 bit and two VCN rings share one QWORD
*/
- AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
+ AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* VCN0 */
AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+ AMDGPU_VEGA20_DOORBELL64_VCN8_9 = 0x18C, /* VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCNa_b = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCNc_d = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCNe_f = 0x18F,
+
AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 61bd10310604..a2e8c3dfb4f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
@@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
@@ -944,20 +946,63 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
bool swsmu = is_support_sw_smu(adev);
switch (block_type) {
- case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_UVD:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_VCE:
+ if (swsmu) {
+ ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+ } else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ /*
+ * TODO: need a better lock mechanism
+ *
+ * Here adev->pm.mutex lock protection is enforced on
+ * UVD and VCE cases only. Since for other cases, there
+ * may be already lock protection in amdgpu_pm.c.
+ * This is a quick fix for the deadlock issue below.
+ * INFO: task ocltst:2028 blocked for more than 120 seconds.
+ * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
+ * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ * ocltst D 0 2028 2026 0x00000000
+ * Call Trace:
+ * __schedule+0x2c0/0x870
+ * schedule+0x2c/0x70
+ * schedule_preempt_disabled+0xe/0x10
+ * __mutex_lock.isra.9+0x26d/0x4e0
+ * __mutex_lock_slowpath+0x13/0x20
+ * ? __mutex_lock_slowpath+0x13/0x20
+ * mutex_lock+0x2f/0x40
+ * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
+ * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
+ * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
+ * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
+ * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
+ * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
+ */
+ mutex_lock(&adev->pm.mutex);
+ ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate));
+ mutex_unlock(&adev->pm.mutex);
+ }
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ case AMD_IP_BLOCK_TYPE_VCN:
+ case AMD_IP_BLOCK_TYPE_SDMA:
if (swsmu)
ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- else
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ if (swsmu)
+ ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+ break;
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
- case AMD_IP_BLOCK_TYPE_SDMA:
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
+ ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
default:
@@ -966,3 +1011,163 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
return ret;
}
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_enter(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_exit(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* exit BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+ enum pp_mp1_state mp1_state)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_set_mp1_state(&adev->smu, mp1_state);
+ } else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_mp1_state) {
+ ret = adev->powerplay.pp_funcs->set_mp1_state(
+ adev->powerplay.pp_handle,
+ mp1_state);
+ }
+
+ return ret;
+}
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ bool baco_cap;
+
+ if (is_support_sw_smu(adev)) {
+ return smu_baco_is_support(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+ return false;
+
+ if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+ return false;
+
+ return baco_cap ? true : false;
+ }
+}
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev)) {
+ return smu_mode2_reset(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+ return -ENOENT;
+
+ return pp_funcs->asic_reset_mode_2(pp_handle);
+ }
+}
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_enter(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_baco_exit(smu);
+ if (ret)
+ return ret;
+ } else {
+ if (!pp_funcs
+ || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ if (ret)
+ return ret;
+
+ /* exit BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_switch_power_profile(&adev->smu, type, en);
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->switch_power_profile)
+ ret = adev->powerplay.pp_funcs->switch_power_profile(
+ adev->powerplay.pp_handle, type, en);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu_xgmi(adev))
+ ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_xgmi_pstate)
+ ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+ pstate);
+
+ return ret;
+}
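The BACO helpers added above are meant to be called in pairs; a hedged sketch of how a reset path elsewhere in the driver might use them (the caller name is illustrative only, and only functions introduced in this hunk are used):

#include "amdgpu.h"

/* Illustrative caller of the BACO helpers introduced above. */
static int example_try_baco_reset(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_dpm_is_baco_supported(adev))
		return -EOPNOTSUPP;

	r = amdgpu_dpm_baco_enter(adev);
	if (r)
		return r;

	/* Entering and then leaving BACO power-cycles the ASIC. */
	return amdgpu_dpm_baco_exit(adev);
}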
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 1c5c0fd76dbf..902ca6c00cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-#define amdgpu_smu_get_current_power_state(adev) \
- ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
-
-#define amdgpu_smu_set_power_state(adev) \
- ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
-
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
@@ -347,10 +341,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
-#define amdgpu_dpm_switch_power_profile(adev, type, en) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
- (adev)->powerplay.pp_handle, type, en))
-
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
@@ -523,4 +513,24 @@ extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+ uint32_t pstate);
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en);
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+ enum pp_mp1_state mp1_state);
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 5376328d3fd0..94e2fd758e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
+#include <linux/mmu_notifier.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -42,6 +43,8 @@
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
@@ -79,13 +82,14 @@
* - 3.31.0 - Add support for per-flip tiling attribute changes with DC
* - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
* - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
+ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
+ * - 3.36.0 - Allow reading more status registers on si/cik
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 33
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
-#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
-
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
@@ -98,7 +102,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH];
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -125,11 +129,7 @@ char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive(bit 14) disabled by default*/
uint amdgpu_pp_feature_mask = 0xffffbfff;
-int amdgpu_ngg = 0;
-int amdgpu_prim_buf_per_se = 0;
-int amdgpu_pos_buf_per_se = 0;
-int amdgpu_cntl_sb_buf_per_se = 0;
-int amdgpu_param_buf_per_se = 0;
+uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
@@ -143,12 +143,13 @@ int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry;
+int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xfffffffb;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -241,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444);
*
* The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
* multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to default timeout.
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX. The second one is for Compute.
- * And the third and fourth ones are for SDMA and Video.
+ * to the default timeout.
+ *
+ * - With one value specified, the setting will apply to all non-compute jobs.
+ * - With multiple values specified, the first one will be for GFX.
+ * The second one is for Compute. The third and fourth ones are
+ * for SDMA and Video.
+ *
* By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
* jobs is 10000. And there is no timeout enforced on compute jobs.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
+ "for passthrough or sriov, 10000 for all jobs."
" 0: keep default value. negative: infinity timeout), "
- "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
+ "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
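As a concrete illustration of the format documented above (values are hypothetical, not defaults): passing amdgpu.lockup_timeout=10000,60000,10000,10000 on the kernel command line would request a 10000 ms timeout for GFX, SDMA and Video jobs and a 60000 ms timeout for Compute jobs, while amdgpu.lockup_timeout=5000 alone would apply 5000 ms to all non-compute jobs on bare metal.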
/**
@@ -389,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
/**
+ * DOC: forcelongtraining (uint)
+ * Force long memory training in resume.
+ * The default is zero, which indicates short training in resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force memory long training");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+
+/**
* DOC: pcie_gen_cap (uint)
* Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
* The default is 0 (automatic for each asic).
@@ -446,42 +460,6 @@ MODULE_PARM_DESC(virtual_display,
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
/**
- * DOC: ngg (int)
- * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
- */
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
-
-/**
- * DOC: prim_buf_per_se (int)
- * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
-
-/**
- * DOC: pos_buf_per_se (int)
- * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
-
-/**
- * DOC: cntl_sb_buf_per_se (int)
- * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
-
-/**
- * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
- * The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
-
-/**
* DOC: job_hang_limit (int)
* Set how much time allow a job hang and not drop it. The default is 0.
*/
@@ -613,6 +591,16 @@ MODULE_PARM_DESC(noretry,
"Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
module_param_named(noretry, amdgpu_noretry, int, 0644);
+/**
+ * DOC: force_asic_type (int)
+ * A non-negative value used to specify the asic type for all supported GPUs.
+ */
+MODULE_PARM_DESC(force_asic_type,
+ "A non negative value used to specify the asic type for all supported GPUs");
+module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
+
+
#ifdef CONFIG_HSA_AMD
/**
* DOC: sched_policy (int)
@@ -996,6 +984,11 @@ static const struct pci_device_id pciidlist[] = {
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+ /* Arcturus */
+ {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
/* Navi10 */
{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
@@ -1004,6 +997,18 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ /* Navi14 */
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+
+ /* Renoir */
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+
+ /* Navi12 */
+ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
{0, 0, 0}
};
@@ -1030,8 +1035,43 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+#ifdef CONFIG_DRM_AMDGPU_SI
+ if (!amdgpu_si_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ dev_info(&pdev->dev,
+ "SI support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ if (!amdgpu_cik_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dev_info(&pdev->dev,
+ "CIK support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
if (ret)
return ret;
@@ -1074,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
+#ifdef MODULE
+ if (THIS_MODULE->state != MODULE_STATE_GOING)
+#endif
+ DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
drm_dev_put(dev);
pci_disable_device(pdev);
@@ -1087,92 +1130,117 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
+ if (amdgpu_ras_intr_triggered())
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
amdgpu_device_ip_suspend(adev);
+ adev->mp1_state = PP_MP1_STATE_NONE;
}
static int amdgpu_pmops_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_suspend(drm_dev, true, true);
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
/* GPU comes up enabled by the bios on resume */
- if (amdgpu_device_is_px(drm_dev)) {
+ if (amdgpu_device_supports_boco(drm_dev) ||
+ amdgpu_device_supports_baco(drm_dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
- return amdgpu_device_resume(drm_dev, true, true);
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int r;
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, true);
+ if (r)
+ return r;
+ return amdgpu_asic_reset(adev);
}
static int amdgpu_pmops_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_resume(drm_dev, false, true);
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_suspend(drm_dev, true, true);
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_restore(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_resume(drm_dev, false, true);
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int ret, i;
- if (!amdgpu_device_is_px(drm_dev)) {
+ if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* wait for all rings to drain before suspending */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready) {
+ ret = amdgpu_fence_wait_empty(ring);
+ if (ret)
+ return -EBUSY;
+ }
+ }
+
+ if (amdgpu_device_supports_boco(drm_dev))
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
- ret = amdgpu_device_suspend(drm_dev, false, false);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- if (amdgpu_is_atpx_hybrid())
- pci_set_power_state(pdev, PCI_D3cold);
- else if (!amdgpu_has_atpx_dgpu_power_cntl())
- pci_set_power_state(pdev, PCI_D3hot);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ ret = amdgpu_device_suspend(drm_dev, false);
+ if (amdgpu_device_supports_boco(drm_dev)) {
+ /* Only need to handle PCI state in the driver for ATPX
+ * PCI core handles it for _PR3.
+ */
+ if (amdgpu_is_atpx_hybrid()) {
+ pci_ignore_hotplug(pdev);
+ } else {
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ }
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ } else if (amdgpu_device_supports_baco(drm_dev)) {
+ amdgpu_device_baco_enter(drm_dev);
+ }
return 0;
}
@@ -1181,35 +1249,45 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
int ret;
- if (!amdgpu_device_is_px(drm_dev))
+ if (!adev->runpm)
return -EINVAL;
- drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ if (amdgpu_device_supports_boco(drm_dev)) {
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (amdgpu_is_atpx_hybrid() ||
- !amdgpu_has_atpx_dgpu_power_cntl())
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_set_master(pdev);
-
- ret = amdgpu_device_resume(drm_dev, false, false);
+ /* Only need to handle PCI state in the driver for ATPX
+ * PCI core handles it for _PR3.
+ */
+ if (amdgpu_is_atpx_hybrid()) {
+ pci_set_master(pdev);
+ } else {
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+ }
+ } else if (amdgpu_device_supports_baco(drm_dev)) {
+ amdgpu_device_baco_exit(drm_dev);
+ }
+ ret = amdgpu_device_resume(drm_dev, false);
drm_kms_helper_poll_enable(drm_dev);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ if (amdgpu_device_supports_boco(drm_dev))
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
return 0;
}
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
struct drm_crtc *crtc;
- if (!amdgpu_device_is_px(drm_dev)) {
+ if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
@@ -1299,66 +1377,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
-{
- char *input = amdgpu_lockup_timeout;
- char *timeout_setting = NULL;
- int index = 0;
- long timeout;
- int ret = 0;
-
- /*
- * By default timeout for non compute jobs is 10000.
- * And there is no timeout enforced on compute jobs.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
-
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
-
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
-
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
- }
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1)
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- }
-
- return ret;
-}
-
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -1373,7 +1391,8 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
@@ -1397,9 +1416,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
- .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
@@ -1464,6 +1480,7 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
+ mmu_notifier_synchronize();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 571a6dfb473e..61fcf247a638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ drm_connector_list_iter_begin(dev, &iter);
/* walk the list and link encoders to connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
@@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
amdgpu_connector->devices, encoder->encoder_type);
}
}
+ drm_connector_list_iter_end(&iter);
}
struct drm_connector *
@@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->active_device & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_connector *
@@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->devices & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index eb3569b46c1e..2672dc64a310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -69,7 +69,7 @@ amdgpufb_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops amdgpufb_ops = {
+static const struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
@@ -131,6 +131,10 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int aligned_size, size;
int height = mode_cmd->height;
u32 cpp;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];
@@ -138,15 +142,11 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
- domain = amdgpu_display_supported_domains(adev);
-
+ domain = amdgpu_display_supported_domains(adev, flags);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
- ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED,
+ ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
ttm_bo_type_kernel, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
@@ -168,7 +168,6 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
dev_err(adev->dev, "FB failed to set tiling flags\n");
}
-
ret = amdgpu_bo_pin(abo, domain);
if (ret) {
amdgpu_bo_unreserve(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 23085b352cf2..3c01252b1e0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -34,6 +34,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_debugfs.h>
@@ -154,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
-
+ pm_runtime_get_noresume(adev->ddev->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -234,6 +235,7 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
int r;
@@ -274,6 +276,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
dma_fence_put(fence);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
} while (last_seq != seq);
return true;
@@ -462,18 +466,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
timeout = adev->gfx_timeout;
break;
case AMDGPU_RING_TYPE_COMPUTE:
- /*
- * For non-sriov case, no timeout enforce
- * on compute ring by default. Unless user
- * specifies a timeout for compute ring.
- *
- * For sriov case, always use the timeout
- * as gfx ring
- */
- if (!amdgpu_sriov_vf(ring->adev))
- timeout = adev->compute_timeout;
- else
- timeout = adev->gfx_timeout;
+ timeout = adev->compute_timeout;
break;
case AMDGPU_RING_TYPE_SDMA:
timeout = adev->sdma_timeout;
@@ -748,10 +741,18 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return 0;
seq_printf(m, "gpu recover\n");
amdgpu_device_gpu_recover(adev, NULL);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d79ab1da9e07..e01e681d2a60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+ struct page *dummy_page = ttm_bo_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
@@ -251,7 +251,9 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+
return 0;
}
@@ -300,6 +302,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
*
* Binds the requested pages to the gart page table
* (all asics).
@@ -310,9 +313,9 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- unsigned i,t,p;
+ unsigned t,p;
#endif
- int r;
+ int r, i;
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
@@ -336,7 +339,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 939f8305511b..4277125a79ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct drm_gem_object **obj)
{
struct amdgpu_bo *bo;
@@ -85,7 +85,7 @@ retry:
}
return r;
}
- *obj = &bo->gem_base;
+ *obj = &bo->tbo.base;
return 0;
}
@@ -134,7 +134,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
- abo->tbo.resv != vm->root.base.bo->tbo.resv)
+ abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
return -EPERM;
r = amdgpu_bo_reserve(abo, false);
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
union drm_amdgpu_gem_create *args = data;
uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
- struct reservation_object *resv = NULL;
+ struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
uint32_t handle;
int r;
@@ -252,7 +252,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (r)
return r;
- resv = vm->root.base.bo->tbo.resv;
+ resv = vm->root.base.bo->tbo.base.resv;
}
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
@@ -291,6 +291,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
@@ -433,7 +435,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+ ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
timeout);
/* ret == 0 means not signaled,
@@ -525,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error;
}
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the GEM UAPI flags mapped into hardware for the ASIC.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+ uint64_t pte_flag = 0;
+
+ if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ pte_flag |= AMDGPU_PTE_EXECUTABLE;
+ if (flags & AMDGPU_VM_PAGE_READABLE)
+ pte_flag |= AMDGPU_PTE_READABLE;
+ if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+ pte_flag |= AMDGPU_PTE_WRITEABLE;
+ if (flags & AMDGPU_VM_PAGE_PRT)
+ pte_flag |= AMDGPU_PTE_PRT;
+
+ if (adev->gmc.gmc_funcs->map_mtype)
+ pte_flag |= amdgpu_gmc_map_mtype(adev,
+ flags & AMDGPU_VM_MTYPE_MASK);
+
+ return pte_flag;
+}
+
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -611,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -629,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -644,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -689,7 +719,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_create_in info;
void __user *out = u64_to_user_ptr(args->value);
- info.bo_size = robj->gem_base.size;
+ info.bo_size = robj->tbo.base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
@@ -747,7 +777,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct amdgpu_device *adev = dev->dev_private;
struct drm_gem_object *gobj;
uint32_t handle;
- u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
u32 domain;
int r;
@@ -764,7 +795,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_pin_domain(adev,
- amdgpu_display_supported_domains(adev));
+ amdgpu_display_supported_domains(adev, flags));
r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
ttm_bo_type_device, NULL, &gobj);
if (r)
@@ -819,8 +850,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
- dma_buf = READ_ONCE(bo->gem_base.dma_buf);
- attachment = READ_ONCE(bo->gem_base.import_attach);
+ dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+ attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
seq_printf(m, " imported from %p", dma_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index b8ba6e27c61f..e0f025dd1b14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -31,7 +31,7 @@
*/
#define AMDGPU_GEM_DOMAIN_MAX 0x3
-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
@@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 74066e1466f7..0f960b498792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
+#include "amdgpu_ras.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, me;
+ int i, queue, me;
for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
queue = i % adev->gfx.me.num_queue_per_pipe;
- pipe = (i / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
me = (i / adev->gfx.me.num_queue_per_pipe)
/ adev->gfx.me.num_pipe_per_me;
@@ -297,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
if (r)
return r;
@@ -320,10 +319,9 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
}
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq)
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
- amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -389,7 +387,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
- if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+ if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
/* create MQD for each KGQ */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
@@ -437,7 +435,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
struct amdgpu_ring *ring = NULL;
int i;
- if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+ if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
kfree(adev->gfx.me.mqd_backup[i]);
@@ -456,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
ring = &adev->gfx.kiq.ring;
- if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring)
- kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
@@ -547,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
- if (!is_support_sw_smu(adev) &&
- (!adev->powerplay.pp_funcs ||
- !adev->powerplay.pp_funcs->set_powergating_by_smu))
- return;
-
-
mutex_lock(&adev->gfx.gfx_off_mutex);
if (!enable)
@@ -569,3 +559,194 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ if (!adev->gfx.ras_if) {
+ adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gfx.ras_if)
+ return -ENOMEM;
+ adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
+ adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if->sub_block_index = 0;
+ strcpy(adev->gfx.ras_if->name, "gfx");
+ }
+ fs_info.head = ih_info.head = *adev->gfx.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ /* free gfx ras_if if ras is not supported */
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
+free:
+ kfree(adev->gfx.ras_if);
+ adev->gfx.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ /* TODO: a UE (uncorrectable error) will trigger an interrupt.
+ *
+ * When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood.
+ */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->gfx.funcs->query_ras_error_count)
+ adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ amdgpu_ras_reset_gpu(adev);
+ }
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_rreg(ring, reg);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU reset case, because doing so may
+ * block the gpu_recover() routine forever; e.g. this KIQ read
+ * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting here, which causes
+ * gpu_recover() to hang.
+ *
+ * Also don't keep waiting when called from IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+ pr_err("failed to read reg:%x\n", reg);
+ return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_wreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_wreg(ring, reg, v);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU reset case, because doing so may
+ * block the gpu_recover() routine forever; e.g. this KIQ write
+ * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting here, which causes
+ * gpu_recover() to hang.
+ *
+ * Also don't keep waiting when called from IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_write;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_write;
+
+ return;
+
+failed_kiq_write:
+ pr_err("failed to write reg:%x\n", reg);
+}
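
The two helpers above give the driver a way to reach privileged registers through the KIQ ring instead of direct MMIO, which matters for SR-IOV guests. A minimal sketch of how a register accessor might route through them; the amdgpu_sriov_runtime() check and the AMDGPU_REGS_NO_KIQ flag are taken from the existing MMIO wrappers and are assumptions here, not part of this hunk:

static uint32_t example_rreg(struct amdgpu_device *adev, uint32_t reg,
			     uint32_t acc_flags)
{
	/* prefer the KIQ path when running as an SR-IOV guest at runtime */
	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_kiq_rreg(adev, reg);

	/* otherwise fall back to a plain MMIO read */
	return readl(adev->rmmio + (reg * 4));
}
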
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 1199b5828b90..ca17ffb01301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -76,11 +76,15 @@ struct kiq_pm4_funcs {
struct amdgpu_ring *ring,
u64 addr,
u64 seq);
+ void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub);
/* Packet sizes */
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
int query_status_size;
+ int invalidate_tlbs_size;
};
struct amdgpu_kiq {
@@ -90,6 +94,7 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
+ uint32_t reg_val_offs;
};
/*
@@ -165,6 +170,7 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
+ uint64_t tcc_disabled_mask;
};
struct amdgpu_cu_info {
@@ -196,28 +202,8 @@ struct amdgpu_gfx_funcs {
uint32_t *dst);
void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
u32 queue, u32 vmid);
-};
-
-struct amdgpu_ngg_buf {
- struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- uint32_t size;
- uint32_t bo_size;
-};
-
-enum {
- NGG_PRIM = 0,
- NGG_POS,
- NGG_CNTL,
- NGG_PARAM,
- NGG_BUF_MAX
-};
-
-struct amdgpu_ngg {
- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
- uint32_t gds_reserve_addr;
- uint32_t gds_reserve_size;
- bool init;
+ int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
+ int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
};
struct sq_work {
@@ -244,7 +230,7 @@ struct amdgpu_me {
uint32_t num_me;
uint32_t num_pipe_per_me;
uint32_t num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
@@ -286,9 +272,14 @@ struct amdgpu_gfx {
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
bool me_fw_write_wait;
+ bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+ struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
+ uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
+ uint32_t num_compute_sched;
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -308,9 +299,6 @@ struct amdgpu_gfx {
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- /* NGG */
- struct amdgpu_ngg ngg;
-
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
struct mutex gfx_off_mutex;
@@ -352,8 +340,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq);
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq);
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
@@ -381,5 +368,14 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
-
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
#endif
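
The new kiq_invalidate_tlbs callback and invalidate_tlbs_size pair up with the flush_gpu_tlb_pasid hook added to amdgpu_gmc_funcs later in this patch. A rough sketch, under the assumption that the ASIC code emits the invalidation on the KIQ ring when direct MMIO access is unavailable (the real gmc_v9/gmc_v10 implementations add MMIO fallbacks and error handling):

static int example_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				       uint16_t pasid, uint32_t flush_type,
				       bool all_hub)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	uint32_t seq;
	signed long r;

	spin_lock(&kiq->ring_lock);
	/* room for the invalidate packet plus the polling fence */
	amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock(&kiq->ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	return r < 1 ? -ETIME : 0;
}
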
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 924d83e711ef..5884ab590486 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
@@ -220,6 +222,14 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
u64 size_af, size_bf;
+ if (amdgpu_sriov_vf(adev)) {
+ mc->agp_start = 0xffffffffffff;
+ mc->agp_end = 0x0;
+ mc->agp_size = 0;
+
+ return;
+ }
+
if (mc->fb_start > mc->gart_start) {
size_bf = (mc->fb_start & sixteen_gb_mask) -
ALIGN(mc->gart_end + 1, sixteen_gb);
@@ -297,3 +307,69 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
gmc->fault_hash[hash].idx = gmc->last_fault++;
return false;
}
+
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
+ r = adev->umc.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
+ r = adev->mmhub.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ return amdgpu_xgmi_ras_late_init(adev);
+}
+
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_umc_ras_fini(adev);
+ amdgpu_mmhub_ras_fini(adev);
+ amdgpu_xgmi_ras_fini(adev);
+}
+
+ /*
+ * The latest engine allocation on gfx9/10 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
+ GFXHUB_FREE_VM_INV_ENGS_BITMAP};
+ unsigned i;
+ unsigned vmhub, inv_eng;
+
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ ring->vm_inv_eng = inv_eng - 1;
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+ }
+
+ return 0;
+}
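
For reference, 0x1FFF3 expands to 0b1_1111_1111_1111_0011: bits 0, 1 and 4-16 are set (free for rings), while bits 2-3 (firmware) and bit 17 (GART flushes) stay clear, matching the comment above. ffs() returns a 1-based bit index, which is why the ring is assigned inv_eng - 1 and that same bit is then cleared from the per-hub bitmap.
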
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 071145ac67b5..d3c27a3c43f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_sem;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
@@ -89,8 +90,11 @@ struct amdgpu_vmhub {
*/
struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
- void (*flush_gpu_tlb)(struct amdgpu_device *adev,
- uint32_t vmid, uint32_t flush_type);
+ void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type);
+ /* flush the vm tlb via pasid */
+ int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type, bool all_hub);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -99,12 +103,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
+ /* map mtype to hardware flags */
+ uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
+ /* get the pte flags to use for a BO VA mapping */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags);
};
struct amdgpu_xgmi {
@@ -120,21 +127,52 @@ struct amdgpu_xgmi {
/* gpu list in the same hive */
struct list_head head;
bool supported;
+ struct ras_common_if *ras_if;
};
struct amdgpu_gmc {
+ /* FB's physical address in MMIO space (for the CPU to
+ * map the FB). This differs from the agp/gart/vram_start/end
+ * fields, as the latter are from the GPU's view while
+ * aper_base is from the CPU's view.
+ */
resource_size_t aper_size;
resource_size_t aper_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
+ /* AGP aperture start and end in MC address space.
+ * The driver finds a hole in the MC address space
+ * to place AGP by setting the MC_VM_AGP_BOT/TOP registers.
+ * Under VMID0, logical address == MC address; the AGP
+ * aperture maps to physical bus or IOVA addresses.
+ * The AGP aperture is used to simulate FB in the ZFB case.
+ * It is also used for page tables in system
+ * memory (mainly for APUs).
+ */
u64 agp_size;
u64 agp_start;
u64 agp_end;
+ /* GART aperture start and end in MC address space.
+ * The driver finds a hole in the MC address space to place
+ * GART by setting the VM_CONTEXT0_PAGE_TABLE_START/END_ADDR
+ * registers.
+ * Under VMID0, a logical address inside the GART aperture is
+ * translated through the gpuvm GART page table to access
+ * paged system memory.
+ */
u64 gart_size;
u64 gart_start;
u64 gart_end;
+ /* Frame buffer aperture of this GPU device. Different from
+ * fb_start (see below), this only covers the local GPU device.
+ * The driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by
+ * the vbios) and calculates vram_start of this local device by
+ * adding an offset inside the XGMI hive.
+ * Under VMID0, logical address == MC address.
+ */
u64 vram_start;
u64 vram_end;
/* FB region, it's same as local vram region in single GPU, in XGMI
@@ -153,6 +191,7 @@ struct amdgpu_gmc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
@@ -177,14 +216,17 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
- struct ras_common_if *ras_if;
};
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
+#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
+ ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
+ ((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -229,5 +271,8 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
#endif
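
Since amdgpu_gmc_flush_gpu_tlb() now takes the target vmhub explicitly, a caller that wants to flush every hub has to loop over them. A small illustrative sketch; adev->num_vmhubs is assumed to be filled in by the GMC IP block, as in the existing gmc_v9 code:

	unsigned int vmhub;

	/* flush the VMID 0 (kernel) translations on every hub */
	for (vmhub = 0; vmhub < adev->num_vmhubs; vmhub++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, vmhub, 0);
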
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7850084a05e3..60655834d649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -143,7 +143,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- fence_ctx = job->base.s_fence->scheduled.context;
+ fence_ctx = job->base.s_fence ?
+ job->base.s_fence->scheduled.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 57b3d8a9bef3..3a67f6c046d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
*
* Free the pasid only after all the fences in resv are signaled.
*/
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned int pasid)
{
struct dma_fence *fence, **fences;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned count;
int r;
- r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
if (r)
goto fallback;
@@ -156,7 +156,7 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- reservation_object_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout_rcu(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
@@ -206,7 +206,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
int r;
if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
- return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+ return amdgpu_sync_fence(sync, ring->vmid_wait, false);
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences)
@@ -241,7 +241,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_sync_fence(adev, sync, &array->base, false);
+ r = amdgpu_sync_fence(sync, &array->base, false);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = &array->base;
return r;
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->entity.fence_context ||
+ if ((*id)->owner != vm->direct.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -294,7 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
- r = amdgpu_sync_fence(adev, sync, tmp, false);
+ r = amdgpu_sync_fence(sync, tmp, false);
return r;
}
needs_flush = true;
@@ -303,7 +303,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ r = amdgpu_sync_fence(&(*id)->active, fence, false);
if (r)
return r;
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->entity.fence_context)
+ if ((*id)->owner != vm->direct.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -368,13 +368,14 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
* are broken on Navi10 and Navi14.
*/
if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
- adev->asic_type == CHIP_NAVI10))
+ adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14))
continue;
/* Good, we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ r = amdgpu_sync_fence(&(*id)->active, fence, false);
if (r)
return r;
@@ -434,8 +435,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
id = idle;
/* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(ring->adev, &id->active,
- fence, false);
+ r = amdgpu_sync_fence(&id->active, fence, false);
if (r)
goto error;
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->entity.fence_context;
+ id->owner = vm->direct.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 7625419f0fc2..8e58325bbca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
int amdgpu_pasid_alloc(unsigned int bits);
void amdgpu_pasid_free(unsigned int pasid);
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned int pasid);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 6d8f05511aba..111a301ce878 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -66,7 +66,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
if (ih->ring == NULL)
return -ENOMEM;
- memset((void *)ih->ring, 0, ih->ring_size + 8);
ih->gpu_addr = dma_addr;
ih->wptr_addr = dma_addr + ih->ring_size;
ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 2a3f5ec298db..5ed4227f304b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -55,6 +55,7 @@
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
#include <linux/pm_runtime.h>
@@ -87,10 +88,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
struct drm_device *dev = adev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
- list_for_each_entry(connector, &mode_config->connector_list, head)
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -153,6 +157,22 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
+
+ /* For hardware that cannot enable the bif ring for both the
+ * ras_controller_irq and ras_err_event_athub_irq ih cookies, the driver
+ * has to poll the status register to check whether the interrupt has
+ * been triggered, and properly ack it if it is there.
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+ }
+
return ret;
}
@@ -228,10 +248,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
- int ret = pci_enable_msi(adev->pdev);
- if (!ret) {
+ int nvec = pci_msix_vec_count(adev->pdev);
+ unsigned int flags;
+
+ if (nvec <= 0) {
+ flags = PCI_IRQ_MSI;
+ } else {
+ flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ }
+ /* we only need one vector */
+ nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+ if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
}
}
@@ -254,7 +283,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
adev->irq.installed = true;
- r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
+ /* Use vector 0 for MSI-X */
+ r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
@@ -284,7 +314,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
drm_irq_uninstall(adev->ddev);
adev->irq.installed = false;
if (adev->irq.msi_enabled)
- pci_disable_msi(adev->pdev);
+ pci_free_irq_vectors(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
}
@@ -369,7 +399,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector pointer
+ * @ih: interrupt ring instance
*
* Dispatches IRQ to IP blocks.
*/
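
The MSI-X change in amdgpu_irq_init() above follows the generic PCI pattern: request a single vector, allow MSI-X only when the device advertises it, and hand vector 0 to drm_irq_install(). A stripped-down, self-contained sketch of that pattern for an arbitrary PCI driver (error handling trimmed; names are illustrative):

#include <linux/pci.h>

static int example_request_one_vector(struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_MSI;
	int nvec;

	/* prefer MSI-X when the function exposes an MSI-X capability */
	if (pci_msix_vec_count(pdev) > 0)
		flags |= PCI_IRQ_MSIX;

	/* only one vector is needed */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (nvec < 0)
		return nvec;

	/* the Linux IRQ number to pass to request_irq()/drm_irq_install() */
	return pci_irq_vector(pdev, 0);
}
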
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9d76e0923a5a..d42be880a236 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -153,7 +153,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
- job->owner = owner;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
priority = job->base.s_priority;
@@ -193,8 +192,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
if (drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
- fence, false);
+ r = amdgpu_sync_fence(&job->sched_sync, fence, false);
if (r)
DRM_ERROR("Error adding fence (%d)\n", r);
}
@@ -218,7 +216,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
- int r;
+ int r = 0;
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
@@ -243,9 +241,49 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
+
+ fence = r ? ERR_PTR(r) : fence;
return fence;
}
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 51e62504c279..3f7b8433d179 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -49,7 +49,6 @@ struct amdgpu_job {
uint32_t preamble_status;
uint32_t preemption_status;
uint32_t num_ibs;
- void *owner;
bool vm_needs_flush;
uint64_t vm_pd_addr;
unsigned vmid;
@@ -76,4 +75,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
new file mode 100644
index 000000000000..5727f00afc8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+
+#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
+{
+ INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
+
+ return 0;
+}
+
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec);
+ }
+
+ return 0;
+}
+
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev)
+{
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ return 0;
+}
+
+int amdgpu_jpeg_resume(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, jpeg.idle_work.work);
+ unsigned int fences = 0;
+ unsigned int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
+ }
+
+ if (fences == 0)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_PG_STATE_GATE);
+ else
+ schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ if (set_clocks)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_PG_STATE_UNGATE);
+}
+
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r)
+ return r;
+
+ amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ const unsigned ib_size_dw = 16;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+
+ ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ struct dma_fence *fence = NULL;
+ long r = 0;
+
+ r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto error;
+ } else if (r < 0) {
+ goto error;
+ } else {
+ r = 0;
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ dma_fence_put(fence);
+error:
+ return r;
+}
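
These helpers are meant to be wired into the ASIC-specific JPEG decode ring. A sketch of the relevant amdgpu_ring_funcs fields, assuming the usual test/power-gating hookup (the emit callbacks, ring type and sizing fields are omitted here):

static const struct amdgpu_ring_funcs example_jpeg_dec_ring_funcs = {
	/* power-gating bookkeeping around ring activity */
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	/* sanity tests driven by the common ring/IB test infrastructure */
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
};
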
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
new file mode 100644
index 000000000000..bd9ef9cc86de
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_JPEG_H__
+#define __AMDGPU_JPEG_H__
+
+#define AMDGPU_MAX_JPEG_INSTANCES 2
+
+#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
+#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+
+struct amdgpu_jpeg_reg {
+ unsigned jpeg_pitch;
+};
+
+struct amdgpu_jpeg_inst {
+ struct amdgpu_ring ring_dec;
+ struct amdgpu_irq_src irq;
+ struct amdgpu_jpeg_reg external;
+};
+
+struct amdgpu_jpeg {
+ uint8_t num_jpeg_inst;
+ struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
+ struct amdgpu_jpeg_reg internal;
+ struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
+ uint32_t num_jpeg_sched;
+ unsigned harvest_config;
+ struct delayed_work idle_work;
+ enum amd_powergating_state cur_state;
+};
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev);
+int amdgpu_jpeg_resume(struct amdgpu_device *adev);
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring);
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0cf7e8606fd3..60591dbc2097 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
- if (amdgpu_device_is_px(dev)) {
+ if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
@@ -144,49 +144,13 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
struct amdgpu_device *adev;
int r, acpi_status;
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(dev->dev,
- "SI support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(dev->dev,
- "CIK support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-
adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
if (adev == NULL) {
return -ENOMEM;
}
dev->dev_private = (void *)adev;
- if ((amdgpu_runtime_pm != 0) &&
- amdgpu_has_atpx() &&
+ if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0) &&
@@ -205,6 +169,13 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
goto out;
}
+ if (amdgpu_device_supports_boco(dev) &&
+ (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+ adev->runpm = true;
+ else if (amdgpu_device_supports_baco(dev) &&
+ (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+ adev->runpm = true;
+
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
@@ -215,7 +186,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- if (amdgpu_device_is_px(dev)) {
+ if (adev->runpm) {
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -225,11 +196,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
- amdgpu_register_gpu_instance(adev);
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
- if (adev->rmmio && amdgpu_device_is_px(dev))
+ if (adev->rmmio && adev->runpm)
pm_runtime_put_noidle(dev->dev);
amdgpu_driver_unload_kms(dev);
}
@@ -329,6 +299,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->dm.dmcu_fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_DMCUB:
+ fw_info->ver = adev->dm.dmcub_fw_version;
+ fw_info->feature = 0;
+ break;
default:
return -EINVAL;
}
@@ -408,23 +382,40 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_dec.sched.ready)
- ++num_rings;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+
+ if (adev->vcn.inst[i].ring_dec.sched.ready)
+ ++num_rings;
+ }
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
- for (i = 0; i < adev->vcn.num_enc_rings; i++)
- if (adev->vcn.ring_enc[i].sched.ready)
- ++num_rings;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+
+ for (j = 0; j < adev->vcn.num_enc_rings; j++)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ ++num_rings;
+ }
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
- type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_jpeg.sched.ready)
- ++num_rings;
+ type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (adev->jpeg.inst[i].ring_dec.sched.ready)
+ ++num_rings;
+ }
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
@@ -538,9 +529,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
- case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+ break;
default:
return -EINVAL;
}
@@ -604,9 +598,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ vram_gtt.vram_cpu_accessible_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ vram_gtt.vram_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
@@ -619,15 +616,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -662,20 +662,27 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ if (info->read_mmr_reg.count > 128)
+ return -EINVAL;
+
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
- for (i = 0; i < info->read_mmr_reg.count; i++)
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
&regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
+ amdgpu_gfx_off_ctrl(adev, true);
return -EFAULT;
}
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
return n ? -EFAULT : 0;
@@ -696,10 +703,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
- } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
- adev->virt.ops->get_pp_clk) {
- dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
- dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
@@ -746,17 +749,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
-
- if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
- dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
- dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
- dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
- dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
- }
dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -769,6 +761,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.pa_sc_tile_steering_override =
adev->gfx.config.pa_sc_tile_steering_override;
+ dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
@@ -983,6 +977,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->delayed_init_work);
+
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_ERROR("RAS Intr triggered, device disabled!!");
+ return -EHWPOISON;
+ }
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
@@ -1088,7 +1088,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
- amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+ amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -1405,6 +1405,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* DMCUB */
+ query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..676c48c02d77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "mmhub_err_count",
+ .debugfs_name = "mmhub_err_inject",
+ };
+
+ if (!adev->mmhub.ras_if) {
+ adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->mmhub.ras_if)
+ return -ENOMEM;
+ adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
+ adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if->sub_block_index = 0;
+ strcpy(adev->mmhub.ras_if->name, "mmhub");
+ }
+ ih_info.head = fs_info.head = *adev->mmhub.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) {
+ kfree(adev->mmhub.ras_if);
+ adev->mmhub.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+ adev->mmhub.ras_if) {
+ struct ras_common_if *ras_if = adev->mmhub.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
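
The common helpers above are intended to be referenced from the per-ASIC mmhub funcs table, so the GMC code can reach them through adev->mmhub.funcs->ras_late_init (see amdgpu_gmc_ras_late_init earlier in this patch). A sketch of that hookup; the struct name and the query callback are assumptions for illustration:

static const struct amdgpu_mmhub_funcs example_mmhub_funcs = {
	.ras_late_init = amdgpu_mmhub_ras_late_init,
	/* ASIC-specific error counter readout, assumed to exist */
	.query_ras_error_count = example_mmhub_query_ras_error_count,
};
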
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 8f515875a34d..1cd78940cf82 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,15 +18,22 @@
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _mmhub_9_4_0_OFFSET_HEADER
-#define _mmhub_9_4_0_OFFSET_HEADER
+#ifndef __AMDGPU_MMHUB_H__
+#define __AMDGPU_MMHUB_H__
+struct amdgpu_mmhub_funcs {
+ void (*ras_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+};
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-// base address: 0x6a040
-#define mmMC_VM_XGMI_LFB_CNTL 0x0823
-#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
-#define mmMC_VM_XGMI_LFB_SIZE 0x0824
-#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+struct amdgpu_mmhub {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mmhub_funcs *funcs;
+};
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3971c201f320..828b5167ff12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -51,437 +51,107 @@
#include "amdgpu_amdkfd.h"
/**
- * struct amdgpu_mn_node
+ * amdgpu_mn_invalidate_gfx - callback to notify about mm change
*
- * @it: interval node defining start-last of the affected address range
- * @bos: list of all BOs in the affected address range
- *
- * Manages all BOs which are affected of a certain range of address space.
- */
-struct amdgpu_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
-/**
- * amdgpu_mn_destroy - destroy the HMM mirror
- *
- * @work: previously sheduled work item
- *
- * Lazy destroys the notifier from a work item
- */
-static void amdgpu_mn_destroy(struct work_struct *work)
-{
- struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = amn->adev;
- struct amdgpu_mn_node *node, *next_node;
- struct amdgpu_bo *bo, *next_bo;
-
- mutex_lock(&adev->mn_lock);
- down_write(&amn->lock);
- hash_del(&amn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node,
- &amn->objects.rb_root, it.rb) {
- list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
- }
- kfree(node);
- }
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-
- hmm_mirror_unregister(&amn->mirror);
- kfree(amn);
-}
-
-/**
- * amdgpu_hmm_mirror_release - callback to notify about mm destruction
- *
- * @mirror: the HMM mirror (mm) this callback is about
- *
- * Shedule a work item to lazy destroy HMM mirror.
- */
-static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
-
- INIT_WORK(&amn->work, amdgpu_mn_destroy);
- schedule_work(&amn->work);
-}
-
-/**
- * amdgpu_mn_lock - take the write side lock for this notifier
- *
- * @mn: our notifier
- */
-void amdgpu_mn_lock(struct amdgpu_mn *mn)
-{
- if (mn)
- down_write(&mn->lock);
-}
-
-/**
- * amdgpu_mn_unlock - drop the write side lock for this notifier
- *
- * @mn: our notifier
- */
-void amdgpu_mn_unlock(struct amdgpu_mn *mn)
-{
- if (mn)
- up_write(&mn->lock);
-}
-
-/**
- * amdgpu_mn_read_lock - take the read side lock for this notifier
- *
- * @amn: our notifier
- */
-static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
-{
- if (blockable)
- down_read(&amn->lock);
- else if (!down_read_trylock(&amn->lock))
- return -EAGAIN;
-
- return 0;
-}
-
-/**
- * amdgpu_mn_read_unlock - drop the read side lock for this notifier
- *
- * @amn: our notifier
- */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
-{
- up_read(&amn->lock);
-}
-
-/**
- * amdgpu_mn_invalidate_node - unmap all BOs of a node
- *
- * @node: the node with the BOs to unmap
- * @start: start of address range affected
- * @end: end of address range affected
+ * @mni: the mmu interval notifier for the range (mm) about to be updated
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
-static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
- unsigned long start,
- unsigned long end)
+static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
- continue;
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- }
-}
-
-/**
- * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
- *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
- *
- * Block for operations on BOs to finish and mark pages as accessed and
- * potentially dirty.
- */
-static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
- const struct hmm_update *update)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
- unsigned long start = update->start;
- unsigned long end = update->end;
- bool blockable = update->blockable;
- struct interval_tree_node *it;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
+ mutex_lock(&adev->notifier_lock);
- /* TODO we should be able to split locking for interval tree and
- * amdgpu_mn_invalidate_node
- */
- if (amdgpu_mn_read_lock(amn, blockable))
- return -EAGAIN;
+ mmu_interval_set_seq(mni, cur_seq);
- it = interval_tree_iter_first(&amn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
-
- if (!blockable) {
- amdgpu_mn_read_unlock(amn);
- return -EAGAIN;
- }
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- amdgpu_mn_invalidate_node(node, start, end);
- }
-
- amdgpu_mn_read_unlock(amn);
-
- return 0;
-}
-
-/**
- * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
- *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
- *
- * We temporarily evict all BOs between start and end. This
- * necessitates evicting all user-mode queues of the process. The BOs
- * are restorted in amdgpu_mn_invalidate_range_end_hsa.
- */
-static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
- const struct hmm_update *update)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
- unsigned long start = update->start;
- unsigned long end = update->end;
- bool blockable = update->blockable;
- struct interval_tree_node *it;
-
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
-
- if (amdgpu_mn_read_lock(amn, blockable))
- return -EAGAIN;
-
- it = interval_tree_iter_first(&amn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
- struct amdgpu_bo *bo;
-
- if (!blockable) {
- amdgpu_mn_read_unlock(amn);
- return -EAGAIN;
- }
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- list_for_each_entry(bo, &node->bos, mn_list) {
- struct kgd_mem *mem = bo->kfd_bo;
-
- if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, amn->mm);
- }
- }
-
- amdgpu_mn_read_unlock(amn);
-
- return 0;
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ mutex_unlock(&adev->notifier_lock);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+ return true;
}
-/* Low bits of any reasonable mm pointer will be unused due to struct
- * alignment. Use these bits to make a unique key from the mm pointer
- * and notifier type.
- */
-#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
-
-static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
- [AMDGPU_MN_TYPE_GFX] = {
- .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
- .release = amdgpu_hmm_mirror_release
- },
- [AMDGPU_MN_TYPE_HSA] = {
- .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
- .release = amdgpu_hmm_mirror_release
- },
+static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
+ .invalidate = amdgpu_mn_invalidate_gfx,
};
/**
- * amdgpu_mn_get - create HMM mirror context
+ * amdgpu_mn_invalidate_hsa - callback to notify about mm change
*
- * @adev: amdgpu device pointer
- * @type: type of MMU notifier context
+ * @mni: the interval notifier for the range (mm) that is about to be updated
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
*
- * Creates a HMM mirror context for current->mm.
+ * We temporarily evict the BO attached to this range. This necessitates
+ * evicting all user-mode queues of the process.
*/
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type)
+static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct mm_struct *mm = current->mm;
- struct amdgpu_mn *amn;
- unsigned long key = AMDGPU_MN_KEY(mm, type);
- int r;
-
- mutex_lock(&adev->mn_lock);
- if (down_write_killable(&mm->mmap_sem)) {
- mutex_unlock(&adev->mn_lock);
- return ERR_PTR(-EINTR);
- }
-
- hash_for_each_possible(adev->mn_hash, amn, node, key)
- if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
- goto release_locks;
-
- amn = kzalloc(sizeof(*amn), GFP_KERNEL);
- if (!amn) {
- amn = ERR_PTR(-ENOMEM);
- goto release_locks;
- }
-
- amn->adev = adev;
- amn->mm = mm;
- init_rwsem(&amn->lock);
- amn->type = type;
- amn->objects = RB_ROOT_CACHED;
-
- amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
- r = hmm_mirror_register(&amn->mirror, mm);
- if (r)
- goto free_amn;
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
+ if (!mmu_notifier_range_blockable(range))
+ return false;
-release_locks:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
+ mutex_lock(&adev->notifier_lock);
- return amn;
+ mmu_interval_set_seq(mni, cur_seq);
-free_amn:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
- kfree(amn);
+ amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
+ mutex_unlock(&adev->notifier_lock);
- return ERR_PTR(r);
+ return true;
}
+static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
+ .invalidate = amdgpu_mn_invalidate_hsa,
+};
+
/**
* amdgpu_mn_register - register a BO for notifier updates
*
* @bo: amdgpu buffer object
* @addr: userptr addr we should monitor
*
- * Registers an HMM mirror for the given BO at the specified address.
+ * Registers a mmu_notifier for the given BO at the specified address.
* Returns 0 on success, -ERRNO if anything goes wrong.
*/
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
- unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- enum amdgpu_mn_type type =
- bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
- struct amdgpu_mn *amn;
- struct amdgpu_mn_node *node = NULL, *new_node;
- struct list_head bos;
- struct interval_tree_node *it;
-
- amn = amdgpu_mn_get(adev, type);
- if (IS_ERR(amn))
- return PTR_ERR(amn);
-
- new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
- if (!new_node)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&bos);
-
- down_write(&amn->lock);
-
- while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &amn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node)
- node = new_node;
- else
- kfree(new_node);
-
- bo->mn = amn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &amn->objects);
-
- up_write(&amn->lock);
-
- return 0;
+ if (bo->kfd_bo)
+ return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ addr, amdgpu_bo_size(bo),
+ &amdgpu_mn_hsa_ops);
+ return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_mn_gfx_ops);
}
/**
- * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
+ * amdgpu_mn_unregister - unregister a BO for notifier updates
*
* @bo: amdgpu buffer object
*
- * Remove any registration of HMM mirror updates from the buffer object.
+ * Remove any registration of mmu notifier updates from the buffer object.
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *amn;
- struct list_head *head;
-
- mutex_lock(&adev->mn_lock);
-
- amn = bo->mn;
- if (amn == NULL) {
- mutex_unlock(&adev->mn_lock);
+ if (!bo->notifier.mm)
return;
- }
-
- down_write(&amn->lock);
-
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
-
- if (list_empty(head)) {
- struct amdgpu_mn_node *node;
-
- node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &amn->objects);
- kfree(node);
- }
-
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-}
-
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
-void amdgpu_hmm_init_range(struct hmm_range *range)
-{
- if (range) {
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
- INIT_LIST_HEAD(&range->list);
- }
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
}
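
The invalidate callbacks above pair with the mmu_interval_read_begin()/mmu_interval_read_retry() sequence on the user-pages side. The sketch below is not part of this patch; it only illustrates that generic consumer pattern under the same locking scheme used here (the invalidate callback takes the driver lock and calls mmu_interval_set_seq(), so the retry check under the lock is race-free). The helper my_update_user_pages() and driver_lock are hypothetical stand-ins, not amdgpu functions.

/* Illustrative only: generic mmu_interval_notifier consumer pattern. */
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static int my_update_user_pages(struct mmu_interval_notifier *notifier,
				struct mutex *driver_lock,
				unsigned long start, int npages,
				struct page **pages)
{
	unsigned long seq;
	int pinned, i;

retry:
	/* Sample the invalidation sequence before touching the mm. */
	seq = mmu_interval_read_begin(notifier);

	pinned = get_user_pages_fast(start, npages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;
	if (pinned != npages) {
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return -EFAULT;
	}

	/*
	 * The invalidate callback takes the same lock and bumps the sequence
	 * via mmu_interval_set_seq(), so checking the sequence under the lock
	 * closes the race with concurrent invalidations.
	 */
	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(notifier, seq)) {
		mutex_unlock(driver_lock);
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		goto retry;
	}

	/* ... update GPU page tables while still holding driver_lock ... */
	mutex_unlock(driver_lock);
	return 0;
}
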
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index b8ed68943625..a292238f75eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -30,63 +30,10 @@
#include <linux/workqueue.h>
#include <linux/interval_tree.h>
-enum amdgpu_mn_type {
- AMDGPU_MN_TYPE_GFX,
- AMDGPU_MN_TYPE_HSA,
-};
-
-/**
- * struct amdgpu_mn
- *
- * @adev: amdgpu device pointer
- * @mm: process address space
- * @type: type of MMU notifier
- * @work: destruction work item
- * @node: hash table node to find structure by adev and mn
- * @lock: rw semaphore protecting the notifier nodes
- * @objects: interval tree containing amdgpu_mn_nodes
- * @mirror: HMM mirror function support
- *
- * Data for each amdgpu device and process address space.
- */
-struct amdgpu_mn {
- /* constant after initialisation */
- struct amdgpu_device *adev;
- struct mm_struct *mm;
- enum amdgpu_mn_type type;
-
- /* only used on destruction */
- struct work_struct work;
-
- /* protected by adev->mn_lock */
- struct hlist_node node;
-
- /* objects protected by lock */
- struct rw_semaphore lock;
- struct rb_root_cached objects;
-
-#ifdef CONFIG_HMM_MIRROR
- /* HMM mirror */
- struct hmm_mirror mirror;
-#endif
-};
-
#if defined(CONFIG_HMM_MIRROR)
-void amdgpu_mn_lock(struct amdgpu_mn *mn);
-void amdgpu_mn_unlock(struct amdgpu_mn *mn);
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-void amdgpu_hmm_init_range(struct hmm_range *range);
#else
-static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
-static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
-static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type)
-{
- return NULL;
-}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
new file mode 100644
index 000000000000..7d5c3a9de9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "pcie_bif_err_count",
+ .debugfs_name = "pcie_bif_err_inject",
+ };
+
+ if (!adev->nbio.ras_if) {
+ adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->nbio.ras_if)
+ return -ENOMEM;
+ adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if->sub_block_index = 0;
+ strcpy(adev->nbio.ras_if->name, "pcie_bif");
+ }
+ ih_info.head = fs_info.head = *adev->nbio.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
+ if (r)
+ goto late_fini;
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info);
+free:
+ kfree(adev->nbio.ras_if);
+ adev->nbio.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
+ adev->nbio.ras_if) {
+ struct ras_common_if *ras_if = adev->nbio.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
new file mode 100644
index 000000000000..919bd566ba3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_NBIO_H__
+#define __AMDGPU_NBIO_H__
+
+/*
+ * amdgpu nbio functions
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+ u32 ref_and_mask_sdma2;
+ u32 ref_and_mask_sdma3;
+ u32 ref_and_mask_sdma4;
+ u32 ref_and_mask_sdma5;
+ u32 ref_and_mask_sdma6;
+ u32 ref_and_mask_sdma7;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ struct amdgpu_irq_src ras_controller_irq;
+ struct amdgpu_irq_src ras_err_event_athub_irq;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_nbio_funcs *funcs;
+};
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+#endif
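
The new amdgpu_nbio_funcs table is populated by per-ASIC backends elsewhere in the driver (e.g. the nbio_v*.c files). As a rough sketch of how such a backend would hook into the shared RAS helper added in amdgpu_nbio.c above, with hypothetical my_nbio_* names:

/* Illustrative only; the real callback tables live in nbio_v*.c. */
static void my_nbio_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status)
{
	/* read ASIC-specific BIF/PCIE error counters into ras_error_status */
}

static const struct amdgpu_nbio_funcs my_nbio_funcs = {
	/* ... register offset, doorbell and clockgating callbacks ... */
	.query_ras_error_count = my_nbio_query_ras_error_count,
	/* reuse the common helper from amdgpu_nbio.c */
	.ras_late_init = amdgpu_nbio_ras_late_init,
};

Early ASIC init would then set adev->nbio.funcs = &my_nbio_funcs so that common code can reach the helper through adev->nbio.funcs->ras_late_init(adev).
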
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index bea6f298dfdc..e3f16b49e970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -80,14 +80,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
if (bo->pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
- if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_memory_limit(bo);
-
amdgpu_bo_kunmap(bo);
- if (bo->gem_base.import_attach)
- drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
- drm_gem_object_release(&bo->gem_base);
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
/* in case amdgpu_device_recover_vram got NULL of bo->parent */
if (!list_empty(&bo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
@@ -249,8 +246,9 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
bp.size = size;
bp.byte_align = align;
bp.domain = domain;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
+ : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
@@ -345,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+ NULL, cpu_addr);
+ if (r)
+ return r;
+
+ /*
+ * Remove the original mem node and create a new one at the requested
+ * position.
+ */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.mem, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
+
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
+ amdgpu_bo_unref(bo_ptr);
+ return r;
+}
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -413,15 +475,50 @@ fail:
return false;
}
+bool amdgpu_bo_support_uswc(u64 bo_flags)
+{
+
+#ifdef CONFIG_X86_32
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+ return false;
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+ /* Don't try to enable write-combining when it can't work, or things
+ * may be slow
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+ */
+
+#ifndef CONFIG_COMPILE_TEST
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+ thanks to write-combining
+#endif
+
+ if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+ return false;
+#else
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ return false;
+
+ return true;
+#endif
+}
+
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
- .no_wait_gpu = false,
+ .no_wait_gpu = bp->no_wait_gpu,
.resv = bp->resv,
- .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ .flags = bp->type != ttm_bo_type_kernel ?
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
@@ -454,7 +551,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
INIT_LIST_HEAD(&bo->shadow_list);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -466,33 +563,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->flags = bp->flags;
-#ifdef CONFIG_X86_32
- /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
- * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
- */
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
- /* Don't try to enable write-combining when it can't work, or things
- * may be slow
- * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
- */
-
-#ifndef CONFIG_COMPILE_TEST
-#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
- thanks to write-combining
-#endif
-
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
- "better performance thanks to write-combining\n");
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-#else
- /* For architectures that don't support WC memory,
- * mask out the WC flag from the BO
- */
- if (!drm_arch_can_wc_memory())
+ if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-#endif
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
@@ -521,7 +593,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -544,7 +616,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
fail_unreserve:
if (!bp->resv)
- ww_mutex_unlock(&bo->tbo.resv->lock);
+ dma_resv_unlock(bo->tbo.base.resv);
amdgpu_bo_unref(&bo);
return r;
}
@@ -565,7 +637,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW;
bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.resv;
+ bp.resv = bo->tbo.base.resv;
r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
if (!r) {
@@ -606,13 +678,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
if (!bp->resv)
- WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
NULL));
r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
if (!bp->resv)
- reservation_object_unlock((*bo_ptr)->tbo.resv);
+ dma_resv_unlock((*bo_ptr)->tbo.base.resv);
if (r)
amdgpu_bo_unref(bo_ptr);
@@ -709,7 +781,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
@@ -1051,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &bo->tbo);
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ return ttm_bo_mmap_obj(vma, &bo->tbo);
}
/**
@@ -1087,7 +1162,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
*/
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
@@ -1212,6 +1287,42 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
}
/**
+ * amdgpu_bo_release_notify - notification about a BO being released
+ * @bo: pointer to a buffer object
+ *
+ * Wipes VRAM buffers whose contents should not be leaked before the
+ * memory is released.
+ */
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+{
+ struct dma_fence *fence = NULL;
+ struct amdgpu_bo *abo;
+ int r;
+
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return;
+
+ abo = ttm_to_amdgpu_bo(bo);
+
+ if (abo->kfd_bo)
+ amdgpu_amdkfd_unreserve_memory_limit(abo);
+
+ if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+ !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+ return;
+
+ dma_resv_lock(bo->base.resv, NULL);
+
+ r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
+ if (!WARN_ON(r)) {
+ amdgpu_bo_fence(abo, fence, false);
+ dma_fence_put(fence);
+ }
+
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
* amdgpu_bo_fault_reserve_notify - notification about a memory fault
* @bo: pointer to a buffer object
*
@@ -1283,12 +1394,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
if (shared)
- reservation_object_add_shared_fence(resv, fence);
+ dma_resv_add_shared_fence(resv, fence);
else
- reservation_object_add_excl_fence(resv, fence);
+ dma_resv_add_excl_fence(resv, fence);
}
/**
@@ -1308,7 +1419,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+ amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
@@ -1328,7 +1439,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+ WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d60593cc436e..36dec51d1ef1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -30,6 +30,9 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS 3
@@ -41,7 +44,8 @@ struct amdgpu_bo_param {
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
- struct reservation_object *resv;
+ bool no_wait_gpu;
+ struct dma_resv *resv;
};
/* bo virtual addresses in a vm */
@@ -94,17 +98,18 @@ struct amdgpu_bo {
/* per VM structure for page tables and with virtual addresses */
struct amdgpu_vm_bo_base *vm_bo;
/* Constant after initialization */
- struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;
- union {
- struct list_head mn_list;
- struct list_head shadow_list;
- };
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
+
+ struct list_head shadow_list;
struct kgd_mem *kfd_bo;
};
@@ -192,7 +197,7 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
*/
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
@@ -238,6 +243,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
@@ -265,6 +273,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *new_mem);
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
@@ -308,5 +317,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
#endif
+bool amdgpu_bo_support_uswc(u64 bo_flags);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 2b546567853b..b03b1eb7ba04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -37,6 +37,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include <linux/pm_runtime.h>
#include "hwmgr.h"
#define WIDTH_4K 3840
@@ -158,10 +159,18 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
+ pm = smu_get_current_power_state(&adev->smu);
else
pm = adev->pm.dpm.user_state;
} else if (adev->powerplay.pp_funcs->get_current_power_state) {
@@ -170,6 +179,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
pm = adev->pm.dpm.user_state;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -183,6 +195,10 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
@@ -190,10 +206,12 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
state = POWER_STATE_TYPE_BALANCED;
else if (strncmp("performance", buf, strlen("performance")) == 0)
state = POWER_STATE_TYPE_PERFORMANCE;
- else {
- count = -EINVAL;
- goto fail;
- }
+ else
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
@@ -206,12 +224,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
- /* Can't set dpm state when the card is off */
- if (!(adev->flags & AMD_IS_PX) ||
- (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_pm_compute_clocks(adev);
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -282,13 +299,14 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level = 0xff;
+ int ret;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
level = smu_get_performance_level(&adev->smu);
@@ -297,6 +315,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
else
level = adev->pm.dpm.forced_level;
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -320,18 +341,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
- /* Can't force performance level when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
- if (!amdgpu_sriov_vf(adev)) {
- if (is_support_sw_smu(adev))
- current_level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
- current_level = amdgpu_dpm_get_performance_level(adev);
- }
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -351,24 +363,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
} else {
- count = -EINVAL;
- goto fail;
- }
-
- if (amdgpu_sriov_vf(adev)) {
- if (amdgim_is_hwperf(adev) &&
- adev->virt.ops->force_dpm_level) {
- mutex_lock(&adev->pm.mutex);
- adev->virt.ops->force_dpm_level(adev, level);
- mutex_unlock(&adev->pm.mutex);
- return count;
- } else {
- return -EINVAL;
- }
- }
+ return -EINVAL;
+ }
- if (current_level == level)
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_support_sw_smu(adev))
+ current_level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
+ current_level = amdgpu_dpm_get_performance_level(adev);
+
+ if (current_level == level) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
+ }
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
@@ -377,29 +388,40 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
(level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
pr_err("Currently not in any profile mode!\n");
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
if (is_support_sw_smu(adev)) {
ret = smu_force_performance_level(&adev->smu, level);
- if (ret)
- count = -EINVAL;
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ }
} else if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
- count = -EINVAL;
mutex_unlock(&adev->pm.mutex);
- goto fail;
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret)
- count = -EINVAL;
- else
+ if (ret) {
+ mutex_unlock(&adev->pm.mutex);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ } else {
adev->pm.dpm.forced_level = level;
+ }
mutex_unlock(&adev->pm.mutex);
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
-fail:
return count;
}
@@ -412,6 +434,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
@@ -419,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
} else if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
@@ -441,6 +470,13 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
pm = smu_get_current_power_state(smu);
ret = smu_get_power_num_states(smu, &data);
@@ -452,6 +488,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
amdgpu_dpm_get_pp_num_states(adev, &data);
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
@@ -470,6 +509,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -487,6 +529,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
else if (is_support_sw_smu(adev))
@@ -496,14 +541,18 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states)) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret || idx >= ARRAY_SIZE(data.states))
+ return -EINVAL;
+
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
/* only set user selected power states */
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
@@ -511,8 +560,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
AMD_PP_TASK_ENABLE_USER_STATE, &state);
adev->pp_force_state_enabled = true;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
}
-fail:
+
return count;
}
@@ -534,17 +585,32 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size;
+ int size, ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (size < 0)
return size;
- }
- else if (adev->powerplay.pp_funcs->get_pp_table)
+ } else if (adev->powerplay.pp_funcs->get_pp_table) {
size = amdgpu_dpm_get_pp_table(adev, &table);
- else
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ if (size < 0)
+ return size;
+ } else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return 0;
+ }
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
@@ -563,13 +629,26 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
} else if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -655,6 +734,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
if (count > 127)
return -EINVAL;
@@ -690,18 +772,28 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_od_edit_dpm_table(&adev->smu, type,
parameter, parameter_size);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
} else {
if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
parameter, parameter_size);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
@@ -709,12 +801,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
} else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
}
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
}
@@ -725,31 +823,40 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint32_t size = 0;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
- return size;
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- return size;
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return size;
}
/**
- * DOC: ppfeatures
+ * DOC: pp_features
*
* The amdgpu driver provides a sysfs API for adjusting what powerplay
- * features to be enabled. The file ppfeatures is used for this. And
+ * features to be enabled. The file pp_features is used for this. And
* this is only available for Vega10 and later dGPUs.
*
* Reading back the file will show you the followings:
@@ -761,7 +868,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
* the corresponding bit from original ppfeature masks and input the
* new ppfeature masks.
*/
-static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
+static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
@@ -771,43 +878,71 @@ static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
uint64_t featuremask;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
pr_debug("featuremask = 0x%llx\n", featuremask);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
- ret = smu_set_ppfeature_status(&adev->smu, featuremask);
- if (ret)
+ ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
}
-static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
+static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
- if (is_support_sw_smu(adev)) {
- return smu_get_ppfeature_status(&adev->smu, buf);
- } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
- return amdgpu_dpm_get_ppfeature_status(adev, buf);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (is_support_sw_smu(adev))
+ size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
+ else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+ size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
- * pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -823,9 +958,15 @@ static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
- * Secondly,Enter a new value for each level by inputing a string that
+ * Secondly, enter a new value for each level by inputting a string that
* contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
+ * E.g.,
+ *
+ * .. code-block:: bash
+ *
+ * echo "4 5 6" > pp_dpm_sclk
+ *
+ * will enable sclk levels 4, 5, and 6.
*
* NOTE: change to the dcefclk max dpm level is not supported now
*/
@@ -836,17 +977,27 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
- adev->virt.ops->get_pp_clk)
- return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
/*
@@ -895,18 +1046,25 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (ret)
return -EINVAL;
@@ -919,17 +1077,27 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
- adev->virt.ops->get_pp_clk)
- return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -939,21 +1107,28 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret;
uint32_t mask = 0;
+ int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (ret)
return -EINVAL;
@@ -966,13 +1141,27 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -985,14 +1174,26 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1006,13 +1207,27 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1025,14 +1240,26 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1046,13 +1273,27 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1065,14 +1306,26 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1086,13 +1339,27 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -1105,14 +1372,26 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1127,12 +1406,23 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -1146,12 +1436,17 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
ret = kstrtol(buf, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1167,7 +1462,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
}
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -1178,12 +1475,23 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -1197,12 +1505,17 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
ret = kstrtol(buf, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1218,7 +1531,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
}
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -1248,13 +1563,27 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_get_power_profile_mode(&adev->smu, buf);
+ size = smu_get_power_profile_mode(&adev->smu, buf);
else if (adev->powerplay.pp_funcs->get_power_profile_mode)
- return amdgpu_dpm_get_power_profile_mode(adev, buf);
+ size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "\n");
+ return size;
}
@@ -1279,7 +1608,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
- goto fail;
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
@@ -1291,23 +1623,30 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
}
parameter[parameter_size] = profile_mode;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (!ret)
return count;
-fail:
+
return -EINVAL;
}
@@ -1327,10 +1666,20 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0)
+ return r;
+
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&value, &size);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (r)
return r;
@@ -1353,10 +1702,20 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0)
+ return r;
+
/* read the memory busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
(void *)&value, &size);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (r)
return r;
@@ -1382,8 +1741,20 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint64_t count0, count1;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
}
@@ -1405,6 +1776,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1458,9 +1832,9 @@ static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
amdgpu_get_memory_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
- amdgpu_get_ppfeature_status,
- amdgpu_set_ppfeature_status);
+static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_feature_status,
+ amdgpu_set_pp_feature_status);
static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
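
For reference, DEVICE_ATTR(_name, _mode, _show, _store) both names the sysfs file and declares a struct device_attribute symbol called dev_attr_<_name>, so the ppfeatures -> pp_features rename changes the visible file as well as the symbol the create/remove hunks later in this patch refer to. Roughly (a sketch of the macro expansion, not literal kernel source):

static struct device_attribute dev_attr_pp_features = {
	.attr	= { .name = "pp_features", .mode = S_IRUGO | S_IWUSR },
	.show	= amdgpu_get_pp_feature_status,
	.store	= amdgpu_set_pp_feature_status,
};

This is the symbol that device_create_file()/device_remove_file() take in the amdgpu_pm_sysfs_init()/amdgpu_pm_sysfs_fini() hunks below.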
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
@@ -1468,42 +1842,43 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
- /* Can't get temperature when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
if (channel >= PP_TEMP_MAX)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
break;
case PP_TEMP_MEM:
/* get current memory temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
+ break;
+ default:
+ r = -EINVAL;
break;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (r)
+ return r;
+
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
@@ -1599,15 +1974,27 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -1617,31 +2004,32 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
+ int err, ret;
int value;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
- if (is_support_sw_smu(adev)) {
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+ if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, value);
} else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
+ }
amdgpu_dpm_set_fan_control_mode(adev, value);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return count;
}
@@ -1668,34 +2056,43 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
else
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
}
err = kstrtou32(buf, 10, &value);
- if (err)
+ if (err) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
value = (value * 100) / 255;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_set_fan_speed_percent(&adev->smu, value);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
@@ -1708,20 +2105,22 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_percent(&adev->smu, &speed);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
speed = (speed * 255) / 100;
@@ -1736,20 +2135,22 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &speed);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return sprintf(buf, "%i\n", speed);
}
@@ -1763,8 +2164,16 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1780,8 +2189,16 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1796,20 +2213,22 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return sprintf(buf, "%i\n", rpm);
}
@@ -1823,32 +2242,40 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
else
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+ if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -ENODATA;
-
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ }
err = kstrtou32(buf, 10, &value);
- if (err)
+ if (err) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_set_fan_speed_rpm(&adev->smu, value);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
@@ -1859,15 +2286,27 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -1881,12 +2320,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -1898,14 +2331,24 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, pwm_mode);
} else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return count;
}
@@ -1914,18 +2357,20 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 vddgfx;
int r, size = sizeof(vddgfx);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1944,7 +2389,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 vddnb;
int r, size = sizeof(vddnb);
@@ -1952,14 +2396,17 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1978,19 +2425,21 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 query = 0;
int r, size = sizeof(u32);
unsigned uw;
- /* Can't get power when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the average power */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2013,16 +2462,27 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
+ ssize_t size;
+ int r;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ smu_get_power_limit(&adev->smu, &limit, true, true);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
@@ -2031,16 +2491,27 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
+ ssize_t size;
+ int r;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ smu_get_power_limit(&adev->smu, &limit, false, true);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return size;
}
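
The multiplications above and the division in the setter below exist because the hwmon sysfs interface expresses power1_cap* in microwatts, while smu_get_power_limit()/smu_set_power_limit() work in watts. With illustrative numbers:

uint32_t limit = 220;			/* watts, e.g. from smu_get_power_limit() */
uint32_t shown = limit * 1000000;	/* 220000000 uW printed by the show handlers */
uint32_t stored = 250000000 / 1000000;	/* a write of "250000000" becomes a 250 W limit */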
@@ -2053,20 +2524,32 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
err = kstrtou32(buf, 10, &value);
if (err)
return err;
value = value / 1000000; /* convert to Watt */
- if (is_support_sw_smu(adev)) {
- adev->smu.funcs->set_power_limit(&adev->smu, value);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
+ if (is_support_sw_smu(adev))
+ err = smu_set_power_limit(&adev->smu, value);
+ else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- if (err)
- return err;
- } else {
- return -EINVAL;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
@@ -2076,18 +2559,20 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
uint32_t sclk;
int r, size = sizeof(sclk);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the sclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2106,18 +2591,20 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
uint32_t mclk;
int r, size = sizeof(mclk);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the mclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2199,9 +2686,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan1_input: fan speed in RPM
*
- * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
*
- * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable 0: Disable
*
* hwmon interfaces for GPU clocks:
*
@@ -2297,6 +2784,23 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* under multi-vf mode, none of the hwmon attributes are supported */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ /* there is no fan under pp one vf mode */
+ if (amdgpu_sriov_is_pp_one_vf(adev) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
@@ -2352,7 +2856,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
}
- if ((adev->flags & AMD_IS_APU) &&
+ if (((adev->flags & AMD_IS_APU) ||
+ adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
+ adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
@@ -2376,6 +2882,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
}
+ if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
+ adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
+ (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in0_label.dev_attr.attr))
+ return 0;
+
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
@@ -2656,17 +3168,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /* enable/disable UVD */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- mutex_unlock(&adev->pm.mutex);
- }
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+
/* enable/disable Low Memory PState for UVD (4k videos) */
if (adev->asic_type == CHIP_STONEY &&
adev->uvd.decode_image_width >= WIDTH_4K) {
@@ -2683,17 +3190,11 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /* enable/disable VCE */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- mutex_unlock(&adev->pm.mutex);
- }
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -2708,42 +3209,14 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
}
-int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
- return ret;
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
-
- return ret;
-}
-
-void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
-{
- if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
- return;
-
- device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
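
amdgpu_dpm_enable_jpeg() above gives the new JPEG code the same power-gating hook that UVD and VCE already have. A hypothetical caller, sketched here only to show the intended usage (the ring and idle-work names are assumptions, not taken from this patch), would ungate before ring use and re-gate from an idle handler:

static void example_jpeg_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);	/* assumed field */
	amdgpu_dpm_enable_jpeg(adev, true);			/* ungate before submitting */
}

static void example_jpeg_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, jpeg.idle_work.work);

	amdgpu_dpm_enable_jpeg(adev, false);			/* gate again once idle */
}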
@@ -2820,6 +3293,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_dpm_sclk\n");
return ret;
}
+
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_mclk.store = NULL;
+
+ dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_socclk.store = NULL;
+
+ dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_fclk.store = NULL;
+ }
+
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
if (ret) {
DRM_ERROR("failed to create device file pp_dpm_mclk\n");
@@ -2831,10 +3317,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_dpm_socclk\n");
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
- return ret;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
+ return ret;
+ }
}
}
if (adev->asic_type >= CHIP_VEGA20) {
@@ -2844,10 +3332,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+ return ret;
+ }
}
ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
if (ret) {
@@ -2917,10 +3407,10 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU)) {
ret = device_create_file(adev->dev,
- &dev_attr_ppfeatures);
+ &dev_attr_pp_features);
if (ret) {
DRM_ERROR("failed to create device file "
- "ppfeatures\n");
+ "pp_features\n");
return ret;
}
}
@@ -2951,9 +3441,11 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
if (adev->asic_type >= CHIP_VEGA10) {
device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+ if (adev->asic_type != CHIP_ARCTURUS)
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
}
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (adev->asic_type != CHIP_ARCTURUS)
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
if (adev->asic_type >= CHIP_VEGA20)
device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
@@ -2974,7 +3466,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_unique_id);
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_ppfeatures);
+ device_remove_file(adev->dev, &dev_attr_pp_features);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -2997,7 +3489,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
smu_handle_task(&adev->smu,
smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ true);
} else {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
@@ -3133,8 +3626,12 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_device *ddev = adev->ddev;
u32 flags = 0;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
@@ -3143,23 +3640,28 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return 0;
}
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
- seq_printf(m, "PX asic powered off\n");
- } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+
+ if (!is_support_sw_smu(adev) &&
+ adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
+ r = 0;
} else {
- return amdgpu_debugfs_pm_info_pp(m, adev);
+ r = amdgpu_debugfs_pm_info_pp(m, adev);
}
- return 0;
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return r;
}
static const struct drm_info_list amdgpu_pm_info_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index ef31448ee8d8..3da1da277805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -41,5 +41,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 0e6dba9f60f0..1311d6aec5d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -52,7 +52,7 @@ static int amdgpu_perf_event_init(struct perf_event *event)
return -ENOENT;
/* update the hw_perf_event struct with config data */
- hwc->conf = event->attr.config;
+ hwc->config = event->attr.config;
return 0;
}
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
- pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
- pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -101,13 +101,13 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
+ pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
&count);
break;
default:
count = 0;
break;
- };
+ }
} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);
local64_add(count - prev, &event->count);
@@ -126,11 +126,11 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
break;
default:
break;
- };
+ }
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -156,11 +156,12 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+ retval = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 1);
break;
default:
return 0;
- };
+ }
if (retval)
return retval;
@@ -184,11 +185,11 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
break;
default:
break;
- };
+ }
perf_event_update_userpage(event);
}
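
The amdgpu_pmu.c hunks replace the dropped hwc->conf field with the standard hw_perf_event.config member and follow the df_funcs -> df.funcs move. For context, the callbacks patched above are the ones the driver wires into a struct pmu before perf_pmu_register(); a sketch of that wiring (the pmu name and the registration call are illustrative, not part of this patch):

static struct pmu amdgpu_df_pmu_sketch = {
	.event_init	= amdgpu_perf_event_init,
	.add		= amdgpu_perf_add,
	.del		= amdgpu_perf_del,
	.start		= amdgpu_perf_start,
	.stop		= amdgpu_perf_stop,
	.read		= amdgpu_perf_read,
	.task_ctx_nr	= perf_invalid_context,	/* system-wide counters only */
};

/* registered with something like: perf_pmu_register(&amdgpu_df_pmu_sketch, "amdgpu_df_sketch", -1); */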
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c027e5e7713e..3a1570dafe34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -32,6 +32,9 @@
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
+#include "psp_v12_0.h"
+
+#include "amdgpu_ras.h"
static void psp_set_funcs(struct amdgpu_device *adev);
@@ -53,13 +56,19 @@ static int psp_early_init(void *handle)
psp->autoload_supported = false;
break;
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
+ case CHIP_RENOIR:
+ psp_v12_0_set_psp_funcs(psp);
+ break;
default:
return -EINVAL;
}
@@ -81,6 +90,17 @@ static int psp_sw_init(void *handle)
return ret;
}
+ ret = psp_mem_training_init(psp);
+ if (ret) {
+ DRM_ERROR("Failed to initialize memory training!\n");
+ return ret;
+ }
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
return 0;
}
@@ -88,6 +108,7 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ psp_mem_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -137,18 +158,26 @@ psp_cmd_submit_buf(struct psp_context *psp,
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
index = atomic_inc_return(&psp->fence_value);
- ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
- fence_mc_addr, index);
+ ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
if (ret) {
atomic_dec(&psp->fence_value);
mutex_unlock(&psp->mutex);
return ret;
}
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
+ /*
+ * Don't wait out the timeout when err_event_athub occurs, because
+ * the gpu reset thread has been triggered and any locked resources
+ * must be released for the psp resume sequence.
+ */
+ if (amdgpu_ras_intr_triggered())
+ break;
msleep(1);
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
/* In some cases, psp response status is not 0 even when there is no
@@ -162,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_WARN("psp command failed and response status is (%d)\n",
- psp->cmd_buf_mem->resp.status);
+ DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -233,6 +263,8 @@ static int psp_tmr_init(struct psp_context *psp)
{
int ret;
int tmr_size;
+ void *tmr_buf;
+ void **pptr;
/*
* According to HW engineer, they prefer the TMR address be "naturally
@@ -245,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp)
/* For ASICs support RLC autoload, psp will parse the toc
* and calculate the total size of TMR needed */
- if (psp->toc_start_addr &&
+ if (!amdgpu_sriov_vf(psp->adev) &&
+ psp->toc_start_addr &&
psp->toc_bin_size &&
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
@@ -255,9 +288,10 @@ static int psp_tmr_init(struct psp_context *psp)
}
}
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
- &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+ &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
return ret;
}
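
The pptr indirection above works because amdgpu_bo_create_kernel() only kmaps the buffer when its last argument is non-NULL, so the TMR gets a kernel CPU mapping on SR-IOV VFs and none on bare metal. Schematically (arguments as in the hunk):

/* bare metal: pptr == NULL, no CPU mapping of the TMR is created */
amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&psp->tmr_bo, &psp->tmr_mc_addr, NULL);

/* SR-IOV VF: pptr == &tmr_buf, the buffer is also mapped for CPU access */
amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&psp->tmr_bo, &psp->tmr_mc_addr, &tmr_buf);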
@@ -278,47 +312,23 @@ static int psp_tmr_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
- if (ret)
- goto failed;
kfree(cmd);
- return 0;
-
-failed:
- kfree(cmd);
return ret;
}
-static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t asd_mc, uint64_t asd_mc_shared,
- uint32_t size, uint32_t shared_size)
+static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t asd_mc, uint32_t size)
{
cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
cmd->cmd.cmd_load_ta.app_len = size;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
-static int psp_asd_init(struct psp_context *psp)
-{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for shared ASD <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->asd_shared_bo,
- &psp->asd_shared_mc_addr,
- &psp->asd_shared_buf);
-
- return ret;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
+ cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
}
static int psp_asd_load(struct psp_context *psp)
@@ -340,11 +350,49 @@ static int psp_asd_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
- psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
- psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
+ psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->asd_ucode_size);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
+ if (!ret) {
+ psp->asd_context.asd_initialized = true;
+ psp->asd_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = session_id;
+}
+
+static int psp_asd_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->asd_context.asd_initialized)
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (!ret)
+ psp->asd_context.asd_initialized = false;
kfree(cmd);
@@ -379,18 +427,20 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
return ret;
}
-static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
- uint32_t xgmi_ta_size, uint32_t shared_size)
+static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ta_bin_mc,
+ uint32_t ta_bin_size,
+ uint64_t ta_shared_mc,
+ uint32_t ta_shared_size)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
@@ -410,6 +460,36 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
return ret;
}
+static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+}
+
+int psp_ta_invoke(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
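
psp_ta_invoke() above, together with psp_prep_ta_load_cmd_buf() and psp_prep_ta_unload_cmd_buf(), replaces the per-TA command builders (xgmi/ras, plus the new hdcp/dtm code further down) with one generic set. The load side of that lifecycle, written out as an illustrative wrapper rather than code from this patch, looks like this:

static int example_ta_load(struct psp_context *psp,
			   const void *ta_bin, uint32_t ta_size,
			   uint64_t shared_mc, uint32_t shared_size,
			   uint32_t *session_id)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* stage the TA binary in the PSP private firmware buffer */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, ta_bin, ta_size);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, ta_size,
				 shared_mc, shared_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret)
		*session_id = cmd->resp.session_id;	/* needed for invoke/unload */

	kfree(cmd);
	return ret;
}

Commands are then issued through psp_ta_invoke(psp, ta_cmd_id, session_id), and teardown builds GFX_CMD_ID_UNLOAD_TA via psp_prep_ta_unload_cmd_buf() with the same session id, exactly as the xgmi/ras/hdcp paths below do.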
static int psp_xgmi_load(struct psp_context *psp)
{
int ret;
@@ -418,8 +498,6 @@ static int psp_xgmi_load(struct psp_context *psp)
/*
* TODO: bypass the loading in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
@@ -428,9 +506,11 @@ static int psp_xgmi_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
- psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->xgmi_context.xgmi_shared_mc_addr,
- psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_xgmi_ucode_size,
+ psp->xgmi_context.xgmi_shared_mc_addr,
+ PSP_XGMI_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -445,29 +525,25 @@ static int psp_xgmi_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t xgmi_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
-}
-
static int psp_xgmi_unload(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* XGMI TA unload currently is not supported on Arcturus */
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return 0;
/*
* TODO: bypass the unloading in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
- psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -477,40 +553,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t xgmi_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->xgmi_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
static int psp_xgmi_terminate(struct psp_context *psp)
@@ -539,7 +584,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- if (!psp->adev->psp.ta_fw)
+ if (!psp->adev->psp.ta_fw ||
+ !psp->adev->psp.ta_xgmi_ucode_size ||
+ !psp->adev->psp.ta_xgmi_start_addr)
return -ENOENT;
if (!psp->xgmi_context.initialized) {
@@ -564,20 +611,6 @@ static int psp_xgmi_initialize(struct psp_context *psp)
}
// ras begin
-static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t ras_ta_mc, uint64_t ras_mc_shared,
- uint32_t ras_ta_size, uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -613,15 +646,17 @@ static int psp_ras_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
- psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->ras.ras_shared_mc_addr,
- psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_ras_ucode_size,
+ psp->ras.ras_shared_mc_addr,
+ PSP_RAS_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->ras.ras_initialized = 1;
+ psp->ras.ras_initialized = true;
psp->ras.session_id = cmd->resp.session_id;
}
@@ -630,13 +665,6 @@ static int psp_ras_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ras_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
-}
-
static int psp_ras_unload(struct psp_context *psp)
{
int ret;
@@ -652,7 +680,7 @@ static int psp_ras_unload(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -662,40 +690,15 @@ static int psp_ras_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t ras_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->ras.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
}
int psp_ras_enable_features(struct psp_context *psp,
@@ -728,6 +731,12 @@ static int psp_ras_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->ras.ras_initialized)
return 0;
@@ -735,7 +744,7 @@ static int psp_ras_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->ras.ras_initialized = 0;
+ psp->ras.ras_initialized = false;
/* free ras shared memory */
amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
@@ -749,6 +758,18 @@ static int psp_ras_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_ras_ucode_size ||
+ !psp->adev->psp.ta_ras_start_addr) {
+ dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
@@ -763,6 +784,274 @@ static int psp_ras_initialize(struct psp_context *psp)
}
// ras end
+// HDCP start
+static int psp_hdcp_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for hdcp ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return ret;
+}
+
+static int psp_hdcp_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
+ psp->ta_hdcp_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ PSP_HDCP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->hdcp_context.hdcp_initialized = true;
+ psp->hdcp_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_hdcp_ucode_size ||
+ !psp->adev->psp.ta_hdcp_start_addr) {
+ dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ ret = psp_hdcp_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_hdcp_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int psp_hdcp_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ /*
+	 * TODO: bypass the invocation in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
+}
+
+static int psp_hdcp_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.hdcp_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->hdcp_context.hdcp_initialized = false;
+
+ /* free hdcp shared memory */
+ amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return 0;
+}
+// HDCP end
+
+// DTM start
+static int psp_dtm_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+	 * physical) for the dtm ta <-> Driver shared buffer
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return ret;
+}
+
+static int psp_dtm_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_dtm_ucode_size,
+ psp->dtm_context.dtm_shared_mc_addr,
+ PSP_DTM_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->dtm_context.dtm_initialized = true;
+ psp->dtm_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_dtm_ucode_size ||
+ !psp->adev->psp.ta_dtm_start_addr) {
+ dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->dtm_context.dtm_initialized) {
+ ret = psp_dtm_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_dtm_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ /*
+	 * TODO: bypass the invocation in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
+}
+
+static int psp_dtm_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.dtm_initialized)
+ return 0;
+
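+	/* no dedicated dtm unload helper here; the hdcp TA unload path is reused */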
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->dtm_context.dtm_initialized = false;
+
+	/* free dtm shared memory */
+ amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return 0;
+}
+// DTM end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -809,36 +1098,6 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- ret = psp_asd_init(psp);
- if (ret) {
- DRM_ERROR("PSP asd init failed!\n");
- return ret;
- }
-
- ret = psp_asd_load(psp);
- if (ret) {
- DRM_ERROR("PSP load asd failed!\n");
- return ret;
- }
-
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
- /* Warning the XGMI seesion initialize failure
- * Instead of stop driver initialization
- */
- if (ret)
- dev_err(psp->adev->dev,
- "XGMI: Failed to initialize XGMI session\n");
- }
-
-
- if (psp->adev->psp.ta_fw) {
- ret = psp_ras_initialize(psp);
- if (ret)
- dev_err(psp->adev->dev,
- "RAS: Failed to initialize RAS\n");
- }
-
return 0;
}
@@ -852,6 +1111,24 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_SDMA1:
*type = GFX_FW_TYPE_SDMA1;
break;
+ case AMDGPU_UCODE_ID_SDMA2:
+ *type = GFX_FW_TYPE_SDMA2;
+ break;
+ case AMDGPU_UCODE_ID_SDMA3:
+ *type = GFX_FW_TYPE_SDMA3;
+ break;
+ case AMDGPU_UCODE_ID_SDMA4:
+ *type = GFX_FW_TYPE_SDMA4;
+ break;
+ case AMDGPU_UCODE_ID_SDMA5:
+ *type = GFX_FW_TYPE_SDMA5;
+ break;
+ case AMDGPU_UCODE_ID_SDMA6:
+ *type = GFX_FW_TYPE_SDMA6;
+ break;
+ case AMDGPU_UCODE_ID_SDMA7:
+ *type = GFX_FW_TYPE_SDMA7;
+ break;
case AMDGPU_UCODE_ID_CP_CE:
*type = GFX_FW_TYPE_CP_CE;
break;
@@ -900,6 +1177,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_VCN:
*type = GFX_FW_TYPE_VCN;
break;
+ case AMDGPU_UCODE_ID_VCN1:
+ *type = GFX_FW_TYPE_VCN1;
+ break;
case AMDGPU_UCODE_ID_DMCU_ERAM:
*type = GFX_FW_TYPE_DMCU_ERAM;
break;
@@ -912,6 +1192,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_VCN1_RAM:
*type = GFX_FW_TYPE_VCN1_RAM;
break;
+ case AMDGPU_UCODE_ID_DMCUB:
+ *type = GFX_FW_TYPE_DMUB;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
@@ -920,6 +1203,54 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
return 0;
}
+static void psp_print_fw_hdr(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ struct amdgpu_device *adev = psp->adev;
+ struct common_firmware_header *hdr;
+
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ case AMDGPU_UCODE_ID_SDMA1:
+ case AMDGPU_UCODE_ID_SDMA2:
+ case AMDGPU_UCODE_ID_SDMA3:
+ case AMDGPU_UCODE_ID_SDMA4:
+ case AMDGPU_UCODE_ID_SDMA5:
+ case AMDGPU_UCODE_ID_SDMA6:
+ case AMDGPU_UCODE_ID_SDMA7:
+ hdr = (struct common_firmware_header *)
+ adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+ amdgpu_ucode_print_sdma_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_PFP:
+ hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_RLC_G:
+ hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_SMC:
+ hdr = (struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
+ break;
+ default:
+ break;
+ }
+}
+
static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd)
{
@@ -980,24 +1311,39 @@ out:
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
(psp_smu_reload_quirk(psp) || psp->autoload_supported))
continue;
+
if (amdgpu_sriov_vf(adev) &&
(ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
/*skip ucode loading in SRIOV VF */
continue;
+
if (psp->autoload_supported &&
(ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
/* skip mec JT when autoload is enabled */
continue;
+ psp_print_fw_hdr(psp, ucode);
+
ret = psp_execute_np_fw_load(psp, ucode);
if (ret)
return ret;
 		/* Start rlc autoload after psp received all the gfx firmware */
- if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
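+		/* RLC_G is skipped for SRIOV VFs (see the skip list above), so a
+		 * different ucode has to serve as the trigger in that case.
+		 */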
+ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
+ AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -1028,16 +1374,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (!psp->cmd)
return -ENOMEM;
- /* this fw pri bo is not used under SRIOV */
- if (!amdgpu_sriov_vf(psp->adev)) {
- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
- if (ret)
- goto failed;
- }
+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
+ if (ret)
+ goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
@@ -1071,6 +1414,39 @@ skip_memalloc:
if (ret)
goto failed;
+ ret = psp_asd_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
+ return ret;
+ }
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure,
+		 * but do not stop driver initialization.
+		 */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
+ }
+
return 0;
failed:
@@ -1115,23 +1491,29 @@ static int psp_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
+ void *tmr_buf;
+ void **pptr;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
- if (psp->adev->psp.ta_fw)
+ if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
+ }
+
+ psp_asd_unload(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
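+	/* the TMR bo is CPU mapped only under SRIOV (mirroring how it is
+	 * allocated in psp_tmr_init), so only then is there a kernel address
+	 * to unmap when freeing it.
+	 */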
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
- amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
- &psp->asd_shared_buf);
amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
@@ -1162,6 +1544,16 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate ras ta\n");
return ret;
}
+ ret = psp_hdcp_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate hdcp ta\n");
+ return ret;
+ }
+ ret = psp_dtm_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate dtm ta\n");
+ return ret;
+ }
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1181,6 +1573,12 @@ static int psp_resume(void *handle)
DRM_INFO("PSP is resuming...\n");
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
mutex_lock(&adev->firmware.mutex);
ret = psp_hw_start(psp);
@@ -1191,6 +1589,39 @@ static int psp_resume(void *handle)
if (ret)
goto failed;
+ ret = psp_asd_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
+ goto failed;
+ }
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure,
+		 * but do not stop driver initialization.
+		 */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
+ }
+
mutex_unlock(&adev->firmware.mutex);
return 0;
@@ -1220,9 +1651,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1248,6 +1676,56 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_np_fw_load(&adev->psp, &ucode);
}
+int psp_ring_cmd_submit(struct psp_context *psp,
+ uint64_t cmd_buf_mc_addr,
+ uint64_t fence_mc_addr,
+ int index)
+{
+ unsigned int psp_write_ptr_reg = 0;
+ struct psp_gfx_rb_frame *write_frame;
+ struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t ring_size_dw = ring->ring_size / 4;
+ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
+
+ /* KM (GPCOM) prepare write pointer */
+ psp_write_ptr_reg = psp_ring_get_wptr(psp);
+
+ /* Update KM RB frame pointer to new frame */
+ /* write_frame ptr increments by size of rb_frame in bytes */
+ /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
+ if ((psp_write_ptr_reg % ring_size_dw) == 0)
+ write_frame = ring_buffer_start;
+ else
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
+ }
+
+ /* Initialize KM RB frame */
+ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
+
+ /* Update KM RB frame */
+ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
+ write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
+ write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
+ write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
+ write_frame->fence_value = index;
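+	/* flush the HDP write cache so the frame contents reach memory before
+	 * the write pointer is advanced below.
+	 */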
+ amdgpu_asic_flush_hdp(adev, NULL);
+
+ /* Update the write Pointer in DWORDs */
+ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
+ psp_ring_set_wptr(psp, psp_write_ptr_reg);
+ return 0;
+}
+
static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type)
{
@@ -1329,3 +1807,12 @@ const struct amdgpu_ip_block_version psp_v11_0_ip_block =
.rev = 0,
.funcs = &psp_ip_funcs,
};
+
+const struct amdgpu_ip_block_version psp_v12_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 12,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index e0fc2a790e53..611021514c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -32,11 +32,13 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
+#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
+#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
struct psp_xgmi_node_info;
@@ -46,6 +48,8 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SYSDRV = 0x10000,
PSP_BL__LOAD_SOSDRV = 0x20000,
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+ PSP_BL__DRAM_LONG_TRAIN = 0x100000,
+ PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
};
enum psp_ring_type
@@ -89,10 +93,6 @@ struct psp_funcs
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
- int (*cmd_submit)(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index);
bool (*compare_sram_data)(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
@@ -109,6 +109,11 @@ struct psp_funcs
struct ta_ras_trigger_error_input *info);
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
int (*rlc_autoload_start)(struct psp_context *psp);
+ int (*mem_training_init)(struct psp_context *psp);
+ void (*mem_training_fini)(struct psp_context *psp);
+ int (*mem_training)(struct psp_context *psp, uint32_t ops);
+ uint32_t (*ring_get_wptr)(struct psp_context *psp);
+ void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -124,6 +129,11 @@ struct psp_xgmi_topology_info {
struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
+struct psp_asd_context {
+ bool asd_initialized;
+ uint32_t session_id;
+};
+
struct psp_xgmi_context {
uint8_t initialized;
uint32_t session_id;
@@ -143,6 +153,66 @@ struct psp_ras_context {
struct amdgpu_ras *ras;
};
+struct psp_hdcp_context {
+ bool hdcp_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *hdcp_shared_bo;
+ uint64_t hdcp_shared_mc_addr;
+ void *hdcp_shared_buf;
+};
+
+struct psp_dtm_context {
+ bool dtm_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *dtm_shared_bo;
+ uint64_t dtm_shared_mc_addr;
+ void *dtm_shared_buf;
+};
+
+#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
+#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+/* Define the VRAM size that will be encroached by BIST training. */
+#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
+
+enum psp_memory_training_init_flag {
+ PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+ PSP_MEM_TRAIN_SUPPORT = 0x1,
+ PSP_MEM_TRAIN_INIT_FAILED = 0x2,
+ PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
+ PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
+};
+
+enum psp_memory_training_ops {
+ PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
+ PSP_MEM_TRAIN_SAVE = 0x2,
+ PSP_MEM_TRAIN_RESTORE = 0x4,
+ PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
+ PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
+ PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
+};
+
+struct psp_memory_training_context {
+ /*training data size*/
+ u64 train_data_size;
+	/*
+	 * sys_cache
+	 * cpu virtual address of the system memory buffer used to store
+	 * the training data.
+	 */
+ void *sys_cache;
+
+ /*vram offset of the p2c training data*/
+ u64 p2c_train_data_offset;
+
+ /*vram offset of the c2p training data*/
+ u64 c2p_train_data_offset;
+ struct amdgpu_bo *c2p_bo;
+
+ enum psp_memory_training_init_flag init;
+ u32 training_cnt;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -172,17 +242,13 @@ struct psp_context
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
uint64_t tmr_mc_addr;
- void *tmr_buf;
- /* asd firmware and buffer */
+ /* asd firmware */
const struct firmware *asd_fw;
uint32_t asd_fw_version;
uint32_t asd_feature_version;
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
- struct amdgpu_bo *asd_shared_bo;
- uint64_t asd_shared_mc_addr;
- void *asd_shared_buf;
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
@@ -208,9 +274,22 @@ struct psp_context
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_ucode_size;
uint8_t *ta_ras_start_addr;
+
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_ucode_size;
+ uint8_t *ta_hdcp_start_addr;
+
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_ucode_size;
+ uint8_t *ta_dtm_start_addr;
+
+ struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
+ struct psp_hdcp_context hdcp_context;
+ struct psp_dtm_context dtm_context;
struct mutex mutex;
+ struct psp_memory_training_context mem_train_ctx;
};
struct amdgpu_psp_funcs {
@@ -223,8 +302,6 @@ struct amdgpu_psp_funcs {
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
- (psp)->funcs->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \
(psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
@@ -253,6 +330,12 @@ struct amdgpu_psp_funcs {
(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
+#define psp_mem_training_init(psp) \
+ ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
+#define psp_mem_training_fini(psp) \
+ ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
+#define psp_mem_training(psp, ops) \
+ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
@@ -263,6 +346,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->ras_cure_posion ? \
(psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
+#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
+#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -270,6 +356,7 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
@@ -280,10 +367,16 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
uint32_t value);
+int psp_ring_cmd_submit(struct psp_context *psp,
+ uint64_t cmd_buf_mc_addr,
+ uint64_t fence_mc_addr,
+ int index);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index fac7aa2c244f..cef94e2169fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -25,78 +25,13 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
-
-struct ras_ih_data {
- /* interrupt bottom half */
- struct work_struct ih_work;
- int inuse;
- /* IP callback */
- ras_ih_cb cb;
- /* full of entries */
- unsigned char *ring;
- unsigned int ring_size;
- unsigned int element_size;
- unsigned int aligned_element_size;
- unsigned int rptr;
- unsigned int wptr;
-};
-
-struct ras_fs_data {
- char sysfs_name[32];
- char debugfs_name[32];
-};
-
-struct ras_err_data {
- unsigned long ue_count;
- unsigned long ce_count;
-};
-
-struct ras_err_handler_data {
- /* point to bad pages array */
- struct {
- unsigned long bp;
- struct amdgpu_bo *bo;
- } *bps;
- /* the count of entries */
- int count;
- /* the space can place new entries */
- int space_left;
- /* last reserved entry's index + 1 */
- int last_reserved;
-};
-
-struct ras_manager {
- struct ras_common_if head;
- /* reference count */
- int use;
- /* ras block link */
- struct list_head node;
- /* the device */
- struct amdgpu_device *adev;
- /* debugfs */
- struct dentry *ent;
- /* sysfs */
- struct device_attribute sysfs_attr;
- int attr_inuse;
-
- /* fs node name */
- struct ras_fs_data fs_data;
-
- /* IH data */
- struct ras_ih_data ih_data;
-
- struct ras_err_data err_data;
-};
-
-struct ras_badpage {
- unsigned int bp;
- unsigned int size;
- unsigned int flags;
-};
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
"none",
@@ -130,11 +65,19 @@ const char *ras_block_string[] = {
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr);
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr);
+/* inject address is 52 bits */
+#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+enum amdgpu_ras_retire_page_reservation {
+ AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+ AMDGPU_RAS_RETIRE_PAGE_PENDING,
+ AMDGPU_RAS_RETIRE_PAGE_FAULT,
+};
+
+atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr);
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
@@ -196,6 +139,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
char err[9] = "ue";
int op = -1;
int block_id;
+ uint32_t sub_block;
u64 address, value;
if (*pos)
@@ -223,17 +167,23 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return -EINVAL;
data->head.block = block_id;
- data->head.type = memcmp("ue", err, 2) == 0 ?
- AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE :
- AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ /* only ue and ce errors are supported */
+ if (!memcmp("ue", err, 2))
+ data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ else if (!memcmp("ce", err, 2))
+ data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ else
+ return -EINVAL;
+
data->op = op;
if (op == 2) {
- if (sscanf(str, "%*s %*s %*s %llu %llu",
- &address, &value) != 2)
- if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
- &address, &value) != 2)
+ if (sscanf(str, "%*s %*s %*s %u %llu %llu",
+ &sub_block, &address, &value) != 3)
+ if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+ &sub_block, &address, &value) != 3)
return -EINVAL;
+ data->head.sub_block_index = sub_block;
data->inject.address = address;
data->inject.value = value;
}
@@ -247,6 +197,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
+
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -266,32 +217,46 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* As their names indicate, inject operation will write the
* value to the address.
*
- * Second member: struct ras_debug_if::op.
+ * The second member: struct ras_debug_if::op.
* It has three kinds of operations.
- * 0: disable RAS on the block. Take ::head as its data.
- * 1: enable RAS on the block. Take ::head as its data.
- * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * - 0: disable RAS on the block. Take ::head as its data.
+ * - 1: enable RAS on the block. Take ::head as its data.
+ * - 2: inject errors on the block. Take ::inject as its data.
*
* How to use the interface?
- * programs:
- * copy the struct ras_debug_if in your codes and initialize it.
- * write the struct to the control node.
*
- * bash:
- * echo op block [error [address value]] > .../ras/ras_ctrl
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, smda, gfx, .........
- * see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
+ * Programs
+ *
+ * Copy the struct ras_debug_if in your code and initialize it.
+ * Write the struct to the control node.
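+ *
+ * A minimal sketch of the struct based usage (struct ras_debug_if and the
+ * enum values must be copied from amdgpu_ras.h into the program; the dri/0
+ * path and the lack of error handling are for illustration only):
+ *
+ * .. code-block:: c
+ *
+ *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
+ *	struct ras_debug_if data = { 0 };
+ *
+ *	data.op = 1;	// 1 == enable, see ras_debug_if::op above
+ *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
+ *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ *	data.head.sub_block_index = 0;
+ *
+ *	write(fd, &data, sizeof(data));
+ *	close(fd);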
+ *
+ * Shells
+ *
+ * .. code-block:: bash
+ *
+ * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
*
- * here are some examples for bash commands,
- * echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
- * echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * Parameters:
+ *
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: error, address, value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ * sub_block:
+ * sub block index, pass 0 if there is no sub block
+ *
+ * here are some examples for bash commands:
+ *
+ * .. code-block:: bash
+ *
+ * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
*
* How to check the result?
@@ -302,15 +267,17 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* For inject, please check corresponding err count at
* /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
*
- * NOTE: operation is only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * .. note::
+ * Operations are only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * to see which blocks support RAS on a particular asic.
+ *
*/
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct ras_debug_if data;
- struct amdgpu_bo *bo;
int ret = 0;
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
@@ -328,22 +295,27 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- ret = amdgpu_ras_reserve_vram(adev,
- data.inject.address, PAGE_SIZE, &bo);
- if (ret) {
- /* address was offset, now it is absolute.*/
- data.inject.address += adev->gmc.vram_start;
- if (data.inject.address > adev->gmc.vram_end)
- break;
- } else
- data.inject.address = amdgpu_bo_gpu_offset(bo);
+ if ((data.inject.address >= adev->gmc.mc_vram_size) ||
+ (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ amdgpu_ras_check_bad_page(adev, data.inject.address)) {
+ DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ data.inject.address);
+ break;
+ }
+
+ /* data.inject.address is offset instead of absolute gpu address */
ret = amdgpu_ras_error_inject(adev, &data.inject);
- amdgpu_ras_release_vram(adev, &bo);
break;
default:
ret = -EINVAL;
break;
- };
+ }
if (ret)
return -EINVAL;
@@ -351,6 +323,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
return size;
}
+/**
+ * DOC: AMDGPU RAS debugfs EEPROM table reset interface
+ *
+ * Some boards contain an EEPROM which is used to persistently store a list of
+ * bad pages which experience ECC errors in vram. This interface provides
+ * a way to reset the EEPROM, e.g., after testing error injection.
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo 1 > ../ras/ras_eeprom_reset
+ *
+ * will reset the EEPROM table to 0 entries.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+ ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+
+ return ret == 1 ? size : -EIO;
+}
+
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.owner = THIS_MODULE,
.read = NULL,
@@ -358,6 +357,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_eeprom_write,
+ .llseek = default_llseek
+};
+
+/**
+ * DOC: AMDGPU RAS sysfs Error Count Interface
+ *
+ * It allows the user to read the error count for each IP block on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * It outputs multiple lines which report the uncorrected (ue) and corrected
+ * (ce) error counts.
+ *
+ * The format of one line is as follows:
+ *
+ * [ce|ue]: count
+ *
+ * Example:
+ *
+ * .. code-block:: bash
+ *
+ * ue: 0
+ * ce: 1
+ *
+ */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -415,7 +442,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
}
/* return an obj equal to head, or the first when head is NULL */
-static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
struct ras_common_if *head)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -536,15 +563,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
return 0;
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
- if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
- if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ if (!amdgpu_ras_intr_triggered()) {
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ if (ret == TA_RAS_STATUS__RESET_NEEDED)
+ return -EAGAIN;
+ return -EINVAL;
+ }
}
/* setup the obj */
@@ -656,17 +685,77 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
struct ras_query_if *info)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ int i;
if (!obj)
return -EINVAL;
- /* TODO might read the register to read the count */
+
+ switch (info->head.block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ if (adev->umc.funcs->query_ras_error_count)
+ adev->umc.funcs->query_ras_error_count(adev, &err_data);
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ if (adev->umc.funcs->query_ras_error_address)
+ adev->umc.funcs->query_ras_error_address(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ if (adev->sdma.funcs->query_ras_error_count) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->sdma.funcs->query_ras_error_count(adev, i,
+ &err_data);
+ }
+ break;
+ case AMDGPU_RAS_BLOCK__GFX:
+ if (adev->gfx.funcs->query_ras_error_count)
+ adev->gfx.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ if (adev->mmhub.funcs->query_ras_error_count)
+ adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ if (adev->nbio.funcs->query_ras_error_count)
+ adev->nbio.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ default:
+ break;
+ }
+
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
+ if (err_data.ce_count) {
+ dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
+ obj->err_data.ce_count, ras_block_str(info->head.block));
+ }
+ if (err_data.ue_count) {
+ dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
+ obj->err_data.ue_count, ras_block_str(info->head.block));
+ }
+
return 0;
}
+uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+{
+ uint32_t df_inst_id;
+
+ if ((!adev->df.funcs) ||
+ (!adev->df.funcs->get_df_inst_id) ||
+ (!adev->df.funcs->get_dram_base_addr))
+ return addr;
+
+ df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+
+ return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -684,13 +773,31 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
if (!obj)
return -EINVAL;
- if (block_info.block_id != TA_RAS_BLOCK__UMC) {
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ block_info.address = get_xgmi_relative_phy_addr(adev,
+ block_info.address);
+ }
+
+ switch (info->head.block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ if (adev->gfx.funcs->ras_error_inject)
+ ret = adev->gfx.funcs->ras_error_inject(adev, info);
+ else
+ ret = -EINVAL;
+ break;
+ case AMDGPU_RAS_BLOCK__UMC:
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ break;
+ default:
DRM_INFO("%s error injection is not supported yet\n",
ras_block_str(info->head.block));
- return -EINVAL;
+ ret = -EINVAL;
}
- ret = psp_ras_trigger_error(&adev->psp, &block_info);
if (ret)
DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
ras_block_str(info->head.block),
@@ -707,7 +814,7 @@ int amdgpu_ras_error_cure(struct amdgpu_device *adev,
}
/* get the total error counts on all IPs */
-int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -715,7 +822,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
struct ras_err_data data = {0, 0};
if (!con)
- return -EINVAL;
+ return 0;
list_for_each_entry(obj, &con->head, node) {
struct ras_query_if info = {
@@ -723,7 +830,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
};
if (amdgpu_ras_error_query(adev, &info))
- return -EINVAL;
+ return 0;
data.ce_count += info.ce_count;
data.ue_count += info.ue_count;
@@ -742,18 +849,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
switch (flags) {
- case 0:
+ case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
return "R";
- case 1:
+ case AMDGPU_RAS_RETIRE_PAGE_PENDING:
return "P";
- case 2:
+ case AMDGPU_RAS_RETIRE_PAGE_FAULT:
default:
return "F";
};
}
-/*
- * DOC: ras sysfs gpu_vram_bad_pages interface
+/**
+ * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
*
* It allows user to read the bad pages of vram on the gpu through
* /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
@@ -765,14 +872,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*
* gpu pfn and gpu page size are printed in hex format.
* flags can be one of below character,
+ *
* R: reserved, this gpu page is reserved and not able to use.
+ *
* P: pending for reserve, this gpu page is marked as bad, will be reserved
- * in next window of page_reserve.
+ * in next window of page_reserve.
+ *
* F: unable to reserve. this gpu page can't be reserved due to some reasons.
*
- * examples:
- * 0x00000001 : 0x00001000 : R
- * 0x00000002 : 0x00001000 : P
+ * Examples:
+ *
+ * .. code-block:: bash
+ *
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ *
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
@@ -812,32 +926,8 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
{
struct amdgpu_ras *con =
container_of(attr, struct amdgpu_ras, features_attr);
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct ras_common_if head;
- int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
- int i;
- ssize_t s;
- struct ras_manager *obj;
-
- s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
- for (i = 0; i < ras_block_count; i++) {
- head.block = i;
-
- if (amdgpu_ras_is_feature_enabled(adev, &head)) {
- obj = amdgpu_ras_find_obj(adev, &head);
- s += scnprintf(&buf[s], PAGE_SIZE - s,
- "%s: %s\n",
- ras_block_str(i),
- ras_err_str(obj->head.type));
- } else
- s += scnprintf(&buf[s], PAGE_SIZE - s,
- "%s: disabled\n",
- ras_block_str(i));
- }
-
- return s;
+ return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}
static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
@@ -970,6 +1060,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
}
/* sysfs end */
+/**
+ * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+ *
+ * Normally when there is an uncorrectable error, the driver will reset
+ * the GPU to recover. However, in the event of an unrecoverable error,
+ * the driver provides an interface to reboot the system automatically
+ * instead.
+ *
+ * The following file in debugfs provides that interface:
+ * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo true > .../ras/auto_reboot
+ *
+ */
/* debugfs begin */
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
@@ -977,8 +1085,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
con->dir = debugfs_create_dir("ras", minor->debugfs_root);
- con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_eeprom_ops);
+
+ /*
+	 * After an uncorrectable error occurs, GPU recovery is usually
+	 * scheduled. But since GPU recovery is known to sometimes fail to
+	 * bring the GPU back, the interface below gives the user a direct
+	 * way to reboot the system automatically when an
+	 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
+	 * routine will not be called in that case.
+ */
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
+ &con->reboot);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -1023,10 +1144,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove(con->ent);
- debugfs_remove(con->dir);
+ debugfs_remove_recursive(con->dir);
con->dir = NULL;
- con->ent = NULL;
}
/* debugfs end */
@@ -1054,6 +1173,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
struct ras_ih_data *data = &obj->ih_data;
struct amdgpu_iv_entry entry;
int ret;
+ struct ras_err_data err_data = {0, 0, 0, NULL};
while (data->rptr != data->wptr) {
rmb();
@@ -1068,19 +1188,19 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
 		 * from the callback to update the error type/count, etc
*/
if (data->cb) {
- ret = data->cb(obj->adev, &entry);
+ ret = data->cb(obj->adev, &err_data, &entry);
/* ue will trigger an interrupt, and in that case
* we need do a reset to recovery the whole system.
* But leave IP do that recovery, here we just dispatch
* the error.
*/
- if (ret == AMDGPU_RAS_UE) {
- obj->err_data.ue_count++;
+ if (ret == AMDGPU_RAS_SUCCESS) {
+ /* these counts could be left as 0 if
+ * some blocks do not count error number
+ */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
}
- /* Might need get ce count by register, but not all IP
- * saves ce count, some IP just use one bit or two bits
- * to indicate ce happened.
- */
}
}
}
@@ -1219,6 +1339,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
data = con->eh_data;
if (!data || data->count == 0) {
*bps = NULL;
+ ret = -EINVAL;
goto out;
}
@@ -1230,15 +1351,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
for (; i < data->count; i++) {
(*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].bp,
+ .bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
- .flags = 0,
+ .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
if (data->last_reserved <= i)
- (*bps)[i].flags = 1;
- else if (data->bps[i].bo == NULL)
- (*bps)[i].flags = 2;
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (data->bps_bo[i] == NULL)
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
*count = data->count;
@@ -1252,109 +1373,51 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
- amdgpu_device_gpu_recover(ras->adev, 0);
+ if (amdgpu_device_should_recover_gpu(ras->adev))
+ amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0);
}
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr)
-{
- /* no need to free it actually. */
- amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
- return 0;
-}
-
-/* reserve vram with size@offset */
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- struct amdgpu_bo *bo;
-
- if (bo_ptr)
- *bo_ptr = NULL;
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
-
- r = amdgpu_bo_create(adev, &bp, &bo);
- if (r)
- return -EINVAL;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- goto error_reserve;
-
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- offset,
- offset + size);
- if (r)
- goto error_pin;
-
- if (bo_ptr)
- *bo_ptr = bo;
-
- amdgpu_bo_unreserve(bo);
- return r;
-
-error_pin:
- amdgpu_bo_unreserve(bo);
-error_reserve:
- amdgpu_bo_unref(&bo);
- return r;
-}
-
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
struct ras_err_handler_data *data, int pages)
{
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
- unsigned int align_space = ALIGN(new_space, 1024);
- void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
-
- if (!tmp)
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ struct amdgpu_bo **bps_bo =
+ kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
+
+ if (!bps || !bps_bo) {
+ kfree(bps);
+ kfree(bps_bo);
return -ENOMEM;
+ }
if (data->bps) {
- memcpy(tmp, data->bps,
+ memcpy(bps, data->bps,
data->count * sizeof(*data->bps));
kfree(data->bps);
}
+ if (data->bps_bo) {
+ memcpy(bps_bo, data->bps_bo,
+ data->count * sizeof(*data->bps_bo));
+ kfree(data->bps_bo);
+ }
- data->bps = tmp;
+ data->bps = bps;
+ data->bps_bo = bps_bo;
data->space_left += align_space - old_space;
return 0;
}
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages)
+ struct eeprom_table_record *bps, int pages)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = pages;
int ret = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
@@ -1371,24 +1434,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- while (i--)
- data->bps[data->count++].bp = bps[i];
-
+ memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
+ data->count += pages;
data->space_left -= pages;
+
out:
mutex_unlock(&con->recovery_lock);
return ret;
}
+/*
+ * write error record array to eeprom, the function should be
+ * protected by recovery_lock
+ */
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_ras_eeprom_control *control;
+ int save_count;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ control = &con->eeprom_control;
+ data = con->eh_data;
+ save_count = data->count - control->num_recs;
+ /* only new entries are saved */
+ if (save_count > 0)
+ if (amdgpu_ras_eeprom_process_recods(control,
+ &data->bps[control->num_recs],
+ true,
+ save_count)) {
+ DRM_ERROR("Failed to save EEPROM table data!");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * read error record array in eeprom and reserve enough space for
+ * storing new bad pages
+ */
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras.ras->eeprom_control;
+ struct eeprom_table_record *bps = NULL;
+ int ret = 0;
+
+ /* no bad page record, skip eeprom access */
+ if (!control->num_recs)
+ return ret;
+
+ bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ if (amdgpu_ras_eeprom_process_recods(control, bps, false,
+ control->num_recs)) {
+ DRM_ERROR("Failed to load EEPROM table records!");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
+
+out:
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * check if an address belongs to a bad page
+ *
+ * Note: this check is only for umc block
+ */
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i;
+ bool ret = false;
+
+ if (!con || !con->eh_data)
+ return ret;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page) {
+ ret = true;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
uint64_t bp;
- struct amdgpu_bo *bo;
- int i;
+ struct amdgpu_bo *bo = NULL;
+ int i, ret = 0;
if (!con || !con->eh_data)
return 0;
@@ -1399,18 +1558,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
goto out;
/* reserve vram at driver post stage. */
for (i = data->last_reserved; i < data->count; i++) {
- bp = data->bps[i].bp;
+ bp = data->bps[i].retired_page;
- if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
- PAGE_SIZE, &bo))
- DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+		/* There are two cases where a reserve error should be ignored:
+ * 1) a ras bad page has been allocated (used by someone);
+ * 2) a ras bad page has been reserved (duplicate error injection
+ * for one page);
+ */
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL))
+ DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i + 1;
+ bo = NULL;
}
+
+	/* continue to save bad pages to eeprom even if reserving vram fails */
+ ret = amdgpu_ras_save_bad_pages(adev);
out:
mutex_unlock(&con->recovery_lock);
- return 0;
+ return ret;
}
/* called when driver unload */
@@ -1430,11 +1600,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
goto out;
for (i = data->last_reserved - 1; i >= 0; i--) {
- bo = data->bps[i].bo;
+ bo = data->bps_bo[i];
- amdgpu_ras_release_vram(adev, &bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i;
}
out:
@@ -1442,41 +1612,54 @@ out:
return 0;
}
-static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * write the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * read the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data **data = &con->eh_data;
+ struct ras_err_handler_data **data;
+ int ret;
- *data = kmalloc(sizeof(**data),
- GFP_KERNEL|__GFP_ZERO);
- if (!*data)
- return -ENOMEM;
+ if (con)
+ data = &con->eh_data;
+ else
+ return 0;
+
+ *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- amdgpu_ras_load_bad_pages(adev);
- amdgpu_ras_reserve_bad_pages(adev);
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
+ if (ret)
+ goto free;
+
+ if (con->eeprom_control.num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ goto free;
+ ret = amdgpu_ras_reserve_bad_pages(adev);
+ if (ret)
+ goto release;
+ }
return 0;
+
+release:
+ amdgpu_ras_release_bad_pages(adev);
+free:
+ kfree((*data)->bps);
+ kfree((*data)->bps_bo);
+ kfree(*data);
+ con->eh_data = NULL;
+out:
+ DRM_WARN("Failed to initialize ras recovery!\n");
+
+ return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
@@ -1484,13 +1667,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data = con->eh_data;
+	/* if recovery_init failed to init it, fini is useless */
+ if (!data)
+ return 0;
+
cancel_work_sync(&con->recovery_work);
- amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_release_bad_pages(adev);
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
+ kfree(data->bps_bo);
kfree(data);
mutex_unlock(&con->recovery_lock);
@@ -1527,7 +1714,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0;
if (amdgpu_sriov_vf(adev) ||
- adev->asic_type != CHIP_VEGA20)
+ (adev->asic_type != CHIP_VEGA20 &&
+ adev->asic_type != CHIP_ARCTURUS))
return;
if (adev->is_atom_fw &&
@@ -1542,6 +1730,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int r;
if (con)
return 0;
@@ -1569,8 +1758,17 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (amdgpu_ras_recovery_init(adev))
- goto recovery_out;
+ if (adev->nbio.funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (r)
+ return r;
+ }
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
@@ -1582,14 +1780,84 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
con->hw_supported, con->supported);
return 0;
fs_out:
- amdgpu_ras_recovery_fini(adev);
-recovery_out:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
return -EINVAL;
}
+/* helper function to handle common stuff in ip late init phase */
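+/*
+ * It enables the RAS feature for the given block (or disables it when the
+ * block is not supported), hooks up the IH handler when a callback is
+ * provided, and creates the debugfs and sysfs nodes; node creation is
+ * skipped in the resume and gpu reset paths.
+ */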
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info)
+{
+ int r;
+
+ /* disable RAS feature per IP block if it is not supported */
+ if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ return 0;
+ }
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+ /* request gpu reset. will run again */
+ amdgpu_ras_request_reset_on_boot(adev,
+ ras_block->block);
+ return 0;
+ } else if (adev->in_suspend || adev->in_gpu_reset) {
+			/* in the resume phase, if we fail to enable ras,
+			 * clean up all ras fs nodes and disable ras */
+ goto cleanup;
+ } else
+ return r;
+ }
+
+ /* in resume phase, no need to create ras fs node */
+ if (adev->in_suspend || adev->in_gpu_reset)
+ return 0;
+
+ if (ih_info->cb) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+ if (r)
+ goto interrupt;
+ }
+
+ amdgpu_ras_debugfs_create(adev, fs_info);
+
+ r = amdgpu_ras_sysfs_create(adev, fs_info);
+ if (r)
+ goto sysfs;
+
+ return 0;
+cleanup:
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+ return r;
+}
+
+/* helper function to remove ras fs node and interrupt handler */
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info)
+{
+ if (!ras_block || !ih_info)
+ return;
+
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+}
+
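For reference, a minimal sketch of how an IP block might wrap the two helpers above (hypothetical block name "myblock" and callback; the SDMA helper added later in this series follows the same pattern):

	/* Editorial sketch, not part of the patch. Assumes the IP block has
	 * already allocated and filled in its ras_common_if (ras_if).
	 */
	static int myblock_ras_late_init(struct amdgpu_device *adev,
					 struct ras_common_if *ras_if,
					 ras_ih_cb cb)
	{
		struct ras_fs_if fs_info = {
			.sysfs_name = "myblock_err_count",
			.debugfs_name = "myblock_err_inject",
		};
		struct ras_ih_if ih_info = {
			.cb = cb,	/* hypothetical per-block IH callback */
		};

		fs_info.head = ih_info.head = *ras_if;
		return amdgpu_ras_late_init(adev, ras_if, &fs_info, &ih_info);
	}

On teardown the block would call amdgpu_ras_late_fini(adev, ras_if, &ih_info) with the same ih_info.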
/* do some init work after IP late init as dependence.
* and it runs in resume/gpu reset/booting up cases.
*/
@@ -1632,7 +1900,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
* See feature_enable_on_boot
*/
amdgpu_ras_disable_all_features(adev, 1);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
}
@@ -1683,3 +1951,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
return 0;
}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+{
+ uint32_t hw_supported, supported;
+
+ amdgpu_ras_check_supported(adev, &hw_supported, &supported);
+ if (!hw_supported)
+ return;
+
+ if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+
+ amdgpu_ras_reset_gpu(adev);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index b2841195bd3b..a5fe29a9373e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "ta_ras_if.h"
+#include "amdgpu_ras_eeprom.h"
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -52,6 +53,236 @@ enum amdgpu_ras_block {
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
+enum amdgpu_ras_gfx_subblock {
+ /* CPC */
+ AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
+ AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH =
+ AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPC_UCODE,
+ AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME1,
+ AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
+ AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME1,
+ AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME2,
+ AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
+ AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+ AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+ /* CPF */
+ AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME2 =
+ AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME1,
+ AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
+ AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
+ /* CPG */
+ AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPG_DMA_ROQ =
+ AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPG_DMA_TAG,
+ AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
+ AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
+ /* GDS */
+ AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_GDS_MEM = AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+ AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+ /* SPI */
+ AMDGPU_RAS_BLOCK__GFX_SPI_SR_MEM,
+ /* SQ */
+ AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_SQ_SGPR = AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_SQ_LDS_D,
+ AMDGPU_RAS_BLOCK__GFX_SQ_LDS_I,
+ AMDGPU_RAS_BLOCK__GFX_SQ_VGPR,
+ AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_END = AMDGPU_RAS_BLOCK__GFX_SQ_VGPR,
+ /* SQC (3 ranges) */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START,
+ /* SQC range 0 */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1 */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2 */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END,
+ /* TA */
+ AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TA_FS_DFIFO =
+ AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TA_FS_AFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_FL_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_FX_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO,
+ /* TCA */
+ AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCA_HOLE_FIFO =
+ AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges) */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START,
+ /* TCC range 0 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
+ AMDGPU_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
+ AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_DEC =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_DATA =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
+ AMDGPU_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
+ AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
+ AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END,
+ /* TCI */
+ AMDGPU_RAS_BLOCK__GFX_TCI_WRITE_RAM,
+ /* TCP */
+ AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCP_CACHE_RAM =
+ AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCP_CMD_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCP_VM_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCP_DB_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
+ AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+ AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+ /* TD */
+ AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_LO =
+ AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
+ AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TD_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges) */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START,
+ /* EA range 0 */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1 */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2 */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D0MEM =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D1MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D2MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank */
+ AMDGPU_RAS_BLOCK__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker */
+ AMDGPU_RAS_BLOCK__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache */
+ AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache */
+ AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
+ AMDGPU_RAS_BLOCK__GFX_MAX
+};
+
enum amdgpu_ras_error_type {
AMDGPU_RAS_ERROR__NONE = 0,
AMDGPU_RAS_ERROR__PARITY = 1,
@@ -76,9 +307,6 @@ struct ras_common_if {
char name[32];
};
-typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
-
struct amdgpu_ras {
/* ras infrastructure */
/* for ras itself. */
@@ -89,8 +317,6 @@ struct amdgpu_ras {
struct list_head head;
/* debugfs */
struct dentry *dir;
- /* debugfs ctrl */
- struct dentry *ent;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -106,10 +332,84 @@ struct amdgpu_ras {
struct mutex recovery_lock;
uint32_t flags;
+ bool reboot;
+ struct amdgpu_ras_eeprom_control eeprom_control;
};
-/* interfaces for IP */
+struct ras_fs_data {
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_err_data {
+ unsigned long ue_count;
+ unsigned long ce_count;
+ unsigned long err_addr_cnt;
+ struct eeprom_table_record *err_addr;
+};
+
+struct ras_err_handler_data {
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* point to reserved bo array */
+ struct amdgpu_bo **bps_bo;
+ /* the count of entries */
+ int count;
+ /* space left for new entries */
+ int space_left;
+ /* last reserved entry's index + 1 */
+ int last_reserved;
+};
+
+typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+
+struct ras_ih_data {
+ /* interrupt bottom half */
+ struct work_struct ih_work;
+ int inuse;
+ /* IP callback */
+ ras_ih_cb cb;
+ /* ring buffer holding the queued entries */
+ unsigned char *ring;
+ unsigned int ring_size;
+ unsigned int element_size;
+ unsigned int aligned_element_size;
+ unsigned int rptr;
+ unsigned int wptr;
+};
+struct ras_manager {
+ struct ras_common_if head;
+ /* reference count */
+ int use;
+ /* ras block link */
+ struct list_head node;
+ /* the device */
+ struct amdgpu_device *adev;
+ /* debugfs */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute sysfs_attr;
+ int attr_inuse;
+
+ /* fs node name */
+ struct ras_fs_data fs_data;
+
+ /* IH data */
+ struct ras_ih_data ih_data;
+
+ struct ras_err_data err_data;
+};
+
+struct ras_badpage {
+ unsigned int bp;
+ unsigned int size;
+ unsigned int flags;
+};
+
+/* interfaces for IP */
struct ras_fs_if {
struct ras_common_if head;
char sysfs_name[32];
@@ -178,26 +478,32 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
return ras && (ras->supported & (1 << block));
}
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
unsigned int block);
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
-int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages);
+ struct eeprom_table_record *bps, int pages);
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
- bool is_baco)
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* save bad pages to eeprom before gpu reset;
+ * i2c may be unstable during gpu reset
+ */
+ if (in_task())
+ amdgpu_ras_reserve_bad_pages(adev);
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
return 0;
@@ -263,6 +569,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
int amdgpu_ras_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info);
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
@@ -296,4 +609,22 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
+
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+extern atomic_t amdgpu_ras_in_intr;
+
+static inline bool amdgpu_ras_intr_triggered(void)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+static inline void amdgpu_ras_intr_cleared(void)
+{
+ atomic_set(&amdgpu_ras_in_intr, 0);
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
new file mode 100644
index 000000000000..2a8e04895595
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include <linux/bits.h>
+#include "smu_v11_0_i2c.h"
+
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
+#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+
+/*
+ * The two macros below give the actual size in bytes that
+ * these entities occupy in the EEPROM memory.
+ * EEPROM_TABLE_RECORD_SIZE differs from sizeof(eeprom_table_record), which
+ * uses uint64 to store 6-byte fields such as retired_page.
+ */
+#define EEPROM_TABLE_HEADER_SIZE 20
+#define EEPROM_TABLE_RECORD_SIZE 24
+
+#define EEPROM_ADDRESS_SIZE 0x2
+
+/* Table hdr is 'AMDR' */
+#define EEPROM_TABLE_HDR_VAL 0x414d4452
+#define EEPROM_TABLE_VER 0x00010000
+
+/* Assume 2 Mbit size */
+#define EEPROM_SIZE_BYTES 256000
+#define EEPROM_PAGE__SIZE_BYTES 256
+#define EEPROM_HDR_START 0
+#define EEPROM_RECORD_START (EEPROM_HDR_START + EEPROM_TABLE_HEADER_SIZE)
+#define EEPROM_MAX_RECORD_NUM ((EEPROM_SIZE_BYTES - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE)
+#define EEPROM_ADDR_MSB_MASK GENMASK(17, 8)
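A quick sanity check on the limits above (illustrative arithmetic only, not part of the patch):

/* (EEPROM_SIZE_BYTES - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE
 *	= (256000 - 20) / 24 = 10665 records at most.
 * EEPROM_ADDR_MSB_MASK = GENMASK(17, 8) = 0x3ff00, i.e. every address bit
 * above the 256-byte page offset; these are the bits checked when deciding
 * whether a record write crosses a page boundary.
 */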
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+
+static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
+ unsigned char *buff)
+{
+ uint32_t *pp = (uint32_t *) buff;
+
+ pp[0] = cpu_to_le32(hdr->header);
+ pp[1] = cpu_to_le32(hdr->version);
+ pp[2] = cpu_to_le32(hdr->first_rec_offset);
+ pp[3] = cpu_to_le32(hdr->tbl_size);
+ pp[4] = cpu_to_le32(hdr->checksum);
+}
+
+static void __decode_table_header_from_buff(struct amdgpu_ras_eeprom_table_header *hdr,
+ unsigned char *buff)
+{
+ uint32_t *pp = (uint32_t *)buff;
+
+ hdr->header = le32_to_cpu(pp[0]);
+ hdr->version = le32_to_cpu(pp[1]);
+ hdr->first_rec_offset = le32_to_cpu(pp[2]);
+ hdr->tbl_size = le32_to_cpu(pp[3]);
+ hdr->checksum = le32_to_cpu(pp[4]);
+}
+
+static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
+ unsigned char *buff)
+{
+ int ret = 0;
+ struct i2c_msg msg = {
+ .addr = 0,
+ .flags = 0,
+ .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
+ .buf = buff,
+ };
+
+
+ *(uint16_t *)buff = EEPROM_HDR_START;
+ __encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE);
+
+ msg.addr = control->i2c_address;
+
+ ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ if (ret < 1)
+ DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
+
+ return ret;
+}
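The write above is a single I2C message whose payload is the 2-byte EEPROM destination address followed by the encoded header; as a size check (sketch based only on the code above):

/* msg.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE = 2 + 20 = 22,
 * i.e. buff[0..1] hold the EEPROM address (here EEPROM_HDR_START = 0) and
 * buff[2..21] hold the five 32-bit header fields in little-endian order.
 */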
+
+
+
+static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
+{
+ int i;
+ uint32_t tbl_sum = 0;
+
+ /* Header checksum, skip checksum field in the calculation */
+ for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
+ tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
+
+ return tbl_sum;
+}
+
+static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
+ int num)
+{
+ int i, j;
+ uint32_t tbl_sum = 0;
+
+ /* Records checksum */
+ for (i = 0; i < num; i++) {
+ struct eeprom_table_record *record = &records[i];
+
+ for (j = 0; j < sizeof(*record); j++) {
+ tbl_sum += *(((unsigned char *)record) + j);
+ }
+ }
+
+ return tbl_sum;
+}
+
+static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
+}
+
+/* Checksum = 256 - ((sum of all table entries) mod 256) */
+static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num,
+ uint32_t old_hdr_byte_sum)
+{
+ /*
+ * This updates the table byte sum with the new records.
+ *
+ * TODO: What happens when the EEPROM table wraps around and old
+ * records at the start get overwritten?
+ */
+
+ /* need to recalculate updated header byte sum */
+ control->tbl_byte_sum -= old_hdr_byte_sum;
+ control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
+
+ control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
+}
+
+/* (table sum mod 256) + checksum must equal 256 */
+static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
+
+ if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
+ DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
+ return false;
+ }
+
+ return true;
+}
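A worked example of the checksum rule used by the two functions above (hypothetical numbers, for illustration only):

/* If the byte sum of the header plus all records is 4660 (0x1234):
 *	4660 % 256 = 52, so the stored checksum is 256 - 52 = 204.
 * On read-back, __validate_tbl_checksum() recomputes the byte sum and
 * checks that (4660 % 256) + 204 == 256, which holds here.
 */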
+
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ mutex_lock(&control->tbl_mutex);
+
+ hdr->header = EEPROM_TABLE_HDR_VAL;
+ hdr->version = EEPROM_TABLE_VER;
+ hdr->first_rec_offset = EEPROM_RECORD_START;
+ hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
+
+ control->tbl_byte_sum = 0;
+ __update_tbl_checksum(control, NULL, 0, 0);
+ control->next_addr = EEPROM_RECORD_START;
+
+ ret = __update_table_header(control, buff);
+
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+
+}
+
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct i2c_msg msg = {
+ .addr = 0,
+ .flags = I2C_M_RD,
+ .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
+ .buf = buff,
+ };
+
+ mutex_init(&control->tbl_mutex);
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
+ ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
+ break;
+
+ case CHIP_ARCTURUS:
+ control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+ ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (ret) {
+ DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
+ return ret;
+ }
+
+ msg.addr = control->i2c_address;
+
+ /* Read/Create table header from EEPROM address 0 */
+ ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ if (ret < 1) {
+ DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
+ return ret;
+ }
+
+ __decode_table_header_from_buff(hdr, &buff[2]);
+
+ if (hdr->header == EEPROM_TABLE_HDR_VAL) {
+ control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
+ EEPROM_TABLE_RECORD_SIZE;
+ control->tbl_byte_sum = __calc_hdr_byte_sum(control);
+ control->next_addr = EEPROM_RECORD_START;
+
+ DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
+ control->num_recs);
+
+ } else {
+ DRM_INFO("Creating new EEPROM table");
+
+ ret = amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ return ret == 1 ? 0 : -EIO;
+}
+
+void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
+ break;
+ case CHIP_ARCTURUS:
+ smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ unsigned char *buff)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields, laid out per the EEPROM page spec in LE format */
+ buff[i++] = record->err_type;
+
+ buff[i++] = record->bank;
+
+ tmp = cpu_to_le64(record->ts);
+ memcpy(buff + i, &tmp, 8);
+ i += 8;
+
+ tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+ memcpy(buff + i, &tmp, 6);
+ i += 6;
+
+ buff[i++] = record->mem_channel;
+ buff[i++] = record->mcumc_id;
+
+ tmp = cpu_to_le64((record->retired_page & 0xffffffffffff));
+ memcpy(buff + i, &tmp, 6);
+}
+
+static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ unsigned char *buff)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields, laid out per the EEPROM page spec in LE format */
+ record->err_type = buff[i++];
+
+ record->bank = buff[i++];
+
+ memcpy(&tmp, buff + i, 8);
+ record->ts = le64_to_cpu(tmp);
+ i += 8;
+
+ memcpy(&tmp, buff + i, 6);
+ record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+ i += 6;
+
+ record->mem_channel = buff[i++];
+ record->mcumc_id = buff[i++];
+
+ memcpy(&tmp, buff + i, 6);
+ record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
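The encode/decode pair above serializes each record into the 24-byte on-EEPROM layout; the byte budget works out as follows (derived only from the code above, not from a separate spec document):

/* err_type (1) + bank/cu (1) + ts (8) + offset (6, low 48 bits)
 *	+ mem_channel (1) + mcumc_id (1) + retired_page (6, low 48 bits)
 *	= 24 bytes = EEPROM_TABLE_RECORD_SIZE
 */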
+
+/*
+ * When the end of EEPROM memory is reached, jump back to record address 0.
+ * When the next record access would go beyond an EEPROM page boundary, modify
+ * bits A17/A8 in the I2C selector to move to the next page.
+ */
+static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
+{
+ uint32_t next_address = curr_address + EEPROM_TABLE_RECORD_SIZE;
+
+ /* When all EEPROM memory used jump back to 0 address */
+ if (next_address > EEPROM_SIZE_BYTES) {
+ DRM_INFO("Reached end of EEPROM memory, jumping to 0 "
+ "and overriding old record");
+ return EEPROM_RECORD_START;
+ }
+
+ /*
+ * To check whether we cross a page boundary, compare the next address with
+ * the current one and see whether bits 17:8 of the EEPROM address change.
+ * If they do, start from the next 256-byte page.
+ *
+ * https://www.st.com/resource/en/datasheet/m24m02-dr.pdf sec. 5.1.2
+ */
+ if ((curr_address & EEPROM_ADDR_MSB_MASK) != (next_address & EEPROM_ADDR_MSB_MASK)) {
+ DRM_DEBUG_DRIVER("Reached end of EEPROM memory page, jumping to next: %lx",
+ (next_address & EEPROM_ADDR_MSB_MASK));
+
+ return (next_address & EEPROM_ADDR_MSB_MASK);
+ }
+
+ return curr_address;
+}
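For illustration, one concrete case of the page-boundary correction above (hypothetical addresses):

/* curr_address = 0x1f0: next_address = 0x1f0 + 24 = 0x208.
 * Bits 17:8 change from 0x001 to 0x002, so the function returns
 * 0x208 & EEPROM_ADDR_MSB_MASK = 0x200, i.e. the start of the next
 * 256-byte page, and the record is written there instead.
 */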
+
+int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records,
+ bool write,
+ int num)
+{
+ int i, ret = 0;
+ struct i2c_msg *msgs, *msg;
+ unsigned char *buffs, *buff;
+ struct eeprom_table_record *record;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
+ return 0;
+
+ buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
+ GFP_KERNEL);
+ if (!buffs)
+ return -ENOMEM;
+
+ mutex_lock(&control->tbl_mutex);
+
+ msgs = kcalloc(num, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs) {
+ ret = -ENOMEM;
+ goto free_buff;
+ }
+
+ /* In case of overflow, start from the beginning so the newest records are not lost */
+ if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
+ control->next_addr = EEPROM_RECORD_START;
+
+
+ /*
+ * TODO: Currently an EEPROM write is issued for each record, which creates
+ * internal fragmentation. Optimize the code to do full 256-byte page
+ * writes.
+ */
+ for (i = 0; i < num; i++) {
+ buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
+ msg = &msgs[i];
+
+ control->next_addr = __correct_eeprom_dest_address(control->next_addr);
+
+ /*
+ * Fold bits 16,17 of the EEPROM address into bits 1,2 of the I2C
+ * device address byte
+ */
+ msg->addr = control->i2c_address |
+ ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
+ msg->flags = write ? 0 : I2C_M_RD;
+ msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE;
+ msg->buf = buff;
+
+ /* Insert the EEPROM destination address, bits 0-15 */
+ buff[0] = ((control->next_addr >> 8) & 0xff);
+ buff[1] = (control->next_addr & 0xff);
+
+ /* EEPROM table content is stored in LE format */
+ if (write)
+ __encode_table_record_to_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
+
+ /*
+ * The destination EEPROM address might need to be corrected to account
+ * for page or entire memory wrapping
+ */
+ control->next_addr += EEPROM_TABLE_RECORD_SIZE;
+ }
+
+ ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+ if (ret < 1) {
+ DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
+
+ /* TODO: Restore the previous value of next_addr? */
+ goto free_msgs;
+ }
+
+
+ if (!write) {
+ for (i = 0; i < num; i++) {
+ buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
+
+ __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
+ }
+ }
+
+ if (write) {
+ uint32_t old_hdr_byte_sum = __calc_hdr_byte_sum(control);
+
+ /*
+ * Update the table header with the new size and checksum, and account
+ * for table wrap-around, where the assumption is that a wrapped table
+ * is treated as an empty one.
+ *
+ * TODO: check that this assumption is correct
+ */
+ control->num_recs += num;
+ control->num_recs %= EEPROM_MAX_RECORD_NUM;
+ control->tbl_hdr.tbl_size += EEPROM_TABLE_RECORD_SIZE * num;
+ if (control->tbl_hdr.tbl_size > EEPROM_SIZE_BYTES)
+ control->tbl_hdr.tbl_size = EEPROM_TABLE_HEADER_SIZE +
+ control->num_recs * EEPROM_TABLE_RECORD_SIZE;
+
+ __update_tbl_checksum(control, records, num, old_hdr_byte_sum);
+
+ __update_table_header(control, buffs);
+ } else if (!__validate_tbl_checksum(control, records, num)) {
+ DRM_WARN("EEPROM Table checksum mismatch!");
+ /* TODO: Uncomment when EEPROM read/write is reliable */
+ /* ret = -EIO; */
+ }
+
+free_msgs:
+ kfree(msgs);
+
+free_buff:
+ kfree(buffs);
+
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret == num ? 0 : -EIO;
+}
+
+/* Kept for testing in case bugs are encountered */
+#if 0
+void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control)
+{
+ int i;
+ struct eeprom_table_record *recs = kcalloc(1, sizeof(*recs), GFP_KERNEL);
+
+ if (!recs)
+ return;
+
+ for (i = 0; i < 1 ; i++) {
+ recs[i].address = 0xdeadbeef;
+ recs[i].retired_page = i;
+ }
+
+ if (!amdgpu_ras_eeprom_process_recods(control, recs, true, 1)) {
+
+ memset(recs, 0, sizeof(*recs) * 1);
+
+ control->next_addr = EEPROM_RECORD_START;
+
+ if (!amdgpu_ras_eeprom_process_recods(control, recs, false, 1)) {
+ for (i = 0; i < 1; i++)
+ DRM_INFO("rec.address :0x%llx, rec.retired_page :%llu",
+ recs[i].address, recs[i].retired_page);
+ } else
+ DRM_ERROR("Failed in reading from table");
+
+ } else
+ DRM_ERROR("Failed in writing to table");
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
new file mode 100644
index 000000000000..ca78f812d436
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_RAS_EEPROM_H
+#define _AMDGPU_RAS_EEPROM_H
+
+#include <linux/i2c.h>
+
+struct amdgpu_device;
+
+enum amdgpu_ras_eeprom_err_type{
+ AMDGPU_RAS_EEPROM_ERR_PLACE_HOLDER,
+ AMDGPU_RAS_EEPROM_ERR_RECOVERABLE,
+ AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE
+};
+
+struct amdgpu_ras_eeprom_table_header {
+ uint32_t header;
+ uint32_t version;
+ uint32_t first_rec_offset;
+ uint32_t tbl_size;
+ uint32_t checksum;
+}__attribute__((__packed__));
+
+struct amdgpu_ras_eeprom_control {
+ struct amdgpu_ras_eeprom_table_header tbl_hdr;
+ struct i2c_adapter eeprom_accessor;
+ uint32_t next_addr;
+ unsigned int num_recs;
+ struct mutex tbl_mutex;
+ bool bus_locked;
+ uint32_t tbl_byte_sum;
+ uint16_t i2c_address; // 8-bit represented address
+};
+
+/*
+ * Represents a single table record. Packed so it can easily be serialized
+ * into a byte stream.
+ */
+struct eeprom_table_record {
+
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+
+ uint64_t retired_page;
+ uint64_t ts;
+
+ enum amdgpu_ras_eeprom_err_type err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+}__attribute__((__packed__));
+
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
+void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
+
+int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records,
+ bool write,
+ int num);
+
+void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control);
+
+#endif // _AMDGPU_RAS_EEPROM_H
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4410c97ac9b7..930316e60155 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
#include <drm/drm_print.h>
/* max number of rings */
-#define AMDGPU_MAX_RINGS 24
+#define AMDGPU_MAX_RINGS 28
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index c8793e6cc3c5..6373bfb47d55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
*/
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
u32 dws;
int r;
/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
return r;
}
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 5c13c503e61f..a2ee30b16212 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
return csa_mc_addr;
}
+
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info)
+{
+ int r, i;
+ struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+
+ if (!ih_info)
+ return -EINVAL;
+
+ if (!adev->sdma.ras_if) {
+ adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->sdma.ras_if)
+ return -ENOMEM;
+ adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
+ adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if->sub_block_index = 0;
+ strcpy(adev->sdma.ras_if->name, "sdma");
+ }
+ fs_info.head = ih_info->head = *adev->sdma.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
+ &fs_info, ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (r)
+ goto late_fini;
+ }
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
+free:
+ kfree(adev->sdma.ras_if);
+ adev->sdma.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ /* the cb member is not used by
+ * amdgpu_ras_interrupt_remove_handler; initialize it only
+ * to satisfy the check in amdgpu_ras_late_fini
+ */
+ .cb = amdgpu_sdma_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev);
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 35dd152f9d5c..485335267d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -25,11 +25,17 @@
#define __AMDGPU_SDMA_H__
/* max number of IP instances */
-#define AMDGPU_MAX_SDMA_INSTANCES 2
+#define AMDGPU_MAX_SDMA_INSTANCES 8
enum amdgpu_sdma_irq {
AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_SDMA_IRQ_INSTANCE2,
+ AMDGPU_SDMA_IRQ_INSTANCE3,
+ AMDGPU_SDMA_IRQ_INSTANCE4,
+ AMDGPU_SDMA_IRQ_INSTANCE5,
+ AMDGPU_SDMA_IRQ_INSTANCE6,
+ AMDGPU_SDMA_IRQ_INSTANCE7,
AMDGPU_SDMA_IRQ_LAST
};
@@ -44,8 +50,18 @@ struct amdgpu_sdma_instance {
bool burst_nop;
};
+struct amdgpu_sdma_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev,
+ void *ras_ih_info);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ uint32_t instance, void *ras_error_status);
+};
+
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+ struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
+ uint32_t num_sdma_sched;
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
@@ -53,6 +69,7 @@ struct amdgpu_sdma {
uint32_t srbm_soft_reset;
bool has_page_queue;
struct ras_common_if *ras_if;
+ const struct amdgpu_sdma_ras_funcs *funcs;
};
/*
@@ -98,4 +115,13 @@ struct amdgpu_sdma_instance *
amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 9828f3c7c655..a09b6b9c27d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -129,7 +129,8 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
+ bool explicit)
{
struct amdgpu_sync_entry *e;
@@ -151,19 +152,18 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
* amdgpu_sync_fence - remember to sync to this fence
*
* @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @f: fence to sync to
+ * @explicit: if this is an explicit dependency
*
+ * Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f, bool explicit)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ bool explicit)
{
struct amdgpu_sync_entry *e;
if (!f)
return 0;
- if (amdgpu_sync_same_dev(adev, f) &&
- amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
- amdgpu_sync_keep_later(&sync->last_vm_update, f);
if (amdgpu_sync_add_later(sync, f, explicit))
return 0;
@@ -180,6 +180,24 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
}
/**
+ * amdgpu_sync_vm_fence - remember to sync to this VM fence
+ *
+ * @adev: amdgpu device
+ * @sync: sync object to add fence to
+ * @fence: the VM fence to add
+ *
+ * Add the fence to the sync object and remember it as VM update.
+ */
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
+{
+ if (!fence)
+ return 0;
+
+ amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+ return amdgpu_sync_fence(sync, fence, false);
+}
+
+/**
* amdgpu_sync_resv - sync to a reservation object
*
* @sync: sync object to add fences from reservation object to
@@ -190,10 +208,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void *owner, bool explicit_sync)
{
- struct reservation_object_list *flist;
+ struct dma_resv_list *flist;
struct dma_fence *f;
void *fence_owner;
unsigned i;
@@ -203,16 +221,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return -EINVAL;
/* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
- r = amdgpu_sync_fence(adev, sync, f, false);
+ f = dma_resv_get_excl(resv);
+ r = amdgpu_sync_fence(sync, f, false);
- flist = reservation_object_get_list(resv);
+ flist = dma_resv_get_list(resv);
if (!flist || r)
return r;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
@@ -222,13 +240,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
continue;
if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates are only interesting
- * for other VM updates and moves.
+ /* VM updates only sync with moves but not with user
+ * command submissions or KFD eviction fences
*/
- if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
- (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
- ((owner == AMDGPU_FENCE_OWNER_VM) !=
- (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+ if (owner == AMDGPU_FENCE_OWNER_VM &&
+ fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
/* Ignore fence from the same owner and explicit one as
@@ -239,7 +255,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
continue;
}
- r = amdgpu_sync_fence(adev, sync, f, false);
+ r = amdgpu_sync_fence(sync, f, false);
if (r)
break;
}
@@ -340,7 +356,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
- r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
+ r = amdgpu_sync_fence(clone, f, e->explicit);
if (r)
return r;
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 10cf23a57f17..d62c2b81d92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,7 +27,7 @@
#include <linux/hashtable.h>
struct dma_fence;
-struct reservation_object;
+struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
@@ -40,16 +40,18 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f, bool explicit);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ bool explicit);
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void *owner,
bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
+ bool *explicit);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b66d29d5ffa2..b158230af8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 77674a7b9616..63e734a125fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(unsigned int, context)
__field(unsigned int, seqno)
__field(struct dma_fence *, fence)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__field(unsigned int, context)
__field(unsigned int, seqno)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint64_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
+ uint32_t incr, uint64_t flags, bool direct),
+ TP_ARGS(pe, addr, count, incr, flags, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
@@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
+ "direct=%d", __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count, __entry->direct)
);
TRACE_EVENT(amdgpu_vm_copy_ptes,
- TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
- TP_ARGS(pe, src, count),
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct),
+ TP_ARGS(pe, src, count, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, src)
__field(u32, count)
+ __field(bool, direct)
),
TP_fast_assign(
__entry->pe = pe;
__entry->src = src;
__entry->count = count;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, src=%010Lx, count=%u",
- __entry->pe, __entry->src, __entry->count)
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d",
+ __entry->pe, __entry->src, __entry->count,
+ __entry->direct)
);
TRACE_EVENT(amdgpu_vm_flush,
@@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __field(const char *,name)
+ __string(ring, sched_job->base.sched->name)
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
@@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __entry->name = sched_job->base.sched->name;
+ __assign_str(ring, sched_job->base.sched->name)
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __entry->name, __entry->id,
+ __get_str(ring), __entry->id,
__entry->fence, __entry->ctx,
__entry->seqno)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e51b48ac48eb..dee446278417 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -35,10 +35,13 @@
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
+#include <linux/dma-buf.h>
+#include <linux/sizes.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -54,6 +57,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
@@ -227,7 +231,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+ return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
filp->private_data);
}
@@ -303,7 +307,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -440,10 +444,26 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
new_mem->num_pages << PAGE_SHIFT,
- bo->resv, &fence);
+ bo->base.resv, &fence);
if (r)
goto error;
+ /* clear the space being freed */
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ (ttm_to_amdgpu_bo(bo)->flags &
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+ struct dma_fence *wipe_fence = NULL;
+
+ r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
+ NULL, &wipe_fence);
+ if (r) {
+ goto error;
+ } else if (wipe_fence) {
+ dma_fence_put(fence);
+ fence = wipe_fence;
+ }
+ }
+
/* Always block for VM page tables before committing the new location */
if (bo->type == ttm_bo_type_kernel)
r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
@@ -468,15 +488,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* create space/pages for new_mem in GTT space */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -527,15 +544,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* make space in GTT for old_mem buffer */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -747,6 +761,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
+ struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
@@ -756,6 +771,20 @@ struct amdgpu_ttm_tt {
#endif
};
+#ifdef CONFIG_DRM_AMDGPU_USERPTR
+/* flags used internally by HMM, not related to CPU/GPU PTE flags */
+static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
+ (1 << 0), /* HMM_PFN_VALID */
+ (1 << 1), /* HMM_PFN_WRITE */
+ 0 /* HMM_PFN_DEVICE_PRIVATE */
+};
+
+static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
+ 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
+ 0, /* HMM_PFN_NONE */
+ 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
+};
+
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -763,97 +792,89 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-
-#define MAX_RETRY_HMM_RANGE_FAULT 16
-
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
- struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct mm_struct *mm = gtt->usertask->mm;
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct hmm_range *range;
+ unsigned long timeout;
+ struct mm_struct *mm;
unsigned long i;
- uint64_t *pfns;
- int retry = 0;
int r = 0;
- if (!mm) /* Happens during process shutdown */
- return -ESRCH;
-
- if (unlikely(!mirror)) {
- DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
- r = -EFAULT;
- goto out;
+ mm = bo->notifier.mm;
+ if (unlikely(!mm)) {
+ DRM_DEBUG_DRIVER("BO is not registered?\n");
+ return -EFAULT;
}
- vma = find_vma(mm, start);
- if (unlikely(!vma || start < vma->vm_start)) {
- r = -EFAULT;
- goto out;
- }
- if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
- vma->vm_file)) {
- r = -EPERM;
- goto out;
- }
+ /* Is another get_user_pages() running at the same time? */
+ if (WARN_ON(gtt->range))
+ return -EFAULT;
+
+ if (!mmget_not_zero(mm)) /* Happens during process shutdown */
+ return -ESRCH;
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (unlikely(!range)) {
r = -ENOMEM;
goto out;
}
+ range->notifier = &bo->notifier;
+ range->flags = hmm_range_flags;
+ range->values = hmm_range_values;
+ range->pfn_shift = PAGE_SHIFT;
+ range->start = bo->notifier.interval_tree.start;
+ range->end = bo->notifier.interval_tree.last + 1;
+ range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ if (!amdgpu_ttm_tt_is_readonly(ttm))
+ range->default_flags |= range->flags[HMM_PFN_WRITE];
- pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
- if (unlikely(!pfns)) {
+ range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
+ GFP_KERNEL);
+ if (unlikely(!range->pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
- amdgpu_hmm_init_range(range);
- range->default_flags = range->flags[HMM_PFN_VALID];
- range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
- 0 : range->flags[HMM_PFN_WRITE];
- range->pfn_flags_mask = 0;
- range->pfns = pfns;
- hmm_range_register(range, mirror, start,
- start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+ if (unlikely(!vma || start < vma->vm_start)) {
+ r = -EFAULT;
+ goto out_unlock;
+ }
+ if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+ vma->vm_file)) {
+ r = -EPERM;
+ goto out_unlock;
+ }
+ up_read(&mm->mmap_sem);
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
- /*
- * Just wait for range to be valid, safe to ignore return value as we
- * will use the return value of hmm_range_fault() below under the
- * mmap_sem to ascertain the validity of the range.
- */
- hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
+ range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
-
- r = hmm_range_fault(range, true);
- if (unlikely(r < 0)) {
- if (likely(r == -EAGAIN)) {
- /*
- * return -EAGAIN, mmap_sem is dropped
- */
- if (retry++ < MAX_RETRY_HMM_RANGE_FAULT)
- goto retry;
- else
- pr_err("Retry hmm fault too many times\n");
- }
-
- goto out_up_read;
- }
-
+ r = hmm_range_fault(range, 0);
up_read(&mm->mmap_sem);
+ if (unlikely(r <= 0)) {
+ /*
+ * FIXME: This timeout should encompass the retry from
+ * mmu_interval_read_retry() as well.
+ */
+ if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ goto retry;
+ goto out_free_pfns;
+ }
for (i = 0; i < ttm->num_pages; i++) {
- pages[i] = hmm_device_entry_to_page(range, pfns[i]);
+ /* FIXME: The pages cannot be touched outside the notifier_lock */
+ pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
if (unlikely(!pages[i])) {
pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, pfns[i]);
+ i, range->pfns[i]);
r = -ENOMEM;
goto out_free_pfns;
@@ -861,18 +882,18 @@ retry:
}
gtt->range = range;
+ mmput(mm);
return 0;
-out_up_read:
- if (likely(r != -EAGAIN))
- up_read(&mm->mmap_sem);
+out_unlock:
+ up_read(&mm->mmap_sem);
out_free_pfns:
- hmm_range_unregister(range);
- kvfree(pfns);
+ kvfree(range->pfns);
out_free_ranges:
kfree(range);
out:
+ mmput(mm);
return r;
}
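
For reference, a minimal sketch of the notifier/retry pattern the rewritten function now follows, assuming a registered mmu_interval_notifier and an already populated hmm_range; error handling is trimmed and the helper name is illustrative:

static int fault_user_range(struct mm_struct *mm,
			    struct mmu_interval_notifier *notifier,
			    struct hmm_range *range)
{
	unsigned long timeout = jiffies +
		msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	long r;

retry:
	/* snapshot the invalidation sequence before faulting the range */
	range->notifier_seq = mmu_interval_read_begin(notifier);
	down_read(&mm->mmap_sem);
	r = hmm_range_fault(range, 0);
	up_read(&mm->mmap_sem);
	if (r <= 0) {
		/* 0 or -EBUSY means the range was invalidated underneath us */
		if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
			goto retry;
		return r ? r : -EBUSY;
	}
	return 0;
}
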
@@ -897,15 +918,18 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
"No user pages to check\n");
if (gtt->range) {
- r = hmm_range_valid(gtt->range);
- hmm_range_unregister(gtt->range);
-
+ /*
+ * FIXME: Must always hold notifier_lock for this, and must
+ * not ignore the return code.
+ */
+ r = mmu_interval_read_retry(gtt->range->notifier,
+ gtt->range->notifier_seq);
kvfree(gtt->range->pfns);
kfree(gtt->range);
gtt->range = NULL;
}
- return r;
+ return !r;
}
#endif
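
The pages captured above are only trustworthy if the sequence number taken by mmu_interval_read_begin() is still current. A short sketch of the validity check that amdgpu_ttm_tt_get_user_pages_done() performs, under the assumption that the caller holds the appropriate notifier lock:

/* Illustrative only: returns true if no invalidation ran since read_begin(),
 * i.e. the pages captured under that sequence number are still valid. */
static bool user_pages_still_valid(struct mmu_interval_notifier *notifier,
				   unsigned long seq)
{
	return !mmu_interval_read_retry(notifier, seq);
}
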
@@ -986,10 +1010,18 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- if (gtt->range &&
- ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[0]))
- WARN_ONCE(1, "Missing get_user_page_done\n");
+ if (gtt->range) {
+ unsigned long i;
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm->pages[i] !=
+ hmm_device_entry_to_page(gtt->range,
+ gtt->range->pfns[i]))
+ break;
+ }
+
+ WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
+ }
#endif
}
@@ -1216,16 +1248,14 @@ static struct ttm_backend_func amdgpu_backend_func = {
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_ttm_adev(bo->bdev);
-
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
+ gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
@@ -1246,7 +1276,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
@@ -1259,7 +1288,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return 0;
}
- if (slave && ttm->sg) {
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
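
A hedged sketch of the import path added above: on first populate the dma-buf attachment is mapped and the resulting sg_table is unpacked into TTM's page and DMA-address arrays. The helper name is illustrative; the calls match the kernel APIs used in the hunk.

static int map_imported_dmabuf(struct drm_gem_object *gobj, struct ttm_tt *ttm,
			       dma_addr_t *dma_address)
{
	struct sg_table *sgt;

	/* map the exporter's pages for bidirectional DMA */
	sgt = dma_buf_map_attachment(gobj->import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ttm->sg = sgt;
	return drm_prime_sg_to_page_addr_arrays(sgt, ttm->pages, dma_address,
						ttm->num_pages);
}
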
@@ -1286,9 +1327,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ struct amdgpu_device *adev;
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1297,7 +1337,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
}
- if (slave)
+ if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ return;
+ }
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
adev = amdgpu_ttm_adev(ttm->bdev);
@@ -1470,26 +1519,23 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
{
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
- struct reservation_object_list *flist;
+ struct dma_resv_list *flist;
struct dma_fence *f;
int i;
- /* Don't evict VM page tables while they are busy, otherwise we can't
- * cleanly handle page faults.
- */
if (bo->type == ttm_bo_type_kernel &&
- !reservation_object_test_signaled_rcu(bo->resv, true))
+ !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
/* If bo is a KFD BO, check if the bo belongs to the current process.
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = reservation_object_get_list(bo->resv);
+ flist = dma_resv_get_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(bo->resv));
+ dma_resv_held(bo->base.resv));
if (amdkfd_fence_check_mm(f, current->mm))
return false;
}
@@ -1599,6 +1645,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
.move_notify = &amdgpu_bo_move_notify,
+ .release_notify = &amdgpu_bo_release_notify,
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
@@ -1632,81 +1679,96 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- u64 vram_size = adev->gmc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->fw_vram_usage.size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ if (adev->fw_vram_usage.size == 0 ||
+ adev->fw_vram_usage.size > vram_size)
+ return 0;
- r = amdgpu_bo_create(adev, &bp,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->fw_vram_usage.start_offset,
+ adev->fw_vram_usage.size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+}
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
+/*
+ * Memory training reservation functions
+ */
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
+/**
+ * amdgpu_ttm_training_reserve_vram_fini - free the VRAM reserved for memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the VRAM reserved for memory training, if it has been reserved.
+ */
+static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
+ ctx->c2p_bo = NULL;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size));
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ return 0;
+}
+
+static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+{
+ if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
+ vram_size -= SZ_1M;
+
+ return ALIGN(vram_size, SZ_1M);
+}
+
+/**
+ * amdgpu_ttm_training_reserve_vram_init - reserve a VRAM BO for memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Create the VRAM BO reservation needed for memory training.
+ */
+static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+ if (!adev->fw_vram_usage.mem_train_support) {
+ DRM_DEBUG("memory training is not supported!\n");
+ return 0;
+ }
+
+ ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
+ ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ DRM_DEBUG("train_data_size:%llx, p2c_train_data_offset:%llx, c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->c2p_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
- return r;
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
+ ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
+ return 0;
}
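
Both the firmware carve-out and the training buffer above rely on amdgpu_bo_create_kernel_at() to pin a kernel BO at a fixed VRAM offset. A hedged usage sketch; the offset and size are example values only, and the wrapper name is illustrative:

static int example_reserve_fixed_vram(struct amdgpu_device *adev,
				      struct amdgpu_bo **bo, void **cpu_addr)
{
	/* pin 1 MiB of VRAM at a fixed 256 MiB offset and map it for the CPU */
	return amdgpu_bo_create_kernel_at(adev, 256ULL << 20, 1ULL << 20,
					  AMDGPU_GEM_DOMAIN_VRAM, bo, cpu_addr);
}

The buffer is returned later with amdgpu_bo_free_kernel(), as the fini paths above do.
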
+
/**
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1721,6 +1783,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
uint64_t gtt_size;
int r;
u64 vis_vram_limit;
+ void *stolen_vga_buf;
mutex_init(&adev->mman.gtt_window_lock);
@@ -1728,7 +1791,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
- adev->need_dma32);
+ adev->ddev->vma_offset_manager,
+ dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -1768,6 +1832,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /*
+ * The reserved vram for memory training must be pinned to the specified
+ * place on the VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
@@ -1775,9 +1847,23 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
- NULL, NULL);
+ NULL, &stolen_vga_buf);
if (r)
return r;
+
+ /*
+ * reserve one TMR (64K) region at the top of VRAM, which holds
+ * the IP Discovery data and is protected by the PSP.
+ */
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
+ DISCOVERY_TMR_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1839,8 +1925,9 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
+ void *stolen_vga_buf;
/* return the VGA stolen memory (if any) back to VRAM */
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
}
/**
@@ -1852,7 +1939,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the IP Discovery TMR memory back to VRAM */
+ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
@@ -1888,11 +1979,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
if (enable) {
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->mman.entity,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
r);
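
For reference, a minimal sketch of the updated drm_sched_entity_init() calling convention used throughout this series: callers now pass a priority and an array of scheduler pointers instead of run-queue pointers. The wrapper is illustrative only.

static int example_entity_init(struct amdgpu_ring *ring,
			       struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = &ring->sched;

	/* one scheduler, normal priority, no guilty counter */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}
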
@@ -1948,10 +2041,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- num_dw = adev->mman.buffer_funcs->copy_num_dw;
- while (num_dw & 0x7)
- num_dw++;
-
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
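
The ALIGN() form above replaces the open-coded padding loop; both round num_dw up to the next multiple of 8, as the example values below show.

/* Equivalent results (example values):
 *   while (num_dw & 0x7) num_dw++;   13 -> 16, 16 -> 16
 *   num_dw = ALIGN(num_dw, 8);       13 -> 16, 16 -> 16
 */
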
@@ -1992,7 +2082,7 @@ error_free:
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush)
{
@@ -2011,11 +2101,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
- num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
-
- /* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
@@ -2066,7 +2152,7 @@ error_free:
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index caa76c693700..0dddedc06ae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -38,6 +38,8 @@
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
+#define AMDGPU_POISON 0xd0bed0be
+
struct amdgpu_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
@@ -83,18 +85,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index bfaa0eac3213..9ef312428231 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -83,8 +83,8 @@ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
const struct smc_firmware_header_v2_0 *v2_hdr =
container_of(v1_hdr, struct smc_firmware_header_v2_0, v1_0);
- DRM_INFO("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes));
- DRM_INFO("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes));
+ DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes));
+ DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes));
} else {
DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
}
@@ -269,6 +269,16 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
DRM_DEBUG("kdb_size_bytes: %u\n",
le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes));
}
+ if (version_minor == 2) {
+ const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
+ container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
+ DRM_DEBUG("kdb_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_2->kdb_header_version));
+ DRM_DEBUG("kdb_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes));
+ DRM_DEBUG("kdb_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes));
+ }
} else {
DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
version_major, version_minor);
@@ -350,11 +360,16 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_RAVEN:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
+
default:
DRM_ERROR("Unknown firmware load type\n");
}
@@ -432,6 +447,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
+ const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
if (NULL == ucode->fw)
return 0;
@@ -445,6 +461,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
header = (const struct common_firmware_header *)ucode->fw->data;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
+ dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
(ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
@@ -455,7 +472,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) {
+ ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -491,6 +509,12 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
le32_to_cpu(header->ucode_array_offset_bytes) +
le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) {
+ ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
+ memcpy(ucode->kaddr,
+ (void *)((uint8_t *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes)),
+ ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index c1fb6dc86440..b0e656409c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -90,6 +90,15 @@ struct psp_firmware_header_v1_1 {
uint32_t kdb_size_bytes;
};
+/* version_major=1, version_minor=2 */
+struct psp_firmware_header_v1_2 {
+ struct psp_firmware_header_v1_0 v1_0;
+ uint32_t reserve[3];
+ uint32_t kdb_header_version;
+ uint32_t kdb_offset_bytes;
+ uint32_t kdb_size_bytes;
+};
+
/* version_major=1, version_minor=0 */
struct ta_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -99,6 +108,12 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_offset_bytes;
uint32_t ta_ras_size_bytes;
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_offset_bytes;
+ uint32_t ta_hdcp_size_bytes;
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_offset_bytes;
+ uint32_t ta_dtm_size_bytes;
};
/* version_major=1, version_minor=0 */
@@ -236,6 +251,13 @@ struct dmcu_firmware_header_v1_0 {
uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */
};
+/* version_major=1, version_minor=0 */
+struct dmcub_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t inst_const_bytes; /* size of instruction region, in bytes */
+ uint32_t bss_data_bytes; /* size of bss/data region, in bytes */
+};
+
/* header is fixed size */
union amdgpu_firmware_header {
struct common_firmware_header common;
@@ -253,6 +275,7 @@ union amdgpu_firmware_header {
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct gpu_info_firmware_header_v1_0 gpu_info;
struct dmcu_firmware_header_v1_0 dmcu;
+ struct dmcub_firmware_header_v1_0 dmcub;
uint8_t raw[0x100];
};
@@ -262,6 +285,12 @@ union amdgpu_firmware_header {
enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_SDMA0 = 0,
AMDGPU_UCODE_ID_SDMA1,
+ AMDGPU_UCODE_ID_SDMA2,
+ AMDGPU_UCODE_ID_SDMA3,
+ AMDGPU_UCODE_ID_SDMA4,
+ AMDGPU_UCODE_ID_SDMA5,
+ AMDGPU_UCODE_ID_SDMA6,
+ AMDGPU_UCODE_ID_SDMA7,
AMDGPU_UCODE_ID_CP_CE,
AMDGPU_UCODE_ID_CP_PFP,
AMDGPU_UCODE_ID_CP_ME,
@@ -271,20 +300,22 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC2_JT,
AMDGPU_UCODE_ID_CP_MES,
AMDGPU_UCODE_ID_CP_MES_DATA,
- AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+ AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
AMDGPU_UCODE_ID_UVD1,
AMDGPU_UCODE_ID_VCE,
AMDGPU_UCODE_ID_VCN,
+ AMDGPU_UCODE_ID_VCN1,
AMDGPU_UCODE_ID_DMCU_ERAM,
AMDGPU_UCODE_ID_DMCU_INTV,
AMDGPU_UCODE_ID_VCN0_RAM,
AMDGPU_UCODE_ID_VCN1_RAM,
+ AMDGPU_UCODE_ID_DMCUB,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
new file mode 100644
index 000000000000..f4d40855147b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras.h"
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ if (!adev->umc.ras_if) {
+ adev->umc.ras_if =
+ kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->umc.ras_if)
+ return -ENOMEM;
+ adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if->sub_block_index = 0;
+ strcpy(adev->umc.ras_if->name, "umc");
+ }
+ ih_info.head = fs_info.head = *adev->umc.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ /* RAS init for the specific UMC version */
+ if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
+ adev->umc.funcs->err_cnt_init(adev);
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
+free:
+ kfree(adev->umc.ras_if);
+ adev->umc.ras_if = NULL;
+ return r;
+}
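
A hedged sketch of how an ASIC-specific path might call the new helper from its late-init stage; gmc_late_init() is an illustrative name, not code from this patch:

static int gmc_late_init(struct amdgpu_device *adev)
{
	int r;

	/* register UMC RAS sysfs/debugfs nodes and the ECC interrupt */
	r = amdgpu_umc_ras_late_init(adev);
	if (r)
		return r;

	/* ... remaining GMC late-init work ... */
	return 0;
}
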
+
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->umc.ras_if) {
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_count)
+ adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count) {
+ if (err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt))
+ DRM_WARN("Failed to add ras bad page!\n");
+
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
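
A hedged sketch of how an ASIC GMC block could point its ECC interrupt source at the generic handler above. The state callback is ASIC-specific; the stub below exists only to make the sketch complete.

/* Illustrative, ASIC-specific state handler (stub for the sketch) */
static int example_ecc_interrupt_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	return 0;
}

static const struct amdgpu_irq_src_funcs example_ecc_irq_funcs = {
	.set = example_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
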
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
new file mode 100644
index 000000000000..a615a1eb750b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_UMC_H__
+#define __AMDGPU_UMC_H__
+
+struct amdgpu_umc_funcs {
+ void (*err_cnt_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*query_ras_error_address)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_umc {
+ /* max error count in one ras query call */
+ uint32_t max_ras_err_cnt_per_query;
+ /* number of umc channel instances with memory map register access */
+ uint32_t channel_inst_num;
+ /* number of umc instances with memory map register access */
+ uint32_t umc_inst_num;
+ /* UMC register per-channel offset */
+ uint32_t channel_offs;
+ /* channel index table of interleaved memory */
+ const uint32_t *channel_idx_tbl;
+ struct ras_common_if *ras_if;
+
+ const struct amdgpu_umc_funcs *funcs;
+};
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 5b2fea3b4a2c..a92f3b18e657 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -39,6 +39,8 @@
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"
+#include "amdgpu_ras.h"
+
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -297,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
@@ -327,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int r;
ring = &adev->uvd.inst[0].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
@@ -346,6 +350,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
unsigned size;
void *ptr;
int i, j;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -372,8 +377,16 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ /* re-write 0 since err_event_athub will corrupt VCPU buffer */
+ if (in_ras_intr)
+ memset(adev->uvd.inst[j].saved_bo, 0, size);
+ else
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
}
+
+ if (in_ras_intr)
+ DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+
return 0;
}
@@ -1073,7 +1086,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
true, false,
msecs_to_jiffies(10));
if (r == 0)
@@ -1085,7 +1098,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
goto err_free;
} else {
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+ r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b70b3c45bb29..59ddba137946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
/**
* amdgpu_vce_init - allocate memory, load vce firmware
@@ -211,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
@@ -234,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int r;
ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
@@ -428,14 +435,15 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* Open up a stream for HW test
*/
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +452,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -476,8 +484,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000014; /* len */
ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000001;
for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -507,8 +515,8 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
@@ -644,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
if ((addr + (uint64_t)size) >
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+ DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
@@ -1110,13 +1118,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -1132,5 +1147,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
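
The IB tests above now pin a small VRAM BO and hand its GPU offset to the create/destroy messages instead of an address derived from the IB itself. A condensed sketch of that lifecycle, with an illustrative wrapper name:

static int example_test_bo_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo = NULL;
	int r;

	r = amdgpu_bo_create_reserved(adev, 512, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL);
	if (r)
		return r;

	/* the test messages reference amdgpu_bo_gpu_offset(bo) here */

	amdgpu_bo_unreserve(bo);	/* drop the reservation taken at create */
	amdgpu_bo_unref(&bo);		/* release the buffer object */
	return 0;
}
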
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 30ea54dd9117..d6d83a3ec803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -58,10 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 2e12eeb314a7..f96464e2c157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -28,30 +28,29 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm.h>
-
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
-
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
+#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
+#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
+#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
+#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
+MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
+MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
+MODULE_FIRMWARE(FIRMWARE_NAVI14);
+MODULE_FIRMWARE(FIRMWARE_NAVI12);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -61,7 +60,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
- int r;
+ int i, r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -74,12 +73,36 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
else
fw_name = FIRMWARE_RAVEN;
break;
+ case CHIP_ARCTURUS:
+ fw_name = FIRMWARE_ARCTURUS;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
+ case CHIP_RENOIR:
+ fw_name = FIRMWARE_RENOIR;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
case CHIP_NAVI10:
fw_name = FIRMWARE_NAVI10;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
+ case CHIP_NAVI14:
+ fw_name = FIRMWARE_NAVI14;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
+ case CHIP_NAVI12:
+ fw_name = FIRMWARE_NAVI12;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
default:
return -EINVAL;
}
@@ -133,22 +156,28 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
- &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
- return r;
- }
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
- &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
+ dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
}
+
+ if (adev->vcn.indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
+ }
}
return 0;
@@ -156,26 +185,29 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
- int i;
-
- kvfree(adev->vcn.saved_bo);
+ int i, j;
- if (adev->vcn.indirect_sram) {
- amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
- &adev->vcn.dpg_sram_gpu_addr,
- (void **)&adev->vcn.dpg_sram_cpu_addr);
- }
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
- amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
- &adev->vcn.gpu_addr,
- (void **)&adev->vcn.cpu_addr);
+ for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+ if (adev->vcn.harvest_config & (1 << j))
+ continue;
+ if (adev->vcn.indirect_sram) {
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
+ &adev->vcn.inst[j].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ }
+ kvfree(adev->vcn.inst[j].saved_bo);
- amdgpu_ring_fini(&adev->vcn.ring_dec);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
+ &adev->vcn.inst[j].gpu_addr,
+ (void **)&adev->vcn.inst[j].cpu_addr);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+ amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
- amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+ }
release_firmware(adev->vcn.fw);
@@ -186,21 +218,25 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
+ int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (adev->vcn.vcpu_bo == NULL)
- return 0;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return 0;
- size = amdgpu_bo_size(adev->vcn.vcpu_bo);
- ptr = adev->vcn.cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
- if (!adev->vcn.saved_bo)
- return -ENOMEM;
-
- memcpy_fromio(adev->vcn.saved_bo, ptr, size);
+ adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[i].saved_bo)
+ return -ENOMEM;
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ }
return 0;
}
@@ -208,32 +244,36 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
+ int i;
- if (adev->vcn.vcpu_bo == NULL)
- return -EINVAL;
-
- size = amdgpu_bo_size(adev->vcn.vcpu_bo);
- ptr = adev->vcn.cpu_addr;
-
- if (adev->vcn.saved_bo != NULL) {
- memcpy_toio(ptr, adev->vcn.saved_bo, size);
- kvfree(adev->vcn.saved_bo);
- adev->vcn.saved_bo = NULL;
- } else {
- const struct common_firmware_header *hdr;
- unsigned offset;
-
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
+
+ if (adev->vcn.inst[i].saved_bo != NULL) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ kvfree(adev->vcn.inst[i].saved_bo);
+ adev->vcn.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned offset;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
+ memset_io(ptr, 0, size);
}
- memset_io(ptr, 0, size);
}
-
return 0;
}
@@ -241,39 +281,36 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vcn.idle_work.work);
- unsigned int fences = 0;
- unsigned int i;
+ unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
+ unsigned int i, j;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
- }
+ for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+ if (adev->vcn.harvest_config & (1 << j))
+ continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- struct dpg_pause_state new_state;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+ }
- if (fences)
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
- if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- else
- new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+ if (fence[j])
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- adev->vcn.pause_dpg_mode(adev, &new_state);
- }
+ adev->vcn.pause_dpg_mode(adev, j, &new_state);
+ }
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+ fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
+ fences += fence[j];
+ }
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
- if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
- else
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@@ -286,11 +323,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
- if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
- else
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -299,24 +333,17 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
unsigned int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
}
if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- else
- new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- adev->vcn.pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
}
@@ -332,7 +359,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+ WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
@@ -340,7 +367,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.scratch9);
+ tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
@@ -466,9 +493,14 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
+ /* temporarily disable ib test for sriov */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -517,13 +549,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -531,14 +564,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
@@ -569,13 +602,14 @@ err:
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +617,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
@@ -622,129 +656,38 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
- if (r)
- goto error;
-
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
- if (r)
- goto error;
-
- r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0)
- r = -ETIMEDOUT;
- else if (r > 0)
- r = 0;
-
-error:
- dma_fence_put(fence);
- return r;
-}
-
-int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t tmp = 0;
- unsigned i;
- int r;
+ /* temporarily disable ib test for sriov */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
- WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
- r = amdgpu_ring_alloc(ring, 3);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
- amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_commit(ring);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.jpeg_pitch);
- if (tmp == 0xDEADBEEF)
- break;
- udelay(1);
- }
-
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
-
- return r;
-}
-
-static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_job *job;
- struct amdgpu_ib *ib;
- struct dma_fence *f = NULL;
- const unsigned ib_size_dw = 16;
- int i, r;
-
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
- return r;
-
- ib = &job->ibs[0];
-
- ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
- ib->ptr[1] = 0xDEADBEEF;
- for (i = 2; i < 16; i += 2) {
- ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
- ib->ptr[i+1] = 0;
- }
- ib->length_dw = 16;
-
- r = amdgpu_job_submit_direct(job, ring, &f);
- if (r)
- goto err;
-
- if (fence)
- *fence = dma_fence_get(f);
- dma_fence_put(f);
-
- return 0;
-
-err:
- amdgpu_job_free(job);
- return r;
-}
-
-int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t tmp = 0;
- unsigned i;
- struct dma_fence *fence = NULL;
- long r = 0;
+ goto error;
- r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
+ if (r == 0)
r = -ETIMEDOUT;
- goto error;
- } else if (r < 0) {
- goto error;
- } else {
+ else if (r > 0)
r = 0;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.jpeg_pitch);
- if (tmp == 0xDEADBEEF)
- break;
- udelay(1);
- }
-
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
- dma_fence_put(fence);
error:
+ dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
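The matching change to the create-message helper is not visible in this excerpt; judging from the amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL) call above, its prototype is assumed to gain the same buffer argument:

	static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
						 struct amdgpu_bo *bo,
						 struct dma_fence **fence);

Both messages then share the caller-provided BO, which stays reserved until the destroy fence has been waited on and is only unreserved and unreffed in the common exit path above.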
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 19661c645703..6fe057329de2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -30,6 +30,12 @@
#define AMDGPU_VCN_FIRMWARE_OFFSET 256
#define AMDGPU_VCN_MAX_ENC_RINGS 3
+#define AMDGPU_MAX_VCN_INSTANCES 2
+#define AMDGPU_MAX_VCN_ENC_RINGS (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)

+
+#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
+#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
+
#define VCN_DEC_KMD_CMD 0x80000000
#define VCN_DEC_CMD_FENCE 0x00000000
#define VCN_DEC_CMD_TRAP 0x00000001
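The harvest flags follow a bit-per-instance convention; a minimal sketch (assumed, not part of this hunk) of how an IP block would skip fused-off VCN instances:

	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;	/* instance harvested, skip it */
		/* ... program adev->vcn.inst[i] ... */
	}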
@@ -51,33 +57,41 @@
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
- ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
+#define mmUVD_REG_XX_MASK 0x026c
+#define mmUVD_REG_XX_MASK_BASE_IDX 1
+
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
UVD_DPG_LMA_CTL__MASK_EN_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
- RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \
+ RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \
do { \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
} while (0)
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
bool video_range, aon_range; \
\
- addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
@@ -95,27 +109,27 @@
internal_reg_offset >>= 2; \
})
-#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) \
- ({ \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
- (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); \
+#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \
+ ({ \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect) \
- do { \
- if (!indirect) { \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
- (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- } else { \
- *adev->vcn.dpg_sram_curr_addr++ = offset; \
- *adev->vcn.dpg_sram_curr_addr++ = value; \
- } \
+#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
+ } \
} while (0)
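With the new inst_idx argument every DPG-mode access is bound to one VCN instance; a usage sketch (the register name is hypothetical, shown only for the argument order):

	WREG32_SOC15_DPG_MODE_2_0(inst_idx,
		SOC15_DPG_MODE_OFFSET_2_0(UVD, inst_idx, mmUVD_EXAMPLE_CTL),
		tmp, 0 /* mask_en */, indirect);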
enum engine_status_constants {
@@ -146,36 +160,52 @@ struct amdgpu_vcn_reg{
unsigned data1;
unsigned cmd;
unsigned nop;
+ unsigned context_id;
+ unsigned ib_vmid;
+ unsigned ib_bar_low;
+ unsigned ib_bar_high;
+ unsigned ib_size;
+ unsigned gp_scratch8;
unsigned scratch9;
- unsigned jpeg_pitch;
};
-struct amdgpu_vcn {
+struct amdgpu_vcn_inst {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
- unsigned fw_version;
void *saved_bo;
- struct delayed_work idle_work;
- const struct firmware *fw; /* VCN firmware */
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
- struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
- unsigned num_enc_rings;
- enum amd_powergating_state cur_state;
- struct dpg_pause_state pause_state;
- struct amdgpu_vcn_reg internal, external;
- int (*pause_dpg_mode)(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
-
- bool indirect_sram;
+ struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
};
+struct amdgpu_vcn {
+ unsigned fw_version;
+ struct delayed_work idle_work;
+ const struct firmware *fw; /* VCN firmware */
+ unsigned num_enc_rings;
+ enum amd_powergating_state cur_state;
+ bool indirect_sram;
+
+ uint8_t num_vcn_inst;
+ struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct amdgpu_vcn_reg internal;
+ struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
+ struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
+ uint32_t num_vcn_enc_sched;
+ uint32_t num_vcn_dec_sched;
+
+ unsigned harvest_config;
+ int (*pause_dpg_mode)(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+};
+
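Splitting out struct amdgpu_vcn_inst moves all per-engine state behind an instance index; callers are expected to change roughly like this (illustrative only):

	/* before: adev->vcn.ring_dec, adev->vcn.vcpu_bo, ...
	 * after:  adev->vcn.inst[i].ring_dec, adev->vcn.inst[i].vcpu_bo, ...
	 */
	struct amdgpu_ring *ring = &adev->vcn.inst[inst_idx].ring_dec;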
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
@@ -189,7 +219,4 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
-int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 59dd204498c5..adc813cde8e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->pg_flags = 0;
}
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_rreg);
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for gpu reset case because this way may
- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
- * never return if we keep waiting in virt_kiq_rreg, which cause
- * gpu_recover() hang there.
- *
- * also don't wait anymore for IRQ context
- * */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
- goto failed_kiq_read;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq_read;
-
- return adev->wb.wb[adev->virt.reg_val_offs];
-
-failed_kiq_read:
- pr_err("failed to read reg:%x\n", reg);
- return ~0;
-}
-
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_wreg);
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for gpu reset case because this way may
- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
- * never return if we keep waiting in virt_kiq_rreg, which cause
- * gpu_recover() hang there.
- *
- * also don't wait anymore for IRQ context
- * */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
- goto failed_kiq_write;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq_write;
-
- return;
-
-failed_kiq_write:
- pr_err("failed to write reg:%x\n", reg);
-}
-
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
@@ -379,99 +287,3 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
}
-
-static uint32_t parse_clk(char *buf, bool min)
-{
- char *ptr = buf;
- uint32_t clk = 0;
-
- do {
- ptr = strchr(ptr, ':');
- if (!ptr)
- break;
- ptr+=2;
- if (kstrtou32(ptr, 10, &clk))
- return 0;
- } while (!min);
-
- return clk * 100;
-}
-
-uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
-{
- char *buf = NULL;
- uint32_t clk = 0;
-
- buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
- clk = parse_clk(buf, lowest);
-
- kfree(buf);
-
- return clk;
-}
-
-uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
-{
- char *buf = NULL;
- uint32_t clk = 0;
-
- buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
- clk = parse_clk(buf, lowest);
-
- kfree(buf);
-
- return clk;
-}
-
-void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_virt *virt = &adev->virt;
-
- if (virt->ops && virt->ops->init_reg_access_mode)
- virt->ops->init_reg_access_mode(adev);
-}
-
-bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
-{
- bool ret = false;
- struct amdgpu_virt *virt = &adev->virt;
-
- if (amdgpu_sriov_vf(adev)
- && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
- ret = true;
-
- return ret;
-}
-
-bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
-{
- bool ret = false;
- struct amdgpu_virt *virt = &adev->virt;
-
- if (amdgpu_sriov_vf(adev)
- && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
- && !(amdgpu_sriov_runtime(adev)))
- ret = true;
-
- return ret;
-}
-
-bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
-{
- bool ret = false;
- struct amdgpu_virt *virt = &adev->virt;
-
- if (amdgpu_sriov_vf(adev)
- && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING))
- ret = true;
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f5107731e9c4..daaf909d009a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -48,12 +48,6 @@ struct amdgpu_vf_error_buffer {
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
-/* According to the fw feature, some new reg access modes are supported */
-#define AMDGPU_VIRT_REG_ACCESS_LEGACY (1 << 0) /* directly mmio */
-#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH (1 << 1) /* by PSP */
-#define AMDGPU_VIRT_REG_ACCESS_RLC (1 << 2) /* by RLC */
-#define AMDGPU_VIRT_REG_SKIP_SEETING (1 << 3) /* Skip setting reg */
-
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -63,9 +57,6 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
- int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
- int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
- void (*init_reg_access_mode)(struct amdgpu_device *adev);
};
/*
@@ -92,8 +83,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
- /* HW PERF SIM in GIM */
- AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
+ /* PP ONE VF MODE in GIM */
+ AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -264,8 +255,6 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
- /* protect DPM events to GIM */
- struct mutex dpm_mutex;
uint32_t reg_access_mode;
};
@@ -293,13 +282,11 @@ static inline bool is_virtual_machine(void)
#endif
}
-#define amdgim_is_hwperf(adev) \
- ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+#define amdgpu_sriov_is_pp_one_vf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
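The new feature bit tells the guest whether it is the only VF and may drive power-play itself; an assumed usage sketch (not taken from this patch):

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;	/* host owns DPM, nothing for this VF to do */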
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t rreg1,
uint32_t ref, uint32_t mask);
@@ -313,12 +300,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
-uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
-
-void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
-bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
-bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
-bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 24c3c05e2fb7..d16231d6a790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -83,6 +83,32 @@ struct amdgpu_prt_cb {
};
/**
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+ mutex_lock(&vm->eviction_lock);
+ vm->saved_flags = memalloc_nofs_save();
+}
+
+static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+ if (mutex_trylock(&vm->eviction_lock)) {
+ vm->saved_flags = memalloc_nofs_save();
+ return 1;
+ }
+ return 0;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+ memalloc_nofs_restore(vm->saved_flags);
+ mutex_unlock(&vm->eviction_lock);
+}
+
+/**
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
@@ -130,7 +156,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -302,7 +329,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
return;
vm->bulk_moveable = false;
@@ -341,7 +368,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
-/**
+/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
*/
struct amdgpu_vm_pt_cursor {
@@ -482,6 +509,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
*
* @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
* @cursor: state to initialize
*
* Starts a deep first traversal of the PD/PT tree.
@@ -535,7 +563,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
amdgpu_vm_pt_ancestor(cursor);
}
-/**
+/*
* for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
*/
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
@@ -560,12 +588,20 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- /* One for the VM updates, one for TTM and one for the CS job */
- entry->tv.num_shared = 3;
+ /* One for TTM and one for the CS job */
+ entry->tv.num_shared = 2;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
+/**
+ * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
+ *
+ * @bo: BO which was removed from the LRU
+ *
+ * Make sure the bulk_moveable flag is updated when a BO is removed from the
+ * LRU.
+ */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_bo *abo;
@@ -583,7 +619,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
+ if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
vm->bulk_moveable = false;
}
@@ -600,21 +636,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
-#if 0
if (vm->bulk_moveable) {
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return;
}
-#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -626,7 +659,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&vm->lru_bulk_move);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
vm->bulk_moveable = true;
}
@@ -649,7 +682,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
struct amdgpu_vm_bo_base *bo_base, *tmp;
- int r = 0;
+ int r;
vm->bulk_moveable &= list_empty(&vm->evicted);
@@ -658,7 +691,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = validate(param, bo);
if (r)
- break;
+ return r;
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
@@ -671,7 +704,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
}
- return r;
+ amdgpu_vm_eviction_lock(vm);
+ vm->evicting = false;
+ amdgpu_vm_eviction_unlock(vm);
+
+ return 0;
}
/**
@@ -695,6 +732,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
+ * @direct: use a direct update
*
* Root PD needs to be reserved when calling this.
*
@@ -703,7 +741,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo,
+ bool direct)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -762,6 +801,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
@@ -815,10 +855,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requesting vm
+ * @level: the page table level
+ * @direct: use a direct update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, struct amdgpu_bo_param *bp)
+ int level, bool direct,
+ struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -833,8 +876,9 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
+ bp->no_wait_gpu = direct;
if (vm->root.base.bo)
- bp->resv = vm->root.base.bo->tbo.resv;
+ bp->resv = vm->root.base.bo->tbo.base.resv;
}
/**
@@ -843,6 +887,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
+ * @direct: use a direct update
*
* Make sure a specific page table or directory is allocated.
*
@@ -852,7 +897,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool direct)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -873,7 +919,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -885,7 +931,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
if (r)
goto error_free_pt;
@@ -1022,7 +1068,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -1036,10 +1083,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
- bool pasid_mapping_needed = id->pasid != job->pasid ||
- !id->pasid_mapping ||
- !dma_fence_is_signaled(id->pasid_mapping);
struct dma_fence *fence = NULL;
+ bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
int r;
@@ -1049,6 +1094,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
pasid_mapping_needed = true;
}
+ mutex_lock(&id_mgr->lock);
+ if (id->pasid != job->pasid || !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping))
+ pasid_mapping_needed = true;
+ mutex_unlock(&id_mgr->lock);
+
gds_switch_needed &= !!ring->funcs->emit_gds_switch;
vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
@@ -1088,9 +1139,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
if (pasid_mapping_needed) {
+ mutex_lock(&id_mgr->lock);
id->pasid = job->pasid;
dma_fence_put(id->pasid_mapping);
id->pasid_mapping = dma_fence_get(fence);
+ mutex_unlock(&id_mgr->lock);
}
dma_fence_put(fence);
@@ -1174,10 +1227,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/*
+/**
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @param: parameters for the update
+ * @params: parameters for the update
* @vm: requested vm
* @entry: entry to update
*
@@ -1201,7 +1254,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
-/*
+/**
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
*
* @adev: amdgpu_device pointer
@@ -1220,19 +1273,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
amdgpu_vm_bo_relocated(&entry->base);
}
-/*
- * amdgpu_vm_update_directories - make sure that all directories are valid
+/**
+ * amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @direct: submit directly to the paging queue
*
* Makes sure all directories are up to date.
*
* Returns:
* 0 for success, error for failure.
*/
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1243,6 +1297,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
if (r)
@@ -1270,7 +1325,7 @@ error:
return r;
}
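Existing call sites are expected to follow the rename and pick a queue explicitly; a sketch of the assumed migration (command submission keeps the delayed queue, while the page-fault path later in this patch passes true):

-	r = amdgpu_vm_update_directories(adev, vm);
+	r = amdgpu_vm_update_pdes(adev, vm, false);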
-/**
+/*
* amdgpu_vm_update_flags - figure out flags for PTE updates
*
* Make sure to set the right flags for the PTEs at the desired level.
@@ -1393,7 +1448,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ /* make sure that the page tables covering the address range are
+ * actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
+ params->direct);
if (r)
return r;
@@ -1465,7 +1524,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Free all child entries */
+ /* Free all child entries.
+ * Update the tables with the flags and addresses and free up subsequent
+ * tables in the case of huge pages or freed up areas.
+ * This is the maximum you can free, because all other page tables are not
+ * completely covered by the range and so potentially still in use.
+ */
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
@@ -1484,13 +1548,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
+ * @direct: direct submission in a page fault
+ * @exclusive: fence we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
* @addr: addr to set the area to
+ * @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
@@ -1499,11 +1564,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct,
struct dma_fence *exclusive,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
+ dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
@@ -1513,21 +1578,32 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD;
+ amdgpu_vm_eviction_lock(vm);
+ if (vm->evicting) {
+ r = -EBUSY;
+ goto error_unlock;
+ }
+
r = vm->update_funcs->prepare(&params, owner, exclusive);
if (r)
- return r;
+ goto error_unlock;
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
- return r;
+ goto error_unlock;
+
+ r = vm->update_funcs->commit(&params, fence);
- return vm->update_funcs->commit(&params, fence);
+error_unlock:
+ amdgpu_vm_eviction_unlock(vm);
+ return r;
}
/**
@@ -1571,27 +1647,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
- if (adev->asic_type == CHIP_NAVI10) {
- flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
- } else {
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
- }
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- if (adev->asic_type >= CHIP_NAVI10) {
- flags |= AMDGPU_PTE_SNOOPED;
- flags |= AMDGPU_PTE_LOG;
- flags |= AMDGPU_PTE_SYSTEM;
- }
- flags &= ~AMDGPU_PTE_VALID;
- }
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
trace_amdgpu_vm_bo_update(mapping);
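The open-coded flag handling removed above moves behind amdgpu_gmc_get_vm_pte(); reconstructed from the deleted Vega branch, a per-ASIC hook would look roughly like this (a sketch, not the exact driver code):

	static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
					struct amdgpu_bo_va_mapping *mapping,
					uint64_t *flags)
	{
		*flags &= ~AMDGPU_PTE_EXECUTABLE;
		*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

		*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

		if (mapping->flags & AMDGPU_PTE_PRT) {
			*flags |= AMDGPU_PTE_PRT;
			*flags &= ~AMDGPU_PTE_VALID;
		}
	}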
@@ -1635,7 +1692,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = count *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1644,9 +1702,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
start, last, flags, addr,
- fence);
+ dma_addr, fence);
if (r)
return r;
@@ -1674,8 +1732,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
+int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1702,7 +1759,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = reservation_object_get_excl(bo->tbo.resv);
+ exclusive = bo->tbo.moving;
}
if (bo) {
@@ -1712,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
flags = 0x0;
}
- if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+ if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1733,20 +1790,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
return r;
}
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- }
-
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
*/
- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -1879,18 +1931,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.base.bo->tbo.resv;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
- r = reservation_object_get_fences_rcu(resv, &excl,
+ r = dma_resv_get_fences_rcu(resv, &excl,
&shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
* block for all the fences to complete.
*/
- reservation_object_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout_rcu(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
@@ -1940,9 +1992,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
mapping->start, mapping->last,
- init_pte_value, 0, &f);
+ init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -1978,7 +2030,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_bo_va *bo_va, *tmp;
- struct reservation_object *resv;
+ struct dma_resv *resv;
bool clear;
int r;
@@ -1993,11 +2045,11 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
- resv = bo_va->base.bo->tbo.resv;
+ resv = bo_va->base.bo->tbo.base.resv;
spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
- if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+ if (!amdgpu_vm_debug && dma_resv_trylock(resv))
clear = false;
/* Somebody else is using the BO right now */
else
@@ -2008,7 +2060,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
return r;
if (!clear)
- reservation_object_unlock(resv);
+ dma_resv_unlock(resv);
spin_lock(&vm->invalidated_lock);
}
spin_unlock(&vm->invalidated_lock);
@@ -2084,7 +2136,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (mapping->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+ if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
!bo_va->base.moved) {
list_move(&bo_va->base.vm_status, &vm->moved);
}
@@ -2416,7 +2468,8 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
struct amdgpu_bo *bo;
bo = mapping->bo_va->base.bo;
- if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
+ ticket)
continue;
}
@@ -2443,7 +2496,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base **base;
if (bo) {
- if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+ if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
vm->bulk_moveable = false;
for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2487,6 +2540,41 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_evictable - check if we can evict a VM
+ *
+ * @bo: A page table of the VM.
+ *
+ * Check if it is possible to evict a VM.
+ */
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
+
+ /* Page tables of a destroyed VM can go away immediately */
+ if (!bo_base || !bo_base->vm)
+ return true;
+
+ /* Don't evict VM page tables while they are busy */
+ if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ return false;
+
+ /* Try to block ongoing updates */
+ if (!amdgpu_vm_eviction_trylock(bo_base->vm))
+ return false;
+
+ /* Don't evict VM page tables while they are updated */
+ if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+ !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+ amdgpu_vm_eviction_unlock(bo_base->vm);
+ return false;
+ }
+
+ bo_base->vm->evicting = true;
+ amdgpu_vm_eviction_unlock(bo_base->vm);
+ return true;
+}
+
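amdgpu_vm_evictable() is meant to be consulted from TTM's eviction decision; a hedged sketch of the expected hook (the corresponding amdgpu_ttm.c change is outside this excerpt):

	static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
						    const struct ttm_place *place)
	{
		struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

		/* page table BOs may only go away when their VM allows it */
		if (bo->type == ttm_bo_type_kernel && !amdgpu_vm_evictable(abo))
			return false;

		/* ... existing placement checks ... */
		return true;
	}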
+/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
@@ -2507,7 +2595,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
amdgpu_vm_bo_evicted(bo_base);
continue;
}
@@ -2518,7 +2606,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
if (bo->tbo.type == ttm_bo_type_kernel)
amdgpu_vm_bo_relocated(bo_base);
- else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+ else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
amdgpu_vm_bo_moved(bo_base);
else
amdgpu_vm_bo_invalidated(bo_base);
@@ -2648,8 +2736,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
- true, true, timeout);
+ timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+ true, true, timeout);
+ if (timeout <= 0)
+ return timeout;
+
+ timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+ if (timeout <= 0)
+ return timeout;
+
+ return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
}
/**
@@ -2683,13 +2779,22 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
- /* create scheduler entity for page table updates */
- r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
- adev->vm_manager.vm_pte_num_rqs, NULL);
+
+ /* create scheduler entities for page table updates */
+ r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
+ r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
+ if (r)
+ goto error_free_direct;
+
vm->pte_support_ats = false;
+ vm->is_compute_context = false;
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2703,7 +2808,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2711,25 +2817,30 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
+ vm->last_direct = dma_fence_get_stub();
+ vm->last_delayed = dma_fence_get_stub();
+
+ mutex_init(&vm->eviction_lock);
+ vm->evicting = false;
- amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
r = amdgpu_bo_create(adev, &bp, &root);
if (r)
- goto error_free_sched_entity;
+ goto error_free_delayed;
r = amdgpu_bo_reserve(root, true);
if (r)
goto error_free_root;
- r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
if (r)
goto error_unreserve;
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root);
+ r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
goto error_unreserve;
@@ -2760,8 +2871,13 @@ error_free_root:
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
-error_free_sched_entity:
- drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+ dma_fence_put(vm->last_direct);
+ dma_fence_put(vm->last_delayed);
+ drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+ drm_sched_entity_destroy(&vm->direct);
return r;
}
@@ -2802,6 +2918,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @pasid: pasid to use
*
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
@@ -2817,7 +2934,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* Returns:
* 0 for success, -errno for errors.
*/
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned int pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -2849,7 +2967,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
if (r)
goto free_idr;
}
@@ -2859,9 +2977,18 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
+ if (vm->use_cpu_for_update)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
+ dma_fence_put(vm->last_update);
+ vm->last_update = NULL;
+ vm->is_compute_context = true;
+
if (vm->pasid) {
unsigned long flags;
@@ -2915,6 +3042,7 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
vm->pasid = 0;
+ vm->is_compute_context = false;
}
/**
@@ -2931,31 +3059,26 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- int i, r;
+ int i;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ vm->pasid = 0;
}
- drm_sched_entity_destroy(&vm->entity);
+ dma_fence_wait(vm->last_direct, false);
+ dma_fence_put(vm->last_direct);
+ dma_fence_wait(vm->last_delayed, false);
+ dma_fence_put(vm->last_delayed);
- if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
- dev_err(adev->dev, "still active bo inside vm\n");
- }
- rbtree_postorder_for_each_entry_safe(mapping, tmp,
- &vm->va.rb_root, rb) {
- /* Don't remove the mapping here, we don't want to trigger a
- * rebalance and the tree is about to be destroyed anyway.
- */
- list_del(&mapping->list);
- kfree(mapping);
- }
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
@@ -2966,16 +3089,26 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- root = amdgpu_bo_ref(vm->root.base.bo);
- r = amdgpu_bo_reserve(root, true);
- if (r) {
- dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
- } else {
- amdgpu_vm_free_pts(adev, vm, NULL);
- amdgpu_bo_unreserve(root);
- }
+ amdgpu_vm_free_pts(adev, vm, NULL);
+ amdgpu_bo_unreserve(root);
amdgpu_bo_unref(&root);
WARN_ON(vm->root.base.bo);
+
+ drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->delayed);
+
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+ dev_err(adev->dev, "still active bo inside vm\n");
+ }
+ rbtree_postorder_for_each_entry_safe(mapping, tmp,
+ &vm->va.rb_root, rb) {
+ /* Don't remove the mapping here, we don't want to trigger a
+ * rebalance and the tree is about to be destroyed anyway.
+ */
+ list_del(&mapping->list);
+ kfree(mapping);
+ }
+
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3059,13 +3192,14 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
- /* current, we only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
+ /* We only need to reserve a vmid from the gfxhub */
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
+ AMDGPU_GFXHUB_0);
if (r)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
+ amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
return -EINVAL;
@@ -3103,13 +3237,97 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
*/
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
- if (!vm->task_info.pid) {
- vm->task_info.pid = current->pid;
- get_task_comm(vm->task_info.task_name, current);
+ if (vm->task_info.pid)
+ return;
- if (current->group_leader->mm == current->mm) {
- vm->task_info.tgid = current->group_leader->pid;
- get_task_comm(vm->task_info.process_name, current->group_leader);
- }
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+}
+
+/**
+ * amdgpu_vm_handle_fault - graceful handling of VM faults.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @addr: Address of the fault
+ *
+ * Try to gracefully handle a VM fault. Return true if the fault was handled and
+ * shouldn't be reported any more.
+ */
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr)
+{
+ struct amdgpu_bo *root;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+ long r;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ else
+ root = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+
+ if (!root)
+ return false;
+
+ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_unref;
+
+ /* Double check that the VM still exists */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm && vm->root.base.bo != root)
+ vm = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ goto error_unlock;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+ flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
+ AMDGPU_PTE_SYSTEM;
+
+ if (vm->is_compute_context) {
+ /* Intentionally setting invalid PTE flag
+ * combination to force a no-retry-fault
+ */
+ flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
+ AMDGPU_PTE_TF;
+ value = 0;
+
+ } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ /* Redirect the access to the dummy page */
+ value = adev->dummy_page_addr;
+ flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
+ AMDGPU_PTE_WRITEABLE;
+
+ } else {
+ /* Let the hw retry silently on the PTE */
+ value = 0;
}
+
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
+ flags, value, NULL, NULL);
+ if (r)
+ goto error_unlock;
+
+ r = amdgpu_vm_update_pdes(adev, vm, true);
+
+error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+ DRM_ERROR("Can't handle page fault (%ld)\n", r);
+
+error_unref:
+ amdgpu_bo_unref(&root);
+
+ return false;
}
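The GMC interrupt handler is the intended caller; an assumed sketch of the call site in the retry-fault path (not part of this excerpt):

	if (retry_fault && amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1;	/* fault fixed up, suppress the report */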
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 489a162ca620..b4640ab38c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,6 +30,7 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -90,7 +91,7 @@ struct amdgpu_bo_list_entry;
| AMDGPU_PTE_WRITEABLE \
| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))
-/* NAVI10 only */
+/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL)
@@ -99,10 +100,14 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* Reserve 4MB VRAM for page tables */
+#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20)
+
/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS 2
-#define AMDGPU_GFXHUB 0
-#define AMDGPU_MMHUB 1
+#define AMDGPU_MAX_VMHUBS 3
+#define AMDGPU_GFXHUB_0 0
+#define AMDGPU_MMHUB_0 1
+#define AMDGPU_MMHUB_1 2
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
@@ -198,6 +203,11 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
+ * @direct: if changes should be made directly
+ */
+ bool direct;
+
+ /**
* @pages_addr:
*
* DMA addresses to use for mapping
@@ -230,6 +240,13 @@ struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
+ /* Lock to prevent eviction while we are updating page tables;
+ * use amdgpu_vm_eviction_lock/unlock(vm)
+ */
+ struct mutex eviction_lock;
+ bool evicting;
+ unsigned int saved_flags;
+
/* BOs who needs a validation */
struct list_head evicted;
@@ -253,8 +270,13 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root;
struct dma_fence *last_update;
- /* Scheduler entity for page table updates */
- struct drm_sched_entity entity;
+ /* Scheduler entities for page table updates */
+ struct drm_sched_entity direct;
+ struct drm_sched_entity delayed;
+
+ /* Last submission to the scheduler entities */
+ struct dma_fence *last_direct;
+ struct dma_fence *last_delayed;
unsigned int pasid;
/* dedicated to vm */
@@ -288,6 +310,8 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* mark whether can do the bulk move */
bool bulk_moveable;
+ /* Flag to indicate if VM is used for compute */
+ bool is_compute_context;
};
struct amdgpu_vm_manager {
@@ -307,8 +331,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rqs;
+ struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_scheds;
struct amdgpu_ring *page_fault;
/* partial resident texture handling */
@@ -356,8 +380,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
@@ -366,6 +390,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
@@ -403,6 +428,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info);
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 5222d165abfc..73fec7a0ced5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
{
int r;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
/* Wait for any BO move to be completed */
if (exclusive) {
r = dma_fence_wait(exclusive, true);
@@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
return r;
}
- return 0;
+ /* Don't wait for submissions during page fault */
+ if (p->direct)
+ return 0;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
}
/**
@@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index ddd181f5ed37..19b7f80758f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
- if (r)
- return r;
+ p->num_dw_left = ndw;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
- owner, false);
+ /* Wait for moves to be completed */
+ r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
if (r)
return r;
- p->num_dw_left = ndw;
- return 0;
+ /* Don't wait for any submissions during page fault handling */
+ if (p->direct)
+ return 0;
+
+ return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+ owner, false);
}
/**
@@ -93,24 +95,30 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
- struct amdgpu_bo *root = p->vm->root.base.bo;
struct amdgpu_ib *ib = p->job->ibs;
+ struct drm_sched_entity *entity;
+ struct dma_fence *f, *tmp;
struct amdgpu_ring *ring;
- struct dma_fence *f;
int r;
- ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+ entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, &p->vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error;
- amdgpu_bo_fence(root, f, true);
- if (fence)
+ tmp = dma_fence_get(f);
+ if (p->direct)
+ swap(p->vm->last_direct, tmp);
+ else
+ swap(p->vm->last_delayed, tmp);
+ dma_fence_put(tmp);
+
+ if (fence && !p->direct)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -120,7 +128,6 @@ error:
return r;
}
-
/**
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
*
@@ -141,7 +148,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -168,7 +175,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
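
Note on the commit path above: it keeps a reference to the most recent scheduler fence per update path by taking an extra reference on the new fence, swapping it into vm->last_direct or vm->last_delayed, and dropping whatever reference was held before. A minimal userspace sketch of that reference-swap idiom, using a toy refcounted object in place of dma_fence (all names here are illustrative, not part of the patch):

#include <stdio.h>

struct toy_fence { int refs; };

static struct toy_fence *toy_fence_get(struct toy_fence *f)
{
        if (f)
                f->refs++;
        return f;
}

static void toy_fence_put(struct toy_fence *f)
{
        if (f && --f->refs == 0)
                printf("fence %p fully released\n", (void *)f);
}

/* Publish a new reference into *slot and drop the previously held one. */
static void publish_fence(struct toy_fence **slot, struct toy_fence *f)
{
        struct toy_fence *tmp = toy_fence_get(f);
        struct toy_fence *old = *slot;   /* swap() in the kernel; open-coded here */

        *slot = tmp;
        toy_fence_put(old);
}

int main(void)
{
        struct toy_fence a = { .refs = 1 }, b = { .refs = 1 };
        struct toy_fence *last_direct = NULL;

        publish_fence(&last_direct, &a);        /* holds a second ref on a */
        publish_fence(&last_direct, &b);        /* drops the ref on a, holds one on b */
        toy_fence_put(&a);
        toy_fence_put(last_direct);
        toy_fence_put(&b);
        return 0;
}
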
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3a9d8c15fe9f..82a3299e53c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,9 @@
*/
#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
@@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ switch (adev->gmc.vram_vendor) {
+ case SAMSUNG:
+ return snprintf(buf, PAGE_SIZE, "samsung\n");
+ case INFINEON:
+ return snprintf(buf, PAGE_SIZE, "infineon\n");
+ case ELPIDA:
+ return snprintf(buf, PAGE_SIZE, "elpida\n");
+ case ETRON:
+ return snprintf(buf, PAGE_SIZE, "etron\n");
+ case NANYA:
+ return snprintf(buf, PAGE_SIZE, "nanya\n");
+ case HYNIX:
+ return snprintf(buf, PAGE_SIZE, "hynix\n");
+ case MOSEL:
+ return snprintf(buf, PAGE_SIZE, "mosel\n");
+ case WINBOND:
+ return snprintf(buf, PAGE_SIZE, "winbond\n");
+ case ESMT:
+ return snprintf(buf, PAGE_SIZE, "esmt\n");
+ case MICRON:
+ return snprintf(buf, PAGE_SIZE, "micron\n");
+ default:
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+ }
+}
+
static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
@@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+ amdgpu_mem_info_vram_vendor, NULL);
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
@@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
return ret;
}
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
+ return ret;
+ }
return 0;
}
@@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
return 0;
}
@@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
- uint64_t vis_usage = 0, mem_bytes;
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
unsigned i;
int r;
@@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
+ max_bytes = adev->gmc.mc_vram_size;
+ if (tbo->type != ttm_bo_type_kernel)
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
/* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
- if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
+ if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage);
mem->mm_node = NULL;
return 0;
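
Once registered, the new mem_info_vram_vendor attribute can be read like any other sysfs file. A minimal userspace sketch, assuming the card is exposed as card0 (the exact path depends on the system):

#include <stdio.h>

int main(void)
{
        /* assumed path; adjust the card index for the target device */
        FILE *f = fopen("/sys/class/drm/card0/device/mem_info_vram_vendor", "r");
        char vendor[32];

        if (!f)
                return 1;
        if (fgets(vendor, sizeof(vendor), f))
                printf("VRAM vendor: %s", vendor);      /* e.g. "samsung" */
        fclose(f);
        return 0;
}
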
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index d11eba09eadd..a97af422575a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -25,7 +25,8 @@
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
-
+#include "amdgpu_ras.h"
+#include "df/df_3_6_offset.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -131,9 +132,37 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
}
+#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
+static ssize_t amdgpu_xgmi_show_error(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
+ uint64_t fica_out;
+ unsigned int error_count = 0;
+
+ ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
+ ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
-static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
+ if (fica_out != 0x1f)
+ pr_err("xGMI error counters not enabled!\n");
+
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
+
+ if ((fica_out & 0xffff) == 2)
+ error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
+ adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
+}
+
+
+static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
+static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
@@ -148,6 +177,12 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
return ret;
}
+ /* Create xgmi error file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
+ if (ret)
+ pr_err("failed to create xgmi_error\n");
+
+
/* Create sysfs link to hive info folder on the first device */
if (adev != hive->adev) {
ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
@@ -226,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
INIT_LIST_HEAD(&tmp->device_list);
mutex_init(&tmp->hive_lock);
mutex_init(&tmp->reset_lock);
+ task_barrier_init(&tmp->tb);
if (lock)
mutex_lock(&tmp->hive_lock);
@@ -239,22 +275,49 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_device *tmp_adev;
+ bool update_hive_pstate = true;
+ bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
if (!hive)
return 0;
- if (hive->pstate == pstate)
- return 0;
+ mutex_lock(&hive->hive_lock);
+
+ if (hive->pstate == pstate) {
+ adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ goto out;
+ }
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
- if (is_support_sw_smu(adev))
- ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- if (ret)
+ ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+ if (ret) {
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
+ goto out;
+ }
+
+ /* Update device pstate */
+ adev->pstate = pstate;
+
+ /*
+	 * Update the hive pstate only if all devices of the hive
+ * are in the same pstate
+ */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ if (tmp_adev->pstate != adev->pstate) {
+ update_hive_pstate = false;
+ break;
+ }
+ }
+ if (update_hive_pstate || is_high_pstate)
+ hive->pstate = pstate;
+
+out:
+ mutex_unlock(&hive->hive_lock);
return ret;
}
@@ -296,23 +359,28 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
struct amdgpu_xgmi *entry;
struct amdgpu_device *tmp_adev = NULL;
- int count = 0, ret = -EINVAL;
+ int count = 0, ret = 0;
if (!adev->gmc.xgmi.supported)
return 0;
- ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
- if (ret) {
- dev_err(adev->dev,
- "XGMI: Failed to get node id\n");
- return ret;
- }
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
- ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
- if (ret) {
- dev_err(adev->dev,
- "XGMI: Failed to get hive id\n");
- return ret;
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
+ } else {
+ adev->gmc.xgmi.hive_id = 16;
+ adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
}
hive = amdgpu_get_xgmi_hive(adev, 1);
@@ -324,6 +392,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
+ /* Set default device pstate */
+ adev->pstate = -1;
+
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -332,29 +403,34 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
top_info->num_nodes = count;
hive->number_devices = count;
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- /* update node list for other device in the hive */
- if (tmp_adev != adev) {
- top_info = &tmp_adev->psp.xgmi_context.top_info;
- top_info->nodes[count - 1].node_id = adev->gmc.xgmi.node_id;
- top_info->num_nodes = count;
+ task_barrier_add_task(&hive->tb);
+
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			/* update node list for the other devices in the hive */
+ if (tmp_adev != adev) {
+ top_info = &tmp_adev->psp.xgmi_context.top_info;
+ top_info->nodes[count - 1].node_id =
+ adev->gmc.xgmi.node_id;
+ top_info->num_nodes = count;
+ }
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
+ goto exit;
}
- ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
- if (ret)
- goto exit;
- }
- /* get latest topology info for each device from psp */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
- &tmp_adev->psp.xgmi_context.top_info);
- if (ret) {
- dev_err(tmp_adev->dev,
- "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.node_id,
- tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
- goto exit;
+ /* get latest topology info for each device from psp */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
+ tmp_adev->gmc.xgmi.hive_id, ret);
+				/* TODO: continue when some nodes fail, or disable the whole hive */
+ goto exit;
+ }
}
}
@@ -391,7 +467,57 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
} else {
+ task_barrier_rem_task(&hive->tb);
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
}
+
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "xgmi_wafl_err_count",
+ .debugfs_name = "xgmi_wafl_err_inject",
+ };
+
+ if (!adev->gmc.xgmi.supported ||
+ adev->gmc.xgmi.num_physical_nodes == 0)
+ return 0;
+
+ if (!adev->gmc.xgmi.ras_if) {
+ adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gmc.xgmi.ras_if)
+ return -ENOMEM;
+ adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if->sub_block_index = 0;
+ strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
+ }
+ ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
+ kfree(adev->gmc.xgmi.ras_if);
+ adev->gmc.xgmi.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
+ adev->gmc.xgmi.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
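
For reference, the error count exposed through the new xgmi_error attribute is derived from the FICAA status word read above: the show function treats the data as valid only when its low 16 bits read 2, and then sums bits 62 and 63. A standalone sketch of that decode with a made-up sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* made-up sample: status field == 2, both error bits set */
        uint64_t fica_out = 0xC000000000000002ULL;
        unsigned int error_count = 0;

        if ((fica_out & 0xffff) == 2)
                error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

        printf("xgmi error count: %u\n", error_count);  /* prints 2 for this sample */
        return 0;
}
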
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index fbcee31788c4..74011fbc2251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
#ifndef __AMDGPU_XGMI_H__
#define __AMDGPU_XGMI_H__
+#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
struct device_attribute dev_attr;
struct amdgpu_device *adev;
int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+ struct task_barrier tb;
};
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
@@ -42,6 +44,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
new file mode 100644
index 000000000000..fda99c958c3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "arct_ip_offset.h"
+
+int arct_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks; only initialize the blocks needed by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i]));
+ adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i]));
+ adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i]));
+ adev->reg_offset[SDMA5_HWIP][i] = (uint32_t *)(&(SDMA5_BASE.instance[i]));
+ adev->reg_offset[SDMA6_HWIP][i] = (uint32_t *)(&(SDMA6_BASE.instance[i]));
+ adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
+ }
+ return 0;
+}
+
+
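
These per-instance base tables are what the SOC15 register helpers used elsewhere in the driver (for example RREG32_SOC15 in athub_v1_0.c below) index into: an absolute register offset is the segment base recorded for the IP block instance plus the register's relative offset. A simplified standalone sketch of that lookup, with illustrative values rather than the real base structures:

#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCE 7
#define MAX_SEGMENT  8

/* stand-in for one row of adev->reg_offset[<ip>_HWIP]: segment bases per instance */
static uint32_t gc_base[MAX_INSTANCE][MAX_SEGMENT] = {
        [0] = { 0x00001260, 0x0000A000 },       /* illustrative values only */
};

static uint32_t soc15_style_offset(uint32_t inst, uint32_t seg, uint32_t reg)
{
        /* absolute dword offset = IP instance segment base + relative register offset */
        return gc_base[inst][seg] + reg;
}

int main(void)
{
        /* illustrative register at relative offset 0x40 in segment 0 of instance 0 */
        printf("absolute offset: 0x%08x\n", soc15_style_offset(0, 0, 0x40));
        return 0;
}
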
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
new file mode 100644
index 000000000000..847ca9b3ce4e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "athub_v1_0.h"
+
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+
+static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+ if(def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ athub_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
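
The two entry points added here are meant to be driven from the ASIC-level clockgating code rather than used directly. A hedged sketch of how a soc15-style set_clockgating_state handler might delegate to them (the surrounding function name and its callers are assumptions, not part of this patch):

#include "amdgpu.h"
#include "athub_v1_0.h"

static int example_common_set_clockgating_state(void *handle,
                                                enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* hand ATHUB MGCG/LS programming to the IP-specific helper */
        athub_v1_0_set_clockgating(adev, state);
        return 0;
}
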
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
new file mode 100644
index 000000000000..b279af59e34f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V1_0_H__
+#define __ATHUB_V1_0_H__
+
+int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 89b32b6b81c8..921a69abda55 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -74,10 +74,12 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
athub_v2_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
athub_v2_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 6858cde9fc5d..ea702a64f807 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -361,7 +361,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct amdgpu_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@@ -369,8 +368,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
if (!amdgpu_connector->con_priv)
return panel_mode;
- dig_connector = amdgpu_connector->con_priv;
-
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
@@ -713,7 +710,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *dig_connector;
struct amdgpu_atombios_dp_link_train_info dp_info;
@@ -721,7 +717,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
if (!amdgpu_encoder->enc_priv)
return;
- dig = amdgpu_encoder->enc_priv;
amdgpu_connector = to_amdgpu_connector(connector);
if (!amdgpu_connector->con_priv)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index 980c363b1a0a..b4cc7c55fa16 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -76,11 +76,6 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
}
args.lpI2CDataOut = cpu_to_le16(out);
} else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
- }
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 1ffbc0d3d7a1..006f21ef7ddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmGRBM_STATUS_SE2},
+ {mmGRBM_STATUS_SE3},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
+ {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
+ {mmCP_CPF_BUSY_STAT},
+ {mmCP_CPF_STALLED_STAT1},
+ {mmCP_CPF_STATUS},
+ {mmCP_CPC_BUSY_STAT},
+ {mmCP_CPC_STALLED_STAT1},
+ {mmCP_CPC_STATUS},
{mmGB_ADDR_CONFIG},
{mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
@@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/**
- * cik_asic_reset - soft reset GPU
+ * cik_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int cik_asic_reset(struct amdgpu_device *adev)
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -1291,6 +1310,64 @@ static int cik_asic_reset(struct amdgpu_device *adev)
return r;
}
+static bool cik_asic_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return amdgpu_dpm_is_baco_supported(adev);
+ default:
+ return false;
+ }
+}
+
+static enum amd_reset_method
+cik_asic_reset_method(struct amdgpu_device *adev)
+{
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ /* disable baco reset until it works */
+ /* smu7_asic_get_baco_capability(adev, &baco_reset); */
+ baco_reset = false;
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
+ r = amdgpu_dpm_baco_reset(adev);
+ } else {
+ r = cik_asic_pci_config_reset(adev);
+ }
+
+ return r;
+}
+
static u32 cik_get_config_memsize(struct amdgpu_device *adev)
{
return RREG32(mmCONFIG_MEMSIZE);
@@ -1378,7 +1455,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1413,12 +1489,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1428,14 +1499,17 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1459,15 +1533,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1480,26 +1562,45 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1514,15 +1615,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
@@ -1823,6 +1925,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.read_bios_from_rom = &cik_read_bios_from_rom,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
+ .reset_method = &cik_asic_reset_method,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
@@ -1835,6 +1938,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_pcie_usage = &cik_get_pcie_usage,
.need_reset_on_init = &cik_need_reset_on_init,
.get_pcie_replay_count = &cik_get_pcie_replay_count,
+ .supports_baco = &cik_asic_supports_baco,
};
static int cik_common_early_init(void *handle)
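
The PCIe rework above replaces manual capability lookups via pci_pcie_cap() with the pcie_capability_*() accessors, which locate the PCIe capability offset internally. A brief sketch contrasting the two patterns (device pointer and error handling elided; only intended as an illustration of the API change):

#include <linux/pci.h>

static void example_read_lnkctl2(struct pci_dev *pdev)
{
        u16 val;
        int pos;

        /* old pattern: caller finds the capability offset itself */
        pos = pci_pcie_cap(pdev);
        if (pos)
                pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL2, &val);

        /* new pattern: the helper locates the capability internally */
        pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &val);
}
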
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 54c625a2e570..f91ab4c246b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,4 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c45304f1047c..580d3f93d670 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
u32 extra_bits = vmid & 0xf;
/* IB packet must end on an 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
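
The new pad_count expression above is a branch-free way of computing the distance to the next multiple of 8 dwords: for any non-negative length, (-len) & 7 yields the same value as the old (8 - (len & 7)) % 8. A small standalone check of that equivalence:

#include <stdio.h>

int main(void)
{
        unsigned int len;

        for (len = 0; len < 16; len++) {
                unsigned int old_pad = (8 - (len & 0x7)) % 8;
                unsigned int new_pad = (-len) & 7;

                printf("len=%2u old=%u new=%u%s\n", len, old_pad, new_pad,
                       old_pad == new_pad ? "" : "  MISMATCH");
        }
        return 0;
}
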
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 1ffd1963e765..40d2ac723dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -236,6 +236,7 @@ static void dce_v10_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
u32 tmp;
/* flip at hsync for async, default is vsync */
@@ -243,6 +244,9 @@ static void dce_v10_0_page_flip(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the primary scanout address */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -326,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -364,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -378,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -393,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1215,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1226,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1257,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1269,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1309,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1335,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1348,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
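
The page-flip hunks in this file reprogram GRPH_PITCH from the framebuffer's byte pitch divided by the bytes-per-pixel of its format, i.e. the pitch expressed in pixels. A tiny standalone illustration of that conversion (the values are made up):

#include <stdio.h>

int main(void)
{
        unsigned int pitch_bytes = 7680;        /* made-up: 1920 px * 4 bytes, no padding */
        unsigned int cpp = 4;                   /* bytes per pixel for an XRGB8888 framebuffer */

        printf("GRPH_PITCH value: %u pixels\n", pitch_bytes / cpp);    /* prints 1920 */
        return 0;
}
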
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 9e0782b54066..898ef72d423c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -254,6 +254,7 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
u32 tmp;
/* flip immediate for async, default is vsync */
@@ -261,6 +262,9 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -344,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -381,6 +387,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -395,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -409,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1241,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1252,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1283,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1295,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1335,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1361,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1374,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 4bf453e07dca..db15a112becc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -191,10 +191,14 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -277,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -305,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
-
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -320,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -334,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1120,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int interlace = 0;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1160,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u8 *sadb = NULL;
int sad_count;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1217,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1240,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1253,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 tmp = 0;
@@ -1628,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
int bpc = 8;
@@ -1635,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index b23418ca8f6a..f06c9022c1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -184,10 +184,14 @@ static void dce_v8_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the primary scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -271,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -299,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -313,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -327,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1153,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp = 0, offset;
@@ -1165,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1210,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
@@ -1224,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1259,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1288,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1301,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
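The reworked SAD handling in both DCE blocks follows the return convention of drm_edid_to_sad(): a negative value is a real failure worth logging, zero simply means the EDID carries no short audio descriptors, and in either case there is nothing to program. A condensed restatement of the resulting pattern:

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count < 0)		/* genuine failure: report it */
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)		/* failure or no SADs: bail out quietly */
		return;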
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 3cc0a16649f9..e4f94863332c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -260,15 +260,14 @@ static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
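drm_connector_for_each_possible_encoder() is now called with just the connector and the encoder cursor; the index variable it used to require was dropped when the DRM core stopped exposing the fixed encoder-ID array and began iterating the connector's possible-encoder mask internally (that rationale is inferred from the core change, not stated in this patch). Usage after the change:

	struct drm_encoder *encoder;

	drm_connector_for_each_possible_encoder(connector, encoder) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
			return encoder;
	}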
@@ -454,13 +453,8 @@ static int dce_virtual_hw_init(void *handle)
#endif
/* no DCE */
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_NAVI10:
- break;
default:
- DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+ break;
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 844c03868248..d6aca1c08068 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -31,6 +31,13 @@ static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
+ adev->df.hash_status.hash_64k = false;
+ adev->df.hash_status.hash_2m = false;
+ adev->df.hash_status.hash_1g = false;
+}
+
+static void df_v1_7_sw_fini(struct amdgpu_device *adev)
+{
}
static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
@@ -62,7 +69,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
- fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
return df_v1_7_channel_number[fb_channel_number];
}
@@ -73,7 +80,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
u32 tmp;
/* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
+ adev->df.funcs->enable_broadcast_mode(adev, true);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
@@ -88,7 +95,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
	/* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ adev->df.funcs->enable_broadcast_mode(adev, false);
}
static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
@@ -111,6 +118,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v1_7_funcs = {
.sw_init = df_v1_7_sw_init,
+ .sw_fini = df_v1_7_sw_fini,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
.get_fb_channel_number = df_v1_7_get_fb_channel_number,
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
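The adev->df_funcs to adev->df.funcs rename runs through both DF files: the callback table and the new hash_status flags are gathered into one per-device DF container. A hedged sketch of what that container looks like, with field names taken from the accesses in this patch rather than copied from the header:

	struct amdgpu_df_hash_status {
		bool hash_64k;
		bool hash_2m;
		bool hash_1g;
	};

	struct amdgpu_df {
		struct amdgpu_df_hash_status	hash_status;
		const struct amdgpu_df_funcs	*funcs;
	};

	/* embedded in struct amdgpu_device and reached as adev->df.* */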
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index ef6e91f9f51c..5a1bd8ed1a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -27,6 +27,9 @@
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
+#define DF_3_6_SMN_REG_INST_DIST 0x8
+#define DF_3_6_INST_CNT 8
+
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
16, 32, 0, 0, 0, 2, 4, 8};
@@ -93,6 +96,151 @@ const struct attribute_group *df_v3_6_attr_groups[] = {
NULL
};
+static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
+ uint32_t ficaa_val)
+{
+ unsigned long flags, address, data;
+ uint32_t ficadl_val, ficadh_val;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
+ WREG32(data, ficaa_val);
+
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
+ ficadl_val = RREG32(data);
+
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
+ ficadh_val = RREG32(data);
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	return (((uint64_t)ficadh_val << 32) | ficadl_val);
+}
+
+static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
+ uint32_t ficadl_val, uint32_t ficadh_val)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
+ WREG32(data, ficaa_val);
+
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
+ WREG32(data, ficadl_val);
+
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
+ WREG32(data, ficadh_val);
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/*
+ * df_v3_6_perfmon_rreg - read perfmon lo and hi
+ *
+ * Must be atomic: there is no MMIO method, so the back-to-back reads of the
+ * lo and hi halves must not be interleaved, in order to preserve the DF
+ * finite state machine.
+ */
+static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t *lo_val,
+ uint32_t hi_addr, uint32_t *hi_val)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ *lo_val = RREG32(data);
+ WREG32(address, hi_addr);
+ *hi_val = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/*
+ * df_v3_6_perfmon_wreg - write to perfmon lo and hi
+ *
+ * Must be atomic: there is no MMIO method, so no reads may be interleaved
+ * with the two data writes, in order to preserve the data fabric's finite
+ * state machine.
+ */
+static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
+ uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ WREG32(data, lo_val);
+ WREG32(address, hi_addr);
+ WREG32(data, hi_val);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/* same as perfmon_wreg, but returns a status from a read-back check of the written values */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
+{
+ unsigned long flags, address, data;
+ uint32_t lo_val_rb, hi_val_rb;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ WREG32(data, lo_val);
+ WREG32(address, hi_addr);
+ WREG32(data, hi_val);
+
+ WREG32(address, lo_addr);
+ lo_val_rb = RREG32(data);
+ WREG32(address, hi_addr);
+ hi_val_rb = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+ return -EBUSY;
+
+ return 0;
+}
+
+
+/*
+ * Retry arming the counters every 100 usec, within a 1 millisecond window.
+ * If arming still fails after the timeout, return an error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL 100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
+{
+ int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+ while (countdown) {
+
+ if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+ hi_addr, hi_val))
+ break;
+
+ countdown -= ARM_RETRY_USEC_INTERVAL;
+ udelay(ARM_RETRY_USEC_INTERVAL);
+ }
+
+ return countdown > 0 ? 0 : -ETIME;
+}
+
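All of the FICA and perfmon helpers added above use the same indirect access path: write the SMN address into the PCIe index register, then read or write the PCIe data register, with pcie_idx_lock held so no other access can be interleaved and upset the DF state machine. The arm-with-retry wrapper additionally reads the control pair back after writing it; at one attempt per 100 usec inside a 1000 usec budget that is at most ten tries before giving up with -ETIME. A sketch of how a caller might chain the helpers (illustrative only, error handling trimmed):

	uint32_t lo_addr, hi_addr, lo_val, hi_val;

	if (df_v3_6_pmc_get_ctrl_settings(adev, config, &lo_addr, &hi_addr,
					  &lo_val, &hi_val))
		return;

	/* program the control pair; retried until the read-back matches */
	if (df_v3_6_perfmon_arm_with_retry(adev, lo_addr, lo_val,
					   hi_addr, hi_val))
		DRM_DEBUG_DRIVER("arming config %llx deferred\n", config);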
/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
struct device_attribute *attr,
@@ -117,6 +265,32 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
+static void df_v3_6_query_hashes(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ adev->df.hash_status.hash_64k = false;
+ adev->df.hash_status.hash_2m = false;
+ adev->df.hash_status.hash_1g = false;
+
+ if (adev->asic_type != CHIP_ARCTURUS)
+ return;
+
+ /* encoding for hash-enabled on Arcturus */
+ if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
+ tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
+ adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl64K);
+ adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl2M);
+ adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl1G);
+ }
+}
+
/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
@@ -128,6 +302,15 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
adev->df_perfmon_config_assign_mask[i] = 0;
+
+ df_v3_6_query_hashes(adev);
+}
+
+static void df_v3_6_sw_fini(struct amdgpu_device *adev)
+{
+
+ device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
+
}
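sw_init now also records whether the data fabric hashes physical addresses at 64K, 2M and 1G granularity; on Arcturus with the 0xe channel-number encoding the three flags are read out of DF_CS_UMC_AON0_DfGlobalCtrl, everywhere else they stay false. sw_fini is the counterpart that removes the df_cntr_avail sysfs file created at init time. A purely illustrative consumer of the new flags (not part of this patch):

	/* pick the largest interleave granule the DF reports as hashed */
	static u64 df_hash_granule(struct amdgpu_device *adev)
	{
		if (adev->df.hash_status.hash_1g)
			return SZ_1G;
		if (adev->df.hash_status.hash_2m)
			return SZ_2M;
		if (adev->df.hash_status.hash_64k)
			return SZ_64K;
		return 0;	/* no address hashing active */
	}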
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
@@ -159,7 +342,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
- fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
fb_channel_number = 0;
@@ -171,23 +354,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
u32 tmp;
- /* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
- tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
- } else {
- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
- tmp |= DF_V3_6_MGCG_DISABLE;
- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
- }
+ if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
+ /* Put DF on broadcast mode */
+ adev->df.funcs->enable_broadcast_mode(adev, true);
+
+ if (enable) {
+ tmp = RREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater);
+ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+ tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+ WREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+ } else {
+ tmp = RREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater);
+ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+ tmp |= DF_V3_6_MGCG_DISABLE;
+ WREG32_SOC15(DF, 0,
+ mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+ }
- /* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ /* Exit broadcast mode */
+ adev->df.funcs->enable_broadcast_mode(adev, false);
+ }
}
static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
@@ -231,20 +420,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
switch (target_cntr) {
case 0:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break;
case 1:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break;
case 2:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break;
case 3:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break;
}
@@ -268,6 +457,10 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
uint32_t *lo_val,
uint32_t *hi_val)
{
+
+ uint32_t eventsel, instance, unitmask;
+ uint32_t instance_10, instance_5432, instance_76;
+
df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);
if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
@@ -276,40 +469,33 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
return -ENXIO;
}
- if (lo_val && hi_val) {
- uint32_t eventsel, instance, unitmask;
- uint32_t instance_10, instance_5432, instance_76;
+ eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
+ unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
+ instance = DF_V3_6_GET_INSTANCE(config);
- eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
- unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
- instance = DF_V3_6_GET_INSTANCE(config);
+ instance_10 = instance & 0x3;
+ instance_5432 = (instance >> 2) & 0xf;
+ instance_76 = (instance >> 6) & 0x3;
- instance_10 = instance & 0x3;
- instance_5432 = (instance >> 2) & 0xf;
- instance_76 = (instance >> 6) & 0x3;
+ *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
+ *hi_val = (instance_76 << 29) | instance_5432;
- *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
- *hi_val = (instance_76 << 29) | instance_5432;
- }
+ DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
+ config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);
return 0;
}
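With lo_val/hi_val now always computed here (and the enable bit 22 folded in, instead of being OR'd on later from pmc_start), the packing is: low dword = unit mask in bits 15:8, instance bits 1:0 in bits 7:6, event select in bits 5:0, enable in bit 22; high dword = instance bits 7:6 in bits 30:29 and instance bits 5:2 in bits 3:0. A worked example with made-up values:

	uint32_t eventsel = 0x0b, unitmask = 0x2, instance = 0x47;
	uint32_t instance_10   = instance & 0x3;		/* 0x3 */
	uint32_t instance_5432 = (instance >> 2) & 0xf;		/* 0x1 */
	uint32_t instance_76   = (instance >> 6) & 0x3;		/* 0x1 */
	uint32_t lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
	uint32_t hi_val = (instance_76 << 29) | instance_5432;
	/* lo_val = 0x004002cb, hi_val = 0x20000001 */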
-/* assign df performance counters for read */
-static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev,
- uint64_t config,
- int *is_assigned)
+/* add df performance counters for read */
+static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
+ uint64_t config)
{
int i, target_cntr;
- *is_assigned = 0;
-
target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
- if (target_cntr >= 0) {
- *is_assigned = 1;
+ if (target_cntr >= 0)
return 0;
- }
for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
if (adev->df_perfmon_config_assign_mask[i] == 0U) {
@@ -322,6 +508,44 @@ static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev,
return -ENOSPC;
}
+#define DEFERRED_ARM_MASK (1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+ uint64_t config, bool is_deferred)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ if (target_cntr < 0)
+ return -EINVAL;
+
+ if (is_deferred)
+ adev->df_perfmon_config_assign_mask[target_cntr] |=
+ DEFERRED_ARM_MASK;
+ else
+ adev->df_perfmon_config_assign_mask[target_cntr] &=
+ ~DEFERRED_ARM_MASK;
+
+ return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+ uint64_t config)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ /*
+	 * target_cntr should never be negative here, since for now this function
+	 * is only called from pmc_get_count, but check anyway.
+ */
+ return (target_cntr >= 0 &&
+ (adev->df_perfmon_config_assign_mask[target_cntr]
+ & DEFERRED_ARM_MASK));
+
+}
+
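Deferred arming closes the gap between reserving a counter and it actually counting: an enable call to pmc_start only claims a slot via pmc_add_cntr, the follow-up start programs the control pair with the retrying helper, and if the read-back never matches the counter's entry in df_perfmon_config_assign_mask is tagged with bit 31 (DEFERRED_ARM_MASK). pmc_get_count later retries the arm once before reading; if that retry still fails the sample is dropped rather than reporting a value from an unarmed counter, and on success the deferred flag is cleared. In code terms, the read side reduces to:

	if (df_v3_6_pmc_is_deferred(adev, config)) {
		if (df_v3_6_perfmon_arm_with_status(adev, lo_base_addr, lo_val,
						    hi_base_addr, hi_val))
			return;		/* still not armed: drop the sample */
		df_v3_6_pmc_set_deferred(adev, config, false);
	}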
/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config)
@@ -344,72 +568,40 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
if ((lo_base_addr == 0) || (hi_base_addr == 0))
return;
- WREG32_PCIE(lo_base_addr, 0UL);
- WREG32_PCIE(hi_base_addr, 0UL);
-}
-
-
-static int df_v3_6_add_perfmon_cntr(struct amdgpu_device *adev,
- uint64_t config)
-{
- uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
- int ret, is_assigned;
-
- ret = df_v3_6_pmc_assign_cntr(adev, config, &is_assigned);
-
- if (ret || is_assigned)
- return ret;
-
- ret = df_v3_6_pmc_get_ctrl_settings(adev,
- config,
- &lo_base_addr,
- &hi_base_addr,
- &lo_val,
- &hi_val);
-
- if (ret)
- return ret;
-
- DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
- config, lo_base_addr, hi_base_addr, lo_val, hi_val);
-
- WREG32_PCIE(lo_base_addr, lo_val);
- WREG32_PCIE(hi_base_addr, hi_val);
-
- return ret;
+ df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
}
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
int is_enable)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val;
- int ret = 0;
+ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ int err = 0, ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
+ if (is_enable)
+ return df_v3_6_pmc_add_cntr(adev, config);
df_v3_6_reset_perfmon_cntr(adev, config);
- if (is_enable) {
- ret = df_v3_6_add_perfmon_cntr(adev, config);
- } else {
- ret = df_v3_6_pmc_get_ctrl_settings(adev,
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
config,
&lo_base_addr,
&hi_base_addr,
- NULL,
- NULL);
-
- if (ret)
- return ret;
+ &lo_val,
+ &hi_val);
- lo_val = RREG32_PCIE(lo_base_addr);
+ if (ret)
+ return ret;
- DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x",
- config, lo_base_addr, hi_base_addr, lo_val);
+ err = df_v3_6_perfmon_arm_with_retry(adev,
+ lo_base_addr,
+ lo_val,
+ hi_base_addr,
+ hi_val);
- WREG32_PCIE(lo_base_addr, lo_val | (1ULL << 22));
- }
+ if (err)
+ ret = df_v3_6_pmc_set_deferred(adev, config, true);
break;
default:
@@ -422,7 +614,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
int is_disable)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val;
+ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
int ret = 0;
switch (adev->asic_type) {
@@ -431,18 +623,13 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
config,
&lo_base_addr,
&hi_base_addr,
- NULL,
- NULL);
+ &lo_val,
+ &hi_val);
if (ret)
return ret;
- lo_val = RREG32_PCIE(lo_base_addr);
-
- DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x",
- config, lo_base_addr, hi_base_addr, lo_val);
-
- WREG32_PCIE(lo_base_addr, lo_val & ~(1ULL << 22));
+ df_v3_6_reset_perfmon_cntr(adev, config);
if (is_disable)
df_v3_6_pmc_release_cntr(adev, config);
@@ -459,20 +646,31 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
*count = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
-
df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
&hi_base_addr);
if ((lo_base_addr == 0) || (hi_base_addr == 0))
return;
- lo_val = RREG32_PCIE(lo_base_addr);
- hi_val = RREG32_PCIE(hi_base_addr);
+ /* rearm the counter or throw away count value on failure */
+ if (df_v3_6_pmc_is_deferred(adev, config)) {
+ int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+ lo_base_addr, lo_val,
+ hi_base_addr, hi_val);
+
+ if (rearm_err)
+ return;
+
+ df_v3_6_pmc_set_deferred(adev, config, false);
+ }
+
+ df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
+ hi_base_addr, &hi_val);
*count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
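OR-ing the 32-bit halves with 0ULL promotes them to 64 bits before the shift, so the shift by 32 is well defined; an equivalent and perhaps more conventional spelling uses an explicit cast:

	*count = ((uint64_t)hi_val << 32) | lo_val;	/* same result */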
@@ -480,17 +678,69 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
*count = 0;
DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
- config, lo_base_addr, hi_base_addr, lo_val, hi_val);
+ config, lo_base_addr, hi_base_addr, lo_val, hi_val);
break;
-
default:
break;
}
}
+static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
+ uint32_t df_inst)
+{
+ uint32_t base_addr_reg_val = 0;
+ uint64_t base_addr = 0;
+
+ base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
+ df_inst * DF_3_6_SMN_REG_INST_DIST);
+
+ if (REG_GET_FIELD(base_addr_reg_val,
+ DF_CS_UMC_AON0_DramBaseAddress0,
+ AddrRngVal) == 0) {
+ DRM_WARN("address range not valid");
+ return 0;
+ }
+
+ base_addr = REG_GET_FIELD(base_addr_reg_val,
+ DF_CS_UMC_AON0_DramBaseAddress0,
+ DramBaseAddr);
+
+ return base_addr << 28;
+}
+
+static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
+{
+ uint32_t xgmi_node_id = 0;
+ uint32_t df_inst_id = 0;
+
+ /* Walk through DF dst nodes to find current XGMI node */
+ for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
+
+ xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
+ df_inst_id * DF_3_6_SMN_REG_INST_DIST);
+ xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
+ DF_CS_UMC_AON0_DramLimitAddress0,
+ DstFabricID);
+
+ /* TODO: establish reason dest fabric id is offset by 7 */
+ xgmi_node_id = xgmi_node_id >> 7;
+
+ if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
+ break;
+ }
+
+ if (df_inst_id == DF_3_6_INST_CNT) {
+ DRM_WARN("cant match df dst id with gpu node");
+ return 0;
+ }
+
+ return df_inst_id;
+}
+
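DramBaseAddress0 encodes the base in 256 MB units, so once AddrRngVal confirms the range is enabled the DramBaseAddr field is shifted left by 28 to form a byte address; get_df_inst_id walks the eight per-instance DramLimitAddress0 registers (DF_3_6_SMN_REG_INST_DIST apart) and compares DstFabricID, shifted right by 7, with the GPU's XGMI physical node id. A quick worked example of the base-address arithmetic:

	/* DramBaseAddr field = 0x40  ->  0x40 << 28 = 0x400000000 (16 GiB) */
	uint64_t base_addr = (uint64_t)0x40 << 28;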
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
+ .sw_fini = df_v3_6_sw_fini,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
@@ -499,5 +749,9 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.get_clockgating_state = df_v3_6_get_clockgating_state,
.pmc_start = df_v3_6_pmc_start,
.pmc_stop = df_v3_6_pmc_stop,
- .pmc_get_count = df_v3_6_pmc_get_count
+ .pmc_get_count = df_v3_6_pmc_get_count,
+ .get_fica = df_v3_6_get_fica,
+ .set_fica = df_v3_6_set_fica,
+ .get_dram_base_addr = df_v3_6_get_dram_base_addr,
+ .get_df_inst_id = df_v3_6_get_df_inst_id
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f41287f9000d..1785fdad6ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -20,8 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
@@ -36,6 +40,7 @@
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
@@ -46,9 +51,6 @@
* Navi10 has two graphic rings to share each graphic pipe.
* 1. Primary ring
* 2. Async ring
- *
- * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
- * first.
*/
#define GFX10_NUM_GFX_RINGS 2
#define GFX10_MEC_HPD_SIZE 2048
@@ -56,6 +58,9 @@
#define F32_CE_PROGRAM_RAM_SIZE 65536
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define mmCGTT_GS_NGG_CLK_CTRL 0x5087
+#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -63,11 +68,30 @@ MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
+MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
+MODULE_FIRMWARE("amdgpu/navi14_me.bin");
+MODULE_FIRMWARE("amdgpu/navi14_mec.bin");
+MODULE_FIRMWARE("amdgpu/navi14_mec2.bin");
+MODULE_FIRMWARE("amdgpu/navi14_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/navi12_ce.bin");
+MODULE_FIRMWARE("amdgpu/navi12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/navi12_me.bin");
+MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
+MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");
+
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -91,17 +115,20 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
@@ -109,6 +136,102 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+};
+
+static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
+{
+ /* Pending on emulation bring up */
+};
+
+static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
+{
+ /* Pending on emulation bring up */
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -223,15 +346,29 @@ static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
+static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx10_kiq_set_resources,
.kiq_map_queues = gfx10_kiq_map_queues,
.kiq_unmap_queues = gfx10_kiq_unmap_queues,
.kiq_query_status = gfx10_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
+ .invalidate_tlbs_size = 2,
};
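The new KIQ callback emits a two-dword PACKET3_INVALIDATE_TLBS packet (hence invalidate_tlbs_size = 2): the header plus one dword carrying the destination select, the PASID whose translations are being flushed, the flush type and the all-hub flag. GMC code can then push a per-PASID TLB flush through the KIQ ring instead of MMIO; a rough sketch of such a submission, with the ring locking and fence wait that a real caller needs left out:

	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	const struct kiq_pm4_funcs *pmf = adev->gfx.kiq.pmf;

	amdgpu_ring_alloc(ring, pmf->invalidate_tlbs_size);
	pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
	amdgpu_ring_commit(ring);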
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -250,6 +387,22 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
break;
+ case CHIP_NAVI14:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_1_1,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
+ break;
+ case CHIP_NAVI12:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_1_2,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
+ break;
default:
break;
}
@@ -331,20 +484,12 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
if (amdgpu_emu_mode == 1)
msleep(1);
else
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- if (amdgpu_emu_mode == 1)
- DRM_INFO("ring test on %d succeeded in %d msecs\n",
- ring->idx, i);
- else
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
+ udelay(1);
}
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -394,14 +539,10 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -429,6 +570,31 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
+static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
+{
+ adev->gfx.cp_fw_write_wait = false;
+
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
+ if ((adev->gfx.me_fw_version >= 0x00000046) &&
+ (adev->gfx.me_feature_version >= 27) &&
+ (adev->gfx.pfp_fw_version >= 0x00000068) &&
+ (adev->gfx.pfp_feature_version >= 27) &&
+ (adev->gfx.mec_fw_version >= 0x0000005b) &&
+ (adev->gfx.mec_feature_version >= 27))
+ adev->gfx.cp_fw_write_wait = true;
+ break;
+ default:
+ break;
+ }
+
+ if (adev->gfx.cp_fw_write_wait == false)
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
+}
+
+
static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_1 *rlc_hdr;
@@ -450,11 +616,29 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
+static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
+{
+ bool ret = false;
+
+ switch (adev->pdev->revision) {
+ case 0xc2:
+ case 0xc3:
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+	return ret;
+}
+
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_NAVI10:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
default:
break;
@@ -464,7 +648,8 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
+ char fw_name[40];
+ char wks[10];
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -477,15 +662,25 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
+ memset(wks, 0, sizeof(wks));
switch (adev->asic_type) {
case CHIP_NAVI10:
chip_name = "navi10";
break;
+ case CHIP_NAVI14:
+ chip_name = "navi14";
+ if (!(adev->pdev->device == 0x7340 &&
+ adev->pdev->revision != 0x00))
+ snprintf(wks, sizeof(wks), "_wks");
+ break;
+ case CHIP_NAVI12:
+ chip_name = "navi12";
+ break;
default:
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -496,7 +691,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -507,7 +702,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -518,61 +713,63 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
- rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
- version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
+ if (!amdgpu_sriov_vf(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
+ adev->gfx.rlc.clear_state_descriptor_offset =
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
+ adev->gfx.rlc.avail_scratch_ram_locations =
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
+ adev->gfx.rlc.reg_restore_list_size =
le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
+ adev->gfx.rlc.reg_list_format_start =
le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
+ adev->gfx.rlc.reg_list_format_separate_start =
le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
+ adev->gfx.rlc.starting_offsets_start =
le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
+ adev->gfx.rlc.reg_list_format_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
+ adev->gfx.rlc.reg_list_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
+ adev->gfx.rlc.register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
- goto out;
- }
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ err = -ENOMEM;
+ goto out;
+ }
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
+ if (adev->gfx.rlc.is_rlc_v2_1)
+ gfx_v10_0_init_rlc_ext_microcode(adev);
+ }
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -583,7 +780,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
@@ -625,10 +822,11 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
+ if (info->fw) {
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
if (adev->gfx.rlc.is_rlc_v2_1 &&
adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
@@ -686,6 +884,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
}
+ gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
dev_err(adev->dev,
@@ -820,39 +1019,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
- AMDGPU_GEM_DOMAIN_VRAM);
- if (!r)
- adev->gfx.rlc.clear_state_gpu_addr =
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- return r;
-}
-
-static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
-
- if (!adev->gfx.rlc.clear_state_obj)
- return;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- }
-}
-
static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1026,6 +1192,8 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1133,6 +1301,8 @@ static int gfx_v10_0_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -1292,7 +1462,7 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v10_0_pfp_fini(adev);
@@ -1452,6 +1622,25 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
+{
+ int vmid;
+
+ /*
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+ * access. Compute VMIDs should be enabled by FW for target VMIDs,
+ * the driver can enable them for graphics. VMID0 should maintain
+ * access so that HWS firmware can save/restore entries.
+ */
+ for (vmid = 1; vmid < 16; vmid++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
+ }
+}
+
+
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
int i, j, k;
@@ -1461,7 +1650,8 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
u32 utcl_invreq_disable = 0;
/*
* GCRD_TARGETS_DISABLE field contains
- * for Navi10: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
+ * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
+ * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0]
*/
u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
2 * max_wgp_per_sh + /* TCP */
@@ -1469,7 +1659,8 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
4); /* GL1C */
/*
* UTCL1_UTCL0_INVREQ_DISABLE field contains
- * for Navi10: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
+	 * for Navi10/Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
+ * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0]
*/
u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
2 * max_wgp_per_sh + /* TCP */
@@ -1477,7 +1668,9 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
4 + /* RMI */
1); /* SQG */
- if (adev->asic_type == CHIP_NAVI10) {
+ if (adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14 ||
+ adev->asic_type == CHIP_NAVI12) {
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
@@ -1518,6 +1711,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
}
}
+static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+{
+ /* TCCs are global (not instanced). */
+ uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+
+ adev->gfx.config.tcc_disabled_mask =
+ REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
+ (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
+}
+
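tcc_disabled_mask folds the hardware and user TCC disable registers into one 32-bit mask: the TCC_DISABLE field covers TCC instances 0-15 and HI_TCC_DISABLE, shifted up by 16, covers instances 16-31. For example (values made up for illustration):

	/* TCC_DISABLE = 0x0003, HI_TCC_DISABLE = 0x0001
	 * -> tcc_disabled_mask = (0x0001 << 16) | 0x0003 = 0x00010003
	 *    meaning TCCs 0, 1 and 16 are disabled/harvested */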
static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -1529,13 +1733,14 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
+ gfx_v10_0_get_tcc_info(adev);
adev->gfx.config.pa_sc_tile_steering_override =
gfx_v10_0_init_pa_sc_tile_steering_override(adev);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
nv_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
@@ -1552,6 +1757,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v10_0_init_compute_vmid(adev);
+ gfx_v10_0_init_gds_vmid(adev);
}
@@ -1572,24 +1778,18 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
+static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
+
/* csib */
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
-}
-
-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
-{
- gfx_v10_0_init_csb(adev);
-
- amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
- /* TODO: init power gating */
- return;
+ return 0;
}
void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
@@ -1624,9 +1824,9 @@ static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
* hence no handshake between SMU & RLC
* GFXOFF will be disabled
*/
- rlc_pg_cntl |= 0x80000;
+ rlc_pg_cntl |= 0x800000;
} else
- rlc_pg_cntl &= ~0x80000;
+ rlc_pg_cntl &= ~0x800000;
WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
}
@@ -1684,18 +1884,16 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+
r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
return r;
- gfx_v10_0_init_pg(adev);
- /* enable RLC SRM */
- gfx_v10_0_rlc_enable_srm(adev);
+ gfx_v10_0_init_csb(adev);
+ if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
+ gfx_v10_0_rlc_enable_srm(adev);
} else {
adev->gfx.rlc.funcs->stop(adev);
@@ -1717,7 +1915,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- gfx_v10_0_init_pg(adev);
+ gfx_v10_0_init_csb(adev);
+
adev->gfx.rlc.funcs->start(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -1767,7 +1966,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
rlc_toc++;
- };
+ }
return 0;
}
@@ -2184,7 +2383,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
@@ -2197,7 +2396,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
+
+ return 0;
}
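In this hunk the fixed udelay(50) after toggling CP_ME_CNTL is replaced by polling mmCP_STAT until it reads zero, bounded by adev->usec_timeout (in microseconds), so a CP that never halts or unhalts is reported through DRM_ERROR instead of being silently raced; the function's return type becomes int, though it currently always returns 0.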
static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
@@ -2254,7 +2463,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2324,7 +2533,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2393,7 +2602,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2568,7 +2777,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
/* Init gfx ring 0 for pipe 0 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
- mutex_unlock(&adev->srbm_mutex);
+
/* Set ring buffer size */
ring = &adev->gfx.gfx_ring[0];
rb_bufsz = order_base_2(ring->ring_size / 8);
@@ -2606,11 +2815,11 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
/* Init gfx ring 1 for pipe 1 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
- mutex_unlock(&adev->srbm_mutex);
ring = &adev->gfx.gfx_ring[1];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
@@ -2640,6 +2849,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
/* Switch to pipe 0 */
mutex_lock(&adev->srbm_mutex);
@@ -2714,7 +2924,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -2898,6 +3108,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.gfx_ring[0];
if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
@@ -2909,14 +3120,15 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else if (adev->in_gpu_reset) {
/* reset mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
+ adev->wb.wb[ring->wptr_offs] = 0;
amdgpu_ring_clear_ring(ring);
#ifdef BRING_UP_DEBUG
mutex_lock(&adev->srbm_mutex);
@@ -3125,8 +3337,11 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3397,23 +3612,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
- i, ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->sched.ready = true;
- DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
- i, ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_test_ring(ring);
+ r = amdgpu_ring_test_helper(ring);
if (r)
- ring->sched.ready = false;
+ return r;
}
return 0;
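Both ring-test loops above now go through amdgpu_ring_test_helper(), which runs the ring test and sets ring->sched.ready from the result, so the open-coded DRM_INFO banners and manual sched.ready bookkeeping are gone and a failing compute ring now returns its error instead of merely being marked not ready.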
@@ -3516,10 +3724,6 @@ static int gfx_v10_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gfx_v10_0_csb_vram_pin(adev);
- if (r)
- return r;
-
if (!amdgpu_emu_mode)
gfx_v10_0_init_golden_registers(adev);
@@ -3602,32 +3806,23 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_gfx_disable_kcq(adev))
DRM_ERROR("KCQ disable failed\n");
if (amdgpu_sriov_vf(adev)) {
- pr_debug("For SRIOV client, shouldn't do anything.\n");
+ gfx_v10_0_cp_gfx_enable(adev, false);
return 0;
}
gfx_v10_0_cp_enable(adev, false);
gfx_v10_0_enable_gui_idle_interrupt(adev, false);
- gfx_v10_0_csb_vram_unpin(adev);
return 0;
}
static int gfx_v10_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->in_suspend = true;
- return gfx_v10_0_hw_fini(adev);
+ return gfx_v10_0_hw_fini(handle);
}
static int gfx_v10_0_resume(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
-
- r = gfx_v10_0_hw_init(adev);
- adev->in_suspend = false;
- return r;
+ return gfx_v10_0_hw_init(handle);
}
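gfx_v10_0_suspend() and gfx_v10_0_resume() now simply forward the handle to hw_fini()/hw_init(); the adev->in_suspend toggling that used to live here is dropped (presumably because it is handled in common device code rather than per IP block).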
static bool gfx_v10_0_is_idle(void *handle)
@@ -4034,9 +4229,10 @@ static int gfx_v10_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
if (!enable) {
amdgpu_gfx_off_ctrl(adev, false);
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
@@ -4056,8 +4252,10 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
gfx_v10_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4173,7 +4371,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4193,8 +4391,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -4453,7 +4651,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
if (ring->trail_seq ==
le32_to_cpu(*(ring->trail_fence_cpu_addr)))
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout) {
@@ -4539,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4547,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -4581,6 +4780,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
+static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ struct amdgpu_device *adev = ring->adev;
+ bool fw_version_ok = false;
+
+ fw_version_ok = adev->gfx.cp_fw_write_wait;
+
+ if (fw_version_ok)
+ gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+ ref, mask, 0x20);
+ else
+ amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+ ref, mask);
+}
+
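The new gfx_v10_0_ring_emit_reg_write_reg_wait() uses a single gfx_v10_0_wait_reg_mem() call to write reg0 and wait on reg1 when the CP firmware advertises support for that (adev->gfx.cp_fw_write_wait); otherwise it falls back to amdgpu_ring_emit_reg_write_reg_wait_helper(), which emits a separate write and wait. The callback is wired into the gfx, compute and KIQ ring funcs further down.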
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -4927,7 +5144,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
.get_wptr = gfx_v10_0_ring_get_wptr_gfx,
.set_wptr = gfx_v10_0_ring_set_wptr_gfx,
@@ -4971,6 +5188,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_tmz = gfx_v10_0_ring_emit_tmz,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -4978,7 +5196,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_compute,
.get_wptr = gfx_v10_0_ring_get_wptr_compute,
.set_wptr = gfx_v10_0_ring_set_wptr_compute,
@@ -5004,6 +5222,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5011,7 +5230,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_compute,
.get_wptr = gfx_v10_0_ring_get_wptr_compute,
.set_wptr = gfx_v10_0_ring_set_wptr_compute,
@@ -5034,6 +5253,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_rreg = gfx_v10_0_ring_emit_rreg,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5088,6 +5308,8 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
default:
@@ -5097,15 +5319,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
- /* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- default:
- adev->gds.gds_size = 0x10000;
- adev->gds.gds_compute_max_wave_id = 0x4ff;
- break;
- }
+ unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines;
+ adev->gds.gds_size = 0x10000;
+ adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
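A quick check of the new formula: for a hypothetical 40-CU configuration (say max_shader_engines = 2, max_sh_per_se = 2, max_cu_per_sh = 10), total_cu = 40 and gds_compute_max_wave_id = 40 * 32 - 1 = 1279 = 0x4ff, which matches the value the removed switch hard-coded for Navi10.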
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 7f0a63628c43..31f44d05e606 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1576,7 +1576,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
{
u32 gb_addr_config = 0;
- u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 mc_arb_ramcfg;
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
@@ -1678,7 +1678,6 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 21187275dfd3..8f20a5dd44fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1890,6 +1890,24 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
+{
+ int vmid;
+
+ /*
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+ * access. Compute VMIDs should be enabled by FW for target VMIDs;
+ * the driver can enable them for graphics. VMID0 should maintain
+ * access so that HWS firmware can save/restore entries.
+ */
+ for (vmid = 1; vmid < 16; vmid++) {
+ WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
+ WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
+ WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
+ WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
+ }
+}
+
static void gfx_v7_0_config_init(struct amdgpu_device *adev)
{
adev->gfx.config.double_offchip_lds_buf = 1;
@@ -1968,6 +1986,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v7_0_init_compute_vmid(adev);
+ gfx_v7_0_init_gds_vmid(adev);
WREG32(mmSX_DEBUG_1, 0x20);
@@ -4239,7 +4258,7 @@ static int gfx_v7_0_late_init(void *handle)
static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
- u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
@@ -4316,7 +4335,6 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
@@ -4535,6 +4553,8 @@ static int gfx_v7_0_hw_init(void *handle)
gfx_v7_0_constants_init(adev);
+ /* init CSB */
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* init rlc */
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee1ccdcf2d30..fa245973de12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
- AMDGPU_GEM_DOMAIN_VRAM);
- if (!r)
- adev->gfx.rlc.clear_state_gpu_addr =
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- return r;
-}
-
-static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
-
- if (!adev->gfx.rlc.clear_state_obj)
- return;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- }
-}
-
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1710,7 +1677,7 @@ fail:
static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
- u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
int ret;
@@ -1850,7 +1817,6 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
@@ -2103,7 +2069,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
@@ -3750,6 +3716,24 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
+{
+ int vmid;
+
+ /*
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+ * access. Compute VMIDs should be enabled by FW for target VMIDs;
+ * the driver can enable them for graphics. VMID0 should maintain
+ * access so that HWS firmware can save/restore entries.
+ */
+ for (vmid = 1; vmid < 16; vmid++) {
+ WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
+ WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
+ WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
+ WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
+ }
+}
+
static void gfx_v8_0_config_init(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -3816,6 +3800,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v8_0_init_compute_vmid(adev);
+ gfx_v8_0_init_gds_vmid(adev);
mutex_lock(&adev->grbm_idx_mutex);
/*
@@ -3898,6 +3883,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
{
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32(mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
@@ -4572,8 +4558,11 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -4818,10 +4807,6 @@ static int gfx_v8_0_hw_init(void *handle)
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
- r = gfx_v8_0_csb_vram_pin(adev);
- if (r)
- return r;
-
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4939,8 +4924,6 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_err("rlc is busy, skip halt rlc\n");
amdgpu_gfx_rlc_exit_safe_mode(adev);
- gfx_v8_0_csb_vram_unpin(adev);
-
return 0;
}
@@ -6165,7 +6148,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
- /* EVENT_WRITE_EOP - flush caches, send int */
+ /* Workaround for cache flush problems. First send a dummy EOP
+ * event down the pipe with a sequence number one below the real one.
+ */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
+ amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+ amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+ /* Then send the real EOP event down the pipe:
+ * EVENT_WRITE_EOP - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
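The dummy EOP above uses DATA_SEL(1) with INT_SEL(0) and a stale (seq - 1) value, so it flushes the caches without raising an interrupt or completing the fence; only the real EOP that follows signals the fence. The gfx ring frame-size estimates later in this file grow from 8 to 12 dwords per fence to leave room for the extra packet.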
@@ -6450,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -6458,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6907,7 +6907,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
- 8 + /* FENCE for VM_FLUSH */
+ 12 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
the first COND_EXEC jump to the place just
@@ -6919,7 +6919,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
31 + /* DE_META */
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
- 8 + 8 + /* FENCE x2 */
+ 12 + 12 + /* FENCE x2 */
2, /* SWITCH_BUFFER */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 04b8ac4432c7..b33a4eb39193 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -36,10 +36,10 @@
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
+
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"
-#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
@@ -48,6 +48,8 @@
#include "amdgpu_ras.h"
+#include "gfx_v9_4.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -60,6 +62,9 @@
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define mmGCEA_PROBE_MAP 0x070c
+#define mmGCEA_PROBE_MAP_BASE_IDX 0
+
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
@@ -104,6 +109,397 @@ MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
+MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
+MODULE_FIRMWARE("amdgpu/renoir_me.bin");
+MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
+MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
+MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
+
+#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
+#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
+#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
+#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX 0
+#define mmTCP_CHAN_STEER_2_ARCT 0x0b09
+#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX 0
+#define mmTCP_CHAN_STEER_3_ARCT 0x0b0a
+#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX 0
+#define mmTCP_CHAN_STEER_4_ARCT 0x0b0b
+#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX 0
+#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
+#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+
+enum ta_ras_gfx_subblock {
+ /*CPC*/
+ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
+ TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
+ TA_RAS_BLOCK__GFX_CPC_UCODE,
+ TA_RAS_BLOCK__GFX_DC_STATE_ME1,
+ TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
+ TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
+ TA_RAS_BLOCK__GFX_DC_STATE_ME2,
+ TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
+ TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+ TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+ /* CPF*/
+ TA_RAS_BLOCK__GFX_CPF_INDEX_START,
+ TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
+ TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
+ TA_RAS_BLOCK__GFX_CPF_TAG,
+ TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
+ /* CPG*/
+ TA_RAS_BLOCK__GFX_CPG_INDEX_START,
+ TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
+ TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
+ TA_RAS_BLOCK__GFX_CPG_TAG,
+ TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
+ /* GDS*/
+ TA_RAS_BLOCK__GFX_GDS_INDEX_START,
+ TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
+ TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
+ TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+ TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+ /* SPI*/
+ TA_RAS_BLOCK__GFX_SPI_SR_MEM,
+ /* SQ*/
+ TA_RAS_BLOCK__GFX_SQ_INDEX_START,
+ TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
+ TA_RAS_BLOCK__GFX_SQ_LDS_D,
+ TA_RAS_BLOCK__GFX_SQ_LDS_I,
+ TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
+ TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
+ /* SQC (3 ranges)*/
+ TA_RAS_BLOCK__GFX_SQC_INDEX_START,
+ /* SQC range 0*/
+ TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
+ TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
+ TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
+ TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1*/
+ TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
+ TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+ TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2*/
+ TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
+ TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
+ TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
+ /* TA*/
+ TA_RAS_BLOCK__GFX_TA_INDEX_START,
+ TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
+ TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
+ TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
+ TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
+ TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
+ TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
+ /* TCA*/
+ TA_RAS_BLOCK__GFX_TCA_INDEX_START,
+ TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
+ TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+ TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges)*/
+ TA_RAS_BLOCK__GFX_TCC_INDEX_START,
+ /* TCC range 0*/
+ TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
+ TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
+ TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+ TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1*/
+ TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
+ TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
+ TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+ TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
+ TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2*/
+ TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
+ TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
+ TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
+ TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
+ TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
+ TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
+ TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
+ TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
+ TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3*/
+ TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
+ TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
+ TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
+ TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4*/
+ TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
+ TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
+ TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
+ TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
+ /* TCI*/
+ TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
+ /* TCP*/
+ TA_RAS_BLOCK__GFX_TCP_INDEX_START,
+ TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
+ TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
+ TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
+ TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
+ TA_RAS_BLOCK__GFX_TCP_DB_RAM,
+ TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
+ TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+ TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+ /* TD*/
+ TA_RAS_BLOCK__GFX_TD_INDEX_START,
+ TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
+ TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
+ TA_RAS_BLOCK__GFX_TD_CS_FIFO,
+ TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges)*/
+ TA_RAS_BLOCK__GFX_EA_INDEX_START,
+ /* EA range 0*/
+ TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
+ TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
+ TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
+ TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
+ TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
+ TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
+ TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
+ TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
+ TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+ TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1*/
+ TA_RAS_BLOCK__GFX_EA_INDEX1_START,
+ TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
+ TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
+ TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
+ TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
+ TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
+ TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
+ TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+ TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2*/
+ TA_RAS_BLOCK__GFX_EA_INDEX2_START,
+ TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
+ TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
+ TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
+ TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+ TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+ TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank*/
+ TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker*/
+ TA_RAS_BLOCK__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache*/
+ TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache*/
+ TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
+ TA_RAS_BLOCK__GFX_MAX
+};
+
+struct ras_gfx_subblock {
+ unsigned char *name;
+ int ta_subblock;
+ int hw_supported_error_type;
+ int sw_supported_error_type;
+};
+
+#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
+ [AMDGPU_RAS_BLOCK__##subblock] = { \
+ #subblock, \
+ TA_RAS_BLOCK__##subblock, \
+ ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
+ (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
+ }
+
+static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
+ AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
+};
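For reference, AMDGPU_RAS_SUB_BLOCK() packs the four hardware-supported and four software-supported error-type flags into two bitfields. Expanding the first table entry by hand (purely illustrative):

	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1)
	/* expands to */
	[AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH] = {
		"GFX_CPC_SCRATCH",
		TA_RAS_BLOCK__GFX_CPC_SCRATCH,
		0xe,	/* (0) | (1 << 1) | (1 << 2) | (1 << 3) */
		0x6,	/* (1 << 1) | (0 << 3) | (0) | (1 << 2) */
	},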
+
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
@@ -123,9 +519,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -188,9 +584,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -227,6 +623,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};
+static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
+};
+
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
@@ -266,9 +678,23 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -310,19 +736,150 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
+static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
+static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if);
+
+static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ /* vmid_mask:0, queue_type:0 (KIQ) */
+ PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+ amdgpu_ring_write(kiq_ring,
+ lower_32_bits(queue_mask)); /* queue mask lo */
+ amdgpu_ring_write(kiq_ring,
+ upper_32_bits(queue_mask)); /* queue mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* oac mask */
+ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+ PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+ PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+ /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+ /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+ PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+ /* num_queues: must be 1 */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
+static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(action) |
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+ if (action == PREEMPT_QUEUES_NO_UNMAP) {
+ amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, seq);
+ } else {
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+}
+
+static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+ PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+ PACKET3_QUERY_STATUS_COMMAND(2));
+ /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+ PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
+ .kiq_set_resources = gfx_v9_0_kiq_set_resources,
+ .kiq_map_queues = gfx_v9_0_kiq_map_queues,
+ .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
+ .kiq_query_status = gfx_v9_0_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .set_resources_size = 8,
+ .map_queues_size = 7,
+ .unmap_queues_size = 6,
+ .query_status_size = 7,
+ .invalidate_tlbs_size = 2,
+};
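The *_size fields record how many dwords each builder emits so callers can reserve ring space up front; counting the amdgpu_ring_write() calls above gives 8 for set_resources, 7 for map_queues, 6 for unmap_queues, 7 for query_status and 2 for invalidate_tlbs, matching the table.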
+
+static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+}
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- if (!amdgpu_virt_support_skip_setting(adev)) {
- soc15_program_register_sequence(adev,
- golden_settings_gc_9_0,
- ARRAY_SIZE(golden_settings_gc_9_0));
- soc15_program_register_sequence(adev,
- golden_settings_gc_9_0_vg10,
- ARRAY_SIZE(golden_settings_gc_9_0_vg10));
- }
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_0,
+ ARRAY_SIZE(golden_settings_gc_9_0));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_0_vg10,
+ ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
case CHIP_VEGA12:
soc15_program_register_sequence(adev,
@@ -340,6 +897,11 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
+ case CHIP_ARCTURUS:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_1_arct,
+ ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
+ break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
@@ -352,12 +914,18 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
+ case CHIP_RENOIR:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_1_rn,
+ ARRAY_SIZE(golden_settings_gc_9_1_rn));
+ return; /* for renoir, don't need common golden settings */
default:
break;
}
- soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
- (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
+ if (adev->asic_type != CHIP_ARCTURUS)
+ soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -538,6 +1106,12 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
+ if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ (adev->gfx.mec_feature_version < 46) ||
+ (adev->gfx.pfp_fw_version < 0x000000b7) ||
+ (adev->gfx.pfp_feature_version < 46))
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
+
switch (adev->asic_type) {
case CHIP_VEGA10:
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -588,66 +1162,80 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
}
}
+struct amdgpu_gfxoff_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+ const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
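gfx_v9_0_should_disable_gfxoff() walks the quirk table until its all-zero sentinel and matches on the full PCI vendor/device/subsystem/revision tuple; the single entry (0x1002/0x15dd, revision 0xc8, per the bugzilla reference) makes gfx_v9_0_check_if_need_gfxoff() below clear PP_GFXOFF_MASK for that board.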
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
+ if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- break;
- if ((adev->gfx.rlc_fw_version != 106 &&
- adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
- (adev->gfx.rlc_feature_version < 1) ||
- !adev->gfx.rlc.is_rlc_v2_1)
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ ((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
+ break;
+ case CHIP_RENOIR:
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
break;
}
}
-static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
{
- const char *chip_name;
char fw_name[30];
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr;
- const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
- uint16_t version_major;
- uint16_t version_minor;
- uint32_t smu_version;
-
- DRM_DEBUG("\n");
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- chip_name = "vega10";
- break;
- case CHIP_VEGA12:
- chip_name = "vega12";
- break;
- case CHIP_VEGA20:
- chip_name = "vega20";
- break;
- case CHIP_RAVEN:
- if (adev->rev_id >= 8)
- chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- default:
- BUG();
- }
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
@@ -682,6 +1270,58 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+ info->fw = adev->gfx.pfp_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+ info->fw = adev->gfx.me_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+ info->fw = adev->gfx.ce_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "gfx9: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ }
+ return err;
+}
+
+static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
+{
+ char fw_name[30];
+ int err;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ unsigned int *tmp = NULL;
+ unsigned int i = 0;
+ uint16_t version_major;
+ uint16_t version_minor;
+ uint32_t smu_version;
+
/*
* For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
* instead of picasso_rlc.bin.
@@ -756,57 +1396,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
if (adev->gfx.rlc.is_rlc_v2_1)
gfx_v9_0_init_rlc_ext_microcode(adev);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
- if (err)
- goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
- } else {
- err = 0;
- adev->gfx.mec2_fw = NULL;
- }
-
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
info->fw = adev->gfx.rlc_fw;
@@ -836,7 +1426,58 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
}
+ }
+
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "gfx9: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ }
+ return err;
+}
+
+static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
+{
+ char fw_name[30];
+ int err;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (!err) {
+ err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ } else {
+ err = 0;
+ adev->gfx.mec2_fw = NULL;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
info->fw = adev->gfx.mec_fw;
@@ -859,13 +1500,19 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
- }
+ /* TODO: Determine if MEC2 JT FW loading can be removed
+  * for all GFX v9 ASICs and above */
+ if (adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_RENOIR) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
+ info->fw = adev->gfx.mec2_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
+ PAGE_SIZE);
+ }
+ }
}
out:
@@ -875,14 +1522,6 @@ out:
dev_err(adev->dev,
"gfx9: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
release_firmware(adev->gfx.mec2_fw);
@@ -891,6 +1530,59 @@ out:
return err;
}
+static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ int r;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ chip_name = "vega10";
+ break;
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
+ case CHIP_VEGA20:
+ chip_name = "vega20";
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 8)
+ chip_name = "raven2";
+ else if (adev->pdev->device == 0x15d8)
+ chip_name = "picasso";
+ else
+ chip_name = "raven";
+ break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
+ case CHIP_RENOIR:
+ chip_name = "renoir";
+ break;
+ default:
+ BUG();
+ }
+
+ /* No CPG in Arcturus */
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
+ if (r)
+ return r;
+ }
+
+ r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
+ if (r)
+ return r;
+
+ r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
+ if (r)
+ return r;
+
+ return r;
+}
+
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
@@ -1128,7 +1820,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
r = amdgpu_gfx_rlc_init_cpt(adev);
@@ -1150,39 +1842,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
- AMDGPU_GEM_DOMAIN_VRAM);
- if (!r)
- adev->gfx.rlc.clear_state_gpu_addr =
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- return r;
-}
-
-static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
-
- if (!adev->gfx.rlc.clear_state_obj)
- return;
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- }
-}
-
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1324,7 +1983,20 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .ras_error_inject = &gfx_v9_0_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count
+};
+
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1377,6 +2049,27 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_ARCTURUS:
+ adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22014042;
+ break;
+ case CHIP_RENOIR:
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22010042;
+ break;
default:
BUG();
break;
@@ -1422,190 +2115,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
- struct amdgpu_ngg_buf *ngg_buf,
- int size_se,
- int default_size_se)
-{
- int r;
-
- if (size_se < 0) {
- dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
- return -EINVAL;
- }
- size_se = size_se ? size_se : default_size_se;
-
- ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
- r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &ngg_buf->bo,
- &ngg_buf->gpu_addr,
- NULL);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
- return r;
- }
- ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
-
- return r;
-}
-
-static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < NGG_BUF_MAX; i++)
- amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
- &adev->gfx.ngg.buf[i].gpu_addr,
- NULL);
-
- memset(&adev->gfx.ngg.buf[0], 0,
- sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
-
- adev->gfx.ngg.init = false;
-
- return 0;
-}
-
-static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (!amdgpu_ngg || adev->gfx.ngg.init == true)
- return 0;
-
- /* GDS reserve memory: 64 bytes alignment */
- adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
- adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
- adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
-
- /* Primitive Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
- amdgpu_prim_buf_per_se,
- 64 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Primitive Buffer\n");
- goto err;
- }
-
- /* Position Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
- amdgpu_pos_buf_per_se,
- 256 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Position Buffer\n");
- goto err;
- }
-
- /* Control Sideband */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
- amdgpu_cntl_sb_buf_per_se,
- 256);
- if (r) {
- dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
- goto err;
- }
-
- /* Parameter Cache, not created by default */
- if (amdgpu_param_buf_per_se <= 0)
- goto out;
-
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
- amdgpu_param_buf_per_se,
- 512 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Parameter Cache\n");
- goto err;
- }
-
-out:
- adev->gfx.ngg.init = true;
- return 0;
-err:
- gfx_v9_0_ngg_fini(adev);
- return r;
-}
-
-static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
- int r;
- u32 data, base;
-
- if (!amdgpu_ngg)
- return 0;
-
- /* Program buffer size */
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_POS].size >> 8);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
-
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
-
- /* Program buffer base address */
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
-
- /* Clear GDS reserved memory */
- r = amdgpu_ring_alloc(ring, 17);
- if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
- ring->name, r);
- return r;
- }
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- (adev->gds.gds_size +
- adev->gfx.ngg.gds_reserve_size));
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
- amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
- PACKET3_DMA_DATA_DST_SEL(1) |
- PACKET3_DMA_DATA_SRC_SEL(2)));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
- adev->gfx.ngg.gds_reserve_size);
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
-
- amdgpu_ring_commit(ring);
-
- return 0;
-}
-
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
@@ -1653,6 +2162,8 @@ static int gfx_v9_0_sw_init(void *handle)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
adev->gfx.mec.num_mec = 2;
break;
default:
@@ -1771,10 +2282,6 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ngg_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1784,19 +2291,7 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
- adev->gfx.ras_if) {
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_gfx_ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -1804,13 +2299,12 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
- gfx_v9_0_ngg_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
@@ -1929,6 +2423,40 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
+{
+ int vmid;
+
+ /*
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+ * access. Compute VMIDs should be enabled by FW for target VMIDs;
+ * the driver can enable them for graphics. VMID0 should maintain
+ * access so that HWS firmware can save/restore entries.
+ */
+ for (vmid = 1; vmid < 16; vmid++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
+ }
+}
+
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+ DISABLE_BARRIER_WAITCNT, 1);
+ WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -1945,7 +2473,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
if (i == 0) {
@@ -1973,6 +2501,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v9_0_init_compute_vmid(adev);
+ gfx_v9_0_init_gds_vmid(adev);
+ gfx_v9_0_init_sq_config(adev);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -2028,6 +2558,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
adev->gfx.rlc.clear_state_gpu_addr >> 32);
@@ -2357,7 +2888,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- gfx_v9_1_init_rlc_save_restore_list(adev);
+ if (adev->asic_type == CHIP_VEGA12 ||
+ (adev->asic_type == CHIP_RAVEN &&
+ adev->rev_id >= 8))
+ gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -2768,74 +3302,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
-static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- uint64_t queue_mask = 0;
- int r, i;
-
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
- continue;
-
- /* This situation may be hit in the future if a new HW
- * generation exposes more than 64 queues. If so, the
- * definition of queue_mask needs updating */
- if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
- break;
- }
-
- queue_mask |= (1ull << i);
- }
-
- r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- /* set resources */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
- amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
- PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
- amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
- amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* oac mask */
- amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
- uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
- /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
- PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
- PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
- PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
- PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
- PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
- PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
- PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
- amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
- }
-
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
-
- return r;
-}
-
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -2849,6 +3315,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
mqd->dynamic_cu_mask_addr_lo =
@@ -2968,8 +3438,11 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3238,7 +3711,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = gfx_v9_0_kiq_kcq_enable(adev);
+ r = amdgpu_gfx_enable_kcq(adev);
done:
return r;
}
@@ -3252,10 +3725,12 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- /* legacy firmware loading */
- r = gfx_v9_0_cp_gfx_load_microcode(adev);
- if (r)
- return r;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* legacy firmware loading */
+ r = gfx_v9_0_cp_gfx_load_microcode(adev);
+ if (r)
+ return r;
+ }
r = gfx_v9_0_cp_compute_load_microcode(adev);
if (r)
@@ -3266,18 +3741,22 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- r = gfx_v9_0_cp_gfx_resume(adev);
- if (r)
- return r;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ r = gfx_v9_0_cp_gfx_resume(adev);
+ if (r)
+ return r;
+ }
r = gfx_v9_0_kcq_resume(adev);
if (r)
return r;
- ring = &adev->gfx.gfx_ring[0];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ring = &adev->gfx.gfx_ring[0];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
@@ -3289,9 +3768,27 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->asic_type != CHIP_ARCTURUS)
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
+ adev->df.hash_status.hash_64k);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
+ adev->df.hash_status.hash_2m);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
+ adev->df.hash_status.hash_1g);
+ WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
+}
+
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
- gfx_v9_0_cp_gfx_enable(adev, enable);
+ if (adev->asic_type != CHIP_ARCTURUS)
+ gfx_v9_0_cp_gfx_enable(adev, enable);
gfx_v9_0_cp_compute_enable(adev, enable);
}
@@ -3300,13 +3797,12 @@ static int gfx_v9_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v9_0_init_golden_registers(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfx_v9_0_init_golden_registers(adev);
gfx_v9_0_constants_init(adev);
- r = gfx_v9_0_csb_vram_pin(adev);
- if (r)
- return r;
+ gfx_v9_0_init_tcp_config(adev);
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
@@ -3316,40 +3812,6 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ngg_en(adev);
- if (r)
- return r;
-
- return r;
-}
-
-static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
-{
- int r, i;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-
- r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
- if (r)
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
- PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
- PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- }
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ disable failed\n");
-
return r;
}
@@ -3361,8 +3823,10 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
- /* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ /* DF freeze and kcq disable will fail */
+ if (!amdgpu_ras_intr_triggered())
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ amdgpu_gfx_disable_kcq(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3391,8 +3855,6 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
adev->gfx.rlc.funcs->stop(adev);
- gfx_v9_0_csb_vram_unpin(adev);
-
return 0;
}
@@ -3466,8 +3928,9 @@ static int gfx_v9_0_soft_reset(void *handle)
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
- /* Disable GFX parsing/prefetching */
- gfx_v9_0_cp_gfx_enable(adev, false);
+ if (adev->asic_type != CHIP_ARCTURUS)
+ /* Disable GFX parsing/prefetching */
+ gfx_v9_0_cp_gfx_enable(adev, false);
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
@@ -3497,9 +3960,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ uint32_t tmp, lsb, msb, i = 0;
+ do {
+ if (i != 0)
+ udelay(1);
+ tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
+ msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ i++;
+ } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
+ clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
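
The SR-IOV branch above assembles a 64-bit timestamp from two 32-bit halves and re-reads the MSB register to catch a low-word rollover between the two reads. A minimal sketch of that read pattern, with hypothetical accessor callbacks standing in for the RLC_REFCLOCK_TIMESTAMP reads (the driver additionally bounds the loop with usec_timeout and a udelay):

	/* Re-read the high half until it is stable; if the low word rolled
	 * over between the two MSB reads, the halves would otherwise be
	 * inconsistent.  read_lsb()/read_msb() are illustrative stand-ins. */
	static uint64_t read_split_counter(uint32_t (*read_lsb)(void),
					   uint32_t (*read_msb)(void))
	{
		uint32_t msb, lsb, check;

		do {
			msb = read_msb();
			lsb = read_lsb();
			check = read_msb();
		} while (msb != check);

		return ((uint64_t)msb << 32) | lsb;
	}
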
@@ -3567,33 +4043,61 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+/* When the register arrays below are changed, please also update gpr_reg_size
+ * and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds
+ * to cover all gfx9 ASICs */
static const struct soc15_reg_entry vgpr_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
+static const struct soc15_reg_entry sgpr1_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};
-static const struct soc15_reg_entry sgpr_init_regs[] = {
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+static const struct soc15_reg_entry sgpr2_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -3614,6 +4118,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
@@ -3626,6 +4131,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
+ { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -3633,6 +4139,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
int i, r;
+ /* only support when RAS is enabled */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 7);
if (r) {
DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
@@ -3676,10 +4186,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- int r, i, j, k;
+ int r, i;
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
+ int compute_dim_x = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se;
+ int sgpr_work_group_size = 5;
+ int gpr_reg_size = compute_dim_x / 16 + 6;
+
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
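
As a rough sizing illustration for this workaround (the CU configuration numbers here are assumed for illustration, not taken from this patch): with 4 shader engines, 16 CUs per SH and one SH per SE, the derived values work out as follows.

	/* Illustrative arithmetic only; the config values are assumptions. */
	int max_shader_engines = 4, max_cu_per_sh = 16, max_sh_per_se = 1;
	int compute_dim_x = max_shader_engines * max_cu_per_sh * max_sh_per_se; /* 64 */
	int gpr_reg_size = compute_dim_x / 16 + 6;     /* 10 SET_SH_REG writes per pass */
	int sgpr_dispatch_x = compute_dim_x / 2 * 5;   /* 160 threadgroups per SGPR pass */
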
@@ -3689,9 +4205,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
return 0;
total_size =
- ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
+ total_size +=
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
total_size +=
- ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
@@ -3718,7 +4236,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* VGPR */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
- PACKET3_SET_SH_REG_START;
@@ -3734,7 +4252,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -3744,13 +4262,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
- /* SGPR */
+ /* SGPR1 */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
@@ -3762,7 +4280,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
+ ib.ptr[ib.length_dw++] = 1; /* y */
+ ib.ptr[ib.length_dw++] = 1; /* z */
+ ib.ptr[ib.length_dw++] =
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
+
+ /* write CS partial flush packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+
+ /* SGPR2 */
+ /* write the register state for the compute dispatch */
+ for (i = 0; i < gpr_reg_size; i++) {
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
+ }
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
+ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+
+ /* write dispatch packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -3786,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- /* read back registers to clear the counters */
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
- gfx_v9_0_select_se_sh(adev, j, 0x0, k);
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
- }
- }
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ gfx_v9_0_clear_ras_edc_counter(adev);
+ break;
+ case CHIP_ARCTURUS:
+ gfx_v9_4_clear_ras_edc_counter(adev);
+ break;
+ default:
+ break;
}
- WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
- mutex_unlock(&adev->grbm_idx_mutex);
fail:
amdgpu_ib_free(adev, &ib, NULL);
@@ -3810,8 +4355,12 @@ static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->gfx.num_gfx_rings = 0;
+ else
+ adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
gfx_v9_0_set_gds_init(adev);
@@ -3820,108 +4369,33 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
-
static int gfx_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .cb = gfx_v9_0_process_ras_data_cb,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__GFX,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "gfx",
- };
int r;
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
+ /*
+ * Temporary workaround: on several cards the CP firmware fails to
+ * update its read pointer when CPDMA writes the GDS clearing
+ * operation during the suspend/resume sequence, so limit this
+ * operation to the cold-boot sequence.
+ */
+ if (!adev->in_suspend) {
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
+ if (r)
+ return r;
}
- r = gfx_v9_0_do_edc_gds_workarounds(adev);
- if (r)
- return r;
-
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
if (r)
return r;
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ r = amdgpu_gfx_ras_late_init(adev);
if (r)
- goto irq;
+ return r;
return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
}
static int gfx_v9_0_late_init(void *handle)
@@ -3992,7 +4466,8 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
} else {
gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
- gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
amdgpu_gfx_rlc_exit_safe_mode(adev);
@@ -4097,6 +4572,9 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return;
+
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
@@ -4162,8 +4640,12 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ else
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
@@ -4231,10 +4713,11 @@ static int gfx_v9_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
if (!enable) {
amdgpu_gfx_off_ctrl(adev, false);
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
@@ -4289,8 +4772,10 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
gfx_v9_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4307,12 +4792,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
@@ -4321,23 +4806,25 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
- data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
- /* AMD_CG_SUPPORT_GFX_3D_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
- *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* AMD_CG_SUPPORT_GFX_3D_CGCG */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
+ if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
- /* AMD_CG_SUPPORT_GFX_3D_CGLS */
- if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
+ /* AMD_CG_SUPPORT_GFX_3D_CGLS */
+ if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
+ }
}
static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
@@ -4379,7 +4866,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4399,8 +4886,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -4801,6 +5288,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4809,9 +5297,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
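
The COPY_DATA packet emitted above lands the register value in the device write-back page at kiq->reg_val_offs. A simplified sketch of the read-back side, under the assumption that the caller has already submitted the packet and waited for its fence (locking and timeout handling omitted):

	static uint32_t kiq_read_back_value(struct amdgpu_device *adev)
	{
		struct amdgpu_kiq *kiq = &adev->gfx.kiq;

		/* reg_val_offs is a dword index into the write-back page */
		return adev->wb.wb[kiq->reg_val_offs];
	}
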
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5132,31 +5620,788 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
+ },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
+ },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
+ 0, 0
+ },
+ { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
+ 0, 0
+ },
+ { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
+ },
+ { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
+ },
+ { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
+ 0, 0
+ },
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
+ },
+ { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
+ },
+ { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
+ 0, 0
+ },
+ { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
+ },
+ { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
+ },
+ { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
+ },
+ { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
+ },
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
+ },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
+ },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
+ },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
+ },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
+ },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
+ },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ 0, 0
+ },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
+ },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
+ },
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
+ },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
+ },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
+ },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
+ },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
+ },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
+ },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
+ },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
+ },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
+ },
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
+ },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
+ },
+ { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ 0, 0
+ }
+};
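+
+/*
+ * Each entry above names a sub-block, its EDC counter register, and the SEC
+ * and DED field descriptors (SOC15_REG_FIELD() is assumed to expand to the
+ * field's MASK/__SHIFT pair, as defined in soc15.h). Entries ending in
+ * "0, 0" are SED-only memories with no DED field; the zero mask simply
+ * yields a zero DED count in gfx_v9_0_ras_error_count() below.
+ */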
+
+static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if)
+{
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+ int ret;
+ struct ta_ras_trigger_error_input block_info = { 0 };
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
+ return -EINVAL;
+
+ if (!ras_gfx_subblocks[info->head.sub_block_index].name)
+ return -EPERM;
+
+ if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
+ info->head.type)) {
+ DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
+ ras_gfx_subblocks[info->head.sub_block_index].name,
+ info->head.type);
+ return -EPERM;
+ }
+
+ if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
+ info->head.type)) {
+ DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
+ ras_gfx_subblocks[info->head.sub_block_index].name,
+ info->head.type);
+ return -EPERM;
+ }
+
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+ block_info.sub_block_index =
+ ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+ block_info.address = info->address;
+ block_info.value = info->value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return ret;
+}
+
+static const char *vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+};
+
+static const char *vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_LOG_FIFO",
+};
+
+static const char *atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char *atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = (data & 0x00018000L) >> 0xf;
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+
+ return 0;
+}
+
+static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id, uint32_t value,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
+ if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_0_ras_fields[i].seg != reg->seg ||
+ gfx_v9_0_ras_fields[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value &
+ gfx_v9_0_ras_fields[i].sec_count_mask) >>
+ gfx_v9_0_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ gfx_v9_0_ras_fields[i].name,
+ se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ gfx_v9_0_ras_fields[i].ded_count_mask) >>
+ gfx_v9_0_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ gfx_v9_0_ras_fields[i].name,
+ se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
{
- /* TODO ue will trigger an interrupt. */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev, 0);
- return AMDGPU_RAS_UE;
+ int i, j, k;
+
+ /* read back registers to clear the counters */
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
}
-static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
+static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
+ uint32_t reg_value;
- if (!ras_if)
- return 0;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0, k);
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+ if (reg_value)
+ gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
+ }
+ }
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
- ih_data.head = *ras_if;
+ gfx_v9_0_query_utc_edc_status(adev, err_data);
- DRM_ERROR("CP ECC ERROR IRQ\n");
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -5183,7 +6428,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
@@ -5234,7 +6479,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -5269,7 +6514,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -5323,7 +6568,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
.set = gfx_v9_0_set_cp_ecc_error_state,
- .process = gfx_v9_0_cp_ecc_error_irq,
+ .process = amdgpu_gfx_cp_ecc_error_irq,
};
@@ -5349,6 +6594,8 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
@@ -5366,6 +6613,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
adev->gds.gds_size = 0x10000;
break;
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
adev->gds.gds_size = 0x1000;
break;
default:
@@ -5387,6 +6635,9 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
break;
+ case CHIP_ARCTURUS:
+ adev->gds.gds_compute_max_wave_id = 0xfff;
+ break;
default:
/* this really depends on the chip */
adev->gds.gds_compute_max_wave_id = 0x7ff;
@@ -5431,12 +6682,21 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
- unsigned disable_masks[4 * 2];
+ unsigned disable_masks[4 * 4];
if (!adev || !cu_info)
return -EINVAL;
- amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+ /*
+ * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
+ */
+ if (adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_sh_per_se > 16)
+ return -EINVAL;
+
+ amdgpu_gfx_parse_disable_cu(disable_masks,
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -5445,11 +6705,23 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
ao_bitmap = 0;
counter = 0;
gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
- if (i < 4 && j < 2)
- gfx_v9_0_set_user_cu_inactive_bitmap(
- adev, disable_masks[i * 2 + j]);
+ gfx_v9_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
- cu_info->bitmap[i][j] = bitmap;
+
+ /*
+ * The bitmap (and ao_cu_bitmap) in the cu_info structure is a
+ * 4x4 array, which suits Vega ASICs with their 4*2 SE/SH
+ * layout.
+ * But for Arcturus, the SE/SH layout changes to 8*1.
+ * To minimize the impact, we fold the extra SEs into the
+ * current bitmap array as below:
+ * SE4,SH0 --> bitmap[0][1]
+ * SE5,SH0 --> bitmap[1][1]
+ * SE6,SH0 --> bitmap[2][1]
+ * SE7,SH0 --> bitmap[3][1]
+ */
+ cu_info->bitmap[i % 4][j + i / 4] = bitmap;
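+ /*
+ * Worked example of the remapping above: Arcturus SE5/SH0 gives
+ * i = 5, j = 0, so bitmap[5 % 4][0 + 5 / 4] == bitmap[1][1],
+ * matching the SE5,SH0 row in the comment table.
+ */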
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {
@@ -5462,7 +6734,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
active_cu_number += counter;
if (i < 2 && j < 2)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
- cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
+ cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
}
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 000000000000..f099f13d7f1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
+
+#include "gc/gc_9_4_1_offset.h"
+#include "gc/gc_9_4_1_sh_mask.h"
+#include "soc15_common.h"
+
+#include "gfx_v9_4.h"
+#include "amdgpu_ras.h"
+
+static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
+ /* CPC */
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
+ /* DC */
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
+ /* CPF */
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
+ /* GDS */
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+ /* SPI */
+ { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
+ /* SQ */
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+ /* SQC */
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
+ /* TA */
+ { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
+ /* TCA */
+ { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
+ /* TCC */
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
+ /* TCI */
+ { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
+ /* TCP */
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 },
+ /* TD */
+ { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 },
+ /* GCEA */
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 },
+ /* RLC */
+ { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 },
+};
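+
+/*
+ * Note: assuming the same soc15_reg_entry layout used by the gfx_v9_0 table
+ * earlier in this patch, the last two initializers of each entry are the SE
+ * count and per-SE instance count, which the query path is expected to walk
+ * via gfx_v9_4_select_se_sh() just as the gfx_v9_0 loops walk se_num and
+ * instance.
+ */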
+
+static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+ instance);
+
+ if (se_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
+}
+
+static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = {
+ /* CPC */
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+ { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_RESTORE_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+ { "CPC_DC_RESTORE_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+ /* CPF */
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+ { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+ /* GDS */
+ { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+ { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE0_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE1_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE2_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE3_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+
+ /* SPI */
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+ { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+ { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+ { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) },
+ { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+ /* SQ */
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+ /* SQC */
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_HIT_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ DATA_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_HIT_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ DATA_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+
+ /* TA */
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+
+ /* TCA */
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+ /* TCC */
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+
+ /* TCI */
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+ /* TCP */
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+ /* TD */
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+ /* EA */
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+ { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+ { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+ { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+ { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+ { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+ { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+ { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+ { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+ { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+
+ /* RLC */
+ { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+ { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+ { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+ { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+ { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+ { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+ { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+};
+
+static const char * const vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+ "UTC_VML2_IFIFO_GROUP0",
+ "UTC_VML2_IFIFO_GROUP1",
+ "UTC_VML2_IFIFO_GROUP2",
+ "UTC_VML2_IFIFO_GROUP3",
+ "UTC_VML2_IFIFO_GROUP4",
+ "UTC_VML2_IFIFO_GROUP5",
+ "UTC_VML2_IFIFO_GROUP6",
+ "UTC_VML2_IFIFO_GROUP7",
+ "UTC_VML2_IFIFO_GROUP8",
+ "UTC_VML2_IFIFO_GROUP9",
+ "UTC_VML2_IFIFO_GROUP10",
+ "UTC_VML2_IFIFO_GROUP11",
+ "UTC_VML2_IFIFO_GROUP12",
+ "UTC_VML2_IFIFO_GROUP13",
+ "UTC_VML2_IFIFO_GROUP14",
+ "UTC_VML2_IFIFO_GROUP15",
+ "UTC_VML2_IFIFO_GROUP16",
+ "UTC_VML2_IFIFO_GROUP17",
+ "UTC_VML2_IFIFO_GROUP18",
+ "UTC_VML2_IFIFO_GROUP19",
+ "UTC_VML2_IFIFO_GROUP20",
+ "UTC_VML2_IFIFO_GROUP21",
+ "UTC_VML2_IFIFO_GROUP22",
+ "UTC_VML2_IFIFO_GROUP23",
+ "UTC_VML2_IFIFO_GROUP24",
+};
+
+static const char * const vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_ARADDRS",
+ "UTC_VML2_RDIF_LOG_FIFO",
+ "UTC_VML2_QUEUE_REQ",
+ "UTC_VML2_QUEUE_RET",
+};
+
+static const char * const utcl2_router_mems[] = {
+ "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0",
+ "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1",
+ "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2",
+ "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3",
+ "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4",
+ "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5",
+ "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6",
+ "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7",
+ "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8",
+ "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9",
+ "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10",
+ "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11",
+ "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12",
+ "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13",
+ "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14",
+ "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15",
+ "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16",
+ "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17",
+ "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18",
+ "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19",
+ "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20",
+ "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21",
+ "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22",
+ "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23",
+ "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24",
+};
+
+static const char * const atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char * const atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
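+/*
+ * Read the SEC (correctable) and DED (uncorrectable) error counts of the
+ * UTC memories (VML2, VML2 walker, UTCL2 router, ATC L2 2M/4K caches) by
+ * stepping each instance through its *_INDEX register, and accumulate the
+ * totals into err_data.
+ */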
+static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ utcl2_router_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ utcl2_router_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+
+ sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_2m_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+
+ sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+
+ return 0;
+}
+
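+/*
+ * Match the given EDC counter register against the gfx_v9_4_ras_fields
+ * table and extract its SEC/DED counts with the per-field mask and shift,
+ * adding them to the running totals.
+ */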
+static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id,
+ uint32_t value, uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) {
+ if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_4_ras_fields[i].seg != reg->seg ||
+ gfx_v9_4_ras_fields[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
+ gfx_v9_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
+ gfx_v9_4_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
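+/*
+ * Walk every GFX EDC counter register across all shader engines and
+ * instances under grbm_idx_mutex, accumulate the SEC/DED counts into the
+ * RAS error data, then add the UTC memory counters on top.
+ */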
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_select_se_sh(adev, j, 0, k);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_edc_counter_regs[i]));
+ if (reg_value)
+ gfx_v9_4_ras_error_count(
+ &gfx_v9_4_edc_counter_regs[i],
+ j, k, reg_value, &sec_count,
+ &ded_count);
+ }
+ }
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v9_4_query_utc_edc_status(adev, err_data);
+
+ return 0;
+}
+
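+/*
+ * Reset the GFX and UTC EDC error counters by selecting and reading back
+ * every counter instance (the counters are expected to clear on read).
+ */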
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+ int i, j, k;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+ }
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+}
+
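+/*
+ * Package the requested error injection into a TA trigger-error command
+ * and hand it to the PSP while holding grbm_idx_mutex.
+ */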
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+ int ret;
+ struct ta_ras_trigger_error_input block_info = { 0 };
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+ block_info.sub_block_index = info->head.sub_block_index;
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+ block_info.address = info->address;
+ block_info.value = info->value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
new file mode 100644
index 000000000000..2e3f6f755ad4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_H__
+#define __GFX_V9_4_H__
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if);
+
+#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 15986748f59f..1a2f18b908fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -75,40 +75,45 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.fb_end >> 18) + 0x1,
- adev->gmc.agp_end >> 18));
- else
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
-
- /* Program "protection fault". */
- WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page_addr >> 44));
-
- WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ /*
+ * Raven2 has a HW issue that prevents it from using the vram that
+ * lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
+ * raise the system aperture high address by one to avoid the VM
+ * fault and hardware hang.
+ */
+ WREG32_SOC15_RLC(GC, 0,
+ mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max((adev->gmc.fb_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
+ else
+ WREG32_SOC15_RLC(
+ GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ }
}
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
@@ -178,6 +183,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -262,7 +269,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
/*
* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
* VF copy registers so vbios post doesn't program them, for
@@ -278,10 +285,12 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_init_gart_aperture_regs(adev);
gfxhub_v1_0_init_system_aperture_regs(adev);
gfxhub_v1_0_init_tlb_regs(adev);
- gfxhub_v1_0_init_cache_regs(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_0_init_cache_regs(adev);
gfxhub_v1_0_enable_system_domain(adev);
- gfxhub_v1_0_disable_identity_aperture(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_0_disable_identity_aperture(adev);
gfxhub_v1_0_setup_vmid_config(adev);
gfxhub_v1_0_program_invalidation(adev);
@@ -357,7 +366,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
void gfxhub_v1_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
@@ -365,6 +374,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index 5e9ab8eb214a..c0ab71df0d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -33,16 +33,31 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
u32 max_region =
REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+ u32 max_num_physical_nodes = 0;
+ u32 max_physical_node_id = 0;
+
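+ /* XGMI topology limits supported by this ASIC */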
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ max_num_physical_nodes = 4;
+ max_physical_node_id = 3;
+ break;
+ case CHIP_ARCTURUS:
+ max_num_physical_nodes = 8;
+ max_physical_node_id = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
/* PF_MAX_REGION=0 means xgmi is disabled */
if (max_region) {
adev->gmc.xgmi.num_physical_nodes = max_region + 1;
- if (adev->gmc.xgmi.num_physical_nodes > 4)
+ if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
return -EINVAL;
adev->gmc.xgmi.physical_node_id =
REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
- if (adev->gmc.xgmi.physical_node_id > 3)
+ if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
return -EINVAL;
adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index d605b4963f8a..b70c7b483c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two-register stride from mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */
+ int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
-
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -140,7 +144,7 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
- tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);
@@ -151,6 +155,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
tmp = mmGCVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
tmp = mmGCVM_L2_CNTL4_DEFAULT;
@@ -166,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}
@@ -333,7 +348,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
@@ -341,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 06807940748b..392b8cd94fc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v2_0_init(struct amdgpu_device *adev);
u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5eeb72fcc123..9775eca6fe43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -30,6 +30,8 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
@@ -37,6 +39,7 @@
#include "navi10_enum.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v2_3.h"
@@ -62,7 +65,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub;
u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
- bits[AMDGPU_GFXHUB] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -70,7 +73,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
- bits[AMDGPU_MMHUB] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -81,39 +84,39 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
+ hub = &adev->vmhub[AMDGPU_MMHUB_0];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
- tmp &= ~bits[AMDGPU_MMHUB];
+ tmp &= ~bits[AMDGPU_MMHUB_0];
WREG32(reg, tmp);
}
/* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
+ hub = &adev->vmhub[AMDGPU_GFXHUB_0];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
- tmp &= ~bits[AMDGPU_GFXHUB];
+ tmp &= ~bits[AMDGPU_GFXHUB_0];
WREG32(reg, tmp);
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
+ hub = &adev->vmhub[AMDGPU_MMHUB_0];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
- tmp |= bits[AMDGPU_MMHUB];
+ tmp |= bits[AMDGPU_MMHUB_0];
WREG32(reg, tmp);
}
/* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
+ hub = &adev->vmhub[AMDGPU_GFXHUB_0];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
- tmp |= bits[AMDGPU_GFXHUB];
+ tmp |= bits[AMDGPU_GFXHUB_0];
WREG32(reg, tmp);
}
break;
@@ -136,22 +139,53 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * Issue a dummy read to wait for the status register to
+ * be updated to avoid reading an incorrect value due to
+ * the new fast GRBM interface.
+ */
+ if (entry->vmid_src == AMDGPU_GFXHUB_0)
+ RREG32(hub->vm_l2_pro_fault_status);
+
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
if (printk_ratelimit()) {
+ struct amdgpu_task_info task_info;
+
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+ "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
+ "for process %s pid %d thread %s pid %d)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid);
- dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev)) {
dev_err(adev->dev,
- "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+ }
}
return 0;
@@ -188,6 +222,34 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
return req;
}
+/**
+ * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ * Returns true when the semaphore workaround is needed for @vmhub, i.e.
+ * for the MM hubs when not running under SR-IOV.
+ */
+static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+ uint32_t vmhub)
+{
+ return ((vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) &&
+ (!amdgpu_sriov_vf(adev)));
+}
+
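+/*
+ * Look up the PASID currently mapped to the given VMID in the ATC and
+ * return whether that mapping is valid.
+ */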
+static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
+ struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -198,13 +260,44 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
unsigned int vmhub, uint32_t flush_type)
{
+ bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 tmp;
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ spin_lock(&adev->gmc.invalidate_lock);
+ /*
+ * The GPUVM invalidate acknowledge state may be lost across a
+ * power-gating off cycle. As a workaround, acquire the semaphore before
+ * the invalidation and release it afterwards to avoid entering the
+ * power-gated state in between.
+ */
+
+ /* TODO: keep debugging the semaphore so it can also be used for GFXHUB. */
+ if (use_semaphore) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* a read return value of 1 means the semaphore was acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+
+ /*
+ * Issue a dummy read to wait for the ACK register to be cleared
+ * to avoid a false ACK due to the new fast GRBM interface.
+ */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
/* Wait for ACK with a delay.*/
for (i = 0; i < adev->usec_timeout; i++) {
@@ -216,6 +309,16 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
udelay(1);
}
+ /* TODO: keep debugging the semaphore so it can also be used for GFXHUB. */
+ if (use_semaphore)
+ /*
+ * Release the semaphore after the invalidation;
+ * writing 0 releases it.
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
+ spin_unlock(&adev->gmc.invalidate_lock);
+
if (i < adev->usec_timeout)
return;
@@ -230,8 +333,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
*
* Flush the TLB for the requested page table.
*/
-static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid, uint32_t flush_type)
+static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct dma_fence *fence;
@@ -240,15 +343,23 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
int r;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
mutex_lock(&adev->mman.gtt_window_lock);
- gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB, 0);
+ if (vmhub == AMDGPU_MMHUB_0) {
+ gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
+ mutex_unlock(&adev->mman.gtt_window_lock);
+ return;
+ }
+
+ BUG_ON(vmhub != AMDGPU_GFXHUB_0);
+
if (!adev->mman.buffer_funcs_enabled ||
!adev->ib_pool_ready ||
- adev->in_gpu_reset) {
- gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB, 0);
+ adev->in_gpu_reset ||
+ ring->sched.ready == false) {
+ gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
@@ -264,6 +375,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
job->vm_needs_flush = true;
+ job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
@@ -285,24 +397,102 @@ error_alloc:
DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
+/**
+ * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID of the address space to flush
+ * @flush_type: type of TLB flush
+ * @all_hub: whether to flush the TLB on all VM hubs
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid, i;
+ signed long r;
+ uint32_t seq;
+ uint16_t queried_pasid;
+ bool ret;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
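+ /*
+ * Prefer flushing through the KIQ when its ring is ready; otherwise
+ * fall back to flushing the VMID whose ATC mapping matches the pasid.
+ */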
+ if (amdgpu_emu_mode == 0 && ring->sched.ready) {
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ /* 2 dwords flush + 8 dwords fence */
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, flush_type, all_hub);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ if (r < 1) {
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ return -ETIME;
+ }
+
+ return 0;
+ }
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ if (all_hub) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ gmc_v10_0_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ } else {
+ gmc_v10_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, flush_type);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
+ bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+ * The GPUVM invalidate acknowledge state may be lost across a
+ * power-gating off cycle. As a workaround, acquire the semaphore before
+ * the invalidation and release it afterwards to avoid entering the
+ * power-gated state in between.
+ */
+
+ /* TODO: keep debugging the semaphore so it can also be used for GFXHUB. */
+ if (use_semaphore)
+ /* a read return value of 1 means the semaphore was acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
upper_32_bits(pd_addr));
- amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+ hub->vm_inv_eng0_ack + eng,
+ req, 1 << vmid);
- /* wait for the invalidate to complete */
- amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
- 1 << vmid, 1 << vmid);
+ /* TODO: keep debugging the semaphore so it can also be used for GFXHUB. */
+ if (use_semaphore)
+ /*
+ * Release the semaphore after the invalidation;
+ * writing 0 releases it.
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
return pd_addr;
}
@@ -313,7 +503,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- if (ring->funcs->vmhub == AMDGPU_GFXHUB)
+ if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -352,43 +542,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 1 system
* 0 valid
*/
-static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
-
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+{
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -415,12 +585,33 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
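+/*
+ * Translate a BO VA mapping into PTE bits: take the executable bit and
+ * MTYPE from the mapping flags, and mark PRT mappings as system, snooped
+ * and logged with the valid bit cleared.
+ */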
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+ *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags |= AMDGPU_PTE_SNOOPED;
+ *flags |= AMDGPU_PTE_LOG;
+ *flags |= AMDGPU_PTE_SYSTEM;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v10_0_get_vm_pde
+ .map_mtype = gmc_v10_0_map_mtype,
+ .get_vm_pde = gmc_v10_0_get_vm_pde,
+ .get_vm_pte = gmc_v10_0_get_vm_pte
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -449,22 +640,13 @@ static int gmc_v10_0_early_init(void *handle)
static int gmc_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
- unsigned i;
+ int r;
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ amdgpu_bo_late_init(adev);
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
- dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
- ring->idx, ring->name, ring->vm_inv_eng,
- ring->funcs->vmhub);
- }
-
- /* Engine 17 is used for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ r = amdgpu_gmc_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -474,8 +656,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (!amdgpu_sriov_vf(adev))
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = gfxhub_v2_0_get_fb_location(adev);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
@@ -495,24 +676,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
-
- if (!amdgpu_emu_mode)
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- else {
- /* hard code vram_width for emulation */
- chansize = 128;
- numchan = 1;
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* Could aper size report 0 ? */
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -524,6 +694,8 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -589,8 +761,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v10_0_sw_init(void *handle)
{
- int r;
- int dma_bits;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v2_0_init(adev);
@@ -598,12 +769,23 @@ static int gmc_v10_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (!amdgpu_emu_mode)
+ adev->gmc.vram_width = vram_width;
+ else
+ adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
- * vm size is 256TB (48bit), maximum size of Navi10,
+ * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
* block size 512 (9bit)
*/
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
@@ -616,6 +798,10 @@ static int gmc_v10_0_sw_init(void *handle)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -628,35 +814,10 @@ static int gmc_v10_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- /*
- * Reserve 8M stolen memory for navi10 like vega10
- * TODO: will check if it's really needed on asic.
- */
- if (amdgpu_emu_mode == 1)
- adev->gmc.stolen_size = 0;
- else
- adev->gmc.stolen_size = 9 * 1024 *1024;
-
- /*
- * Set DMA mask + need_dma32 flags.
- * PCIE - can handle 44-bits.
- * IGP - can handle 44-bits
- * PCI - dma32 for legacy pci gart, 44 bits on navi10
- */
- adev->need_dma32 = false;
- dma_bits = adev->need_dma32 ? 32 : 44;
-
- r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- adev->need_dma32 = true;
- dma_bits = 32;
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
- }
-
- r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
- printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
+ return r;
}
r = gmc_v10_0_mc_init(adev);
@@ -680,8 +841,8 @@ static int gmc_v10_0_sw_init(void *handle)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
amdgpu_vm_manager_init(adev);
@@ -717,6 +878,8 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
break;
default:
break;
@@ -759,14 +922,15 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* Flush HDP after it is initialized */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
gfxhub_v2_0_set_fault_enable_default(adev, value);
mmhub_v2_0_set_fault_enable_default(adev, value);
- gmc_v10_0_flush_gpu_tlb(adev, 0, 0);
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ca8dbe91cc8b..b205039350b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -362,8 +362,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
return 0;
}
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid, uint32_t flush_type)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type)
{
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
@@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
@@ -571,7 +564,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v6_0_set_fault_enable_default(adev, true);
- gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
+ gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
@@ -839,9 +832,10 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v6_0_sw_init(void *handle)
{
int r;
- int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->num_vmhubs = 1;
+
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
@@ -862,20 +856,12 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.mc_mask = 0xffffffffffULL;
- adev->need_dma32 = false;
- dma_bits = adev->need_dma32 ? 32 : 40;
- r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- adev->need_dma32 = true;
- dma_bits = 32;
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+ return r;
}
- r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
- dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
- }
- adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(44);
r = gmc_v6_0_init_microcode(adev);
if (r) {
@@ -1160,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
- .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
+ .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 57f80065d57a..9da9596a3638 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU &&
+ adev->gmc.real_vram_size > adev->gmc.aper_size) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
@@ -418,6 +419,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID of the address space to flush
+ * @flush_type: type of TLB flush (unused here)
+ * @all_hub: whether to flush the TLB on all VM hubs (unused here)
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid;
+ unsigned int tmp;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
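+ /*
+ * Scan the ATC VMID-to-PASID mappings and invalidate the VM context of
+ * the VMID bound to the requested pasid, if any.
+ */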
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -433,8 +466,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
*
* Flush the TLB for the requested page table (CIK).
*/
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid, uint32_t flush_type)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -463,27 +496,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -677,7 +703,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmCHUB_CONTROL, tmp);
}
- gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
+ gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
@@ -959,9 +985,10 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v7_0_sw_init(void *handle)
{
int r;
- int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->num_vmhubs = 1;
+
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
@@ -990,25 +1017,12 @@ static int gmc_v7_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- /* set DMA mask + need_dma32 flags.
- * PCIE - can handle 40-bits.
- * IGP - can handle 40-bits
- * PCI - dma32 for legacy pci gart, 40 bits on newer asics
- */
- adev->need_dma32 = false;
- dma_bits = adev->need_dma32 ? 32 : 40;
- r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- adev->need_dma32 = true;
- dma_bits = 32;
pr_warn("amdgpu: No suitable DMA available\n");
+ return r;
}
- r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
- pr_warn("amdgpu: No coherent DMA available\n");
- }
- adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v7_0_init_microcode(adev);
if (r) {
@@ -1352,11 +1366,12 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v7_0_get_vm_pde
+ .get_vm_pde = gmc_v7_0_get_vm_pde,
+ .get_vm_pte = gmc_v7_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9238280d1ff7..27d83204fa2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -620,6 +620,39 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID of the address space to flush
+ * @flush_type: type of TLB flush (unused here)
+ * @all_hub: whether to flush the TLB on all VM hubs (unused here)
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid;
+ unsigned int tmp;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -635,8 +668,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
*
* Flush the TLB for the requested page table (VI).
*/
-static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid, uint32_t flush_type)
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -686,29 +719,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -921,7 +946,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v8_0_set_fault_enable_default(adev, true);
- gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
+ gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
@@ -1079,9 +1104,10 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v8_0_sw_init(void *handle)
{
int r;
- int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->num_vmhubs = 1;
+
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
@@ -1116,25 +1142,12 @@ static int gmc_v8_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- /* set DMA mask + need_dma32 flags.
- * PCIE - can handle 40-bits.
- * IGP - can handle 40-bits
- * PCI - dma32 for legacy pci gart, 40 bits on newer asics
- */
- adev->need_dma32 = false;
- dma_bits = adev->need_dma32 ? 32 : 40;
- r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- adev->need_dma32 = true;
- dma_bits = 32;
pr_warn("amdgpu: No suitable DMA available\n");
+ return r;
}
- r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
- pr_warn("amdgpu: No coherent DMA available\n");
- }
- adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v8_0_init_microcode(adev);
if (r) {
@@ -1720,11 +1733,12 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v8_0_get_vm_pde
+ .get_vm_pde = gmc_v8_0_get_vm_pde,
+ .get_vm_pte = gmc_v8_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 73f3b79ab131..90216abf14a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -38,20 +38,27 @@
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
+#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
+#include "mmhub_v9_4.h"
+#include "umc_v6_1.h"
+#include "umc_v6_0.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
@@ -202,6 +209,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
{
u32 bits, i, tmp, reg;
+	/* Devices newer than VEGA10/12 shall have these programming
+	 * sequences performed by PSP BL */
+ if (adev->asic_type >= CHIP_VEGA20)
+ return 0;
+
bits = 0x7f;
switch (state) {
@@ -240,32 +252,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
-{
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev, 0);
- return AMDGPU_RAS_UE;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gmc.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -284,7 +270,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ for (j = 0; j < adev->num_vmhubs; j++) {
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
@@ -295,7 +281,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
- for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ for (j = 0; j < adev->num_vmhubs; j++) {
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
@@ -315,10 +301,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+ struct amdgpu_vmhub *hub;
bool retry_fault = !!(entry->src_data[1] & 0x80);
uint32_t status = 0;
u64 addr;
+ char hub_name[10];
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
@@ -327,8 +314,31 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
entry->timestamp))
return 1; /* This also prevents sending it to KFD */
+ if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
+ snprintf(hub_name, sizeof(hub_name), "mmhub0");
+ hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
+ snprintf(hub_name, sizeof(hub_name), "mmhub1");
+ hub = &adev->vmhub[AMDGPU_MMHUB_1];
+ } else {
+ snprintf(hub_name, sizeof(hub_name), "gfxhub0");
+ hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ }
+
/* If it's the first fault for this address, process it normally */
+ if (retry_fault && !in_interrupt() &&
+ amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * Issue a dummy read to wait for the status register to
+ * be updated to avoid reading an incorrect value due to
+ * the new fast GRBM interface.
+ */
+ if (entry->vmid_src == AMDGPU_GFXHUB_0)
+ RREG32(hub->vm_l2_pro_fault_status);
+
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
@@ -342,17 +352,33 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
"pasid:%u, for process %s pid %d thread %s pid %d)\n",
- entry->vmid_src ? "mmhub" : "gfxhub",
- retry_fault ? "retry" : "no-retry",
+ hub_name, retry_fault ? "retry" : "no-retry",
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
- dev_err(adev->dev, " in page starting at address 0x%016llx from %d\n",
+ dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev)) {
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, RW));
+
+ }
}
return 0;
@@ -366,7 +392,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
.set = gmc_v9_0_ecc_interrupt_state,
- .process = gmc_v9_0_process_ecc_irq,
+ .process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -374,8 +400,10 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
- adev->gmc.ecc_irq.num_types = 1;
- adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+ }
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -397,6 +425,36 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
return req;
}
+/**
+ * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ */
+static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+ uint32_t vmhub)
+{
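+	/* The semaphore workaround is only applied to the MMHUBs on bare metal
+	 * and is skipped on early Raven (rev < 0x8, device 0x15d8).
+	 */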
+ return ((vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) &&
+ (!amdgpu_sriov_vf(adev)) &&
+ (!(adev->asic_type == CHIP_RAVEN &&
+ adev->rev_id < 0x8 &&
+ adev->pdev->device == 0x15d8)));
+}
+
+static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -413,54 +471,172 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
*
* Flush the TLB for the requested page table using certain type.
*/
-static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid, uint32_t flush_type)
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type)
{
+ bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- unsigned i, j;
+ u32 j, inv_req, tmp;
+ struct amdgpu_vmhub *hub;
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ BUG_ON(vmhub >= adev->num_vmhubs);
- /* This is necessary for a HW workaround under SRIOV as well
- * as GFXOFF under bare metal
- */
- if (adev->gfx.kiq.ring.sched.ready &&
- (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- !adev->in_gpu_reset) {
- uint32_t req = hub->vm_inv_eng0_req + eng;
- uint32_t ack = hub->vm_inv_eng0_ack + eng;
-
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
- 1 << vmid);
- continue;
- }
+ hub = &adev->vmhub[vmhub];
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+
+ /* This is necessary for a HW workaround under SRIOV as well
+ * as GFXOFF under bare metal
+ */
+ if (adev->gfx.kiq.ring.sched.ready &&
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+ !adev->in_gpu_reset) {
+ uint32_t req = hub->vm_inv_eng0_req + eng;
+ uint32_t ack = hub->vm_inv_eng0_ack + eng;
+
+ amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid);
+ return;
+ }
+
+ spin_lock(&adev->gmc.invalidate_lock);
+
+ /*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating cycle, so acquire the semaphore before the invalidation
+	 * and release it afterwards to avoid entering a power-gated state and
+	 * work around the issue.
+ */
- spin_lock(&adev->gmc.invalidate_lock);
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+	/* TODO: more debugging is needed before the semaphore can be used for GFXHUB as well. */
+ if (use_semaphore) {
for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- if (tmp & (1 << vmid))
+			/* a read return value of 1 means the semaphore was acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
break;
udelay(1);
}
- spin_unlock(&adev->gmc.invalidate_lock);
- if (j < adev->usec_timeout)
- continue;
- DRM_ERROR("Timeout waiting for VM flush ACK!\n");
+ if (j >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+
+ /*
+ * Issue a dummy read to wait for the ACK register to be cleared
+ * to avoid a false ACK due to the new fast GRBM interface.
+ */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+ if (tmp & (1 << vmid))
+ break;
+ udelay(1);
}
+
+	/* TODO: more debugging is needed before the semaphore can be used for GFXHUB as well. */
+ if (use_semaphore)
+ /*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
+ spin_unlock(&adev->gmc.invalidate_lock);
+
+ if (j < adev->usec_timeout)
+ return;
+
+ DRM_ERROR("Timeout waiting for VM flush ACK!\n");
+}
+
+/**
+ * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid, i;
+ signed long r;
+ uint32_t seq;
+ uint16_t queried_pasid;
+ bool ret;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
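+	/* When the KIQ ring is ready, submit the PASID invalidation through a
+	 * KIQ packet; otherwise fall back to the manual per-VMID flush below.
+	 */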
+ if (ring->sched.ready) {
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ /* 2 dwords flush + 8 dwords fence */
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, flush_type, all_hub);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ if (r < 1) {
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ return -ETIME;
+ }
+
+ return 0;
+ }
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ if (all_hub) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ } else {
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, flush_type);
+ }
+ break;
+ }
+ }
+
+	return 0;
}
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
+ bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating cycle, so acquire the semaphore before the invalidation
+	 * and release it afterwards to avoid entering a power-gated state and
+	 * work around the issue.
+ */
+
+	/* TODO: more debugging is needed before the semaphore can be used for GFXHUB as well. */
+ if (use_semaphore)
+		/* a read return value of 1 means the semaphore was acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -471,6 +647,14 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: more debugging is needed before the semaphore can be used for GFXHUB as well. */
+ if (use_semaphore)
+ /*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -480,7 +664,11 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- if (ring->funcs->vmhub == AMDGPU_GFXHUB)
+ /* Do nothing because there's no lut register for mmhub1. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ return;
+
+ if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -520,44 +708,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
+static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
-
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
+ case AMDGPU_VM_MTYPE_RW:
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -584,12 +753,35 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+ *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ !(*flags & AMDGPU_PTE_SYSTEM) &&
+ mapping->bo_va->is_xgmi)
+ *flags |= AMDGPU_PTE_SNOOPED;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v9_0_get_vm_pde
+ .map_mtype = gmc_v9_0_map_mtype,
+ .get_vm_pde = gmc_v9_0_get_vm_pde,
+ .get_vm_pte = gmc_v9_0_get_vm_pte
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -597,12 +789,55 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
+static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ adev->umc.funcs = &umc_v6_0_funcs;
+ break;
+ case CHIP_VEGA20:
+ adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
+ adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
+ adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
+ adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
+ adev->umc.funcs = &umc_v6_1_funcs;
+ break;
+ case CHIP_ARCTURUS:
+ adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
+ adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
+ adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
+ adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
+ adev->umc.funcs = &umc_v6_1_funcs;
+ break;
+ default:
+ break;
+ }
+}
+
+static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ adev->mmhub.funcs = &mmhub_v1_0_funcs;
+ break;
+ case CHIP_ARCTURUS:
+ adev->mmhub.funcs = &mmhub_v9_4_funcs;
+ break;
+ default:
+ break;
+ }
+}
+
static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_set_gmc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
+ gmc_v9_0_set_umc_funcs(adev);
+ gmc_v9_0_set_mmhub_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -629,6 +864,8 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
return true;
case CHIP_VEGA12:
case CHIP_VEGA20:
@@ -637,136 +874,15 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
-static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
- {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
- unsigned i;
- unsigned vmhub, inv_eng;
-
- for (i = 0; i < adev->num_rings; ++i) {
- ring = adev->rings[i];
- vmhub = ring->funcs->vmhub;
-
- inv_eng = ffs(vm_inv_engs[vmhub]);
- if (!inv_eng) {
- dev_err(adev->dev, "no VM inv eng for ring %s\n",
- ring->name);
- return -EINVAL;
- }
-
- ring->vm_inv_eng = inv_eng - 1;
- vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
-
- dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
- ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
- }
-
- return 0;
-}
-
-static int gmc_v9_0_ecc_late_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->gmc.ras_if;
- struct ras_ih_if ih_info = {
- .cb = gmc_v9_0_process_ras_data_cb,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "umc",
- };
- int r;
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__UMC);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__UMC);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
- if (r)
- goto irq;
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
-}
-
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool r;
+ int r;
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
- r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
return r;
/* Check if ecc is available */
@@ -774,11 +890,12 @@ static int gmc_v9_0_late_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
r = amdgpu_atomfirmware_mem_ecc_supported(adev);
if (!r) {
DRM_INFO("ECC is not present.\n");
- if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
- adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
} else {
DRM_INFO("ECC is active.\n");
}
@@ -795,7 +912,7 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- r = gmc_v9_0_ecc_late_init(handle);
+ r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -806,14 +923,17 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
{
u64 base = 0;
- if (!amdgpu_sriov_vf(adev))
+
+ if (adev->asic_type == CHIP_ARCTURUS)
+ base = mmhub_v9_4_get_fb_location(adev);
+ else if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
+
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_gmc_agp_location(adev, mc);
+ amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -833,33 +953,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
int r;
- if (amdgpu_sriov_vf(adev)) {
- /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
- * and DF related registers is not readable, seems hardcord is the
- * only way to set the correct vram_width
- */
- adev->gmc.vram_width = 2048;
- } else if (amdgpu_emu_mode != 1) {
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- }
-
- if (!adev->gmc.vram_width) {
- /* hbm memory channel size */
- if (adev->flags & AMD_IS_APU)
- chansize = 64;
- else
- chansize = 128;
-
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -887,10 +985,12 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case CHIP_VEGA10: /* all engines support GPUVM */
case CHIP_VEGA12: /* all engines support GPUVM */
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
case CHIP_RAVEN: /* DCE SG support */
+ case CHIP_RENOIR:
adev->gmc.gart_size = 1024ULL << 20;
break;
}
@@ -923,7 +1023,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
- u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+ u32 d1vga_control;
unsigned size;
/*
@@ -933,6 +1033,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
if (gmc_v9_0_keep_stolen_memory(adev))
return 9 * 1024 * 1024;
+ d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
} else {
@@ -940,6 +1041,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -967,18 +1069,47 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle)
{
- int r;
- int dma_bits;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v1_0_init(adev);
- mmhub_v1_0_init(adev);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_init(adev);
+ else
+ mmhub_v1_0_init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (amdgpu_sriov_vf(adev))
+		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
+		 * and the DF-related registers are not readable; hardcoding seems to
+		 * be the only way to set the correct vram_width
+ */
+ adev->gmc.vram_width = 2048;
+ else if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = vram_width;
+
+ if (!adev->gmc.vram_width) {
+ int chansize, numchan;
+
+ /* hbm memory channel size */
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
+
+ numchan = adev->df.funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_RAVEN:
+ adev->num_vmhubs = 2;
+
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
} else {
@@ -991,6 +1122,10 @@ static int gmc_v9_0_sw_init(void *handle)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_RENOIR:
+ adev->num_vmhubs = 2;
+
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Vega10,
@@ -1002,6 +1137,12 @@ static int gmc_v9_0_sw_init(void *handle)
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
+ case CHIP_ARCTURUS:
+ adev->num_vmhubs = 3;
+
+ /* Keep the vm size same with Vega20 */
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ break;
default:
break;
}
@@ -1012,17 +1153,26 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
+ &adev->gmc.vm_fault);
+ if (r)
+ return r;
+ }
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
if (r)
return r;
- /* interrupt sent to DF. */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
- &adev->gmc.ecc_irq);
- if (r)
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+ }
/* Set the internal MC address mask
* This is the max address of the GPU's
@@ -1030,25 +1180,12 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- /* set DMA mask + need_dma32 flags.
- * PCIE - can handle 44-bits.
- * IGP - can handle 44-bits
- * PCI - dma32 for legacy pci gart, 44 bits on vega10
- */
- adev->need_dma32 = false;
- dma_bits = adev->need_dma32 ? 32 : 44;
- r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- adev->need_dma32 = true;
- dma_bits = 32;
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+ return r;
}
- r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
- printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
- }
- adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(44);
if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
@@ -1077,8 +1214,9 @@ static int gmc_v9_0_sw_init(void *handle)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;
amdgpu_vm_manager_init(adev);
@@ -1088,28 +1226,14 @@ static int gmc_v9_0_sw_init(void *handle)
static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ void *stolen_vga_buf;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
- adev->gmc.ras_if) {
- struct ras_common_if *ras_if = adev->gmc.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /*remove fs first*/
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /*remove the IH*/
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
+ amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
if (gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
@@ -1123,7 +1247,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
break;
/* fall through */
case CHIP_VEGA20:
@@ -1137,6 +1261,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_VEGA12:
break;
case CHIP_RAVEN:
+ /* TODO for renoir */
soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
@@ -1154,12 +1279,6 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
- bool value;
- u32 tmp;
-
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1169,42 +1288,17 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- mmhub_v1_0_update_power_gating(adev, true);
- break;
- default:
- break;
- }
-
r = gfxhub_v1_0_gart_enable(adev);
if (r)
return r;
- r = mmhub_v1_0_gart_enable(adev);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ r = mmhub_v9_4_gart_enable(adev);
+ else
+ r = mmhub_v1_0_gart_enable(adev);
if (r)
return r;
- WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
-
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
-
- /* After HDP is initialized, flush HDP.*/
- adev->nbio_funcs->hdp_flush(adev, NULL);
-
- if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
- value = false;
- else
- value = true;
-
- gfxhub_v1_0_set_fault_enable_default(adev, value);
- mmhub_v1_0_set_fault_enable_default(adev, value);
- gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
-
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
@@ -1214,20 +1308,69 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
static int gmc_v9_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool value;
+ int r, i;
+ u32 tmp;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* Lockout access through VGA aperture*/
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ }
}
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* TODO for renoir */
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
+ WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
+ /* After HDP is initialized, flush HDP.*/
+ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ value = false;
+ else
+ value = true;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ gfxhub_v1_0_set_fault_enable_default(adev, value);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_set_fault_enable_default(adev, value);
+ else
+ mmhub_v1_0_set_fault_enable_default(adev, value);
+ }
+ for (i = 0; i < adev->num_vmhubs; ++i)
+ gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
+
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
+
r = gmc_v9_0_gart_enable(adev);
return r;
@@ -1243,7 +1386,10 @@ static int gmc_v9_0_hw_init(void *handle)
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
gfxhub_v1_0_gart_disable(adev);
- mmhub_v1_0_gart_disable(adev);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_gart_disable(adev);
+ else
+ mmhub_v1_0_gart_disable(adev);
amdgpu_gart_table_vram_unpin(adev);
}
@@ -1308,14 +1454,26 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return mmhub_v1_0_set_clockgating(adev, state);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_set_clockgating(adev, state);
+ else
+ mmhub_v1_0_set_clockgating(adev, state);
+
+ athub_v1_0_set_clockgating(adev, state);
+
+ return 0;
}
static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- mmhub_v1_0_get_clockgating(adev, flags);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_get_clockgating(adev, flags);
+ else
+ mmhub_v1_0_get_clockgating(adev, flags);
+
+ athub_v1_0_get_clockgating(adev, flags);
}
static int gmc_v9_0_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index 5c8deac65580..e0585e8c6c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,17 +24,6 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 2, 3: firmware
- * Engine 0, 1, 4~16: amdgpu ring,
- * subject to change when ring number changes
- * Engine 17: Gart flushes
- */
-#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
new file mode 100644
index 000000000000..0debfd9f428c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "vcn_v1_0.h"
+
+#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
+
+static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[(*ptr)++] = 0;
+ ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+ } else {
+ ring->ring[(*ptr)++] = reg_offset;
+ ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+ }
+ ring->ring[(*ptr)++] = val;
+}
+
+static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ uint32_t reg, reg_offset, val, mask, i;
+
+ // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+ reg_offset = (reg << 2);
+ val = lower_32_bits(ring->gpu_addr);
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+ reg_offset = (reg << 2);
+ val = upper_32_bits(ring->gpu_addr);
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 3rd to 5th: issue MEM_READ commands
+ for (i = 0; i <= 2; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x13;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 7th: program mmUVD_JRBC_RB_REF_DATA
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ mask = 0x1;
+
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = 0x01400200;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = val;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[ptr++] = 0;
+ ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+ } else {
+ ring->ring[ptr++] = reg_offset;
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+ }
+ ring->ring[ptr++] = mask;
+
+ //9th to 21st: insert no-op
+ for (i = 0; i <= 12; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ring->ring[ptr++] = 0;
+ }
+
+ //22nd: reset mmUVD_JRBC_RB_RPTR
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+ reg_offset = (reg << 2);
+ val = 0;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x12;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * jpeg_v1_0_decode_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address to write the fence value to
+ * @seq: fence sequence number
+ * @flags: fence flags
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0xffffffff);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+
+ /* emit trap */
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
+
+static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void jpeg_v1_0_decode_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: JPEG decode TRAP\n");
+
+ switch (entry->src_id) {
+ case 126:
+ amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v1_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+int jpeg_v1_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_inst = 1;
+
+ jpeg_v1_0_set_dec_ring_funcs(adev);
+ jpeg_v1_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+int jpeg_v1_0_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ ring = &adev->jpeg.inst->ring_dec;
+ sprintf(ring->name, "jpeg_dec");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch =
+ SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+ return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG free up sw allocation
+ */
+void jpeg_v1_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec);
+}
+
+/**
+ * jpeg_v1_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode)
+{
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
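+	/* mode 0 appears to be the non-DPG start path: program the JRBC ring
+	 * buffer registers directly before handing control to the patch ring.
+	 */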
+ if (mode == 0) {
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ }
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ jpeg_v1_0_decode_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+}
+
+static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .vmhub = AMDGPU_MMHUB_0,
+ .extra_dw = 64,
+ .get_rptr = jpeg_v1_0_decode_ring_get_rptr,
+ .get_wptr = jpeg_v1_0_decode_ring_get_wptr,
+ .set_wptr = jpeg_v1_0_decode_ring_set_wptr,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */
+ 26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */
+ .emit_ib = jpeg_v1_0_decode_ring_emit_ib,
+ .emit_fence = jpeg_v1_0_decode_ring_emit_fence,
+ .emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v1_0_decode_ring_nop,
+ .insert_start = jpeg_v1_0_decode_ring_insert_start,
+ .insert_end = jpeg_v1_0_decode_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = vcn_v1_0_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs;
+ DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = {
+ .set = jpeg_v1_0_set_interrupt_state,
+ .process = jpeg_v1_0_process_interrupt,
+};
+
+static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
new file mode 100644
index 000000000000..bbf33a6a3972
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V1_0_H__
+#define __JPEG_V1_0_H__
+
+int jpeg_v1_0_early_init(void *handle);
+int jpeg_v1_0_sw_init(void *handle);
+void jpeg_v1_0_sw_fini(void *handle);
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+
+#endif /*__JPEG_V1_0_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
new file mode 100644
index 000000000000..ff2e6e1ccde7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+
+#include "vcn/vcn_2_0_0_offset.h"
+#include "vcn/vcn_2_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
+#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
+#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
+#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
+#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
+#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
+#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
+#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
+#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
+#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+
+/**
+ * jpeg_v2_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_inst = 1;
+
+ jpeg_v2_0_set_dec_ring_funcs(adev);
+ jpeg_v2_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the JPEG block's sw state, interrupt and decode ring
+ */
+static int jpeg_v2_0_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->jpeg.inst->ring_dec;
+ ring->use_doorbell = true;
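+ /* the JPEG decode ring uses the second doorbell of the VCN doorbell range set up in hw_init */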
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ sprintf(ring->name, "jpeg_dec");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Suspend the JPEG block and free up its sw allocation
+ */
+static int jpeg_v2_0_sw_fini(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_0_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set up the doorbell and run the decode ring test
+ */
+static int jpeg_v2_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ int r;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (!r)
+ DRM_INFO("JPEG decode initialized successfully.\n");
+
+ return r;
+}
+
+/**
+ * jpeg_v2_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v2_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
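+ /* force the block into the gated state if PG is not engaged and the JRBC still reports status */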
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ ring->sched.ready = false;
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v2_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_0_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_0_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v2_0_hw_init(adev);
+
+ return r;
+}
+
+static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ int r = 0;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
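+ /* request UVDJ power-up through the PG state machine and wait for the ON status */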
+ data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
+
+ SOC15_WAIT_ON_RREG(JPEG, 0,
+ mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+
+ if (r) {
+ DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
+ return r;
+ }
+ }
+
+ /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
+ data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
+
+ return 0;
+}
+
+static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev)
+{
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ uint32_t data;
+ int r = 0;
+
+ data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS));
+ data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
+ data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
+
+ data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
+
+ SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
+ (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+
+ if (r) {
+ DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device* adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
+ data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+ | JPEG_CGC_GATE__JPEG2_DEC_MASK
+ | JPEG_CGC_GATE__JPEG_ENC_MASK
+ | JPEG_CGC_GATE__JMCIF_MASK
+ | JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
+}
+
+static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device* adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+ |JPEG_CGC_GATE__JPEG2_DEC_MASK
+ |JPEG_CGC_GATE__JPEG_ENC_MASK
+ |JPEG_CGC_GATE__JMCIF_MASK
+ |JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ int r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, true);
+
+ /* disable power gating */
+ r = jpeg_v2_0_disable_power_gating(adev);
+ if (r)
+ return r;
+
+ /* JPEG disable CGC */
+ jpeg_v2_0_disable_clock_gating(adev);
+
+ WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
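+ /* hold back fetches and allow RPTR writes while the ring is programmed;
+  * 0x1 | 0x2 match the RB_NO_FETCH | RB_RPTR_WR_EN masks the v1.0 code writes by name
+  */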
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
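+ /* re-enable fetching: keep only RB_RPTR_WR_EN (0x2) set */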
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the JPEG block
+ */
+static int jpeg_v2_0_stop(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable JPEG CGC */
+ jpeg_v2_0_enable_clock_gating(adev);
+
+ /* enable power gating */
+ r = jpeg_v2_0_enable_power_gating(adev);
+ if (r)
+ return r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, false);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address where the fence sequence number is written
+ * @seq: sequence number to write
+ * @flags: fence flags (64-bit sequence numbers are not supported)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @job: job the indirect buffer belongs to (provides the VMID)
+ * @ib: indirect buffer to execute
+ * @flags: unused here
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
+
+void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static bool jpeg_v2_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v2_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+
+ SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+
+ return ret;
+}
+
+static int jpeg_v2_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+
+ if (enable) {
+ if (!jpeg_v2_0_is_idle(handle))
+ return -EBUSY;
+ jpeg_v2_0_enable_clock_gating(adev);
+ } else {
+ jpeg_v2_0_disable_clock_gating(adev);
+ }
+
+ return 0;
+}
+
+static int jpeg_v2_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v2_0_stop(adev);
+ else
+ ret = jpeg_v2_0_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: JPEG TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_2_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
+ .name = "jpeg_v2_0",
+ .early_init = jpeg_v2_0_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v2_0_sw_init,
+ .sw_fini = jpeg_v2_0_sw_fini,
+ .hw_init = jpeg_v2_0_hw_init,
+ .hw_fini = jpeg_v2_0_hw_fini,
+ .suspend = jpeg_v2_0_suspend,
+ .resume = jpeg_v2_0_resume,
+ .is_idle = jpeg_v2_0_is_idle,
+ .wait_for_idle = jpeg_v2_0_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v2_0_set_clockgating_state,
+ .set_powergating_state = jpeg_v2_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = jpeg_v2_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
+ 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+ .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+ .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v2_0_dec_ring_nop,
+ .insert_start = jpeg_v2_0_dec_ring_insert_start,
+ .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs;
+ DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = {
+ .set = jpeg_v2_0_set_interrupt_state,
+ .process = jpeg_v2_0_process_interrupt,
+};
+
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->irq.num_types = 1;
+ adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &jpeg_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
new file mode 100644
index 000000000000..15a344ed340f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V2_0_H__
+#define __JPEG_V2_0_H__
+
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags);
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags);
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr);
+void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+
+extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
+
+#endif /* __JPEG_V2_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
new file mode 100644
index 000000000000..c6d046df4b70
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v2_0.h"
+
+#include "vcn/vcn_2_5_offset.h"
+#include "vcn/vcn_2_5_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+
+#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_5_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+
+static int amdgpu_ih_clientid_jpeg[] = {
+ SOC15_IH_CLIENTID_VCN,
+ SOC15_IH_CLIENTID_VCN1
+};
+
+/**
+ * jpeg_v2_5_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_5_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->jpeg.harvest_config |= 1 << i;
+ }
+
+ if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+ AMDGPU_JPEG_HARVEST_JPEG1))
+ return -ENOENT;
+ } else {
+ adev->jpeg.num_jpeg_inst = 1;
+ }
+
+ jpeg_v2_5_set_dec_ring_funcs(adev);
+ jpeg_v2_5_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the JPEG block's sw state, interrupts and decode rings
+ */
+static int jpeg_v2_5_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int i, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+ VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ ring->use_doorbell = true;
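+ /* each instance owns a block of 8 doorbells; the JPEG ring takes the second slot */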
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
+ sprintf(ring->name, "jpeg_dec_%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Suspend the JPEG block and free up its sw allocation
+ */
+static int jpeg_v2_5_sw_fini(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_5_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set up the doorbells and run the decode ring tests
+ */
+static int jpeg_v2_5_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ DRM_INFO("JPEG decode initialized successfully.\n");
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v2_5_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ ring->sched.ready = false;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_5_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v2_5_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_5_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_5_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v2_5_hw_init(adev);
+
+ return r;
+}
+
+static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+ data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+ | JPEG_CGC_GATE__JPEG2_DEC_MASK
+ | JPEG_CGC_GATE__JPEG_ENC_MASK
+ | JPEG_CGC_GATE__JMCIF_MASK
+ | JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+ data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
+ | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
+ | JPEG_CGC_CTRL__JMCIF_MODE_MASK
+ | JPEG_CGC_CTRL__JRBBM_MODE_MASK);
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+}
+
+static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device* adev, int inst)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+ |JPEG_CGC_GATE__JPEG2_DEC_MASK
+ |JPEG_CGC_GATE__JPEG_ENC_MASK
+ |JPEG_CGC_GATE__JMCIF_MASK
+ |JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_5_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v2_5_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
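+ /* same ring programming sequence as jpeg_v2_0_start(): no-fetch + RPTR write enable first */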
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the JPEG block
+ */
+static int jpeg_v2_5_stop(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v2_5_enable_clock_gating(adev, i);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_5_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_5_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v2_5_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+ }
+
+ return ret;
+}
+
+static int jpeg_v2_5_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int jpeg_v2_5_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (enable) {
+ if (!jpeg_v2_5_is_idle(handle))
+ return -EBUSY;
+ jpeg_v2_5_enable_clock_gating(adev, i);
+ } else {
+ jpeg_v2_5_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+static int jpeg_v2_5_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v2_5_stop(adev);
+ else
+ ret = jpeg_v2_5_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t ip_instance;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_VCN:
+ ip_instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+ }
+
+ DRM_DEBUG("IH: JPEG TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_2_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
+ .name = "jpeg_v2_5",
+ .early_init = jpeg_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v2_5_sw_init,
+ .sw_fini = jpeg_v2_5_sw_fini,
+ .hw_init = jpeg_v2_5_hw_init,
+ .hw_fini = jpeg_v2_5_hw_fini,
+ .suspend = jpeg_v2_5_suspend,
+ .resume = jpeg_v2_5_resume,
+ .is_idle = jpeg_v2_5_is_idle,
+ .wait_for_idle = jpeg_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v2_5_set_clockgating_state,
+ .set_powergating_state = jpeg_v2_5_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_1,
+ .get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
+ 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
+ .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+ .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v2_0_dec_ring_nop,
+ .insert_start = jpeg_v2_0_dec_ring_insert_start,
+ .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec.me = i;
+ DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
+ }
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
+ .set = jpeg_v2_5_set_interrupt_state,
+ .process = jpeg_v2_5_process_interrupt,
+};
+
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].irq.num_types = 1;
+ adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+ }
+}
+
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 2,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &jpeg_v2_5_ip_funcs,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
index fde6328c6d71..2b4087c02620 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,17 +14,16 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
+#ifndef __JPEG_V2_5_H__
+#define __JPEG_V2_5_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;
-int
-gp102_nvdec_new(struct nvkm_device *device, int index,
- struct nvkm_nvdec **pnvdec)
-{
- return nvkm_nvdec_new_(device, index, pnvdec);
-}
+#endif /* __JPEG_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index dc5ce03034d3..49a3a56ec017 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -21,15 +21,14 @@
*
*/
#include "amdgpu.h"
+#include "amdgpu_ras.h"
#include "mmhub_v1_0.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
-#include "athub/athub_1_0_offset.h"
-#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"
-
+#include "soc15.h"
#include "soc15_common.h"
#define mmDAGB0_CNTL_MISC2_RV 0x008f
@@ -111,7 +110,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
/* Set default page address. */
@@ -159,7 +158,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
@@ -203,12 +202,14 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
@@ -348,7 +349,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
0);
WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
- if (!amdgpu_virt_support_skip_setting(adev)) {
+ if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
@@ -367,7 +368,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
@@ -407,7 +408,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
void mmhub_v1_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
@@ -415,6 +416,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
@@ -491,22 +494,6 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}
-static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
- data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
- else
- data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
-
- if (def != data)
- WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
@@ -523,23 +510,6 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}
-static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
- (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
- else
- data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
-
- if(def != data)
- WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
@@ -551,14 +521,11 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- athub_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v1_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- athub_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -569,18 +536,218 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data;
+ int data, data1;
if (amdgpu_sriov_vf(adev))
*flags = 0;
+ data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+ data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+
/* AMD_CG_SUPPORT_MC_MGCG */
- data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
- if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+ !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
*flags |= AMD_CG_SUPPORT_MC_MGCG;
/* AMD_CG_SUPPORT_MC_LS */
- data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
+
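+/* EDC counter map: each entry points at the SEC (correctable) and DED (uncorrectable)
+ * count fields of an MMEA EDC register; parity-only SED counters leave the DED field zero.
+ */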
+static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
+ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ }
+};
+
+static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
+};
+
+static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
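+	/* Accumulate this register's SEC (correctable) and DED (uncorrectable) counters across all known fields. */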
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
+		if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
+ continue;
+
+ sec_cnt = (value &
+ mmhub_v1_0_ras_fields[i].sec_count_mask) >>
+ mmhub_v1_0_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ mmhub_v1_0_ras_fields[i].name,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ mmhub_v1_0_ras_fields[i].ded_count_mask) >>
+ mmhub_v1_0_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ mmhub_v1_0_ras_fields[i].name,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i;
+ uint32_t reg_value;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
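+	/* Read each EDC counter register and fold its SEC/DED totals into ce_count/ue_count. */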
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+ if (reg_value)
+ mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+ reg_value, &sec_count, &ded_count);
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 0de0fdf98c00..c43319e8f945 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -23,6 +23,8 @@
#ifndef __MMHUB_V1_0_H__
#define __MMHUB_V1_0_H__
+extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
+
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 0f9549f19ade..bde189680521 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -31,20 +31,25 @@
#include "soc15_common.h"
-static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two registers' distance between mmMMVM_CONTEXT0_* and mmMMVM_CONTEXT1_* */
+ int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -126,7 +131,7 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
@@ -137,6 +142,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
tmp = mmMMVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
tmp = mmMMVM_L2_CNTL4_DEFAULT;
@@ -152,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
@@ -324,7 +340,7 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
void mmhub_v2_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
@@ -332,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
@@ -406,10 +424,12 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
mmhub_v2_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v2_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index db16f3ece218..3ea4344f0315 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev);
int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
new file mode 100644
index 000000000000..a5281df8d84f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -0,0 +1,1602 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "mmhub_v9_4.h"
+
+#include "mmhub/mmhub_9_4_1_offset.h"
+#include "mmhub/mmhub_9_4_1_sh_mask.h"
+#include "mmhub/mmhub_9_4_1_default.h"
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "soc15.h"
+#include "soc15_common.h"
+
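+/* Arcturus carries two MMHUB instances; the second instance's registers sit at a fixed index offset from the first. */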
+#define MMHUB_NUM_INSTANCES 2
+#define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000
+
+u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
+{
+	/* The base should be the same between the two MMHUBs on Arcturus. Read one here. */
+ u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
+ u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP);
+
+ base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+ top <<= 24;
+
+ adev->gmc.fb_start = base;
+ adev->gmc.fb_end = top;
+
+ return base;
+}
+
+static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
+ uint32_t vmid, uint64_t value)
+{
+	/* two registers' distance between mmVML2VC0_VM_CONTEXT0_* and
+ * mmVML2VC0_VM_CONTEXT1_*
+ */
+ int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ lower_32_bits(value));
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ upper_32_bits(value));
+
+}
+
+static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
+ int hubid)
+{
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->gmc.gart_end >> 44));
+}
+
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ int i;
+
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
+ mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
+ page_table_base);
+}
+
+static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
+ int hubid)
+{
+ uint64_t value;
+ uint32_t tmp;
+
+ /* Program the AGP BAR */
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ adev->gmc.agp_end >> 24);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ adev->gmc.agp_start >> 24);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(value >> 12));
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ tmp);
+ }
+}
+
+static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ ECO_BITS, 0);
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+			    MTYPE, MTYPE_UC); /* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ ATC_EN, 1);
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+}
+
+static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
+{
+ uint32_t tmp;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ ENABLE_L2_FRAGMENT_PROCESSING, 1);
+	/* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
+ INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
+ INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+
+ tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+
+ tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+}
+
+static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
+ int hubid)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+}
+
+static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
+ int hubid)
+{
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+		hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0xFFFFFFFF);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F);
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
+}
+
+static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
+{
+ uint32_t tmp;
+ int i;
+
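+	/* Program VM contexts 1-15; context 0 is configured separately for the system/GART domain. */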
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ PAGE_TABLE_DEPTH,
+ adev->vm_manager.num_level);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
+ tmp);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+}
+
+static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
+ int hubid)
+{
+ unsigned i;
+
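+	/* Reset the address range of all 18 VM invalidation engines to the maximum range. */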
+ for (i = 0; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
+ 0xffffffff);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
+ 0x1f);
+ }
+}
+
+int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+ /* GART Enable. */
+ mmhub_v9_4_init_gart_aperture_regs(adev, i);
+ mmhub_v9_4_init_system_aperture_regs(adev, i);
+ mmhub_v9_4_init_tlb_regs(adev, i);
+ if (!amdgpu_sriov_vf(adev))
+ mmhub_v9_4_init_cache_regs(adev, i);
+
+ mmhub_v9_4_enable_system_domain(adev, i);
+ if (!amdgpu_sriov_vf(adev))
+ mmhub_v9_4_disable_identity_aperture(adev, i);
+ mmhub_v9_4_setup_vmid_config(adev, i);
+ mmhub_v9_4_program_invalidation(adev, i);
+ }
+
+ return 0;
+}
+
+void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ u32 i, j;
+
+ for (j = 0; j < MMHUB_NUM_INSTANCES; j++) {
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_CNTL,
+ j * MMHUB_INSTANCE_REGISTER_OFFSET +
+ i, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ j * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp,
+ VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
+ j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
+ j * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
+ ENABLE_L2_CACHE, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
+ j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
+ j * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
+ }
+}
+
+/**
+ * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+{
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp,
+ VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp,
+ VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp,
+ VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+ }
+}
+
+void mmhub_v9_4_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
+ {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
+ int i;
+
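+	/* Both hubs share one register layout, offset by MMHUB_INSTANCE_REGISTER_OFFSET per instance. */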
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+ hub[i]->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_ACK) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_context0_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_CONTEXT0_CNTL) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ }
+}
+
+static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data, def1, data1;
+ int i, j;
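+	/* register index distance between two consecutive DAGB instances */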
+ int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;
+
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+ def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmATCL2_0_ATC_L2_MISC_CG,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
+
+ for (j = 0; j < 5; j++) {
+ def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_CNTL_MISC2,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET +
+ j * dist);
+ if (enable &&
+ (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
+ data1 &=
+ ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ } else {
+ data1 |=
+ (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ }
+
+ if (def1 != data1)
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_CNTL_MISC2,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET +
+ j * dist, data1);
+
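+			/* the second hub iterates only DAGB0..DAGB3 */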
+ if (i == 1 && j == 3)
+ break;
+ }
+ }
+}
+
+static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+ int i;
+
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+ def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmATCL2_0_ATC_L2_MISC_CG,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+ data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
+ i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
+ }
+}
+
+int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ mmhub_v9_4_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ mmhub_v9_4_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+ int data, data1;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
+
+	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+
+ if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
+ !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
+}
+
+static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
+ /* MMHUB Range 0 */
+ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 1 */
+ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 2 */
+ { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 3 */
+ { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 4 */
+ { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 5 */
+ { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 6 */
+ { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 7 */
+ { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ }
+};
+
+static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
+};
+
+static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
+ if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
+ continue;
+
+ sec_cnt = (value &
+ mmhub_v9_4_ras_fields[i].sec_count_mask) >>
+ mmhub_v9_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ mmhub_v9_4_ras_fields[i].name,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ mmhub_v9_4_ras_fields[i].ded_count_mask) >>
+ mmhub_v9_4_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ mmhub_v9_4_ras_fields[i].name,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i;
+ uint32_t reg_value;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+ if (reg_value)
+ mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+ reg_value, &sec_count, &ded_count);
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+};
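
As a quick illustration of how the table above feeds mmhub_v9_4_get_ras_error_count(), the sketch below decodes a single SEC counter from a raw EDC_CNT value. It is illustrative only: the entry type is assumed to follow the soc15_ras_field_entry layout implied by the lookup code, and the register value is a made-up example rather than real hardware data.

	static uint32_t example_decode_sec_count(const struct soc15_ras_field_entry *f,
						 uint32_t reg_value)
	{
		/* isolate this sub-block's SEC counter bits, then shift them down */
		return (reg_value & f->sec_count_mask) >> f->sec_count_shift;
	}

	/*
	 * e.g. passing a hypothetical reg_value of 0x2 for a table entry whose
	 * SEC field occupies the low bits of its EDC_CNT register would report
	 * two correctable errors for that sub-block.
	 */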
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
new file mode 100644
index 000000000000..1b979773776c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __MMHUB_V9_4_H__
+#define __MMHUB_V9_4_H__
+
+extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+
+u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev);
+int mmhub_v9_4_gart_enable(struct amdgpu_device *adev);
+void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
+void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value);
+void mmhub_v9_4_init(struct amdgpu_device *adev);
+int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
index 8af0bddf85e4..20958639b601 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
@@ -47,6 +47,18 @@ struct mmsch_v1_0_init_header {
uint32_t uvd_table_size;
};
+struct mmsch_vf_eng_init_header {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v1_1_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_vf_eng_init_header eng[2];
+};
+
struct mmsch_v1_0_cmd_direct_reg_header {
uint32_t reg_offset : 28;
uint32_t command_type : 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 235548c0b41f..5fd67e1cc2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
-static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
-{
- int r = 0;
- u32 req, val, size;
-
- if (!amdgim_is_hwperf(adev) || buf == NULL)
- return -EBADRQC;
-
- switch(type) {
- case PP_SCLK:
- req = IDH_IRQ_GET_PP_SCLK;
- break;
- case PP_MCLK:
- req = IDH_IRQ_GET_PP_MCLK;
- break;
- default:
- return -EBADRQC;
- }
-
- mutex_lock(&adev->virt.dpm_mutex);
-
- xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
-
- r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
- if (!r && adev->fw_vram_usage.va != NULL) {
- val = RREG32_NO_KIQ(
- SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
- size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
- val), PAGE_SIZE);
-
- if (size < PAGE_SIZE)
- strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
- else
- size = 0;
-
- r = size;
- goto out;
- }
-
- r = xgpu_ai_poll_msg(adev, IDH_FAIL);
- if(r)
- pr_info("%s DPM request failed",
- (type == PP_SCLK)? "SCLK" : "MCLK");
-
-out:
- mutex_unlock(&adev->virt.dpm_mutex);
- return r;
-}
-
-static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
-{
- int r = 0;
- u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
-
- if (!amdgim_is_hwperf(adev))
- return -EBADRQC;
-
- mutex_lock(&adev->virt.dpm_mutex);
- xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
-
- r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
- if (!r)
- goto out;
-
- r = xgpu_ai_poll_msg(adev, IDH_FAIL);
- if (!r)
- pr_info("DPM request failed");
- else
- pr_info("Mailbox is broken");
-
-out:
- mutex_unlock(&adev->virt.dpm_mutex);
- return r;
-}
-
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -326,7 +250,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
*/
locked = mutex_trylock(&adev->lock_reset);
if (locked)
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -338,7 +262,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
flr_done:
if (locked) {
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
@@ -449,27 +373,10 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
-static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
-{
- adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
-
- /* Enable L1 security reg access mode by defaul, as non-security VF
- * will no longer be supported.
- */
- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
-
- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
-
- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
-}
-
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
- .get_pp_clk = xgpu_ai_get_pp_clk,
- .force_dpm_level = xgpu_ai_force_dpm_level,
- .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 077e91a33d62..37dbe0f2142f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,10 +35,6 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
- IDH_IRQ_FORCE_DPM_LEVEL = 10,
- IDH_IRQ_GET_PP_SCLK,
- IDH_IRQ_GET_PP_MCLK,
-
IDH_LOG_VF_ERROR = 200,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
new file mode 100644
index 000000000000..237fa5e16b7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+#include "soc15.h"
+#include "navi10_ih.h"
+#include "soc15_common.h"
+#include "mxgpu_nv.h"
+#include "mxgpu_ai.h"
+
+static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
+{
+ WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
+}
+
+static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
+{
+ WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
+
+/*
+ * This peek_msg can *only* be called in the IRQ routine, because in the IRQ
+ * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
+ * been set to 1 by the host.
+ *
+ * If called outside the IRQ routine, peek_msg is not guaranteed to return
+ * the correct value, since RCV_DW0 is only meaningful when RCV_MSG_VALID
+ * has been set by the host.
+ */
+static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+}
+
+
+static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ if (reg != event)
+ return -ENOENT;
+
+ xgpu_nv_mailbox_send_ack(adev);
+
+ return 0;
+}
+
+static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
+{
+ return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
+static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
+{
+ int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
+ do {
+ reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
+
+ mdelay(5);
+ timeout -= 5;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+
+ return -ETIME;
+}
+
+static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+{
+ int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+
+ do {
+ r = xgpu_nv_mailbox_rcv_msg(adev, event);
+ if (!r)
+ return 0;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
+}
+
+static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
+{
+ u32 reg;
+ int r;
+ uint8_t trn;
+
+ /* IMPORTANT:
+ * Clear TRN_MSG_VALID so that the host's RCV_MSG_ACK is cleared; with the
+ * host's RCV_MSG_ACK cleared, hw automatically clears the VF's TRN_MSG_ACK.
+ * Otherwise the xgpu_nv_poll_ack() below would return immediately.
+ */
+ do {
+ xgpu_nv_mailbox_set_valid(adev, false);
+ trn = xgpu_nv_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not assert! wait again!\n", trn);
+ msleep(1);
+ }
+ } while (trn);
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_nv_mailbox_set_valid(adev, true);
+
+ /* start to poll ack */
+ r = xgpu_nv_poll_ack(adev);
+ if (r)
+ pr_err("Doesn't get ack from pf, continue\n");
+
+ xgpu_nv_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ /* start to check msg if request is idh_req_gpu_init_access */
+ if (req == IDH_REQ_GPU_INIT_ACCESS ||
+ req == IDH_REQ_GPU_FINI_ACCESS ||
+ req == IDH_REQ_GPU_RESET_ACCESS) {
+ r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ if (r) {
+ pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
+ return r;
+ }
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ }
+ }
+
+ return 0;
+}
+
+static int xgpu_nv_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
+static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+
+ req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
+ return xgpu_nv_send_access_requests(adev, req);
+}
+
+static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+ int r = 0;
+
+ req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
+ r = xgpu_nv_send_access_requests(adev, req);
+
+ return r;
+}
+
+static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+ /* Block amdgpu_gpu_recover till the FLR COMPLETE message is received;
+ * otherwise the mailbox msg will be ruined/reset by the VF FLR.
+ *
+ * We can unlock lock_reset to allow "amdgpu_job_timedout" to run
+ * gpu_recover() after FLR_NOTIFICATION_CMPL is received, which means
+ * the host side has finished this VF's FLR.
+ */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = true;
+
+ do {
+ if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked) {
+ adev->in_gpu_reset = false;
+ mutex_unlock(&adev->lock_reset);
+ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_device_should_recover_gpu(adev)
+ && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
+ amdgpu_device_gpu_recover(adev, NULL);
+}
+
+static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.flr_work);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ handler
+ * can ignore it here since the polling thread will handle it; other
+ * messages such as FLR complete are not handled here either.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_ack_irq,
+ .process = xgpu_nv_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_rcv_irq,
+ .process = xgpu_nv_mailbox_rcv_irq,
+};
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
+const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
+ .req_full_gpu = xgpu_nv_request_full_gpu_access,
+ .rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .reset_gpu = xgpu_nv_request_reset,
+ .wait_reset = NULL,
+ .trans_msg = xgpu_nv_mailbox_trans_msg,
+};
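
To place the ops table in context, here is a minimal sketch of how a caller might drive it during SR-IOV bring-up. The surrounding call site is hypothetical; only the ops themselves and their request/release semantics come from the code above.

	static int example_vf_init_window(struct amdgpu_device *adev)
	{
		int r;

		/* ask the host for exclusive (init) access before programming registers */
		r = xgpu_nv_virt_ops.req_full_gpu(adev, true);
		if (r)
			return r;

		/* ... init-time register programming would go here ... */

		/* hand the GPU back to the host once initialization is done */
		return xgpu_nv_virt_ops.rel_full_gpu(adev, true);
	}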
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 19e54acb4125..99b15f6865cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -21,33 +21,21 @@
*
*/
-#include "kfd_kernel_queue.h"
+#ifndef __MXGPU_NV_H__
+#define __MXGPU_NV_H__
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_cik(struct kernel_queue *kq);
-static void submit_packet_cik(struct kernel_queue *kq);
+#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
-void kernel_queue_init_cik(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_cik;
- ops->uninitialize = uninitialize_cik;
- ops->submit_packet = submit_packet_cik;
-}
+extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- return true;
-}
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
-static void uninitialize_cik(struct kernel_queue *kq)
-{
-}
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
-static void submit_packet_cik(struct kernel_queue *kq)
-{
- *kq->wptr_kernel = kq->pending_wptr;
- write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr);
-}
+#endif
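
The two *_OFFSET_BYTE macros above turn the dword-granular SOC15 offset of BIF_BX_PF_MAILBOX_CONTROL into byte addresses for the RREG8/WREG8 accessors used in mxgpu_nv.c: multiplying by 4 yields the byte address of the TRN control byte, and the RCV control byte follows immediately. A rough sketch of that arithmetic (inside a function where adev is in scope; the individual bit layout is not restated here):

	/* dword offset of the shared mailbox control register */
	u32 dw_off   = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL);
	u32 trn_byte = dw_off * 4;	/* byte accessed by xgpu_nv_mailbox_set_valid() */
	u32 rcv_byte = dw_off * 4 + 1;	/* byte accessed by xgpu_nv_mailbox_send_ack() */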
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index e963746be11c..cf557a428298 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -21,7 +21,8 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
@@ -109,14 +110,13 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
- int ret = 0;
u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
u32 tmp;
/* disable irqs */
navi10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -161,7 +161,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -178,7 +178,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
navi10_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
@@ -426,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
navi10_ih_update_clockgating_state(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
index 55014ce8670a..88efaecf9f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
@@ -24,25 +24,12 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi10_ip_offset.h"
int navi10_reg_base_init(struct amdgpu_device *adev)
{
- int r, i;
+ int i;
- if (amdgpu_discovery) {
- r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- DRM_WARN("failed to init reg base from ip discovery table, "
- "fallback to legacy init method\n");
- goto legacy_init;
- }
-
- return 0;
- }
-
-legacy_init:
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
new file mode 100644
index 000000000000..a786d159e5e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "navi12_ip_offset.h"
+
+int navi12_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks; only initialize the blocks needed by the driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
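
Once the table is populated, SOC15-style register helpers resolve an absolute offset from the per-IP base recorded here. Roughly, and restating the usual SOC15_REG_OFFSET shape defined elsewhere (an assumption, not part of this patch; mmSOME_GC_REG is a placeholder name):

	/* conceptually: base of the IP instance plus the register's relative offset */
	offset = adev->reg_offset[GC_HWIP][0][mmSOME_GC_REG_BASE_IDX] + mmSOME_GC_REG;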
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
new file mode 100644
index 000000000000..4ea1e8fbb601
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "navi14_ip_offset.h"
+
+int navi14_reg_base_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 835d7b1a841f..f3a3fe746222 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -27,11 +27,21 @@
#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+
+static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
@@ -92,7 +101,7 @@ static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instan
}
static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index)
+ int doorbell_index, int instance)
{
u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
@@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
- .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
.detect_hw_virt = nbio_v2_3_detect_hw_virt,
+ .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
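
The reworked nbio_v2_3_hdp_flush() above goes through the HDP remap window instead of the BIF register directly. The offsets involved are byte-based, which is why the sum is shifted right by two before being handed to the dword-granular WREG32 helpers. A small sketch of that arithmetic, assuming rmmio_remap.reg_offset and the KFD_MMIO_REMAP_* constants are byte offsets as their use here suggests:

	u32 byte_off  = adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL;
	u32 dword_off = byte_off >> 2;		/* WREG32_NO_KIQ() takes dword offsets */
	WREG32_NO_KIQ(dword_off, 0);		/* trigger the HDP memory flush */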
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index 5ae52085f6b7..a43b60acf7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6590143c3f75..635d9e1fc0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
- .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 0743a6f016f3..6dc743b73218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 73419fa38159..d6cbf26074bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -91,6 +91,26 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
WREG32(reg, doorbell_range);
}
+static void nbio_v7_0_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+ u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
+
+ u32 doorbell_range = RREG32(reg);
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
+ doorbell_index);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);
+
+ WREG32(reg, doorbell_range);
+}
+
static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
@@ -272,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
- .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
@@ -282,6 +301,7 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.hdp_flush = nbio_v7_0_hdp_flush,
.get_memsize = nbio_v7_0_get_memsize,
.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 508d549c5029..e7aefb252550 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index bfaaa327ae3c..65eb378fa035 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -23,14 +23,38 @@
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
+#include "amdgpu_ras.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
+/*
+ * These are the nbio v7_4_1 register masks. They are temporarily defined
+ * here because the nbio v7_4_1 header is incomplete.
+ */
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+
+#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc
+#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
+//BIF_MMSCH1_DOORBELL_RANGE
+#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT 0x2
+#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT 0x10
+#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
+#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
+
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -75,10 +99,24 @@ static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index, int doorbell_size)
{
- u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+ u32 reg, doorbell_range;
- u32 doorbell_range = RREG32(reg);
+ if (instance < 2)
+ reg = instance +
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
+ else
+ /*
+ * The register addresses of SDMA2~7 are not consecutive with those of
+ * SDMA0~1, so a 4-dword offset needs to be added.
+ *
+ * BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
+ * BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
+ * BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
+ */
+ reg = instance + 0x4 +
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
+
+ doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -89,6 +127,32 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
WREG32(reg, doorbell_range);
}
+static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+ u32 reg;
+ u32 doorbell_range;
+
+ if (instance)
+ reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
+ else
+ reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
+
+ doorbell_range = RREG32(reg);
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
+ doorbell_index);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);
+
+ WREG32(reg, doorbell_range);
+}
+
static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
@@ -207,7 +271,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -220,6 +284,12 @@ static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+ .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
+ .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
+ .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
+ .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
+ .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
+ .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};
static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
@@ -241,17 +311,224 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
- uint32_t def, data;
- def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
- data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+}
- if (def != data)
- WREG32_PCIE(smnPCIE_CI_CNTL, data);
+static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /*
+ * clear error status after ras_controller_intr according to
+ * hw team and count ue number for query
+ */
+ nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+
+ DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+
+ /* ras_controller_int is dedicated to nbif ras errors,
+ * not the global interrupt for sync flood
+ */
+ amdgpu_ras_reset_gpu(adev);
+ }
+}
+
+static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+
+static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in the PSP BL when it
+ * tries to enable the RAS feature. The driver only needs to set the correct
+ * interrupt vector for the bare-metal and SR-IOV use cases respectively.
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set the interrupt vector select bit to 0 to select
+ * vector 1 for the bare-metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the IH cookie for ras_controller_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a known
+ * BIF ring hw bug, it has to be disabled. The process function will never
+ * be invoked, so it is left as a dummy.
+ */
+ return 0;
+}
+
+static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in the PSP BL when it
+ * tries to enable the RAS feature. The driver only needs to set the correct
+ * interrupt vector for the bare-metal and SR-IOV use cases respectively.
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set the interrupt vector select bit to 0 to select
+ * vector 1 for the bare-metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the IH cookie for err_event_athub_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a known
+ * BIF ring hw bug, it has to be disabled. The process function will never
+ * be invoked, so it is left as a dummy.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
+ .set = nbio_v7_4_set_ras_controller_irq_state,
+ .process = nbio_v7_4_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_4_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_4_process_err_event_athub_irq,
+};
+
+static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_4_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+
+ return r;
+}
+
+static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_4_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ uint32_t global_sts, central_sts, int_eoi, parity_sts;
+ uint32_t corr, fatal, non_fatal;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
+ fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
+ non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
+ ParityErrNonFatal);
+ parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
+
+ if (corr)
+ err_data->ce_count++;
+ if (fatal)
+ err_data->ue_count++;
+
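+ /* when any parity error is latched, clear the global status, clear the
+ * fatal parity status if needed, and EOI the IOHC interrupt so that new
+ * errors can be reported
+ */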
+ if (corr || fatal || non_fatal) {
+ central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+ /* clear error status register */
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+
+ if (fatal)
+ /* clear parity fatal error indication field */
+ WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
+ parity_sts);
+
+ if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
+ BIFL_RasContller_Intr_Recv)) {
+ /* clear interrupt status register */
+ WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
+ int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
+ int_eoi = REG_SET_FIELD(int_eoi,
+ IOHC_INTERRUPT_EOI, SMI_EOI, 1);
+ WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
+ }
+ }
+}
+
+static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
- .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
@@ -261,9 +538,11 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.hdp_flush = nbio_v7_4_hdp_flush,
.get_memsize = nbio_v7_4_get_memsize,
.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
+ .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
@@ -271,4 +550,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.init_registers = nbio_v7_4_init_registers,
.detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index c442865bac4f..b1ac82872752 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..2d1bebdf1603 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -23,7 +23,8 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -39,19 +40,23 @@
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
+#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -62,8 +67,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -77,8 +82,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -118,7 +123,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -153,8 +158,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
- /* TODO: will implement it when SMU header is available */
- return false;
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
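+ /* the VBIOS is exposed through an index/data register pair: reset
+ * ROM_INDEX and then stream the image out of ROM_DATA one dword at a time
+ */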
+ /* set rom index to 0 */
+ WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+
+ return true;
}
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
@@ -175,6 +199,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -278,7 +303,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -289,6 +314,28 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+
+static bool nv_asic_supports_baco(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ return smu_baco_is_support(smu);
+}
+
+static enum amd_reset_method
+nv_asic_reset_method(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_MODE1;
+}
+
static int nv_asic_reset(struct amdgpu_device *adev)
{
@@ -303,10 +350,20 @@ static int nv_asic_reset(struct amdgpu_device *adev)
int ret = 0;
struct smu_context *smu = &adev->smu;
- if (smu_baco_is_support(smu))
- ret = smu_baco_reset(smu);
- else
+ if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
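+ /* a BACO reset cycle is an enter followed immediately by an exit */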
+ ret = smu_baco_enter(smu);
+ if (ret)
+ return ret;
+ ret = smu_baco_exit(smu);
+ if (ret)
+ return ret;
+ } else {
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
+ }
return ret;
}
@@ -350,8 +407,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -363,29 +420,65 @@ static const struct amdgpu_ip_block_version nv_common_ip_block =
.funcs = &nv_common_ip_funcs,
};
-int nv_set_ip_blocks(struct amdgpu_device *adev)
+static int nv_reg_base_init(struct amdgpu_device *adev)
{
- /* Set IP register base before any HW register access */
+ int r;
+
+ if (amdgpu_discovery) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ DRM_WARN("failed to init reg base from ip discovery table, "
+ "falling back to legacy init method\n");
+ goto legacy_init;
+ }
+
+ return 0;
+ }
+
+legacy_init:
switch (adev->asic_type) {
case CHIP_NAVI10:
navi10_reg_base_init(adev);
break;
+ case CHIP_NAVI14:
+ navi14_reg_base_init(adev);
+ break;
+ case CHIP_NAVI12:
+ navi12_reg_base_init(adev);
+ break;
default:
return -EINVAL;
}
- adev->nbio_funcs = &nbio_v2_3_funcs;
+ return 0;
+}
- adev->nbio_funcs->detect_hw_virt(adev);
+int nv_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* Set IP register base before any HW register access */
+ r = nv_reg_base_init(adev);
+ if (r)
+ return r;
+
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+
+ adev->nbio.funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_nv_virt_ops;
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -396,12 +489,35 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
+ case CHIP_NAVI12:
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
+ !amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
default:
return -EINVAL;
}
@@ -411,12 +527,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -461,6 +577,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ /* TODO: dummy implementation for the pcie_replay_count sysfs interface */
+ return 0;
+}
+
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
@@ -496,6 +622,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.read_bios_from_rom = &nv_read_bios_from_rom,
.read_register = &nv_read_register,
.reset = &nv_asic_reset,
+ .reset_method = &nv_asic_reset_method,
.set_vga_state = &nv_vga_set_state,
.get_xclk = &nv_get_xclk,
.set_uvd_clocks = &nv_set_uvd_clocks,
@@ -507,13 +634,17 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_full_reset = &nv_need_full_reset,
.get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
+ .get_pcie_replay_count = &nv_get_pcie_replay_count,
+ .supports_baco = &nv_asic_supports_baco,
};
static int nv_common_early_init(void *handle)
{
- bool psp_enabled = false;
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
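+ /* reserve the page just below 0x80000 in the register BAR as the remap
+ * target; the HDP registers are remapped here so they can later be
+ * exposed to user space (see nv_common_hw_init)
+ */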
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -528,10 +659,6 @@ static int nv_common_early_init(void *handle)
adev->asic_funcs = &nv_asic_funcs;
- if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
- psp_enabled = true;
-
adev->rev_id = nv_get_rev_id(adev);
adev->external_rev_id = 0xff;
switch (adev->asic_type) {
@@ -548,29 +675,95 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
- AMD_PG_SUPPORT_MMHUB |
+ AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB;
adev->external_rev_id = adev->rev_id + 0x1;
break;
+ case CHIP_NAVI14:
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_VCN_DPG;
+ adev->external_rev_id = adev->rev_id + 20;
+ break;
+ case CHIP_NAVI12:
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_ATHUB;
+ /* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
+ * as a consequence, the rev_id and external_rev_id are wrong.
+ * Work around this by hardcoding rev_id to 0 (its default value).
+ */
+ if (amdgpu_sriov_vf(adev))
+ adev->rev_id = 0;
+ adev->external_rev_id = adev->rev_id + 0xa;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_setting(adev);
+ xgpu_nv_mailbox_set_irq_funcs(adev);
+ }
+
return 0;
}
static int nv_common_late_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_get_irq(adev);
+
return 0;
}
static int nv_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -588,7 +781,13 @@ static int nv_common_hw_init(void *handle)
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
+ /* remap HDP registers to a hole in MMIO space,
+ * so that those registers can be exposed
+ * to process space
+ */
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
@@ -748,14 +947,16 @@ static int nv_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_NAVI10:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
nv_update_hdp_mem_power_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
nv_update_hdp_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -778,7 +979,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_MGCG */
tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 639c54933cc5..82e6cb432f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -30,4 +30,6 @@ void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int nv_set_ip_blocks(struct amdgpu_device *adev);
int navi10_reg_base_init(struct amdgpu_device *adev);
+int navi14_reg_base_init(struct amdgpu_device *adev);
+int navi12_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 5080a73a95a5..36b65797434e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -233,8 +233,16 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_RLCP_CAM = 46, /* RLCP CAM NV */
GFX_FW_TYPE_RLC_SPP_CAM_EXT = 47, /* RLC SPP CAM EXT NV */
GFX_FW_TYPE_RLX6_DRAM_BOOT = 48, /* RLX6 DRAM BOOT NV */
- GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV */
- GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV */
+ GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV + RN */
+ GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV + RN */
+ GFX_FW_TYPE_DMUB = 51, /* DMUB RN */
+ GFX_FW_TYPE_SDMA2 = 52, /* SDMA2 MI */
+ GFX_FW_TYPE_SDMA3 = 53, /* SDMA3 MI */
+ GFX_FW_TYPE_SDMA4 = 54, /* SDMA4 MI */
+ GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */
+ GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
+ GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
+ GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index ce1ea31feee0..7539104175e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -40,6 +40,9 @@
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
@@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
-
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
out:
if (err) {
dev_err(adev->dev,
@@ -189,54 +230,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v10_0_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v10_0_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -366,15 +359,30 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp)
return -EINVAL;
}
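+/* the per-ASIC cmd_submit implementation is replaced by these write-pointer
+ * accessors; the GPCOM write pointer lives in C2PMSG_67
+ */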
+static uint32_t psp_v10_0_ring_get_wptr(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+}
+
+static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v10_0_funcs = {
.init_microcode = psp_v10_0_init_microcode,
.ring_init = psp_v10_0_ring_init,
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,
- .cmd_submit = psp_v10_0_cmd_submit,
.compare_sram_data = psp_v10_0_compare_sram_data,
.mode1_reset = psp_v10_0_mode1_reset,
+ .ring_get_wptr = psp_v10_0_ring_get_wptr,
+ .ring_set_wptr = psp_v10_0_ring_set_wptr,
};
void psp_v10_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 41b72588adcf..0829188c1a5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -43,6 +44,16 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi10_ta.bin");
+MODULE_FIRMWARE("amdgpu/navi14_sos.bin");
+MODULE_FIRMWARE("amdgpu/navi14_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
+MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
+MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -51,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
#define mmRLC_GPM_UCODE_DATA_NV10 0x5b62
#define mmSDMA0_UCODE_ADDR_NV10 0x5880
#define mmSDMA0_UCODE_DATA_NV10 0x5881
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
@@ -60,6 +73,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
int err = 0;
const struct psp_firmware_header_v1_0 *sos_hdr;
const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+ const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
@@ -72,6 +86,15 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
chip_name = "navi10";
break;
+ case CHIP_NAVI14:
+ chip_name = "navi14";
+ break;
+ case CHIP_NAVI12:
+ chip_name = "navi12";
+ break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
default:
BUG();
}
@@ -107,6 +130,12 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
}
+ if (sos_hdr->header.header_version_minor == 2) {
+ sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+ }
break;
default:
dev_err(adev->dev,
@@ -133,6 +162,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -158,6 +188,33 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
}
break;
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
break;
default:
BUG();
@@ -180,26 +237,55 @@ out:
return err;
}
+int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* Wait for bootloader to signify that it is
+ * ready by having bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
+static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check tOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
}
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -208,16 +294,14 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Copy PSP KDB binary to memory */
memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
- /* Provide the sys driver to bootloader */
+ /* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
return ret;
}
@@ -227,21 +311,17 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
}
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -260,8 +340,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
return ret;
}
@@ -271,18 +350,14 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -373,6 +448,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
return false;
}
+static int psp_v11_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -382,6 +485,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
if (psp_v11_0_support_vmr_ring(psp)) {
+ ret = psp_v11_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v11_0_ring_stop failed!\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -401,6 +510,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
0x80000000, 0x8000FFFF, false);
} else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -426,33 +543,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
- else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
- GFX_CTRL_CMD_ID_DESTROY_RINGS);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
-
- return ret;
-}
static int psp_v11_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
@@ -472,63 +562,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- if (psp_v11_0_support_vmr_ring(psp))
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
- else
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- /* write_frame ptr increments by size of rb_frame in bytes */
- /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- if (psp_v11_0_support_vmr_ring(psp)) {
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
- } else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v11_0_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -865,6 +898,216 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
return psp_rlc_autoload_start(psp);
}
+static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
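+ /* poll in chunks bounded by adev->usec_timeout until the bootloader
+ * acks (bit 31 of C2PMSG_35) or the overall training timeout elapses
+ */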
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeeded" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+static void psp_v11_0_memory_training_fini(struct psp_context *psp)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ kfree(ctx->sys_cache);
+ ctx->sys_cache = NULL;
+}
+
+static int psp_v11_0_memory_training_init(struct psp_context *psp)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
+ DRM_DEBUG("memory training is not supported!\n");
+ return 0;
+ }
+
+ ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
+ if (ctx->sys_cache == NULL) {
+ DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ ret = -ENOMEM;
+ goto Err_out;
+ }
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+ ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
+ return 0;
+
+Err_out:
+ psp_v11_0_memory_training_fini(psp);
+ return ret;
+}
+
+/*
+ * save and restore process
+ */
+static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t p2c_header[4];
+ uint32_t sz;
+ void *buf;
+ int ret;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ DRM_DEBUG("Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ DRM_ERROR("Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v11_0_is_sos_alive(psp)) {
+ DRM_DEBUG("SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
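+ /* resolve op dependencies: a short-train request implies a restore, a
+ * restore with a stale sys_cache implies a save, and a save without
+ * valid data in VRAM implies a long train (which in turn forces a save)
+ */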
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ DRM_DEBUG("Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ DRM_DEBUG("Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ /*
+ * Long training will encroach on a certain amount of bottom VRAM,
+ * so save the content of this bottom VRAM to system memory
+ * before training and restore it after training to avoid
+ * VRAM corruption.
+ */
+ sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+
+ if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+ DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+ adev->gmc.visible_vram_size,
+ adev->mman.aper_base_kaddr);
+ return -EINVAL;
+ }
+
+ buf = vmalloc(sz);
+ if (!buf) {
+ DRM_ERROR("failed to allocate system memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
+ ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
+ return ret;
+ }
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
+ vfree(buf);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ DRM_ERROR("send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
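+/* the per-ASIC cmd_submit implementation is replaced by these accessors:
+ * the write pointer lives in C2PMSG_102 when the VMR ring is used (SR-IOV)
+ * and in C2PMSG_67 otherwise
+ */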
+static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v11_0_support_vmr_ring(psp))
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -874,7 +1117,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
- .cmd_submit = psp_v11_0_cmd_submit,
.compare_sram_data = psp_v11_0_compare_sram_data,
.mode1_reset = psp_v11_0_mode1_reset,
.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
@@ -885,6 +1127,11 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ras_trigger_error = psp_v11_0_ras_trigger_error,
.ras_cure_posion = psp_v11_0_ras_cure_posion,
.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
+ .mem_training_init = psp_v11_0_memory_training_init,
+ .mem_training_fini = psp_v11_0_memory_training_fini,
+ .mem_training = psp_v11_0_memory_training,
+ .ring_get_wptr = psp_v11_0_ring_get_wptr,
+ .ring_set_wptr = psp_v11_0_ring_set_wptr,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
new file mode 100644
index 000000000000..58d8b6d732e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v12_0.h"
+
+#include "mp/mp_12_0_0_offset.h"
+#include "mp/mp_12_0_0_sh_mask.h"
+#include "gc/gc_9_0_offset.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "nbio/nbio_7_4_offset.h"
+
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+
+static int psp_v12_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const char *chip_name;
+ char fw_name[30];
+ int err = 0;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_RENOIR:
+ chip_name = "renoir";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out1;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out1;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
+ return 0;
+
+out1:
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+
+ return err;
+}
+
+static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ return 0;
+ }
+
+ /* Wait for bootloader to signify that it is ready by having bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP System Driver binary to memory */
+ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
+
+ /* Provide the sys driver to bootloader */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
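+ /* the bootloader command is carried in the upper 16 bits of C2PMSG_35;
+ * command 1 requests the sys driver load
+ */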
+ psp_gfxdrv_command_reg = 1 << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
+static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
+{
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg)
+ return 0;
+
+ /* Wait for bootloader to signify that it is ready by having bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy Secure OS binary to PSP memory */
+ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
+
+ /* Provide the PSP secure OS to bootloader */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
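+ /* command 2 in the upper 16 bits of C2PMSG_35 requests the secure OS load */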
+ psp_gfxdrv_command_reg = 2 << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
+ 0, true);
+
+ return ret;
+}
+
+static void psp_v12_0_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
+static int psp_v12_0_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ psp_v12_0_reroute_ih(psp);
+
+ ring = &psp->km_ring;
+
+ ring->ring_type = ring_type;
+
+ /* allocate 4k Page of Local Frame Buffer memory for ring */
+ ring->ring_size = 0x1000;
+ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ if (ret) {
+ ring->ring_size = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ return false;
+}
+
+static int psp_v12_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v12_0_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
+
+ return ret;
+}
+
+static int psp_v12_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Write the ring destroy command*/
+ if (psp_v12_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+ /* there might be a handshake issue with the hardware that needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) */
+ if (psp_v12_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
+static int psp_v12_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v12_0_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
+}
+
+static int
+psp_v12_0_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+ unsigned int *sram_data_reg_offset,
+ enum AMDGPU_UCODE_ID ucode_id)
+{
+ int ret = 0;
+
+ switch (ucode_id) {
+/* TODO: needs to be confirmed */
+#if 0
+ case AMDGPU_UCODE_ID_SMC:
+ *sram_offset = 0;
+ *sram_addr_reg_offset = 0;
+ *sram_data_reg_offset = 0;
+ break;
+#endif
+
+ case AMDGPU_UCODE_ID_CP_CE:
+ *sram_offset = 0x0;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
+ break;
+
+ case AMDGPU_UCODE_ID_CP_PFP:
+ *sram_offset = 0x0;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
+ break;
+
+ case AMDGPU_UCODE_ID_CP_ME:
+ *sram_offset = 0x0;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
+ break;
+
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ *sram_offset = 0x10000;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
+ break;
+
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ *sram_offset = 0x10000;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
+ break;
+
+ case AMDGPU_UCODE_ID_RLC_G:
+ *sram_offset = 0x2000;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
+ break;
+
+ case AMDGPU_UCODE_ID_SDMA0:
+ *sram_offset = 0x0;
+ *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
+ *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
+ break;
+
+/* TODO: needs to be confirmed */
+#if 0
+ case AMDGPU_UCODE_ID_SDMA1:
+ *sram_offset = ;
+ *sram_addr_reg_offset = ;
+ break;
+
+ case AMDGPU_UCODE_ID_UVD:
+ *sram_offset = ;
+ *sram_addr_reg_offset = ;
+ break;
+
+ case AMDGPU_UCODE_ID_VCE:
+ *sram_offset = ;
+ *sram_addr_reg_offset = ;
+ break;
+#endif
+
+ case AMDGPU_UCODE_ID_MAXIMUM:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ enum AMDGPU_UCODE_ID ucode_type)
+{
+ int err = 0;
+ unsigned int fw_sram_reg_val = 0;
+ unsigned int fw_sram_addr_reg_offset = 0;
+ unsigned int fw_sram_data_reg_offset = 0;
+ unsigned int ucode_size;
+ uint32_t *ucode_mem = NULL;
+ struct amdgpu_device *adev = psp->adev;
+
+ err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ &fw_sram_data_reg_offset, ucode_type);
+ if (err)
+ return false;
+
+ WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
+
+ ucode_size = ucode->ucode_size;
+ ucode_mem = (uint32_t *)ucode->kaddr;
+ while (ucode_size) {
+ fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
+
+ if (*ucode_mem != fw_sram_reg_val)
+ return false;
+
+ ucode_mem++;
+ /* 4 bytes */
+ ucode_size -= 4;
+ }
+
+ return true;
+}
+
+static int psp_v12_0_mode1_reset(struct psp_context *psp)
+{
+ int ret;
+ uint32_t offset;
+ struct amdgpu_device *adev = psp->adev;
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+
+ if (ret) {
+ DRM_INFO("psp is not working correctly before mode1 reset!\n");
+ return -EINVAL;
+ }
+
+ /* send the mode 1 reset command */
+ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
+
+ msleep(500);
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+
+ if (ret) {
+ DRM_INFO("psp mode 1 reset failed!\n");
+ return -EINVAL;
+ }
+
+ DRM_INFO("psp mode1 reset succeed \n");
+
+ return 0;
+}
+
+static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v12_0_support_vmr_ring(psp))
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v12_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
+static const struct psp_funcs psp_v12_0_funcs = {
+ .init_microcode = psp_v12_0_init_microcode,
+ .bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv,
+ .bootloader_load_sos = psp_v12_0_bootloader_load_sos,
+ .ring_init = psp_v12_0_ring_init,
+ .ring_create = psp_v12_0_ring_create,
+ .ring_stop = psp_v12_0_ring_stop,
+ .ring_destroy = psp_v12_0_ring_destroy,
+ .compare_sram_data = psp_v12_0_compare_sram_data,
+ .mode1_reset = psp_v12_0_mode1_reset,
+ .ring_get_wptr = psp_v12_0_ring_get_wptr,
+ .ring_set_wptr = psp_v12_0_ring_set_wptr,
+};
+
+void psp_v12_0_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v12_0_funcs;
+}
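
psp_v12_0_set_psp_funcs() only installs the callback table; the common PSP code dispatches through it. A rough sketch of the assumed dispatch (the real wrappers or macros are in amdgpu_psp.h and may differ in form):

static uint32_t psp_ring_get_wptr(struct psp_context *psp)
{
	return psp->funcs->ring_get_wptr(psp);
}

static void psp_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
	psp->funcs->ring_set_wptr(psp, value);
}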
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.h
index 4ec5d3d9e2b0..241693ab1990 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2017 Intel Corporation
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -8,26 +8,23 @@
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#ifndef __PSP_V12_0_H__
+#define __PSP_V12_0_H__
-#ifndef _INTEL_GUC_FW_H_
-#define _INTEL_GUC_FW_H_
+#include "amdgpu_psp.h"
-struct intel_guc;
-
-void intel_guc_fw_init_early(struct intel_guc *guc);
-int intel_guc_fw_upload(struct intel_guc *guc);
+void psp_v12_0_set_psp_funcs(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 019c47feee42..735c43c7daab 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -179,7 +179,7 @@ static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
* Double check if the latest four legacy versions.
* If yes, it is still the right version.
*/
- for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) {
+ for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
if (sos_old_versions[i] == adev->psp.sos_fw_version)
return true;
}
@@ -410,65 +410,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v3_1_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- if (psp_v3_1_support_vmr_ring(psp))
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
- else
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- /* write_frame ptr increments by size of rb_frame in bytes */
- /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- if (psp_v3_1_support_vmr_ring(psp)) {
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
- /* send interrupt to PSP for SRIOV ring write pointer update */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_CONSUME_CMD);
- } else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v3_1_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -636,12 +577,37 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version >= 0x80455)
+ if (amdgpu_sriov_vf(psp->adev))
return true;
return false;
}
+static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v3_1_support_vmr_ring(psp))
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ return data;
+}
+
+static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v3_1_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ /* send interrupt to PSP for SRIOV ring write pointer update */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v3_1_funcs = {
.init_microcode = psp_v3_1_init_microcode,
.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
@@ -650,11 +616,12 @@ static const struct psp_funcs psp_v3_1_funcs = {
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,
- .cmd_submit = psp_v3_1_cmd_submit,
.compare_sram_data = psp_v3_1_compare_sram_data,
.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
.mode1_reset = psp_v3_1_mode1_reset,
.support_vmr_ring = psp_v3_1_support_vmr_ring,
+ .ring_get_wptr = psp_v3_1_ring_get_wptr,
+ .ring_set_wptr = psp_v3_1_ring_set_wptr,
};
void psp_v3_1_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a10175838013..7d509a40076f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
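
The rewritten nop-count expression in this hunk, and the pad_count expressions in the ring_pad_ib hunks below, are arithmetically equivalent to the old modulo forms (10 is congruent to 2, and 8 to 0, mod 8). A standalone check one could compile to verify this, relying only on unsigned wraparound:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t w;

	for (w = 0; w < 64; w++) {
		/* old emit_ib form vs. new form */
		assert(((10 - (w & 7)) % 8) == ((2 - w) & 7));
		/* old pad_ib form vs. new form */
		assert(((8 - (w & 7)) % 8) == ((-w) & 7));
	}
	return 0;
}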
@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 5f4e2c616241..b6109a99fc43 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4428018672d3..e55884d204bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -34,6 +34,18 @@
#include "sdma0/sdma0_4_2_sh_mask.h"
#include "sdma1/sdma1_4_2_offset.h"
#include "sdma1/sdma1_4_2_sh_mask.h"
+#include "sdma2/sdma2_4_2_2_offset.h"
+#include "sdma2/sdma2_4_2_2_sh_mask.h"
+#include "sdma3/sdma3_4_2_2_offset.h"
+#include "sdma3/sdma3_4_2_2_sh_mask.h"
+#include "sdma4/sdma4_4_2_2_offset.h"
+#include "sdma4/sdma4_4_2_2_sh_mask.h"
+#include "sdma5/sdma5_4_2_2_offset.h"
+#include "sdma5/sdma5_4_2_2_sh_mask.h"
+#include "sdma6/sdma6_4_2_2_offset.h"
+#include "sdma6/sdma6_4_2_2_sh_mask.h"
+#include "sdma7/sdma7_4_2_2_offset.h"
+#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"
@@ -55,6 +67,8 @@ MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
+MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -68,6 +82,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
@@ -202,25 +217,232 @@ static const struct soc15_reg_golden golden_settings_sdma_rv2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
};
+static const struct soc15_reg_golden golden_settings_sdma_arct[] =
+{
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+};
+
+static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
+};
+
+static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
+ { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
+ 0, 0,
+ },
+ { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
+};
+
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
{
- return ( 0 == instance ? (adev->reg_offset[SDMA0_HWIP][0][0] + offset) :
- (adev->reg_offset[SDMA1_HWIP][0][0] + offset));
+ switch (instance) {
+ case 0:
+ return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
+ case 1:
+ return (adev->reg_offset[SDMA1_HWIP][0][0] + offset);
+ case 2:
+ return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
+ case 3:
+ return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
+ case 4:
+ return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
+ case 5:
+ return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
+ case 6:
+ return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
+ case 7:
+ return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
+ default:
+ break;
+ }
+ return 0;
+}
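
The RREG32_SDMA()/WREG32_SDMA() accessors used throughout the rest of this file are not shown in these hunks; they are assumed to be thin wrappers over sdma_v4_0_get_reg_offset(), roughly:

#define RREG32_SDMA(instance, offset) \
	RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
#define WREG32_SDMA(instance, offset, value) \
	WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), (value))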
+
+static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
+{
+ switch (seq_num) {
+ case 0:
+ return SOC15_IH_CLIENTID_SDMA0;
+ case 1:
+ return SOC15_IH_CLIENTID_SDMA1;
+ case 2:
+ return SOC15_IH_CLIENTID_SDMA2;
+ case 3:
+ return SOC15_IH_CLIENTID_SDMA3;
+ case 4:
+ return SOC15_IH_CLIENTID_SDMA4;
+ case 5:
+ return SOC15_IH_CLIENTID_SDMA5;
+ case 6:
+ return SOC15_IH_CLIENTID_SDMA6;
+ case 7:
+ return SOC15_IH_CLIENTID_SDMA7;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
+{
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ return 0;
+ case SOC15_IH_CLIENTID_SDMA1:
+ return 1;
+ case SOC15_IH_CLIENTID_SDMA2:
+ return 2;
+ case SOC15_IH_CLIENTID_SDMA3:
+ return 3;
+ case SOC15_IH_CLIENTID_SDMA4:
+ return 4;
+ case SOC15_IH_CLIENTID_SDMA5:
+ return 5;
+ case SOC15_IH_CLIENTID_SDMA6:
+ return 6;
+ case SOC15_IH_CLIENTID_SDMA7:
+ return 7;
+ default:
+ break;
+ }
+ return -EINVAL;
}
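
sdma_v4_0_seq_to_irq_id() and sdma_v4_0_irq_id_to_seq() are meant to be inverses over the eight Arcturus instances. A hypothetical sanity-check helper (not in the patch) that a debug path could call:

static void sdma_v4_0_check_irq_map(void)
{
	int i;

	for (i = 0; i < 8; i++)
		WARN_ON(sdma_v4_0_irq_id_to_seq(sdma_v4_0_seq_to_irq_id(i)) != i);
}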
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- if (!amdgpu_virt_support_skip_setting(adev)) {
- soc15_program_register_sequence(adev,
- golden_settings_sdma_4,
- ARRAY_SIZE(golden_settings_sdma_4));
- soc15_program_register_sequence(adev,
- golden_settings_sdma_vg10,
- ARRAY_SIZE(golden_settings_sdma_vg10));
- }
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4,
+ ARRAY_SIZE(golden_settings_sdma_4));
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_vg10,
+ ARRAY_SIZE(golden_settings_sdma_vg10));
break;
case CHIP_VEGA12:
soc15_program_register_sequence(adev,
@@ -241,6 +463,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma1_4_2,
ARRAY_SIZE(golden_settings_sdma1_4_2));
break;
+ case CHIP_ARCTURUS:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_arct,
+ ARRAY_SIZE(golden_settings_sdma_arct));
+ break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
@@ -254,11 +481,53 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_rv1,
ARRAY_SIZE(golden_settings_sdma_rv1));
break;
+ case CHIP_RENOIR:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4_3,
+ ARRAY_SIZE(golden_settings_sdma_4_3));
+ break;
default:
break;
}
}
+static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
+{
+ int err = 0;
+ const struct sdma_firmware_header_v1_0 *hdr;
+
+ err = amdgpu_ucode_validate(sdma_inst->fw);
+ if (err)
+ return err;
+
+ hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
+
+ if (sdma_inst->feature_version >= 20)
+ sdma_inst->burst_nop = true;
+
+ return 0;
+}
+
+static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.instance[i].fw != NULL)
+ release_firmware(adev->sdma.instance[i].fw);
+
+ /* arcturus shares the same FW memory across
+ all SDMA instances */
+ if (adev->asic_type == CHIP_ARCTURUS)
+ break;
+ }
+
+ memset((void*)adev->sdma.instance, 0,
+ sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
+}
+
/**
* sdma_v4_0_init_microcode - load ucode images from disk
*
@@ -278,7 +547,6 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
- const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -300,30 +568,52 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "raven";
break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
+ case CHIP_RENOIR:
+ chip_name = "renoir";
+ break;
default:
BUG();
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
- if (err)
- goto out;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
- if (adev->sdma.instance[i].feature_version >= 20)
- adev->sdma.instance[i].burst_nop = true;
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+
+ err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
+ if (err)
+ goto out;
+
+ for (i = 1; i < adev->sdma.num_instances; i++) {
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* Arcturus will leverage the same FW memory
+ for every SDMA instance */
+ memcpy((void*)&adev->sdma.instance[i],
+ (void*)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
+ }
+ else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
+
+ err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
+ if (err)
+ goto out;
+ }
+ }
+
+ DRM_DEBUG("psp_load == '%s'\n",
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma.instance[i].fw;
@@ -332,13 +622,11 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
+
out:
if (err) {
DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ sdma_v4_0_destroy_inst_ctx(adev);
}
return err;
}
@@ -510,7 +798,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -559,16 +847,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- if (ring->me == 0)
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
- else
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
sdma_v4_0_wait_reg_mem(ring, 0, 1,
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}
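
Replacing the sdma0/sdma1 special cases with "ref_and_mask_sdma0 << ring->me" assumes the per-SDMA request/done bits in the NBIO HDP flush register are consecutive, starting at the SDMA0 bit. With illustrative bit positions (the real values come from the per-ASIC nbio hdp_flush_reg tables):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ref_and_mask_sdma0 = 1u << 10;	/* illustrative only */
	const uint32_t ref_and_mask_sdma1 = 1u << 11;	/* illustrative only */

	/* instances 0 and 1 reproduce the old special-cased values ... */
	assert((ref_and_mask_sdma0 << 0) == ref_and_mask_sdma0);
	assert((ref_and_mask_sdma0 << 1) == ref_and_mask_sdma1);
	/* ... and instance n simply selects the n-th consecutive bit */
	assert((ref_and_mask_sdma0 << 7) == (1u << 17));
	return 0;
}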
@@ -620,26 +905,27 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
- int i;
+ int i, unset = 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].ring;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = 1;
+ }
- for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
- }
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
+ sdma[i]->sched.ready = false;
+ }
}
/**
@@ -663,16 +949,20 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
-
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ bool unset = false;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].page;
+
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
+ (unset == false)) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = true;
+ }
+
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -681,10 +971,9 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
- }
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
+ sdma[i]->sched.ready = false;
+ }
}
/**
@@ -1018,6 +1307,7 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
sdma_v4_1_init_power_gating(adev);
sdma_v4_1_update_power_gating(adev, true);
break;
@@ -1389,7 +1679,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1473,8 +1763,10 @@ static int sdma_v4_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- if (adev->asic_type == CHIP_RAVEN)
+ if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR)
adev->sdma.num_instances = 1;
+ else if (adev->asic_type == CHIP_ARCTURUS)
+ adev->sdma.num_instances = 8;
else
adev->sdma.num_instances = 2;
@@ -1494,109 +1786,33 @@ static int sdma_v4_0_early_init(void *handle)
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
sdma_v4_0_set_irq_funcs(adev);
+ sdma_v4_0_set_ras_funcs(adev);
return 0;
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
struct amdgpu_iv_entry *entry);
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->sdma.ras_if;
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- struct ras_fs_if fs_info = {
- .sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__SDMA,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "sdma",
- };
- int r;
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
+ int i;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- r = 0;
- }
- goto feature;
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
}
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
- if (r)
- goto irq;
-
- r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
- if (r) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
+ if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
+ return adev->sdma.funcs->ras_late_init(adev, &ih_info);
+ else
+ return 0;
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1606,28 +1822,22 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
- &adev->sdma.trap_irq);
- if (r)
- return r;
-
- /* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
- &adev->sdma.trap_irq);
- if (r)
- return r;
-
- /* SDMA SRAM ECC event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
- &adev->sdma.ecc_irq);
- if (r)
- return r;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+ }
/* SDMA SRAM ECC event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC,
- &adev->sdma.ecc_irq);
- if (r)
- return r;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
@@ -1641,11 +1851,8 @@ static int sdma_v4_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
if (r)
return r;
@@ -1663,9 +1870,7 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
if (r)
return r;
}
@@ -1679,21 +1884,8 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
- adev->sdma.ras_if) {
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /*remove fs first*/
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /*remove the IH*/
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
+ adev->sdma.funcs->ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -1701,10 +1893,7 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ sdma_v4_0_destroy_inst_ctx(adev);
return 0;
}
@@ -1714,11 +1903,13 @@ static int sdma_v4_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu) ||
+ (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset))
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
- sdma_v4_0_init_golden_registers(adev);
+ if (!amdgpu_sriov_vf(adev))
+ sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev);
@@ -1728,18 +1919,22 @@ static int sdma_v4_0_hw_init(void *handle)
static int sdma_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
if (amdgpu_sriov_vf(adev))
return 0;
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
- if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
- && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
+ && adev->powerplay.pp_funcs->set_powergating_by_smu) ||
+ adev->asic_type == CHIP_RENOIR)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
return 0;
@@ -1776,15 +1971,17 @@ static bool sdma_v4_0_is_idle(void *handle)
static int sdma_v4_0_wait_for_idle(void *handle)
{
- unsigned i;
- u32 sdma0, sdma1;
+ unsigned i, j;
+ u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
- sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
-
- if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
+ for (j = 0; j < adev->sdma.num_instances; j++) {
+ sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
+ if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
+ break;
+ }
+ if (j == adev->sdma.num_instances)
return 0;
udelay(1);
}
@@ -1820,17 +2017,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
uint32_t instance;
DRM_DEBUG("IH: SDMA trap\n");
- switch (entry->client_id) {
- case SOC15_IH_CLIENTID_SDMA0:
- instance = 0;
- break;
- case SOC15_IH_CLIENTID_SDMA1:
- instance = 1;
- break;
- default:
- return 0;
- }
-
+ instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
@@ -1851,55 +2038,26 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
struct amdgpu_iv_entry *entry)
{
- uint32_t instance, err_source;
-
- switch (entry->client_id) {
- case SOC15_IH_CLIENTID_SDMA0:
- instance = 0;
- break;
- case SOC15_IH_CLIENTID_SDMA1:
- instance = 1;
- break;
- default:
- return 0;
- }
-
- switch (entry->src_id) {
- case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
- err_source = 0;
- break;
- case SDMA0_4_0__SRCID__SDMA_ECC:
- err_source = 1;
- break;
- default:
- return 0;
- }
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-
- amdgpu_ras_reset_gpu(adev, 0);
-
- return AMDGPU_RAS_UE;
-}
+ int instance;
-static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
+ /* When “Full RAS” is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
- if (!ras_if)
- return 0;
+ instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ goto out;
- ih_data.head = *ras_if;
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
+out:
+ return AMDGPU_RAS_SUCCESS;
}
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
@@ -1910,16 +2068,9 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- switch (entry->client_id) {
- case SOC15_IH_CLIENTID_SDMA0:
- instance = 0;
- break;
- case SOC15_IH_CLIENTID_SDMA1:
- instance = 1;
- break;
- default:
+ instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
return 0;
- }
switch (entry->ring_id) {
case 0:
@@ -1936,14 +2087,10 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
{
u32 sdma_edc_config;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
- sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
- sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);
-
- sdma_edc_config = RREG32(reg_offset);
+ sdma_edc_config = RREG32_SDMA(type, mmSDMA0_EDC_CONFIG);
sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_edc_config);
+ WREG32_SDMA(type, mmSDMA0_EDC_CONFIG, sdma_edc_config);
return 0;
}
@@ -1953,61 +2100,35 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
bool enable)
{
uint32_t data, def;
+ int i;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
- /* enable sdma0 clock gating */
- def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
- data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
- if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
-
- if (adev->sdma.num_instances > 1) {
- def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
- data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
+ data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
+ WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
}
} else {
- /* disable sdma0 clock gating */
- def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
- data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
-
- if (adev->sdma.num_instances > 1) {
- def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
- data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
+ data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
+ WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
}
}
}
@@ -2018,34 +2139,23 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
bool enable)
{
uint32_t data, def;
+ int i;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
- /* 1-not override: enable sdma0 mem light sleep */
- def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
- data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
-
- /* 1-not override: enable sdma1 mem light sleep */
- if (adev->sdma.num_instances > 1) {
- def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
- data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* 1-not override: enable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
+ data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
+ WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
}
} else {
- /* 0-override:disable sdma0 mem light sleep */
- def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
- data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
-
- /* 0-override:disable sdma1 mem light sleep */
- if (adev->sdma.num_instances > 1) {
- def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
- data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* 0-override: disable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
+ data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
+ WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
}
}
}
@@ -2063,10 +2173,12 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
sdma_v4_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v4_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -2133,7 +2245,43 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = sdma_v4_0_ring_get_rptr,
+ .get_wptr = sdma_v4_0_ring_get_wptr,
+ .set_wptr = sdma_v4_0_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_0_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+ /* sdma_v4_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+ .emit_ib = sdma_v4_0_ring_emit_ib,
+ .emit_fence = sdma_v4_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_0_ring_test_ring,
+ .test_ib = sdma_v4_0_ring_test_ib,
+ .insert_nop = sdma_v4_0_ring_insert_nop,
+ .pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/*
+ * On Arcturus, SDMA instances 5~7 have a different vmhub type (AMDGPU_MMHUB_1).
+ * So create an individual constant ring_funcs for those instances.
+ */
+static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
.set_wptr = sdma_v4_0_ring_set_wptr,
@@ -2165,7 +2313,39 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = sdma_v4_0_ring_get_rptr,
+ .get_wptr = sdma_v4_0_page_ring_get_wptr,
+ .set_wptr = sdma_v4_0_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_0_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+ /* sdma_v4_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+ .emit_ib = sdma_v4_0_ring_emit_ib,
+ .emit_fence = sdma_v4_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_0_ring_test_ring,
+ .test_ib = sdma_v4_0_ring_test_ib,
+ .insert_nop = sdma_v4_0_ring_insert_nop,
+ .pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,
.set_wptr = sdma_v4_0_page_ring_set_wptr,
@@ -2197,10 +2377,20 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+ adev->sdma.instance[i].ring.funcs =
+ &sdma_v4_0_ring_funcs_2nd_mmhub;
+ else
+ adev->sdma.instance[i].ring.funcs =
+ &sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
if (adev->sdma.has_page_queue) {
- adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+ adev->sdma.instance[i].page.funcs =
+ &sdma_v4_0_page_ring_funcs_2nd_mmhub;
+ else
+ adev->sdma.instance[i].page.funcs =
+ &sdma_v4_0_page_ring_funcs;
adev->sdma.instance[i].page.me = i;
}
}
@@ -2217,17 +2407,30 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.set = sdma_v4_0_set_ecc_irq_state,
- .process = sdma_v4_0_process_ecc_irq,
+ .process = amdgpu_sdma_process_ecc_irq,
};
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ switch (adev->sdma.num_instances) {
+ case 1:
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
+ break;
+ case 8:
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ break;
+ case 2:
+ default:
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2;
+ break;
+ }
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
- adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
}
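
The switch above sizes the trap/ECC IRQ sources to the number of SDMA engines actually present (one on small APUs, two on most dGPUs, eight on Arcturus). A standalone model of that mapping is sketched below; the enumerator values mirror the usual INSTANCE0 = 0, INSTANCE1, ... layout and are an assumption here, not copied from the driver's headers.

#include <stdio.h>

/* Assumed layout of the SDMA IRQ instance enumerators (illustrative only;
 * the real definitions live in the driver's headers).
 */
enum sdma_irq_type {
	SDMA_IRQ_INSTANCE0 = 0,
	SDMA_IRQ_INSTANCE1,
	SDMA_IRQ_INSTANCE2,
	/* ... instances 3..7 ... */
	SDMA_IRQ_LAST = 8,
};

/* Mirror of the num_instances -> num_types mapping in the switch above. */
static int sdma_irq_num_types(int num_instances)
{
	switch (num_instances) {
	case 1:
		return SDMA_IRQ_INSTANCE1;	/* one engine -> one type   */
	case 8:
		return SDMA_IRQ_LAST;		/* Arcturus: eight engines  */
	case 2:
	default:
		return SDMA_IRQ_INSTANCE2;	/* two engines -> two types */
	}
}

int main(void)
{
	printf("1 -> %d, 2 -> %d, 8 -> %d\n",
	       sdma_irq_num_types(1), sdma_irq_num_types(2),
	       sdma_irq_num_types(8));
	return 0;
}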
@@ -2293,8 +2496,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
else
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
@@ -2313,21 +2516,77 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
- if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
- for (i = 1; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue)
sched = &adev->sdma.instance[i].page.sched;
- adev->vm_manager.vm_pte_rqs[i - 1] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
- adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
- } else {
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ else
sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] = sched;
+ }
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+static void sdma_v4_0_get_ras_error_count(uint32_t value,
+ uint32_t instance,
+ uint32_t *sec_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt;
+
+ /* double-bit (multi-bit) error detection is not supported */
+ for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
+ /* the SDMA_EDC_COUNTER register in each sdma instance
+ * shares the same SED shift and mask
+ */
+ sec_cnt = (value &
+ sdma_v4_0_ras_fields[i].sec_count_mask) >>
+ sdma_v4_0_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("Detected %s in SDMA%d, SED %d\n",
+ sdma_v4_0_ras_fields[i].name,
+ instance, sec_cnt);
+ *sec_count += sec_cnt;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ }
+}
+
+static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t instance, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0;
+ uint32_t reg_value = 0;
+
+ reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
+ /* double-bit errors are not supported */
+ if (reg_value)
+ sdma_v4_0_get_ras_error_count(reg_value,
+ instance, &sec_count);
+ /* err_data->ce_count should be initialized to 0
+ * before calling into this function */
+ err_data->ce_count += sec_count;
+ /* double-bit errors are not supported;
+ * set the ue count to 0 */
+ err_data->ue_count = 0;
+
+ return 0;
+};
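
sdma_v4_0_query_ras_error_count() reads the packed SDMA0_EDC_COUNTER register once per instance and walks a {name, mask, shift} table to pull the per-block single-error-correct (SEC) counts out of it. The sketch below models that mask/shift extraction with a made-up field table; the entries are illustrative stand-ins for sdma_v4_0_ras_fields[], not the real register layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's sdma_v4_0_ras_fields[] table. */
struct ras_field {
	const char *name;
	uint32_t sec_count_mask;
	uint32_t sec_count_shift;
};

static const struct ras_field fields[] = {
	{ "SDMA_UCODE_BUF_SED",       0x00000003, 0 },
	{ "SDMA_RB_CMD_BUF_SED",      0x0000000c, 2 },
	{ "SDMA_MBANK_DATA_BUF0_SED", 0x00000030, 4 },
};

/* Accumulate the SEC counters packed into one EDC counter register value. */
static uint32_t sec_count_from_edc(uint32_t value)
{
	uint32_t total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (value & fields[i].sec_count_mask) >>
			       fields[i].sec_count_shift;

		if (cnt)
			printf("%s: %u single-bit error(s)\n",
			       fields[i].name, cnt);
		total += cnt;
	}
	return total;
}

int main(void)
{
	/* 0x25 decodes as 1 + 1 + 2 across the three example fields. */
	printf("total SEC = %u\n", sec_count_from_edc(0x25));
	return 0;
}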
+
+static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
+ .ras_late_init = amdgpu_sdma_ras_late_init,
+ .ras_fini = amdgpu_sdma_ras_fini,
+ .query_ras_error_count = sdma_v4_0_query_ras_error_count,
+};
+
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 3747c3f1f0cc..67b9830b7c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -21,8 +21,11 @@
*
*/
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -42,6 +45,12 @@
MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
+MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
+
+MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
+
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x5893
@@ -59,7 +68,7 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
@@ -71,7 +80,7 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
@@ -80,6 +89,18 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
};
static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+};
+
+static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
+static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
@@ -111,6 +132,22 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv10,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
break;
+ case CHIP_NAVI14:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
+ break;
+ case CHIP_NAVI12:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
+ break;
default:
break;
}
@@ -143,6 +180,12 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
case CHIP_NAVI10:
chip_name = "navi10";
break;
+ case CHIP_NAVI14:
+ chip_name = "navi14";
+ break;
+ case CHIP_NAVI12:
+ chip_name = "navi12";
+ break;
default:
BUG();
}
@@ -339,8 +382,15 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
- /* IB packet must end on a 8 DW boundary */
- sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ /* An IB packet must end on an 8-DW boundary--the next dword
+ * must be on an 8-dword boundary. Our IB packet below is 6
+ * dwords long, so add x NOPs such that, in modular
+ * arithmetic,
+ * wptr + 6 + x = 8k, k >= 0, which in C is
+ * (wptr + 6 + x) % 8 == 0.
+ * The expression below is a solution for x.
+ */
+ sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
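
The rewritten NOP count keeps the guarantee spelled out in the comment: with a 6-dword INDIRECT packet, x = (2 - wptr) & 7 extra NOPs land the packet end on an 8-dword boundary, exactly like the old (10 - (wptr & 7)) % 8. The same identity reappears further down in sdma_v5_0_ring_pad_ib(), where (-length_dw) & 0x7 replaces (8 - (length_dw & 0x7)) % 8. A small standalone check of both equivalences, assuming nothing beyond the packet length given in the comment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wptr, len;

	/* emit_ib padding: (wptr + x + 6) must be a multiple of 8 */
	for (wptr = 0; wptr < 64; wptr++) {
		uint32_t x = (2 - wptr) & 7;

		if ((wptr + x + 6) % 8 != 0) {
			printf("emit_ib mismatch at wptr=%u\n", wptr);
			return 1;
		}
	}

	/* pad_ib padding: (-len) & 7 == (8 - (len & 7)) % 8 */
	for (len = 0; len < 64; len++) {
		if (((0U - len) & 7) != (8 - (len & 7)) % 8) {
			printf("pad_ib mismatch at len=%u\n", len);
			return 1;
		}
	}

	printf("both padding expressions check out\n");
	return 0;
}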
@@ -363,7 +413,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -373,8 +423,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -640,7 +690,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
if (amdgpu_sriov_vf(adev))
@@ -861,19 +911,12 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
if (amdgpu_emu_mode == 1)
msleep(1);
else
- DRM_UDELAY(1);
+ udelay(1);
}
- if (i < adev->usec_timeout) {
- if (amdgpu_emu_mode == 1)
- DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
- else
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
amdgpu_device_wb_free(adev, index);
return r;
@@ -938,13 +981,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1043,10 +1083,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
- * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
- *
+ * sdma_v5_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
*
+ * Pad the IB with NOPs to a multiple of 8 dwords.
*/
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
@@ -1054,7 +1094,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1086,7 +1126,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
@@ -1130,6 +1170,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
+static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ amdgpu_ring_emit_wreg(ring, reg0, ref);
+ /* wait for a cycle to reset vm_inv_eng*_ack */
+ amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
+ amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
static int sdma_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1316,7 +1366,7 @@ static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
if (ring->trail_seq ==
le32_to_cpu(*(ring->trail_fence_cpu_addr)))
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout) {
@@ -1472,10 +1522,12 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
sdma_v5_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v5_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -1532,7 +1584,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
- .vmhub = AMDGPU_GFXHUB,
+ .vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_0_ring_get_rptr,
.get_wptr = sdma_v5_0_ring_get_wptr,
.set_wptr = sdma_v5_0_ring_set_wptr,
@@ -1543,7 +1595,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
6 + /* sdma_v5_0_ring_emit_pipeline_sync */
/* sdma_v5_0_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1557,6 +1609,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.pad_ib = sdma_v5_0_ring_pad_ib,
.emit_wreg = sdma_v5_0_ring_emit_wreg,
.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
.preempt_ib = sdma_v5_0_ring_preempt_ib,
@@ -1583,7 +1636,8 @@ static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
+ adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}
@@ -1664,17 +1718,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 4d74453f3cfb..4d415bfdb42f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
{GB_ADDR_CONFIG},
{MC_ARB_RAMCFG},
{GB_TILE_MODE0},
@@ -1186,6 +1197,17 @@ static int si_asic_reset(struct amdgpu_device *adev)
return 0;
}
+static bool si_asic_supports_baco(struct amdgpu_device *adev)
+{
+ return false;
+}
+
+static enum amd_reset_method
+si_asic_reset_method(struct amdgpu_device *adev)
+{
+ return AMD_RESET_METHOD_LEGACY;
+}
+
static u32 si_get_config_memsize(struct amdgpu_device *adev)
{
return RREG32(mmCONFIG_MEMSIZE);
@@ -1394,6 +1416,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.read_bios_from_rom = &si_read_bios_from_rom,
.read_register = &si_read_register,
.reset = &si_asic_reset,
+ .reset_method = &si_asic_reset_method,
.set_vga_state = &si_vga_set_state,
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
@@ -1407,6 +1430,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_pcie_usage = &si_get_pcie_usage,
.need_reset_on_init = &si_need_reset_on_init,
.get_pcie_replay_count = &si_get_pcie_replay_count,
+ .supports_baco = &si_asic_supports_baco,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1626,7 +1650,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1661,12 +1684,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1675,14 +1693,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -1699,15 +1720,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
}
for (i = 0; i < 10; i++) {
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -1719,25 +1748,44 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
mdelay(100);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
+
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -1750,15 +1798,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
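
The si.c hunks above replace the hand-rolled pci_pcie_cap() offset lookups with the kernel's pcie_capability_read_word()/pcie_capability_write_word() accessors, which locate the PCIe capability themselves and take the register symbol directly. A minimal sketch of the read-modify-write pattern used for the target link speed, assuming only a valid struct pci_dev in kernel context (error handling omitted):

#include <linux/pci.h>

/* Force the target link speed of a PCIe device to Gen2 (5.0 GT/s).
 * Sketch of the pcie_capability_*() read-modify-write pattern only.
 */
static void set_target_link_speed_gen2(struct pci_dev *pdev)
{
	u16 lnkctl2;

	if (!pci_is_pcie(pdev))
		return;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &lnkctl2);
	lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;	/* clear target link speed */
	lnkctl2 |= PCI_EXP_LNKCTL2_TLS_5_0GT;	/* request 5.0 GT/s */
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL2, lnkctl2);
}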
@@ -1881,7 +1930,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (orig != data)
si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
- if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
@@ -1930,14 +1979,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index bdda8b4e03f0..42d5601b6bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle,
bool enable;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ enable = (state == AMD_CG_STATE_GATE);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version si_dma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 57bb5f9e08b2..88ae27a5a03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
si_ih_disable_interrupts(adev);
- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
new file mode 100644
index 000000000000..c902f26cf50d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smuio/smuio_11_0_0_offset.h"
+#include "smuio/smuio_11_0_0_sh_mask.h"
+
+#include "smu_v11_0_i2c.h"
+#include "amdgpu.h"
+#include "soc15_common.h"
+#include <drm/drm_fixed.h>
+#include <drm/drm_drv.h>
+#include "amdgpu_amdkfd.h"
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+
+/* error codes */
+#define I2C_OK 0
+#define I2C_NAK_7B_ADDR_NOACK 1
+#define I2C_NAK_TXDATA_NOACK 2
+#define I2C_TIMEOUT 4
+#define I2C_SW_TIMEOUT 8
+#define I2C_ABORT 0x10
+
+/* I2C transaction flags */
+#define I2C_NO_STOP 1
+#define I2C_RESTART 2
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+
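
The two macros above recover the enclosing amdgpu_ras and RAS EEPROM control structures from the embedded i2c_adapter, the standard container_of() pattern. A self-contained illustration with toy types (not the driver's real structures) follows:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), equivalent to the kernel's implementation. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for an i2c_adapter embedded in a larger control struct. */
struct i2c_adapter { int nr; };

struct eeprom_control {
	int bus_locked;
	struct i2c_adapter eeprom_accessor;
};

int main(void)
{
	struct eeprom_control ctl = { .bus_locked = 1 };
	struct i2c_adapter *adap = &ctl.eeprom_accessor;

	/* Given only the embedded adapter, recover the outer structure. */
	struct eeprom_control *back =
		container_of(adap, struct eeprom_control, eeprom_accessor);

	printf("recovered bus_locked = %d\n", back->bus_locked);
	return 0;
}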
+static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT);
+
+ reg = REG_SET_FIELD(reg, SMUIO_PWRMGT, i2c_clk_gate_en, en ? 1 : 0);
+ WREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT, reg);
+}
+
+
+static void smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, enable ? 1 : 0);
+}
+
+static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ /* do */
+ {
+ RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CLR_INTR);
+
+ } /* while (reg_CKSVII2C_ic_clr_intr == 0) */
+}
+
+static void smu_v11_0_i2c_configure(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t reg = 0;
+
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_SLAVE_DISABLE, 1);
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_RESTART_EN, 1);
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_10BITADDR_MASTER, 0);
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_10BITADDR_SLAVE, 0);
+ /* Standard mode */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MAX_SPEED_MODE, 2);
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MASTER_MODE, 1);
+
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CON, reg);
+}
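
smu_v11_0_i2c_configure() composes the IC_CON value with REG_SET_FIELD(), which clears a field through its generated mask and ORs in the new value at the field's shift. A simplified userspace model of that pattern follows; the mask/shift constants are made up for illustration, the real ones come from the generated smuio sh_mask header.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field definitions (not the real IC_CON layout). */
#define IC_CON_MASTER_MODE_MASK   0x00000001U
#define IC_CON_MASTER_MODE_SHIFT  0
#define IC_CON_SPEED_MASK         0x00000006U
#define IC_CON_SPEED_SHIFT        1

/* Simplified REG_SET_FIELD(): clear the field, then insert the new value. */
#define SET_FIELD(reg, field, val) \
	(((reg) & ~field##_MASK) | (((val) << field##_SHIFT) & field##_MASK))

int main(void)
{
	uint32_t reg = 0;

	reg = SET_FIELD(reg, IC_CON_MASTER_MODE, 1);	/* master enabled */
	reg = SET_FIELD(reg, IC_CON_SPEED, 2);		/* speed mode 2   */
	printf("IC_CON = 0x%08x\n", reg);		/* prints 0x00000005 */
	return 0;
}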
+
+static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ /*
+ * Standard mode speed. These values are taken from SMUIO MAS,
+ * but differ from what is given in the Synopsys spec. The values
+ * here assume a 100 MHz reference clock.
+ *
+ * Configuration for standard mode; speed = 100 kbps.
+ * Scale linearly; for now only the standard speed clock is
+ * supported, and this will work only with a 100 MHz ref clock.
+ *
+ * TBD: Change the calculation to also take the ref clock value into account.
+ */
+
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_FS_SPKLEN, 2);
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_SS_SCL_HCNT, 120);
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_SS_SCL_LCNT, 130);
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_SDA_HOLD, 20);
+}
+
+static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, uint8_t address)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ /* Convert from an 8-bit to a 7-bit address */
+ address >>= 1;
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TAR, (address & 0xFF));
+}
+
+static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t ret = I2C_OK;
+ uint32_t reg, reg_c_tx_abrt_source;
+
+ /* Check if transmission is completed */
+ unsigned long timeout_counter = jiffies + msecs_to_jiffies(20);
+
+ do {
+ if (time_after(jiffies, timeout_counter)) {
+ ret |= I2C_SW_TIMEOUT;
+ break;
+ }
+
+ reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
+
+ } while (REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFE) == 0);
+
+ if (ret != I2C_OK)
+ return ret;
+
+ /* This only checks if NAK is received and transaction got aborted */
+ reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_INTR_STAT);
+
+ if (REG_GET_FIELD(reg, CKSVII2C_IC_INTR_STAT, R_TX_ABRT) == 1) {
+ reg_c_tx_abrt_source = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TX_ABRT_SOURCE);
+ DRM_INFO("TX was terminated, IC_TX_ABRT_SOURCE val is:%x", reg_c_tx_abrt_source);
+
+ /* Check for stop due to NACK */
+ if (REG_GET_FIELD(reg_c_tx_abrt_source,
+ CKSVII2C_IC_TX_ABRT_SOURCE,
+ ABRT_TXDATA_NOACK) == 1) {
+
+ ret |= I2C_NAK_TXDATA_NOACK;
+
+ } else if (REG_GET_FIELD(reg_c_tx_abrt_source,
+ CKSVII2C_IC_TX_ABRT_SOURCE,
+ ABRT_7B_ADDR_NOACK) == 1) {
+
+ ret |= I2C_NAK_7B_ADDR_NOACK;
+ } else {
+ ret |= I2C_ABORT;
+ }
+
+ smu_v11_0_i2c_clear_status(control);
+ }
+
+ return ret;
+}
+
+static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t ret = I2C_OK;
+ uint32_t reg_ic_status, reg_c_tx_abrt_source;
+
+ reg_c_tx_abrt_source = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TX_ABRT_SOURCE);
+
+ /* If slave is not present */
+ if (REG_GET_FIELD(reg_c_tx_abrt_source,
+ CKSVII2C_IC_TX_ABRT_SOURCE,
+ ABRT_7B_ADDR_NOACK) == 1) {
+ ret |= I2C_NAK_7B_ADDR_NOACK;
+
+ smu_v11_0_i2c_clear_status(control);
+ } else { /* wait till some data is there in RXFIFO */
+ /* Poll for some byte in RXFIFO */
+ unsigned long timeout_counter = jiffies + msecs_to_jiffies(20);
+
+ do {
+ if (time_after(jiffies, timeout_counter)) {
+ ret |= I2C_SW_TIMEOUT;
+ break;
+ }
+
+ reg_ic_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
+
+ } while (REG_GET_FIELD(reg_ic_status, CKSVII2C_IC_STATUS, RFNE) == 0);
+ }
+
+ return ret;
+}
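
Both poll helpers above use the same jiffies-based pattern: compute a deadline with msecs_to_jiffies(), then spin on a status register until the bit of interest changes or time_after() fires. A minimal kernel-context sketch of that idiom, with a hypothetical ready() callback standing in for the register read (process context assumed):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll a caller-supplied ready() predicate for up to timeout_ms milliseconds,
 * mirroring the time_after() loops in the poll_tx/rx helpers above.
 */
static int poll_until_ready(bool (*ready)(void *ctx), void *ctx,
			    unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!ready(ctx)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(10, 20);	/* don't hammer the register */
	}
	return 0;
}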
+
+
+
+
+/**
+ * smu_v11_0_i2c_transmit - Send a block of data over the I2C bus to a slave device.
+ *
+ * @control: The I2C adapter to transmit on.
+ * @address: The I2C address of the slave device.
+ * @data: The data to transmit over the bus.
+ * @numbytes: The amount of data to transmit.
+ * @i2c_flag: Flags for the transmission.
+ *
+ * Returns 0 on success, or a non-zero error code on failure.
+ */
+static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
+ uint8_t address, uint8_t *data,
+ uint32_t numbytes, uint32_t i2c_flag)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t bytes_sent, reg, ret = 0;
+ unsigned long timeout_counter;
+
+ bytes_sent = 0;
+
+ DRM_DEBUG_DRIVER("I2C_Transmit(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
+ print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
+ 16, 1, data, numbytes, false);
+ }
+
+ /* Set the I2C slave address */
+ smu_v11_0_i2c_set_address(control, address);
+ /* Enable I2C */
+ smu_v11_0_i2c_enable(control, true);
+
+ /* Clear status bits */
+ smu_v11_0_i2c_clear_status(control);
+
+
+ timeout_counter = jiffies + msecs_to_jiffies(20);
+
+ while (numbytes > 0) {
+ reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
+ if (REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF)) {
+ do {
+ reg = 0;
+ /*
+ * Prepare transaction, no need to set RESTART. I2C engine will send
+ * START as soon as it sees data in TXFIFO
+ */
+ if (bytes_sent == 0)
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, RESTART,
+ (i2c_flag & I2C_RESTART) ? 1 : 0);
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, data[bytes_sent]);
+
+ /* determine if we need to send STOP bit or not */
+ if (numbytes == 1)
+ /* Final transaction, so send stop unless I2C_NO_STOP */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, STOP,
+ (i2c_flag & I2C_NO_STOP) ? 0 : 1);
+ /* Write */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 0);
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg);
+
+ /* Record that the bytes were transmitted */
+ bytes_sent++;
+ numbytes--;
+
+ reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
+
+ } while (numbytes && REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF));
+ }
+
+ /*
+ * We waited too long for the transmission FIFO to become not-full.
+ * Exit the loop with error.
+ */
+ if (time_after(jiffies, timeout_counter)) {
+ ret |= I2C_SW_TIMEOUT;
+ goto Err;
+ }
+ }
+
+ ret = smu_v11_0_i2c_poll_tx_status(control);
+
+Err:
+ /* Any error, no point in proceeding */
+ if (ret != I2C_OK) {
+ if (ret & I2C_SW_TIMEOUT)
+ DRM_ERROR("TIMEOUT ERROR !!!");
+
+ if (ret & I2C_NAK_7B_ADDR_NOACK)
+ DRM_ERROR("Received I2C_NAK_7B_ADDR_NOACK !!!");
+
+
+ if (ret & I2C_NAK_TXDATA_NOACK)
+ DRM_ERROR("Received I2C_NAK_TXDATA_NOACK !!!");
+ }
+
+ return ret;
+}
+
+
+/**
+ * smu_v11_0_i2c_receive - Receive a block of data over the I2C bus from a slave device.
+ *
+ * @control: The I2C adapter to receive on.
+ * @address: The I2C address of the slave device.
+ * @data: Buffer to store the received data.
+ * @numbytes: The amount of data to receive.
+ * @i2c_flag: Flags for the transfer.
+ *
+ * Returns 0 on success, or a non-zero error code on failure.
+ */
+static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
+ uint8_t address, uint8_t *data,
+ uint32_t numbytes, uint8_t i2c_flag)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t bytes_received, ret = I2C_OK;
+
+ bytes_received = 0;
+
+ /* Set the I2C slave address */
+ smu_v11_0_i2c_set_address(control, address);
+
+ /* Enable I2C */
+ smu_v11_0_i2c_enable(control, true);
+
+ while (numbytes > 0) {
+ uint32_t reg = 0;
+
+ smu_v11_0_i2c_clear_status(control);
+
+
+ /* Prepare transaction */
+
+ /* We disable I2C each time, so this is not a restart */
+ if (bytes_received == 0)
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, RESTART,
+ (i2c_flag & I2C_RESTART) ? 1 : 0);
+
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, 0);
+ /* Read */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 1);
+
+ /* Transmitting last byte */
+ if (numbytes == 1)
+ /* Final transaction, so send stop if requested */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, STOP,
+ (i2c_flag & I2C_NO_STOP) ? 0 : 1);
+
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg);
+
+ ret = smu_v11_0_i2c_poll_rx_status(control);
+
+ /* Any error, no point in proceeding */
+ if (ret != I2C_OK) {
+ if (ret & I2C_SW_TIMEOUT)
+ DRM_ERROR("TIMEOUT ERROR !!!");
+
+ if (ret & I2C_NAK_7B_ADDR_NOACK)
+ DRM_ERROR("Received I2C_NAK_7B_ADDR_NOACK !!!");
+
+ if (ret & I2C_NAK_TXDATA_NOACK)
+ DRM_ERROR("Received I2C_NAK_TXDATA_NOACK !!!");
+
+ break;
+ }
+
+ reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD);
+ data[bytes_received] = REG_GET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT);
+
+ /* Record that the bytes were received */
+ bytes_received++;
+ numbytes--;
+ }
+
+ DRM_DEBUG_DRIVER("I2C_Receive(), address = %x, bytes = %d, data :",
+ (uint16_t)address, bytes_received);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
+ print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
+ 16, 1, data, bytes_received, false);
+ }
+
+ return ret;
+}
+
+static void smu_v11_0_i2c_abort(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t reg = 0;
+
+ /* Enable I2C engine; */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_ENABLE, ENABLE, 1);
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, reg);
+
+ /* Abort previous transaction */
+ reg = REG_SET_FIELD(reg, CKSVII2C_IC_ENABLE, ABORT, 1);
+ WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, reg);
+
+ DRM_DEBUG_DRIVER("I2C_Abort() Done.");
+}
+
+
+static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ const uint32_t IDLE_TIMEOUT = 1024;
+ uint32_t timeout_count = 0;
+ uint32_t reg_ic_enable, reg_ic_enable_status, reg_ic_clr_activity;
+
+ reg_ic_enable_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS);
+ reg_ic_enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE);
+
+
+ if ((REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) &&
+ (REG_GET_FIELD(reg_ic_enable_status, CKSVII2C_IC_ENABLE_STATUS, IC_EN) == 1)) {
+ /*
+ * Nobody is using the I2C engine, but it remains active because
+ * someone forgot to send a STOP
+ */
+ smu_v11_0_i2c_abort(control);
+ } else if (REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) {
+ /* Nobody is using I2C engine */
+ return true;
+ }
+
+ /* Keep reading activity bit until it's cleared */
+ do {
+ reg_ic_clr_activity = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CLR_ACTIVITY);
+
+ if (REG_GET_FIELD(reg_ic_clr_activity,
+ CKSVII2C_IC_CLR_ACTIVITY, CLR_ACTIVITY) == 0)
+ return true;
+
+ ++timeout_count;
+
+ } while (timeout_count < IDLE_TIMEOUT);
+
+ return false;
+}
+
+static void smu_v11_0_i2c_init(struct i2c_adapter *control)
+{
+ /* Disable clock gating */
+ smu_v11_0_i2c_set_clock_gating(control, false);
+
+ if (!smu_v11_0_i2c_activity_done(control))
+ DRM_WARN("I2C busy !");
+
+ /* Disable I2C */
+ smu_v11_0_i2c_enable(control, false);
+
+ /* Configure I2C to operate as master and in standard mode */
+ smu_v11_0_i2c_configure(control);
+
+ /* Initialize the clock to 50 kHz default */
+ smu_v11_0_i2c_set_clock(control);
+
+}
+
+static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint32_t reg_ic_enable_status, reg_ic_enable;
+
+ smu_v11_0_i2c_enable(control, false);
+
+ /* Double check if disabled, else force abort */
+ reg_ic_enable_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS);
+ reg_ic_enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE);
+
+ if ((REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) &&
+ (REG_GET_FIELD(reg_ic_enable_status,
+ CKSVII2C_IC_ENABLE_STATUS, IC_EN) == 1)) {
+ /*
+ * Nobody is using the I2C engine, but it remains active because
+ * someone forgot to send a STOP
+ */
+ smu_v11_0_i2c_abort(control);
+ }
+
+ /* Restore clock gating */
+
+ /*
+ * TODO: Re-enabling clock gating seems to break subsequent SMU operation
+ * on the I2C bus. The SMU probably doesn't disable clock gating, as we
+ * do here, before working with the bus. So for now don't restore it,
+ * and later work with the SMU team to see whether they have this issue
+ * and can update their code appropriately.
+ */
+ /* smu_v11_0_i2c_set_clock_gating(control, true); */
+
+}
+
+static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ /* Send PPSMC_MSG_RequestI2CBus */
+ if (!adev->powerplay.pp_funcs->smu_i2c_bus_access)
+ goto Fail;
+
+
+ if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, true))
+ return true;
+
+Fail:
+ return false;
+}
+
+static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ /* Check that the SMU exposes I2C bus arbitration */
+ if (!adev->powerplay.pp_funcs->smu_i2c_bus_access)
+ goto Fail;
+
+ /* Send PPSMC_MSG_ReleaseI2CBus */
+ if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle,
+ false))
+ return true;
+
+Fail:
+ return false;
+}
+
+/***************************** EEPROM I2C GLUE ****************************/
+
+static uint32_t smu_v11_0_i2c_eeprom_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret = 0;
+
+ /* First 2 bytes are dummy write to set EEPROM address */
+ ret = smu_v11_0_i2c_transmit(control, address, data, 2, I2C_NO_STOP);
+ if (ret != I2C_OK)
+ goto Fail;
+
+ /* Now read data starting with that address */
+ ret = smu_v11_0_i2c_receive(control, address, data + 2, numbytes - 2,
+ I2C_RESTART);
+
+Fail:
+ if (ret != I2C_OK)
+ DRM_ERROR("ReadData() - I2C error occurred :%x", ret);
+
+ return ret;
+}
+
+static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+
+ ret = smu_v11_0_i2c_transmit(control, address, data, numbytes, 0);
+
+ if (ret != I2C_OK)
+ DRM_ERROR("WriteI2CData() - I2C error occurred :%x", ret);
+ else
+ /*
+ * According to the EEPROM spec, a maximum of 10 ms is required for the
+ * EEPROM to flush its internal RX buffer after a STOP is issued at the
+ * end of a write transaction. During this time the EEPROM will not
+ * respond to any more commands - so wait a bit longer.
+ *
+ * TODO: Improve this to wait for the first ACK of the slave address
+ * after the internal write cycle is done.
+ */
+ msleep(10);
+
+ return ret;
+
+}
+
+static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+
+ if (!smu_v11_0_i2c_bus_lock(i2c)) {
+ DRM_ERROR("Failed to lock the bus from SMU");
+ return;
+ }
+
+ control->bus_locked = true;
+}
+
+static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ WARN_ONCE(1, "This operation is not supposed to run in atomic context!");
+ return false;
+}
+
+static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+
+ if (!smu_v11_0_i2c_bus_unlock(i2c)) {
+ DRM_ERROR("Failed to unlock the bus from SMU");
+ return;
+ }
+
+ control->bus_locked = false;
+}
+
+static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
+ .lock_bus = lock_bus,
+ .trylock_bus = trylock_bus,
+ .unlock_bus = unlock_bus,
+};
+
+static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ int i, ret;
+ struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+
+ if (!control->bus_locked) {
+ DRM_ERROR("I2C bus unlocked, stopping transaction!");
+ return -EIO;
+ }
+
+ smu_v11_0_i2c_init(i2c_adap);
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD)
+ ret = smu_v11_0_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
+ else
+ ret = smu_v11_0_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
+
+ if (ret != I2C_OK) {
+ num = -EIO;
+ break;
+ }
+ }
+
+ smu_v11_0_i2c_fini(i2c_adap);
+ return num;
+}
+
+static u32 smu_v11_0_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm smu_v11_0_i2c_eeprom_i2c_algo = {
+ .master_xfer = smu_v11_0_i2c_eeprom_i2c_xfer,
+ .functionality = smu_v11_0_i2c_eeprom_i2c_func,
+};
+
+int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control)
+{
+ i2c_del_adapter(control);
+}
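
Once registered with i2c_add_adapter(), the adapter is driven through ordinary struct i2c_msg transfers, which the lock_ops above serialize against the SMU. The sketch below shows one plausible read; note that this adapter's xfer routine expects the 2-byte EEPROM word address in the first two bytes of the read buffer (see the EEPROM glue above), and the 0xA0 address is the 8-bit form that smu_v11_0_i2c_set_address() shifts down, as in the #if 0 test code below. Illustrative only, not how the RAS code is required to call it.

#include <linux/i2c.h>

/* Illustrative random read through the adapter registered above. A single
 * I2C_M_RD message is used instead of the usual write+read pair because
 * the xfer routine itself performs the dummy address write.
 */
static int example_ras_eeprom_read(struct i2c_adapter *adap, u16 offset,
				   u8 *buf, u16 len)
{
	struct i2c_msg msg = {
		.addr	= 0xA0,
		.flags	= I2C_M_RD,
		.len	= len,		/* includes the 2 address bytes */
		.buf	= buf,
	};

	buf[0] = offset >> 8;		/* EEPROM word address, high byte */
	buf[1] = offset & 0xff;		/* EEPROM word address, low byte  */

	/* i2c_transfer() returns the number of processed messages (here 1)
	 * or a negative error code.
	 */
	return i2c_transfer(adap, &msg, 1);
}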
+
+/*
+ * Keep this around for future unit tests in case bugs arise
+ */
+#if 0
+#define I2C_TARGET_ADDR 0xA0
+
+bool smu_v11_0_i2c_test_bus(struct i2c_adapter *control)
+{
+
+ uint32_t ret = I2C_OK;
+ uint8_t data[6] = {0xf, 0, 0xde, 0xad, 0xbe, 0xef};
+
+
+ DRM_INFO("Begin");
+
+ if (!smu_v11_0_i2c_bus_lock(control)) {
+ DRM_ERROR("Failed to lock the bus!.");
+ return false;
+ }
+
+ smu_v11_0_i2c_init(control);
+
+ /* Write 0xde to address 0x0000 on the EEPROM */
+ ret = smu_v11_0_i2c_eeprom_write_data(control, I2C_TARGET_ADDR, data, 6);
+
+ ret = smu_v11_0_i2c_eeprom_read_data(control, I2C_TARGET_ADDR, data, 6);
+
+ smu_v11_0_i2c_fini(control);
+
+ smu_v11_0_i2c_bus_unlock(control);
+
+
+ DRM_INFO("End");
+ return true;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
index 8bdfb3e5cd1c..973f28d68e70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,23 +14,21 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#ifndef __NVKM_SECBOOT_ACR_R367_H__
-#define __NVKM_SECBOOT_ACR_R367_H__
+#ifndef SMU_V11_I2C_CONTROL_H
+#define SMU_V11_I2C_CONTROL_H
+
+#include <linux/types.h>
-#include "acr_r352.h"
+struct i2c_adapter;
-void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
+int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control);
+void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control);
-struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
-int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..15f3424a1ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -58,11 +58,18 @@
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
@@ -89,8 +96,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -104,8 +111,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -115,6 +122,49 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags, address, data;
+ u64 r;
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* read low 32 bit */
+ WREG32(address, reg);
+ (void)RREG32(address);
+ r = RREG32(data);
+
+ /* read high 32 bits */
+ WREG32(address, reg + 4);
+ (void)RREG32(address);
+ r |= ((u64)RREG32(data) << 32);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* write low 32 bit */
+ WREG32(address, reg);
+ (void)RREG32(address);
+ WREG32(data, (u32)(v & 0xffffffffULL));
+ (void)RREG32(data);
+
+ /* write high 32 bit */
+ WREG32(address, reg + 4);
+ (void)RREG32(address);
+ WREG32(data, (u32)(v >> 32));
+ (void)RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
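
The new soc15_pcie_rreg64()/soc15_pcie_wreg64() helpers extend the index/data indirect access to 64-bit registers by issuing two 32-bit accesses (low dword at reg, high dword at reg + 4) under the same spinlock. A tiny standalone model of the split-and-join arithmetic, ignoring the register I/O itself:

#include <stdint.h>
#include <stdio.h>

/* Model of the 64-bit split used by soc15_pcie_{rreg64,wreg64}(). */
static void split64(uint64_t v, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(v & 0xffffffffULL);	/* written at reg     */
	*hi = (uint32_t)(v >> 32);		/* written at reg + 4 */
}

static uint64_t join64(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t v = 0x123456789abcdef0ULL;

	split64(v, &lo, &hi);
	printf("roundtrip ok: %d\n", join64(lo, hi) == v);
	return 0;
}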
+
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -217,7 +267,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -291,6 +341,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -416,7 +467,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -428,76 +479,96 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
-static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
-
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-}
-
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
+ /* avoid the NBIF getting stuck when doing RAS recovery in BACO reset */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- dev_info(adev->dev, "GPU BACO reset\n");
+ ret = amdgpu_dpm_baco_reset(adev);
+ if (ret)
+ return ret;
- adev->in_baco_reset = 1;
+ /* re-enable doorbell interrupt after BACO exit */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
}
-static int soc15_asic_reset(struct amdgpu_device *adev)
+static enum amd_reset_method
+soc15_asic_reset_method(struct amdgpu_device *adev)
{
- int ret;
- bool baco_reset;
+ bool baco_reset = false;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
- soc15_asic_get_baco_capability(adev, &baco_reset);
+ case CHIP_ARCTURUS:
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
case CHIP_VEGA20:
if (adev->psp.sos_fw_version >= 0x80067)
- soc15_asic_get_baco_capability(adev, &baco_reset);
- else
- baco_reset = false;
- if (baco_reset) {
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
- if (hive || (ras && ras->supported))
- baco_reset = false;
- }
+ /*
+ * 1. PMFW version > 0x284300: all cases use baco
+ * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
+ */
+ if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
+ baco_reset = false;
break;
default:
- baco_reset = false;
break;
}
if (baco_reset)
- ret = soc15_asic_baco_reset(adev);
+ return AMD_RESET_METHOD_BACO;
else
- ret = soc15_asic_mode1_reset(adev);
+ return AMD_RESET_METHOD_MODE1;
+}
- return ret;
+static int soc15_asic_reset(struct amdgpu_device *adev)
+{
+ /* original raven doesn't have full asic reset */
+ if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+ return 0;
+
+ switch (soc15_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
+ return soc15_asic_baco_reset(adev);
+ case AMD_RESET_METHOD_MODE2:
+ return amdgpu_dpm_mode2_reset(adev);
+ default:
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
+ return soc15_asic_mode1_reset(adev);
+ }
+}
+
+static bool soc15_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_ARCTURUS:
+ return amdgpu_dpm_is_baco_supported(adev);
+ case CHIP_VEGA20:
+ if (adev->psp.sos_fw_version >= 0x80067)
+ return amdgpu_dpm_is_baco_supported(adev);
+ return false;
+ default:
+ return false;
+ }
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -556,8 +627,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -571,7 +642,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -581,32 +652,41 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
vega10_reg_base_init(adev);
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ break;
default:
return -EINVAL;
}
- if (adev->asic_type == CHIP_VEGA20)
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->flags & AMD_IS_APU)
- adev->nbio_funcs = &nbio_v7_0_funcs;
- else if (adev->asic_type == CHIP_VEGA20)
- adev->nbio_funcs = &nbio_v7_4_funcs;
- else
- adev->nbio_funcs = &nbio_v6_1_funcs;
+ if (adev->flags & AMD_IS_APU) {
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ } else if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ } else {
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ }
- if (adev->asic_type == CHIP_VEGA20)
- adev->df_funcs = &df_v3_6_funcs;
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
+ adev->df.funcs = &df_v3_6_funcs;
else
- adev->df_funcs = &df_v1_7_funcs;
+ adev->df.funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -638,11 +718,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev)) {
- if (is_support_sw_smu(adev))
+ if (is_support_sw_smu(adev)) {
+ if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
}
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -672,6 +752,53 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#endif
amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
+ case CHIP_ARCTURUS:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ }
+
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ }
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case CHIP_RENOIR:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
default:
return -EINVAL;
}
@@ -681,14 +808,14 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
@@ -714,14 +841,9 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
/* Set the 2 events that we wish to watch, defined above */
/* Reg 40 is # received msgs */
+ /* Reg 104 is # of posted requests sent */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
- /* Pre-VG20, Reg 104 is # of posted requests sent. On VG20 it's 108 */
- if (adev->asic_type == CHIP_VEGA20)
- perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
- EVENT1_SEL, 108);
- else
- perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
- EVENT1_SEL, 104);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
@@ -751,6 +873,55 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
+static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs */
+ /* Reg 108 is # of posted requests sent on VG20 */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
+ EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
+ EVENT1_SEL, 108);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
+}
+
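As in the generic soc15 variant, the 64-bit result here is assembled by tacking the upper-overflow field read back from the control register onto the 32-bit shadow counter. A small stand-alone sketch of that combination (sample values invented):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirror how vega20_get_pcie_usage() forms count0 from
 * the shadow counter and its COUNTER0_UPPER overflow field.
 */
int main(void)
{
	uint32_t shadow = 0xdeadbeef;	/* e.g. smnPCIE_PERF_COUNT0_TXCLK3 readback */
	uint64_t cnt_of = 0x3;		/* e.g. COUNTER0_UPPER field from TXCLK3    */
	uint64_t count0 = (uint64_t)shadow | (cnt_of << 32);

	printf("count0 = 0x%llx\n", (unsigned long long)count0);
	return 0;
}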
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
@@ -792,6 +963,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.read_bios_from_rom = &soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
+ .reset_method = &soc15_asic_reset_method,
.set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
@@ -804,6 +976,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.get_pcie_usage = &soc15_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
+ .supports_baco = &soc15_supports_baco,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -812,6 +985,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.read_bios_from_rom = &soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
+ .reset_method = &soc15_asic_reset_method,
.set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
@@ -821,9 +995,10 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
- .get_pcie_usage = &soc15_get_pcie_usage,
+ .get_pcie_usage = &vega20_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
+ .supports_baco = &soc15_supports_baco,
};
static int soc15_common_early_init(void *handle)
@@ -837,6 +1012,8 @@ static int soc15_common_early_init(void *handle)
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
adev->pcie_wreg = &soc15_pcie_wreg;
+ adev->pcie_rreg64 = &soc15_pcie_rreg64;
+ adev->pcie_wreg64 = &soc15_pcie_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
@@ -992,11 +1169,53 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
-
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_RLC_SMU_HS;
+ break;
+ case CHIP_ARCTURUS:
+ adev->asic_funcs = &vega20_asic_funcs;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x32;
+ break;
+ case CHIP_RENOIR:
+ adev->asic_funcs = &soc15_asic_funcs;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_DF_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_VCN_DPG;
+ adev->external_rev_id = adev->rev_id + 0x91;
break;
default:
/* FIXME: not supported yet */
@@ -1014,11 +1233,15 @@ static int soc15_common_early_init(void *handle)
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- return 0;
+ if (adev->nbio.funcs->ras_late_init)
+ r = adev->nbio.funcs->ras_late_init(adev);
+
+ return r;
}
static int soc15_common_sw_init(void *handle)
@@ -1028,13 +1251,17 @@ static int soc15_common_sw_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
- adev->df_funcs->sw_init(adev);
+ adev->df.funcs->sw_init(adev);
return 0;
}
static int soc15_common_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_nbio_ras_fini(adev);
+ adev->df.funcs->sw_fini(adev);
return 0;
}
@@ -1043,21 +1270,18 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
int i;
struct amdgpu_ring *ring;
- /* Two reasons to skip
- * 1, Host driver already programmed them
- * 2, To avoid registers program violations in SR-IOV
- */
- if (!amdgpu_virt_support_skip_setting(adev)) {
+	/* sdma/ih doorbell ranges are programmed by the hypervisor */
+ if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- }
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
+ }
}
static int soc15_common_hw_init(void *handle)
@@ -1069,13 +1293,13 @@ static int soc15_common_hw_init(void *handle)
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
 * to expose those registers
* to process space
*/
- if (adev->nbio_funcs->remap_hdp_registers)
- adev->nbio_funcs->remap_hdp_registers(adev);
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -1098,6 +1322,14 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ if (adev->nbio.ras_if &&
+ amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ if (adev->nbio.funcs->init_ras_controller_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
+
return 0;
}
@@ -1134,7 +1366,8 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
{
uint32_t def, data;
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
@@ -1237,34 +1470,39 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_rom_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- adev->df_funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
+ adev->df.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
break;
case CHIP_RAVEN:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ case CHIP_RENOIR:
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_rom_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
+ break;
+ case CHIP_ARCTURUS:
+ soc15_update_hdp_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -1280,7 +1518,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
@@ -1302,7 +1540,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
- adev->df_funcs->get_clockgating_state(adev, flags);
+ adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 7a6b2cc6d9f5..d0fb7a67c1a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,8 +28,8 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
-#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
extern const struct amd_ip_funcs soc15_common_ip_funcs;
@@ -60,6 +60,18 @@ struct soc15_allowed_register_entry {
bool grbm_indexed;
};
+struct soc15_ras_field_entry {
+ const char *name;
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+};
+
#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
@@ -67,6 +79,8 @@ struct soc15_allowed_register_entry {
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
@@ -77,6 +91,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
+int arct_reg_base_init(struct amdgpu_device *adev);
void vega10_doorbell_index_init(struct amdgpu_device *adev);
void vega20_doorbell_index_init(struct amdgpu_device *adev);
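The SOC15_REG_FIELD() helper and soc15_ras_field_entry added above work together: the macro pastes the <reg>__<field>_MASK / <reg>__<field>__SHIFT pair that fills the sec/ded mask and shift members, and that mask/shift pair is then what pulls a count out of a register value. A stand-alone sketch with a hypothetical field (the names are invented, not real SOC15 registers):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field, named in the usual <reg>__<field> header style. */
#define mmFOO_EDC_CNT__SEC_COUNT_MASK	0x000000ffU
#define mmFOO_EDC_CNT__SEC_COUNT__SHIFT	0x0

/* SOC15_REG_FIELD(mmFOO_EDC_CNT, SEC_COUNT) would paste exactly this pair. */
static const uint32_t sec_mask  = mmFOO_EDC_CNT__SEC_COUNT_MASK;
static const uint32_t sec_shift = mmFOO_EDC_CNT__SEC_COUNT__SHIFT;

int main(void)
{
	uint32_t reg_val = 0x00000a05;	/* pretend EDC count register readback */

	printf("sec count = %u\n", (reg_val & sec_mask) >> sec_shift);
	return 0;
}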
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 47f74dab365d..19e870c79896 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -52,6 +52,7 @@
uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
+ ret = 0; \
while ((tmp_ & (mask)) != (expected_value)) { \
if (old_ != tmp_) { \
loop = adev->usec_timeout; \
@@ -69,9 +70,10 @@
} \
} while (0)
+#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \
do { \
- if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
+ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
@@ -96,7 +98,7 @@
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
+ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
new file mode 100644
index 000000000000..0d6b50528d76
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_0.h"
+#include "amdgpu.h"
+
+static void umc_v6_0_init_registers(struct amdgpu_device *adev)
+{
+	unsigned int i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002);
+}
+
+const struct amdgpu_umc_funcs umc_v6_0_funcs = {
+ .init_registers = umc_v6_0_init_registers,
+};
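The loop above appears to touch four UMC instances with four channels each; the strides are read straight off the expression, and the divide by four turns a byte address into the dword offset that WREG32() takes. A stand-alone sketch that just enumerates the offsets programmed to 0x1002 (the hardware meaning of the constants is an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		for (unsigned int j = 0; j < 4; j++) {
			uint32_t byte_addr  = i * 0x100000 + 0x5010c + j * 0x2000;
			uint32_t dword_addr = byte_addr / 4;	/* register offsets are dword indexed */

			printf("umc %u ch %u -> 0x%08x\n", i, j, dword_addr);
		}
	return 0;
}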
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
new file mode 100644
index 000000000000..109f1a57a46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_0_H__
+#define __UMC_V6_0_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+extern const struct amdgpu_umc_funcs umc_v6_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
new file mode 100644
index 000000000000..793bf70e64b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_1.h"
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+
+#include "rsmu/rsmu_0_0_2_offset.h"
+#include "rsmu/rsmu_0_0_2_sh_mask.h"
+#include "umc/umc_6_1_1_offset.h"
+#include "umc/umc_6_1_1_sh_mask.h"
+#include "umc/umc_6_1_2_offset.h"
+
+#define UMC_6_INST_DIST 0x40000
+
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * are the index of the 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
+const uint32_t
+ umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
+ {2, 18, 11, 27}, {4, 20, 13, 29},
+ {1, 17, 8, 24}, {7, 23, 14, 30},
+ {10, 26, 3, 19}, {12, 28, 5, 21},
+ {9, 25, 0, 16}, {15, 31, 6, 22}
+};
+
+static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
+{
+ WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_MODE_EN, 1);
+}
+
+static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
+{
+ WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_MODE_EN, 0);
+}
+
+static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
+{
+ uint32_t rsmu_umc_index;
+
+ rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+
+ return REG_GET_FIELD(rsmu_umc_index,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_MODE_EN);
+}
+
+static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
+}
+
+static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ }
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+ UMC_V6_1_CE_CNT_INIT);
+ /* clear the lower chip err count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+ UMC_V6_1_CE_CNT_INIT);
+ /* clear the higher chip err count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
+
+	/* check for SRAM correctable error;
+	 * MCUMC_STATUS is a 64-bit register */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v6_1_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ }
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+		umc_v6_1_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
+}
+
+static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint32_t lsb, mc_umc_status_addr;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
+ }
+
+	/* skip error address processing if err_addr allocation failed (-ENOMEM) */
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ /* the lowest lsb bits should be ignored */
+ lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ err_addr &= ~((0x1ULL << lsb) - 1);
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
+}
+
+static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt_addr;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+ }
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ /* set ce error interrupt type to APIC based interrupt */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrInt, 0x1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ /* set error count to initial value */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
+}
+
+const struct amdgpu_umc_funcs umc_v6_1_funcs = {
+ .err_cnt_init = umc_v6_1_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
+ .query_ras_error_count = umc_v6_1_query_ras_error_count,
+ .query_ras_error_address = umc_v6_1_query_ras_error_address,
+};
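The retired-page computation in umc_v6_1_query_error_address() is the OR of the three address macros defined near the top of this file. A stand-alone sketch with invented inputs, assuming the usual 4 KiB GPU page (AMDGPU_GPU_PAGE_SHIFT == 12):

#include <stdint.h>
#include <stdio.h>

/* Copied from the macros above. */
#define ADDR_OF_8KB_BLOCK(addr)		(((addr) & ~0xffULL) << 5)
#define ADDR_OF_256B_BLOCK(channel_index)	((channel_index) << 8)
#define OFFSET_IN_256B_BLOCK(addr)	((addr) & 0xffULL)

int main(void)
{
	uint64_t err_addr      = 0x123456ULL;	/* after the LSB bits are masked off */
	uint32_t channel_index = 11;		/* from umc_v6_1_channel_idx_tbl     */

	uint64_t pa = ADDR_OF_8KB_BLOCK(err_addr) |
		      ADDR_OF_256B_BLOCK(channel_index) |
		      OFFSET_IN_256B_BLOCK(err_addr);

	printf("retired page frame = 0x%llx\n", (unsigned long long)(pa >> 12));
	return 0;
}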
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
new file mode 100644
index 000000000000..0ce1d323cfdd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_1_H__
+#define __UMC_V6_1_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* HBM Memory Channel Width */
+#define UMC_V6_1_HBM_MEMORY_CHANNEL_WIDTH 128
+/* number of umc channel instance with memory map register access */
+#define UMC_V6_1_CHANNEL_INSTANCE_NUM 4
+/* number of umc instance with memory map register access */
+#define UMC_V6_1_UMC_INSTANCE_NUM 8
+/* total channel instances in one umc block */
+#define UMC_V6_1_TOTAL_CHANNEL_NUM (UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM)
+/* UMC register per channel offset */
+#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20 0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_ARCT 0x400
+
+/* EccErrCnt max value */
+#define UMC_V6_1_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V6_1_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V6_1_CE_CNT_INIT (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
+
+extern const struct amdgpu_umc_funcs umc_v6_1_funcs;
+extern const uint32_t
+ umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 01e62fb8e6e0..0fa8aae2d78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 670784a78512..e0aadcaf6c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
* Open up a stream for HW test
*/
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ err:
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ err:
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
@@ -1410,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index a6bfe7651d07..0995378d8263 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
* Open up a stream for HW test
*/
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ err:
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
@@ -1763,7 +1774,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
@@ -1796,7 +1807,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = uvd_v7_0_enc_ring_get_rptr,
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
.set_wptr = uvd_v7_0_enc_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 475ae68f38f5..217db187207c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index eafbe8d8248d..3fd102efb7af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
if ((adev->asic_type == CHIP_POLARIS10) ||
@@ -1070,7 +1070,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index dde22b7d140d..71f61afdc655 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
@@ -36,21 +37,22 @@
#include "mmhub/mmhub_9_1_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+#include "jpeg_v1_0.h"
-#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK 0x05ac
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1
+#define mmUVD_REG_XX_MASK_1_0 0x05ac
+#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
+
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
/**
* vcn_v1_0_early_init - set function pointers
@@ -63,13 +65,15 @@ static int vcn_v1_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vcn.num_vcn_inst = 1;
adev->vcn.num_enc_rings = 2;
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
- vcn_v1_0_set_jpeg_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
+ jpeg_v1_0_early_init(handle);
+
return 0;
}
@@ -87,27 +91,26 @@ static int vcn_v1_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
- &adev->vcn.irq);
+ &adev->vcn.inst->irq);
if (r)
return r;
}
- /* VCN JPEG TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
- if (r)
- return r;
-
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
+ /* Override the work func */
+ adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -122,42 +125,36 @@ static int vcn_v1_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->vcn.ring_dec;
+ ring = &adev->vcn.inst->ring_dec;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
return r;
- adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 =
+ adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = adev->vcn.external.data0 =
+ adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = adev->vcn.external.data1 =
+ adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = adev->vcn.external.cmd =
+ adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = adev->vcn.external.nop =
+ adev->vcn.internal.nop = adev->vcn.inst->external.nop =
SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.ring_enc[i];
+ ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
return r;
}
- ring = &adev->vcn.ring_jpeg;
- sprintf(ring->name, "vcn_jpeg");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
- if (r)
- return r;
-
adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
- adev->vcn.internal.jpeg_pitch = adev->vcn.external.jpeg_pitch =
- SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
- return 0;
+ r = jpeg_v1_0_sw_init(handle);
+
+ return r;
}
/**
@@ -176,6 +173,8 @@ static int vcn_v1_0_sw_fini(void *handle)
if (r)
return r;
+ jpeg_v1_0_sw_fini(handle);
+
r = amdgpu_vcn_sw_fini(adev);
return r;
@@ -191,7 +190,7 @@ static int vcn_v1_0_sw_fini(void *handle)
static int vcn_v1_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
r = amdgpu_ring_test_helper(ring);
@@ -199,14 +198,13 @@ static int vcn_v1_0_hw_init(void *handle)
goto done;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.ring_enc[i];
- ring->sched.ready = true;
+ ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
}
- ring = &adev->vcn.ring_jpeg;
+ ring = &adev->jpeg.inst->ring_dec;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -229,7 +227,7 @@ done:
static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
RREG32_SOC15(VCN, 0, mmUVD_STATUS))
@@ -304,9 +302,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
offset = 0;
} else {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr));
+ lower_32_bits(adev->vcn.inst->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr));
+ upper_32_bits(adev->vcn.inst->gpu_addr));
offset = size;
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
@@ -316,17 +314,17 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
/* cache window 1: stack */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset));
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset));
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
@@ -374,9 +372,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
offset = 0;
} else {
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+ lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+ upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
offset = size;
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
@@ -386,9 +384,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
/* cache window 1: stack */
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
@@ -396,10 +394,10 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
/* cache window 2: context */
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
@@ -779,7 +777,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
*/
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
int i, j, r;
@@ -837,9 +835,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
vcn_v1_0_mc_resume_spg_mode(adev);
- WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
- WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
- RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
+ WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
+ RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
/* enable VCPU clock */
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
@@ -932,43 +930,28 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- ring = &adev->vcn.ring_enc[0];
+ ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
- ring = &adev->vcn.ring_enc[1];
+ ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
- ring = &adev->vcn.ring_jpeg;
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
- UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
-
- /* initialize wptr */
- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
- /* copy patch commands to the jpeg ring */
- vcn_v1_0_jpeg_ring_set_patch_ring(ring,
- (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+ jpeg_v1_0_start(adev, 0);
return 0;
}
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1105,13 +1088,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize JPEG wptr */
- ring = &adev->vcn.ring_jpeg;
- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
- /* copy patch commands to the jpeg ring */
- vcn_v1_0_jpeg_ring_set_patch_ring(ring,
- (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+ jpeg_v1_0_start(adev, 1);
return 0;
}
@@ -1222,7 +1199,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
}
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
+ int inst_idx, struct dpg_pause_state *new_state)
{
int ret_code;
uint32_t reg_data = 0;
@@ -1230,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1255,21 +1233,21 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
/* Restore */
- ring = &adev->vcn.ring_enc[0];
+ ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- ring = &adev->vcn.ring_enc[1];
+ ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.ring_dec;
+ ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1281,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1315,7 +1294,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
/* Restore */
- ring = &adev->vcn.ring_jpeg;
+ ring = &adev->jpeg.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
@@ -1329,7 +1308,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.ring_dec;
+ ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1341,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.jpeg = new_state->jpeg;
+ adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
return 0;
@@ -1369,7 +1348,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
@@ -1596,7 +1575,7 @@ static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vcn.ring_enc[0])
+ if (ring == &adev->vcn.inst->ring_enc[0])
return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
else
return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -1613,7 +1592,7 @@ static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vcn.ring_enc[0])
+ if (ring == &adev->vcn.inst->ring_enc[0])
return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
else
return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
@@ -1630,7 +1609,7 @@ static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vcn.ring_enc[0])
+ if (ring == &adev->vcn.inst->ring_enc[0])
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
lower_32_bits(ring->wptr));
else
@@ -1715,389 +1694,6 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-
-/**
- * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
-}
-
-/**
- * vcn_v1_0_jpeg_ring_insert_start - insert a start command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a start command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80010000);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_insert_end - insert a end command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a end command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00010000);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command
- *
- * @ring: amdgpu_ring pointer
- * @fence: fence to emit
- *
- * Write a fence and a trap command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
-{
- struct amdgpu_device *adev = ring->adev;
-
- WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x8);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
- amdgpu_ring_write(ring, 0xffffffff);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x3fbc);
-
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x1);
-
- /* emit trap */
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
- *
- * @ring: amdgpu_ring pointer
- * @ib: indirect buffer to execute
- *
- * Write ring commands to execute the indirect buffer.
- */
-static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib,
- uint32_t flags)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmid = AMDGPU_JOB_GET_VMID(job);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, ib->length_dw);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x2);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
- amdgpu_ring_write(ring, 0x2);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val,
- uint32_t mask)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, val);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, 0, PACKETJ_TYPE3));
- }
- amdgpu_ring_write(ring, mask);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
-{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t data0, data1, mask;
-
- pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
-
- /* wait for register write */
- data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
- data1 = lower_32_bits(pd_addr);
- mask = 0xffffffff;
- vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- }
- amdgpu_ring_write(ring, val);
-}
-
-static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
-{
- int i;
-
- WARN_ON(ring->wptr % 2 || count % 2);
-
- for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-}
-
-static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
-{
- struct amdgpu_device *adev = ring->adev;
- ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- ring->ring[(*ptr)++] = 0;
- ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
- } else {
- ring->ring[(*ptr)++] = reg_offset;
- ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
- }
- ring->ring[(*ptr)++] = val;
-}
-
-static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
-{
- struct amdgpu_device *adev = ring->adev;
-
- uint32_t reg, reg_offset, val, mask, i;
-
- // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
- reg_offset = (reg << 2);
- val = lower_32_bits(ring->gpu_addr);
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
- reg_offset = (reg << 2);
- val = upper_32_bits(ring->gpu_addr);
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 3rd to 5th: issue MEM_READ commands
- for (i = 0; i <= 2; i++) {
- ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
- ring->ring[ptr++] = 0;
- }
-
- // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
- reg_offset = (reg << 2);
- val = 0x13;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 7th: program mmUVD_JRBC_RB_REF_DATA
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
- reg_offset = (reg << 2);
- val = 0x1;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
- reg_offset = (reg << 2);
- val = 0x1;
- mask = 0x1;
-
- ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
- ring->ring[ptr++] = 0x01400200;
- ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
- ring->ring[ptr++] = val;
- ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- ring->ring[ptr++] = 0;
- ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
- } else {
- ring->ring[ptr++] = reg_offset;
- ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
- }
- ring->ring[ptr++] = mask;
-
- //9th to 21st: insert no-op
- for (i = 0; i <= 12; i++) {
- ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
- ring->ring[ptr++] = 0;
- }
-
- //22nd: reset mmUVD_JRBC_RB_RPTR
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
- reg_offset = (reg << 2);
- val = 0;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
- reg_offset = (reg << 2);
- val = 0x12;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-}
-
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2114,16 +1710,13 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case 124:
- amdgpu_fence_process(&adev->vcn.ring_dec);
+ amdgpu_fence_process(&adev->vcn.inst->ring_dec);
break;
case 119:
- amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+ amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
break;
case 120:
- amdgpu_fence_process(&adev->vcn.ring_enc[1]);
- break;
- case 126:
- amdgpu_fence_process(&adev->vcn.ring_jpeg);
+ amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -2173,6 +1766,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
return ret;
}
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ unsigned int fences = 0, i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ }
+
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
+
+ if (fences == 0) {
+ amdgpu_gfx_off_ctrl(adev, true);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+ else
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
+ } else {
+ schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ }
+}
+
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (set_clocks) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+ unsigned int fences = 0, i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+
+ adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ }
+}
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -2198,7 +1871,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
.set_wptr = vcn_v1_0_dec_ring_set_wptr,
@@ -2219,7 +1892,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
@@ -2232,7 +1905,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.nop = VCN_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v1_0_enc_ring_get_rptr,
.get_wptr = vcn_v1_0_enc_ring_get_wptr,
.set_wptr = vcn_v1_0_enc_ring_set_wptr,
@@ -2251,51 +1924,16 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vcn_v1_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_JPEG,
- .align_mask = 0xf,
- .nop = PACKET0(0x81ff, 0),
- .support_64bit_ptrs = false,
- .no_user_fence = true,
- .vmhub = AMDGPU_MMHUB,
- .extra_dw = 64,
- .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
- .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
- .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
- .emit_frame_size =
- 6 + 6 + /* hdp invalidate / flush */
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
- 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
- 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
- 6,
- .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
- .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
- .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
- .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
- .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
- .insert_nop = vcn_v1_0_jpeg_ring_nop,
- .insert_start = vcn_v1_0_jpeg_ring_insert_start,
- .insert_end = vcn_v1_0_jpeg_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
- .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
+ adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
DRM_INFO("VCN decode is enabled in VM mode\n");
}
@@ -2304,17 +1942,11 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
+ adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
DRM_INFO("VCN encode is enabled in VM mode\n");
}
-static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
- adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
- DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
-}
-
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
.set = vcn_v1_0_set_interrupt_state,
.process = vcn_v1_0_process_interrupt,
@@ -2322,8 +1954,8 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
- adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
+ adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}
const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
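The vcn_v1_0.c hunks above consistently replace adev->vcn.ring_dec / ring_enc[] / irq / gpu_addr / pause_state with the same fields reached through adev->vcn.inst, i.e. the state now lives in a per-instance array and single-instance VCN 1.0 simply addresses instance 0. The following is a minimal standalone sketch of that layout; everything in it is a reduced mock-up for illustration (member names beyond those visible in the diff, the array size, and the main() harness are assumptions), not the real amdgpu definitions.

#include <stdio.h>

#define MOCK_MAX_VCN_INSTANCES 2	/* assumed size for the sketch */
#define MOCK_MAX_ENC_RINGS 2

struct mock_ring { const char *name; };
struct mock_irq_src { unsigned int num_types; };
struct mock_pause_state { int fw_based; int jpeg; };

/* per-instance state, mirroring what the hunks move under vcn.inst[] */
struct mock_vcn_inst {
	struct mock_ring ring_dec;
	struct mock_ring ring_enc[MOCK_MAX_ENC_RINGS];
	struct mock_irq_src irq;
	struct mock_pause_state pause_state;
	unsigned long long gpu_addr;
};

struct mock_vcn {
	unsigned int num_vcn_inst;
	unsigned int num_enc_rings;
	struct mock_vcn_inst inst[MOCK_MAX_VCN_INSTANCES];
};

int main(void)
{
	struct mock_vcn vcn = { .num_vcn_inst = 1, .num_enc_rings = 2 };

	/* single-instance users such as VCN 1.0 address instance 0:
	 * "vcn.inst->ring_dec" decays to &vcn.inst[0].ring_dec */
	vcn.inst->ring_dec.name = "vcn_dec";
	vcn.inst->ring_enc[0].name = "vcn_enc0";
	vcn.inst->irq.num_types = vcn.num_enc_rings + 2;

	printf("%s: %u irq types on instance 0\n",
	       vcn.inst->ring_dec.name, vcn.inst->irq.num_types);
	return 0;
}

Multi-instance hardware indexes the same array with an explicit instance number, which is also why pause_dpg_mode gains an inst_idx parameter in these hunks.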
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
index 2a497a7a4840..f67d7391fc21 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
@@ -24,6 +24,8 @@
#ifndef __VCN_V1_0_H__
#define __VCN_V1_0_H__
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+
extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
#endif
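Both vcn_v1_0.c above and vcn_v2_0.c below change the pause_dpg_mode prototype to take an instance index and to compare/update the DPG pause state stored on that instance rather than a single shared copy. A reduced sketch of the new contract follows; all types are mock-ups for illustration and the UVD_DPG_PAUSE register programming of the real handlers is deliberately elided.

enum mock_dpg_state { DPG_STATE__UNPAUSE, DPG_STATE__PAUSE };

struct mock_dpg_pause_state {
	enum mock_dpg_state fw_based;
	enum mock_dpg_state jpeg;
};

struct mock_inst { struct mock_dpg_pause_state pause_state; };
struct mock_adev { struct mock_inst inst[2]; };

/* only the per-instance bookkeeping is shown; the WREG32/wait
 * sequences of the real pause handlers are omitted */
static int mock_pause_dpg_mode(struct mock_adev *adev, int inst_idx,
			       struct mock_dpg_pause_state *new_state)
{
	struct mock_dpg_pause_state *cur = &adev->inst[inst_idx].pause_state;

	if (cur->fw_based != new_state->fw_based) {
		/* request or clear the NJ (encode/decode) pause here */
		cur->fw_based = new_state->fw_based;
	}

	if (cur->jpeg != new_state->jpeg) {
		/* request or clear the JPEG pause here */
		cur->jpeg = new_state->jpeg;
	}

	return 0;
}

int main(void)
{
	struct mock_adev adev = { 0 };
	struct mock_dpg_pause_state want = {
		.fw_based = DPG_STATE__PAUSE, .jpeg = DPG_STATE__UNPAUSE };

	return mock_pause_dpg_mode(&adev, 0, &want);
}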
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index dfde886cc6bd..c387c81f8695 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -22,7 +22,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
@@ -47,39 +47,13 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
-#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
-#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
-#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
-#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
-#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
-#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
-#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
-#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
-#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
-#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
-
-#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-
-#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK 0x026c
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
-
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
/**
* vcn_v2_0_early_init - set function pointers
@@ -92,11 +66,11 @@ static int vcn_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vcn.num_vcn_inst = 1;
adev->vcn.num_enc_rings = 2;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
- vcn_v2_0_set_jpeg_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
return 0;
@@ -118,7 +92,7 @@ static int vcn_v2_0_sw_init(void *handle)
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
- &adev->vcn.irq);
+ &adev->vcn.inst->irq);
if (r)
return r;
@@ -126,18 +100,11 @@ static int vcn_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
- &adev->vcn.irq);
+ &adev->vcn.inst->irq);
if (r)
return r;
}
- /* VCN JPEG TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_2_0__SRCID__JPEG_DECODE,
- &adev->vcn.irq);
- if (r)
- return r;
-
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
@@ -156,50 +123,46 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->vcn.ring_dec;
+ ring = &adev->vcn.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
return r;
+ adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+
adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
- adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
+ adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
- adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
+ adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
- adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
+ adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
- adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
+ adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
- adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
+ adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.ring_enc[i];
+ ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
return r;
}
- ring = &adev->vcn.ring_jpeg;
- ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
- sprintf(ring->name, "vcn_jpeg");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
- if (r)
- return r;
-
adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
- adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
-
return 0;
}
@@ -234,35 +197,21 @@ static int vcn_v2_0_sw_fini(void *handle)
static int vcn_v2_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ring->doorbell_index);
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, 0);
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.ring_enc[i];
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ ring = &adev->vcn.inst->ring_enc[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
- }
-
- ring = &adev->vcn.ring_jpeg;
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
- goto done;
}
done:
@@ -283,7 +232,7 @@ done:
static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
@@ -294,13 +243,10 @@ static int vcn_v2_0_hw_fini(void *handle)
ring->sched.ready = false;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.ring_enc[i];
+ ring = &adev->vcn.inst->ring_enc[i];
ring->sched.ready = false;
}
- ring = &adev->vcn.ring_jpeg;
- ring->sched.ready = false;
-
return 0;
}
@@ -368,9 +314,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
offset = 0;
} else {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr));
+ lower_32_bits(adev->vcn.inst->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr));
+ upper_32_bits(adev->vcn.inst->gpu_addr));
offset = size;
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
@@ -380,22 +326,21 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
/* cache window 1: stack */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset));
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset));
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
- WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
@@ -406,88 +351,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- lower_32_bits(adev->vcn.gpu_addr), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- upper_32_bits(adev->vcn.gpu_addr), 0, indirect);
+ upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
offset = size;
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
- lower_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
- upper_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
/* VCN global tiling registers */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
@@ -633,146 +578,23 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
- * jpeg_v2_0_start - start JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * Setup and start the JPEG block
- */
-static int jpeg_v2_0_start(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
- uint32_t tmp;
- int r = 0;
-
- /* disable power gating */
- tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);
-
- SOC15_WAIT_ON_RREG(VCN, 0,
- mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
-
- if (r) {
- DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
- return r;
- }
-
- /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);
-
- /* JPEG disable CGC */
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
- tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);
-
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
- tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
- | JPEG_CGC_GATE__JPEG2_DEC_MASK
- | JPEG_CGC_GATE__JPEG_ENC_MASK
- | JPEG_CGC_GATE__JMCIF_MASK
- | JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
-
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
- return 0;
-}
-
-/**
- * jpeg_v2_0_stop - stop JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * stop the JPEG block
- */
-static int jpeg_v2_0_stop(struct amdgpu_device *adev)
-{
- uint32_t tmp;
- int r = 0;
-
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable JPEG CGC */
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
- tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);
-
-
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
- tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
- |JPEG_CGC_GATE__JPEG2_DEC_MASK
- |JPEG_CGC_GATE__JPEG_ENC_MASK
- |JPEG_CGC_GATE__JMCIF_MASK
- |JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);
-
- /* enable power gating */
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS));
- tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
- tmp |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);
-
- tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);
-
- SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
- (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
-
- if (r) {
- DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
- return r;
- }
-
- return r;
-}
-
-/**
* vcn_v2_0_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
@@ -920,7 +742,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
vcn_v2_0_enable_static_power_gating(adev);
@@ -932,7 +754,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
if (indirect)
- adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr;
+ adev->vcn.inst->dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst->dpg_sram_cpu_addr;
/* enable clock gating */
vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
@@ -941,11 +763,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interupt */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
@@ -957,28 +779,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
@@ -986,29 +808,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* release VCPU reset to boot */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
/* enable LMI MC and UMC channels */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL2),
0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
/* enable master interrupt */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
- psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.dpg_sram_cpu_addr));
+ psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
+ (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -1046,7 +868,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
int i, j, r;
@@ -1054,12 +876,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
- if (r)
- return r;
- goto jpeg;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
vcn_v2_0_disable_static_power_gating(adev);
@@ -1197,24 +1015,21 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
- ring = &adev->vcn.ring_enc[0];
+ ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
- ring = &adev->vcn.ring_enc[1];
+ ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
-jpeg:
- r = jpeg_v2_0_start(adev);
-
- return r;
+ return 0;
}
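/*
 * Illustrative sketch, not part of this patch: with the jpeg: label removed
 * above, the JPEG ring is no longer brought up from the VCN start path.
 * Assuming the split-out JPEG 2.0 code exports its own IP block (named here
 * jpeg_v2_0_ip_block by analogy with vcn_v2_0_ip_block), the SoC init code
 * would register both blocks and let the IP framework start them separately:
 *
 *	amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 *	amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 */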
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
@@ -1233,9 +1048,6 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
-
tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
@@ -1254,10 +1066,6 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
uint32_t tmp;
int r;
- r = jpeg_v2_0_stop(adev);
- if (r)
- return r;
-
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v2_0_stop_dpg_mode(adev);
if (r)
@@ -1322,16 +1130,16 @@ power_off:
}
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
+ int inst_idx, struct dpg_pause_state *new_state)
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1351,14 +1159,14 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
/* Restore */
- ring = &adev->vcn.ring_enc[0];
+ ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- ring = &adev->vcn.ring_enc[1];
+ ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
@@ -1377,7 +1185,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
@@ -1405,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
@@ -1480,11 +1288,13 @@ static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
*
* Write a start command to the ring.
*/
-static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
+void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}
@@ -1495,9 +1305,11 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
*
* Write a end command to the ring.
*/
-static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
+void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}
@@ -1508,14 +1320,15 @@ static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
*
* Write a nop command to the ring.
*/
-static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
+ struct amdgpu_device *adev = ring->adev;
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
amdgpu_ring_write(ring, 0);
}
}
@@ -1528,30 +1341,31 @@ static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
*
* Write a fence and a trap command to the ring.
*/
-static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
+void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
{
- WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+ struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID_INTERNAL_OFFSET, 0));
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
amdgpu_ring_write(ring, seq);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}
@@ -1564,44 +1378,46 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
*
* Write ring commands to execute the indirect buffer
*/
-static void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib,
- uint32_t flags)
+void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ struct amdgpu_device *adev = ring->adev;
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
- amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
amdgpu_ring_write(ring, vmid);
- amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
amdgpu_ring_write(ring, ib->length_dw);
}
-static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val,
- uint32_t mask)
+void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
-static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t data0, data1, mask;
@@ -1615,16 +1431,18 @@ static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
-static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val)
+void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
{
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
@@ -1640,7 +1458,7 @@ static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vcn.ring_enc[0])
+ if (ring == &adev->vcn.inst->ring_enc[0])
return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
else
return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -1657,7 +1475,7 @@ static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vcn.ring_enc[0]) {
+ if (ring == &adev->vcn.inst->ring_enc[0]) {
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
@@ -1681,7 +1499,7 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vcn.ring_enc[0]) {
+ if (ring == &adev->vcn.inst->ring_enc[0]) {
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -1706,8 +1524,8 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
*
* Write enc a fence and a trap command to the ring.
*/
-static void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
- u64 seq, unsigned flags)
+void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@@ -1718,7 +1536,7 @@ static void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
-static void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
@@ -1731,10 +1549,10 @@ static void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
*
* Write enc ring commands to execute the indirect buffer
*/
-static void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib,
- uint32_t flags)
+void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
@@ -1745,9 +1563,8 @@ static void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val,
- uint32_t mask)
+void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, reg << 2);
@@ -1755,8 +1572,8 @@ static void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vmid, uint64_t pd_addr)
+void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
@@ -1767,282 +1584,13 @@ static void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
lower_32_bits(pd_addr), 0xffffffff);
}
-static void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val)
+void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, val);
}
-/**
- * vcn_v2_0_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (ring->use_doorbell)
- return adev->wb.wb[ring->wptr_offs];
- else
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (ring->use_doorbell) {
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
- WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
- } else {
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
- }
-}
-
-/**
- * vcn_v2_0_jpeg_ring_insert_start - insert a start command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a start command to the ring.
- */
-static void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80010000);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_insert_end - insert a end command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a end command to the ring.
- */
-static void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00010000);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command
- *
- * @ring: amdgpu_ring pointer
- * @fence: fence to emit
- *
- * Write a fence and a trap command to the ring.
- */
-static void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
-{
- WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x8);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x3fbc);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x1);
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer
- *
- * @ring: amdgpu_ring pointer
- * @ib: indirect buffer to execute
- *
- * Write ring commands to execute the indirect buffer.
- */
-static void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib,
- uint32_t flags)
-{
- unsigned vmid = AMDGPU_JOB_GET_VMID(job);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, ib->length_dw);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x2);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
- amdgpu_ring_write(ring, 0x2);
-}
-
-static void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val,
- uint32_t mask)
-{
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, val);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE3));
- }
- amdgpu_ring_write(ring, mask);
-}
-
-static void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
-{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t data0, data1, mask;
-
- pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
-
- /* wait for register write */
- data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
- data1 = lower_32_bits(pd_addr);
- mask = 0xffffffff;
- vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
-}
-
-static void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val)
-{
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- }
- amdgpu_ring_write(ring, val);
-}
-
-static void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
-{
- int i;
-
- WARN_ON(ring->wptr % 2 || count % 2);
-
- for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-}
-
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2059,16 +1607,13 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
- amdgpu_fence_process(&adev->vcn.ring_dec);
+ amdgpu_fence_process(&adev->vcn.inst->ring_dec);
break;
case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
- amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+ amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
break;
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
- amdgpu_fence_process(&adev->vcn.ring_enc[1]);
- break;
- case VCN_2_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->vcn.ring_jpeg);
+ amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -2079,27 +1624,27 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned i;
int r;
- WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+ WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 4);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.scratch9);
+ tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -2158,7 +1703,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_0_dec_ring_get_rptr,
.get_wptr = vcn_v2_0_dec_ring_get_wptr,
.set_wptr = vcn_v2_0_dec_ring_set_wptr,
@@ -2189,7 +1734,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .vmhub = AMDGPU_MMHUB,
+ .vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_0_enc_ring_get_rptr,
.get_wptr = vcn_v2_0_enc_ring_get_wptr,
.set_wptr = vcn_v2_0_enc_ring_set_wptr,
@@ -2215,39 +1760,9 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_JPEG,
- .align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB,
- .get_rptr = vcn_v2_0_jpeg_ring_get_rptr,
- .get_wptr = vcn_v2_0_jpeg_ring_get_wptr,
- .set_wptr = vcn_v2_0_jpeg_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
- 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
- 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
- 8 + 16,
- .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
- .emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
- .emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
- .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
- .insert_nop = vcn_v2_0_jpeg_ring_nop,
- .insert_start = vcn_v2_0_jpeg_ring_insert_start,
- .insert_end = vcn_v2_0_jpeg_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->vcn.ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
+ adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
DRM_INFO("VCN decode is enabled in VM mode\n");
}
@@ -2256,17 +1771,11 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- adev->vcn.ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
+ adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
DRM_INFO("VCN encode is enabled in VM mode\n");
}
-static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
- adev->vcn.ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
- DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
-}
-
static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
.set = vcn_v2_0_set_interrupt_state,
.process = vcn_v2_0_process_interrupt,
@@ -2274,8 +1783,8 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
- adev->vcn.irq.funcs = &vcn_v2_0_irq_funcs;
+ adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index a74227f4663b..6c9de1882428 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -24,6 +24,32 @@
#ifndef __VCN_V2_0_H__
#define __VCN_V2_0_H__
+extern void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
+extern void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
+extern void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+extern void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags);
+extern void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags);
+extern void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr);
+extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val);
+extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring);
+
+extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);
+extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags);
+extern void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags);
+extern void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+
extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block;
#endif /* __VCN_V2_0_H__ */
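These ring helpers are exported here (instead of staying static in vcn_v2_0.c) so that newer VCN generations can reuse the v2.0 packet-emission paths. A minimal sketch, not part of the patch, of how a consumer such as the vcn_v2_5.c file added below might wire them into its own ring-funcs table; the field set mirrors vcn_v2_0_dec_ring_vm_funcs shown earlier in this diff, the table name is hypothetical, and the instance-specific callbacks (get_rptr/get_wptr/set_wptr) are omitted:

static const struct amdgpu_ring_funcs example_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,	/* assumed hub for the extra VCN instance */
	/* reuse the vcn_v2_0_* helpers declared above */
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
};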
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
new file mode 100644
index 000000000000..2d64ba1adf99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -0,0 +1,1799 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "vcn_v2_0.h"
+#include "mmsch_v1_0.h"
+
+#include "vcn/vcn_2_5_offset.h"
+#include "vcn/vcn_2_5_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
+#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
+#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
+#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
+#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
+#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
+#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d
+
+#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
+#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
+#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
+#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c
+
+#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
+
+static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
+
+static int amdgpu_ih_clientid_vcns[] = {
+ SOC15_IH_CLIENTID_VCN,
+ SOC15_IH_CLIENTID_VCN1
+};
+
+/**
+ * vcn_v2_5_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int vcn_v2_5_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else
+ adev->vcn.num_vcn_inst = 1;
+
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.num_vcn_inst = 2;
+ adev->vcn.harvest_config = 0;
+ adev->vcn.num_enc_rings = 1;
+ } else {
+ adev->vcn.num_enc_rings = 2;
+ }
+
+ vcn_v2_5_set_dec_ring_funcs(adev);
+ vcn_v2_5_set_enc_ring_funcs(adev);
+ vcn_v2_5_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * vcn_v2_5_sw_init - sw init for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int vcn_v2_5_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int i, j, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ if (adev->vcn.harvest_config & (1 << j))
+ continue;
+ /* VCN DEC TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
+ VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
+ if (r)
+ return r;
+
+ /* VCN ENC TRAP */
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
+ i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
+ if (r)
+ return r;
+ }
+ }
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+
+ if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ DRM_INFO("PSP loading VCN firmware\n");
+ }
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ if (adev->vcn.harvest_config & (1 << j))
+ continue;
+ adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+
+ adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+ adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+ adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+ adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+ adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+
+ ring = &adev->vcn.inst[j].ring_dec;
+ ring->use_doorbell = true;
+
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
+ sprintf(ring->name, "vcn_dec_%d", j);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ ring = &adev->vcn.inst[j].ring_enc[i];
+ ring->use_doorbell = true;
+
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
+
+ sprintf(ring->name, "vcn_enc_%d.%d", j, i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ if (r)
+ return r;
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+
+ return 0;
+}
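/*
 * Worked example, not part of the patch, of the doorbell assignment above,
 * with base = adev->doorbell_index.vcn.vcn_ring0_1 << 1.
 *
 * Bare metal (stride of 8 doorbells per instance, 2 enc rings):
 *   inst 0: dec = base + 0,  enc[0] = base + 2,  enc[1] = base + 3
 *   inst 1: dec = base + 8,  enc[0] = base + 10, enc[1] = base + 11
 *
 * SR-IOV (stride of 2 doorbells per instance, 1 enc ring):
 *   inst 0: dec = base + 0,  enc[0] = base + 1
 *   inst 1: dec = base + 2,  enc[0] = base + 3
 */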
+
+/**
+ * vcn_v2_5_sw_fini - sw fini for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * VCN suspend and free up sw allocation
+ */
+static int vcn_v2_5_sw_fini(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v2_5_hw_init - start and test VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v2_5_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ r = vcn_v2_5_sriov_start(adev);
+
+ for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+ if (adev->vcn.harvest_config & (1 << j))
+ continue;
+
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.inst[j].ring_enc[0].sched.ready = true;
+ adev->vcn.inst[j].ring_enc[1].sched.ready = false;
+ adev->vcn.inst[j].ring_enc[2].sched.ready = false;
+ adev->vcn.inst[j].ring_dec.sched.ready = true;
+ } else {
+
+ ring = &adev->vcn.inst[j].ring_dec;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, j);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ ring = &adev->vcn.inst[j].ring_enc[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
+ }
+ }
+
+done:
+ if (!r)
+ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
+
+ return r;
+}
+
+/**
+ * vcn_v2_5_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the VCN block, mark ring as not ready any more
+ */
+static int vcn_v2_5_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, j;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ring = &adev->vcn.inst[i].ring_dec;
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS)))
+ vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ ring->sched.ready = false;
+
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[i].ring_enc[j];
+ ring->sched.ready = false;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v2_5_suspend - suspend VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v2_5_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vcn_v2_5_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v2_5_resume - resume VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v2_5_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v2_5_hw_init(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v2_5_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
+{
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = size;
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ }
+}
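/*
 * Illustrative layout, not part of the patch: the per-instance BO addressing
 * that vcn_v2_5_mc_resume() programs for the direct-load (non-PSP) path:
 *
 *   inst[i].gpu_addr                           -> cache window 0: firmware image
 *   inst[i].gpu_addr + PAGE_ALIGN(fw->size+4)  -> cache window 1: stack  (AMDGPU_VCN_STACK_SIZE)
 *   ... + AMDGPU_VCN_STACK_SIZE                -> cache window 2: context (AMDGPU_VCN_CONTEXT_SIZE)
 *
 * With PSP firmware loading, window 0 points at the firmware TMR address
 * instead and the stack/context windows start at offset 0 of the BO.
 */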
+
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
+/**
+ * vcn_v2_5_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* UVD disable CGC */
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
+ data &= ~(UVD_CGC_GATE__SYS_MASK
+ | UVD_CGC_GATE__UDEC_MASK
+ | UVD_CGC_GATE__MPEG2_MASK
+ | UVD_CGC_GATE__REGS_MASK
+ | UVD_CGC_GATE__RBC_MASK
+ | UVD_CGC_GATE__LMI_MC_MASK
+ | UVD_CGC_GATE__LMI_UMC_MASK
+ | UVD_CGC_GATE__IDCT_MASK
+ | UVD_CGC_GATE__MPRD_MASK
+ | UVD_CGC_GATE__MPC_MASK
+ | UVD_CGC_GATE__LBSI_MASK
+ | UVD_CGC_GATE__LRBBM_MASK
+ | UVD_CGC_GATE__UDEC_RE_MASK
+ | UVD_CGC_GATE__UDEC_CM_MASK
+ | UVD_CGC_GATE__UDEC_IT_MASK
+ | UVD_CGC_GATE__UDEC_DB_MASK
+ | UVD_CGC_GATE__UDEC_MP_MASK
+ | UVD_CGC_GATE__WCB_MASK
+ | UVD_CGC_GATE__VCPU_MASK
+ | UVD_CGC_GATE__MMSCH_MASK);
+
+ WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
+
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
+ | UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__IDCT_MODE_MASK
+ | UVD_CGC_CTRL__MPRD_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK
+ | UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ /* turn on */
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
+ data |= (UVD_SUVD_CGC_GATE__SRE_MASK
+ | UVD_SUVD_CGC_GATE__SIT_MASK
+ | UVD_SUVD_CGC_GATE__SMP_MASK
+ | UVD_SUVD_CGC_GATE__SCM_MASK
+ | UVD_SUVD_CGC_GATE__SDB_MASK
+ | UVD_SUVD_CGC_GATE__SRE_H264_MASK
+ | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_H264_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCM_H264_MASK
+ | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_H264_MASK
+ | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCLR_MASK
+ | UVD_SUVD_CGC_GATE__UVD_SC_MASK
+ | UVD_SUVD_CGC_GATE__ENT_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
+ | UVD_SUVD_CGC_GATE__SITE_MASK
+ | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
+ | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
+ data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
+ }
+}
+
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
+ uint8_t sram_sel, int inst_idx, uint8_t indirect)
+{
+ uint32_t reg_data = 0;
+
+ /* enable sw clock gating control */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+
+ /* turn off clock gating */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+
+ /* turn on SUVD clock gating */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+
+ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+}
+
+/**
+ * vcn_v2_5_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* enable UVD CGC */
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
+ | UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__IDCT_MODE_MASK
+ | UVD_CGC_CTRL__MPRD_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
+ data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
+ }
+}
+
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ struct amdgpu_ring *ring;
+ uint32_t rb_bufsz, tmp;
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+
+ if (indirect)
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+
+ /* enable clock gating */
+ vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup mmUVD_LMI_CTRL */
+ tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_CNTL),
+ 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_SET_MUXA0),
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_SET_MUXB0),
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_SET_MUX),
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
+
+ vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+
+ /* unblock VCPU register access */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+ (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* set the write pointer delay */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+ (upper_32_bits(ring->gpu_addr) >> 2));
+
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+
+ ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+
+ return 0;
+}
+
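+/**
+ * vcn_v2_5_start - start VCN block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable clock gating, boot the VCPU and initialize the decode and
+ * encode rings for every available VCN instance
+ */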
+static int vcn_v2_5_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint32_t rb_bufsz, tmp;
+ int i, j, k, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* set uvd status busy */
+ tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return 0;
+
+ /* SW clock gating */
+ vcn_v2_5_disable_clock_gating(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* setup mmUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+ tmp &= ~0xff;
+ WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup mmUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup mmUVD_MPC_SET_MUX */
+ WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+ }
+
+ vcn_v2_5_mc_resume(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* VCN global tiling registers */
+ WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (k = 0; k < 10; ++k) {
+ uint32_t status;
+
+ for (j = 0; j < 100; ++j) {
+ status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+ if (status & 2)
+ break;
+ if (amdgpu_emu_mode == 1)
+ msleep(500);
+ else
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("VCN decode not responding, giving up!!!\n");
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+
+ ring = &adev->vcn.inst[i].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+
+ ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vcn.inst[i].ring_enc[1];
+ WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ }
+
+ return 0;
+}
+
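+/**
+ * vcn_v2_5_mmsch_start - kick off MMSCH initialization
+ *
+ * @adev: amdgpu_device pointer
+ * @table: MM table holding the MMSCH init descriptor
+ *
+ * Point the MMSCH at the descriptor, start initialization and wait
+ * for the mailbox response
+ */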
+static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop = 0, size = 0;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v1_1_init_header *header = NULL;
+
+ header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
+ size = header->total_size;
+
+ /*
+ * 1, write to mmMMSCH_VF_CTX_ADDR_LO/HI registers with the GPU mc addr
+ * of the memory descriptor location
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ /*
+ * 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 10;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(100);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ dev_err(adev->dev,
+ "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
+ data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
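+/**
+ * vcn_v2_5_sriov_start - start VCN block under SR-IOV
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Build the per-instance MMSCH init table and let the MMSCH program
+ * the VCN registers on behalf of the VF
+ */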
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint32_t offset, size, tmp, i, rb_bufsz;
+ uint32_t table_size = 0;
+ struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
+ struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
+ struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
+ struct mmsch_v1_0_cmd_end end = { { 0 } };
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
+
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ header->version = MMSCH_VERSION;
+ header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
+ init_table += header->total_size;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ header->eng[i].table_offset = header->total_size;
+ header->eng[i].init_status = 0;
+ header->eng[i].table_size = 0;
+
+ table_size = 0;
+
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ /* mc resume*/
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+ offset = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+
+ ring = &adev->vcn.inst[i].ring_dec;
+ ring->wptr = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+ table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+ init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+
+ /* refine header */
+ header->eng[i].table_size = table_size;
+ header->total_size += table_size;
+ }
+
+ return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
+}
+
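+/**
+ * vcn_v2_5_stop_dpg_mode - stop a VCN instance in dynamic power gating mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: VCN hardware instance index
+ *
+ * Wait for the rings to drain and disable dynamic power gating
+ */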
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ int ret_code = 0;
+ uint32_t tmp;
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ return 0;
+}
+
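+/**
+ * vcn_v2_5_stop - stop VCN block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Wait for each instance to idle, reset the VCPU, re-enable clock
+ * gating and stop the VCN block
+ */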
+static int vcn_v2_5_stop(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+ int i, r = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
+ if (r)
+ return r;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+ if (r)
+ return r;
+
+ /* block LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+ if (r)
+ return r;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* clear status */
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+
+ vcn_v2_5_enable_clock_gating(adev);
+
+ /* enable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ }
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
+
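+/**
+ * vcn_v2_5_pause_dpg_mode - pause/unpause dynamic power gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: VCN hardware instance index
+ * @new_state: requested DPG pause state
+ *
+ * Request a firmware-based DPG pause or unpause; on pause, wait for
+ * the acknowledgement and restore the encode ring registers
+ */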
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state)
+{
+ struct amdgpu_ring *ring;
+ uint32_t reg_data = 0;
+ int ret_code;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
+ DRM_DEBUG("dpg pause state changed %d -> %d",
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
+ reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v2_5_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+}
+
+/**
+ * vcn_v2_5_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+}
+
+/**
+ * vcn_v2_5_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr) | 0x80000000);
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_DEC,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_1,
+ .get_rptr = vcn_v2_5_dec_ring_get_rptr,
+ .get_wptr = vcn_v2_5_dec_ring_get_wptr,
+ .set_wptr = vcn_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
+ 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
+ .emit_ib = vcn_v2_0_dec_ring_emit_ib,
+ .emit_fence = vcn_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
+ .test_ib = amdgpu_vcn_dec_ring_test_ib,
+ .insert_nop = vcn_v2_0_dec_ring_insert_nop,
+ .insert_start = vcn_v2_0_dec_ring_insert_start,
+ .insert_end = vcn_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc read pointer
+ */
+static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
+ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+ else
+ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+}
+
+/**
+ * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc write pointer
+ */
+static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+ } else {
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+ }
+}
+
+/**
+ * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+ } else {
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ }
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .vmhub = AMDGPU_MMHUB_1,
+ .get_rptr = vcn_v2_5_enc_ring_get_rptr,
+ .get_wptr = vcn_v2_5_enc_ring_get_wptr,
+ .set_wptr = vcn_v2_5_enc_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_enc_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+ adev->vcn.inst[i].ring_dec.me = i;
+ DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
+ }
+}
+
+static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+ if (adev->vcn.harvest_config & (1 << j))
+ continue;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+ adev->vcn.inst[j].ring_enc[i].me = j;
+ }
+ DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
+ }
+}
+
+static bool vcn_v2_5_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
+ }
+
+ return ret;
+}
+
+static int vcn_v2_5_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE, ret);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
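+/**
+ * vcn_v2_5_set_clockgating_state - set VCN block clockgating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: clockgating state
+ *
+ * Enable or disable VCN clock gating; gating is only enabled while
+ * the block is idle
+ */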
+static int vcn_v2_5_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (enable) {
+ if (!vcn_v2_5_is_idle(handle))
+ return -EBUSY;
+ vcn_v2_5_enable_clock_gating(adev);
+ } else {
+ vcn_v2_5_disable_clock_gating(adev);
+ }
+
+ return 0;
+}
+
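+/**
+ * vcn_v2_5_set_powergating_state - set VCN block powergating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: powergating state
+ *
+ * Gate or ungate the VCN block by stopping or starting it
+ */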
+static int vcn_v2_5_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v2_5_stop(adev);
+ else
+ ret = vcn_v2_5_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
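+/**
+ * vcn_v2_5_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt source
+ * @entry: interrupt vector entry
+ *
+ * Route a VCN interrupt to the fence handler of the matching decode
+ * or encode ring
+ */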
+static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t ip_instance;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_VCN:
+ ip_instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+ }
+
+ DRM_DEBUG("IH: VCN TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
+ amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
+ break;
+ case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
+ break;
+ case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
+ amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
+ .set = vcn_v2_5_set_interrupt_state,
+ .process = vcn_v2_5_process_interrupt,
+};
+
+static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
+ }
+}
+
+static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
+ .name = "vcn_v2_5",
+ .early_init = vcn_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v2_5_sw_init,
+ .sw_fini = vcn_v2_5_sw_fini,
+ .hw_init = vcn_v2_5_hw_init,
+ .hw_fini = vcn_v2_5_hw_fini,
+ .suspend = vcn_v2_5_suspend,
+ .resume = vcn_v2_5_resume,
+ .is_idle = vcn_v2_5_is_idle,
+ .wait_for_idle = vcn_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v2_5_set_clockgating_state,
+ .set_powergating_state = vcn_v2_5_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 2,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &vcn_v2_5_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
new file mode 100644
index 000000000000..8d9c0800b8e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_V2_5_H__
+#define __VCN_V2_5_H__
+
+extern const struct amdgpu_ip_block_version vcn_v2_5_ip_block;
+
+#endif /* __VCN_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 22260e6963b8..407c6093c2ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -50,7 +50,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return;
@@ -64,7 +64,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
@@ -80,7 +80,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
@@ -106,7 +106,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return;
@@ -125,7 +125,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 0);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
@@ -145,7 +145,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 0);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
@@ -219,14 +219,14 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih;
- u32 ih_rb_cntl;
+ u32 ih_rb_cntl, ih_chicken;
int ret = 0;
u32 tmp;
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
ih = &adev->irq.ih;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -237,8 +237,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
-
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
@@ -247,6 +246,20 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
}
+ if ((adev->asic_type == CHIP_ARCTURUS &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+ adev->asic_type == CHIP_RENOIR) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ } else {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_FBPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+ }
+
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
lower_32_bits(ih->wptr_addr));
@@ -272,7 +285,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WPTR_OVERFLOW_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
@@ -299,7 +312,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
@@ -664,10 +677,49 @@ static int vega10_ih_soft_reset(void *handle)
return 0;
}
+static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ /**
+ * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
+ * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
+ */
+ if (adev->asic_type > CHIP_VEGA10) {
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ }
+
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
static int vega10_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega10_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
return 0;
}
static int vega10_ih_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index a8e92638a2e8..6b52a539d51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
int vega10_reg_base_init(struct amdgpu_device *adev)
@@ -81,6 +80,10 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+ adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL64_VCN0_1;
+ adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_DOORBELL64_VCN2_3;
+ adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_DOORBELL64_VCN4_5;
+ adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_DOORBELL64_VCN6_7;
adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP;
adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 0db84386252a..556f854e3551 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
@@ -50,6 +49,8 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
}
return 0;
}
@@ -85,6 +86,10 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+ adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCN0_1;
+ adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCN2_3;
+ adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCN4_5;
+ adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCN6_7;
adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP;
adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6575ddcfcf00..78b35901643b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -690,15 +690,15 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/**
- * vi_asic_reset - soft reset GPU
+ * vi_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int vi_asic_reset(struct amdgpu_device *adev)
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -711,6 +711,70 @@ static int vi_asic_reset(struct amdgpu_device *adev)
return r;
}
+static bool vi_asic_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ return amdgpu_dpm_is_baco_supported(adev);
+ default:
+ return false;
+ }
+}
+
+static enum amd_reset_method
+vi_asic_reset_method(struct amdgpu_device *adev)
+{
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * vi_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
+ r = amdgpu_dpm_baco_reset(adev);
+ } else {
+ r = vi_asic_pci_config_reset(adev);
+ }
+
+ return r;
+}
+
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
return RREG32(mmCONFIG_MEMSIZE);
@@ -1023,6 +1087,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.read_bios_from_rom = &vi_read_bios_from_rom,
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
+ .reset_method = &vi_asic_reset_method,
.set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
@@ -1035,6 +1100,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_pcie_usage = &vi_get_pcie_usage,
.need_reset_on_init = &vi_need_reset_on_init,
.get_pcie_replay_count = &vi_get_pcie_replay_count,
+ .supports_baco = &vi_asic_supports_baco,
};
#define CZ_REV_BRISTOL(rev) \
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 8de0772f986c..defb4aaf929a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,4 +31,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index a1a35d4d594b..b3672d10ea54 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -1,11 +1,11 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
#
# Heterogenous system architecture configuration
#
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64)
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 48155060a57c..61474627a32c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -38,11 +38,9 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_mqd_manager_v9.o \
$(AMDKFD_PATH)/kfd_mqd_manager_v10.o \
$(AMDKFD_PATH)/kfd_kernel_queue.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_v10.o \
$(AMDKFD_PATH)/kfd_packet_manager.o \
+ $(AMDKFD_PATH)/kfd_packet_manager_vi.o \
+ $(AMDKFD_PATH)/kfd_packet_manager_v9.o \
$(AMDKFD_PATH)/kfd_process_queue_manager.o \
$(AMDKFD_PATH)/kfd_device_queue_manager.o \
$(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 177d1e5329a5..9f59ba93cfe0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
- unsigned int vmid, pasid;
+ unsigned int vmid;
+ uint16_t pasid;
+ bool ret;
/* This workaround is due to HW/FW limitation on Hawaii that
* VMID and PASID are not written into ih_ring_entry
@@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*tmp_ihre = *ihre;
vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
- return (pasid != 0) &&
+ return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 826913c70766..d3400da6ab64 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,154 +274,227 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015e,
+ 0xbf820001, 0xbf820248,
0xb8f8f802, 0x89788678,
- 0xb8fbf803, 0x866eff7b,
- 0x00000400, 0xbf85003b,
- 0x866eff7b, 0x00000800,
- 0xbf850003, 0x866eff7b,
- 0x00000100, 0xbf84000c,
+ 0xb8eef801, 0x866eff6e,
+ 0x00000800, 0xbf840003,
0x866eff78, 0x00002000,
- 0xbf840005, 0xbf8e0010,
- 0xb8eef803, 0x866eff6e,
- 0x00000400, 0xbf84fffb,
- 0x8778ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xb8eef807, 0x866fff6e,
- 0x001f8000, 0x8e6f8b6f,
- 0x8977ff77, 0xfc000000,
- 0x87776f77, 0x896eff6e,
- 0x001f8000, 0xb96ef807,
- 0xb8faf812, 0xb8fbf813,
- 0x8efa887a, 0xc0071bbd,
- 0x00000000, 0xbf8cc07f,
- 0xc0071ebd, 0x00000008,
- 0xbf8cc07f, 0x86ee6e6e,
- 0xbf840001, 0xbe801d6e,
- 0xb8fbf803, 0x867bff7b,
- 0x000001ff, 0xbf850002,
- 0x806c846c, 0x826d806d,
+ 0xbf840016, 0xb8fbf803,
+ 0x866eff7b, 0x00000400,
+ 0xbf85003b, 0x866eff7b,
+ 0x00000800, 0xbf850003,
+ 0x866eff7b, 0x00000100,
+ 0xbf84000c, 0x866eff78,
+ 0x00002000, 0xbf840005,
+ 0xbf8e0010, 0xb8eef803,
+ 0x866eff6e, 0x00000400,
+ 0xbf84fffb, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xb8eef807,
+ 0x866fff6e, 0x001f8000,
+ 0x8e6f8b6f, 0x8977ff77,
+ 0xfc000000, 0x87776f77,
+ 0x896eff6e, 0x001f8000,
+ 0xb96ef807, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xc0071bbd, 0x00000000,
+ 0xbf8cc07f, 0xc0071ebd,
+ 0x00000008, 0xbf8cc07f,
+ 0x86ee6e6e, 0xbf840001,
+ 0xbe801d6e, 0xb8fbf803,
+ 0x867bff7b, 0x000001ff,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
- 0x8f6e8b77, 0x866eff6e,
- 0x001f8000, 0xb96ef807,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e8378, 0xb96ee0c2,
- 0xbf800002, 0xb9780002,
- 0xbe801f6c, 0x866dff6d,
- 0x0000ffff, 0xbefa0080,
- 0xb97a0283, 0xb8fa2407,
- 0x8e7a9b7a, 0x876d7a6d,
- 0xb8fa03c7, 0x8e7a9a7a,
- 0x876d7a6d, 0xb8faf807,
- 0x867aff7a, 0x00007fff,
- 0xb97af807, 0xbeee007e,
- 0xbeef007f, 0xbefe0180,
- 0xbf900004, 0x877a8478,
- 0xb97af802, 0xbf8e0002,
- 0xbf88fffe, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x867aff7f,
- 0x08000000, 0x8f7a837a,
- 0x87777a77, 0x867aff7f,
- 0x70000000, 0x8f7a817a,
- 0x87777a77, 0xbef1007c,
- 0xbef00080, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8fa2407, 0x8e7a9b7a,
+ 0x876d7a6d, 0xb8fa03c7,
+ 0x8e7a9a7a, 0x876d7a6d,
+ 0xb8faf807, 0x867aff7a,
+ 0x00007fff, 0xb97af807,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
+ 0x877a8478, 0xb97af802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x867aff7f, 0x08000000,
+ 0x8f7a837a, 0x87777a77,
+ 0x867aff7f, 0x70000000,
+ 0x8f7a817a, 0x87777a77,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0xbef60084,
+ 0xbef600ff, 0x01000000,
0xbefe007c, 0xbefc0070,
- 0xc0611b3a, 0x0000007c,
+ 0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b7a,
+ 0xbefc0070, 0xc0611b3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bba, 0x0000007c,
+ 0xc0611b7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bfa,
+ 0xbefc0070, 0xc0611bba,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611e3a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xb8fbf803,
- 0xbefe007c, 0xbefc0070,
- 0xc0611efa, 0x0000007c,
+ 0xc0611bfa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a3a,
+ 0xbefc0070, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8fbf803, 0xbefe007c,
+ 0xbefc0070, 0xc0611efa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xb8f1f801,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
+ 0xc0611a3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8f1f801, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840063,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf84005f,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
0x80708170, 0x8e708a70,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
0xbef600ff, 0x01000000,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf84002c, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840028, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
0xd1060002, 0x00011103,
0x7e0602ff, 0x00000200,
0xbefc00ff, 0x00010000,
@@ -438,9 +511,53 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x807b817b, 0x8e7b827b,
0x8e76887b, 0xbef600ff,
0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf840015,
+ 0xbf0a7b7c, 0xbf84006d,
0xbf11017c, 0x807bff7b,
- 0x00001000, 0x7e000300,
+ 0x00001000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850051,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
0x7e020301, 0x7e040302,
0x7e060303, 0xe0724000,
0x701d0000, 0xe0724100,
@@ -563,24 +680,47 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
- 0xbf820001, 0xbf82012e,
- 0xb0804004, 0xb970f802,
- 0x8a708670, 0xb971f803,
- 0x8771ff71, 0x00000400,
- 0xbf850008, 0xb971f803,
- 0x8771ff71, 0x000001ff,
- 0xbf850001, 0x806c846c,
+ 0xbf820001, 0xbf8201c1,
+ 0xb0804004, 0xb978f802,
+ 0x8a788678, 0xb971f803,
+ 0x876eff71, 0x00000400,
+ 0xbf850033, 0x876eff71,
+ 0x00000100, 0xbf840002,
+ 0x8878ff78, 0x00002000,
+ 0x8a77ff77, 0xff000000,
+ 0xb96ef807, 0x876fff6e,
+ 0x02000000, 0x8f6f866f,
+ 0x88776f77, 0x876fff6e,
+ 0x003f8000, 0x8f6f896f,
+ 0x88776f77, 0x8a6eff6e,
+ 0x023f8000, 0xb9eef807,
+ 0xb97af812, 0xb97bf813,
+ 0x8ffa887a, 0xf4051bbd,
+ 0xfa000000, 0xbf8cc07f,
+ 0xf4051ebd, 0xfa000008,
+ 0xbf8cc07f, 0x87ee6e6e,
+ 0xbf840001, 0xbe80206e,
+ 0xb971f803, 0x8771ff71,
+ 0x000001ff, 0xbf850002,
+ 0x806c846c, 0x826d806d,
+ 0x876dff6d, 0x0000ffff,
+ 0x906e8977, 0x876fff6e,
+ 0x003f8000, 0x906e8677,
+ 0x876eff6e, 0x02000000,
+ 0x886e6f6e, 0xb9eef807,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9f8f802, 0xbe80226c,
+ 0xb971f803, 0x8771ff71,
+ 0x00000100, 0xbf840006,
+ 0xbef60380, 0xb9f60203,
0x876dff6d, 0x0000ffff,
- 0xbe80226c, 0xb971f803,
- 0x8771ff71, 0x00000100,
- 0xbf840006, 0xbef60380,
- 0xb9f60203, 0x876dff6d,
- 0x0000ffff, 0x80ec886c,
- 0x82ed806d, 0xbef60380,
- 0xb9f60283, 0xb973f816,
- 0xb9762c07, 0x8f769c76,
- 0x886d766d, 0xb97603c7,
- 0x8f769b76, 0x886d766d,
+ 0x80ec886c, 0x82ed806d,
+ 0xbef60380, 0xb9f60283,
+ 0xb972f816, 0xb9762c07,
+ 0x8f769a76, 0x886d766d,
+ 0xb97603c7, 0x8f769976,
+ 0x886d766d, 0xb9760647,
+ 0x8f769876, 0x886d766d,
0xb976f807, 0x8776ff76,
0x00007fff, 0xb9f6f807,
0xbeee037e, 0xbeef037f,
@@ -589,274 +729,834 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbef4037e, 0x8775ff7f,
0x0000ffff, 0x8875ff75,
0x00040000, 0xbef60380,
- 0xbef703ff, 0x00807fac,
+ 0xbef703ff, 0x10807fac,
0x8776ff7f, 0x08000000,
0x90768376, 0x88777677,
0x8776ff7f, 0x70000000,
0x90768176, 0x88777677,
0xbefb037c, 0xbefa0380,
- 0xb97202dc, 0x8872727f,
- 0xbefe03c1, 0x877c8172,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb9712a05,
- 0x80718171, 0x8f718271,
- 0x877c8172, 0xbf06817c,
- 0xbf85000d, 0x8f768771,
+ 0xb97302dc, 0x8f739973,
+ 0x8873737f, 0xb97a2a05,
+ 0x807a817a, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0x8f7a897a,
+ 0xbf820001, 0x8f7a8a7a,
+ 0xb9761e06, 0x8f768a76,
+ 0x807a767a, 0x807aff7a,
+ 0x00000200, 0xbef603ff,
+ 0x01000000, 0xbefe037c,
+ 0xbefc037a, 0xf4611efa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611b3a,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611b7a,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611bba,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611bfa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611e3a,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xb971f803,
+ 0xbefe037c, 0xbefc037a,
+ 0xf4611c7a, 0xf8000000,
+ 0x807a847a, 0xbefc037e,
+ 0xbefe037c, 0xbefc037a,
+ 0xf4611cba, 0xf8000000,
+ 0x807a847a, 0xbefc037e,
+ 0xb97bf801, 0xbefe037c,
+ 0xbefc037a, 0xf4611efa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xb97bf814,
+ 0xbefe037c, 0xbefc037a,
+ 0xf4611efa, 0xf8000000,
+ 0x807a847a, 0xbefc037e,
+ 0xb97bf815, 0xbefe037c,
+ 0xbefc037a, 0xf4611efa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0x8776ff7f,
+ 0x04000000, 0xbeef0380,
+ 0x886f6f76, 0xb97a2a05,
+ 0x807a817a, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0x8f7a897a,
+ 0xbf820001, 0x8f7a8a7a,
+ 0xb9761e06, 0x8f768a76,
+ 0x807a767a, 0xbef603ff,
+ 0x01000000, 0xbef20374,
+ 0x80747a74, 0x82758075,
+ 0xbefc0380, 0xbf800000,
+ 0xbe802f00, 0xbe822f02,
+ 0xbe842f04, 0xbe862f06,
+ 0xbe882f08, 0xbe8a2f0a,
+ 0xbe8c2f0c, 0xbe8e2f0e,
+ 0xf469003a, 0xfa000000,
+ 0xf469013a, 0xfa000010,
+ 0xf469023a, 0xfa000020,
+ 0xf469033a, 0xfa000030,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0aff7c,
+ 0x00000060, 0xbf85ffea,
+ 0xbe802f00, 0xbe822f02,
+ 0xbe842f04, 0xbe862f06,
+ 0xbe882f08, 0xbe8a2f0a,
+ 0xf469003a, 0xfa000000,
+ 0xf469013a, 0xfa000010,
+ 0xf469023a, 0xfa000020,
+ 0x8074b074, 0x82758075,
+ 0xbef40372, 0xbefa0380,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0xbeff0380,
+ 0xbf820002, 0xbeff03c1,
+ 0xbf82000b, 0xbef603ff,
+ 0x01000000, 0xe0704000,
+ 0x7a5d0000, 0xe0704080,
+ 0x7a5d0100, 0xe0704100,
+ 0x7a5d0200, 0xe0704180,
+ 0x7a5d0300, 0xbf82000a,
0xbef603ff, 0x01000000,
- 0xbefc0380, 0x7e008700,
- 0xe0704000, 0x7a5d0000,
- 0x807c817c, 0x807aff7a,
- 0x00000080, 0xbf0a717c,
- 0xbf85fff8, 0xbf82001b,
- 0x8f768871, 0xbef603ff,
- 0x01000000, 0xbefc0380,
- 0x7e008700, 0xe0704000,
- 0x7a5d0000, 0x807c817c,
- 0x807aff7a, 0x00000100,
- 0xbf0a717c, 0xbf85fff8,
- 0xb9711e06, 0x8771c171,
- 0xbf84000c, 0x8f718371,
- 0x80717c71, 0xbefe03c1,
- 0xbeff0380, 0x7e008700,
0xe0704000, 0x7a5d0000,
- 0x807c817c, 0x807aff7a,
- 0x00000080, 0xbf0a717c,
- 0xbf85fff8, 0xbf8a0000,
- 0x8776ff72, 0x04000000,
- 0xbf84002b, 0xbefe03c1,
- 0x877c8172, 0xbf06817c,
+ 0xe0704100, 0x7a5d0100,
+ 0xe0704200, 0x7a5d0200,
+ 0xe0704300, 0x7a5d0300,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
0xb9714306, 0x8771c171,
- 0xbf840021, 0x8f718671,
+ 0xbf840046, 0xbf8a0000,
+ 0x8776ff6f, 0x04000000,
+ 0xbf840042, 0x8f718671,
0x8f718271, 0xbef60371,
+ 0xb97a2a05, 0x807a817a,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f7a897a, 0xbf820001,
+ 0x8f7a8a7a, 0xb9761e06,
+ 0x8f768a76, 0x807a767a,
+ 0x807aff7a, 0x00000200,
+ 0x807aff7a, 0x00000080,
0xbef603ff, 0x01000000,
0xd7650000, 0x000100c1,
0xd7660000, 0x000200c1,
- 0x16000084, 0x877c8172,
- 0xbf06817c, 0xbefc0380,
- 0xbf85000a, 0x807cff7c,
- 0x00000080, 0x807aff7a,
- 0x00000080, 0xd5250000,
- 0x0001ff00, 0x00000080,
- 0xbf0a717c, 0xbf85fff7,
- 0xbf820009, 0x807cff7c,
- 0x00000100, 0x807aff7a,
- 0x00000100, 0xd5250000,
- 0x0001ff00, 0x00000100,
- 0xbf0a717c, 0xbf85fff7,
- 0x877c8172, 0xbf06817c,
- 0xbf850003, 0x8f7687ff,
- 0x0000006a, 0xbf820002,
- 0x8f7688ff, 0x0000006a,
+ 0x16000084, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbefc0380, 0xbf850012,
+ 0xbe8303ff, 0x00000080,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf8c0000,
+ 0xe0704000, 0x7a5d0100,
+ 0x807c037c, 0x807a037a,
+ 0xd5250000, 0x0001ff00,
+ 0x00000080, 0xbf0a717c,
+ 0xbf85fff4, 0xbf820011,
+ 0xbe8303ff, 0x00000100,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf8c0000,
+ 0xe0704000, 0x7a5d0100,
+ 0x807c037c, 0x807a037a,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a717c,
+ 0xbf85fff4, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850004,
+ 0xbefa03ff, 0x00000200,
+ 0xbeff0380, 0xbf820003,
+ 0xbefa03ff, 0x00000400,
+ 0xbeff03c1, 0xb9712a05,
+ 0x80718171, 0x8f718271,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850017,
0xbef603ff, 0x01000000,
- 0x877c8172, 0xbf06817c,
- 0xbefc0380, 0xbf800000,
- 0xbf85000b, 0xbe802e00,
- 0x7e000200, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x807c817c,
- 0xbf0aff7c, 0x0000006a,
- 0xbf85fff6, 0xbf82000a,
- 0xbe802e00, 0x7e000200,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x807c817c, 0xbf0aff7c,
- 0x0000006a, 0xbf85fff6,
- 0xbef60384, 0xbef603ff,
- 0x01000000, 0x877c8172,
- 0xbf06817c, 0xbf850030,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x7e00026c,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000080,
- 0x7e00026d, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x7e00026e,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000080,
- 0x7e00026f, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x7e000270,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000080,
- 0xb971f803, 0x7e000271,
+ 0xbefc0384, 0xbf0a717c,
+ 0xbf840037, 0x7e008700,
+ 0x7e028701, 0x7e048702,
+ 0x7e068703, 0xe0704000,
+ 0x7a5d0000, 0xe0704080,
+ 0x7a5d0100, 0xe0704100,
+ 0x7a5d0200, 0xe0704180,
+ 0x7a5d0300, 0x807c847c,
+ 0x807aff7a, 0x00000200,
+ 0xbf0a717c, 0xbf85ffef,
+ 0xbf820025, 0xbef603ff,
+ 0x01000000, 0xbefc0384,
+ 0xbf0a717c, 0xbf840020,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xe0704000, 0x7a5d0000,
+ 0xe0704100, 0x7a5d0100,
+ 0xe0704200, 0x7a5d0200,
+ 0xe0704300, 0x7a5d0300,
+ 0x807c847c, 0x807aff7a,
+ 0x00000400, 0xbf0a717c,
+ 0xbf85ffef, 0xb9711e06,
+ 0x8771c171, 0xbf84000c,
+ 0x8f718371, 0x80717c71,
+ 0xbefe03c1, 0xbeff0380,
+ 0x7e008700, 0xe0704000,
+ 0x7a5d0000, 0x807c817c,
0x807aff7a, 0x00000080,
- 0x7e000273, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0xb97bf801,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0xbf82002f,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0x7e00026c,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x7e00026d, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0x7e00026e,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x7e00026f, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0x7e000270,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0xb971f803, 0x7e000271,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x7e000273, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0xb97bf801,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0xbf820119,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x00807fac,
- 0x8772ff7f, 0x08000000,
- 0x90728372, 0x88777277,
- 0x8772ff7f, 0x70000000,
- 0x90728172, 0x88777277,
- 0xb97902dc, 0x8879797f,
- 0xbef80380, 0xbefe03c1,
- 0x877c8179, 0xbf06817c,
+ 0xbf0a717c, 0xbf85fff8,
+ 0xbf820142, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0x8772ff7f,
+ 0x08000000, 0x90728372,
+ 0x88777277, 0x8772ff7f,
+ 0x70000000, 0x90728172,
+ 0x88777277, 0xb97302dc,
+ 0x8f739973, 0x8873737f,
+ 0x8772ff7f, 0x04000000,
+ 0xbf840036, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb96f4306,
+ 0x876fc16f, 0xbf84002b,
+ 0x8f6f866f, 0x8f6f826f,
+ 0xbef6036f, 0xb9782a05,
+ 0x80788178, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb9721e06, 0x8f728a72,
+ 0x80787278, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef603ff,
+ 0x01000000, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbefc0380, 0xbf850009,
+ 0xe0310000, 0x781d0000,
+ 0x807cff7c, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7c, 0xbf85fff8,
+ 0xbf820008, 0xe0310000,
+ 0x781d0000, 0x807cff7c,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7c,
+ 0xbf85fff8, 0xbef80380,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
0xb96f2a05, 0x806f816f,
- 0x8f6f826f, 0x877c8179,
- 0xbf06817c, 0xbf850013,
- 0x8f76876f, 0xbef603ff,
+ 0x8f6f826f, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850021, 0xbef603ff,
0x01000000, 0xbef20378,
- 0x8078ff78, 0x00000080,
- 0xbefc0381, 0xe0304000,
- 0x785d0000, 0xbf8c3f70,
- 0x7e008500, 0x807c817c,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff7,
- 0xe0304000, 0x725d0000,
- 0xbf820023, 0x8f76886f,
+ 0x8078ff78, 0x00000200,
+ 0xbefc0384, 0xe0304000,
+ 0x785d0000, 0xe0304080,
+ 0x785d0100, 0xe0304100,
+ 0x785d0200, 0xe0304180,
+ 0x785d0300, 0xbf8c3f70,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807c847c, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85ffee, 0xe0304000,
+ 0x725d0000, 0xe0304080,
+ 0x725d0100, 0xe0304100,
+ 0x725d0200, 0xe0304180,
+ 0x725d0300, 0xbf820032,
0xbef603ff, 0x01000000,
0xbef20378, 0x8078ff78,
- 0x00000100, 0xbefc0381,
- 0xe0304000, 0x785d0000,
- 0xbf8c3f70, 0x7e008500,
- 0x807c817c, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fff7, 0xb96f1e06,
- 0x876fc16f, 0xbf84000e,
- 0x8f6f836f, 0x806f7c6f,
- 0xbefe03c1, 0xbeff0380,
+ 0x00000400, 0xbefc0384,
0xe0304000, 0x785d0000,
+ 0xe0304100, 0x785d0100,
+ 0xe0304200, 0x785d0200,
+ 0xe0304300, 0x785d0300,
0xbf8c3f70, 0x7e008500,
- 0x807c817c, 0x8078ff78,
- 0x00000080, 0xbf0a6f7c,
- 0xbf85fff7, 0xbeff03c1,
- 0xe0304000, 0x725d0000,
- 0x8772ff79, 0x04000000,
- 0xbf840020, 0xbefe03c1,
- 0x877c8179, 0xbf06817c,
- 0xbf850002, 0xbeff0380,
- 0xbf820001, 0xbeff03c1,
- 0xb96f4306, 0x876fc16f,
- 0xbf840016, 0x8f6f866f,
- 0x8f6f826f, 0xbef6036f,
- 0xbef603ff, 0x01000000,
- 0x877c8172, 0xbf06817c,
- 0xbefc0380, 0xbf850007,
- 0x807cff7c, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fffa,
- 0xbf820006, 0x807cff7c,
- 0x00000100, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fffa, 0x877c8179,
- 0xbf06817c, 0xbf850003,
- 0x8f7687ff, 0x0000006a,
- 0xbf820002, 0x8f7688ff,
- 0x0000006a, 0xbef603ff,
- 0x01000000, 0x877c8179,
- 0xbf06817c, 0xbf850012,
- 0xf4211cba, 0xf0000000,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xb96f1e06, 0x876fc16f,
+ 0xbf84000e, 0x8f6f836f,
+ 0x806f7c6f, 0xbefe03c1,
+ 0xbeff0380, 0xe0304000,
+ 0x785d0000, 0xbf8c3f70,
+ 0x7e008500, 0x807c817c,
0x8078ff78, 0x00000080,
- 0xbefc0381, 0xf421003a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xbf8cc07f,
- 0xbe803000, 0xbf800000,
- 0x807c817c, 0xbf0aff7c,
- 0x0000006a, 0xbf85fff5,
- 0xbe800372, 0xbf820011,
- 0xf4211cba, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xbefc0381, 0xf421003a,
- 0xf0000000, 0x8078ff78,
- 0x00000100, 0xbf8cc07f,
- 0xbe803000, 0xbf800000,
- 0x807c817c, 0xbf0aff7c,
- 0x0000006a, 0xbf85fff5,
- 0xbe800372, 0xbef60384,
+ 0xbf0a6f7c, 0xbf85fff7,
+ 0xbeff03c1, 0xe0304000,
+ 0x725d0000, 0xe0304100,
+ 0x725d0100, 0xe0304200,
+ 0x725d0200, 0xe0304300,
+ 0x725d0300, 0xbf8c3f70,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef603ff, 0x01000000,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
0xbef603ff, 0x01000000,
- 0x877c8179, 0xbf06817c,
- 0xbf850025, 0xf4211bfa,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211b3a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211b7a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211eba,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211efa,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211c3a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211c7a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211cfa,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211e7a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xbf820024,
0xf4211bfa, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211b3a, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211b3a,
+ 0xf0000000, 0x80788478,
0xf4211b7a, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211eba, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211eba,
+ 0xf0000000, 0x80788478,
0xf4211efa, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211c3a, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211c3a,
+ 0xf0000000, 0x80788478,
0xf4211c7a, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211e7a,
+ 0xf0000000, 0x80788478,
0xf4211cfa, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211e7a, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xbf8cc07f, 0x876dff6d,
- 0x0000ffff, 0xbefc036f,
- 0xbefe037a, 0xbeff037b,
- 0x876f71ff, 0x000003ff,
- 0xb9ef4803, 0xb9f3f816,
- 0x876f71ff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f9f801, 0x876fff6d,
- 0xf0000000, 0x906f9c6f,
- 0x8f6f906f, 0xbef20380,
- 0x88726f72, 0x876fff6d,
- 0x08000000, 0x906f9b6f,
- 0x8f6f8f6f, 0x88726f72,
+ 0x80788478, 0xf4211bba,
+ 0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
+ 0xf4211bba, 0xf0000000,
+ 0x80788478, 0xbf8cc07f,
+ 0xb9eef815, 0xbef2036d,
+ 0x876dff72, 0x0000ffff,
+ 0xbefc036f, 0xbefe037a,
+ 0xbeff037b, 0x876f71ff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f71ff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0x876fff72, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbef30380, 0x88736f73,
+ 0x876fff72, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
+ 0x88736f73, 0x876fff72,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x88736f73,
0x876fff70, 0x00800000,
- 0x906f976f, 0xb9f2f807,
+ 0x906f976f, 0xb9f3f807,
+ 0x87fe7e7e, 0x87ea6a6a,
0xb9f0f802, 0xbf8a0000,
0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+static const uint32_t cwsr_trap_arcturus_hex[] = {
+ 0xbf820001, 0xbf8202c4,
+ 0xb8f8f802, 0x89788678,
+ 0xb8eef801, 0x866eff6e,
+ 0x00000800, 0xbf840003,
+ 0x866eff78, 0x00002000,
+ 0xbf840016, 0xb8fbf803,
+ 0x866eff7b, 0x00000400,
+ 0xbf85003b, 0x866eff7b,
+ 0x00000800, 0xbf850003,
+ 0x866eff7b, 0x00000100,
+ 0xbf84000c, 0x866eff78,
+ 0x00002000, 0xbf840005,
+ 0xbf8e0010, 0xb8eef803,
+ 0x866eff6e, 0x00000400,
+ 0xbf84fffb, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xb8eef807,
+ 0x866fff6e, 0x001f8000,
+ 0x8e6f8b6f, 0x8977ff77,
+ 0xfc000000, 0x87776f77,
+ 0x896eff6e, 0x001f8000,
+ 0xb96ef807, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xc0071bbd, 0x00000000,
+ 0xbf8cc07f, 0xc0071ebd,
+ 0x00000008, 0xbf8cc07f,
+ 0x86ee6e6e, 0xbf840001,
+ 0xbe801d6e, 0xb8fbf803,
+ 0x867bff7b, 0x000001ff,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
+ 0x866dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8fa2407, 0x8e7a9b7a,
+ 0x876d7a6d, 0xb8fa03c7,
+ 0x8e7a9a7a, 0x876d7a6d,
+ 0xb8faf807, 0x867aff7a,
+ 0x00007fff, 0xb97af807,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
+ 0x877a8478, 0xb97af802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0x8e7a817a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x867aff7f,
+ 0x08000000, 0x8f7a837a,
+ 0x87777a77, 0x867aff7f,
+ 0x70000000, 0x8f7a817a,
+ 0x87777a77, 0xbef1007c,
+ 0xbef00080, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611b3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611bba,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bfa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8fbf803, 0xbefe007c,
+ 0xbefc0070, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8f1f801, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0x8e76887b, 0xbef600ff,
+ 0x01000000, 0xbefc0084,
+ 0xbf0a7b7c, 0xbf84006d,
+ 0xbf11017c, 0x807bff7b,
+ 0x00001000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850051,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffef,
+ 0xbf9c0000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf820106,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x08000000,
+ 0x8f6e836e, 0x87776e77,
+ 0x866eff7f, 0x70000000,
+ 0x8f6e816e, 0x87776e77,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbef80080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x8e76886f, 0xbef90076,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbef30079,
+ 0x8079ff79, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x806fff6f, 0x00008000,
+ 0xe0524000, 0x791d0000,
+ 0xe0524100, 0x791d0100,
+ 0xe0524200, 0x791d0200,
+ 0xe0524300, 0x791d0300,
+ 0x8079ff79, 0x00000400,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffdb, 0xbf9c0000,
+ 0xe0524000, 0x731d0000,
+ 0xe0524100, 0x731d0100,
+ 0xe0524200, 0x731d0200,
+ 0xe0524300, 0x731d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+ 0xbefc006f, 0xc031003a,
+ 0x00000078, 0x80f8c078,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
+ 0x00000078, 0x80788478,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
+ 0x00000078, 0x80788478,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
+ 0x00000078, 0x80788478,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
+ 0x00000078, 0x80788478,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
+ 0x00000078, 0x80788478,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0x8e6e816e,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f866f, 0x806e6f6e,
+ 0x806e746e, 0x826f8075,
+ 0x866fff6f, 0x0000ffff,
+ 0xc00b1c37, 0x00000050,
+ 0xc00b1d37, 0x00000060,
+ 0xc0031e77, 0x00000074,
+ 0xbf8cc07f, 0x866fff6d,
+ 0xf8000000, 0x8f6f9b6f,
+ 0x8e6f906f, 0xbeee0080,
+ 0x876e6f6e, 0x866fff6d,
+ 0x04000000, 0x8f6f9a6f,
+ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff7a, 0x00800000,
+ 0x8f6f976f, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0x95806f6c,
+ 0xbf810000, 0x00000000,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index f20e463e748b..4433bda2ce25 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -20,1105 +20,948 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
+var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11
+var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1
+
+var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
+var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF
+var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
+var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
+var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+
+var SQ_WAVE_IB_STS_RCNT_SHIFT = 16
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15
+var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT = 25
+var SQ_WAVE_IB_STS_REPLAY_W64H_SIZE = 1
+var SQ_WAVE_IB_STS_REPLAY_W64H_MASK = 0x02000000
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1
+var SQ_WAVE_IB_STS_RCNT_SIZE = 6
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF
+
+var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
+// bits [31:24] unused by SPI debug data
+var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
+var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
+var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 24
+var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0x7F000000
+
+// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
+// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
+
+var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000
+var S_SAVE_SPI_INIT_ATC_SHIFT = 27
+var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000
+var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_RCNT_SHIFT = 26
+var S_SAVE_PC_HI_RCNT_MASK = 0xFC000000
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 25
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x02000000
+var S_SAVE_PC_HI_REPLAY_W64H_SHIFT = 24
+var S_SAVE_PC_HI_REPLAY_W64H_MASK = 0x01000000
+
+var s_sgpr_save_num = 108
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+var s_save_pc_lo = ttmp0
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_status = ttmp12
+var s_save_trapsts = ttmp5
+var s_save_xnack_mask = ttmp6
+var s_wave_size = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+var s_save_mem_offset = ttmp14
+var s_save_alloc_size = s_save_trapsts
+var s_save_tmp = s_save_buf_rsrc2
+var s_save_m0 = ttmp15
+
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000
+var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
+var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000
+var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+var S_WAVE_SIZE = 25
+
+var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
+var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
+var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+var s_restore_mem_offset = ttmp12
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp6
+var s_restore_mem_offset_save = s_restore_tmp
+var s_restore_m0 = s_restore_alloc_size
+var s_restore_mode = ttmp7
+var s_restore_flat_scratch = ttmp2
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = ttmp14
+var s_restore_exec_hi = ttmp15
+var s_restore_status = ttmp4
+var s_restore_trapsts = ttmp5
+var s_restore_xnack_mask = ttmp13
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+var s_restore_size = ttmp7
shader main
+ asic(DEFAULT)
+ type(CS)
+ wave_size(32)
-asic(DEFAULT)
-
-type(CS)
-
-wave_size(32)
-/*************************************************************************/
-/* control on how to run the shader */
-/*************************************************************************/
-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
-var EMU_RUN_HACK = 0
-var EMU_RUN_HACK_RESTORE_NORMAL = 0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS = 0
-var WG_BASE_ADDR_LO = 0x9000a000
-var WG_BASE_ADDR_HI = 0x0
-var WAVE_SPACE = 0x9000 //memory size that each wave occupies in workgroup state mem, increase from 5000 to 9000 for more SGPR need to be saved
-var CTX_SAVE_CONTROL = 0x0
-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
-var SGPR_SAVE_USE_SQC = 0 //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //need to change BUF_DATA_FORMAT in S_SAVE_BUF_RSRC_WORD3_MISC from 0 to BUF_DATA_FORMAT_32 if set to 1 (i.e. 0x00827FAC)
-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
-var SAVE_RESTORE_HWID_DDID = 0
-var RESTORE_DDID_IN_SGPR18 = 0
-/**************************************************************************/
-/* variables */
-/**************************************************************************/
-var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
-var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
-
-var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
-var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
-var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
-var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
-var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11
-var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1
-
-var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask
-var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
-var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
-var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
-var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
-var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
-var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
-var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
-var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
-var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
-
-var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1 //FIXME
-var SQ_WAVE_IB_STS_RCNT_SIZE = 6 //FIXME
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
-
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
-
-
-/* Save */
-var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
-var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
-
-var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_SAVE_SPI_INIT_ATC_SHIFT = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
-var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
-var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-
-var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
-
-var s_save_spi_init_lo = exec_lo
-var s_save_spi_init_hi = exec_hi
-
-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
-var s_save_pc_hi = ttmp1
-var s_save_exec_lo = ttmp2
-var s_save_exec_hi = ttmp3
-var s_save_status = ttmp4
-var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
-var s_wave_size = ttmp6 //ttmp6 is not needed now, since it's only 32bit xnack mask, now use it to determine wave32 or wave64 in EMU_HACK
-var s_save_xnack_mask = ttmp7
-var s_save_buf_rsrc0 = ttmp8
-var s_save_buf_rsrc1 = ttmp9
-var s_save_buf_rsrc2 = ttmp10
-var s_save_buf_rsrc3 = ttmp11
-
-var s_save_mem_offset = ttmp14
-var s_sgpr_save_num = 106 //in gfx10, all sgpr must be saved
-var s_save_alloc_size = s_save_trapsts //conflict
-var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
-var s_save_m0 = ttmp15
-
-/* Restore */
-var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
-var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
-
-var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
-var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
-var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-
-var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
-var s_restore_spi_init_lo = exec_lo
-var s_restore_spi_init_hi = exec_hi
-
-var s_restore_mem_offset = ttmp12
-var s_restore_alloc_size = ttmp3
-var s_restore_tmp = ttmp6
-var s_restore_mem_offset_save = s_restore_tmp //no conflict
-
-var s_restore_m0 = s_restore_alloc_size //no conflict
-
-var s_restore_mode = ttmp13
-var s_restore_hwid1 = ttmp2
-var s_restore_ddid = s_restore_hwid1
-var s_restore_pc_lo = ttmp0
-var s_restore_pc_hi = ttmp1
-var s_restore_exec_lo = ttmp14
-var s_restore_exec_hi = ttmp15
-var s_restore_status = ttmp4
-var s_restore_trapsts = ttmp5
-//var s_restore_xnack_mask_lo = xnack_mask_lo
-//var s_restore_xnack_mask_hi = xnack_mask_hi
-var s_restore_xnack_mask = ttmp7
-var s_restore_buf_rsrc0 = ttmp8
-var s_restore_buf_rsrc1 = ttmp9
-var s_restore_buf_rsrc2 = ttmp10
-var s_restore_buf_rsrc3 = ttmp11
-var s_restore_size = ttmp13 //ttmp13 has no conflict
-
-/**************************************************************************/
-/* trap handler entry points */
-/**************************************************************************/
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
- else
- s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
- end
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
L_JUMP_TO_RESTORE:
- s_branch L_RESTORE //restore
+ s_branch L_RESTORE
L_SKIP_RESTORE:
-
- s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
- s_cbranch_scc1 L_SAVE //this is the operation for save
-
- // ********* Handle non-CWSR traps *******************
- if (!EMU_RUN_HACK)
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
- s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
-
- L_EXCP_CASE:
- s_and_b32 ttmp1, ttmp1, 0xFFFF
- s_rfe_b64 [ttmp0, ttmp1]
- end
- // ********* End handling of non-CWSR traps *******************
-
-/**************************************************************************/
-/* save routine */
-/**************************************************************************/
-
-L_SAVE:
-
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE
+
+ // If STATUS.MEM_VIOL is asserted then halt the wave to prevent
+ // the exception raising again and blocking context save.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+ s_cbranch_scc0 L_FETCH_2ND_TRAP
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+L_FETCH_2ND_TRAP:
+ // Preserve and clear scalar XNACK state before issuing scalar loads.
+ // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
+ // unused space ttmp11[31:24].
+ s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
+ s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
+ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_or_b32 ttmp11, ttmp11, ttmp3
+ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_or_b32 ttmp11, ttmp11, ttmp3
+ s_andn2_b32 ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
+ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+
+ // Read second-level TBA/TMA from first-level TMA and jump if available.
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+ // ttmp12 holds SQ_WAVE_STATUS
+ s_getreg_b32 ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
+ s_getreg_b32 ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
+ s_waitcnt lgkmcnt(0)
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
+ s_waitcnt lgkmcnt(0)
+ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+ s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler has not been set
+ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK
+ s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
+ s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
+ s_addc_u32 ttmp1, ttmp1, 0
+L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+
+ // Restore SQ_WAVE_IB_STS.
+ s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_or_b32 ttmp2, ttmp2, ttmp3
+ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
+
+ s_rfe_b64 [ttmp0, ttmp1]
+
+L_SAVE:
//check whether there is mem_viol
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
s_cbranch_scc0 L_NO_PC_REWIND
-
+
//if so, need rewind PC assuming GDS operation gets NACKed
- s_mov_b32 s_save_tmp, 0 //clear mem_viol bit
- s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
- s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
+ s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0
L_NO_PC_REWIND:
- s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
- s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
-
- //s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
- //s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi
- s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+ s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT, SQ_WAVE_IB_STS_REPLAY_W64H_SIZE)
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY and REPLAY_W64H in IB_STS
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
-
- /* inform SPI the readiness and wait for SPI's go signal */
- s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
- s_mov_b32 s_save_exec_hi, exec_hi
- s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
- if (EMU_RUN_HACK)
-
- else
- s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
- end
-
- L_SLEEP:
- s_sleep 0x2
-
- if (EMU_RUN_HACK)
-
- else
- s_cbranch_execz L_SLEEP
- end
-
-
- /* setup Resource Constants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_save_tmp, v9, 0
- //determine it is wave32 or wave64
- s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
- s_cmp_eq_u32 s_wave_size, 0
- s_cbranch_scc1 L_SAVE_WAVE32
- s_lshr_b32 s_save_tmp, s_save_tmp, 6 //SAVE WAVE64
- s_branch L_SAVE_CON
- L_SAVE_WAVE32:
- s_lshr_b32 s_save_tmp, s_save_tmp, 5 //SAVE WAVE32
- L_SAVE_CON:
- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
-
-
- s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
- s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
- s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
- s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
-
- s_mov_b32 s_save_m0, m0 //save M0
-
- /* global mem offset */
- s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0
- s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) //get wave_save_size
- s_or_b32 s_wave_size, s_save_spi_init_hi, s_wave_size //share s_wave_size with exec_hi
-
- /* save VGPRs */
- //////////////////////////////
- L_SAVE_VGPR:
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_SAVE_VGPR_NORMAL
- L_ENABLE_SAVE_VGPR_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_SAVE_VGPR_NORMAL:
- s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
- //for wave32 and wave64, the num of vgpr function is the same?
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
- //determine it is wave32 or wave64
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_VGPR_WAVE64
-
- //zhenxu added it for save vgpr for wave32
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 7 //NUM_RECORDS in bytes (32 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 m0, 0x0 //VGPR initial index value =0
- //s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
- //s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later, doesn't need this in gfx10
-
- L_SAVE_VGPR_WAVE32_LOOP:
- v_movrels_b32 v0, v0 //v0 = v[0+m0]
-
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- end
-
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //every buffer_store_dword does 128 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_VGPR_WAVE32_LOOP //VGPR save is complete?
- s_branch L_SAVE_LDS
- //save vgpr for wave32 ends
-
- L_SAVE_VGPR_WAVE64:
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 m0, 0x0 //VGPR initial index value =0
- //s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
- //s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later, doesn't need this in gfx10
-
- L_SAVE_VGPR_WAVE64_LOOP:
- v_movrels_b32 v0, v0 //v0 = v[0+m0]
-
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- end
-
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_VGPR_WAVE64_LOOP //VGPR save is complete?
- //s_set_gpr_idx_off
- //
- //Below part will be the save shared vgpr part (new for gfx10)
- s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
- s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
- s_cbranch_scc0 L_SAVE_LDS //no shared_vgpr used? jump to L_SAVE_LDS
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
- //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
- //save shared_vgpr will start from the index of m0
- s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
- s_mov_b32 exec_lo, 0xFFFFFFFF
- s_mov_b32 exec_hi, 0x00000000
- L_SAVE_SHARED_VGPR_WAVE64_LOOP:
- v_movrels_b32 v0, v0 //v0 = v[0+m0]
- buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //every buffer_store_dword does 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
-
- /* save LDS */
- //////////////////////////////
- L_SAVE_LDS:
-
- //Only check the first wave need LDS
- /* the first wave in the threadgroup */
- s_barrier //FIXME not performance-optimal "LDS is used? wait for other waves in the same TG"
- s_and_b32 s_save_tmp, s_wave_size, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here
- s_cbranch_scc0 L_SAVE_SGPR
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_SAVE_LDS_NORMAL
- L_ENABLE_SAVE_LDS_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_SAVE_LDS_NORMAL:
- s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
- s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
- s_cbranch_scc0 L_SAVE_SGPR //no lds used? jump to L_SAVE_VGPR
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes
- s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- //load 0~63*4(byte address) to vgpr v15
- v_mbcnt_lo_u32_b32 v0, -1, 0
- v_mbcnt_hi_u32_b32 v0, -1, v0
- v_mul_u32_u24 v0, 4, v0
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_mov_b32 m0, 0x0
- s_cbranch_scc1 L_SAVE_LDS_LOOP_W64
-
- L_SAVE_LDS_LOOP_W32:
- if (SAVE_LDS)
- ds_read_b32 v1, v0
- s_waitcnt 0 //ensure data ready
- buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- //buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 //save lds to memory doesn't exist in 10
- end
- s_add_u32 m0, m0, 128 //every buffer_store_lds does 128 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //mem offset increased by 128 bytes
- v_add_nc_u32 v0, v0, 128
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
- s_branch L_SAVE_SGPR
-
- L_SAVE_LDS_LOOP_W64:
- if (SAVE_LDS)
- ds_read_b32 v1, v0
- s_waitcnt 0 //ensure data ready
- buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- //buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 //save lds to memory doesn't exist in 10
- end
- s_add_u32 m0, m0, 256 //every buffer_store_lds does 256 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //mem offset increased by 256 bytes
- v_add_nc_u32 v0, v0, 256
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
-
-
- /* save SGPRs */
- //////////////////////////////
- //s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size
- //s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
- //s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- //s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //In gfx10, Number of SGPRs = (sgpr_size + 1) * 8 (non-zero value)
- L_SAVE_SGPR:
- //need to look at it is wave32 or wave64
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_SGPR_VMEM_WAVE64
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 7 //NUM_RECORDS in bytes (32 threads)
- end
- s_branch L_SAVE_SGPR_CONT
- L_SAVE_SGPR_VMEM_WAVE64:
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 8 //NUM_RECORDS in bytes (64 threads)
- end
- L_SAVE_SGPR_CONT:
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- //s_mov_b32 m0, 0x0 //SGPR initial index value =0
- //s_nop 0x0 //Manually inserted wait states
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
-
- s_mov_b32 m0, 0x0 //SGPR initial index value =0
- s_nop 0x0 //Manually inserted wait states
-
- s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE64
-
- L_SAVE_SGPR_LOOP_WAVE32:
- s_movrels_b32 s0, s0 //s0 = s[0+m0]
- //zhenxu, adding one more argument to save sgpr function, this is only for vmem, using sqc is not change
- write_sgpr_to_mem_wave32(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_sgpr_save_num) ? 1 : 0
- s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE32 //SGPR save is complete?
- s_branch L_SAVE_HWREG
-
- L_SAVE_SGPR_LOOP_WAVE64:
- s_movrels_b32 s0, s0 //s0 = s[0+m0]
- //zhenxu, adding one more argument to save sgpr function, this is only for vmem, using sqc is not change
- write_sgpr_to_mem_wave64(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_sgpr_save_num) ? 1 : 0
- s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE64 //SGPR save is complete?
-
-
- /* save HW registers */
- //////////////////////////////
- L_SAVE_HWREG:
- s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_HWREG_WAVE64
-
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- end
-
- write_sgpr_to_mem_wave32(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC
- write_sgpr_to_mem_wave32(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave32(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC
- write_sgpr_to_mem_wave32(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave32(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS
-
- //s_save_trapsts conflicts with s_save_alloc_size
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- write_sgpr_to_mem_wave32(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS
-
- //write_sgpr_to_mem_wave32(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO
- write_sgpr_to_mem_wave32(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI
-
- //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
- s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- if(SAVE_RESTORE_HWID_DDID)
- s_getreg_b32 s_save_m0, hwreg(HW_REG_HW_ID1) //HW_ID1, handler records the SE/SA/WGP/SIMD/wave of the original wave
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- end
- s_branch L_S_PGM_END_SAVED
-
- L_SAVE_HWREG_WAVE64:
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- end
-
- write_sgpr_to_mem_wave64(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC
- write_sgpr_to_mem_wave64(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave64(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC
- write_sgpr_to_mem_wave64(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave64(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS
-
- //s_save_trapsts conflicts with s_save_alloc_size
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- write_sgpr_to_mem_wave64(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS
-
- //write_sgpr_to_mem_wave64(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO
- write_sgpr_to_mem_wave64(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI
-
- //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
- s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
-
- if(SAVE_RESTORE_HWID_DDID)
- s_getreg_b32 s_save_m0, hwreg(HW_REG_HW_ID1) //HW_ID1, handler records the SE/SA/WGP/SIMD/wave of the original wave
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
- /* save DDID */
- //////////////////////////////
- L_SAVE_DDID:
- //EXEC has been saved, no vector inst following
- s_mov_b32 exec_lo, 0x80000000 //Set MSB to 1. Cleared when draw index is returned
- s_sendmsg sendmsg(MSG_GET_DDID)
-
- L_WAIT_DDID_LOOP:
- s_nop 7 // sleep a bit
- s_bitcmp0_b32 exec_lo, 31 // test to see if MSB is cleared, meaning done
- s_cbranch_scc0 L_WAIT_DDID_LOOP
-
- s_mov_b32 s_save_m0, exec_lo
-
-
- s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_DDID_WAVE64
-
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
- L_SAVE_DDID_WAVE64:
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
- end
-
- L_S_PGM_END_SAVED:
- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_rfe_b64 s_save_pc_lo //Return to the main shader program
- else
- end
-
-
- s_branch L_END_PGM
-
-
-
-/**************************************************************************/
-/* restore routine */
-/**************************************************************************/
+
+ /* inform SPI the readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+
+L_SLEEP:
+	// sleep 1 (64clk) is not enough for 8 waves per SIMD and can cause an
+	// SQ hang, since the 7th/8th waves cannot win arbitration to execute an
+	// instruction while the other waves are stuck in the sleep loop waiting for wrexec!=0
+ s_sleep 0x2
+ s_cbranch_execz L_SLEEP
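+	// The loop above spins until SPI writes a non-zero value into EXEC;
+	// going by the comment above (sleep 1 = 64clk), s_sleep 0x2 keeps the
+	// spinning waves off the instruction arbiter long enough for all waves
+	// on the SIMD to make progress.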
+
+	/* setup Resource Constants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+	s_mov_b32	s_save_buf_rsrc2, 0		//NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
+
+ s_mov_b32 s_save_m0, m0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0
+ s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
+ s_lshl_b32 s_wave_size, s_wave_size, S_WAVE_SIZE
+	s_or_b32	s_wave_size, s_save_spi_init_hi, s_wave_size	//s_wave_size shares its register with the saved exec_hi; the wave64 flag sits at bit25
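+	// For reference: with the wave64 flag shifted to bit25 (S_WAVE_SIZE, per the
+	// comment above), a wave64 wave ends up with
+	// s_wave_size = s_save_spi_init_hi | (1<<25), and the later
+	// "s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE" / "s_and_b32 m0, m0, 1"
+	// sequence recovers exactly that one flag bit.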
+
+ /* save HW registers */
+
+L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
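+	// Rough worked example of the offset above (example numbers, not taken
+	// from this handler): a wave64 wave with 64 VGPRs and no shared VGPRs has
+	// size(VGPR) = 64 regs * 64 lanes * 4 bytes = 16384 bytes and size(SVGPR) = 0,
+	// so the HWREG block starts 16384 + get_sgpr_size_bytes() bytes into the save area.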
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE)
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO)
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ /* the first wave in the threadgroup */
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_mov_b32 s_save_exec_hi, 0x0
+ s_or_b32 s_save_exec_hi, s_save_tmp, s_save_exec_hi // save first wave bit to s_save_exec_hi.bits[26]
+
+ /* save SGPRs */
+	// Save SGPRs before the LDS save, so that s0 to s3 can be reused during the LDS save...
+
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+	// back up s_save_buf_rsrc0 into s_save_xnack_mask, since the SGPR save below modifies rsrc0; it is restored afterwards
+ s_mov_b32 s_save_xnack_mask, s_save_buf_rsrc0
+ s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
+ s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ s_nop 0x0 //Manually inserted wait states
+L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)
+ s_add_u32 m0, m0, 16 //next sgpr index
+	s_cmp_lt_u32	m0, 96	//scc = (m0 < 96) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete?
+
+	//save the remaining 12 SGPRs
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ write_12sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)
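+	// In total 96 + 12 = 108 SGPRs (s0..s107) are written here, matching the
+	// restore path below which notes that s108~s127 are not saved.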
+
+ // restore s_save_buf_rsrc0,1
+ s_mov_b32 s_save_buf_rsrc0, s_save_xnack_mask
+
+	/* save the first 4 VGPRs, so the LDS save below can use them */
+	// each wave allocates at least 4 VGPRs...
+
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_4VGPR_WAVE32
+L_ENABLE_SAVE_4VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ s_branch L_SAVE_4VGPR_WAVE64
+L_SAVE_4VGPR_WAVE32:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
+ s_branch L_SAVE_LDS
+
+L_SAVE_4VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
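+	// Layout implied by the immediates above: each saved VGPR occupies one
+	// contiguous row of lanes * 4 bytes (128 bytes in wave32, 256 bytes in
+	// wave64), hence the offset:128*n / offset:256*n steps between v0..v3.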
+
+ /* save LDS */
+
+L_SAVE_LDS:
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_LDS_NORMAL
+L_ENABLE_SAVE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_LDS_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+	s_cbranch_scc0	L_SAVE_LDS_DONE		//no lds used? jump to L_SAVE_LDS_DONE
+
+ s_barrier //LDS is used? wait for other waves in the same TG
+ s_and_b32 s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+ // first wave do LDS save;
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
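+	// As implied by the offset computations in this handler, the save area is
+	// laid out as [VGPRs][shared VGPRs][SGPRs][HWREGs][LDS], so LDS starts at
+	// the sum of the four preceding block sizes.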
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ //load 0~63*4(byte address) to vgpr v0
+ v_mbcnt_lo_u32_b32 v0, -1, 0
+ v_mbcnt_hi_u32_b32 v0, -1, v0
+ v_mul_u32_u24 v0, 4, v0
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_SAVE_LDS_W64
+
+L_SAVE_LDS_W32:
+ s_mov_b32 s3, 128
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W32:
+ ds_read_b32 v1, v0
+ s_waitcnt 0
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+	s_add_u32	m0, m0, s3		//every buffer_store_dword covers s3 (=128) bytes in wave32
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_W64:
+ s_mov_b32 s3, 256
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W64:
+ ds_read_b32 v1, v0
+ s_waitcnt 0
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+	s_add_u32	m0, m0, s3		//every buffer_store_dword covers s3 (=256) bytes in wave64
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
+
+L_SAVE_LDS_DONE:
+	/* save VGPRs - save the rest of the VGPRs */
+L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
+	s_mov_b32	s_save_mem_offset, (0+128*4)	// skip the first 4 VGPRs already saved (4 * 128 bytes in wave32)
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_VGPR_NORMAL
+L_ENABLE_SAVE_VGPR_EXEC_HI:
+	s_mov_b32	s_save_mem_offset, (0+256*4)	// skip the first 4 VGPRs already saved (4 * 256 bytes in wave64)
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_VGPR_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
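+	// e.g. an encoded vgpr_size field of 15 gives (15 + 1) * 4 = 64 VGPRs, so
+	// the save loops below compare the VGPR index in m0 against 64
+	// (example value, not taken from this handler).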
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_SAVE_VGPR_WAVE64
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W32_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete?
+
+ s_branch L_SAVE_VGPR_END
+
+L_SAVE_VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+
+	//The part below saves the shared VGPRs (new for gfx10)
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+	s_cbranch_scc0	L_SAVE_VGPR_END		//no shared_vgpr used? jump to L_SAVE_VGPR_END
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+ //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
+ //save shared_vgpr will start from the index of m0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_SAVE_SHARED_VGPR_WAVE64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
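+	// The loop above stores shared VGPRs with only exec_lo enabled and a
+	// 128-byte stride, i.e. 32 lanes * 4 bytes per register, even though the
+	// wave itself is wave64.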
+
+L_SAVE_VGPR_END:
+ s_branch L_END_PGM
L_RESTORE:
- /* Setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_restore_tmp, v9, 0
- //determine it is wave32 or wave64
- s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) //change to ttmp13
- s_cmp_eq_u32 s_restore_size, 0
- s_cbranch_scc1 L_RESTORE_WAVE32
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 //SAVE WAVE64
- s_branch L_RESTORE_CON
- L_RESTORE_WAVE32:
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 5 //SAVE WAVE32
- L_RESTORE_CON:
- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
- else
- end
-
- s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
- s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
- s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
- s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
- //determine it is wave32 or wave64
- s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
- s_or_b32 s_restore_size, s_restore_spi_init_hi, s_restore_size //share s_wave_size with exec_hi
-
- /* global mem offset */
- s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0
-
- /* restore VGPRs */
- //////////////////////////////
- L_RESTORE_VGPR:
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_RESTORE_VGPR_NORMAL
- L_ENABLE_RESTORE_VGPR_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_RESTORE_VGPR_NORMAL:
- s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
- s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
- //determine it is wave32 or wave64
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
-
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 7 //NUM_RECORDS in bytes (32 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
- s_mov_b32 m0, 1 //VGPR initial index value = 1
- //s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
- //s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later, might not need this in gfx10
-
- L_RESTORE_VGPR_WAVE32_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- end
- s_waitcnt vmcnt(0) //ensure data ready
- v_movreld_b32 v0, v0 //v[0+m0] = v0
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //every buffer_load_dword does 128 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
- //s_set_gpr_idx_off
- /* VGPR restore on v0 */
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
- end
-
- s_branch L_RESTORE_LDS
-
- L_RESTORE_VGPR_WAVE64:
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256
- s_mov_b32 m0, 1 //VGPR initial index value = 1
- L_RESTORE_VGPR_WAVE64_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- end
- s_waitcnt vmcnt(0) //ensure data ready
- v_movreld_b32 v0, v0 //v[0+m0] = v0
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
- //s_set_gpr_idx_off
- //
- //Below part will be the restore shared vgpr part (new for gfx10)
- s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
- s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
- s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used? jump to L_SAVE_LDS
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
- //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
- //restore shared_vgpr will start from the index of m0
- s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
- s_mov_b32 exec_lo, 0xFFFFFFFF
- s_mov_b32 exec_hi, 0x00000000
- L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- s_waitcnt vmcnt(0) //ensure data ready
- v_movreld_b32 v0, v0 //v[0+m0] = v0
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //every buffer_load_dword does 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
-
- s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!!
-
- /* VGPR restore on v0 */
- L_RESTORE_V0:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
- end
-
-
- /* restore LDS */
- //////////////////////////////
- L_RESTORE_LDS:
-
- //Only need to check the first wave
- /* the first wave in the threadgroup */
- s_and_b32 s_restore_tmp, s_restore_size, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
- s_cbranch_scc0 L_RESTORE_SGPR
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_RESTORE_LDS_NORMAL
- L_ENABLE_RESTORE_LDS_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_RESTORE_LDS_NORMAL:
- s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
- s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
- s_cbranch_scc0 L_RESTORE_SGPR //no lds used? jump to L_RESTORE_VGPR
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes
- s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_mov_b32 m0, 0x0
- s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
-
- L_RESTORE_LDS_LOOP_W32:
- if (SAVE_LDS)
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1
- s_waitcnt 0
- end
- s_add_u32 m0, m0, 128 //every buffer_load_dword does 256 bytes
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
- s_branch L_RESTORE_SGPR
-
- L_RESTORE_LDS_LOOP_W64:
- if (SAVE_LDS)
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1
- s_waitcnt 0
- end
- s_add_u32 m0, m0, 256 //every buffer_load_dword does 256 bytes
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
-
-
- /* restore SGPRs */
- //////////////////////////////
- //s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size
- //s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
- //s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- //s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SGPRs = (sgpr_size + 1) * 8 (non-zero value)
- L_RESTORE_SGPR:
- //need to look at it is wave32 or wave64
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_SGPR_VMEM_WAVE64
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 7 //NUM_RECORDS in bytes (32 threads)
- end
- s_branch L_RESTORE_SGPR_CONT
- L_RESTORE_SGPR_VMEM_WAVE64:
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 8 //NUM_RECORDS in bytes (64 threads)
- end
-
- L_RESTORE_SGPR_CONT:
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_SGPR_WAVE64
-
- read_sgpr_from_mem_wave32(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp
- s_mov_b32 m0, 0x1
-
- L_RESTORE_SGPR_LOOP_WAVE32:
- read_sgpr_from_mem_wave32(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made
- s_waitcnt lgkmcnt(0) //ensure data ready
- s_movreld_b32 s0, s0 //s[0+m0] = s0
- s_nop 0 // hazard SALU M0=> S_MOVREL
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_SGPR_LOOP_WAVE32 //SGPR restore (except s0) is complete?
- s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */
- s_branch L_RESTORE_HWREG
-
- L_RESTORE_SGPR_WAVE64:
- read_sgpr_from_mem_wave64(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp
- s_mov_b32 m0, 0x1 //SGPR initial index value =1 //go on with with s1
-
- L_RESTORE_SGPR_LOOP_WAVE64:
- read_sgpr_from_mem_wave64(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made
- s_waitcnt lgkmcnt(0) //ensure data ready
- s_movreld_b32 s0, s0 //s[0+m0] = s0
- s_nop 0 // hazard SALU M0=> S_MOVREL
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_SGPR_LOOP_WAVE64 //SGPR restore (except s0) is complete?
- s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */
-
-
- /* restore HW registers */
- //////////////////////////////
- L_RESTORE_HWREG:
- s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_HWREG_WAVE64
-
- read_sgpr_from_mem_wave32(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0
- read_sgpr_from_mem_wave32(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC
- read_sgpr_from_mem_wave32(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave32(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC
- read_sgpr_from_mem_wave32(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave32(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS
- read_sgpr_from_mem_wave32(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS
- //read_sgpr_from_mem_wave32(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO
- //read_sgpr_from_mem_wave32(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI
- read_sgpr_from_mem_wave32(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK
- read_sgpr_from_mem_wave32(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE
- if(SAVE_RESTORE_HWID_DDID)
- read_sgpr_from_mem_wave32(s_restore_hwid1, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //HW_ID1
- end
- s_branch L_RESTORE_HWREG_FINISH
-
- L_RESTORE_HWREG_WAVE64:
- read_sgpr_from_mem_wave64(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0
- read_sgpr_from_mem_wave64(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC
- read_sgpr_from_mem_wave64(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave64(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC
- read_sgpr_from_mem_wave64(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave64(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS
- read_sgpr_from_mem_wave64(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS
- //read_sgpr_from_mem_wave64(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO
- //read_sgpr_from_mem_wave64(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI
- read_sgpr_from_mem_wave64(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK
- read_sgpr_from_mem_wave64(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE
- if(SAVE_RESTORE_HWID_DDID)
- read_sgpr_from_mem_wave64(s_restore_hwid1, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //HW_ID1
- end
- L_RESTORE_HWREG_FINISH:
- s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
-
-
-
- if(SAVE_RESTORE_HWID_DDID)
- L_RESTORE_DDID:
- s_mov_b32 m0, s_restore_hwid1 //virture ttrace support: The save-context handler records the SE/SA/WGP/SIMD/wave of the original wave
- s_ttracedata //and then can output it as SHADER_DATA to ttrace on restore to provide a correlation across the save-restore
-
- s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_DDID_WAVE64
-
- read_sgpr_from_mem_wave32(s_restore_ddid, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- s_branch L_RESTORE_DDID_FINISH
- L_RESTORE_DDID_WAVE64:
- read_sgpr_from_mem_wave64(s_restore_ddid, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
-
- L_RESTORE_DDID_FINISH:
- s_waitcnt lgkmcnt(0)
- //s_mov_b32 m0, s_restore_ddid
- //s_ttracedata
- if (RESTORE_DDID_IN_SGPR18)
- s_mov_b32 s18, s_restore_ddid
- end
-
- end
-
- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
-
- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
-
- s_mov_b32 m0, s_restore_m0
- s_mov_b32 exec_lo, s_restore_exec_lo
- s_mov_b32 exec_hi, s_restore_exec_hi
-
- s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
+	/* Setup Resource Constants */
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+ //determine it is wave32 or wave64
+ s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
+ s_lshl_b32 s_restore_size, s_restore_size, S_WAVE_SIZE
+ s_or_b32 s_restore_size, s_restore_spi_init_hi, s_restore_size
+
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+L_RESTORE_LDS:
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_LDS_NORMAL
+L_ENABLE_RESTORE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_LDS_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
+
+L_RESTORE_LDS_LOOP_W32:
+	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1	// 32 lanes load 32 DW (128 bytes) into LDS
+	s_add_u32	m0, m0, 128	// advance LDS offset by 128 bytes
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128	//mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
+ s_branch L_RESTORE_VGPR
+
+L_RESTORE_LDS_LOOP_W64:
+	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1	// 64 lanes load 64 DW (256 bytes) into LDS
+	s_add_u32	m0, m0, 256	// advance LDS offset by 256 bytes
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256	//mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
+
+ /* restore VGPRs */
+L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_VGPR_NORMAL
+L_ENABLE_RESTORE_VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_VGPR_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset	// restore starts with v4, v0-v3 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+
+L_RESTORE_VGPR_WAVE32_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*3
+ s_waitcnt vmcnt(0)
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
+
+ /* VGPR restore on v0 */
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3
+
+ s_branch L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE64:
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+
+L_RESTORE_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+ //Below part will be the restore shared vgpr part (new for gfx10)
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used?
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+ //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
+ //restore shared_vgpr will start from the index of m0
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ s_waitcnt vmcnt(0)
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+ s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!!
+
+ /* VGPR restore on v0 */
+L_RESTORE_V0:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
+
+ /* restore SGPRs */
+	//SGPRs are restored in groups of 4, 8, then 16*6
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+L_RESTORE_SGPR:
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108~s127 are not saved
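+	// Note (illustrative): get_sgpr_size_bytes() reserves 512 bytes (128 dwords); subtracting
+	// 20*4 = 80 bytes leaves the offset at 432 bytes, just past the save slot of s107, which
+	// matches the 4 + 8 + 16*6 = 108 SGPRs restored below.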
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 m0, s_sgpr_save_num
+
+ read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+ s_sub_u32 m0, m0, 4 // Restore from S[0] to S[104]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+
+ read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+ s_sub_u32 m0, m0, 8 // Restore from S[0] to S[96]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+ s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+	s_cmp_eq_u32	m0, 0				//scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+
+ /* restore HW registers */
+L_RESTORE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+ s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+ s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
+
+ s_mov_b32 s_restore_tmp, s_restore_pc_hi
+ s_and_b32 s_restore_pc_hi, s_restore_tmp, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
- s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask //restore xnack_mask
- s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
+ s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
- //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
- s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
- //reuse s_restore_m0 as a temp register
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
- s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status
-
- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
-
-
-// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
- s_rfe_b64 s_restore_pc_lo // s_restore_m0[0] is used to set STATUS.inst_atc
-
-
-/**************************************************************************/
-/* the END */
-/**************************************************************************/
-L_END_PGM:
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
+ s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
+	s_mov_b32	s_restore_mode, 0x0		//reuse s_restore_mode as a temporary to assemble the IB_STS value
+ s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_FIRST_REPLAY_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
+ s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_REPLAY_W64H_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT
+ s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0
+
+ s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_mode
+
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
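+	// Note: EXECZ and VCCZ cannot be written through s_setreg_b32, so the two self-ANDs above
+	// leave exec and vcc unchanged while making the hardware recompute the Z bits before STATUS
+	// is written back.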
+
+	s_barrier					//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
+
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+
+L_END_PGM:
s_endpgm
-
-end
-
-
-/**************************************************************************/
-/* the helper functions */
-/**************************************************************************/
-function write_sgpr_to_mem_wave32(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf)
- if (use_sqc)
- s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
- s_mov_b32 m0, s_mem_offset
- s_buffer_store_dword s, s_rsrc, m0 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 4
- s_mov_b32 m0, exec_lo
- elsif (use_mtbuf)
- v_mov_b32 v0, s
- tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 128
- else
- v_mov_b32 v0, s
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 128
- end
end
-function write_sgpr_to_mem_wave64(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf)
- if (use_sqc)
- s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
- s_mov_b32 m0, s_mem_offset
- s_buffer_store_dword s, s_rsrc, m0 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 4
- s_mov_b32 m0, exec_lo
- elsif (use_mtbuf)
- v_mov_b32 v0, s
- tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 256
- else
- v_mov_b32 v0, s
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 256
- end
+function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+end
+
+
+function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0
+end
+
+function write_12sgpr_to_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*12
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0
+end
+
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
end
-function read_sgpr_from_mem_wave32(s, s_rsrc, s_mem_offset, use_sqc)
- s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
- if (use_sqc)
- s_add_u32 s_mem_offset, s_mem_offset, 4
- else
- s_add_u32 s_mem_offset, s_mem_offset, 128
- end
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1
end
-function read_sgpr_from_mem_wave64(s, s_rsrc, s_mem_offset, use_sqc)
- s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
- if (use_sqc)
- s_add_u32 s_mem_offset, s_mem_offset, 4
- else
- s_add_u32 s_mem_offset, s_mem_offset, 256
- end
+function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*8
+ s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset glc:1
end
+function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*4
+ s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset glc:1
+end
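+// Note: unlike read_hwreg_from_mem, the read_*sgpr_from_mem helpers pre-decrement the offset,
+// so L_RESTORE_SGPR walks the SGPR save area from the top (s104..s107) down to s0..s15; s0..s15
+// are restored last since they double as the staging registers for s_movreld_b64.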
+
+
+function get_lds_size_bytes(s_lds_size_byte)
+ s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+	s_lshl_b32	s_lds_size_byte, s_lds_size_byte, 8			//LDS size in bytes = lds_size * 64 * 4	// granularity 64DW
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_lshr_b32 m0, s_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SHIFT_W64
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)	//VGPR save size in bytes = (vgpr_size + 1) * 4 VGPRs * 32 lanes * 4 bytes (non-zero value)
+ s_branch L_SHIFT_DONE
+L_ENABLE_SHIFT_W64:
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)	//VGPR save size in bytes = (vgpr_size + 1) * 4 VGPRs * 64 lanes * 4 bytes (non-zero value)
+L_SHIFT_DONE:
+end
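+// Worked example (illustrative): a GPR_ALLOC.VGPR_SIZE field of 7 on a wave64 queue gives
+// (7+1) << (2+8) = 8192 bytes, i.e. 32 VGPRs of 64 lanes * 4 bytes = 256 bytes each.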
+
+function get_svgpr_size_bytes(s_svgpr_size_byte)
+ s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+	s_lshl_b32	s_svgpr_size_byte, s_svgpr_size_byte, (3+7)	//SVGPR save size in bytes = shared_vgpr_size * 8 VGPRs * 32 lanes * 4 bytes
+end
+
+function get_sgpr_size_bytes
+ return 512
+end
+
+function get_hwreg_size_bytes
+ return 128
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index a47f5b933120..b195b7cd8a17 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -24,78 +24,6 @@
* PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
*/
-/* HW (VI) source code for CWSR trap handler */
-/* Version 18 + multiple trap handler */
-
-// this performance-optimal version was originally from Seven Xu at SRDC
-
-// Revison #18 --...
-/* Rev History
-** #1. Branch from gc dv. //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV)
-** #4. SR Memory Layout:
-** 1. VGPR-SGPR-HWREG-{LDS}
-** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
-** #7. Update: 1. don't barrier if noLDS
-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
-** 2. Fix SQ issue by s_sleep 2
-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
-** 2. optimize s_buffer save by burst 16sgprs...
-** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs.
-** #11. Update 1. Add 2 more timestamp for debug version
-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
-** #13. Integ 1. Always use MUBUF for PV trap shader...
-** #14. Update 1. s_buffer_store soft clause...
-** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
-** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree
-** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
-** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
-** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32
-** 2. FUNC - Handle non-CWSR traps
-*/
-
-var G8SR_WDMEM_HWREG_OFFSET = 0
-var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
-
-// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore.
-
-var G8SR_DEBUG_TIMESTAMP = 0
-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
-var s_g8sr_ts_save_s = s[34:35] // save start
-var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
-var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
-var s_g8sr_ts_save_d = s[40:41] // save end
-var s_g8sr_ts_restore_s = s[42:43] // restore start
-var s_g8sr_ts_restore_d = s[44:45] // restore end
-
-var G8SR_VGPR_SR_IN_DWX4 = 0
-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
-
-
-/*************************************************************************/
-/* control on how to run the shader */
-/*************************************************************************/
-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
-var EMU_RUN_HACK = 0
-var EMU_RUN_HACK_RESTORE_NORMAL = 0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS = 1
-var WG_BASE_ADDR_LO = 0x9000a000
-var WG_BASE_ADDR_HI = 0x0
-var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
-var CTX_SAVE_CONTROL = 0x0
-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
-var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
-
/**************************************************************************/
/* variables */
/**************************************************************************/
@@ -226,16 +154,7 @@ shader main
type(CS)
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
- else
s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
- end
L_JUMP_TO_RESTORE:
s_branch L_RESTORE //restore
@@ -249,7 +168,7 @@ L_SKIP_RESTORE:
s_cbranch_scc1 L_SAVE //this is the operation for save
// ********* Handle non-CWSR traps *******************
-if (!EMU_RUN_HACK)
+
/* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
s_load_dwordx4 [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0
s_waitcnt lgkmcnt(0)
@@ -268,7 +187,7 @@ L_EXCP_CASE:
s_and_b32 ttmp1, ttmp1, 0xFFFF
set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_rfe_b64 [ttmp0, ttmp1]
-end
+
// ********* End handling of non-CWSR traps *******************
/**************************************************************************/
@@ -276,12 +195,6 @@ end
/**************************************************************************/
L_SAVE:
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
-end
-
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
@@ -303,16 +216,7 @@ end
s_mov_b32 s_save_exec_hi, exec_hi
s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_sq_save_msg
- s_waitcnt lgkmcnt(0)
-end
-
- if (EMU_RUN_HACK)
-
- else
s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
- end
// Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
@@ -321,36 +225,9 @@ end
L_SLEEP:
s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
- if (EMU_RUN_HACK)
-
- else
s_cbranch_execz L_SLEEP
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_spi_wrexec
- s_waitcnt lgkmcnt(0)
-end
/* setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_save_tmp, v9, 0
- s_lshr_b32 s_save_tmp, s_save_tmp, 6
- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
-
-
s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
@@ -383,22 +260,10 @@ end
s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_mov_b32 tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO
- s_mov_b32 tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI
- end
-
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
@@ -440,18 +305,8 @@ end
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
-
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
// backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
//s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
@@ -490,30 +345,14 @@ end
s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
s_mov_b32 exec_hi, 0xFFFFFFFF
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
// VGPR Allocated in 4-GPR granularity
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
-
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
-end
@@ -549,64 +388,10 @@ end
s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
-var LDS_DMA_ENABLE = 0
-var UNROLL = 0
-if UNROLL==0 && LDS_DMA_ENABLE==1
- s_mov_b32 s3, 256*2
- s_nop 0
- s_nop 0
- s_nop 0
- L_SAVE_LDS_LOOP:
- //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
- end
-
- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
-
-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss
- // store from higest LDS address to lowest
- s_mov_b32 s3, 256*2
- s_sub_u32 m0, s_save_alloc_size, s3
- s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
- s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks...
- s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest
- s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc
- s_nop 0
- s_nop 0
- s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
- s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
- s_add_u32 s0, s0,s_save_alloc_size
- s_addc_u32 s1, s1, 0
- s_setpc_b64 s[0:1]
-
-
- for var i =0; i< 128; i++
- // be careful to make here a 64Byte aligned address, which could improve performance...
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
-
- if i!=127
- s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline
- s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
- end
- end
-
-else // BUFFER_STORE
v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
v_mul_i32_i24 v2, v3, 8 // tid*8
@@ -628,8 +413,6 @@ L_SAVE_LDS_LOOP_VECTOR:
// restore rsrc3
s_mov_b32 s_save_buf_rsrc3, s0
-end
-
L_SAVE_LDS_DONE:
@@ -647,44 +430,8 @@ L_SAVE_LDS_DONE:
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
-
- // VGPR Allocated in 4-GPR granularity
-
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, 4 // skip first 4 VGPRs
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
- s_set_gpr_idx_on m0, 0x1 // This will change M0
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
-L_SAVE_VGPR_LOOP:
- v_mov_b32 v0, v0 // v0 = v[0+m0]
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
-
-
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, 4
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
- s_set_gpr_idx_off
-L_SAVE_VGPR_LOOP_END:
-
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
// VGPR store using dw burst
s_mov_b32 m0, 0x4 //VGPR initial index value =0
s_cmp_lt_u32 m0, s_save_alloc_size
@@ -700,52 +447,18 @@ else
v_mov_b32 v2, v2 //v0 = v[0+m0]
v_mov_b32 v3, v3 //v0 = v[0+m0]
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
- end
s_add_u32 m0, m0, 4 //next vgpr index
s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
s_set_gpr_idx_off
-end
L_SAVE_VGPR_END:
-
-
-
-
-
-
- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_rfe_b64 s_save_pc_lo //Return to the main shader program
- else
- end
-
-// Save Done timestamp
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_d
- // SGPR SR memory offset : size(VGPR)
- get_vgpr_size_bytes(s_save_mem_offset)
- s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // Need reset rsrc2??
- s_mov_b32 m0, s_save_mem_offset
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
-end
-
-
s_branch L_END_PGM
@@ -756,27 +469,6 @@ end
L_RESTORE:
/* Setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_restore_tmp, v9, 0
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
- else
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case...
- s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
- s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored..
-end
-
-
-
s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
@@ -818,18 +510,12 @@ end
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
L_RESTORE_LDS_LOOP:
- if (SAVE_LDS)
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- end
s_add_u32 m0, m0, 256*2 // 128 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -848,40 +534,8 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
-if G8SR_VGPR_SR_IN_DWX4
- get_vgpr_size_bytes(s_restore_mem_offset)
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
-
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, s_restore_alloc_size
- s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
-
-L_RESTORE_VGPR_LOOP:
- buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- s_waitcnt vmcnt(0)
- s_sub_u32 m0, m0, 4
- v_mov_b32 v0, v0 // v[0+m0] = v0
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
- s_cmp_eq_u32 m0, 0x8000
- s_cbranch_scc0 L_RESTORE_VGPR_LOOP
- s_set_gpr_idx_off
-
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
-
-else
+
// VGPR load using dw burst
s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
@@ -890,14 +544,10 @@ else
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
L_RESTORE_VGPR_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
- end
s_waitcnt vmcnt(0) //ensure data ready
v_mov_b32 v0, v0 //v[0+m0] = v0
v_mov_b32 v1, v1
@@ -909,16 +559,10 @@ else
s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
s_set_gpr_idx_off
/* VGPR restore on v0 */
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
- end
-
-end
/* restore SGPRs */
//////////////////////////////
@@ -934,16 +578,8 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
/* If 112 SGPRs ar allocated, 4 sgprs are not used TBA(108,109),TMA(110,111),
However, we are safe to restore these 4 SGPRs anyway, since TBA,TMA will later be restored by HWREG
@@ -972,12 +608,6 @@ end
//////////////////////////////
L_RESTORE_HWREG:
-
-if G8SR_DEBUG_TIMESTAMP
- s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
- s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
-end
-
// HWREG SR memory offset : size(VGPR)+size(SGPR)
get_vgpr_size_bytes(s_restore_mem_offset)
get_sgpr_size_bytes(s_restore_tmp)
@@ -985,11 +615,7 @@ end
s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
@@ -1006,16 +632,6 @@ end
s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1048,11 +664,6 @@ end
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_d
- s_waitcnt lgkmcnt(0)
-end
-
// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 6bae2e022c6e..75f29d13c90f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -24,76 +24,9 @@
* PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
*/
-/* HW (GFX9) source code for CWSR trap handler */
-/* Version 18 + multiple trap handler */
-
-// this performance-optimal version was originally from Seven Xu at SRDC
-
-// Revison #18 --...
-/* Rev History
-** #1. Branch from gc dv. //gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV)
-** #4. SR Memory Layout:
-** 1. VGPR-SGPR-HWREG-{LDS}
-** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
-** #7. Update: 1. don't barrier if noLDS
-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
-** 2. Fix SQ issue by s_sleep 2
-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
-** 2. optimize s_buffer save by burst 16sgprs...
-** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs.
-** #11. Update 1. Add 2 more timestamp for debug version
-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
-** #13. Integ 1. Always use MUBUF for PV trap shader...
-** #14. Update 1. s_buffer_store soft clause...
-** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
-** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree
-** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
-** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
-** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32
-** 2. FUNC - Handle non-CWSR traps
-*/
-
-var G8SR_WDMEM_HWREG_OFFSET = 0
-var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
-
-// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore.
-
-var G8SR_DEBUG_TIMESTAMP = 0
-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
-var s_g8sr_ts_save_s = s[34:35] // save start
-var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
-var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
-var s_g8sr_ts_save_d = s[40:41] // save end
-var s_g8sr_ts_restore_s = s[42:43] // restore start
-var s_g8sr_ts_restore_d = s[44:45] // restore end
-
-var G8SR_VGPR_SR_IN_DWX4 = 0
-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
-
-
-/*************************************************************************/
-/* control on how to run the shader */
-/*************************************************************************/
-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
-var EMU_RUN_HACK = 0
-var EMU_RUN_HACK_RESTORE_NORMAL = 0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS = 1
-var WG_BASE_ADDR_LO = 0x9000a000
-var WG_BASE_ADDR_HI = 0x0
-var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
-var CTX_SAVE_CONTROL = 0x0
-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
-var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
+var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
+var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
/**************************************************************************/
/* variables */
@@ -107,6 +40,7 @@ var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -127,12 +61,15 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000
var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
+
var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
@@ -197,13 +134,15 @@ var s_restore_spi_init_lo = exec_lo
var s_restore_spi_init_hi = exec_hi
var s_restore_mem_offset = ttmp12
+var s_restore_accvgpr_offset = ttmp13
var s_restore_alloc_size = ttmp3
var s_restore_tmp = ttmp2
var s_restore_mem_offset_save = s_restore_tmp //no conflict
+var s_restore_accvgpr_offset_save = ttmp7
var s_restore_m0 = s_restore_alloc_size //no conflict
-var s_restore_mode = ttmp7
+var s_restore_mode = s_restore_accvgpr_offset_save
var s_restore_pc_lo = ttmp0
var s_restore_pc_hi = ttmp1
@@ -226,20 +165,11 @@ var s_restore_ttmps_hi = s_restore_alloc_size //no conflict
/* Shader Main*/
shader main
- asic(GFX9)
+ asic(DEFAULT)
type(CS)
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
- else
s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
- end
L_JUMP_TO_RESTORE:
s_branch L_RESTORE //restore
@@ -248,12 +178,29 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
+
+if SINGLE_STEP_MISSED_WORKAROUND
+ // No single step exceptions if MODE.DEBUG_EN=0.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+ s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND
+
+ // Second-level trap already handled exception if STATUS.HALT=1.
+ s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ s_cbranch_scc0 L_FETCH_2ND_TRAP
+
+L_NO_SINGLE_STEP_WORKAROUND:
+end
+
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
s_cbranch_scc1 L_SAVE //this is the operation for save
// ********* Handle non-CWSR traps *******************
-if (!EMU_RUN_HACK)
+
// Illegal instruction is a non-maskable exception which blocks context save.
// Halt the wavefront and return from the trap.
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
@@ -330,7 +277,7 @@ L_EXCP_CASE:
set_status_without_spi_prio(s_save_status, ttmp2)
s_rfe_b64 [ttmp0, ttmp1]
-end
+
// ********* End handling of non-CWSR traps *******************
/**************************************************************************/
@@ -338,12 +285,6 @@ end
/**************************************************************************/
L_SAVE:
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
-end
-
s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
@@ -365,16 +306,7 @@ end
s_mov_b32 s_save_exec_hi, exec_hi
s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_sq_save_msg
- s_waitcnt lgkmcnt(0)
-end
-
- if (EMU_RUN_HACK)
-
- else
s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
- end
// Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
@@ -383,33 +315,7 @@ end
L_SLEEP:
s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
- if (EMU_RUN_HACK)
-
- else
s_cbranch_execz L_SLEEP
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_spi_wrexec
- s_waitcnt lgkmcnt(0)
-end
-
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_save_tmp, v9, 0
- s_lshr_b32 s_save_tmp, s_save_tmp, 6
- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
// ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
@@ -459,20 +365,10 @@ end
s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- end
-
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
@@ -510,17 +406,9 @@ end
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
// backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
@@ -563,30 +451,25 @@ end
s_mov_b32 xnack_mask_lo, 0x0
s_mov_b32 xnack_mask_hi, 0x0
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
// VGPR Allocated in 4-GPR granularity
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_FIRST_VGPRS_WITH_TCP
+
+ write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+ s_branch L_SAVE_LDS
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+L_SAVE_FIRST_VGPRS_WITH_TCP:
+end
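+	// Note: check_if_tcp_store_ok() and write_vgprs_to_mem_with_sqc() are helpers added
+	// elsewhere in this patch; the check presumably looks at TRAPSTS.XNACK_ERROR and
+	// STATUS.ALLOW_REPLAY (see the new masks above) to decide whether vector (TCP) stores are
+	// still reliable, otherwise the first four VGPRs are written through the scalar SQC path.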
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
-end
@@ -621,66 +504,34 @@ end
s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
-var LDS_DMA_ENABLE = 0
-var UNROLL = 0
-if UNROLL==0 && LDS_DMA_ENABLE==1
- s_mov_b32 s3, 256*2
- s_nop 0
- s_nop 0
- s_nop 0
- L_SAVE_LDS_LOOP:
- //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
- end
-
- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
-
-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss
- // store from higest LDS address to lowest
- s_mov_b32 s3, 256*2
- s_sub_u32 m0, s_save_alloc_size, s3
- s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
- s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks...
- s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest
- s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc
- s_nop 0
- s_nop 0
- s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
- s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
- s_add_u32 s0, s0,s_save_alloc_size
- s_addc_u32 s1, s1, 0
- s_setpc_b64 s[0:1]
-
-
- for var i =0; i< 128; i++
- // be careful to make here a 64Byte aligned address, which could improve performance...
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
-
- if i!=127
- s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline
- s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
- end
- end
-
-else // BUFFER_STORE
v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
+
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_LDS_WITH_TCP
+
+ v_lshlrev_b32 v2, 2, v3
+L_SAVE_LDS_LOOP_SQC:
+ ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40
+ s_waitcnt lgkmcnt(0)
+
+ write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset)
+
+ v_add_u32 v2, 0x200, v2
+ v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+ s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_WITH_TCP:
+end
+
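+	// Note (illustrative): in the XNACK-error fallback above, LDS is drained through the scalar
+	// path: ds_read2_b32 fetches two dwords per lane 256 bytes apart, they are written out via
+	// SQC, and v2 advances by 0x200 so each pass covers 512 bytes of LDS.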
v_mul_i32_i24 v2, v3, 8 // tid*8
v_mov_b32 v3, 256*2
s_mov_b32 m0, 0x10000
@@ -701,8 +552,6 @@ L_SAVE_LDS_LOOP_VECTOR:
// restore rsrc3
s_mov_b32 s_save_buf_rsrc3, s0
-end
-
L_SAVE_LDS_DONE:
@@ -720,44 +569,9 @@ L_SAVE_LDS_DONE:
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
-
- // VGPR Allocated in 4-GPR granularity
-
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, 4 // skip first 4 VGPRs
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
-
- s_set_gpr_idx_on m0, 0x1 // This will change M0
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
-L_SAVE_VGPR_LOOP:
- v_mov_b32 v0, v0 // v0 = v[0+m0]
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
-
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, 4
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
- s_set_gpr_idx_off
-L_SAVE_VGPR_LOOP_END:
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
// VGPR store using dw burst
s_mov_b32 m0, 0x4 //VGPR initial index value =0
s_cmp_lt_u32 m0, s_save_alloc_size
@@ -767,57 +581,82 @@ else
s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP
+
+L_SAVE_VGPR_LOOP_SQC:
+ write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP_SQC
+
+ s_set_gpr_idx_off
+ s_branch L_SAVE_VGPR_END
+end
+
L_SAVE_VGPR_LOOP:
v_mov_b32 v0, v0 //v0 = v[0+m0]
    v_mov_b32	v1, v1		//v1 = v[1+m0]
    v_mov_b32	v2, v2		//v2 = v[2+m0]
    v_mov_b32	v3, v3		//v3 = v[3+m0]
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
- end
s_add_u32 m0, m0, 4 //next vgpr index
s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
s_set_gpr_idx_off
-end
L_SAVE_VGPR_END:
+if ASIC_TARGET_ARCTURUS
+ // Save ACC VGPRs
+ s_mov_b32 m0, 0x0 //VGPR initial index value =0
+ s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
+
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP
+L_SAVE_ACCVGPR_LOOP_SQC:
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_read v[vgpr], acc[vgpr] // v[N] = acc[N+m0]
+ end
+ write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+ s_add_u32 m0, m0, 4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP_SQC
+ s_set_gpr_idx_off
+ s_branch L_SAVE_ACCVGPR_END
+end
- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_rfe_b64 s_save_pc_lo //Return to the main shader program
- else
+L_SAVE_ACCVGPR_LOOP:
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_read v[vgpr], acc[vgpr] // v[N] = acc[N+m0]
end
-// Save Done timestamp
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_d
- // SGPR SR memory offset : size(VGPR)
- get_vgpr_size_bytes(s_save_mem_offset)
- s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // Need reset rsrc2??
- s_mov_b32 m0, s_save_mem_offset
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
-end
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+ s_add_u32 m0, m0, 4
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP
+ s_set_gpr_idx_off
+
+L_SAVE_ACCVGPR_END:
+end
s_branch L_END_PGM
@@ -829,27 +668,6 @@ end
L_RESTORE:
    /* Setup Resource Constants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_restore_tmp, v9, 0
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
- else
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case...
- s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
- s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored..
-end
-
-
-
s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
@@ -891,18 +709,12 @@ end
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
L_RESTORE_LDS_LOOP:
- if (SAVE_LDS)
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- end
s_add_u32 m0, m0, 256*2 // 128 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -921,56 +733,43 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
+
+if ASIC_TARGET_ARCTURUS
+ s_mov_b32 s_restore_accvgpr_offset, s_restore_buf_rsrc2 //ACC VGPRs at end of VGPRs
+end
+
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-if G8SR_VGPR_SR_IN_DWX4
- get_vgpr_size_bytes(s_restore_mem_offset)
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
-
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, s_restore_alloc_size
- s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
-
-L_RESTORE_VGPR_LOOP:
- buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- s_waitcnt vmcnt(0)
- s_sub_u32 m0, m0, 4
- v_mov_b32 v0, v0 // v[0+m0] = v0
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
- s_cmp_eq_u32 m0, 0x8000
- s_cbranch_scc0 L_RESTORE_VGPR_LOOP
- s_set_gpr_idx_off
-
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
-
-else
// VGPR load using dw burst
    s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset	// restore starts at v4; v0-v3 are restored last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+if ASIC_TARGET_ARCTURUS
+ s_mov_b32 s_restore_accvgpr_offset_save, s_restore_accvgpr_offset
+ s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4
+end
    s_mov_b32	m0, 4		//VGPR initial index value = 4
s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
L_RESTORE_VGPR_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
+
+if ASIC_TARGET_ARCTURUS
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*3
+ s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4
+ s_waitcnt vmcnt(0)
+
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_write acc[vgpr], v[vgpr]
+ end
+end
+
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
- end
s_waitcnt vmcnt(0) //ensure data ready
v_mov_b32 v0, v0 //v[0+m0] = v0
v_mov_b32 v1, v1
@@ -982,16 +781,22 @@ else
s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
s_set_gpr_idx_off
/* VGPR restore on v0 */
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
+if ASIC_TARGET_ARCTURUS
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
+
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_write acc[vgpr], v[vgpr]
+ end
+end
+
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
- end
-
-end
/* restore SGPRs */
//////////////////////////////
@@ -1007,16 +812,8 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, s_restore_alloc_size
@@ -1044,11 +841,6 @@ end
L_RESTORE_HWREG:
-if G8SR_DEBUG_TIMESTAMP
- s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
- s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
-end
-
// HWREG SR memory offset : size(VGPR)+size(SGPR)
get_vgpr_size_bytes(s_restore_mem_offset)
get_sgpr_size_bytes(s_restore_tmp)
@@ -1056,11 +848,7 @@ end
s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
@@ -1075,16 +863,6 @@ end
s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1131,11 +909,6 @@ end
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_d
- s_waitcnt lgkmcnt(0)
-end
-
// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
@@ -1190,7 +963,39 @@ function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
s_sub_u32 s_mem_offset, s_mem_offset, 4*16
end
+function check_if_tcp_store_ok
+ // If STATUS.ALLOW_REPLAY=0 and TRAPSTS.XNACK_ERROR=1 then TCP stores will fail.
+ s_and_b32 s_save_tmp, s_save_status, SQ_WAVE_STATUS_ALLOW_REPLAY_MASK
+ s_cbranch_scc1 L_TCP_STORE_CHECK_DONE
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_TRAPSTS)
+ s_andn2_b32 s_save_tmp, SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK, s_save_tmp
+
+L_TCP_STORE_CHECK_DONE:
+end
+
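+// Store all 64 lanes of one VGPR to memory with scalar (SQC) buffer stores, four lanes per iteration.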
+function write_vgpr_to_mem_with_sqc(v, s_rsrc, s_mem_offset)
+ s_mov_b32 s4, 0
+L_WRITE_VGPR_LANE_LOOP:
+ for var lane = 0; lane < 4; ++ lane
+ v_readlane_b32 s[lane], v, s4
+ s_add_u32 s4, s4, 1
+ end
+
+ s_buffer_store_dwordx4 s[0:3], s_rsrc, s_mem_offset glc:1
+ ack_sqc_store_workaround()
+
+ s_add_u32 s_mem_offset, s_mem_offset, 0x10
+ s_cmp_eq_u32 s4, 0x40
+ s_cbranch_scc0 L_WRITE_VGPR_LANE_LOOP
+end
+
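+// Store n_vgprs consecutive VGPRs, starting at v, using the scalar store helper above.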
+function write_vgprs_to_mem_with_sqc(v, n_vgprs, s_rsrc, s_mem_offset)
+ for var vgpr = 0; vgpr < n_vgprs; ++ vgpr
+ write_vgpr_to_mem_with_sqc(v[vgpr], s_rsrc, s_mem_offset)
+ end
+end
function get_lds_size_bytes(s_lds_size_byte)
// SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
@@ -1202,6 +1007,10 @@ function get_vgpr_size_bytes(s_vgpr_size_byte)
    s_getreg_b32	s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)	//vgpr_size
s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value) //FIXME for GFX, zero is possible
+
+if ASIC_TARGET_ARCTURUS
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, 1 // Double size for ACC VGPRs
+end
end
function get_sgpr_size_bytes(s_sgpr_size_byte)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..3f0300e53727 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -42,6 +42,7 @@
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
+static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);
static const char kfd_dev_name[] = "kfd";
@@ -49,8 +50,9 @@ static const char kfd_dev_name[] = "kfd";
static const struct file_operations kfd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = kfd_ioctl,
- .compat_ioctl = kfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
+ .release = kfd_release,
.mmap = kfd_mmap,
};
@@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- if (kfd_is_locked())
+ if (kfd_is_locked()) {
+ kfd_unref_process(process);
return -EAGAIN;
+ }
+
+ /* filep now owns the reference returned by kfd_create_process */
+ filep->private_data = process;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep)
return 0;
}
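+/* Drop the process reference taken in kfd_open when the file descriptor is closed. */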
+static int kfd_release(struct inode *inode, struct file *filep)
+{
+ struct kfd_process *process = filep->private_data;
+
+ if (process)
+ kfd_unref_process(process);
+
+ return 0;
+}
+
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
@@ -258,6 +275,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
+ uint32_t doorbell_offset_in_process = 0;
memset(&q_properties, 0, sizeof(struct queue_properties));
@@ -282,11 +300,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
+ pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+ &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -296,14 +315,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
/* Return gpu_id as doorbell offset for mmap usage */
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
- args->doorbell_offset <<= PAGE_SHIFT;
if (KFD_IS_SOC15(dev->device_info->asic_family))
- /* On SOC15 ASICs, doorbell allocation must be
- * per-device, and independent from the per-process
- * queue_id. Return the doorbell offset within the
- * doorbell aperture to user mode.
+ /* On SOC15 ASICs, include the doorbell offset within the
+ * process doorbell frame, which is 2 pages.
*/
- args->doorbell_offset |= q_properties.doorbell_off;
+ args->doorbell_offset |= doorbell_offset_in_process;
mutex_unlock(&p->mutex);
@@ -332,7 +348,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid %d\n",
+ pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
@@ -378,7 +394,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid %d\n",
+ pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -855,7 +871,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
@@ -913,7 +929,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
uint32_t nodes = 0;
int ret;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
@@ -1128,7 +1144,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
- pdd->qpd.vmid != 0)
+ pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -1312,10 +1328,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
/* MMIO is mapped through kfd device
* Generate a kfd mmap offset
*/
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
- args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id);
- args->mmap_offset <<= PAGE_SHIFT;
- }
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
+ args->mmap_offset = KFD_MMAP_TYPE_MMIO
+ | KFD_MMAP_GPU_ID(args->gpu_id);
return 0;
@@ -1801,11 +1816,16 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
} else
goto err_i1;
- dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
- process = kfd_get_process(current);
- if (IS_ERR(process)) {
- dev_dbg(kfd_device, "no process\n");
+ /* Get the process struct from the filep. Only the process
+ * that opened /dev/kfd can use the file descriptor. Child
+ * processes need to create their own KFD device context.
+ */
+ process = filep->private_data;
+ if (process->lead_thread != current->group_leader) {
+ dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
+ retcode = -EBADF;
goto err_i1;
}
@@ -1856,7 +1876,8 @@ err_i1:
kfree(kdata);
if (retcode)
- dev_dbg(kfd_device, "ret = %d\n", retcode);
+ dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
+ nr, arg, retcode);
return retcode;
}
@@ -1877,7 +1898,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("Process %d mapping mmio page\n"
+ pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
@@ -1898,20 +1919,19 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
struct kfd_dev *dev = NULL;
- unsigned long vm_pgoff;
+ unsigned long mmap_offset;
unsigned int gpu_id;
process = kfd_get_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
- vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
- gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
+ mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
+ gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
if (gpu_id)
dev = kfd_device_by_id(gpu_id);
- switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+ switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
case KFD_MMAP_TYPE_DOORBELL:
if (!dev)
return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 4e3fc284f6ac..de9f68d5c312 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
+#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
@@ -662,6 +663,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
break;
@@ -669,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
+ case CHIP_RENOIR:
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
@@ -702,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info,
cu_info,
mem_available,
- cu_info->cu_bitmap[i][j],
+ cu_info->cu_bitmap[i % 4][j + i / 4],
ct,
cu_processor_id,
k);
@@ -788,7 +796,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
* is put in the code to ensure we don't overwrite.
*/
#define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE)
-#define VCRAT_SIZE_FOR_GPU (3 * PAGE_SIZE)
+#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a3441b0e385b..27bcc5b472f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
* The receive packet buff will be sitting on the Indirect Buffer
* and in the PQ we put the IB packet + sync packet(s).
*/
- status = kq->ops.acquire_packet_buffer(kq,
+ status = kq_acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff);
if (status) {
- pr_err("acquire_packet_buffer failed\n");
+ pr_err("kq_acquire_packet_buffer failed\n");
return status;
}
@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
if (status) {
pr_err("Failed to allocate GART memory\n");
- kq->ops.rollback_packet(kq);
+ kq_rollback_packet(kq);
return status;
}
@@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
rm_packet->data_lo = QUEUESTATE__ACTIVE;
- kq->ops.submit_packet(kq);
+ kq_submit_packet(kq);
/* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout(
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
properties.type = KFD_QUEUE_TYPE_DIQ;
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
- &properties, &qid);
+ &properties, &qid, NULL);
if (status) {
pr_err("Failed to create DIQ\n");
@@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
+ uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
@@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
- (dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
- (dev->kgd, vmid) == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
- vmid, p->pasid);
- break;
- }
+ status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
+ (dev->kgd, vmid, &queried_pasid);
+
+ if (status && queried_pasid == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
+ vmid, p->pasid);
+ break;
}
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid %d\n", p->pasid);
+ pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 9d4af961c5d1..9bfa50633654 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
if (pmgr->pasid != 0) {
- pr_debug("H/W debugger is already active using pasid %d\n",
+ pr_debug("H/W debugger is already active using pasid 0x%x\n",
pmgr->pasid);
return -EBUSY;
}
@@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
- pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
p->pasid);
return -EINVAL;
}
@@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
wac_info->process->pasid);
return -EINVAL;
}
@@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
adw_info->process->pasid);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 15c523027285..511712c2e382 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
- debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
NULL, &kfd_debugfs_hang_hws_fops);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3322a443dfb2..2a9e40131735 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -39,9 +39,45 @@
*/
static atomic_t kfd_locked = ATOMIC_INIT(0);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
+#endif
+extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
+extern const struct kfd2kgd_calls arcturus_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
+
+static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
+ [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_TONGA] = &gfx_v8_kfd2kgd,
+ [CHIP_FIJI] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
+ [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
+ [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
+ [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
+};
+
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
+ .asic_name = "kaveri",
.max_pasid_bits = 16,
/* max num of queues for KV.TODO should be a dynamic value */
.max_no_of_hqd = 24,
@@ -60,6 +96,7 @@ static const struct kfd_device_info kaveri_device_info = {
static const struct kfd_device_info carrizo_device_info = {
.asic_family = CHIP_CARRIZO,
+ .asic_name = "carrizo",
.max_pasid_bits = 16,
/* max num of queues for CZ.TODO should be a dynamic value */
.max_no_of_hqd = 24,
@@ -78,6 +115,7 @@ static const struct kfd_device_info carrizo_device_info = {
static const struct kfd_device_info raven_device_info = {
.asic_family = CHIP_RAVEN,
+ .asic_name = "raven",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -96,6 +134,7 @@ static const struct kfd_device_info raven_device_info = {
static const struct kfd_device_info hawaii_device_info = {
.asic_family = CHIP_HAWAII,
+ .asic_name = "hawaii",
.max_pasid_bits = 16,
/* max num of queues for KV.TODO should be a dynamic value */
.max_no_of_hqd = 24,
@@ -114,6 +153,7 @@ static const struct kfd_device_info hawaii_device_info = {
static const struct kfd_device_info tonga_device_info = {
.asic_family = CHIP_TONGA,
+ .asic_name = "tonga",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -131,6 +171,7 @@ static const struct kfd_device_info tonga_device_info = {
static const struct kfd_device_info fiji_device_info = {
.asic_family = CHIP_FIJI,
+ .asic_name = "fiji",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -148,6 +189,7 @@ static const struct kfd_device_info fiji_device_info = {
static const struct kfd_device_info fiji_vf_device_info = {
.asic_family = CHIP_FIJI,
+ .asic_name = "fiji",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -166,6 +208,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
static const struct kfd_device_info polaris10_device_info = {
.asic_family = CHIP_POLARIS10,
+ .asic_name = "polaris10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -183,6 +226,7 @@ static const struct kfd_device_info polaris10_device_info = {
static const struct kfd_device_info polaris10_vf_device_info = {
.asic_family = CHIP_POLARIS10,
+ .asic_name = "polaris10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -200,6 +244,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
static const struct kfd_device_info polaris11_device_info = {
.asic_family = CHIP_POLARIS11,
+ .asic_name = "polaris11",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -217,6 +262,7 @@ static const struct kfd_device_info polaris11_device_info = {
static const struct kfd_device_info polaris12_device_info = {
.asic_family = CHIP_POLARIS12,
+ .asic_name = "polaris12",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -234,6 +280,7 @@ static const struct kfd_device_info polaris12_device_info = {
static const struct kfd_device_info vegam_device_info = {
.asic_family = CHIP_VEGAM,
+ .asic_name = "vegam",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -251,6 +298,7 @@ static const struct kfd_device_info vegam_device_info = {
static const struct kfd_device_info vega10_device_info = {
.asic_family = CHIP_VEGA10,
+ .asic_name = "vega10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -268,6 +316,7 @@ static const struct kfd_device_info vega10_device_info = {
static const struct kfd_device_info vega10_vf_device_info = {
.asic_family = CHIP_VEGA10,
+ .asic_name = "vega10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -285,6 +334,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
static const struct kfd_device_info vega12_device_info = {
.asic_family = CHIP_VEGA12,
+ .asic_name = "vega12",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -302,6 +352,7 @@ static const struct kfd_device_info vega12_device_info = {
static const struct kfd_device_info vega20_device_info = {
.asic_family = CHIP_VEGA20,
+ .asic_name = "vega20",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -317,8 +368,45 @@ static const struct kfd_device_info vega20_device_info = {
.num_sdma_queues_per_engine = 8,
};
+static const struct kfd_device_info arcturus_device_info = {
+ .asic_family = CHIP_ARCTURUS,
+ .asic_name = "arcturus",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 6,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info renoir_device_info = {
+ .asic_family = CHIP_RENOIR,
+ .asic_name = "renoir",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info navi10_device_info = {
.asic_family = CHIP_NAVI10,
+ .asic_name = "navi10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -334,130 +422,64 @@ static const struct kfd_device_info navi10_device_info = {
.num_sdma_queues_per_engine = 8,
};
-struct kfd_deviceid {
- unsigned short did;
- const struct kfd_device_info *device_info;
+static const struct kfd_device_info navi12_device_info = {
+ .asic_family = CHIP_NAVI12,
+ .asic_name = "navi12",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info navi14_device_info = {
+ .asic_family = CHIP_NAVI14,
+ .asic_name = "navi14",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
};
-static const struct kfd_deviceid supported_devices[] = {
+/* For each entry, [0] is regular and [1] is virtualisation device. */
+static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
- { 0x1304, &kaveri_device_info }, /* Kaveri */
- { 0x1305, &kaveri_device_info }, /* Kaveri */
- { 0x1306, &kaveri_device_info }, /* Kaveri */
- { 0x1307, &kaveri_device_info }, /* Kaveri */
- { 0x1309, &kaveri_device_info }, /* Kaveri */
- { 0x130A, &kaveri_device_info }, /* Kaveri */
- { 0x130B, &kaveri_device_info }, /* Kaveri */
- { 0x130C, &kaveri_device_info }, /* Kaveri */
- { 0x130D, &kaveri_device_info }, /* Kaveri */
- { 0x130E, &kaveri_device_info }, /* Kaveri */
- { 0x130F, &kaveri_device_info }, /* Kaveri */
- { 0x1310, &kaveri_device_info }, /* Kaveri */
- { 0x1311, &kaveri_device_info }, /* Kaveri */
- { 0x1312, &kaveri_device_info }, /* Kaveri */
- { 0x1313, &kaveri_device_info }, /* Kaveri */
- { 0x1315, &kaveri_device_info }, /* Kaveri */
- { 0x1316, &kaveri_device_info }, /* Kaveri */
- { 0x1317, &kaveri_device_info }, /* Kaveri */
- { 0x1318, &kaveri_device_info }, /* Kaveri */
- { 0x131B, &kaveri_device_info }, /* Kaveri */
- { 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
- { 0x9870, &carrizo_device_info }, /* Carrizo */
- { 0x9874, &carrizo_device_info }, /* Carrizo */
- { 0x9875, &carrizo_device_info }, /* Carrizo */
- { 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info }, /* Carrizo */
- { 0x15DD, &raven_device_info }, /* Raven */
- { 0x15D8, &raven_device_info }, /* Raven */
+ [CHIP_KAVERI] = {&kaveri_device_info, NULL},
+ [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
- { 0x67A0, &hawaii_device_info }, /* Hawaii */
- { 0x67A1, &hawaii_device_info }, /* Hawaii */
- { 0x67A2, &hawaii_device_info }, /* Hawaii */
- { 0x67A8, &hawaii_device_info }, /* Hawaii */
- { 0x67A9, &hawaii_device_info }, /* Hawaii */
- { 0x67AA, &hawaii_device_info }, /* Hawaii */
- { 0x67B0, &hawaii_device_info }, /* Hawaii */
- { 0x67B1, &hawaii_device_info }, /* Hawaii */
- { 0x67B8, &hawaii_device_info }, /* Hawaii */
- { 0x67B9, &hawaii_device_info }, /* Hawaii */
- { 0x67BA, &hawaii_device_info }, /* Hawaii */
- { 0x67BE, &hawaii_device_info }, /* Hawaii */
- { 0x6920, &tonga_device_info }, /* Tonga */
- { 0x6921, &tonga_device_info }, /* Tonga */
- { 0x6928, &tonga_device_info }, /* Tonga */
- { 0x6929, &tonga_device_info }, /* Tonga */
- { 0x692B, &tonga_device_info }, /* Tonga */
- { 0x6938, &tonga_device_info }, /* Tonga */
- { 0x6939, &tonga_device_info }, /* Tonga */
- { 0x7300, &fiji_device_info }, /* Fiji */
- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
- { 0x67C4, &polaris10_device_info }, /* Polaris10 */
- { 0x67C7, &polaris10_device_info }, /* Polaris10 */
- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
- { 0x67DF, &polaris10_device_info }, /* Polaris10 */
- { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
- { 0x67E3, &polaris11_device_info }, /* Polaris11 */
- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
- { 0x67EF, &polaris11_device_info }, /* Polaris11 */
- { 0x67FF, &polaris11_device_info }, /* Polaris11 */
- { 0x6980, &polaris12_device_info }, /* Polaris12 */
- { 0x6981, &polaris12_device_info }, /* Polaris12 */
- { 0x6985, &polaris12_device_info }, /* Polaris12 */
- { 0x6986, &polaris12_device_info }, /* Polaris12 */
- { 0x6987, &polaris12_device_info }, /* Polaris12 */
- { 0x6995, &polaris12_device_info }, /* Polaris12 */
- { 0x6997, &polaris12_device_info }, /* Polaris12 */
- { 0x699F, &polaris12_device_info }, /* Polaris12 */
- { 0x694C, &vegam_device_info }, /* VegaM */
- { 0x694E, &vegam_device_info }, /* VegaM */
- { 0x694F, &vegam_device_info }, /* VegaM */
- { 0x6860, &vega10_device_info }, /* Vega10 */
- { 0x6861, &vega10_device_info }, /* Vega10 */
- { 0x6862, &vega10_device_info }, /* Vega10 */
- { 0x6863, &vega10_device_info }, /* Vega10 */
- { 0x6864, &vega10_device_info }, /* Vega10 */
- { 0x6867, &vega10_device_info }, /* Vega10 */
- { 0x6868, &vega10_device_info }, /* Vega10 */
- { 0x6869, &vega10_device_info }, /* Vega10 */
- { 0x686A, &vega10_device_info }, /* Vega10 */
- { 0x686B, &vega10_device_info }, /* Vega10 */
- { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
- { 0x686D, &vega10_device_info }, /* Vega10 */
- { 0x686E, &vega10_device_info }, /* Vega10 */
- { 0x686F, &vega10_device_info }, /* Vega10 */
- { 0x687F, &vega10_device_info }, /* Vega10 */
- { 0x69A0, &vega12_device_info }, /* Vega12 */
- { 0x69A1, &vega12_device_info }, /* Vega12 */
- { 0x69A2, &vega12_device_info }, /* Vega12 */
- { 0x69A3, &vega12_device_info }, /* Vega12 */
- { 0x69AF, &vega12_device_info }, /* Vega12 */
- { 0x66a0, &vega20_device_info }, /* Vega20 */
- { 0x66a1, &vega20_device_info }, /* Vega20 */
- { 0x66a2, &vega20_device_info }, /* Vega20 */
- { 0x66a3, &vega20_device_info }, /* Vega20 */
- { 0x66a4, &vega20_device_info }, /* Vega20 */
- { 0x66a7, &vega20_device_info }, /* Vega20 */
- { 0x66af, &vega20_device_info }, /* Vega20 */
- /* Navi10 */
- { 0x7310, &navi10_device_info }, /* Navi10 */
- { 0x7312, &navi10_device_info }, /* Navi10 */
- { 0x7318, &navi10_device_info }, /* Navi10 */
- { 0x731a, &navi10_device_info }, /* Navi10 */
- { 0x731f, &navi10_device_info }, /* Navi10 */
+ [CHIP_HAWAII] = {&hawaii_device_info, NULL},
+ [CHIP_TONGA] = {&tonga_device_info, NULL},
+ [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
+ [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
+ [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
+ [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
+ [CHIP_VEGAM] = {&vegam_device_info, NULL},
+ [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
+ [CHIP_VEGA12] = {&vega12_device_info, NULL},
+ [CHIP_VEGA20] = {&vega20_device_info, NULL},
+ [CHIP_RENOIR] = {&renoir_device_info, NULL},
+ [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
+ [CHIP_NAVI10] = {&navi10_device_info, NULL},
+ [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
+ [CHIP_NAVI14] = {&navi14_device_info, NULL},
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -466,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-static const struct kfd_device_info *lookup_device_info(unsigned short did)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
- size_t i;
+ struct kfd_dev *kfd;
+ const struct kfd_device_info *device_info;
+ const struct kfd2kgd_calls *f2g;
- for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
- if (supported_devices[i].did == did) {
- WARN_ON(!supported_devices[i].device_info);
- return supported_devices[i].device_info;
- }
+ if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
+ || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
+ dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
+ return NULL; /* asic_type out of range */
}
- dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
- did);
-
- return NULL;
-}
+ device_info = kfd_supported_devices[asic_type][vf];
+ f2g = kfd2kgd_funcs[asic_type];
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
-{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info =
- lookup_device_info(pdev->device);
-
- if (!device_info) {
- dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ if (!device_info || !f2g) {
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[asic_type], vf ? "VF" : "");
return NULL;
}
@@ -536,6 +551,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx8_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
+ } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_arcturus_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
} else if (kfd->device_info->asic_family < CHIP_NAVI10) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
@@ -551,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
+ kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
@@ -707,10 +728,10 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return 0;
- kgd2kfd_suspend(kfd);
- /* hold dqm->lock to prevent further execution*/
- dqm_lock(kfd->dqm);
+ kfd->dqm->ops.pre_reset(kfd->dqm);
+
+ kgd2kfd_suspend(kfd);
kfd_signal_reset_event(kfd);
return 0;
@@ -724,17 +745,15 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
- int ret, count;
+ int ret;
if (!kfd->init_complete)
return 0;
- dqm_unlock(kfd->dqm);
-
ret = kfd_resume(kfd);
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
+ atomic_dec(&kfd_locked);
atomic_set(&kfd->sram_ecc_flag, 0);
@@ -806,6 +825,21 @@ dqm_start_error:
return err;
}
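+/* Queue the work on an online CPU in the caller's NUMA node, preferring a CPU
+ * other than the current one; fall back to the current CPU if it is the only
+ * online CPU in the node.
+ */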
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ int cpu, new_cpu;
+
+ cpu = new_cpu = smp_processor_id();
+ do {
+ new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+ if (cpu_to_node(new_cpu) == numa_node_id())
+ break;
+ } while (cpu != new_cpu);
+
+ queue_work_on(new_cpu, wq, work);
+}
+
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -828,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(kfd,
is_patched ? patched_ihre : ih_ring_entry))
- queue_work(kfd->ih_wq, &kfd->interrupt_work);
+ kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e6a4288bfaa6..80d22bf702e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
}
q->properties.doorbell_off =
- kfd_doorbell_id_to_offset(dev, q->process,
+ kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
q->doorbell_id);
return 0;
@@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit, allocated_vmid;
+ int allocated_vmid = -1, i;
- if (dqm->vmid_bitmap == 0)
- return -ENOMEM;
+ for (i = dqm->dev->vm_info.first_vmid_kfd;
+ i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
+ if (!dqm->vmid_pasid[i]) {
+ allocated_vmid = i;
+ break;
+ }
+ }
- bit = ffs(dqm->vmid_bitmap) - 1;
- dqm->vmid_bitmap &= ~(1 << bit);
+ if (allocated_vmid < 0) {
+ pr_err("no more vmid to allocate\n");
+ return -ENOSPC;
+ }
+
+ pr_debug("vmid allocated: %d\n", allocated_vmid);
+
+ dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
+
+ set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
- allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
- pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
- dqm->dev->kfd2kgd->set_scratch_backing_va(
- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (dqm->dev->kfd2kgd->set_scratch_backing_va)
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
@@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
-
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
@@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+ dqm->vmid_pasid[qpd->vmid] = 0;
- dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
@@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
+ goto add_queue_to_list;
+ }
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
@@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
goto out_free_mqd;
}
+add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
@@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
+ return 0;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
+
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Update non-HWS queue while stopped\n");
+ goto out_unlock;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
+ dqm->queue_count--;
+
+ if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
+ continue;
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count--;
}
out:
@@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
+ dqm->queue_count++;
+
+ if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
+ continue;
+
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
if (retval && !ret)
@@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count++;
}
qpd->evicted = 0;
out:
@@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -879,9 +912,10 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;
- dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1;
+ memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
+
+ dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
+ dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
return 0;
}
@@ -896,21 +930,36 @@ static void uninitialize(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqd_mgrs[i]);
mutex_destroy(&dqm->lock_hidden);
- kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ pr_info("SW scheduler is used");
init_interrupts(dqm);
- return pm_init(&dqm->packets, dqm);
+
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ return pm_init(&dqm->packets, dqm);
+ dqm->sched_running = true;
+
+ return 0;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- pm_uninit(&dqm->packets);
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ pm_uninit(&dqm->packets, false);
+ dqm->sched_running = false;
+
return 0;
}
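+/* Mark the DQM as resetting before a GPU reset so that a HWS hang detected
+ * during the reset does not schedule another reset.
+ */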
+static void pre_reset(struct device_queue_manager *dqm)
+{
+ dqm_lock(dqm);
+ dqm->is_resetting = true;
+ dqm_unlock(dqm);
+}
+
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q)
{
@@ -1019,8 +1068,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->xgmi_sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;
- dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1;
+ dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
+ dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
@@ -1058,25 +1107,32 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->is_resetting = false;
+ dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ bool hanging;
+
dqm_lock(dqm);
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ if (!dqm->is_hws_hang)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ hanging = dqm->is_hws_hang || dqm->is_resetting;
+ dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, hanging);
return 0;
}
@@ -1181,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+ dqm->xgmi_sdma_queue_count++;
+
if (q->properties.is_active) {
dqm->queue_count++;
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -1259,9 +1317,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
return 0;
-
if (dqm->active_runlist)
return 0;
@@ -1283,6 +1342,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->is_hws_hang)
return -EIO;
if (!dqm->active_runlist)
@@ -1305,8 +1366,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
queue_preemption_timeout_ms);
- if (retval)
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ /* It's possible we're detecting a HWS hang in the
+ * middle of a GPU reset. No need to schedule another
+ * reset in this case.
+ */
+ if (!dqm->is_resetting)
+ schedule_work(&dqm->hw_exception_work);
return retval;
+ }
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
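
The comment above captures the new division of labor: unmap_queues_cpsch() now owns the hang detection, marks is_hws_hang on a preemption timeout, and only schedules the driver's own reset work when pre_reset() has not already flagged an in-flight GPU reset via is_resetting. A minimal sketch of that decision, with hypothetical stand-ins for the real work item:

#include <stdbool.h>
#include <stdio.h>

struct dqm_flags {
        bool is_hws_hang;   /* preemption fence timed out */
        bool is_resetting;  /* set by pre_reset() before amdgpu resets the GPU */
};

/* Hypothetical stand-in for schedule_work(&dqm->hw_exception_work). */
static void schedule_hw_exception(void)
{
        printf("KFD schedules a GPU reset\n");
}

static void on_preemption_timeout(struct dqm_flags *dqm)
{
        dqm->is_hws_hang = true;
        /* If amdgpu already started a reset, don't queue a second one. */
        if (!dqm->is_resetting)
                schedule_hw_exception();
}

int main(void)
{
        struct dqm_flags dqm = { .is_resetting = false };

        on_preemption_timeout(&dqm);    /* schedules the reset */
        return 0;
}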
@@ -1324,12 +1394,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->is_hws_hang)
return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
- if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
- dqm->is_hws_hang = true;
- schedule_work(&dqm->hw_exception_work);
+ if (retval)
return retval;
- }
return map_queues_cpsch(dqm);
}
@@ -1548,7 +1614,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
@@ -1676,7 +1742,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- dev->device_info->num_sdma_engines *
+ (dev->device_info->num_sdma_engines +
+ dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
@@ -1722,6 +1789,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -1740,6 +1808,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
@@ -1786,9 +1855,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:
@@ -1813,7 +1886,8 @@ out_free:
return NULL;
}
-void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, struct kfd_mem_obj *mqd)
+static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
+ struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
@@ -1881,6 +1955,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
int pipe, queue;
int r = 0;
+ if (!dqm->sched_running) {
+ seq_printf(m, " Device is stopped\n");
+
+ return 0;
+ }
+
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
@@ -1915,7 +1995,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
+ get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 90db2c9275f6..871d3b628d2d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -32,6 +32,8 @@
#include "kfd_mqd_manager.h"
+#define VMID_NUM 16
+
struct device_process_node {
struct qcm_process_device *qpd;
struct list_head list;
@@ -102,6 +104,7 @@ struct device_queue_manager_ops {
int (*initialize)(struct device_queue_manager *dqm);
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
+ void (*pre_reset)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
@@ -185,9 +188,9 @@ struct device_queue_manager {
unsigned int *allocated_queues;
uint64_t sdma_bitmap;
uint64_t xgmi_sdma_bitmap;
- unsigned int vmid_bitmap;
+ /* the pasid mapping for each kfd vmid */
+ uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
- struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
unsigned int *fence_addr;
struct kfd_mem_obj *fence_mem;
@@ -196,8 +199,10 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
+ bool is_resetting;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
+ bool sched_running;
};
void device_queue_manager_init_cik(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebe79bf00145..8e0c00b9555e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
- kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+ kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
kfd_doorbell_process_slice(kfd));
@@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("doorbell_id_offset == 0x%08lX\n",
- kfd->doorbell_id_offset);
+ pr_debug("doorbell_base_dw_offset == 0x%08lX\n",
+ kfd->doorbell_base_dw_offset);
pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit);
@@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
* Calculating the kernel doorbell offset using the first
* doorbell page.
*/
- *doorbell_off = kfd->doorbell_id_offset + inx;
+ *doorbell_off = kfd->doorbell_base_dw_offset + inx;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
}
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id)
{
/*
- * doorbell_id_offset accounts for doorbells taken by KGD.
+ * doorbell_base_dw_offset accounts for doorbells taken by KGD.
* index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
* the process's doorbells. The offset returned is in dword
* units regardless of the ASIC-dependent doorbell size.
*/
- return kfd->doorbell_id_offset +
+ return kfd->doorbell_base_dw_offset +
process->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
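
As a worked example of the dword offset computed above (all values hypothetical: a base offset of 0x400 dwords, process doorbell index 2, a 4 KiB per-process slice, doorbell id 3 and 8-byte doorbells):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the function above, with made-up numbers instead of
 * the real kfd_dev/kfd_process fields.
 */
static unsigned int doorbell_dw_offset(unsigned int base_dw_offset,
                                       unsigned int process_index,
                                       unsigned int process_slice_bytes,
                                       unsigned int doorbell_id,
                                       unsigned int doorbell_size_bytes)
{
        return base_dw_offset +
               process_index * process_slice_bytes / sizeof(uint32_t) +
               doorbell_id * doorbell_size_bytes / sizeof(uint32_t);
}

int main(void)
{
        /* 0x400 + 2 * 1024 + 3 * 2 = 0xc06 dwords into the doorbell BAR */
        printf("0x%x\n", doorbell_dw_offset(0x400, 2, 4096, 3, 8));
        return 0;
}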
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..1f8365575b12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -346,7 +346,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
ret = create_signal_event(devkfd, p, ev);
if (!ret) {
*event_page_offset = KFD_MMAP_TYPE_EVENTS;
- *event_page_offset <<= PAGE_SHIFT;
*event_slot_index = ev->event_id;
}
break;
@@ -852,8 +851,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGSEGV to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -861,13 +860,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGTERM to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "HSA Process (PID %d) got unhandled exception",
- p->lead_thread->pid);
+ "Process %d (pasid 0x%x) got unhandled exception",
+ p->lead_thread->pid, p->pasid);
}
}
}
@@ -936,7 +935,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN) {
+ if (dev->device_info->asic_family != CHIP_RAVEN &&
+ dev->device_info->asic_family != CHIP_RENOIR) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 60521366dd31..bb77b8890e77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
/*Iterating over all devices*/
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
- if (!dev) {
- id++; /* Skip non GPU devices */
+ if (!dev || kfd_devcgroup_check_permission(dev)) {
+ /* Skip non-GPU devices and devices to which the
+ * current process has no access. Access can be
+ * limited by placing the process in a specific
+ * cgroup hierarchy.
+ */
+ id++;
continue;
}
@@ -405,7 +410,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kfd_init_apertures_v9(pdd, id);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index a85904ad0d5f..e05d75ecda21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
memcpy(patched_ihre, ih_ring_entry,
dev->device_info->ih_ring_entry_size);
- pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
- dev->kgd, vmid);
+ pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
@@ -80,6 +79,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2;
}
@@ -104,6 +104,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
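
On the ISR change above: the PASID for a faulting VMID is now taken from a software table kept by the device queue manager (vmid_pasid[] in kfd_device_queue_manager.h further down) rather than read back from the ATC mapping registers on every interrupt. A minimal sketch of the idea; the helper names are hypothetical, and exactly when the table is written depends on the scheduling mode:

#include <stdint.h>

#define VMID_NUM 16

/* Software copy of the VMID -> PASID mapping, indexed by VMID. */
static uint16_t vmid_pasid[VMID_NUM];

static void bind_pasid_to_vmid(unsigned int vmid, uint16_t pasid)
{
        vmid_pasid[vmid] = pasid;       /* when a VMID is handed to a process */
}

static void unbind_vmid(unsigned int vmid)
{
        vmid_pasid[vmid] = 0;           /* when the VMID is released */
}

static uint16_t pasid_from_vmid(unsigned int vmid)
{
        return vmid_pasid[vmid];        /* cheap lookup in the interrupt path */
}

int main(void)
{
        bind_pasid_to_vmid(8, 0x1234);          /* process gets VMID 8 */
        uint16_t pasid = pasid_from_vmid(8);    /* ISR lookup -> 0x1234 */
        unbind_vmid(8);
        return pasid == 0x1234 ? 0 : 1;
}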
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index c56ac47cd318..bc47f6a44456 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
}
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!kfd->ih_wq)) {
+ kfifo_free(&kfd->ih_fifo);
+ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..8d871514671e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,6 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
unsigned int pasid_limit;
int err;
- struct kfd_topology_device *top_dev;
-
- top_dev = kfd_topology_device_by_id(kfd->id);
if (!kfd->device_info->needs_iommu_device)
return 0;
@@ -160,7 +157,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
if (!p)
return;
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
mutex_lock(kfd_get_dbgmgr_mutex());
@@ -194,7 +191,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
struct kfd_dev *dev;
dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
@@ -235,7 +232,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
+ pr_err("Unexpected pasid 0x%x binding failure\n",
p->pasid);
mutex_unlock(&p->mutex);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 29c0bd2d7a5c..bae706462f96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -34,7 +34,10 @@
#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
-static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+/* Initialize a kernel queue, including allocations of GART memory
+ * needed for the queue.
+ */
+static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
struct queue_properties prop;
@@ -87,9 +90,17 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
- retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
- if (!retval)
- goto err_eop_allocate_vidmem;
+ /* For CIK-family ASICs, kq->eop_mem is not needed */
+ if (dev->device_info->asic_family > CHIP_MULLINS) {
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+ if (retval != 0)
+ goto err_eop_allocate_vidmem;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+ kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
+
+ memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
+ }
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
&kq->rptr_mem);
@@ -183,9 +194,10 @@ err_get_kernel_doorbell:
}
-static void uninitialize(struct kernel_queue *kq)
+/* Uninitialize a kernel queue and free all its memory usages. */
+static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
{
- if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
@@ -200,14 +212,19 @@ static void uninitialize(struct kernel_queue *kq)
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
- kq->ops_asic_specific.uninitialize(kq);
+
+ /* For CIK-family ASICs, kq->eop_mem is NULL; kfd_gtt_sa_free()
+ * handles a NULL pointer correctly.
+ */
+ kfd_gtt_sa_free(kq->dev, kq->eop_mem);
+
kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
}
-static int acquire_packet_buffer(struct kernel_queue *kq,
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
size_t packet_size_in_dwords, unsigned int **buffer_ptr)
{
size_t available_size;
@@ -268,7 +285,7 @@ err_no_space:
return -ENOMEM;
}
-static void submit_packet(struct kernel_queue *kq)
+void kq_submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
@@ -280,11 +297,18 @@ static void submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
-
- kq->ops_asic_specific.submit_packet(kq);
+ if (kq->dev->device_info->doorbell_size == 8) {
+ *kq->wptr64_kernel = kq->pending_wptr64;
+ write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr64);
+ } else {
+ *kq->wptr_kernel = kq->pending_wptr;
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+ }
}
-static void rollback_packet(struct kernel_queue *kq)
+void kq_rollback_packet(struct kernel_queue *kq)
{
if (kq->dev->device_info->doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
@@ -304,56 +328,18 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
if (!kq)
return NULL;
- kq->ops.initialize = initialize;
- kq->ops.uninitialize = uninitialize;
- kq->ops.acquire_packet_buffer = acquire_packet_buffer;
- kq->ops.submit_packet = submit_packet;
- kq->ops.rollback_packet = rollback_packet;
-
- switch (dev->device_info->asic_family) {
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kernel_queue_init_vi(&kq->ops_asic_specific);
- break;
-
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kernel_queue_init_cik(&kq->ops_asic_specific);
- break;
-
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kernel_queue_init_v9(&kq->ops_asic_specific);
- break;
- case CHIP_NAVI10:
- kernel_queue_init_v10(&kq->ops_asic_specific);
- break;
- default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- goto out_free;
- }
-
- if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+ if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
return kq;
pr_err("Failed to init kernel queue\n");
-out_free:
kfree(kq);
return NULL;
}
-void kernel_queue_uninit(struct kernel_queue *kq)
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
{
- kq->ops.uninitialize(kq);
+ kq_uninitialize(kq, hanging);
kfree(kq);
}
@@ -373,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
return;
}
- retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
+ retval = kq_acquire_packet_buffer(kq, 5, &buffer);
if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n");
@@ -381,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
}
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
- kq->ops.submit_packet(kq);
+ kq_submit_packet(kq);
pr_err("Ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 365fc674fea4..f4cfe9f1871c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -29,45 +29,28 @@
#include "kfd_priv.h"
/**
- * struct kernel_queue_ops
- *
- * @initialize: Initialize a kernel queue, including allocations of GART memory
- * needed for the queue.
- *
- * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
- *
- * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel
* queue ring buffer where the calling function can write its packet. It is
* guaranteed that there is enough space for that packet. It also updates the
* pending write pointer to that location so subsequent calls to
* acquire_packet_buffer will get a correct write pointer
*
- * @submit_packet: Update the write pointer and doorbell of a kernel queue.
- *
- * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
- * queue are equal, which means the CP has read all the submitted packets.
+ * kq_submit_packet: Update the write pointer and doorbell of a kernel queue.
*
- * @rollback_packet: This routine is called if we failed to build an acquired
+ * kq_rollback_packet: This routine is called if we failed to build an acquired
* packet for some reason. It just overwrites the pending wptr with the current
* one
*
*/
-struct kernel_queue_ops {
- bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
- void (*uninitialize)(struct kernel_queue *kq);
- int (*acquire_packet_buffer)(struct kernel_queue *kq,
- size_t packet_size_in_dwords,
- unsigned int **buffer_ptr);
- void (*submit_packet)(struct kernel_queue *kq);
- void (*rollback_packet)(struct kernel_queue *kq);
-};
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
+ size_t packet_size_in_dwords,
+ unsigned int **buffer_ptr);
+void kq_submit_packet(struct kernel_queue *kq);
+void kq_rollback_packet(struct kernel_queue *kq);
-struct kernel_queue {
- struct kernel_queue_ops ops;
- struct kernel_queue_ops ops_asic_specific;
+struct kernel_queue {
/* data */
struct kfd_dev *dev;
struct mqd_manager *mqd_mgr;
@@ -99,9 +82,4 @@ struct kernel_queue {
struct list_head list;
};
-void kernel_queue_init_cik(struct kernel_queue_ops *ops);
-void kernel_queue_init_vi(struct kernel_queue_ops *ops);
-void kernel_queue_init_v9(struct kernel_queue_ops *ops);
-void kernel_queue_init_v10(struct kernel_queue_ops *ops);
-
#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
deleted file mode 100644
index aed32ab7102e..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers_ai.h"
-#include "kfd_pm4_opcodes.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v10(struct kernel_queue *kq);
-static void submit_packet_v10(struct kernel_queue *kq);
-
-void kernel_queue_init_v10(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_v10;
- ops->uninitialize = uninitialize_v10;
- ops->submit_packet = submit_packet_v10;
-}
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval != 0)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_v10(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v10(struct kernel_queue *kq)
-{
- *kq->wptr64_kernel = kq->pending_wptr64;
- write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr64);
-}
-
-static int pm_map_process_v10(struct packet_manager *pm,
- uint32_t *buffer, struct qcm_process_device *qpd)
-{
- struct pm4_mes_map_process *packet;
- uint64_t vm_page_table_base_addr = qpd->page_table_base;
-
- packet = (struct pm4_mes_map_process *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
- packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
- sizeof(struct pm4_mes_map_process));
- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
- packet->bitfields2.process_quantum = 1;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
- packet->bitfields14.gds_size = qpd->gds_size;
- packet->bitfields14.num_gws = qpd->num_gws;
- packet->bitfields14.num_oac = qpd->num_oac;
- packet->bitfields14.sdma_enable = 1;
-
- packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
- packet->sh_mem_config = qpd->sh_mem_config;
- packet->sh_mem_bases = qpd->sh_mem_bases;
- if (qpd->tba_addr) {
- packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) |
- upper_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
- packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
- }
-
- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
- packet->vm_context_page_table_base_addr_lo32 =
- lower_32_bits(vm_page_table_base_addr);
- packet->vm_context_page_table_base_addr_hi32 =
- upper_32_bits(vm_page_table_base_addr);
-
- return 0;
-}
-
-static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer,
- uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
- struct pm4_mes_runlist *packet;
-
- int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
-
- /* Determine the number of processes to map together to HW:
- * it can not exceed the number of VMIDs available to the
- * scheduler, and it is determined by the smaller of the number
- * of processes in the runlist and kfd module parameter
- * hws_max_conc_proc.
- * Note: the arbitration between the number of VMIDs and
- * hws_max_conc_proc has been done in
- * kgd2kfd_device_init().
- */
- concurrent_proc_cnt = min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
-
-
- packet = (struct pm4_mes_runlist *)buffer;
-
- memset(buffer, 0, sizeof(struct pm4_mes_runlist));
- packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
- sizeof(struct pm4_mes_runlist));
-
- packet->bitfields4.ib_size = ib_size_in_dwords;
- packet->bitfields4.chain = chain ? 1 : 0;
- packet->bitfields4.offload_polling = 0;
- packet->bitfields4.valid = 1;
- packet->bitfields4.process_cnt = concurrent_proc_cnt;
- packet->ordinal2 = lower_32_bits(ib);
- packet->ib_base_hi = upper_32_bits(ib);
-
- return 0;
-}
-
-static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q, bool is_static)
-{
- struct pm4_mes_map_queues *packet;
- bool use_static = is_static;
-
- packet = (struct pm4_mes_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
- packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_mes_map_queues));
- packet->bitfields2.num_queues = 1;
- packet->bitfields2.queue_sel =
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__normal_compute_vi;
-
- switch (q->properties.type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- if (use_static)
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__normal_latency_static_queue_vi;
- break;
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__debug_interface_queue_vi;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
- engine_sel__mes_map_queues__sdma0_vi;
- use_static = false; /* no static queues under SDMA */
- break;
- default:
- WARN(1, "queue type %d\n", q->properties.type);
- return -EINVAL;
- }
- packet->bitfields3.doorbell_offset =
- q->properties.doorbell_off;
-
- packet->mqd_addr_lo =
- lower_32_bits(q->gart_mqd_addr);
-
- packet->mqd_addr_hi =
- upper_32_bits(q->gart_mqd_addr);
-
- packet->wptr_addr_lo =
- lower_32_bits((uint64_t)q->properties.write_ptr);
-
- packet->wptr_addr_hi =
- upper_32_bits((uint64_t)q->properties.write_ptr);
-
- return 0;
-}
-
-static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer,
- enum kfd_queue_type type,
- enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset,
- unsigned int sdma_engine)
-{
- struct pm4_mes_unmap_queues *packet;
-
- packet = (struct pm4_mes_unmap_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-
- packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
- sizeof(struct pm4_mes_unmap_queues));
- switch (type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__compute;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
- break;
- default:
- WARN(1, "queue type %d\n", type);
- break;
- }
-
- if (reset)
- packet->bitfields2.action =
- action__mes_unmap_queues__reset_queues;
- else
- packet->bitfields2.action =
- action__mes_unmap_queues__preempt_queues;
-
- switch (filter) {
- case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
- packet->bitfields2.num_queues = 1;
- packet->bitfields3b.doorbell_offset0 = filter_param;
- break;
- case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
- packet->bitfields3a.pasid = filter_param;
- break;
- case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__unmap_all_queues;
- break;
- case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
- /* in this case, we do not preempt static queues */
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
- break;
- default:
- WARN(1, "filter %d\n", filter);
- break;
- }
-
- return 0;
-
-}
-
-static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
-{
- struct pm4_mes_query_status *packet;
-
- packet = (struct pm4_mes_query_status *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_query_status));
-
-
- packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
- sizeof(struct pm4_mes_query_status));
-
- packet->bitfields2.context_id = 0;
- packet->bitfields2.interrupt_sel =
- interrupt_sel__mes_query_status__completion_status;
- packet->bitfields2.command =
- command__mes_query_status__fence_only_after_write_ack;
-
- packet->addr_hi = upper_32_bits((uint64_t)fence_address);
- packet->addr_lo = lower_32_bits((uint64_t)fence_address);
- packet->data_hi = upper_32_bits((uint64_t)fence_value);
- packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
- return 0;
-}
-
-
-static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer)
-{
- struct pm4_mec_release_mem *packet;
-
- WARN_ON(!buffer);
-
- packet = (struct pm4_mec_release_mem *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
- packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
- sizeof(struct pm4_mec_release_mem));
-
- packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
- packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
- packet->bitfields2.tcl1_action_ena = 1;
- packet->bitfields2.tc_action_ena = 1;
- packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
- packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
- packet->bitfields3.int_sel =
- int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
- packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
- packet->address_hi = upper_32_bits(gpu_addr);
-
- packet->data_lo = 0;
-
- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
-}
-
-const struct packet_manager_funcs kfd_v10_pm_funcs = {
- .map_process = pm_map_process_v10,
- .runlist = pm_runlist_v10,
- .set_resources = pm_set_resources_vi,
- .map_queues = pm_map_queues_v10,
- .unmap_queues = pm_unmap_queues_v10,
- .query_status = pm_query_status_v10,
- .release_mem = pm_release_mem_v10,
- .map_process_size = sizeof(struct pm4_mes_map_process),
- .runlist_size = sizeof(struct pm4_mes_runlist),
- .set_resources_size = sizeof(struct pm4_mes_set_resources),
- .map_queues_size = sizeof(struct pm4_mes_map_queues),
- .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .query_status_size = sizeof(struct pm4_mes_query_status),
- .release_mem_size = sizeof(struct pm4_mec_release_mem)
-};
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index d6cf391da591..88813dad731f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -98,8 +98,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t *se_mask)
{
struct kfd_cu_info cu_info;
- uint32_t cu_per_sh[4] = {0};
- int i, se, cu = 0;
+ uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
+ int i, se, sh, cu = 0;
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
@@ -107,8 +107,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
cu_mask_count = cu_info.cu_active_number;
for (se = 0; se < cu_info.num_shader_engines; se++)
- for (i = 0; i < 4; i++)
- cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
+ cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
/* Symmetrically map cu_mask to all SEs:
* cu_mask[0] bit0 -> se_mask[0] bit0;
@@ -128,6 +128,6 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
se = 0;
cu++;
}
- } while (cu >= cu_per_sh[se] && cu < 32);
+ } while (cu >= cu_per_se[se] && cu < 32);
}
}
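
The loop above now sums active CUs per shader engine across all shader arrays instead of assuming four arrays, and the tail of the function distributes the user's CU mask round-robin over the engines. A simplified, self-contained sketch of that distribution, assuming every engine exposes the same number of CUs (hypothetical NUM_SE):

#include <stdint.h>
#include <stdio.h>

#define NUM_SE 4   /* hypothetical shader-engine count */

/* Round-robin a linear CU mask across shader engines: bit 0 -> SE0 CU0,
 * bit 1 -> SE1 CU0, ..., bit NUM_SE -> SE0 CU1, and so on.  Simplified
 * relative to the driver: all SEs are assumed to have the same CU count.
 */
static void spread_cu_mask(uint32_t cu_mask, uint32_t se_mask[NUM_SE])
{
        unsigned int i;

        for (i = 0; i < 32; i++)
                if (cu_mask & (1u << i))
                        se_mask[i % NUM_SE] |= 1u << (i / NUM_SE);
}

int main(void)
{
        uint32_t se_mask[NUM_SE] = {0};
        unsigned int i;

        spread_cu_mask(0x3f, se_mask);  /* enable the first six CUs */
        for (i = 0; i < NUM_SE; i++)
                printf("se_mask[%u] = 0x%x\n", i, se_mask[i]);
        return 0;
}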
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 550b61e81015..fbdb16418847 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -26,6 +26,8 @@
#include "kfd_priv.h"
+#define KFD_MAX_NUM_SE 8
+
/**
* struct mqd_manager
*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 28876aceb14b..19f0fe547c57 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -374,7 +374,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -401,7 +400,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
@@ -442,7 +441,7 @@ struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
mqd = mqd_manager_init_cik(type, dev);
if (!mqd)
return NULL;
- if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ if (type == KFD_MQD_TYPE_CP)
mqd->update_mqd = update_mqd_hawaii;
return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 9cd3eb2d90bd..d1d68a51bfb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -66,38 +66,22 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
}
+static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
+{
+ m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
+ m->cp_hqd_queue_priority = q->priority;
+}
+
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
- int retval;
- struct kfd_mem_obj *mqd_mem_obj = NULL;
-
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
- */
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if (!mqd_mem_obj)
- return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
- &(mqd_mem_obj->gtt_mem),
- &(mqd_mem_obj->gpu_addr),
- (void *)&(mqd_mem_obj->cpu_ptr), true);
- } else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
- &mqd_mem_obj);
- }
+ struct kfd_mem_obj *mqd_mem_obj;
- if (retval) {
- kfree(mqd_mem_obj);
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
+ &mqd_mem_obj))
return NULL;
- }
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -131,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
@@ -172,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -230,11 +219,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
- q->is_active = (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0 &&
- !q->is_evicted);
+ q->is_active = QUEUE_IS_ACTIVE(*q);
}
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
@@ -250,14 +237,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- struct kfd_dev *kfd = mm->dev;
-
- if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
- kfree(mqd_mem_obj);
- } else {
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
- }
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -276,18 +256,22 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
{
struct v10_compute_mqd *m;
- /* Control stack is located one page after MQD. */
- void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
-
m = get_mqd(mqd);
+ /* Control stack is written backwards, while workgroup context data
+ * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
+ * Current position is at m->cp_hqd_cntl_stack_offset and
+ * m->cp_hqd_wg_state_offset, respectively.
+ */
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
- if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
- return -EFAULT;
+ /* Control stack is not copied to user mode for GFXv10 because
+ * it's part of the context save area that is already
+ * accessible to user mode.
+ */
return 0;
}
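
Given the layout in the comment above (the control stack grows downward from cp_hqd_cntl_stack_size, workgroup state grows upward toward cp_hqd_wg_state_offset), the two used sizes fall out as simple differences; the same correction is applied to the GFXv9 variant further down. A worked example with made-up MQD values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical MQD values, in bytes. */
        uint32_t cntl_stack_size   = 0x1000; /* top of the control stack region */
        uint32_t cntl_stack_offset = 0x0c00; /* current (downward) position     */
        uint32_t wg_state_offset   = 0x1800; /* current (upward) position       */

        uint32_t ctl_stack_used = cntl_stack_size - cntl_stack_offset;  /* 0x400 */
        uint32_t save_area_used = wg_state_offset - cntl_stack_size;    /* 0x800 */

        printf("ctl stack used: 0x%x, save area used: 0x%x\n",
               ctl_stack_used, save_area_used);
        return 0;
}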
@@ -306,18 +290,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct v10_compute_mqd *m;
-
- update_mqd(mm, mqd, q);
-
- /* TODO: what's the point? update_mqd already does this. */
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
-}
-
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -369,11 +341,7 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
-
- q->is_active = (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0 &&
- !q->is_evicted);
+ q->is_active = QUEUE_IS_ACTIVE(*q);
}
/*
@@ -421,7 +389,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
@@ -429,7 +397,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
@@ -450,8 +417,8 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
@@ -461,11 +428,11 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 0c58f91b3ff3..436b7f518979 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -46,7 +46,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct v9_mqd *m;
- uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+ uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
if (q->cu_mask_count == 0)
return;
@@ -59,12 +59,20 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
+ m->compute_static_thread_mgmt_se4 = se_mask[4];
+ m->compute_static_thread_mgmt_se5 = se_mask[5];
+ m->compute_static_thread_mgmt_se6 = se_mask[6];
+ m->compute_static_thread_mgmt_se7 = se_mask[7];
- pr_debug("update cu mask to %#x %#x %#x %#x\n",
+ pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
- m->compute_static_thread_mgmt_se3);
+ m->compute_static_thread_mgmt_se3,
+ m->compute_static_thread_mgmt_se4,
+ m->compute_static_thread_mgmt_se5,
+ m->compute_static_thread_mgmt_se6,
+ m->compute_static_thread_mgmt_se7);
}
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
@@ -84,7 +92,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
@@ -125,6 +133,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -179,6 +191,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
wptr_shift, 0, mms);
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -290,7 +310,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
- *save_area_used_size = m->cp_hqd_wg_state_offset;
+ *save_area_used_size = m->cp_hqd_wg_state_offset -
+ m->cp_hqd_cntl_stack_size;
if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
return -EFAULT;
@@ -312,18 +333,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct v9_mqd *m;
-
- update_mqd(mm, mqd, q);
-
- /* TODO: what's the point? update_mqd already does this. */
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
-}
-
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -431,7 +440,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -449,8 +457,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v9_mqd);
@@ -459,11 +467,11 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v9_mqd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 7d144f56f421..a5e8ff1e5945 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -312,11 +312,7 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
- struct vi_mqd *m;
__update_mqd(mm, mqd, q, MTYPE_UC, 0);
-
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -425,7 +421,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -453,7 +448,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
@@ -494,7 +489,7 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
mqd = mqd_manager_init_vi(type, dev);
if (!mqd)
return NULL;
- if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ if (type == KFD_MQD_TYPE_CP)
mqd->update_mqd = update_mqd_tonga;
return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index ccf6b2310316..dc406e6dee23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -239,10 +239,12 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
- pm->pmf = &kfd_v9_pm_funcs;
- break;
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
case CHIP_NAVI10:
- pm->pmf = &kfd_v10_pm_funcs;
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
+ pm->pmf = &kfd_v9_pm_funcs;
break;
default:
WARN(1, "Unexpected ASIC family %u",
@@ -262,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
return 0;
}
-void pm_uninit(struct packet_manager *pm)
+void pm_uninit(struct packet_manager *pm, bool hanging)
{
mutex_destroy(&pm->lock);
- kernel_queue_uninit(pm->priv_queue);
+ kernel_queue_uninit(pm->priv_queue, hanging);
}
int pm_send_set_resources(struct packet_manager *pm,
@@ -276,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm,
size = pm->pmf->set_resources_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t),
(unsigned int **)&buffer);
if (!buffer) {
@@ -287,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm,
retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -314,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
mutex_lock(&pm->lock);
- retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ retval = kq_acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
if (retval)
goto fail_acquire_packet_buffer;
@@ -324,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval)
goto fail_create_runlist;
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return retval;
fail_create_runlist:
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
@@ -350,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -360,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -379,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
size = pm->pmf->unmap_queues_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -390,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
reset, sdma_engine);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -437,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -445,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
goto out;
}
memset(buffer, 0x55, size);
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
buffer[0], buffer[1], buffer[2], buffer[3],
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2d5ddf199bd0..2de01009f1b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -25,47 +25,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_opcodes.h"
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v9(struct kernel_queue *kq);
-static void submit_packet_v9(struct kernel_queue *kq);
-
-void kernel_queue_init_v9(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_v9;
- ops->uninitialize = uninitialize_v9;
- ops->submit_packet = submit_packet_v9;
-}
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_v9(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v9(struct kernel_queue *kq)
-{
- *kq->wptr64_kernel = kq->pending_wptr64;
- write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr64);
-}
+#include "gc/gc_10_1_0_sh_mask.h"
static int pm_map_process_v9(struct packet_manager *pm,
uint32_t *buffer, struct qcm_process_device *qpd)
@@ -81,7 +41,8 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 1;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
- packet->bitfields14.gds_size = qpd->gds_size;
+ packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
+ packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = qpd->num_gws;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
@@ -89,10 +50,17 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
- packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
- packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+ if (qpd->tba_addr) {
+ packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+ /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
+ * not defined, so setting it won't do any harm.
+ */
+ packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
+ | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
+
+ packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
+ packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+ }
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
@@ -143,6 +111,34 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
+static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res)
+{
+ struct pm4_mes_set_resources *packet;
+
+ packet = (struct pm4_mes_set_resources *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+
+ packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_mes_set_resources));
+
+ packet->bitfields2.queue_type =
+ queue_type__mes_set_resources__hsa_interface_queue_hiq;
+ packet->bitfields2.vmid_mask = res->vmid_mask;
+ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+ packet->bitfields7.oac_mask = res->oac_mask;
+ packet->bitfields8.gds_heap_base = res->gds_heap_base;
+ packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+ return 0;
+}
+
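/* Note (illustrative, not part of the patch): the unmap_latency field is
 * programmed in units of 100 ms, so assuming KFD_UNMAP_LATENCY_MS keeps its
 * kfd_priv.h value of 4000, pm_set_resources_v9() writes 40 into the packet.
 */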
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
@@ -161,6 +157,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_compute_vi;
@@ -176,9 +174,15 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
- engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
+ if (q->properties.sdma_engine_id < 2)
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
+ engine_sel__mes_map_queues__sdma0_vi;
+ else {
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
+ }
break;
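/* Illustrative example, not part of the patch: on ASICs with more than two
 * SDMA engines, e.g. a queue with sdma_engine_id == 5, the packet now uses
 *   extended_engine_sel = extended_engine_sel__mes_map_queues__sdma0_to_7_sel
 *   engine_sel          = 5
 * while engines 0 and 1 keep the legacy sdma0_vi/sdma1_vi encoding so older
 * firmware interpretations remain unchanged.
 */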
default:
WARN(1, "queue type %d", q->properties.type);
@@ -218,13 +222,23 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE:
case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__compute;
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+ if (sdma_engine < 2) {
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+ } else {
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
+ packet->bitfields2.engine_sel = sdma_engine;
+ }
break;
default:
WARN(1, "queue type %d", type);
@@ -294,48 +308,19 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-
-static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
-{
- struct pm4_mec_release_mem *packet;
-
- packet = (struct pm4_mec_release_mem *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
- packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
- sizeof(struct pm4_mec_release_mem));
-
- packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
- packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
- packet->bitfields2.tcl1_action_ena = 1;
- packet->bitfields2.tc_action_ena = 1;
- packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
- packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
- packet->bitfields3.int_sel =
- int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
- packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
- packet->address_hi = upper_32_bits(gpu_addr);
-
- packet->data_lo = 0;
-
- return 0;
-}
-
const struct packet_manager_funcs kfd_v9_pm_funcs = {
.map_process = pm_map_process_v9,
.runlist = pm_runlist_v9,
- .set_resources = pm_set_resources_vi,
+ .set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
.query_status = pm_query_status_v9,
- .release_mem = pm_release_mem_v9,
+ .release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
.runlist_size = sizeof(struct pm4_mes_runlist),
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
.query_status_size = sizeof(struct pm4_mes_query_status),
- .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ .release_mem_size = 0,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 2adaf40027eb..bed4d0ccb6b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -26,47 +26,6 @@
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_vi(struct kernel_queue *kq);
-static void submit_packet_vi(struct kernel_queue *kq);
-
-void kernel_queue_init_vi(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_vi;
- ops->uninitialize = uninitialize_vi;
- ops->submit_packet = submit_packet_vi;
-}
-
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval != 0)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_vi(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_vi(struct kernel_queue *kq)
-{
- *kq->wptr_kernel = kq->pending_wptr;
- write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr);
-}
-
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index e3e21404cfa0..4d7add843746 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -83,10 +83,10 @@ struct pm4_mes_set_resources {
union {
struct {
- uint32_t gds_heap_base:6;
- uint32_t reserved3:5;
- uint32_t gds_heap_size:6;
- uint32_t reserved4:15;
+ uint32_t gds_heap_base:10;
+ uint32_t reserved3:1;
+ uint32_t gds_heap_size:10;
+ uint32_t reserved4:11;
} bitfields8;
uint32_t ordinal8;
};
@@ -179,7 +179,7 @@ struct pm4_mes_map_process {
uint32_t num_gws:7;
uint32_t sdma_enable:1;
uint32_t num_oac:4;
- uint32_t reserved8:4;
+ uint32_t gds_size_hi:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields14;
@@ -260,6 +260,10 @@ enum mes_map_queues_engine_sel_enum {
engine_sel__mes_map_queues__sdma1_vi = 3
};
+enum mes_map_queues_extended_engine_sel_enum {
+ extended_engine_sel__mes_map_queues__legacy_engine_sel = 0,
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1
+};
struct pm4_mes_map_queues {
union {
@@ -269,7 +273,8 @@ struct pm4_mes_map_queues {
union {
struct {
- uint32_t reserved1:4;
+ uint32_t reserved1:2;
+ enum mes_map_queues_extended_engine_sel_enum extended_engine_sel:2;
enum mes_map_queues_queue_sel_enum queue_sel:2;
uint32_t reserved5:6;
uint32_t gws_control_queue:1;
@@ -382,6 +387,11 @@ enum mes_unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__sdmal = 3
};
+enum mes_unmap_queues_extended_engine_sel_enum {
+ extended_engine_sel__mes_unmap_queues__legacy_engine_sel = 0,
+ extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel = 1
+};
+
struct pm4_mes_unmap_queues {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
@@ -391,7 +401,7 @@ struct pm4_mes_unmap_queues {
union {
struct {
enum mes_unmap_queues_action_enum action:2;
- uint32_t reserved1:2;
+ enum mes_unmap_queues_extended_engine_sel_enum extended_engine_sel:2;
enum mes_unmap_queues_queue_sel_enum queue_sel:2;
uint32_t reserved2:20;
enum mes_unmap_queues_engine_sel_enum engine_sel:3;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 3933fb6a371e..6af1b5881f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -36,6 +36,10 @@
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/device_cgroup.h>
+#include <drm/drm_file.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -55,24 +59,21 @@
* NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
* defines are w.r.t to PAGE_SIZE
*/
-#define KFD_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
+#define KFD_MMAP_TYPE_SHIFT 62
#define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT)
-#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
+#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
& KFD_MMAP_GPU_ID_MASK)
-#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
+#define KFD_MMAP_GET_GPU_ID(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
>> KFD_MMAP_GPU_ID_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
-
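/* Sketch of the resulting encoding (illustrative, not part of the patch):
 * with the PAGE_SHIFT scaling removed, a doorbell mmap offset is composed as
 *   offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 * i.e. the type field sits in bits 63:62 and the gpu_id hash in bits 61:46
 * (assuming KFD_GPU_ID_HASH_WIDTH of 16), and KFD_MMAP_GET_GPU_ID() recovers
 * the gpu_id from the same byte offset.
 */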
/*
* When working with cp scheduler we should assign the HIQ manually or via
* the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
@@ -179,10 +180,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11)
-#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \
- (chip) <= CHIP_NAVI10) || \
- (chip) == CHIP_HAWAII)
#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
@@ -195,6 +192,7 @@ struct kfd_event_interrupt_class {
struct kfd_device_info {
enum amd_asic_type asic_family;
+ const char *asic_name;
const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
unsigned int max_no_of_hqd;
@@ -229,6 +227,7 @@ struct kfd_dev {
const struct kfd_device_info *device_info;
struct pci_dev *pdev;
+ struct drm_device *ddev;
unsigned int id; /* topology stub index */
@@ -236,9 +235,10 @@ struct kfd_dev {
* KFD. It is aligned for mapping
* into user mode
*/
- size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell
- * to HW doorbell, GFX reserved some
- * at the start)
+ size_t doorbell_base_dw_offset; /* Offset from the start of the PCI
+ * doorbell BAR to the first KFD
+ * doorbell in dwords. GFX reserves
+ * the segment before this offset.
*/
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
@@ -508,8 +508,7 @@ struct queue {
* Please read the kfd_mqd_manager.h description.
*/
enum KFD_MQD_TYPE {
- KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */
- KFD_MQD_TYPE_HIQ, /* for hiq */
+ KFD_MQD_TYPE_HIQ = 0, /* for hiq */
KFD_MQD_TYPE_CP, /* for cp queues and diq */
KFD_MQD_TYPE_SDMA, /* for sdma queues */
KFD_MQD_TYPE_DIQ, /* for diq */
@@ -686,10 +685,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- /* Use for delayed freeing of kfd_process structure */
- struct rcu_head rcu;
-
- unsigned int pasid;
+ uint16_t pasid;
unsigned int doorbell_index;
/*
@@ -819,7 +815,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
@@ -887,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq);
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
@@ -905,7 +901,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int *qid);
+ unsigned int *qid,
+ uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
@@ -973,10 +970,9 @@ struct packet_manager_funcs {
extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
-extern const struct packet_manager_funcs kfd_v10_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm);
+void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
@@ -992,9 +988,6 @@ void pm_release_ib(struct packet_manager *pm);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
- struct scheduling_resources *res);
-
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
@@ -1042,6 +1035,21 @@ bool kfd_is_locked(void);
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
+/* Cgroup Support */
+/* Check with device cgroup if @kfd device is accessible */
+static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+{
+#if defined(CONFIG_CGROUP_DEVICE)
+ struct drm_device *ddev = kfd->ddev;
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ ddev->render->index,
+ DEVCG_ACC_WRITE | DEVCG_ACC_READ);
+#else
+ return 0;
+#endif
+}

+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8f1076c0c88a..25b90f70aecd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -32,6 +32,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"
+#include "amdgpu.h"
struct mm_struct;
@@ -62,8 +63,8 @@ static struct workqueue_struct *kfd_restore_wq;
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
-static struct kfd_process *create_process(const struct task_struct *thread,
- struct file *filep);
+static struct kfd_process *create_process(const struct task_struct *thread);
+static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
@@ -289,7 +290,15 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (process) {
pr_debug("Process already found\n");
} else {
- process = create_process(thread, filep);
+ process = create_process(thread);
+ if (IS_ERR(process))
+ goto out;
+
+ ret = kfd_process_init_cwsr_apu(process, filep);
+ if (ret) {
+ process = ERR_PTR(ret);
+ goto out;
+ }
if (!procfs.kobj)
goto out;
@@ -316,6 +325,8 @@ struct kfd_process *kfd_create_process(struct file *filep)
(int)process->lead_thread->pid);
}
out:
+ if (!IS_ERR(process))
+ kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
return process;
@@ -408,7 +419,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
@@ -478,11 +489,9 @@ static void kfd_process_ref_release(struct kref *ref)
queue_work(kfd_process_wq, &p->release_work);
}
-static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
- struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);
-
- kfd_unref_process(p);
+ kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
@@ -534,12 +543,12 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
mutex_unlock(&p->mutex);
- mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
- mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
+ mmu_notifier_put(&p->mmu_notifier);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.release = kfd_process_notifier_release,
+ .free_notifier = kfd_process_free_notifier,
};
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
@@ -554,8 +563,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
- offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
- << PAGE_SHIFT;
+ offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
MAP_SHARED, offset);
@@ -609,81 +617,69 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
return 0;
}
-static struct kfd_process *create_process(const struct task_struct *thread,
- struct file *filep)
+/*
+ * On return the kfd_process is fully operational and will be freed when the
+ * mm is released
+ */
+static struct kfd_process *create_process(const struct task_struct *thread)
{
struct kfd_process *process;
int err = -ENOMEM;
process = kzalloc(sizeof(*process), GFP_KERNEL);
-
if (!process)
goto err_alloc_process;
- process->pasid = kfd_pasid_alloc();
- if (process->pasid == 0)
- goto err_alloc_pasid;
-
- if (kfd_alloc_process_doorbells(process) < 0)
- goto err_alloc_doorbells;
-
kref_init(&process->ref);
-
mutex_init(&process->mutex);
-
process->mm = thread->mm;
-
- /* register notifier */
- process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
- err = mmu_notifier_register(&process->mmu_notifier, process->mm);
- if (err)
- goto err_mmu_notifier;
-
- hash_add_rcu(kfd_processes_table, &process->kfd_processes,
- (uintptr_t)process->mm);
-
process->lead_thread = thread->group_leader;
- get_task_struct(process->lead_thread);
-
INIT_LIST_HEAD(&process->per_device_data);
-
+ INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
+ INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
+ process->last_restore_timestamp = get_jiffies_64();
kfd_event_init_process(process);
+ process->is_32bit_user_mode = in_compat_syscall();
+
+ process->pasid = kfd_pasid_alloc();
+ if (process->pasid == 0)
+ goto err_alloc_pasid;
+
+ if (kfd_alloc_process_doorbells(process) < 0)
+ goto err_alloc_doorbells;
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
/* init process apertures*/
- process->is_32bit_user_mode = in_compat_syscall();
err = kfd_init_apertures(process);
if (err != 0)
goto err_init_apertures;
- INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
- INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
- process->last_restore_timestamp = get_jiffies_64();
-
- err = kfd_process_init_cwsr_apu(process, filep);
+ /* Must be last, have to use release destruction after this */
+ process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
+ err = mmu_notifier_register(&process->mmu_notifier, process->mm);
if (err)
- goto err_init_cwsr;
+ goto err_register_notifier;
+
+ get_task_struct(process->lead_thread);
+ hash_add_rcu(kfd_processes_table, &process->kfd_processes,
+ (uintptr_t)process->mm);
return process;
-err_init_cwsr:
+err_register_notifier:
kfd_process_free_outstanding_kfd_bos(process);
kfd_process_destroy_pdds(process);
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- hash_del_rcu(&process->kfd_processes);
- synchronize_rcu();
- mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
-err_mmu_notifier:
- mutex_destroy(&process->mutex);
kfd_free_process_doorbells(process);
err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
+ mutex_destroy(&process->mutex);
kfree(process);
err_alloc_process:
return ERR_PTR(err);
@@ -693,6 +689,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev->device_info->asic_family))
return 0;
@@ -704,14 +702,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= dev->shared_resources.non_cp_doorbells_start
- && i <= dev->shared_resources.non_cp_doorbells_end) {
+ if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
- i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
}
@@ -801,6 +801,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
return ret;
}
+ amdgpu_vm_set_task_info(pdd->vm);
+
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
goto err_reserve_ib_mem;
@@ -1024,7 +1026,7 @@ static void evict_process_worker(struct work_struct *work)
*/
flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid %d\n", p->pasid);
+ pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
@@ -1033,16 +1035,15 @@ static void evict_process_worker(struct work_struct *work)
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- pr_debug("Finished evicting pasid %d\n", p->pasid);
+ pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
struct delayed_work *dwork;
struct kfd_process *p;
- struct kfd_process_device *pdd;
int ret = 0;
dwork = to_delayed_work(work);
@@ -1051,17 +1052,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
-
- /* Call restore_process_bos on the first KGD device. This function
- * takes care of restoring the whole process including other devices.
- * Restore can fail if enough memory is not available. If so,
- * reschedule again.
- */
- pdd = list_first_entry(&p->per_device_data,
- struct kfd_process_device,
- per_device_list);
-
- pr_debug("Started restoring pasid %d\n", p->pasid);
+ pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1077,7 +1068,7 @@ static void restore_process_worker(struct work_struct *work)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
- pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
@@ -1087,9 +1078,9 @@ static void restore_process_worker(struct work_struct *work)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid %d\n", p->pasid);
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1103,7 +1094,7 @@ void kfd_suspend_all_processes(void)
cancel_delayed_work_sync(&p->restore_work);
if (kfd_process_evict_queues(p))
- pr_err("Failed to suspend process %d\n", p->pasid);
+ pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
@@ -1162,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
- const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
- f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+ amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+ pdd->qpd.vmid);
} else {
- f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+ pdd->process->pasid);
}
}
@@ -1186,7 +1178,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID %d:\n",
+ seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7e6c3ee82f5b..31fcd1b51f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqm->queue_slot_bitmap = NULL;
}
-static int create_cp_queue(struct process_queue_manager *pqm,
+static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, unsigned int qid)
@@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int *qid)
+ unsigned int *qid,
+ uint32_t *p_doorbell_offset_in_process)
{
int retval;
struct kfd_process_device *pdd;
@@ -250,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -271,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -298,17 +299,20 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
- if (q)
+ if (q && p_doorbell_offset_in_process)
/* Return the doorbell offset within the doorbell page
* to the caller so it can be passed up to user mode
* (in bytes).
+ * There are always 1024 doorbells per process, so in case
+ * of 8-byte doorbells, there are two doorbell pages per
+ * process.
*/
- properties->doorbell_off =
+ *p_doorbell_offset_in_process =
(q->properties.doorbell_off * sizeof(uint32_t)) &
(kfd_doorbell_process_slice(dev) - 1);
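/* Worked example (illustrative assumption, not part of the patch): with 1024
 * doorbells per process and 8-byte doorbells, kfd_doorbell_process_slice()
 * spans two pages; a queue whose doorbell_off is 515 dwords yields
 * 515 * sizeof(uint32_t) = 2060 bytes, which the mask keeps inside that
 * per-process slice and which user mode adds to its doorbell mapping.
 */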
@@ -370,14 +374,14 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
- kernel_queue_uninit(pqn->kq);
+ kernel_queue_uninit(pqn->kq, false);
}
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+ pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c2e6e47abaf2..203c823d65f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
@@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
+ if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, "flags", mem->flags);
@@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
+ if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
@@ -406,8 +412,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct kfd_topology_device *dev;
- char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
- uint32_t i;
uint32_t log_max_watch_addr;
/* Making sure that the buffer is an empty string */
@@ -416,24 +420,24 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
if (strcmp(attr->name, "name") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_name);
- for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
- public_name[i] =
- (char)dev->node_props.marketing_name[i];
- if (dev->node_props.marketing_name[i] == 0)
- break;
- }
- public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
- return sysfs_show_str_val(buffer, public_name);
+
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
+ return sysfs_show_str_val(buffer, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
@@ -482,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_engines);
sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
dev->node_props.num_sdma_xgmi_engines);
+ sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine",
+ dev->node_props.num_sdma_queues_per_engine);
+ sysfs_show_32bit_prop(buffer, "num_cp_queues",
+ dev->node_props.num_cp_queues);
if (dev->gpu) {
log_max_watch_addr =
@@ -1106,6 +1114,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -1119,6 +1130,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->gpu = dev->gpu;
+ list_for_each_entry(cache, &dev->cache_props, list)
+ cache->gpu = dev->gpu;
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ iolink->gpu = dev->gpu;
break;
}
}
@@ -1274,6 +1292,10 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
*/
amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
+
+ strncpy(dev->node_props.name, gpu->device_info->asic_name,
+ KFD_TOPOLOGY_PUBLIC_NAME_SIZE);
+
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
@@ -1291,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
dev->node_props.num_sdma_xgmi_engines =
gpu->device_info->num_xgmi_sdma_engines;
+ dev->node_props.num_sdma_queues_per_engine =
+ gpu->device_info->num_sdma_queues_per_engine;
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+ dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
@@ -1321,7 +1346,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 276354aa0fcc..74e9b1682af8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -27,7 +27,7 @@
#include <linux/list.h>
#include "kfd_crat.h"
-#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
+#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
#define HSA_CAP_HOT_PLUGGABLE 0x00000001
#define HSA_CAP_ATS_PRESENT 0x00000002
@@ -81,7 +81,9 @@ struct kfd_node_properties {
int32_t drm_render_minor;
uint32_t num_sdma_engines;
uint32_t num_sdma_xgmi_engines;
- uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+ uint32_t num_sdma_queues_per_engine;
+ uint32_t num_cp_queues;
+ char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
#define HSA_MEM_HEAP_TYPE_SYSTEM 0
@@ -102,6 +104,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -123,6 +126,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -141,6 +145,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index f954bf61af28..87858bc57e64 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
menu "Display Engine Configuration"
depends on DRM && DRM_AMDGPU
@@ -6,35 +6,24 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
-config DRM_AMD_DC_DCN1_0
+config DRM_AMD_DC_DCN
def_bool n
help
- RV family support for display engine
+ Raven, Navi and Renoir family support for display engine
-config DRM_AMD_DC_DCN2_0
- bool "DCN 2.0 family"
- default y
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN1_0
- help
- Choose this option if you want to have
- Navi support for display engine
-
-config DRM_AMD_DC_DSC_SUPPORT
- bool "DSC support"
- default y
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN1_0
- depends on DRM_AMD_DC_DCN2_0
+config DRM_AMD_DC_HDCP
+ bool "Enable HDCP support in DC"
+ depends on DRM_AMD_DC
help
- Choose this option if you want to have
- Dynamic Stream Compression support
+	  Choose this option if you want to support HDCP authentication.
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 496cee000f10..2633de77de5e 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,11 +34,20 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc
+
+ifdef CONFIG_DRM_AMD_DC_HDCP
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
+endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
-DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
+DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src
+
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DAL_LIBS += modules/hdcp
+endif
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 94911871eb9b..9a3b7bf8ab0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
+ifdef CONFIG_DRM_AMD_DC_HDCP
+AMDGPUDM += amdgpu_dm_hdcp.o
+endif
+
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..63e8a12a74bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -30,6 +30,10 @@
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
+#include "dmub/inc/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
#include "vid.h"
#include "amdgpu.h"
@@ -37,6 +41,10 @@
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#include <drm/drm_hdcp.h>
+#endif
#include "amdgpu_pm.h"
#include "amd_shared.h"
@@ -67,8 +75,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_hdcp.h>
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -83,9 +92,18 @@
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
/**
* DOC: overview
*
@@ -143,6 +161,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
/*
* dm_vblank_get_counter
*
@@ -263,6 +287,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -407,6 +438,13 @@ static void dm_vupdate_high_irq(void *interrupt_params)
}
}
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -454,6 +492,70 @@ static void dm_crtc_high_irq(void *interrupt_params)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/**
+ * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Notify DRM's vblank event handler at VSTARTUP
+ *
+ * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
+ * * We are close enough to VUPDATE - the point of no return for hw
+ * * We are in the fixed portion of variable front porch when vrr is enabled
+ * * We are before VUPDATE, where double-buffered vrr registers are swapped
+ *
+ * It is therefore the correct place to signal vblank, send user flip events,
+ * and update VRR.
+ */
+static void dm_dcn_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+ if (!acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
+
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+ if (acrtc_state->vrr_params.supported &&
+ acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ }
+
+ if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+ if (acrtc->event) {
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+ acrtc->event = NULL;
+ drm_crtc_vblank_put(&acrtc->base);
+ }
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
+ }
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+#endif
+
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -643,14 +745,135 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
}
}
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct abm *abm = adev->dm.dc->res_pool->abm;
+ struct dmub_srv_hw_params hw_params;
+ enum dmub_status status;
+ const unsigned char *fw_inst_const, *fw_bss_data;
+ uint32_t i, fw_inst_const_size, fw_bss_data_size;
+ bool has_hw_support;
+
+ if (!dmub_srv)
+ /* DMUB isn't supported on the ASIC. */
+ return 0;
+
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
+ if (!dmub_fw) {
+ /* Firmware required for DMUB support. */
+ DRM_ERROR("No firmware provided for DMUB.\n");
+ return -EINVAL;
+ }
+
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ return -EINVAL;
+ }
+
+ if (!has_hw_support) {
+ DRM_INFO("DMUB unsupported on ASIC\n");
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
+ fw_inst_const = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+
+ fw_bss_data = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ /* Copy firmware and bios info into FB memory. */
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
+ fw_bss_data_size);
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Reset regions that need to be reset. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+ /* Initialize hardware. */
+ memset(&hw_params, 0, sizeof(hw_params));
+ hw_params.fb_base = adev->gmc.fb_start;
+ hw_params.fb_offset = adev->gmc.aper_base;
+
+ if (dmcu)
+ hw_params.psp_version = dmcu->psp_version;
+
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
+
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+ /* Init DMCU and ABM if available. */
+ if (dmcu && abm) {
+ dmcu->funcs->dmcu_init(dmcu);
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ }
+
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv) {
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ return -ENOMEM;
+ }
+
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ return 0;
+}
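/* Assumed firmware image layout, inferred from the offsets used above
 * (sketch, not authoritative):
 *   ucode_array_offset_bytes
 *     [PSP header, 0x100 bytes][instruction/constant data]
 *     [PSP footer, 0x100 bytes][bss/data section]
 * fw_inst_const skips the PSP header, fw_inst_const_size excludes both the
 * header and footer, and fw_bss_data begins inst_const_bytes past the ucode
 * array offset.
 */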
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct dc_callback_init init_params;
+#endif
+ int r;
+
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&init_params, 0, sizeof(init_params));
+#endif
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
@@ -683,22 +906,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
- /*
- * TODO debug why this doesn't work on Raven
- */
- if (adev->flags & AMD_IS_APU &&
- adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN)
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
+ break;
+ default:
+ break;
+ }
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+ init_data.flags.multi_mon_pp_mclk_switch = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
init_data.flags.power_down_display_on_boot = true;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-#endif
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -710,6 +940,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ r = dm_dmub_hw_init(adev);
+ if (r) {
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ goto error;
+ }
+
+ dc_hardware_init(adev->dm.dc);
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -720,6 +958,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -761,6 +1011,25 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc)
+ dc_deinit_callbacks(adev->dm.dc);
+#endif
+ if (adev->dm.dc->ctx->dmub_srv) {
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ adev->dm.dc->ctx->dmub_srv = NULL;
+ }
+
+ if (adev->dm.dmub_bo)
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -809,6 +1078,9 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ case CHIP_RENOIR:
return 0;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id))
@@ -868,9 +1140,160 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+ uint32_t value)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_fb_params fb_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ const char *fw_name_dmub;
+ enum dmub_asic dmub_asic;
+ enum dmub_status status;
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_RENOIR:
+ dmub_asic = DMUB_ASIC_DCN21;
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ break;
+
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+
+ r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
+ if (r) {
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+ return 0;
+ }
+
+ r = amdgpu_ucode_validate(adev->dm.dmub_fw);
+ if (r) {
+ DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
+ return 0;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&fb_params, 0, sizeof(fb_params));
+ fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+ fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+ fb_params.region_info = &region_info;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dm_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dm_dmub_sw_init(adev);
+ if (r)
+ return r;
return load_dmcu_fw(adev);
}
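
For orientation, a sketch of the DMUB bring-up order this file now implements (inferred from the hunks above and below, not literal code from the patch):

    dm_sw_init()  -> dm_dmub_sw_init()  /* request firmware, create the dmub_srv, reserve VRAM regions */
    dm_resume()   -> dm_dmub_hw_init()  /* re-program those regions into the DMUB before dc_set_power_state() */
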
@@ -879,6 +1302,19 @@ static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+ if (adev->dm.dmub_fw) {
+ release_firmware(adev->dm.dmub_fw);
+ adev->dm.dmub_fw = NULL;
+ }
+
if(adev->dm.fw_dmcu) {
release_firmware(adev->dm.fw_dmcu);
adev->dm.fw_dmcu = NULL;
@@ -891,27 +1327,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int ret = 0;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
- aconnector, aconnector->base.base.id);
+ aconnector,
+ aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to start MST\n");
- ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
- return ret;
- }
+ aconnector->dc_link->type =
+ dc_connection_single;
+ break;
}
+ }
}
+ drm_connector_list_iter_end(&iter);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
@@ -934,6 +1372,11 @@ static int dm_late_init(void *handle)
params.backlight_lut_array_size = 16;
params.backlight_lut_array = linear_lut;
+ /* Min backlight level after ABM reduction; don't allow below 1%
+ * 0xFFFF x 0.01 = 0x28F
+ */
+ params.min_abm_backlight = 0x28F;
+
/* todo will enable for navi10 */
if (adev->asic_type <= CHIP_RAVEN) {
ret = dmcu_load_iram(dmcu, params);
@@ -949,14 +1392,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
@@ -967,15 +1409,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
- ret = drm_dp_mst_topology_mgr_resume(mgr);
+ ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
@@ -983,7 +1424,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -1013,7 +1454,7 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
@@ -1157,6 +1598,7 @@ static int dm_resume(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state;
@@ -1165,7 +1607,7 @@ static int dm_resume(void *handle)
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
- int i;
+ int i, r;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
@@ -1173,23 +1615,29 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
+ /* Before powering on DC we need to re-initialize DMUB. */
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
/* program HPD filter */
dc_resume(dm->dc);
- /* On resume we need to rewrite the MSTM control bits to enamble MST*/
- s3_handle_mst(ddev, false);
-
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
*/
amdgpu_dm_irq_resume_early(adev);
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
/* Do detection*/
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -1217,6 +1665,7 @@ static int dm_resume(void *handle)
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
}
+ drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
@@ -1432,6 +1881,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1446,6 +1900,9 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct amdgpu_device *adev = dev->dev_private;
+#endif
/*
* In case of failure or MST no need to update connector status or notify the OS
@@ -1453,6 +1910,10 @@ static void handle_hpd_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1571,6 +2032,12 @@ static void handle_hpd_rx_irq(void *param)
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ union hpd_irq_data hpd_irq_data;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+#endif
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1580,7 +2047,12 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
+#else
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+#endif
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -1615,6 +2087,10 @@ static void handle_hpd_rx_irq(void *param)
drm_kms_helper_hotplug_event(dev);
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
@@ -1769,7 +2245,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -1815,35 +2291,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_crtc_high_irq, c_irq_params);
- }
-
- /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
- * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
- * to trigger at end of each vblank, regardless of state of the lock,
- * matching DCE behaviour.
- */
- for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
- i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
- i++) {
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
-
- if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
- return r;
- }
-
- int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
- int_params.irq_source =
- dc_interrupt_to_irq_source(dc, i, 0);
-
- c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
-
- c_irq_params->adev = adev;
- c_irq_params->irq_src = int_params.irq_source;
-
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_vupdate_high_irq, c_irq_params);
+ dm_dcn_crtc_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
@@ -2107,6 +2555,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
.update_status = amdgpu_dm_backlight_update_status,
};
@@ -2327,6 +2776,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
+ if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+ amdgpu_dm_set_psr_caps(link);
}
@@ -2355,11 +2806,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ case CHIP_NAVI12:
case CHIP_NAVI10:
-#endif
+ case CHIP_NAVI14:
+ case CHIP_RENOIR:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -2428,8 +2880,7 @@ static ssize_t s3_debug_store(struct device *device,
{
int ret;
int s3_state;
- struct pci_dev *pdev = to_pci_dev(device);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(device);
struct amdgpu_device *adev = drm_dev->dev_private;
ret = kstrtoint(buf, 0, &s3_state);
@@ -2506,20 +2957,29 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI10:
+ case CHIP_NAVI12:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
-#endif
+ case CHIP_NAVI14:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case CHIP_RENOIR:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
@@ -2665,7 +3125,7 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union plane_size *plane_size,
+ const struct plane_size *plane_size,
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
@@ -2691,8 +3151,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
return -EINVAL;
input.format = format;
- input.surface_size.width = plane_size->grph.surface_size.width;
- input.surface_size.height = plane_size->grph.surface_size.height;
+ input.surface_size.width = plane_size->surface_size.width;
+ input.surface_size.height = plane_size->surface_size.height;
input.swizzle_mode = tiling_info->gfx9.swizzle;
if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
@@ -2710,9 +3170,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
return -EINVAL;
dcc->enable = 1;
- dcc->grph.meta_pitch =
+ dcc->meta_pitch =
AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
- dcc->grph.independent_64b_blks = i64b;
+ dcc->independent_64b_blks = i64b;
dcc_address = get_dcc_address(afb->address, info);
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
@@ -2728,7 +3188,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{
@@ -2741,11 +3201,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
memset(address, 0, sizeof(*address));
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_size->grph.surface_size.x = 0;
- plane_size->grph.surface_size.y = 0;
- plane_size->grph.surface_size.width = fb->width;
- plane_size->grph.surface_size.height = fb->height;
- plane_size->grph.surface_pitch =
+ plane_size->surface_size.x = 0;
+ plane_size->surface_size.y = 0;
+ plane_size->surface_size.width = fb->width;
+ plane_size->surface_size.height = fb->height;
+ plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
address->type = PLN_ADDR_TYPE_GRAPHICS;
@@ -2754,20 +3214,20 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
uint64_t chroma_addr = afb->address + fb->offsets[1];
- plane_size->video.luma_size.x = 0;
- plane_size->video.luma_size.y = 0;
- plane_size->video.luma_size.width = fb->width;
- plane_size->video.luma_size.height = fb->height;
- plane_size->video.luma_pitch =
+ plane_size->surface_size.x = 0;
+ plane_size->surface_size.y = 0;
+ plane_size->surface_size.width = fb->width;
+ plane_size->surface_size.height = fb->height;
+ plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
- plane_size->video.chroma_size.x = 0;
- plane_size->video.chroma_size.y = 0;
+ plane_size->chroma_size.x = 0;
+ plane_size->chroma_size.y = 0;
/* TODO: set these based on surface format */
- plane_size->video.chroma_size.width = fb->width / 2;
- plane_size->video.chroma_size.height = fb->height / 2;
+ plane_size->chroma_size.width = fb->width / 2;
+ plane_size->chroma_size.height = fb->height / 2;
- plane_size->video.chroma_pitch =
+ plane_size->chroma_pitch =
fb->pitches[1] / fb->format->cpp[1];
address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
@@ -2812,9 +3272,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
adev->asic_type == CHIP_NAVI10 ||
-#endif
+ adev->asic_type == CHIP_NAVI14 ||
+ adev->asic_type == CHIP_NAVI12 ||
+ adev->asic_type == CHIP_RENOIR ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -2995,6 +3456,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+ plane_info->layer_index = 0;
+
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
if (ret)
@@ -3060,6 +3523,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
+ dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
/*
* Always set input transfer function, since plane state is refreshed
@@ -3129,15 +3593,41 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
- const struct drm_connector_state *state)
+ const struct drm_connector_state *state,
+ bool is_y420)
{
- uint32_t bpc = connector->display_info.bpc;
+ uint8_t bpc;
+
+ if (is_y420) {
+ bpc = 8;
+
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+ bpc = 16;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+ bpc = 12;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+ bpc = 10;
+ } else {
+ bpc = (uint8_t)connector->display_info.bpc;
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
+ }
if (!state)
state = connector->state;
if (state) {
- bpc = state->max_bpc;
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+ * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min(bpc, state->max_requested_bpc);
+
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
}
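
Worked example (illustrative, not from the patch): a sink advertising 10 bpc with a userspace max_requested_bpc of 8 ends up capped at 8 bpc, while an odd request such as 11 bpc is rounded down to 10 by bpc - (bpc & 1).
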
@@ -3217,27 +3707,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
-
- timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
- const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
{
+ enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
normalized_clk /= 2;
/* Adjusting pix clock following on HDMI spec based on colour depth */
- switch (timing_out->display_color_depth) {
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
case COLOR_DEPTH_101010:
normalized_clk = (normalized_clk * 30) / 24;
break;
@@ -3248,14 +3732,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
normalized_clk = (normalized_clk * 48) / 24;
break;
default:
- return;
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
}
- if (normalized_clk <= info->max_tmds_clock)
- return;
- reduce_mode_colour_depth(timing_out);
-
- } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
}
static void fill_stream_properties_from_drm_display_mode(
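
Worked example (illustrative, assuming a 12 bpc sink and a 600000 kHz max_tmds_clock): for a 3840x2160@60 mode pix_clk_100hz is 5940000, so normalized_clk starts at 594000 kHz; at 12 bpc the loop scales it to 891000 kHz and at 10 bpc to 742500 kHz, both above the TMDS limit, so the depth is lowered until COLOR_DEPTH_888 (594000 kHz) fits and the function returns true.
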
@@ -3267,8 +3752,12 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
- memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
@@ -3278,6 +3767,9 @@ static void fill_stream_properties_from_drm_display_mode(
if (drm_mode_is_420_only(info, mode_in)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -3286,7 +3778,8 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
- connector, connector_state);
+ connector, connector_state,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
@@ -3302,6 +3795,13 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
}
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
timing_out->h_sync_width =
@@ -3321,8 +3821,14 @@ static void fill_stream_properties_from_drm_display_mode(
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- adjust_colour_depth_from_display_info(timing_out, info);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -3491,6 +3997,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dsc_dec_dpcd_caps dsc_caps;
+#endif
+ uint32_t link_bandwidth_kbps;
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
@@ -3518,6 +4028,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->dm_stream_context = aconnector;
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -3562,19 +4075,29 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
fill_stream_properties_from_drm_display_mode(stream,
&mode, &aconnector->base, con_state, old_stream);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* stream->timing.flags.DSC = 0; */
- /* */
- /* if (aconnector->dc_link && */
- /* aconnector->dc_link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT #<{(|&& */
- /* aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.is_dsc_supported|)}>#) */
- /* if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, */
- /* &aconnector->dc_link->dpcd_caps.dsc_caps, */
- /* dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)), */
- /* &stream->timing, */
- /* &stream->timing.dsc_cfg)) */
- /* stream->timing.flags.DSC = 1; */
+ stream->timing.flags.DSC = 0;
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
+ &dsc_caps);
+#endif
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dsc_caps.is_dsc_supported)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ &dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+ link_bandwidth_kbps,
+ &stream->timing,
+ &stream->timing.dsc_cfg))
+ stream->timing.flags.DSC = 1;
#endif
+ }
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -3585,6 +4108,20 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+ if (stream->link->psr_feature_enabled) {
+ struct dc *core_dc = stream->link->ctx->dc;
+
+ if (dc_is_dmcu_initialized(core_dc)) {
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ stream->psr_version = dmcu->dmcu_version.psr_version;
+ mod_build_vsc_infopacket(stream,
+ &stream->vsc_infopacket,
+ &stream->use_vsc_sdp_for_colorimetry);
+ }
+ }
finish:
dc_sink_release(sink);
@@ -3657,7 +4194,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->abm_level = cur->abm_level;
state->vrr_supported = cur->vrr_supported;
state->freesync_config = cur->freesync_config;
- state->crc_enabled = cur->crc_enabled;
+ state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
@@ -3673,6 +4210,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
+ /* Do not set vupdate for DCN hardware */
+ if (adev->family > AMDGPU_FAMILY_AI)
+ return 0;
+
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -3727,6 +4268,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.atomic_destroy_state = dm_crtc_destroy_state,
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
+ .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
};
@@ -3915,7 +4457,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_hborder = 0;
state->underscan_vborder = 0;
state->base.max_requested_bpc = 8;
-
+ state->vcpi_slots = 0;
+ state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level;
@@ -3943,7 +4486,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->underscan_enable = state->underscan_enable;
new_state->underscan_hborder = state->underscan_hborder;
new_state->underscan_vborder = state->underscan_vborder;
-
+ new_state->vcpi_slots = state->vcpi_slots;
+ new_state->pbn = state->pbn;
return &new_state->base;
}
@@ -4059,8 +4603,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
result = MODE_OK;
else
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->vdisplay,
mode->hdisplay,
+ mode->vdisplay,
mode->clock,
dc_result);
@@ -4340,10 +4884,69 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder)
}
+static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+ switch (display_color_depth) {
+ case COLOR_DEPTH_666:
+ return 6;
+ case COLOR_DEPTH_888:
+ return 8;
+ case COLOR_DEPTH_101010:
+ return 10;
+ case COLOR_DEPTH_121212:
+ return 12;
+ case COLOR_DEPTH_141414:
+ return 14;
+ case COLOR_DEPTH_161616:
+ return 16;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+
+ if (!aconnector->port || !aconnector->dc_sink)
+ return 0;
+
+ mst_port = aconnector->port;
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+ if (!state->duplicated) {
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ aconnector->force_yuv420_output;
+ color_depth = convert_color_depth_from_display_info(connector, conn_state,
+ is_y420);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
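+ /* e.g. an 8 bpc RGB/4:4:4 stream yields bpp = 24 for the PBN calculation below */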
+ clock = adjusted_mode->clock;
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+ }
+ dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
+ mst_mgr,
+ mst_port,
+ dm_new_connector_state->pbn,
+ 0);
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+ }
return 0;
}
@@ -4352,6 +4955,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state, *old_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, clock, bpp;
+ int vcpi, pbn_div, pbn = 0;
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ if (stream->timing.flags.DSC != 1) {
+ drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ dm_conn_state->pbn,
+ 0,
+ false);
+ continue;
+ }
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ bpp = stream->timing.dsc_cfg.bits_per_pixel;
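+ /* dsc_cfg.bits_per_pixel is expressed in 1/16 bpp units; the dsc=true variant of drm_dp_calc_pbn_mode accounts for that scaling */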
+ clock = stream->timing.pix_clk_100hz / 10;
+ pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ pbn, pbn_div,
+ true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+#endif
+
static void dm_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -4439,14 +5107,14 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
}
if (plane->type != DRM_PLANE_TYPE_CURSOR)
- domain = amdgpu_display_supported_domains(adev);
+ domain = amdgpu_display_supported_domains(adev, rbo->flags);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
@@ -4536,20 +5204,10 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
static int dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
{
- struct drm_plane_state *old_plane_state =
- drm_atomic_get_old_plane_state(new_plane_state->state, plane);
-
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
return -EINVAL;
- /*
- * DRM calls prepare_fb and cleanup_fb on new_plane_state for
- * async commits so don't allow fb changes.
- */
- if (old_plane_state->fb != new_plane_state->fb)
- return -EINVAL;
-
return 0;
}
@@ -4792,7 +5450,13 @@ static int to_drm_connector_type(enum signal_type st)
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
@@ -5018,9 +5682,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
- /* This defaults to the max in the range, but we want 8bpc. */
- aconnector->base.state->max_bpc = 8;
- aconnector->base.state->max_requested_bpc = 8;
+ /* This defaults to the max in the range, but we want 8bpc for non-edp. */
+ aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -5037,6 +5701,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
+#endif
}
}
@@ -5144,11 +5812,12 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
connector_type = to_drm_connector_type(link->connector_signal);
- res = drm_connector_init(
+ res = drm_connector_init_with_ddc(
dm->ddev,
&aconnector->base,
&amdgpu_dm_connector_funcs,
- connector_type);
+ connector_type,
+ &i2c->base);
if (res) {
DRM_ERROR("connector_init failed\n");
@@ -5279,6 +5948,48 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static bool is_content_protection_different(struct drm_connector_state *state,
+ const struct drm_connector_state *old_state,
+ const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (old_state->hdcp_content_type != state->hdcp_content_type &&
+ state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ return true;
+ }
+
+ /* CP is being re-enabled, ignore this */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ return false;
+ }
+
+ /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Check that something is connected/enabled; otherwise we would start HDCP with nothing connected/enabled
+ * (hot-plug, headless S3, DPMS)
+ */
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
+ aconnector->dc_sink != NULL)
+ return true;
+
+ if (old_state->content_protection == state->content_protection)
+ return false;
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ return true;
+
+ return false;
+}
+
+#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -5620,6 +6331,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
+ bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5665,6 +6377,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
+ if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+ swizzle = false;
+
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -5693,11 +6408,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+ r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
- DRM_ERROR("Waiting for fences timed out or interrupted!");
+ DRM_ERROR("Waiting for fences timed out!");
/*
* TODO This might fail and hence better not used, wait
@@ -5721,8 +6436,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change FB pitch, DCC state, rotation or mirroring.
+ */
bundle->flip_addrs[planes_count].flip_immediate =
- (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST;
timestamp_ns = ktime_get_ns();
bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -5814,6 +6534,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
+ bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -5849,14 +6570,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
-
mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
+
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->psr_version &&
+ !acrtc_state->stream->link->psr_feature_enabled)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_allow_active &&
+ swizzle) {
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ }
+
mutex_unlock(&dm->dc_lock);
}
@@ -5967,6 +6703,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
+#ifdef CONFIG_DEBUG_FS
+ enum amdgpu_dm_pipe_crc_source source;
+#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -5992,9 +6731,11 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
#ifdef CONFIG_DEBUG_FS
/* The stream has changed so CRC capture needs to re-enabled. */
- if (dm_new_crtc_state->crc_enabled) {
- dm_new_crtc_state->crc_enabled = false;
- amdgpu_dm_crtc_set_crc_source(crtc, "auto");
+ source = dm_new_crtc_state->crc_src;
+ if (amdgpu_dm_is_valid_crc_source(source)) {
+ amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
}
#endif
}
@@ -6045,23 +6786,8 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
if (dm_old_crtc_state->interrupts_enabled &&
(!dm_new_crtc_state->interrupts_enabled ||
- drm_atomic_crtc_needs_modeset(new_crtc_state))) {
- /*
- * Drop the extra vblank reference added by CRC
- * capture if applicable.
- */
- if (dm_new_crtc_state->crc_enabled)
- drm_crtc_vblank_put(crtc);
-
- /*
- * Only keep CRC capture enabled if there's
- * still a stream for the CRTC.
- */
- if (!dm_new_crtc_state->stream)
- dm_new_crtc_state->crc_enabled = false;
-
+ drm_atomic_crtc_needs_modeset(new_crtc_state)))
manage_dm_interrupts(adev, acrtc, false);
- }
}
/*
* Add check here for SoC's that support hardware cursor plane, to
@@ -6175,10 +6901,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
crtc->hwmode = new_crtc_state->mode;
} else if (modereset_required(new_crtc_state)) {
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
-
/* i.e. reset mode */
- if (dm_old_crtc_state->stream)
+ if (dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
+
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
}
} /* for_each_crtc_in_state() */
@@ -6208,6 +6937,34 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ new_crtc_state = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ continue;
+ }
+
+ if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type,
+ new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
+ : false);
+ }
+#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -6247,9 +7004,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
+ stream_update.stream = dm_new_crtc_state->stream;
if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ dm_new_con_state, dm_new_crtc_state->stream);
stream_update.src = dm_new_crtc_state->stream->src;
stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -6310,7 +7068,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_enable_crtc_interrupts(dev, state, true);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
- if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ if (new_crtc_state->async_flip)
wait_for_vblank = false;
/* update planes when needed per crtc*/
@@ -6994,31 +7752,34 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
- struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc *new_plane_crtc;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
struct dc_stream_status *status = NULL;
-
- struct dc_surface_update *updates;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_info_bundle {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
- updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
- if (!updates) {
- DRM_ERROR("Failed to allocate plane updates\n");
+ if (!bundle) {
+ DRM_ERROR("Failed to allocate update bundle\n");
/* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL;
goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dc_scaling_info scaling_info;
- struct dc_stream_update stream_update;
- memset(&stream_update, 0, sizeof(stream_update));
+ memset(bundle, 0, sizeof(struct surface_info_bundle));
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -7033,8 +7794,14 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
continue;
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(new_plane_state->fb);
+ struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
+ struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
+ struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
+ uint64_t tiling_flags;
+
new_plane_crtc = new_plane_state->crtc;
- old_plane_crtc = old_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state);
old_dm_plane_state = to_dm_plane_state(old_plane_state);
@@ -7049,32 +7816,49 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
if (crtc != new_plane_crtc)
continue;
- updates[num_plane].surface = new_dm_plane_state->dc_state;
+ bundle->surface_updates[num_plane].surface =
+ new_dm_plane_state->dc_state;
if (new_crtc_state->mode_changed) {
- stream_update.dst = new_dm_crtc_state->stream->dst;
- stream_update.src = new_dm_crtc_state->stream->src;
+ bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
+ bundle->stream_update.src = new_dm_crtc_state->stream->src;
}
if (new_crtc_state->color_mgmt_changed) {
- updates[num_plane].gamma =
+ bundle->surface_updates[num_plane].gamma =
new_dm_plane_state->dc_state->gamma_correction;
- updates[num_plane].in_transfer_func =
+ bundle->surface_updates[num_plane].in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
- stream_update.gamut_remap =
+ bundle->stream_update.gamut_remap =
&new_dm_crtc_state->stream->gamut_remap_matrix;
- stream_update.output_csc_transform =
+ bundle->stream_update.output_csc_transform =
&new_dm_crtc_state->stream->csc_color_matrix;
- stream_update.out_transfer_func =
+ bundle->stream_update.out_transfer_func =
new_dm_crtc_state->stream->out_transfer_func;
}
ret = fill_dc_scaling_info(new_plane_state,
- &scaling_info);
+ scaling_info);
if (ret)
goto cleanup;
- updates[num_plane].scaling_info = &scaling_info;
+ bundle->surface_updates[num_plane].scaling_info = scaling_info;
+
+ if (amdgpu_fb) {
+ ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ if (ret)
+ goto cleanup;
+
+ ret = fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ plane_info,
+ &flip_addr->address);
+ if (ret)
+ goto cleanup;
+
+ bundle->surface_updates[num_plane].plane_info = plane_info;
+ bundle->surface_updates[num_plane].flip_addr = flip_addr;
+ }
num_plane++;
}
@@ -7094,14 +7878,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
-
+ bundle->stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
*/
mutex_lock(&dm->dc_lock);
- update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
- &stream_update, status);
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, bundle->surface_updates, num_plane,
+ &bundle->stream_update, status);
mutex_unlock(&dm->dc_lock);
if (update_type > UPDATE_TYPE_MED) {
@@ -7111,12 +7896,35 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
}
cleanup:
- kfree(updates);
+ kfree(bundle);
*out_type = update_type;
return ret;
}
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->crtc != crtc)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->port || !aconnector->mst_port)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -7169,6 +7977,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ if (adev->asic_type >= CHIP_NAVI10) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ }
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
@@ -7272,6 +8090,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update =
+ !drm_atomic_helper_async_check(dev, state);
+
+ /*
+ * Skip the remaining global validation if this is an async
+ * update. Cursor updates can be done without affecting
+ * state or bandwidth calcs and this avoids the performance
+ * penalty of locking the private state object and
+ * allocating a new dc_state.
+ */
+ if (state->async_update)
+ return 0;
+ }
+
/* Check scaling and underscan changes*/
/* TODO Removed scaling changes validation due to inability to commit
* new stream into context w\o causing full reset. Need to
@@ -7320,17 +8158,60 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+ goto fail;
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+ if (ret)
+ goto fail;
+#endif
+
+ /*
+ * Perform validation of MST topology in the state:
+ * We need to perform MST atomic check before calling
+ * dc_validate_global_state(), or there is a chance
+ * to get stuck in an infinite loop and hang eventually.
+ */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ goto fail;
+
if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL;
goto fail;
}
- } else if (state->legacy_cursor_update) {
+ } else {
/*
- * This is a fast cursor update coming from the plane update
- * helper, check if it can be done asynchronously for better
- * performance.
+ * The commit is a fast update. Fast updates shouldn't change
+ * the DC context, affect global validation, and can have their
+ * commit work done in parallel with other commits not touching
+ * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
*/
- state->async_update = !drm_atomic_helper_async_check(dev, state);
+ struct dm_atomic_state *new_dm_state, *old_dm_state;
+
+ new_dm_state = dm_atomic_get_new_state(state);
+ old_dm_state = dm_atomic_get_old_state(state);
+
+ if (new_dm_state && old_dm_state) {
+ if (new_dm_state->context)
+ dc_release_state(new_dm_state->context);
+
+ new_dm_state->context = old_dm_state->context;
+
+ if (old_dm_state->context)
+ dc_retain_state(old_dm_state->context);
+ }
+ }
+
+ /* Store the overall update type for use later in atomic check. */
+ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ dm_new_crtc_state->update_type = (int)overall_update_type;
}
/* Must be success */
@@ -7461,3 +8342,113 @@ update:
freesync_capable);
}
+static void amdgpu_dm_set_psr_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return;
+ if (link->type == dc_connection_none)
+ return;
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ dpcd_data, sizeof(dpcd_data))) {
+ link->psr_feature_enabled = dpcd_data[0] ? true:false;
+ DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ }
+}
+
+/*
+ * amdgpu_dm_link_setup_psr() - configure psr link
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+{
+ struct dc_link *link = NULL;
+ struct psr_config psr_config = {0};
+ struct psr_context psr_context = {0};
+ struct dc *dc = NULL;
+ bool ret = false;
+
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+ dc = link->ctx->dc;
+
+ psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+
+ if (psr_config.psr_version > 0) {
+ psr_config.psr_exit_link_training_required = 0x1;
+ psr_config.psr_frame_capture_indication_req = 0;
+ psr_config.psr_rfb_setup_time = 0x37;
+ psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+ psr_config.allow_smu_optimizations = 0x0;
+
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+
+ }
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+
+ return ret;
+}
+
+/*
+ * amdgpu_dm_psr_enable() - enable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+{
+ struct dc_link *link = stream->link;
+ unsigned int vsync_rate_hz = 0;
+ struct dc_static_screen_params params = {0};
+ /* Calculate number of static frames before generating interrupt to
+ * enter PSR.
+ */
+ // Initialize to a fail-safe default of 2 static frames
+ unsigned int num_frames_static = 2;
+
+ DRM_DEBUG_DRIVER("Enabling psr...\n");
+
+ vsync_rate_hz = div64_u64(div64_u64((
+ stream->timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ /* Round up
+ * Calculate number of frames such that at least 30 ms of time has
+ * passed.
+ */
+ if (vsync_rate_hz != 0) {
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+ num_frames_static = (30000 / frame_time_microsec) + 1;
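+ /* e.g. at 60 Hz: frame_time_microsec = 16666, so 30000 / 16666 + 1 = 2 static frames */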
+ }
+
+ params.triggers.cursor_update = true;
+ params.triggers.overlay_update = true;
+ params.triggers.surface_update = true;
+ params.num_frames = num_frames_static;
+
+ dc_stream_set_static_screen_params(link->ctx->dc,
+ &stream, 1,
+ &params);
+
+ return dc_link_set_psr_allow_active(link, true, false);
+}
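For reference, a standalone sketch of the static-frame arithmetic above. The 144 Hz refresh rate is an assumed example input, not a value taken from this patch:

/* Compute the static-frame count used before entering PSR (sketch). */
#include <stdio.h>

int main(void)
{
	unsigned int vsync_rate_hz = 144;	/* assumed refresh rate */
	unsigned int num_frames_static = 2;	/* fail-safe default */

	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		/* Round up so at least 30 ms of static time has passed. */
		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	printf("%u\n", num_frames_static);	/* prints 5 for 144 Hz */
	return 0;
}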
+
+/**
+ * amdgpu_dm_psr_disable() - disable PSR firmware
+ * @stream: stream state
+ *
+ * Return: true on success
+ */
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+{
+ DRM_DEBUG_DRIVER("Disabling psr...\n");
+
+ return dc_link_set_psr_allow_active(stream->link, false, true);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b89cbbfcc0e9..7ea9acb0358d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,12 +50,15 @@
#include "irq_types.h"
#include "signal_types.h"
+#include "amdgpu_dm_crc.h"
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
struct dc;
+struct amdgpu_bo;
+struct dmub_srv;
struct common_irq_params {
struct amdgpu_device *adev;
@@ -107,6 +110,12 @@ struct amdgpu_dm_backlight_caps {
* @display_indexes_num: Max number of display streams supported
* @irq_handler_list_table_lock: Synchronizes access to IRQ tables
* @backlight_dev: Backlight control device
+ * @backlight_link: Link on which to control backlight
+ * @backlight_caps: Capabilities of the backlight device
+ * @freesync_module: Module handling freesync calculations
+ * @fw_dmcu: Reference to DMCU firmware
+ * @dmcu_fw_version: Version of the DMCU firmware
+ * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
@@ -115,6 +124,57 @@ struct amdgpu_display_manager {
struct dc *dc;
/**
+ * @dmub_srv:
+ *
+ * DMUB service, used for controlling the DMUB on hardware
+ * that supports it. The pointer to the dmub_srv will be
+ * NULL on hardware that does not support it.
+ */
+ struct dmub_srv *dmub_srv;
+
+ /**
+ * @dmub_fb_info:
+ *
+ * Framebuffer regions for the DMUB.
+ */
+ struct dmub_srv_fb_info *dmub_fb_info;
+
+ /**
+ * @dmub_fw:
+ *
+ * DMUB firmware, required on hardware that has DMUB support.
+ */
+ const struct firmware *dmub_fw;
+
+ /**
+ * @dmub_bo:
+ *
+ * Buffer object for the DMUB.
+ */
+ struct amdgpu_bo *dmub_bo;
+
+ /**
+ * @dmub_bo_gpu_addr:
+ *
+ * GPU virtual address for the DMUB buffer object.
+ */
+ u64 dmub_bo_gpu_addr;
+
+ /**
+ * @dmub_bo_cpu_addr:
+ *
+ * CPU address for the DMUB buffer object.
+ */
+ void *dmub_bo_cpu_addr;
+
+ /**
+ * @dmcub_fw_version:
+ *
+ * DMCUB firmware version.
+ */
+ uint32_t dmcub_fw_version;
+
+ /**
* @cgs_device:
*
* The Common Graphics Services device. It provides an interface for
@@ -127,7 +187,7 @@ struct amdgpu_display_manager {
u16 display_indexes_num;
/**
- * @atomic_obj
+ * @atomic_obj:
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
@@ -224,6 +284,9 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct hdcp_workqueue *hdcp_workqueue;
+#endif
struct drm_atomic_state *cached_state;
@@ -231,13 +294,13 @@ struct amdgpu_display_manager {
const struct firmware *fw_dmcu;
uint32_t dmcu_fw_version;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
+ * @soc_bounding_box:
+ *
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
};
struct amdgpu_dm_connector {
@@ -267,6 +330,7 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
+ struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -286,6 +350,7 @@ struct amdgpu_dm_connector {
uint32_t debugfs_dpcd_address;
uint32_t debugfs_dpcd_size;
#endif
+ bool force_yuv420_output;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -309,11 +374,12 @@ struct dm_crtc_state {
bool cm_has_degamma;
bool cm_is_degamma_srgb;
+ int update_type;
int active_planes;
bool interrupts_enabled;
int crc_skip_count;
- bool crc_enabled;
+ enum amdgpu_dm_pipe_crc_source crc_src;
bool freesync_timing_changed;
bool freesync_vrr_info_changed;
@@ -345,6 +411,8 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
+ int vcpi_slots;
+ uint64_t pbn;
};
#define to_dm_connector_state(x)\
@@ -380,19 +448,6 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct edid *edid);
-/* amdgpu_dm_crc.c */
-#ifdef CONFIG_DEBUG_FS
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
-int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
- const char *src_name,
- size_t *values_cnt);
-void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
-#else
-#define amdgpu_dm_crtc_set_crc_source NULL
-#define amdgpu_dm_crtc_verify_crc_source NULL
-#define amdgpu_dm_crtc_handle_crc_irq(x)
-#endif
-
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b43bb7f90e4e..2233d293a707 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
NULL);
+ dc_gamma_release(&gamma);
+
return res ? 0 : -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index bc67e6502733..eaad9099bc0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,23 +30,57 @@
#include "amdgpu_dm.h"
#include "dc.h"
-enum amdgpu_dm_pipe_crc_source {
- AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
- AMDGPU_DM_PIPE_CRC_SOURCE_AUTO,
- AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
- AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
+static const char *const pipe_crc_sources[] = {
+ "none",
+ "crtc",
+ "crtc dither",
+ "dprx",
+ "dprx dither",
+ "auto",
};
static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
if (!source || !strcmp(source, "none"))
return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
- if (!strcmp(source, "auto"))
- return AMDGPU_DM_PIPE_CRC_SOURCE_AUTO;
+ if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
+ if (!strcmp(source, "dprx"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
+ if (!strcmp(source, "crtc dither"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
+ if (!strcmp(source, "dprx dither"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;
return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}
+static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
+{
+ return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
+}
+
+static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
+{
+ return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
+}
+
+static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
+{
+ return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
+}
+
+const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
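These CRC sources are exposed through DRM's generic CRC debugfs interface. A minimal user-space sketch is below; the /sys/kernel/debug/dri/0/crtc-0 paths assume debugfs is mounted in the usual location and that the amdgpu device is DRM minor 0, so adjust them for the system at hand:

/* Select the "crtc" CRC source and read one CRC entry (sketch). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	if (fd < 0 || write(fd, "crtc", strlen("crtc")) < 0)
		return 1;
	close(fd);

	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* blocks until an entry is ready */
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);		/* "<frame counter> <crc values>" */
	}
	close(fd);
	return 0;
}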
+
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -63,14 +97,57 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
return 0;
}
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_pipe_crc_source source)
{
struct amdgpu_device *adev = crtc->dev->dev_private;
- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_state *stream_state = crtc_state->stream;
- bool enable;
+ struct dc_stream_state *stream_state = dm_crtc_state->stream;
+ bool enable = amdgpu_dm_is_valid_crc_source(source);
+ int ret = 0;
+ /* Configuration will be deferred to stream enable. */
+ if (!stream_state)
+ return 0;
+
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Enable CRTC CRC generation if necessary. */
+ if (dm_is_crc_source_crtc(source)) {
+ if (!dc_stream_configure_crc(stream_state->ctx->dc,
+ stream_state, enable, enable)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ }
+
+ /* Configure dithering */
+ if (!dm_need_crc_dither(source)) {
+ dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_DISABLE);
+ } else {
+ dc_stream_set_dither_option(stream_state,
+ DITHER_OPTION_DEFAULT);
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_AUTO);
+ }
+
+unlock:
+ mutex_unlock(&adev->dm.dc_lock);
+
+ return ret;
+}
+
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
+ struct drm_crtc_commit *commit;
+ struct dm_crtc_state *crtc_state;
+ struct drm_dp_aux *aux = NULL;
+ bool enable = false;
+ bool enabled = false;
+ int ret = 0;
if (source < 0) {
DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
@@ -78,41 +155,124 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
return -EINVAL;
}
- if (!stream_state) {
- DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
- return -EINVAL;
- }
+ ret = drm_modeset_lock(&crtc->mutex, NULL);
+ if (ret)
+ return ret;
- enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
- mutex_lock(&adev->dm.dc_lock);
- if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
- enable, enable)) {
- mutex_unlock(&adev->dm.dc_lock);
- return -EINVAL;
+ if (commit) {
+ /*
+ * Need to wait for all outstanding programming to complete
+ * in commit tail since it can modify CRC related fields and
+ * hardware state. Since we're holding the CRTC lock we're
+ * guaranteed that no other commit work can be queued off
+ * before we modify the state below.
+ */
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->hw_done, 10 * HZ);
+ if (ret)
+ goto cleanup;
}
- /* When enabling CRC, we should also disable dithering. */
- dc_stream_set_dither_option(stream_state,
- enable ? DITHER_OPTION_TRUN8
- : DITHER_OPTION_DEFAULT);
+ enable = amdgpu_dm_is_valid_crc_source(source);
+ crtc_state = to_dm_crtc_state(crtc->state);
- mutex_unlock(&adev->dm.dc_lock);
+ /*
+ * USER REQ SRC | CURRENT SRC | BEHAVIOR
+ * -----------------------------
+ * None | None | Do nothing
+ * None | CRTC | Disable CRTC CRC, set default to dither
+ * None | DPRX | Disable DPRX CRC, need 'aux', set default to dither
+ * None | CRTC DITHER | Disable CRTC CRC
+ * None | DPRX DITHER | Disable DPRX CRC, need 'aux'
+ * CRTC | XXXX | Enable CRTC CRC, no dither
+ * DPRX | XXXX | Enable DPRX CRC, need 'aux', no dither
+ * CRTC DITHER | XXXX | Enable CRTC CRC, set dither
+ * DPRX DITHER | XXXX | Enable DPRX CRC, need 'aux', set dither
+ */
+ if (dm_is_crc_source_dprx(source) ||
+ (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
+ dm_is_crc_source_dprx(crtc_state->crc_src))) {
+ struct amdgpu_dm_connector *aconn = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state || connector->state->crtc != crtc)
+ continue;
+
+ aconn = to_amdgpu_dm_connector(connector);
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!aconn) {
+ DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ aux = &aconn->dm_dp_aux.aux;
+
+ if (!aux) {
+ DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
/*
* Reading the CRC requires the vblank interrupt handler to be
* enabled. Keep a reference until CRC capture stops.
*/
- if (!crtc_state->crc_enabled && enable)
- drm_crtc_vblank_get(crtc);
- else if (crtc_state->crc_enabled && !enable)
+ enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+
+ if (dm_is_crc_source_dprx(source)) {
+ if (drm_dp_start_crc(aux, crtc)) {
+ DRM_DEBUG_DRIVER("dp start crc failed\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ } else if (enabled && !enable) {
drm_crtc_vblank_put(crtc);
+ if (dm_is_crc_source_dprx(source)) {
+ if (drm_dp_stop_crc(aux)) {
+ DRM_DEBUG_DRIVER("dp stop crc failed\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ }
- crtc_state->crc_enabled = enable;
+ crtc_state->crc_src = source;
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
- return 0;
+
+cleanup:
+ if (commit)
+ drm_crtc_commit_put(commit);
+
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
}
/**
@@ -135,7 +295,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
stream_state = crtc_state->stream;
/* Early return if CRC capture is not enabled. */
- if (!crtc_state->crc_enabled)
+ if (!amdgpu_dm_is_valid_crc_source(crtc_state->crc_src))
return;
/*
@@ -149,10 +309,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
return;
}
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
- &crcs[0], &crcs[1], &crcs[2]))
- return;
+ if (dm_is_crc_source_crtc(crtc_state->crc_src)) {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ &crcs[0], &crcs[1], &crcs[2]))
+ return;
- drm_crtc_add_crc_entry(crtc, true,
- drm_crtc_accurate_vblank_count(crtc), crcs);
+ drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
new file mode 100644
index 000000000000..f7d731797d3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
+#define AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
+
+struct drm_crtc;
+struct dm_crtc_state;
+
+enum amdgpu_dm_pipe_crc_source {
+ AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
+ AMDGPU_DM_PIPE_CRC_SOURCE_CRTC,
+ AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER,
+ AMDGPU_DM_PIPE_CRC_SOURCE_DPRX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER,
+ AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
+};
+
+static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source)
+{
+ return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) &&
+ (source < AMDGPU_DM_PIPE_CRC_SOURCE_MAX);
+}
+
+/* amdgpu_dm_crc.c */
+#ifdef CONFIG_DEBUG_FS
+int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_pipe_crc_source source);
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
+const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
+#else
+#define amdgpu_dm_crtc_set_crc_source NULL
+#define amdgpu_dm_crtc_verify_crc_source NULL
+#define amdgpu_dm_crtc_get_crc_sources NULL
+#define amdgpu_dm_crtc_handle_crc_irq(x)
+#endif
+
+#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 36a1d794b4af..f81d3439ee8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
dc_link_set_test_pattern(
link,
test_pattern,
+ DP_TEST_PATTERN_COLOR_SPACE_RGB,
&link_training_settings,
custom_pattern,
10);
@@ -942,6 +943,52 @@ static const struct {
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+/*
+ * Force YUV420 output if available from the given mode
+ */
+static int force_yuv420_output_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ connector->force_yuv420_output = (bool)val;
+
+ return 0;
+}
+
+/*
+ * Check if YUV420 is forced when available from the given mode
+ */
+static int force_yuv420_output_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ *val = connector->force_yuv420_output;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
+ force_yuv420_output_set, "%llu\n");
+
+/*
+ * Read PSR state
+ */
+static int psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ uint32_t psr_state = 0;
+
+ dc_link_get_psr_state(link, &psr_state);
+
+ *val = psr_state;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -955,6 +1002,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
dp_debugfs_entries[i].fops);
}
}
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+
+ debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
+ &force_yuv420_output_fops);
}
/*
@@ -1053,9 +1106,33 @@ static int target_backlight_read(struct seq_file *m, void *data)
return 0;
}
+static int mst_topo(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
+ drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_current_backlight_pwm", &current_backlight_read},
{"amdgpu_target_backlight_pwm", &target_backlight_read},
+ {"amdgpu_mst_topology", &mst_topo},
};
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
new file mode 100644
index 000000000000..0acd3409dd6c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_hdcp.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dm_helpers.h"
+#include <drm/drm_hdcp.h>
+
+static bool
+lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+ struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
+}
+
+static bool
+lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
+}
+
+static void process_output(struct hdcp_workqueue *hdcp_work)
+{
+ struct mod_hdcp_output output = hdcp_work->output;
+
+ if (output.callback_stop)
+ cancel_delayed_work(&hdcp_work->callback_dwork);
+
+ if (output.callback_needed)
+ schedule_delayed_work(&hdcp_work->callback_dwork,
+ msecs_to_jiffies(output.callback_delay));
+
+ if (output.watchdog_timer_stop)
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ if (output.watchdog_timer_needed)
+ schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
+ msecs_to_jiffies(output.watchdog_timer_delay));
+
+ schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
+}
+
+void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector,
+ uint8_t content_type,
+ bool enable_encryption)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+ struct mod_hdcp_display_query query;
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ query.display = NULL;
+ mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
+
+ if (query.display != NULL) {
+ memcpy(display, query.display, sizeof(struct mod_hdcp_display));
+ mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
+
+ hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+
+ if (enable_encryption) {
+ display->adjust.disable = 0;
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
+ hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+ else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
+ hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
+
+ schedule_delayed_work(&hdcp_w->property_validate_dwork,
+ msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+ } else {
+ display->adjust.disable = 1;
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ }
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ }
+
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+ process_output(hdcp_w);
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+ unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
+
+ process_output(hdcp_w);
+ mutex_unlock(&hdcp_w->mutex);
+}
+void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ schedule_work(&hdcp_w->cpirq_work);
+}
+
+static void event_callback(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
+ callback_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_property_update(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+ struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ long ret;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+ if (aconnector->base.state->commit) {
+ ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+
+ if (ret == 0) {
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
+ hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+
+ if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
+ if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
+ hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
+ hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ } else {
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ }
+
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void event_property_validate(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work =
+ container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
+ struct mod_hdcp_display_query query;
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+
+ if (!aconnector)
+ return;
+
+ mutex_lock(&hdcp_work->mutex);
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+
+ if (query.encryption_status != hdcp_work->encryption_status) {
+ hdcp_work->encryption_status = query.encryption_status;
+ schedule_work(&hdcp_work->property_update_work);
+ }
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_watchdog_timer(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work),
+ struct hdcp_workqueue,
+ watchdog_timer_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_cpirq(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+{
+ int i = 0;
+
+ for (i = 0; i < hdcp_work->max_link; i++) {
+ cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ }
+
+ kfree(hdcp_work);
+}
+
+static void update_config(void *handle, struct cp_psp_stream_config *config)
+{
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
+ int link_index = aconnector->dc_link->link_index;
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
+
+ if (config->dpms_off) {
+ hdcp_remove_display(hdcp_work, link_index, aconnector);
+ return;
+ }
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+
+ if (aconnector->dc_sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+
+ display->controller = CONTROLLER_ID_D0 + config->otg_inst;
+ display->dig_fe = config->stream_enc_inst;
+ link->dig_be = config->link_enc_inst;
+ link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
+ link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ display->adjust.disable = 1;
+ link->adjust.auth_delay = 2;
+
+ hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
+}
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ struct hdcp_workqueue *hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
+ int i = 0;
+
+ if (!hdcp_work)
+ return NULL;
+
+ hdcp_work->max_link = max_caps;
+
+ for (i = 0; i < max_caps; i++) {
+ mutex_init(&hdcp_work[i].mutex);
+
+ INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
+ INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
+ INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
+ INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
+ INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
+
+ hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
+ hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ }
+
+ cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->handle = hdcp_work;
+
+ return hdcp_work;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
new file mode 100644
index 000000000000..6abde86bce4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
+#define AMDGPU_DM_AMDGPU_DM_HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp.h"
+#include "dc.h"
+#include "dm_cp_psp.h"
+
+struct mod_hdcp;
+struct mod_hdcp_link;
+struct mod_hdcp_display;
+struct cp_psp;
+
+struct hdcp_workqueue {
+ struct work_struct cpirq_work;
+ struct work_struct property_update_work;
+ struct delayed_work callback_dwork;
+ struct delayed_work watchdog_timer_dwork;
+ struct delayed_work property_validate_dwork;
+ struct amdgpu_dm_connector *aconnector;
+ struct mutex mutex;
+
+ struct mod_hdcp hdcp;
+ struct mod_hdcp_output output;
+ struct mod_hdcp_display display;
+ struct mod_hdcp_link link;
+
+ enum mod_hdcp_encryption_status encryption_status;
+ uint8_t max_link;
+};
+
+void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector,
+ uint8_t content_type,
+ bool enable_encryption);
+
+void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_destroy(struct hdcp_workqueue *work);
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index a0ed0154a9f0..318b474ff20e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -37,6 +37,7 @@
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_mst_types.h"
#include "dm_helpers.h"
@@ -97,11 +98,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
- if (sad_count <= 0) {
- DRM_INFO("SADs count is: %d, don't need to read it\n",
- sad_count);
+ if (sad_count <= 0)
return result;
- }
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
@@ -183,19 +181,22 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable)
{
struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
- int slots = 0;
bool ret;
- int clock;
- int bpp = 0;
- int pbn = 0;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ /*
+ * Accessing the connector state is required for vcpi_slots
+ * allocation and directly relies on the atomic check code
+ * blocking before commit, which guarantees that the state is
+ * not swapped while it is still in use in commit tail.
+ */
if (!aconnector || !aconnector->mst_port)
return false;
+ dm_conn_state = to_dm_connector_state(aconnector->base.state);
+
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
@@ -204,42 +205,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_port = aconnector->port;
if (enable) {
- clock = stream->timing.pix_clk_100hz / 10;
-
- switch (stream->timing.display_color_depth) {
-
- case COLOR_DEPTH_666:
- bpp = 6;
- break;
- case COLOR_DEPTH_888:
- bpp = 8;
- break;
- case COLOR_DEPTH_101010:
- bpp = 10;
- break;
- case COLOR_DEPTH_121212:
- bpp = 12;
- break;
- case COLOR_DEPTH_141414:
- bpp = 14;
- break;
- case COLOR_DEPTH_161616:
- bpp = 16;
- break;
- default:
- ASSERT(bpp != 0);
- break;
- }
-
- bpp = bpp * 3;
-
- /* TODO need to know link rate */
-
- pbn = drm_dp_calc_pbn_mode(clock, bpp);
-
- slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
- ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
+ dm_conn_state->pbn,
+ dm_conn_state->vcpi_slots);
if (!ret)
return false;
@@ -247,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
}
- ret = drm_dp_update_payload_part1(mst_mgr);
+ /* It's OK for this to fail */
+ drm_dp_update_payload_part1(mst_mgr);
/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -256,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
get_payload_table(aconnector, proposed_table);
- if (ret)
- return false;
-
return true;
}
@@ -282,7 +249,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream)
{
@@ -293,19 +260,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->mst_port)
- return false;
+ return ACT_FAILED;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
- return false;
+ return ACT_FAILED;
ret = drm_dp_check_act_status(mst_mgr);
if (ret)
- return false;
+ return ACT_FAILED;
- return true;
+ return ACT_SUCCESS;
}
bool dm_helpers_dp_mst_send_payload_allocation(
@@ -316,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
- int ret;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -330,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
if (!mst_mgr->mst_state)
return false;
- ret = drm_dp_update_payload_part2(mst_mgr);
-
- if (ret)
- return false;
+ /* It's OK for this to fail */
+ drm_dp_update_payload_part2(mst_mgr);
if (!enable)
drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
@@ -541,16 +505,32 @@ bool dm_helpers_submit_i2c(
return result;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
bool enable
)
{
+ uint8_t enable_dsc = enable ? 1 : 0;
+ struct amdgpu_dm_connector *aconnector;
+
+ if (!stream)
+ return false;
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector->dsc_aux)
+ return false;
+
+ return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+ }
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+
return false;
}
-#endif
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index fa5d503d379c..cbcf504f73a5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
*/
static void dm_irq_work_func(struct work_struct *work)
{
- struct list_head *entry;
struct irq_list_head *irq_list_head =
container_of(work, struct irq_list_head, work);
struct list_head *handler_list = &irq_list_head->head;
struct amdgpu_dm_irq_handler_data *handler_data;
- list_for_each(entry, handler_list) {
- handler_data = list_entry(entry,
- struct amdgpu_dm_irq_handler_data,
- list);
-
+ list_for_each_entry(handler_data, handler_list, list) {
DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
handler_data->irq_source);
@@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
enum dc_irq_source irq_source)
{
struct amdgpu_dm_irq_handler_data *handler_data;
- struct list_head *entry;
unsigned long irq_table_flags;
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- list_for_each(
- entry,
- &adev->dm.irq_handler_list_high_tab[irq_source]) {
-
- handler_data = list_entry(entry,
- struct amdgpu_dm_irq_handler_data,
- list);
-
+ list_for_each_entry(handler_data,
+ &adev->dm.irq_handler_list_high_tab[irq_source],
+ list) {
/* Call a subcomponent which registered for immediate
* interrupt notification */
handler_data->handler(handler_data->handler_arg);
@@ -732,8 +721,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
@@ -751,6 +742,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
true);
}
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -765,8 +757,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
@@ -779,4 +773,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
false);
}
}
+ drm_connector_list_iter_end(&iter);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e205ee36ac3..5672f7765919 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -25,6 +25,7 @@
#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -36,6 +37,14 @@
#include "dc_link_ddc.h"
#include "i2caux_interface.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#include "dc/dcn20/dcn20_resource.h"
+#endif
/* #define TRACE_DPCD */
@@ -113,6 +122,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
@@ -123,31 +133,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
return result;
}
-static enum drm_connector_status
-dm_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
-
- enum drm_connector_status status =
- drm_dp_mst_detect_port(
- connector,
- &master->mst_mgr,
- aconnector->port);
-
- return status;
-}
-
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
- if (amdgpu_dm_connector->edid) {
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
- }
+ kfree(amdgpu_dm_connector->edid);
+ amdgpu_dm_connector->edid = NULL;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
@@ -156,17 +149,68 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(amdgpu_dm_connector);
}
+static int
+amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+ amdgpu_dm_connector->debugfs_dpcd_address = 0;
+ amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
+ return drm_dp_mst_connector_late_register(connector, port);
+}
+
+static void
+amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+
+ drm_dp_mst_connector_early_unregister(connector, port);
+}
+
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
- .detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset,
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
- .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_mst_connector_late_register,
+ .early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *dc_sink = aconnector->dc_sink;
+ struct drm_dp_mst_port *port = aconnector->port;
+ u8 dsc_caps[16] = { 0 };
+
+ aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+
+ if (!aconnector->dsc_aux)
+ return false;
+
+ if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
+ return false;
+
+ if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ dsc_caps, NULL,
+ &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ return false;
+
+ return true;
+}
+#endif
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -209,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!validate_dsc_caps_on_connector(aconnector))
+ memset(&aconnector->dc_sink->sink_dsc_caps,
+ 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+#endif
+ }
}
drm_connector_update_edid_property(
@@ -223,17 +273,61 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+dm_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+}
- return &amdgpu_dm_connector->mst_encoder->base;
+static int
+dm_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ aconnector->port);
+}
+
+static int dm_dp_mst_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+
+ mst_port = aconnector->port;
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!old_conn_state->crtc)
+ return 0;
+
+ if (new_conn_state->crtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (!new_crtc_state ||
+ !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ new_crtc_state->enable)
+ return 0;
+ }
+
+ return drm_dp_atomic_release_vcpi_slots(state,
+ mst_mgr,
+ mst_port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = dm_mst_best_encoder,
+ .atomic_best_encoder = dm_mst_atomic_best_encoder,
+ .detect_ctx = dm_dp_mst_detect,
+ .atomic_check = dm_dp_mst_atomic_check,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -388,13 +482,17 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
aconnector->dm_dp_aux.aux.name = "dmdc";
- aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+ aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
- aconnector->base.name, dm->adev->dev);
+ &aconnector->base);
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ return;
+
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
@@ -405,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->connector_id);
}
+int dm_mst_get_pbn_divider(struct dc_link *link)
+{
+ if (!link)
+ return 0;
+
+ return dc_link_bandwidth_kbps(link,
+ dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+}
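As a rough worked example of the divider (assuming dc_link_bandwidth_kbps() reports the 8b/10b payload rate of an HBR2 x4 link; the real driver accounting can differ once FEC and other overheads apply):

/* Illustrative arithmetic only; the link bandwidth below is assumed. */
#include <stdio.h>

int main(void)
{
	/* HBR2, 4 lanes: 5.4 Gbps * 4 * 8/10 = 17,280,000 kbps of payload. */
	int link_bw_kbps = 17280000;
	int pbn_per_slot = link_bw_kbps / (8 * 1000 * 54);

	printf("%d\n", pbn_per_slot);	/* 40 PBN per MTP time slot */
	return 0;
}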
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_params {
+ struct dc_crtc_timing *timing;
+ struct dc_sink *sink;
+ struct dc_dsc_bw_range bw_range;
+ bool compression_possible;
+ struct drm_dp_mst_port *port;
+};
+
+struct dsc_mst_fairness_vars {
+ int pbn;
+ bool dsc_enabled;
+ int bpp_x16;
+};
+
+static int kbps_to_peak_pbn(int kbps)
+{
+ u64 peak_kbps = kbps;
+
+ peak_kbps *= 1006;
+ peak_kbps = div_u64(peak_kbps, 1000);
+ return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+}
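A standalone sketch of this conversion, fed with an assumed 1080p60 RGB stream of 148.5 MHz * 24 bpp = 3,564,000 kbps (both the helper below and its input are illustrative, not part of the patch):

/* Mirror of the kbps -> peak PBN conversion above, in plain C. */
#include <stdint.h>
#include <stdio.h>

static int kbps_to_peak_pbn_example(int kbps)
{
	uint64_t peak_kbps = kbps;

	peak_kbps *= 1006;	/* add the 0.6% margin, as in the driver */
	peak_kbps /= 1000;
	/* DIV64_U64_ROUND_UP(peak_kbps * 64, 54 * 8 * 1000) */
	return (int)((peak_kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000));
}

int main(void)
{
	printf("%d\n", kbps_to_peak_pbn_example(3564000));	/* prints 532 */
	return 0;
}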
+
+static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
+ if (vars[i].dsc_enabled && dc_dsc_compute_config(
+ params[i].sink->ctx->dc->res_pool->dscs[0],
+ &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
+ 0,
+ params[i].timing,
+ &params[i].timing->dsc_cfg)) {
+ params[i].timing->flags.DSC = 1;
+ params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+ } else {
+ params[i].timing->flags.DSC = 0;
+ }
+ }
+}
+
+static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
+{
+ struct dc_dsc_config dsc_config;
+ u64 kbps;
+
+ kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ dc_dsc_compute_config(
+ param.sink->ctx->dc->res_pool->dscs[0],
+ &param.sink->sink_dsc_caps.dsc_dec_caps,
+ param.sink->ctx->dc->debug.dsc_min_slice_height_override,
+ (int) kbps, param.timing, &dsc_config);
+
+ return dsc_config.bits_per_pixel;
+}
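Going the other way, the 994 * 8 * 54 / 64 factor above approximately inverts kbps_to_peak_pbn(): with the assumed example values from the previous sketch, a 532 PBN allocation maps back to 532 * 994 * 8 * 54 / 64 = 3,569,454 kbps, slightly above the 3,564,000 kbps it was derived from, and dc_dsc_compute_config() is then asked to find a bits-per-pixel setting that fits within that bandwidth.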
+
+static void increase_dsc_bpp(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+ bool bpp_increased[MAX_PIPES];
+ int initial_slack[MAX_PIPES];
+ int min_initial_slack;
+ int next_index;
+ int remaining_to_increase = 0;
+ int pbn_per_timeslot;
+ int link_timeslots_used;
+ int fair_pbn_alloc;
+
+ for (i = 0; i < count; i++) {
+ if (vars[i].dsc_enabled) {
+ initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
+ bpp_increased[i] = false;
+ remaining_to_increase += 1;
+ } else {
+ initial_slack[i] = 0;
+ bpp_increased[i] = true;
+ }
+ }
+
+ pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
+ dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
+
+ while (remaining_to_increase) {
+ next_index = -1;
+ min_initial_slack = -1;
+ for (i = 0; i < count; i++) {
+ if (!bpp_increased[i]) {
+ if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
+ min_initial_slack = initial_slack[i];
+ next_index = i;
+ }
+ }
+ }
+
+ if (next_index == -1)
+ break;
+
+ link_timeslots_used = 0;
+
+ for (i = 0; i < count; i++)
+ link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
+
+ fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+
+ if (initial_slack[next_index] > fair_pbn_alloc) {
+ vars[next_index].pbn += fair_pbn_alloc;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
+ } else {
+ vars[next_index].pbn -= fair_pbn_alloc;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+ } else {
+ vars[next_index].pbn += initial_slack[next_index];
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
+ } else {
+ vars[next_index].pbn -= initial_slack[next_index];
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+ }
+
+ bpp_increased[next_index] = true;
+ remaining_to_increase--;
+ }
+}
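To make the fair_pbn_alloc step concrete, here is a small standalone sketch with assumed values: two DSC-capable streams each currently at 200 PBN on an HBR2 x4 link (so pbn_per_timeslot is 40, per dm_mst_get_pbn_divider()); none of these numbers come from the patch:

/* Numeric illustration of one pass of the fair PBN allocation above. */
#include <stdio.h>

int main(void)
{
	int pbn_per_timeslot = 40;	/* HBR2 x4 link */
	int vars_pbn[2] = { 200, 200 };	/* two DSC streams at minimum rate */
	int remaining_to_increase = 2;
	int link_timeslots_used = 0;
	int i;

	/* DIV_ROUND_UP(200, 40) = 5 time slots per stream, 10 in total. */
	for (i = 0; i < 2; i++)
		link_timeslots_used += (vars_pbn[i] + pbn_per_timeslot - 1) /
				       pbn_per_timeslot;

	/* (63 - 10) / 2 * 40 = 1040 PBN that each stream may claim this pass. */
	printf("%d\n", (63 - link_timeslots_used) /
		       remaining_to_increase * pbn_per_timeslot);
	return 0;
}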
+
+static void try_disable_dsc(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+ bool tried[MAX_PIPES];
+ int kbps_increase[MAX_PIPES];
+ int max_kbps_increase;
+ int next_index;
+ int remaining_to_try = 0;
+
+ for (i = 0; i < count; i++) {
+ if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
+ kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
+ tried[i] = false;
+ remaining_to_try += 1;
+ } else {
+ kbps_increase[i] = 0;
+ tried[i] = true;
+ }
+ }
+
+ while (remaining_to_try) {
+ next_index = -1;
+ max_kbps_increase = -1;
+ for (i = 0; i < count; i++) {
+ if (!tried[i]) {
+ if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
+ max_kbps_increase = kbps_increase[i];
+ next_index = i;
+ }
+ }
+ }
+
+ if (next_index == -1)
+ break;
+
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ 0) < 0)
+ return;
+
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].dsc_enabled = false;
+ vars[next_index].bpp_x16 = 0;
+ } else {
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+
+ tried[next_index] = true;
+ remaining_to_try--;
+ }
+}
+
+static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dc_link *dc_link)
+{
+ int i;
+ struct dc_stream_state *stream;
+ struct dsc_mst_fairness_params params[MAX_PIPES];
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+ int count = 0;
+
+ memset(params, 0, sizeof(params));
+
+ /* Set up params */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct dc_dsc_policy dsc_policy = {0};
+
+ stream = dc_state->streams[i];
+
+ if (stream->link != dc_link)
+ continue;
+
+ stream->timing.flags.DSC = 0;
+
+ params[count].timing = &stream->timing;
+ params[count].sink = stream->sink;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ params[count].port = aconnector->port;
+ params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
+ if (!dc_dsc_compute_bandwidth_range(
+ stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp,
+ dsc_policy.max_target_bpp,
+ &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->timing, &params[count].bw_range))
+ params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ count++;
+ }
+ /* Try no compression */
+ for (i = 0; i < count; i++) {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i].dsc_enabled = false;
+ vars[i].bpp_x16 = 0;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ 0) < 0)
+ return false;
+ }
+ if (!drm_dp_mst_atomic_check(state)) {
+ set_dsc_configs_from_fairness_vars(params, vars, count);
+ return true;
+ }
+
+ /* Try max compression */
+ for (i = 0; i < count; i++) {
+ if (params[i].compression_possible) {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i].dsc_enabled = true;
+ vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return false;
+ } else {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i].dsc_enabled = false;
+ vars[i].bpp_x16 = 0;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ 0) < 0)
+ return false;
+ }
+ }
+ if (drm_dp_mst_atomic_check(state))
+ return false;
+
+ /* Optimize degree of compression */
+ increase_dsc_bpp(state, dc_link, params, vars, count);
+
+ try_disable_dsc(state, dc_link, params, vars, count);
+
+ set_dsc_configs_from_fairness_vars(params, vars, count);
+
+ return true;
+}
+
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ int i, j;
+ struct dc_stream_state *stream;
+ bool computed_streams[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+
+ for (i = 0; i < dc_state->stream_count; i++)
+ computed_streams[i] = false;
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ stream = dc_state->streams[i];
+
+ if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
+ continue;
+
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector || !aconnector->dc_sink)
+ continue;
+
+ if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ continue;
+
+ if (computed_streams[i])
+ continue;
+
+ mutex_lock(&aconnector->mst_mgr.lock);
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+ mutex_unlock(&aconnector->mst_mgr.lock);
+ return false;
+ }
+ mutex_unlock(&aconnector->mst_mgr.lock);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (dc_state->streams[j]->link == stream->link)
+ computed_streams[j] = true;
+ }
+ }
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ stream = dc_state->streams[i];
+
+ if (stream->timing.flags.DSC == 1)
+ dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+ }
+
+ return true;
+}
+
+#endif
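
The helpers above convert between stream bandwidth in kbps and DP MST payload bandwidth numbers (PBN); bpp_x16_from_pbn() goes from PBN to kbps via the 994 * 8 * 54 / 64 factor, and increase_dsc_bpp() then splits the slack of the 63 usable time slots per link fairly across the streams that can still grow. A minimal sketch of the opposite, kbps-to-PBN direction (the helper name and the rounding are assumptions that mirror that factor, not code taken from the driver):

    #include <linux/math64.h>

    /* One PBN unit is (54/64) MBps of link bandwidth; DP MST adds a 0.6%
     * margin, which is where the 1006/1000 factor here (and the 994/1000
     * factor in the inverse direction above) comes from.
     */
    static int example_kbps_to_peak_pbn(int kbps)
    {
        u64 peak_kbps = div_u64((u64)kbps * 1006, 1000);

        return (int)DIV64_U64_ROUND_UP(peak_kbps * 64, 54 * 8 * 1000);
    }

For example, a stream needing 8,000,000 kbps works out to roughly 1193 PBN under this formula.
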
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..d6813ce67bbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -29,7 +29,14 @@
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
+int dm_mst_get_pbn_divider(struct dc_link *link);
+
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state);
+#endif
+
#endif
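
Because the new compute_mst_dsc_configs_for_state() declaration is guarded by CONFIG_DRM_AMD_DC_DCN, callers need the same guard. A hypothetical call-site sketch (the dm_state variable and the -EINVAL policy are assumptions, not taken from the driver):

    #if defined(CONFIG_DRM_AMD_DC_DCN)
        /* Recompute MST DSC configs once the dc_state for this commit exists. */
        if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
            return -EINVAL;
    #endif
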
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 592fa499c9f8..a2e1a73f66b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -151,18 +151,31 @@ static void get_default_clock_levels(
static enum smu_clk_type dc_to_smu_clock_type(
enum dm_pp_clock_type dm_pp_clk_type)
{
-#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \
- [dcclk] = smuclk
+ enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
- static int dc_clk_type_map[] = {
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK, SMU_DISPCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK, SMU_GFXCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK, SMU_MCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK, SMU_DCEFCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK, SMU_SOCCLK),
- };
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ smu_clk_type = SMU_DISPCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ smu_clk_type = SMU_GFXCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ smu_clk_type = SMU_MCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+ smu_clk_type = SMU_DCEFCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_SOCCLK:
+ smu_clk_type = SMU_SOCCLK;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
- return dc_clk_type_map[dm_pp_clk_type];
+ return smu_clk_type;
}
static enum amd_pp_clock_type dc_to_pp_clock_type(
@@ -329,12 +342,13 @@ bool dm_pp_get_clock_levels_by_type(
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
- /* Error in pplib. Provide default values. */
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
return true;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
if (smu_get_clock_by_type(&adev->smu,
- dc_to_smu_clock_type(clk_type),
+ dc_to_pp_clock_type(clk_type),
&pp_clks)) {
get_default_clock_levels(clk_type, dc_clks);
return true;
@@ -352,7 +366,7 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) {
if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
validation_clks.engine_max_clock = 72000;
@@ -419,7 +433,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
return false;
} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
if (smu_get_clock_by_type_with_latency(&adev->smu,
- dc_to_pp_clock_type(clk_type),
+ dc_to_smu_clock_type(clk_type),
&pp_clks))
return false;
}
@@ -493,8 +507,8 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
- else if (adev->smu.funcs &&
- adev->smu.funcs->display_clock_voltage_request)
+ else if (adev->smu.ppt_funcs &&
+ adev->smu.ppt_funcs->display_clock_voltage_request)
ret = smu_display_clock_voltage_request(&adev->smu,
&pp_clock_request);
if (ret)
@@ -514,7 +528,7 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -576,10 +590,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else if (adev->smu.funcs &&
- adev->smu.funcs->set_watermarks_for_clock_ranges)
+ else
smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges);
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -591,7 +604,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
pp_funcs->notify_smu_enable_pwe(pp_handle);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
smu_notify_smu_enable_pwe(&adev->smu);
}
@@ -652,7 +665,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
wm_with_clock_ranges.wm_dmif_clocks_ranges;
@@ -695,15 +707,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- if (!smu->funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
-
- /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
- * 1: fail
- */
- if (smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges))
- return PP_SMU_RESULT_UNSUPPORTED;
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
return PP_SMU_RESULT_OK;
}
@@ -714,10 +718,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */
if (smu_set_azalia_d3_pme(smu))
return PP_SMU_RESULT_FAIL;
@@ -730,10 +734,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
if (smu_set_display_count(smu, count))
return PP_SMU_RESULT_FAIL;
@@ -746,10 +750,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */
+ /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL; 1: fail */
if (smu_set_deep_sleep_dcefclk(smu, mhz))
return PP_SMU_RESULT_FAIL;
@@ -764,13 +768,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -786,13 +790,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_mem_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -801,6 +805,19 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
+enum pp_smu_status pp_nv_set_pstate_handshake_support(
+ struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
+ return PP_SMU_RESULT_FAIL;
+
+ return PP_SMU_RESULT_OK;
+}
+
enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
enum pp_smu_nv_clock_id clock_id, int mhz)
{
@@ -809,7 +826,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
switch (clock_id) {
@@ -827,7 +844,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
}
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -843,13 +860,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc)
+ if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
+ if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -868,13 +885,95 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
if (!smu->ppt_funcs->get_uclk_dpm_states)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
+ if (!smu_get_uclk_dpm_states(smu,
clock_values_in_khz, num_states))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
}
+enum pp_smu_status pp_rn_get_dpm_clock_table(
+ struct pp_smu *pp, struct dpm_clocks *clock_table)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu->ppt_funcs->get_dpm_clock_table)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu_get_dpm_clock_table(smu, clock_table))
+ return PP_SMU_RESULT_OK;
+
+ return PP_SMU_RESULT_FAIL;
+}
+
+enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+ wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+ wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_mhz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_mhz;
+ }
+
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+
+ return PP_SMU_RESULT_OK;
+}
+
void dm_pp_get_funcs(
struct dc_context *ctx,
struct pp_smu_funcs *funcs)
@@ -895,7 +994,6 @@ void dm_pp_get_funcs(
funcs->rv_funcs.set_hard_min_fclk_by_freq =
pp_rv_set_hard_min_fclk_by_freq;
break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case DCN_VERSION_2_0:
funcs->ctx.ver = PP_SMU_VER_NV;
funcs->nv_funcs.pp_smu.dm = ctx;
@@ -916,8 +1014,15 @@ void dm_pp_get_funcs(
funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
/*todo compare data with window driver */
funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
+ funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
+ break;
+
+ case DCN_VERSION_2_1:
+ funcs->ctx.ver = PP_SMU_VER_RN;
+ funcs->rn_funcs.pp_smu.dm = ctx;
+ funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
+ funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
break;
-#endif
default:
DRM_ERROR("smu version is not supported !\n");
break;
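
With the table lookup replaced by an explicit switch, dc_to_smu_clock_type() now returns SMU_CLK_COUNT (and logs an error) for clock types it has no mapping for, instead of indexing the old sparse array. A short, defensive usage sketch (the early-return policy is an illustration, not what the surrounding code does):

    enum smu_clk_type smu_clk = dc_to_smu_clock_type(DM_PP_CLOCK_TYPE_MEMORY_CLK);

    if (smu_clk == SMU_CLK_COUNT)
        return false; /* unmapped clock type; the helper already logged an error */

    /* Otherwise smu_clk is SMU_MCLK and can be passed on, e.g. to
     * smu_get_clock_by_type_with_latency() as in the hunk above.
     */
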
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 55ce5b657390..6e3dddc73246 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,17 +25,11 @@
DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
DC_LIBS += dcn20
-endif
-
-
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_LIBS += dsc
-endif
-
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn10 dml
+DC_LIBS += dcn21
endif
DC_LIBS += dce120
@@ -45,6 +39,10 @@ DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DC_LIBS += hdcp
+endif
+
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
@@ -52,7 +50,7 @@ include $(AMD_DC)
DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
endif
@@ -63,5 +61,6 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-
+DC_DMUB += dc_dmub_srv.o
+AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index a50a76471107..7ad0cad0f4ef 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
# subcomponents.
BASICS = conversion.o fixpt31_32.o \
- log_helpers.o vector.o
+ log_helpers.o vector.o dc_common.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
new file mode 100644
index 000000000000..b2fc4f8e6482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dc_common.h"
+#include "basics/conversion.h"
+
+bool is_rgb_cspace(enum dc_color_space output_color_space)
+{
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_ADOBERGB:
+ return true;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ return false;
+ default:
+ /* Unhandled color space: add a case to this switch. */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ return false;
+}
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ const struct dc_plane_state *plane_state)
+{
+ if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
+ && plane_state->input_csc_color_matrix.enable_adjustment
+ && plane_state->coeff_reduction_factor.value != 0) {
+ bias_and_scale->scale_blue = fixed_point_to_int_frac(
+ dc_fixpt_mul(plane_state->coeff_reduction_factor,
+ dc_fixpt_from_fraction(256, 255)),
+ 2,
+ 13);
+ bias_and_scale->scale_red = bias_and_scale->scale_blue;
+ bias_and_scale->scale_green = bias_and_scale->scale_blue;
+ } else {
+ bias_and_scale->scale_blue = 0x2000;
+ bias_and_scale->scale_red = 0x2000;
+ bias_and_scale->scale_green = 0x2000;
+ }
+}
+
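
build_prescale_params() emits its scale factors in a 2.13 fixed-point format, so the 0x2000 default is exactly 1.0 (1 << 13), and the adjusted path encodes coeff_reduction_factor * 256/255 into the same format. A host-side sketch of the encoding (the helper is illustrative only; kernel code cannot use double outside an FPU-protected region):

    #include <stdint.h>

    /* Encode a value in unsigned 2.13 fixed point
     * (2 integer bits, 13 fractional bits).
     */
    static uint16_t to_fixed_2_13(double v)
    {
        return (uint16_t)(v * (1 << 13) + 0.5);
    }

    /* to_fixed_2_13(1.0) == 0x2000, the default written when no input CSC
     * adjustment is in effect.
     */
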
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
new file mode 100644
index 000000000000..7c0cbf47e8ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DC_COMMON_H__
+#define __DAL_DC_COMMON_H__
+
+#include "core_types.h"
+
+bool is_rgb_cspace(enum dc_color_space output_color_space);
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ const struct dc_plane_state *plane_state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 461eef1de124..008d4d11339d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create(
return NULL;
}
-static void destruct(struct bios_parser *bp)
+static void bios_parser_destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb)
return;
}
- destruct(bp);
+ bios_parser_destruct(bp);
kfree(bp);
*dcb = NULL;
@@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
break;
default:
break;
- };
+ }
/* Unidentified device ID, return empty support mask. */
return 0;
@@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info(
/* Sort voltage table from low to high*/
if (result == BP_RESULT_OK) {
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j].max_supported_clk <
info->disp_clk_voltage[j-1].max_supported_clk) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
@@ -2742,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
- struct bios_parser *bp;
enum bp_result record_result;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
@@ -2751,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
- bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
return BP_RESULT_BADINPUT;
@@ -2796,8 +2791,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_device_tag = bios_parser_get_device_tag,
- .get_firmware_info = bios_parser_get_firmware_info,
-
.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
.get_ss_entry_number = bios_parser_get_ss_entry_number,
@@ -2922,6 +2915,7 @@ static bool bios_parser_construct(
dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+ bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;
return true;
}
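
bios_parser_construct() now parses the firmware info once, caches it on the dc_bios, and records whether the parse succeeded in fw_info_valid (bios_parser2.c below gets the same treatment), while .get_firmware_info is dropped from the vtable. A hypothetical consumer sketch, assuming fw_info is a struct dc_firmware_info embedded in dc_bios:

    /* Hypothetical consumer: use the cached firmware info and honor the
     * validity flag instead of re-running the command-table parse.
     */
    static enum bp_result example_get_cached_fw_info(struct dc_bios *dcb,
            struct dc_firmware_info *info)
    {
        if (!dcb->fw_info_valid)
            return BP_RESULT_FAILURE;

        *info = dcb->fw_info;
        return BP_RESULT_OK;
    }
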
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6aa2e56dfb67..2f1c9584ac32 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -111,7 +111,7 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
-static void destruct(struct bios_parser *bp)
+static void bios_parser2_destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb)
return;
}
- destruct(bp);
+ bios_parser2_destruct(bp);
kfree(bp);
*dcb = NULL;
@@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
struct atom_display_object_path_v2 *object;
struct atom_common_record_header *header;
struct atom_i2c_record *record;
+ struct atom_i2c_record dummy_record = {0};
struct bios_parser *bp = BP_FROM_DCB(dcb);
if (!info)
return BP_RESULT_BADINPUT;
+ if (id.type == OBJECT_TYPE_GENERIC) {
+ dummy_record.i2c_id = id.id;
+
+ if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK)
+ return BP_RESULT_OK;
+ else
+ return BP_RESULT_NORECORD;
+ }
+
object = get_bios_object(bp, id);
if (!object)
@@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info(
struct atom_gpio_pin_lut_v2_1 *header;
uint32_t count = 0;
unsigned int table_index = 0;
+ bool find_valid = false;
if (!info)
return BP_RESULT_BADINPUT;
@@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info(
- sizeof(struct atom_common_table_header))
/ sizeof(struct atom_gpio_pin_assignment);
- table_index = record->i2c_id & I2C_HW_LANE_MUX;
-
- if (count < table_index) {
- bool find_valid = false;
-
- for (table_index = 0; table_index < count; table_index++) {
- if (((record->i2c_id & I2C_HW_CAP) == (
- header->gpio_pin[table_index].gpio_id &
- I2C_HW_CAP)) &&
- ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
- (header->gpio_pin[table_index].gpio_id &
- I2C_HW_ENGINE_ID_MASK)) &&
- ((record->i2c_id & I2C_HW_LANE_MUX) ==
- (header->gpio_pin[table_index].gpio_id &
- I2C_HW_LANE_MUX))) {
- /* still valid */
- find_valid = true;
- break;
- }
+ for (table_index = 0; table_index < count; table_index++) {
+ if (((record->i2c_id & I2C_HW_CAP) == (
+ header->gpio_pin[table_index].gpio_id &
+ I2C_HW_CAP)) &&
+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_ENGINE_ID_MASK)) &&
+ ((record->i2c_id & I2C_HW_LANE_MUX) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_LANE_MUX))) {
+ /* still valid */
+ find_valid = true;
+ break;
}
- /* If we don't find the entry that we are looking for then
- * we will return BP_Result_BadBiosTable.
- */
- if (find_valid == false)
- return BP_RESULT_BADBIOSTABLE;
}
+ /* If we don't find the entry that we are looking for then
+ * we will return BP_Result_BadBiosTable.
+ */
+ if (find_valid == false)
+ return BP_RESULT_BADBIOSTABLE;
+
/* get the GPIO_I2C_INFO */
info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
@@ -828,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
case 1:
return get_ss_info_v4_1(bp, signal, index, ss_info);
case 2:
+ case 3:
return get_ss_info_v4_2(bp, signal, index, ss_info);
default:
break;
@@ -986,7 +993,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
break;
default:
break;
- };
+ }
/* Unidentified device ID, return empty support mask. */
return 0;
@@ -1205,6 +1212,8 @@ static enum bp_result get_firmware_info_v3_1(
bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
}
+ info->oem_i2c_present = false;
+
return BP_RESULT_OK;
}
@@ -1283,6 +1292,13 @@ static enum bp_result get_firmware_info_v3_2(
bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
}
+ if (firmware_info->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
+
return BP_RESULT_OK;
}
@@ -1402,10 +1418,8 @@ static enum bp_result get_integrated_info_v11(
info->ma_channel_number = info_v11->umachannelnumber;
info->lvds_ss_percentage =
le16_to_cpu(info_v11->lvds_ss_percentage);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
info->dp_ss_control =
le16_to_cpu(info_v11->reserved1);
-#endif
info->lvds_sspread_rate_in_10hz =
le16_to_cpu(info_v11->lvds_ss_rate_10hz);
info->hdmi_ss_percentage =
@@ -1613,8 +1627,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
-
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -1627,6 +1639,7 @@ static enum bp_result construct_integrated_info(
/* Don't need to check major revision as they are all 1 */
switch (revision.minor) {
case 11:
+ case 12:
result = get_integrated_info_v11(bp, info);
break;
default:
@@ -1644,10 +1657,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j-1].max_supported_clk
) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
@@ -1829,7 +1840,6 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
- struct bios_parser *bp;
enum bp_result record_result;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
@@ -1838,7 +1848,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
- bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
return BP_RESULT_BADINPUT;
@@ -1881,8 +1890,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_device_tag = bios_parser_get_device_tag,
- .get_firmware_info = bios_parser_get_firmware_info,
-
.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
.get_ss_entry_number = bios_parser_get_ss_entry_number,
@@ -1920,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_board_layout_info = bios_get_board_layout_info,
};
-static bool bios_parser_construct(
+static bool bios_parser2_construct(
struct bios_parser *bp,
struct bp_init_data *init,
enum dce_version dce_version)
@@ -1998,6 +2005,7 @@ static bool bios_parser_construct(
dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+ bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;
return true;
}
@@ -2012,7 +2020,7 @@ struct dc_bios *firmware_parser_create(
if (!bp)
return NULL;
- if (bios_parser_construct(bp, init, dce_version))
+ if (bios_parser2_construct(bp, init, dce_version))
return &bp->base;
kfree(bp);
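
get_gpio_i2c_info() now always walks the GPIO pin LUT and accepts a pin when the record's i2c_id and the pin's gpio_id agree under the I2C_HW_CAP, I2C_HW_ENGINE_ID_MASK and I2C_HW_LANE_MUX masks; the new OBJECT_TYPE_GENERIC path in bios_parser_get_i2c_info() reuses this via a dummy record. The match condition, folded into a single predicate for readability (an illustrative rewrite, not the driver's code; the mask macros come from the existing bios parser headers):

    static bool i2c_id_matches_gpio_id(uint32_t i2c_id, uint32_t gpio_id)
    {
        const uint32_t mask = I2C_HW_CAP | I2C_HW_ENGINE_ID_MASK | I2C_HW_LANE_MUX;

        /* A pin matches when both IDs agree on all three mask fields. */
        return (i2c_id & mask) == (gpio_id & mask);
    }
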
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index bb2e8105e6ab..c4ba6e84db65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -37,6 +37,8 @@
#include "bios_parser_types_internal2.h"
#include "amdgpu.h"
+#include "dc_dmub_srv.h"
+#include "dc.h"
#define DC_LOGGER \
bp->base.ctx->logger
@@ -87,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5(
struct bios_parser *bp,
struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_fallback(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
static void init_dig_encoder_control(struct bios_parser *bp)
{
uint32_t version =
@@ -98,11 +104,26 @@ static void init_dig_encoder_control(struct bios_parser *bp)
break;
default:
dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
- bp->cmd_tbl.dig_encoder_control = NULL;
+ bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
break;
}
}
+static void encoder_control_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct dig_encoder_stream_setup_parameters_v1_5 *dig)
+{
+ struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+
+ encoder_control.header.type = DMUB_CMD__VBIOS;
+ encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+ encoder_control.encoder_control.dig.stream_param = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result encoder_control_digx_v1_5(
struct bios_parser *bp,
struct bp_encoder_control *cntl)
@@ -155,12 +176,30 @@ static enum bp_result encoder_control_digx_v1_5(
break;
}
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ encoder_control_dmcub(bp->base.ctx->dmub_srv, &params);
+ return BP_RESULT_OK;
+ }
+
if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
result = BP_RESULT_OK;
return result;
}
+static enum bp_result encoder_control_fallback(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return encoder_control_digx_v1_5(bp, cntl);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/*****************************************************************************
******************************************************************************
**
@@ -173,6 +212,10 @@ static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_fallback(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
@@ -186,11 +229,27 @@ static void init_transmitter_control(struct bios_parser *bp)
break;
default:
dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
- bp->cmd_tbl.transmitter_control = NULL;
+ bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
break;
}
}
+static void transmitter_control_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct dig_transmitter_control_parameters_v1_6 *dig)
+{
+ struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
+
+ transmitter_control.header.type = DMUB_CMD__VBIOS;
+ transmitter_control.header.sub_type =
+ DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
+ transmitter_control.transmitter_control.dig = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl)
@@ -222,6 +281,11 @@ static enum bp_result transmitter_control_v1_6(
__func__, ps.param.symclk_10khz);
}
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param);
+ return BP_RESULT_OK;
+ }
/*color_depth not used any more, driver has deep color factor in the Phyclk*/
if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
@@ -229,6 +293,18 @@ static enum bp_result transmitter_control_v1_6(
return result;
}
+static enum bp_result transmitter_control_fallback(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return transmitter_control_v1_6(bp, cntl);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/******************************************************************************
******************************************************************************
**
@@ -241,6 +317,10 @@ static enum bp_result set_pixel_clock_v7(
struct bios_parser *bp,
struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_fallback(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
static void init_set_pixel_clock(struct bios_parser *bp)
{
switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
@@ -250,12 +330,25 @@ static void init_set_pixel_clock(struct bios_parser *bp)
default:
dm_output_to_console("Don't have set_pixel_clock for v%d\n",
BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
- bp->cmd_tbl.set_pixel_clock = NULL;
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
break;
}
}
+static void set_pixel_clock_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct set_pixel_clock_parameter_v1_7 *clk)
+{
+ struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+ pixel_clock.header.type = DMUB_CMD__VBIOS;
+ pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+ pixel_clock.pixel_clock.clk = *clk;
+
+ dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
static enum bp_result set_pixel_clock_v7(
struct bios_parser *bp,
@@ -331,12 +424,30 @@ static enum bp_result set_pixel_clock_v7(
if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk);
+ return BP_RESULT_OK;
+ }
+
if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
result = BP_RESULT_OK;
}
return result;
}
+static enum bp_result set_pixel_clock_fallback(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return set_pixel_clock_v7(bp, bp_params);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/******************************************************************************
******************************************************************************
**
@@ -569,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1(
enum controller_id crtc_id,
enum bp_pipe_control_action action);
+static enum bp_result enable_disp_power_gating_fallback(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
static void init_enable_disp_power_gating(
struct bios_parser *bp)
{
@@ -580,11 +696,26 @@ static void init_enable_disp_power_gating(
default:
dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
- bp->cmd_tbl.enable_disp_power_gating = NULL;
+ bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback;
break;
}
}
+static void enable_disp_power_gating_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct enable_disp_power_gating_parameters_v2_1 *pwr)
+{
+ struct dmub_rb_cmd_enable_disp_power_gating power_gating;
+
+ power_gating.header.type = DMUB_CMD__VBIOS;
+ power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+ power_gating.power_gating.pwr = *pwr;
+
+ dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result enable_disp_power_gating_v2_1(
struct bios_parser *bp,
enum controller_id crtc_id,
@@ -604,12 +735,32 @@ static enum bp_result enable_disp_power_gating_v2_1(
ps.param.enable =
bp->cmd_helper->disp_power_gating_action_to_atom(action);
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv,
+ &ps.param);
+ return BP_RESULT_OK;
+ }
+
if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
result = BP_RESULT_OK;
return result;
}
+static enum bp_result enable_disp_power_gating_fallback(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return enable_disp_power_gating_v2_1(bp, crtc_id, action);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/******************************************************************************
*******************************************************************************
**
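
Each new *_dmcub() helper above follows the same three-step DMUB path: fill in a dmub_rb_cmd_* struct with DMUB_CMD__VBIOS as the type, queue it, execute, and wait for idle. Reduced to its service calls (the header parameter type is an assumption inferred from those call sites):

    /* Sketch of the DMUB submission path shared by the *_dmcub() helpers. */
    static void example_submit_dmub_cmd(struct dc_dmub_srv *dmcub,
            struct dmub_cmd_header *header)
    {
        dc_dmub_srv_cmd_queue(dmcub, header);   /* copy the command into the ring */
        dc_dmub_srv_cmd_execute(dmcub);         /* kick DMCUB to process it */
        dc_dmub_srv_wait_idle(dmcub);           /* wait for the firmware to finish */
    }
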
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index f9439dfc7b75..7388c987c595 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,18 +55,19 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_22:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#endif
+ case DCN_VERSION_2_1:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
index ca24154468c7..11bf247bb180 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -153,38 +153,10 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
- uint8_t atom_dig_encoder_sel = 0;
-
- switch (id) {
- case ENGINE_ID_DIGA:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
- break;
- case ENGINE_ID_DIGB:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
- break;
- case ENGINE_ID_DIGC:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
- break;
- case ENGINE_ID_DIGD:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
- break;
- case ENGINE_ID_DIGE:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
- break;
- case ENGINE_ID_DIGF:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
- break;
- case ENGINE_ID_DIGG:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
- break;
- case ENGINE_ID_UNKNOWN:
- /* No DIG_FRONT is associated to DIG_BACKEND */
- atom_dig_encoder_sel = 0;
- break;
- default:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
- break;
- }
+ /* On any ASIC after DCE80, we manually program the DIG_FE
+ * selection (see connect_dig_be_to_fe function of the link
+ * encoder), so translation should always return 0 (no FE).
+ */
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 0237ae575068..755b6e33140a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -150,38 +150,10 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
- uint8_t atom_dig_encoder_sel = 0;
-
- switch (id) {
- case ENGINE_ID_DIGA:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
- break;
- case ENGINE_ID_DIGB:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
- break;
- case ENGINE_ID_DIGC:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
- break;
- case ENGINE_ID_DIGD:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
- break;
- case ENGINE_ID_DIGE:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
- break;
- case ENGINE_ID_DIGF:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
- break;
- case ENGINE_ID_DIGG:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
- break;
- case ENGINE_ID_UNKNOWN:
- /* No DIG_FRONT is associated to DIG_BACKEND */
- atom_dig_encoder_sel = 0;
- break;
- default:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
- break;
- }
+ /* On any ASIC after DCE80, we manually program the DIG_FE
+ * selection (see connect_dig_be_to_fe function of the link
+ * encoder), so translation should always return 0 (no FE).
+ */
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
index 452034f83e4c..06b4f7fa4a50 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -150,38 +150,10 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
- uint8_t atom_dig_encoder_sel = 0;
-
- switch (id) {
- case ENGINE_ID_DIGA:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
- break;
- case ENGINE_ID_DIGB:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
- break;
- case ENGINE_ID_DIGC:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
- break;
- case ENGINE_ID_DIGD:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
- break;
- case ENGINE_ID_DIGE:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
- break;
- case ENGINE_ID_DIGF:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
- break;
- case ENGINE_ID_DIGG:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
- break;
- case ENGINE_ID_UNKNOWN:
- /* No DIG_FRONT is associated to DIG_BACKEND */
- atom_dig_encoder_sel = 0;
- break;
- default:
- atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
- break;
- }
+ /* On any ASIC after DCE80, we manually program the DIG_FE
+ * selection (see connect_dig_be_to_fe function of the link
+ * encoder), so translation should always return 0 (no FE).
+ */
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 95f332ee3e7e..4674aca8f206 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -1,5 +1,6 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -24,21 +25,38 @@
# It calculates Bandwidth and Watermarks values for HW programming
#
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
+ifdef CONFIG_X86
+calcs_ccflags := -mhard-float -msse
endif
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_PPC64
+calcs_ccflags := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
+calcs_ccflags += -msse2
+endif
+endif
-CFLAGS_dcn_calcs.o := $(calcs_ccflags)
-CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
-CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
+CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif
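
These objects are now built with hard-float code generation (SSE on x86, AltiVec on PPC64), so runtime FP use still has to be bracketed; the dcn_calcs.c hunks further down replace kernel_fpu_begin()/kernel_fpu_end() with the DC_FP_START()/DC_FP_END() wrappers for exactly that reason. An abbreviated sketch of the pattern, modeled on dcn_bw_apply_registry_override() below:

    static bool example_apply_fp_override(struct dc *dc)
    {
        bool updated = false;

        DC_FP_START();      /* enter the FPU-safe region before touching doubles */
        if (dc->debug.sr_exit_time_ns) {
            dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0;
            updated = true;
        }
        DC_FP_END();        /* leave the FPU-safe region */

        return updated;
    }
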
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 7108d51a9c5b..5d081c42e81b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
+#include "resource.h"
#include "dm_services.h"
#include "dce_calcs.h"
#include "dc.h"
@@ -153,14 +154,14 @@ static void calculate_bandwidth(
- if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
- else {
- d0_underlay_enable = 1;
- }
- if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
- else {
- d1_underlay_enable = 1;
- }
+ if (data->d0_underlay_mode == bw_def_none)
+ d0_underlay_enable = false;
+ else
+ d0_underlay_enable = true;
+ if (data->d1_underlay_mode == bw_def_none)
+ d1_underlay_enable = false;
+ else
+ d1_underlay_enable = true;
data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
switch (data->underlay_surface_type) {
case bw_def_420:
@@ -285,8 +286,8 @@ static void calculate_bandwidth(
data->cursor_width_pixels[2] = bw_int_to_fixed(0);
data->cursor_width_pixels[3] = bw_int_to_fixed(0);
/* graphics surface parameters from spreadsheet*/
- fbc_enabled = 0;
- lpt_enabled = 0;
+ fbc_enabled = false;
+ lpt_enabled = false;
for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
if (i < data->number_of_displays + 4) {
if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
@@ -337,9 +338,9 @@ static void calculate_bandwidth(
data->access_one_channel_only[i] = 0;
}
if (data->fbc_en[i] == 1) {
- fbc_enabled = 1;
+ fbc_enabled = true;
if (data->lpt_en[i] == 1) {
- lpt_enabled = 1;
+ lpt_enabled = true;
}
}
data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
@@ -2852,7 +2853,7 @@ static void populate_initial_data(
data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height);
data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width);
data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
- pipe[i].bottom_pipe->plane_state->plane_size.grph.surface_pitch);
+ pipe[i].bottom_pipe->plane_state->plane_size.surface_pitch);
data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps);
data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps);
data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
@@ -2977,6 +2978,32 @@ static void populate_initial_data(
data->number_of_displays = num_displays;
}
+static bool all_displays_in_sync(const struct pipe_ctx pipe[],
+ int pipe_count)
+{
+ const struct pipe_ctx *active_pipes[MAX_PIPES];
+ int i, num_active_pipes = 0;
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || pipe[i].top_pipe)
+ continue;
+
+ active_pipes[num_active_pipes++] = &pipe[i];
+ }
+
+ if (!num_active_pipes)
+ return false;
+
+ for (i = 1; i < num_active_pipes; ++i) {
+ if (!resource_are_streams_timing_synchronizable(
+ active_pipes[0]->stream, active_pipes[i]->stream)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
* Return:
* true - Display(s) configuration supported.
@@ -2998,8 +3025,10 @@ bool bw_calcs(struct dc_context *ctx,
populate_initial_data(pipe, pipe_count, data);
- /*TODO: this should be taken out calcs output and assigned during timing sync for pplib use*/
- calcs_output->all_displays_in_sync = false;
+ if (ctx->dc->config.multi_mon_pp_mclk_switch)
+ calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
+ else
+ calcs_output->all_displays_in_sync = false;
if (data->number_of_displays != 0) {
uint8_t yclk_lvl, sclk_lvl;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 38365dd911a3..1a37550731de 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1,5 +1,6 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -53,13 +54,9 @@
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/* Defaults from spreadsheet rev#247.
* RV2 delta: dram_clock_change_latency, max_num_dpp
*/
-#else
-/* Defaults from spreadsheet rev#247 */
-#endif
const struct dcn_soc_bounding_box dcn10_soc_defaults = {
/* latencies */
.sr_exit_time = 17, /*us*/
@@ -329,7 +326,7 @@ static void pipe_ctx_to_e2e_pipe_params (
dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
}
input->src.dcc_rate = 1;
- input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
+ input->src.meta_pitch = pipe->plane_state->dcc.meta_pitch;
input->src.source_scan = dm_horz;
input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle;
@@ -626,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
{
bool updated = false;
- kernel_fpu_begin();
+ DC_FP_START();
if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
&& dc->debug.sr_exit_time_ns) {
updated = true;
@@ -662,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
dc->dcn_soc->dram_clock_change_latency =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
+ DC_FP_END();
return updated;
}
@@ -705,6 +702,17 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
+
+unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
+{
+ /* for dali & pollock, the highest voltage level we want is 0 */
+ if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
+ return 0;
+
+ /* we are ok with all levels */
+ return 4;
+}
+
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
@@ -731,7 +739,8 @@ bool dcn_validate_bandwidth(
dcn_bw_sync_calcs_and_dml(dc);
memset(v, 0, sizeof(*v));
- kernel_fpu_begin();
+ DC_FP_START();
+
v->sr_exit_time = dc->dcn_soc->sr_exit_time;
v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
v->urgent_latency = dc->dcn_soc->urgent_latency;
@@ -1263,12 +1272,12 @@ bool dcn_validate_bandwidth(
bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
- kernel_fpu_end();
+ DC_FP_END();
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level != 5)
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
return true;
else
return false;
@@ -1426,37 +1435,49 @@ void dcn_bw_update_from_pplib(struct dc *dc)
struct dc_context *ctx = dc->ctx;
struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
bool res;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
- kernel_fpu_begin();
+ DC_FP_START();
if (res)
res = verify_clock_values(&fclks);
if (res) {
- ASSERT(fclks.num_levels >= 3);
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
- (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
- (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
- (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
+ ASSERT(fclks.num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks.num_levels -
+ (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks.num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
} else
BREAK_TO_DEBUGGER();
- kernel_fpu_end();
+ DC_FP_END();
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
- kernel_fpu_begin();
+ DC_FP_START();
if (res)
res = verify_clock_values(&dcfclks);
@@ -1469,7 +1490,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
- kernel_fpu_end();
+ DC_FP_END();
}
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
@@ -1484,11 +1505,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- kernel_fpu_begin();
+ DC_FP_START();
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
- kernel_fpu_end();
+ DC_FP_END();
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
@@ -1539,7 +1560,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
- kernel_fpu_begin();
+ DC_FP_START();
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
@@ -1728,5 +1749,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
- kernel_fpu_end();
+ DC_FP_END();
}
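The index clamping added in dcn_bw_update_from_pplib() above replaces the old ASSERT(fclks.num_levels >= 3). A quick standalone check (the level counts below are just loop values, not real PPLib data) shows that the chosen vmid/vnom/vmax indices stay in bounds for any reported number of fclk levels:

#include <stdio.h>

int main(void)
{
    unsigned int num_levels;

    for (num_levels = 1; num_levels <= 4; num_levels++) {
        unsigned int vmin0p65_idx = 0;
        unsigned int vmid0p72_idx = num_levels -
            (num_levels > 2 ? 3 : (num_levels > 1 ? 2 : 1));
        unsigned int vnom0p8_idx = num_levels - (num_levels > 1 ? 2 : 1);
        unsigned int vmax0p9_idx = num_levels - 1;

        printf("levels=%u -> vmin=%u vmid=%u vnom=%u vmax=%u\n",
               num_levels, vmin0p65_idx, vmid0p72_idx,
               vnom0p8_idx, vmax0p9_idx);
    }
    return 0;
}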
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 003c27767e9c..c0f6a8c7de7d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -63,7 +63,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
###############################################################################
# DCN10
###############################################################################
@@ -72,9 +72,7 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o
AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
###############################################################################
# DCN20
###############################################################################
@@ -83,5 +81,19 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o
AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
+
+###############################################################################
+# DCN21
+###############################################################################
+CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
+
+# prevent build errors regarding soft-float vs hard-float FP ABI tags
+# this code is currently unused on ppc64, as it applies to Renoir APUs only
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
+AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 6b8fc5cbabb8..a78e5c74c79c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -37,6 +37,7 @@
#include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
+#include "dcn21/rn_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
@@ -62,6 +63,31 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (dc->hwss.exit_optimized_pwr_state)
+ dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
+
+ if (edp_link) {
+ clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ dc_link_set_psr_allow_active(edp_link, false, false);
+ }
+
+}
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link)
+ dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false);
+
+ if (dc->hwss.optimize_pwr_state)
+ dc->hwss.optimize_pwr_state(dc, dc->current_state);
+
+}
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
@@ -106,8 +132,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dce120_clk_mgr_construct(ctx, clk_mgr);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
+ if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
+ ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
+ /* TEMP: this check has to come before ASICREV_IS_RENOIR, */
+ /* which also incorrectly returns true for Dali/Pollock */
+ rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+ break;
+ }
+ if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
+ rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ break;
+ }
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
@@ -118,13 +155,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
break;
-#endif /* Family RV */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case FAMILY_NV:
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
-#endif /* Family NV */
+#endif /* Family RV and NV*/
default:
ASSERT(0); /* Unknown Asic */
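The Dali/Pollock branch is placed ahead of the Renoir branch above because the revision checks overlap. A tiny illustration of why the ordering matters with overlapping range macros; the thresholds below are invented for the example and are not the real ASICREV boundaries:

#include <stdio.h>

#define EXAMPLE_IS_RENOIR(rev) ((rev) >= 0x91)                  /* broad range (invented) */
#define EXAMPLE_IS_DALI(rev)   ((rev) >= 0xA0 && (rev) < 0xB0)  /* subset (invented) */

static const char *pick_clk_mgr(int rev)
{
    if (EXAMPLE_IS_DALI(rev))   /* narrower check must run first */
        return "rv2_clk_mgr";
    if (EXAMPLE_IS_RENOIR(rev))
        return "rn_clk_mgr";
    return "unknown";
}

int main(void)
{
    printf("rev 0xA1 -> %s\n", pick_clk_mgr(0xA1)); /* rv2, despite also matching the Renoir range */
    printf("rev 0x91 -> %s\n", pick_clk_mgr(0x91)); /* rn */
    return 0;
}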
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 814450fefffa..26db1c5d4e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -239,7 +239,7 @@ int dce_set_clock(
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 64);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 64);
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
@@ -273,20 +273,14 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
{
struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
- struct integrated_info info = { { { 0 } } };
- struct dc_firmware_info fw_info = { { 0 } };
int i;
if (bp->integrated_info)
- info = *bp->integrated_info;
-
- clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
- bp->funcs->get_firmware_info(bp, &fw_info);
- clk_mgr_dce->dentist_vco_freq_khz =
- fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0)
- clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
+ clk_mgr_dce->base.dentist_vco_freq_khz = 3600000;
}
/*update the maximum display clock for each power state*/
@@ -317,9 +311,10 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
/*Do not allow bad VBIOS/SBIOS to override with invalid values,
* check for > 100MHz*/
- if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
- clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
- info.disp_clk_voltage[i].max_supported_clk;
+ if (bp->integrated_info)
+ if (bp->integrated_info->disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+ bp->integrated_info->disp_clk_voltage[i].max_supported_clk;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
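The dentist_vco_freq_khz accesses above feed the usual DFS formula clock = scale_factor * VCO / divider. A worked example with illustrative numbers; the scale factor of 4 is an assumption standing in for DENTIST_DIVIDER_RANGE_SCALE_FACTOR, and the divider value is made up:

#include <stdio.h>

#define EXAMPLE_DIVIDER_RANGE_SCALE_FACTOR 4   /* assumed value for the example */

int main(void)
{
    int dentist_vco_freq_khz = 3600000;        /* 3.6 GHz fallback VCO */
    int target_div = 24;                       /* example divider read back from HW */

    int dp_ref_clk_khz =
        (EXAMPLE_DIVIDER_RANGE_SCALE_FACTOR * dentist_vco_freq_khz) / target_div;

    printf("dp_ref_clk = %d kHz\n", dp_ref_clk_khz);   /* 600000 kHz = 600 MHz */
    return 0;
}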
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 5cc3acccda2a..b1e657e137a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -98,11 +98,14 @@ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
struct dc_stream_state *stream = context->streams[j];
uint32_t vertical_blank_in_pixels = 0;
uint32_t vertical_blank_time = 0;
+ uint32_t vertical_total_min = stream->timing.v_total;
+ struct dc_crtc_timing_adjust adjust = stream->adjust;
+ if (adjust.v_total_max != adjust.v_total_min)
+ vertical_total_min = adjust.v_total_min;
vertical_blank_in_pixels = stream->timing.h_total *
- (stream->timing.v_total
+ (vertical_total_min
- stream->timing.v_addressable);
-
vertical_blank_time = vertical_blank_in_pixels
* 10000 / stream->timing.pix_clk_100hz;
@@ -171,6 +174,10 @@ void dce11_pplib_apply_display_requirements(
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
+
+ if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
+ memory_type_multiplier = MEMORY_TYPE_HBM;
pp_display_cfg->all_displays_in_sync =
context->bw_ctx.bw.dce.all_displays_in_sync;
@@ -183,8 +190,20 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->cpu_pstate_separation_time =
context->bw_ctx.bw.dce.blackout_recovery_time_us;
- pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
- / MEMORY_TYPE_MULTIPLIER_CZ;
+ /*
+ * TODO: determine whether the bandwidth has reached the memory's limitation,
+ * then change the minimum memory clock based on the real-time bandwidth
+ * limitation.
+ */
+ if (ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) {
+ pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz,
+ (uint32_t) div64_s64(
+ div64_s64(dc->bw_vbios->high_yclk.value,
+ memory_type_multiplier), 10000));
+ } else {
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
+ / memory_type_multiplier;
+ }
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
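For the v_total_min handling added to dce110_get_min_vblank_time_us() above, the blank-time arithmetic is unchanged; a worked example with illustrative 1080p-style timings (not taken from any real panel):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t h_total = 2200, v_total_min = 1125, v_addressable = 1080;
    uint32_t pix_clk_100hz = 1485000;   /* 148.5 MHz expressed in units of 100 Hz */

    uint32_t vblank_pixels  = h_total * (v_total_min - v_addressable);
    uint32_t vblank_time_us = vblank_pixels * 10000 / pix_clk_100hz;

    printf("vblank = %u pixels, %u us\n", vblank_pixels, vblank_time_us);   /* 99000 pixels, 666 us */
    return 0;
}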
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 7c746ef1e32e..d031bd3d3072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
- struct dc *core_dc = clk_mgr_base->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 62);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
bp->funcs->set_dce_clock(bp, &dce_clk_params);
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
@@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
{
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
- struct dc *core_dc = clk_mgr->base.ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr->dentist_vco_freq_khz / 62);
+ clk_mgr->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index caf8a4a4e442..3fab9296918a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -34,6 +34,11 @@
#include "rv1_clk_mgr_vbios_smu.h"
#include "rv1_clk_mgr_clk.h"
+void rv1_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+}
+
static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -134,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
ASSERT(clk_mgr->pp_smu);
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -232,6 +240,7 @@ static void rv1_enable_pme_wa(struct clk_mgr *clk_mgr_base)
}
static struct clk_mgr_funcs rv1_clk_funcs = {
+ .init_clocks = rv1_init_clocks,
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rv1_update_clocks,
.enable_pme_wa = rv1_enable_pme_wa,
@@ -246,7 +255,6 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
- struct dc_firmware_info fw_info = { { 0 } };
clk_mgr->base.ctx = ctx;
clk_mgr->pp_smu = pp_smu;
@@ -261,12 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_
clk_mgr->base.dprefclk_khz = 600000;
if (bp->integrated_info)
- clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_mgr->dentist_vco_freq_khz == 0) {
- bp->funcs->get_firmware_info(bp, &fw_info);
- clk_mgr->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
+ clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 1897e91c8ccb..97b7f32294fd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
- struct dc *core_dc = clk_mgr->base.ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
@@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
/* Actual dispclk set is returned in the parameter register */
actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 50bfb5921de0..49ce46b543ea 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -26,9 +26,8 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
-
-#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
+#include "dcn20_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -102,30 +101,35 @@ uint32_t dentist_get_did_from_divider(int divider)
}
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
- struct dc_state *context)
+ struct dc_state *context, bool safe_to_lower)
{
int i;
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
-
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
+ int dpp_inst, dppclk_khz, prev_dppclk_khz;
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ /* Loop index will match dpp->inst if resource exists,
+ * and we want to avoid dependency on dpp object
+ */
+ dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz);
+
+ prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ }
}
}
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
{
int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
@@ -153,7 +157,20 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool enter_display_off = false;
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
+ bool force_reset = false;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ if (clk_mgr_base->clks.dispclk_khz == 0 ||
+ dc->debug.force_clock_mode & 0x1) {
+ // This is from resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK, but still need to set them for S3.
+ force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+ // force_clock_mode 0x1: force a clock reset even if it is the same clock, as long as it is in the Passive level.
+ }
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs;
@@ -172,6 +189,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
}
+
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -196,6 +214,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
@@ -225,19 +244,22 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
- if (dc->config.forced_clocks == false) {
+
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr);
} else {
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
- if (update_dppclk)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
@@ -250,6 +272,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
@@ -287,14 +311,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
- /* Both fclk and dppclk ref are run on the same scemi clock so we
- * need to keep the same value for both
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
*/
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+ // Both fclk and ref_dppclk run on the same scemi clock.
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
@@ -303,6 +331,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr)
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
}
void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -318,11 +347,82 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
}
}
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dispclk_wdivider;
+ uint32_t dppclk_wdivider;
+ int disp_divider;
+ int dpp_divider;
+
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
+
+ disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+ dpp_divider = dentist_get_divider_from_did(dppclk_wdivider);
+
+ if (disp_divider && dpp_divider) {
+ /* Calculate the current DFS clock, in kHz.*/
+ clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+ clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
+ }
+
+}
+
+void dcn2_get_clock(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg)
+{
+
+ if (clock_type == DC_CLOCK_TYPE_DISPCLK) {
+ clock_cfg->max_clock_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz;
+ clock_cfg->min_clock_khz = DCN_MINIMUM_DISPCLK_Khz;
+ clock_cfg->current_clock_khz = clk_mgr->clks.dispclk_khz;
+ clock_cfg->bw_requirequired_clock_khz = context->bw_ctx.bw.dcn.clk.bw_dispclk_khz;
+ }
+ if (clock_type == DC_CLOCK_TYPE_DPPCLK) {
+ clock_cfg->max_clock_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ clock_cfg->min_clock_khz = DCN_MINIMUM_DPPCLK_Khz;
+ clock_cfg->current_clock_khz = clk_mgr->clks.dppclk_khz;
+ clock_cfg->bw_requirequired_clock_khz = context->bw_ctx.bw.dcn.clk.bw_dppclk_khz;
+ }
+}
+
+static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->socclk_khz != b->socclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
static struct clk_mgr_funcs dcn2_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks,
.init_clocks = dcn2_init_clocks,
- .enable_pme_wa = dcn2_enable_pme_wa
+ .enable_pme_wa = dcn2_enable_pme_wa,
+ .get_clock = dcn2_get_clock,
+ .are_clock_states_equal = dcn2_are_clock_states_equal,
};
@@ -350,7 +450,7 @@ void dcn20_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn2_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
} else {
/* DFS Slice 2 should be used for DPREFCLK */
@@ -374,15 +474,15 @@ void dcn20_clk_mgr_construct(
pll_req = dc_fixpt_mul_int(pll_req, 100000);
/* integer part is now VCO frequency in kHz */
- clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
+ clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
/* Calculate the DPREFCLK in kHz.*/
clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
}
//Integrated_info table does not exist on dGPU projects so should not be referenced
//anywhere in code for dGPUs.
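The update paths above repeatedly gate clock programming on safe_to_lower: a higher clock is applied immediately, a lower one only when the caller says lowering is safe. A minimal sketch of that gating; the helper name and the sample values are illustrative, and the driver's own should_set_clock() is assumed to behave this way:

#include <stdbool.h>
#include <stdio.h>

static bool sketch_should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
    /* raise immediately; lower only when the caller says it is safe */
    return (new_khz > cur_khz) || (safe_to_lower && new_khz < cur_khz);
}

int main(void)
{
    printf("%d\n", sketch_should_set_clock(false, 400000, 600000)); /* 0: keep the higher clock */
    printf("%d\n", sketch_should_set_clock(true,  400000, 600000)); /* 1: lower now */
    printf("%d\n", sketch_should_set_clock(false, 800000, 600000)); /* 1: raise always */
    return 0;
}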
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index 5661a5a89847..0b9c045b0c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower);
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
- struct dc_state *context);
+ struct dc_state *context, bool safe_to_lower);
void dcn2_init_clocks(struct clk_mgr *clk_mgr);
@@ -45,4 +45,14 @@ void dcn20_clk_mgr_construct(struct dc_context *ctx,
uint32_t dentist_get_did_from_divider(int divider);
+void dcn2_get_clock(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg);
+
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
+
+
#endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
new file mode 100644
index 000000000000..9ef3f7b91a1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -0,0 +1,758 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+
+#include "dcn20/dcn20_clk_mgr.h"
+#include "rn_clk_mgr.h"
+
+
+#include "dce100/dce_clk_mgr.h"
+#include "rn_clk_mgr_vbios_smu.h"
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dm_helpers.h"
+
+#include "atomfirmware.h"
+#include "clk/clk_10_0_2_offset.h"
+#include "clk/clk_10_0_2_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+
+/* Constants */
+
+#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
+
+/* Macros */
+
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+
+/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
+int rn_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+ bool tmds_present = false;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /*
+ * Only notify active stream or virtual stream.
+ * Need to notify virtual stream to work around
+ * headless case. HPD does not fire when system is in
+ * S0i2.
+ */
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+
+ /* WA for hang on HDMI after the display is turned off and back on */
+ if (display_count == 0 && tmds_present)
+ display_count = 1;
+
+ return display_count;
+}
+
+void rn_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool dpp_clock_lowered = false;
+
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ /*
+ * If it is safe to lower but we are already in the lower state, we don't have to do anything.
+ * Also, if safe_to_lower is false, we just go into the higher state.
+ */
+ if (safe_to_lower) {
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
+ } else {
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
+ clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+ rn_vbios_smu_set_phyclk(clk_mgr, clk_mgr_base->clks.phyclk_khz);
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ rn_vbios_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
+ }
+
+ // Workaround: limit dppclk to 100 MHz to avoid underflow on a lower eDP panel when switching to an additional 4K monitor.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ rn_vbios_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
+ update_dispclk = true;
+ }
+
+ if (dpp_clock_lowered) {
+ // increase per DPP DTO before lowering global dppclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ } else {
+ // increase global DPPCLK before lowering per DPP DTO
+ if (update_dppclk || update_dispclk)
+ rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+
+ if (update_dispclk &&
+ dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ /*update dmcu for wait_loop count*/
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ clk_mgr_base->clks.dispclk_khz / 1000 / 7);
+ }
+}
+
+
+static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+{
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+ unsigned int fbmult_frac_val = 0;
+ unsigned int fbmult_int_val = 0;
+
+
+ /*
+ * The register value of fbmult is in 8.16 format; we convert it to 31.32
+ * to leverage the fixed-point operations available in the driver.
+ */
+
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+ pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+ /*
+ * Since the fractional part is only 16 bits wide in the register but 32 bits
+ * in our fixed-point definition, shift left by 16 to obtain the correct value.
+ */
+ pll_req.value |= fbmult_frac_val << 16;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ /* integer part is now VCO frequency in kHz */
+ return dc_fixpt_floor(pll_req);
+}
+
+static void rn_dump_clk_registers_internal(struct rn_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL); //dcf deep sleep divider
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+/* This function collects raw clk register values */
+static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+{
+ struct rn_clk_internal internal = {0};
+ char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+ unsigned int chars_printed = 0;
+ unsigned int remaining_buffer = log_info->bufSize;
+
+ rn_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
+ if (log_info->enabled) {
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dcfclk,%d,%d,%d,%s\n",
+ regs_and_bypass->dcfclk,
+ regs_and_bypass->dcf_deep_sleep_divider,
+ regs_and_bypass->dcf_deep_sleep_allow,
+ bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dprefclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dprefclk,
+ bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dispclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dispclk,
+ bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ //split
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "SPLIT\n");
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ // REGISTER VALUES
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "reg_name,value,clk_type\n");
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
+ internal.CLK1_CLK3_CURRENT_CNT);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider\n",
+ internal.CLK1_CLK3_DS_CNTL);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow\n",
+ internal.CLK1_CLK3_ALLOW_DS);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_CURRENT_CNT,%d,dprefclk\n",
+ internal.CLK1_CLK2_CURRENT_CNT);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_CURRENT_CNT,%d,dispclk\n",
+ internal.CLK1_CLK0_CURRENT_CNT);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
+ internal.CLK1_CLK1_CURRENT_CNT);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
+ internal.CLK1_CLK3_BYPASS_CNTL);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass\n",
+ internal.CLK1_CLK2_BYPASS_CNTL);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass\n",
+ internal.CLK1_CLK0_BYPASS_CNTL);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+
+ chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass\n",
+ internal.CLK1_CLK1_BYPASS_CNTL);
+ remaining_buffer -= chars_printed;
+ *log_info->sum_chars_printed += chars_printed;
+ log_info->pBuf += chars_printed;
+ }
+}
+
+/* This function produces translated logical clk state values */
+void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
+{
+ struct clk_state_registers_and_bypass sb = { 0 };
+ struct clk_log_info log_info = { 0 };
+
+ rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
+
+ s->dprefclk_khz = sb.dprefclk * 1000;
+}
+
+void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ rn_vbios_smu_enable_pme_wa(clk_mgr);
+}
+
+void rn_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+}
+
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ /* skip empty entries, the smu array has no holes*/
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
+ ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* dcfclk will be used to select the WM */
+
+ if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
+ }
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* Modify previous watermark range to cover up to max */
+ ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+ ranges->num_reader_wm_sets = num_valid_sets;
+
+ /* modify the min and max to make sure we cover the whole range*/
+ ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* This is for writeback only; it does not matter currently as there is no writeback support */
+ ranges->num_writer_wm_sets = 1;
+ ranges->writer_wm_sets[0].wm_inst = WM_A;
+ ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+}
+
+static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
+ struct pp_smu_wm_range_sets ranges = {0};
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
+
+ if (!debug->disable_pplib_wm_range) {
+ build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ }
+
+}
+
+static bool rn_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+
+ return true;
+}
+
+
+static struct clk_mgr_funcs dcn21_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = rn_update_clocks,
+ .init_clocks = rn_init_clocks,
+ .enable_pme_wa = rn_enable_pme_wa,
+ .are_clock_states_equal = rn_are_clock_states_equal,
+ .notify_wm_ranges = rn_notify_wm_ranges
+};
+
+struct clk_bw_params rn_bw_params = {
+ .vram_type = Ddr4MemType,
+ .num_channels = 1,
+ .clk_table = {
+ .entries = {
+ {
+ .voltage = 0,
+ .dcfclk_mhz = 400,
+ .fclk_mhz = 400,
+ .memclk_mhz = 800,
+ .socclk_mhz = 0,
+ },
+ {
+ .voltage = 0,
+ .dcfclk_mhz = 483,
+ .fclk_mhz = 800,
+ .memclk_mhz = 1600,
+ .socclk_mhz = 0,
+ },
+ {
+ .voltage = 0,
+ .dcfclk_mhz = 602,
+ .fclk_mhz = 1067,
+ .memclk_mhz = 1067,
+ .socclk_mhz = 0,
+ },
+ {
+ .voltage = 0,
+ .dcfclk_mhz = 738,
+ .fclk_mhz = 1333,
+ .memclk_mhz = 1600,
+ .socclk_mhz = 0,
+ },
+ },
+
+ .num_entries = 4,
+ },
+
+};
+
+struct wm_table ddr4_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 6.09,
+ .sr_enter_plus_exit_time_us = 7.14,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ }
+};
+
+struct wm_table lpddr4_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 5.32,
+ .sr_enter_plus_exit_time_us = 6.38,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.82,
+ .sr_enter_plus_exit_time_us = 11.196,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.89,
+ .sr_enter_plus_exit_time_us = 11.24,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.748,
+ .sr_enter_plus_exit_time_us = 11.102,
+ .valid = true,
+ },
+ }
+};
+
+
+static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
+{
+ int i;
+
+ for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+ if (clock_table->DcfClocks[i].Vol == voltage)
+ return clock_table->DcfClocks[i].Freq;
+ }
+
+ ASSERT(0);
+ return 0;
+}
+
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
+{
+ int i, j = 0;
+
+ j = -1;
+
+ ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+
+ /* Find lowest DPM, FCLK is filled in reverse order*/
+
+ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+ if (clock_table->FClocks[i].Freq != 0) {
+ j = i;
+ break;
+ }
+ }
+
+ if (j == -1) {
+ /* clock table is all 0s, just use our own hardcode */
+ ASSERT(0);
+ return;
+ }
+
+ bw_params->clk_table.num_entries = j + 1;
+
+ for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
+ bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
+ }
+
+ bw_params->vram_type = bios_info->memory_type;
+ bw_params->num_channels = bios_info->ma_channel_number;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ bw_params->wm_table.entries[i].wm_inst = i;
+
+ if (i >= bw_params->clk_table.num_entries) {
+ bw_params->wm_table.entries[i].valid = false;
+ continue;
+ }
+
+ bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
+ bw_params->wm_table.entries[i].valid = true;
+ }
+
+ if (bw_params->vram_type == LpDdr4MemType) {
+ /*
+ * WM set D will be re-purposed for memory retraining
+ */
+ bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
+ bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
+ bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
+ bw_params->wm_table.entries[WM_D].valid = true;
+ }
+
+}
+
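A minimal standalone sketch of the table reversal performed by rn_clk_mgr_helper_populate_bw_params() above, assuming the SMU reports FCLK DPM levels highest-first and pads unused slots with zero; the types are simplified stand-ins for the driver structs and are not part of this patch.

/* Illustrative sketch only: mirrors the reversal done above with simplified types. */
#include <stdio.h>

#define NUM_LEVELS 4

static int build_ascending_fclk_table(const unsigned int *smu_fclk_mhz,
                                      unsigned int *out_mhz)
{
        int i, j = -1;

        /* the lowest DPM level sits at the highest non-zero index */
        for (i = NUM_LEVELS - 1; i >= 0; i--) {
                if (smu_fclk_mhz[i] != 0) {
                        j = i;
                        break;
                }
        }
        if (j == -1)
                return 0; /* table is all zeros: keep the hardcoded defaults */

        for (i = 0; i <= j; i++)
                out_mhz[i] = smu_fclk_mhz[j - i];

        return j + 1; /* number of valid entries, now in ascending order */
}

int main(void)
{
        const unsigned int smu[NUM_LEVELS] = { 1333, 1067, 800, 0 }; /* highest-first */
        unsigned int table[NUM_LEVELS];
        int i, n = build_ascending_fclk_table(smu, table);

        for (i = 0; i < n; i++)
                printf("entry %d: %u MHz\n", i, table[i]); /* prints 800, 1067, 1333 */
        return 0;
}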
+void rn_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dpm_clocks clock_table = { 0 };
+
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->base.funcs = &dcn21_funcs;
+
+ clk_mgr->pp_smu = pp_smu;
+
+ clk_mgr->dccg = dccg;
+ clk_mgr->dfs_bypass_disp_clk = 0;
+
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+ clk_mgr->dfs_ref_freq_khz = 48000;
+
+ clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
+ } else {
+ struct clk_log_info log_info = {0};
+
+ /* TODO: Check that we get what we expect during bringup */
+ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
+
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
+ rn_bw_params.wm_table = lpddr4_wm_table;
+ } else {
+ rn_bw_params.wm_table = ddr4_wm_table;
+ }
+ /* Save the clocks configured at boot for debug purposes */
+ rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+ }
+
+ clk_mgr->base.dprefclk_khz = 600000;
+ dce_clock_read_ss_info(clk_mgr);
+
+
+ clk_mgr->base.bw_params = &rn_bw_params;
+
+ if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
+ pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+ }
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
+ /* enable power features when display count goes to 0 */
+ rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
+ }
+}
+
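A hedged usage sketch of rn_clk_mgr_construct(); the wrapper name and error handling are illustrative assumptions (the real entry point is the shared clk_mgr factory) and are not part of this patch.

#include <linux/slab.h>

#include "rn_clk_mgr.h"

struct clk_mgr *example_rn_clk_mgr_create(struct dc_context *ctx,
                                          struct pp_smu_funcs *pp_smu,
                                          struct dccg *dccg)
{
        struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

        if (!clk_mgr)
                return NULL;

        /* fills in dcn21_funcs, bw_params, dprefclk and the SMU version */
        rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);

        return &clk_mgr->base;
}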
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
new file mode 100644
index 000000000000..e4322fa5475b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __RN_CLK_MGR_H__
+#define __RN_CLK_MGR_H__
+
+#include "clk_mgr.h"
+#include "dm_pp_smu.h"
+
+struct rn_clk_registers {
+ uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
+};
+
+void rn_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
+
+#endif //__RN_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
new file mode 100644
index 000000000000..6878aedf1d3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+
+#include "renoir_ip_offset.h"
+
+#include "mp/mp_12_0_0_offset.h"
+#include "mp/mp_12_0_0_sh_mask.h"
+
+#define REG(reg_name) \
+ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+#define FN(reg_name, field) \
+ FD(reg_name##__##field)
+
+#define VBIOSSMC_MSG_TestMessage 0x1
+#define VBIOSSMC_MSG_GetSmuVersion 0x2
+#define VBIOSSMC_MSG_PowerUpGfx 0x3
+#define VBIOSSMC_MSG_SetDispclkFreq 0x4
+#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
+#define VBIOSSMC_MSG_PowerDownGfx 0x6
+#define VBIOSSMC_MSG_SetDppclkFreq 0x7
+#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x8
+#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x9
+#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0xA
+#define VBIOSSMC_MSG_GetFclkFrequency 0xB
+#define VBIOSSMC_MSG_SetDisplayCount 0xC
+#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xD
+#define VBIOSSMC_MSG_UpdatePmeRestore 0xE
+
+int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param)
+{
+ /* First clear response register */
+ REG_WRITE(MP1_SMN_C2PMSG_91, 0);
+
+ /* Set the parameter register for the SMU message, unit is MHz */
+ REG_WRITE(MP1_SMN_C2PMSG_83, param);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
+
+ REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 200000);
+
+ /* Actual dispclk set is returned in the parameter register */
+ return REG_READ(MP1_SMN_C2PMSG_83);
+}
+
+int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
+{
+ return rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_GetSmuVersion,
+ 0);
+}
+
+
+int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
+{
+ int actual_dispclk_set_mhz = -1;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ /* Unit of SMU msg parameter is MHz */
+ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDispclkFreq,
+ requested_dispclk_khz / 1000);
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_dispclk_set_mhz / 7);
+ }
+ }
+
+ return actual_dispclk_set_mhz * 1000;
+}
+
+int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
+{
+ int actual_dprefclk_set_mhz = -1;
+
+ actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDprefclkFreq,
+ clk_mgr->base.dprefclk_khz / 1000);
+
+ /* TODO: add code for programming DP DTO; currently this is done by the command table */
+
+ return actual_dprefclk_set_mhz * 1000;
+}
+
+int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
+{
+ int actual_dcfclk_set_mhz = -1;
+
+ if (clk_mgr->smu_ver < 0x370c00)
+ return actual_dcfclk_set_mhz;
+
+ actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
+ requested_dcfclk_khz / 1000);
+
+ return actual_dcfclk_set_mhz * 1000;
+}
+
+int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
+{
+ int actual_min_ds_dcfclk_mhz = -1;
+
+ if (clk_mgr->smu_ver < 0x370c00)
+ return actual_min_ds_dcfclk_mhz;
+
+ actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
+ requested_min_ds_dcfclk_khz / 1000);
+
+ return actual_min_ds_dcfclk_mhz * 1000;
+}
+
+void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz)
+{
+ rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetPhyclkVoltageByFreq,
+ requested_phyclk_khz / 1000);
+}
+
+int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
+{
+ int actual_dppclk_set_mhz = -1;
+
+ actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDppclkFreq,
+ requested_dpp_khz / 1000);
+
+ return actual_dppclk_set_mhz * 1000;
+}
+
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
+{
+ int disp_count;
+
+ if (state == DCN_PWR_STATE_LOW_POWER)
+ disp_count = 0;
+ else
+ disp_count = 1;
+
+ rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayCount,
+ disp_count);
+}
+
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
+ enable);
+}
+
+void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
+{
+ rn_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_UpdatePmeRestore,
+ 0);
+}
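A hedged example of how the setters above are meant to be driven: callers pass kHz, the SMU message carries MHz, and the granted value comes back in kHz so any rounding is visible to the caller. The helper name is an illustrative assumption and not part of this patch.

static void example_apply_clocks(struct clk_mgr_internal *clk_mgr,
                                 struct dc_clocks *new_clocks)
{
        /* request in kHz; the grant may be rounded to MHz granularity */
        int granted_dispclk_khz =
                rn_vbios_smu_set_dispclk(clk_mgr, new_clocks->dispclk_khz);

        rn_vbios_smu_set_hard_min_dcfclk(clk_mgr, new_clocks->dcfclk_khz);
        rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr,
                        new_clocks->dcfclk_deep_sleep_khz);

        ASSERT(granted_dispclk_khz > 0);
}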
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
new file mode 100644
index 000000000000..ccc01879c9d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_
+#define DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_
+
+int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
+int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
+int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
+int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
+int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
+void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
+int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state);
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
+void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
+
+#endif /* DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index cbc480a33376..04441dbcba76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -58,21 +58,21 @@
#include "hubp.h"
#include "dc_link_dp.h"
+#include "dc_dmub_srv.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "vm_helper.h"
-#endif
#include "dce/dce_i2c.h"
+#define CTX \
+ dc->ctx
+
#define DC_LOGGER \
dc->ctx->logger
-const static char DC_BUILD_ID[] = "production-build";
+static const char DC_BUILD_ID[] = "production-build";
/**
* DOC: Overview
@@ -181,13 +181,25 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- if (dc->config.edp_not_connected &&
- link->connector_signal == SIGNAL_TYPE_EDP) {
- link_destroy(&link);
- } else {
+ bool should_destroy_link = false;
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (dc->config.edp_not_connected)
+ should_destroy_link = true;
+ else if (dc->debug.remove_disconnect_edp) {
+ enum dc_connection_type type;
+ dc_link_detect_sink(link, &type);
+ if (type == dc_connection_none)
+ should_destroy_link = true;
+ }
+ }
+
+ if (dc->config.force_enum_edp || !should_destroy_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
+ } else {
+ link_destroy(&link);
}
}
}
@@ -275,11 +287,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream && pipe->stream_res.tg) {
- pipe->stream->adjust = *adjust;
dc->hwss.set_drr(&pipe,
1,
adjust->v_total_min,
- adjust->v_total_max);
+ adjust->v_total_max,
+ adjust->v_total_mid,
+ adjust->v_total_mid_frame_num);
ret = true;
}
@@ -397,6 +410,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
return false;
}
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates */
+ int i = 0;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
@@ -476,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
return ret;
}
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
struct dc_stream_state **streams,
int num_streams,
- const struct dc_static_screen_events *events)
+ const struct dc_static_screen_params *params)
{
int i = 0;
int j = 0;
@@ -498,10 +532,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
}
}
- dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
-static void destruct(struct dc *dc)
+static void dc_destruct(struct dc *dc)
{
if (dc->current_state) {
dc_release_state(dc->current_state);
@@ -534,7 +568,7 @@ static void destruct(struct dc *dc)
kfree(dc->bw_dceip);
dc->bw_dceip = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
kfree(dc->dcn_soc);
dc->dcn_soc = NULL;
@@ -542,32 +576,65 @@ static void destruct(struct dc *dc)
dc->dcn_ip = NULL;
#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
kfree(dc->vm_helper);
dc->vm_helper = NULL;
-#endif
}
-static bool construct(struct dc *dc,
+static bool dc_construct_ctx(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ if (!dc_ctx)
+ return false;
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
+ dc_ctx->dc_stream_id_count = 0;
+ dc_ctx->dce_environment = init_params->dce_environment;
+
+ /* Create logger */
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+ dc_ctx->dce_version = dc_version;
+
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+
+ dc->ctx = dc_ctx;
+
+ return true;
+}
+
+static bool dc_construct(struct dc *dc,
const struct dc_init_data *init_params)
{
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
#endif
- enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc->config = init_params->flags;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
// Allocate memory for the vm_helper
dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+ if (!dc->vm_helper) {
+ dm_error("%s: failed to create dc->vm_helper\n", __func__);
+ goto fail;
+ }
-#endif
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
@@ -585,7 +652,7 @@ static bool construct(struct dc *dc,
}
dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -601,31 +668,15 @@ static bool construct(struct dc *dc,
}
dc->dcn_ip = dcn_ip;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
-#endif
- dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
- if (!dc_ctx) {
+ if (!dc_construct_ctx(dc, init_params)) {
dm_error("%s: failed to create ctx\n", __func__);
goto fail;
}
- dc_ctx->cgs_device = init_params->cgs_device;
- dc_ctx->driver_context = init_params->driver;
- dc_ctx->dc = dc;
- dc_ctx->asic_id = init_params->asic_id;
- dc_ctx->dc_sink_id_count = 0;
- dc_ctx->dc_stream_id_count = 0;
- dc->ctx = dc_ctx;
-
- /* Create logger */
-
- dc_ctx->dce_environment = init_params->dce_environment;
-
- dc_version = resource_parse_asic_id(init_params->asic_id);
- dc_ctx->dce_version = dc_version;
+ dc_ctx = dc->ctx;
/* Resource should construct all asic specific resources.
* This should be the only place where we need to parse the asic id
@@ -640,7 +691,7 @@ static bool construct(struct dc *dc,
bp_init_data.bios = init_params->asic_id.atombios_base_address;
dc_ctx->dc_bios = dal_bios_parser_create(
- &bp_init_data, dc_version);
+ &bp_init_data, dc_ctx->dce_version);
if (!dc_ctx->dc_bios) {
ASSERT_CRITICAL(false);
@@ -648,17 +699,13 @@ static bool construct(struct dc *dc,
}
dc_ctx->created_bios = true;
- }
-
- dc_ctx->perf_trace = dc_perf_trace_create();
- if (!dc_ctx->perf_trace) {
- ASSERT_CRITICAL(false);
- goto fail;
}
+
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
- dc_version,
+ dc_ctx->dce_version,
dc_ctx->dce_environment,
dc_ctx);
@@ -667,7 +714,7 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
if (!dc->res_pool)
goto fail;
@@ -675,6 +722,9 @@ static bool construct(struct dc *dc,
if (!dc->clk_mgr)
goto fail;
+ if (dc->res_pool->funcs->update_bw_bounding_box)
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
@@ -695,12 +745,9 @@ static bool construct(struct dc *dc,
return true;
fail:
-
- destruct(dc);
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static bool disable_all_writeback_pipes_for_stream(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -713,7 +760,6 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
-#endif
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
@@ -739,11 +785,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
-#endif
- dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
}
current_ctx = dc->current_state;
@@ -751,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc_release_state(current_ctx);
}
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ int count = 0;
+ struct pipe_ctx *pipe;
+ PERF_TRACE();
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+ PERF_TRACE();
+}
+
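For reference, the loop above is a plain 1 us poll capped at 100,000 iterations, i.e. roughly the 100 ms timeout mentioned in the comment, applied per pipe. Below is a generic, hedged sketch of that poll-with-timeout pattern; the callback-based helper is an illustrative assumption, not driver API.

#include <linux/delay.h>

static bool example_poll_until(bool (*done)(void *arg), void *arg,
                               unsigned int timeout_us)
{
        unsigned int waited_us;

        for (waited_us = 0; waited_us < timeout_us; waited_us++) {
                if (done(arg))
                        return true;
                udelay(1); /* each iteration waits at least 1 us */
        }
        return false; /* condition never became true within timeout_us */
}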
/*******************************************************************************
* Public functions
******************************************************************************/
@@ -763,29 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (NULL == dc)
goto alloc_fail;
- if (false == construct(dc, init_params))
- goto construct_fail;
+ if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+ if (false == dc_construct_ctx(dc, init_params)) {
+ dc_destruct(dc);
+ goto construct_fail;
+ }
+ } else {
+ if (false == dc_construct(dc, init_params)) {
+ dc_destruct(dc);
+ goto construct_fail;
+ }
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
- /*TODO: separate HW and SW initialization*/
- dc->hwss.init_hw(dc);
+ dc->optimize_seamless_boot_streams = 0;
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+ dc->caps.linear_pitch_alignment = 64;
- full_pipe_count = dc->res_pool->pipe_count;
- if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
- full_pipe_count--;
- dc->caps.max_streams = min(
- full_pipe_count,
- dc->res_pool->stream_enc_count);
+ dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
- dc->caps.max_links = dc->link_count;
- dc->caps.max_audios = dc->res_pool->audio_count;
- dc->caps.linear_pitch_alignment = 64;
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+ }
/* Populate versioning information */
dc->versions.dc_ver = DC_VER;
- if (dc->res_pool->dmcu != NULL)
- dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -801,14 +884,30 @@ alloc_fail:
return NULL;
}
+void dc_hardware_init(struct dc *dc)
+{
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
+ dc->hwss.init_hw(dc);
+}
+
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ dc->ctx->cp_psp = init_params->cp_psp;
+#endif
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+#endif
}
void dc_destroy(struct dc **dc)
{
- destruct(*dc);
+ dc_destruct(*dc);
kfree(*dc);
*dc = NULL;
}
@@ -882,15 +981,11 @@ static void program_timing_sync(
/* set first pipe with plane as master */
for (j = 0; j < group_size; j++) {
- struct pipe_ctx *temp;
-
if (pipe_set[j]->plane_state) {
if (j == 0)
break;
- temp = pipe_set[0];
- pipe_set[0] = pipe_set[j];
- pipe_set[j] = temp;
+ swap(pipe_set[0], pipe_set[j]);
break;
}
}
@@ -947,31 +1042,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_crtc_timing *crtc_timing)
{
struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
struct dc_link *link = sink->link;
- unsigned int inst;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ // Seamless boot only supports single DP and eDP so far
+ if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
+ sink->sink_signal != SIGNAL_TYPE_EDP)
+ return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
- inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;
+ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (enc_inst == ENGINE_ID_UNKNOWN)
+ return false;
+
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
+
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
+
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
- /* Instance should be within the range of the pool */
- if (inst >= dc->res_pool->pipe_count)
+ if (tg_inst >= dc->res_pool->timing_generator_count)
return false;
- tg = dc->res_pool->timing_generators[inst];
+ tg = dc->res_pool->timing_generators[tg_inst];
+
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
+ return false;
+
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
- if (!tg->funcs->is_matching_timing)
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
return false;
- if (!tg->funcs->is_matching_timing(tg, crtc_timing))
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ return false;
+
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
if (dc_is_dp_signal(link->connector_signal)) {
@@ -979,10 +1130,25 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
- inst, &pix_clk_100hz);
+ tg_inst, &pix_clk_100hz);
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
+
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
}
return true;
@@ -1035,24 +1201,25 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization)
- dc->optimize_seamless_boot = true;
+ dc->optimize_seamless_boot_streams++;
}
- if (!dc->optimize_seamless_boot)
+ if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mode_changed)
- continue;
+ if (dc->hwss.apply_ctx_for_surface)
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context); /* use new pipe config in new context */
- }
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ }
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1065,22 +1232,25 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (result != DC_OK)
return result;
- if (context->stream_count > 1) {
+ if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
/* Program all planes within new context*/
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
/*
* enable stereo
@@ -1107,15 +1277,16 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (!dc->optimize_seamless_boot)
+ if (dc->optimize_seamless_boot_streams == 0) {
+ /* Must wait for no flips to be pending before optimizing bandwidth */
+ wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
+ }
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
- memset(&context->commit_hints, 0, sizeof(context->commit_hints));
-
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1147,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+bool dc_is_hw_initialized(struct dc *dc)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ return dcb->funcs->is_accelerated_mode(dcb);
+}
+
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot)
+ if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
@@ -1181,7 +1358,7 @@ struct dc_state *dc_create_state(struct dc *dc)
* initialize and obtain IP and SOC the base DML instance from DC is
* initially copied into every context
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
@@ -1208,6 +1385,12 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+ if (cur_pipe->prev_odm_pipe)
+ cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
+
+ if (cur_pipe->next_odm_pipe)
+ cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+
}
for (i = 0; i < new_ctx->stream_count; i++) {
@@ -1239,6 +1422,55 @@ void dc_release_state(struct dc_state *context)
kref_put(&context->refcount, dc_state_free);
}
+bool dc_set_generic_gpio_for_stereo(bool enable,
+ struct gpio_service *gpio_service)
+{
+ enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
+ struct gpio_pin_info pin_info;
+ struct gpio *generic;
+ struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
+ GFP_KERNEL);
+
+ if (!config)
+ return false;
+ pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
+
+ if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
+ kfree(config);
+ return false;
+ } else {
+ generic = dal_gpio_service_create_generic_mux(
+ gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+ }
+
+ if (!generic) {
+ kfree(config);
+ return false;
+ }
+
+ gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
+
+ config->enable_output_from_mux = enable;
+ config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
+
+ if (gpio_result == GPIO_RESULT_OK)
+ gpio_result = dal_mux_setup_config(generic, config);
+
+ if (gpio_result == GPIO_RESULT_OK) {
+ dal_gpio_close(generic);
+ dal_gpio_destroy_generic_mux(&generic);
+ kfree(config);
+ return true;
+ } else {
+ dal_gpio_close(generic);
+ dal_gpio_destroy_generic_mux(&generic);
+ kfree(config);
+ return false;
+ }
+}
+
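A hedged usage sketch for dc_set_generic_gpio_for_stereo() above; the wrapper name is an illustrative assumption and not part of this patch.

static bool example_enable_stereo_gpio(struct dc *dc)
{
        /* route the stereo sync signal out through the generic GPIO mux */
        return dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service);
}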
static bool is_surface_in_context(
const struct dc_state *context,
const struct dc_plane_state *plane_state)
@@ -1299,14 +1531,9 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
elevate_update_type(&update_type, UPDATE_TYPE_MED);
}
- if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
- update_flags->bits.sdr_white_level = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
- }
-
if (u->plane_info->dcc.enable != u->surface->dcc.enable
- || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
- || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) {
+ || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
+ || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
update_flags->bits.dcc_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);
}
@@ -1320,9 +1547,8 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
}
- if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch
- || u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch
- || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) {
+ if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
+ || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);
}
@@ -1360,7 +1586,10 @@ static enum surface_update_type get_scaling_info_update_type(
if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
- || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+ || u->scaling_info->scaling_quality.integer_scaling !=
+ u->surface->scaling_quality.integer_scaling
+ ) {
update_flags->bits.scaling_change = 1;
if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
@@ -1376,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.scaling_change = 1;
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
- && u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
/* Making src rect bigger requires a bandwidth change */
update_flags->bits.clock_change = 1;
}
@@ -1390,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.position_change = 1;
if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change)
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.scaling_change
- || update_flags->bits.position_change)
+ if (update_flags->bits.position_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
@@ -1408,20 +1637,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- update_flags->raw = 0; // Reset all flags
-
if (u->flip_addr)
update_flags->bits.addr_update = 1;
- if (!is_surface_in_context(context, u->surface)) {
- update_flags->bits.new_plane = 1;
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
}
- if (u->surface->force_full_update) {
- update_flags->bits.full_update = 1;
- return UPDATE_TYPE_FULL;
- }
+ update_flags->raw = 0; // Reset all flags
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1453,6 +1677,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->bits.gamma_change = 1;
}
+ if (u->hdr_mult.value)
+ if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ update_flags->bits.hdr_mult = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ }
+
if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
@@ -1479,40 +1709,45 @@ static enum surface_update_type check_update_surfaces_for_stream(
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
if (stream_status == NULL || stream_status->plane_count != surface_count)
- return UPDATE_TYPE_FULL;
+ overall_type = UPDATE_TYPE_FULL;
/* some stream updates require passive update */
if (stream_update) {
- if ((stream_update->src.height != 0) &&
- (stream_update->src.width != 0))
- return UPDATE_TYPE_FULL;
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
- if ((stream_update->dst.height != 0) &&
- (stream_update->dst.width != 0))
- return UPDATE_TYPE_FULL;
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update)
+ su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.abm_level = 1;
if (stream_update->dpms_off)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
- return UPDATE_TYPE_FULL;
-#endif
+ su_flags->bits.wb_update = 1;
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
+
+ if (stream_update->dsc_config)
+ overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
det_surface_update(dc, &updates[i]);
- if (type == UPDATE_TYPE_FULL)
- return type;
-
elevate_update_type(&overall_type, type);
}
@@ -1534,13 +1769,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
int i;
enum surface_update_type type;
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL)
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Otherwise we fall back to a memcmp.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+ }
return type;
}
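The fast-path check above prefers the per-ASIC are_clock_states_equal() hook and otherwise memcmp()s only the fields of struct dc_clocks that come before prev_p_state_change_support. Below is a standalone, hedged illustration of that offsetof()-bounded prefix compare, using a made-up struct rather than the real dc_clocks.

#include <stddef.h>
#include <string.h>

struct example_clocks {
        int dispclk_khz;
        int dcfclk_khz;
        int prev_p_state_change_support; /* this field and anything after it are ignored */
};

static int example_clocks_differ(const struct example_clocks *a,
                                 const struct example_clocks *b)
{
        /* compare only the prefix that matters for bandwidth optimization */
        return memcmp(a, b,
                      offsetof(struct example_clocks,
                               prev_p_state_change_support)) != 0;
}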
@@ -1616,8 +1867,8 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->global_alpha_value;
surface->dcc =
srf_update->plane_info->dcc;
- surface->sdr_white_level =
- srf_update->plane_info->sdr_white_level;
+ surface->layer_index =
+ srf_update->plane_info->layer_index;
}
if (srf_update->gamma &&
@@ -1648,7 +1899,6 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (srf_update->func_shaper &&
(surface->in_shaper_func !=
srf_update->func_shaper))
@@ -1661,13 +1911,16 @@ static void copy_surface_update_to_plane(
memcpy(surface->lut3d_func, srf_update->lut3d_func,
sizeof(*surface->lut3d_func));
+ if (srf_update->hdr_mult.value)
+ surface->hdr_mult =
+ srf_update->hdr_mult;
+
if (srf_update->blend_tf &&
(surface->blend_tf !=
srf_update->blend_tf))
memcpy(surface->blend_tf, srf_update->blend_tf,
sizeof(*surface->blend_tf));
-#endif
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -1680,8 +1933,10 @@ static void copy_surface_update_to_plane(
static void copy_stream_update_to_stream(struct dc *dc,
struct dc_state *context,
struct dc_stream_state *stream,
- const struct dc_stream_update *update)
+ struct dc_stream_update *update)
{
+ struct dc_context *dc_ctx = dc->ctx;
+
if (update == NULL || stream == NULL)
return;
@@ -1742,7 +1997,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dither_option)
stream->dither_option = *update->dither_option;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* update current stream with writeback info */
if (update->wb_update) {
int i;
@@ -1753,23 +2007,32 @@ static void copy_stream_update_to_stream(struct dc *dc,
stream->writeback_info[i] =
update->wb_update->writeback_info[i];
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
if (update->dsc_config) {
struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
uint32_t old_dsc_enabled = stream->timing.flags.DSC;
uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
update->dsc_config->num_slices_v != 0);
- stream->timing.dsc_cfg = *update->dsc_config;
- stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
- true)) {
- stream->timing.dsc_cfg = old_dsc_cfg;
- stream->timing.flags.DSC = old_dsc_enabled;
+ /* Use temporary context for validating new DSC config */
+ struct dc_state *dsc_validate_context = dc_create_state(dc);
+
+ if (dsc_validate_context) {
+ dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
+
+ stream->timing.dsc_cfg = *update->dsc_config;
+ stream->timing.flags.DSC = enable_dsc;
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ stream->timing.dsc_cfg = old_dsc_cfg;
+ stream->timing.flags.DSC = old_dsc_enabled;
+ update->dsc_config = NULL;
+ }
+
+ dc_release_state(dsc_validate_context);
+ } else {
+ DC_ERROR("Failed to allocate new validate context for DSC change\n");
+ update->dsc_config = NULL;
}
}
-#endif
}
static void commit_planes_do_stream_update(struct dc *dc,
@@ -1779,22 +2042,21 @@ static void commit_planes_do_stream_update(struct dc *dc,
struct dc_state *context)
{
int j;
+ bool should_program_abm;
// Stream updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (!pipe_ctx->top_pipe &&
- pipe_ctx->stream &&
- pipe_ctx->stream == stream) {
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
if (stream_update->periodic_interrupt0 &&
dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
if (stream_update->periodic_interrupt1 &&
dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
@@ -1804,6 +2066,12 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.update_info_frame(pipe_ctx);
}
+ if (stream_update->hdr_static_metadata &&
+ stream->use_dynamic_meta &&
+ dc->hwss.set_dmdata_attributes &&
+ pipe_ctx->stream->dmdata_address.quad_part != 0)
+ dc->hwss.set_dmdata_attributes(pipe_ctx);
+
if (stream_update->gamut_remap)
dc_stream_set_gamut_remap(dc, stream);
@@ -1811,54 +2079,65 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc_stream_program_csc_matrix(dc, stream);
if (stream_update->dither_option) {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
-#endif
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
resource_build_bit_depth_reduction_params(pipe_ctx->stream,
&pipe_ctx->stream->bit_depth_params);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- if (odm_pipe)
+ while (odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
-#endif
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
}
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
dp_update_dsc_config(pipe_ctx);
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
}
-#endif
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
if (stream_update->dpms_off) {
dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+
if (*stream_update->dpms_off) {
- core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
+ core_link_disable_stream(pipe_ctx);
+ /* for dpms, keep acquired resources*/
+ if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
dc->hwss.optimize_bandwidth(dc, dc->current_state);
} else {
- dc->hwss.prepare_bandwidth(dc, dc->current_state);
+ if (dc->optimize_seamless_boot_streams == 0)
+ dc->hwss.prepare_bandwidth(dc, dc->current_state);
+
core_link_enable_stream(dc->current_state, pipe_ctx);
}
+
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
- if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
- // if otg funcs defined check if blanked before programming
- if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = true;
+
+ // if otg funcs defined check if blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ } else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
- } else
- pipe_ctx->stream_res.abm->funcs->set_abm_level(
- pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
}
}
}
@@ -1875,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (dc->optimize_seamless_boot && surface_count > 0) {
+ if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -1884,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc,
*/
if (stream->apply_seamless_boot_optimization) {
stream->apply_seamless_boot_optimization = false;
- dc->optimize_seamless_boot = false;
- dc->optimized_required = true;
+ dc->optimize_seamless_boot_streams--;
+
+ if (dc->optimize_seamless_boot_streams == 0)
+ dc->optimized_required = true;
}
}
- if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
+ if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1903,11 +2184,14 @@ static void commit_planes_for_stream(struct dc *dc,
* In case of turning off screen, no need to program front end a second time.
* just return after program blank.
*/
- dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -1929,13 +2213,13 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
}
-#endif
// Update Type FULL, Surface updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
pipe_ctx->stream &&
pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL;
@@ -1949,7 +2233,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
if (dc->hwss.program_triplebuffer != NULL &&
@@ -1958,14 +2241,32 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
-#endif
stream_status =
stream_get_status(context, pipe_ctx->stream);
- dc->hwss.apply_ctx_for_surface(
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (dc->debug.validate_dml_output) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
+ if (cur_pipe.stream == NULL)
+ continue;
+
+ cur_pipe.plane_res.hubp->funcs->validate_dml_output(
+ cur_pipe.plane_res.hubp, dc->ctx,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs);
+ }
+ }
+#endif
+ }
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
@@ -1975,7 +2276,6 @@ static void commit_planes_for_stream(struct dc *dc,
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -1994,7 +2294,6 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->flip_immediate);
}
}
-#endif
/* Perform requested Updates */
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2007,7 +2306,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL &&
!dc->debug.disable_tri_buf) {
@@ -2015,7 +2313,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.program_triplebuffer(
dc, pipe_ctx, plane_state->triplebuffer_flips);
}
-#endif
if (srf_updates[i].flip_addr)
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
@@ -2050,7 +2347,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
enum surface_update_type update_type;
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
- int i, j;
+ int i;
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -2088,16 +2385,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_surface_update_to_plane(surface, &srf_updates[i]);
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
}
copy_stream_update_to_stream(dc, context, stream, stream_update);
@@ -2175,18 +2462,22 @@ void dc_set_power_state(
enum dc_acpi_cm_power_state power_state)
{
struct kref refcount;
- struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
- GFP_KERNEL);
-
- ASSERT(dml);
- if (!dml)
- return;
+ struct display_mode_lib *dml;
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
+ if (dc->ctx->dmub_srv)
+ dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
+
dc->hwss.init_hw(dc);
+
+ if (dc->hwss.init_sys_ctx != NULL &&
+ dc->vm_pa_config.valid) {
+ dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
+ }
+
break;
default:
ASSERT(dc->current_state->stream_count == 0);
@@ -2194,6 +2485,12 @@ void dc_set_power_state(
* clean state, and dc hw programming optimizations will not
* cause any trouble.
*/
+ dml = kzalloc(sizeof(struct display_mode_lib),
+ GFP_KERNEL);
+
+ ASSERT(dml);
+ if (!dml)
+ return;
/* Preserve refcount */
refcount = dc->current_state->refcount;
@@ -2207,10 +2504,10 @@ void dc_set_power_state(
dc->current_state->refcount = refcount;
dc->current_state->bw_ctx.dml = *dml;
+ kfree(dml);
+
break;
}
-
- kfree(dml);
}
void dc_resume(struct dc *dc)
@@ -2265,6 +2562,17 @@ bool dc_submit_i2c(
cmd);
}
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd)
+{
+ struct ddc_service *ddc = dc->res_pool->oem_device;
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+}
+
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
@@ -2387,3 +2695,14 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx
info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
+enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
+{
+ if (dc->hwss.set_clock)
+ return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
+ return DC_ERROR_UNEXPECTED;
+}
+void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
+{
+ if (dc->hwss.get_clock)
+ dc->hwss.get_clock(dc, clock_type, clock_cfg);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 5903e7822f98..502ed3c7959d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -33,7 +33,6 @@
#include "core_status.h"
#include "core_types.h"
-#include "hw_sequencer.h"
#include "resource.h"
@@ -115,16 +114,16 @@ void pre_surface_trace(
plane_state->clip_rect.height);
SURFACE_TRACE(
- "plane_state->plane_size.grph.surface_size.x = %d;\n"
- "plane_state->plane_size.grph.surface_size.y = %d;\n"
- "plane_state->plane_size.grph.surface_size.width = %d;\n"
- "plane_state->plane_size.grph.surface_size.height = %d;\n"
- "plane_state->plane_size.grph.surface_pitch = %d;\n",
- plane_state->plane_size.grph.surface_size.x,
- plane_state->plane_size.grph.surface_size.y,
- plane_state->plane_size.grph.surface_size.width,
- plane_state->plane_size.grph.surface_size.height,
- plane_state->plane_size.grph.surface_pitch);
+ "plane_state->plane_size.surface_size.x = %d;\n"
+ "plane_state->plane_size.surface_size.y = %d;\n"
+ "plane_state->plane_size.surface_size.width = %d;\n"
+ "plane_state->plane_size.surface_size.height = %d;\n"
+ "plane_state->plane_size.surface_pitch = %d;\n",
+ plane_state->plane_size.surface_size.x,
+ plane_state->plane_size.surface_size.y,
+ plane_state->plane_size.surface_size.width,
+ plane_state->plane_size.surface_size.height,
+ plane_state->plane_size.surface_pitch);
SURFACE_TRACE(
@@ -202,20 +201,20 @@ void update_surface_trace(
SURFACE_TRACE(
"plane_info->color_space = %d;\n"
"plane_info->format = %d;\n"
- "plane_info->plane_size.grph.surface_pitch = %d;\n"
- "plane_info->plane_size.grph.surface_size.height = %d;\n"
- "plane_info->plane_size.grph.surface_size.width = %d;\n"
- "plane_info->plane_size.grph.surface_size.x = %d;\n"
- "plane_info->plane_size.grph.surface_size.y = %d;\n"
+ "plane_info->plane_size.surface_pitch = %d;\n"
+ "plane_info->plane_size.surface_size.height = %d;\n"
+ "plane_info->plane_size.surface_size.width = %d;\n"
+ "plane_info->plane_size.surface_size.x = %d;\n"
+ "plane_info->plane_size.surface_size.y = %d;\n"
"plane_info->rotation = %d;\n"
"plane_info->stereo_format = %d;\n",
update->plane_info->color_space,
update->plane_info->format,
- update->plane_info->plane_size.grph.surface_pitch,
- update->plane_info->plane_size.grph.surface_size.height,
- update->plane_info->plane_size.grph.surface_size.width,
- update->plane_info->plane_size.grph.surface_size.x,
- update->plane_info->plane_size.grph.surface_size.y,
+ update->plane_info->plane_size.surface_pitch,
+ update->plane_info->plane_size.surface_size.height,
+ update->plane_info->plane_size.surface_size.width,
+ update->plane_info->plane_size.surface_size.x,
+ update->plane_info->plane_size.surface_size.y,
update->plane_info->rotation,
update->plane_info->stereo_format);
@@ -310,14 +309,13 @@ void context_timing_trace(
struct resource_context *res_ctx)
{
int i;
- struct dc *core_dc = dc;
int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
struct crtc_position position;
- unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
DC_LOGGER_INIT(dc->ctx->logger);
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
/* get_position() returns CRTC vertical/horizontal counter
* hence not applicable for underlay pipe
@@ -329,7 +327,7 @@ void context_timing_trace(
h_pos[i] = position.horizontal_count;
v_pos[i] = position.vertical_count;
}
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
@@ -347,7 +345,7 @@ void context_clock_trace(
struct dc *dc,
struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 355b4ba12796..a09119c10d7c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,10 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-#include "resource.h"
-#endif
-#include "hw/clk_mgr.h"
+#include "../dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -78,12 +75,11 @@ enum {
/*******************************************************************************
* Private functions
******************************************************************************/
-static void destruct(struct dc_link *link)
+static void dc_link_destruct(struct dc_link *link)
{
int i;
if (link->hpd_gpio != NULL) {
- dal_gpio_close(link->hpd_gpio);
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -377,7 +373,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
if (GPIO_RESULT_OK != dal_ddc_open(
ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
- dal_gpio_destroy_ddc(&ddc);
+ dal_ddc_close(ddc);
return present;
}
@@ -524,7 +520,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
-static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
@@ -559,17 +555,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+			 * read DPCD 00115h to find the eDP link rate set in use.
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
@@ -684,6 +686,56 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
+static bool wait_for_alt_mode(struct dc_link *link)
+{
+
+ /**
+	 * Something is terribly wrong if the time out is > 200 ms (5 Hz):
+	 * 500 microseconds * 400 tries is 200 ms.
+ **/
+ unsigned int sleep_time_in_microseconds = 500;
+ unsigned int tries_allowed = 400;
+ bool is_in_alt_mode;
+ unsigned long long enter_timestamp;
+ unsigned long long finish_timestamp;
+ unsigned long long time_taken_in_ns;
+ int tries_taken;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link->link_enc->funcs->is_in_alt_mode == NULL)
+ return true;
+
+ is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
+ DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+
+ if (is_in_alt_mode)
+ return true;
+
+ enter_timestamp = dm_get_timestamp(link->ctx);
+
+ for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
+ udelay(sleep_time_in_microseconds);
+ /* ask the link if alt mode is enabled, if so return ok */
+ if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
+
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(
+ link->ctx, finish_timestamp, enter_timestamp);
+			DC_LOG_WARNING("Alt mode entry finished after %llu ms\n",
+ div_u64(time_taken_in_ns, 1000000));
+ return true;
+ }
+
+ }
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
+ enter_timestamp);
+ DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
+ div_u64(time_taken_in_ns, 1000000));
+ return false;
+}
+
/**
* dc_link_detect() - Detect if a sink is attached to a given link
*
@@ -692,7 +744,8 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
* This does not create remote sinks but will trigger DM
* to start MST detection if a branch is detected.
*/
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+static bool dc_link_detect_helper(struct dc_link *link,
+ enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
@@ -707,6 +760,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dpcd_caps prev_dpcd_caps;
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
+ bool perform_dp_seamless_boot = false;
+
DC_LOGGER_INIT(link->ctx->logger);
if (dc_is_virtual_signal(link->connector_signal))
@@ -763,15 +818,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
- read_edp_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
- sink_caps.transaction_type =
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ read_current_link_settings_on_detect(link);
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
+
+		/* Workaround for HPD going high too early */
+ if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
+
+ /* if alt mode times out, return false */
+ if (wait_for_alt_mode(link) == false) {
+ return false;
+ }
+ }
+
if (!detect_dp(
link,
&sink_caps,
@@ -787,25 +851,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
same_dpcd = false;
}
- /* Active dongle plug in without display or downstream unplug*/
+ /* Active dongle downstream unplug*/
if (link->type == dc_connection_active_dongle &&
link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink != NULL) {
+ if (prev_sink != NULL)
/* Downstream unplug */
dc_sink_release(prev_sink);
- } else {
- /* Empty dongle plug in */
- for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
- int fail_count = 0;
-
- dp_verify_link_cap(link,
- &link->reported_link_cap,
- &fail_count);
-
- if (fail_count == 0)
- break;
- }
- }
return true;
}
@@ -817,12 +868,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- link->verified_link_cap = link->reported_link_cap;
+ dp_verify_mst_link_cap(link);
+
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
+		// For seamless boot, to skip verifying link cap, we read the UEFI settings and set them as verified.
+ if (reason == DETECT_REASON_BOOT &&
+ dc_ctx->dc->config.power_down_display_on_boot == false &&
+ link->link_status.link_active == true)
+ perform_dp_seamless_boot = true;
+
+ if (perform_dp_seamless_boot) {
+ read_current_link_settings_on_detect(link);
+ link->verified_link_cap = link->reported_link_cap;
+ }
+
break;
}
@@ -900,25 +963,17 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
- reason != DETECT_REASON_HPDRX) {
+ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
*/
- /* deal with non-mst cases */
- for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
- int fail_count = 0;
-
- dp_verify_link_cap(link,
- &link->reported_link_cap,
- &fail_count);
-
- if (fail_count == 0)
- break;
- }
-
+ // verify link cap for SST non-seamless boot
+ if (!perform_dp_seamless_boot)
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1007,6 +1062,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_sink_release(prev_sink);
return true;
+
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ const struct dc *dc = link->dc;
+ bool ret;
+
+ /* get out of low power state */
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
+ ret = dc_link_detect_helper(link, reason);
+
+ /* Go back to power optimized state */
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+
+ return ret;
}
bool dc_link_get_hpd_state(struct dc_link *dc_link)
@@ -1166,7 +1238,7 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static bool construct(
+static bool dc_link_construct(
struct dc_link *link,
const struct link_init_data *init_params)
{
@@ -1188,6 +1260,9 @@ static bool construct(
link->ctx = dc_ctx;
link->link_index = init_params->link_index;
+ memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings));
+
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
@@ -1365,7 +1440,7 @@ struct dc_link *link_create(const struct link_init_data *init_params)
if (NULL == link)
goto alloc_fail;
- if (false == construct(link, init_params))
+ if (false == dc_link_construct(link, init_params))
goto construct_fail;
return link;
@@ -1379,62 +1454,11 @@ alloc_fail:
void link_destroy(struct dc_link **link)
{
- destruct(*link);
+ dc_link_destruct(*link);
kfree(*link);
*link = NULL;
}
-static void dpcd_configure_panel_mode(
- struct dc_link *link,
- enum dp_panel_mode panel_mode)
-{
- union dpcd_edp_config edp_config_set;
- bool panel_mode_edp = false;
- DC_LOGGER_INIT(link->ctx->logger);
-
- memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
-
- if (DP_PANEL_MODE_DEFAULT != panel_mode) {
-
- switch (panel_mode) {
- case DP_PANEL_MODE_EDP:
- case DP_PANEL_MODE_SPECIAL:
- panel_mode_edp = true;
- break;
-
- default:
- break;
- }
-
- /*set edp panel mode in receiver*/
- core_link_read_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- if (edp_config_set.bits.PANEL_MODE_EDP
- != panel_mode_edp) {
- enum ddc_result result = DDC_RESULT_UNKNOWN;
-
- edp_config_set.bits.PANEL_MODE_EDP =
- panel_mode_edp;
- result = core_link_write_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- ASSERT(result == DDC_RESULT_SUCESSFULL);
- }
- }
- DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
- "eDP panel mode enabled: %d \n",
- link->link_index,
- link->dpcd_caps.panel_mode_edp,
- panel_mode_edp);
-}
-
static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -1465,21 +1489,22 @@ static enum dc_status enable_link_dp(
bool skip_video_pattern;
struct dc_link *link = stream->link;
struct dc_link_settings link_settings = {0};
- enum dp_panel_mode panel_mode;
+ bool fec_enable;
+ int i;
+ bool apply_seamless_boot_optimization = false;
+
+ // check for seamless boot
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i]->apply_seamless_boot_optimization) {
+ apply_seamless_boot_optimization = true;
+ break;
+ }
+ }
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
- /* If link settings are different than current and link already enabled
- * then need to disable before programming to new rate.
- */
- if (link->link_status.link_active &&
- (link->cur_link_settings.lane_count != link_settings.lane_count ||
- link->cur_link_settings.link_rate != link_settings.link_rate)) {
- dp_disable_link_phy(link, pipe_ctx->stream->signal);
- }
-
/*in case it is not on*/
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -1487,46 +1512,32 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
-
- dp_enable_link_phy(
- link,
- pipe_ctx->stream->signal,
- pipe_ctx->clock_source->id,
- &link_settings);
-
- if (stream->sink_patches.dppowerup_delay > 0) {
- int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
-
- msleep(delay_dp_power_up_in_ms);
- }
-
- panel_mode = dp_get_panel_mode(link);
- dpcd_configure_panel_mode(link, panel_mode);
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW)
skip_video_pattern = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- dp_set_fec_ready(link, true);
-#endif
-
if (perform_link_training_with_retries(
- link,
&link_settings,
skip_video_pattern,
- LINK_TRAINING_ATTEMPTS)) {
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
}
else
status = DC_FAIL_DP_LINK_TRAINING;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- dp_set_fec_enable(link, true);
-#endif
+ if (link->preferred_training_settings.fec_enable != NULL)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+
+ dp_set_fec_enable(link, fec_enable);
return status;
}
@@ -2013,6 +2024,45 @@ static void write_i2c_redriver_setting(
ASSERT(i2c_success);
}
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+ /*
+ * TODO: implement call for dp_set_hw_test_pattern
+ * it is needed for compliance testing
+ */
+
+	/* Here we need to specify that encoder output settings
+	 * need to be calculated as for the set mode;
+	 * this will lead to querying dynamic link capabilities,
+	 * which should be done before enabling the output.
+	 */
+
+ if (dc_is_dp_signal(signal)) {
+ /* SST DP, eDP */
+ if (dc_is_dp_sst_signal(signal))
+ dp_disable_link_phy(link, signal);
+ else
+ dp_disable_link_phy_mst(link, signal);
+
+ if (dc_is_dp_sst_signal(signal) ||
+ link->mst_stream_alloc_table.stream_count == 0) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, false);
+ }
+ } else {
+ if (signal != SIGNAL_TYPE_VIRTUAL)
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ }
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		/* For MST, disable the link only when no stream uses the link */
+ if (link->mst_stream_alloc_table.stream_count <= 0)
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
+}
+
static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -2097,6 +2147,19 @@ static enum dc_status enable_link(
struct pipe_ctx *pipe_ctx)
{
enum dc_status status = DC_ERROR_UNEXPECTED;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+	/* There are scenarios where the driver is unloaded with a display
+	 * still enabled. When the driver is reloaded, the display may fail
+	 * to light up if there is a mismatch between the old and new
+	 * link settings. We need to disable the link first before enabling
+	 * it with the new link settings.
+	 */
+ if (link->link_status.link_active) {
+ disable_link(link, pipe_ctx->stream->signal);
+ }
+
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
status = enable_link_dp(state, pipe_ctx);
@@ -2131,44 +2194,6 @@ static enum dc_status enable_link(
return status;
}
-static void disable_link(struct dc_link *link, enum signal_type signal)
-{
- /*
- * TODO: implement call for dp_set_hw_test_pattern
- * it is needed for compliance testing
- */
-
- /* here we need to specify that encoder output settings
- * need to be calculated as for the set mode,
- * it will lead to querying dynamic link capabilities
- * which should be done before enable output */
-
- if (dc_is_dp_signal(signal)) {
- /* SST DP, eDP */
- if (dc_is_dp_sst_signal(signal))
- dp_disable_link_phy(link, signal);
- else
- dp_disable_link_phy_mst(link, signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-
- if (dc_is_dp_sst_signal(signal) ||
- link->mst_stream_alloc_table.stream_count == 0) {
- dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, false);
- }
-#endif
- } else
- link->link_enc->funcs->disable_output(link->link_enc, signal);
-
- if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* MST disable link only when no stream use the link */
- if (link->mst_stream_alloc_table.stream_count <= 0)
- link->link_status.link_active = false;
- } else {
- link->link_status.link_active = false;
- }
-}
-
static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
{
@@ -2205,7 +2230,7 @@ static bool dp_active_dongle_validate_timing(
break;
}
- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
@@ -2305,9 +2330,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
- struct dc *core_dc = link->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = dc->res_pool->abm;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
unsigned int controller_id = 0;
bool use_smooth_brightness = true;
int i;
@@ -2325,22 +2350,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
if (dc_is_embedded_signal(link->connector_signal)) {
for (i = 0; i < MAX_PIPES; i++) {
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (core_dc->current_state->res_ctx.
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.
pipe_ctx[i].stream->link
== link) {
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
controller_id =
- core_dc->current_state->
+ dc->current_state->
res_ctx.pipe_ctx[i].stream_res.tg->inst +
1;
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
frame_ramp = 0;
}
}
@@ -2358,8 +2383,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
bool dc_link_set_abm_disable(const struct dc_link *link)
{
- struct dc *core_dc = link->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = dc->res_pool->abm;
if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
return false;
@@ -2369,17 +2394,215 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
return true;
}
-bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
- dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+ if ((psr != NULL) && link->psr_feature_enabled)
+ psr->funcs->set_psr_enable(psr, allow_active);
+ else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
+
+ link->psr_allow_active = allow_active;
return true;
}
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
+
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->get_psr_state(psr_state);
+ else if (dmcu != NULL && link->psr_feature_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+static inline enum physical_phy_id
+transmitter_to_phy_id(enum transmitter transmitter_value)
+{
+ switch (transmitter_value) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYLD_0;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYLD_1;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYLD_2;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYLD_3;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYLD_4;
+ case TRANSMITTER_UNIPHY_F:
+ return PHYLD_5;
+ case TRANSMITTER_NUTMEG_CRT:
+ return PHYLD_6;
+ case TRANSMITTER_TRAVIS_CRT:
+ return PHYLD_7;
+ case TRANSMITTER_TRAVIS_LCD:
+ return PHYLD_8;
+ case TRANSMITTER_UNIPHY_G:
+ return PHYLD_9;
+ case TRANSMITTER_COUNT:
+ return PHYLD_COUNT;
+ case TRANSMITTER_UNKNOWN:
+ return PHYLD_UNKNOWN;
+ default:
+ WARN_ONCE(1, "Unknown transmitter value %d\n",
+ transmitter_value);
+ return PHYLD_UNKNOWN;
+ }
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *dc;
+ struct dmcu *dmcu;
+ struct dmub_psr *psr;
+ int i;
+ /* updateSinkPsrDpcdConfig*/
+ union dpcd_psr_configuration psr_configuration;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ dc = link->ctx->dc;
+ dmcu = dc->res_pool->dmcu;
+ psr = dc->res_pool->psr;
+
+ if (!dmcu && !psr)
+ return false;
+
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu -1 for all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /*PhyId is associated with the transmitter id*/
+ psr_context->smuPhyId =
+ transmitter_to_phy_id(link->link_enc->transmitter);
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
+ timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->timing_generator_count;
+
+ psr_context->rfb_update_auto_en = true;
+
+	/* 2 frames before entering PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	/* Skip powering down the single pipe since it blocks the cstate */
+ if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ if (psr)
+ link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ else
+ link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+
+	/* psr_feature_enabled == 0 indicates setup_psr did not succeed, but this
+	 * should not happen since firmware should be running at this point
+	 */
+ if (link->psr_feature_enabled == 0)
+ ASSERT(0);
+
+ return true;
+
+}
+
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
@@ -2403,28 +2626,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
return dc_fixpt_div_int(mbytes_per_sec, 54);
}
-static int get_color_depth(enum dc_color_depth color_depth)
-{
- switch (color_depth) {
- case COLOR_DEPTH_666: return 6;
- case COLOR_DEPTH_888: return 8;
- case COLOR_DEPTH_101010: return 10;
- case COLOR_DEPTH_121212: return 12;
- case COLOR_DEPTH_141414: return 14;
- case COLOR_DEPTH_161616: return 16;
- default: return 0;
- }
-}
-
static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
{
- uint32_t bpc;
uint64_t kbps;
struct fixed31_32 peak_kbps;
uint32_t numerator;
uint32_t denominator;
- bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
@@ -2498,7 +2706,7 @@ static void update_mst_stream_alloc_table(
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
*/
-static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
@@ -2509,6 +2717,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
uint8_t i;
+ enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
/* enable_link_dp_mst already check link->enabled_stream_count
@@ -2556,14 +2765,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&link->mst_stream_alloc_table);
/* send down message */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -2656,15 +2867,83 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+	// Clear all MST payloads, then reallocate
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+		/* The driver enables split pipes for external monitors,
+		 * so we have to check whether pipe_ctx is a split pipe.
+		 * If it is a split pipe, the driver uses the top pipe to
+		 * reallocate.
+		 */
+ if (!pipe_ctx || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ deallocate_mst_payload(pipe_ctx);
+ }
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			/* Enabling/disabling the PHY clears the connection between
+			 * BE and FE; it needs to be restored.
+			 */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
+ return DC_OK;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ if (cp_psp && cp_psp->funcs.update_stream_config) {
+ struct cp_psp_stream_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dpms_off = dpms_off;
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+ }
+}
+#endif
+
void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
@@ -2680,6 +2959,7 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
@@ -2703,18 +2983,21 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc,
&stream->timing);
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
bool apply_edp_fast_boot_optimization =
pipe_ctx->stream->apply_edp_fast_boot_optimization;
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
resource_build_info_frame(pipe_ctx);
- core_dc->hwss.update_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2722,6 +3005,9 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2747,7 +3033,7 @@ void core_link_enable_stream(
}
}
- core_dc->hwss.enable_audio_stream(pipe_ctx);
+ dc->hwss.enable_audio_stream(pipe_ctx);
/* turn off otg test pattern if enable */
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
@@ -2755,44 +3041,54 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
- core_dc->hwss.enable_stream(pipe_ctx);
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+ }
+ dc->hwss.enable_stream(pipe_ctx);
+
+	/* Set DSC PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_pps_sdp(pipe_ctx, true);
+ }
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
+ dc_link_allocate_mst_payload(pipe_ctx);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- if (pipe_ctx->stream->timing.flags.DSC &&
- (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))) {
- dp_set_dsc_enable(pipe_ctx, true);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(
- pipe_ctx->stream_res.tg,
- CRTC_STATE_VBLANK);
- }
-#endif
- core_dc->hwss.unblank_stream(pipe_ctx,
+ dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
- }
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
+ } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
}
-#endif
}
-void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- core_dc->hwss.blank_stream(pipe_ctx);
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+#endif
+
+ dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
@@ -2821,25 +3117,23 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
write_i2c_redriver_setting(pipe_ctx, false);
}
}
- core_dc->hwss.disable_stream(pipe_ctx, option);
+ dc->hwss.disable_stream(pipe_ctx);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- if (pipe_ctx->stream->timing.flags.DSC &&
- dc_is_dp_signal(pipe_ctx->stream->signal)) {
- dp_set_dsc_enable(pipe_ctx, false);
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, false);
}
-#endif
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
return;
- core_dc->hwss.set_avmute(pipe_ctx, enable);
+ dc->hwss.set_avmute(pipe_ctx, enable);
}
/**
@@ -2897,13 +3191,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
uint32_t bits_per_channel = 0;
uint32_t kbps;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (timing->flags.DSC) {
kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
return kbps;
}
-#endif
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
@@ -2999,8 +3291,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
- if (pipe->stream->link == link)
+ if (pipe->stream->link == link) {
+ link_stream = pipe->stream;
break;
+ }
}
}
@@ -3008,20 +3302,40 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
if (i == MAX_PIPES)
return;
- link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
-
/* Cannot retrain link if backend is off */
if (link_stream->dpms_off)
return;
- if (link_stream)
- decide_link_settings(link_stream, &store_settings);
+ decide_link_settings(link_stream, &store_settings);
if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
(store_settings.link_rate != LINK_RATE_UNKNOWN))
dp_retrain_link_dp_test(link, &store_settings, false);
}
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
+{
+ if (lt_overrides != NULL)
+ link->preferred_training_settings = *lt_overrides;
+ else
+ memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings));
+
+ if (link_setting != NULL) {
+ link->preferred_link_setting = *link_setting;
+ } else {
+ link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
+ link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ /* Retrain now, or wait until next stream update to apply */
+ if (skip_immediate_retrain == false)
+ dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
+}
+
void dc_link_enable_hpd(const struct dc_link *link)
{
dc_link_dp_enable_hpd(link);
@@ -3032,9 +3346,9 @@ void dc_link_disable_hpd(const struct dc_link *link)
dc_link_dp_disable_hpd(link);
}
-
void dc_link_set_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size)
@@ -3043,6 +3357,7 @@ void dc_link_set_test_pattern(struct dc_link *link,
dc_link_dp_set_test_pattern(
link,
test_pattern,
+ test_pattern_color_space,
p_link_settings,
p_custom_pattern,
cust_pattern_size);
@@ -3058,7 +3373,6 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
/* Account for FEC overhead.
* We have to do it based on caps,
@@ -3083,7 +3397,6 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
link_bw_kbps, 32);
}
-#endif
return link_bw_kbps;
@@ -3097,3 +3410,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
return &link->preferred_link_setting;
return &link->verified_link_cap;
}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ dp_overwrite_extended_receiver_cap(link);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index e6da8506128b..a49c10d5df26 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add(
}
-static void construct(
+static void ddc_service_construct(
struct ddc_service *ddc_service,
struct ddc_service_init_data *init_data)
{
@@ -206,7 +206,10 @@ static void construct(
ddc_service->ddc_pin = NULL;
} else {
hw_info.ddc_channel = i2c_info.i2c_line;
- hw_info.hw_supported = i2c_info.i2c_hw_assist;
+ if (ddc_service->link != NULL)
+ hw_info.hw_supported = i2c_info.i2c_hw_assist;
+ else
+ hw_info.hw_supported = false;
ddc_service->ddc_pin = dal_gpio_create_ddc(
gpio_service,
@@ -236,11 +239,11 @@ struct ddc_service *dal_ddc_service_create(
if (!ddc_service)
return NULL;
- construct(ddc_service, init_data);
+ ddc_service_construct(ddc_service, init_data);
return ddc_service;
}
-static void destruct(struct ddc_service *ddc)
+static void ddc_service_destruct(struct ddc_service *ddc)
{
if (ddc->ddc_pin)
dal_gpio_destroy_ddc(&ddc->ddc_pin);
@@ -252,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)
BREAK_TO_DEBUGGER();
return;
}
- destruct(*ddc);
+ ddc_service_destruct(*ddc);
kfree(*ddc);
*ddc = NULL;
}
@@ -294,7 +297,7 @@ static uint32_t defer_delay_converter_wa(
{
struct dc_link *link = ddc->link;
- if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
!memcmp(link->dpcd_caps.branch_dev_name,
DP_DVI_CONVERTER_ID_4,
sizeof(link->dpcd_caps.branch_dev_name)))
@@ -374,6 +377,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
enum display_dongle_type *dongle = &sink_cap->dongle_type;
uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
bool is_type2_dongle = false;
+ int retry_count = 2;
struct dp_hdmi_dongle_signature_data *dongle_signature;
/* Assume we have no valid DP passive dongle connected */
@@ -386,13 +390,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
DP_HDMI_DONGLE_ADDRESS,
type2_dongle_buf,
sizeof(type2_dongle_buf))) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+		/* Passive HDMI dongles can sometimes fail here without retrying */
+ while (retry_count > 0) {
+ if (i2c_read(ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf)))
+ break;
+ retry_count--;
+ }
+ if (retry_count == 0) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- return;
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
}
/* Check if Type 2 dongle.*/
@@ -496,7 +511,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret;
+ bool ret = false;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -515,34 +530,32 @@ bool dal_ddc_service_query_ddc_data(
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
- struct aux_payload write_payload = {
- .i2c_over_aux = true,
- .write = true,
- .mot = true,
- .address = address,
- .length = write_size,
- .data = write_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- struct aux_payload read_payload = {
- .i2c_over_aux = true,
- .write = false,
- .mot = false,
- .address = address,
- .length = read_size,
- .data = read_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+ struct aux_payload payload;
+ bool read_available = true;
+
+ payload.i2c_over_aux = true;
+ payload.address = address;
+ payload.reply = NULL;
+ payload.defer_delay = get_defer_delay(ddc);
+
+ if (write_size != 0) {
+ payload.write = true;
+ payload.mot = false;
+ payload.length = write_size;
+ payload.data = write_buf;
+
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ read_available = ret;
+ }
- if (!ret)
- return false;
+ if (read_size != 0 && read_available) {
+ payload.write = false;
+ payload.mot = false;
+ payload.length = read_size;
+ payload.data = read_buf;
- ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ }
} else {
struct i2c_payloads *payloads =
dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -573,6 +586,41 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
+{
+ uint32_t retrieved = 0;
+ bool ret = false;
+
+ if (!ddc)
+ return false;
+
+ if (!payload)
+ return false;
+
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ payload->length ? true : false;
+
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+ current_payload.mot = !is_end_of_payload;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
+
+ ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+ retrieved += current_payload.length;
+ } while (retrieved < payload->length && ret == true);
+
+ return ret;
+}
+
/* dc_link_aux_transfer_raw() - Attempt to transfer
* the given aux payload. This function does not perform
* retries or handle error states. The reply is returned
@@ -601,6 +649,19 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
return dce_aux_transfer_with_retries(ddc, payload);
}
+
+uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout)
+{
+ uint32_t prev_timeout = 0;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout)
+ prev_timeout =
+ ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
+ return prev_timeout;
+}
+
/*test only function*/
void dal_ddc_service_set_ddc_pin(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 2c7aaed907b9..cb731c1d30b1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4,12 +4,8 @@
#include "dc_link_dp.h"
#include "dm_helpers.h"
#include "opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "resource.h"
-#endif
#include "inc/core_types.h"
#include "link_hwss.h"
@@ -21,6 +17,9 @@
#define DC_LOGGER \
link->ctx->logger
+
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+
/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
PRE_EMPHASIS_LEVEL3,
@@ -49,7 +48,7 @@ static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
-static void wait_for_training_aux_rd_interval(
+static uint32_t get_training_aux_rd_interval(
struct dc_link *link,
uint32_t default_wait_in_micro_secs)
{
@@ -68,15 +67,21 @@ static void wait_for_training_aux_rd_interval(
sizeof(training_rd_interval));
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- default_wait_in_micro_secs =
- training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
}
- udelay(default_wait_in_micro_secs);
+ return default_wait_in_micro_secs;
+}
+
+static void wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t wait_in_micro_secs)
+{
+ udelay(wait_in_micro_secs);
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
- default_wait_in_micro_secs);
+ wait_in_micro_secs);
}
static void dpcd_set_training_pattern(
@@ -95,27 +100,27 @@ static void dpcd_set_training_pattern(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
-static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
+static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link)
{
- enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+ enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
struct encoder_feature_support *features = &link->link_enc->features;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
if (features->flags.bits.IS_TPS3_CAPABLE)
- highest_tp = HW_DP_TRAINING_PATTERN_3;
+ highest_tp = DP_TRAINING_PATTERN_SEQUENCE_3;
if (features->flags.bits.IS_TPS4_CAPABLE)
- highest_tp = HW_DP_TRAINING_PATTERN_4;
+ highest_tp = DP_TRAINING_PATTERN_SEQUENCE_4;
if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
- highest_tp >= HW_DP_TRAINING_PATTERN_4)
- return HW_DP_TRAINING_PATTERN_4;
+ highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_4)
+ return DP_TRAINING_PATTERN_SEQUENCE_4;
if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
- highest_tp >= HW_DP_TRAINING_PATTERN_3)
- return HW_DP_TRAINING_PATTERN_3;
+ highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_3)
+ return DP_TRAINING_PATTERN_SEQUENCE_3;
- return HW_DP_TRAINING_PATTERN_2;
+ return DP_TRAINING_PATTERN_SEQUENCE_2;
}
static void dpcd_set_link_settings(
@@ -126,7 +131,7 @@ static void dpcd_set_link_settings(
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- enum hw_dp_training_pattern hw_tr_pattern;
+ enum dc_dp_training_pattern dp_tr_pattern;
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -134,21 +139,21 @@ static void dpcd_set_link_settings(
lane_count_set.bits.LANE_COUNT_SET =
lt_settings->link_settings.lane_count;
- lane_count_set.bits.ENHANCED_FRAMING = 1;
-
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
- hw_tr_pattern = get_supported_tp(link);
- if (hw_tr_pattern != HW_DP_TRAINING_PATTERN_4) {
+ dp_tr_pattern = get_supported_tp(link);
+
+ if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) {
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
}
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
+ &downspread.raw, sizeof(downspread));
core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
+ &lane_count_set.raw, 1);
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
lt_settings->link_settings.use_link_rate_set == true) {
@@ -162,46 +167,47 @@ static void dpcd_set_link_settings(
}
if (rate) {
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
__func__,
DP_LINK_BW_SET,
lt_settings->link_settings.link_rate,
DP_LANE_COUNT_SET,
lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
} else {
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
__func__,
DP_LINK_RATE_SET,
lt_settings->link_settings.link_rate_set,
DP_LANE_COUNT_SET,
lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
}
-
}
static enum dpcd_training_patterns
- hw_training_pattern_to_dpcd_training_pattern(
+ dc_dp_training_pattern_to_dpcd_training_pattern(
struct dc_link *link,
- enum hw_dp_training_pattern pattern)
+ enum dc_dp_training_pattern pattern)
{
enum dpcd_training_patterns dpcd_tr_pattern =
DPCD_TRAINING_PATTERN_VIDEOIDLE;
switch (pattern) {
- case HW_DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
break;
- case HW_DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
break;
- case HW_DP_TRAINING_PATTERN_3:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
break;
- case HW_DP_TRAINING_PATTERN_4:
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
break;
default:
@@ -212,37 +218,55 @@ static enum dpcd_training_patterns
}
return dpcd_tr_pattern;
+}
+static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+{
+ return (!link->is_lttpr_mode_transparent && offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings,
- enum hw_dp_training_pattern pattern)
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
{
union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
- const uint32_t dpcd_base_lt_offset =
- DP_TRAINING_PATTERN_SET;
+
+ uint32_t dpcd_base_lt_offset;
+
uint8_t dpcd_lt_buffer[5] = {0};
union dpcd_training_pattern dpcd_pattern = { {0} };
uint32_t lane;
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
+
+ if (is_repeater(link, offset))
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
/*****************************************************************
* DpcdAddress_TrainingPatternSet
*****************************************************************/
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- hw_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
- dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
- DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
- __func__,
- DP_TRAINING_PATTERN_SET,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ }
/*****************************************************************
* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
*****************************************************************/
@@ -262,24 +286,35 @@ static void dpcd_set_lt_pattern_and_lane_settings(
PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
}
- /* concatinate everything into one buffer*/
+ /* concatenate everything into one buffer*/
size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
// 0x00103 - 0x00102
memmove(
- &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
dpcd_lane,
size_in_bytes);
- DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- DP_TRAINING_LANE0_SET,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
-
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
if (edp_workaround) {
/* for eDP write in 2 parts because the 5-byte burst is
* causing issues on some eDP panels (EPR#366724)
@@ -346,12 +381,20 @@ static void update_drive_settings(
{
uint32_t lane;
for (lane = 0; lane < src.link_settings.lane_count; lane++) {
- dest->lane_settings[lane].VOLTAGE_SWING =
- src.lane_settings[lane].VOLTAGE_SWING;
- dest->lane_settings[lane].PRE_EMPHASIS =
- src.lane_settings[lane].PRE_EMPHASIS;
- dest->lane_settings[lane].POST_CURSOR2 =
- src.lane_settings[lane].POST_CURSOR2;
+ if (dest->voltage_swing == NULL)
+ dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING;
+ else
+ dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing;
+
+ if (dest->pre_emphasis == NULL)
+ dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS;
+ else
+ dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis;
+
+ if (dest->post_cursor2 == NULL)
+ dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2;
+ else
+ dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2;
}
}
@@ -481,8 +524,12 @@ static void get_lane_status_and_drive_settings(
const struct link_training_settings *link_training_setting,
union lane_status *ln_status,
union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings)
+ struct link_training_settings *req_settings,
+ uint32_t offset)
{
+ unsigned int lane01_status_address = DP_LANE0_1_STATUS;
+ uint8_t lane_adjust_offset = 4;
+ unsigned int lane01_adjust_address;
uint8_t dpcd_buf[6] = {0};
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
struct link_training_settings request_settings = { {0} };
@@ -490,9 +537,16 @@ static void get_lane_status_and_drive_settings(
memset(req_settings, '\0', sizeof(struct link_training_settings));
+ if (is_repeater(link, offset)) {
+ lane01_status_address =
+ DP_LANE0_1_STATUS_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ lane_adjust_offset = 3;
+ }
+
core_link_read_dpcd(
link,
- DP_LANE0_1_STATUS,
+ lane01_status_address,
(uint8_t *)(dpcd_buf),
sizeof(dpcd_buf));
@@ -503,22 +557,47 @@ static void get_lane_status_and_drive_settings(
ln_status[lane].raw =
get_nibble_at_index(&dpcd_buf[0], lane);
dpcd_lane_adjust[lane].raw =
- get_nibble_at_index(&dpcd_buf[4], lane);
+ get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
}
ln_status_updated->raw = dpcd_buf[2];
- DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
- __func__,
- DP_LANE0_1_STATUS, dpcd_buf[0],
- DP_LANE2_3_STATUS, dpcd_buf[1]);
-
- DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
- __func__,
- DP_ADJUST_REQUEST_LANE0_1,
- dpcd_buf[4],
- DP_ADJUST_REQUEST_LANE2_3,
- dpcd_buf[5]);
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ offset,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+ }
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
+
+ if (is_repeater(link, offset))
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ offset,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ }
/*copy to req_settings*/
request_settings.link_settings.lane_count =
@@ -557,10 +636,18 @@ static void get_lane_status_and_drive_settings(
static void dpcd_set_lane_settings(
struct dc_link *link,
- const struct link_training_settings *link_training_setting)
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset)
{
union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
uint32_t lane;
+ unsigned int lane0_set_address;
+
+ lane0_set_address = DP_TRAINING_LANE0_SET;
+
+ if (is_repeater(link, offset))
+ lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
for (lane = 0; lane <
(uint32_t)(link_training_setting->
@@ -583,7 +670,7 @@ static void dpcd_set_lane_settings(
}
core_link_write_dpcd(link,
- DP_TRAINING_LANE0_SET,
+ lane0_set_address,
(uint8_t *)(dpcd_lane),
link_training_setting->link_settings.lane_count);
@@ -606,14 +693,26 @@ static void dpcd_set_lane_settings(
}
*/
- DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- DP_TRAINING_LANE0_SET,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ lane0_set_address,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
link->cur_lane_setting = link_training_setting->lane_settings[0];
}
@@ -633,17 +732,6 @@ static bool is_max_vs_reached(
}
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- struct link_training_settings *lt_settings)
-{
- /* program ASIC PHY settings*/
- dp_set_hw_lane_settings(link, lt_settings);
-
- /* Notify DP sink the PHY settings from source */
- dpcd_set_lane_settings(link, lt_settings);
-}
-
static bool perform_post_lt_adj_req_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
@@ -676,7 +764,8 @@ static bool perform_post_lt_adj_req_sequence(
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings);
+ &req_settings,
+ DPRX);
if (dpcd_lane_status_updated.bits.
POST_LT_ADJ_REQ_IN_PROGRESS == 0)
@@ -733,6 +822,31 @@ static bool perform_post_lt_adj_req_sequence(
}
+/* Only used for channel equalization */
+static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
+{
+ unsigned int aux_rd_interval_us = 400;
+
+ switch (dpcd_aux_read_interval) {
+ case 0x01:
+ aux_rd_interval_us = 400;
+ break;
+ case 0x02:
+ aux_rd_interval_us = 4000;
+ break;
+ case 0x03:
+ aux_rd_interval_us = 8000;
+ break;
+ case 0x04:
+ aux_rd_interval_us = 16000;
+ break;
+ default:
+ break;
+ }
+
+ return aux_rd_interval_us;
+}
+
static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
@@ -751,37 +865,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
static enum link_training_result perform_channel_equalization_sequence(
struct dc_link *link,
- struct link_training_settings *lt_settings)
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
{
struct link_training_settings req_settings;
- enum hw_dp_training_pattern hw_tr_pattern;
+ enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = { {0} };
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
- hw_tr_pattern = get_supported_tp(link);
+ /* Note: also check that TPS4 is a supported feature*/
- dp_set_hw_training_pattern(link, hw_tr_pattern);
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ if (is_repeater(link, offset))
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
retries_ch_eq++) {
- dp_set_hw_lane_settings(link, lt_settings);
+ dp_set_hw_lane_settings(link, lt_settings, offset);
/* 2. update DPCD*/
if (!retries_ch_eq)
/* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration*/
+ * but only for the 1-st iteration
+ */
+
dpcd_set_lt_pattern_and_lane_settings(
link,
lt_settings,
- hw_tr_pattern);
+ tr_pattern, offset);
else
- dpcd_set_lane_settings(link, lt_settings);
+ dpcd_set_lane_settings(link, lt_settings, offset);
/* 3. wait for receiver to lock-on*/
- wait_for_training_aux_rd_interval(link, 400);
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ if (is_repeater(link, offset))
+ wait_time_microsec =
+ translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+ wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
/* 4. Read lane status and requested
* drive settings as set by the sink*/
@@ -791,7 +923,8 @@ static enum link_training_result perform_channel_equalization_sequence(
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings);
+ &req_settings,
+ offset);
/* 5. check CR done*/
if (!is_cr_done(lane_count, dpcd_lane_status))
@@ -810,34 +943,26 @@ static enum link_training_result perform_channel_equalization_sequence(
return LINK_TRAINING_EQ_FAIL_EQ;
}
+#define TRAINING_AUX_RD_INTERVAL 100 //us
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
- struct link_training_settings *lt_settings)
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
{
uint32_t retries_cr;
uint32_t retry_count;
- uint32_t lane;
+ uint32_t wait_time_microsec;
struct link_training_settings req_settings;
- enum dc_lane_count lane_count =
- lt_settings->link_settings.lane_count;
- enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
retries_cr = 0;
retry_count = 0;
- /* initial drive setting (VS/PE/PC2)*/
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->lane_settings[lane].VOLTAGE_SWING =
- VOLTAGE_SWING_LEVEL0;
- lt_settings->lane_settings[lane].PRE_EMPHASIS =
- PRE_EMPHASIS_DISABLED;
- lt_settings->lane_settings[lane].POST_CURSOR2 =
- POST_CURSOR2_DISABLED;
- }
- dp_set_hw_training_pattern(link, hw_tr_pattern);
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -845,7 +970,7 @@ static enum link_training_result perform_clock_recovery_sequence(
/* between level 0 and level 1 continuously, here
* we try for CR lock for LinkTrainingMaxCRRetry count*/
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
memset(&dpcd_lane_status_updated, '\0',
@@ -854,25 +979,33 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 1. call HWSS to set lane settings*/
dp_set_hw_lane_settings(
link,
- lt_settings);
+ lt_settings,
+ offset);
/* 2. update DPCD of the receiver*/
- if (!retries_cr)
+ if (!retry_count)
/* EPR #361076 - write as a 5-byte burst,
* but only for the 1-st iteration.*/
dpcd_set_lt_pattern_and_lane_settings(
link,
lt_settings,
- hw_tr_pattern);
+ tr_pattern,
+ offset);
else
dpcd_set_lane_settings(
link,
- lt_settings);
+ lt_settings,
+ offset);
/* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ if (!link->is_lttpr_mode_transparent)
+ wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+
wait_for_training_aux_rd_interval(
link,
- 100);
+ wait_time_microsec);
/* 4. Read lane status and requested drive
* settings as set by the sink
@@ -882,7 +1015,8 @@ static enum link_training_result perform_clock_recovery_sequence(
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings);
+ &req_settings,
+ offset);
/* 5. check CR done*/
if (is_cr_done(lane_count, dpcd_lane_status))
@@ -939,7 +1073,7 @@ static inline enum link_training_result perform_link_training_int(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
- get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
+ get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4)
return status;
if (status == LINK_TRAINING_SUCCESS &&
@@ -947,7 +1081,7 @@ static inline enum link_training_result perform_link_training_int(
status = LINK_TRAINING_LQA_FAIL;
lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
- lane_count_set.bits.ENHANCED_FRAMING = 1;
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
core_link_write_dpcd(
@@ -959,24 +1093,29 @@ static inline enum link_training_result perform_link_training_int(
return status;
}
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
+static void initialize_training_settings(
+ struct dc_link *link,
const struct dc_link_settings *link_setting,
- bool skip_video_pattern)
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings)
{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint32_t lane;
- char *link_rate = "Unknown";
- char *lt_result = "Unknown";
+ memset(lt_settings, '\0', sizeof(struct link_training_settings));
- struct link_training_settings lt_settings;
+ /* Initialize link settings */
+ lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings->link_settings.link_rate_set = link_setting->link_rate_set;
- memset(&lt_settings, '\0', sizeof(lt_settings));
+ if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
+ else
+ lt_settings->link_settings.link_rate = link_setting->link_rate;
- lt_settings.link_settings.link_rate = link_setting->link_rate;
- lt_settings.link_settings.lane_count = link_setting->lane_count;
- lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set;
- lt_settings.link_settings.link_rate_set = link_setting->link_rate_set;
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
+ lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
+ else
+ lt_settings->link_settings.lane_count = link_setting->lane_count;
/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
@@ -987,31 +1126,171 @@ enum link_training_result dc_link_dp_perform_link_training(
* LINK_SPREAD_05_DOWNSPREAD_30KHZ :
* LINK_SPREAD_DISABLED;
*/
+ /* Initialize link spread */
if (link->dp_ss_off)
- lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
+ lt_settings->link_settings.link_spread = LINK_SPREAD_DISABLED;
+ else if (overrides->downspread != NULL)
+ lt_settings->link_settings.link_spread
+ = *overrides->downspread
+ ? LINK_SPREAD_05_DOWNSPREAD_30KHZ
+ : LINK_SPREAD_DISABLED;
else
- lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- /* 1. set link rate, lane count and spread*/
- dpcd_set_link_settings(link, &lt_settings);
+ /* Initialize lane settings overrides */
+ if (overrides->voltage_swing != NULL)
+ lt_settings->voltage_swing = overrides->voltage_swing;
- /* 2. perform link training (set link training done
- * to false is done as well)*/
- status = perform_clock_recovery_sequence(link, &lt_settings);
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_channel_equalization_sequence(link,
- &lt_settings);
+ if (overrides->pre_emphasis != NULL)
+ lt_settings->pre_emphasis = overrides->pre_emphasis;
+
+ if (overrides->post_cursor2 != NULL)
+ lt_settings->post_cursor2 = overrides->post_cursor2;
+
+ /* Initialize lane settings (VS/PE/PC2) */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->lane_settings[lane].VOLTAGE_SWING =
+ lt_settings->voltage_swing != NULL ?
+ *lt_settings->voltage_swing :
+ VOLTAGE_SWING_LEVEL0;
+ lt_settings->lane_settings[lane].PRE_EMPHASIS =
+ lt_settings->pre_emphasis != NULL ?
+ *lt_settings->pre_emphasis
+ : PRE_EMPHASIS_DISABLED;
+ lt_settings->lane_settings[lane].POST_CURSOR2 =
+ lt_settings->post_cursor2 != NULL ?
+ *lt_settings->post_cursor2
+ : POST_CURSOR2_DISABLED;
}
- if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
- status = perform_link_training_int(link,
- &lt_settings,
- status);
+ /* Initialize training timings */
+ if (overrides->cr_pattern_time != NULL)
+ lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
+ else
+ lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100);
+
+ if (overrides->eq_pattern_time != NULL)
+ lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
+ else
+ lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400);
+
+ if (overrides->pattern_for_eq != NULL)
+ lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
+ else
+ lt_settings->pattern_for_eq = get_supported_tp(link);
+
+ if (overrides->enhanced_framing != NULL)
+ lt_settings->enhanced_framing = *overrides->enhanced_framing;
+ else
+ lt_settings->enhanced_framing = 1;
+}
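/*
 * Minimal usage sketch (an assumption, not taken from this patch): the
 * override members consumed by initialize_training_settings() are pointers,
 * where NULL means "use the default / sink-requested value". A caller that
 * wants to pin a drive level could do something like the following; the
 * pointee must remain valid for the duration of link training, and the
 * helper name here is purely illustrative.
 */
static void example_force_voltage_swing(struct dc_link *link,
		enum dc_voltage_swing *vs_level)
{
	/* e.g. *vs_level == VOLTAGE_SWING_LEVEL1 overrides the sink request */
	link->preferred_training_settings.voltage_swing = vs_level;
}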
+
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+ switch (lttpr_repeater_count) {
+ case 0x80: // 1 lttpr repeater
+ return 1;
+ case 0x40: // 2 lttpr repeaters
+ return 2;
+ case 0x20: // 3 lttpr repeaters
+ return 3;
+ case 0x10: // 4 lttpr repeaters
+ return 4;
+ case 0x08: // 5 lttpr repeaters
+ return 5;
+ case 0x04: // 6 lttpr repeaters
+ return 6;
+ case 0x02: // 7 lttpr repeaters
+ return 7;
+ case 0x01: // 8 lttpr repeaters
+ return 8;
+ default:
+ break;
}
+ return 0; // invalid value
+}
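/*
 * Equivalent formulation, shown only for illustration (assumes
 * <linux/log2.h>): DP_PHY_REPEATER_CNT is a one-hot field where bit 7
 * encodes one repeater and bit 0 encodes eight, so a valid value maps to
 * 8 - ilog2(value).
 */
static uint8_t convert_to_count_alt(uint8_t lttpr_repeater_count)
{
	/* reject zero and non-one-hot values, mirroring the switch above */
	if (lttpr_repeater_count == 0 ||
	    (lttpr_repeater_count & (lttpr_repeater_count - 1)))
		return 0;
	return 8 - ilog2(lttpr_repeater_count);
}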
- /* 6. print status message*/
- switch (lt_settings.link_settings.link_rate) {
+static void configure_lttpr_mode(struct dc_link *link)
+{
+ /* aux timeout is already set to extended */
+ /* RESET/SET LTTPR mode to enable non-transparent mode */
+ uint8_t repeater_cnt;
+ uint32_t aux_interval_address;
+ uint8_t repeater_id;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ if (!link->is_lttpr_mode_transparent) {
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
+
+ repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+ aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ core_link_read_dpcd(
+ link,
+ aux_interval_address,
+ (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
+ sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+ }
+ }
+}
+
+static void repeater_training_done(struct dc_link *link, uint32_t offset)
+{
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ /* Set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ &dpcd_pattern.raw,
+ 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void print_status_message(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum link_training_result status)
+{
+ char *link_rate = "Unknown";
+ char *lt_result = "Unknown";
+ char *lt_spread = "Disabled";
+ switch (lt_settings->link_settings.link_rate) {
case LINK_RATE_LOW:
link_rate = "RBR";
break;
@@ -1057,13 +1336,158 @@ enum link_training_result dc_link_dp_perform_link_training(
break;
}
+ switch (lt_settings->link_settings.link_spread) {
+ case LINK_SPREAD_DISABLED:
+ lt_spread = "Disabled";
+ break;
+ case LINK_SPREAD_05_DOWNSPREAD_30KHZ:
+ lt_spread = "0.5% 30KHz";
+ break;
+ case LINK_SPREAD_05_DOWNSPREAD_33KHZ:
+ lt_spread = "0.5% 33KHz";
+ break;
+ default:
+ break;
+ }
+
/* Connectivity log: link training */
- CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
- link_rate,
- lt_settings.link_settings.lane_count,
- lt_result,
- lt_settings.lane_settings[0].VOLTAGE_SWING,
- lt_settings.lane_settings[0].PRE_EMPHASIS);
+ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
+ link_rate,
+ lt_settings->link_settings.lane_count,
+ lt_result,
+ lt_settings->lane_settings[0].VOLTAGE_SWING,
+ lt_settings->lane_settings[0].PRE_EMPHASIS,
+ lt_spread);
+}
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+}
+
+bool dc_link_dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+ struct link_training_settings lt_settings;
+ enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1;
+
+ initialize_training_settings(
+ link,
+ link_setting,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* 1. Perform_clock_recovery_sequence. */
+
+ /* transmit training pattern for clock recovery */
+ dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
+
+ /* call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(link, &lt_settings, DPRX);
+
+ /* wait receiver to lock-on*/
+ wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
+
+ /* 2. Perform_channel_equalization_sequence. */
+
+ /* transmit training pattern for channel equalization. */
+ dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
+
+ /* call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(link, &lt_settings, DPRX);
+
+ /* wait receiver to lock-on. */
+ wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
+
+ /* 3. Perform_link_training_int. */
+
+ /* Mainlink output idle pattern. */
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS);
+
+ return true;
+}
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ struct link_training_settings lt_settings;
+
+ bool fec_enable;
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
+
+ initialize_training_settings(
+ link,
+ link_setting,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, &lt_settings);
+
+ if (link->preferred_training_settings.fec_enable != NULL)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+
+ dp_set_fec_ready(link, fec_enable);
+
+ if (!link->is_lttpr_mode_transparent) {
+ /* Configure lttpr mode */
+ configure_lttpr_mode(link);
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)
+ */
+ repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ status = perform_channel_equalization_sequence(link,
+ &lt_settings,
+ repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ repeater_training_done(link, repeater_id);
+ }
+ }
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_channel_equalization_sequence(link,
+ &lt_settings,
+ DPRX);
+ }
+ }
+
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+ status = perform_link_training_int(link,
+ &lt_settings,
+ status);
+ }
+
+ /* 6. print status message*/
+ print_status_message(link, &lt_settings, status);
if (status != LINK_TRAINING_SUCCESS)
link->ctx->dc->debug_data.ltFailCount++;
@@ -1071,31 +1495,200 @@ enum link_training_result dc_link_dp_perform_link_training(
return status;
}
-
bool perform_link_training_with_retries(
- struct dc_link *link,
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
- int attempts)
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal)
{
uint8_t j;
uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
for (j = 0; j < attempts; ++j) {
- if (dc_link_dp_perform_link_training(
+ dp_enable_link_phy(
+ link,
+ signal,
+ pipe_ctx->clock_source->id,
+ link_setting);
+
+ if (stream->sink_patches.dppowerup_delay > 0) {
+ int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+ msleep(delay_dp_power_up_in_ms);
+ }
+
+ dp_set_panel_mode(link, panel_mode);
+
+ /* We need to do this before the link training to ensure the idle pattern in SST
+ * mode will be sent right after the link training
+ */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+ if (link->aux_access_disabled) {
+ dc_link_dp_perform_link_training_skip_aux(link, link_setting);
+ return true;
+ } else if (dc_link_dp_perform_link_training(
link,
link_setting,
skip_video_pattern) == LINK_TRAINING_SUCCESS)
return true;
+ /* if the latest link training attempt still fails, skip the delay and keep the PHY on
+ */
+ if (j == (attempts - 1))
+ break;
+
+ dp_disable_link_phy(link, signal);
+
msleep(delay_between_attempts);
+
delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
}
return false;
}
+static enum clock_source_id get_clock_source_id(struct dc_link *link)
+{
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
+ struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;
+
+ if (dp_cs != NULL) {
+ dp_cs_id = dp_cs->id;
+ } else {
+ /*
+ * dp clock source is not initialized for some reason.
+ * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
+ */
+ ASSERT(dp_cs);
+ }
+
+ return dp_cs_id;
+}
+
+static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
+{
+ if (mst_enable == false &&
+ link->type == dc_connection_mst_branch) {
+ /* Disable MST on link. Use only local sink. */
+ dp_disable_link_phy_mst(link, link->connector_signal);
+
+ link->type = dc_connection_single;
+ link->local_sink = link->remote_sinks[0];
+ link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ } else if (mst_enable == true &&
+ link->type == dc_connection_single &&
+ link->remote_sinks[0] != NULL) {
+ /* Re-enable MST on link. */
+ dp_disable_link_phy(link, link->connector_signal);
+ dp_enable_mst_on_sink(link, true);
+
+ link->type = dc_connection_mst_branch;
+ link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ }
+}
+
+bool dc_link_dp_sync_lt_begin(struct dc_link *link)
+{
+ /* Begin Sync LT. During this time,
+ * DPCD:600h must not be powered down.
+ */
+ link->sync_lt_in_progress = true;
+
+ /*Clear any existing preferred settings.*/
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+ return true;
+}
+
+enum link_training_result dc_link_dp_sync_lt_attempt(
+ struct dc_link *link,
+ struct dc_link_settings *link_settings,
+ struct dc_link_training_overrides *lt_overrides)
+{
+ struct link_training_settings lt_settings;
+ enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
+ enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+ bool fec_enable = false;
+
+ initialize_training_settings(
+ link,
+ link_settings,
+ lt_overrides,
+ &lt_settings);
+
+ /* Setup MST Mode */
+ if (lt_overrides->mst_enable)
+ set_dp_mst_mode(link, *lt_overrides->mst_enable);
+
+ /* Disable link */
+ dp_disable_link_phy(link, link->connector_signal);
+
+ /* Enable link */
+ dp_cs_id = get_clock_source_id(link);
+ dp_enable_link_phy(link, link->connector_signal,
+ dp_cs_id, link_settings);
+
+ /* Set FEC enable */
+ fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
+ dp_set_fec_ready(link, fec_enable);
+
+ if (lt_overrides->alternate_scrambler_reset) {
+ if (*lt_overrides->alternate_scrambler_reset)
+ panel_mode = DP_PANEL_MODE_EDP;
+ else
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+ } else
+ panel_mode = dp_get_panel_mode(link);
+
+ dp_set_panel_mode(link, panel_mode);
+
+ /* Attempt to train with given link training settings */
+
+ /* Set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, &lt_settings);
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)
+ */
+ lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
+ if (lt_status == LINK_TRAINING_SUCCESS) {
+ lt_status = perform_channel_equalization_sequence(link,
+ &lt_settings,
+ DPRX);
+ }
+
+ /* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/
+ /* 4. print status message*/
+ print_status_message(link, &lt_settings, lt_status);
+
+ return lt_status;
+}
+
+bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
+{
+ /* If input parameter is set, shut down phy.
+ * Still shouldn't turn off dp_receiver (DPCD:600h)
+ */
+ if (link_down == true) {
+ dp_disable_link_phy(link, link->connector_signal);
+ dp_set_fec_ready(link, false);
+ }
+
+ link->sync_lt_in_progress = false;
+ return true;
+}
+
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
/* Set Default link settings */
@@ -1109,6 +1702,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (link->link_enc->funcs->get_max_link_cap)
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
max_link_cap.lane_count =
@@ -1120,6 +1716,22 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
max_link_cap.link_spread)
max_link_cap.link_spread =
link->reported_link_cap.link_spread;
+ /*
+ * account for the LTTPR repeater caps
+ * note: repeaters do not snoop on the DPRX Capabilities addresses (3.6.3).
+ */
+ if (!link->is_lttpr_mode_transparent) {
+ if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+ if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
+ __func__,
+ max_link_cap.lane_count,
+ max_link_cap.link_rate);
+ }
return max_link_cap;
}
@@ -1250,7 +1862,6 @@ bool dp_verify_link_cap(
bool success;
bool skip_link_training;
bool skip_video_pattern;
- struct clock_source *dp_cs;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
union hpd_irq_data irq_data;
@@ -1266,6 +1877,13 @@ bool dp_verify_link_cap(
max_link_cap = get_max_link_cap(link);
+ /* Grant extended timeout request */
+ if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
+
+ core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
+ }
+
/* TODO implement override and monitor patch later */
/* try to train the link from high to low to
@@ -1274,18 +1892,18 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
- dp_cs = link->dc->res_pool->dp_clock_source;
-
- if (dp_cs)
- dp_cs_id = dp_cs->id;
- else {
- /*
- * dp clock source is not initialized for some reason.
- * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
- */
- ASSERT(dp_cs);
+ /* Temporary Renoir-specific workaround for SWDEV-215184;
+ * the PHY will sometimes be in a bad state when hotplugging a display from certain USB-C dongles,
+ * so add an extra cycle of enabling and disabling the PHY before the first link training.
+ */
+ if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ link->dc->debug.usbc_combo_phy_reset_wa) {
+ dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
+ dp_disable_link_phy(link, link->connector_signal);
}
+ dp_cs_id = get_clock_source_id(link);
+
/* link training starts with the maximum common settings
* supported by both sink and ASIC.
*/
@@ -1354,6 +1972,49 @@ bool dp_verify_link_cap(
return success;
}
+bool dp_verify_link_cap_with_retries(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int attempts)
+{
+ uint8_t i = 0;
+ bool success = false;
+
+ for (i = 0; i < attempts; i++) {
+ int fail_count = 0;
+ enum dc_connection_type type = dc_connection_none;
+
+ memset(&link->verified_link_cap, 0,
+ sizeof(struct dc_link_settings));
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+ link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
+ break;
+ } else if (dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count) && fail_count == 0) {
+ success = true;
+ break;
+ }
+ msleep(10);
+ }
+ return success;
+}
+
+bool dp_verify_mst_link_cap(
+ struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+
+ return true;
+}
+
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
@@ -1741,11 +2402,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_enabled)
+ if (!link->psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -1784,8 +2445,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_enable(link, false, true);
- dc_link_set_psr_enable(link, true, true);
+ dc_link_set_psr_allow_active(link, false, true);
+ dc_link_set_psr_allow_active(link, true, true);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -1945,6 +2606,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
dc_link_dp_set_test_pattern(
link,
test_pattern,
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
&link_training_settings,
test_80_bit_pattern,
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
@@ -1956,6 +2618,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
union link_test_pattern dpcd_test_pattern;
union test_misc dpcd_test_params;
enum dp_test_pattern test_pattern;
+ enum dp_test_pattern_color_space test_pattern_color_space =
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
@@ -1990,14 +2654,105 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+
dc_link_dp_set_test_pattern(
link,
test_pattern,
+ test_pattern_color_space,
NULL,
NULL,
0);
}
+static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
+{
+ union audio_test_mode dpcd_test_mode = {0};
+ struct audio_test_pattern_type dpcd_pattern_type = {0};
+ union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int channel_count;
+ unsigned int channel = 0;
+ unsigned int modes = 0;
+ unsigned int sampling_rate_in_hz = 0;
+
+ // get audio test mode and test pattern parameters
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_MODE,
+ &dpcd_test_mode.raw,
+ sizeof(dpcd_test_mode));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PATTERN_TYPE,
+ &dpcd_pattern_type.value,
+ sizeof(dpcd_pattern_type));
+
+ channel_count = dpcd_test_mode.bits.channel_count + 1;
+
+ // read per-channel pattern periods when a sawtooth or operator-defined pattern is requested
+ if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
+ dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
+
+ test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+ // read period for each channel
+ for (channel = 0; channel < channel_count; channel++) {
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PERIOD_CH1 + channel,
+ &dpcd_pattern_period[channel].raw,
+ sizeof(dpcd_pattern_period[channel]));
+ }
+ }
+
+ // translate sampling rate
+ switch (dpcd_test_mode.bits.sampling_rate) {
+ case AUDIO_SAMPLING_RATE_32KHZ:
+ sampling_rate_in_hz = 32000;
+ break;
+ case AUDIO_SAMPLING_RATE_44_1KHZ:
+ sampling_rate_in_hz = 44100;
+ break;
+ case AUDIO_SAMPLING_RATE_48KHZ:
+ sampling_rate_in_hz = 48000;
+ break;
+ case AUDIO_SAMPLING_RATE_88_2KHZ:
+ sampling_rate_in_hz = 88200;
+ break;
+ case AUDIO_SAMPLING_RATE_96KHZ:
+ sampling_rate_in_hz = 96000;
+ break;
+ case AUDIO_SAMPLING_RATE_176_4KHZ:
+ sampling_rate_in_hz = 176400;
+ break;
+ case AUDIO_SAMPLING_RATE_192KHZ:
+ sampling_rate_in_hz = 192000;
+ break;
+ default:
+ sampling_rate_in_hz = 0;
+ break;
+ }
+
+ link->audio_test_data.flags.test_requested = 1;
+ link->audio_test_data.flags.disable_video = disable_video;
+ link->audio_test_data.sampling_rate = sampling_rate_in_hz;
+ link->audio_test_data.channel_count = channel_count;
+ link->audio_test_data.pattern_type = test_pattern;
+
+ if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
+ for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
+ link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
+ }
+ }
+}
+
static void handle_automated_test(struct dc_link *link)
{
union test_request test_request;
@@ -2027,6 +2782,12 @@ static void handle_automated_test(struct dc_link *link)
dp_test_send_link_test_pattern(link);
test_response.bits.ACK = 1;
}
+
+ if (test_request.bits.AUDIO_TEST_PATTERN) {
+ dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
+ test_response.bits.ACK = 1;
+ }
+
if (test_request.bits.PHY_TEST_PATTERN) {
dp_test_send_phy_test_pattern(link);
test_response.bits.ACK = 1;
@@ -2046,8 +2807,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
enum dc_status result;
-
bool status = false;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_link_settings previous_link_settings;
+ int i;
if (out_link_loss)
*out_link_loss = false;
@@ -2110,19 +2873,36 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
/* For now we only handle 'Downstream port status' case.
* If we got sink count changed it means
* Downstream port status changed,
- * then DM should call DC to do the detection. */
- if (hpd_rx_irq_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+ * then DM should call DC to do the detection.
+ * NOTE: Do not handle link loss on eDP since it is an internal link */
+ if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+ hpd_rx_irq_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
sizeof(hpd_irq_dpcd_data),
"Status: ");
- perform_link_training_with_retries(link,
- &link->cur_link_settings,
- true, LINK_TRAINING_ATTEMPTS);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ break;
+ }
+
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return false;
+
+ previous_link_settings = link->cur_link_settings;
+
+ perform_link_training_with_retries(&previous_link_settings,
+ true, LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_link_reallocate_mst_payload(link);
status = false;
if (out_link_loss)
@@ -2156,6 +2936,11 @@ bool is_mst_supported(struct dc_link *link)
union dpcd_rev rev;
union mstm_cap cap;
+ if (link->preferred_training_settings.mst_enable &&
+ *link->preferred_training_settings.mst_enable == false) {
+ return false;
+ }
+
rev.raw = 0;
cap.raw = 0;
@@ -2224,6 +3009,7 @@ static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
/* decode converter info*/
if (!ds_port.fields.PORT_PRESENT) {
@@ -2345,7 +3131,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
int length)
{
int retry = 0;
- union dp_downstream_port_present ds_port = { 0 };
if (!link->dpcd_caps.dpcd_rev.raw) {
do {
@@ -2358,18 +3143,16 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
}
- ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
- DP_DPCD_REV];
-
if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
switch (link->dpcd_caps.branch_dev_id) {
- /* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+ /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
* all internal circuits including AUX communication preventing
* reading DPCD table and EDID (spec violation).
* Encoder will skip DP RX power down on disable_output to
* keep receiver powered all the time.*/
- case DP_BRANCH_DEVICE_ID_1:
- case DP_BRANCH_DEVICE_ID_4:
+ case DP_BRANCH_DEVICE_ID_0010FA:
+ case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
link->wa_flags.dp_keep_receiver_powered = true;
break;
@@ -2384,7 +3167,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
static bool retrieve_link_cap(struct dc_link *link)
{
- uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
+ * which means size 16 will be good for both of those DPCD register block reads
+ */
+ uint8_t dpcd_data[16];
+ uint8_t lttpr_dpcd_data[6];
/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
*/
@@ -2400,7 +3187,19 @@ static bool retrieve_link_cap(struct dc_link *link)
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ /* Set default timeout to 3.2ms and read LTTPR capabilities */
+ bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support &&
+ !link->dc->config.disable_extended_timeout_support;
+
+ link->is_lttpr_mode_transparent = true;
+
+ if (ext_timeout_support) {
+ dc_link_aux_configure_timeout(link->ddc,
+ LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);
+ }
+
memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
memset(&down_strm_port_count,
'\0', sizeof(union down_stream_port_count));
memset(&edp_config_cap, '\0',
@@ -2432,6 +3231,52 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
+ if (ext_timeout_support) {
+
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) {
+ link->is_lttpr_mode_transparent = false;
+ } else {
+ /* No LTTPR present: reset the AUX timeout to its default value */
+ link->is_lttpr_mode_transparent = true;
+ dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+ }
+
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ }
+
{
union training_aux_rd_interval aux_rd_interval;
@@ -2439,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link)
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
link->dpcd_caps.ext_receiver_cap_field_present =
- aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
@@ -2570,7 +3415,6 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -2592,7 +3436,6 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
}
-#endif
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2600,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link)
return true;
}
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[16];
+ uint32_t read_dpcd_retry_cnt = 3;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ union dp_downstream_port_present ds_port = { 0 };
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+
+ int i;
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ return true;
+}
+
bool detect_dp_sink_caps(struct dc_link *link)
{
return retrieve_link_cap(link);
@@ -2714,21 +3619,20 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *pipe_ctx,
- enum dp_test_pattern test_pattern)
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space)
{
enum controller_dp_test_pattern controller_test_pattern;
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
int width = pipe_ctx->stream->timing.h_addressable +
pipe_ctx->stream->timing.h_border_left +
pipe_ctx->stream->timing.h_border_right;
int height = pipe_ctx->stream->timing.v_addressable +
pipe_ctx->stream->timing.v_border_bottom +
pipe_ctx->stream->timing.v_border_top;
-#endif
memset(&params, 0, sizeof(params));
@@ -2772,17 +3676,42 @@ static void set_crtc_test_pattern(struct dc_link *link,
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else if (opp->funcs->opp_set_disp_pattern_generator) {
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct pipe_ctx *odm_pipe;
+ enum controller_dp_color_space controller_color_space;
+ int opp_cnt = 1;
+ int count;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+ default:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+ ASSERT(0);
+ break;
+ }
- if (bot_odm_pipe) {
- struct output_pixel_processor *bot_opp = bot_odm_pipe->stream_res.opp;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
- bot_opp->funcs->opp_program_bit_depth_reduction(bot_opp, &params);
- width /= 2;
- bot_opp->funcs->opp_set_disp_pattern_generator(bot_opp,
+ width /= opp_cnt;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
controller_test_pattern,
+ controller_color_space,
color_depth,
NULL,
width,
@@ -2790,12 +3719,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
}
opp->funcs->opp_set_disp_pattern_generator(opp,
controller_test_pattern,
+ controller_color_space,
color_depth,
NULL,
width,
height);
+ /* wait for dpg to blank pixel data with test pattern */
+ for (count = 0; count < 1000; count++) {
+ if (opp->funcs->dpg_is_blanked(opp))
+ break;
+ udelay(100);
+ }
}
-#endif
}
break;
case DP_TEST_PATTERN_VIDEO_MODE:
@@ -2808,17 +3743,21 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else if (opp->funcs->opp_set_disp_pattern_generator) {
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
- if (bot_odm_pipe) {
- struct output_pixel_processor *bot_opp = bot_odm_pipe->stream_res.opp;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
- bot_opp->funcs->opp_program_bit_depth_reduction(bot_opp, &params);
- width /= 2;
- bot_opp->funcs->opp_set_disp_pattern_generator(bot_opp,
+ width /= opp_cnt;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
width,
@@ -2826,12 +3765,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
}
opp->funcs->opp_set_disp_pattern_generator(opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
width,
height);
}
-#endif
}
break;
@@ -2843,6 +3782,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
bool dc_link_dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size)
@@ -2858,7 +3798,7 @@ bool dc_link_dp_set_test_pattern(
memset(&training_pattern, 0, sizeof(training_pattern));
for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream->link == link) {
+ if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
@@ -2871,7 +3811,7 @@ bool dc_link_dp_set_test_pattern(
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
/* Set CRTC Test Pattern */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
dp_set_hw_test_pattern(link, test_pattern,
(uint8_t *)p_custom_pattern,
(uint32_t)cust_pattern_size);
@@ -2894,8 +3834,8 @@ bool dc_link_dp_set_test_pattern(
if (is_dp_phy_pattern(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- dp_set_hw_lane_settings(link, p_link_settings);
- dpcd_set_lane_settings(link, p_link_settings);
+ dp_set_hw_lane_settings(link, p_link_settings, DPRX);
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
}
/* Blank stream if running test pattern */
@@ -2985,8 +3925,38 @@ bool dc_link_dp_set_test_pattern(
sizeof(training_pattern));
}
} else {
- /* CRTC Patterns */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
+ break;
+
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ color_space = COLOR_SPACE_YCBCR601;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ color_space = COLOR_SPACE_YCBCR709;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+ default:
+ break;
+ }
+ /* update MSA to requested color space */
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream->timing,
+ color_space,
+ pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+ link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
+ /* CRTC Patterns */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -3007,7 +3977,105 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
+{
+ union dpcd_edp_config edp_config_set;
+ bool panel_mode_edp = false;
+
+ memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+ if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /*set edp panel mode in receiver*/
+ core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+ enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ ASSERT(result == DDC_RESULT_SUCESSFULL);
+ }
+ }
+ DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
+ "eDP panel mode enabled: %d \n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+ /* We need to explicitly check that connector
+ * is not DP. Some Travis_VGA converters get reported
+ * by the video BIOS as DP.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+ switch (link->dpcd_caps.branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_0022B9:
+ /* alternate scrambler reset is required for Travis
+ * for the case when external chip does not
+ * provide sink device id, alternate scrambler
+ * scheme will be overridden later by querying
+ * Encoder features
+ */
+ if (strncmp(
+ link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_2,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ case DP_BRANCH_DEVICE_ID_00001A:
+ /* alternate scrambler reset is required for Travis
+ * for the case when external chip does not provide
+ * sink device id, alternate scrambler scheme will
+ * be overridden later by querying Encoder features
+ */
+ if (strncmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_3,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (link->dpcd_caps.panel_mode_edp) {
+ return DP_PANEL_MODE_EDP;
+ }
+
+ return DP_PANEL_MODE_DEFAULT;
+}
+
void dp_set_fec_ready(struct dc_link *link, bool ready)
{
/* FEC has to be "set ready" before the link training.
@@ -3024,7 +4092,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
if (link_enc->funcs->fec_set_ready &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
- if (link->fec_state == dc_link_fec_not_ready && ready) {
+ if (ready) {
fec_config = 1;
if (core_link_write_dpcd(link,
DP_FEC_CONFIGURATION,
@@ -3033,9 +4101,11 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
link_enc->funcs->fec_set_ready(link_enc, true);
link->fec_state = dc_link_fec_ready;
} else {
+ link->link_enc->funcs->fec_set_ready(link->link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
dm_error("dpcd write failed to set fec_ready");
}
- } else if (link->fec_state == dc_link_fec_ready && !ready) {
+ } else if (link->fec_state == dc_link_fec_ready) {
fec_config = 0;
core_link_write_dpcd(link,
DP_FEC_CONFIGURATION,
@@ -3059,7 +4129,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
if (link_enc->funcs->fec_set_enable &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
if (link->fec_state == dc_link_fec_ready && enable) {
- msleep(1);
+ /* According to DP spec, FEC enable sequence can first
+ * be transmitted anytime after 1000 LL codes have
+ * been transmitted on the link after link training
+ * completion. Using 1 lane RBR should have the maximum
+ * time for transmitting 1000 LL codes which is 6.173 us.
+ * So use 7 microseconds delay instead.
+ */
+ udelay(7);
link_enc->funcs->fec_set_enable(link_enc, true);
link->fec_state = dc_link_fec_enabled;
} else if (link->fec_state == dc_link_fec_enabled && !enable) {
@@ -3068,5 +4145,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
}
-#endif
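The comment added in dp_set_fec_enable() above justifies the 7 us delay by noting that 1000 link-layer codes at the slowest link configuration (1 lane, RBR) take about 6.173 us. Below is a standalone sanity check of that figure, under the assumption that RBR runs a lane at 1.62 Gb/s and 8b/10b encoding puts 10 bits on the wire per symbol (those constants are not taken from the patch itself).

#include <stdio.h>

int main(void)
{
	/* Assumptions for the check: 1.62 Gb/s per lane at RBR and 10 wire
	 * bits per link-layer symbol due to 8b/10b encoding. */
	const double lane_rate_bps = 1.62e9;
	const double bits_per_ll_code = 10.0;
	const double ll_codes = 1000.0;

	double time_us = ll_codes * bits_per_ll_code / lane_rate_bps * 1e6;

	/* Prints ~6.17 us, which is why the patch rounds the delay up to 7 us. */
	printf("1000 LL codes over 1 lane RBR take %.3f us\n", time_us);
	return 0;
}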
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 2d019e1f6135..ddb855045767 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -12,12 +12,38 @@
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "dpcd_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "resource.h"
-#endif
+
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+ switch (lttpr_repeater_count) {
+ case 0x80: // 1 lttpr repeater
+ return 1;
+ case 0x40: // 2 lttpr repeaters
+ return 2;
+ case 0x20: // 3 lttpr repeaters
+ return 3;
+ case 0x10: // 4 lttpr repeaters
+ return 4;
+ case 0x08: // 5 lttpr repeaters
+ return 5;
+ case 0x04: // 6 lttpr repeaters
+ return 6;
+ case 0x02: // 7 lttpr repeaters
+ return 7;
+ case 0x01: // 8 lttpr repeaters
+ return 8;
+ default:
+ break;
+ }
+ return 0; // invalid value
+}
+
+static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
+{
+ return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
+}
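convert_to_count() above decodes the LTTPR repeater-count field, which is stored one-hot: 0x80 means one repeater, 0x40 two, and so on down to 0x01 for eight. The sketch below shows an equivalent decode based on the bit position; dpcd_repeater_count_to_int() is an invented name, and the switch statement above remains the form the driver actually uses.

#include <stdint.h>
#include <stdio.h>

/* Equivalent decode of the one-hot repeater-count encoding handled by
 * convert_to_count(): 0x80 -> 1, 0x40 -> 2, ... 0x01 -> 8. Anything
 * that is not exactly one set bit is treated as invalid. */
static uint8_t dpcd_repeater_count_to_int(uint8_t raw)
{
	uint8_t count = 0;

	if (raw == 0 || (raw & (raw - 1)))	/* zero or more than one bit set */
		return 0;

	while (!(raw & 0x80)) {
		raw <<= 1;
		count++;
	}
	return count + 1;
}

int main(void)
{
	uint8_t samples[] = { 0x80, 0x40, 0x01, 0x30, 0x00 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %u repeaters\n", samples[i],
		       dpcd_repeater_count_to_int(samples[i]));
	return 0;
}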
enum dc_status core_link_read_dpcd(
struct dc_link *link,
@@ -25,10 +51,11 @@ enum dc_status core_link_read_dpcd(
uint8_t *data,
uint32_t size)
{
- if (!dm_helpers_dp_read_dpcd(link->ctx,
- link,
- address, data, size))
- return DC_ERROR_UNEXPECTED;
+ if (!link->aux_access_disabled &&
+ !dm_helpers_dp_read_dpcd(link->ctx,
+ link, address, data, size)) {
+ return DC_ERROR_UNEXPECTED;
+ }
return DC_OK;
}
@@ -39,10 +66,11 @@ enum dc_status core_link_write_dpcd(
const uint8_t *data,
uint32_t size)
{
- if (!dm_helpers_dp_write_dpcd(link->ctx,
- link,
- address, data, size))
- return DC_ERROR_UNEXPECTED;
+ if (!link->aux_access_disabled &&
+ !dm_helpers_dp_write_dpcd(link->ctx,
+ link, address, data, size)) {
+ return DC_ERROR_UNEXPECTED;
+ }
return DC_OK;
}
@@ -53,6 +81,9 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+ if (link->sync_lt_in_progress)
+ return;
+
core_link_write_dpcd(link, DP_SET_POWER, &state,
sizeof(state));
}
@@ -64,8 +95,8 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc = link->link_enc;
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
@@ -142,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)
}
bool edp_receiver_ready_T7(struct dc_link *link)
{
- unsigned int tries = 0;
unsigned char sinkstatus = 0;
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
+ /* use absolute time stamp to constrain max T7*/
+ unsigned long long enter_timestamp = 0;
+ unsigned long long finish_timestamp = 0;
+ unsigned long long time_taken_in_ns = 0;
+
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
if (result == DC_OK && edpRev < DP_EDP_12)
return true;
/* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/
+ enter_timestamp = dm_get_timestamp(link->ctx);
do {
sinkstatus = 0;
result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -158,15 +194,21 @@ bool edp_receiver_ready_T7(struct dc_link *link)
break;
if (result != DC_OK)
break;
- udelay(25); //MAx T7 is 50ms
- } while (++tries < 300);
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); //Max T7 is 50ms
+
+ if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
+ udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
+
return result;
}
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
@@ -203,21 +245,22 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
bool dp_set_hw_training_pattern(
struct dc_link *link,
- enum hw_dp_training_pattern pattern)
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
{
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
switch (pattern) {
- case HW_DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
break;
- case HW_DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
break;
- case HW_DP_TRAINING_PATTERN_3:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
break;
- case HW_DP_TRAINING_PATTERN_4:
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
default:
@@ -231,54 +274,18 @@ bool dp_set_hw_training_pattern(
void dp_set_hw_lane_settings(
struct dc_link *link,
- const struct link_training_settings *link_settings)
+ const struct link_training_settings *link_settings,
+ uint32_t offset)
{
struct link_encoder *encoder = link->link_enc;
+ if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset))
+ return;
+
/* call Encoder to set lane settings */
encoder->funcs->dp_set_lane_settings(encoder, link_settings);
}
-enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
-{
- /* We need to explicitly check that connector
- * is not DP. Some Travis_VGA get reported
- * by video bios as DP.
- */
- if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
-
- switch (link->dpcd_caps.branch_dev_id) {
- case DP_BRANCH_DEVICE_ID_2:
- if (strncmp(
- link->dpcd_caps.branch_dev_name,
- DP_VGA_LVDS_CONVERTER_ID_2,
- sizeof(
- link->dpcd_caps.
- branch_dev_name)) == 0) {
- return DP_PANEL_MODE_SPECIAL;
- }
- break;
- case DP_BRANCH_DEVICE_ID_3:
- if (strncmp(link->dpcd_caps.branch_dev_name,
- DP_VGA_LVDS_CONVERTER_ID_3,
- sizeof(
- link->dpcd_caps.
- branch_dev_name)) == 0) {
- return DP_PANEL_MODE_SPECIAL;
- }
- break;
- default:
- break;
- }
- }
-
- if (link->dpcd_caps.panel_mode_edp) {
- return DP_PANEL_MODE_EDP;
- }
-
- return DP_PANEL_MODE_DEFAULT;
-}
-
void dp_set_hw_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
@@ -306,9 +313,10 @@ void dp_retrain_link_dp_test(struct dc_link *link,
for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream != NULL &&
- !pipes[i].top_pipe &&
+ !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL) {
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->link == link) {
udelay(100);
pipes[i].stream_res.stream_enc->funcs->dp_blank(
@@ -320,7 +328,9 @@ void dp_retrain_link_dp_test(struct dc_link *link,
dp_receiver_power_ctrl(link, false);
- link->dc->hwss.disable_stream(&pipes[i], KEEP_ACQUIRED_RESOURCE);
+ link->dc->hwss.disable_stream(&pipes[i]);
+ if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
+ (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
link->link_enc->funcs->disable_output(
link->link_enc,
@@ -330,20 +340,12 @@ void dp_retrain_link_dp_test(struct dc_link *link,
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
- link->link_enc->funcs->enable_dp_output(
- link->link_enc,
- link_setting,
- pipes[i].clock_source->id);
- link->cur_link_settings = *link_setting;
-
- dp_receiver_power_ctrl(link, true);
-
perform_link_training_with_retries(
- link,
link_setting,
skip_video_pattern,
- LINK_TRAINING_ATTEMPTS);
-
+ LINK_TRAINING_ATTEMPTS,
+ &pipes[i],
+ SIGNAL_TYPE_DISPLAY_PORT);
link->dc->hwss.enable_stream(&pipes[i]);
@@ -367,80 +369,98 @@ void dp_retrain_link_dp_test(struct dc_link *link,
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define DC_LOGGER \
dsc->ctx->logger
static void dsc_optc_config_log(struct display_stream_compressor *dsc,
struct dsc_optc_config *config)
{
- DC_LOG_DSC("Setting optc DSC config at DSC inst %d", dsc->inst);
- DC_LOG_DSC("\n\tbytes_per_pixel %d\n\tis_pixel_format_444 %d\n\tslice_width %d",
- config->bytes_per_pixel,
- config->is_pixel_format_444, config->slice_width);
+ uint32_t precision = 1 << 28;
+ uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
+ uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
+ uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
+
+ /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC
+ * bits per pixel precision is 1/16th of a bit, which means bytes per pixel precision is
+ * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
+ */
+ ll_bytes_per_pix_fraq *= 10000000;
+ ll_bytes_per_pix_fraq /= precision;
+
+ DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
+ config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
+ DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
+ DC_LOG_DSC("\tslice_width %d", config->slice_width);
}
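dsc_optc_config_log() above now prints bytes_per_pixel as a fixed-point value with a 2^28 denominator, splitting it into an integer part and a seven-digit decimal fraction. A standalone sketch of the same conversion follows; fixed_u28_to_decimal() and the 1.5 bytes-per-pixel sample value are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Split a fixed-point value with a 2^28 denominator into an integer part
 * and a 7-digit decimal fraction, mirroring dsc_optc_config_log(). */
static void fixed_u28_to_decimal(uint32_t fixed, uint32_t *int_part, uint32_t *frac_7digits)
{
	const uint32_t precision = 1u << 28;
	uint64_t frac = fixed % precision;

	*int_part = fixed / precision;
	frac *= 10000000;	/* scale the remainder to 7 decimal digits */
	frac /= precision;
	*frac_7digits = (uint32_t)frac;
}

int main(void)
{
	/* Example: 1.5 bytes per pixel encoded as 1.5 * 2^28 = 0x18000000 */
	uint32_t sample = 0x18000000;
	uint32_t int_part, frac;

	fixed_u28_to_decimal(sample, &int_part, &frac);
	printf("bytes_per_pixel 0x%08x (%u.%07u)\n", sample, int_part, frac);
	return 0;
}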
static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
bool result = false;
- if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
result = true;
else
- result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable);
+ result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
return result;
}
-/* This has to be done after DSC was enabled on RX first, i.e. after dp_enable_dsc_on_rx() had been called
+/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
+ * i.e. after dp_enable_dsc_on_rx() had been called
*/
-static void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
if (enable) {
- /* TODO proper function */
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg;
enum optc_dsc_mode optc_dsc_mode;
- uint8_t dsc_packed_pps[128];
/* Enable DSC hw block */
- dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
- dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps[0]);
- if (odm_pipe) {
- struct display_stream_compressor *bot_dsc = odm_pipe->stream_res.dsc;
- uint8_t dsc_packed_pps_odm[128];
-
- dsc_cfg.pic_width /= 2;
- ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % 2 == 0);
- dsc_cfg.dc_dsc_cfg.num_slices_h /= 2;
- dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps_odm[0]);
- bot_dsc->funcs->dsc_set_config(bot_dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps_odm[0]);
- bot_dsc->funcs->dsc_enable(bot_dsc, odm_pipe->stream_res.opp->inst);
- }
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+
+ odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
+ odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+ }
+ dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
+ dsc_cfg.pic_width *= opp_cnt;
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
- dsc_optc_config_log(dsc, &dsc_optc_cfg);
/* Enable DSC in encoder */
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config)
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
optc_dsc_mode,
dsc_optc_cfg.bytes_per_pixel,
- dsc_optc_cfg.slice_width,
- &dsc_packed_pps[0]);
+ dsc_optc_cfg.slice_width);
+
+ /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
+ }
/* Enable DSC in OPTC */
+ DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
optc_dsc_mode,
dsc_optc_cfg.bytes_per_pixel,
@@ -452,15 +472,18 @@ static void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);
/* disable DSC in stream encoder */
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0, NULL);
+ OPTC_DSC_DISABLED, 0, 0);
+
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL);
}
/* disable DSC block */
pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
- if (odm_pipe)
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
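With the ODM rework above, each DSC/OPP instance is programmed for 1/opp_cnt of the picture width and the slice count is divided the same way (the new ASSERT guards that it divides evenly), while the OPTC is still configured with full-stream values, which is why pic_width and num_slices_h are multiplied back before set_dsc_config(). A small illustration with made-up timing numbers:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 4K timing driven through 2 OPPs (ODM combine). */
	int pic_width = 3840;
	int num_slices_h = 8;
	int opp_cnt = 2;

	/* Mirrors the division in dp_set_dsc_on_stream(): the slice count
	 * must split evenly across the OPPs. */
	assert(num_slices_h % opp_cnt == 0);

	int per_opp_width = pic_width / opp_cnt;
	int per_opp_slices = num_slices_h / opp_cnt;

	printf("each of %d DSC instances encodes %d px wide, %d slices\n",
	       opp_cnt, per_opp_width, per_opp_slices);

	/* The OPTC, by contrast, is programmed with the full-stream values,
	 * which is why the code multiplies them back afterwards. */
	printf("OPTC sees %d px, %d slices\n", per_opp_width * opp_cnt,
	       per_opp_slices * opp_cnt);
	return 0;
}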
@@ -489,6 +512,50 @@ out:
return result;
}
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+ return false;
+
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ uint8_t dsc_packed_pps[128];
+
+ memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+ memset(dsc_packed_pps, 0, 128);
+
+ /* Enable DSC hw block */
+ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+
+ DC_LOG_DSC(" ");
+ dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc,
+ true,
+ &dsc_packed_pps[0]);
+ }
+ } else {
+ /* disable DSC PPS in stream encoder */
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL);
+ }
+ }
+
+ return true;
+}
+
+
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -499,8 +566,7 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
return false;
dp_set_dsc_on_stream(pipe_ctx, true);
+ dp_set_dsc_pps_sdp(pipe_ctx, true);
return true;
}
-#endif
-
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 2ceaab4fb5de..a0eb9e533a61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,12 +46,11 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/dcn20_resource.h"
-#endif
+#include "dcn21/dcn21_resource.h"
#include "dce120/dce120_resource.h"
#define DC_LOGGER_INIT(logger)
@@ -96,19 +95,19 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
else
dc_version = DCE_VERSION_12_0;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_1_01;
+ if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_2_1;
break;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
break;
-#endif
default:
dc_version = DCE_VERSION_UNKNOWN;
break;
@@ -155,30 +154,29 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
init_data->num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
res_pool = dcn10_create_resource_pool(init_data, dc);
break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
res_pool = dcn20_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_2_1:
+ res_pool = dcn21_create_resource_pool(init_data, dc);
+ break;
#endif
default:
break;
}
- if (res_pool != NULL) {
- struct dc_firmware_info fw_info = { { 0 } };
- if (dc->ctx->dc_bios->funcs->get_firmware_info(dc->ctx->dc_bios,
- &fw_info) == BP_RESULT_OK) {
+ if (res_pool != NULL) {
+ if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
- fw_info.pll_info.crystal_frequency;
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
/* initialize with firmware data first, not all
* ASICs have DCCG SW component. FPGA or
* simulation need initialization of
@@ -265,12 +263,10 @@ bool resource_construct(
DC_ERR("DC: failed to create audio!\n");
return false;
}
-
if (!aud->funcs->endpoint_valid(aud)) {
aud->funcs->destroy(&aud);
break;
}
-
pool->audios[i] = aud;
pool->audio_count++;
}
@@ -396,6 +392,9 @@ bool resource_are_streams_timing_synchronizable(
if (stream1->view_format != stream2->view_format)
return false;
+ if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+ return false;
+
return true;
}
static bool is_dp_and_hdmi_sharable(
@@ -941,11 +940,51 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
}
+/*
+ * When handling 270 rotation in mixed SLS mode, we have
+ * stream->timing.h_border_left that is non zero. If we are doing
+ * pipe-splitting, this h_border_left value gets added to recout.x and when it
+ * calls calculate_inits_and_adj_vp() and
+ * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
+ * pipe to be incorrect.
+ *
+ * To fix this, instead of using stream->timing.h_border_left, we can use
+ * stream->dst.x to represent the border instead. So we will set h_border_left
+ * to 0 and shift the appropriate amount in stream->dst.x. We will then
+ * perform all calculations in resource_build_scaling_params() based on this
+ * and then restore the h_border_left and stream->dst.x to their original
+ * values.
+ *
+ * shift_border_left_to_dst() will shift the amount of h_border_left to
+ * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst()
+ * will restore h_border_left and stream->dst.x back to their original values
+ * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
+ * original h_border_left value in its calculation.
+ */
+int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+{
+ int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
+
+ if (store_h_border_left) {
+ pipe_ctx->stream->timing.h_border_left = 0;
+ pipe_ctx->stream->dst.x += store_h_border_left;
+ }
+ return store_h_border_left;
+}
+
+void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+ int store_h_border_left)
+{
+ pipe_ctx->stream->dst.x -= store_h_border_left;
+ pipe_ctx->stream->timing.h_border_left = store_h_border_left;
+}
+
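As the comment block above explains, shift_border_left_to_dst() parks h_border_left in stream->dst.x for the duration of the scaling calculation and restore_border_left_from_dst() undoes it, while h_active must still be computed from the original border. The sketch below walks through that save/compute/restore sequence with a stand-in struct (fake_stream and the 16-pixel border are invented for illustration).

#include <stdio.h>

/* Minimal stand-ins for the fields the helpers above touch. */
struct fake_stream {
	struct { int h_border_left; int h_addressable; int h_border_right; } timing;
	struct { int x; } dst;
};

int main(void)
{
	struct fake_stream s = {
		.timing = { .h_border_left = 16, .h_addressable = 1920, .h_border_right = 0 },
		.dst = { .x = 0 },
	};

	/* shift_border_left_to_dst(): park the border in dst.x while the
	 * scaling parameters are computed. */
	int stored = s.timing.h_border_left;
	s.timing.h_border_left = 0;
	s.dst.x += stored;

	/* h_active still has to include the original border, which is why
	 * resource_build_scaling_params() adds back store_h_border_left. */
	int h_active = s.timing.h_addressable + stored + s.timing.h_border_right;
	printf("h_active = %d, dst.x during calc = %d\n", h_active, s.dst.x);

	/* restore_border_left_from_dst(): undo the shift afterwards. */
	s.dst.x -= stored;
	s.timing.h_border_left = stored;
	printf("restored h_border_left = %d, dst.x = %d\n",
	       s.timing.h_border_left, s.dst.x);
	return 0;
}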
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
bool res = false;
+ int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
@@ -958,8 +997,14 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
+ pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+ if (store_h_border_left) {
+ restore_border_left_from_dst(pipe_ctx,
+ store_h_border_left);
+ }
return false;
+ }
calculate_recout(pipe_ctx);
@@ -972,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
- pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
- pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+ pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
+ store_h_border_left + timing->h_border_right;
+ pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
+ timing->v_border_top + timing->v_border_bottom;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -983,6 +1030,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.dpp != NULL)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+
if (!res) {
/* Try 24 bpp linebuffer */
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
@@ -1018,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);
+ if (store_h_border_left)
+ restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+
return res;
}
@@ -1103,25 +1155,21 @@ struct pipe_ctx *resource_get_head_pipe_for_stream(
struct dc_stream_state *stream)
{
int i;
+
for (i = 0; i < MAX_PIPES; i++) {
- if (res_ctx->pipe_ctx[i].stream == stream &&
- !res_ctx->pipe_ctx[i].top_pipe) {
+ if (res_ctx->pipe_ctx[i].stream == stream
+ && !res_ctx->pipe_ctx[i].top_pipe
+ && !res_ctx->pipe_ctx[i].prev_odm_pipe)
return &res_ctx->pipe_ctx[i];
- break;
- }
}
return NULL;
}
-static struct pipe_ctx *resource_get_tail_pipe_for_stream(
+static struct pipe_ctx *resource_get_tail_pipe(
struct resource_context *res_ctx,
- struct dc_stream_state *stream)
+ struct pipe_ctx *head_pipe)
{
- struct pipe_ctx *head_pipe, *tail_pipe;
- head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
-
- if (!head_pipe)
- return NULL;
+ struct pipe_ctx *tail_pipe;
tail_pipe = head_pipe->bottom_pipe;
@@ -1137,31 +1185,20 @@ static struct pipe_ctx *resource_get_tail_pipe_for_stream(
* A free_pipe for a stream is defined here as a pipe
* that has no surface attached yet
*/
-static struct pipe_ctx *acquire_free_pipe_for_stream(
+static struct pipe_ctx *acquire_free_pipe_for_head(
struct dc_state *context,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ struct pipe_ctx *head_pipe)
{
int i;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *head_pipe = NULL;
-
- /* Find head pipe, which has the back end set up*/
-
- head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
-
- if (!head_pipe) {
- ASSERT(0);
- return NULL;
- }
-
if (!head_pipe->plane_state)
return head_pipe;
/* Re-use pipe already acquired for this stream if available*/
for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == stream &&
+ if (res_ctx->pipe_ctx[i].stream == head_pipe->stream &&
!res_ctx->pipe_ctx[i].plane_state) {
return &res_ctx->pipe_ctx[i];
}
@@ -1175,11 +1212,10 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
if (!pool->funcs->acquire_idle_pipe_for_layer)
return NULL;
- return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);
-
+ return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static int acquire_first_split_pipe(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1190,7 +1226,7 @@ static int acquire_first_split_pipe(
for (i = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i];
- if (split_pipe->top_pipe && !dc_res_is_odm_head_pipe(split_pipe) &&
+ if (split_pipe->top_pipe &&
split_pipe->top_pipe->plane_state == split_pipe->plane_state) {
split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe;
if (split_pipe->bottom_pipe)
@@ -1251,39 +1287,41 @@ bool dc_add_plane_to_context(
return false;
}
- tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
- ASSERT(tail_pipe);
+ /* retain new surface, but only once per stream */
+ dc_plane_state_retain(plane_state);
- free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
+ while (head_pipe) {
+ tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
+ ASSERT(tail_pipe);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (!free_pipe) {
- int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
- if (pipe_idx >= 0)
- free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
- }
-#endif
- if (!free_pipe)
- return false;
+ free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
- /* retain new surfaces */
- dc_plane_state_retain(plane_state);
- free_pipe->plane_state = plane_state;
-
- if (head_pipe != free_pipe) {
- free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
- free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
- free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
- free_pipe->clock_source = tail_pipe->clock_source;
- free_pipe->top_pipe = tail_pipe;
- tail_pipe->bottom_pipe = free_pipe;
- } else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) {
- ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp);
- free_pipe->bottom_pipe->plane_state = plane_state;
- }
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!free_pipe) {
+ int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ if (pipe_idx >= 0)
+ free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ }
+ #endif
+ if (!free_pipe) {
+ dc_plane_state_release(plane_state);
+ return false;
+ }
+ free_pipe->plane_state = plane_state;
+
+ if (head_pipe != free_pipe) {
+ free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+ free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
+ free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
+ free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
+ free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
+ free_pipe->clock_source = tail_pipe->clock_source;
+ free_pipe->top_pipe = tail_pipe;
+ tail_pipe->bottom_pipe = free_pipe;
+ }
+ head_pipe = head_pipe->next_odm_pipe;
+ }
/* assign new surfaces*/
stream_status->plane_states[stream_status->plane_count] = plane_state;
@@ -1292,35 +1330,6 @@ bool dc_add_plane_to_context(
return true;
}
-struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe;
-
- /* ODM should only be updated once per otg */
- if (pipe_ctx->top_pipe)
- return NULL;
-
- while (bottom_pipe) {
- if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
- break;
- bottom_pipe = bottom_pipe->bottom_pipe;
- }
-
- return bottom_pipe;
-}
-
-bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;
-
- if (!top_pipe)
- return false;
- if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
- return false;
-
- return true;
-}
-
bool dc_remove_plane_from_context(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -1347,12 +1356,6 @@ bool dc_remove_plane_from_context(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state == plane_state) {
- if (dc_res_is_odm_head_pipe(pipe_ctx)) {
- pipe_ctx->plane_state = NULL;
- pipe_ctx->bottom_pipe = NULL;
- continue;
- }
-
if (pipe_ctx->top_pipe)
pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
@@ -1367,13 +1370,10 @@ bool dc_remove_plane_from_context(
* For head pipe detach surfaces from pipe for tail
* pipe just zero it out
*/
- if (!pipe_ctx->top_pipe) {
+ if (!pipe_ctx->top_pipe)
pipe_ctx->plane_state = NULL;
- if (!dc_res_get_odm_bottom_pipe(pipe_ctx))
- pipe_ctx->bottom_pipe = NULL;
- } else {
+ else
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
- }
}
}
@@ -1568,6 +1568,9 @@ bool dc_is_stream_unchanged(
if (!are_stream_backends_same(old_stream, stream))
return false;
+ if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+ return false;
+
return true;
}
@@ -1657,26 +1660,28 @@ static int acquire_first_free_pipe(
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
- enum engine_id id)
+ enum engine_id id,
+ enum dce_version dc_version)
{
- int i;
- for (i = 0; i < pool->audio_count; i++) {
+ int i, available_audio_count;
+
+ available_audio_count = pool->audio_count;
+
+ for (i = 0; i < available_audio_count; i++) {
if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
/*we have enough audio endpoint, find the matching inst*/
if (id != i)
continue;
-
return pool->audios[i];
}
}
- /* use engine id to find free audio */
- if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+ /* use engine id to find free audio */
+ if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
return pool->audios[id];
}
-
/*not found the matching one, first come first serve*/
- for (i = 0; i < pool->audio_count; i++) {
+ for (i = 0; i < available_audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
return pool->audios[i];
}
@@ -1736,50 +1741,45 @@ enum dc_status dc_remove_stream_from_ctx(
{
int i;
struct dc_context *dc_ctx = dc->ctx;
- struct pipe_ctx *del_pipe = NULL;
-
- /* Release primary pipe */
- for (i = 0; i < MAX_PIPES; i++) {
- if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
- !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
- struct pipe_ctx *odm_pipe =
- dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]);
+ struct pipe_ctx *del_pipe = resource_get_head_pipe_for_stream(&new_ctx->res_ctx, stream);
+ struct pipe_ctx *odm_pipe;
- del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+ if (!del_pipe) {
+ DC_ERROR("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
- ASSERT(del_pipe->stream_res.stream_enc);
- update_stream_engine_usage(
- &new_ctx->res_ctx,
- dc->res_pool,
- del_pipe->stream_res.stream_enc,
- false);
+ odm_pipe = del_pipe->next_odm_pipe;
- if (del_pipe->stream_res.audio)
- update_audio_usage(
- &new_ctx->res_ctx,
- dc->res_pool,
- del_pipe->stream_res.audio,
- false);
+ /* Release primary pipe */
+ ASSERT(del_pipe->stream_res.stream_enc);
+ update_stream_engine_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.stream_enc,
+ false);
- resource_unreference_clock_source(&new_ctx->res_ctx,
- dc->res_pool,
- del_pipe->clock_source);
+ if (del_pipe->stream_res.audio)
+ update_audio_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.audio,
+ false);
- if (dc->res_pool->funcs->remove_stream_from_ctx)
- dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
+ resource_unreference_clock_source(&new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->clock_source);
- memset(del_pipe, 0, sizeof(*del_pipe));
- if (odm_pipe)
- memset(odm_pipe, 0, sizeof(*odm_pipe));
+ if (dc->res_pool->funcs->remove_stream_from_ctx)
+ dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
- break;
- }
- }
+ while (odm_pipe) {
+ struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;
- if (!del_pipe) {
- DC_ERROR("Pipe not found for stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
+ memset(odm_pipe, 0, sizeof(*odm_pipe));
+ odm_pipe = next_odm_pipe;
}
+ memset(del_pipe, 0, sizeof(*del_pipe));
for (i = 0; i < new_ctx->stream_count; i++)
if (new_ctx->streams[i] == stream)
@@ -1880,40 +1880,67 @@ static int acquire_resource_from_hw_enabled_state(
struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- unsigned int inst;
+ unsigned int i, inst, tg_inst = 0;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return -1;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
- inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;
+ inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (inst == ENGINE_ID_UNKNOWN)
+ return -1;
- /* Instance should be within the range of the pool */
- if (inst >= pool->pipe_count)
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i]->id == inst) {
+ tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
+ pool->stream_enc[i]);
+ break;
+ }
+ }
+
+ // tg_inst not found
+ if (i == pool->stream_enc_count)
return -1;
- if (!res_ctx->pipe_ctx[inst].stream) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[inst];
+ if (tg_inst >= pool->timing_generator_count)
+ return -1;
+
+ if (!res_ctx->pipe_ctx[tg_inst].stream) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
+ pipe_ctx->plane_res.mi = pool->mis[tg_inst];
+ pipe_ctx->plane_res.hubp = pool->hubps[tg_inst];
+ pipe_ctx->plane_res.ipp = pool->ipps[tg_inst];
+ pipe_ctx->plane_res.xfm = pool->transforms[tg_inst];
+ pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
+ pipe_ctx->stream_res.opp = pool->opps[tg_inst];
+
+ if (pool->dpps[tg_inst]) {
+ pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
- pipe_ctx->stream_res.tg = pool->timing_generators[inst];
- pipe_ctx->plane_res.mi = pool->mis[inst];
- pipe_ctx->plane_res.hubp = pool->hubps[inst];
- pipe_ctx->plane_res.ipp = pool->ipps[inst];
- pipe_ctx->plane_res.xfm = pool->transforms[inst];
- pipe_ctx->plane_res.dpp = pool->dpps[inst];
- pipe_ctx->stream_res.opp = pool->opps[inst];
- if (pool->dpps[inst])
- pipe_ctx->plane_res.mpcc_inst = pool->dpps[inst]->inst;
- pipe_ctx->pipe_idx = inst;
+ // Read DPP->MPCC->OPP Pipe from HW State
+ if (pool->mpc->funcs->read_mpcc_state) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+
+ if (s.dpp_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
+
+ if (s.bot_mpcc_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+ &pool->mpc->mpcc_array[s.bot_mpcc_id];
+
+ if (s.opp_id < MAX_OPP)
+ pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ }
+ }
+ pipe_ctx->pipe_idx = tg_inst;
pipe_ctx->stream = stream;
- return inst;
+ return tg_inst;
}
return -1;
@@ -1961,7 +1988,7 @@ enum dc_status resource_map_pool_resources(
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
if (pipe_idx < 0)
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif
@@ -1988,7 +2015,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
- &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
/*
* Audio assigned in order first come first get.
@@ -2039,6 +2066,13 @@ void dc_resource_state_construct(
dst_ctx->clk_mgr = dc->clk_mgr;
}
+
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
+{
+ return dc->res_pool->res_cap->num_dsc > 0;
+}
+
+
/**
* dc_validate_global_state() - Determine if HW can support a given state
* Checks HW resource availability and bandwidth requirement.
@@ -2295,7 +2329,7 @@ static void set_avi_info_frame(
if (color_space == COLOR_SPACE_SRGB ||
color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
@@ -2475,6 +2509,12 @@ void dc_resource_state_copy_construct(
if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ if (cur_pipe->next_odm_pipe)
+ cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+
+ if (cur_pipe->prev_odm_pipe)
+ cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
}
for (i = 0; i < dst_ctx->stream_count; i++) {
@@ -2755,9 +2795,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
{
- struct dc *core_dc = dc;
struct dc_link *link = stream->link;
- struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
enum dc_status res = DC_OK;
calculate_phy_pix_clks(stream);
@@ -2820,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
return -1;
}
}
+static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
+{
+ if (modes) {
+ if (modes->sample_rates.rate.RATE_192)
+ return 192000;
+ if (modes->sample_rates.rate.RATE_176_4)
+ return 176400;
+ if (modes->sample_rates.rate.RATE_96)
+ return 96000;
+ if (modes->sample_rates.rate.RATE_88_2)
+ return 88200;
+ if (modes->sample_rates.rate.RATE_48)
+ return 48000;
+ if (modes->sample_rates.rate.RATE_44_1)
+ return 44100;
+ if (modes->sample_rates.rate.RATE_32)
+ return 32000;
+ }
+ /*original logic when no audio info*/
+ return 441000;
+}
+
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *audio_chk)
+{
+ unsigned int i;
+ unsigned int max_sample_rate = 0;
+
+ if (aud_modes) {
+ audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/
+
+ audio_chk->max_audiosample_rate = 0;
+ for (i = 0; i < aud_modes->mode_count; i++) {
+ max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
+ if (audio_chk->max_audiosample_rate < max_sample_rate)
+ audio_chk->max_audiosample_rate = max_sample_rate;
+ /*dts takes the same as type 2: AP = 0.25*/
+ }
+ /*check which one take more bandwidth*/
+ if (audio_chk->max_audiosample_rate > 192000)
+ audio_chk->audio_packet_type = 0x9;/*AP =1*/
+ audio_chk->acat = 0;/*not supported*/
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 5cbfdf1c4b11..a249a0e5edd0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -33,7 +33,7 @@
* Private functions
******************************************************************************/
-static void destruct(struct dc_sink *sink)
+static void dc_sink_destruct(struct dc_sink *sink)
{
if (sink->dc_container_id) {
kfree(sink->dc_container_id);
@@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink)
}
}
-static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
{
struct dc_link *link = init_params->link;
@@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink)
static void dc_sink_free(struct kref *kref)
{
struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
- destruct(sink);
+ dc_sink_destruct(sink);
kfree(sink);
}
@@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
if (NULL == sink)
goto alloc_fail;
- if (false == construct(sink, init_params))
+ if (false == dc_sink_construct(sink, init_params))
goto construct_fail;
kref_init(&sink->refcount);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 352862370390..6ddbb00ed37a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -27,14 +27,12 @@
#include <linux/slab.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dc.h"
#include "core_types.h"
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn10/dcn10_hw_sequencer.h"
-#endif
#define DC_LOGGER dc->ctx->logger
@@ -58,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
}
}
-static void construct(struct dc_stream_state *stream,
+static void dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data)
{
uint32_t i = 0;
@@ -108,7 +106,6 @@ static void construct(struct dc_stream_state *stream,
/* EDID CAP translation for HDMI 2.0 */
stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg));
stream->timing.dsc_cfg.num_slices_h = 0;
stream->timing.dsc_cfg.num_slices_v = 0;
@@ -117,7 +114,6 @@ static void construct(struct dc_stream_state *stream,
stream->timing.dsc_cfg.linebuf_depth = 9;
stream->timing.dsc_cfg.version_minor = 2;
stream->timing.dsc_cfg.ycbcr422_simple = 0;
-#endif
update_stream_signal(stream, dc_sink_data);
@@ -129,7 +125,7 @@ static void construct(struct dc_stream_state *stream,
stream->ctx->dc_stream_id_count++;
}
-static void destruct(struct dc_stream_state *stream)
+static void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
@@ -147,7 +143,7 @@ static void dc_stream_free(struct kref *kref)
{
struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
- destruct(stream);
+ dc_stream_destruct(stream);
kfree(stream);
}
@@ -170,7 +166,7 @@ struct dc_stream_state *dc_create_stream_for_sink(
if (stream == NULL)
return NULL;
- construct(stream, sink);
+ dc_stream_construct(stream, sink);
kref_init(&stream->refcount);
@@ -237,7 +233,7 @@ struct dc_stream_status *dc_stream_get_status(
static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
unsigned int vupdate_line;
unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -246,7 +242,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
- vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
return;
@@ -272,7 +268,7 @@ bool dc_stream_set_cursor_attributes(
const struct dc_cursor_attributes *attributes)
{
int i;
- struct dc *core_dc;
+ struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
@@ -290,8 +286,8 @@ bool dc_stream_set_cursor_attributes(
return false;
}
- core_dc = stream->ctx->dc;
- res_ctx = &core_dc->current_state->res_ctx;
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
stream->cursor_attributes = *attributes;
for (i = 0; i < MAX_PIPES; i++) {
@@ -303,17 +299,17 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- delay_cursor_until_vupdate(pipe_ctx, core_dc);
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+ delay_cursor_until_vupdate(pipe_ctx, dc);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
}
- core_dc->hwss.set_cursor_attribute(pipe_ctx);
- if (core_dc->hwss.set_cursor_sdr_white_level)
- core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (pipe_to_program)
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
return true;
}
@@ -323,7 +319,7 @@ bool dc_stream_set_cursor_position(
const struct dc_cursor_position *position)
{
int i;
- struct dc *core_dc;
+ struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
@@ -337,8 +333,8 @@ bool dc_stream_set_cursor_position(
return false;
}
- core_dc = stream->ctx->dc;
- res_ctx = &core_dc->current_state->res_ctx;
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
stream->cursor_position = *position;
for (i = 0; i < MAX_PIPES; i++) {
@@ -354,20 +350,19 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- delay_cursor_until_vupdate(pipe_ctx, core_dc);
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+ delay_cursor_until_vupdate(pipe_ctx, dc);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
}
- core_dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_position(pipe_ctx);
}
if (pipe_to_program)
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info)
@@ -411,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc,
stream->writeback_info[stream->num_wb_info++] = *wb_info;
}
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* enable writeback */
if (dc->hwss.enable_writeback) {
struct dc_stream_status *stream_status = dc_stream_get_status(stream);
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ dwb->otg_inst = stream_status->primary_otg_inst;
+ }
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
- if (dwb->funcs->is_enabled(dwb)) {
- /* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info);
- } else {
- /* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info);
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+ } else {
+ /* Enable writeback pipe from scratch*/
+ dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+ }
}
}
-
return true;
}
@@ -468,26 +468,35 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
stream->num_wb_info = j;
- /* recalculate and apply DML parameters */
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* disable writeback */
- if (dc->hwss.disable_writeback)
- dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+ /* disable writeback */
+ if (dc->hwss.disable_writeback)
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ }
return true;
}
-#endif
+bool dc_stream_warmup_writeback(struct dc *dc,
+ int num_dwb,
+ struct dc_writeback_info *wb_info)
+{
+ if (dc->hwss.mmhubbub_warmup)
+ return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
+ else
+ return false;
+}
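
For illustration only, not part of the patch: a minimal sketch of how a DM-side caller might drive the reworked writeback entry points above, assuming wb_info has already been populated by the caller. The wrapper name is hypothetical.

static bool example_enable_writeback(struct dc *dc,
				     struct dc_stream_state *stream,
				     struct dc_writeback_info *wb_info)
{
	/* Pre-warm MMHUBBUB when the ASIC provides a warmup hook;
	 * the call simply returns false otherwise. */
	dc_stream_warmup_writeback(dc, 1, wb_info);

	/* Attach or update the writeback pipe on this stream. */
	return dc_stream_add_writeback(dc, stream, wb_info);
}
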
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
uint8_t i;
- struct dc *core_dc = stream->ctx->dc;
+ struct dc *dc = stream->ctx->dc;
struct resource_context *res_ctx =
- &core_dc->current_state->res_ctx;
+ &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -544,9 +553,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
uint8_t i;
bool ret = false;
- struct dc *core_dc = stream->ctx->dc;
+ struct dc *dc = stream->ctx->dc;
struct resource_context *res_ctx =
- &core_dc->current_state->res_ctx;
+ &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -566,10 +575,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
return ret;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
{
- bool status = true;
struct pipe_ctx *pipe = NULL;
int i;
@@ -585,8 +593,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
- status = dc->hwss.dmdata_status_done(pipe);
- return status;
+ return dc->hwss.dmdata_status_done(pipe);
}
bool dc_stream_set_dynamic_metadata(struct dc *dc,
@@ -597,6 +604,14 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
struct hubp *hubp;
int i;
+ /* Dynamic metadata is only supported on HDMI or DP */
+ if (!dc_is_hdmi_signal(stream->signal) && !dc_is_dp_signal(stream->signal))
+ return false;
+
+ /* Check hardware support */
+ if (!dc->hwss.program_dmdata_engine)
+ return false;
+
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == stream)
@@ -612,23 +627,7 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
- if (pipe_ctx->stream_res.stream_enc &&
- pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
- if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
- /* if using dynamic meta, don't set up generic infopackets */
- pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
- pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
- pipe_ctx->stream_res.stream_enc,
- true, pipe_ctx->plane_res.hubp->inst,
- dc_is_dp_signal(pipe_ctx->stream->signal) ?
- dmdata_dp : dmdata_hdmi);
- } else
- pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
- pipe_ctx->stream_res.stream_enc,
- false, pipe_ctx->plane_res.hubp->inst,
- dc_is_dp_signal(pipe_ctx->stream->signal) ?
- dmdata_dp : dmdata_hdmi);
- }
+ dc->hwss.program_dmdata_engine(pipe_ctx);
if (hubp->funcs->dmdata_set_attributes != NULL &&
pipe_ctx->stream->dmdata_address.quad_part != 0) {
@@ -637,7 +636,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
return true;
}
-#endif
void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index f40e4fd52fa2..ea1229a3e2b2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -37,7 +37,7 @@
/*******************************************************************************
* Private functions
******************************************************************************/
-static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
{
plane_state->ctx = ctx;
@@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
plane_state->in_transfer_func->ctx = ctx;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
plane_state->in_shaper_func = dc_create_transfer_func();
if (plane_state->in_shaper_func != NULL) {
plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
@@ -60,7 +59,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->lut3d_func = dc_create_3dlut_func();
if (plane_state->lut3d_func != NULL) {
plane_state->lut3d_func->ctx = ctx;
- plane_state->lut3d_func->initialized = false;
}
plane_state->blend_tf = dc_create_transfer_func();
if (plane_state->blend_tf != NULL) {
@@ -68,10 +66,9 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->blend_tf->ctx = ctx;
}
-#endif
}
-static void destruct(struct dc_plane_state *plane_state)
+static void dc_plane_destruct(struct dc_plane_state *plane_state)
{
if (plane_state->gamma_correction != NULL) {
dc_gamma_release(&plane_state->gamma_correction);
@@ -81,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state)
plane_state->in_transfer_func);
plane_state->in_transfer_func = NULL;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (plane_state->in_shaper_func != NULL) {
dc_transfer_func_release(
plane_state->in_shaper_func);
@@ -98,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state)
plane_state->blend_tf = NULL;
}
-#endif
}
/*******************************************************************************
@@ -113,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
struct dc_plane_state *dc_create_plane_state(struct dc *dc)
{
- struct dc *core_dc = dc;
-
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
- GFP_KERNEL);
+ GFP_KERNEL);
if (NULL == plane_state)
return NULL;
kref_init(&plane_state->refcount);
- construct(core_dc->ctx, plane_state);
+ dc_plane_construct(dc->ctx, plane_state);
return plane_state;
}
@@ -142,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state)
{
const struct dc_plane_status *plane_status;
- struct dc *core_dc;
+ struct dc *dc;
int i;
if (!plane_state ||
@@ -153,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status(
}
plane_status = &plane_state->status;
- core_dc = plane_state->ctx->dc;
+ dc = plane_state->ctx->dc;
- if (core_dc->current_state == NULL)
+ if (dc->current_state == NULL)
return NULL;
/* Find the current plane state and set its pending bit to false */
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
- &core_dc->current_state->res_ctx.pipe_ctx[i];
+ &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state != plane_state)
continue;
@@ -171,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
- &core_dc->current_state->res_ctx.pipe_ctx[i];
+ &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state != plane_state)
continue;
- core_dc->hwss.update_pending_status(pipe_ctx);
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -192,7 +185,7 @@ void dc_plane_state_retain(struct dc_plane_state *plane_state)
static void dc_plane_state_free(struct kref *kref)
{
struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
- destruct(plane_state);
+ dc_plane_destruct(plane_state);
kvfree(plane_state);
}
@@ -263,7 +256,6 @@ alloc_fail:
return NULL;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static void dc_3dlut_func_free(struct kref *kref)
{
struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount);
@@ -279,7 +271,7 @@ struct dc_3dlut *dc_create_3dlut_func(void)
goto alloc_fail;
kref_init(&lut->refcount);
- lut->initialized = false;
+ lut->state.raw = 0;
return lut;
@@ -297,6 +289,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
{
kref_get(&lut->refcount);
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e513028faefa..8ff25b5dd2f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.35"
+#define DC_VER "3.2.69"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -54,6 +54,10 @@ struct dc_versions {
struct dmcu_version dmcu_version;
};
+enum dp_protocol_version {
+ DP_VERSION_1_4,
+};
+
enum dc_plane_type {
DC_PLANE_TYPE_INVALID,
DC_PLANE_TYPE_DCE_RGB,
@@ -111,18 +115,18 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ bool extended_aux_timeout_support;
+ bool dmcub_support;
bool hw_3d_lut;
-#endif
+ enum dp_protocol_version max_dp_protocol_version;
struct dc_plane_cap planes[MAX_PLANES];
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
+ bool skip_clock_update;
};
-#endif
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -153,11 +157,14 @@ struct dc_surface_dcc_cap {
bool const_color_support;
};
-struct dc_static_screen_events {
- bool force_trigger;
- bool cursor_update;
- bool surface_update;
- bool overlay_update;
+struct dc_static_screen_params {
+ struct {
+ bool force_trigger;
+ bool cursor_update;
+ bool surface_update;
+ bool overlay_update;
+ } triggers;
+ unsigned int num_frames;
};
@@ -218,14 +225,17 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+ bool force_enum_edp;
bool forced_clocks;
-
+ bool disable_extended_timeout_support; // Used to disable the extended timeout and the LTTPR feature as well
+ bool multi_mon_pp_mclk_switch;
};
enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
+ VISUAL_CONFIRM_MPCTREE = 4,
};
enum dcc_option {
@@ -244,6 +254,19 @@ enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
+enum dtm_pstate {
+ dtm_level_p0 = 0, /* highest voltage */
+ dtm_level_p1,
+ dtm_level_p2,
+ dtm_level_p3,
+ dtm_level_p4, /* when active_display_count = 0 */
+};
+
+enum dcn_pwr_state {
+ DCN_PWR_STATE_UNKNOWN = -1,
+ DCN_PWR_STATE_MISSION_MODE = 0,
+ DCN_PWR_STATE_LOW_POWER = 3,
+};
/*
* For any clocks that may differ per pipe
@@ -251,7 +274,6 @@ enum wm_report_mode {
*/
struct dc_clocks {
int dispclk_khz;
- int max_supported_dppclk_khz;
int dppclk_khz;
int dcfclk_khz;
int socclk_khz;
@@ -260,6 +282,17 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
+ enum dcn_pwr_state pwr_state;
+ /*
+ * Elements below are not compared when determining
+ * whether optimization is required
+ */
+ bool prev_p_state_change_support;
+ enum dtm_pstate dtm_level;
+ int max_supported_dppclk_khz;
+ int max_supported_dispclk_khz;
+ int bw_dppclk_khz; /* a copy of dppclk_khz */
+ int bw_dispclk_khz;
};
struct dc_bw_validation_profile {
@@ -335,12 +368,14 @@ struct dc_debug_options {
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_dsc_power_gate;
-#endif
+ int dsc_min_slice_height_override;
+ int dsc_bpp_increment_div;
+ bool native422_support;
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
unsigned int min_disp_clk_khz;
+ unsigned int min_dpp_clk_khz;
int sr_exit_time_dpm0_ns;
int sr_enter_plus_exit_time_dpm0_ns;
int sr_exit_time_ns;
@@ -367,17 +402,30 @@ struct dc_debug_options {
bool scl_reset_length10;
bool hdmi20_disable;
bool skip_detection_link_training;
+ bool remove_disconnect_edp;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
bool disable_tri_buf;
+ bool dmub_offload_enabled;
+ bool dmcub_emulation;
+ bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_fec;
-#endif
+ bool disable_48mhz_pwrdwn;
/* This forces a hard min on the DCFCLK requested to SMU/PP
* watermarks are not affected.
*/
unsigned int force_min_dcfclk_mhz;
+ bool disable_timing_sync;
+ bool cm_in_bypass;
+ int force_clock_mode; /* every mode change */
+
+ bool nv12_iflip_vm_wa;
+ bool disable_dram_clock_change_vactive_support;
+ bool validate_dml_output;
+ bool enable_dmcub_surface_flip;
+ bool usbc_combo_phy_reset_wa;
+ bool disable_dsc;
};
struct dc_debug_data {
@@ -386,7 +434,6 @@ struct dc_debug_data {
uint32_t auxErrorCount;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config {
struct {
uint64_t start_addr;
@@ -406,6 +453,7 @@ struct dc_phy_addr_space_config {
} gart_config;
bool valid;
+ uint64_t page_table_default_page_addr;
};
struct dc_virtual_addr_space_config {
@@ -415,7 +463,6 @@ struct dc_virtual_addr_space_config {
uint32_t page_table_block_size_in_bytes;
uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc. 0 = invalid
};
-#endif
struct dc_bounding_box_overrides {
int sr_exit_time_ns;
@@ -441,13 +488,9 @@ struct dc {
struct dc_config config;
struct dc_debug_options debug;
struct dc_bounding_box_overrides bb_overrides;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa work_arounds;
-#endif
struct dc_context *ctx;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config vm_pa_config;
-#endif
uint8_t link_count;
struct dc_link *links[MAX_PIPES * 2];
@@ -463,7 +506,7 @@ struct dc {
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
@@ -477,7 +520,7 @@ struct dc {
bool optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
- bool optimize_seamless_boot;
+ int optimize_seamless_boot_streams;
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -485,10 +528,8 @@ struct dc {
struct dc_debug_data debug_data;
const char *build_id;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct vm_helper *vm_helper;
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
};
enum frame_buffer_mode {
@@ -520,30 +561,36 @@ struct dc_init_data {
struct dc_bios *vbios_override;
enum dce_environment dce_environment;
+ struct dmub_offload_funcs *dmub_if;
+ struct dc_reg_helper_state *dmub_offload;
+
struct dc_config flags;
uint32_t log_mask;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
};
struct dc_callback_init {
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#else
uint8_t reserved;
+#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
+void dc_hardware_init(struct dc *dc);
+
int dc_get_vmid_use_vector(struct dc *dc);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
/* Returns the number of vmids supported */
int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config);
-#endif
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
+void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
@@ -597,9 +644,12 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_UNITY,
TRANSFER_FUNCTION_HLG,
TRANSFER_FUNCTION_HLG12,
- TRANSFER_FUNCTION_GAMMA22
+ TRANSFER_FUNCTION_GAMMA22,
+ TRANSFER_FUNCTION_GAMMA24,
+ TRANSFER_FUNCTION_GAMMA26
};
+
struct dc_transfer_func {
struct kref refcount;
enum dc_transfer_func_type type;
@@ -613,17 +663,29 @@ struct dc_transfer_func {
};
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+
+union dc_3dlut_state {
+ struct {
+ uint32_t initialized:1; /* set once the 3D LUT has gone through the color module for initialization */
+ uint32_t rmu_idx_valid:1; /* set when the mux settings are valid */
+ uint32_t rmu_mux_num:3; /* index of the mux to use */
+ uint32_t mpc_rmu0_mux:4; /* select MPCC on mux, one of: mpcc0, mpcc1, mpcc2, mpcc3 */
+ uint32_t mpc_rmu1_mux:4;
+ uint32_t mpc_rmu2_mux:4;
+ uint32_t reserved:15;
+ } bits;
+ uint32_t raw;
+};
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
- uint32_t hdr_multiplier;
- bool initialized;
+ struct fixed31_32 hdr_multiplier;
+ bool initialized; /* remove after diag fix */
+ union dc_3dlut_state state;
struct dc_context *ctx;
};
-#endif
/*
* This structure is filled in by dc_surface_get_status and contains
* the last requested address and the currently active address so the called
@@ -646,7 +708,7 @@ union surface_update_flags {
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t global_alpha_change:1;
- uint32_t sdr_white_level:1;
+ uint32_t hdr_mult:1;
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
@@ -674,15 +736,13 @@ union surface_update_flags {
struct dc_plane_state {
struct dc_plane_address address;
struct dc_plane_flip_time time;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool triplebuffer_flips;
-#endif
struct scaling_taps scaling_quality;
struct rect src_rect;
struct rect dst_rect;
struct rect clip_rect;
- union plane_size plane_size;
+ struct plane_size plane_size;
union dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
@@ -692,18 +752,16 @@ struct dc_plane_state {
struct dc_bias_and_scale *bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
- uint32_t sdr_white_level;
+ struct fixed31_32 hdr_mult;
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
enum dc_color_space color_space;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_3dlut *lut3d_func;
struct dc_transfer_func *in_shaper_func;
struct dc_transfer_func *blend_tf;
-#endif
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -716,6 +774,7 @@ struct dc_plane_state {
bool visible;
bool flip_immediate;
bool horizontal_mirror;
+ int layer_index;
union surface_update_flags update_flags;
/* private to DC core */
@@ -731,20 +790,20 @@ struct dc_plane_state {
};
struct dc_plane_info {
- union plane_size plane_size;
+ struct plane_size plane_size;
union dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
- unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
+ int layer_index;
};
struct dc_scaling_info {
@@ -761,7 +820,7 @@ struct dc_surface_update {
const struct dc_flip_addrs *flip_addr;
const struct dc_plane_info *plane_info;
const struct dc_scaling_info *scaling_info;
-
+ struct fixed31_32 hdr_mult;
/* following updates require alloc/sleep/spin that is not isr safe,
* null means no updates
*/
@@ -770,11 +829,9 @@ struct dc_surface_update {
const struct dc_csc_transform *input_csc_color_matrix;
const struct fixed31_32 *coeff_reduction_factor;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
const struct dc_transfer_func *func_shaper;
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
-#endif
};
/*
@@ -795,11 +852,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
struct dc_transfer_func *dc_create_transfer_func(void);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_3dlut *dc_create_3dlut_func(void);
void dc_3dlut_func_release(struct dc_3dlut *lut);
void dc_3dlut_func_retain(struct dc_3dlut *lut);
-#endif
/*
* This structure holds a surface address. There could be multiple addresses
* in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
@@ -834,6 +889,9 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
+bool dc_set_generic_gpio_for_stereo(bool enable,
+ struct gpio_service *gpio_service);
+
/*
* fast_validate: we return after determining if we can support the new state,
* but before we populate the programming info
@@ -858,6 +916,8 @@ void dc_resource_state_copy_construct_current(
void dc_resource_state_destruct(struct dc_state *context);
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+
/*
* TODO update to make it about validation sets
* Set up streams and links associated to drive sinks
@@ -913,10 +973,10 @@ struct dpcd_caps {
bool panel_mode_edp;
bool dpcd_display_control_capable;
bool ext_receiver_cap_field_present;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
union dpcd_fec_capability fec_cap;
struct dpcd_dsc_capabilities dsc_caps;
-#endif
+ struct dc_lttpr_caps lttpr_caps;
+
};
#include "dc_link.h"
@@ -937,14 +997,12 @@ struct dc_container_id {
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
-#endif
/*
* The sink structure contains EDID and other display device properties
@@ -959,9 +1017,7 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_sink_dsc_caps sink_dsc_caps;
-#endif
/* private to DC core */
struct dc_link *link;
@@ -1019,11 +1075,12 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
+bool dc_is_hw_initialized(struct dc *dc);
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
+enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
+void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
#include "dc_dsc.h"
-#endif
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 78c3b300ec45..b1dd0d60d98e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -61,9 +61,6 @@ struct dc_vbios_funcs {
struct graphics_object_id connector_object_id,
uint32_t device_tag_index,
struct connector_device_tag_info *info);
- enum bp_result (*get_firmware_info)(
- struct dc_bios *bios,
- struct dc_firmware_info *info);
enum bp_result (*get_spread_spectrum_info)(
struct dc_bios *bios,
enum as_signal_type signal,
@@ -152,6 +149,8 @@ struct dc_bios {
struct dc_context *ctx;
const struct bios_registers *regs;
struct integrated_info *integrated_info;
+ struct dc_firmware_info fw_info;
+ bool fw_info_valid;
};
#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 4ef97f65e55d..4f8f576d5fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -49,7 +49,8 @@ enum aux_channel_operation_result {
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
- AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON,
+ AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
new file mode 100644
index 000000000000..59c298a6484f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../dmub/inc/dmub_srv.h"
+
+static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
+ struct dmub_srv *dmub)
+{
+ dc_srv->dmub = dmub;
+ dc_srv->ctx = dc->ctx;
+}
+
+struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
+{
+ struct dc_dmub_srv *dc_srv =
+ kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);
+
+ if (dc_srv == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_dmub_srv_construct(dc_srv, dc, dmub);
+
+ return dc_srv;
+}
+
+void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
+{
+ if (*dmub_srv) {
+ kfree(*dmub_srv);
+ *dmub_srv = NULL;
+ }
+}
+
+void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
+ struct dmub_cmd_header *cmd)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ status = dmub_srv_cmd_queue(dmub, cmd);
+ if (status == DMUB_STATUS_OK)
+ return;
+
+ if (status != DMUB_STATUS_QUEUE_FULL)
+ goto error;
+
+ /* Execute and wait for queue to become empty again. */
+ dc_dmub_srv_cmd_execute(dc_dmub_srv);
+ dc_dmub_srv_wait_idle(dc_dmub_srv);
+
+ /* Requeue the command. */
+ status = dmub_srv_cmd_queue(dmub, cmd);
+ if (status == DMUB_STATUS_OK)
+ return;
+
+error:
+ DC_ERROR("Error queuing DMUB command: status=%d\n", status);
+}
+
+void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ status = dmub_srv_cmd_execute(dmub);
+ if (status != DMUB_STATUS_OK)
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+}
+
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ if (status != DMUB_STATUS_OK)
+ DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
+}
+
+void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ for (;;) {
+ /* Wait up to a second for PHY init. */
+ status = dmub_srv_wait_for_phy_init(dmub, 1000000);
+ if (status == DMUB_STATUS_OK)
+ /* Initialization OK */
+ break;
+
+ DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
+ ASSERT(0);
+
+ if (status != DMUB_STATUS_TIMEOUT)
+ /*
+ * Server is likely initialized, or we don't have
+ * DMCUB HW support - retrying will not help.
+ */
+ break;
+
+ /* Continue spinning so we don't hang the ASIC. */
+ }
+}
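
For illustration only, not part of the patch: the typical round trip through the new service is queue, execute, then wait. A minimal sketch, assuming cmd points at a fully built DMUB command whose first member is its struct dmub_cmd_header; the wrapper name is hypothetical.

static void example_dmub_send_and_wait(struct dc_dmub_srv *dc_dmub_srv,
				       struct dmub_cmd_header *cmd)
{
	dc_dmub_srv_cmd_queue(dc_dmub_srv, cmd);  /* copy the command into the ring */
	dc_dmub_srv_cmd_execute(dc_dmub_srv);     /* kick off DMCUB execution */
	dc_dmub_srv_wait_idle(dc_dmub_srv);       /* block until the queue drains */
}
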
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
new file mode 100644
index 000000000000..754b6077539c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DC_SRV_H_
+#define _DMUB_DC_SRV_H_
+
+#include "os_types.h"
+#include "../dmub/inc/dmub_cmd.h"
+
+struct dmub_srv;
+struct dmub_cmd_header;
+
+struct dc_reg_helper_state {
+ bool gather_in_progress;
+ uint32_t same_addr_count;
+ bool should_burst_write;
+ union dmub_rb_cmd cmd_data;
+ unsigned int reg_seq_count;
+};
+
+struct dc_dmub_srv {
+ struct dmub_srv *dmub;
+ struct dc_reg_helper_state reg_helper_offload;
+
+ struct dc_context *ctx;
+ void *dm;
+};
+
+void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
+ struct dmub_cmd_header *cmd);
+
+void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
+
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+
+void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+
+#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index dfcec4d3e9c0..dfe4472c9e40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -90,6 +90,13 @@ enum dc_post_cursor2 {
POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
};
+enum dc_dp_training_pattern {
+ DP_TRAINING_PATTERN_SEQUENCE_1 = 0,
+ DP_TRAINING_PATTERN_SEQUENCE_2,
+ DP_TRAINING_PATTERN_SEQUENCE_3,
+ DP_TRAINING_PATTERN_SEQUENCE_4,
+};
+
struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
@@ -109,6 +116,21 @@ struct dc_link_training_settings {
struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
};
+struct dc_link_training_overrides {
+ enum dc_voltage_swing *voltage_swing;
+ enum dc_pre_emphasis *pre_emphasis;
+ enum dc_post_cursor2 *post_cursor2;
+
+ uint16_t *cr_pattern_time;
+ uint16_t *eq_pattern_time;
+ enum dc_dp_training_pattern *pattern_for_eq;
+
+ enum dc_link_spread *downspread;
+ bool *alternate_scrambler_reset;
+ bool *enhanced_framing;
+ bool *mst_enable;
+ bool *fec_enable;
+};
union dpcd_rev {
struct {
@@ -447,13 +469,13 @@ union training_aux_rd_interval {
/* Automated test structures */
union test_request {
struct {
- uint8_t LINK_TRAINING :1;
- uint8_t LINK_TEST_PATTRN :1;
- uint8_t EDID_READ :1;
- uint8_t PHY_TEST_PATTERN :1;
- uint8_t AUDIO_TEST_PATTERN :1;
- uint8_t RESERVED :1;
- uint8_t TEST_STEREO_3D :1;
+ uint8_t LINK_TRAINING :1;
+ uint8_t LINK_TEST_PATTRN :1;
+ uint8_t EDID_READ :1;
+ uint8_t PHY_TEST_PATTERN :1;
+ uint8_t RESERVED :1;
+ uint8_t AUDIO_TEST_PATTERN :1;
+ uint8_t TEST_AUDIO_DISABLED_VIDEO :1;
} bits;
uint8_t raw;
};
@@ -500,19 +522,52 @@ union link_test_pattern {
union test_misc {
struct dpcd_test_misc_bits {
- unsigned char SYNC_CLOCK :1;
+ unsigned char SYNC_CLOCK :1;
/* dpcd_test_color_format */
- unsigned char CLR_FORMAT :2;
+ unsigned char CLR_FORMAT :2;
/* dpcd_test_dyn_range */
- unsigned char DYN_RANGE :1;
- unsigned char YCBCR :1;
+ unsigned char DYN_RANGE :1;
+ unsigned char YCBCR_COEFS :1;
/* dpcd_test_bit_depth */
- unsigned char BPC :3;
+ unsigned char BPC :3;
+ } bits;
+ unsigned char raw;
+};
+
+union audio_test_mode {
+ struct {
+ unsigned char sampling_rate :4;
+ unsigned char channel_count :4;
+ } bits;
+ unsigned char raw;
+};
+
+union audio_test_pattern_period {
+ struct {
+ unsigned char pattern_period :4;
+ unsigned char reserved :4;
} bits;
unsigned char raw;
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+struct audio_test_pattern_type {
+ unsigned char value;
+};
+
+struct dp_audio_test_data_flags {
+ uint8_t test_requested :1;
+ uint8_t disable_video :1;
+};
+
+struct dp_audio_test_data {
+
+ struct dp_audio_test_data_flags flags;
+ uint8_t sampling_rate;
+ uint8_t channel_count;
+ uint8_t pattern_type;
+ uint8_t pattern_period[8];
+};
+
/* FEC capability DPCD register field bits-*/
union dpcd_fec_capability {
struct {
@@ -637,6 +692,5 @@ struct dpcd_dsc_capabilities {
union dpcd_dsc_ext_capabilities dsc_ext_caps;
};
-#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 6e42209f0e20..3800340a5b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -1,4 +1,3 @@
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef DC_DSC_H_
#define DC_DSC_H_
/*
@@ -30,6 +29,7 @@
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+#include "dc_types.h"
struct dc_dsc_bw_range {
uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */
@@ -39,24 +39,45 @@ struct dc_dsc_bw_range {
uint32_t stream_kbps; /* Uncompressed stream bandwidth */
};
+struct display_stream_compressor {
+ const struct dsc_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct dc_dsc_policy {
+ bool use_min_slices_h;
+ int max_slices_h; // 0 means use the maximum number of slices available
+ int min_slice_height; // Must not be less than 8
+ uint32_t max_target_bpp;
+ uint32_t min_target_bpp;
+};
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
+ const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
- const uint32_t min_kbps,
- const uint32_t max_kbps,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
+ const uint32_t min_bpp,
+ const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
-#endif
+
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+ struct dc_dsc_policy *policy);
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
#endif
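
For illustration only, not part of the patch: a sketch of chaining the reworked DSC helpers now that they take a display_stream_compressor instead of the whole dc. The sink caps, timing and target bandwidth are assumed to come from the caller; passing 0 for dsc_min_slice_height_override means no override, and the bandwidth check shown is only an example policy.

static bool example_try_dsc(const struct display_stream_compressor *dsc,
			    const struct dsc_dec_dpcd_caps *sink_caps,
			    const struct dc_crtc_timing *timing,
			    uint32_t target_bandwidth_kbps,
			    struct dc_dsc_config *cfg)
{
	struct dc_dsc_policy policy;
	struct dc_dsc_bw_range range;

	dc_dsc_get_policy_for_timing(timing, &policy);

	if (!dc_dsc_compute_bandwidth_range(dsc, 0 /* no slice height override */,
					    policy.min_target_bpp, policy.max_target_bpp,
					    sink_caps, timing, &range))
		return false;

	if (target_bandwidth_kbps < range.min_kbps)
		return false; /* DSC at the minimum target bpp still exceeds the requested bandwidth */

	return dc_dsc_compute_config(dsc, sink_caps, 0 /* no slice height override */,
				     target_bandwidth_kbps, timing, cfg);
}
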
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 30b2f9edd42f..737048d8a96c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -32,6 +32,74 @@
#include "dm_services.h"
#include <stdarg.h>
+#include "dc.h"
+#include "dc_dmub_srv.h"
+
+static inline void submit_dmub_read_modify_write(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
+ bool gather = false;
+
+ offload->should_burst_write =
+ (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
+ cmd_buf->header.payload_bytes =
+ sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
+
+ gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+ offload->reg_seq_count = 0;
+ offload->same_addr_count = 0;
+}
+
+static inline void submit_dmub_burst_write(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
+ bool gather = false;
+
+ cmd_buf->header.payload_bytes =
+ sizeof(uint32_t) * offload->reg_seq_count;
+
+ gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+ offload->reg_seq_count = 0;
+}
+
+static inline void submit_dmub_reg_wait(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+ bool gather = false;
+
+ gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+ offload->reg_seq_count = 0;
+
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+}
+
struct dc_reg_value_masks {
uint32_t value;
uint32_t mask;
@@ -77,6 +145,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
}
}
+static void dmub_flush_buffer_execute(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ submit_dmub_read_modify_write(offload, ctx);
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+}
+
+static void dmub_flush_burst_write_buffer_execute(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ submit_dmub_burst_write(offload, ctx);
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+}
+
+static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
+ uint32_t reg_val)
+{
+ struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+ struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
+
+ /* flush command if buffer is full */
+ if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
+ dmub_flush_burst_write_buffer_execute(offload, ctx);
+
+ if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
+ addr != cmd_buf->addr) {
+ dmub_flush_burst_write_buffer_execute(offload, ctx);
+ return false;
+ }
+
+ cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
+ cmd_buf->header.sub_type = 0;
+ cmd_buf->addr = addr;
+ cmd_buf->write_values[offload->reg_seq_count] = reg_val;
+ offload->reg_seq_count++;
+
+ return true;
+}
+
+static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
+ struct dc_reg_value_masks *field_value_mask)
+{
+ struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+ struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
+ struct dmub_cmd_read_modify_write_sequence *seq;
+
+ /* flush command if buffer is full */
+ if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
+ offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
+ dmub_flush_buffer_execute(offload, ctx);
+
+ if (offload->should_burst_write) {
+ if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
+ return field_value_mask->value;
+ else
+ offload->should_burst_write = false;
+ }
+
+ /* pack commands */
+ cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
+ cmd_buf->header.sub_type = 0;
+ seq = &cmd_buf->seq[offload->reg_seq_count];
+
+ if (offload->reg_seq_count) {
+ if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
+ offload->same_addr_count++;
+ else
+ offload->same_addr_count = 0;
+ }
+
+ seq->addr = addr;
+ seq->modify_mask = field_value_mask->mask;
+ seq->modify_value = field_value_mask->value;
+ offload->reg_seq_count++;
+
+ return field_value_mask->value;
+}
+
+static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
+ uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
+{
+ struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+ struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+
+ cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
+ cmd_buf->header.sub_type = 0;
+ cmd_buf->reg_wait.addr = addr;
+ cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
+ cmd_buf->reg_wait.mask = mask;
+ cmd_buf->reg_wait.time_out_us = time_out_us;
+}
+
uint32_t generic_reg_update_ex(const struct dc_context *ctx,
uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
@@ -93,6 +255,11 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
va_end(ap);
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress)
+ return dmub_reg_value_pack(ctx, addr, &field_value_mask);
+ /* todo: return void so we can decouple code running in driver from register states */
+
/* mmio write directly */
reg_val = dm_read_reg(ctx, addr);
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
@@ -118,6 +285,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx,
/* mmio write directly */
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+ return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
+ /* todo: return void so we can decouple code running in driver from register states */
+ }
+
dm_write_reg(ctx, addr, reg_val);
return reg_val;
}
@@ -134,6 +308,14 @@ uint32_t dm_read_reg_func(
return 0;
}
#endif
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
@@ -299,7 +481,19 @@ void generic_reg_wait(const struct dc_context *ctx,
uint32_t reg_val;
int i;
- /* something is terribly wrong if time out is > 200ms. (5Hz) */
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+ dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
+ delay_between_poll_us * time_out_num_tries);
+ return;
+ }
+
+ /*
+ * Something is terribly wrong if the timeout is > 3000ms.
+ * 3000ms is the maximum time needed for the SMU to pass values back;
+ * this value comes from experiments.
+ */
ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);
for (i = 0; i <= time_out_num_tries; i++) {
@@ -346,12 +540,48 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
{
uint32_t value = 0;
+ // There should not be any offload in progress when reading a register.
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+ ASSERT(false);
+ }
+
dm_write_reg(ctx, addr_index, index);
value = dm_read_reg(ctx, addr_data);
return value;
}
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ ...)
+{
+ uint32_t shift, mask, *field_value;
+ uint32_t value = 0;
+ int i = 1;
+
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
+ *field_value1 = get_reg_field_value_ex(value, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t *);
+
+ *field_value = get_reg_field_value_ex(value, mask, shift);
+ i++;
+ }
+
+ va_end(ap);
+
+ return value;
+}
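
For illustration only, not part of the patch: a raw call of the new variadic getter reading two fields from one indirect register. The MY_* register and field macros are placeholders; in the driver this helper is normally reached through the register-helper macros rather than called directly.

uint32_t field_a, field_b;

generic_indirect_reg_get(ctx, MY_ADDR_INDEX, MY_ADDR_DATA, MY_REG_INDEX,
			 2, /* two (shift, mask, pointer) triplets follow */
			 MY_FIELD_A__SHIFT, MY_FIELD_A_MASK, &field_a,
			 MY_FIELD_B__SHIFT, MY_FIELD_B_MASK, &field_b);
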
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
@@ -382,3 +612,68 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
+
+void reg_sequence_start_gather(const struct dc_context *ctx)
+{
+ /* If reg sequence offload is supported and enabled, set a flag to
+ * indicate that the REG_SET/REG_UPDATE macros should build a reg
+ * sequence command buffer rather than write MMIO directly.
+ */
+
+ if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
+ struct dc_reg_helper_state *offload =
+ &ctx->dmub_srv->reg_helper_offload;
+
+ /* Caller sequence mismatch; debug the caller. Offload will not work. */
+ ASSERT(!offload->gather_in_progress);
+
+ offload->gather_in_progress = true;
+ }
+}
+
+void reg_sequence_start_execute(const struct dc_context *ctx)
+{
+ struct dc_reg_helper_state *offload;
+
+ if (!ctx->dmub_srv)
+ return;
+
+ offload = &ctx->dmub_srv->reg_helper_offload;
+
+ if (offload && offload->gather_in_progress) {
+ offload->gather_in_progress = false;
+ offload->should_burst_write = false;
+ switch (offload->cmd_data.cmd_common.header.type) {
+ case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
+ submit_dmub_read_modify_write(offload, ctx);
+ break;
+ case DMUB_CMD__REG_REG_WAIT:
+ submit_dmub_reg_wait(offload, ctx);
+ break;
+ case DMUB_CMD__REG_SEQ_BURST_WRITE:
+ submit_dmub_burst_write(offload, ctx);
+ break;
+ default:
+ return;
+ }
+
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+ }
+}
+
+void reg_sequence_wait_done(const struct dc_context *ctx)
+{
+ /* callback to DM to poll for last submission done */
+ struct dc_reg_helper_state *offload;
+
+ if (!ctx->dmub_srv)
+ return;
+
+ offload = &ctx->dmub_srv->reg_helper_offload;
+
+ if (offload &&
+ ctx->dc->debug.dmub_offload_enabled &&
+ !ctx->dc->debug.dmcub_emulation) {
+ dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ }
+}
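
For illustration only, not part of the patch: the three new reg_sequence_* entry points are meant to bracket a block of register programming so that the REG_SET/REG_UPDATE macros are gathered into a DMUB command buffer instead of hitting MMIO. A minimal sketch of that calling pattern; example_program_block() is a placeholder for ordinary macro-based programming code.

static void example_offloaded_programming(const struct dc_context *ctx)
{
	reg_sequence_start_gather(ctx);   /* capture subsequent REG_* macros */

	example_program_block(ctx);       /* placeholder: REG_SET/REG_UPDATE based code */

	reg_sequence_start_execute(ctx);  /* submit the gathered sequence to DMCUB */
	reg_sequence_wait_done(ctx);      /* poll until the submission completes */
}
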
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 22db5682aa6c..25c50bcab9e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -115,56 +115,25 @@ struct rect {
int height;
};
-union plane_size {
- /* Grph or Video will be selected
- * based on format above:
- * Use Video structure if
- * format >= DalPixelFormat_VideoBegin
- * else use Grph structure
+struct plane_size {
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch
+ * is 32 pixel aligned.
*/
- struct {
- struct rect surface_size;
- /* Graphic surface pitch in pixels.
- * In LINEAR_GENERAL mode, pitch
- * is 32 pixel aligned.
- */
- int surface_pitch;
- } grph;
-
- struct {
- struct rect luma_size;
- /* Graphic surface pitch in pixels.
- * In LINEAR_GENERAL mode, pitch is
- * 32 pixel aligned.
- */
- int luma_pitch;
-
- struct rect chroma_size;
- /* Graphic surface pitch in pixels.
- * In LINEAR_GENERAL mode, pitch is
- * 32 pixel aligned.
- */
- int chroma_pitch;
- } video;
+ int surface_pitch;
+ int chroma_pitch;
+ struct rect surface_size;
+ struct rect chroma_size;
};
struct dc_plane_dcc_param {
bool enable;
- union {
- struct {
- int meta_pitch;
- bool independent_64b_blks;
- } grph;
-
- struct {
- int meta_pitch_l;
- bool independent_64b_blks_l;
+ int meta_pitch;
+ bool independent_64b_blks;
- int meta_pitch_c;
- bool independent_64b_blks_c;
- } video;
- };
+ int meta_pitch_c;
+ bool independent_64b_blks_c;
};
/*Displayable pixel format in fb*/
@@ -196,12 +165,10 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX,
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX,
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT,
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT,
-#endif
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -209,10 +176,8 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010,
SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102,
-#endif
SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_INVALID
@@ -251,12 +216,10 @@ enum tile_split_values {
DC_ROTATED_MICRO_TILING = 0x3,
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum tripleBuffer_enable {
DC_TRIPLEBUFFER_DISABLE = 0x0,
DC_TRIPLEBUFFER_ENABLE = 0x1,
};
-#endif
/* TODO: These values come from hardware spec. We need to readdress this
* if they ever change.
@@ -456,13 +419,11 @@ struct dc_csc_transform {
bool enable_adjustment;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_rgb_fixed {
struct fixed31_32 red;
struct fixed31_32 green;
struct fixed31_32 blue;
};
-#endif
struct dc_gamma {
struct kref refcount;
@@ -482,7 +443,6 @@ struct dc_gamma {
* is_logical_identity indicates the given gamma ramp regardless of type is identity.
*/
bool is_identity;
- bool is_logical_identity;
};
/* Used by both ipp amd opp functions*/
@@ -498,10 +458,8 @@ enum dc_cursor_color_format {
CURSOR_MODE_COLOR_1BIT_AND,
CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED,
CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED
-#endif
};
/*
@@ -519,7 +477,8 @@ union dc_cursor_attribute_flags {
uint32_t INVERT_PIXEL_DATA:1;
uint32_t ZERO_EXPANSION:1;
uint32_t MIN_MAX_INVERT:1;
- uint32_t RESERVED:25;
+ uint32_t ENABLE_CURSOR_DEGAMMA:1;
+ uint32_t RESERVED:24;
} bits;
uint32_t value;
};
@@ -607,6 +566,11 @@ enum dc_quantization_range {
QUANTIZATION_RANGE_LIMITED
};
+enum dc_dynamic_expansion {
+ DYN_EXPANSION_AUTO,
+ DYN_EXPANSION_DISABLE
+};
+
/* XFM */
/* used in struct dc_plane_state */
@@ -615,6 +579,7 @@ struct scaling_taps {
uint32_t h_taps;
uint32_t v_taps_c;
uint32_t h_taps_c;
+ bool integer_scaling;
};
enum dc_timing_standard {
@@ -647,10 +612,8 @@ enum dc_color_depth {
COLOR_DEPTH_121212,
COLOR_DEPTH_141414,
COLOR_DEPTH_161616,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
COLOR_DEPTH_999,
COLOR_DEPTH_111111,
-#endif
COLOR_DEPTH_COUNT
};
@@ -711,9 +674,7 @@ struct dc_crtc_timing_flags {
* rates less than or equal to 340Mcsc */
uint32_t LTE_340MCSC_SCRAMBLE:1;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
uint32_t DSC : 1; /* Use DSC with this timing */
-#endif
};
enum dc_timing_3d_format {
@@ -738,29 +699,6 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
-enum trigger_delay {
- TRIGGER_DELAY_NEXT_PIXEL = 0,
- TRIGGER_DELAY_NEXT_LINE,
-};
-
-enum crtc_event {
- CRTC_EVENT_VSYNC_RISING = 0,
- CRTC_EVENT_VSYNC_FALLING
-};
-
-struct crtc_trigger_info {
- bool enabled;
- struct dc_stream_state *event_source;
- enum crtc_event event;
- enum trigger_delay delay;
-};
-
-struct dc_crtc_timing_adjust {
- uint32_t v_total_min;
- uint32_t v_total_max;
-};
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
uint32_t num_slices_v; /* Number of DSC slices - vertical */
@@ -771,7 +709,6 @@ struct dc_dsc_config {
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
};
-#endif
struct dc_crtc_timing {
uint32_t h_total;
uint32_t h_border_left;
@@ -798,11 +735,34 @@ struct dc_crtc_timing {
enum scanning_type scan_type;
struct dc_crtc_timing_flags flags;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config dsc_cfg;
-#endif
};
+enum trigger_delay {
+ TRIGGER_DELAY_NEXT_PIXEL = 0,
+ TRIGGER_DELAY_NEXT_LINE,
+};
+
+enum crtc_event {
+ CRTC_EVENT_VSYNC_RISING = 0,
+ CRTC_EVENT_VSYNC_FALLING
+};
+
+struct crtc_trigger_info {
+ bool enabled;
+ struct dc_stream_state *event_source;
+ enum crtc_event event;
+ enum trigger_delay delay;
+};
+
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+ uint32_t v_total_mid;
+ uint32_t v_total_mid_frame_num;
+};
+
+
/* Passed on init */
enum vram_type {
VIDEO_MEMORY_TYPE_GDDR5 = 2,
@@ -812,7 +772,6 @@ enum vram_type {
VIDEO_MEMORY_TYPE_GDDR6 = 6,
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dwb_cnv_out_bpc {
DWB_CNV_OUT_BPC_8BPC = 0,
DWB_CNV_OUT_BPC_10BPC = 1,
@@ -863,7 +822,6 @@ struct mcif_buf_params {
unsigned int swlock;
};
-#endif
#define MAX_TG_COLOR_VALUE 0x3FF
struct tg_color {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 6f0b80111e58..d25603128394 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -29,13 +29,11 @@
#include "dc_types.h"
#include "grph_object_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
enum dc_link_fec_state {
dc_link_fec_not_ready,
dc_link_fec_ready,
dc_link_fec_enabled
};
-#endif
struct dc_link_status {
bool link_active;
struct dpcd_caps *dpcd_caps;
@@ -83,6 +81,9 @@ struct dc_link {
bool is_hpd_filter_disabled;
bool dp_ss_off;
bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool is_lttpr_mode_transparent;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
@@ -92,6 +93,8 @@ struct dc_link {
struct dc_link_settings cur_link_settings;
struct dc_lane_settings cur_lane_setting;
struct dc_link_settings preferred_link_setting;
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
uint8_t ddc_hw_inst;
@@ -123,12 +126,14 @@ struct dc_link {
unsigned short chip_caps;
unsigned int dpcd_sink_count;
enum edp_revision edp_revision;
- bool psr_enabled;
+ bool psr_feature_enabled;
+ bool psr_allow_active;
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -136,9 +141,7 @@ struct dc_link {
struct link_trace link_trace;
struct gpio *hpd_gpio;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
enum dc_link_fec_state fec_state;
-#endif
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
@@ -155,6 +158,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
+static inline struct dc_link *get_edp_link(const struct dc *dc)
+{
+ int i;
+
+ // report any eDP links, even unconnected DDIs
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+ return dc->links[i];
+ }
+ return NULL;
+}
+
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
@@ -167,7 +182,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
-bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
@@ -189,6 +204,8 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -217,11 +234,24 @@ void dc_link_dp_set_drive_settings(
struct dc_link *link,
struct link_training_settings *lt_settings);
+bool dc_link_dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
const struct dc_link_settings *link_setting,
bool skip_video_pattern);
+bool dc_link_dp_sync_lt_begin(struct dc_link *link);
+
+enum link_training_result dc_link_dp_sync_lt_attempt(
+ struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_settings);
+
+bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down);
+
void dc_link_dp_enable_hpd(const struct dc_link *link);
void dc_link_dp_disable_hpd(const struct dc_link *link);
@@ -229,6 +259,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link);
bool dc_link_dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
@@ -251,10 +282,16 @@ void dc_link_perform_link_training(struct dc *dc,
void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link *link);
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
void dc_link_enable_hpd(const struct dc_link *link);
void dc_link_disable_hpd(const struct dc_link *link);
void dc_link_set_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
@@ -265,11 +302,18 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link);
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link);
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd);
+
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */
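
The backlight comment in this header describes an unsigned 16.16 fixed-point value where 1.0 (0x00010000) is maximum brightness. Below is a minimal, standalone sketch of that encoding; the helper name percent_to_u16_16 and the percentage-based interface are illustrative only and not part of the DC API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: build the backlight_pwm_u16_16 value described above,
 * where 0x00010000 (1.0 in u16.16) is maximum backlight. The helper name is
 * hypothetical. */
static uint32_t percent_to_u16_16(unsigned int percent)
{
	/* scale percent/100 into 16 fractional bits */
	return (uint32_t)(((uint64_t)percent << 16) / 100);
}

int main(void)
{
	printf("100%% -> 0x%08x\n", (unsigned int)percent_to_u16_16(100)); /* 0x00010000 */
	printf(" 50%% -> 0x%08x\n", (unsigned int)percent_to_u16_16(50));  /* 0x00008000 */
	return 0;
}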
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 0fa1c26bc20d..92096de79dec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -52,7 +52,6 @@ struct freesync_context {
bool dummy;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
enum hubp_dmdata_mode {
DMDATA_SW_MODE,
DMDATA_HW_MODE
@@ -82,9 +81,7 @@ struct dc_dmdata_attributes {
/* An unbounded array of uint32s, represents software dmdata to be loaded */
uint32_t *dmdata_sw_data;
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_writeback_info {
bool wb_enabled;
int dwb_pipe_inst;
@@ -96,7 +93,6 @@ struct dc_writeback_update {
unsigned int num_wb_info;
struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
};
-#endif
enum vertical_interrupt_ref_point {
START_V_UPDATE = 0,
@@ -113,6 +109,19 @@ struct periodic_interrupt_config {
int lines_offset;
};
+union stream_update_flags {
+ struct {
+ uint32_t scaling:1;
+ uint32_t out_tf:1;
+ uint32_t out_csc:1;
+ uint32_t abm_level:1;
+ uint32_t dpms_off:1;
+ uint32_t gamut_remap:1;
+ uint32_t wb_update:1;
+ } bits;
+
+ uint32_t raw;
+};
struct dc_stream_state {
// sink is deprecated, new code should not reference
@@ -149,6 +158,7 @@ struct dc_stream_state {
enum view_3d_format view_format;
+ bool use_vsc_sdp_for_colorimetry;
bool ignore_msa_timing_param;
bool converter_disable_audio;
uint8_t qs_bit;
@@ -188,11 +198,9 @@ struct dc_stream_state {
struct crtc_trigger_info triggered_crtc_reset;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* writeback */
unsigned int num_wb_info;
struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
-#endif
/* Computed state bits */
bool mode_changed : 1;
@@ -211,12 +219,15 @@ struct dc_stream_state {
bool apply_seamless_boot_optimization;
uint32_t stream_id;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
-#endif
+ union stream_update_flags update_flags;
};
+#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+
struct dc_stream_update {
+ struct dc_stream_state *stream;
+
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
@@ -231,6 +242,7 @@ struct dc_stream_update {
struct dc_info_packet *vsp_infopacket;
bool *dpms_off;
+ bool integer_scaling_update;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
@@ -238,12 +250,8 @@ struct dc_stream_update {
struct dc_csc_transform *output_csc_transform;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_writeback_update *wb_update;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
struct dc_dsc_config *dsc_config;
-#endif
};
bool dc_is_stream_unchanged(
@@ -333,18 +341,23 @@ bool dc_add_all_planes_for_stream(
int plane_count,
struct dc_state *context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info);
+
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
+
+bool dc_stream_warmup_writeback(struct dc *dc,
+ int num_dwb,
+ struct dc_writeback_info *wb_info);
+
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
+
bool dc_stream_set_dynamic_metadata(struct dc *dc,
struct dc_stream_state *stream,
struct dc_dmdata_attributes *dmdata_attr);
-#endif
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
@@ -426,10 +439,13 @@ bool dc_stream_get_crc(struct dc *dc,
uint32_t *g_y,
uint32_t *b_cb);
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
struct dc_stream_state **stream,
int num_streams,
- const struct dc_static_screen_events *events);
+ const struct dc_static_screen_params *params);
+
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option);
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option);
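
The new union stream_update_flags added above overlays a per-field bit view with a raw 32-bit view, so individual updates can be marked while a single compare answers "is anything pending?". The union below is copied from the hunk; the surrounding test harness is a hedged sketch, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Copied from the dc_stream.h hunk above; only the fields shown there. */
union stream_update_flags {
	struct {
		uint32_t scaling:1;
		uint32_t out_tf:1;
		uint32_t out_csc:1;
		uint32_t abm_level:1;
		uint32_t dpms_off:1;
		uint32_t gamut_remap:1;
		uint32_t wb_update:1;
	} bits;

	uint32_t raw;
};

int main(void)
{
	union stream_update_flags flags = { .raw = 0 };

	flags.bits.out_tf = 1;      /* output transfer function changed */
	flags.bits.gamut_remap = 1; /* gamut remap changed */

	/* raw aliases the bitfield, so one compare covers all flags */
	if (flags.raw)
		printf("stream has pending updates: 0x%08x\n",
		       (unsigned int)flags.raw);
	return 0;
}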
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6eabb6491a3d..e59532d98cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,6 +25,10 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
+/* AND EdidUtility only needs a portion
+ * of this file, including the rest only
+ * causes additional issues.
+ */
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
@@ -33,12 +37,17 @@
#include "dal_types.h"
#include "grph_object_defs.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
+
/* forward declarations */
struct dc_plane_state;
struct dc_stream_state;
struct dc_link;
struct dc_sink;
struct dal;
+struct dc_dmub_srv;
/********************************
* Environment definitions
@@ -51,7 +60,12 @@ enum dce_environment {
DCE_ENV_FPGA_MAXIMUS,
/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
* requirements of Diagnostics team. */
- DCE_ENV_DIAG
+ DCE_ENV_DIAG,
+ /*
+ * Guest VM system, DC HW may exist but is not virtualized and
+ * should not be used. SW support for VDI only.
+ */
+ DCE_ENV_VIRTUAL_HW
};
/* Note: use these macro definitions instead of direct comparison! */
@@ -100,6 +114,11 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
+ struct dc_dmub_srv *dmub_srv;
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#endif
};
@@ -107,6 +126,7 @@ struct dc_context {
#define DC_EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
+#define MAX_REPEATER_CNT 8
#include "dc_ddc_types.h"
@@ -159,6 +179,12 @@ enum dc_edid_status {
EDID_THE_SAME,
};
+enum act_return_status {
+ ACT_SUCCESS,
+ ACT_LINK_LOST,
+ ACT_FAILED
+};
+
/* audio capability from EDID*/
struct dc_cea_audio_mode {
uint8_t format_code; /* ucData[0] [6:3]*/
@@ -202,6 +228,8 @@ struct dc_panel_patch {
unsigned int dppowerup_delay;
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
+ unsigned int extra_t7_ms;
+ unsigned int manage_secondary_link;
};
struct dc_edid_caps {
@@ -383,6 +411,30 @@ enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_12BPC,
DOWN_STREAM_MAX_16BPC
};
+
+
+enum link_training_offset {
+ DPRX = 0,
+ LTTPR_PHY_REPEATER1 = 1,
+ LTTPR_PHY_REPEATER2 = 2,
+ LTTPR_PHY_REPEATER3 = 3,
+ LTTPR_PHY_REPEATER4 = 4,
+ LTTPR_PHY_REPEATER5 = 5,
+ LTTPR_PHY_REPEATER6 = 6,
+ LTTPR_PHY_REPEATER7 = 7,
+ LTTPR_PHY_REPEATER8 = 8
+};
+
+struct dc_lttpr_caps {
+ union dpcd_rev revision;
+ uint8_t mode;
+ uint8_t max_lane_count;
+ uint8_t max_link_rate;
+ uint8_t phy_repeater_cnt;
+ uint8_t max_ext_timeout;
+ uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+};
+
struct dc_dongle_caps {
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
@@ -421,7 +473,6 @@ enum display_content_type {
DISPLAY_CONTENT_TYPE_GAME = 8
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* writeback */
struct dwb_stereo_params {
bool stereo_enabled; /* false: normal mode, true: 3D stereo */
@@ -452,7 +503,6 @@ struct dc_dwb_params {
enum dwb_subsample_position subsample_position;
struct dc_transfer_func *out_transfer_func;
};
-#endif
/* audio*/
@@ -554,15 +604,17 @@ struct audio_info {
/* this field must be last in this struct */
struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
};
-
+struct audio_check {
+ unsigned int audio_packet_type;
+ unsigned int max_audiosample_rate;
+ unsigned int acat;
+};
enum dc_infoframe_type {
DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
DC_HDMI_INFOFRAME_TYPE_AVI = 0x82,
DC_HDMI_INFOFRAME_TYPE_SPD = 0x83,
DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_DP_INFOFRAME_TYPE_PPS = 0x10,
-#endif
};
struct dc_info_packet {
@@ -677,7 +729,7 @@ struct psr_context {
/* The VSync rate in Hz used to calculate the
* step size for smooth brightness feature
*/
- unsigned int vsyncRateHz;
+ unsigned int vsync_rate_hz;
unsigned int skipPsrWaitForPllLock;
unsigned int numberOfControllers;
/* Unused, for future use. To indicate that first changed frame from
@@ -725,7 +777,19 @@ struct AsicStateEx {
unsigned int phyClock;
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
+enum dc_clock_type {
+ DC_CLOCK_TYPE_DISPCLK = 0,
+ DC_CLOCK_TYPE_DPPCLK = 1,
+};
+
+struct dc_clock_config {
+ uint32_t max_clock_khz;
+ uint32_t min_clock_khz;
+ uint32_t bw_requirequired_clock_khz;
+ uint32_t current_clock_khz; /* current clock in use */
+};
+
/* DSC DPCD capabilities */
union dsc_slice_caps1 {
struct {
@@ -795,5 +859,5 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_overall_throughput_1_mps; /* In MPs */
uint32_t branch_max_line_width;
};
-#endif
+
#endif /* DC_TYPES_H_ */
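
The dc_types.h hunks above introduce per-repeater link-training offsets and a dc_lttpr_caps structure whose aux_rd_interval array is sized MAX_REPEATER_CNT - 1. The sketch below only exercises that array; union dpcd_rev is replaced by a plain byte and the values are hypothetical, purely so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define MAX_REPEATER_CNT 8 /* from the dc_types.h hunk above */

/* Trimmed copy of the new LTTPR caps struct; union dpcd_rev is replaced by a
 * plain byte here so the sketch is self-contained. */
struct lttpr_caps_sketch {
	uint8_t revision;
	uint8_t mode;
	uint8_t max_lane_count;
	uint8_t max_link_rate;
	uint8_t phy_repeater_cnt;
	uint8_t max_ext_timeout;
	uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
};

int main(void)
{
	/* Hypothetical values; phy_repeater_cnt is treated as a plain count
	 * for the purposes of this sketch. */
	struct lttpr_caps_sketch caps = {
		.phy_repeater_cnt = 2,
		.aux_rd_interval = { 4, 4 },
	};
	int i;

	/* The array size follows the MAX_REPEATER_CNT - 1 definition above. */
	for (i = 0; i < caps.phy_repeater_cnt && i < MAX_REPEATER_CNT - 1; i++)
		printf("LTTPR_PHY_REPEATER%d aux_rd_interval = %u\n",
		       i + 1, (unsigned int)caps.aux_rd_interval[i]);
	return 0;
}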
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 58bd131d5b48..b8a3fc505c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
return true;
}
@@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm)
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+ /* Disable fractional pwm if configured */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
+ abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
+
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
@@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
- if (abm_dce->base.dmcu_is_running == true)
- abm_dce->base.funcs->set_abm_immediate_disable(*abm);
-
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 7ba7e6f722f6..ba0caaffa24b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -67,7 +67,6 @@
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define ABM_DCN20_REG_LIST() \
ABM_COMMON_REG_LIST_DCE_BASE(), \
SR(DC_ABM1_HG_SAMPLE_RATE), \
@@ -81,7 +80,6 @@
SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
NBIO_SR(BIOS_SCRATCH_2)
-#endif
#define ABM_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -163,9 +161,7 @@
ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh)
-#endif
#define ABM_REG_FIELD_LIST(type) \
type ABM1_HG_NUM_OF_BINS_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 4a10a5d22c90..5a35495bc11d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -145,20 +145,20 @@ static void check_audio_bandwidth_hdmi(
if (channel_count > 2) {
/* Based on HDMI spec 1.3 Table 7.5 */
- if ((crtc_info->requested_pixel_clock <= 27000) &&
+ if ((crtc_info->requested_pixel_clock_100Hz <= 270000) &&
(crtc_info->v_active <= 576) &&
!(crtc_info->interlaced) &&
!(crtc_info->pixel_repetition == 2 ||
crtc_info->pixel_repetition == 4)) {
limit_freq_to_48_khz = true;
- } else if ((crtc_info->requested_pixel_clock <= 27000) &&
+ } else if ((crtc_info->requested_pixel_clock_100Hz <= 270000) &&
(crtc_info->v_active <= 576) &&
(crtc_info->interlaced) &&
(crtc_info->pixel_repetition == 2)) {
limit_freq_to_88_2_khz = true;
- } else if ((crtc_info->requested_pixel_clock <= 54000) &&
+ } else if ((crtc_info->requested_pixel_clock_100Hz <= 540000) &&
(crtc_info->v_active <= 576) &&
!(crtc_info->interlaced)) {
limit_freq_to_174_4_khz = true;
@@ -613,6 +613,8 @@ void dce_aud_az_configure(
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
value);
+ DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
+ audio->inst, value, audio_info->display_name);
/*
*write the port ID:
@@ -737,8 +739,8 @@ void dce_aud_az_configure(
/* search pixel clock value for Azalia HDMI Audio */
static void get_azalia_clock_info_hdmi(
- uint32_t crtc_pixel_clock_in_khz,
- uint32_t actual_pixel_clock_in_khz,
+ uint32_t crtc_pixel_clock_100hz,
+ uint32_t actual_pixel_clock_100Hz,
struct azalia_clock_info *azalia_clock_info)
{
/* audio_dto_phase= 24 * 10,000;
@@ -749,11 +751,11 @@ static void get_azalia_clock_info_hdmi(
/* audio_dto_module = PCLKFrequency * 10,000;
* [khz] -> [100Hz] */
azalia_clock_info->audio_dto_module =
- actual_pixel_clock_in_khz * 10;
+ actual_pixel_clock_100Hz;
}
static void get_azalia_clock_info_dp(
- uint32_t requested_pixel_clock_in_khz,
+ uint32_t requested_pixel_clock_100Hz,
const struct audio_pll_info *pll_info,
struct azalia_clock_info *azalia_clock_info)
{
@@ -792,15 +794,15 @@ void dce_aud_wall_dto_setup(
/* calculate DTO settings */
get_azalia_clock_info_hdmi(
- crtc_info->requested_pixel_clock,
- crtc_info->calculated_pixel_clock,
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
&clock_info);
- DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock = %d"\
- "calculated_pixel_clock =%d\n"\
+ DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock_100Hz = %d"\
+ "calculated_pixel_clock_100Hz =%d\n"\
"audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
- crtc_info->requested_pixel_clock,\
- crtc_info->calculated_pixel_clock,\
+ crtc_info->requested_pixel_clock_100Hz,\
+ crtc_info->calculated_pixel_clock_100Hz,\
clock_info.audio_dto_module,\
clock_info.audio_dto_phase);
@@ -833,7 +835,7 @@ void dce_aud_wall_dto_setup(
calculate DTO settings */
get_azalia_clock_info_dp(
- crtc_info->requested_pixel_clock,
+ crtc_info->requested_pixel_clock_100Hz,
pll_info,
&clock_info);
@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
.az_configure = dce_aud_az_configure,
.destroy = dce_aud_destroy,
};
-
void dce_aud_destroy(struct audio **audio)
{
struct dce_audio *aud = DCE_AUD(*audio);
@@ -936,7 +937,7 @@ struct audio *dce_audio_create(
unsigned int inst,
const struct dce_audio_registers *reg,
const struct dce_audio_shift *shifts,
- const struct dce_aduio_mask *masks
+ const struct dce_audio_mask *masks
)
{
struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);
@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
audio->regs = reg;
audio->shifts = shifts;
audio->masks = masks;
-
return &audio->base;
}
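
The dce_audio.c hunks above switch pixel-clock bookkeeping from kHz to units of 100 Hz: 1 kHz equals 10 x 100 Hz, so each threshold is scaled by 10 (27000 kHz becomes 270000) and the old "* 10" in the DTO module computation drops out. This tiny arithmetic check is only an illustration of that unit change.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clk_khz = 27000;          /* 27 MHz expressed in kHz */
	uint32_t clk_100hz = clk_khz * 10; /* the same clock in 100 Hz units */

	printf("27 MHz: %u kHz == %u x 100Hz\n",
	       (unsigned int)clk_khz, (unsigned int)clk_100hz);

	/* old: audio_dto_module = actual_pixel_clock_in_khz * 10
	 * new: audio_dto_module = actual_pixel_clock_100Hz
	 * both yield the same number for the same physical clock. */
	printf("dto_module old=%u new=%u\n",
	       (unsigned int)(clk_khz * 10), (unsigned int)clk_100hz);
	return 0;
}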
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index a0d5724aab31..1392fab0860b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -101,7 +101,7 @@ struct dce_audio_shift {
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
};
-struct dce_aduio_mask {
+struct dce_audio_mask {
uint32_t AZALIA_ENDPOINT_REG_INDEX;
uint32_t AZALIA_ENDPOINT_REG_DATA;
@@ -125,7 +125,7 @@ struct dce_audio {
struct audio base;
const struct dce_audio_registers *regs;
const struct dce_audio_shift *shifts;
- const struct dce_aduio_mask *masks;
+ const struct dce_audio_mask *masks;
};
struct audio *dce_audio_create(
@@ -133,7 +133,7 @@ struct audio *dce_audio_create(
unsigned int inst,
const struct dce_audio_registers *reg,
const struct dce_audio_shift *shifts,
- const struct dce_aduio_mask *masks);
+ const struct dce_audio_mask *masks);
void dce_aud_destroy(struct audio **audio);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index f2295e780031..68c4049cbc2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -30,6 +30,7 @@
#include "core_types.h"
#include "dce_aux.h"
#include "dce/dce_11_0_sh_mask.h"
+#include "dm_event_log.h"
#define CTX \
aux110->base.ctx
@@ -41,6 +42,10 @@
#include "reg_helper.h"
+#undef FN
+#define FN(reg_name, field_name) \
+ aux110->shift->field_name, aux110->mask->field_name
+
#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)
@@ -54,6 +59,16 @@ enum {
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};
+
+#define TIME_OUT_INCREMENT 1016
+#define TIME_OUT_MULTIPLIER_8 8
+#define TIME_OUT_MULTIPLIER_16 16
+#define TIME_OUT_MULTIPLIER_32 32
+#define TIME_OUT_MULTIPLIER_64 64
+#define MAX_TIMEOUT_LENGTH 127
+#define DEFAULT_AUX_ENGINE_MULT 0
+#define DEFAULT_AUX_ENGINE_LENGTH 69
+
static void release_engine(
struct dce_aux *engine)
{
@@ -197,7 +212,7 @@ static void submit_channel_request(
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
/* set the delay and the number of bytes to write */
@@ -252,6 +267,8 @@ static void submit_channel_request(
}
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+ EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE,
+ request->action, request->address, request->length, request->data);
}
static int read_channel_reply(struct dce_aux *engine, uint32_t size,
@@ -324,7 +341,7 @@ static enum aux_channel_operation_result get_channel_status(
/* poll to make sure that SW_DONE is asserted */
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
@@ -383,7 +400,7 @@ static bool acquire(
{
enum gpio_result result;
- if (!is_engine_available(engine))
+ if ((engine == NULL) || !is_engine_available(engine))
return false;
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
@@ -411,20 +428,103 @@ void dce110_engine_destroy(struct dce_aux **engine)
*engine = NULL;
}
+
+static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout_in_us)
+{
+ uint32_t multiplier = 0;
+ uint32_t length = 0;
+ uint32_t prev_length = 0;
+ uint32_t prev_mult = 0;
+ uint32_t prev_timeout_val = 0;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+
+ /* 1-Update polling timeout period */
+ aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
+
+ /* 2-Update aux timeout period length and multiplier */
+ if (timeout_in_us == 0) {
+ multiplier = DEFAULT_AUX_ENGINE_MULT;
+ length = DEFAULT_AUX_ENGINE_LENGTH;
+ } else if (timeout_in_us <= TIME_OUT_INCREMENT) {
+ multiplier = 0;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
+ length++;
+ } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
+ multiplier = 1;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_16;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0)
+ length++;
+ } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
+ multiplier = 2;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_32;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0)
+ length++;
+ } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) {
+ multiplier = 3;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_64;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0)
+ length++;
+ }
+
+ length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
+
+ REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult);
+
+ switch (prev_mult) {
+ case 0:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8;
+ break;
+ case 1:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16;
+ break;
+ case 2:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32;
+ break;
+ case 3:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64;
+ break;
+ default:
+ prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8;
+ break;
+ }
+
+ REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
+
+ return prev_timeout_val;
+}
+
+static struct dce_aux_funcs aux_functions = {
+ .configure_timeout = NULL,
+ .destroy = NULL,
+};
+
struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs)
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable)
{
aux_engine110->base.ddc = NULL;
aux_engine110->base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.inst = inst;
- aux_engine110->timeout_period = timeout_period;
+ aux_engine110->polling_timeout_period = timeout_period;
aux_engine110->regs = regs;
+ aux_engine110->mask = mask;
+ aux_engine110->shift = shift;
+ aux_engine110->base.funcs = &aux_functions;
+ if (is_ext_aux_timeout_configurable)
+ aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout;
+
return &aux_engine110->base;
}
@@ -461,8 +561,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- if (!acquire(aux_engine, ddc_pin))
+ if (!acquire(aux_engine, ddc_pin)) {
+ *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
return -1;
+ }
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -472,7 +574,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
aux_req.action = i2caux_action_from_payload(payload);
aux_req.address = payload->address;
- aux_req.delay = payload->defer_delay * 10;
+ aux_req.delay = 0;
aux_req.length = payload->length;
aux_req.data = payload->data;
@@ -480,9 +582,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
*operation_result = get_channel_status(aux_engine, &returned_bytes);
if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
- read_channel_reply(aux_engine, payload->length,
+ int bytes_replied = 0;
+ bytes_replied = read_channel_reply(aux_engine, payload->length,
payload->data, payload->reply,
&status);
+ EVENT_LOG_AUX_REP(aux_engine->ddc->pin_data->en,
+ EVENT_LOG_AUX_ORIGIN_NATIVE, *payload->reply,
+ bytes_replied, payload->data);
res = returned_bytes;
} else {
res = -1;
@@ -505,6 +611,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
uint8_t reply;
bool payload_reply = true;
enum aux_channel_operation_result operation_result;
+ bool retry_on_defer = false;
+
int aux_ack_retries = 0,
aux_defer_retries = 0,
aux_i2c_defer_retries = 0,
@@ -535,10 +643,19 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_TRANSACTION_REPLY_AUX_DEFER:
- case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ retry_on_defer = true;
+ /* fall through */
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
+ } else {
+ if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
+ (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+ if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ }
+ }
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
@@ -562,19 +679,29 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
- if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
- goto fail;
- else {
- /*
- * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
- * According to the DP spec there should be 3 retries total
- * with a 400us wait inbetween each. Hardware already waits
- * for 550us therefore no wait is required here.
- */
+ // Check whether a DEFER had occurred before the timeout.
+ // If so, treat timeout as a DEFER.
+ if (retry_on_defer) {
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ goto fail;
+ else if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ } else {
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ goto fail;
+ else {
+ /*
+ * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+ * According to the DP spec there should be 3 retries total
+ * with a 400us wait inbetween each. Hardware already waits
+ * for 550us therefore no wait is required here.
+ */
+ }
}
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
default:
goto fail;
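
The new dce_aux_configure_timeout() above encodes a requested timeout as a length plus a step multiplier (8/16/32/64), choosing the bucket by multiples of TIME_OUT_INCREMENT and rounding the length up. The following standalone restatement of that arithmetic (no register access) is a sketch for illustration; note that the 0-us default of multiplier 0 and length 69 corresponds to roughly 69 x 8 = 552 us.

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the dce_aux.c hunk above. */
#define TIME_OUT_INCREMENT        1016
#define TIME_OUT_MULTIPLIER_8     8
#define TIME_OUT_MULTIPLIER_16    16
#define TIME_OUT_MULTIPLIER_32    32
#define TIME_OUT_MULTIPLIER_64    64
#define MAX_TIMEOUT_LENGTH        127
#define DEFAULT_AUX_ENGINE_MULT   0
#define DEFAULT_AUX_ENGINE_LENGTH 69

/* Standalone re-statement of the length/multiplier selection performed by
 * dce_aux_configure_timeout(); arithmetic only. */
static void encode_timeout(uint32_t timeout_in_us,
			   uint32_t *multiplier, uint32_t *length)
{
	uint32_t step;

	if (timeout_in_us == 0) {
		*multiplier = DEFAULT_AUX_ENGINE_MULT;
		*length = DEFAULT_AUX_ENGINE_LENGTH;
		return;
	}

	if (timeout_in_us <= TIME_OUT_INCREMENT) {
		*multiplier = 0; step = TIME_OUT_MULTIPLIER_8;
	} else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
		*multiplier = 1; step = TIME_OUT_MULTIPLIER_16;
	} else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
		*multiplier = 2; step = TIME_OUT_MULTIPLIER_32;
	} else {
		*multiplier = 3; step = TIME_OUT_MULTIPLIER_64;
	}

	*length = timeout_in_us / step;
	if (timeout_in_us % step)
		(*length)++;
	if (*length > MAX_TIMEOUT_LENGTH)
		*length = MAX_TIMEOUT_LENGTH;
}

int main(void)
{
	uint32_t mult, len;

	encode_timeout(552, &mult, &len);
	/* 552 us fits the first bucket: mult 0, len ceil(552/8) = 69 */
	printf("552us  -> mult=%u len=%u (~%u us)\n", mult, len, len * 8);

	encode_timeout(3200, &mult, &len);
	/* 3200 us falls in the 4*1016 bucket: mult 2, len ceil(3200/32) = 100 */
	printf("3200us -> mult=%u len=%u (~%u us)\n", mult, len, len * 32);
	return 0;
}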
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index ed7fec8fe253..382465862f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -29,15 +29,15 @@
#include "i2caux_interface.h"
#include "inc/hw/aux_engine.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
SRI(AUX_ARB_CONTROL, DP_AUX, id), \
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
-#endif
#define AUX_COMMON_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@@ -55,6 +55,7 @@ struct dce110_aux_registers {
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL1;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
@@ -62,6 +63,156 @@ struct dce110_aux_registers {
uint32_t AUX_RESET_MASK;
};
+#define DCE_AUX_REG_FIELD_LIST(type)\
+ type AUX_EN;\
+ type AUX_RESET;\
+ type AUX_RESET_DONE;\
+ type AUX_REG_RW_CNTL_STATUS;\
+ type AUX_SW_USE_AUX_REG_REQ;\
+ type AUX_SW_DONE_USING_AUX_REG;\
+ type AUX_SW_AUTOINCREMENT_DISABLE;\
+ type AUX_SW_DATA_RW;\
+ type AUX_SW_INDEX;\
+ type AUX_SW_GO;\
+ type AUX_SW_DATA;\
+ type AUX_SW_REPLY_BYTE_COUNT;\
+ type AUX_SW_DONE;\
+ type AUX_SW_DONE_ACK;\
+ type AUXN_IMPCAL_ENABLE;\
+ type AUXP_IMPCAL_ENABLE;\
+ type AUXN_IMPCAL_OVERRIDE_ENABLE;\
+ type AUXP_IMPCAL_OVERRIDE_ENABLE;\
+ type AUX_RX_TIMEOUT_LEN;\
+ type AUX_RX_TIMEOUT_LEN_MUL;\
+ type AUXN_CALOUT_ERROR_AK;\
+ type AUXP_CALOUT_ERROR_AK;\
+ type AUX_SW_START_DELAY;\
+ type AUX_SW_WR_BYTES
+
+#define DCE10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE12_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* DCN10 MASK */
+#define DCN10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* for all other DCN */
+#define DCN_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
+
+#define AUX_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
@@ -97,20 +248,34 @@ struct dce_aux {
uint32_t max_defer_write_retry;
bool acquire_reset;
+ struct dce_aux_funcs *funcs;
};
+struct dce110_aux_registers_mask {
+ DCE_AUX_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce110_aux_registers_shift {
+ DCE_AUX_REG_FIELD_LIST(uint8_t);
+};
+
+
struct aux_engine_dce110 {
struct dce_aux base;
const struct dce110_aux_registers *regs;
+ const struct dce110_aux_registers_mask *mask;
+ const struct dce110_aux_registers_shift *shift;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
+ uint32_t aux_dphy_rx_control1;
+ uint32_t aux_dphy_rx_control0;
uint32_t aux_sw_status;
} addr;
- uint32_t timeout_period;
+ uint32_t polling_timeout_period;
};
struct aux_engine_dce110_init_data {
@@ -120,12 +285,15 @@ struct aux_engine_dce110_init_data {
const struct dce110_aux_registers *regs;
};
-struct dce_aux *dce110_aux_engine_construct(
- struct aux_engine_dce110 *aux_engine110,
+struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs);
+ const struct dce110_aux_registers *regs,
+
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable);
void dce110_engine_destroy(struct dce_aux **engine);
@@ -139,4 +307,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
+
+struct dce_aux_funcs {
+ uint32_t (*configure_timeout)
+ (struct ddc_service *ddc,
+ uint32_t timeout);
+ void (*destroy)
+ (struct aux_engine **ptr);
+};
+
#endif
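
The dce_aux.h changes above add a dce_aux_funcs table whose configure_timeout pointer is only populated when the engine is constructed with is_ext_aux_timeout_configurable set; otherwise it stays NULL, so callers need a NULL check before invoking it. The sketch below uses trimmed, hypothetical stand-in types purely to illustrate that null-guarded hook pattern.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the hook table shown above. */
struct ddc_service_sketch { int dummy; };

struct aux_funcs_sketch {
	uint32_t (*configure_timeout)(struct ddc_service_sketch *ddc,
				      uint32_t timeout);
};

static uint32_t fake_configure_timeout(struct ddc_service_sketch *ddc,
				       uint32_t timeout)
{
	(void)ddc;
	printf("programming AUX timeout of %u us\n", timeout);
	return 552; /* pretend the previous timeout was 552 us */
}

int main(void)
{
	struct ddc_service_sketch ddc = { 0 };
	struct aux_funcs_sketch funcs = { .configure_timeout = NULL };

	/* Hook left NULL (extended timeout not configurable): skip the call. */
	if (funcs.configure_timeout)
		funcs.configure_timeout(&ddc, 3200);

	/* Hook installed, as the construct path does when the extended
	 * timeout is configurable. */
	funcs.configure_timeout = fake_configure_timeout;
	if (funcs.configure_timeout)
		printf("previous timeout: %u us\n",
		       funcs.configure_timeout(&ddc, 3200));
	return 0;
}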
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5fae77e201d5..2e992fbc0d71 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -905,7 +905,7 @@ static bool dce112_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_100hz = 7000000;
@@ -1004,7 +1004,6 @@ static bool get_pixel_clk_frequency_100hz(
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */
struct pixel_rate_range_table_entry {
@@ -1061,9 +1060,9 @@ static bool dcn20_program_pix_clk(
static const struct clock_source_funcs dcn20_clk_src_funcs = {
.cs_power_down = dce110_clock_source_power_down,
.program_pix_clk = dcn20_program_pix_clk,
- .get_pix_clk_dividers = dce112_get_pix_clk_dividers
+ .get_pix_clk_dividers = dce112_get_pix_clk_dividers,
+ .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
-#endif
/*****************************************/
/* Constructor */
@@ -1234,37 +1233,36 @@ static bool calc_pll_max_vco_construct(
struct calc_pll_clock_source_init_data *init_data)
{
uint32_t i;
- struct dc_firmware_info fw_info = { { 0 } };
+ struct dc_firmware_info *fw_info;
if (calc_pll_cs == NULL ||
init_data == NULL ||
init_data->bp == NULL)
return false;
- if (init_data->bp->funcs->get_firmware_info(
- init_data->bp,
- &fw_info) != BP_RESULT_OK)
+ if (!init_data->bp->fw_info_valid)
return false;
+ fw_info = &init_data->bp->fw_info;
calc_pll_cs->ctx = init_data->ctx;
- calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+ calc_pll_cs->ref_freq_khz = fw_info->pll_info.crystal_frequency;
calc_pll_cs->min_vco_khz =
- fw_info.pll_info.min_output_pxl_clk_pll_frequency;
+ fw_info->pll_info.min_output_pxl_clk_pll_frequency;
calc_pll_cs->max_vco_khz =
- fw_info.pll_info.max_output_pxl_clk_pll_frequency;
+ fw_info->pll_info.max_output_pxl_clk_pll_frequency;
if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
calc_pll_cs->max_pll_input_freq_khz =
init_data->max_override_input_pxl_clk_pll_freq_khz;
else
calc_pll_cs->max_pll_input_freq_khz =
- fw_info.pll_info.max_input_pxl_clk_pll_frequency;
+ fw_info->pll_info.max_input_pxl_clk_pll_frequency;
if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
calc_pll_cs->min_pll_input_freq_khz =
init_data->min_override_input_pxl_clk_pll_freq_khz;
else
calc_pll_cs->min_pll_input_freq_khz =
- fw_info.pll_info.min_input_pxl_clk_pll_frequency;
+ fw_info->pll_info.min_input_pxl_clk_pll_frequency;
calc_pll_cs->min_pix_clock_pll_post_divider =
init_data->min_pix_clk_pll_post_divider;
@@ -1316,7 +1314,6 @@ bool dce110_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask)
{
- struct dc_firmware_info fw_info = { { 0 } };
struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
struct calc_pll_clock_source_init_data calc_pll_cs_init_data;
@@ -1329,14 +1326,12 @@ bool dce110_clk_src_construct(
clk_src->cs_shift = cs_shift;
clk_src->cs_mask = cs_mask;
- if (clk_src->bios->funcs->get_firmware_info(
- clk_src->bios, &fw_info) != BP_RESULT_OK) {
+ if (!clk_src->bios->fw_info_valid) {
ASSERT_CRITICAL(false);
goto unexpected_failure;
}
- clk_src->ext_clk_khz =
- fw_info.external_clock_source_frequency_for_dp;
+ clk_src->ext_clk_khz = clk_src->bios->fw_info.external_clock_source_frequency_for_dp;
/* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
calc_pll_cs_init_data.bp = bios;
@@ -1376,7 +1371,7 @@ bool dce110_clk_src_construct(
FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
calc_pll_cs_init_data_hdmi.ctx = ctx;
- clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+ clk_src->ref_freq_khz = clk_src->bios->fw_info.pll_info.crystal_frequency;
if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
return true;
@@ -1419,8 +1414,6 @@ bool dce112_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask)
{
- struct dc_firmware_info fw_info = { { 0 } };
-
clk_src->base.ctx = ctx;
clk_src->bios = bios;
clk_src->base.id = id;
@@ -1430,18 +1423,16 @@ bool dce112_clk_src_construct(
clk_src->cs_shift = cs_shift;
clk_src->cs_mask = cs_mask;
- if (clk_src->bios->funcs->get_firmware_info(
- clk_src->bios, &fw_info) != BP_RESULT_OK) {
+ if (!clk_src->bios->fw_info_valid) {
ASSERT_CRITICAL(false);
return false;
}
- clk_src->ext_clk_khz = fw_info.external_clock_source_frequency_for_dp;
+ clk_src->ext_clk_khz = clk_src->bios->fw_info.external_clock_source_frequency_for_dp;
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dcn20_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -1457,4 +1448,3 @@ bool dcn20_clk_src_construct(
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index adae03b1f3a7..51bd25079606 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,6 @@
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define CS_COMMON_REG_LIST_DCN2_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
@@ -76,17 +75,29 @@
SRII(PIXEL_RATE_CNTL, OTG, 3),\
SRII(PIXEL_RATE_CNTL, OTG, 4),\
SRII(PIXEL_RATE_CNTL, OTG, 5)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+#define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(PHASE, DP_DTO, 2),\
+ SRII(PHASE, DP_DTO, 3),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 2),\
+ SRII(MODULO, DP_DTO, 3),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PIXEL_RATE_CNTL, OTG, 3)
+
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
@@ -184,7 +195,6 @@ bool dce112_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dcn20_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -193,6 +203,5 @@ bool dcn20_clk_src_construct(
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
-#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 0b86cee4876f..30d953acd016 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -59,6 +59,12 @@
#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+// PSP FW version
+#define mmMP0_SMN_C2PMSG_58 0x1607A
+
+//Register access policy version
+#define mmMP0_SMN_C2PMSG_91 0x1609B
+
static bool dce_dmcu_init(struct dmcu *dmcu)
{
// Do nothing
@@ -318,7 +324,7 @@ static void dce_get_psr_wait_loop(
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static void dcn10_get_dmcu_version(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
+ PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH
* 0 : firmare not loaded
* 1 : PSP load DMCU FW but not initialized
@@ -429,9 +436,21 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
break;
}
+ PERF_TRACE();
return status;
}
+static bool dcn21_dmcu_init(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15);
+
+ if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) {
+ return false;
+ }
+
+ return dcn10_dmcu_init(dmcu);
+}
static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
@@ -518,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
if (dmcu->dmcu_state != DMCU_RUNNING)
return;
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (psr_state == 0 && !enable)
- return;
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -727,9 +743,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
return true;
}
-#endif //(CONFIG_DRM_AMD_DC_DCN1_0)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static bool dcn20_lock_phy(struct dmcu *dmcu)
{
@@ -777,7 +791,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)
return true;
}
-#endif //(CONFIG_DRM_AMD_DC_DCN2_0)
+#endif //(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dce_funcs = {
.dmcu_init = dce_dmcu_init,
@@ -790,7 +804,7 @@ static const struct dmcu_funcs dce_funcs = {
.is_dmcu_initialized = dce_is_dmcu_initialized
};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dcn10_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -801,9 +815,7 @@ static const struct dmcu_funcs dcn10_funcs = {
.get_psr_wait_loop = dcn10_get_psr_wait_loop,
.is_dmcu_initialized = dcn10_is_dmcu_initialized
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static const struct dmcu_funcs dcn20_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -816,6 +828,19 @@ static const struct dmcu_funcs dcn20_funcs = {
.lock_phy = dcn20_lock_phy,
.unlock_phy = dcn20_unlock_phy
};
+
+static const struct dmcu_funcs dcn21_funcs = {
+ .dmcu_init = dcn21_dmcu_init,
+ .load_iram = dcn10_dmcu_load_iram,
+ .set_psr_enable = dcn10_dmcu_set_psr_enable,
+ .setup_psr = dcn10_dmcu_setup_psr,
+ .get_psr_state = dcn10_get_dmcu_psr_state,
+ .set_psr_wait_loop = dcn10_psr_wait_loop,
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .is_dmcu_initialized = dcn10_is_dmcu_initialized,
+ .lock_phy = dcn20_lock_phy,
+ .unlock_phy = dcn20_unlock_phy
+};
#endif
static void dce_dmcu_construct(
@@ -836,6 +861,26 @@ static void dce_dmcu_construct(
dmcu_dce->dmcu_mask = dmcu_mask;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dcn21_dmcu_construct(
+ struct dce_dmcu *dmcu_dce,
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ uint32_t psp_version = 0;
+
+ dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58);
+ dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029);
+ dmcu_dce->base.psp_version = psp_version;
+ }
+}
+#endif
+
struct dmcu *dce_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -857,7 +902,7 @@ struct dmcu *dce_dmcu_create(
return &dmcu_dce->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dmcu *dcn10_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -878,9 +923,7 @@ struct dmcu *dcn10_dmcu_create(
return &dmcu_dce->base;
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dmcu *dcn20_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -901,15 +944,33 @@ struct dmcu *dcn20_dmcu_create(
return &dmcu_dce->base;
}
+
+struct dmcu *dcn21_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn21_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dcn21_funcs;
+
+ return &dmcu_dce->base;
+}
#endif
void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
- if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
- dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
-
kfree(dmcu_dce);
*dmcu = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index cc8587683b4b..5e044c2d3d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -71,6 +71,10 @@
DMCU_COMMON_REG_LIST_DCE_BASE(), \
SR(DMU_MEM_PWR_CNTL)
+#define DMCU_DCN20_REG_LIST()\
+ DMCU_DCN10_REG_LIST(), \
+ SR(DMCUB_SCRATCH15)
+
#define DMCU_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -175,6 +179,7 @@ struct dce_dmcu_registers {
uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
uint32_t SMU_INTERRUPT_CONTROL;
uint32_t DC_DMCU_SCRATCH;
+ uint32_t DMCUB_SCRATCH15;
};
struct dce_dmcu {
@@ -261,13 +266,17 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dmcu *dcn20_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask);
-#endif
+
+struct dmcu *dcn21_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
void dce_dmcu_destroy(struct dmcu **dmcu);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 0275d6d60da4..e1c5839a80dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -25,7 +25,7 @@
#include "dce_hwseq.h"
#include "reg_helper.h"
-#include "hw_sequencer.h"
+#include "hw_sequencer_private.h"
#include "core_types.h"
#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index cb0a037b1c4a..c5aa1f48593a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -25,7 +25,7 @@
#ifndef __DCE_HWSEQ_H__
#define __DCE_HWSEQ_H__
-#include "hw_sequencer.h"
+#include "dc_types.h"
#define BL_REG_LIST()\
SR(LVTMA_PWRSEQ_CNTL), \
@@ -62,6 +62,10 @@
SRII(BLND_CONTROL, BLND, 4), \
SRII(BLND_CONTROL, BLND, 5)
+#define HSWEQ_DCN_PIXEL_RATE_REG_LIST(blk, inst) \
+ SRII(PIXEL_RATE_CNTL, blk, inst), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, inst)
+
#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
SRII(PIXEL_RATE_CNTL, blk, 0), \
SRII(PIXEL_RATE_CNTL, blk, 1), \
@@ -151,7 +155,10 @@
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DCFCLK_CNTL), \
- SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+
+
+#define MMHUB_DCN_REG_LIST()\
/* todo: get these from GVM instead of reading registers ourselves */\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
@@ -166,10 +173,14 @@
MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
+
#define HWSEQ_DCN1_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
- HWSEQ_PHYPLL_REG_LIST(OTG), \
+ MMHUB_DCN_REG_LIST(), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 2), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 3), \
SR(DCHUBBUB_SDPIF_FB_BASE),\
SR(DCHUBBUB_SDPIF_FB_OFFSET),\
SR(DCHUBBUB_SDPIF_AGP_BASE),\
@@ -199,11 +210,14 @@
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
- HWSEQ_PHYPLL_REG_LIST(OTG), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 2), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 3), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 4), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 5), \
SR(MICROSECOND_TIME_BASE_DIV), \
SR(MILLISECOND_TIME_BASE_DIV), \
SR(DISPCLK_FREQ_CHANGE_CNTL), \
@@ -227,8 +241,8 @@
SR(DOMAIN7_PG_CONFIG), \
SR(DOMAIN8_PG_CONFIG), \
SR(DOMAIN9_PG_CONFIG), \
- SR(DOMAIN10_PG_CONFIG), \
- SR(DOMAIN11_PG_CONFIG), \
+/* SR(DOMAIN10_PG_CONFIG), Navi1x HUBP5 not powergate-able*/\
+/* SR(DOMAIN11_PG_CONFIG), Navi1x DPP5 is not powergate-able */\
SR(DOMAIN16_PG_CONFIG), \
SR(DOMAIN17_PG_CONFIG), \
SR(DOMAIN18_PG_CONFIG), \
@@ -261,7 +275,57 @@
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
-#endif
+
+#define HWSEQ_DCN21_REG_LIST()\
+ HWSEQ_DCN_REG_LIST(), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 2), \
+ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 3), \
+ MMHUB_DCN_REG_LIST(), \
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
+ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
+ SR(MPC_CRC_CTRL), \
+ SR(MPC_CRC_RESULT_GB), \
+ SR(MPC_CRC_RESULT_C), \
+ SR(MPC_CRC_RESULT_AR), \
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN4_PG_CONFIG), \
+ SR(DOMAIN5_PG_CONFIG), \
+ SR(DOMAIN6_PG_CONFIG), \
+ SR(DOMAIN7_PG_CONFIG), \
+ SR(DOMAIN16_PG_CONFIG), \
+ SR(DOMAIN17_PG_CONFIG), \
+ SR(DOMAIN18_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN4_PG_STATUS), \
+ SR(DOMAIN5_PG_STATUS), \
+ SR(DOMAIN6_PG_STATUS), \
+ SR(DOMAIN7_PG_STATUS), \
+ SR(DOMAIN16_PG_STATUS), \
+ SR(DOMAIN17_PG_STATUS), \
+ SR(DOMAIN18_PG_STATUS), \
+ SR(D1VGA_CONTROL), \
+ SR(D2VGA_CONTROL), \
+ SR(D3VGA_CONTROL), \
+ SR(D4VGA_CONTROL), \
+ SR(D5VGA_CONTROL), \
+ SR(D6VGA_CONTROL), \
+ SR(DC_IP_REQUEST_CNTL), \
+ BL_REG_LIST()
struct dce_hwseq_registers {
@@ -401,36 +465,34 @@ struct dce_hwseq_registers {
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
+#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
@@ -438,18 +500,15 @@ struct dce_hwseq_registers {
SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
- SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh)
#define HWSEQ_DCE12_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE0_DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
@@ -512,12 +571,8 @@ struct dce_hwseq_registers {
HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -576,9 +631,50 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+
+#define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
+ HWS_SF(, MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN16_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN17_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN18_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN1_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN2_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN3_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_STATUS, DOMAIN4_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_STATUS, DOMAIN5_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-#endif
#define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \
@@ -612,9 +708,9 @@ struct dce_hwseq_registers {
type ENABLE_L1_TLB;\
type SYSTEM_ACCESS_MODE;\
type LVTMA_BLON;\
- type LVTMA_PWRSEQ_TARGET_STATE_R;\
type LVTMA_DIGON;\
- type LVTMA_DIGON_OVRD;
+ type LVTMA_DIGON_OVRD;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R;
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
type HUBP_VTG_SEL; \
@@ -715,6 +811,10 @@ enum blnd_mode {
BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
};
+struct dce_hwseq;
+struct pipe_ctx;
+struct clock_source;
+
void dce_enable_fe_clock(struct dce_hwseq *hwss,
unsigned int inst, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
index 35a75398fcb4..dd41736bb5c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
@@ -31,7 +31,7 @@ bool dce_i2c_submit_command(
struct i2c_command *cmd)
{
struct dce_i2c_hw *dce_i2c_hw;
- struct dce_i2c_sw *dce_i2c_sw;
+ struct dce_i2c_sw dce_i2c_sw = {0};
if (!ddc) {
BREAK_TO_DEBUGGER();
@@ -43,18 +43,15 @@ bool dce_i2c_submit_command(
return false;
}
- /* The software engine is only available on dce8 */
- dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc);
-
- if (!dce_i2c_sw) {
- dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
-
- if (!dce_i2c_hw)
- return false;
+ dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
+ if (dce_i2c_hw)
return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw);
- }
- return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw);
+ dce_i2c_sw.ctx = ddc->ctx;
+ if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) {
+ return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw);
+ }
+ return false;
}
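Note on the reworked dce_i2c_submit_command() above: the software engine is no longer looked up in the resource pool; the hardware engine is tried first, and a stack-allocated software engine is only acquired as a fallback. A stand-alone model of that ordering, with hypothetical stub functions in place of the driver's acquire/submit calls:

    #include <stdbool.h>
    #include <stdio.h>

    struct sw_engine { void *ctx; };

    /* pretend the HW engine is unavailable so the SW fallback is exercised */
    static bool acquire_hw(void)                { return false; }
    static bool acquire_sw(struct sw_engine *e) { (void)e; return true; }

    static bool submit(void)
    {
        struct sw_engine sw = {0};       /* stack object, like dce_i2c_sw above */

        if (acquire_hw())
            return true;                 /* dce_i2c_submit_command_hw() path */
        if (acquire_sw(&sw))
            return true;                 /* dce_i2c_submit_command_sw() path */
        return false;
    }

    int main(void) { printf("%d\n", submit()); return 0; }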
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index a9061aaf1562..066188ba7949 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
+#include "resource.h"
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
#include "reg_helper.h"
@@ -99,31 +100,6 @@ static uint32_t get_hw_buffer_available_size(
dce_i2c_hw->buffer_used_bytes;
}
-uint32_t get_reference_clock(
- struct dc_bios *bios)
-{
- struct dc_firmware_info info = { { 0 } };
-
- if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
- return 0;
-
- return info.pll_info.crystal_frequency;
-}
-
-static uint32_t get_speed(
- const struct dce_i2c_hw *dce_i2c_hw)
-{
- uint32_t pre_scale = 0;
-
- REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
-
- /* [anaumov] it seems following is unnecessary */
- /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
- return pre_scale ?
- dce_i2c_hw->reference_frequency / pre_scale :
- dce_i2c_hw->default_speed;
-}
-
static void process_channel_reply(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *reply)
@@ -288,16 +264,25 @@ static void set_speed(
struct dce_i2c_hw *dce_i2c_hw,
uint32_t speed)
{
+ uint32_t xtal_ref_div = 0;
+ uint32_t prescale = 0;
+
+ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+ if (xtal_ref_div == 0)
+ xtal_ref_div = 2;
+
+ prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
if (speed) {
if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
REG_UPDATE_N(SPEED, 3,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
else
REG_UPDATE_N(SPEED, 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
}
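The prescale programmed in set_speed() above now folds XTAL_REF_DIV in at the point of use (the dce100_i2c_hw_construct hunk further down stops pre-dividing). Because reference_frequency is stored halved at construct time, the expression multiplies by 2, divides by XTAL_REF_DIV (defaulting to 2 when the register reads 0) and then by the requested speed. A stand-alone sketch of that arithmetic; the input values are hypothetical and assume reference_frequency and speed share the same unit:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t i2c_prescale(uint32_t reference_frequency,
                                 uint32_t xtal_ref_div, uint32_t speed)
    {
        if (xtal_ref_div == 0)           /* same fallback as set_speed() */
            xtal_ref_div = 2;
        /* reference_frequency was halved at construct time, so undo that first */
        return ((reference_frequency * 2) / xtal_ref_div) / speed;
    }

    int main(void)
    {
        /* hypothetical inputs: 13500, XTAL_REF_DIV = 2, speed = 100 */
        printf("%u\n", i2c_prescale(13500, 2, 100));   /* prints 135 */
        return 0;
    }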
@@ -306,9 +291,7 @@ static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t reset_length = 0;
-#endif
/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
@@ -332,14 +315,12 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 2,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
} else {
reset_length = dce_i2c_hw->send_reset_length;
REG_UPDATE_N(SETUP, 3,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-#endif
}
/* Program HW priority
* set to High - interrupt software I2C at any time
@@ -358,9 +339,7 @@ static void release_engine(
bool safe_to_reset;
/* Restore original HW engine speed */
-
- set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
-
+ set_speed(dce_i2c_hw, dce_i2c_hw->default_speed);
/* Reset HW engine */
{
@@ -392,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
{
uint32_t counter = 0;
enum gpio_result result;
- uint32_t current_speed;
struct dce_i2c_hw *dce_i2c_hw = NULL;
if (!ddc)
@@ -401,7 +379,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
if (ddc->hw_info.hw_supported) {
enum gpio_ddc_line line = dal_ddc_get_line(ddc);
- if (line < pool->pipe_count)
+ if (line < pool->res_cap->num_ddc)
dce_i2c_hw = pool->hw_i2cs[line];
}
@@ -430,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
dce_i2c_hw->ddc = ddc;
- current_speed = get_speed(dce_i2c_hw);
-
- if (current_speed)
- dce_i2c_hw->original_speed = current_speed;
-
if (!setup_engine(dce_i2c_hw)) {
release_engine(dce_i2c_hw);
return NULL;
@@ -492,13 +465,9 @@ static void submit_channel_request_hw(
static uint32_t get_transaction_timeout_hw(
const struct dce_i2c_hw *dce_i2c_hw,
- uint32_t length)
+ uint32_t length,
+ uint32_t speed)
{
-
- uint32_t speed = get_speed(dce_i2c_hw);
-
-
-
uint32_t period_timeout;
uint32_t num_of_clock_stretches;
@@ -518,7 +487,8 @@ static uint32_t get_transaction_timeout_hw(
bool dce_i2c_hw_engine_submit_payload(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *payload,
- bool middle_of_transaction)
+ bool middle_of_transaction,
+ uint32_t speed)
{
struct i2c_request_transaction_data request;
@@ -556,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload(
/* obtain timeout value before submitting request */
transaction_timeout = get_transaction_timeout_hw(
- dce_i2c_hw, payload->length + 1);
+ dce_i2c_hw, payload->length + 1, speed);
submit_channel_request_hw(
dce_i2c_hw, &request);
@@ -602,13 +572,11 @@ bool dce_i2c_submit_command_hw(
struct i2c_payload *payload = cmd->payloads + index_of_payload;
if (!dce_i2c_hw_engine_submit_payload(
- dce_i2c_hw, payload, mot)) {
+ dce_i2c_hw, payload, mot, cmd->speed)) {
result = false;
break;
}
-
-
++index_of_payload;
}
@@ -632,14 +600,13 @@ void dce_i2c_hw_construct(
{
dce_i2c_hw->ctx = ctx;
dce_i2c_hw->engine_id = engine_id;
- dce_i2c_hw->reference_frequency = get_reference_clock(ctx->dc_bios) >> 1;
+ dce_i2c_hw->reference_frequency = (ctx->dc_bios->fw_info.pll_info.crystal_frequency) >> 1;
dce_i2c_hw->regs = regs;
dce_i2c_hw->shifts = shifts;
dce_i2c_hw->masks = masks;
dce_i2c_hw->buffer_used_bytes = 0;
dce_i2c_hw->transaction_count = 0;
dce_i2c_hw->engine_keep_power_up_count = 1;
- dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED;
dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED;
dce_i2c_hw->send_reset_length = 0;
dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
@@ -654,9 +621,6 @@ void dce100_i2c_hw_construct(
const struct dce_i2c_shift *shifts,
const struct dce_i2c_mask *masks)
{
-
- uint32_t xtal_ref_div = 0;
-
dce_i2c_hw_construct(dce_i2c_hw,
ctx,
engine_id,
@@ -664,21 +628,6 @@ void dce100_i2c_hw_construct(
shifts,
masks);
dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100;
-
- REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
-
- if (xtal_ref_div == 0)
- xtal_ref_div = 2;
-
- /*Calculating Reference Clock by divding original frequency by
- * XTAL_REF_DIV.
- * At upper level, uint32_t reference_frequency =
- * dal_dce_i2c_get_reference_clock(as) >> 1
- * which already divided by 2. So we need x2 to get original
- * reference clock from ppll_info
- */
- dce_i2c_hw->reference_frequency =
- (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div;
}
void dce112_i2c_hw_construct(
@@ -715,7 +664,6 @@ void dcn1_i2c_hw_construct(
dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn2_i2c_hw_construct(
struct dce_i2c_hw *dce_i2c_hw,
struct dc_context *ctx,
@@ -734,4 +682,3 @@ void dcn2_i2c_hw_construct(
if (ctx->dc->debug.scl_reset_length10)
dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index cb0234e5d597..fb055e6883c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -177,9 +177,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH;
-#endif
uint8_t DC_I2C_REG_RW_CNTL_STATUS;
};
@@ -220,17 +218,13 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH;
-#endif
uint32_t DC_I2C_REG_RW_CNTL_STATUS;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\
I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\
I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh)
-#endif
struct dce_i2c_registers {
uint32_t SETUP;
@@ -262,7 +256,6 @@ struct i2c_request_transaction_data {
struct dce_i2c_hw {
struct ddc *ddc;
- uint32_t original_speed;
uint32_t engine_keep_power_up_count;
uint32_t transaction_count;
uint32_t buffer_used_bytes;
@@ -312,7 +305,6 @@ void dcn1_i2c_hw_construct(
const struct dce_i2c_shift *shifts,
const struct dce_i2c_mask *masks);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn2_i2c_hw_construct(
struct dce_i2c_hw *dce_i2c_hw,
struct dc_context *ctx,
@@ -320,7 +312,6 @@ void dcn2_i2c_hw_construct(
const struct dce_i2c_registers *regs,
const struct dce_i2c_shift *shifts,
const struct dce_i2c_mask *masks);
-#endif
bool dce_i2c_submit_command_hw(
struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index a5a11c251e25..87d8428df6c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -73,31 +73,6 @@ static void release_engine_dce_sw(
dce_i2c_sw->ddc = NULL;
}
-static bool get_hw_supported_ddc_line(
- struct ddc *ddc,
- enum gpio_ddc_line *line)
-{
- enum gpio_ddc_line line_found;
-
- *line = GPIO_DDC_LINE_UNKNOWN;
-
- if (!ddc) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!ddc->hw_info.hw_supported)
- return false;
-
- line_found = dal_ddc_get_line(ddc);
-
- if (line_found >= GPIO_DDC_LINE_COUNT)
- return false;
-
- *line = line_found;
-
- return true;
-}
static bool wait_for_scl_high_sw(
struct dc_context *ctx,
struct ddc *ddc,
@@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw(
return result;
}
-struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
- struct resource_pool *pool,
- struct ddc *ddc)
-{
- enum gpio_ddc_line line;
- struct dce_i2c_sw *engine = NULL;
-
- if (get_hw_supported_ddc_line(ddc, &line))
- engine = pool->sw_i2cs[line];
-
- if (!engine)
- return NULL;
-
- if (!dce_i2c_engine_acquire_sw(engine, ddc))
- return NULL;
-
- return engine;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
index 5bbcdd455614..019fc47bb767 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
@@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw(
struct i2c_command *cmd,
struct dce_i2c_sw *dce_i2c_sw);
-struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
- struct resource_pool *pool,
- struct ddc *ddc);
+bool dce_i2c_engine_acquire_sw(
+ struct dce_i2c_sw *dce_i2c_sw,
+ struct ddc *ddc_handle);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index a24a2bda8656..8aa937f496c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -148,7 +148,7 @@ static void dce_mi_program_pte_vm(
pte->min_pte_before_flip_horiz_scan;
REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT,
- GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff);
+ GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0x7f);
REG_UPDATE_3(DVMM_PTE_CONTROL,
DVMM_PAGE_WIDTH, page_width,
@@ -157,7 +157,7 @@ static void dce_mi_program_pte_vm(
REG_UPDATE_2(DVMM_PTE_ARB_CONTROL,
DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk,
- DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff);
+ DVMM_MAX_PTE_REQ_OUTSTANDING, 0x7f);
}
static void program_urgency_watermark(
@@ -391,10 +391,10 @@ static void program_tiling(
static void program_size_and_rotation(
struct dce_mem_input *dce_mi,
enum dc_rotation_angle rotation,
- const union plane_size *plane_size)
+ const struct plane_size *plane_size)
{
- const struct rect *in_rect = &plane_size->grph.surface_size;
- struct rect hw_rect = plane_size->grph.surface_size;
+ const struct rect *in_rect = &plane_size->surface_size;
+ struct rect hw_rect = plane_size->surface_size;
const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = {
[ROTATION_ANGLE_0] = 0,
[ROTATION_ANGLE_90] = 1,
@@ -423,7 +423,7 @@ static void program_size_and_rotation(
GRPH_Y_END, hw_rect.height);
REG_SET(GRPH_PITCH, 0,
- GRPH_PITCH, plane_size->grph.surface_pitch);
+ GRPH_PITCH, plane_size->surface_pitch);
REG_SET(HW_ROTATION, 0,
GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
@@ -505,7 +505,7 @@ static void dce_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
@@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif(
}
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
@@ -636,11 +636,11 @@ static void dce_mi_free_dmif(
10, 3500);
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 5e2b4d47c548..451574971b96 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet(
AFMT_GENERIC0_UPDATE, (packet_index == 0),
AFMT_GENERIC2_UPDATE, (packet_index == 2));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
switch (packet_index) {
case 0:
@@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case 4:
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -275,9 +275,10 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -329,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc110->se_mask->DP_VID_N_MUL)
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif
@@ -340,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(DP_MSA_MISC))
misc1 = REG_READ(DP_MSA_MISC);
#endif
@@ -374,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
/* set dynamic range and YCbCr range */
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (hw_crtc_timing.display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
@@ -454,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
DP_DYN_RANGE, dynamic_range_rgb,
DP_YCBCR_RANGE, dynamic_range_ycbcr);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(DP_MSA_COLORIMETRY))
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
@@ -489,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
hw_crtc_timing.v_front_porch;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* start at beginning of left border */
if (REG(DP_MSA_TIMING_PARAM2))
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -786,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc110->se_mask->HDMI_DB_DISABLE) {
/* for bring up, disable dp double TODO */
if (REG(HDMI_DB_CONTROL))
@@ -824,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* stop generic packets 2 & 3 on HDMI */
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
@@ -1038,6 +1039,24 @@ static void dce110_stream_encoder_set_avmute(
}
+static void dce110_reset_hdmi_stream_attribute(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN)
+ REG_UPDATE_5(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ else
+ REG_UPDATE_3(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+}
+
#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
@@ -1251,13 +1270,13 @@ static uint32_t calc_max_audio_packets_per_line(
static void get_audio_clock_info(
enum dc_color_depth color_depth,
- uint32_t crtc_pixel_clock_in_khz,
- uint32_t actual_pixel_clock_in_khz,
+ uint32_t crtc_pixel_clock_100Hz,
+ uint32_t actual_pixel_clock_100Hz,
struct audio_clock_info *audio_clock_info)
{
const struct audio_clock_info *clock_info;
uint32_t index;
- uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+ uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_100Hz / 100;
uint32_t audio_array_size;
switch (color_depth) {
@@ -1294,16 +1313,16 @@ static void get_audio_clock_info(
}
/* not found */
- if (actual_pixel_clock_in_khz == 0)
- actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+ if (actual_pixel_clock_100Hz == 0)
+ actual_pixel_clock_100Hz = crtc_pixel_clock_100Hz;
/* See HDMI spec the table entry under
* pixel clock of "Other". */
audio_clock_info->pixel_clock_in_10khz =
- actual_pixel_clock_in_khz / 10;
- audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
- audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
- audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+ actual_pixel_clock_100Hz / 100;
+ audio_clock_info->cts_32khz = actual_pixel_clock_100Hz / 10;
+ audio_clock_info->cts_44khz = actual_pixel_clock_100Hz / 10;
+ audio_clock_info->cts_48khz = actual_pixel_clock_100Hz / 10;
audio_clock_info->n_32khz = 4096;
audio_clock_info->n_44khz = 6272;
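The hunk above switches get_audio_clock_info() from kHz to 100 Hz pixel-clock units, so each divisor grows by a factor of ten: /100 instead of /10 for the 10 kHz bookkeeping, and /10 for the "Other" CTS entries that previously took the pixel clock in kHz directly. A small self-check, assuming a 148.5 MHz pixel clock:

    #include <assert.h>

    int main(void)
    {
        unsigned int pix_khz   = 148500;    /* old interface: kHz          */
        unsigned int pix_100hz = 1485000;   /* new interface: 100 Hz units */

        assert(pix_khz / 10 == pix_100hz / 100);   /* 10 kHz value unchanged */
        assert(pix_khz      == pix_100hz / 10);    /* CTS still lands in kHz */
        return 0;
    }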
@@ -1369,14 +1388,14 @@ static void dce110_se_setup_hdmi_audio(
/* Program audio clock sample/regeneration parameters */
get_audio_clock_info(crtc_info->color_depth,
- crtc_info->requested_pixel_clock,
- crtc_info->calculated_pixel_clock,
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
&audio_clock_info);
DC_LOG_HW_AUDIO(
- "\n%s:Input::requested_pixel_clock = %d" \
- "calculated_pixel_clock = %d \n", __func__, \
- crtc_info->requested_pixel_clock, \
- crtc_info->calculated_pixel_clock);
+ "\n%s:Input::requested_pixel_clock_100Hz = %d" \
+ "calculated_pixel_clock_100Hz = %d \n", __func__, \
+ crtc_info->requested_pixel_clock_100Hz, \
+ crtc_info->calculated_pixel_clock_100Hz);
/* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
@@ -1584,6 +1603,17 @@ static void dig_connect_to_otg(
REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst);
}
+static unsigned int dig_source_otg(
+ struct stream_encoder *enc)
+{
+ uint32_t tg_inst = 0;
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst);
+
+ return tg_inst;
+}
+
static const struct stream_encoder_funcs dce110_str_enc_funcs = {
.dp_set_stream_attribute =
dce110_stream_encoder_dp_set_stream_attribute,
@@ -1618,6 +1648,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
.setup_stereo_sync = setup_stereo_sync,
.set_avmute = dce110_stream_encoder_set_avmute,
.dig_connect_to_otg = dig_connect_to_otg,
+ .hdmi_reset_stream_attribute = dce110_reset_hdmi_stream_attribute,
+ .dig_source_otg = dig_source_otg,
};
void dce110_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
new file mode 100644
index 000000000000..225955ec6d39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_psr.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../../dmub/inc/dmub_srv.h"
+#include "dmub_fw_state.h"
+#include "core_types.h"
+#include "ipp.h"
+
+#define MAX_PIPES 6
+
+/**
+ * Get PSR state from firmware.
+ */
+static void dmub_get_psr_state(uint32_t *psr_state)
+{
+ // Not yet implemented
+ // Trigger GPINT interrupt from firmware
+}
+
+/**
+ * Enable/Disable PSR.
+ */
+static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_enable.header.type = DMUB_CMD__PSR;
+
+ if (enable)
+ cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE;
+ else
+ cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE;
+
+ cmd.psr_enable.header.payload_bytes = 0; // Send header only
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set PSR level.
+ */
+static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t psr_state = 0;
+ struct dc_context *dc = dmub->ctx;
+
+ dmub_get_psr_state(&psr_state);
+
+ if (psr_state == 0)
+ return;
+
+ cmd.psr_set_level.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
+ cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
+ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Setup PSR by programming phy registers and sending psr hw context values to firmware.
+ */
+static bool dmub_setup_psr(struct dmub_psr *dmub,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ struct dmub_cmd_psr_copy_settings_data *copy_settings_data
+ = &cmd.psr_copy_settings.psr_copy_settings_data;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ break;
+ }
+ }
+
+	/* plane_res and stream_res are embedded structs; only the pipe lookup can fail */
+	if (!pipe_ctx)
+		return false;
+
+ // Program DP DPHY fast training registers
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ // Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
+ cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
+ cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
+
+ // Hw insts
+ copy_settings_data->dpphy_inst = psr_context->phyType;
+ copy_settings_data->aux_inst = psr_context->channel;
+ copy_settings_data->digfe_inst = psr_context->engineId;
+ copy_settings_data->digbe_inst = psr_context->transmitterId;
+
+ copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+
+ if (pipe_ctx->plane_res.hubp)
+ copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
+ else
+ copy_settings_data->hubp_inst = 0;
+ if (pipe_ctx->plane_res.dpp)
+ copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
+ else
+ copy_settings_data->dpp_inst = 0;
+ if (pipe_ctx->stream_res.opp)
+ copy_settings_data->opp_inst = pipe_ctx->stream_res.opp->inst;
+ else
+ copy_settings_data->opp_inst = 0;
+ if (pipe_ctx->stream_res.tg)
+ copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst;
+ else
+ copy_settings_data->otg_inst = 0;
+
+ // Misc
+ copy_settings_data->psr_level = psr_context->psr_level.u32all;
+ copy_settings_data->hyst_frames = psr_context->timehyst_frames;
+ copy_settings_data->hyst_lines = psr_context->hyst_lines;
+ copy_settings_data->phy_type = psr_context->phyType;
+ copy_settings_data->aux_repeat = psr_context->aux_repeats;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
+ copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->frame_delay = psr_context->frame_delay;
+ copy_settings_data->smu_phy_id = psr_context->smuPhyId;
+ copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
+ copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+ copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
+ copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static const struct dmub_psr_funcs psr_funcs = {
+ .set_psr_enable = dmub_set_psr_enable,
+ .setup_psr = dmub_setup_psr,
+ .get_psr_state = dmub_get_psr_state,
+ .set_psr_level = dmub_set_psr_level,
+};
+
+/**
+ * Construct PSR object.
+ */
+static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
+{
+ psr->ctx = ctx;
+ psr->funcs = &psr_funcs;
+}
+
+/**
+ * Allocate and initialize PSR object.
+ */
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
+{
+ struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL);
+
+ if (psr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_psr_construct(psr, ctx);
+
+ return psr;
+}
+
+/**
+ * Deallocate PSR object.
+ */
+void dmub_psr_destroy(struct dmub_psr **dmub)
+{
+	kfree(*dmub);
+ *dmub = NULL;
+}
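The new file above follows DC's usual object/vtable pattern: dmub_psr_create() allocates the object and points it at a static funcs table, callers go through psr->funcs, and dmub_psr_destroy() frees the allocation and clears the caller's pointer. A stand-alone model of that pattern; all names below are illustrative stand-ins, not the driver's real types:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct psr_obj;
    struct psr_funcs {
        void (*set_enable)(struct psr_obj *p, bool enable);
    };
    struct psr_obj {
        const struct psr_funcs *funcs;
        bool enabled;
    };

    static void set_enable(struct psr_obj *p, bool enable) { p->enabled = enable; }
    static const struct psr_funcs funcs = { .set_enable = set_enable };

    static struct psr_obj *psr_create(void)
    {
        struct psr_obj *p = calloc(1, sizeof(*p));
        if (p)
            p->funcs = &funcs;
        return p;
    }

    static void psr_destroy(struct psr_obj **p)
    {
        free(*p);        /* free the object itself, not the caller's pointer */
        *p = NULL;
    }

    int main(void)
    {
        struct psr_obj *p = psr_create();
        if (!p)
            return 1;
        p->funcs->set_enable(p, true);
        printf("%d\n", p->enabled);
        psr_destroy(&p);
        return 0;
    }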
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
new file mode 100644
index 000000000000..229958de3035
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_PSR_H_
+#define _DMUB_PSR_H_
+
+#include "os_types.h"
+
+struct dmub_psr {
+ struct dc_context *ctx;
+ const struct dmub_psr_funcs *funcs;
+};
+
+struct dmub_psr_funcs {
+ void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
+ bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*get_psr_state)(uint32_t *psr_state);
+ void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+};
+
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
+void dmub_psr_destroy(struct dmub_psr **dmub);
+
+
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 799d36299c9b..753cb8edd996 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -26,7 +26,6 @@
#include "dc.h"
#include "core_types.h"
#include "clk_mgr.h"
-#include "hw_sequencer.h"
#include "dce100_hw_sequencer.h"
#include "resource.h"
@@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
{
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index a6b80fdaa666..34518da20009 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE100_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 6248c8455314..8f78bf9abbca 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -304,7 +304,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = {
.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
static struct mem_input *dce100_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
@@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -668,6 +714,7 @@ struct clock_source *dce100_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -678,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce100_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -838,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce100_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -903,14 +950,13 @@ static const struct resource_funcs dce100_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
-static bool construct(
+static bool dce100_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dc_firmware_info info;
struct dc_bios *bp;
ctx->dc_bios->regs = &bios_regs;
@@ -921,8 +967,7 @@ static bool construct(
bp = ctx->dc_bios;
- if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
- info.external_clock_source_frequency_for_dp != 0) {
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
@@ -998,6 +1043,8 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
+ dc->caps.extended_aux_timeout_support = false;
+
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
@@ -1075,7 +1122,7 @@ static bool construct(
return true;
res_create_fail:
- destruct(pool);
+ dce100_resource_destruct(pool);
return false;
}
@@ -1090,9 +1137,10 @@ struct resource_pool *dce100_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (dce100_resource_construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index fafb4b470140..5b689273ff44 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -61,6 +61,8 @@
#include "atomfirmware.h"
+#define GAMMA_HW_POINTS_NUM 256
+
/*
* All values are in milliseconds;
* For eDP, after power-up/power/down,
@@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
}
static bool
-dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
@@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
}
static bool
-dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
struct transform *xfm = pipe_ctx->plane_res.xfm;
@@ -651,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
pipe_ctx->stream->link->cur_link_settings.lane_count;
-
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct dc_link *link = pipe_ctx->stream->link;
-
+ const struct dc *dc = link->dc;
uint32_t active_total_with_borders;
uint32_t early_control = 0;
@@ -667,29 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
- /* update AVI info frame (HDMI, DP)*/
- /* TODO: FPGA may change to hwss.update_info_frame */
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL &&
- pipe_ctx->plane_res.hubp != NULL) {
- if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
- /* if using dynamic meta, don't set up generic infopackets */
- pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
- pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
- pipe_ctx->stream_res.stream_enc,
- true, pipe_ctx->plane_res.hubp->inst,
- dc_is_dp_signal(pipe_ctx->stream->signal) ?
- dmdata_dp : dmdata_hdmi);
- } else
- pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
- pipe_ctx->stream_res.stream_enc,
- false, pipe_ctx->plane_res.hubp->inst,
- dc_is_dp_signal(pipe_ctx->stream->signal) ?
- dmdata_dp : dmdata_hdmi);
- }
-#endif
- dce110_update_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
@@ -753,7 +732,7 @@ static enum bp_result link_transmitter_control(
* @brief
* eDP only.
*/
-void hwss_edp_wait_for_hpd_ready(
+void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up)
{
@@ -821,7 +800,7 @@ void hwss_edp_wait_for_hpd_ready(
}
}
-void hwss_edp_power_control(
+void dce110_edp_power_control(
struct dc_link *link,
bool power_up)
{
@@ -903,7 +882,7 @@ void hwss_edp_power_control(
* @brief
* eDP only. Control the backlight of the eDP panel
*/
-void hwss_edp_backlight_control(
+void dce110_edp_backlight_control(
struct dc_link *link,
bool enable)
{
@@ -965,27 +944,23 @@ void hwss_edp_backlight_control(
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify audio driver for audio modes of monitor */
- struct dc *core_dc;
- struct pp_smu_funcs *pp_smu = NULL;
+ struct dc *dc;
struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
if (!pipe_ctx->stream)
return;
- core_dc = pipe_ctx->stream->ctx->dc;
- clk_mgr = core_dc->clk_mgr;
+ dc = pipe_ctx->stream->ctx->dc;
+ clk_mgr = dc->clk_mgr;
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
- if (core_dc->res_pool->pp_smu)
- pp_smu = core_dc->res_pool->pp_smu;
-
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
num_audio++;
}
@@ -1003,10 +978,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
}
}
-void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
struct dc *dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1023,27 +997,13 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
- if (dc->res_pool->pp_smu)
- pp_smu = dc->res_pool->pp_smu;
-
- if (option != KEEP_ACQUIRED_RESOURCE ||
- !dc->debug.az_endpoint_mute_only)
- /*only disalbe az_endpoint if power down or free*/
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
pipe_ctx->stream_res.stream_enc);
- /*don't free audio if it is from retrain or internal disable stream*/
- if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
- pipe_ctx->stream_res.audio = NULL;
- }
+
if (clk_mgr->funcs->enable_pme_wa)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
clk_mgr->funcs->enable_pme_wa(clk_mgr);
@@ -1056,21 +1016,24 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
}
}
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_reset_stream_attribute(
+ pipe_ctx->stream_res.stream_enc);
+ }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.stream_enc);
- dc->hwss.disable_audio_stream(pipe_ctx, option);
+ dc->hwss.disable_audio_stream(pipe_ctx);
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
@@ -1085,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
@@ -1094,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
@@ -1102,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, false);
+ hws->funcs.edp_backlight_control(link, false);
dc_link_set_abm_disable(link);
}
@@ -1174,33 +1139,34 @@ static void build_audio_output(
stream->timing.flags.INTERLACE;
audio_output->crtc_info.refresh_rate =
- (stream->timing.pix_clk_100hz*10000)/
+ (stream->timing.pix_clk_100hz*100)/
(stream->timing.h_total*stream->timing.v_total);
audio_output->crtc_info.color_depth =
stream->timing.display_color_depth;
- audio_output->crtc_info.requested_pixel_clock =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
+ audio_output->crtc_info.requested_pixel_clock_100Hz =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
- audio_output->crtc_info.calculated_pixel_clock =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
+ audio_output->crtc_info.calculated_pixel_clock_100Hz =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
/*for HDMI, audio ACR is with deep color ratio factor*/
if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
- audio_output->crtc_info.requested_pixel_clock ==
- (stream->timing.pix_clk_100hz / 10)) {
+ audio_output->crtc_info.requested_pixel_clock_100Hz ==
+ (stream->timing.pix_clk_100hz)) {
if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- audio_output->crtc_info.requested_pixel_clock =
- audio_output->crtc_info.requested_pixel_clock/2;
- audio_output->crtc_info.calculated_pixel_clock =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/20;
+ audio_output->crtc_info.requested_pixel_clock_100Hz =
+ audio_output->crtc_info.requested_pixel_clock_100Hz/2;
+ audio_output->crtc_info.calculated_pixel_clock_100Hz =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/2;
}
}
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (state->clk_mgr &&
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
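In the same spirit, the refresh-rate expression near the top of this hunk scales by 100 rather than 10000 because pix_clk_100hz already carries a factor of 100 relative to Hz, so pix_clk_100hz * 100 / (h_total * v_total) yields plain Hz. A quick check against an assumed CEA 1080p60 timing:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pix_clk_100hz = 1485000;      /* 148.5 MHz in 100 Hz units */
        unsigned int h_total = 2200, v_total = 1125;

        printf("%u Hz\n", (pix_clk_100hz * 100) / (h_total * v_total));   /* 60 Hz */
        return 0;
    }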
@@ -1260,7 +1226,7 @@ static void program_scaler(const struct dc *dc,
{
struct tg_color color = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TOFPGA */
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
return;
@@ -1359,12 +1325,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
-#endif
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ struct dce_hwseq *hws = dc->hwseq;
- if (dc->hwss.disable_stream_gating) {
- dc->hwss.disable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.disable_stream_gating) {
+ hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
if (pipe_ctx->stream_res.audio != NULL) {
@@ -1394,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
/* */
/* Do not touch stream timing on seamless boot optimization. */
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
- dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
+ hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -1408,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+ /* Event triggers and num frames initialized for DRR, but can be
+ * later updated for PSR use. Note DRR trigger events are generated
+ * regardless of whether num frames met.
+ */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
@@ -1427,8 +1396,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- if (odm_pipe) {
+ while (odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion(
odm_pipe->stream_res.opp,
COLOR_SPACE_YCBCR601,
@@ -1439,15 +1407,15 @@ static enum dc_status apply_single_controller_ctx_to_hw(
odm_pipe->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
+ odm_pipe = odm_pipe->next_odm_pipe;
}
-#endif
if (!stream->dpms_off)
core_link_enable_stream(context, pipe_ctx);
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_enabled = false;
+ pipe_ctx->stream->link->psr_feature_enabled = false;
return DC_OK;
}
@@ -1457,8 +1425,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
int i;
- enum connector_id connector_id;
- enum signal_type signal = SIGNAL_TYPE_NONE;
/* do not know BIOS back-front mapping, simply blank all. It will not
* hurt for non-DP
@@ -1469,15 +1435,15 @@ static void power_down_encoders(struct dc *dc)
}
for (i = 0; i < dc->link_count; i++) {
- connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
- if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP)) {
+ enum signal_type signal = dc->links[i]->connector_signal;
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT))
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
- if (connector_id == CONNECTOR_ID_EDP)
- signal = SIGNAL_TYPE_EDP;
- }
+
+ if (signal != SIGNAL_TYPE_EDP)
+ signal = SIGNAL_TYPE_NONE;
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
@@ -1558,18 +1524,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context)
return NULL;
}
-static struct dc_link *get_edp_link(struct dc *dc)
-{
- int i;
-
- // report any eDP links, even unconnected DDI's
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
- return dc->links[i];
- }
- return NULL;
-}
-
static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
@@ -1605,9 +1559,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ struct dce_hwseq *hws = dc->hwseq;
- if (dc->hwss.init_pipes)
- dc->hwss.init_pipes(dc, context);
+ if (hws->funcs.init_pipes)
+ hws->funcs.init_pipes(dc, context);
edp_stream = get_edp_stream(context);
@@ -1644,7 +1599,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
- dc->hwss.edp_backlight_control(edp_link_with_sink, false);
+ hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
power_down_all_hw_blocks(dc);
@@ -1748,12 +1703,15 @@ void dce110_set_safe_displaymarks(
******************************************************************************/
static void set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, int vmin, int vmax)
+ int num_pipes, unsigned int vmin, unsigned int vmax,
+ unsigned int vmid, unsigned int vmid_frame_number)
{
int i = 0;
struct drr_params params = {0};
// DRR should set trigger event to monitor surface update event
unsigned int event_triggers = 0x80;
+ // Note DRR trigger events are generated regardless of whether num frames met.
+ unsigned int num_frames = 2;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
@@ -1769,7 +1727,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
if (vmax != 0 && vmin != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
- event_triggers);
+ event_triggers, num_frames);
}
}
@@ -1786,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx,
}
static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events)
+ int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
- unsigned int value = 0;
+ unsigned int triggers = 0;
- if (events->overlay_update)
- value |= 0x100;
- if (events->surface_update)
- value |= 0x80;
- if (events->cursor_update)
- value |= 0x2;
- if (events->force_trigger)
- value |= 0x1;
+ if (params->triggers.overlay_update)
+ triggers |= 0x100;
+ if (params->triggers.surface_update)
+ triggers |= 0x80;
+ if (params->triggers.cursor_update)
+ triggers |= 0x2;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
if (num_pipes) {
struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
if (dc->fbc_compressor)
- value |= 0x84;
+ triggers |= 0x84;
}
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
}
/*
@@ -1862,7 +1821,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_enabled)
+ if (pipe_ctx->stream->link->psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -1932,8 +1891,25 @@ static void dce110_reset_hw_ctx_wrap(
/* Disable if new stream is null. O/w, if stream is
* disabled already, no need to disable again.
*/
- if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
- core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
+ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) {
+ core_link_disable_stream(pipe_ctx_old);
+
+ /* free acquired resources*/
+ if (pipe_ctx_old->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx_old->stream_res.audio->funcs->
+ az_disable(pipe_ctx_old->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx_old->stream_res.audio, false);
+ pipe_ctx_old->stream_res.audio = NULL;
+ }
+ }
+ }
pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
if (!hwss_wait_for_blank_complete(pipe_ctx_old->stream_res.tg)) {
@@ -2041,13 +2017,14 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
/* Reset old context */
/* look up the targets that have been removed since last commit */
- dc->hwss.reset_hw_ctx_wrap(dc, context);
+ hws->funcs.reset_hw_ctx_wrap(dc, context);
/* Skip applying if no targets */
if (context->stream_count <= 0)
@@ -2072,7 +2049,7 @@ enum dc_status dce110_apply_ctx_to_hw(
continue;
}
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, i, dc->ctx->dc_bios,
PIPE_GATING_CONTROL_DISABLE);
}
@@ -2098,7 +2075,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
continue;
- if (pipe_ctx->top_pipe)
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;
status = apply_single_controller_ctx_to_hw(
@@ -2381,19 +2358,20 @@ static void init_hw(struct dc *dc)
struct transform *xfm;
struct abm *abm;
struct dmcu *dmcu;
+ struct dce_hwseq *hws = dc->hwseq;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
xfm = dc->res_pool->transforms[i];
xfm->funcs->transform_reset(xfm);
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, i, bp,
PIPE_GATING_CONTROL_INIT);
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, i, bp,
PIPE_GATING_CONTROL_DISABLE);
- dc->hwss.enable_display_pipe_clock_gating(
+ hws->funcs.enable_display_pipe_clock_gating(
dc->ctx,
true);
}
@@ -2475,17 +2453,15 @@ static void dce110_program_front_end_for_pipe(
struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct pipe_ctx *old_pipe = NULL;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
unsigned int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
- if (dc->current_state)
- old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -2541,10 +2517,10 @@ static void dce110_program_front_end_for_pipe(
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
DC_LOG_SURFACE(
"Pipe:%d %p: addr hi:0x%x, "
@@ -2647,6 +2623,7 @@ static void dce110_apply_ctx_for_surface(
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
int fe_idx = pipe_ctx->plane_res.mi ?
pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
@@ -2654,7 +2631,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
return;
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
@@ -2743,14 +2720,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
.init_hw = init_hw,
- .init_pipes = init_pipes,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
- .set_input_transfer_func = dce110_set_input_transfer_func,
- .set_output_transfer_func = dce110_set_output_transfer_func,
- .power_down = dce110_power_down,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dce110_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset,
@@ -2761,8 +2734,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
- .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
- .enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
.prepare_bandwidth = dce110_prepare_bandwidth,
@@ -2770,22 +2741,33 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_drr = set_drr,
.get_position = get_position,
.set_static_screen_control = set_static_screen_control,
- .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
- .enable_stream_timing = dce110_enable_stream_timing,
- .disable_stream_gating = NULL,
- .enable_stream_gating = NULL,
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
- .edp_backlight_control = hwss_edp_backlight_control,
- .edp_power_control = hwss_edp_power_control,
- .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
.set_cursor_attribute = dce110_set_cursor_attribute
};
+static const struct hwseq_private_funcs dce110_private_funcs = {
+ .init_pipes = init_pipes,
+ .update_plane_addr = update_plane_addr,
+ .set_input_transfer_func = dce110_set_input_transfer_func,
+ .set_output_transfer_func = dce110_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+ .enable_stream_timing = dce110_enable_stream_timing,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .edp_backlight_control = dce110_edp_backlight_control,
+};
+
void dce110_hw_sequencer_construct(struct dc *dc)
{
dc->hwss = dce110_funcs;
+ dc->hwseq->funcs = dce110_private_funcs;
}
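
The construct function above now installs two tables: dc->hwss keeps the hooks that core DC and the display managers call directly, while dc->hwseq->funcs (struct hwseq_private_funcs) holds steps such as init_pipes, reset_hw_ctx_wrap and enable_display_power_gating that only other sequencer code should invoke. A minimal sketch of that layering, with the struct contents trimmed to a few members (the real hw_sequencer_private.h carries many more):

	struct dc;
	struct dc_state;

	/* public table: callable from dc core / dm layers */
	struct hw_sequencer_funcs {
		void (*init_hw)(struct dc *dc);
	};

	/* private table: only hardware-sequencer code dereferences these */
	struct hwseq_private_funcs {
		void (*init_pipes)(struct dc *dc, struct dc_state *context);
		void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context);
	};

	struct dce_hwseq {
		struct hwseq_private_funcs funcs;
	};

Inside the sequencer, call sites switch from dc->hwss.init_pipes(...) to hws->funcs.init_pipes(...), which is why dce112/dce120/dce80 later in this patch override dc->hwseq->funcs.enable_display_power_gating instead of a public hook.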
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index cd3e36d52a52..26a9c14a58b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -27,8 +27,8 @@
#define __DC_HWSS_DCE110_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
-#define GAMMA_HW_POINTS_NUM 256
struct dc;
struct dc_state;
struct dm_pp_display_configuration;
@@ -42,7 +42,7 @@ enum dc_status dce110_apply_ctx_to_hw(
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx);
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
@@ -50,7 +50,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
-void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx);
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
@@ -73,15 +73,15 @@ void dce110_optimize_bandwidth(
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
-void hwss_edp_power_control(
+void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
-void hwss_edp_backlight_control(
+void dce110_edp_backlight_control(
struct dc_link *link,
bool enable);
-void hwss_edp_wait_for_hpd_ready(
+void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 9b9fc3d96c07..d54172d88f5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -229,26 +229,26 @@ static void program_tiling(
static void program_size_and_rotation(
struct dce_mem_input *mem_input110,
enum dc_rotation_angle rotation,
- const union plane_size *plane_size)
+ const struct plane_size *plane_size)
{
uint32_t value = 0;
- union plane_size local_size = *plane_size;
+ struct plane_size local_size = *plane_size;
if (rotation == ROTATION_ANGLE_90 ||
rotation == ROTATION_ANGLE_270) {
- swap(local_size.video.luma_size.x,
- local_size.video.luma_size.y);
- swap(local_size.video.luma_size.width,
- local_size.video.luma_size.height);
- swap(local_size.video.chroma_size.x,
- local_size.video.chroma_size.y);
- swap(local_size.video.chroma_size.width,
- local_size.video.chroma_size.height);
+ swap(local_size.surface_size.x,
+ local_size.surface_size.y);
+ swap(local_size.surface_size.width,
+ local_size.surface_size.height);
+ swap(local_size.chroma_size.x,
+ local_size.chroma_size.y);
+ swap(local_size.chroma_size.width,
+ local_size.chroma_size.height);
}
value = 0;
- set_reg_field_value(value, local_size.video.luma_pitch,
+ set_reg_field_value(value, local_size.surface_pitch,
UNP_GRPH_PITCH_L, GRPH_PITCH_L);
dm_write_reg(
@@ -257,7 +257,7 @@ static void program_size_and_rotation(
value);
value = 0;
- set_reg_field_value(value, local_size.video.chroma_pitch,
+ set_reg_field_value(value, local_size.chroma_pitch,
UNP_GRPH_PITCH_C, GRPH_PITCH_C);
dm_write_reg(
mem_input110->base.ctx,
@@ -297,8 +297,8 @@ static void program_size_and_rotation(
value);
value = 0;
- set_reg_field_value(value, local_size.video.luma_size.x +
- local_size.video.luma_size.width,
+ set_reg_field_value(value, local_size.surface_size.x +
+ local_size.surface_size.width,
UNP_GRPH_X_END_L, GRPH_X_END_L);
dm_write_reg(
mem_input110->base.ctx,
@@ -306,8 +306,8 @@ static void program_size_and_rotation(
value);
value = 0;
- set_reg_field_value(value, local_size.video.chroma_size.x +
- local_size.video.chroma_size.width,
+ set_reg_field_value(value, local_size.chroma_size.x +
+ local_size.chroma_size.width,
UNP_GRPH_X_END_C, GRPH_X_END_C);
dm_write_reg(
mem_input110->base.ctx,
@@ -315,8 +315,8 @@ static void program_size_and_rotation(
value);
value = 0;
- set_reg_field_value(value, local_size.video.luma_size.y +
- local_size.video.luma_size.height,
+ set_reg_field_value(value, local_size.surface_size.y +
+ local_size.surface_size.height,
UNP_GRPH_Y_END_L, GRPH_Y_END_L);
dm_write_reg(
mem_input110->base.ctx,
@@ -324,8 +324,8 @@ static void program_size_and_rotation(
value);
value = 0;
- set_reg_field_value(value, local_size.video.chroma_size.y +
- local_size.video.chroma_size.height,
+ set_reg_field_value(value, local_size.chroma_size.y +
+ local_size.chroma_size.height,
UNP_GRPH_Y_END_C, GRPH_Y_END_C);
dm_write_reg(
mem_input110->base.ctx,
@@ -637,7 +637,7 @@ void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizotal_mirror)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 764329264c3b..bf14e9ab040c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_110_REG_LIST(id),\
@@ -331,7 +339,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -714,6 +760,7 @@ struct clock_source *dce110_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -735,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce110_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -1050,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
+ struct dce_hwseq *hws = dc->hwseq;
struct resource_context *res_ctx = &context->res_ctx;
unsigned int underlay_idx = pool->underlay_pipe_index;
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
@@ -1070,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
struct tg_color black_color = {0};
struct dc_bios *dcb = dc->ctx->dc_bios;
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc,
pipe_ctx->stream_res.tg->inst,
dcb, PIPE_GATING_CONTROL_DISABLE);
@@ -1114,7 +1162,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce110_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -1266,7 +1314,7 @@ const struct resource_caps *dce110_resource_cap(
return &carrizo_resource_cap;
}
-static bool construct(
+static bool dce110_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool,
@@ -1274,7 +1322,6 @@ static bool construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dc_firmware_info info;
struct dc_bios *bp;
ctx->dc_bios->regs = &bios_regs;
@@ -1293,6 +1340,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
@@ -1300,8 +1348,7 @@ static bool construct(
bp = ctx->dc_bios;
- if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
- info.external_clock_source_frequency_for_dp != 0) {
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
@@ -1446,7 +1493,7 @@ static bool construct(
return true;
res_create_fail:
- destruct(pool);
+ dce110_resource_destruct(pool);
return false;
}
@@ -1461,9 +1508,10 @@ struct resource_pool *dce110_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool, asic_id))
+ if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
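
Each resource file in this patch adds the same map_transmitter_id_to_phy_instance() helper so that link_enc_regs[] is indexed by a dense PHY instance rather than by the raw enum transmitter value, which is not guaranteed to be a zero-based contiguous index. A hypothetical table-driven variant of the same mapping (the patch itself uses an explicit switch with ASSERT(0) on unexpected values):

	/* Hypothetical compact form of map_transmitter_id_to_phy_instance();
	 * assumes TRANSMITTER_UNIPHY_A..G are the only transmitters handed
	 * to the link-encoder create functions, as in the switch above. */
	static int map_transmitter_id_to_phy_instance(enum transmitter transmitter)
	{
		static const enum transmitter uniphy[] = {
			TRANSMITTER_UNIPHY_A, TRANSMITTER_UNIPHY_B,
			TRANSMITTER_UNIPHY_C, TRANSMITTER_UNIPHY_D,
			TRANSMITTER_UNIPHY_E, TRANSMITTER_UNIPHY_F,
			TRANSMITTER_UNIPHY_G,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(uniphy); i++)
			if (uniphy[i] == transmitter)
				return i;

		ASSERT(0);	/* unknown transmitter: fall back to PHY instance 0 */
		return 0;
	}

The returned instance then replaces enc_init_data->transmitter as the index into link_enc_regs[] in dce110/dce112/dce120/dce80_link_encoder_create().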
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 5f7c2c5641c4..1ea7db8eeb98 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr(
void dce110_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t static_screen_cntl = 0;
uint32_t addr = 0;
+ // By register spec, it only takes 8 bit value
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
static_screen_cntl = dm_read_reg(tg->ctx, addr);
set_reg_field_value(static_screen_cntl,
- value,
+ event_triggers,
CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_EVENT_MASK);
set_reg_field_value(static_screen_cntl,
- 2,
+ num_frames,
CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_FRAME_COUNT);
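
dce110_timing_generator_set_static_screen_control() now receives both the trigger mask and a frame count, and clamps the count because CRTC_STATIC_SCREEN_FRAME_COUNT is an 8-bit register field. A short caller-side sketch of how the dce110 sequencer hunk earlier in this patch assembles those two arguments from dc_static_screen_params (bit values are the ones used there; shown here only for illustration):

	/* Sketch: build the event-trigger mask and frame count passed to
	 * tg->funcs->set_static_screen_control(). The 8-bit clamp is done
	 * inside the TG implementations; repeated here for clarity. */
	static void program_static_screen(struct timing_generator *tg,
			const struct dc_static_screen_params *params)
	{
		uint32_t triggers = 0;
		uint32_t num_frames = params->num_frames;

		if (params->triggers.overlay_update)
			triggers |= 0x100;
		if (params->triggers.surface_update)
			triggers |= 0x80;
		if (params->triggers.cursor_update)
			triggers |= 0x2;
		if (params->triggers.force_trigger)
			triggers |= 0x1;

		if (num_frames > 0xFF)		/* register field is 8 bits wide */
			num_frames = 0xFF;

		tg->funcs->set_static_screen_control(tg, triggers, num_frames);
	}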
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 768ccf27ada9..d8a5ed7b485d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr(
void dce110_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void dce110_timing_generator_get_crtc_scanoutpos(
struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
index 1e4a7c13f0ed..19873ee1f78d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc)
* structure
*/
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
index e646f4a37fa2..943f1b2c5b2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE112_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index c6136e0ed1a4..700ad8b3e54b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_DCE110_REG_LIST_DCE_BASE(id)\
@@ -337,7 +345,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -687,6 +733,7 @@ struct clock_source *dce112_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -697,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce112_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -966,7 +1013,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce112_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -987,6 +1034,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
struct dm_pp_clock_levels_with_latency mem_clks = {0};
struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
struct dm_pp_clock_levels clks = {0};
+ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
+
+ if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
+ memory_type_multiplier = MEMORY_TYPE_HBM;
/*do system clock TODO PPLIB: after PPLIB implement,
* then remove old way
@@ -1026,12 +1077,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1067,12 +1118,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1135,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap(
return &polaris_10_resource_cap;
}
-static bool construct(
+static bool dce112_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
@@ -1158,7 +1209,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
@@ -1321,7 +1372,7 @@ static bool construct(
return true;
res_create_fail:
- destruct(pool);
+ dce112_resource_destruct(pool);
return false;
}
@@ -1335,9 +1386,10 @@ struct resource_pool *dce112_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (dce112_resource_construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
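
bw_calcs_data_update_from_pplib() converts each UMA clock level reported by PPLib into an effective memory clock via YCLK = UMACLK * memory-type multiplier, and with this patch the multiplier depends on the VBIOS memory type instead of always using the Carrizo value. A small worked example, assuming the usual constants MEMORY_TYPE_MULTIPLIER_CZ == 4 and MEMORY_TYPE_HBM == 2 from the bandwidth-calc headers:

	/* Worked example of the YCLK conversion above; the multiplier
	 * values are assumptions taken from the bw-calc headers. */
	int multiplier = MEMORY_TYPE_MULTIPLIER_CZ;	/* 4 for non-HBM boards */

	if (dc->bw_vbios->memory_type == bw_def_hbm)
		multiplier = MEMORY_TYPE_HBM;		/* 2 for HBM boards */

	/* e.g. a 500,000 kHz UMA level becomes 1,000,000 kHz of YCLK on
	 * HBM, or 2,000,000 kHz with the Carrizo multiplier;
	 * bw_frc_to_fixed(x, 1000) then stores it as fixed-point MHz. */
	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
			mem_clks.data[0].clocks_in_khz * multiplier, 1000);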
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 1ca30928025e..66a13aa39c95 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc)
* structure
*/
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
index c51afbd0b012..bc024534732f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE120_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 4a6ba3173a5a..53ab88ef71f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -63,8 +63,8 @@
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "nbio/nbio_6_1_offset.h"
-#include "mmhub/mmhub_9_4_0_offset.h"
-#include "mmhub/mmhub_9_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
@@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE12_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE12_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_120_REG_LIST(id),\
@@ -352,10 +360,41 @@ static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, id)\
[index] = {\
CS_COMMON_REG_LIST_DCE_112(id),\
@@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -500,6 +542,7 @@ static struct clock_source *dce120_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -544,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm)
*xfm = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce120_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -654,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
@@ -825,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce120_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -847,6 +894,8 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
int i;
unsigned int clk;
unsigned int latency;
+ /*original logic in dal3*/
+ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
/*do system clock*/
if (!dm_pp_get_clock_levels_by_type_with_latency(
@@ -905,13 +954,16 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
+ if (dc->bw_vbios->memory_type == bw_def_hbm)
+ memory_type_multiplier = MEMORY_TYPE_HBM;
+
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -972,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
}
-static bool construct(
+static bool dce120_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
@@ -1000,7 +1052,7 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.psp_setup_panel_mode = true;
-
+ dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
/*************************************************
@@ -1185,7 +1237,7 @@ controller_create_fail:
clk_src_create_fail:
res_create_fail:
- destruct(pool);
+ dce120_resource_destruct(pool);
return false;
}
@@ -1200,9 +1252,10 @@ struct resource_pool *dce120_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (dce120_resource_construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 098e56962f2a..82bc4e192bbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg,
static void dce120_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ // By register spec, it only takes 8 bit value
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
- CRTC_STATIC_SCREEN_EVENT_MASK, value,
- CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+ CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers,
+ CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
void dce120_timing_generator_set_test_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index c4543178ba20..893261c81854 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
{
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
index 7a1b31def66f..e43af832d00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE80_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 860a524ebcfa..2ad5c28c6e66 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
@@ -322,7 +330,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -431,6 +439,37 @@ static const struct dce_abm_mask abm_mask = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -701,6 +747,7 @@ struct clock_source *dce80_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -726,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create(
return &ipp->base;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce80_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -854,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce80_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -876,7 +923,6 @@ static bool dce80_construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dc_firmware_info info;
struct dc_bios *bp;
ctx->dc_bios->regs = &bios_regs;
@@ -895,6 +941,7 @@ static bool dce80_construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
@@ -902,8 +949,7 @@ static bool dce80_construct(
bp = ctx->dc_bios;
- if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
- info.external_clock_source_frequency_for_dp != 0) {
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
@@ -1047,7 +1093,7 @@ static bool dce80_construct(
return true;
res_create_fail:
- destruct(pool);
+ dce80_resource_destruct(pool);
return false;
}
@@ -1075,7 +1121,6 @@ static bool dce81_construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dc_firmware_info info;
struct dc_bios *bp;
ctx->dc_bios->regs = &bios_regs;
@@ -1101,8 +1146,7 @@ static bool dce81_construct(
bp = ctx->dc_bios;
- if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
- info.external_clock_source_frequency_for_dp != 0) {
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
@@ -1246,7 +1290,7 @@ static bool dce81_construct(
return true;
res_create_fail:
- destruct(pool);
+ dce80_resource_destruct(pool);
return false;
}
@@ -1274,7 +1318,6 @@ static bool dce83_construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dc_firmware_info info;
struct dc_bios *bp;
ctx->dc_bios->regs = &bios_regs;
@@ -1300,8 +1343,7 @@ static bool dce83_construct(
bp = ctx->dc_bios;
- if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
- info.external_clock_source_frequency_for_dp != 0) {
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
@@ -1441,7 +1483,7 @@ static bool dce83_construct(
return true;
res_create_fail:
- destruct(pool);
+ dce80_resource_destruct(pool);
return false;
}
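
The kfree() additions in the clock-source and resource-pool create paths plug leaks on the construct-failure branch, which previously returned NULL while still holding the kzalloc'd object. The pattern, sketched with a hypothetical stand-in for the real construct call and its parameter list:

	/* Sketch of the leak-free failure path used by the *_create() hunks
	 * above; clk_src_construct_sketch() stands in for the real
	 * dce*_clk_src_construct() call and its arguments. */
	struct clock_source *clock_source_create_sketch(struct dc_context *ctx)
	{
		struct dce110_clk_src *clk_src =
			kzalloc(sizeof(*clk_src), GFP_KERNEL);

		if (!clk_src)
			return NULL;

		if (clk_src_construct_sketch(clk_src, ctx))
			return &clk_src->base;

		kfree(clk_src);		/* construct failed: free before bailing out */
		BREAK_TO_DEBUGGER();
		return NULL;
	}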
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872be89c..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,7 +22,8 @@
#
# Makefile for DCN.
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+ dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 7469333a2c8a..bbd6e01b3eca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -343,8 +343,8 @@ bool cm_helper_translate_curve_to_hw_format(
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
- /* 10 segments
- * segment is from 2^-10 to 2^0
+ /* 11 segments
+ * segment is from 2^-10 to 2^1
* There are less than 256 points, for optimization
*/
seg_distr[0] = 3;
@@ -357,9 +357,10 @@ bool cm_helper_translate_curve_to_hw_format(
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
+ seg_distr[10] = 1;
region_start = -10;
- region_end = 0;
+ region_end = 1;
}
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
@@ -392,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
@@ -463,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
i = 1;
while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -561,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
@@ -623,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
i = 1;
while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
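
The regamma/degamma hunks widen the non-HDR curve to 11 segments so the top region reaches 2^1 instead of stopping at 2^0, and seed the point just past hw_points from the last real point so the delta loop no longer needs the in-loop clamping that was removed. Assuming the helper accumulates hw_points as (1 << seg_distr[k]) per populated region, the distribution above gives the following point budget:

	/* Point-count sketch for the 11-segment distribution above; assumes
	 * the usual accumulation hw_points += (1 << seg_distr[k]). */
	static int count_hw_points(void)
	{
		static const int seg_distr[] = { 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1 };
		int hw_points = 0;
		size_t k;

		for (k = 0; k < sizeof(seg_distr) / sizeof(seg_distr[0]); k++)
			hw_points += 1 << seg_distr[k];

		return hw_points;	/* 8 + 9 * 16 + 2 = 154 points over 2^-10 .. 2^1 */
	}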
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index b95ec73fcae3..0e682b5aa3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
-static bool dpp_get_optimal_number_of_taps(
+bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -290,12 +290,8 @@ void dpp1_cnv_setup (
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut)
-#else
- enum dc_color_space input_color_space)
-#endif
{
uint32_t pixel_format;
uint32_t alpha_en;
@@ -426,8 +422,9 @@ void dpp1_cnv_setup (
void dpp1_set_cursor_attributes(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format)
+ struct dc_cursor_attributes *cursor_attributes)
{
+ enum dc_cursor_color_format color_format = cursor_attributes->color_format;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_UPDATE_2(CURSOR0_CONTROL,
@@ -456,6 +453,19 @@ void dpp1_set_cursor_position(
int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
uint32_t cur_en = pos->enable ? 1 : 0;
+ // Cursor width/height and hotspots need to be rotated for offset calculation
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ swap(width, height);
+ if (param->rotation == ROTATION_ANGLE_90) {
+ src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+ src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ src_x_offset = pos->x - param->viewport.x;
+ src_y_offset = pos->y - param->viewport.y;
+ }
+
+
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
@@ -507,7 +517,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,
@@ -528,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.dpp_program_blnd_lut = NULL,
.dpp_program_shaper_lut = NULL,
.dpp_program_3dlut = NULL
-#endif
};
static struct dpp_caps dcn10_dpp_cap = {
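
dpp1_set_cursor_position() now compensates the cursor source offsets for plane rotation: width and height swap for 90/270, the x/y hotspots trade places for 90, and the hotspot is dropped entirely for 180. A compact sketch of just the offset math (the parameter names here are stand-ins for the fields of the DC cursor structures used in the function above):

	/* Sketch of the rotated cursor-offset math added above. */
	static void cursor_src_offsets(int x, int y, int x_hotspot, int y_hotspot,
			int viewport_x, int viewport_y,
			enum dc_rotation_angle rotation,
			int *src_x_offset, int *src_y_offset)
	{
		*src_x_offset = x - x_hotspot - viewport_x;
		*src_y_offset = y - y_hotspot - viewport_y;

		if (rotation == ROTATION_ANGLE_90) {
			/* hotspots swap axes when the surface is rotated 90 degrees */
			*src_x_offset = x - y_hotspot - viewport_x;
			*src_y_offset = y - x_hotspot - viewport_y;
		} else if (rotation == ROTATION_ANGLE_180) {
			/* no hotspot compensation for a 180 degree flip */
			*src_x_offset = x - viewport_x;
			*src_y_offset = y - viewport_y;
		}
	}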
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 8a5517eebb7c..2edf566b3a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1368,7 +1368,7 @@ enum dcn10_input_csc_select {
void dpp1_set_cursor_attributes(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format);
+ struct dc_cursor_attributes *cursor_attributes);
void dpp1_set_cursor_position(
struct dpp *dpp_base,
@@ -1486,12 +1486,8 @@ void dpp1_cnv_setup (
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-#else
- enum dc_color_space input_color_space);
-#endif
void dpp1_full_bypass(struct dpp *dpp_base);
@@ -1504,6 +1500,11 @@ void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
+bool dpp1_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index aa0c7a7d13a0..4d3f7d5e1473 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,26 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
- {COLOR_SPACE_SRGB,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_SRGB_LIMITED,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_YCBCR601,
- {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
- 0, 0x2000, 0x38b4, 0xe3a6} },
- {COLOR_SPACE_YCBCR601_LIMITED,
- {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
- 0, 0x2568, 0x40de, 0xdd3a} },
- {COLOR_SPACE_YCBCR709,
- {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
- 0x2000, 0x3b61, 0xe24f} },
-
- {COLOR_SPACE_YCBCR709_LIMITED,
- {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
- 0x2568, 0x43ee, 0xdbb2} }
-};
-
static void program_gamut_remap(
struct dcn10_dpp *dpp,
const uint16_t *regval,
@@ -352,6 +332,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
uint32_t i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_SEQ_START();
+
for (i = 0 ; i < num; i++) {
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
@@ -626,10 +608,16 @@ void dpp1_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
}
+
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
}
void dpp1_degamma_ram_select(
@@ -731,10 +719,8 @@ void dpp1_full_bypass(struct dpp *dpp_base)
/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
if (dpp->tf_mask->CM_BYPASS_EN)
REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else
REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-#endif
/* Setting degamma bypass for now */
REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index d67e0abeee93..fce37c527a0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb(
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else {
/* DSCL caps: pixel data processed in float format */
REG_SET_2(LB_DATA_FORMAT, 0,
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
-#endif
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index 374cc9acda3b..b6391a5ead78 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -23,7 +23,7 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "reg_helper.h"
#include "resource.h"
@@ -109,9 +109,7 @@ const struct dwbc_funcs dcn10_dwbc_funcs = {
.update = NULL,
.set_stereo = NULL,
.set_new_content = NULL,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.set_warmup = NULL,
-#endif
.dwb_set_scaler = NULL,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
index c175edd0bae7..d56ea7c8171e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
@@ -24,7 +24,7 @@
#ifndef __DC_DWBC_DCN10_H__
#define __DC_DWBC_DCN10_H__
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN */
#define BASE_INNER(seg) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index a780057e2dbc..f36a0d8cedfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -104,7 +104,7 @@ void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}
-bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
+bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t enable = 0;
@@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
@@ -945,6 +948,8 @@ static const struct hubbub_funcs hubbub1_funcs = {
.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
.wm_read_state = hubbub1_wm_read_state,
.program_watermarks = hubbub1_program_watermarks,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};
void hubbub1_construct(struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 7c2559c9ae23..af57751253de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -119,6 +119,26 @@ struct dcn_hubbub_registers {
uint32_t DCN_VM_AGP_BOT;
uint32_t DCN_VM_AGP_TOP;
uint32_t DCN_VM_AGP_BASE;
+ uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_D;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C;
+ uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D;
+ uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A;
+ uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B;
+ uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C;
+ uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D;
+ uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
+ uint32_t DCHVM_CTRL0;
+ uint32_t DCHVM_MEM_CTRL;
+ uint32_t DCHVM_CLK_CTRL;
+ uint32_t DCHVM_RIOMMU_CTRL0;
+ uint32_t DCHVM_RIOMMU_STAT0;
};
/* set field name */
@@ -196,7 +216,9 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\
type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\
type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\
- type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\
+ type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\
+ type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB
#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\
@@ -208,15 +230,62 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
+#define HUBBUB_HVM_REG_FIELD_LIST(type) \
+ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\
+ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\
+ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B;\
+ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C;\
+ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\
+ type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_NOM_D;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C;\
+ type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D;\
+ type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A;\
+ type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B;\
+ type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C;\
+ type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D;\
+ type DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD;\
+ type HOSTVM_INIT_REQ; \
+ type HVM_GPUVMRET_PWR_REQ_DIS; \
+ type HVM_GPUVMRET_FORCE_REQ; \
+ type HVM_GPUVMRET_POWER_STATUS; \
+ type HVM_DISPCLK_R_GATE_DIS; \
+ type HVM_DISPCLK_G_GATE_DIS; \
+ type HVM_DCFCLK_R_GATE_DIS; \
+ type HVM_DCFCLK_G_GATE_DIS; \
+ type TR_REQ_REQCLKREQ_MODE; \
+ type TW_RSP_COMPCLKREQ_MODE; \
+ type HOSTVM_PREFETCH_REQ; \
+ type HOSTVM_POWERSTATUS; \
+ type RIOMMU_ACTIVE; \
+ type HOSTVM_PREFETCH_DONE
struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
+ HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
};
struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
+ HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
};
struct dc;
@@ -247,7 +316,7 @@ void hubbub1_program_watermarks(
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow);
-bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub);
+bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubub);
void hubbub1_toggle_watermark_change_req(
struct hubbub *hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 934bacc0c6ad..31b64733d693 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -163,7 +163,7 @@ void hubp1_program_tiling(
void hubp1_program_size(
struct hubp *hubp,
enum surface_pixel_format format,
- const union plane_size *plane_size,
+ const struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -173,16 +173,16 @@ void hubp1_program_size(
* 444 or 420 luma
*/
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
- ASSERT(plane_size->video.chroma_pitch != 0);
+ ASSERT(plane_size->chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
- pitch = plane_size->video.luma_pitch - 1;
- meta_pitch = dcc->video.meta_pitch_l - 1;
- pitch_c = plane_size->video.chroma_pitch - 1;
- meta_pitch_c = dcc->video.meta_pitch_c - 1;
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = plane_size->chroma_pitch - 1;
+ meta_pitch_c = dcc->meta_pitch_c - 1;
} else {
- pitch = plane_size->grph.surface_pitch - 1;
- meta_pitch = dcc->grph.meta_pitch - 1;
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
pitch_c = 0;
meta_pitch_c = 0;
}
@@ -306,7 +306,6 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
@@ -327,7 +326,6 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
-#endif
default:
BREAK_TO_DEBUGGER();
break;
@@ -509,7 +507,7 @@ bool hubp1_program_surface_flip_and_addr(
}
void hubp1_dcc_control(struct hubp *hubp, bool enable,
- bool independent_64b_blks)
+ enum hubp_ind_block_size independent_64b_blks)
{
uint32_t dcc_en = enable ? 1 : 0;
uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
@@ -526,13 +524,13 @@ void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level)
{
- hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size(hubp, format, plane_size, dcc);
hubp1_program_rotation(hubp, rotation, horizontal_mirror);
@@ -841,9 +839,17 @@ void min_set_viewport(
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
-void hubp1_read_state(struct hubp *hubp)
+void hubp1_read_state_common(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct dcn_hubp_state *s = &hubp1->state;
@@ -859,24 +865,6 @@ void hubp1_read_state(struct hubp *hubp)
PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
- REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
- CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
- MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
- META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
- MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
- DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
- MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
- SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
- PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
- REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
- CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
- MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
- META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
- MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
- DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
- MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
- SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
- PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
/* DLG - Per hubp */
REG_GET_2(BLANK_OFFSET_0,
@@ -1024,14 +1012,47 @@ void hubp1_read_state(struct hubp *hubp)
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+
}
+void hubp1_read_state(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp1->state;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ hubp1_read_state_common(hubp);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+}
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
{
enum cursor_pitch hw_pitch;
@@ -1228,10 +1249,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_get_underflow_status = hubp1_get_underflow_status,
.hubp_init = hubp1_init,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
-#endif
};
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 31c8fdd3206c..780af5b3c16f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -47,6 +47,8 @@
SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
@@ -57,8 +59,12 @@
SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
@@ -125,8 +131,6 @@
SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, HUBPREQ, id),\
SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, HUBPREQ, id),\
SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, HUBPREQ, id),\
- SR(DCHUBBUB_SDPIF_FB_BASE),\
- SR(DCHUBBUB_SDPIF_FB_OFFSET),\
SRI(CURSOR_SETTINS, HUBPREQ, id), \
SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
@@ -152,6 +156,8 @@
uint32_t DCSURF_SEC_VIEWPORT_START; \
uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_START_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
@@ -162,8 +168,12 @@
uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \
uint32_t DCSURF_SURFACE_INUSE; \
uint32_t DCSURF_SURFACE_INUSE_HIGH; \
uint32_t DCSURF_SURFACE_INUSE_C; \
@@ -226,14 +236,6 @@
uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB; \
uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR; \
uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR; \
- uint32_t DCHUBBUB_SDPIF_FB_BASE; \
- uint32_t DCHUBBUB_SDPIF_FB_OFFSET; \
- uint32_t DCN_VM_FB_LOCATION_TOP; \
- uint32_t DCN_VM_FB_LOCATION_BASE; \
- uint32_t DCN_VM_FB_OFFSET; \
- uint32_t DCN_VM_AGP_BASE; \
- uint32_t DCN_VM_AGP_BOT; \
- uint32_t DCN_VM_AGP_TOP; \
uint32_t CURSOR_SETTINS; \
uint32_t CURSOR_SETTINGS; \
uint32_t CURSOR_SURFACE_ADDRESS_HIGH; \
@@ -249,7 +251,8 @@
.field_name = reg_name ## __ ## field_name ## post_fix
/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
-#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
+/*1.x, 2.x, and 3.x*/
+#define HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
@@ -265,7 +268,6 @@
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
@@ -289,6 +291,10 @@
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
@@ -299,8 +305,12 @@
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
@@ -372,12 +382,17 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
-
-#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
- HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
+/*2.x and 1.x only*/
+#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+/*2.x and 1.x only*/
+#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)
+
/* Mask/shift struct generation macro for ASICs with VM */
#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
@@ -412,8 +427,6 @@
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mask_sh),\
- HUBP_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh),\
- HUBP_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh),\
@@ -434,7 +447,7 @@
HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-#define DCN_HUBP_REG_FIELD_LIST(type) \
+#define DCN_HUBP_REG_FIELD_BASE_LIST(type) \
type HUBP_BLANK_EN;\
type HUBP_DISABLE;\
type HUBP_TTU_DISABLE;\
@@ -459,7 +472,6 @@
type ROTATION_ANGLE;\
type H_MIRROR_EN;\
type SURFACE_PIXEL_FORMAT;\
- type ALPHA_PLANE_EN;\
type SURFACE_FLIP_TYPE;\
type SURFACE_FLIP_MODE_FOR_STEREOSYNC;\
type SURFACE_FLIP_IN_STEREOSYNC;\
@@ -477,6 +489,10 @@
type PRI_VIEWPORT_HEIGHT_C; \
type PRI_VIEWPORT_X_START_C; \
type PRI_VIEWPORT_Y_START_C; \
+ type SEC_VIEWPORT_WIDTH_C; \
+ type SEC_VIEWPORT_HEIGHT_C; \
+ type SEC_VIEWPORT_X_START_C; \
+ type SEC_VIEWPORT_Y_START_C; \
type PRIMARY_SURFACE_ADDRESS_HIGH;\
type PRIMARY_SURFACE_ADDRESS;\
type SECONDARY_SURFACE_ADDRESS_HIGH;\
@@ -487,8 +503,12 @@
type SECONDARY_META_SURFACE_ADDRESS;\
type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_SURFACE_ADDRESS_C;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_SURFACE_ADDRESS_C;\
type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_C;\
type SURFACE_INUSE_ADDRESS;\
type SURFACE_INUSE_ADDRESS_HIGH;\
type SURFACE_INUSE_ADDRESS_C;\
@@ -589,18 +609,6 @@
type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;\
type MC_VM_SYSTEM_APERTURE_LOW_ADDR;\
type MC_VM_SYSTEM_APERTURE_HIGH_ADDR;\
- type SDPIF_FB_TOP;\
- type SDPIF_FB_BASE;\
- type SDPIF_FB_OFFSET;\
- type SDPIF_AGP_BASE;\
- type SDPIF_AGP_BOT;\
- type SDPIF_AGP_TOP;\
- type FB_TOP;\
- type FB_BASE;\
- type FB_OFFSET;\
- type AGP_BASE;\
- type AGP_BOT;\
- type AGP_TOP;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
@@ -632,6 +640,10 @@
type CURSOR_DST_X_OFFSET; \
type OUTPUT_FP
+#define DCN_HUBP_REG_FIELD_LIST(type) \
+ DCN_HUBP_REG_FIELD_BASE_LIST(type);\
+ type ALPHA_PLANE_EN
+
struct dcn_mi_registers {
HUBP_COMMON_REG_VARIABLE_LIST;
};
@@ -658,6 +670,7 @@ struct dcn_hubp_state {
uint32_t sw_mode;
uint32_t dcc_en;
uint32_t blank_en;
+ uint32_t clock_en;
uint32_t underflow_status;
uint32_t ttu_disable;
uint32_t min_ttu_vblank;
@@ -677,7 +690,7 @@ void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
@@ -699,7 +712,7 @@ void hubp1_program_pixel_format(
void hubp1_program_size(
struct hubp *hubp,
enum surface_pixel_format format,
- const union plane_size *plane_size,
+ const struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc);
void hubp1_program_rotation(
@@ -714,15 +727,13 @@ void hubp1_program_tiling(
void hubp1_dcc_control(struct hubp *hubp,
bool enable,
- bool independent_64b_blks);
+ enum hubp_ind_block_size independent_64b_blks);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate);
-#endif
bool hubp1_is_flip_pending(struct hubp *hubp);
void hubp1_cursor_set_attributes(
@@ -760,5 +771,6 @@ void hubp1_vready_workaround(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
void hubp1_init(struct hubp *hubp);
+void hubp1_read_state_common(struct hubp *hubp);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 2118ea21d7e9..1008ac8a0f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -25,17 +25,18 @@
#include <linux/delay.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "core_types.h"
#include "resource.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
-#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10_hw_sequencer_debug.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
#include "dcn10_optc.h"
-#include "dcn10/dcn10_dpp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_dpp.h"
+#include "dcn10_mpc.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
@@ -49,9 +50,7 @@
#include "clk_mgr.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
#define DC_LOGGER_INIT(logger)
@@ -68,6 +67,8 @@
#define DTN_INFO_MICRO_SEC(ref_cycle) \
print_microsec(dc_ctx, log_ctx, ref_cycle)
+#define GAMMA_HW_POINTS_NUM 256
+
void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
@@ -81,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
+static void dcn10_lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+ /*
+ * Only lock the top pipe's tg to prevent redundant
+ * (un)locking. Also skip if pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ if (lock)
+ tg->funcs->lock(tg);
+ else
+ tg->funcs->unlock(tg);
+ }
+}
+
static void log_mpc_crc(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
@@ -129,9 +157,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
struct resource_pool *pool = dc->res_pool;
int i;
- DTN_INFO("HUBP: format addr_hi width height"
- " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
- " min_ttu_vblank qos_low_wm qos_high_wm\n");
+ DTN_INFO(
+ "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
for (i = 0; i < pool->pipe_count; i++) {
struct hubp *hubp = pool->hubps[i];
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
@@ -139,8 +166,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
hubp->funcs->hubp_read_state(hubp);
if (!s->blank_en) {
- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
- " %6d %8d %7d %8xh",
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
@@ -151,6 +177,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
s->sw_mode,
s->dcc_en,
s->blank_en,
+ s->clock_en,
s->ttu_disable,
s->underflow_status);
DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
@@ -308,21 +335,31 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
- DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel"
- " h_bs h_be h_ss h_se hpol htot vtot underflow\n");
+ DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
-
+ /* Read shared OTG state registers for all DCNx */
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ /*
+ * For DCN2 and greater, a register on the OPP is used to
+ * determine if the CRTC is blanked instead of the OTG. So use
+ * dpg_is_blanked() if it exists, otherwise fall back on the OTG.
+ *
+ * TODO: Implement DCN-specific read_otg_state hooks.
+ */
+ if (pool->opps[i]->funcs->dpg_is_blanked)
+ s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
+ else
+ s.blank_enabled = tg->funcs->is_blanked(tg);
+
//only print if OTG master is enabled
if ((s.otg_enabled & 1) == 0)
continue;
- DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
- " %5d %5d %5d %5d %9d\n",
+ DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
tg->inst,
s.v_blank_start,
s.v_blank_end,
@@ -340,7 +377,8 @@ void dcn10_log_hw_state(struct dc *dc,
s.h_sync_a_pol,
s.h_total,
s.v_total,
- s.underflow_occurred_status);
+ s.underflow_occurred_status,
+ s.blank_enabled);
// Clear underflow for debug purposes
// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
@@ -350,7 +388,6 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
for (i = 0; i < pool->res_cap->num_dsc; i++) {
struct display_stream_compressor *dsc = pool->dscs[i];
@@ -387,7 +424,7 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
- DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n");
+ DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
for (i = 0; i < dc->link_count; i++) {
struct link_encoder *lenc = dc->links[i]->link_enc;
@@ -395,16 +432,16 @@ void dcn10_log_hw_state(struct dc *dc,
if (lenc->funcs->read_state) {
lenc->funcs->read_state(lenc, &s);
- DTN_INFO("[%-3d]: %-12d %-22d %-22d\n",
+ DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
i,
s.dphy_fec_en,
s.dphy_fec_ready_shadow,
- s.dphy_fec_active_status);
+ s.dphy_fec_active_status,
+ s.dp_link_training_complete);
DTN_INFO("\n");
}
}
DTN_INFO("\n");
-#endif
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
@@ -438,14 +475,14 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
return false;
}
-static void enable_power_gating_plane(
+void dcn10_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -460,7 +497,7 @@ static void enable_power_gating_plane(
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
-static void disable_vga(
+void dcn10_disable_vga(
struct dce_hwseq *hws)
{
unsigned int in_vga1_mode = 0;
@@ -493,7 +530,7 @@ static void disable_vga(
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
-static void dpp_pg_control(
+void dcn10_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
@@ -545,7 +582,7 @@ static void dpp_pg_control(
}
}
-static void hubp_pg_control(
+void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
@@ -605,8 +642,8 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dpp_pg_control(hws, plane_id, true);
- hubp_pg_control(hws, plane_id, true);
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -627,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc)
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hubp_pg_control(hws, 0, false);
+ hws->funcs.hubp_pg_control(hws, 0, false);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -656,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hubp_pg_control(hws, 0, true);
+ hws->funcs.hubp_pg_control(hws, 0, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -664,10 +701,27 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
hws->wa_state.DEGVIDCN10_253_applied = true;
}
-static void bios_golden_init(struct dc *dc)
+void dcn10_bios_golden_init(struct dc *dc)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *bp = dc->ctx->dc_bios;
int i;
+ bool allow_self_fresh_force_enable = true;
+
+ if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
+ return;
+
+ if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
+ allow_self_fresh_force_enable =
+ dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
+
+
+ /* WA for making DF sleep when idle after resume from S0i3.
+ * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
+ * command table; if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE was 0
+ * before calling the command table and changed to 1 afterwards,
+ * it should be set back to 0.
+ */
/* initialize dcn global */
bp->funcs->enable_disp_power_gating(bp,
@@ -678,6 +732,12 @@ static void bios_golden_init(struct dc *dc)
bp->funcs->enable_disp_power_gating(bp,
CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
}
+
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ if (allow_self_fresh_force_enable == false &&
+ dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
+
}
static void false_optc_underflow_wa(
@@ -702,13 +762,14 @@ static void false_optc_underflow_wa(
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
}
- tg->funcs->set_blank_data_double_buffer(tg, true);
+ if (tg->funcs->set_blank_data_double_buffer)
+ tg->funcs->set_blank_data_double_buffer(tg, true);
if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
tg->funcs->clear_optc_underflow(tg);
}
-static enum dc_status dcn10_enable_stream_timing(
+enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -799,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -806,13 +868,31 @@ static void dcn10_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
- core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
- else if (pipe_ctx->stream_res.audio) {
- dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ link = pipe_ctx->stream->link;
+ /* DPMS may already be disabled, or the
+ * dpms_off status is incorrect due to the fastboot
+ * feature. When the system resumes from S4 with the second
+ * screen only, dpms_off would be true but
+ * VBIOS lit up eDP, so check the link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ core_link_disable_stream(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /* we have to dynamically arbitrate the audio endpoints */
+ /* we free the resource, so is_audio_acquired needs to be reset */
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
}
-
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -823,6 +903,9 @@ static void dcn10_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -939,8 +1022,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
@@ -965,10 +1049,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
-static void plane_atomic_power_down(struct dc *dc,
+void dcn10_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp)
{
@@ -978,8 +1062,8 @@ static void plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dpp_pg_control(hws, dpp->inst, false);
- hubp_pg_control(hws, hubp->inst, false);
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -991,8 +1075,9 @@ static void plane_atomic_power_down(struct dc *dc,
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
-static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
int opp_id = hubp->opp_id;
@@ -1011,7 +1096,7 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- plane_atomic_power_down(dc,
+ hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
pipe_ctx->plane_res.hubp);
@@ -1023,14 +1108,15 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
- plane_atomic_disable(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disable(dc, pipe_ctx);
apply_DEGVIDCN10_253_wa(dc);
@@ -1038,9 +1124,10 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
-static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
+void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
bool can_apply_seamless_boot = false;
for (i = 0; i < context->stream_count; i++) {
@@ -1065,15 +1152,28 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
- tg->funcs->lock(tg);
- tg->funcs->set_blank(tg, true);
- hwss_wait_for_blank_complete(tg);
+ if (hws->funcs.init_blank != NULL) {
+ hws->funcs.init_blank(dc, tg);
+ tg->funcs->lock(tg);
+ } else {
+ tg->funcs->lock(tg);
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
}
}
- /* Cannot reset the MPC mux if seamless boot */
- if (!can_apply_seamless_boot)
- dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+ /* num_opp will be equal to number of mpcc */
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Cannot reset the MPC mux if seamless boot */
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
+ continue;
+
+ dc->res_pool->mpc->funcs->mpc_init_single_inst(
+ dc->res_pool->mpc, i);
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
@@ -1088,8 +1188,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
if (can_apply_seamless_boot &&
pipe_ctx->stream != NULL &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
- pipe_ctx->stream_res.tg))
+ pipe_ctx->stream_res.tg)) {
+ // Enable double buffering for OTG_BLANK, whether or not
+ // seamless boot is enabled, to suppress global sync
+ // signals when the OTG is blanked. This prevents the pipe
+ // from requesting data while in PSR.
+ tg->funcs->tg_init(tg);
continue;
+ }
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1111,12 +1217,12 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hwss1_plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dcn10_disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -1125,15 +1231,24 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
-static void dcn10_init_hw(struct dc *dc)
+void dcn10_init_hw(struct dc *dc)
{
int i;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
+ struct resource_pool *res_pool = dc->res_pool;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+ if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
+ dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+
REG_WRITE(REFCLK_CNTL, 0);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
@@ -1147,31 +1262,40 @@ static void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- enable_power_gating_plane(dc->hwseq, true);
+ // Enable ability to power gate / don't force power on permanently
+ hws->funcs.enable_power_gating_plane(hws, true);
- /* end of FPGA. Below if real ASIC */
return;
}
- if (!dcb->funcs->is_accelerated_mode(dcb)) {
- bool allow_self_fresh_force_enable =
- hububu1_is_allow_self_refresh_enabled(
- dc->res_pool->hubbub);
+ if (!dcb->funcs->is_accelerated_mode(dcb))
+ hws->funcs.disable_vga(dc->hwseq);
- bios_golden_init(dc);
+ hws->funcs.bios_golden_init(dc);
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- /* WA for making DF sleep when idle after resume from S0i3.
- * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
- * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
- * before calling command table and it changed to 1 after,
- * it should be set back to 0.
- */
- if (allow_self_fresh_force_enable == false &&
- hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
- hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, true);
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (res_pool->dccg && res_pool->hubbub) {
- disable_vga(dc->hwseq);
- }
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
+ }
+ } else
+ ASSERT_CRITICAL(false);
for (i = 0; i < dc->link_count; i++) {
/* Power up AND update implementation according to the
@@ -1188,6 +1312,11 @@ static void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
+ /* Power gate DSCs */
+ for (i = 0; i < res_pool->res_cap->num_dsc; i++)
+ if (hws->funcs.dsc_pg_control != NULL)
+ hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1195,11 +1324,11 @@ static void dcn10_init_hw(struct dc *dc)
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
- dc->hwss.init_pipes(dc, dc->current_state);
+ hws->funcs.init_pipes(dc, dc->current_state);
}
- for (i = 0; i < dc->res_pool->audio_count; i++) {
- struct audio *audio = dc->res_pool->audios[i];
+ for (i = 0; i < res_pool->audio_count; i++) {
+ struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
@@ -1209,7 +1338,7 @@ static void dcn10_init_hw(struct dc *dc)
abm->funcs->abm_init(abm);
}
- if (dmcu != NULL)
+ if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
if (abm != NULL && dmcu != NULL)
@@ -1227,16 +1356,19 @@ static void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- enable_power_gating_plane(dc->hwseq, true);
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- memset(&dc->clk_mgr->clks, 0, sizeof(dc->clk_mgr->clks));
}
-static void dcn10_reset_hw_ctx_wrap(
+void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -1255,8 +1387,8 @@ static void dcn10_reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating)
- dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1289,9 +1421,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-
-
-static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
@@ -1316,8 +1446,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
-static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
@@ -1349,6 +1479,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
default:
result = false;
break;
@@ -1366,9 +1501,36 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-static bool
-dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream)
+#define MAX_NUM_HW_POINTS 0x200
+
+static void log_tf(struct dc_context *ctx,
+ struct dc_transfer_func *tf, uint32_t hw_points_num)
+{
+ // DC_LOG_GAMMA is the default logging of all hw points
+ // DC_LOG_ALL_GAMMA logs all points, not only hw points
+ // DC_LOG_ALL_TF_CHANNELS logs all channels of the tf
+ int i = 0;
+
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_GAMMA("Gamma Correction TF");
+ DC_LOG_ALL_GAMMA("Logging all tf points...");
+ DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
+
+ for (i = 0; i < hw_points_num; i++) {
+ DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
+ }
+
+ for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
+ DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
+ }
+}
+
+bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -1394,14 +1556,23 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
+ if (stream != NULL && stream->ctx != NULL &&
+ stream->out_transfer_func != NULL) {
+ log_tf(stream->ctx,
+ stream->out_transfer_func,
+ dpp->regamma_params.hw_points_num);
+ }
+
return true;
}
-static void dcn10_pipe_control_lock(
+void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock)
{
+ struct dce_hwseq *hws = dc->hwseq;
+
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
@@ -1409,7 +1580,7 @@ static void dcn10_pipe_control_lock(
return;
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
@@ -1417,7 +1588,7 @@ static void dcn10_pipe_control_lock(
pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
static bool wait_for_reset_trigger_to_occur(
@@ -1457,7 +1628,7 @@ static bool wait_for_reset_trigger_to_occur(
return rc;
}
-static void dcn10_enable_timing_synchronization(
+void dcn10_enable_timing_synchronization(
struct dc *dc,
int group_index,
int group_size,
@@ -1487,7 +1658,7 @@ static void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Sync complete\n");
}
-static void dcn10_enable_per_frame_crtc_position_reset(
+void dcn10_enable_per_frame_crtc_position_reset(
struct dc *dc,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -1512,10 +1683,10 @@ static void dcn10_enable_per_frame_crtc_position_reset(
}
/*static void print_rq_dlg_ttu(
- struct dc *core_dc,
+ struct dc *dc,
struct pipe_ctx *pipe_ctx)
{
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== DML TTU Output parameters [%d] ==============\n"
"qos_level_low_wm: %d, \n"
"qos_level_high_wm: %d, \n"
@@ -1545,7 +1716,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
);
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== DML DLG Output parameters [%d] ==============\n"
"refcyc_h_blank_end: %d, \n"
"dlg_vblank_end: %d, \n"
@@ -1580,7 +1751,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
);
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\ndst_y_per_meta_row_nom_l: %d, \n"
"refcyc_per_meta_chunk_nom_l: %d, \n"
"refcyc_per_line_delivery_pre_l: %d, \n"
@@ -1610,7 +1781,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
);
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== DML RQ Output parameters [%d] ==============\n"
"chunk_size: %d \n"
"min_chunk_size: %d \n"
@@ -1725,7 +1896,7 @@ static void dcn10_enable_plane(
struct dce_hwseq *hws = dc->hwseq;
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
undo_DEGVIDCN10_253_wa(dc);
@@ -1782,11 +1953,11 @@ static void dcn10_enable_plane(
dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
}
-static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
int i = 0;
struct dpp_grph_csc_adjustment adjust;
@@ -1804,69 +1975,65 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
-static void dcn10_program_output_csc(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id)
-{
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
- } else {
- if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
- pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
- }
-}
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
{
- if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
- return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
- return true;
- return false;
-}
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
+ if (pipe_ctx->top_pipe) {
+ struct pipe_ctx *top = pipe_ctx->top_pipe;
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
- if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
- return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
- return true;
+ while (top->top_pipe)
+ top = top->top_pipe; // Traverse to top pipe_ctx
+ if (top->plane_state && top->plane_state->layer_index == 0)
+ return true; // Front MPO plane not hidden
+ }
+ }
return false;
}
-bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
- if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
- return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
- return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
- return true;
- return false;
+ // Override rear plane RGB bias to fix MPO brightness
+ uint16_t rgb_bias = matrix[3];
+
+ matrix[3] = 0;
+ matrix[7] = 0;
+ matrix[11] = 0;
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+ matrix[3] = rgb_bias;
+ matrix[7] = rgb_bias;
+ matrix[11] = rgb_bias;
}
-bool is_rgb_cspace(enum dc_color_space output_color_space)
+void dcn10_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
{
- switch (output_color_space) {
- case COLOR_SPACE_SRGB:
- case COLOR_SPACE_SRGB_LIMITED:
- case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- case COLOR_SPACE_ADOBERGB:
- return true;
- case COLOR_SPACE_YCBCR601:
- case COLOR_SPACE_YCBCR709:
- case COLOR_SPACE_YCBCR601_LIMITED:
- case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
- return false;
- default:
- /* Add a case to switch */
- BREAK_TO_DEBUGGER();
- return false;
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
+
+ /* MPO is broken with RGB colorspaces when the OCSC matrix
+ * brightness offset is >= 0 on DCN1, because the OCSC is applied
+ * before MPC: blending adds the offsets from front + rear to the rear plane.
+ *
+ * The fix is to set the RGB bias to 0 on the rear plane; the top plane's
+ * black-value pixels then add the offset instead of rear + front.
+ */
+
+ int16_t rgb_bias = matrix[3];
+ // matrix[3/7/11] are all the same offset value
+
+ if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
+ dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
+ } else {
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+ }
+ }
+ } else {
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
@@ -1943,70 +2110,7 @@ void dcn10_get_hdr_visual_confirm_color(
}
}
-static uint16_t fixed_point_to_int_frac(
- struct fixed31_32 arg,
- uint8_t integer_bits,
- uint8_t fractional_bits)
-{
- int32_t numerator;
- int32_t divisor = 1 << fractional_bits;
-
- uint16_t result;
-
- uint16_t d = (uint16_t)dc_fixpt_floor(
- dc_fixpt_abs(
- arg));
-
- if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
- numerator = (uint16_t)dc_fixpt_floor(
- dc_fixpt_mul_int(
- arg,
- divisor));
- else {
- numerator = dc_fixpt_floor(
- dc_fixpt_sub(
- dc_fixpt_from_int(
- 1LL << integer_bits),
- dc_fixpt_recip(
- dc_fixpt_from_int(
- divisor))));
- }
-
- if (numerator >= 0)
- result = (uint16_t)numerator;
- else
- result = (uint16_t)(
- (1 << (integer_bits + fractional_bits + 1)) + numerator);
-
- if ((result != 0) && dc_fixpt_lt(
- arg, dc_fixpt_zero))
- result |= 1 << (integer_bits + fractional_bits);
-
- return result;
-}
-
-void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
- const struct dc_plane_state *plane_state)
-{
- if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
- && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
- && plane_state->input_csc_color_matrix.enable_adjustment
- && plane_state->coeff_reduction_factor.value != 0) {
- bias_and_scale->scale_blue = fixed_point_to_int_frac(
- dc_fixpt_mul(plane_state->coeff_reduction_factor,
- dc_fixpt_from_fraction(256, 255)),
- 2,
- 13);
- bias_and_scale->scale_red = bias_and_scale->scale_blue;
- bias_and_scale->scale_green = bias_and_scale->scale_blue;
- } else {
- bias_and_scale->scale_blue = 0x2000;
- bias_and_scale->scale_red = 0x2000;
- bias_and_scale->scale_green = 0x2000;
- }
-}
-
-static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
struct dc_bias_and_scale bns_params = {0};
@@ -2015,21 +2119,18 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
plane_state->color_space,
NULL);
-#else
- plane_state->color_space);
-#endif
//set scale and bias registers
- dcn10_build_prescale_params(&bns_params, plane_state);
+ build_prescale_params(&bns_params, plane_state);
if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = {{0}};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@@ -2039,10 +2140,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- dcn10_get_hdr_visual_confirm_color(
+ hws->funcs.get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- dcn10_get_surface_visual_confirm_color(
+ hws->funcs.get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else {
color_space_to_black_color(
@@ -2124,15 +2225,16 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
-void update_dchubp_dpp(
+static void dcn10_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- union plane_size size = plane_state->plane_size;
+ struct plane_size size = plane_state->plane_size;
unsigned int compat_level = 0;
/* depends on DML calculation, DPP clock value may change dynamically */
@@ -2178,16 +2280,16 @@ void update_dchubp_dpp(
&pipe_ctx->ttu_regs);
}
- size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.bpp_change)
- update_dpp(dpp, plane_state);
+ dcn10_update_dpp(dpp, plane_state);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
plane_state->update_flags.bits.global_alpha_change)
- dc->hwss.update_mpcc(dc, pipe_ctx);
+ hws->funcs.update_mpcc(dc, pipe_ctx);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2216,7 +2318,7 @@ void update_dchubp_dpp(
if (plane_state->update_flags.bits.full_update) {
/*gamut remap*/
- program_gamut_remap(pipe_ctx);
+ dc->hwss.program_gamut_remap(pipe_ctx);
dc->hwss.program_output_csc(dc,
pipe_ctx,
@@ -2247,13 +2349,13 @@ void update_dchubp_dpp(
hubp->power_gated = false;
- dc->hwss.update_plane_addr(dc, pipe_ctx);
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
hubp->funcs->set_blank(hubp, false);
}
-static void dcn10_blank_pixel_data(
+void dcn10_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank)
@@ -2296,10 +2398,9 @@ static void dcn10_blank_pixel_data(
}
}
-void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
- struct fixed31_32 multiplier = dc_fixpt_from_fraction(
- pipe_ctx->plane_state->sdr_white_level, 80);
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
struct custom_float_format fmt;
@@ -2307,7 +2408,8 @@ void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
fmt.mantissa_bits = 12;
fmt.sign = true;
- if (pipe_ctx->plane_state->sdr_white_level > 80)
+
+ if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
@@ -2319,17 +2421,19 @@ void dcn10_program_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
+
if (pipe_ctx->plane_state->update_flags.bits.full_update)
dcn10_enable_plane(dc, pipe_ctx, context);
- update_dchubp_dpp(dc, pipe_ctx, context);
+ dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
- set_hdr_multiplier(pipe_ctx);
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
* only do gamma programming for full update.
@@ -2338,14 +2442,16 @@ void dcn10_program_pipe(
* doing heavy calculation and programming
*/
if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
-static void program_all_pipe_in_tree(
+static void dcn10_program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
+
if (pipe_ctx->top_pipe == NULL) {
bool blank = !is_pipe_tree_visible(pipe_ctx);
@@ -2359,18 +2465,20 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state != NULL)
- dcn10_program_pipe(dc, pipe_ctx, context);
+ hws->funcs.program_pipe(dc, pipe_ctx, context);
if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
- program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+ dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
}
-struct pipe_ctx *find_top_pipe_for_stream(
+static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
struct dc *dc,
struct dc_state *context,
const struct dc_stream_state *stream)
@@ -2388,25 +2496,26 @@ struct pipe_ctx *find_top_pipe_for_stream(
if (pipe_ctx->stream != stream)
continue;
- if (!pipe_ctx->top_pipe)
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
return pipe_ctx;
}
return NULL;
}
-static void dcn10_apply_ctx_for_surface(
+void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
int i;
struct timing_generator *tg;
uint32_t underflow_check_delay_us;
bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
+ dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
if (!top_pipe_to_program)
@@ -2419,23 +2528,23 @@ static void dcn10_apply_ctx_for_surface(
underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
- if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
- ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
+ if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
+ ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
if (interdependent_update)
- lock_all_pipes(dc, context, true);
+ dcn10_lock_all_pipes(dc, context, true);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us);
- if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
- ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
+ if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
+ ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
if (num_planes == 0) {
/* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
}
/* Disconnect unused mpcc */
@@ -2453,7 +2562,7 @@ static void dcn10_apply_ctx_for_surface(
if (old_pipe_ctx->stream_res.tg == tg &&
old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dcn10_disable_plane(dc, old_pipe_ctx);
+ dc->hwss.disable_plane(dc, old_pipe_ctx);
}
if ((!pipe_ctx->plane_state ||
@@ -2461,7 +2570,7 @@ static void dcn10_apply_ctx_for_surface(
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
DC_LOG_DC("Reset mpcc for pipe %d\n",
@@ -2470,13 +2579,11 @@ static void dcn10_apply_ctx_for_surface(
}
if (num_planes > 0)
- program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
-#endif
+ if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2492,7 +2599,7 @@ static void dcn10_apply_ctx_for_surface(
}
if (interdependent_update)
- lock_all_pipes(dc, context, false);
+ dcn10_lock_all_pipes(dc, context, false);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
@@ -2501,7 +2608,7 @@ static void dcn10_apply_ctx_for_surface(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i])
- dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i]) {
@@ -2529,14 +2636,15 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex
}
}
-static void dcn10_prepare_bandwidth(
+void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
@@ -2558,17 +2666,18 @@ static void dcn10_prepare_bandwidth(
dcn_bw_notify_pplib_of_wm_ranges(dc);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
-static void dcn10_optimize_bandwidth(
+void dcn10_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
@@ -2590,19 +2699,24 @@ static void dcn10_optimize_bandwidth(
dcn_bw_notify_pplib_of_wm_ranges(dc);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
-static void set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, int vmin, int vmax)
+void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, unsigned int vmin, unsigned int vmax,
+ unsigned int vmid, unsigned int vmid_frame_number)
{
int i = 0;
struct drr_params params = {0};
// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
unsigned int event_triggers = 0x800;
+	// Note: DRR trigger events are generated regardless of whether num_frames is met.
+ unsigned int num_frames = 2;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
+ params.vertical_total_mid = vmid;
+ params.vertical_total_mid_frame_num = vmid_frame_number;
/* TODO: If multiple pipes are to be supported, you need
* some GSL stuff. Static screen triggers may be programmed differently
@@ -2614,11 +2728,11 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
if (vmax != 0 && vmin != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
- event_triggers);
+ event_triggers, num_frames);
}
}
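
Illustration only: a hypothetical caller of the extended set_drr() hook. The pipe array, timing pointer and chosen totals are placeholders, not values taken from this patch.

	/* Request a DRR window of one to three frame times on a single pipe,
	 * with no mid-point transition (vmid = 0).
	 */
	struct pipe_ctx *pipes[1] = { top_pipe };	/* placeholder pipe */

	dc->hwss.set_drr(pipes, 1,
			 timing->v_total,	/* vmin: fastest refresh */
			 timing->v_total * 3,	/* vmax: slowest refresh */
			 0,			/* vmid */
			 0);			/* vmid_frame_number */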
-static void get_position(struct pipe_ctx **pipe_ctx,
+void dcn10_get_position(struct pipe_ctx **pipe_ctx,
int num_pipes,
struct crtc_position *position)
{
@@ -2630,22 +2744,23 @@ static void get_position(struct pipe_ctx **pipe_ctx,
pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
}
-static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events)
+void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
- unsigned int value = 0;
+ unsigned int triggers = 0;
- if (events->surface_update)
- value |= 0x80;
- if (events->cursor_update)
- value |= 0x2;
- if (events->force_trigger)
- value |= 0x1;
+ if (params->triggers.surface_update)
+ triggers |= 0x80;
+ if (params->triggers.cursor_update)
+ triggers |= 0x2;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
}
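
Illustration only: how a caller might fill the new dc_static_screen_params consumed above; pipe_ctx is a placeholder pipe pointer, and the two-frame threshold mirrors the num_frames default used for DRR.

	struct dc_static_screen_params params = { 0 };

	params.triggers.surface_update = true;	/* maps to trigger bit 0x80 above */
	params.num_frames = 2;			/* unchanged frames before entry */
	dc->hwss.set_static_screen_control(&pipe_ctx, 1, &params);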
static void dcn10_config_stereo_parameters(
@@ -2685,13 +2800,20 @@ static void dcn10_config_stereo_parameters(
return;
}
-static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
+void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
struct crtc_stereo_flags flags = { 0 };
struct dc_stream_state *stream = pipe_ctx->stream;
dcn10_config_stereo_parameters(stream, &flags);
+ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
+ if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
+ dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
+ } else {
+ dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
+ }
+
pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
flags.PROGRAM_STEREO == 1 ? true:false,
@@ -2717,15 +2839,16 @@ static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in
return NULL;
}
-static void dcn10_wait_for_mpcc_disconnect(
+void dcn10_wait_for_mpcc_disconnect(
struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
int mpcc_inst;
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
if (!pipe_ctx->stream_res.opp)
@@ -2742,12 +2865,12 @@ static void dcn10_wait_for_mpcc_disconnect(
}
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
}
-static bool dcn10_dummy_display_power_gating(
+bool dcn10_dummy_display_power_gating(
struct dc *dc,
uint8_t controller_id,
struct dc_bios *dcb,
@@ -2756,7 +2879,7 @@ static bool dcn10_dummy_display_power_gating(
return true;
}
-static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2780,19 +2903,42 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
}
}
-static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
- if (hws->ctx->dc->res_pool->hubbub != NULL) {
- struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
+ struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
- if (hubp->funcs->hubp_update_dchub)
- hubp->funcs->hubp_update_dchub(hubp, dh_data);
- else
- hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ /* In DCN, this programming sequence is owned by the hubbub */
+ hubbub->funcs->update_dchub(hubbub, dh_data);
+}
+
+static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *test_pipe;
+ const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
+ int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+
+	/*
+	 * Disable the cursor if there is another pipe above this one with a
+	 * plane that contains this pipe's viewport, to prevent a double
+	 * cursor and incorrect scaling artifacts.
+	 */
+ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ test_pipe = test_pipe->top_pipe) {
+ if (!test_pipe->plane_state->visible)
+ continue;
+
+ r2 = &test_pipe->plane_res.scl_data.recout;
+ r2_r = r2->x + r2->width;
+ r2_b = r2->y + r2->height;
+
+ if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
+ return true;
}
+
+ return false;
}
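
Illustration only: the containment test open-coded in the loop above, written as a hypothetical helper for readability.

	static bool rect_is_contained(const struct rect *r1, const struct rect *r2)
	{
		/* true when r1 lies entirely inside r2 */
		return r1->x >= r2->x && r1->y >= r2->y &&
		       r1->x + r1->width  <= r2->x + r2->width &&
		       r1->y + r1->height <= r2->y + r2->height;
	}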
-static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2806,35 +2952,108 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
- uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
- uint32_t x_offset = min(x_plane, pos_cpy.x);
- uint32_t y_offset = min(y_plane, pos_cpy.y);
+ bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
+ (pipe_ctx->bottom_pipe != NULL);
- pos_cpy.x -= x_offset;
- pos_cpy.y -= y_offset;
- pos_cpy.x_hotspot += (x_plane - x_offset);
- pos_cpy.y_hotspot += (y_plane - y_offset);
+ int x_plane = pipe_ctx->plane_state->dst_rect.x;
+ int y_plane = pipe_ctx->plane_state->dst_rect.y;
+ int x_pos = pos_cpy.x;
+ int y_pos = pos_cpy.y;
+
+ // translate cursor from stream space to plane space
+ x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
+ pipe_ctx->plane_state->dst_rect.height;
+
+ if (x_pos < 0) {
+ pos_cpy.x_hotspot -= x_pos;
+ x_pos = 0;
+ }
+
+ if (y_pos < 0) {
+ pos_cpy.y_hotspot -= y_pos;
+ y_pos = 0;
+ }
+
+ pos_cpy.x = (uint32_t)x_pos;
+ pos_cpy.y = (uint32_t)y_pos;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
+ if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+ pos_cpy.enable = false;
+
+ // Swap axis and mirror horizontally
+ if (param.rotation == ROTATION_ANGLE_90) {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
+ (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
+ pos_cpy.y = temp_x;
+ }
+ // Swap axis and mirror vertically
+ else if (param.rotation == ROTATION_ANGLE_270) {
+ uint32_t temp_y = pos_cpy.y;
+ int viewport_height =
+ pipe_ctx->plane_res.scl_data.viewport.height;
+
+ if (pipe_split_on) {
+ if (pos_cpy.x > viewport_height) {
+ pos_cpy.x = pos_cpy.x - viewport_height;
+ pos_cpy.y = viewport_height - pos_cpy.x;
+ } else {
+ pos_cpy.y = 2 * viewport_height - pos_cpy.x;
+ }
+ } else
+ pos_cpy.y = viewport_height - pos_cpy.x;
+ pos_cpy.x = temp_y;
+ }
+ // Mirror horizontally and vertically
+ else if (param.rotation == ROTATION_ANGLE_180) {
+ int viewport_width =
+ pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_x =
+ pipe_ctx->plane_res.scl_data.viewport.x;
+
+ if (pipe_split_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
+ }
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
+ }
+ pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
+ }
+
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
-static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.dpp, attributes->color_format);
+ pipe_ctx->plane_res.dpp, attributes);
}
-static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
{
uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
struct fixed31_32 multiplier;
@@ -2861,12 +3080,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &opt_attr);
}
-/**
-* apply_front_porch_workaround TODO FPGA still need?
-*
-* This is a workaround for a bug that has existed since R5xx and has not been
-* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
-*/
+/*
+ * apply_front_porch_workaround  TODO: is this still needed on FPGA?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed; keep the front porch at a minimum of 2 for interlaced mode or 1 for
+ * progressive.
+ */
static void apply_front_porch_workaround(
struct dc_crtc_timing *timing)
{
@@ -2879,7 +3098,7 @@ static void apply_front_porch_workaround(
}
}
-int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
struct dc_crtc_timing patched_crtc_timing;
@@ -2908,34 +3127,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
-void lock_all_pipes(struct dc *dc,
- struct dc_state *context,
- bool lock)
-{
- struct pipe_ctx *pipe_ctx;
- struct timing_generator *tg;
- int i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe_ctx = &context->res_ctx.pipe_ctx[i];
- tg = pipe_ctx->stream_res.tg;
- /*
- * Only lock the top pipe's tg to prevent redundant
- * (un)locking. Also skip if pipe is disabled.
- */
- if (pipe_ctx->top_pipe ||
- !pipe_ctx->stream || !pipe_ctx->plane_state ||
- !tg->funcs->is_tg_enabled(tg))
- continue;
-
- if (lock)
- tg->funcs->lock(tg);
- else
- tg->funcs->unlock(tg);
- }
-}
-
-static void calc_vupdate_position(
+static void dcn10_calc_vupdate_position(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
uint32_t *end_line)
@@ -2943,7 +3136,7 @@ static void calc_vupdate_position(
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
int vline_int_offset_from_vupdate =
pipe_ctx->stream->periodic_interrupt0.lines_offset;
- int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
+ int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
int start_position;
if (vline_int_offset_from_vupdate > 0)
@@ -2964,7 +3157,8 @@ static void calc_vupdate_position(
*end_line = 2;
}
-static void cal_vline_position(
+static void dcn10_cal_vline_position(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum vline_select vline,
uint32_t *start_line,
@@ -2979,7 +3173,8 @@ static void cal_vline_position(
switch (ref_point) {
case START_V_UPDATE:
- calc_vupdate_position(
+ dcn10_calc_vupdate_position(
+ dc,
pipe_ctx,
start_line,
end_line);
@@ -2993,7 +3188,8 @@ static void cal_vline_position(
}
}
-static void dcn10_setup_periodic_interrupt(
+void dcn10_setup_periodic_interrupt(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum vline_select vline)
{
@@ -3003,7 +3199,7 @@ static void dcn10_setup_periodic_interrupt(
uint32_t start_line = 0;
uint32_t end_line = 0;
- cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
+ dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
@@ -3014,10 +3210,10 @@ static void dcn10_setup_periodic_interrupt(
}
}
-static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0) {
ASSERT(0);
@@ -3028,12 +3224,13 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
@@ -3047,11 +3244,11 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
-static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
const uint8_t *custom_sdp_message,
unsigned int sdp_message_size)
{
@@ -3062,66 +3259,53 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
sdp_message_size);
}
}
+enum dc_status dcn10_set_clock(struct dc *dc,
+ enum dc_clock_type clock_type,
+ uint32_t clk_khz,
+ uint32_t stepping)
+{
+ struct dc_state *context = dc->current_state;
+ struct dc_clock_config clock_cfg = {0};
+ struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
+
+	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
+		return DC_FAIL_UNSUPPORTED_1;
+
+	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
+			context, clock_type, &clock_cfg);
+
+ if (clk_khz > clock_cfg.max_clock_khz)
+ return DC_FAIL_CLK_EXCEED_MAX;
+
+ if (clk_khz < clock_cfg.min_clock_khz)
+ return DC_FAIL_CLK_BELOW_MIN;
+
+ if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
+ return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
+
+	/* update the internally cached request clock for update_clocks() to use */
+ if (clock_type == DC_CLOCK_TYPE_DISPCLK)
+ current_clocks->dispclk_khz = clk_khz;
+ else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
+ current_clocks->dppclk_khz = clk_khz;
+ else
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
+ context, true);
+ return DC_OK;
-static const struct hw_sequencer_funcs dcn10_funcs = {
- .program_gamut_remap = program_gamut_remap,
- .init_hw = dcn10_init_hw,
- .init_pipes = dcn10_init_pipes,
- .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
- .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
- .update_plane_addr = dcn10_update_plane_addr,
- .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
- .update_dchub = dcn10_update_dchub,
- .update_mpcc = dcn10_update_mpcc,
- .update_pending_status = dcn10_update_pending_status,
- .set_input_transfer_func = dcn10_set_input_transfer_func,
- .set_output_transfer_func = dcn10_set_output_transfer_func,
- .program_output_csc = dcn10_program_output_csc,
- .power_down = dce110_power_down,
- .enable_accelerated_mode = dce110_enable_accelerated_mode,
- .enable_timing_synchronization = dcn10_enable_timing_synchronization,
- .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
- .update_info_frame = dce110_update_info_frame,
- .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
- .enable_stream = dce110_enable_stream,
- .disable_stream = dce110_disable_stream,
- .unblank_stream = dcn10_unblank_stream,
- .blank_stream = dce110_blank_stream,
- .enable_audio_stream = dce110_enable_audio_stream,
- .disable_audio_stream = dce110_disable_audio_stream,
- .enable_display_power_gating = dcn10_dummy_display_power_gating,
- .disable_plane = dcn10_disable_plane,
- .blank_pixel_data = dcn10_blank_pixel_data,
- .pipe_control_lock = dcn10_pipe_control_lock,
- .prepare_bandwidth = dcn10_prepare_bandwidth,
- .optimize_bandwidth = dcn10_optimize_bandwidth,
- .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
- .enable_stream_timing = dcn10_enable_stream_timing,
- .set_drr = set_drr,
- .get_position = get_position,
- .set_static_screen_control = set_static_screen_control,
- .setup_stereo = dcn10_setup_stereo,
- .set_avmute = dce110_set_avmute,
- .log_hw_state = dcn10_log_hw_state,
- .get_hw_state = dcn10_get_hw_state,
- .clear_status_bits = dcn10_clear_status_bits,
- .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
- .edp_backlight_control = hwss_edp_backlight_control,
- .edp_power_control = hwss_edp_power_control,
- .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
- .set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
- .disable_stream_gating = NULL,
- .enable_stream_gating = NULL,
- .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
- .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
- .did_underflow_occur = dcn10_did_underflow_occur
-};
-
-
-void dcn10_hw_sequencer_construct(struct dc *dc)
-{
- dc->hwss = dcn10_funcs;
}
+void dcn10_get_clock(struct dc *dc,
+ enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg)
+{
+ struct dc_state *context = dc->current_state;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
+ dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
+
+}
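
Illustration only: a hypothetical sequence using the two new clock hooks; error handling is reduced to an early return.

	struct dc_clock_config cfg = { 0 };
	enum dc_status status;

	dc->hwss.get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);

	status = dc->hwss.set_clock(dc, DC_CLOCK_TYPE_DISPCLK,
				    cfg.max_clock_khz, 0 /* stepping */);
	if (status != DC_OK)
		return;	/* request outside the configured limits */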
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index d3616b1948cc..4d20f6586bb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -27,68 +27,160 @@
#define __DC_HWSS_DCN10_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
void dcn10_hw_sequencer_construct(struct dc *dc);
-extern void fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg);
-
-bool is_rgb_cspace(enum dc_color_space output_color_space);
-
-void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+enum dc_status dcn10_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn10_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn10_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn10_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context);
+void dcn10_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn10_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn10_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_vga(
+ struct dce_hwseq *hws);
void dcn10_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-
-void dcn10_get_hw_state(
+void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx);
+void dcn10_init_hw(struct dc *dc);
+void dcn10_init_pipes(struct dc *dc, struct dc_state *context);
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
+void dce110_power_down(struct dc *dc);
+void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
+void dcn10_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+void dcn10_enable_per_frame_crtc_position_reset(
+ struct dc *dc,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx);
+bool dcn10_dummy_display_power_gating(
struct dc *dc,
- char *pBuf, unsigned int bufSize,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, unsigned int vmin, unsigned int vmax,
+ unsigned int vmid, unsigned int vmid_frame_number);
+void dcn10_get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position);
+void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc);
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+void dcn10_get_hw_state(struct dc *dc,
+ char *pBuf,
+ unsigned int bufSize,
unsigned int mask);
-
void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
-
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
-
-void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
-
+void dcn10_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx);
+void dce110_edp_backlight_control(
+ struct dc_link *link,
+ bool enable);
+void dce110_edp_power_control(
+ struct dc_link *link,
+ bool power_up);
+void dce110_edp_wait_for_hpd_ready(
+ struct dc_link *link,
+ bool power_up);
+void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx);
+void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
+void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
+void dcn10_setup_periodic_interrupt(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline);
+enum dc_status dcn10_set_clock(struct dc *dc,
+ enum dc_clock_type clock_type,
+ uint32_t clk_khz,
+ uint32_t stepping);
+void dcn10_get_clock(struct dc *dc,
+ enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg);
+bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_bios_golden_init(struct dc *dc);
+void dcn10_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
void dcn10_get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
void dcn10_get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
-bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-void update_dchubp_dpp(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context);
-
-struct pipe_ctx *find_top_pipe_for_stream(
- struct dc *dc,
- struct dc_state *context,
- const struct dc_stream_state *stream);
-
-int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
-
-void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
- const struct dc_plane_state *plane_state);
-void lock_all_pipes(struct dc *dc,
- struct dc_state *context,
- bool lock);
+void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 6e47444109d7..7f4766e45dff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -45,7 +45,7 @@
#include "dcn10_cm_common.h"
#include "clk_mgr.h"
-static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
+unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
unsigned int ret_vsnprintf;
unsigned int chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h
new file mode 100644
index 000000000000..596f95c22e85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN10_DEBUG_H__
+#define __DC_HWSS_DCN10_DEBUG_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+
+void dcn10_get_hw_state(struct dc *dc,
+ char *pBuf,
+ unsigned int bufSize,
+ unsigned int mask);
+
+#endif /* __DC_HWSS_DCN10_DEBUG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
new file mode 100644
index 000000000000..e7e5352ec424
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hw_sequencer_private.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
+
+static const struct hw_sequencer_funcs dcn10_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn10_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn10_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn10_disable_plane,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+ .prepare_bandwidth = dcn10_prepare_bandwidth,
+ .optimize_bandwidth = dcn10_optimize_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn10_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .program_pipe = dcn10_program_pipe,
+ .update_mpcc = dcn10_update_mpcc,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn10_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn10_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = NULL,
+ .disable_vga = dcn10_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn10_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn10_enable_power_gating_plane,
+ .dpp_pg_control = dcn10_dpp_pg_control,
+ .hubp_pg_control = dcn10_hubp_pg_control,
+ .dsc_pg_control = NULL,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+};
+
+void dcn10_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn10_funcs;
+ dc->hwseq->funcs = dcn10_private_funcs;
+}
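
Illustration only: after dcn10_hw_sequencer_construct(), DM-facing hooks live on dc->hwss while sequencer-internal steps are reached through dc->hwseq->funcs, for example:

	struct dce_hwseq *hws = dc->hwseq;

	/* public hook, callable from dc/dm code */
	dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id);

	/* private hook, intended for use inside the hw sequencer itself */
	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);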
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
index 112cda8fa1a8..8c6fd7b844a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2014 Intel Corporation
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -8,24 +8,26 @@
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#ifndef _I915_GEM_RENDER_STATE_H_
-#define _I915_GEM_RENDER_STATE_H_
+#ifndef __DC_DCN10_INIT_H__
+#define __DC_DCN10_INIT_H__
-struct i915_request;
+struct dc;
-int i915_gem_render_state_emit(struct i915_request *rq);
+void dcn10_hw_sequencer_construct(struct dc *dc);
-#endif /* _I915_GEM_RENDER_STATE_H_ */
+#endif /* __DC_DCN10_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
index 0fb9e440cb9d..f05371c1fc36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = {
.ipp_destroy = dcn10_ipp_destroy
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static const struct ipp_funcs dcn20_ipp_funcs = {
.ipp_destroy = dcn10_ipp_destroy
};
-#endif
void dcn10_ipp_construct(
struct dcn10_ipp *ippn10,
@@ -76,7 +74,6 @@ void dcn10_ipp_construct(
ippn10->ipp_mask = ipp_mask;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn20_ipp_construct(
struct dcn10_ipp *ippn10,
struct dc_context *ctx,
@@ -93,4 +90,3 @@ void dcn20_ipp_construct(
ippn10->ipp_shift = ipp_shift;
ippn10->ipp_mask = ipp_mask;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index cfa24459242b..f0e0d07b0311 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -49,7 +49,6 @@
SRI(CURSOR_HOT_SPOT, CURSOR, id), \
SRI(CURSOR_DST_OFFSET, CURSOR, id)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define IPP_REG_LIST_DCN20(id) \
IPP_REG_LIST_DCN(id), \
SRI(CURSOR_SETTINGS, HUBPREQ, id), \
@@ -60,7 +59,6 @@
SRI(CURSOR_POSITION, CURSOR0_, id), \
SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
-#endif
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
@@ -105,7 +103,6 @@
IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define IPP_MASK_SH_LIST_DCN20(mask_sh) \
IPP_MASK_SH_LIST_DCN(mask_sh), \
IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
@@ -124,7 +121,6 @@
IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-#endif
#define IPP_DCN10_REG_FIELD_LIST(type) \
type CNVC_SURFACE_PIXEL_FORMAT; \
@@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10,
const struct dcn10_ipp_shift *ipp_shift,
const struct dcn10_ipp_mask *ipp_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn20_ipp_construct(struct dcn10_ipp *ippn10,
struct dc_context *ctx,
int inst,
const struct dcn10_ipp_registers *regs,
const struct dcn10_ipp_shift *ipp_shift,
const struct dcn10_ipp_mask *ipp_mask);
-#endif
#endif /* _DCN10_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 549d423a01f6..1a37c90e9d43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -89,6 +89,7 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.get_dig_frontend = dcn10_get_dig_frontend,
+ .get_dig_mode = dcn10_get_dig_mode,
.destroy = dcn10_link_encoder_destroy
};
@@ -446,6 +447,46 @@ static uint8_t get_frontend_source(
}
}
+unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ int32_t value;
+ enum engine_id result;
+
+ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
+
+ switch (value) {
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGA:
+ result = ENGINE_ID_DIGA;
+ break;
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGB:
+ result = ENGINE_ID_DIGB;
+ break;
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGC:
+ result = ENGINE_ID_DIGC;
+ break;
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGD:
+ result = ENGINE_ID_DIGD;
+ break;
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGE:
+ result = ENGINE_ID_DIGE;
+ break;
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGF:
+ result = ENGINE_ID_DIGF;
+ break;
+ case DCN10_DIG_FE_SOURCE_SELECT_DIGG:
+ result = ENGINE_ID_DIGG;
+ break;
+ default:
+		// invalid DIG frontend source select
+ ASSERT(false);
+ result = ENGINE_ID_UNKNOWN;
+ }
+
+ return result;
+
+}
+
void enc1_configure_encoder(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings)
@@ -501,15 +542,6 @@ bool dcn10_is_dig_enabled(struct link_encoder *enc)
return value;
}
-unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
- return value;
-}
-
static void link_encoder_disable(struct dcn10_link_encoder *enc10)
{
/* reset training pattern */
@@ -1366,3 +1398,25 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
AUX_RX_RECEIVE_WINDOW, 0);
}
+
+enum signal_type dcn10_get_dig_mode(
+ struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	uint32_t value;
+
+	REG_GET(DIG_BE_CNTL, DIG_MODE, &value);
+ switch (value) {
+ case 1:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ case 2:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ case 3:
+ return SIGNAL_TYPE_HDMI_TYPE_A;
+ case 5:
+ return SIGNAL_TYPE_DISPLAY_PORT_MST;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ return SIGNAL_TYPE_NONE;
+}
+
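
Illustration only: a hypothetical use of the new get_dig_mode hook, assuming a dc_link whose link encoder was constructed with dcn10_lnk_enc_funcs.

	enum signal_type sig = link->link_enc->funcs->get_dig_mode(link->link_enc);

	if (sig == SIGNAL_TYPE_DISPLAY_PORT || sig == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		/* backend is currently driving DP; DP-specific handling here */
	}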
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 33b2af1a181c..eb13589b9a81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -72,9 +72,7 @@
struct dcn10_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
uint32_t AUX_DPHY_TX_CONTROL;
-#endif
};
struct dcn10_link_enc_hpd_registers {
@@ -106,13 +104,46 @@ struct dcn10_link_enc_registers {
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
uint32_t TMDS_CTL_BITS;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* DCCG */
uint32_t CLOCK_ENABLE;
/* DIG */
uint32_t DIG_LANE_ENABLE;
/* UNIPHY */
uint32_t CHANNEL_XBAR_CNTL;
+ /* DPCS */
+ uint32_t RDPCSTX_PHY_CNTL3;
+ uint32_t RDPCSTX_PHY_CNTL4;
+ uint32_t RDPCSTX_PHY_CNTL5;
+ uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSTX_PHY_CNTL7;
+ uint32_t RDPCSTX_PHY_CNTL8;
+ uint32_t RDPCSTX_PHY_CNTL9;
+ uint32_t RDPCSTX_PHY_CNTL10;
+ uint32_t RDPCSTX_PHY_CNTL11;
+ uint32_t RDPCSTX_PHY_CNTL12;
+ uint32_t RDPCSTX_PHY_CNTL13;
+ uint32_t RDPCSTX_PHY_CNTL14;
+ uint32_t RDPCSTX_PHY_CNTL15;
+ uint32_t RDPCSTX_CNTL;
+ uint32_t RDPCSTX_CLOCK_CNTL;
+ uint32_t RDPCSTX_PHY_CNTL0;
+ uint32_t RDPCSTX_PHY_CNTL2;
+ uint32_t RDPCSTX_PLL_UPDATE_DATA;
+ uint32_t RDPCS_TX_CR_ADDR;
+ uint32_t RDPCS_TX_CR_DATA;
+ uint32_t DPCSTX_TX_CLOCK_CNTL;
+ uint32_t DPCSTX_TX_CNTL;
+ uint32_t RDPCSTX_INTERRUPT_CONTROL;
+ uint32_t RDPCSTX_PHY_FUSE0;
+ uint32_t RDPCSTX_PHY_FUSE1;
+ uint32_t RDPCSTX_PHY_FUSE2;
+ uint32_t RDPCSTX_PHY_FUSE3;
+ uint32_t RDPCSTX_PHY_RX_LD_VAL;
+ uint32_t DPCSTX_DEBUG_CONFIG;
+ uint32_t RDPCSTX_DEBUG_CONFIG;
+ uint32_t RDPCSTX0_RDPCSTX_SCRATCH;
+ uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG;
+ uint32_t DCIO_SOFT_RESET;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -122,7 +153,6 @@ struct dcn10_link_enc_registers {
uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3;
-#endif
};
#define LE_SF(reg_name, field_name, post_fix)\
@@ -228,7 +258,6 @@ struct dcn10_link_enc_registers {
type AUX_LS_READ_EN;\
type AUX_RX_RECEIVE_WINDOW
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \
type RDPCS_PHY_DP_TX0_DATA_EN;\
@@ -250,6 +279,10 @@ struct dcn10_link_enc_registers {
type RDPCS_EXT_REFCLK_EN;\
type RDPCS_TX_FIFO_EN;\
type UNIPHY_LINK_ENABLE;\
+ type UNIPHY_CHANNEL0_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL1_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL2_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL3_XBAR_SOURCE;\
type UNIPHY_CHANNEL0_INVERT;\
type UNIPHY_CHANNEL1_INVERT;\
type UNIPHY_CHANNEL2_INVERT;\
@@ -337,15 +370,46 @@ struct dcn10_link_enc_registers {
type RDPCS_TX_FIFO_ERROR_MASK;\
type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\
type RDPCS_DPALT_4LANE_TOGGLE_MASK;\
+ type RDPCS_PHY_DPALT_DP4;\
+ type RDPCS_PHY_DPALT_DISABLE;\
type RDPCS_PHY_DPALT_DISABLE_ACK;\
type RDPCS_PHY_DP_MPLLB_V2I;\
type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\
+ type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\
+ type RDPCS_PHY_RX_VREF_CTRL;\
type RDPCS_PHY_DP_MPLLB_CP_INT;\
type RDPCS_PHY_DP_MPLLB_CP_PROP;\
type RDPCS_PHY_RX_REF_LD_VAL;\
type RDPCS_PHY_RX_VCO_LD_VAL;\
type DPCSTX_DEBUG_CONFIG; \
- type RDPCSTX_DEBUG_CONFIG
+ type RDPCSTX_DEBUG_CONFIG; \
+ type RDPCS_PHY_DP_TX0_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX0_EQ_PRE;\
+ type RDPCS_PHY_DP_TX0_EQ_POST;\
+ type RDPCS_PHY_DP_TX1_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX1_EQ_PRE;\
+ type RDPCS_PHY_DP_TX1_EQ_POST;\
+ type RDPCS_PHY_DP_TX2_EQ_MAIN;\
+ type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\
+ type RDPCS_PHY_DP_TX2_EQ_PRE;\
+ type RDPCS_PHY_DP_TX2_EQ_POST;\
+ type RDPCS_PHY_DP_TX3_EQ_MAIN;\
+ type RDPCS_PHY_DCO_RANGE;\
+ type RDPCS_PHY_DCO_FINETUNE;\
+ type RDPCS_PHY_DP_TX3_EQ_PRE;\
+ type RDPCS_PHY_DP_TX3_EQ_POST;\
+ type RDPCS_PHY_SUP_PRE_HP;\
+ type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\
+ type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\
+ type UNIPHYA_SOFT_RESET;\
+ type UNIPHYB_SOFT_RESET;\
+ type UNIPHYC_SOFT_RESET;\
+ type UNIPHYD_SOFT_RESET;\
+ type UNIPHYE_SOFT_RESET;\
+ type UNIPHYF_SOFT_RESET
#define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_LANE0EN;\
@@ -374,20 +438,15 @@ struct dcn10_link_enc_registers {
type AUX_TX_PRECHARGE_SYMBOLS; \
type AUX_MODE_DET_CHECK_DELAY;\
type DPCS_DBG_CBUS_DIS
-#endif
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#endif
};
struct dcn10_link_enc_mask {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#endif
};
struct dcn10_link_encoder {
@@ -514,4 +573,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc);
void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
+enum signal_type dcn10_get_dig_mode(
+ struct link_encoder *enc);
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 0bca011ed7c9..04f863499cfb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ uint32_t bg_r_cr, bg_g_y, bg_b_cb;
+
+ /* find bottommost mpcc. */
+ while (bottommost_mpcc->mpcc_bot) {
+ bottommost_mpcc = bottommost_mpcc->mpcc_bot;
+ }
/* mpc color is 12 bit. tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
* hw block translate to correct color depth.
*/
- uint32_t bg_r_cr = bg_color->color_r_cr << 2;
- uint32_t bg_g_y = bg_color->color_g_y << 2;
- uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+ bg_r_cr = bg_color->color_r_cr << 2;
+ bg_g_y = bg_color->color_g_y << 2;
+ bg_b_cb = bg_color->color_b_cb << 2;
- REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
+ REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
- REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
+ REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
- REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
+ REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
}
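
For context on the background-color change above, a rough standalone sketch of the two ideas in this hunk: walking the blend tree down to the bottommost layer and widening the 10-bit timing-generator color to the 12-bit MPCC background registers. The struct and values are illustrative only, not the driver's types.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the driver's MPCC node; only the fields used here. */
struct demo_mpcc {
    int mpcc_id;
    struct demo_mpcc *mpcc_bot;   /* next layer down in the blend tree */
};

int main(void)
{
    struct demo_mpcc bottom = { .mpcc_id = 2, .mpcc_bot = NULL };
    struct demo_mpcc middle = { .mpcc_id = 1, .mpcc_bot = &bottom };
    struct demo_mpcc top    = { .mpcc_id = 0, .mpcc_bot = &middle };

    /* Walk to the bottommost layer, as mpc1_set_bg_color now does. */
    struct demo_mpcc *cur = &top;
    while (cur->mpcc_bot)
        cur = cur->mpcc_bot;

    /* TG colors are 10 bit, the MPCC background registers are 12 bit,
     * so widen by shifting left two places. */
    uint32_t tg_g_y_10bit = 0x3FF;               /* full-scale 10-bit value */
    uint32_t mpc_g_y_12bit = tg_g_y_10bit << 2;  /* 0xFFC */

    printf("program MPCC_BG_* on mpcc_id %d, G/Y = 0x%03X\n",
           cur->mpcc_id, (unsigned int)mpc_g_y_12bit);
    return 0;
}
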
@@ -211,7 +218,7 @@ struct mpcc *mpc1_insert_plane(
} else {
new_mpcc->mpcc_bot = NULL;
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
- REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_ONLY);
}
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
@@ -364,6 +371,24 @@ void mpc1_mpc_init(struct mpc *mpc)
}
}
+void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int opp_id;
+
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+
+ mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
+
+ if (opp_id < MAX_OPP && REG(MUX[opp_id]))
+ REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf);
+}
+
+
void mpc1_init_mpcc_list_from_hw(
struct mpc *mpc,
struct mpc_tree *tree)
@@ -433,17 +458,16 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
.wait_for_idle = mpc1_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.update_blending = mpc1_update_blending,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.set_denorm = NULL,
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
-#endif
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index d3d16c4cbea3..962a68e322ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -149,6 +149,10 @@ void mpc1_remove_mpcc(
void mpc1_mpc_init(
struct mpc *mpc);
+void mpc1_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
void mpc1_assert_idle_mpcc(
struct mpc *mpc,
int id);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index e9ebbbe256b4..d79718fde5a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding(
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
@@ -237,6 +240,9 @@ void opp1_set_dyn_expansion(
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);
+ if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
+ return;
+
/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
@@ -367,11 +373,9 @@ void opp1_program_oppbuf(
*/
REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* Controls the number of padded pixels at the end of a segment */
if (REG(OPPBUF_CONTROL1))
REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels);
-#endif
}
void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
@@ -398,9 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
.opp_program_stereo = opp1_program_stereo,
.opp_pipe_clock_control = opp1_pipe_clock_control,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.opp_set_disp_pattern_generator = NULL,
-#endif
+ .dpg_is_blanked = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 0f10adea000c..2c0ecfa5a643 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -116,6 +116,8 @@
type FMT_RAND_G_SEED; \
type FMT_RAND_B_SEED; \
type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS; \
type FMT_CLAMP_DATA_EN; \
type FMT_CLAMP_COLOR_FORMAT; \
type FMT_DYNAMIC_EXP_EN; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a546c2bc9129..a9a43b397db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -154,7 +154,7 @@ void optc1_program_timing(
uint32_t h_sync_polarity, v_sync_polarity;
uint32_t start_point = 0;
uint32_t field_num = 0;
- uint32_t h_div_2;
+ enum h_timing_div_mode h_div = H_TIMING_NO_DIV;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -285,10 +285,11 @@ void optc1_program_timing(
* of stereo handled in explicit call
*/
- h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
- REG_UPDATE(OTG_H_TIMING_CNTL,
- OTG_H_TIMING_DIV_BY2, h_div_2 || optc1->comb_opp_id != 0xf);
+ if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
+ h_div = H_TIMING_DIV_BY2;
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_BY2, h_div);
}
void optc1_set_vtg_params(struct timing_generator *optc,
@@ -456,11 +457,16 @@ static bool optc1_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
+ REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
return true;
}
@@ -783,21 +789,26 @@ void optc1_set_early_control(
void optc1_set_static_screen_control(
struct timing_generator *optc,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ // By register spec, it only takes an 8-bit value
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
/* Bit 8 is no longer applicable in RV for PSR case,
* set bit 8 to 0 if given
*/
- if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+ if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
!= 0)
- value = value &
+ event_triggers = event_triggers &
~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
- OTG_STATIC_SCREEN_EVENT_MASK, value,
- OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+ OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
+ OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
void optc1_setup_manual_trigger(struct timing_generator *optc)
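
A minimal sketch of the new parameter handling in optc1_set_static_screen_control: the OTG frame-count field is 8 bits wide, so the count is clamped to 0xFF, and the double-buffer-update trigger bit is stripped from the event mask. The bit position below is a stand-in, not the real register definition.

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit; the real STATIC_SCREEN_EVENT_MASK_* define lives in the
 * driver headers and may sit elsewhere in the word. */
#define DEMO_DB_UPDATE_EVENT_BIT (1u << 8)

static void demo_set_static_screen(uint32_t event_triggers, uint32_t num_frames)
{
    /* The OTG_STATIC_SCREEN_FRAME_COUNT field is only 8 bits wide. */
    if (num_frames > 0xFF)
        num_frames = 0xFF;

    /* Bit 8 no longer applies for the PSR case, so strip it if given. */
    if (event_triggers & DEMO_DB_UPDATE_EVENT_BIT)
        event_triggers &= ~DEMO_DB_UPDATE_EVENT_BIT;

    printf("EVENT_MASK=0x%X FRAME_COUNT=%u\n",
           (unsigned int)event_triggers, (unsigned int)num_frames);
}

int main(void)
{
    demo_set_static_screen(0x1A2, 300);   /* -> EVENT_MASK=0xA2 FRAME_COUNT=255 */
    return 0;
}
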
@@ -824,6 +835,9 @@ void optc1_program_manual_trigger(struct timing_generator *optc)
REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
MANUAL_FLOW_CONTROL, 1);
+
+ REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
+ MANUAL_FLOW_CONTROL, 0);
}
@@ -846,6 +860,18 @@ void optc1_set_drr(
params->vertical_total_max > 0 &&
params->vertical_total_min > 0) {
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
REG_SET(OTG_V_TOTAL_MAX, 0,
OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
@@ -1214,59 +1240,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
return ret;
}
-bool optc1_is_matching_timing(struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing)
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing)
{
- struct dc_crtc_timing hw_crtc_timing = {0};
struct dcn_otg_state s = {0};
- if (tg == NULL || otg_timing == NULL)
+ if (tg == NULL || hw_crtc_timing == NULL)
return false;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
- hw_crtc_timing.h_total = s.h_total + 1;
- hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
- hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
- hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
-
- hw_crtc_timing.v_total = s.v_total + 1;
- hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
- hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
- hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
-
- if (otg_timing->h_total != hw_crtc_timing.h_total)
- return false;
-
- if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
- return false;
-
- if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
- return false;
+ hw_crtc_timing->h_total = s.h_total + 1;
+ hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
+ hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
+ hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
- if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
- return false;
-
- if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
- return false;
-
- if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
- return false;
-
- if (otg_timing->v_total != hw_crtc_timing.v_total)
- return false;
-
- if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
- return false;
-
- if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
- return false;
-
- if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
- return false;
-
- if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
- return false;
+ hw_crtc_timing->v_total = s.v_total + 1;
+ hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
+ hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
+ hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
return true;
}
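
The timing fields that optc1_get_hw_timing now returns directly can be sanity-checked with a small worked example. The register readings below are made up but chosen so the arithmetic lands on a familiar 1080p-style line; the +1 terms reflect that the OTG registers store totals minus one.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical raw OTG readings; in the driver they come from optc1_read_otg_state(). */
struct demo_otg_state {
    uint32_t h_total, h_blank_start, h_blank_end;
    uint32_t h_sync_a_start, h_sync_a_end;
};

int main(void)
{
    struct demo_otg_state s = {
        .h_total = 2199,        /* register stores total minus one */
        .h_blank_start = 2112,
        .h_blank_end = 192,
        .h_sync_a_start = 0,
        .h_sync_a_end = 44,
    };

    uint32_t h_total       = s.h_total + 1;
    uint32_t h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
    uint32_t h_front_porch = s.h_total + 1 - s.h_blank_start;
    uint32_t h_sync_width  = s.h_sync_a_end - s.h_sync_a_start;

    printf("total=%u addressable=%u front_porch=%u sync=%u\n",
           (unsigned int)h_total, (unsigned int)h_addressable,
           (unsigned int)h_front_porch, (unsigned int)h_sync_width);
    /* -> total=2200 addressable=1920 front_porch=88 sync=44 */
    return 0;
}
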
@@ -1470,7 +1462,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
- .is_matching_timing = optc1_is_matching_timing,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1498,7 +1489,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
- .setup_manual_trigger = optc1_setup_manual_trigger
+ .setup_manual_trigger = optc1_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn10_timing_generator_init(struct optc *optc1)
@@ -1513,10 +1505,8 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 8;
optc1->min_v_sync_width = 1;
- optc1->comb_opp_id = 0xf;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this:
*
* - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as
@@ -1529,15 +1519,12 @@ void dcn10_timing_generator_init(struct optc *optc1)
* to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well.
*
*/
-#endif
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
return two_pix;
}
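
A small sketch, under assumed flag names, of the container-rate rule the comment above describes: YCbCr 4:2:0 always carries two pixels per container, and DSC-compressed native 4:2:2 is treated the same way, so the container (OTG) rate is half the pixel rate in those cases.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's timing flags. */
struct demo_timing {
    bool ycbcr420;
    bool ycbcr422;
    bool dsc_enabled;
    bool dsc_ycbcr422_simple;
    unsigned int pixel_clock_khz;
};

static bool two_pixels_per_container(const struct demo_timing *t)
{
    /* 4:2:0 always; DSC native 4:2:2 (i.e. not "simple" 4:2:2) as well. */
    return t->ycbcr420 ||
           (t->dsc_enabled && t->ycbcr422 && !t->dsc_ycbcr422_simple);
}

int main(void)
{
    struct demo_timing t = {
        .ycbcr420 = true,
        .pixel_clock_khz = 594000,   /* e.g. a 4k60 4:2:0 mode */
    };

    unsigned int container_clock_khz = t.pixel_clock_khz;
    if (two_pixels_per_container(&t))
        container_clock_khz /= 2;    /* the OTG counts containers, not pixels */

    printf("container clock = %u kHz\n", container_clock_khz);  /* 297000 */
    return 0;
}
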
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 02599eb92ca6..f277656d5464 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -54,6 +54,7 @@
SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
SRI(OTG_STEREO_STATUS, OTG, inst),\
SRI(OTG_V_TOTAL_MAX, OTG, inst),\
+ SRI(OTG_V_TOTAL_MID, OTG, inst),\
SRI(OTG_V_TOTAL_MIN, OTG, inst),\
SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
SRI(OTG_TRIGA_CNTL, OTG, inst),\
@@ -125,6 +126,7 @@ struct dcn_optc_registers {
uint32_t OTG_3D_STRUCTURE_CONTROL;
uint32_t OTG_STEREO_STATUS;
uint32_t OTG_V_TOTAL_MAX;
+ uint32_t OTG_V_TOTAL_MID;
uint32_t OTG_V_TOTAL_MIN;
uint32_t OTG_V_TOTAL_CONTROL;
uint32_t OTG_TRIGA_CNTL;
@@ -163,13 +165,11 @@ struct dcn_optc_registers {
uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
uint32_t GSL_SOURCE_SELECT;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
uint32_t DWB_SOURCE_SELECT;
uint32_t OTG_DSC_START_POSITION;
uint32_t OPTC_DATA_FORMAT_CONTROL;
uint32_t OPTC_BYTES_PER_PIXEL;
uint32_t OPTC_WIDTH_CONTROL;
-#endif
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -214,12 +214,15 @@ struct dcn_optc_registers {
SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MID, OTG_V_TOTAL_MID, mask_sh),\
SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\
SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_FRAME_NUM, mask_sh),\
SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
@@ -348,9 +351,12 @@ struct dcn_optc_registers {
type OTG_3D_STRUCTURE_V_UPDATE_MODE;\
type OTG_3D_STRUCTURE_STEREO_SEL_OVR;\
type OTG_V_TOTAL_MAX;\
+ type OTG_V_TOTAL_MID;\
type OTG_V_TOTAL_MIN;\
type OTG_V_TOTAL_MIN_SEL;\
type OTG_V_TOTAL_MAX_SEL;\
+ type OTG_VTOTAL_MID_REPLACING_MAX_EN;\
+ type OTG_VTOTAL_MID_FRAME_NUM;\
type OTG_FORCE_LOCK_ON_EVENT;\
type OTG_SET_V_TOTAL_MIN_MASK_EN;\
type OTG_SET_V_TOTAL_MIN_MASK;\
@@ -448,7 +454,6 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
@@ -471,12 +476,6 @@ struct dcn_optc_registers {
type OPTC_DWB0_SOURCE_SELECT;\
type OPTC_DWB1_SOURCE_SELECT;
-#else
-
-#define TG_REG_FIELD_LIST(type) \
- TG_REG_FIELD_LIST_DCN1_0(type)
-
-#endif
struct dcn_optc_shift {
@@ -494,7 +493,7 @@ struct optc {
const struct dcn_optc_shift *tg_shift;
const struct dcn_optc_mask *tg_mask;
- int comb_opp_id;
+ int opp_count;
uint32_t max_h_total;
uint32_t max_v_total;
@@ -534,11 +533,15 @@ struct dcn_otg_state {
uint32_t h_total;
uint32_t underflow_occurred_status;
uint32_t otg_enabled;
+ uint32_t blank_enabled;
};
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing);
+
bool optc1_validate_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *timing);
@@ -622,7 +625,8 @@ void optc1_set_drr(
void optc1_set_static_screen_control(
struct timing_generator *optc,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index a12530a3ab9c..3b71898e859e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,6 +28,8 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn10_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10_resource.h"
@@ -270,7 +272,7 @@ static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -319,6 +321,14 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
@@ -471,6 +481,28 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
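
Why the new lookup matters: the link-encoder register array has one entry per PHY instance (A through D here), so indexing it with the raw transmitter enum is only safe if the enum values happen to line up. A non-authoritative sketch of the same mapping idea, with invented names:

#include <assert.h>
#include <stdio.h>

/* Illustrative subset of the transmitter enum; the real values live in the
 * driver headers and are not necessarily contiguous with the PHY indices. */
enum demo_transmitter {
    DEMO_UNIPHY_A,
    DEMO_UNIPHY_B,
    DEMO_UNIPHY_C,
    DEMO_UNIPHY_D,
    DEMO_UNIPHY_E,   /* not present on this ASIC */
};

static int demo_map_transmitter_to_phy(enum demo_transmitter t)
{
    switch (t) {
    case DEMO_UNIPHY_A: return 0;
    case DEMO_UNIPHY_B: return 1;
    case DEMO_UNIPHY_C: return 2;
    case DEMO_UNIPHY_D: return 3;
    default:
        assert(0);   /* unsupported transmitter for this register set */
        return 0;
    }
}

int main(void)
{
    static const char *link_enc_regs[4] = { "regs[0]", "regs[1]", "regs[2]", "regs[3]" };

    /* Index the 4-entry register array by PHY instance, not by the raw enum. */
    printf("UNIPHY_C -> %s\n",
           link_enc_regs[demo_map_transmitter_to_phy(DEMO_UNIPHY_C)]);
    return 0;
}
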
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
@@ -642,7 +674,10 @@ struct dce_aux *dcn10_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -751,14 +786,18 @@ struct link_encoder *dcn10_link_encoder_create(
{
struct dcn10_link_encoder *enc10 =
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc10)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn10_link_encoder_construct(enc10,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -786,6 +825,7 @@ struct clock_source *dcn10_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -881,7 +921,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-static void destruct(struct dcn10_resource_pool *pool)
+static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
{
unsigned int i;
@@ -1128,7 +1168,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
- destruct(dcn10_pool);
+ dcn10_resource_destruct(dcn10_pool);
kfree(dcn10_pool);
*pool = NULL;
}
@@ -1267,7 +1307,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
}
-static bool construct(
+static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn10_resource_pool *pool)
@@ -1307,6 +1347,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ dc->caps.extended_aux_timeout_support = false;
+
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1416,6 +1458,14 @@ static bool construct(
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
+ /*
+ * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification *
+ * implemented. So AZ D3 should work. For issue 197007. *
+ */
+ if (pool->base.pp_smu != NULL
+ && pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
+ dc->debug.az_endpoint_mute_only = false;
+
if (!dc->debug.disable_pplib_clock_request)
dcn_bw_update_from_pplib(dc);
dcn_bw_sync_calcs_and_dml(dc);
@@ -1544,7 +1594,7 @@ static bool construct(
fail:
- destruct(pool);
+ dcn10_resource_destruct(pool);
return false;
}
@@ -1559,9 +1609,10 @@ struct resource_pool *dcn10_create_resource_pool(
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b9ffbf6b58ff..376c4264d295 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
@@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
* Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
* and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
*/
- if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
- (output_color_space == COLOR_SPACE_2020_YCBCR) ||
- (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
- (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+ if (use_vsc_sdp_for_colorimetry)
misc1 = misc1 | 0x40;
else
misc1 = misc1 & ~0x40;
@@ -512,11 +510,12 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
/* setup HDMI engine */
- REG_UPDATE_5(HDMI_CONTROL,
+ REG_UPDATE_6(HDMI_CONTROL,
HDMI_PACKET_GEN_VERSION, 1,
HDMI_KEEPOUT_MODE, 1,
HDMI_DEEP_COLOR_ENABLE, 0,
HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
HDMI_CLOCK_CHANNEL_RATE, 0);
@@ -1003,6 +1002,19 @@ void enc1_stream_encoder_set_avmute(
REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
}
+void enc1_reset_hdmi_stream_attribute(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE_5(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+}
+
#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
@@ -1196,13 +1208,13 @@ static union audio_cea_channels speakers_to_channels(
void get_audio_clock_info(
enum dc_color_depth color_depth,
- uint32_t crtc_pixel_clock_in_khz,
- uint32_t actual_pixel_clock_in_khz,
+ uint32_t crtc_pixel_clock_100Hz,
+ uint32_t actual_pixel_clock_100Hz,
struct audio_clock_info *audio_clock_info)
{
const struct audio_clock_info *clock_info;
uint32_t index;
- uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+ uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_100Hz / 100;
uint32_t audio_array_size;
switch (color_depth) {
@@ -1239,16 +1251,16 @@ void get_audio_clock_info(
}
/* not found */
- if (actual_pixel_clock_in_khz == 0)
- actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+ if (actual_pixel_clock_100Hz == 0)
+ actual_pixel_clock_100Hz = crtc_pixel_clock_100Hz;
/* See HDMI spec the table entry under
* pixel clock of "Other". */
audio_clock_info->pixel_clock_in_10khz =
- actual_pixel_clock_in_khz / 10;
- audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
- audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
- audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+ actual_pixel_clock_100Hz / 100;
+ audio_clock_info->cts_32khz = actual_pixel_clock_100Hz / 10;
+ audio_clock_info->cts_44khz = actual_pixel_clock_100Hz / 10;
+ audio_clock_info->cts_48khz = actual_pixel_clock_100Hz / 10;
audio_clock_info->n_32khz = 4096;
audio_clock_info->n_44khz = 6272;
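
To make the unit change concrete (pixel clocks are now passed in 100 Hz units rather than kHz), a rough arithmetic sketch; the sample clock is illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* 148.5 MHz pixel clock expressed in the new 100 Hz units. */
    uint32_t actual_pixel_clock_100hz = 1485000;

    /* The table lookup still works in 10 kHz units: divide by 100
     * (previously the kHz input was divided by 10). */
    uint32_t pixel_clock_in_10khz = actual_pixel_clock_100hz / 100;

    /* For the "Other" entry in the HDMI spec table, CTS equals the pixel
     * clock in kHz, which is now the 100 Hz value divided by 10. */
    uint32_t cts = actual_pixel_clock_100hz / 10;

    printf("10kHz units = %u, CTS = %u, N(32kHz) = 4096\n",
           (unsigned int)pixel_clock_in_10khz, (unsigned int)cts);
    return 0;
}
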
@@ -1308,14 +1320,14 @@ static void enc1_se_setup_hdmi_audio(
/* Program audio clock sample/regeneration parameters */
get_audio_clock_info(crtc_info->color_depth,
- crtc_info->requested_pixel_clock,
- crtc_info->calculated_pixel_clock,
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
&audio_clock_info);
DC_LOG_HW_AUDIO(
- "\n%s:Input::requested_pixel_clock = %d" \
- "calculated_pixel_clock = %d \n", __func__, \
- crtc_info->requested_pixel_clock, \
- crtc_info->calculated_pixel_clock);
+ "\n%s:Input::requested_pixel_clock_100Hz = %d" \
+ "calculated_pixel_clock_100Hz = %d \n", __func__, \
+ crtc_info->requested_pixel_clock_100Hz, \
+ crtc_info->calculated_pixel_clock_100Hz);
/* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
@@ -1528,6 +1540,77 @@ void enc1_dig_connect_to_otg(
REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst);
}
+unsigned int enc1_dig_source_otg(
+ struct stream_encoder *enc)
+{
+ uint32_t tg_inst = 0;
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst);
+
+ return tg_inst;
+}
+
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth)
+{
+ uint32_t hw_encoding = 0;
+ uint32_t hw_depth = 0;
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enc == NULL ||
+ encoding == NULL ||
+ depth == NULL)
+ return false;
+
+ REG_GET_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, &hw_encoding,
+ DP_COMPONENT_DEPTH, &hw_depth);
+
+ switch (hw_depth) {
+ case DP_COMPONENT_PIXEL_DEPTH_6BPC:
+ *depth = COLOR_DEPTH_666;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_8BPC:
+ *depth = COLOR_DEPTH_888;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_10BPC:
+ *depth = COLOR_DEPTH_101010;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_12BPC:
+ *depth = COLOR_DEPTH_121212;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_16BPC:
+ *depth = COLOR_DEPTH_161616;
+ break;
+ default:
+ *depth = COLOR_DEPTH_UNDEFINED;
+ break;
+ }
+
+ switch (hw_encoding) {
+ case DP_PIXEL_ENCODING_TYPE_RGB444:
+ *encoding = PIXEL_ENCODING_RGB;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR422:
+ *encoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR444:
+ case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
+ *encoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR420:
+ *encoding = PIXEL_ENCODING_YCBCR420;
+ break;
+ default:
+ *encoding = PIXEL_ENCODING_UNDEFINED;
+ break;
+ }
+ return true;
+}
+
static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
@@ -1562,6 +1645,10 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.setup_stereo_sync = enc1_setup_stereo_sync,
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
+ .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+ .dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};
void dcn10_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index bc2b4af9543b..f9b9e221c698 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -89,7 +89,8 @@
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_VID_TIMING, DP, id), \
SRI(DP_SEC_AUD_N, DP, id), \
- SRI(DP_SEC_TIMESTAMP, DP, id)
+ SRI(DP_SEC_TIMESTAMP, DP, id), \
+ SRI(DIG_CLOCK_PATTERN, DIG, id)
#define SE_DCN_REG_LIST(id)\
SE_COMMON_DCN_REG_LIST(id)
@@ -162,14 +163,13 @@ struct dcn10_stream_enc_registers {
uint32_t DP_MSA_TIMING_PARAM3;
uint32_t DP_MSA_TIMING_PARAM4;
uint32_t HDMI_DB_CONTROL;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t DP_DSC_CNTL;
uint32_t DP_DSC_BYTES_PER_PIXEL;
uint32_t DME_CONTROL;
uint32_t DP_SEC_METADATA_TRANSMISSION;
uint32_t HDMI_METADATA_PACKET_CONTROL;
uint32_t DP_SEC_FRAMING4;
-#endif
+ uint32_t DIG_CLOCK_PATTERN;
};
@@ -189,6 +189,7 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
@@ -297,7 +298,8 @@ struct dcn10_stream_enc_registers {
SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\
- SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh)
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh)
#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
@@ -374,6 +376,7 @@ struct dcn10_stream_enc_registers {
type HDMI_GC_SEND;\
type HDMI_NULL_SEND;\
type HDMI_DATA_SCRAMBLE_EN;\
+ type HDMI_NO_EXTRA_NULL_PACKET_FILLED;\
type HDMI_AUDIO_INFO_SEND;\
type AFMT_AUDIO_INFO_UPDATE;\
type HDMI_AUDIO_INFO_LINE;\
@@ -458,9 +461,9 @@ struct dcn10_stream_enc_registers {
type HDMI_DB_DISABLE;\
type DP_VID_N_MUL;\
type DP_VID_M_DOUBLE_VALUE_EN;\
- type DIG_SOURCE_SELECT
+ type DIG_SOURCE_SELECT;\
+ type DIG_CLOCK_PATTERN
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define SE_REG_FIELD_LIST_DCN2_0(type) \
type DP_DSC_MODE;\
type DP_DSC_SLICE_WIDTH;\
@@ -479,20 +482,15 @@ struct dcn10_stream_enc_registers {
type DOLBY_VISION_EN;\
type DP_PIXEL_COMBINE;\
type DP_SST_SDP_SPLITTING
-#endif
struct dcn10_stream_encoder_shift {
SE_REG_FIELD_LIST_DCN1_0(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
-#endif
};
struct dcn10_stream_encoder_mask {
SE_REG_FIELD_LIST_DCN1_0(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
-#endif
};
struct dcn10_stream_encoder {
@@ -520,6 +518,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void enc1_stream_encoder_hdmi_set_stream_attribute(
@@ -592,6 +591,9 @@ void enc1_dig_connect_to_otg(
struct stream_encoder *enc,
int tg_inst);
+unsigned int enc1_dig_source_otg(
+ struct stream_encoder *enc);
+
void enc1_stream_encoder_set_stream_attribute_helper(
struct dcn10_stream_encoder *enc1,
struct dc_crtc_timing *crtc_timing);
@@ -605,8 +607,16 @@ void enc1_se_enable_dp_audio(
void get_audio_clock_info(
enum dc_color_depth color_depth,
- uint32_t crtc_pixel_clock_in_khz,
- uint32_t actual_pixel_clock_in_khz,
+ uint32_t crtc_pixel_clock_100Hz,
+ uint32_t actual_pixel_clock_100Hz,
struct audio_clock_info *audio_clock_info);
+void enc1_reset_hdmi_stream_attribute(
+ struct stream_encoder *enc);
+
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index e9721a906592..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -1,22 +1,38 @@
+# SPDX-License-Identifier: MIT
#
# Makefile for DCN.
-DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DCN20 += dcn20_dsc.o
+
+ifdef CONFIG_X86
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
endif
-CFLAGS_dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
+endif
+endif
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 31aa6ee5cd5b..50bffbfdd394 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
- ASSERT(req_dppclk <= ref_dppclk);
- /* need to clamp to 8 bits */
- if (ref_dppclk > 0xff) {
- int divider = (ref_dppclk + 0xfe) / 0xff;
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
- ref_dppclk /= divider;
- req_dppclk = (req_dppclk + divider - 1) / divider;
- if (req_dppclk > ref_dppclk)
- req_dppclk = ref_dppclk;
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
}
+
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
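
A minimal sketch of the reworked DTO math above: the divider ratio req_dppclk / ref_dppclk is approximated by phase / modulo, with modulo fixed at 0xFF and phase rounded up, then clamped. The clock values are examples only.

#include <stdio.h>

int main(void)
{
    int ref_dppclk = 600000;   /* kHz, global DPP clock (example value) */
    int req_dppclk = 400000;   /* kHz, requested per-pipe clock */

    /* phase / modulo = dpp pipe clk / dpp global clk */
    int modulo = 0xff;
    int phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;  /* round up */

    if (phase > 0xff)
        phase = 0xff;   /* requested clock above the reference: clamp */

    printf("DTO phase=%d modulo=%d (~%.1f%% of ref)\n",
           phase, modulo, 100.0 * phase / modulo);
    return 0;
}
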
@@ -96,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
void dccg2_init(struct dccg *dccg)
{
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- // Fallthrough intentional to program all available dpp_dto's
- switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
- case 6:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
- /* Fall through */
- case 5:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
- /* Fall through */
- case 4:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
- /* Fall through */
- case 3:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
- /* Fall through */
- case 2:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
- /* Fall through */
- case 1:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
- break;
- default:
- ASSERT(false);
- break;
- }
}
static const struct dccg_funcs dccg2_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 9bc5dd23d297..13e057d7ee93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -72,6 +72,21 @@ void dpp20_read_state(struct dpp *dpp_base,
}
}
+void dpp2_power_on_obuf(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0);
+
+ REG_UPDATE(OBUF_MEM_PWR_CTRL,
+ OBUF_MEM_PWR_FORCE, power_on == true ? 0:1);
+
+ REG_UPDATE(DSCL_MEM_PWR_CTRL,
+ LUT_MEM_PWR_FORCE, power_on == true ? 0:1);
+}
+
void dpp2_dummy_program_input_lut(
struct dpp *dpp_base,
const struct dc_gamma *gamma)
@@ -89,7 +104,7 @@ static void dpp2_cnv_setup (
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
- enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
uint32_t is_2bit = 0;
@@ -130,25 +145,25 @@ static void dpp2_cnv_setup (
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
pixel_format = 22;
@@ -162,7 +177,7 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
@@ -173,13 +188,13 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
@@ -212,13 +227,13 @@ static void dpp2_cnv_setup (
tbl_entry.color_space = input_color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
else
- select = INPUT_CSC_SELECT_BYPASS;
+ select = DCN2_ICSC_SELECT_BYPASS;
- dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
- dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+ dpp2_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
@@ -227,6 +242,7 @@ static void dpp2_cnv_setup (
CUR0_ENABLE, 0);
}
+ dpp2_power_on_obuf(dpp_base, true);
}
@@ -326,14 +342,18 @@ void dpp2_cnv_set_alpha_keyer(
void dpp2_set_cursor_attributes(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format)
+ struct dc_cursor_attributes *cursor_attributes)
{
+ enum dc_cursor_color_format color_format = cursor_attributes->color_format;
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
- cur_rom_en = 1;
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
@@ -356,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
/* Some ASICs do not support FP16 scaling, so we reject modes that require this */
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
@@ -444,8 +457,8 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
- .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 59b67ed57c19..27610251c57f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -30,16 +30,20 @@
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
-#define TF_REG_LIST_DCN20(id) \
- TF_REG_LIST_DCN(id), \
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
SRI(CM_BLNDGAM_CONTROL, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
@@ -66,9 +70,6 @@
SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
@@ -147,7 +148,22 @@
SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(CM_ICSC_B_C11_C12, CM, id), \
+ SRI(CM_ICSC_B_C33_C34, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
SRI(CURSOR_CONTROL, CURSOR0_, id), \
SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
@@ -162,29 +178,45 @@
SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \
SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
SRI(CM_SHAPER_LUT_DATA, CM, id), \
- SRI(CURSOR_CONTROL, CURSOR0_, id)
+ SRI(CURSOR_CONTROL, CURSOR0_, id),\
+ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
-#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
- TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -259,18 +291,9 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -339,9 +362,6 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
@@ -354,7 +374,6 @@
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
- TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
@@ -519,9 +538,14 @@
TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
@@ -554,11 +578,33 @@
TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \
- TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh)
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\
+ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
+/* DPP CM debug status register:
+ *
+ * Status index including current ICSC, Gamut Remap Mode is 9
+ * ICSC Mode: [4..3]
+ * Gamut Remap Mode: [10..9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+ TF_DEBUG_REG_LIST_SH_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+ TF_DEBUG_REG_LIST_MASK_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
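
A quick sketch of how the shift/mask pairs defined above decode the CM debug word: ICSC mode sits in bits [4:3] (shift 3, mask 0x18) and gamut remap mode in bits [10:9] (shift 9, mask 0x600). The sample word is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical readback of CM_TEST_DEBUG_DATA at status index 9. */
    uint32_t debug_data = 0x408;

    uint32_t icsc_mode  = (debug_data & 0x18)  >> 3;   /* bits [4:3]  */
    uint32_t remap_mode = (debug_data & 0x600) >> 9;   /* bits [10:9] */

    /* Per the driver comment, the value read back is one greater than the
     * active coefficient set, which lets program_gamut_remap() program the
     * other set (A or B) so the update lands on a frame boundary. */
    printf("ICSC mode=%u, gamut remap mode=%u\n",
           (unsigned int)icsc_mode, (unsigned int)remap_mode);   /* 1, 2 */
    return 0;
}
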
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
+ type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
type FORMAT_CNV16; \
type CNVC_BYPASS_MSB_ALIGN; \
type CLAMP_POSITIVE; \
@@ -585,7 +631,10 @@
type COLOR_KEYER_BLUE_HIGH; \
type CUR0_PIX_INV_MODE; \
type CUR0_PIXEL_ALPHA_MOD_EN; \
- type CUR0_ROM_EN
+ type CUR0_ROM_EN;\
+ type OBUF_MEM_PWR_FORCE;\
+ type LUT_MEM_PWR_FORCE
+
struct dcn2_dpp_shift {
TF_REG_FIELD_LIST_DCN2_0(uint8_t);
@@ -609,10 +658,23 @@ struct dcn2_dpp_mask {
uint32_t COLOR_KEYER_ALPHA; \
uint32_t COLOR_KEYER_RED; \
uint32_t COLOR_KEYER_GREEN; \
- uint32_t COLOR_KEYER_BLUE
+ uint32_t COLOR_KEYER_BLUE; \
+ uint32_t OBUF_MEM_PWR_CTRL; \
+ uint32_t DSCL_MEM_PWR_CTRL
+
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_ICSC_B_C11_C12; \
+ uint32_t CM_ICSC_B_C33_C34
struct dcn2_dpp_registers {
DPP_DCN2_REG_VARIABLE_LIST;
+ DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
};
struct dcn20_dpp {
@@ -634,6 +696,18 @@ struct dcn20_dpp {
struct pwl_params pwl_data;
};
+enum dcn20_input_csc_select {
+ DCN2_ICSC_SELECT_BYPASS = 0,
+ DCN2_ICSC_SELECT_ICSC_A = 1,
+ DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+ DCN2_GAMUT_REMAP_BYPASS = 0,
+ DCN2_GAMUT_REMAP_COEF_A = 1,
+ DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
void dpp20_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -645,6 +719,16 @@ void dpp2_set_degamma(
struct dpp *dpp_base,
enum ipp_degamma_mode mode);
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
bool dpp20_program_blnd_lut(
struct dpp *dpp_base, const struct pwl_params *params);
@@ -668,7 +752,7 @@ void dscl2_calc_lb_num_partitions(
void dpp2_set_cursor_attributes(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format);
+ struct dc_cursor_attributes *cursor_attributes);
void dpp2_dummy_program_input_lut(
struct dpp *dpp_base,
@@ -683,11 +767,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps);
-
bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,
@@ -695,4 +774,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,
const struct dcn2_dpp_shift *tf_shift,
const struct dcn2_dpp_mask *tf_mask);
+void dpp2_power_on_obuf(
+ struct dpp *dpp_base,
+ bool power_on);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index e28b8e7bedf5..8dc3d1f73984 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -36,6 +36,9 @@
#define REG(reg)\
dpp->tf_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
dpp->base.ctx
@@ -44,15 +47,17 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-
-
-
static void dpp2_enable_cm_block(
struct dpp *dpp_base)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
- REG_UPDATE(CM_CONTROL, CM_BYPASS, 0);
+ unsigned int cm_bypass_mode = 0;
+ //Temp, put CM in bypass mode
+ if (dpp_base->ctx->dc->debug.cm_in_bypass)
+ cm_bypass_mode = 1;
+
+ REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
}
@@ -144,12 +149,164 @@ void dpp2_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
+static void program_gamut_remap(
+ struct dcn20_dpp *dpp,
+ const uint16_t *regval,
+ enum dcn20_gamut_remap_select select)
+{
+ uint32_t cur_select = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ /* determine which gamut_remap coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the update so gamut_remap is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+ /* value stored in dbg reg will be 1 greater than mode we want */
+ if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+ select = DCN2_GAMUT_REMAP_COEF_A;
+ else
+ select = DCN2_GAMUT_REMAP_COEF_B;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+ } else {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i = 0;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+ }
+}
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn20_input_csc_select select;
+ struct color_matrices_reg icsc_regs;
+
+ if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+ if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_ICSC_B;
+
+ icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+ if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ } else {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &icsc_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
+}
+
static void dpp20_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index ffd0014ec3b5..6bdfee20b6a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -23,13 +23,12 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);
-static bool dsc_prepare_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg);
static void dsc_init_reg_values(struct dsc_reg_values *reg_vals);
static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params);
@@ -42,7 +41,8 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg, uint8_t *dsc_packed_pps);
+ struct dsc_optc_config *dsc_optc_cfg);
+static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
@@ -51,6 +51,7 @@ const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_read_state = dsc2_read_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
+ .dsc_get_packed_pps = dsc2_get_packed_pps,
.dsc_enable = dsc2_enable,
.dsc_disable = dsc2_disable,
};
@@ -117,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
@@ -162,46 +163,79 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
struct dsc_optc_config dsc_optc_cfg;
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
- if (dsc_cfg->pic_width > TO_DCN20_DSC(dsc)->max_image_width)
+ if (dsc_cfg->pic_width > dsc20->max_image_width)
return false;
- return dsc_prepare_config(dsc, dsc_cfg, &dsc_optc_cfg);
+ return dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, &dsc_optc_cfg);
}
-static void dsc_config_log(struct display_stream_compressor *dsc,
- const struct dsc_config *config)
+static void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
{
- DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
- DC_LOG_DSC("\n\tnum_slices_h %d\n\tnum_slices_v %d\n\tbits_per_pixel %d\n\tcolor_depth %d",
- config->dc_dsc_cfg.num_slices_h,
- config->dc_dsc_cfg.num_slices_v,
+ DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h);
+ DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v);
+ DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)",
config->dc_dsc_cfg.bits_per_pixel,
- config->color_depth);
+ config->dc_dsc_cfg.bits_per_pixel / 16,
+ ((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16);
+ DC_LOG_DSC("\tcolor_depth %d", config->color_depth);
}
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg, uint8_t *dsc_packed_pps)
+ struct dsc_optc_config *dsc_optc_cfg)
{
bool is_config_ok;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ DC_LOG_DSC(" ");
+ DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
dsc_config_log(dsc, dsc_cfg);
- is_config_ok = dsc_prepare_config(dsc, dsc_cfg, dsc_optc_cfg);
+ is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
ASSERT(is_config_ok);
- drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc20->reg_vals.pps);
+ DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
dsc_log_pps(dsc, &dsc20->reg_vals.pps);
dsc_write_to_registers(dsc, &dsc20->reg_vals);
}
+static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
+{
+ bool is_config_ok;
+ struct dsc_reg_values dsc_reg_vals;
+ struct dsc_optc_config dsc_optc_cfg;
+
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+ memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
+ DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
+ dsc_config_log(dsc, dsc_cfg);
+ DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
+ is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg);
+ ASSERT(is_config_ok);
+ drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps);
+ dsc_log_pps(dsc, &dsc_reg_vals.pps);
+
+ return is_config_ok;
+}
+
+
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
- /* TODO Check if DSC alreay in use? */
- DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe);
+ DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 1);
@@ -215,8 +249,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
static void dsc2_disable(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
- DC_LOG_DSC("disable DSC");
+ DC_LOG_DSC("disable DSC %d", dsc->inst);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if (!dsc_clock_en || !dsc_fw_config) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
@@ -232,7 +276,6 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
int i;
int bits_per_pixel = pps->bits_per_pixel;
- DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major);
DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor);
DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component);
@@ -282,13 +325,11 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
}
}
-static bool dsc_prepare_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
- struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
-
/* Validate input parameters */
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v);
@@ -302,7 +343,7 @@ static bool dsc_prepare_config(struct display_stream_compressor *dsc, const stru
dsc_cfg->dc_dsc_cfg.linebuf_depth == 0)));
ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375
- if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_v ||
+ if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h ||
!(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) ||
!dsc_cfg->pic_width || !dsc_cfg->pic_height ||
!((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range:
@@ -315,54 +356,54 @@ static bool dsc_prepare_config(struct display_stream_compressor *dsc, const stru
return false;
}
- dsc_init_reg_values(&dsc20->reg_vals);
+ dsc_init_reg_values(dsc_reg_vals);
/* Copy input config */
- dsc20->reg_vals.pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
- dsc20->reg_vals.num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
- dsc20->reg_vals.num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
- dsc20->reg_vals.pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
- dsc20->reg_vals.pps.pic_width = dsc_cfg->pic_width;
- dsc20->reg_vals.pps.pic_height = dsc_cfg->pic_height;
- dsc20->reg_vals.pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
- dsc20->reg_vals.pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
- dsc20->reg_vals.pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
- dsc20->reg_vals.alternate_ich_encoding_en = dsc20->reg_vals.pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
+ dsc_reg_vals->num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
+ dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
+ dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width;
+ dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height;
+ dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
+ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
+ dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
+ dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc20->reg_vals.pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
- dsc20->reg_vals.pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
+ dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
- ASSERT(dsc20->reg_vals.pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
- if (!(dsc20->reg_vals.pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
+ ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
+ if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v);
return false;
}
- dsc20->reg_vals.bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
- if (dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
- dsc20->reg_vals.pps.bits_per_pixel = dsc20->reg_vals.bpp_x32;
+ dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
+ if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
+ dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32;
else
- dsc20->reg_vals.pps.bits_per_pixel = dsc20->reg_vals.bpp_x32 >> 1;
+ dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1;
- dsc20->reg_vals.pps.convert_rgb = dsc20->reg_vals.pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
- dsc20->reg_vals.pps.native_422 = (dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
- dsc20->reg_vals.pps.native_420 = (dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
- dsc20->reg_vals.pps.simple_422 = (dsc20->reg_vals.pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
+ dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
+ dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
+ dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
+ dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
- if (dscc_compute_dsc_parameters(&dsc20->reg_vals.pps, &dsc_params)) {
+ if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) {
dm_output_to_console("%s: DSC config failed\n", __func__);
return false;
}
- dsc_update_from_dsc_parameters(&dsc20->reg_vals, &dsc_params);
+ dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params);
dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel;
- dsc_optc_cfg->slice_width = dsc20->reg_vals.pps.slice_width;
- dsc_optc_cfg->is_pixel_format_444 = dsc20->reg_vals.pixel_format == DSC_PIXFMT_RGB ||
- dsc20->reg_vals.pixel_format == DSC_PIXFMT_YCBCR444 ||
- dsc20->reg_vals.pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
+ dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width;
+ dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ||
+ dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 ||
+ dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
return true;
}
@@ -427,6 +468,8 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
{
int i;
+ memset(reg_vals, 0, sizeof(struct dsc_reg_values));
+
/* Non-PPS values */
reg_vals->dsc_clock_enable = 1;
reg_vals->dsc_clock_gating_disable = 0;
@@ -436,7 +479,7 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
reg_vals->ich_reset_at_eol = 0;
reg_vals->alternate_ich_encoding_en = 0;
reg_vals->rc_buffer_model_size = 0;
- reg_vals->disable_ich = 0;
+ /*reg_vals->disable_ich = 0;*/
reg_vals->dsc_dbg_en = 0;
for (i = 0; i < 4; i++)
@@ -518,9 +561,11 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en,
NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1);
- REG_SET_2(DSCC_CONFIG1, 0,
+ REG_SET(DSCC_CONFIG1, 0,
+ DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size);
+ /*REG_SET_2(DSCC_CONFIG1, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size,
- DSCC_DISABLE_ICH, reg_vals->disable_ich);
+ DSCC_DISABLE_ICH, reg_vals->disable_ich);*/
REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0],
@@ -691,4 +736,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
}
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 168865a16288..9855a7ed0387 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -21,7 +21,6 @@
* Authors: AMD
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef __DCN20_DSC_H__
#define __DCN20_DSC_H__
@@ -103,7 +102,7 @@
DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh), \
+ /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
@@ -278,7 +277,7 @@
type ALTERNATE_ICH_ENCODING_EN; \
type NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION; \
type DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE; \
- type DSCC_DISABLE_ICH; \
+ /*type DSCC_DISABLE_ICH;*/ \
type DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED; \
type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED; \
@@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc,
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index cd8bc92ce3ba..880954ac0b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
struct scaling_taps num_taps)
{
uint32_t h_ratio_luma = 1;
- uint32_t h_ratio_chroma = 1;
uint32_t h_taps_luma = num_taps.h_taps;
uint32_t h_taps_chroma = num_taps.h_taps_c;
int32_t h_init_phase_luma = 0;
@@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
h_ratio_luma = -1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
- h_ratio_chroma = h_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
@@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
enum dwb_subsample_position subsample_position)
{
uint32_t v_ratio_luma = 1;
- uint32_t v_ratio_chroma = 1;
uint32_t v_taps_luma = num_taps.v_taps;
uint32_t v_taps_chroma = num_taps.v_taps_c;
int32_t v_init_phase_luma = 0;
@@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
v_ratio_luma = -1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
- v_ratio_chroma = v_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 6e2dbd03f9bf..9235f7d29454 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -26,6 +26,7 @@
#include "dcn20_hubbub.h"
#include "reg_helper.h"
+#include "clk_mgr.h"
#define REG(reg)\
hubbub1->regs->reg
@@ -185,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl
}
static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
@@ -235,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
&segment_order_horz, &segment_order_vert))
return false;
- hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
@@ -292,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
output->const_color_support = true;
@@ -379,6 +383,11 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
REG_SET(DCN_VM_AGP_BASE, 0,
AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+ REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
+ DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, (pa_config->page_table_default_page_addr >> 44) & 0xF);
+ REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
+ DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, (pa_config->page_table_default_page_addr >> 12) & 0xFFFFFFFF);
+
if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
@@ -397,54 +406,67 @@ void hubbub2_update_dchub(struct hubbub *hubbub,
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
- ASSERT(false);
- /*should not come here*/
+ if (REG(DCN_VM_FB_LOCATION_TOP) == 0)
return;
- }
- /* TODO: port code from dal2 */
+
switch (dh_data->fb_mode) {
case FRAME_BUFFER_MODE_ZFB_ONLY:
/*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
- REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
- SDPIF_FB_TOP, 0);
-
- REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
- SDPIF_FB_BASE, 0x0FFFF);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 22);
+ REG_UPDATE(DCN_VM_FB_LOCATION_TOP,
+ FB_TOP, 0);
+
+ REG_UPDATE(DCN_VM_FB_LOCATION_BASE,
+ FB_BASE, 0xFFFFFF);
+
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
+
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
+
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 24);
break;
case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 22);
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 24);
break;
case FRAME_BUFFER_MODE_LOCAL_ONLY:
- /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, 0);
+ /*Should not touch FB LOCATION (should be done by VBIOS)*/
+
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, 0);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, 0X03FFFF);
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, 0xFFFFFF);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, 0);
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, 0);
break;
default:
break;
@@ -553,13 +575,23 @@ static void hubbub2_program_watermarks(
*/
hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+
+ /*
+ * There's a special case when going from p-state support to p-state unsupported
+ * here we are going to LOWER watermarks to go to dummy p-state only, but this has
+ * to be done prepare_bandwidth, not optimize
+ */
+ if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true &&
+ hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false)
+ safe_to_lower = true;
+
hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
}
static const struct hubbub_funcs hubbub2_funcs = {
@@ -572,6 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub2_program_watermarks,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -589,4 +623,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index a7b6ca26a9ad..501532dd523a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -29,13 +29,21 @@
#include "dcn10/dcn10_hubbub.h"
#include "dcn20_vmid.h"
+#define HUBBUB_REG_LIST_DCN20_COMMON()\
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DCN_VM_FB_LOCATION_BASE),\
+ SR(DCN_VM_FB_LOCATION_TOP),\
+ SR(DCN_VM_FB_OFFSET),\
+ SR(DCN_VM_AGP_BOT),\
+ SR(DCN_VM_AGP_TOP),\
+ SR(DCN_VM_AGP_BASE)
+
#define TO_DCN20_HUBBUB(hubbub)\
container_of(hubbub, struct dcn20_hubbub, base)
-#define HUBBUB_REG_LIST_DCN20(id)\
+#define HUBBUB_REG_LIST_DCN20_COMMON()\
HUBBUB_REG_LIST_DCN_COMMON(), \
- HUBBUB_VM_REG_LIST(), \
- HUBBUB_SR_WATERMARK_REG_LIST(), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DCN_VM_FB_LOCATION_BASE),\
SR(DCN_VM_FB_LOCATION_TOP),\
@@ -44,6 +52,14 @@
SR(DCN_VM_AGP_TOP),\
SR(DCN_VM_AGP_BASE)
+#define HUBBUB_REG_LIST_DCN20(id)\
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ HUBBUB_VM_REG_LIST(),\
+ SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB),\
+ SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB)
+
+
#define HUBBUB_MASK_SH_LIST_DCN20(mask_sh)\
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
@@ -53,7 +69,9 @@
HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
- HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh)
struct dcn20_hubbub {
struct hubbub base;
@@ -63,6 +81,7 @@ struct dcn20_hubbub {
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index d3f7dd374d50..84d7ac5dd206 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -30,6 +30,8 @@
#include "reg_helper.h"
#include "basics/conversion.h"
+#define DC_LOGGER_INIT(logger)
+
#define REG(reg)\
hubp2->hubp_regs->reg
@@ -40,81 +42,6 @@
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
-void hubp2_update_dchub(
- struct hubp *hubp,
- struct dchub_init_data *dh_data)
-{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- if (REG(DCN_VM_FB_LOCATION_TOP) == 0)
- return;
-
- switch (dh_data->fb_mode) {
- case FRAME_BUFFER_MODE_ZFB_ONLY:
- /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
- REG_UPDATE(DCN_VM_FB_LOCATION_TOP,
- FB_TOP, 0);
-
- REG_UPDATE(DCN_VM_FB_LOCATION_BASE,
- FB_BASE, 0xFFFFFF);
-
- /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
- REG_UPDATE(DCN_VM_AGP_BASE,
- AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
-
- /*This field defines the bottom range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_BOT,
- AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
-
- /*This field defines the top range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_TOP,
- AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 24);
- break;
- case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
- /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
-
- /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
- REG_UPDATE(DCN_VM_AGP_BASE,
- AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
-
- /*This field defines the bottom range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_BOT,
- AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
-
- /*This field defines the top range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_TOP,
- AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 24);
- break;
- case FRAME_BUFFER_MODE_LOCAL_ONLY:
- /*Should not touch FB LOCATION (should be done by VBIOS)*/
-
- /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
- REG_UPDATE(DCN_VM_AGP_BASE,
- AGP_BASE, 0);
-
- /*This field defines the bottom range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_BOT,
- AGP_BOT, 0xFFFFFF);
-
- /*This field defines the top range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_TOP,
- AGP_TOP, 0);
- break;
- default:
- break;
- }
-
- dh_data->dchub_initialzied = true;
- dh_data->dchub_info_valid = false;
-}
-
void hubp2_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
@@ -156,7 +83,85 @@ void hubp2_program_deadline(
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+ /* DLG - Per hubp */
+ REG_SET_2(BLANK_OFFSET_0, 0,
+ REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
+
+ REG_SET(BLANK_OFFSET_1, 0,
+ MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
+
+ REG_SET(DST_DIMENSIONS, 0,
+ REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
+
+ REG_SET_2(DST_AFTER_SCALER, 0,
+ REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
+
+ REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
+ REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_SET(VBLANK_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_SET(NOM_PARAMETERS_4, 0,
+ DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_5, 0,
+ REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_SET_2(PER_LINE_DELIVERY, 0,
+ REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
+
+ REG_SET(VBLANK_PARAMETERS_2, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_SET(NOM_PARAMETERS_6, 0,
+ DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_7, 0,
+ REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_SET_2(DCN_TTU_QOS_WM, 0,
+ QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
+
+ REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
+
+ REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
REG_SET(FLIP_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_FLIP_L, dlg_attr->refcyc_per_pte_group_flip_l);
@@ -184,6 +189,39 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
+void hubp2_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
static void hubp2_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -196,7 +234,7 @@ static void hubp2_setup(
*/
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
- hubp1_program_requestor(hubp, rq_regs);
+ hubp2_program_requestor(hubp, rq_regs);
hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
}
@@ -283,11 +321,203 @@ static void hubp2_program_tiling(
PIPE_ALIGNED, 0);
}
+void hubp2_program_size(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ const struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
+ bool use_pitch_c = false;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+ use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
+ if (use_pitch_c) {
+ ASSERT(plane_size->chroma_pitch != 0);
+ /* Chroma pitch zero can cause system hang! */
+
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = plane_size->chroma_pitch - 1;
+ meta_pitch_c = dcc->meta_pitch_c - 1;
+ } else {
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = 0;
+ meta_pitch_c = 0;
+ }
+
+ if (!dcc->enable) {
+ meta_pitch = 0;
+ meta_pitch_c = 0;
+ }
+
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH,
+ PITCH, pitch, META_PITCH, meta_pitch);
+
+ use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN;
+ if (use_pitch_c)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp2_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t mirror;
+
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+ /* Program rotation angle and horz mirror - no mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 0,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_90)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 1,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_180)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 2,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_270)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 3,
+ H_MIRROR_EN, mirror);
+}
+
+void hubp2_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size independent_64b_blks)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
+ SECONDARY_SURFACE_DCC_EN, dcc_en,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+}
+
+void hubp2_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_2(HUBPRET_CONTROL,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+ /* Mapping is same as ipp programming (cnvc) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 22);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 112);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 113);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 114);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 118);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 119);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* don't see the need to program the xbar in DCN 1.0 */
+}
+
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
@@ -295,11 +525,11 @@ void hubp2_program_surface_config(
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp2_program_tiling(hubp2, tiling_info, format);
- hubp1_program_size(hubp, format, plane_size, dcc);
- hubp1_program_rotation(hubp, rotation, horizontal_mirror);
- hubp1_program_pixel_format(hubp, format);
+ hubp2_program_size(hubp, format, plane_size, dcc);
+ hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp2_program_pixel_format(hubp, format);
}
enum cursor_lines_per_chunk hubp2_get_lines_per_chunk(
@@ -652,30 +882,702 @@ void hubp2_set_flip_control_surface_gsl(struct hubp *hubp, bool enable)
REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, enable ? 1 : 0);
}
+bool hubp2_is_flip_pending(struct hubp *hubp)
+{
+ uint32_t flip_pending = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dc_plane_address earliest_inuse_address;
+
+ REG_GET(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_PENDING, &flip_pending);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
+
+ if (flip_pending)
+ return true;
+
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+ return false;
+}
+
+void hubp2_set_blank(struct hubp *hubp, bool blank)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE_2(DCHUBP_CNTL,
+ HUBP_BLANK_EN, blank_en,
+ HUBP_TTU_DISABLE, blank_en);
+
+ if (blank) {
+ uint32_t reg_val = REG_READ(DCHUBP_CNTL);
+
+ if (reg_val) {
+ /* init sequence workaround: in case HUBP is
+ * power gated, this wait would timeout.
+ *
+ * we just wrote reg_val to non-0, if it stays 0
+ * it means HUBP is gated
+ */
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ }
+
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = OPP_ID_INVALID;
+ }
+}
+
+void hubp2_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ int cursor_height = (int)hubp->curs_attr.height;
+ int cursor_width = (int)hubp->curs_attr.width;
+ uint32_t dst_x_offset;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ /*
+ * Guard against cursor_set_position() being called with invalid
+ * attributes
+ *
+ * TODO: Look at combining cursor_set_position() and
+ * cursor_set_attributes() into cursor_update()
+ */
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
+ // Rotated cursor width/height and hotspots tweaks for offset calculation
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ swap(cursor_height, cursor_width);
+ if (param->rotation == ROTATION_ANGLE_90) {
+ src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+ src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ src_x_offset = pos->x - param->viewport.x;
+ src_y_offset = pos->y - param->viewport.y;
+ }
+
+ if (param->mirror) {
+ x_hotspot = param->viewport.width - x_hotspot;
+ src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ }
+
+ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+ dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
+ dc_fixpt_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+ if (src_x_offset >= (int)param->viewport.width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + cursor_width <= 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
+ if (src_y_offset >= (int)param->viewport.height)
+ cur_en = 0; /* not visible beyond bottom edge*/
+
+ if (src_y_offset + cursor_height <= 0)
+ cur_en = 0; /* not visible beyond top edge*/
+
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, x_hotspot,
+ CURSOR_HOT_SPOT_Y, y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ /* TODO Handle surface pixel formats other than 4:4:4 */
+}
+
+void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t clk_enable = enable ? 1 : 0;
+
+ REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
+}
+
+void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
+}
+
+void hubp2_clear_underflow(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
+void hubp2_read_state_common(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ /* Requester */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
+
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
+
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
+
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
+
+ if (REG(PREFETCH_SETTINS))
+ REG_GET_2(PREFETCH_SETTINS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+ else
+ REG_GET_2(PREFETCH_SETTINGS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+
+ REG_GET_2(VBLANK_PARAMETERS_0,
+ DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
+
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_GET(PREFETCH_SETTINS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+ else
+ REG_GET(PREFETCH_SETTINGS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
+
+ REG_GET_2(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assume surf0 is luma and surf1 is chroma */
+
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
+
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
+
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_c);
+
+ /* Rest of hubp */
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+
+ REG_GET_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, &s->rotation_angle,
+ H_MIRROR_EN, &s->h_mirror_en);
+
+ REG_GET(DCSURF_TILING_CONFIG,
+ SW_MODE, &s->sw_mode);
+
+ REG_GET(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
+
+ REG_GET_3(DCHUBP_CNTL,
+ HUBP_BLANK_EN, &s->blank_en,
+ HUBP_TTU_DISABLE, &s->ttu_disable,
+ HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
+ REG_GET(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &s->min_ttu_vblank);
+
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+
+}
+
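+ /* Full read_state: the common fields above plus the DCHUBP_REQ_SIZE_CONFIG
+ * fields, which are kept out of hubp2_read_state_common presumably so that
+ * HUBP variants with a different REQ_SIZE_CONFIG layout can reuse the
+ * common path.
+ */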
+void hubp2_read_state(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ hubp2_read_state_common(hubp);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+}
+
+void hubp2_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
+
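+ /* The checks below read back each programmed field and log a mismatch
+ * whenever the hardware value differs from the DML-computed value.
+ */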
+ /* Requestor Regs */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+ if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+ if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+ if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+ if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+ if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+ if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+ if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+ if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+ if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+ if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+ if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+ if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+ if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+ if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
+ if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+ if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assume surf0 is luma and surf1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+}
+
static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp2_program_surface_config,
- .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_is_flip_pending = hubp2_is_flip_pending,
.hubp_setup = hubp2_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,
- .set_blank = hubp1_set_blank,
- .dcc_control = hubp1_dcc_control,
- .hubp_update_dchub = hubp2_update_dchub,
+ .set_blank = hubp2_set_blank,
+ .dcc_control = hubp2_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
- .set_cursor_position = hubp1_cursor_set_position,
- .hubp_clk_cntl = hubp1_clk_cntl,
- .hubp_vtg_sel = hubp1_vtg_sel,
+ .set_cursor_position = hubp2_cursor_set_position,
+ .hubp_clk_cntl = hubp2_clk_cntl,
+ .hubp_vtg_sel = hubp2_vtg_sel,
.dmdata_set_attributes = hubp2_dmdata_set_attributes,
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
- .hubp_read_state = hubp1_read_state,
- .hubp_clear_underflow = hubp1_clear_underflow,
+ .hubp_read_state = hubp2_read_state,
+ .hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .validate_dml_output = hubp2_validate_dml_output,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index d5acc348be22..8c04a3606a54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -38,12 +38,6 @@
SRI(PREFETCH_SETTINGS_C, HUBPREQ, id),\
SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id),\
SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BASE),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
SRI(CURSOR_SETTINGS, HUBPREQ, id), \
SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
@@ -72,8 +66,8 @@
SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\
SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB)
-#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\
- HUBP_MASK_SH_LIST_DCN(mask_sh),\
+#define HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\
HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
@@ -82,12 +76,6 @@
HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
- HUBP_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh),\
- HUBP_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh),\
- HUBP_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh),\
- HUBP_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh),\
- HUBP_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh),\
- HUBP_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh),\
HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
@@ -127,13 +115,21 @@
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh)
+/*DCN2.x and DCN1.x*/
+#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
+/*DCN2.0 specific*/
#define HUBP_MASK_SH_LIST_DCN20(mask_sh)\
HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh),\
HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh)
-
+/*DCN2.x */
#define DCN2_HUBP_REG_COMMON_VARIABLE_LIST \
HUBP_COMMON_REG_VARIABLE_LIST; \
uint32_t DMDATA_ADDRESS_HIGH; \
@@ -149,14 +145,20 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0;\
+ uint32_t VMID_SETTINGS_0
+
+
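+/* DCN2.1: the DCN2.0 list plus additional FLIP/VBLANK parameter registers */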
+#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
uint32_t FLIP_PARAMETERS_4;\
+ uint32_t FLIP_PARAMETERS_5;\
+ uint32_t FLIP_PARAMETERS_6;\
uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
- DCN_HUBP_REG_FIELD_LIST(type); \
+ DCN_HUBP_REG_FIELD_BASE_LIST(type); \
type DMDATA_ADDRESS_HIGH;\
type DMDATA_MODE;\
type DMDATA_UPDATED;\
@@ -180,17 +182,27 @@
type SURFACE_TRIPLE_BUFFER_ENABLE;\
type VMID
+#define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type REFCYC_PER_VM_GROUP_FLIP;\
+ type REFCYC_PER_VM_REQ_FLIP;\
+ type REFCYC_PER_VM_GROUP_VBLANK;\
+ type REFCYC_PER_VM_REQ_VBLANK;\
+ type REFCYC_PER_PTE_GROUP_FLIP_C; \
+ type REFCYC_PER_META_CHUNK_FLIP_C; \
+ type VM_GROUP_SIZE
+
struct dcn_hubp2_registers {
- DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
+ DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
};
struct dcn_hubp2_shift {
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+ DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
};
struct dcn_hubp2_mask {
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+ DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
};
struct dcn20_hubp {
@@ -217,10 +229,6 @@ void hubp2_setup_interdependent(
void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
-void hubp2_update_dchub(
- struct hubp *hubp,
- struct dchub_init_data *dh_data);
-
void hubp2_cursor_set_attributes(
struct hubp *hubp,
const struct dc_cursor_attributes *attr);
@@ -262,16 +270,53 @@ bool hubp2_program_surface_flip_and_addr(
const struct dc_plane_address *address,
bool flip_immediate);
+void hubp2_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size independent_64b_blks);
+
+void hubp2_program_size(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ const struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp2_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror);
+
+void hubp2_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format);
+
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level);
+bool hubp2_is_flip_pending(struct hubp *hubp);
+
+void hubp2_set_blank(struct hubp *hubp, bool blank);
+
+void hubp2_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+void hubp2_clk_cntl(struct hubp *hubp, bool enable);
+
+void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
+
+void hubp2_clear_underflow(struct hubp *hubp);
+
+void hubp2_read_state_common(struct hubp *hubp);
+
+void hubp2_read_state(struct hubp *hubp);
+
#endif /* __DC_MEM_INPUT_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index d810c8940129..a444fed94184 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -25,17 +25,15 @@
#include <linux/delay.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
-#include "dcn20/dcn20_resource.h"
-#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_resource.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dcn20/dcn20_dsc.h"
-#endif
+#include "dcn20_dsc.h"
+#include "dcn20_optc.h"
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
@@ -45,10 +43,9 @@
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
+#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
-#include "dcn10/dcn10_hubbub.h"
-#include "dcn10/dcn10_optc.h"
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
@@ -64,58 +61,166 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void bios_golden_init(struct dc *dc)
+static int find_free_gsl_group(const struct dc *dc)
{
- struct dc_bios *bp = dc->ctx->dc_bios;
- int i;
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
- /* initialize dcn global */
- bp->funcs->enable_disp_power_gating(bp,
- CONTROLLER_ID_D0, ASIC_PIPE_INIT);
+ return 0;
+}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- /* initialize dcn per pipe */
- bp->funcs->enable_disp_power_gating(bp,
- CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
+/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
+ * This is only used to lock pipes in the pipe-splitting case with immediate flips.
+ * Ordinary MPC/OTG locks suppress VUPDATE, which does not help with immediate flips,
+ * so we get tearing with freesync since we cannot flip multiple pipes
+ * atomically.
+ * We use GSL for this:
+ * - immediate flip: find first available GSL group if not already assigned
+ * program gsl with that group, set current OTG as master
+ * and always use 0x4 = AND of flip_ready from all pipes
+ * - vsync flip: disable GSL if used
+ *
+ * Groups in stream_res are stored as +1 from HW registers, i.e.
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
+ * Using a magic value like -1 would require tracking all inits/resets
+ */
+static void dcn20_setup_gsl_group_as_lock(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* Return if a group is already assigned. GSL set up for a vsync
+ * flip is unassigned when disabled, so a group cannot be "left over".
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
}
+
+ /* At this point, program the GSL state, whether enabling or disabling. */
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ pipe_ctx->stream_res.tg->funcs->set_gsl(
+ pipe_ctx->stream_res.tg,
+ &gsl);
+
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ } else
+ BREAK_TO_DEBUGGER();
+}
+
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate)
+{
+ if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ pipe_ctx->plane_res.hubp, flip_immediate);
+
}
-static void enable_power_gating_plane(
+void dcn20_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
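+ /* The REG() guards below skip power domains that are not defined on a
+ * given configuration, so the same sequence can serve the DCN2.x variants.
+ */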
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
- /*Do not power gate DCHUB5, should be left at HW default, power on permanently*/
- /*REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN10_POWER_FORCEON, force_on);*/
+ if (REG(DOMAIN8_PG_CONFIG))
+ REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
+ if (REG(DOMAIN10_PG_CONFIG))
+ REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
- /*Do not power gate DPP5, should be left at HW default, power on permanently*/
- /*REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN11_POWER_FORCEON, force_on);*/
+ if (REG(DOMAIN9_PG_CONFIG))
+ REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
+ if (REG(DOMAIN11_PG_CONFIG))
+ REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
+ /* DSC0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
+ if (REG(DOMAIN19_PG_CONFIG))
+ REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
+ if (REG(DOMAIN20_PG_CONFIG))
+ REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
+ if (REG(DOMAIN21_PG_CONFIG))
+ REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
}
-static void dcn20_dccg_init(struct dce_hwseq *hws)
+void dcn20_dccg_init(struct dce_hwseq *hws)
{
/*
* set MICROSECOND_TIME_BASE_DIV
@@ -139,7 +244,7 @@ static void dcn20_dccg_init(struct dce_hwseq *hws)
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
}
-static void disable_vga(
+void dcn20_disable_vga(
struct dce_hwseq *hws)
{
REG_WRITE(D1VGA_CONTROL, 0);
@@ -150,23 +255,24 @@ static void disable_vga(
REG_WRITE(D6VGA_CONTROL, 0);
}
-void dcn20_program_tripleBuffer(
+void dcn20_program_triple_buffer(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer)
+ bool enable_triple_buffer)
{
if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
pipe_ctx->plane_res.hubp,
- enableTripleBuffer);
+ enable_triple_buffer);
}
}
/* Blank pixel data during initialization */
-static void dcn20_init_blank(
+void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg)
{
+ struct dce_hwseq *hws = dc->hwseq;
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
@@ -197,6 +303,7 @@ static void dcn20_init_blank(
opp->funcs->opp_set_disp_pattern_generator(
opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
@@ -206,17 +313,17 @@ static void dcn20_init_blank(
bottom_opp->funcs->opp_set_disp_pattern_generator(
bottom_opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
otg_active_height);
}
- dcn20_hwss_wait_for_blank_complete(opp);
+ hws->funcs.wait_for_blank_complete(opp);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static void dcn20_dsc_pg_control(
+void dcn20_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on)
@@ -292,9 +399,8 @@ static void dcn20_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
-#endif
-static void dcn20_dpp_pg_control(
+void dcn20_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
@@ -368,7 +474,7 @@ static void dcn20_dpp_pg_control(
}
-static void dcn20_hubp_pg_control(
+void dcn20_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
@@ -442,34 +548,12 @@ static void dcn20_hubp_pg_control(
}
-
-static void dcn20_plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
-
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (REG(DC_IP_REQUEST_CNTL)) {
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, dpp->inst, false);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
- dpp->funcs->dpp_reset(dpp);
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 0);
- DC_LOG_DEBUG(
- "Power gated front end %d\n", pipe_ctx->pipe_idx);
- }
-}
-
-
-
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
-static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -488,9 +572,10 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
- dc->optimized_required = false; /* We're powering off, no need to optimize */
- dcn20_plane_atomic_power_down(dc, pipe_ctx);
+ hws->funcs.plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -514,212 +599,18 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
-static void dcn20_init_hw(struct dc *dc)
-{
- int i, j;
- struct abm *abm = dc->res_pool->abm;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct dce_hwseq *hws = dc->hwseq;
- struct dc_bios *dcb = dc->ctx->dc_bios;
- struct resource_pool *res_pool = dc->res_pool;
- struct dc_state *context = dc->current_state;
- struct dc_firmware_info fw_info = { { 0 } };
-
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
- dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
-
- // Initialize the dccg
- if (res_pool->dccg->funcs->dccg_init)
- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
- //Enable ability to power gate / don't force power on permanently
- enable_power_gating_plane(dc->hwseq, true);
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
- REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
-
- dcn20_dccg_init(hws);
-
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
- } else {
- if (!dcb->funcs->is_accelerated_mode(dcb)) {
- bios_golden_init(dc);
- if (dc->ctx->dc_bios->funcs->get_firmware_info(
- dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (res_pool->dccg && res_pool->hubbub) {
-
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
-
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- // Not all ASICs have DCCG sw component
- res_pool->ref_clocks.dccg_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- }
- }
- } else
- ASSERT_CRITICAL(false);
- disable_vga(dc->hwseq);
- }
-
- for (i = 0; i < dc->link_count; i++) {
- /* Power up AND update implementation according to the
- * required signal (which may be different from the
- * default signal on connector).
- */
- struct dc_link *link = dc->links[i];
-
- link->link_enc->funcs->hw_init(link->link_enc);
- }
- }
-
- /* Blank pixel data with OPP DPG */
- for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- if (tg->funcs->is_tg_enabled(tg)) {
- dcn20_init_blank(dc, tg);
- }
- }
-
- for (i = 0; i < res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->lock(tg);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct dpp *dpp = res_pool->dpps[i];
-
- dpp->funcs->dpp_reset(dpp);
- }
-
- /* Reset all MPCC muxes */
- res_pool->mpc->funcs->mpc_init(res_pool->mpc);
-
- /* initialize OPP mpc_tree parameter */
- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
- res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
- res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
- for (j = 0; j < MAX_PIPES; j++)
- res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct hubp *hubp = dc->res_pool->hubps[i];
- struct dpp *dpp = dc->res_pool->dpps[i];
-
- pipe_ctx->stream_res.tg = tg;
- pipe_ctx->pipe_idx = i;
-
- pipe_ctx->plane_res.hubp = hubp;
- pipe_ctx->plane_res.dpp = dpp;
- pipe_ctx->plane_res.mpcc_inst = dpp->inst;
- hubp->mpcc_id = dpp->inst;
- hubp->opp_id = OPP_ID_INVALID;
- hubp->power_gated = false;
- pipe_ctx->stream_res.opp = NULL;
-
- hubp->funcs->hubp_init(hubp);
-
- //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
- //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
- dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
- pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- /*to do*/
- hwss1_plane_atomic_disconnect(dc, pipe_ctx);
- }
-
- /* initialize DWB pointer to MCIF_WB */
- for (i = 0; i < res_pool->res_cap->num_dwb; i++)
- res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
-
- for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->unlock(tg);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- dc->hwss.disable_plane(dc, pipe_ctx);
-
- pipe_ctx->stream_res.tg = NULL;
- pipe_ctx->plane_res.hubp = NULL;
- }
-
- for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- tg->funcs->tg_init(tg);
- }
-
- /* end of FPGA. Below if real ASIC */
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- return;
-
-
- for (i = 0; i < res_pool->audio_count; i++) {
- struct audio *audio = res_pool->audios[i];
-
- audio->funcs->hw_init(audio);
- }
-
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
- }
-
- if (dmcu != NULL)
- dmcu->funcs->dmcu_init(dmcu);
-
- if (abm != NULL && dmcu != NULL)
- abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
-
- /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
-
-}
-
enum dc_status dcn20_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
-
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
-#endif
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
/* By the upper caller's loop, pipe0 is the parent pipe and is called first;
* the back end is set up for pipe0. Other child pipes share that back end
@@ -730,12 +621,17 @@ enum dc_status dcn20_enable_stream_timing(
/* TODO check if timing_changed, disable stream if timing changed */
- if (odm_pipe)
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
+
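+ /* opp_inst[] now holds every OPP in the ODM chain; with more than one,
+ * program the OTG for ODM combine across all of them.
+ */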
+ if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
- odm_pipe->stream_res.opp->inst,
- pipe_ctx->stream->timing.h_addressable/2,
- pipe_ctx->stream->timing.pixel_encoding);
+ opp_inst, opp_cnt,
+ &pipe_ctx->stream->timing);
+
/* HW program guide assume display already disable
* by unplug sequence. OTG assume stop.
*/
@@ -759,11 +655,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream->signal,
true);
- if (pipe_ctx->stream_res.tg->funcs->setup_global_lock)
- pipe_ctx->stream_res.tg->funcs->setup_global_lock(
- pipe_ctx->stream_res.tg);
-
- if (odm_pipe)
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
true);
@@ -772,7 +664,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.opp,
true);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, true);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -780,10 +672,12 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
+ params.vertical_total_mid = stream->adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, &params);
@@ -791,9 +685,13 @@ enum dc_status dcn20_enable_stream_timing(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+ /* Event triggers and num frames are initialized for DRR, but can be
+ * updated later for PSR use. Note that DRR trigger events are generated
+ * regardless of whether the num-frames threshold is met.
+ */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
@@ -814,6 +712,10 @@ void dcn20_program_output_csc(struct dc *dc,
{
struct mpc *mpc = dc->res_pool->mpc;
enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+
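+ /* Make sure the MPCC memory is powered up before programming the output CSC. */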
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
if (mpc->funcs->set_output_csc != NULL)
@@ -830,7 +732,7 @@ void dcn20_program_output_csc(struct dc *dc,
}
}
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -843,7 +745,9 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
* if programming for all pipes is required then remove condition
* pipe_ctx->top_pipe == NULL ,but then fix the diagnostic.
*/
- if ((pipe_ctx->top_pipe == NULL || dc_res_is_odm_head_pipe(pipe_ctx))
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
+ if (pipe_ctx->top_pipe == NULL
&& mpc->funcs->set_output_gamma && stream->out_transfer_func) {
if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
params = &stream->out_transfer_func->pwl;
@@ -867,7 +771,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static bool dcn20_set_blend_lut(
+bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -889,7 +793,7 @@ static bool dcn20_set_blend_lut(
return result;
}
-static bool dcn20_set_shaper_3dlut(
+bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -909,26 +813,20 @@ static bool dcn20_set_shaper_3dlut(
result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
if (plane_state->lut3d_func &&
- plane_state->lut3d_func->initialized == true)
+ plane_state->lut3d_func->state.bits.initialized == 1)
result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
&plane_state->lut3d_func->lut_3d);
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->initialized == true &&
- plane_state->lut3d_func->hdr_multiplier != 0)
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base,
- plane_state->lut3d_func->hdr_multiplier);
- else
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000);
-
return result;
}
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+bool dcn20_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
@@ -937,8 +835,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL || plane_state == NULL)
return false;
- dcn20_set_shaper_3dlut(pipe_ctx, plane_state);
- dcn20_set_blend_lut(pipe_ctx, plane_state);
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
@@ -983,6 +881,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
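+ /* PQ is handled as a user PWL: translate the curve into degamma
+ * hardware format and program it through the degamma block.
+ */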
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
default:
result = false;
break;
@@ -1003,16 +906,22 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
- struct pipe_ctx *combine_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
- if (combine_pipe)
+ if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
- combine_pipe->stream_res.opp->inst,
- pipe_ctx->plane_res.scl_data.h_active,
- pipe_ctx->stream->timing.pixel_encoding);
+ opp_inst, opp_cnt,
+ &pipe_ctx->stream->timing);
else
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
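update_odm now walks the next_odm_pipe list to collect every OPP instance and only enables combine mode when more than one OPP is found. A self-contained sketch of that walk, with a stripped-down pipe struct standing in for pipe_ctx:

/* Minimal sketch of the ODM pipe walk, with a stand-in pipe type. */
#include <stdio.h>

#define MAX_PIPES 6

struct pipe {
	int opp_inst;
	struct pipe *next_odm_pipe;
};

static int collect_opp_instances(struct pipe *head, int opp_inst[MAX_PIPES])
{
	int opp_cnt = 0;
	struct pipe *p;

	/* The head pipe always contributes its OPP; each chained ODM pipe
	 * adds one more, and the count decides combine vs. bypass. */
	for (p = head; p && opp_cnt < MAX_PIPES; p = p->next_odm_pipe)
		opp_inst[opp_cnt++] = p->opp_inst;

	return opp_cnt;
}

int main(void)
{
	struct pipe p1 = { .opp_inst = 1, .next_odm_pipe = NULL };
	struct pipe p0 = { .opp_inst = 0, .next_odm_pipe = &p1 };
	int insts[MAX_PIPES];
	int cnt = collect_opp_instances(&p0, insts);

	printf("%s with %d OPP(s)\n", cnt > 1 ? "ODM combine" : "ODM bypass", cnt);
	return 0;
}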
@@ -1028,23 +937,32 @@ void dcn20_blank_pixel_data(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = stream->output_color_space;
enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ int odm_cnt = 1;
int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ if (stream->link->test_pattern_enabled)
+ return;
+
/* get opp dpg blank color */
color_space_to_black_color(dc, color_space, &black_color);
- if (bot_odm_pipe)
- width = width / 2;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_cnt++;
+
+ width = width / odm_cnt;
if (blank) {
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
- if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
} else {
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
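When blanking, the active width (including borders) is split evenly across the ODM segments so each OPP generates its own slice of the test pattern. A worked sketch of that division, using plain integers in place of the stream timing:

/* Sketch of how the blank width is split across ODM segments. The simple
 * division mirrors the hunk above; types are illustrative. */
#include <stdio.h>

static int blank_width_per_segment(int h_addressable, int h_border_left,
				   int h_border_right, int odm_cnt)
{
	int width = h_addressable + h_border_left + h_border_right;

	/* Each OPP in the ODM chain generates its own slice of the pattern,
	 * so the total active width is divided by the number of segments. */
	return width / odm_cnt;
}

int main(void)
{
	/* e.g. a 3840-wide timing split across two ODM segments */
	printf("segment width: %d\n",
	       blank_width_per_segment(3840, 0, 0, 2));
	return 0;
}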
@@ -1052,16 +970,18 @@ void dcn20_blank_pixel_data(
stream_res->opp->funcs->opp_set_disp_pattern_generator(
stream_res->opp,
test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
height);
- if (bot_odm_pipe) {
- bot_odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
- bot_odm_pipe->stream_res.opp,
- dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE ?
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
+ odm_pipe->stream_res.opp,
+ dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
@@ -1106,6 +1026,9 @@ void dcn20_enable_plane(
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
+ /* initialize HUBP on power up */
+ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
+
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -1166,67 +1089,6 @@ void dcn20_enable_plane(
}
-static void dcn20_program_pipe(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- pipe_ctx->plane_state->update_flags.bits.full_update =
- context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn20_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
-}
-
-static void dcn20_program_all_pipe_in_tree(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- if (pipe_ctx->top_pipe == NULL) {
- bool blank = !is_pipe_tree_visible(pipe_ctx);
-
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
-
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
-
- if (dc->hwss.update_odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
- }
-
- if (pipe_ctx->plane_state != NULL)
- dcn20_program_pipe(dc, pipe_ctx, context);
-
- if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
-}
-
void dcn20_pipe_control_lock_global(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1266,14 +1128,22 @@ void dcn20_pipe_control_lock(
flip_immediate = pipe->plane_state->flip_immediate;
if (flip_immediate && lock) {
- while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) {
+ const int TIMEOUT_FOR_FLIP_PENDING = 100000;
+ int i;
+
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp))
+ break;
udelay(1);
}
- if (pipe->bottom_pipe != NULL)
- while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) {
+ if (pipe->bottom_pipe != NULL) {
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp))
+ break;
udelay(1);
}
+ }
}
/* In flip immediate and pipe splitting case, we need to use GSL
@@ -1297,100 +1167,492 @@ void dcn20_pipe_control_lock(
}
}
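The unbounded while loops that waited on hubp_is_flip_pending are replaced above with bounded polls of TIMEOUT_FOR_FLIP_PENDING iterations with a 1 us delay each. A sketch of that pattern, with a function pointer standing in for the hubp callback and the delay omitted:

/* Sketch of the bounded-poll pattern the hunk switches to; the callback
 * here is a stand-in for hubp_is_flip_pending. */
#include <stdbool.h>
#include <stdio.h>

#define TIMEOUT_FOR_FLIP_PENDING 100000

static int polls_left = 3; /* pretend the flip clears after 3 polls */

static bool fake_flip_pending(void)
{
	return polls_left-- > 0;
}

/* Returns true if the pending flip cleared before the timeout. */
static bool wait_for_flip(bool (*is_pending)(void))
{
	int i;

	for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
		if (!is_pending())
			return true;
		/* the real driver does udelay(1) here; omitted in this sketch */
	}
	return false; /* timed out instead of hanging forever */
}

int main(void)
{
	printf("flip cleared: %d\n", wait_for_flip(fake_flip_pending));
	return 0;
}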
-static void dcn20_apply_ctx_for_surface(
- struct dc *dc,
- const struct dc_stream_state *stream,
- int num_planes,
- struct dc_state *context)
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
+ new_pipe->update_flags.raw = 0;
- int i;
- struct timing_generator *tg;
- bool removed_pipe[6] = { false };
- bool interdependent_update = false;
- struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (!top_pipe_to_program)
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
return;
+ }
- tg = top_pipe_to_program->stream_res.tg;
+ /* Detect top pipe only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
- interdependent_update = top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update;
+ /*
+	 * Detect opp / tg change; only set on change, not on enable.
+	 * Assume mpcc inst = pipe index; if not, this code needs to be updated,
+	 * since mpcc is what is affected by these. In fact, the whole sequence
+	 * currently makes this assumption, with the hubp reset matched to the
+	 * same-index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp
+ || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
+ && old_pipe->bottom_pipe->plane_res.mpcc_inst
+ != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+}
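dcn20_detect_pipe_changes compares the old and new pipe programming field by field (and by memcmp for larger blocks) and raises one update flag per block that must be reprogrammed. A reduced model of that idea, with made-up field names rather than the real dlg/ttu/rq registers:

/* Simplified model of the per-pipe change detection: compare old and new
 * programming state and raise one flag bit per block that must be
 * reprogrammed. Field names are stand-ins for the DC structures. */
#include <stdio.h>
#include <string.h>

struct scaler_data { int h_taps, v_taps, ratio_num, ratio_den; };

struct pipe_state {
	int dppclk_khz;
	int vready_offset, vstartup_start;
	struct scaler_data scl;
};

union update_flags {
	struct { unsigned dppclk : 1, global_sync : 1, scaler : 1; } bits;
	unsigned raw;
};

static union update_flags detect_changes(const struct pipe_state *old_p,
					 const struct pipe_state *new_p)
{
	union update_flags f = { .raw = 0 };

	if (old_p->dppclk_khz != new_p->dppclk_khz)
		f.bits.dppclk = 1;
	if (old_p->vready_offset != new_p->vready_offset ||
	    old_p->vstartup_start != new_p->vstartup_start)
		f.bits.global_sync = 1;
	/* whole-struct comparison, as the driver does for scl_data */
	if (memcmp(&old_p->scl, &new_p->scl, sizeof(struct scaler_data)))
		f.bits.scaler = 1;

	return f;
}

int main(void)
{
	struct pipe_state old_p = { 540000, 10, 100, { 4, 4, 1, 1 } };
	struct pipe_state new_p = old_p;

	new_p.scl.h_taps = 8;
	printf("raw flags: 0x%x\n", detect_changes(&old_p, &new_p).raw);
	return 0;
}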
- if (interdependent_update)
- lock_all_pipes(dc, context, true);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool viewport_changed = false;
- if (num_planes == 0) {
- /* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ if (pipe_ctx->update_flags.bits.dppclk)
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
+
+	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe
+	 * is tied to. VTG is within DCHUBBUB, which is a common block shared by
+	 * each pipe HUBP. VTG has a 1:1 mapping with OTG. Each pipe HUBP selects
+	 * which VTG to use.
+	 */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ //TODO :for CNVC set scale and bias registers if necessary
+ build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
- /* Disconnect unused mpcc */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dcn20_disable_plane(dc, old_pipe_ctx);
+ if (pipe_ctx->update_flags.bits.mpcc
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ // MPCC inst is equal to pipe index in practice
+ int mpcc_inst = hubp->inst;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
}
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+ }
- if ((!pipe_ctx->plane_state ||
- pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
- old_pipe_ctx->plane_state &&
- old_pipe_ctx->stream_res.tg == tg) {
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
- DC_LOG_DC("Reset mpcc for pipe %d\n",
- old_pipe_ctx->pipe_idx);
- }
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+	/* Any updates are handled in the dc interface; just reapply the existing settings for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
+ && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
+
+	/* Any updates are handled in the dc interface; just need
+	 * to reapply the existing settings for plane enable / opp change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap*/
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /*call the dcn2 method which uses mpc csc*/
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
+ }
+
+ if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
+ hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
+
+ if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
+
+
+
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+}
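Before rebuilding the MPC blend tree, the hunk above scans every OPP for a pending disconnect of this MPCC instance, waits for the MPCC to go idle, and clears the flag. A sketch of that scan, with the opp/mpc objects reduced to plain arrays and a stub wait:

/* Sketch of the pending-MPCC-disconnect scan done before rebuilding the
 * blend tree; opp/mpc objects are reduced to arrays and a stub wait. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_OPPS  4
#define MAX_MPCCS 4

static bool disconnect_pending[MAX_OPPS][MAX_MPCCS];

static void mpc_wait_for_idle(int mpcc_inst)
{
	printf("wait for MPCC %d to go idle\n", mpcc_inst);
}

static void flush_pending_disconnect(int mpcc_inst)
{
	int opp;

	/* The scan stops at the first match, mirroring the break in the
	 * hunk above, then clears the pending flag for that OPP. */
	for (opp = 0; opp < MAX_OPPS; opp++) {
		if (disconnect_pending[opp][mpcc_inst]) {
			mpc_wait_for_idle(mpcc_inst);
			disconnect_pending[opp][mpcc_inst] = false;
			break;
		}
	}
}

int main(void)
{
	disconnect_pending[2][1] = true;
	flush_pending_disconnect(1); /* waits, then clears OPP 2's flag */
	return 0;
}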
+
+
+static void dcn20_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ /* Only need to unblank on top pipe */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
+ && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+
+ if (pipe_ctx->update_flags.bits.global_sync) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ dcn20_enable_plane(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
+	 * so only do gamma programming when powering on; an internal memcmp
+	 * avoids updating on slave planes.
+	 */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
}
+}
+
+static bool does_pipe_need_lock(struct pipe_ctx *pipe)
+{
+ if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->update_flags.raw)
+ return true;
+ if (pipe->bottom_pipe)
+ return does_pipe_need_lock(pipe->bottom_pipe);
+
+ return false;
+}
- if (num_planes > 0)
- dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+ bool pipe_locked[MAX_PIPES] = {false};
+ DC_LOGGER_INIT(dc->ctx->logger);
- /* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
+ context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
- if (interdependent_update)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (!context->res_ctx.pipe_ctx[i].top_pipe &&
+ does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- /* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
- !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
- continue;
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
+ pipe_locked[i] = true;
+ }
+
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
- pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
- pipe_ctx->plane_res.hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
- if (interdependent_update)
- lock_all_pipes(dc, context, false);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
+ /*
+	 * Program all updated pipes; order matters for mpcc setup. Start with the
+	 * top pipe and program all pipes that follow, in order.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ dcn20_program_pipe(dc, pipe, context);
+ pipe = pipe->bottom_pipe;
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ }
+ }
+
+ /* Unlock all locked pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (pipe_locked[i]) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
- dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ /*
+ * If we are enabling a pipe, we need to wait for pending clear as this is a critical
+	 * part of the enable operation; otherwise, DM may request an immediate flip which
+ * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
+ * is unsupported on DCN.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ mdelay(1);
+ }
+ }
+
+ /* WA to apply WM setting*/
+ if (dc->hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}
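dcn20_program_front_end_for_ctx replaces the per-stream apply_ctx_for_surface path with a fixed global ordering. The skeleton below restates that ordering with stub functions; it is only a summary of the sequence in the function above, not additional behaviour:

/* High-level sketch of the ordering in dcn20_program_front_end_for_ctx;
 * every step is a stub, the point is the sequence. */
#include <stdio.h>

static void carry_over_gsl(void)         { puts("1. carry over GSL groups"); }
static void detect_and_lock(void)        { puts("2. detect pipe changes, lock pipes"); }
static void blank_disabled_otgs(void)    { puts("3. OTG blank before disabling front ends"); }
static void disconnect_mpccs(void)       { puts("4. disconnect unused MPCCs"); }
static void program_pipes_top_down(void) { puts("5. program pipes, top pipe first, then writeback"); }
static void unlock_pipes(void)           { puts("6. unlock pipes"); }
static void disable_removed_planes(void) { puts("7. disable planes flagged for removal"); }
static void wait_pending_on_enable(void) { puts("8. wait for pending clear on newly enabled pipes"); }

int main(void)
{
	carry_over_gsl();
	detect_and_lock();
	blank_disabled_otgs();
	disconnect_mpccs();
	program_pipes_top_down();
	unlock_pipes();
	disable_removed_planes();
	wait_pending_on_enable();
	return 0;
}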
@@ -1400,16 +1662,16 @@ void dcn20_prepare_bandwidth(
{
struct hubbub *hubbub = dc->res_pool->hubbub;
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+
/* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
-
- dc->clk_mgr->funcs->update_clocks(
- dc->clk_mgr,
- context,
- false);
}
void dcn20_optimize_bandwidth(
@@ -1435,6 +1697,7 @@ bool dcn20_update_bandwidth(
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
@@ -1463,7 +1726,11 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1477,10 +1744,10 @@ bool dcn20_update_bandwidth(
return true;
}
-static void dcn20_enable_writeback(
+void dcn20_enable_writeback(
struct dc *dc,
- const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info)
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -1492,12 +1759,11 @@ static void dcn20_enable_writeback(
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
/* set the OPTC source mux */
- ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
- optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
@@ -1520,7 +1786,7 @@ void dcn20_disable_writeback(
mcif_wb->funcs->disable_mcif(mcif_wb);
}
-bool dcn20_hwss_wait_for_blank_complete(
+bool dcn20_wait_for_blank_complete(
struct output_pixel_processor *opp)
{
int counter;
@@ -1549,32 +1815,34 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
return hubp->funcs->dmdata_status_done(hubp);
}
-static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+
dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
- if (bot_odm_pipe)
- dcn20_dsc_pg_control(hws, bot_odm_pipe->stream_res.dsc->inst, true);
+ while (odm_pipe) {
+ dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
}
-#endif
}
-static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+
dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
- if (bot_odm_pipe)
- dcn20_dsc_pg_control(hws, bot_odm_pipe->stream_res.dsc->inst, false);
+ while (odm_pipe) {
+ dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
}
-#endif
}
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
@@ -1597,12 +1865,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
hubp->funcs->dmdata_set_attributes(hubp, &attr);
}
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx, int option)
-{
- dce110_disable_stream(pipe_ctx, option);
-}
-
-static void dcn20_init_vm_ctx(
+void dcn20_init_vm_ctx(
struct dce_hwseq *hws,
struct dc *dc,
struct dc_virtual_addr_space_config *va_config,
@@ -1624,7 +1887,7 @@ static void dcn20_init_vm_ctx(
dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
}
-static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
@@ -1637,6 +1900,7 @@ static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph
config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ config.page_table_default_page_addr = pa_config->page_table_default_page_addr;
return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
}
@@ -1667,8 +1931,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-
-static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
@@ -1702,30 +1965,35 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- params.odm = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct dce_hwseq *hws = link->dc->hwseq;
+ struct pipe_ctx *odm_pipe;
+ params.opp_cnt = 1;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ params.opp_cnt++;
+ }
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (optc1_is_two_pixels_per_containter(&stream->timing) || params.odm)
+ if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
- pipe_ctx->stream_res.stream_enc, params.odm);
+ pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
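In the unblank path the stream encoder is driven at half the pixel rate whenever ODM combine is active (more than one OPP) or the timing uses two pixels per container. A small sketch of that adjustment, with plain integers standing in for the stream timing:

/* Sketch of the pixel-clock adjustment in the unblank path. */
#include <stdbool.h>
#include <stdio.h>

static int encoder_pix_clk_100hz(int pix_clk_100hz, int opp_cnt,
				 bool two_pix_per_container)
{
	/* with ODM combine or two pixels per container, each encoder
	 * clock cycle carries two pixels, so halve the rate */
	if (two_pix_per_container || opp_cnt > 1)
		return pix_clk_100hz / 2;
	return pix_clk_100hz;
}

int main(void)
{
	/* 594 MHz timing split across two ODM OPPs */
	printf("%d\n", encoder_pix_clk_100hz(5940000, 2, false));
	/* single OPP, one pixel per container: unchanged */
	printf("%d\n", encoder_pix_clk_100hz(5940000, 1, false));
	return 0;
}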
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0)
start_line = 0;
@@ -1740,6 +2008,7 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -1747,17 +2016,36 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
- core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
- else if (pipe_ctx->stream_res.audio) {
- dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ link = pipe_ctx->stream->link;
+		/* DPMS may already be disabled, or the
+		 * dpms_off status may be incorrect due to the fastboot
+		 * feature. When the system resumes from S4 with only the
+		 * second screen, dpms_off would be true but the
+		 * VBIOS lit up eDP, so check the link status too.
+		 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ core_link_disable_stream(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- else if (pipe_ctx->stream_res.dsc)
+ else if (pipe_ctx->stream_res.dsc) {
dp_set_dsc_enable(pipe_ctx, false);
-#endif
+ }
/* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable
@@ -1770,6 +2058,10 @@ static void dcn20_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1784,11 +2076,12 @@ static void dcn20_reset_back_end_for_pipe(
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
-static void dcn20_reset_hw_ctx_wrap(
+void dcn20_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -1799,7 +2092,7 @@ static void dcn20_reset_hw_ctx_wrap(
if (!pipe_ctx_old->stream)
continue;
- if (pipe_ctx_old->top_pipe)
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
continue;
if (!pipe_ctx->stream ||
@@ -1807,19 +2100,42 @@ static void dcn20_reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating)
- dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
}
}
-static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const struct tg_color pipe_colors[6] = {
+ {MAX_TG_COLOR_VALUE, 0, 0}, // red
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
+ {0, MAX_TG_COLOR_VALUE, 0}, // blue
+ {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
+ {0, 0, MAX_TG_COLOR_VALUE}, // green
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
+ };
+
+ struct pipe_ctx *top_pipe = pipe_ctx;
+
+ while (top_pipe->top_pipe) {
+ top_pipe = top_pipe->top_pipe;
+ }
+
+ *color = pipe_colors[top_pipe->pipe_idx];
+}
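The MPC-tree visual-confirm helper walks up to the top pipe of the blend tree and indexes a fixed color table by its pipe index, so every pipe blending into the same tree shows the same color. A sketch with a cut-down pipe struct; the modulo guard is an addition in this sketch, not in the driver code above:

/* Sketch of the MPC-tree visual-confirm color lookup. */
#include <stdio.h>

#define NUM_COLORS 6

struct pipe {
	int pipe_idx;
	struct pipe *top_pipe;
};

static int tree_color_index(struct pipe *pipe)
{
	/* climb to the top pipe of the blend tree */
	while (pipe->top_pipe)
		pipe = pipe->top_pipe;
	/* all pipes blending into the same tree share the top pipe's color */
	return pipe->pipe_idx % NUM_COLORS;
}

int main(void)
{
	struct pipe top = { .pipe_idx = 2, .top_pipe = NULL };
	struct pipe bottom = { .pipe_idx = 5, .top_pipe = &top };

	printf("color index: %d\n", tree_color_index(&bottom));
	return 0;
}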
+
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = { {0} };
- bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
int mpcc_id;
struct mpcc *new_mpcc;
struct mpc *mpc = dc->res_pool->mpc;
@@ -1827,10 +2143,13 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
// input to MPCC is always RGB, by default leave black_color at 0
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- dcn10_get_hdr_visual_confirm_color(
+ hws->funcs.get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- dcn10_get_surface_visual_confirm_color(
+ hws->funcs.get_surface_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+ dcn20_get_mpctree_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
}
@@ -1864,12 +2183,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
- /* If there is no full update, don't need to touch MPC tree*/
- if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
- mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
- return;
- }
-
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
@@ -1894,156 +2207,191 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->mpcc_id = mpcc_id;
}
-static int find_free_gsl_group(const struct dc *dc)
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
- if (dc->res_pool->gsl_groups.gsl_0 == 0)
- return 1;
- if (dc->res_pool->gsl_groups.gsl_1 == 0)
- return 2;
- if (dc->res_pool->gsl_groups.gsl_2 == 0)
- return 3;
+ enum dc_lane_count lane_count =
+ pipe_ctx->stream->link->cur_link_settings.lane_count;
- return 0;
-}
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link *link = pipe_ctx->stream->link;
-/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
- * This is only used to lock pipes in pipe splitting case with immediate flip
- * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
- * so we get tearing with freesync since we cannot flip multiple pipes
- * atomically.
- * We use GSL for this:
- * - immediate flip: find first available GSL group if not already assigned
- * program gsl with that group, set current OTG as master
- * and always us 0x4 = AND of flip_ready from all pipes
- * - vsync flip: disable GSL if used
- *
- * Groups in stream_res are stored as +1 from HW registers, i.e.
- * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
- * Using a magic value like -1 would require tracking all inits/resets
- */
-void dcn20_setup_gsl_group_as_lock(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable)
-{
- struct gsl_params gsl;
- int group_idx;
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
- memset(&gsl, 0, sizeof(struct gsl_params));
+	/* For MST, multiple streams go to a single link.
+	 * Connect the DIG back_end to the front_end in enable_stream and
+	 * disconnect them during disable_stream.
+	 * This keeps the stream and link logic cleanly separated.
+	 */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
- if (enable) {
- /* return if group already assigned since GSL was set up
- * for vsync flip, we would unassign so it can't be "left over"
- */
- if (pipe_ctx->stream_res.gsl_group > 0)
- return;
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (link->dc->hwss.program_dmdata_engine)
+ link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
- group_idx = find_free_gsl_group(dc);
- ASSERT(group_idx != 0);
- pipe_ctx->stream_res.gsl_group = group_idx;
+ link->dc->hwss.update_info_frame(pipe_ctx);
- /* set gsl group reg field and mark resource used */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 1;
- dc->res_pool->gsl_groups.gsl_0 = 1;
- break;
- case 2:
- gsl.gsl1_en = 1;
- dc->res_pool->gsl_groups.gsl_1 = 1;
- break;
- case 3:
- gsl.gsl2_en = 1;
- dc->res_pool->gsl_groups.gsl_2 = 1;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return; // invalid case
- }
- gsl.gsl_master_en = 1;
- } else {
- group_idx = pipe_ctx->stream_res.gsl_group;
- if (group_idx == 0)
- return; // if not in use, just return
+ /* enable early control to avoid corruption on DP monitor*/
+ active_total_with_borders =
+ timing->h_addressable
+ + timing->h_border_left
+ + timing->h_border_right;
- pipe_ctx->stream_res.gsl_group = 0;
+ if (lane_count != 0)
+ early_control = active_total_with_borders % lane_count;
- /* unset gsl group reg field and mark resource free */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 0;
- dc->res_pool->gsl_groups.gsl_0 = 0;
- break;
- case 2:
- gsl.gsl1_en = 0;
- dc->res_pool->gsl_groups.gsl_1 = 0;
- break;
- case 3:
- gsl.gsl2_en = 0;
- dc->res_pool->gsl_groups.gsl_2 = 0;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return;
- }
- gsl.gsl_master_en = 0;
- }
+ if (early_control == 0)
+ early_control = lane_count;
- /* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
- pipe_ctx->stream_res.tg->funcs->set_gsl(
- pipe_ctx->stream_res.tg,
- &gsl);
+ tg->funcs->set_early_control(tg, early_control);
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
- } else
- BREAK_TO_DEBUGGER();
+ /* enable audio only within mode set */
+ if (pipe_ctx->stream_res.audio != NULL) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
+ }
}
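The early-control value programmed above is the active width including borders taken modulo the DP lane count, falling back to the lane count itself when the remainder is zero. A worked example of that arithmetic:

/* Worked example of the early-control computation from the hunk above. */
#include <stdio.h>

static unsigned int compute_early_control(unsigned int h_addressable,
					  unsigned int h_border_left,
					  unsigned int h_border_right,
					  unsigned int lane_count)
{
	unsigned int active_total_with_borders =
		h_addressable + h_border_left + h_border_right;
	unsigned int early_control = 0;

	if (lane_count != 0)
		early_control = active_total_with_borders % lane_count;
	if (early_control == 0)
		early_control = lane_count;

	return early_control;
}

int main(void)
{
	/* 1920 active, no borders, 4 lanes: 1920 % 4 == 0, so fall back to 4 */
	printf("%u\n", compute_early_control(1920, 0, 0, 4));
	/* 1366 active, 4 lanes: remainder 2 */
	printf("%u\n", compute_early_control(1366, 0, 0, 4));
	return 0;
}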
-static void dcn20_set_flip_control_gsl(
- struct pipe_ctx *pipe_ctx,
- bool flip_immediate)
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
- if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
- pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
- pipe_ctx->plane_res.hubp, flip_immediate);
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ bool enable = false;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
+ ? dmdata_dp
+ : dmdata_hdmi;
+
+ /* if using dynamic meta, don't set up generic infopackets */
+ if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
+ enable = true;
+ }
+
+ if (!hubp)
+ return;
+ if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
+ return;
+
+ stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
+ hubp->inst, mode);
}
-void dcn20_hw_sequencer_construct(struct dc *dc)
+void dcn20_fpga_init_hw(struct dc *dc)
{
- dcn10_hw_sequencer_construct(dc);
- dc->hwss.init_hw = dcn20_init_hw;
- dc->hwss.init_pipes = NULL;
- dc->hwss.unblank_stream = dcn20_unblank_stream;
- dc->hwss.update_plane_addr = dcn20_update_plane_addr;
- dc->hwss.disable_plane = dcn20_disable_plane,
- dc->hwss.enable_stream_timing = dcn20_enable_stream_timing;
- dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
- dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
- dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
- dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
- dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
- dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
- dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth;
- dc->hwss.update_bandwidth = dcn20_update_bandwidth;
- dc->hwss.enable_writeback = dcn20_enable_writeback;
- dc->hwss.disable_writeback = dcn20_disable_writeback;
- dc->hwss.program_output_csc = dcn20_program_output_csc;
- dc->hwss.update_odm = dcn20_update_odm;
- dc->hwss.blank_pixel_data = dcn20_blank_pixel_data;
- dc->hwss.dmdata_status_done = dcn20_dmdata_status_done;
- dc->hwss.disable_stream = dcn20_disable_stream;
- dc->hwss.init_sys_ctx = dcn20_init_sys_ctx;
- dc->hwss.init_vm_ctx = dcn20_init_vm_ctx;
- dc->hwss.disable_stream_gating = dcn20_disable_stream_gating;
- dc->hwss.enable_stream_gating = dcn20_enable_stream_gating;
- dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt;
- dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap;
- dc->hwss.update_mpcc = dcn20_update_mpcc;
- dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl;
- dc->hwss.did_underflow_occur = dcn10_did_underflow_occur;
+ int i, j;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct resource_pool *res_pool = dc->res_pool;
+ struct dc_state *context = dc->current_state;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ //Enable ability to power gate / don't force power on permanently
+ hws->funcs.enable_power_gating_plane(hws, true);
+
+ // Specific to FPGA dccg and registers
+ REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
+ REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
+
+ hws->funcs.dccg_init(hws);
+
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(REFCLK_CNTL, 0);
+ //
+
+
+ /* Blank pixel data with OPP DPG */
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ dcn20_init_blank(dc, tg);
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->lock(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dpp *dpp = res_pool->dpps[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ }
+
+ /* Reset all MPCC muxes */
+ res_pool->mpc->funcs->mpc_init(res_pool->mpc);
+
+ /* initialize OPP mpc_tree parameter */
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
+ res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ for (j = 0; j < MAX_PIPES; j++)
+ res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ pipe_ctx->stream_res.tg = tg;
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->plane_res.hubp = hubp;
+ pipe_ctx->plane_res.dpp = dpp;
+ pipe_ctx->plane_res.mpcc_inst = dpp->inst;
+ hubp->mpcc_id = dpp->inst;
+ hubp->opp_id = OPP_ID_INVALID;
+ hubp->power_gated = false;
+ pipe_ctx->stream_res.opp = NULL;
+
+ hubp->funcs->hubp_init(hubp);
+
+ //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+ /*to do*/
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ /* initialize DWB pointer to MCIF_WB */
+ for (i = 0; i < res_pool->res_cap->num_dwb; i++)
+ res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
+
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->unlock(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ dc->hwss.disable_plane(dc, pipe_ctx);
+
+ pipe_ctx->stream_res.tg = NULL;
+ pipe_ctx->plane_res.hubp = NULL;
+ }
+
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ tg->funcs->tg_init(tg);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 2b0409454073..02c9be5ebd47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -26,78 +26,112 @@
#ifndef __DC_HWSS_DCN20_H__
#define __DC_HWSS_DCN20_H__
-struct dc;
+#include "hw_sequencer_private.h"
-void dcn20_hw_sequencer_construct(struct dc *dc);
-
-enum dc_status dcn20_enable_stream_timing(
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context,
- struct dc *dc);
-
-void dcn20_blank_pixel_data(
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool blank);
-
+ struct dc_state *context);
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
void dcn20_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
int opp_id);
-
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_pipe_control_lock_global(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
-
void dcn20_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-
bool dcn20_update_bandwidth(
struct dc *dc,
struct dc_state *context);
-
+void dcn20_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+enum dc_status dcn20_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+void dcn20_disable_vga(
+ struct dce_hwseq *hws);
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn20_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn20_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn20_program_triple_buffer(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable_triple_buffer);
+void dcn20_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void dcn20_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst);
-
-bool dcn20_hwss_wait_for_blank_complete(
- struct output_pixel_processor *opp);
-
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream);
-
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state);
-
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
-
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
-
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx, int option);
-
-void dcn20_program_tripleBuffer(
- const struct dc *dc,
+void dcn20_init_vm_ctx(
+ struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
+void dcn20_set_flip_control_gsl(
struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer);
-
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx);
-
-void dcn20_pipe_control_lock_global(
+ bool flip_immediate);
+void dcn20_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+void dcn20_fpga_init_hw(struct dc *dc);
+bool dcn20_wait_for_blank_complete(
+ struct output_pixel_processor *opp);
+void dcn20_dccg_init(struct dce_hwseq *hws);
+int dcn20_init_sys_ctx(struct dce_hwseq *hws,
struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_setup_gsl_group_as_lock(const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable);
-void dcn20_pipe_control_lock(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context);
+ struct dc_phy_addr_space_config *pa_config);
+
#endif /* __DC_HWSS_DCN20_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
new file mode 100644
index 000000000000..d51e02fdab4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn20_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn20_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn20_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn20_funcs;
+ dc->hwseq->funcs = dcn20_private_funcs;
+
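+ /* FPGA (Maximus) environments use the FPGA-specific hardware init and leave init_pipes unhooked. */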
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
new file mode 100644
index 000000000000..12277797cd71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN20_INIT_H__
+#define __DC_DCN20_INIT_H__
+
+struct dc;
+
+void dcn20_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN20_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index f495582e9e87..e4ac73035c84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = {
void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_LOG_DSC("%s FEC at link encoder inst %d",
enable ? "Enabling" : "Disabling", enc->id.enum_id);
-#endif
REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
}
@@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc)
return (active != 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
@@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
+ REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
}
-#endif
static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
@@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc)
}
static const struct link_encoder_funcs dcn20_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.read_state = link_enc2_read_state,
-#endif
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
@@ -341,6 +336,7 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
+ .get_dig_mode = dcn10_get_dig_mode,
.get_dig_frontend = dcn10_get_dig_frontend,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 3736b5548a25..8cab8107fd94 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -33,7 +33,142 @@
SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
#define UNIPHY_MASK_SH_LIST(mask_sh)\
- LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh)
+ LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -63,6 +198,49 @@
SRI(CLOCK_ENABLE, SYMCLK, id), \
SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+ SRI(DIG_LANE_ENABLE, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+
+#define DPCS_DCN2_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+ LE_DCN10_REG_LIST(id), \
+ SR(DCIO_SOFT_RESET)
+
struct mpll_cfg {
uint32_t mpllb_ana_v2i;
uint32_t mpllb_ana_freq_vco;
@@ -91,6 +269,13 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
+
+
};
struct dpcssys_phy_seq_cfg {
@@ -151,9 +336,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready);
bool enc2_fec_is_active(struct link_encoder *enc);
void enc2_hw_init(struct link_encoder *enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s);
-#endif
void dcn20_link_encoder_enable_dp_output(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 240749e4cf83..de9c857ab3e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -33,6 +33,9 @@
#define REG(reg)\
mpc20->mpc_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
mpc20->base.ctx
@@ -132,19 +135,33 @@ void mpc2_set_output_csc(
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct color_matrices_reg ocsc_regs;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -157,10 +174,13 @@ void mpc2_set_output_csc(
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
+
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
void mpc2_set_ocsc_default(
@@ -169,14 +189,16 @@ void mpc2_set_ocsc_default(
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
regval = find_color_matrix(color_space, &arr_size);
@@ -185,6 +207,19 @@ void mpc2_set_ocsc_default(
return;
}
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -203,6 +238,8 @@ void mpc2_set_ocsc_default(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
static void mpc2_ogam_get_reg_field(
@@ -233,14 +270,14 @@ static void mpc2_ogam_get_reg_field(
reg->masks.exp_resion_start_segment = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
}
-static void mpc20_power_on_ogam_lut(
+void mpc20_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
- MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
+ MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
}
@@ -345,6 +382,9 @@ static void mpc20_program_ogam_pwl(
uint32_t i;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ PERF_TRACE();
+ REG_SEQ_START();
+
for (i = 0 ; i < num; i++) {
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
@@ -368,6 +408,11 @@ void apply_DEDCN20_305_wa(
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) {
/*hw fixed in new review*/
return;
@@ -390,10 +435,16 @@ void mpc2_set_output_gamma(
enum dc_lut_mode next_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
if (params == NULL) {
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
+
current_mode = mpc20_get_ogam_current(mpc, mpcc_id);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
@@ -435,24 +486,28 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled;
- REG_GET(MPCC_STATUS[mpcc_id], MPCC_DISABLED, &mpc_disabled);
-
- if (mpc_disabled) {
- ASSERT(0);
- return;
- }
REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
- if (top_sel == 0xf) {
- REG_GET_2(MPCC_STATUS[mpcc_id],
- MPCC_BUSY, &mpc_busy,
- MPCC_IDLE, &mpc_idle);
+ REG_GET_3(MPCC_STATUS[mpcc_id],
+ MPCC_BUSY, &mpc_busy,
+ MPCC_IDLE, &mpc_idle,
+ MPCC_DISABLED, &mpc_disabled);
- ASSERT(mpc_busy == 0);
- ASSERT(mpc_idle == 1);
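+ /* TOP_SEL == 0xf means no DPP drives this MPCC: it must be idle and disabled; otherwise it must be in use. */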
+ if (top_sel == 0xf) {
+ ASSERT(!mpc_busy);
+ ASSERT(mpc_idle);
+ ASSERT(mpc_disabled);
+ } else {
+ ASSERT(!mpc_disabled);
+ ASSERT(!mpc_idle);
}
+
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
@@ -488,6 +543,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
.wait_for_idle = mpc2_assert_idle_mpcc,
@@ -498,6 +554,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_output_csc = mpc2_set_output_csc,
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
+ .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index 9750095d2d73..c78fd5123497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -80,6 +80,10 @@
SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
+#define MPC_DBG_REG_LIST_DCN2_0() \
+ SR(MPC_OCSC_TEST_DEBUG_DATA),\
+ SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
#define MPC_REG_VARIABLE_LIST_DCN2_0 \
MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
@@ -118,6 +122,8 @@
uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+ uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+ uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
uint32_t CSC_MODE[MAX_OPP]; \
uint32_t CSC_C11_C12_A[MAX_OPP]; \
uint32_t CSC_C33_C34_A[MAX_OPP]; \
@@ -134,6 +140,7 @@
SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
@@ -159,6 +166,7 @@
SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\
SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_WRITE_EN_MASK, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_RAM_SEL, mask_sh),\
@@ -173,6 +181,20 @@
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+/*
+ * DCN2 MPC_OCSC debug status register:
+ *
+ * Status index including current OCSC Mode is 1
+ * OCSC Mode: [1..0]
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
+
#define MPC_REG_FIELD_LIST_DCN2_0(type) \
MPC_REG_FIELD_LIST(type)\
type MPCC_BG_BPC;\
@@ -180,6 +202,8 @@
type MPCC_TOP_GAIN;\
type MPCC_BOT_GAIN_INSIDE;\
type MPCC_BOT_GAIN_OUTSIDE;\
+ type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+ type MPC_OCSC_TEST_DEBUG_INDEX;\
type MPC_OCSC_MODE;\
type MPC_OCSC_C11_A;\
type MPC_OCSC_C12_A;\
@@ -217,7 +241,8 @@
type MPC_OUT_DENORM_CLAMP_MIN_G_Y;\
type MPC_OUT_DENORM_CLAMP_MAX_B_CB;\
type MPC_OUT_DENORM_CLAMP_MIN_B_CB;\
- type MPCC_DISABLED;
+ type MPCC_DISABLED;\
+ type MPCC_OGAM_MEM_PWR_DIS;
struct dcn20_mpc_registers {
MPC_REG_VARIABLE_LIST_DCN2_0
@@ -282,4 +307,5 @@ void mpc2_set_output_gamma(
void mpc2_assert_idle_mpcc(struct mpc *mpc, int id);
void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id);
+void mpc20_power_on_ogam_lut(struct mpc *mpc, int mpcc_id, bool power_on);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index d9e7c711a71c..023cc71fad0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -41,6 +41,7 @@
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
@@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator(
TEST_PATTERN_DYN_RANGE_CEA :
TEST_PATTERN_DYN_RANGE_VESA);
+ switch (color_space) {
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_RGB:
+ default:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ break;
+ }
+
REG_UPDATE_6(DPG_CONTROL,
DPG_EN, 1,
- DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB,
+ DPG_MODE, mode,
DPG_DYNAMIC_RANGE, dyn_range,
DPG_BIT_DEPTH, bit_depth,
DPG_VRES, 6,
@@ -332,7 +346,6 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.dpg_is_blanked = opp2_dpg_is_blanked,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
- .opp_convert_pti = NULL,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index abd8de9a78f8..4093bec172c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20,
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 1ae973962d53..d875b0c38fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
+ REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
return true;
}
@@ -167,7 +172,6 @@ void optc2_set_gsl_source_select(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
int x_position,
@@ -191,15 +195,6 @@ void optc2_set_dsc_config(struct timing_generator *optc,
uint32_t dsc_slice_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t data_format = 0;
- /* skip if dsc mode is not changed */
- data_format = dm_read_reg(CTX, REG(OPTC_DATA_FORMAT_CONTROL));
-
- data_format = data_format & 0x30; /* bit5:4 */
- data_format = data_format >> 4;
-
- if (data_format == dsc_mode)
- return;
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL,
OPTC_DSC_MODE, dsc_mode);
@@ -210,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
-#endif
-/**
- * PTI i think is already done somewhere else for 2ka
- * (opp?, please double check.
- * OPTC side only has 1 register to set for PTI_ENABLE)
- */
+/*TEMP: Need to figure out inheritance model here.*/
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
@@ -224,28 +218,31 @@ void optc2_set_odm_bypass(struct timing_generator *optc,
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t h_div_2 = 0;
- optc1->comb_opp_id = 0xf;
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 0,
OPTC_SEG0_SRC_SEL, optc->inst,
OPTC_SEG1_SRC_SEL, 0xf);
REG_WRITE(OTG_H_TIMING_CNTL, 0);
- h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+ h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
}
-void optc2_set_odm_combine(struct timing_generator *optc, int combine_opp_id,
- int mpcc_hactive, enum dc_pixel_encoding pixel_encoding)
+void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
- int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
+ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
+ / opp_cnt;
+ uint32_t memory_mask;
uint32_t data_fmt = 0;
+ ASSERT(opp_cnt == 2);
+
/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
* Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
@@ -253,27 +250,35 @@ void optc2_set_odm_combine(struct timing_generator *optc, int combine_opp_id,
* MASTER_UPDATE_LOCK_DB_X, 160,
* MASTER_UPDATE_LOCK_DB_Y, 240);
*/
+
+ /* 2 pieces of memory are required for widths up to 5120 pixels, 4 for up to 8192,
+ * however, for ODM combine we can simplify by always using 4.
+ * To make sure there's no overlap, each instance "reserves" 2 memories and
+ * they are uniquely combined here.
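+ * For example, opp_id[0] = 1 and opp_id[1] = 2 (illustrative values) give a mask of 0x3c.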
+ */
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+
if (REG(OPTC_MEMORY_CONFIG))
REG_SET(OPTC_MEMORY_CONFIG, 0,
- OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
+ OPTC_MEM_SEL, memory_mask);
- if (pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
data_fmt = 1;
- else if (pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
data_fmt = 2;
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
- OPTC_SEG0_SRC_SEL, optc->inst,
- OPTC_SEG1_SRC_SEL, combine_opp_id);
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1]);
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_BY2, 1);
- optc1->comb_opp_id = combine_opp_id;
+ optc1->opp_count = opp_cnt;
}
void optc2_get_optc_source(struct timing_generator *optc,
@@ -293,6 +298,10 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
}
void optc2_set_dwb_source(struct timing_generator *optc,
@@ -339,65 +348,6 @@ void optc2_triplebuffer_unlock(struct timing_generator *optc)
}
-
-void optc2_setup_global_lock(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t v_blank_start = 0;
- uint32_t h_blank_start = 0, h_total = 0;
-
- REG_SET(OTG_GLOBAL_CONTROL1, 0, MASTER_UPDATE_LOCK_DB_EN, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL2, 0, DIG_UPDATE_LOCATION, 20);
-
- REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start);
-
- REG_GET(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start);
-
- REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &h_total);
- REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
- MASTER_UPDATE_LOCK_DB_X,
- h_blank_start - 200 - 1,
- MASTER_UPDATE_LOCK_DB_Y,
- v_blank_start - 1);
-}
-
-void optc2_lock_global(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
- REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
- OTG_MASTER_UPDATE_LOCK, 1);
-
- /* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
-}
-
-void optc2_lock(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0);
-
- REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
- REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
- OTG_MASTER_UPDATE_LOCK, 1);
-
- /* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
-}
-
void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -440,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
- MANUAL_FLOW_CONTROL, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL2, 0,
- MANUAL_FLOW_CONTROL_SEL, optc->inst);
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
- OTG_TRIGA_SOURCE_SELECT, 22,
+ OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
@@ -492,10 +436,8 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.triplebuffer_lock = optc2_triplebuffer_lock,
.triplebuffer_unlock = optc2_triplebuffer_unlock,
.disable_reset_trigger = optc1_disable_reset_trigger,
- .lock = optc2_lock,
+ .lock = optc1_lock,
.unlock = optc1_unlock,
- .lock_global = optc2_lock_global,
- .setup_global_lock = optc2_setup_global_lock,
.lock_doublebuffer_enable = optc2_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc2_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
@@ -511,9 +453,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.set_dsc_config = optc2_set_dsc_config,
-#endif
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
@@ -522,7 +462,8 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_gsl_source_select = optc2_set_gsl_source_select,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
- .setup_manual_trigger = optc2_setup_manual_trigger
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn20_timing_generator_init(struct optc *optc1)
@@ -537,6 +478,5 @@ void dcn20_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue.
optc1->min_v_sync_width = 1;
- optc1->comb_opp_id = 0xf;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index ebf07c582da2..239cc40ae474 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -86,18 +86,16 @@ void optc2_set_gsl_source_select(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void optc2_set_dsc_config(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
-#endif
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
-void optc2_set_odm_combine(struct timing_generator *optc, int combine_opp_id,
- int mpcc_hactive, enum dc_pixel_encoding pixel_encoding);
+void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
void optc2_get_optc_source(struct timing_generator *optc,
uint32_t *num_of_src_opp,
@@ -106,11 +104,9 @@ void optc2_get_optc_source(struct timing_generator *optc,
void optc2_triplebuffer_lock(struct timing_generator *optc);
void optc2_triplebuffer_unlock(struct timing_generator *optc);
-void optc2_lock(struct timing_generator *optc);
-void optc2_lock_global(struct timing_generator *optc);
-void optc2_setup_global_lock(struct timing_generator *optc);
void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
-
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index b949e202d6cb..85f90f3e24cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,6 +29,8 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn20_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
@@ -45,9 +48,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn20_opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dcn20_dsc.h"
-#endif
#include "dcn20_link_encoder.h"
#include "dcn20_stream_encoder.h"
@@ -59,11 +60,14 @@
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
+#include "dc_link_ddc.h"
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
@@ -82,7 +86,6 @@
#include "amdgpu_socbb.h"
-#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
struct _vcs_dpi_ip_params_st dcn2_0_ip = {
@@ -93,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#else
- .num_dsc = 0,
-#endif
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 84,
@@ -156,8 +155,187 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.xfc_fill_constant_bytes = 0,
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { 0 };
-
+struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .num_dsc = 5,
+ .rob_buffer_size_kbytes = 168,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 4,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0,
+ .line_buffer_fixed_bpp = 0,
+ .dcc_supported = true,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 12,
+ .writeback_max_vscl_taps = 12,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 5,
+ .max_num_dpp = 5,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 8,
+ .max_vscl_ratio = 8,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 32, //
+ .dppclk_delay_subtotal = 77, //
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 8,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 87, //
+ .dcfclk_cstate_latency = 10, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .xfc_supported = true,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .ptoi_supported = 0
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
+ /* Defaults that get patched on driver load from firmware. */
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 8960.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 11104.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 14000.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 16000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ /*Extra state, no dispclk ramping*/
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 2,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 16,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404.0,
+ .dummy_pstate_latency_us = 5.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 4,
+ .use_urgent_burst_bw = 0
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -314,7 +492,7 @@ static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -373,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -386,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
};
#define ipp_regs(id)\
@@ -457,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -469,11 +651,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
@@ -523,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_OUT_MUX_REG_LIST_DCN2_0(4),
MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define tg_regs(id)\
@@ -620,7 +807,42 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
@@ -642,7 +864,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-#endif
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN2()
@@ -666,9 +887,7 @@ static const struct resource_caps res_cap_nv10 = {
.num_dwb = 1,
.num_ddc = 6,
.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#endif
};
static const struct dc_plane_cap plane_cap = {
@@ -695,6 +914,18 @@ static const struct dc_plane_cap plane_cap = {
.fp16 = 1
}
};
+static const struct resource_caps res_cap_nv14 = {
+ .num_timing_generator = 5,
+ .num_opp = 5,
+ .num_video_plane = 5,
+ .num_audio = 6,
+ .num_stream_encoder = 5,
+ .num_pll = 5,
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_dsc = 5,
+};
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
@@ -703,7 +934,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
@@ -800,7 +1031,10 @@ struct dce_aux *dcn20_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -920,14 +1154,18 @@ struct link_encoder *dcn20_link_encoder_create(
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc20)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn20_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -955,6 +1193,7 @@ struct clock_source *dcn20_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -984,6 +1223,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
if (!enc1)
return NULL;
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ if (eng_id >= ENGINE_ID_DIGD)
+ eng_id++;
+ }
+
dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
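
The Navi14 check above bumps the engine id past ENGINE_ID_DIGD before picking the stream encoder register set, which suggests that the DIG D instance is not present on that part. A toy sketch of the remapping, with made-up engine constants (the real enum lives in the DC headers):

    #include <stdio.h>

    /* Assumed, simplified engine numbering (A=0 .. F=5). With no DIG D on
     * the ASIC, every engine from D upwards shifts to the next block. */
    enum { ENG_A, ENG_B, ENG_C, ENG_D, ENG_E, ENG_F };

    static int remap_eng_id_for_nv14(int eng_id, int is_navi14)
    {
        if (is_navi14 && eng_id >= ENG_D)
            eng_id++;   /* skip the missing DIG D register block */
        return eng_id;
    }

    int main(void)
    {
        printf("requested DIGD on NV14 uses register set %d\n",
               remap_eng_id_for_nv14(ENG_D, 1));
        return 0;
    }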
@@ -1031,13 +1275,14 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
@@ -1060,9 +1305,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
*dsc = NULL;
}
-#endif
-static void destruct(struct dcn20_resource_pool *pool)
+static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
{
unsigned int i;
@@ -1073,12 +1317,10 @@ static void destruct(struct dcn20_resource_pool *pool)
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
-#endif
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -1171,6 +1413,8 @@ static void destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ if (pool->base.oem_device != NULL)
+ dal_ddc_service_destroy(&pool->base.oem_device);
}
struct hubp *dcn20_hubp_create(
@@ -1197,7 +1441,11 @@ static void get_pixel_clock_parameters(
struct pixel_clk_params *pixel_clk_params)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
- bool odm_combine = dc_res_get_odm_bottom_pipe(pipe_ctx) != NULL;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
@@ -1215,7 +1463,9 @@ static void get_pixel_clock_parameters(
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
pixel_clk_params->color_depth = COLOR_DEPTH_888;
- if (optc1_is_two_pixels_per_containter(&stream->timing) || odm_combine)
+ if (opp_cnt == 4)
+ pixel_clk_params->requested_pix_clk_100hz /= 4;
+ else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
pixel_clk_params->requested_pix_clk_100hz /= 2;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
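
With ODM now expressed through the next_odm_pipe list, the requested pixel clock is derived from the OPP count of the chain: divided by 4 for 4-to-1 combine, by 2 for 2-to-1 combine or two-pixels-per-container timings. An illustrative helper, not driver code, with values in the 100 Hz units dc uses:

    #include <stdio.h>

    static unsigned int scaled_pix_clk_100hz(unsigned int pix_clk_100hz,
                                             int opp_cnt,
                                             int two_pix_per_container)
    {
        if (opp_cnt == 4)
            return pix_clk_100hz / 4;
        if (two_pix_per_container || opp_cnt == 2)
            return pix_clk_100hz / 2;
        return pix_clk_100hz;
    }

    int main(void)
    {
        /* 594 MHz timing split across 2 OPPs -> each OPP clocks at half. */
        printf("%u\n", scaled_pix_clk_100hz(5940000, 2, 0));
        return 0;
    }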
@@ -1281,17 +1531,23 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
return status;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
static void acquire_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
- struct display_stream_compressor **dsc)
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
{
int i;
ASSERT(*dsc == NULL);
*dsc = NULL;
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
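
acquire_dsc() now takes the pipe index so that pools with one DSC per OPP can hand out dscs[pipe_idx] directly, while smaller pools (num_dsc < num_opp) still fall back to the first-free scan. A self-contained sketch of that policy with invented pool sizes:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_DSC 6
    #define NUM_OPP 6

    /* 1:1 DSC/OPP pools hand out dsc[pipe_idx]; smaller pools scan for the
     * first free DSC (names and sizes here are illustrative only). */
    static int acquire_dsc_idx(bool acquired[], int num_dsc, int num_opp,
                               int pipe_idx)
    {
        int i;

        if (num_dsc == num_opp) {
            acquired[pipe_idx] = true;
            return pipe_idx;
        }
        for (i = 0; i < num_dsc; i++)
            if (!acquired[i]) {
                acquired[i] = true;
                return i;
            }
        return -1;      /* no DSC available */
    }

    int main(void)
    {
        bool acquired[NUM_DSC] = { false };

        printf("pipe 3 gets DSC %d\n",
               acquire_dsc_idx(acquired, NUM_DSC, NUM_OPP, 3));
        return 0;
    }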
@@ -1315,11 +1571,9 @@ static void release_dsc(struct resource_context *res_ctx,
}
}
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
struct dc_state *dc_ctx,
struct dc_stream_state *dc_stream)
{
@@ -1334,11 +1588,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
+ if (pipe_ctx->stream_res.dsc)
+ continue;
+
+ acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
- dm_output_to_console("No DSCs available\n");
result = DC_NO_DSC_RESOURCE;
}
@@ -1359,24 +1615,17 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
- break;
+
+ if (pipe_ctx->stream_res.dsc)
+ release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
-
- if (pipe_ctx->stream_res.dsc) {
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
-
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
- if (odm_pipe)
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
- }
-
- return DC_OK;
+ else
+ return DC_OK;
}
-#endif
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
@@ -1388,11 +1637,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Get a DSC if required and available */
if (result == DC_OK && dc_stream->timing.flags.DSC)
- result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
-#endif
+ result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
@@ -1405,9 +1652,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_
{
enum dc_status result = DC_OK;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
-#endif
return result;
}
@@ -1473,17 +1718,87 @@ static void swizzle_to_dml_params(
}
}
-static bool dcn20_split_stream_for_combine(
+bool dcn20_split_stream_for_odm(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe)
+{
+ int pipe_idx = next_odm_pipe->pipe_idx;
+
+ *next_odm_pipe = *prev_odm_pipe;
+
+ next_odm_pipe->pipe_idx = pipe_idx;
+ next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
+ next_odm_pipe->stream_res.dsc = NULL;
+ if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
+ next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
+ next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
+ }
+ prev_odm_pipe->next_odm_pipe = next_odm_pipe;
+ next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
+ ASSERT(next_odm_pipe->top_pipe == NULL);
+
+ if (prev_odm_pipe->plane_state) {
+ struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
+ int new_width;
+
+ /* HACTIVE halved for odm combine */
+ sd->h_active /= 2;
+ /* Calculate new vp and recout for left pipe */
+ /* Need at least 16 pixels width per side */
+ if (sd->recout.x + 16 >= sd->h_active)
+ return false;
+ new_width = sd->h_active - sd->recout.x;
+ sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->recout.width - new_width));
+ sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->recout.width - new_width));
+ sd->recout.width = new_width;
+
+ /* Calculate new vp and recout for right pipe */
+ sd = &next_odm_pipe->plane_res.scl_data;
+ /* HACTIVE halved for odm combine */
+ sd->h_active /= 2;
+ /* Need at least 16 pixels width per side */
+ if (new_width <= 16)
+ return false;
+ new_width = sd->recout.width + sd->recout.x - sd->h_active;
+ sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->recout.width - new_width));
+ sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->recout.width - new_width));
+ sd->recout.width = new_width;
+ sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->h_active - sd->recout.x));
+ sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->h_active - sd->recout.x));
+ sd->recout.x = 0;
+ }
+ next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
+ if (next_odm_pipe->stream->timing.flags.DSC == 1) {
+ acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+ ASSERT(next_odm_pipe->stream_res.dsc);
+ if (next_odm_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+
+ return true;
+}
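
In dcn20_split_stream_for_odm() each half keeps h_active/2; the left pipe's recout is clipped to new_width = h_active - recout.x, the right pipe gets the remainder with recout.x reset to 0, and the viewports shrink by the horizontal ratio times the clipped width. A worked example with a 1:1 scaling ratio, so viewport deltas equal recout deltas, using made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        int h_active = 3840 / 2;        /* per-pipe HACTIVE after the split */
        int recout_x = 100, recout_w = 3640;
        int left_w, right_w, right_x;

        left_w  = h_active - recout_x;            /* 1820 on the left pipe  */
        right_w = recout_w + recout_x - h_active; /* 1820 on the right pipe */
        right_x = 0;                              /* right recout restarts  */

        printf("left %d@%d  right %d@%d\n", left_w, recout_x, right_w, right_x);
        return 0;
    }

The real code additionally scales these recout deltas by the fixed-point horizontal ratios to move the viewports, which the integer example above omits.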
+
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
- struct pipe_ctx *secondary_pipe,
- bool is_odm_combine)
+ struct pipe_ctx *secondary_pipe)
{
int pipe_idx = secondary_pipe->pipe_idx;
- struct scaler_data *sd = &primary_pipe->plane_res.scl_data;
struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
- int new_width;
*secondary_pipe = *primary_pipe;
secondary_pipe->bottom_pipe = sec_bot_pipe;
@@ -1495,9 +1810,7 @@ static bool dcn20_split_stream_for_combine(
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
secondary_pipe->stream_res.dsc = NULL;
-#endif
if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
ASSERT(!secondary_pipe->bottom_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -1506,57 +1819,9 @@ static bool dcn20_split_stream_for_combine(
primary_pipe->bottom_pipe = secondary_pipe;
secondary_pipe->top_pipe = primary_pipe;
- if (is_odm_combine) {
- if (primary_pipe->plane_state) {
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Copy scl_data to secondary pipe */
- secondary_pipe->plane_res.scl_data = *sd;
-
- /* Calculate new vp and recout for left pipe */
- /* Need at least 16 pixels width per side */
- if (sd->recout.x + 16 >= sd->h_active)
- return false;
- new_width = sd->h_active - sd->recout.x;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
-
- /* Calculate new vp and recout for right pipe */
- sd = &secondary_pipe->plane_res.scl_data;
- new_width = sd->recout.width + sd->recout.x - sd->h_active;
- /* Need at least 16 pixels width per side */
- if (new_width <= 16)
- return false;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
- sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->h_active - sd->recout.x));
- sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->h_active - sd->recout.x));
- sd->recout.x = 0;
- }
- secondary_pipe->stream_res.opp = pool->opps[secondary_pipe->pipe_idx];
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- if (secondary_pipe->stream->timing.flags.DSC == 1) {
- acquire_dsc(res_ctx, pool, &secondary_pipe->stream_res.dsc);
- ASSERT(secondary_pipe->stream_res.dsc);
- if (secondary_pipe->stream_res.dsc == NULL)
- return false;
- }
-#endif
- } else {
- ASSERT(primary_pipe->plane_state);
- resource_build_scaling_params(primary_pipe);
- resource_build_scaling_params(secondary_pipe);
- }
-
- return true;
+ ASSERT(primary_pipe->plane_state);
+ resource_build_scaling_params(primary_pipe);
+ resource_build_scaling_params(secondary_pipe);
}
void dcn20_populate_dml_writeback_from_context(
@@ -1596,11 +1861,28 @@ void dcn20_populate_dml_writeback_from_context(
}
+static int get_num_odm_heads(struct pipe_ctx *pipe)
+{
+ int odm_head_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_head_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_head_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_head_count ? odm_head_count + 1 : 0;
+}
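
get_num_odm_heads() walks the ODM chain in both directions and reports 0 for a pipe that is not part of any ODM group, otherwise the total number of heads including the pipe itself. A toy doubly linked list showing the same count:

    #include <stdio.h>

    struct odm_node {
        struct odm_node *prev, *next;
    };

    static int num_odm_heads(const struct odm_node *p)
    {
        int n = 0;
        const struct odm_node *it;

        for (it = p->next; it; it = it->next)
            n++;
        for (it = p->prev; it; it = it->prev)
            n++;
        return n ? n + 1 : 0;
    }

    int main(void)
    {
        struct odm_node a = { 0 }, b = { 0 };

        a.next = &b;
        b.prev = &a;
        printf("%d %d\n", num_odm_heads(&a), num_odm_heads(&b)); /* 2 2 */
        return 0;
    }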
+
int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
+ struct resource_context *res_ctx = &context->res_ctx;
for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1610,7 +1892,7 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
- if (!resource_are_streams_timing_synchronizable(
+ if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
synchronized_vblank = false;
@@ -1620,25 +1902,30 @@ int dcn20_populate_dml_pipes_from_context(
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
+ unsigned int v_total;
+ unsigned int front_porch;
int output_bpc;
if (!res_ctx->pipe_ctx[i].stream)
continue;
+
+ v_total = timing->v_total;
+ front_porch = timing->v_front_porch;
/* todo:
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
pipes[pipe_cnt].pipe.src.dcc = 0;
pipes[pipe_cnt].pipe.src.vm = 0;*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
/* todo: rotation?*/
pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
-#endif
if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
/* 1/2 vblank */
pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
+ (v_total - timing->v_addressable
- timing->v_border_top - timing->v_border_bottom) / 2;
/* 36 bytes dp, 32 hdmi */
pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
@@ -1652,13 +1939,13 @@ int dcn20_populate_dml_pipes_from_context(
- timing->h_addressable
- timing->h_border_left
- timing->h_border_right;
- pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;
pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
- timing->v_addressable
- timing->v_border_top
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
- pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
@@ -1669,6 +1956,24 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
+ switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
+ }
+ pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
+ if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
+ == res_ctx->pipe_ctx[i].plane_state)
+ pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
+ else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
+ struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
+
+ while (first_pipe->prev_odm_pipe)
+ first_pipe = first_pipe->prev_odm_pipe;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ }
switch (res_ctx->pipe_ctx[i].stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT_MST:
@@ -1708,20 +2013,17 @@ int dcn20_populate_dml_pipes_from_context(
case COLOR_DEPTH_161616:
output_bpc = 16;
break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case COLOR_DEPTH_999:
output_bpc = 9;
break;
case COLOR_DEPTH_111111:
output_bpc = 11;
break;
-#endif
default:
output_bpc = 8;
break;
}
-
switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
@@ -1730,7 +2032,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
- pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
+ pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (true) /* todo */
@@ -1743,10 +2045,9 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_format = dm_444;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
- if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
- == res_ctx->pipe_ctx[i].plane_state)
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
+
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
@@ -1770,6 +2071,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
pipes[pipe_cnt].pipe.src.viewport_height = 1080;
+ pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
@@ -1784,8 +2089,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
pipes[pipe_cnt].pipe.src.is_hsplit = 0;
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
- pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
- pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -1795,14 +2100,6 @@ int dcn20_populate_dml_pipes_from_context(
&& res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
|| (res_ctx->pipe_ctx[i].top_pipe
&& res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
- pipes[pipe_cnt].pipe.dest.odm_combine = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln
- && res_ctx->pipe_ctx[i].bottom_pipe->stream_res.opp
- != res_ctx->pipe_ctx[i].stream_res.opp)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln
- && res_ctx->pipe_ctx[i].top_pipe->stream_res.opp
- != res_ctx->pipe_ctx[i].stream_res.opp);
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
@@ -1811,14 +2108,18 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.video.luma_pitch;
- pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.video.chroma_pitch;
- pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.video.meta_pitch_l;
- pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.video.meta_pitch_c;
+ pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
+ pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
} else {
- pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.grph.surface_pitch;
- pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.grph.meta_pitch;
+ pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
}
pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
@@ -1976,8 +2277,7 @@ void dcn20_set_mcif_arb_params(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -1986,53 +2286,144 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx->stream;
struct dsc_config dsc_cfg;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
/* Only need to validate top pipe */
- if (pipe_ctx->top_pipe || !stream || !stream->timing.flags.DSC)
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
continue;
- dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left
- + stream->timing.h_border_right;
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ stream->timing.v_border_bottom;
- if (dc_res_get_odm_bottom_pipe(pipe_ctx))
- dsc_cfg.pic_width /= 2;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
}
return true;
}
-#endif
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
{
- bool out = false;
+ struct pipe_ctx *secondary_pipe = NULL;
+
+ if (dc && primary_pipe) {
+ int j;
+ int preferred_pipe_idx = 0;
+
+ /* first check the prev dc state:
+ * if this primary pipe has a bottom pipe in prev. state
+ * and if the bottom pipe is still available (which it should be),
+ * pick that pipe as secondary
+ * The same logic applies to ODM pipes. Since MPO is not allowed together
+ * with ODM, the ODM case is checked in the else branch.
+ */
+ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
- BW_VAL_TRACE_SETUP();
+ /*
+ * if this primary pipe does not have a bottom pipe in prev. state
+ * start backwards and find a pipe that was not a bottom pipe in the
+ * prev. dc state. This way we make sure we keep the same assignment as
+ * the last state and will not have to reprogram every pipe
+ */
+ if (secondary_pipe == NULL) {
+ for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
+ preferred_pipe_idx = j;
+
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ break;
+ }
+ }
+ }
+ }
+ /*
+ * We should never hit this assert unless assignments are shuffled around;
+ * if this happens we will probably hit a vsync TDR
+ */
+ ASSERT(secondary_pipe);
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (secondary_pipe == NULL) {
+ for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
+ preferred_pipe_idx = j;
+
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ break;
+ }
+ }
+ }
+ }
- int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
- int pipe_split_from[MAX_PIPES];
- bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
- bool force_split = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- bool failed_non_odm_dsc = false;
-#endif
- int split_threshold = dc->res_pool->pipe_count / 2;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
- DC_LOGGER_INIT(dc->ctx->logger);
+ return secondary_pipe;
+}
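
dcn20_find_secondary_pipe() prefers whatever pipe was already paired in the previous dc state (the MPC bottom pipe first, then the ODM neighbour) and only then scans backwards for a free pipe, so steady-state updates do not reshuffle pipe assignments. A condensed sketch of that preference order with a made-up availability map:

    #include <stdio.h>

    /* Indices and the free map are invented for illustration. */
    static int pick_secondary(int prev_bottom, int prev_next_odm,
                              const int free_map[], int pipe_count)
    {
        int j;

        if (prev_bottom >= 0 && free_map[prev_bottom])
            return prev_bottom;             /* keep last MPC pairing     */
        if (prev_next_odm >= 0 && free_map[prev_next_odm])
            return prev_next_odm;           /* keep last ODM pairing     */
        for (j = pipe_count - 1; j >= 0; j--)   /* otherwise scan backwards */
            if (free_map[j])
                return j;
        return -1;
    }

    int main(void)
    {
        const int free_map[6] = { 0, 0, 1, 1, 0, 1 };

        printf("secondary pipe: %d\n", pick_secondary(-1, -1, free_map, 6));
        return 0;
    }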
- BW_VAL_TRACE_COUNT();
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
- ASSERT(pipes);
- if (!pipes)
- return false;
+ /* merge previously split odm pipes since mode support needs to make the decision */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;
+
+ if (pipe->prev_odm_pipe)
+ continue;
+ pipe->next_odm_pipe = NULL;
+ while (odm_pipe) {
+ struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;
+
+ odm_pipe->plane_state = NULL;
+ odm_pipe->stream = NULL;
+ odm_pipe->top_pipe = NULL;
+ odm_pipe->bottom_pipe = NULL;
+ odm_pipe->prev_odm_pipe = NULL;
+ odm_pipe->next_odm_pipe = NULL;
+ if (odm_pipe->stream_res.dsc)
+ release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ /* Clear plane_res and stream_res */
+ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
+ memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
+ odm_pipe = next_odm_pipe;
+ }
+ if (pipe->plane_state)
+ resource_build_scaling_params(pipe);
+ }
+
+ /* merge previously mpc split pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
@@ -2040,7 +2431,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
continue;
- /* merge previously split pipe since mode support needs to make the decision */
pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
if (hsplit_pipe->bottom_pipe)
hsplit_pipe->bottom_pipe->top_pipe = pipe;
@@ -2048,60 +2438,26 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
hsplit_pipe->stream = NULL;
hsplit_pipe->top_pipe = NULL;
hsplit_pipe->bottom_pipe = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- if (hsplit_pipe->stream_res.dsc && hsplit_pipe->stream_res.dsc != pipe->stream_res.dsc)
- release_dsc(&context->res_ctx, dc->res_pool, &hsplit_pipe->stream_res.dsc);
-#endif
+
/* Clear plane_res and stream_res */
memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
- if (dc->res_pool->funcs->populate_dml_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
- else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
-
- if (!pipe_cnt) {
- BW_VAL_TRACE_SKIP(pass);
- out = true;
- goto validate_out;
- }
-
- context->bw_ctx.dml.ip.odm_capable = 0;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.ip.odm_capable = odm_capable;
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* 1 dsc per stream dsc validation */
- if (vlevel <= context->bw_ctx.dml.soc.num_states)
- if (!dcn20_validate_dsc(dc, context)) {
- failed_non_odm_dsc = true;
- vlevel = context->bw_ctx.dml.soc.num_states + 1;
- }
-#endif
-
- if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
- || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
- context->commit_hints.full_update_needed = true;
-
- /*initialize pipe_just_split_from to invalid idx*/
- for (i = 0; i < MAX_PIPES; i++)
- pipe_split_from[i] = -1;
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split)
+{
+ int i, pipe_idx, vlevel_split;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
- /* Single display only conditionals get set here */
+ /* Single display loop, exits if there is more than one display */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2128,49 +2484,113 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (exit_loop)
break;
}
-
- if (context->stream_count > split_threshold)
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
- vlevel_unsplit = vlevel;
+ /* The avoid-split loop looks for the lowest voltage level that allows the most unsplit pipes possible */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ pipe_idx++;
+ }
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ }
+
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
- break;
+
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ split[i] = true;
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = true;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
+ }
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
pipe_idx++;
}
+ return vlevel;
+}
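
dcn20_validate_apply_pipe_split_flags() folds three inputs into one split[] decision per pipe: DML's NoOfDPP output, side-by-side or top-and-bottom stereo timings, and the debug force_odm_combine mask; when a split is forced but DML planned only one DPP, the required DPPCLK is halved. A compressed restatement with placeholder flags, not the real dc debug options:

    #include <stdbool.h>
    #include <stdio.h>

    static bool want_split(int no_of_dpp, bool stereo_3d, bool force_odm)
    {
        return no_of_dpp > 1 || stereo_3d || force_odm;
    }

    int main(void)
    {
        /* DML asked for one DPP but ODM was forced on this timing
         * generator: the pipe splits and its required DPPCLK is halved. */
        int dppclk_khz = 900000;
        bool split = want_split(1, false, true);

        if (split)
            dppclk_khz /= 2;
        printf("split=%d dppclk=%d kHz\n", split, dppclk_khz);
        return 0;
    }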
+
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out)
+{
+ bool out = false;
+ bool split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ dcn20_merge_pipes_for_validate(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
+
+ *pipe_cnt_out = pipe_cnt;
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+
+ /* initialize pipe_split_from to invalid idx */
+ for (i = 0; i < MAX_PIPES; i++)
+ pipe_split_from[i] = -1;
+
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
- bool need_split = true;
- bool need_split3d;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- force_split = true;
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
- }
- if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
- if (dc->config.forced_clocks == true) {
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] =
- context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
- if (!dcn20_split_stream_for_combine(
+ if (!dcn20_split_stream_for_odm(
&context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe,
- true))
+ pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
dcn20_build_mapped_resource(dc, context, pipe->stream);
@@ -2182,40 +2602,30 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
- need_split3d = ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE));
-
- if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
- need_split = false;
- vlevel = vlevel_unsplit;
- context->bw_ctx.dml.vba.maxMpcComb = 0;
- } else
- need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
-
/* We do not support mpo + odm at the moment */
if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (need_split3d || need_split || force_split) {
+ if (split[i]) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
- ASSERT(hsplit_pipe || force_split);
- if (!hsplit_pipe)
+ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe) {
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
continue;
-
- if (!dcn20_split_stream_for_combine(
+ }
+ if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
+ if (!dcn20_split_stream_for_odm(
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe))
+ goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
+ } else
+ dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe,
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]))
- goto validate_fail;
+ pipe, hsplit_pipe);
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -2223,22 +2633,33 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
ASSERT(0);
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
- if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
+ if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
-#endif
- BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+ *vlevel_out = vlevel;
- if (fast_validate) {
- BW_VAL_TRACE_SKIP(fast);
- out = true;
- goto validate_out;
- }
+ out = true;
+ goto validate_out;
+
+validate_fail:
+ out = false;
+
+validate_out:
+ return out;
+}
+
+static void dcn20_calculate_wm(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *out_pipe_cnt,
+ int *pipe_split_from,
+ int vlevel)
+{
+ int pipe_cnt, i, pipe_idx;
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
@@ -2252,7 +2673,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
@@ -2261,26 +2682,34 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
+
if (dc->config.forced_clocks) {
pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
pipe_cnt++;
}
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
+ context, pipes);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
+ context, pipes);
}
+ *out_pipe_cnt = pipe_cnt;
+
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
@@ -2296,6 +2725,9 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2307,6 +2739,8 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2318,6 +2752,8 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2327,6 +2763,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+}
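
dcn20_calculate_wm() fills watermark set B at the validated voltage level, re-evaluates at no lower than level 2 for set C and level 3 for set D, then returns to the validated level for set A; every DML result in microseconds is stored as nanoseconds, hence the "* 1000". A small sketch of the level clamping:

    #include <stdio.h>

    /* Illustrative restatement of the per-set voltage selection above. */
    static int wm_c_level(int vlevel) { return vlevel < 2 ? 2 : vlevel; }
    static int wm_d_level(int vlevel) { return vlevel < 3 ? 3 : vlevel; }

    int main(void)
    {
        int vlevel = 1;

        printf("sets B/A at %d, C at %d, D at %d\n",
               vlevel, wm_c_level(vlevel), wm_d_level(vlevel));
        return 0;
    }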
+
+void dcn20_calculate_dlg_params(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, j, pipe_idx, pipe_idx_unsplit;
+ bool visited[MAX_PIPES] = { 0 };
+
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -2341,26 +2790,63 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- BW_VAL_TRACE_END_WATERMARKS();
+ /*
+ * An artifact of dml pipe split/odm is that pipes get merged back together for
+ * calculation. Therefore we only need to extract values for the first pipe,
+ * in ascending index order, and copy them into the other split half.
+ */
+ for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ if (!visited[pipe_idx]) {
+ display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
+
+ dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
+ dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
+ dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
+ dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
+ /*
+ * j iterates inside the pipes array, unlike i which iterates inside the
+ * pipe_ctx array
+ */
+ if (src->is_hsplit)
+ for (j = pipe_idx + 1; j < pipe_cnt; j++) {
+ display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
+ display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;
+
+ if (src_j->is_hsplit && !visited[j]
+ && src->hsplit_grp == src_j->hsplit_grp) {
+ dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
+ dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
+ dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
+ dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
+ visited[j] = true;
+ }
+ }
+ visited[pipe_idx] = true;
+ pipe_idx_unsplit++;
+ }
+ pipe_idx++;
+ }
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- pipes[pipe_idx].pipe.dest.vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx];
- pipes[pipe_idx].pipe.dest.vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx];
- pipes[pipe_idx].pipe.dest.vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx];
- pipes[pipe_idx].pipe.dest.vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx];
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- context->res_ctx.pipe_ctx[i].stream_res.dscclk_khz =
- context->bw_ctx.dml.vba.DSCCLK_calculated[pipe_idx] * 1000;
-#endif
+ ASSERT(visited[pipe_idx]);
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
pipe_idx++;
}
+ /* save an original dppclk copy */
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
@@ -2383,8 +2869,43 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes[pipe_idx].pipe);
pipe_idx++;
}
+}
+
+static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
+
+ if (pipe_cnt == 0)
+ goto validate_out;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel);
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
- out = true;
goto validate_out;
validate_fail:
@@ -2402,6 +2923,58 @@ validate_out:
return out;
}
+
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported = false;
+ bool full_pstate_supported = false;
+ bool dummy_pstate_supported = false;
+ double p_state_latency_us;
+
+ DC_FP_START();
+ p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+ dc->debug.disable_dram_clock_change_vactive_support;
+
+ if (fast_validate) {
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
+
+ DC_FP_END();
+ return voltage_supported;
+ }
+
+ // Best case, we support full UCLK switch latency
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
+ (voltage_supported && full_pstate_supported)) {
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
+ goto restore_dml_state;
+ }
+
+ // Fallback: Try to only support G6 temperature read latency
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (voltage_supported && dummy_pstate_supported) {
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ goto restore_dml_state;
+ }
+
+ // ERROR: fallback is supposed to always work.
+ ASSERT(false);
+
+restore_dml_state:
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+
+ DC_FP_END();
+ return voltage_supported;
+}
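
dcn20_validate_bandwidth() is now a two-pass wrapper: it first validates with the real dram_clock_change_latency_us and, if p-state switching does not fit, retries with the shorter dummy_pstate latency and reports p_state_change_support as false, restoring the original latency afterwards. A stand-alone sketch of that control flow; validate() below is a stand-in, not a dc function:

    #include <stdbool.h>
    #include <stdio.h>

    static bool validate(double dram_latency_us, bool *pstate_ok)
    {
        /* pretend p-state switching only fits with a short enough latency */
        *pstate_ok = dram_latency_us < 10.0;
        return true;    /* a voltage level is always found in this toy model */
    }

    int main(void)
    {
        double full_us = 23.84, dummy_us = 5.0;
        bool pstate_ok, voltage_ok;

        voltage_ok = validate(full_us, &pstate_ok);
        if (!(voltage_ok && pstate_ok)) {
            voltage_ok = validate(dummy_us, &pstate_ok);
            pstate_ok = false;      /* supported only via the fallback path */
        }
        printf("voltage=%d pstate=%d\n", voltage_ok, pstate_ok);
        return 0;
    }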
+
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -2443,7 +3016,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
- destruct(dcn20_pool);
+ dcn20_resource_destruct(dcn20_pool);
kfree(dcn20_pool);
*pool = NULL;
}
@@ -2482,6 +3055,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
@@ -2490,8 +3064,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
- ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
@@ -2537,7 +3109,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
@@ -2552,7 +3124,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -2560,7 +3132,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
}
}
-static void cap_soc_clocks(
+void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks)
{
@@ -2576,9 +3148,6 @@ static void cap_soc_clocks(
&& max_clocks.uClockInKhz != 0)
bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
- // HACK: Force every uclk to max for now to "disable" uclk switching.
- bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
-
if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
&& max_clocks.fabricClockInKhz != 0)
bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
@@ -2630,10 +3199,10 @@ static void cap_soc_clocks(
}
}
-static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
+ struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -2641,12 +3210,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (num_states == 0)
return;
+ memset(calculated_states, 0, sizeof(calculated_states));
+
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
- else
- // Accounting for SOC/DCF relationship, we can go as high as
- // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506.
- min_dcfclk = 507;
+ else {
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ min_dcfclk = 310;
+ else
+ // Accounting for SOC/DCF relationship, we can go as high as
+ // 506Mhz in Vmin.
+ min_dcfclk = 506;
+ }
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
@@ -2674,6 +3249,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
num_calculated_states++;
}
+ calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000;
+ calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000;
+ calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000;
+
memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
bb->num_states = num_calculated_states;
@@ -2682,9 +3261,8 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
bb->clock_limits[num_calculated_states].state = bb->num_states;
}
-static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
- kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
@@ -2708,7 +3286,31 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
+}
+
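+/* NV12 has its own SOC bounding box; every other DCN 2.0 revision shares dcn2_0_soc. */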
+static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
+ uint32_t hw_internal_rev)
+{
+ if (ASICREV_IS_NAVI12_P(hw_internal_rev))
+ return &dcn2_0_nv12_soc;
+
+ return &dcn2_0_soc;
+}
+
+static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
+ uint32_t hw_internal_rev)
+{
+ /* NV14 */
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_ip;
+
+ /* NV12 and NV10 */
+ return &dcn2_0_ip;
+}
+
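+/* All DCN 2.0 revisions currently map to the same DML project version. */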
+static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
+{
+ return DML_PROJECT_NAVI10v2;
}
#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
@@ -2718,113 +3320,119 @@ static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
+ get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
+ struct _vcs_dpi_ip_params_st *loaded_ip =
+ get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
+
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !SOC_BOUNDING_BOX_VALID) {
+	/* TODO: upstream NV12 bounding box when it's launched */
+ if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
		DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
return false;
}
- if (bb && !SOC_BOUNDING_BOX_VALID) {
+ if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
int i;
- dcn2_0_soc.sr_exit_time_us =
+ dcn2_0_nv12_soc.sr_exit_time_us =
fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn2_0_soc.sr_enter_plus_exit_time_us =
+ dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn2_0_soc.urgent_latency_us =
+ dcn2_0_nv12_soc.urgent_latency_us =
fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn2_0_soc.urgent_latency_pixel_data_only_us =
+ dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn2_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
+ dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn2_0_soc.urgent_latency_vm_data_only_us =
+ dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
+ dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
+ dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
+ dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
+ dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
+ dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
+ dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn2_0_soc.max_avg_sdp_bw_use_normal_percent =
+ dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn2_0_soc.max_avg_dram_bw_use_normal_percent =
+ dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn2_0_soc.writeback_latency_us =
+ dcn2_0_nv12_soc.writeback_latency_us =
fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn2_0_soc.ideal_dram_bw_after_urgent_percent =
+ dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn2_0_soc.max_request_size_bytes =
+ dcn2_0_nv12_soc.max_request_size_bytes =
le32_to_cpu(bb->max_request_size_bytes);
- dcn2_0_soc.dram_channel_width_bytes =
+ dcn2_0_nv12_soc.dram_channel_width_bytes =
le32_to_cpu(bb->dram_channel_width_bytes);
- dcn2_0_soc.fabric_datapath_to_dcn_data_return_bytes =
+ dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn2_0_soc.dcn_downspread_percent =
+ dcn2_0_nv12_soc.dcn_downspread_percent =
fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn2_0_soc.downspread_percent =
+ dcn2_0_nv12_soc.downspread_percent =
fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn2_0_soc.dram_page_open_time_ns =
+ dcn2_0_nv12_soc.dram_page_open_time_ns =
fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn2_0_soc.dram_rw_turnaround_time_ns =
+ dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn2_0_soc.dram_return_buffer_per_channel_bytes =
+ dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn2_0_soc.round_trip_ping_latency_dcfclk_cycles =
+ dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_bytes =
+ dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn2_0_soc.channel_interleave_bytes =
+ dcn2_0_nv12_soc.channel_interleave_bytes =
le32_to_cpu(bb->channel_interleave_bytes);
- dcn2_0_soc.num_banks =
+ dcn2_0_nv12_soc.num_banks =
le32_to_cpu(bb->num_banks);
- dcn2_0_soc.num_chans =
+ dcn2_0_nv12_soc.num_chans =
le32_to_cpu(bb->num_chans);
- dcn2_0_soc.vmm_page_size_bytes =
+ dcn2_0_nv12_soc.vmm_page_size_bytes =
le32_to_cpu(bb->vmm_page_size_bytes);
- dcn2_0_soc.dram_clock_change_latency_us =
+ dcn2_0_nv12_soc.dram_clock_change_latency_us =
fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
// HACK!! Lower uclock latency switch time so we don't switch
- dcn2_0_soc.dram_clock_change_latency_us = 10;
- dcn2_0_soc.writeback_dram_clock_change_latency_us =
+ dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
+ dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn2_0_soc.return_bus_width_bytes =
+ dcn2_0_nv12_soc.return_bus_width_bytes =
le32_to_cpu(bb->return_bus_width_bytes);
- dcn2_0_soc.dispclk_dppclk_vco_speed_mhz =
+ dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn2_0_soc.xfc_bus_transport_time_us =
+ dcn2_0_nv12_soc.xfc_bus_transport_time_us =
le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn2_0_soc.xfc_xbuf_latency_tolerance_us =
+ dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn2_0_soc.use_urgent_burst_bw =
+ dcn2_0_nv12_soc.use_urgent_burst_bw =
le32_to_cpu(bb->use_urgent_burst_bw);
- dcn2_0_soc.num_states =
+ dcn2_0_nv12_soc.num_states =
le32_to_cpu(bb->num_states);
- for (i = 0; i < dcn2_0_soc.num_states; i++) {
- dcn2_0_soc.clock_limits[i].state =
+ for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
+ dcn2_0_nv12_soc.clock_limits[i].state =
le32_to_cpu(bb->clock_limits[i].state);
- dcn2_0_soc.clock_limits[i].dcfclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn2_0_soc.clock_limits[i].fabricclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn2_0_soc.clock_limits[i].dispclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn2_0_soc.clock_limits[i].dppclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn2_0_soc.clock_limits[i].phyclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn2_0_soc.clock_limits[i].socclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn2_0_soc.clock_limits[i].dscclk_mhz =
+ dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn2_0_soc.clock_limits[i].dram_speed_mts =
+ dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
}
}
@@ -2833,7 +3441,6 @@ static bool init_soc_bounding_box(struct dc *dc,
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
unsigned int num_states = 0;
- int i;
enum pp_smu_status status;
bool clock_limits_available = false;
bool uclk_states_available = false;
@@ -2855,24 +3462,20 @@ static bool init_soc_bounding_box(struct dc *dc,
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
- // HACK: Use the max uclk_states value for all elements.
- for (i = 0; i < num_states; i++)
- uclk_states[i] = uclk_states[num_states - 1];
-
if (clock_limits_available && uclk_states_available && num_states)
- update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states);
+ dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
else if (clock_limits_available)
- cap_soc_clocks(&dcn2_0_soc, max_clocks);
+ dcn20_cap_soc_clocks(loaded_bb, max_clocks);
}
- dcn2_0_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn2_0_ip.max_num_dpp = pool->base.pipe_count;
- patch_bounding_box(dc, &dcn2_0_soc);
+ loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
+ loaded_ip->max_num_dpp = pool->base.pipe_count;
+ dcn20_patch_bounding_box(dc, loaded_bb);
return true;
}
-static bool construct(
+static bool dcn20_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn20_resource_pool *pool)
@@ -2880,19 +3483,33 @@ static bool construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data;
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
+ get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
+ struct _vcs_dpi_ip_params_st *loaded_ip =
+ get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
+ enum dml_project dml_project_version =
+ get_dml_project_version(ctx->asic_id.hw_internal_rev);
- ctx->dc_bios->regs = &bios_regs;
+ DC_FP_START();
- pool->base.res_cap = &res_cap_nv10;
+ ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ pool->base.res_cap = &res_cap_nv14;
+ pool->base.pipe_count = 5;
+ pool->base.mpcc_count = 5;
+ } else {
+ pool->base.res_cap = &res_cap_nv10;
+ pool->base.pipe_count = 6;
+ pool->base.mpcc_count = 6;
+ }
/*************************************************
 * Resource + asic cap hardcoding *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = 6;
- pool->base.mpcc_count = 6;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
@@ -2902,6 +3519,7 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.hw_3d_lut = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
@@ -2998,7 +3616,7 @@ static bool construct(
goto create_fail;
}
- dml_init_instance(&dc->dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10);
+ dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
if (!dc->debug.disable_pplib_wm_range) {
struct pp_smu_wm_range_sets ranges = {0};
@@ -3006,7 +3624,7 @@ static bool construct(
ranges.num_reader_wm_sets = 0;
- if (dcn2_0_soc.num_states == 1) {
+ if (loaded_bb->num_states == 1) {
ranges.reader_wm_sets[0].wm_inst = i;
ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
@@ -3014,13 +3632,13 @@ static bool construct(
ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges.num_reader_wm_sets = 1;
- } else if (dcn2_0_soc.num_states > 1) {
- for (i = 0; i < 4 && i < dcn2_0_soc.num_states; i++) {
+ } else if (loaded_bb->num_states > 1) {
+ for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
ranges.reader_wm_sets[i].wm_inst = i;
ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (dcn2_0_soc.clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
- ranges.reader_wm_sets[i].max_fill_clk_mhz = dcn2_0_soc.clock_limits[i].dram_speed_mts / 16;
+ ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
+ ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
ranges.num_reader_wm_sets = i + 1;
}
@@ -3127,7 +3745,6 @@ static bool construct(
goto create_fail;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
@@ -3136,7 +3753,6 @@ static bool construct(
goto create_fail;
}
}
-#endif
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
@@ -3163,11 +3779,24 @@ static bool construct(
dc->cap_funcs = cap_funcs;
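+	/* If the BIOS exposes an OEM I2C engine, create a DDC service for it. */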
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ } else {
+ pool->base.oem_device = NULL;
+ }
+
+ DC_FP_END();
return true;
create_fail:
- destruct(pool);
+ DC_FP_END();
+ dcn20_resource_destruct(pool);
return false;
}
@@ -3182,7 +3811,7 @@ struct resource_pool *dcn20_create_resource_pool(
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index b5a75289f444..f5893840b79b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
-
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb);
+void dcn20_cap_soc_clocks(
+ struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table max_clocks);
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states);
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
@@ -116,9 +119,45 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context);
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split);
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe);
+bool dcn20_split_stream_for_odm(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe);
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out);
+void dcn20_calculate_dlg_params(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index f5bcffc426b8..9b70a1e7b962 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -205,11 +205,9 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC7_LINE, 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-
/* Update GSP7 SDP 128 byte long */
-static void enc2_send_gsp7_128_info_packet(
+static void enc2_update_gsp7_128_info_packet(
struct dcn10_stream_encoder *enc1,
const struct dc_info_packet_128 *info_packet)
{
@@ -277,18 +275,9 @@ static void enc2_send_gsp7_128_info_packet(
static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
- uint32_t dsc_slice_width,
- uint8_t *dsc_packed_pps)
+ uint32_t dsc_slice_width)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t dsc_value = 0;
-
- dsc_value = REG_READ(DP_DSC_CNTL);
-
- /* dsc disable skip */
- if ((dsc_value & 0x3) == 0x0)
- return;
-
REG_UPDATE_2(DP_DSC_CNTL,
DP_DSC_MODE, dsc_mode,
@@ -296,8 +285,16 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
+}
+
- if (dsc_mode != OPTC_DSC_DISABLED) {
+static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enable) {
struct dc_info_packet_128 pps_sdp;
ASSERT(dsc_packed_pps);
@@ -309,7 +306,7 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
pps_sdp.hb2 = 127;
pps_sdp.hb3 = 0;
memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
- enc2_send_gsp7_128_info_packet(enc1, &pps_sdp);
+ enc2_update_gsp7_128_info_packet(enc1, &pps_sdp);
/* Enable Generic Stream Packet 7 (GSP) transmission */
//REG_UPDATE(DP_SEC_CNTL,
@@ -340,9 +337,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 0);
}
}
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
@@ -363,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
}
}
-#endif
/* Set Dynamic Metadata-configuration.
 * enable_dme: TRUE: enables Dynamic Metadata Engine, FALSE: disables DME
@@ -373,7 +368,7 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
*
* Ensure the OTG master update lock is set when changing DME configuration.
*/
-static void enc2_set_dynamic_metadata(struct stream_encoder *enc,
+void enc2_set_dynamic_metadata(struct stream_encoder *enc,
bool enable_dme,
uint32_t hubp_requestor_id,
enum dynamic_metadata_mode dmdata_mode)
@@ -443,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
return two_pix;
}
@@ -463,7 +456,7 @@ void enc2_stream_encoder_dp_unblank(
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
- if (is_two_pixels_per_containter(&param->timing) || param->odm) {
+ if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -495,15 +488,23 @@ void enc2_stream_encoder_dp_unblank(
DP_VID_N_MUL, n_multiply);
}
- /* set DIG_START to 0x1 to reset FIFO */
+ /* make sure stream is disabled before resetting steer fifo */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
+ /* set DIG_START to 0x1 to reset FIFO */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
/* write 0 to take the FIFO out of reset */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
- /* switch DP encoder to CRTC data */
+	/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
+	 * that it overflows during the mode transition and sometimes doesn't recover.
+ */
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+ udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
@@ -536,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
+ enc1_stream_encoder_dp_set_stream_attribute(enc,
+ crtc_timing,
+ output_color_space,
+ use_vsc_sdp_for_colorimetry,
+ enable_sdp_splitting);
REG_UPDATE(DP_SEC_FRAMING4,
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
@@ -563,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc2_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
+ .send_immediate_sdp_message =
+ enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
@@ -580,14 +588,16 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.setup_stereo_sync = enc1_setup_stereo_sync,
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- .enc_read_state = enc2_read_state,
-#endif
+ .dig_source_otg = enc1_dig_source_otg,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
+ .enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
-#endif
+ .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
.set_dynamic_metadata = enc2_set_dynamic_metadata,
+ .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
};
void dcn20_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index 6d40e8c9b78f..d2a805bd4573 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -98,10 +98,16 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void enc2_stream_encoder_dp_unblank(
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
+void enc2_set_dynamic_metadata(struct stream_encoder *enc,
+ bool enable_dme,
+ uint32_t hubp_requestor_id,
+ enum dynamic_metadata_mode dmdata_mode);
+
#endif /* __DC_STREAM_ENCODER_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
new file mode 100644
index 000000000000..07684d3e375a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: MIT
+#
+# Makefile for DCN21.
+
+DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
+ dcn21_hwseq.o dcn21_link_encoder.o
+
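+# dcn21_resource.o uses floating-point math, so the blocks below select
+# matching hard-float/SIMD flags per architecture and compiler.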
+ifdef CONFIG_X86
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
+endif
+endif
+
+AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN21)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
new file mode 100644
index 000000000000..f546260c15b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include <linux/delay.h>
+#include "dm_services.h"
+#include "dcn20/dcn20_hubbub.h"
+#include "dcn21_hubbub.h"
+#include "reg_helper.h"
+
+#define REG(reg)\
+ hubbub1->regs->reg
+#define DC_LOGGER \
+ hubbub1->base.ctx->logger
+#define CTX \
+ hubbub1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub1->shifts->field_name, hubbub1->masks->field_name
+
+#ifdef NUM_VMID
+#undef NUM_VMID
+#endif
+#define NUM_VMID 16
+
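+/* Convert a watermark given in nanoseconds into DLG refclk cycles
+ * (wm_ns * refclk_mhz / 1000) and clamp it to the register maximum.
+ */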
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
+
+void dcn21_dchvm_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
+
+ //Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ //Poll until RIOMMU_ACTIVE = 1
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
+
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ }
+}
+
+int hubbub21_init_dchub(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
+
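+	/* System aperture addresses are programmed in 16 MB units (">> 24");
+	 * GART page table addresses below are in 4 KB units (">> 12").
+	 */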
+ REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
+ REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
+ REG_SET(DCN_VM_FB_OFFSET, 0,
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
+ REG_SET(DCN_VM_AGP_BOT, 0,
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
+ REG_SET(DCN_VM_AGP_TOP, 0,
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
+ REG_SET(DCN_VM_AGP_BASE, 0,
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
+
+ dcn21_dchvm_init(hubbub);
+
+ return NUM_VMID;
+}
+
+void hubbub21_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+	/* Repeat for watermark sets A, B, C and D. */
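+	/* A watermark is only reprogrammed when the new value is higher than the
+	 * cached one, or when safe_to_lower allows it to drop.
+	 */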
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
+ hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+ }
+
+ /* determine the transfer time for a quantity of data for a particular requestor.*/
+ if (safe_to_lower || watermarks->a.frac_urg_bw_flip
+ > hubbub1->watermarks.a.frac_urg_bw_flip) {
+ hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
+ }
+
+ if (safe_to_lower || watermarks->a.frac_urg_bw_nom
+ > hubbub1->watermarks.a.frac_urg_bw_nom) {
+ hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
+ }
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
+ hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ }
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
+ hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+ }
+
+ /* determine the transfer time for a quantity of data for a particular requestor.*/
+ if (safe_to_lower || watermarks->a.frac_urg_bw_flip
+ > hubbub1->watermarks.a.frac_urg_bw_flip) {
+ hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
+ }
+
+ if (safe_to_lower || watermarks->a.frac_urg_bw_nom
+ > hubbub1->watermarks.a.frac_urg_bw_nom) {
+ hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
+ }
+
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
+ hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ }
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
+ hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+ }
+
+ /* determine the transfer time for a quantity of data for a particular requestor.*/
+ if (safe_to_lower || watermarks->a.frac_urg_bw_flip
+ > hubbub1->watermarks.a.frac_urg_bw_flip) {
+ hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
+ }
+
+ if (safe_to_lower || watermarks->a.frac_urg_bw_nom
+ > hubbub1->watermarks.a.frac_urg_bw_nom) {
+ hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
+ }
+
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
+ hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ }
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
+ hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+ }
+
+ /* determine the transfer time for a quantity of data for a particular requestor.*/
+ if (safe_to_lower || watermarks->a.frac_urg_bw_flip
+ > hubbub1->watermarks.a.frac_urg_bw_flip) {
+ hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
+ }
+
+ if (safe_to_lower || watermarks->a.frac_urg_bw_nom
+ > hubbub1->watermarks.a.frac_urg_bw_nom) {
+ hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
+ }
+
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
+ hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ }
+}
+
+void hubbub21_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
+			DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
+			DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
+ watermarks->d.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
+			DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+}
+
+void hubbub21_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
+ watermarks->a.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
+ watermarks->b.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
+ watermarks->c.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
+ watermarks->d.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+}
+
+void hubbub21_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+
+ /*
+ * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
+ * If the memory controller is fully utilized and the DCHub requestors are
+ * well ahead of their amortized schedule, then it is safe to prevent the next winner
+ * from being committed and sent to the fabric.
+ * The utilization of the memory controller is approximated by ensuring that
+ * the number of outstanding requests is greater than a threshold specified
+ * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
+ * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
+ *
+	 * TODO: Revisit the request limit once the right number is figured out. The request limit for Renoir isn't decided yet, so set the maximum value (0x1FF)
+	 * to turn it off for now.
+ */
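+	/* refclk_mhz counts cycles per microsecond, so 60 * refclk_mhz expresses a
+	 * 60 us saturation level in refclk cycles.
+	 */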
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
+ REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
+ DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
+
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+}
+
+void hubbub21_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_hubbub_wm_set *s;
+
+ memset(wm, 0, sizeof(struct dcn_hubbub_wm));
+
+ s = &wm->sets[0];
+ s->wm_set = 0;
+ REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+
+ s = &wm->sets[1];
+ s->wm_set = 1;
+ REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+
+ s = &wm->sets[2];
+ s->wm_set = 2;
+ REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+
+ s = &wm->sets[3];
+ s->wm_set = 3;
+ REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
+
+ REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+}
+
+void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
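+	/* DEDCN21_147 workaround: read the urgency watermark back and write the
+	 * same value, re-issuing the register write.
+	 */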
+ prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+}
+
+static const struct hubbub_funcs hubbub21_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = hubbub21_init_dchub,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
+ .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
+ .wm_read_state = hubbub21_wm_read_state,
+ .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
+ .program_watermarks = hubbub21_program_watermarks,
+ .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
+};
+
+void hubbub21_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub->base.ctx = ctx;
+
+ hubbub->base.funcs = &hubbub21_funcs;
+
+ hubbub->regs = hubbub_regs;
+ hubbub->shifts = hubbub_shift;
+ hubbub->masks = hubbub_mask;
+
+ hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
new file mode 100644
index 000000000000..c4840dfb1fa5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DAL_DC_DCN21_DCN21_HUBBUB_H_
+#define DAL_DC_DCN21_DCN21_HUBBUB_H_
+
+#include "dcn20/dcn20_hubbub.h"
+
+#define HUBBUB_HVM_REG_LIST() \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
+ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
+ SR(DCHVM_CTRL0), \
+ SR(DCHVM_MEM_CTRL), \
+ SR(DCHVM_CLK_CTRL), \
+ SR(DCHVM_RIOMMU_CTRL0), \
+ SR(DCHVM_RIOMMU_STAT0)
+
+#define HUBBUB_REG_LIST_DCN21()\
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ HUBBUB_HVM_REG_LIST()
+
+#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, mask_sh), \
+ HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh), \
+ HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh), \
+ HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh), \
+ HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh), \
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh), \
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh), \
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh), \
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh), \
+ HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh), \
+ HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh), \
+ HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh), \
+ HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh), \
+ HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh), \
+ HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh)
+
+#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\
+ HUBBUB_MASK_SH_LIST_HVM(mask_sh), \
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
+
+void dcn21_dchvm_init(struct hubbub *hubbub);
+int hubbub21_init_dchub(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
+void hubbub21_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+void hubbub21_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm);
+
+void hubbub21_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+#endif /* DAL_DC_DCN21_DCN21_HUBBUB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
new file mode 100644
index 000000000000..cf09b9335728
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -0,0 +1,960 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10/dcn10_hubp.h"
+#include "dcn21_hubp.h"
+
+#include "dm_services.h"
+#include "reg_helper.h"
+
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define REG(reg)\
+ hubp21->hubp_regs->reg
+
+#define CTX \
+ hubp21->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp21->hubp_shift->field_name, hubp21->hubp_mask->field_name
+
+/*
+ * In DCN2.1, the non-double-buffered versions of the following 4 DLG registers are used in RTL.
+ * As a result, if S/W updates any of these registers during a mode change,
+ * the current frame (before the mode change) will use the new value right away,
+ * which can lead to incorrect request deadlines and incorrect TTU/QoS behavior.
+ *
+ * REFCYC_PER_VM_GROUP_FLIP[22:0]
+ * REFCYC_PER_VM_GROUP_VBLANK[22:0]
+ * REFCYC_PER_VM_REQ_FLIP[22:0]
+ * REFCYC_PER_VM_REQ_VBLANK[22:0]
+ *
+ * REFCYC_PER_VM_*_FLIP affects the deadline of the VM requests generated
+ * when flipping to a new surface
+ *
+ * REFCYC_PER_VM_*_VBLANK affects the deadline of the VM requests generated
+ * during the prefetch period of a frame. The prefetch starts at a pre-determined
+ * number of lines before display active for each frame.
+ *
+ * DCN may underflow if these registers are programmed incorrectly
+ * during the VM stage of prefetch/iflip. The first lines of display active,
+ * or a sub-region of the active area using a new surface, will be corrupted
+ * until the VM data returns at flip/mode change transitions.
+ *
+ * Workaround:
+ * Always opt for the more aggressive settings.
+ * On any mode switch, if the new reg values are smaller than the current values,
+ * update the regs with the new values.
+ *
+ * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
+ *
+ */
+void apply_DEDCN21_142_wa_for_hostvm_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ uint32_t cur_value;
+
+ REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
+ if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+ REG_SET(VBLANK_PARAMETERS_5, 0,
+ REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
+
+ REG_GET(VBLANK_PARAMETERS_6,
+ REFCYC_PER_VM_REQ_VBLANK,
+ &cur_value);
+ if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+ REG_SET(VBLANK_PARAMETERS_6, 0,
+ REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
+
+ REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
+ if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+ REG_SET(FLIP_PARAMETERS_3, 0,
+ REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
+
+ REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
+ if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+ REG_SET(FLIP_PARAMETERS_4, 0,
+ REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
+
+ REG_SET(FLIP_PARAMETERS_5, 0,
+ REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
+ REG_SET(FLIP_PARAMETERS_6, 0,
+ REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
+}
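+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * the DEDCN21-142 rule above amounts to "only take the new value when it is
+ * more aggressive (smaller) than the value currently programmed". A
+ * hypothetical helper that captures the comparison used for the four VM
+ * deadline fields:
+ */
+#if 0 /* example only */
+static uint32_t dedcn21_142_pick_value(uint32_t cur_value, uint32_t new_value)
+{
+ /* Keep the smaller (more aggressive) deadline across the mode switch. */
+ return new_value < cur_value ? new_value : cur_value;
+}
+#endif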
+
+void hubp21_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
+
+ apply_DEDCN21_142_wa_for_hostvm_deadline(hubp, dlg_attr);
+}
+
+void hubp21_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ VM_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_7(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+static void hubp21_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ /* OTG is locked when this function is called. Registers are double buffered,
+ * so disabling the requestors is not needed.
+ */
+
+ hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
+ hubp21_program_requestor(hubp, rq_regs);
+ hubp21_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+void hubp21_set_viewport(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
+ PRI_VIEWPORT_WIDTH, viewport->width,
+ PRI_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
+ PRI_VIEWPORT_X_START, viewport->x,
+ PRI_VIEWPORT_Y_START, viewport->y);
+
+ /*for stereo*/
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
+ SEC_VIEWPORT_WIDTH, viewport->width,
+ SEC_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
+ SEC_VIEWPORT_X_START, viewport->x,
+ SEC_VIEWPORT_Y_START, viewport->y);
+
+ /* DC supports NV12 only at the moment */
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
+ PRI_VIEWPORT_WIDTH_C, viewport_c->width,
+ PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
+ PRI_VIEWPORT_X_START_C, viewport_c->x,
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
+}
+
+static void hubp21_apply_PLAT_54186_wa(
+ struct hubp *hubp,
+ const struct dc_plane_address *address)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+ unsigned int chroma_bpe = 2;
+ unsigned int luma_addr_high_part = 0;
+ unsigned int row_height = 0;
+ unsigned int chroma_pitch = 0;
+ unsigned int viewport_c_height = 0;
+ unsigned int viewport_c_width = 0;
+ unsigned int patched_viewport_height = 0;
+ unsigned int patched_viewport_width = 0;
+ unsigned int rotation_angle = 0;
+ unsigned int pix_format = 0;
+ unsigned int h_mirror_en = 0;
+ unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */
+
+ if (!debug->nv12_iflip_vm_wa)
+ return;
+
+ REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
+ PTE_ROW_HEIGHT_LINEAR_C, &row_height);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+ PRI_VIEWPORT_WIDTH_C, &viewport_c_width,
+ PRI_VIEWPORT_HEIGHT_C, &viewport_c_height);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part);
+
+ REG_GET(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, &chroma_pitch);
+
+ chroma_pitch += 1;
+
+ REG_GET_3(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &pix_format,
+ ROTATION_ANGLE, &rotation_angle,
+ H_MIRROR_EN, &h_mirror_en);
+
+ /* reset persistent cached data */
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ /* Apply the workaround only for NV12 surfaces with scatter-gather enabled
+ * and a viewport > 512 along the vertical direction.
+ */
+ if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ address->video_progressive.luma_addr.high_part == 0xf4)
+ return;
+
+ if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180)
+ && viewport_c_height <= 512)
+ return;
+
+ if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270)
+ && viewport_c_width <= 512)
+ return;
+
+ switch (rotation_angle) {
+ case ROTATION_ANGLE_0: /* 0 degree rotation */
+ row_height = 128;
+ patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
+ patched_viewport_width = viewport_c_width;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ break;
+ case ROTATION_ANGLE_180: /* 180 degree rotation */
+ row_height = 128;
+ patched_viewport_height = viewport_c_height + row_height;
+ patched_viewport_width = viewport_c_width;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
+ break;
+ case ROTATION_ANGLE_90: /* 90 degree rotation */
+ row_height = 256;
+ if (h_mirror_en) {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ } else {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+ }
+ break;
+ case ROTATION_ANGLE_270: /* 270 degree rotation */
+ row_height = 256;
+ if (h_mirror_en) {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+ } else {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ }
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ /* catch cases where the viewport keeps growing */
+ ASSERT(patched_viewport_height && patched_viewport_height < 5000);
+ ASSERT(patched_viewport_width && patched_viewport_width < 5000);
+
+ REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+ PRI_VIEWPORT_WIDTH_C, patched_viewport_width,
+ PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
+}
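+
+/*
+ * Worked example (editor's illustration, hypothetical numbers): for the
+ * 0-degree case above, the chroma viewport height is padded up past the next
+ * 128-line PTE row boundary, plus one extra line. With
+ * viewport_c_height = 540:
+ *	patched_viewport_height = (540 / 128 + 1) * 128 + 1
+ *	                        = (4 + 1) * 128 + 1 = 641
+ */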
+
+void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
+ // The format of the default addr is 48:12 of the 48-bit addr
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+
+ // The format of high/low is 48:18 of the 48-bit addr
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
+
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 0x3);
+}
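+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * the aperture registers above are programmed with right-shifted forms of
+ * the 48-bit system addresses: the default address as addr >> 12 and the
+ * low/high limits as addr >> 18.
+ */
+#if 0 /* example only, hypothetical helper */
+static uint64_t example_encode_aperture_limit(uint64_t sys_addr)
+{
+ /* e.g. 0x0000000800000000 >> 18 == 0x20000 */
+ return sys_addr >> 18;
+}
+#endif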
+
+void hubp21_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
+
+ /* Requester - Per hubp */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:PRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+ if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+ if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+ if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+ if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+ if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+ if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:VM_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+ if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+ if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+ if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+ if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+ if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+ if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+ if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+ if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+ if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ /* Host VM deadline regs */
+ REG_GET(VBLANK_PARAMETERS_5,
+ REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank);
+ REG_GET(VBLANK_PARAMETERS_6,
+ REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank);
+ REG_GET(FLIP_PARAMETERS_3,
+ REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip);
+ REG_GET(FLIP_PARAMETERS_4,
+ REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip);
+ REG_GET(FLIP_PARAMETERS_5,
+ REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c);
+ REG_GET(FLIP_PARAMETERS_6,
+ REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c);
+ REG_GET(FLIP_PARAMETERS_2,
+ REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l);
+
+ if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank);
+ if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank);
+ if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip);
+ if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip);
+ if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c);
+ if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c);
+ if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l);
+}
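+
+/*
+ * Editor's aside (illustrative only): the repeated read/compare/log pattern
+ * in hubp21_validate_dml_output() could be captured by a hypothetical local
+ * macro such as the one below; the function above keeps the checks
+ * open-coded.
+ */
+#if 0 /* example only */
+#define DML_CHECK(reg, field, expected, actual) \
+ do { \
+ if ((actual) != (expected)) \
+ DC_LOG_DEBUG("DML Validation | " #reg ":" #field \
+ " - Expected: %u Actual: %u\n", \
+ (expected), (actual)); \
+ } while (0)
+#endif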
+
+static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+ REG_UPDATE_3(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_regs->immediate,
+ SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo,
+ SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo);
+
+ REG_UPDATE(VMID_SETTINGS_0,
+ VMID, flip_regs->vmid);
+
+ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface,
+ SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
+}
+
+void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+ struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
+
+ PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+ PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+ PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+ PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+ PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_cmd_execute(dmcub);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_wait_idle(dmcub);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+}
+
+bool hubp21_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct surface_flip_registers flip_regs = { 0 };
+
+ flip_regs.vmid = address->vmid;
+
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->grph.meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph.meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->grph.addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->grph.addr.high_part;
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->video_progressive.luma_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->video_progressive.luma_meta_addr.high_part;
+
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C =
+ address->video_progressive.chroma_meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->video_progressive.luma_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->video_progressive.luma_addr.high_part;
+
+ if (debug->nv12_iflip_vm_wa) {
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset;
+ } else
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_addr.low_part;
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+ address->video_progressive.chroma_addr.high_part;
+
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ flip_regs.grph_stereo = true;
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS =
+ address->grph_stereo.right_meta_addr.low_part;
+ flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.right_meta_addr.high_part;
+ }
+
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->grph_stereo.left_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.left_meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->grph_stereo.left_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.left_addr.high_part;
+
+ flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS =
+ address->grph_stereo.right_addr.low_part;
+ flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.right_addr.high_part;
+
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ flip_regs.tmz_surface = address->tmz_surface;
+ flip_regs.immediate = flip_immediate;
+
+ if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ dmcub_PLAT_54186_wa(hubp, &flip_regs);
+ else
+ program_surface_flip_and_addr(hubp, &flip_regs);
+
+ hubp->request_address = *address;
+
+ return true;
+}
+
+void hubp21_init(struct hubp *hubp)
+{
+ // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
+ // This is a chicken bit to enable the ECO fix.
+
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ //hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+}
+
+static struct hubp_funcs dcn21_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp1_program_surface_config,
+ .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_setup = hubp21_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
+ .set_blank = hubp1_set_blank,
+ .dcc_control = hubp1_dcc_control,
+ .mem_program_viewport = hubp21_set_viewport,
+ .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp1_cursor_set_position,
+ .hubp_clk_cntl = hubp1_clk_cntl,
+ .hubp_vtg_sel = hubp1_vtg_sel,
+ .dmdata_set_attributes = hubp2_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp1_read_state,
+ .hubp_clear_underflow = hubp1_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp21_init,
+ .validate_dml_output = hubp21_validate_dml_output,
+};
+
+bool hubp21_construct(
+ struct dcn21_hubp *hubp21,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask)
+{
+ hubp21->base.funcs = &dcn21_hubp_funcs;
+ hubp21->base.ctx = ctx;
+ hubp21->hubp_regs = hubp_regs;
+ hubp21->hubp_shift = hubp_shift;
+ hubp21->hubp_mask = hubp_mask;
+ hubp21->base.inst = inst;
+ hubp21->base.opp_id = OPP_ID_INVALID;
+ hubp21->base.mpcc_id = 0xf;
+
+ return true;
+}
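+
+/*
+ * Usage sketch (editor's example; the surrounding names are hypothetical and
+ * would live in the DCN2.1 resource code):
+ *
+ *	struct dcn21_hubp *hubp21 = kzalloc(sizeof(*hubp21), GFP_KERNEL);
+ *
+ *	if (hubp21 && hubp21_construct(hubp21, ctx, inst,
+ *			&hubp_regs[inst], &hubp_shift, &hubp_mask))
+ *		return &hubp21->base;
+ *
+ *	kfree(hubp21);
+ *	return NULL;
+ */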
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
new file mode 100644
index 000000000000..9873b6cbc5ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN21_DCN21_HUBP_H_
+#define DAL_DC_DCN21_DCN21_HUBP_H_
+
+#include "../dcn20/dcn20_hubp.h"
+#include "../dcn10/dcn10_hubp.h"
+
+#define TO_DCN21_HUBP(hubp)\
+ container_of(hubp, struct dcn21_hubp, base)
+
+#define HUBP_REG_LIST_DCN21(id)\
+ HUBP_REG_LIST_DCN2_COMMON(id),\
+ SRI(FLIP_PARAMETERS_3, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_4, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_5, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_6, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_5, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_6, HUBPREQ, id)
+
+#define HUBP_MASK_SH_LIST_DCN21_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_STATUS, DMDATA_DONE, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_1, REFCYC_PER_PTE_GROUP_FLIP_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_5, REFCYC_PER_PTE_GROUP_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+
+#define HUBP_MASK_SH_LIST_DCN21(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN21_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh)
+
+struct dcn21_hubp {
+ struct hubp base;
+ struct dcn_hubp_state state;
+ const struct dcn_hubp2_registers *hubp_regs;
+ const struct dcn_hubp2_shift *hubp_shift;
+ const struct dcn_hubp2_mask *hubp_mask;
+ int PLAT_54186_wa_chroma_addr_offset;
+};
+
+bool hubp21_construct(
+ struct dcn21_hubp *hubp21,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask);
+
+void apply_DEDCN21_142_wa_for_hostvm_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr);
+
+void hubp21_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+
+void hubp21_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs);
+#endif /* DAL_DC_DCN21_DCN21_HUBP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
new file mode 100644
index 000000000000..081ad8e43d58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dce/dce_hwseq.h"
+#include "dcn21_hwseq.h"
+#include "vmid.h"
+#include "reg_helper.h"
+#include "hw/clk_mgr.h"
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+/* Temporary read settings; in the future these values will come from the KMD directly. */
+static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
+ struct dce_hwseq *hws)
+{
+ uint32_t page_table_base_hi;
+ uint32_t page_table_base_lo;
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);
+
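+	/* Recombine the two 32-bit register halves into the 64-bit GART page table base address. */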
+ config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;
+
+}
+
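+/*
+ * Copy the DM-provided physical address space config (system aperture and
+ * GART ranges) into a hubbub config, pick up the current page table base
+ * from MMHUB, and let hubbub program the DCHUB system context with it.
+ */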
+int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+
+ mmhub_update_page_table_config(&config, hws);
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
+/*
+ * Workaround for Renoir s0i3: if the register has already been programmed,
+ * bypass golden init.
+ */
+bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t value = 0;
+
+ value = REG_READ(MICROSECOND_TIME_BASE_DIV);
+
+ return value != 0x00120464;
+}
+
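+/* Leaving the optimized power state: re-request clocks for the context with safe_to_lower = false. */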
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+}
+
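+/* Entering the optimized power state: update clocks with safe_to_lower = true so they may be dropped. */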
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
new file mode 100644
index 000000000000..182736096123
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN21_H__
+#define __DC_HWSS_DCN21_H__
+
+#include "hw_sequencer_private.h"
+
+struct dc;
+
+int dcn21_init_sys_ctx(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_phy_addr_space_config *pa_config);
+
+bool dcn21_s0i3_golden_init_wa(struct dc *dc);
+
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context);
+
+#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
new file mode 100644
index 000000000000..4861aa5c59ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21_hwseq.h"
+
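+/*
+ * DCN2.1 hardware sequencer function table: largely reuses the DCE110,
+ * DCN10 and DCN20 implementations, with Renoir-specific hooks for system
+ * context init and the optimized power state transitions.
+ */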
+static const struct hw_sequencer_funcs dcn21_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn21_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn21_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
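+/* Install the DCN2.1 sequencer tables; FPGA (Maximus) builds use the FPGA init path and skip pipe init. */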
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn21_funcs;
+ dc->hwseq->funcs = dcn21_private_funcs;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
new file mode 100644
index 000000000000..3ed24292648a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN21_INIT_H__
+#define __DC_DCN21_INIT_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN21_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
new file mode 100644
index 000000000000..e45683ac871a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include <linux/delay.h>
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn21_link_encoder.h"
+#include "stream_encoder.h"
+
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
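+/* Reference MPLL settings, indexed by DP link rate: RBR, HBR, HBR2, HBR3. */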
+static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
+ // RBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 238,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 44237,
+ .mpllb_ssc_stepsize = 59454,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 2,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 2,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ // HBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 1,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR2
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR3
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 304,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 49152,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 55296,
+ .mpllb_ssc_stepsize = 74318,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 1,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 16,
+ .hdmi_pixel_clk_div = 0,
+ },
+};
+
+
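+/*
+ * Fill the PHY sequence config for the requested link rate from the
+ * reference MPLL table above; all four lanes are enabled for now.
+ */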
+static bool update_cfg_data(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings,
+ struct dpcssys_phy_seq_cfg *cfg)
+{
+ int i;
+
+ cfg->load_sram_fw = false;
+ cfg->use_calibration_setting = true;
+
+ //TODO: need to implement a proper lane mapping for Renoir.
+ for (i = 0; i < 4; i++)
+ cfg->lane_en[i] = true;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
+ break;
+ case LINK_RATE_HIGH:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
+ break;
+ case LINK_RATE_HIGH2:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
+ break;
+ case LINK_RATE_HIGH3:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
+ break;
+ default:
+ DC_LOG_ERROR("%s: No supported link rate found %X!\n",
+ __func__, link_settings->link_rate);
+ return false;
+ }
+
+ return true;
+}
+
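+/* If the PHY reports that DP alt mode is limited to two lanes (DPALT_DP4 == 0), cap the advertised lane count. */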
+void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
+
+ if (!value && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+}
+
+bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
+
+ // if value == 1 alt mode is disabled, otherwise it is enabled
+ return !value;
+}
+
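+/*
+ * For USB-C links, make sure DP alt mode is not being disabled: clear the
+ * disable ack, wait 40 us, re-check, and only then enable the DP reference
+ * clock. Returns false if the PHY cannot be acquired.
+ */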
+bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	uint32_t value;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+
+ if (value == 1) {
+ ASSERT(0);
+ return false;
+ }
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 0);
+
+ udelay(40);
+
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+ if (value == 1) {
+ ASSERT(0);
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ return false;
+ }
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);
+
+ return true;
+}
+
+static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);
+
+}
+
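+/*
+ * Acquire the PHY first. Unless avoid_vbios_exec_table is set, the VBIOS
+ * based DCN10 enable path is used; otherwise the encoder is programmed
+ * directly from the reference MPLL config.
+ */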
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
+ struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;
+
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ return;
+ }
+
+ if (!update_cfg_data(enc10, link_settings, cfg))
+ return;
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
+
+}
+
+void dcn21_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+}
+
+void dcn21_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ dcn10_link_encoder_disable_output(enc, signal);
+
+ if (dc_is_dp_signal(signal))
+ dcn21_link_encoder_release_phy(enc);
+}
+
+
+static const struct link_encoder_funcs dcn21_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn21_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn21_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+};
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc21->enc10;
+
+ enc10->base.funcs = &dcn21_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether driver poll the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
+ * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
+ * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+ * Prefer DIG assignment is decided by board design.
+ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
+ * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
+ * By this, adding DIGG should not hurt DCE 8.0.
+ * This will let DCE 8.1 share DCE 8.0 as much as possible
+ */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
new file mode 100644
index 000000000000..033d5d76f195
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN21_H__
+#define __DC_LINK_ENCODER__DCN21_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+struct dcn21_link_encoder {
+ struct dcn10_link_encoder enc10;
+ struct dpcssys_phy_seq_cfg phy_seq_cfg;
+};
+
+#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\
+ DPCS_DCN2_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+
+#define DPCS_DCN21_REG_LIST(id) \
+ DPCS_DCN2_REG_LIST(id),\
+ SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
new file mode 100644
index 000000000000..33d0a176841a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -0,0 +1,1938 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn21_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn20/dcn20_resource.h"
+
+#include "clk_mgr.h"
+#include "dcn10/dcn10_hubp.h"
+#include "dcn10/dcn10_ipp.h"
+#include "dcn20/dcn20_hubbub.h"
+#include "dcn20/dcn20_mpc.h"
+#include "dcn20/dcn20_hubp.h"
+#include "dcn21_hubp.h"
+#include "irq/dcn21/irq_service_dcn21.h"
+#include "dcn20/dcn20_dpp.h"
+#include "dcn20/dcn20_optc.h"
+#include "dcn21/dcn21_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn20/dcn20_opp.h"
+#include "dcn20/dcn20_dsc.h"
+#include "dcn21/dcn21_link_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn20/dcn20_dccg.h"
+#include "dcn21_hubbub.h"
+#include "dcn10/dcn10_resource.h"
+#include "dce110/dce110_resource.h"
+
+#include "dcn20/dcn20_dwb.h"
+#include "dcn20/dcn20_mmhubbub.h"
+#include "dpcs/dpcs_2_1_0_offset.h"
+#include "dpcs/dpcs_2_1_0_sh_mask.h"
+
+#include "renoir_ip_offset.h"
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+
+#include "nbio/nbio_7_0_offset.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dcn21_resource.h"
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+#include "../dce/dmub_psr.h"
+
+#define SOC_BOUNDING_BOX_VALID false
+#define DC_LOGGER_INIT(logger)
+
+
+struct _vcs_dpi_ip_params_st dcn2_1_ip = {
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 2,
+ .num_dsc = 3,
+ .rob_buffer_size_kbytes = 168,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs_luma = 44,
+ .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 4,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0,
+ .line_buffer_fixed_bpp = 0,
+ .dcc_supported = true,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 12,
+ .writeback_max_vscl_taps = 12,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 4,
+ .max_num_dpp = 4,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 32, //
+ .dppclk_delay_subtotal = 77, //
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 8,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 87, //
+ .dcfclk_cstate_latency = 10, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .ptoi_supported = 0
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 304.0,
+ .fabricclk_mhz = 600.0,
+ .dispclk_mhz = 618.0,
+ .dppclk_mhz = 440.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 205.67,
+ .dram_speed_mts = 1600.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 304.0,
+ .fabricclk_mhz = 600.0,
+ .dispclk_mhz = 618.0,
+ .dppclk_mhz = 618.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 205.67,
+ .dram_speed_mts = 1600.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 608.0,
+ .fabricclk_mhz = 1066.0,
+ .dispclk_mhz = 888.0,
+ .dppclk_mhz = 888.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 287.67,
+ .dram_speed_mts = 2133.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 676.0,
+ .fabricclk_mhz = 1600.0,
+ .dispclk_mhz = 1015.0,
+ .dppclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 715.0,
+ .dscclk_mhz = 318.334,
+ .dram_speed_mts = 4266.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 810.0,
+ .fabricclk_mhz = 1600.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
+ .socclk_mhz = 953.0,
+ .dscclk_mhz = 489.0,
+ .dram_speed_mts = 4266.0,
+ },
+ /*Extra state, no dispclk ramping*/
+ {
+ .state = 5,
+ .dcfclk_mhz = 810.0,
+ .fabricclk_mhz = 1600.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
+ .socclk_mhz = 953.0,
+ .dscclk_mhz = 489.0,
+ .dram_speed_mts = 4266.0,
+ },
+
+ },
+
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 100.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 4,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 4,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 23.84,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3600,
+ .xfc_bus_transport_time_us = 4,
+ .xfc_xbuf_latency_tolerance_us = 4,
+ .use_urgent_burst_bw = 1,
+ .num_states = 5
+};
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header files */
+
+/* DCN */
+/* TODO awful hack. fixup dcn20_dwb.h */
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define DCCG_SRII(reg_name, block, id)\
+ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIF0_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
+ mmMM ## reg_name
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN2_1(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN20_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN20_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+ DCCG_COMMON_REG_LIST_DCN_BASE()
+};
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN2(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN20(id),\
+}
+
+static const struct dcn20_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5),
+};
+
+static const struct dcn20_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn20_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define tg_regs(id)\
+[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
+
+static const struct dcn_optc_registers tg_regs[] = {
+ tg_regs(0),
+ tg_regs(1),
+ tg_regs(2),
+ tg_regs(3)
+};
+
+static const struct dcn_optc_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dcn_optc_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+static const struct dcn20_mpc_registers mpc_regs = {
+ MPC_REG_LIST_DCN2_0(0),
+ MPC_REG_LIST_DCN2_0(1),
+ MPC_REG_LIST_DCN2_0(2),
+ MPC_REG_LIST_DCN2_0(3),
+ MPC_REG_LIST_DCN2_0(4),
+ MPC_REG_LIST_DCN2_0(5),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(0),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(1),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(2),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(3),
+ MPC_DBG_REG_LIST_DCN2_0()
+};
+
+static const struct dcn20_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
+};
+
+static const struct dcn20_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
+};
+
+#define hubp_regs(id)\
+[id] = {\
+ HUBP_REG_LIST_DCN21(id)\
+}
+
+static const struct dcn_hubp2_registers hubp_regs[] = {
+ hubp_regs(0),
+ hubp_regs(1),
+ hubp_regs(2),
+ hubp_regs(3)
+};
+
+static const struct dcn_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN21(__SHIFT)
+};
+
+static const struct dcn_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN21(_MASK)
+};
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN21()
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN21(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN21(_MASK)
+};
+
+
+#define vmid_regs(id)\
+[id] = {\
+ DCN20_VMID_REG_LIST(id)\
+}
+
+static const struct dcn_vmid_registers vmid_regs[] = {
+ vmid_regs(0),
+ vmid_regs(1),
+ vmid_regs(2),
+ vmid_regs(3),
+ vmid_regs(4),
+ vmid_regs(5),
+ vmid_regs(6),
+ vmid_regs(7),
+ vmid_regs(8),
+ vmid_regs(9),
+ vmid_regs(10),
+ vmid_regs(11),
+ vmid_regs(12),
+ vmid_regs(13),
+ vmid_regs(14),
+ vmid_regs(15)
+};
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+#define dsc_regsDCN20(id)\
+[id] = {\
+ DSC_REG_LIST_DCN20(id)\
+}
+
+static const struct dcn20_dsc_registers dsc_regs[] = {
+ dsc_regsDCN20(0),
+ dsc_regsDCN20(1),
+ dsc_regsDCN20(2),
+ dsc_regsDCN20(3),
+ dsc_regsDCN20(4),
+ dsc_regsDCN20(5)
+};
+
+static const struct dcn20_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
+};
+
+static const struct dcn20_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN20(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN20(_MASK),
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST0(id), \
+ .AUXN_IMPCAL = 0, \
+ .AUXP_IMPCAL = 0, \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
+}
+
+static const struct dcn2_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+};
+
+static const struct dcn2_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN20
+};
+
+static const struct dcn2_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN20
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN2_REG_LIST(id)\
+}
+
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+};
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+
+static struct input_pixel_processor *dcn21_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn20_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static struct dpp *dcn21_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_dpp *dpp =
+ kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
+
+ if (!dpp)
+ return NULL;
+
+ if (dpp2_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask))
+ return &dpp->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+static struct dce_aux *dcn21_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+ i2c_inst_regs(3),
+ i2c_inst_regs(4),
+ i2c_inst_regs(5),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
+};
+
+struct dce_i2c_hw *dcn21_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+
+static const struct resource_caps res_cap_rn = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 4, // 4 audio endpoints. 4 audio streams
+ .num_stream_encoder = 5,
+	.num_pll = 5, // maybe 3, because the last two are used for USB-C
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 1,
+ .num_dsc = 3,
+};
+
+#ifdef DIAGS_BUILD
+static const struct resource_caps res_cap_rn_FPGA_4pipe = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 7,
+ .num_stream_encoder = 4,
+ .num_pll = 4,
+ .num_dwb = 1,
+ .num_ddc = 4,
+ .num_dsc = 0,
+};
+
+static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
+ .num_timing_generator = 2,
+ .num_opp = 2,
+ .num_video_plane = 2,
+ .num_audio = 7,
+ .num_stream_encoder = 2,
+ .num_pll = 4,
+ .num_dwb = 1,
+ .num_ddc = 4,
+ .num_dsc = 2,
+};
+#endif
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 250
+ }
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 4096,
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = true,
+ .disable_48mhz_pwrdwn = false,
+ .nv12_iflip_vm_wa = true,
+ .usbc_combo_phy_reset_wa = true
+};
+
+static const struct dc_debug_options debug_defaults_diags = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = true,
+ .clock_trace = true,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
+ .disable_clock_gate = true,
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = true,
+ .disable_stutter = true,
+ .disable_48mhz_pwrdwn = true,
+};
+
+enum dcn20_clk_src_array_id {
+ DCN20_CLK_SRC_PLL0,
+ DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_TOTAL_DCN21
+};
+
+static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn20_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+
+ if (pool->base.pp_smu != NULL)
+ dcn21_pp_smu_destroy(&pool->base.pp_smu);
+}
+
+
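+/*
+ * Compute one watermark set for a given voltage level: pipe 0's clock config
+ * is programmed for that level, the DML SoC latencies are temporarily
+ * overridden with the range-table entry for this set, and the resulting
+ * watermarks are read back; the latency values are converted from the
+ * microseconds DML reports to nanoseconds (the * 1000). The cached
+ * dram_clock_change_latency_us is restored at the end.
+ */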
+static void calculate_wm_set_for_vlevel(
+ int vlevel,
+ struct wm_range_table_entry *table_entry,
+ struct dcn_watermarks *wm_set,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+ ASSERT(vlevel < dml->soc.num_states);
+ /* only pipe 0 is read for voltage and dcf/soc clocks */
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+ pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+ dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+ wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+ dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
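+/*
+ * Apply the debug bounding-box overrides (specified in nanoseconds) to the
+ * clk_mgr watermark table and the DML SoC bounding box, both of which store
+ * latencies in microseconds, hence the divide by 1000.0. The floating-point
+ * math is wrapped in DC_FP_START()/DC_FP_END().
+ */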
+static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+{
+ int i;
+
+ DC_FP_START();
+
+ if (dc->bb_overrides.sr_exit_time_ns) {
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
+ }
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+ }
+
+ if (dc->bb_overrides.urgent_latency_ns) {
+ bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns) {
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
+ }
+
+ DC_FP_END();
+}
+
+void dcn21_calculate_wm(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *out_pipe_cnt,
+ int *pipe_split_from,
+ int vlevel_req)
+{
+ int pipe_cnt, i, pipe_idx;
+ int vlevel, vlevel_max;
+ struct wm_range_table_entry *table_entry;
+ struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+ ASSERT(bw_params);
+
+ patch_bounding_box(dc, &context->bw_ctx.dml.soc);
+
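+	/*
+	 * Fill the per-pipe clock config for the requested voltage level.
+	 * Pipes created by a split inherit their DPPCLK/ODM settings from the
+	 * pipe they were split from; if the number of filled pipes differs
+	 * from the number of unsplit pipes, the DML pipes are repopulated
+	 * from the context below.
+	 */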
+ for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+ pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb];
+
+ if (pipe_split_from[i] < 0) {
+ pipes[pipe_cnt].clks_cfg.dppclk_mhz =
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
+ if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
+ pipes[pipe_cnt].pipe.dest.odm_combine =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_idx];
+ else
+ pipes[pipe_cnt].pipe.dest.odm_combine = 0;
+ pipe_idx++;
+ } else {
+ pipes[pipe_cnt].clks_cfg.dppclk_mhz =
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
+ if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
+ pipes[pipe_cnt].pipe.dest.odm_combine =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_split_from[i]];
+ else
+ pipes[pipe_cnt].pipe.dest.odm_combine = 0;
+ }
+ pipe_cnt++;
+ }
+
+ if (pipe_cnt != pipe_idx) {
+ if (dc->res_pool->funcs->populate_dml_pipes)
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
+ context, pipes);
+ else
+ pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
+ context, pipes);
+ }
+
+ *out_pipe_cnt = pipe_cnt;
+
+ vlevel_max = bw_params->clk_table.num_entries - 1;
+
+
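+	/*
+	 * Watermark sets are evaluated at different minimum voltage levels:
+	 * set A at the requested level, set B at no lower than level 1, set C
+	 * at no lower than level 2, and set D at either the lowest state (for
+	 * a retraining entry) or the highest state.
+	 */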
+ /* WM Set D */
+ table_entry = &bw_params->wm_table.entries[WM_D];
+ if (table_entry->wm_type == WM_TYPE_RETRAINING)
+ vlevel = 0;
+ else
+ vlevel = vlevel_max;
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set C */
+ table_entry = &bw_params->wm_table.entries[WM_C];
+ vlevel = MIN(MAX(vlevel_req, 2), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set B */
+ table_entry = &bw_params->wm_table.entries[WM_B];
+ vlevel = MIN(MAX(vlevel_req, 1), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ /* WM Set A */
+ table_entry = &bw_params->wm_table.entries[WM_A];
+ vlevel = MIN(vlevel_req, vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+}
+
+
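+/*
+ * Bandwidth validation: run the DCN20 fast DML pass to derive the pipe count,
+ * pipe splits and minimum voltage level; stop there for fast_validate,
+ * otherwise compute the watermarks and DLG parameters for the final state.
+ */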
+bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
+
+ if (pipe_cnt == 0)
+ goto validate_out;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel);
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
+static void dcn21_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
+
+ dcn21_resource_destruct(dcn21_pool);
+ kfree(dcn21_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn21_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static struct hubp *dcn21_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn21_hubp *hubp21 =
+ kzalloc(sizeof(struct dcn21_hubp), GFP_KERNEL);
+
+ if (!hubp21)
+ return NULL;
+
+ if (hubp21_construct(hubp21, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp21->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp21);
+ return NULL;
+}
+
+static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub)
+ return NULL;
+
+ hubbub21_construct(hubbub, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask);
+
+ for (i = 0; i < res_cap_rn.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub->base;
+}
+
+struct output_pixel_processor *dcn21_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn20_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct timing_generator *dcn21_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn20_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+struct mpc *dcn21_mpc_create(struct dc_context *ctx)
+{
+ struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
+ GFP_KERNEL);
+
+ if (!mpc20)
+ return NULL;
+
+ dcn20_mpc_construct(mpc20, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ 6);
+
+ return &mpc20->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+
+struct display_stream_compressor *dcn21_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ return &dsc->base;
+}
+
+static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ int i;
+
+ dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+ dcn2_1_soc.num_chans = bw_params->num_channels;
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+
+ dcn2_1_soc.clock_limits[i].state = i;
+ dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+ }
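+	/* duplicate the highest clock state into one extra entry */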
+ dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
+ dcn2_1_soc.num_states = i;
+
+	// diags does not retrieve proper values from SMU; do not update the DML instance for diags
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+}
+
+/* Temporary placeholder until we can get these from the fuse */
+static struct dpm_clocks dummy_clocks = {
+ .DcfClocks = {
+ {.Freq = 400, .Vol = 1},
+ {.Freq = 483, .Vol = 1},
+ {.Freq = 602, .Vol = 1},
+ {.Freq = 738, .Vol = 1} },
+ .SocClocks = {
+ {.Freq = 300, .Vol = 1},
+ {.Freq = 400, .Vol = 1},
+ {.Freq = 400, .Vol = 1},
+ {.Freq = 400, .Vol = 1} },
+ .FClocks = {
+ {.Freq = 400, .Vol = 1},
+ {.Freq = 800, .Vol = 1},
+ {.Freq = 1067, .Vol = 1},
+ {.Freq = 1600, .Vol = 1} },
+ .MemClocks = {
+ {.Freq = 800, .Vol = 1},
+ {.Freq = 1600, .Vol = 1},
+ {.Freq = 1067, .Vol = 1},
+ {.Freq = 1600, .Vol = 1} },
+
+};
+
+static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ return PP_SMU_RESULT_OK;
+}
+
+static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
+ struct dpm_clocks *clock_table)
+{
+ *clock_table = dummy_clocks;
+ return PP_SMU_RESULT_OK;
+}
+
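+/*
+ * For FPGA/diag environments the SMU does not report usable values, so dummy
+ * callbacks returning a fixed clock table are installed; otherwise the real
+ * funcs are queried from the DM and the struct is zeroed if the reported
+ * interface version is not PP_SMU_VER_RN.
+ */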
+static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
+{
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+
+ if (!pp_smu)
+ return pp_smu;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) {
+ pp_smu->ctx.ver = PP_SMU_VER_RN;
+ pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
+ pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ } else {
+
+ dm_pp_get_funcs(ctx, pp_smu);
+
+ if (pp_smu->ctx.ver != PP_SMU_VER_RN)
+ pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
+ }
+
+ return pp_smu;
+}
+
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+{
+ if (pp_smu && *pp_smu) {
+ kfree(*pp_smu);
+ *pp_smu = NULL;
+ }
+}
+
+static struct audio *dcn21_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+struct stream_encoder *dcn21_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1 =
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+
+ if (!enc1)
+ return NULL;
+
+ dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN21_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN21_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN21_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dcn21_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN21 = true;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn21_create_audio,
+ .create_stream_encoder = dcn21_stream_encoder_create,
+ .create_hwseq = dcn21_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn21_hwseq_create,
+};
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN2_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN21_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN21_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN21_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
+static struct link_encoder *dcn21_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn21_link_encoder *enc21 =
+ kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
+ int link_regs_id;
+
+ if (!enc21)
+ return NULL;
+
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
+ dcn21_link_encoder_construct(enc21,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc21->enc10.base;
+}
+#define CTX ctx
+
+#define REG(reg_name) \
+ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* this ASIC supports a max of 4 pipes; mask off the rest */
+ value = value & 0xf;
+ return value;
+}
+
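+/*
+ * Reuse the DCN20 DML pipe population, then mark host VM and GPU VM as
+ * enabled on every active pipe.
+ */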
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
+{
+ uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
+ int i;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipes[i].pipe.src.hostvm = 1;
+ pipes[i].pipe.src.gpuvm = 1;
+ }
+
+ return pipe_cnt;
+}
+
+static struct resource_funcs dcn21_res_pool_funcs = {
+ .destroy = dcn21_destroy_resource_pool,
+ .link_enc_create = dcn21_link_encoder_create,
+ .validate_bandwidth = dcn21_validate_bandwidth,
+ .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
+ .add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
+ .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .update_bw_bounding_box = update_bw_bounding_box
+};
+
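+/*
+ * Construct the DCN21 resource pool: read the pipe fuses, hardcode the ASIC
+ * caps, create the clock sources, DCCG, DMCU, ABM and pp_smu, shrink the DML
+ * IP parameters by the number of fused-off pipes, then create the IRQ service
+ * and the per-pipe HW blocks (HUBP/IPP/DPP/OPP/TG), skipping fused instances.
+ */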
+static bool dcn21_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn21_resource_pool *pool)
+{
+ int i, j;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ uint32_t num_pipes;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_rn;
+#ifdef DIAGS_BUILD
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc;
+ pool->base.res_cap = &res_cap_rn_FPGA_4pipe;
+#endif
+
+ pool->base.funcs = &dcn21_res_pool_funcs;
+
+ /*************************************************
+	 *  Resource + ASIC cap hardcoding               *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+	/* max pipe num for the ASIC before pipe fuses are checked */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 256;
+ dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.hw_3d_lut = true;
+
+ dc->caps.max_slave_planes = 1;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+ else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
+ pool->base.pipe_count = 4;
+ dc->debug = debug_defaults_diags;
+ } else
+ dc->debug = debug_defaults_diags;
+
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+
+ pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
+
+	/* todo: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.dmcu = dcn21_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+	// Leave as NULL so the current dmcu psr programming sequence is unaffected
+	// Will be enabled once the functionality is confirmed to be working
+ pool->base.psr = NULL;
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.pp_smu = dcn21_pp_smu_create(ctx);
+
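+	/*
+	 * Each bit set in the pipe fuses marks a harvested pipe; reduce the
+	 * DPP/OTG counts in the DML IP parameters accordingly before the DML
+	 * instance is initialized.
+	 */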
+ num_pipes = dcn2_1_ip.max_num_dpp;
+
+ for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
+ if (pipe_fuses & 1 << i)
+ num_pipes--;
+ dcn2_1_ip.max_num_dpp = num_pipes;
+ dcn2_1_ip.max_num_otg = num_pipes;
+
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn21_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ j = 0;
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+		/* if pipe is disabled, skip instance of HW pipe,
+		 * i.e., skip ASIC register instance
+		 */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
+ pool->base.hubps[j] = dcn21_hubp_create(ctx, i);
+ if (pool->base.hubps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto create_fail;
+ }
+
+ pool->base.ipps[j] = dcn21_ipp_create(ctx, i);
+ if (pool->base.ipps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[j] = dcn21_dpp_create(ctx, i);
+ if (pool->base.dpps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+
+ pool->base.opps[j] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.timing_generators[j] = dcn21_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ j++;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn21_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+				"DC: failed to create aux engine!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn21_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+				"DC: failed to create hw i2c!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ pool->base.timing_generator_count = j;
+ pool->base.pipe_count = j;
+ pool->base.mpcc_count = j;
+
+ pool->base.mpc = dcn21_mpc_create(ctx);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ pool->base.hubbub = dcn21_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn21_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ if (!dcn20_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+ if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto create_fail;
+
+ dcn21_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+create_fail:
+
+ dcn21_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn21_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn21_resource_pool *pool =
+ kzalloc(sizeof(struct dcn21_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h
new file mode 100644
index 000000000000..a27355171bca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN21_RESOURCE_H_
+#define _DCN21_RESOURCE_H_
+
+#include "core_types.h"
+
+#define TO_DCN21_RES_POOL(pool)\
+ container_of(pool, struct dcn21_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn21_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn21_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#endif /* _DCN21_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
new file mode 100644
index 000000000000..626d22d437f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_CP_PSP_IF__H
+#define DM_CP_PSP_IF__H
+
+struct dc_link;
+
+struct cp_psp_stream_config {
+ uint8_t otg_inst;
+ uint8_t link_enc_inst;
+ uint8_t stream_enc_inst;
+ void *dm_stream_ctx;
+ bool dpms_off;
+};
+
+struct cp_psp_funcs {
+ void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config);
+};
+
+struct cp_psp {
+ void *handle;
+ struct cp_psp_funcs funcs;
+};
+
+
+#endif /* DM_CP_PSP_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index b6b4333737f2..8bde1d688f2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
/*
* Polls for ACT (allocation change trigger) handled and
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream);
/*
@@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
bool enable
);
-#endif
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 680689cab5dd..ae608c329366 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -41,9 +41,8 @@ enum pp_smu_ver {
*/
PP_SMU_UNSUPPORTED,
PP_SMU_VER_RV,
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
PP_SMU_VER_NV,
-#endif
+ PP_SMU_VER_RN,
PP_SMU_VER_MAX
};
@@ -140,7 +139,6 @@ struct pp_smu_funcs_rv {
void (*set_pme_wa_enable)(struct pp_smu *pp);
};
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
/* Used by pp_smu_funcs_nv.set_voltage_by_freq
*
*/
@@ -244,15 +242,51 @@ struct pp_smu_funcs_nv {
enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,
BOOLEAN pstate_handshake_supported);
};
-#endif
+
+#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
+
+struct dpm_clock {
+ uint32_t Freq; // In MHz
+ uint32_t Vol; // Millivolts with 2 fractional bits
+};
+
+
+/* this is a copy of the structure defined in smuxx_driver_if.h */
+struct dpm_clocks {
+ struct dpm_clock DcfClocks[PP_SMU_NUM_DCFCLK_DPM_LEVELS];
+ struct dpm_clock SocClocks[PP_SMU_NUM_SOCCLK_DPM_LEVELS];
+ struct dpm_clock FClocks[PP_SMU_NUM_FCLK_DPM_LEVELS];
+ struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
+};
+
+
+struct pp_smu_funcs_rn {
+ struct pp_smu pp_smu;
+
+ /*
+	 * reader and writer WMs are sent together as part of one table
+ *
+ * PPSMC_MSG_SetDriverDramAddrHigh
+ * PPSMC_MSG_SetDriverDramAddrLow
+ * PPSMC_MSG_TransferTableDram2Smu
+ *
+ */
+ enum pp_smu_status (*set_wm_ranges)(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges);
+
+ enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp,
+ struct dpm_clocks *clock_table);
+};
struct pp_smu_funcs {
struct pp_smu ctx;
union {
struct pp_smu_funcs_rv rv_funcs;
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
struct pp_smu_funcs_nv nv_funcs;
-#endif
+ struct pp_smu_funcs_rn rn_funcs;
};
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index b426ba02b793..968ff1fef486 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -40,6 +40,9 @@
#undef DEPRECATED
+struct dmub_srv;
+struct dc_dmub_srv;
+
irq_handler_idx dm_register_interrupt(
struct dc_context *ctx,
struct dc_interrupt_params *int_params,
@@ -139,6 +142,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub);
+void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv);
+
+void reg_sequence_start_gather(const struct dc_context *ctx);
+void reg_sequence_start_execute(const struct dc_context *ctx);
+void reg_sequence_wait_done(const struct dc_context *ctx);
+
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
@@ -151,6 +161,7 @@ void generic_reg_wait(const struct dc_context *ctx,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line);
+unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...);
/* These macros need to be used with soc15 registers in order to retrieve
* the actual offset.
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index a3d1be20dd9d..b52ba6ffabe1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -220,6 +220,7 @@ struct dm_bl_data_point {
};
/* Total size of the structure should not exceed 256 bytes */
+#define BL_DATA_POINTS 99
struct dm_acpi_atif_backlight_caps {
uint16_t size; /* Bytes 0-1 (2 bytes) */
uint16_t flags; /* Byted 2-3 (2 bytes) */
@@ -229,7 +230,7 @@ struct dm_acpi_atif_backlight_caps {
uint8_t min_input_signal; /* Byte 7 */
uint8_t max_input_signal; /* Byte 8 */
uint8_t num_data_points; /* Byte 9 */
- struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
+ struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/
};
enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 0bb7a20675c4..7ee8b8460a9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -1,5 +1,6 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -24,35 +25,56 @@
# It provides the general basic services required by other DAL
# subcomponents.
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
+ifdef CONFIG_X86
+dml_ccflags := -mhard-float -msse
endif
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_PPC64
+dml_ccflags := -mhard-float -maltivec
+endif
-CFLAGS_display_mode_lib.o := $(dml_ccflags)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
-CFLAGS_display_mode_vba.o := $(dml_ccflags)
-CFLAGS_display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_display_rq_dlg_calc_20.o := $(dml_ccflags)
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
+dml_ccflags += -msse2
endif
-ifdef CONFIG_DRM_AMD_DCN3AG
-CFLAGS_display_mode_vba_3ag.o := $(dml_ccflags)
endif
-CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
-CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_dml_common_defs.o := $(dml_ccflags)
+
+CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
+
+ifdef CONFIG_DRM_AMD_DC_DCN
+CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
+endif
+CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
dml_common_defs.o
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
+DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
+DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
endif
+
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
AMD_DISPLAY_FILES += $(AMD_DAL_DML)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 649883777f62..45f028986a8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -38,6 +38,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -937,7 +938,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1335,11 +1336,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2577,7 +2578,9 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
@@ -2846,12 +2849,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
@@ -3346,7 +3349,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3444,10 +3447,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
- locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+ locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3458,7 +3461,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3470,7 +3473,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3481,7 +3484,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3519,12 +3522,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
- + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
@@ -3892,16 +3895,22 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
- locals->ODMCombineEnablePerState[i][k] = false;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
- } else {
- locals->ODMCombineEnablePerState[i][k] = true;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+ locals->ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ if (mode_lib->vba.ODMCapability) {
+ if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ }
}
+
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -3990,16 +3999,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
@@ -4181,8 +4190,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4205,7 +4213,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4247,7 +4255,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4290,7 +4298,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true)
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4343,28 +4351,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
- locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
- dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+ dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
@@ -4404,14 +4412,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
- mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4421,9 +4429,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4434,9 +4442,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4446,9 +4454,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4458,9 +4466,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4471,9 +4479,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4509,7 +4517,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4548,7 +4556,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
@@ -4562,14 +4570,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4596,14 +4604,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
}
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4653,7 +4661,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
@@ -4698,7 +4706,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.DSCDelayPerState[i][k],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.ScalerEnabled[k],
@@ -4716,7 +4724,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
@@ -4726,15 +4734,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
@@ -4765,19 +4773,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+ if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
- locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+ locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
- if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+ if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
- locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+ locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
@@ -4796,13 +4804,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4827,7 +4835,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.ReturnBWPerState[i];
+ mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4841,9 +4849,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- + mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k];
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ + mode_lib->vba.MetaRowBytes[0][0][k]
+ + mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4871,9 +4879,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
@@ -4898,7 +4906,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > mode_lib->vba.ReturnBWPerState[i]) {
+ > mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4917,13 +4925,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k];
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+ mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
- if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+ if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
@@ -5011,7 +5019,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5021,7 +5029,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5041,7 +5049,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5087,7 +5095,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
new file mode 100644
index 000000000000..485a9c62ec58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -0,0 +1,5155 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../display_mode_lib.h"
+#include "display_mode_vba_20v2.h"
+#include "../dml_inline_defs.h"
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it, the code should
+ * remain as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
+#define BPP_INVALID 0
+#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
+
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN);
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat);
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
+static bool CalculateDelayAfterScaler(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ double ReadBandwidthPlaneLuma,
+ double ReadBandwidthPlaneChroma,
+ double TotalDataReadBandwidth,
+ double DisplayPipeLineDeliveryTimeLuma,
+ double DisplayPipeLineDeliveryTimeChroma,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int HTotal,
+ unsigned int SwathWidthSingleDPPY,
+ double BytePerPixelDETY,
+ double BytePerPixelDETC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler
+ );
+// Super monster function with some 45 arguments
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFCLKDeepSleep,
+ unsigned int DPPPerPlane,
+ unsigned int NumberOfCursors,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool GPUVMEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatencyPixelDataOnly,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double DSTXAfterScaler,
+ double DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ double *VUpdateWidthPix,
+ double *VReadyOffsetPix);
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath);
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidthY,
+ bool GPUVMEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequestsLuma,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height);
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatencyPixelDataOnly,
+ double SREnterPlusExitTime);
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk);
+static void CalculateActiveRowBandwidth(
+ bool GPUVMEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw);
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatencyPixelDataOnly,
+ unsigned int GPUVMMaxPageTableLevels,
+ bool GPUVMEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double VRatio,
+ double Tno_bw,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe);
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth);
+
+static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib);
+
+void dml20v2_recalculate(struct display_mode_lib *mode_lib)
+{
+ ModeSupportAndSystemConfiguration(mode_lib);
+ mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
+ mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000.0;
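+	/*
+	 * Worked example with purely hypothetical numbers: DRAMSpeed = 1600,
+	 * NumberOfChannels = 2, DRAMChannelWidth = 8 gives 1600 * 2 * 8 = 25600
+	 * on the DRAM side, while FabricClock = 1600 with
+	 * FabricDatapathToDCNDataReturn = 32 gives 1600 * 32 = 51200 on the
+	 * fabric side; the minimum, divided by 1000, yields
+	 * FabricAndDRAMBandwidth = 25.6, i.e. the DRAM path limits here.
+	 */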
+ PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
+ dml20v2_DisplayPipeConfiguration(mode_lib);
+ dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
+}
+
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN)
+{
+ double CriticalCompression;
+
+ if (DCCEnabledAnyPlane
+ && ReturnBandwidthToDCN
+ > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ ReturnBandwidthToDCN * 4
+ * (1.0
+ - mode_lib->vba.UrgentLatencyPixelDataOnly
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ / ReturnBandwidthToDCN
+ - mode_lib->vba.DCFCLK
+ * mode_lib->vba.ReturnBusWidth
+ / 4)
+ + mode_lib->vba.UrgentLatencyPixelDataOnly));
+
+ CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatencyPixelDataOnly
+ / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatencyPixelDataOnly
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024);
+
+ if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ 4.0 * ReturnBandwidthToDCN
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatencyPixelDataOnly
+ / dml_pow(
+ (ReturnBandwidthToDCN
+ * mode_lib->vba.UrgentLatencyPixelDataOnly
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024),
+ 2));
+
+ return ReturnBW;
+}
+
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat)
+{
+ // valid bpc = source bits per component in the set of {8, 10, 12}
+ // valid bpp = increments of 1/16 of a bit
+ // min = 6/7/8 in N420/N422/444, respectively
+ // max = such that compression is 1:1
+	// valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
+	// valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
+	// valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
+
+ // fixed value
+ unsigned int rcModelSize = 8192;
+
+ // N422/N420 operate at 2 pixels per clock
+ unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
+ Delay, pixels;
+
+ if (pixelFormat == dm_n422 || pixelFormat == dm_420)
+ pixelsPerClock = 2;
+	// all other modes operate at 1 pixel per clock
+ else
+ pixelsPerClock = 1;
+
+ //initial transmit delay as per PPS
+ initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
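+	// Example with hypothetical inputs: bpp = 8 in a 444 format
+	// (pixelsPerClock = 1) gives dml_round(8192 / 2.0 / 8 / 1) = 512.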
+
+ //compute ssm delay
+ if (bpc == 8)
+ D = 81;
+ else if (bpc == 10)
+ D = 89;
+ else
+ D = 113;
+
+ //divide by pixel per cycle to compute slice width as seen by DSC
+ w = sliceWidth / pixelsPerClock;
+
+ //422 mode has an additional cycle of delay
+ if (pixelFormat == dm_s422)
+ s = 1;
+ else
+ s = 0;
+
+ //main calculation for the dscce
+ ix = initalXmitDelay + 45;
+ wx = (w + 2) / 3;
+ p = 3 * wx - w;
+ l0 = ix / w;
+ a = ix + p * l0;
+ ax = (a + 2) / 3 + D + 6 + 1;
+ l = (ax + wx - 1) / wx;
+ if ((ix % w) == 0 && p != 0)
+ lstall = 1;
+ else
+ lstall = 0;
+ Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
+
+ //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
+ pixels = Delay * 3 * pixelsPerClock;
+ return pixels;
+}
+
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
+{
+ unsigned int Delay = 0;
+
+ if (pixelFormat == dm_420) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 2;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 13;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 3;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else if (pixelFormat == dm_n422) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 1;
+ // dscc - input deserializer
+ Delay = Delay + 5;
+ // dscc - input cdc fifo
+ Delay = Delay + 25;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 10;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // sft
+ Delay = Delay + 1;
+ }
+
+ return Delay;
+}
+
+static bool CalculateDelayAfterScaler(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ double ReadBandwidthPlaneLuma,
+ double ReadBandwidthPlaneChroma,
+ double TotalDataReadBandwidth,
+ double DisplayPipeLineDeliveryTimeLuma,
+ double DisplayPipeLineDeliveryTimeChroma,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int HTotal,
+ unsigned int SwathWidthSingleDPPY,
+ double BytePerPixelDETY,
+ double BytePerPixelDETC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler
+ )
+{
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DataFabricLineDeliveryTimeLuma;
+ double DataFabricLineDeliveryTimeChroma;
+ double DSTTotalPixelsAfterScaler;
+
+ DataFabricLineDeliveryTimeLuma = SwathWidthSingleDPPY * SwathHeightY * dml_ceil(BytePerPixelDETY, 1) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneLuma / TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeLuma - DisplayPipeLineDeliveryTimeLuma);
+
+ if (BytePerPixelDETC != 0) {
+ DataFabricLineDeliveryTimeChroma = SwathWidthSingleDPPY / 2 * SwathHeightC * dml_ceil(BytePerPixelDETC, 2) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneChroma / TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeChroma - DisplayPipeLineDeliveryTimeChroma);
+ }
+
+ if (ScalerEnabled)
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
+ else
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
+
+ DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
+
+ DISPCLKCycles = DISPCLKDelaySubtotal;
+
+ if (DPPCLK == 0.0 || DISPCLK == 0.0)
+ return true;
+
+ *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
+ + DSCDelay;
+
+ if (DPPPerPlane > 1)
+ *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
+
+ if (OutputFormat == dm_420 || (Interlace && ProgressiveToInterlaceUnitInOPP))
+ *DSTYAfterScaler = 1;
+ else
+ *DSTYAfterScaler = 0;
+
+ DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
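+	/*
+	 * The two statements above fold any X overflow into whole lines, e.g.
+	 * with hypothetical HTotal = 2200, DSTX = 2500 and DSTY = 0 the total
+	 * is 2500 pixels, which normalizes to DSTY = 1 and DSTX = 300.
+	 */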
+
+ return true;
+}
+
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFCLKDeepSleep,
+ unsigned int DPPPerPlane,
+ unsigned int NumberOfCursors,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool GPUVMEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatencyPixelDataOnly,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double DSTXAfterScaler,
+ double DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ double *VUpdateWidthPix,
+ double *VReadyOffsetPix)
+{
+ bool MyError = false;
+ double TotalRepeaterDelayTime;
+ double Tdm, LineTime, Tsetup;
+ double dst_y_prefetch_equ;
+ double Tsw_oto;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tpre_oto;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+
+ *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
+ TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
+ *VUpdateWidthPix = (14.0 / DCFCLKDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
+ * PixelClock;
+
+ *VReadyOffsetPix = dml_max(
+ 150.0 / DPPCLK,
+ TotalRepeaterDelayTime + 20.0 / DCFCLKDeepSleep + 10.0 / DPPCLK)
+ * PixelClock;
+
+ Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
+
+ LineTime = (double) HTotal / PixelClock;
+
+ if (DynamicMetadataEnable) {
+ double Tdmbf, Tdmec, Tdmsks;
+
+ Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
+ Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
+ Tdmec = LineTime;
+ if (DynamicMetadataLinesBeforeActiveRequired == 0)
+ Tdmsks = VBlank * LineTime / 2.0;
+ else
+ Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
+ if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
+ Tdmsks = Tdmsks / 2;
+ if (VStartup * LineTime
+ < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
+ MyError = true;
+ }
+ } else
+ Tdm = 0;
+
+ if (GPUVMEnable) {
+ if (PageTableLevels == 4)
+ *Tno_bw = UrgentExtraLatency + UrgentLatencyPixelDataOnly;
+ else if (PageTableLevels == 3)
+ *Tno_bw = UrgentExtraLatency;
+ else
+ *Tno_bw = 0;
+ } else if (DCCEnable)
+ *Tno_bw = LineTime;
+ else
+ *Tno_bw = LineTime / 4;
+
+ dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
+ - (Tsetup + Tdm) / LineTime
+ - (DSTYAfterScaler + DSTXAfterScaler / HTotal);
+
+ Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+
+ prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
+ / Tsw_oto;
+
+ if (GPUVMEnable == true) {
+ Tvm_oto =
+ dml_max(
+ *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatencyPixelDataOnly
+ * (PageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else
+ Tvm_oto = LineTime / 4.0;
+
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ Tr0_oto = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
+ dml_max(UrgentLatencyPixelDataOnly, dml_max(LineTime - Tvm_oto, LineTime / 4)));
+ } else
+ Tr0_oto = LineTime - Tvm_oto;
+
+ Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
+
+ dst_y_prefetch_oto = Tpre_oto / LineTime;
+
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ)
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ else
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+
+ *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
+ / 4;
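+	/*
+	 * dml_floor(4.0 * (x + 0.125), 1) / 4 snaps the prefetch line count to
+	 * quarter-line granularity, e.g. a raw value of 3.10 becomes 3.0 and
+	 * 3.13 becomes 3.25 (hypothetical inputs).
+	 */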
+
+ dml_print("DML: VStartup: %d\n", VStartup);
+ dml_print("DML: TCalc: %f\n", TCalc);
+ dml_print("DML: TWait: %f\n", TWait);
+ dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
+ dml_print("DML: LineTime: %f\n", LineTime);
+ dml_print("DML: Tsetup: %f\n", Tsetup);
+ dml_print("DML: Tdm: %f\n", Tdm);
+ dml_print("DML: DSTYAfterScaler: %f\n", DSTYAfterScaler);
+ dml_print("DML: DSTXAfterScaler: %f\n", DSTXAfterScaler);
+ dml_print("DML: HTotal: %d\n", HTotal);
+
+ *PrefetchBandwidth = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ if (*DestinationLinesForPrefetch > 1) {
+ *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
+ + 2 * PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2
+ * dml_ceil(BytePerPixelDETC, 2))
+ / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
+ if (GPUVMEnable) {
+ TimeForFetchingMetaPTE =
+ dml_max(
+ *Tno_bw
+ + (double) PDEAndMetaPTEBytesFrame
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatencyPixelDataOnly
+ * (PageTableLevels
+ - 1),
+ LineTime / 4));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingMetaPTE = LineTime / 4;
+ else
+ TimeForFetchingMetaPTE = 0.0;
+ }
+
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlank =
+ dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentLatencyPixelDataOnly,
+ dml_max(
+ LineTime
+ - TimeForFetchingMetaPTE,
+ LineTime
+ / 4.0)));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
+ else
+ TimeForFetchingRowInVBlank = 0.0;
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
+ 1) / 4.0;
+
+ *DestinationLinesToRequestRowInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
+ 1) / 4.0;
+
+ LinesToRequestPrefetchPixelData =
+ *DestinationLinesForPrefetch
+ - ((NumberOfCursors > 0 || GPUVMEnable
+ || DCCEnable) ?
+ (*DestinationLinesToRequestVMInVBlank
+ + *DestinationLinesToRequestRowInVBlank) :
+ 0.0);
+
+ if (LinesToRequestPrefetchPixelData > 0) {
+
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ *VRatioPrefetchY =
+ dml_max(
+ (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData,
+ (double) MaxNumSwathY
+ * SwathHeightY
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillY
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ }
+ }
+
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+
+ if ((SwathHeightC > 4)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ *VRatioPrefetchC =
+ dml_max(
+ *VRatioPrefetchC,
+ (double) MaxNumSwathC
+ * SwathHeightC
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillC
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchC = 0;
+ }
+ }
+
+ *RequiredPrefetchPixDataBW =
+ DPPPerPlane
+ * ((double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETY,
+ 1)
+ + (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETC,
+ 2)
+ / 2)
+ * SwathWidthY / LineTime;
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ } else {
+ MyError = true;
+ }
+
+ if (MyError) {
+ *PrefetchBandwidth = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *DestinationLinesForPrefetch = 0;
+ LinesToRequestPrefetchPixelData = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ return MyError;
+}
+
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
+}
+
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
+}
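+/*
+ * Both helpers above quantize a clock to what the DFS divider can actually
+ * produce. E.g. with hypothetical VCOSpeed = 3600 and Clock = 1000:
+ * VCOSpeed * 4 = 14400 and 14400 / 1000 = 14.4, so rounding up picks a
+ * divider of 14 (14400 / 14 ~= 1028.6) and rounding down picks 15
+ * (14400 / 15 = 960).
+ */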
+
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath)
+{
+ unsigned int MaxPartialSwath;
+
+ if (ProgressiveToInterlaceUnitInOPP)
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
+ else
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
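+	// E.g. with hypothetical VRatio = 1.0 and vtaps = 4 on a progressive
+	// source, either branch gives dml_floor(6 / 2.0, 1) = 3 pre-fill lines.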
+
+ if (!mode_lib->vba.IgnoreViewportPositioning) {
+
+ *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
+ % SwathHeight;
+ MaxPartialSwath = dml_max(1U, MaxPartialSwath);
+
+ } else {
+
+ if (ViewportYStart != 0)
+ dml_print(
+ "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
+
+ *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
+ % SwathHeight;
+ }
+
+ return *MaxNumSwath * SwathHeight + MaxPartialSwath;
+}
+
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidth,
+ bool GPUVMEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequestsLuma,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height)
+{
+ unsigned int MetaRequestHeight;
+ unsigned int MetaRequestWidth;
+ unsigned int MetaSurfWidth;
+ unsigned int MetaSurfHeight;
+ unsigned int MPDEBytesFrame;
+ unsigned int MetaPTEBytesFrame;
+ unsigned int DCCMetaSurfaceBytes;
+
+ unsigned int MacroTileSizeBytes;
+ unsigned int MacroTileHeight;
+ unsigned int DPDE0BytesFrame;
+ unsigned int ExtraDPDEBytesFrame;
+ unsigned int PDEAndMetaPTEBytesFrame;
+
+ if (DCCEnable == true) {
+ MetaRequestHeight = 8 * BlockHeight256Bytes;
+ MetaRequestWidth = 8 * BlockWidth256Bytes;
+ if (ScanDirection == dm_horz) {
+ *meta_row_height = MetaRequestHeight;
+ MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
+ + MetaRequestWidth;
+ *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
+ } else {
+ *meta_row_height = MetaRequestWidth;
+ MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
+ + MetaRequestHeight;
+ *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
+ }
+ if (ScanDirection == dm_horz) {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ } else {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(
+ (double) ViewportHeight - 1,
+ 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ }
+ if (GPUVMEnable == true) {
+ MetaPTEBytesFrame = (dml_ceil(
+ (double) (DCCMetaSurfaceBytes - VMMPageSize)
+ / (8 * VMMPageSize),
+ 1) + 1) * 64;
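+			// E.g. with hypothetical DCCMetaSurfaceBytes = 131072 and
+			// VMMPageSize = 4096: (131072 - 4096) / 32768 = 3.875,
+			// so (ceil + 1) * 64 = 5 * 64 = 320 bytes of meta PTEs.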
+ MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1);
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ }
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ *MetaRowByte = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
+ MacroTileSizeBytes = 256;
+ MacroTileHeight = BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
+ || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
+ MacroTileSizeBytes = 4096;
+ MacroTileHeight = 4 * BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
+ || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
+ || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
+ || SurfaceTiling == dm_sw_64kb_r_x) {
+ MacroTileSizeBytes = 65536;
+ MacroTileHeight = 16 * BlockHeight256Bytes;
+ } else {
+ MacroTileSizeBytes = 262144;
+ MacroTileHeight = 32 * BlockHeight256Bytes;
+ }
+ *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
+
+ if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) {
+ if (ScanDirection == dm_horz) {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ ViewportHeight
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ } else {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ (double) SwathWidth
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ }
+ ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2);
+ } else {
+ DPDE0BytesFrame = 0;
+ ExtraDPDEBytesFrame = 0;
+ }
+
+ PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
+ + ExtraDPDEBytesFrame;
+
+ if (GPUVMEnable == true) {
+ unsigned int PTERequestSize;
+ unsigned int PixelPTEReqHeight;
+ unsigned int PixelPTEReqWidth;
+ double FractionOfPTEReturnDrop;
+ unsigned int EffectivePDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ PixelPTEReqHeight = 1;
+ PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ } else if (MacroTileSizeBytes == 4096) {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ if (ScanDirection == dm_horz)
+ FractionOfPTEReturnDrop = 0;
+ else
+				FractionOfPTEReturnDrop = 7.0 / 8;
+ } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
+ PixelPTEReqHeight = 16 * BlockHeight256Bytes;
+ PixelPTEReqWidth = 16 * BlockWidth256Bytes;
+ PTERequestSize = 128;
+ FractionOfPTEReturnDrop = 0;
+ } else {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ }
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
+ else
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ *dpte_row_height =
+ dml_min(
+ 128,
+ 1
+ << (unsigned int) dml_floor(
+ dml_log2(
+ dml_min(
+ (double) PTEBufferSizeInRequestsLuma
+ * PixelPTEReqWidth,
+ EffectivePDEProcessingBufIn64KBReqs
+ * 65536.0
+ / BytePerPixel)
+ / Pitch),
+ 1));
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ (double) (Pitch * *dpte_row_height - 1)
+ / PixelPTEReqWidth,
+ 1) + 1);
+ } else if (ScanDirection == dm_horz) {
+ *dpte_row_height = PixelPTEReqHeight;
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
+ + 1);
+ } else {
+ *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ ((double) SwathWidth - 1)
+ / PixelPTEReqHeight,
+ 1) + 1);
+ }
+ if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
+ <= 64 * PTEBufferSizeInRequestsLuma) {
+ *PTEBufferSizeNotExceeded = true;
+ } else {
+ *PTEBufferSizeNotExceeded = false;
+ }
+ } else {
+ *PixelPTEBytesPerRow = 0;
+ *PTEBufferSizeNotExceeded = true;
+ }
+
+ return PDEAndMetaPTEBytesFrame;
+}
+
+static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib)
+{
+ unsigned int j, k;
+
+ mode_lib->vba.WritebackDISPCLK = 0.0;
+ mode_lib->vba.DISPCLKWithRamping = 0;
+ mode_lib->vba.DISPCLKWithoutRamping = 0;
+ mode_lib->vba.GlobalDPPCLK = 0.0;
+
+ // dml_ml->vba.DISPCLK and dml_ml->vba.DPPCLK Calculation
+ //
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k]) {
+ mode_lib->vba.WritebackDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPLuma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma;
+ } else {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPChroma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
+ * mode_lib->vba.PixelClock[k];
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma,
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k)
+ continue;
+ if (mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ }
+ }
+
+ mode_lib->vba.DISPCLKWithRamping = dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.WritebackDISPCLK);
+ mode_lib->vba.DISPCLKWithoutRamping = dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.WritebackDISPCLK);
+
+ ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states].dispclk_mhz,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
+ } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
+ } else {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
+ }
+ DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.DPPPerPlane[k] == 0) {
+ mode_lib->vba.DPPCLK_calculated[k] = 0;
+ } else {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.DPPPerPlane[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ }
+ mode_lib->vba.GlobalDPPCLK = dml_max(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DPPCLK_calculated[k]);
+ }
+ mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
+ * dml_ceil(
+ mode_lib->vba.DPPCLK_calculated[k] * 255
+ / mode_lib->vba.GlobalDPPCLK,
+ 1);
+ DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
+ }
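+	/*
+	 * The loop above quantizes each per-plane DPPCLK to 1/255th steps of
+	 * GlobalDPPCLK, e.g. with hypothetical GlobalDPPCLK = 1020 a calculated
+	 * 401 becomes 1020 / 255 * dml_ceil(401 * 255 / 1020, 1) = 4 * 101 = 404.
+	 */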
+
+ // Urgent Watermark
+ mode_lib->vba.DCCEnabledAnyPlane = false;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.DCCEnabledAnyPlane = true;
+
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000)
+ * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
+
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ // Let's do this calculation again??
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000);
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
+ DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
+ DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz)
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
+ else
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
+ MainPlaneDoesODMCombine = true;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
+ MainPlaneDoesODMCombine = true;
+
+ if (MainPlaneDoesODMCombine == true)
+ mode_lib->vba.SwathWidthY[k] = dml_min(
+ (double) mode_lib->vba.SwathWidthSingleDPPY[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]));
+ else {
+ if (mode_lib->vba.DPPPerPlane[k] == 0) {
+ mode_lib->vba.SwathWidthY[k] = 0;
+ } else {
+ mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / mode_lib->vba.DPPPerPlane[k];
+ }
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelDETY[k] = 8;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelDETY[k] = 4;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ mode_lib->vba.BytePerPixelDETY[k] = 2;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 2;
+ } else { // dm_420_10
+ mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
+ mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
+ }
+ }
+
+ mode_lib->vba.TotalDataReadBandwidth = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k] / 2;
+ DTRACE(
+ " read_bw[%i] = %fBps",
+ k,
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]);
+ mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k];
+ }
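+	/*
+	 * Example of the luma term with hypothetical values:
+	 * SwathWidthSingleDPPY = 1920, BytePerPixelDETY = 4, HTotal = 2200,
+	 * PixelClock = 594 and VRatio = 1 gives
+	 * 1920 * 4 / (2200 / 594) * 1 ~= 2073.6 in DML's bandwidth units.
+	 */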
+
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ mode_lib->vba.TotalActiveDPP = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ }
+
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBW;
+
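+ // Line delivery time per plane: limited by the output rate when VRatio <= 1,
+ // otherwise by the scaler throughput and DPPCLK.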
+ mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.VRatio[k] <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
+ else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ * mode_lib->vba.DPPPerPlane[k]
+ / (mode_lib->vba.HRatio[k] / 2.0)
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+
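+ // Extra urgent latency: round trip plus draining one pixel chunk per DPP and
+ // one meta chunk per DCC-active DPP (and a PTE group per DPP with GPUVM);
+ // the urgent watermark adds it to the base urgent latency.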
+ mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
+ + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalDCCActiveDPP
+ * mode_lib->vba.MetaChunkSize) * 1024.0
+ / mode_lib->vba.ReturnBW;
+
+ if (mode_lib->vba.GPUVMEnable)
+ mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
+ * mode_lib->vba.PTEGroupSize / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatencyPixelDataOnly
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
+ DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
+
+ mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k])
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + mode_lib->vba.ActiveWritebacksPerPlane[k];
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
+
+ // NB P-State/DRAM Clock Change Watermark
+ mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.UrgentWatermark;
+
+ DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
+
+ DTRACE(" calculating wb pstate watermark");
+ DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
+ DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
+
+ // Stutter Efficiency
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
+ / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETY[k],
+ mode_lib->vba.SwathHeightY[k]);
+ mode_lib->vba.FullDETBufferingTimeY[k] =
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k];
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
+ / mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETC[k],
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.FullDETBufferingTimeC[k] =
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2);
+ } else {
+ mode_lib->vba.LinesInDETC[k] = 0;
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
+ mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
+ }
+ }
+
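+ // Minimum full-DET buffering time across planes, and the frame time of the
+ // plane that sets it.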
+ mode_lib->vba.MinFullDETBufferingTime = 999999.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.FullDETBufferingTimeY[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeY[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ if (mode_lib->vba.FullDETBufferingTimeC[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeC[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ }
+
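+ // Average read bandwidth in GByte/s: plane bandwidth scaled by the DCC rate
+ // where compression is on, plus metadata (1/256) and page-table (1/512)
+ // fetch overheads.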
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000;
+ } else {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000;
+ }
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 256
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 256;
+ }
+ if (mode_lib->vba.GPUVMEnable) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 512
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 512;
+ }
+ }
+
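+ // Stutter burst time: the portion that fits in the ROB returns at full
+ // return bandwidth, the remainder at 64 bytes per DCFCLK cycle; stutter
+ // efficiency (excluding vblank) is forced to zero when writeback is active.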
+ mode_lib->vba.PartOfBurstThatFitsInROB =
+ dml_min(
+ mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth,
+ mode_lib->vba.ROBBufferSizeInKByte * 1024
+ * mode_lib->vba.TotalDataReadBandwidth
+ / (mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ * 1000));
+ mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
+ * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
+ / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
+ + (mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth
+ - mode_lib->vba.PartOfBurstThatFitsInROB)
+ / (mode_lib->vba.DCFCLK * 64);
+ if (mode_lib->vba.TotalActiveWriteback == 0) {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
+ - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
+ / mode_lib->vba.MinFullDETBufferingTime) * 100;
+ } else {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
+ }
+
+ mode_lib->vba.SmallestVBlank = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.VBlankTime = 0;
+ }
+ mode_lib->vba.SmallestVBlank = dml_min(
+ mode_lib->vba.SmallestVBlank,
+ mode_lib->vba.VBlankTime);
+ }
+
+ mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
+ * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
+ - mode_lib->vba.SmallestVBlank)
+ + mode_lib->vba.SmallestVBlank)
+ / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
+
+ // DCFCLK Deep Sleep
+ mode_lib->vba.DCFCLKDeepSleep = 8.0;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] =
+ dml_max(
+ 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
+ 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ } else
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k],
+ mode_lib->vba.PixelClock[k] / 16.0);
+ mode_lib->vba.DCFCLKDeepSleep = dml_max(
+ mode_lib->vba.DCFCLKDeepSleep,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
+
+ DTRACE(
+ " dcfclk_deepsleep_per_plane[%i] = %fMHz",
+ k,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
+ }
+
+ DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFCLKDeepSleep);
+
+ // Stutter Watermark
+ mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFCLKDeepSleep;
+ mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
+ DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
+
+ // Urgent Latency Supported
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.EffectiveDETPlusLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETY[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETY[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightY[k]);
+
+ mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETC[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETC[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETC[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.UrgentLatencySupportUsChroma =
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2)
+ - mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.SwathWidthY[k]
+ / 2)
+ * mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+ mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
+ mode_lib->vba.UrgentLatencySupportUsLuma,
+ mode_lib->vba.UrgentLatencySupportUsChroma);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUs[k] =
+ mode_lib->vba.UrgentLatencySupportUsLuma;
+ }
+ }
+
+ mode_lib->vba.MinUrgentLatencySupportUs = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
+ mode_lib->vba.MinUrgentLatencySupportUs,
+ mode_lib->vba.UrgentLatencySupportUs[k]);
+ }
+
+ // Non-Urgent Latency Tolerance
+ mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
+ - mode_lib->vba.UrgentWatermark;
+
+ // DSCCLK
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
+ mode_lib->vba.DSCCLK_calculated[k] = 0.0;
+ } else {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k] == dm_n422)
+ mode_lib->vba.DSCFormatFactor = 2;
+ else
+ mode_lib->vba.DSCFormatFactor = 1;
+ if (mode_lib->vba.ODMCombineEnabled[k])
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 6
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ else
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 3
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ }
+ }
+
+ // DSC Delay
+ // TODO
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double bpp = mode_lib->vba.OutputBpp[k];
+ unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
+
+ if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
+ if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DSCDelay[k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelay[k] =
+ 2
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices / 2.0,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelay[k] = 0;
+ }
+ }
+
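+ // Pipes that share a timing inherit the DSC delay of their master pipe.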
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
+ if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.DSCEnabled[j])
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
+
+ // Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PixelPTEBytesPerRowY;
+ unsigned int MetaRowByteY;
+ unsigned int MetaRowByteC;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int PixelPTEBytesPerRowC;
+
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
+ &mode_lib->vba.BlockHeight256BytesY[k],
+ &mode_lib->vba.BlockHeight256BytesC[k],
+ &mode_lib->vba.BlockWidth256BytesY[k],
+ &mode_lib->vba.BlockWidth256BytesC[k]);
+ PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesY[k],
+ mode_lib->vba.BlockWidth256BytesY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequestsLuma,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &MetaRowByteY,
+ &PixelPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.VInitPreFillY[k],
+ &mode_lib->vba.MaxNumSwathY[k]);
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
+ PDEAndMetaPTEBytesFrameC =
+ CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesC[k],
+ mode_lib->vba.BlockWidth256BytesC[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2,
+ mode_lib->vba.ViewportHeight[k] / 2,
+ mode_lib->vba.SwathWidthY[k] / 2,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequestsLuma,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &MetaRowByteC,
+ &PixelPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightC[k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.VInitPreFillC[k],
+ &mode_lib->vba.MaxNumSwathC[k]);
+ } else {
+ PixelPTEBytesPerRowC = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC = 0;
+ mode_lib->vba.MaxNumSwathC[k] = 0;
+ mode_lib->vba.PrefetchSourceLinesC[k] = 0;
+ }
+
+ mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ + PDEAndMetaPTEBytesFrameC;
+ mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ MetaRowByteY,
+ MetaRowByteC,
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.meta_row_height_chroma[k],
+ PixelPTEBytesPerRowY,
+ PixelPTEBytesPerRowC,
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_bw[k],
+ &mode_lib->vba.dpte_row_bw[k],
+ &mode_lib->vba.qual_row_bw[k]);
+ }
+
+ mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.DISPCLK;
+ } else
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.DISPCLK);
+ }
+ }
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j)
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
+
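+ // Maximum VSTARTUP lines per plane: vertical blanking minus the writeback
+ // delay (at least one line).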
+ mode_lib->vba.VStartupLines = 13;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MaxVStartupLines[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1));
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ mode_lib->vba.MaximumMaxVStartupLines = dml_max(
+ mode_lib->vba.MaximumMaxVStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.cursor_bw[k] = 0.0;
+ for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
+ mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
+ * mode_lib->vba.CursorBPP[k][j] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+
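+ // Iterate the prefetch schedule, raising VStartupLines each pass, until the
+ // schedule (and immediate flip, when requested) is supported or VSTARTUP
+ // exceeds the maximum.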
+ do {
+ double MaxTotalRDBandwidth = 0;
+ bool DestinationLineTimesForPrefetchLessThan2 = false;
+ bool VRatioPrefetchMoreThan4 = false;
+ bool prefetch_vm_bw_valid = true;
+ bool prefetch_row_bw_valid = true;
+ double TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.SREnterPlusExitTime);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
+ }
+
+ CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBW, mode_lib->vba.ReadBandwidthPlaneLuma[k], mode_lib->vba.ReadBandwidthPlaneChroma[k], mode_lib->vba.TotalDataReadBandwidth,
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
+ mode_lib->vba.DPPCLK[k], mode_lib->vba.DISPCLK, mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelay[k], mode_lib->vba.DPPPerPlane[k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal,
+ mode_lib->vba.SwathWidthY[k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k],
+ mode_lib->vba.SwathWidthSingleDPPY[k], mode_lib->vba.BytePerPixelDETY[k], mode_lib->vba.BytePerPixelDETC[k], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP, &mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]);
+
+ mode_lib->vba.ErrorResult[k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.DPPCLK[k],
+ mode_lib->vba.DISPCLK,
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.DCFCLKDeepSleep,
+ mode_lib->vba.DPPPerPlane[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]),
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.TCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchSourceLinesY[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.BytePerPixelDETY[k],
+ mode_lib->vba.VInitPreFillY[k],
+ mode_lib->vba.MaxNumSwathY[k],
+ mode_lib->vba.PrefetchSourceLinesC[k],
+ mode_lib->vba.BytePerPixelDETC[k],
+ mode_lib->vba.VInitPreFillC[k],
+ mode_lib->vba.MaxNumSwathC[k],
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.DSTXAfterScaler[k],
+ mode_lib->vba.DSTYAfterScaler[k],
+ &mode_lib->vba.DestinationLinesForPrefetch[k],
+ &mode_lib->vba.PrefetchBandwidth[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
+ &mode_lib->vba.VRatioPrefetchY[k],
+ &mode_lib->vba.VRatioPrefetchC[k],
+ &mode_lib->vba.RequiredPrefetchPixDataBWLuma[k],
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.VStartup[k] = dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+ if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
+ != 0) {
+ mode_lib->vba.VStartup[k] =
+ mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+ }
+ } else {
+ mode_lib->vba.VStartup[k] =
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+
+ if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
+ == 0)
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_row_bw[k] =
+ (double) (mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k])
+ / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ prefetch_row_bw_valid = false;
+ }
+
+ MaxTotalRDBandwidth =
+ MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBWLuma[k])
+ + mode_lib->vba.meta_row_bw[k]
+ + mode_lib->vba.dpte_row_bw[k]));
+
+ if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
+ DestinationLineTimesForPrefetchLessThan2 = true;
+ if (mode_lib->vba.VRatioPrefetchY[k] > 4
+ || mode_lib->vba.VRatioPrefetchC[k] > 4)
+ VRatioPrefetchMoreThan4 = true;
+ }
+
+ if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
+ && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
+ && !DestinationLineTimesForPrefetchLessThan2)
+ mode_lib->vba.PrefetchModeSupported = true;
+ else {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
+ }
+
+ if (mode_lib->vba.PrefetchModeSupported == true) {
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double total_dcn_read_bw_with_flip = 0;
+
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ + mode_lib->vba.qual_row_bw[k],
+ mode_lib->vba.PrefetchBandwidth[k]);
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ ImmediateFlipBytes[k] = 0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ + mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ total_dcn_read_bw_with_flip =
+ total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBWLuma[k])));
+ }
+ mode_lib->vba.ImmediateFlipSupported = true;
+ if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ErrorResult[k]) {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
+ }
+ }
+
+ mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
+ } while (!((mode_lib->vba.PrefetchModeSupported
+ && (!mode_lib->vba.ImmediateFlipSupport
+ || mode_lib->vba.ImmediateFlipSupported))
+ || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
+
+ // Display Pipeline Delivery Time in Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ }
+ }
+
+ // Min TTUVBlank
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.DRAMClockChangeWatermark,
+ dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark));
+ } else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark);
+ } else {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
+ }
+ if (!mode_lib->vba.DynamicMetadataEnable[k])
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
+ + mode_lib->vba.MinTTUVBlank[k];
+ }
+
+ // DCC Configuration
+ mode_lib->vba.ActiveDPPs = 0;
+ // NB P-State/DRAM Clock Change Support
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
+ }
+
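+ // Active-state DRAM clock change margin per plane: line-buffer latency
+ // hiding plus DPP/OPP output buffering plus DET buffering, less the p-state
+ // watermark, further limited by the writeback buffers when writeback is on.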
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double DPPOutputBufferLinesY;
+ double DPPOutputBufferLinesC;
+ double DPPOPPBufferingY;
+ double MaxDETBufferingTimeY;
+ double ActiveDRAMClockChangeLatencyMarginY;
+
+ mode_lib->vba.LBLatencyHidingSourceLinesY =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1)) - (mode_lib->vba.vtaps[k] - 1);
+
+ mode_lib->vba.LBLatencyHidingSourceLinesC =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2,
+ 1.0)),
+ 1))
+ - (mode_lib->vba.VTAPsChroma[k] - 1);
+
+ EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
+ / mode_lib->vba.VRatio[k]
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
+ / (mode_lib->vba.VRatio[k] / 2)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
+ / mode_lib->vba.SwathWidthY[k];
+ } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = 0.5;
+ } else {
+ DPPOutputBufferLinesY = 1;
+ }
+
+ if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = 0.5;
+ } else {
+ DPPOutputBufferLinesC = 1;
+ }
+
+ DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
+ MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
+ + (mode_lib->vba.LinesInDETY[k]
+ - mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+
+ ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
+ + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginY =
+ ActiveDRAMClockChangeLatencyMarginY
+ - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
+ * mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesC
+ + mode_lib->vba.OPPOutputBufferLines);
+ double MaxDETBufferingTimeC =
+ mode_lib->vba.FullDETBufferingTimeC[k]
+ + (mode_lib->vba.LinesInDETC[k]
+ - mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
+ + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
+ - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginC =
+ ActiveDRAMClockChangeLatencyMarginC
+ - (1
+ - 1
+ / (mode_lib->vba.ActiveDPPs
+ - 1))
+ * mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ ActiveDRAMClockChangeLatencyMarginY,
+ ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
+ ActiveDRAMClockChangeLatencyMarginY;
+ }
+
+ if (mode_lib->vba.WritebackEnable[k]) {
+ double WritebackDRAMClockChangeLatencyMargin;
+
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ WritebackDRAMClockChangeLatencyMargin =
+ (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * 4)
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize
+ * 8.0 / 10,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize
+ * 8 / 10)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
+ mode_lib->vba.MinActiveDRAMClockChangeMargin
+ + mode_lib->vba.DRAMClockChangeLatency;
+
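+ // Classify DRAM clock change support: v-active when the active margin is
+ // large enough (or the dummy p-state check passes), otherwise v-blank when
+ // vblanks are synchronized (or a single plane) and every plane allows it,
+ // else unsupported; the result is copied to every state and configuration.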
+ if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
+ mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
+ } else if (mode_lib->vba.DummyPStateCheck &&
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
+ } else {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
+ mode_lib->vba.DRAMClockChangeSupport[0][0] =
+ dm_dram_clock_change_unsupported;
+ }
+ }
+ } else {
+ mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
+ for (j = 0; j < 2; j++)
+ mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
+
+ // XFC Parameters:
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ double TWait;
+
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
+ TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.SREnterPlusExitTime);
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
+ dml_floor(
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCTransferDelay[k] =
+ dml_ceil(
+ mode_lib->vba.XFCBusTransportTime
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCPrechargeDelay[k] =
+ dml_ceil(
+ (mode_lib->vba.XFCBusTransportTime
+ + mode_lib->vba.TInitXFill
+ + mode_lib->vba.TslvChk)
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
+ * mode_lib->vba.SrcActiveDrainRate;
+ mode_lib->vba.FinalFillMargin =
+ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.XFCFillConstant;
+ mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.FinalFillMargin;
+ mode_lib->vba.RemainingFillLevel = dml_max(
+ 0.0,
+ mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
+ mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
+ / (mode_lib->vba.SrcActiveDrainRate
+ * mode_lib->vba.XFCFillBWOverhead / 100);
+ mode_lib->vba.XFCPrefetchMargin[k] =
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ + mode_lib->vba.TFinalxFill
+ + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
+ mode_lib->vba.XFCPrechargeDelay[k] = 0;
+ mode_lib->vba.XFCTransferDelay[k] = 0;
+ mode_lib->vba.XFCPrefetchMargin[k] = 0;
+ }
+ }
+ {
+ unsigned int VStartupMargin = 0;
+ bool FirstMainPlane = true;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ unsigned int Margin = (mode_lib->vba.MaxVStartupLines[k] - mode_lib->vba.VStartup[k])
+ * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k];
+
+ if (FirstMainPlane) {
+ VStartupMargin = Margin;
+ FirstMainPlane = false;
+ } else
+ VStartupMargin = dml_min(VStartupMargin, Margin);
+ }
+
+ if (mode_lib->vba.UseMaximumVStartup) {
+ if (mode_lib->vba.VTotal_Max[k] == mode_lib->vba.VTotal[k]) {
+ //only use the maximum VSTARTUP if the timing is not DRR or a late flip.
+ mode_lib->vba.VStartup[k] = mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]];
+ }
+ }
+ }
+}
+}
+
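+// Per-plane swath configuration: choose the maximum swath height when two
+// swaths fit in half the DET, otherwise the minimum, then split the DET
+// between luma and chroma.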
+static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+{
+ double BytePerPixDETY;
+ double BytePerPixDETC;
+ double Read256BytesBlockHeightY;
+ double Read256BytesBlockHeightC;
+ double Read256BytesBlockWidthY;
+ double Read256BytesBlockWidthC;
+ double MaximumSwathHeightY;
+ double MaximumSwathHeightC;
+ double MinimumSwathHeightY;
+ double MinimumSwathHeightC;
+ double SwathWidth;
+ double SwathWidthGranularityY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesY;
+ double RoundedUpMaxSwathSizeBytesC;
+ unsigned int j, k;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ BytePerPixDETY = 8;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ BytePerPixDETY = 4;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ BytePerPixDETY = 2;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 2;
+ } else {
+ BytePerPixDETY = 4.0 / 3.0;
+ BytePerPixDETC = 8.0 / 3.0;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ Read256BytesBlockHeightY = 4;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ Read256BytesBlockHeightY = 8;
+ } else {
+ Read256BytesBlockHeightY = 16;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockHeightC = 0;
+ Read256BytesBlockWidthC = 0;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ Read256BytesBlockHeightC = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ Read256BytesBlockHeightY = 16;
+ Read256BytesBlockHeightC = 8;
+ } else {
+ Read256BytesBlockHeightY = 8;
+ Read256BytesBlockHeightC = 8;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
+ / Read256BytesBlockHeightC;
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ MaximumSwathHeightY = Read256BytesBlockHeightY;
+ MaximumSwathHeightC = Read256BytesBlockHeightC;
+ } else {
+ MaximumSwathHeightY = Read256BytesBlockWidthY;
+ MaximumSwathHeightC = Read256BytesBlockWidthC;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
+ && mode_lib->vba.SourceScan[k] != dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ }
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ }
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ SwathWidth = mode_lib->vba.ViewportWidth[k];
+ } else {
+ SwathWidth = mode_lib->vba.ViewportHeight[k];
+ }
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
+ MainPlaneDoesODMCombine = true;
+ }
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
+ MainPlaneDoesODMCombine = true;
+ }
+ }
+
+ if (MainPlaneDoesODMCombine == true) {
+ SwathWidth = dml_min(
+ SwathWidth,
+ mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
+ } else {
+ if (mode_lib->vba.DPPPerPlane[k] == 0)
+ SwathWidth = 0;
+ else
+ SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
+ RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ (double) (SwathWidth - 1),
+ SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
+ * MaximumSwathHeightY;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ + 256;
+ }
+ if (MaximumSwathHeightC > 0) {
+ SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
+ / MaximumSwathHeightC;
+ RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ (double) (SwathWidth / 2.0 - 1),
+ SwathWidthGranularityC) + SwathWidthGranularityC)
+ * BytePerPixDETC * MaximumSwathHeightC;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ RoundedUpMaxSwathSizeBytesC,
+ 256) + 256;
+ }
+ } else
+ RoundedUpMaxSwathSizeBytesC = 0.0;
+
+ if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
+ } else {
+ mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
+ }
+
+ if (mode_lib->vba.SwathHeightC[k] == 0) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024;
+ mode_lib->vba.DETBufferSizeC[k] = 0;
+ } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ } else {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2 / 3;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3;
+ }
+ }
+}
+
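+// TWait: the time the prefetch must wait for, depending on which latencies
+// (p-state, self refresh, urgent) the selected prefetch mode has to hide.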
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatencyPixelDataOnly,
+ double SREnterPlusExitTime)
+{
+ if (PrefetchMode == 0) {
+ return dml_max(
+ DRAMClockChangeLatency + UrgentLatencyPixelDataOnly,
+ dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly));
+ } else if (PrefetchMode == 1) {
+ return dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly);
+ } else {
+ return UrgentLatencyPixelDataOnly;
+ }
+}
+
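+// XFC remote surface flip delay: two bus transports plus slave setup, TCalc,
+// TWait, the chunk check and the initial buffer fill.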
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk)
+{
+ double TSlvSetup, AvgfillRate, result;
+
+ *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+ TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
+ *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
+ AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
+ *TslvChk = XFCSlvChunkSize / AvgfillRate;
+ dml_print(
+ "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
+ *SrcActiveDrainRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
+ result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
+ return result;
+}
+
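+// Writeback delay derived from the luma scaler taps and ratios, extended
+// with the chroma taps for formats other than dm_444_32.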
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth)
+{
+ double CalculateWriteBackDelay =
+ dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
+ WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(1.0 / WritebackVRatio, 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1) + 4));
+
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDelay =
+ dml_max(
+ CalculateWriteBackDelay,
+ dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)));
+ }
+ return CalculateWriteBackDelay;
+}
+
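+// Ongoing meta-row and PTE-row fetch bandwidth while the plane is active;
+// 4:2:0 formats add the half-rate chroma rows and report the combined
+// qual_row_bw.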
+static void CalculateActiveRowBandwidth(
+ bool GPUVMEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw)
+{
+ if (DCCEnable != true) {
+ *meta_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ + VRatio / 2 * MetaRowByteChroma
+ / (meta_row_height_chroma * LineTime);
+ } else {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
+ }
+
+ if (GPUVMEnable != true) {
+ *dpte_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ + VRatio / 2 * PixelPTEBytesPerRowChroma
+ / (dpte_row_height_chroma * LineTime);
+ } else {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
+ }
+
+ if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
+ *qual_row_bw = *meta_row_bw + *dpte_row_bw;
+ } else {
+ *qual_row_bw = 0;
+ }
+}
+
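+/*
+ * Decide whether an immediate flip can be serviced on this pipe: the time to
+ * re-fetch the frame's PDE/meta PTEs and the meta/PTE rows is converted into
+ * destination lines (quantized to quarter lines), and the flip is reported
+ * unsupported if that exceeds 8 lines (VM) or 16 lines (row), or if it does
+ * not fit within the minimum dpte/meta row time. 4:2:0 sources are always
+ * reported as supported, using the active row bandwidth as the flip bandwidth.
+ */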
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatencyPixelDataOnly,
+ unsigned int GPUVMMaxPageTableLevels,
+ bool GPUVMEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double VRatio,
+ double Tno_bw,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe)
+{
+ double min_row_time = 0.0;
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *DestinationLinesToRequestVMInImmediateFlip = 0.0;
+ *DestinationLinesToRequestRowInImmediateFlip = 0.0;
+ *final_flip_bw = qual_row_bw;
+ *ImmediateFlipSupportedForPipe = true;
+ } else {
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+
+ if (GPUVMEnable == true) {
+ mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingMetaPTEImmediateFlip =
+ dml_max(
+ Tno_bw
+ + PDEAndMetaPTEBytesFrame
+ / mode_lib->vba.ImmediateFlipBW[0],
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatencyPixelDataOnly
+ * (GPUVMMaxPageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestVMInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / mode_lib->vba.ImmediateFlipBW[0],
+ dml_max(UrgentLatencyPixelDataOnly, LineTime / 4.0));
+ } else {
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestRowInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if (GPUVMEnable == true) {
+ *final_flip_bw =
+ dml_max(
+ PDEAndMetaPTEBytesFrame
+ / (*DestinationLinesToRequestVMInImmediateFlip
+ * LineTime),
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip
+ * LineTime));
+ } else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
+ *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
+ } else {
+ *final_flip_bw = 0;
+ }
+
+ if (GPUVMEnable && !DCCEnable)
+ min_row_time = dpte_row_height * LineTime / VRatio;
+ else if (!GPUVMEnable && DCCEnable)
+ min_row_time = meta_row_height * LineTime / VRatio;
+ else
+ min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
+ / VRatio;
+
+ if (*DestinationLinesToRequestVMInImmediateFlip >= 8
+ || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ || TimeForFetchingMetaPTEImmediateFlip
+ + 2 * TimeForFetchingRowInVBlankImmediateFlip
+ > min_row_time)
+ *ImmediateFlipSupportedForPipe = false;
+ else
+ *ImmediateFlipSupportedForPipe = true;
+ }
+}
+
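+/*
+ * Truncate the bpp the link/timing could carry down to the nearest value the
+ * output can actually use: fixed steps per format (e.g. 36/30/24/18 for
+ * 4:4:4, 18/15/12 for 4:2:0, 24/20/16 for 4:2:2) without DSC, or 1/16 bpp
+ * granularity bounded by the per-format DSC maximum when DSC is enabled.
+ * Returns BPP_INVALID when even the smallest step cannot be carried.
+ */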
+static unsigned int TruncToValidBPP(
+ double DecimalBPP,
+ bool DSCEnabled,
+ enum output_encoder_class Output,
+ enum output_format_class Format,
+ unsigned int DSCInputBitPerComponent)
+{
+ if (Output == dm_hdmi) {
+ if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return BPP_INVALID;
+ } else if (Format == dm_444) {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else if (DecimalBPP >= 18)
+ return 18;
+ else
+ return BPP_INVALID;
+ } else {
+ if (DecimalBPP / 1.5 >= 24)
+ return 24;
+ else if (DecimalBPP / 1.5 >= 20)
+ return 20;
+ else if (DecimalBPP / 1.5 >= 16)
+ return 16;
+ else
+ return BPP_INVALID;
+ }
+ } else {
+ if (DSCEnabled) {
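+			/* Note: "1 / 16" in the caps below is integer division and evaluates to 0. */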
+ if (Format == dm_420) {
+ if (DecimalBPP < 6)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
+ return 1.5 * DSCInputBitPerComponent - 1 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else if (Format == dm_n422) {
+ if (DecimalBPP < 7)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
+ return 2 * DSCInputBitPerComponent - 1 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else {
+ if (DecimalBPP < 8)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
+ return 3 * DSCInputBitPerComponent - 1 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ }
+ } else if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return BPP_INVALID;
+ } else if (Format == dm_s422 || Format == dm_n422) {
+ if (DecimalBPP >= 24)
+ return 24;
+ else if (DecimalBPP >= 20)
+ return 20;
+ else if (DecimalBPP >= 16)
+ return 16;
+ else
+ return BPP_INVALID;
+ } else {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else if (DecimalBPP >= 18)
+ return 18;
+ else
+ return BPP_INVALID;
+ }
+ }
+}
+
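+/*
+ * Mode support and system configuration: for every voltage state i, DPP
+ * split option j and active plane k, run the individual support checks
+ * (scaling, source format, bandwidth, writeback, DISPCLK/DPPCLK, viewport,
+ * DSC, prefetch, ...) and record the per-state results in mode_lib->vba so
+ * the caller can select a supported state.
+ */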
+void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
+{
+ struct vba_vars_st *locals = &mode_lib->vba;
+
+ int i;
+ unsigned int j, k, m;
+
+ /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+
+ /*Scale Ratio, taps Support Check*/
+
+ mode_lib->vba.ScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ScalerEnabled[k] == false
+ && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
+ || mode_lib->vba.HRatio[k] != 1.0
+ || mode_lib->vba.htaps[k] != 1.0
+ || mode_lib->vba.VRatio[k] != 1.0
+ || mode_lib->vba.vtaps[k] != 1.0)) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
+ || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
+ || (mode_lib->vba.htaps[k] > 1.0
+ && (mode_lib->vba.htaps[k] % 2) == 1)
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
+ || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
+ && (mode_lib->vba.HRatio[k] / 2.0
+ > mode_lib->vba.HTAPsChroma[k]
+ || mode_lib->vba.VRatio[k] / 2.0
+ > mode_lib->vba.VTAPsChroma[k]))) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ }
+ }
+ /*Source Format, Pixel Format and Scan Support Check*/
+
+ mode_lib->vba.SourceFormatPixelAndScanSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ && mode_lib->vba.SourceScan[k] != dm_horz)
+ || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
+ || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10))
+ || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_l_vp)
+ && !((mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_32)
+ && mode_lib->vba.SourceScan[k]
+ == dm_horz
+ && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
+ == true
+ && mode_lib->vba.DCCEnable[k]
+ == false))
+ || (mode_lib->vba.DCCEnable[k] == true
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_linear
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10)))) {
+ mode_lib->vba.SourceFormatPixelAndScanSupport = false;
+ }
+ }
+ /*Bandwidth Support Check*/
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ locals->BytePerPixelInDETY[k] = 8.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ locals->BytePerPixelInDETY[k] = 4.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
+ locals->BytePerPixelInDETY[k] = 2.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
+ locals->BytePerPixelInDETY[k] = 1.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ locals->BytePerPixelInDETY[k] = 1.0;
+ locals->BytePerPixelInDETC[k] = 2.0;
+ } else {
+ locals->BytePerPixelInDETY[k] = 4.0 / 3;
+ locals->BytePerPixelInDETC[k] = 8.0 / 3;
+ }
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
+ } else {
+ locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
+ locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0;
+ locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k];
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 4.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 3.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true) {
+ locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 1.5;
+ } else {
+ locals->WriteBandwidth[k] = 0.0;
+ }
+ }
+ mode_lib->vba.DCCEnabledInAnyPlane = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.DCCEnabledInAnyPlane = true;
+ }
+ }
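+	/*
+	 * Per-state return bandwidth: start from the lesser of DRAM and fabric
+	 * bandwidth (derated by the percent-of-ideal factor for pixel-data-only
+	 * urgent latency) and, when DCC is active on any plane, cap it based on
+	 * how long the ROB minus one pixel chunk can absorb compressed returns
+	 * around the critical point; the caps are evaluated both with and
+	 * without the derating factor.
+	 */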
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
+ mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClockPerState[i]
+ * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000;
+ locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * locals->DCFCLKPerState[i],
+ locals->FabricAndDRAMBandwidthPerState[i] * 1000)
+ * locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
+
+ locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
+
+ if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
+ locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
+ ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
+ / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
+ * locals->ReturnBusWidth / 4) + locals->UrgentLatency)));
+ }
+ locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] *
+ locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
+
+ if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
+ 4 * locals->ReturnBWToDCNPerState *
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
+ * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
+ dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2));
+ }
+
+ locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth *
+ locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
+
+ if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
+ locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
+ ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
+ / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
+ * locals->ReturnBusWidth / 4) + locals->UrgentLatency)));
+ }
+ locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] *
+ locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
+
+ if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
+ 4 * locals->ReturnBWToDCNPerState *
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
+ * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
+ dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2));
+ }
+ }
+ /*Writeback Latency support check*/
+
+ mode_lib->vba.WritebackLatencySupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ if (locals->WriteBandwidth[k]
+ > (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ } else {
+ if (locals->WriteBandwidth[k]
+ > 1.5
+ * dml_min(
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ }
+ }
+ }
+ /*Re-ordering Buffer Support Check*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
+ + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
+ > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
+ locals->ROBSupport[i][0] = true;
+ } else {
+ locals->ROBSupport[i][0] = false;
+ }
+ }
+ /*Writeback Mode Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveWriteback = 0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0)
+ mode_lib->vba.ActiveWritebacksPerPlane[k] = 1;
+ mode_lib->vba.TotalNumberOfActiveWriteback =
+ mode_lib->vba.TotalNumberOfActiveWriteback
+ + mode_lib->vba.ActiveWritebacksPerPlane[k];
+ }
+ }
+ mode_lib->vba.WritebackModeSupport = true;
+ if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.Writeback10bpc420Supported != true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ }
+ /*Writeback Scale Ratio and Taps Support Check*/
+
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
+ && (mode_lib->vba.WritebackHRatio[k] != 1.0
+ || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackMaxVSCLRatio
+ || mode_lib->vba.WritebackHRatio[k]
+ < mode_lib->vba.WritebackMinHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ < mode_lib->vba.WritebackMinVSCLRatio
+ || mode_lib->vba.WritebackLumaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackLumaHTaps[k]
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackLumaVTaps[k]
+ || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
+ == 1))
+ || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
+ && (mode_lib->vba.WritebackChromaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || 2.0
+ * mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackChromaHTaps[k]
+ || 2.0
+ * mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackChromaVTaps[k]
+ || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
+ mode_lib->vba.WritebackLumaVExtra =
+ dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
+ } else {
+ mode_lib->vba.WritebackLumaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ + mode_lib->vba.WritebackLineBufferChromaBufferSize)
+ / 3.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
+ mode_lib->vba.WritebackChromaVExtra = 0.0;
+ } else {
+ mode_lib->vba.WritebackChromaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ }
+ }
+ /*Maximum DISPCLK/DPPCLK Support check*/
+
+ mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackRequiredDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackRequiredDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.HRatio[k] > 1.0) {
+ locals->PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1.0));
+ } else {
+ locals->PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ if (locals->BytePerPixelInDETC[k] == 0.0) {
+ locals->PSCL_FACTOR_CHROMA[k] = 0.0;
+			locals->MinDPPCLKUsingSingleDPP[k] =
+					mode_lib->vba.PixelClock[k]
+							* dml_max3(
+									mode_lib->vba.vtaps[k] / 6.0 * dml_min(1.0, mode_lib->vba.HRatio[k]),
+									mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / locals->PSCL_FACTOR[k],
+									1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
+ && locals->MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ locals->MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ } else {
+ if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
+ locals->PSCL_FACTOR_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ locals->PSCL_FACTOR_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+			locals->MinDPPCLKUsingSingleDPP[k] =
+					mode_lib->vba.PixelClock[k]
+							* dml_max5(
+									mode_lib->vba.vtaps[k] / 6.0 * dml_min(1.0, mode_lib->vba.HRatio[k]),
+									mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / locals->PSCL_FACTOR[k],
+									mode_lib->vba.VTAPsChroma[k] / 6.0 * dml_min(1.0, mode_lib->vba.HRatio[k] / 2.0),
+									mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / 4.0 / locals->PSCL_FACTOR_CHROMA[k],
+									1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
+ || mode_lib->vba.HTAPsChroma[k] > 6.0
+ || mode_lib->vba.VTAPsChroma[k] > 6.0)
+ && locals->MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ locals->MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
+ &locals->Read256BlockHeightY[k],
+ &locals->Read256BlockHeightC[k],
+ &locals->Read256BlockWidthY[k],
+ &locals->Read256BlockWidthC[k]);
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k];
+ locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k];
+ } else {
+ locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k];
+ locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k];
+ }
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ } else {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
+ / 2.0;
+ }
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
+ / 2.0;
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]
+ / 2.0;
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ } else {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ }
+ }
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
+ } else {
+ mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
+ }
+ mode_lib->vba.MaximumSwathWidthInDETBuffer =
+ dml_min(
+ mode_lib->vba.MaximumSwathWidthSupport,
+ mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
+ / (locals->BytePerPixelInDETY[k]
+ * locals->MinSwathHeightY[k]
+ + locals->BytePerPixelInDETC[k]
+ / 2.0
+ * locals->MinSwathHeightC[k]));
+ if (locals->BytePerPixelInDETC[k] == 0.0) {
+				mode_lib->vba.MaximumSwathWidthInLineBuffer =
+						mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k], 1.0)
+								/ mode_lib->vba.LBBitPerPixel[k]
+								/ (mode_lib->vba.vtaps[k]
+										+ dml_max(dml_ceil(mode_lib->vba.VRatio[k], 1.0) - 2, 0.0));
+			} else {
+				mode_lib->vba.MaximumSwathWidthInLineBuffer =
+						dml_min(
+								mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k], 1.0)
+										/ mode_lib->vba.LBBitPerPixel[k]
+										/ (mode_lib->vba.vtaps[k]
+												+ dml_max(dml_ceil(mode_lib->vba.VRatio[k], 1.0) - 2, 0.0)),
+								2.0 * mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k] / 2.0, 1.0)
+										/ mode_lib->vba.LBBitPerPixel[k]
+										/ (mode_lib->vba.VTAPsChroma[k]
+												+ dml_max(dml_ceil(mode_lib->vba.VRatio[k] / 2.0, 1.0) - 2, 0.0)));
+ }
+ locals->MaximumSwathWidth[k] = dml_min(
+ mode_lib->vba.MaximumSwathWidthInDETBuffer,
+ mode_lib->vba.MaximumSwathWidthInLineBuffer);
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
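+		/*
+		 * j selects the DPP allocation strategy for this state: j == 0
+		 * keeps the minimum number of DPPs per plane that satisfies
+		 * DPPCLK, j == 1 additionally splits the highest-bandwidth
+		 * unsplit planes across two DPPs while spare pipes remain.
+		 */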
+ for (j = 0; j < 2; j++) {
+ mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDppclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ locals->RequiredDISPCLK[i][j] = 0.0;
+ locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
+ if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i]
+ && i == mode_lib->vba.soc.num_states)
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+
+ mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
+ if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i]
+ && i == mode_lib->vba.soc.num_states)
+ mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+
+ locals->ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ if (mode_lib->vba.ODMCapability) {
+ if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ }
+ }
+
+ if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+ locals->NoOfDPP[i][j][k] = 1;
+ locals->RequiredDPPCLK[i][j][k] =
+ locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ } else {
+ locals->NoOfDPP[i][j][k] = 2;
+ locals->RequiredDPPCLK[i][j][k] =
+ locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
+ }
+ locals->RequiredDISPCLK[i][j] = dml_max(
+ locals->RequiredDISPCLK[i][j],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ locals->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ locals->TotalNumberOfActiveDPP[i][j] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
+ locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
+ if (j == 1) {
+ while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP
+ && locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) {
+ double BWOfNonSplitPlaneOfMaximumBandwidth;
+ unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
+
+ BWOfNonSplitPlaneOfMaximumBandwidth = 0;
+ NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) {
+ BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k];
+ NumberOfNonSplitPlaneOfMaximumBandwidth = k;
+ }
+ }
+ locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
+ locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
+ locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
+ locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1;
+ }
+ }
+ if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) {
+ locals->RequiredDISPCLK[i][j] = 0.0;
+ locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->ODMCombineEnablePerState[i][k] = false;
+ if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ locals->NoOfDPP[i][j][k] = 1;
+ locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ } else {
+ locals->NoOfDPP[i][j][k] = 2;
+ locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
+ }
+ if (i != mode_lib->vba.soc.num_states) {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
+ } else {
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+ locals->RequiredDISPCLK[i][j] = dml_max(
+ locals->RequiredDISPCLK[i][j],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ || mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)
+ locals->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ locals->TotalNumberOfActiveDPP[i][j] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
+ locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
+ }
+ locals->RequiredDISPCLK[i][j] = dml_max(
+ locals->RequiredDISPCLK[i][j],
+ mode_lib->vba.WritebackRequiredDISPCLK);
+ if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
+ < mode_lib->vba.WritebackRequiredDISPCLK) {
+ locals->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ }
+ /*Viewport Size Check*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->ViewportSizeSupport[i][0] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
+ > locals->MaximumSwathWidth[k]) {
+ locals->ViewportSizeSupport[i][0] = false;
+ }
+ } else {
+ if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
+ locals->ViewportSizeSupport[i][0] = false;
+ }
+ }
+ }
+ }
+ /*Total Available Pipes Support Check*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ if (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP)
+ locals->TotalAvailablePipesSupport[i][j] = true;
+ else
+ locals->TotalAvailablePipesSupport[i][j] = false;
+ }
+ }
+ /*Total Available OTG Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ + 1.0;
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
+ mode_lib->vba.NumberOfOTGSupport = true;
+ } else {
+ mode_lib->vba.NumberOfOTGSupport = false;
+ }
+ /*Display IO and DSC Support Check*/
+
+ mode_lib->vba.NonsupportedDSCInputBPC = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
+ mode_lib->vba.NonsupportedDSCInputBPC = true;
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->RequiresDSC[i][k] = 0;
+ locals->RequiresFEC[i][k] = 0;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.Output[k] == dm_hdmi) {
+ locals->RequiresDSC[i][k] = 0;
+ locals->RequiresFEC[i][k] = 0;
+ locals->OutputBppPerState[i][k] = TruncToValidBPP(
+ dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ } else if (mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp) {
+ if (mode_lib->vba.Output[k] == dm_edp) {
+ mode_lib->vba.EffectiveFECOverhead = 0.0;
+ } else {
+ mode_lib->vba.EffectiveFECOverhead =
+ mode_lib->vba.FECOverhead;
+ }
+ if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
+ mode_lib->vba.Outbpp = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ locals->RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ locals->RequiresFEC[i][k] = true;
+ } else {
+ locals->RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
+ } else {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = false;
+ }
+ locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) {
+ mode_lib->vba.Outbpp = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ locals->RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ locals->RequiresFEC[i][k] = true;
+ } else {
+ locals->RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
+ } else {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = false;
+ }
+ locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == BPP_INVALID
+ && mode_lib->vba.PHYCLKPerState[i]
+ >= 810.0) {
+ mode_lib->vba.Outbpp = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) {
+ locals->RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ locals->RequiresFEC[i][k] = true;
+ } else {
+ locals->RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
+ } else {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = false;
+ }
+ locals->OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ }
+ } else {
+ locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE;
+ }
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->DIOSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
+ && mode_lib->vba.Interlace[k] == true
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ locals->DIOSupport[i] = false;
+ }
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->DSCCLKRequiredMoreThanSupported[i] = false;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if ((mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp)) {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k]
+ == dm_n422) {
+ mode_lib->vba.DSCFormatFactor = 2;
+ } else {
+ mode_lib->vba.DSCFormatFactor = 1;
+ }
+ if (locals->RequiresDSC[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
+ > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
+ locals->DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ } else {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor
+ > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
+ locals->DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->NotEnoughDSCUnits[i] = false;
+ mode_lib->vba.TotalDSCUnitsRequired = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->RequiresDSC[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 2.0;
+ } else {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 1.0;
+ }
+ }
+ }
+ if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
+ locals->NotEnoughDSCUnits[i] = true;
+ }
+ }
+ /*DSC Delay per state*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k) {
+ mode_lib->vba.slices = 0;
+			} else if (locals->RequiresDSC[i][k] == false) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
+ mode_lib->vba.slices = dml_ceil(
+ mode_lib->vba.PixelClockBackEnd[k] / 400.0,
+ 4.0);
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
+ mode_lib->vba.slices = 8.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
+ mode_lib->vba.slices = 4.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
+ mode_lib->vba.slices = 2.0;
+ } else {
+ mode_lib->vba.slices = 1.0;
+ }
+ if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE
+ || locals->OutputBppPerState[i][k] == BPP_INVALID) {
+ mode_lib->vba.bpp = 0.0;
+ } else {
+ mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
+ }
+ if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+ locals->DSCDelayPerState[i][k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ locals->DSCDelayPerState[i][k] =
+ 2.0 * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0),
+ mode_lib->vba.slices / 2,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(mode_lib->vba.OutputFormat[k]));
+ }
+ locals->DSCDelayPerState[i][k] =
+ locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ locals->DSCDelayPerState[i][k] = 0.0;
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true)
+ locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][m];
+ }
+ }
+ }
+ }
+
+ //Prefetch Check
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
+ locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
+ else
+ locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
+ locals->SwathWidthGranularityY = 256 / dml_ceil(locals->BytePerPixelInDETY[k], 1) / locals->MaxSwathHeightY[k];
+ locals->RoundedUpMaxSwathSizeBytesY = (dml_ceil(locals->SwathWidthYPerState[i][j][k] - 1, locals->SwathWidthGranularityY)
+ + locals->SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k];
+ if (locals->SourcePixelFormat[k] == dm_420_10) {
+ locals->RoundedUpMaxSwathSizeBytesY = dml_ceil(locals->RoundedUpMaxSwathSizeBytesY, 256) + 256;
+ }
+ if (locals->MaxSwathHeightC[k] > 0) {
+ locals->SwathWidthGranularityC = 256 / dml_ceil(locals->BytePerPixelInDETC[k], 2) / locals->MaxSwathHeightC[k];
+
+ locals->RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYPerState[i][j][k] / 2 - 1, locals->SwathWidthGranularityC)
+ + locals->SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k];
+ }
+ if (locals->SourcePixelFormat[k] == dm_420_10) {
+ locals->RoundedUpMaxSwathSizeBytesC = dml_ceil(locals->RoundedUpMaxSwathSizeBytesC, 256) + 256;
+ } else {
+ locals->RoundedUpMaxSwathSizeBytesC = 0;
+ }
+
+ if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte * 1024 / 2) {
+ locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k];
+ locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k];
+ } else {
+ locals->SwathHeightYPerState[i][j][k] = locals->MinSwathHeightY[k];
+ locals->SwathHeightCPerState[i][j][k] = locals->MinSwathHeightC[k];
+ }
+
+ if (locals->BytePerPixelInDETC[k] == 0) {
+ locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k];
+ locals->LinesInDETChroma = 0;
+ } else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) {
+ locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETY[k] /
+ locals->SwathWidthYPerState[i][j][k];
+ locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2);
+ } else {
+ locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k];
+ locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2);
+ }
+
+ locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines,
+ dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] / (locals->SwathWidthYPerState[i][j][k]
+ / dml_max(locals->HRatio[k], 1)), 1)) - (locals->vtaps[k] - 1);
+
+ locals->EffectiveLBLatencyHidingSourceLinesChroma = dml_min(locals->MaxLineBufferLines,
+ dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k]
+ / (locals->SwathWidthYPerState[i][j][k] / 2
+ / dml_max(locals->HRatio[k] / 2, 1)), 1)) - (locals->VTAPsChroma[k] - 1);
+
+ locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
+ locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
+ locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesLuma),
+ locals->SwathHeightYPerState[i][j][k]);
+
+ locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
+ locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesChroma),
+ locals->SwathHeightCPerState[i][j][k]);
+
+ if (locals->BytePerPixelInDETC[k] == 0) {
+ locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
+ / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
+ } else {
+ locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
+ locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
+ / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
+ locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
+ locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
+ dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
+ }
+ }
+ }
+ }
+
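+	/*
+	 * A state/DPP-split option hides the urgent latency only if every
+	 * active plane's computed latency-hiding time is at least UrgentLatency.
+	 */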
+ for (i = 0; i <= locals->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ locals->UrgentLatencySupport[i][j] = true;
+ for (k = 0; k < locals->NumberOfActivePlanes; k++) {
+ if (locals->UrgentLatencySupportUsPerState[i][j][k] < locals->UrgentLatency)
+ locals->UrgentLatencySupport[i][j] = false;
+ }
+ }
+ }
+
+
+ /*Prefetch Check*/
+ for (i = 0; i <= locals->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ locals->TotalNumberOfDCCActiveDPP[i][j] = 0;
+ for (k = 0; k < locals->NumberOfActivePlanes; k++) {
+ if (locals->DCCEnable[k] == true) {
+ locals->TotalNumberOfDCCActiveDPP[i][j] =
+ locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
+ }
+ }
+ }
+ }
+
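+	/* Derive the allowed prefetch-mode range from the self-refresh / DRAM clock change policy. */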
+ CalculateMinAndMaxPrefetchMode(locals->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &locals->MinPrefetchMode, &locals->MaxPrefetchMode);
+
+ locals->MaxTotalVActiveRDBandwidth = 0;
+ for (k = 0; k < locals->NumberOfActivePlanes; k++) {
+ locals->MaxTotalVActiveRDBandwidth = locals->MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
+ }
+
+ for (i = 0; i <= locals->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ for (k = 0; k < locals->NumberOfActivePlanes; k++) {
+ locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
+ locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
+ locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
+ locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
+ locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ mode_lib->vba.PixelClock[k] / 16.0);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][j][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][j][k]);
+ }
+ } else {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][j][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][j][k]);
+ }
+ if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][j][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ * mode_lib->vba.RequiredDPPCLK[i][j][k]);
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthYPerState[i][j][k],
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequestsLuma,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MetaRowBytesY,
+ &mode_lib->vba.DPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightYPerState[i][j][k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.PrefillY[k],
+ &mode_lib->vba.MaxNumSwY[k]);
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0,
+ mode_lib->vba.ViewportHeight[k] / 2.0,
+ mode_lib->vba.SwathWidthYPerState[i][j][k] / 2.0,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequestsLuma,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0.0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &mode_lib->vba.MetaRowBytesC,
+ &mode_lib->vba.DPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2.0,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightCPerState[i][j][k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.PrefillC[k],
+ &mode_lib->vba.MaxNumSwC[k]);
+ } else {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
+ mode_lib->vba.MetaRowBytesC = 0.0;
+ mode_lib->vba.DPTEBytesPerRowC = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
+ locals->PTEBufferSizeNotExceededC[i][j][k] = true;
+ locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
+ }
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.MetaRowBytesY,
+ mode_lib->vba.MetaRowBytesC,
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.meta_row_height_chroma[k],
+ mode_lib->vba.DPTEBytesPerRowY,
+ mode_lib->vba.DPTEBytesPerRowC,
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_bw[k],
+ &mode_lib->vba.dpte_row_bw[k],
+ &mode_lib->vba.qual_row_bw[k]);
+ }
+ mode_lib->vba.ExtraLatency =
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
+ + (mode_lib->vba.TotalNumberOfActiveDPP[i][j]
+ * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
+ * mode_lib->vba.MetaChunkSize)
+ * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i][0];
+ if (mode_lib->vba.GPUVMEnable == true) {
+ mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ + mode_lib->vba.TotalNumberOfActiveDPP[i][j]
+ * mode_lib->vba.PTEGroupSize
+ / mode_lib->vba.ReturnBWPerState[i][0];
+ }
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
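+ /* Worked example (illustrative only; every number below is assumed, not
+  * taken from this patch): with UrgentRoundTripAndOutOfOrderLatencyPerState
+  * = 4 us, 4 active DPPs, PixelChunkSizeInKByte = 8, 4 DCC-active DPPs,
+  * MetaChunkSize = 2 and ReturnBWPerState = 25600 MB/s (1 MB/s = 1 byte/us),
+  * ExtraLatency = 4 + (4*8 + 4*2) * 1024 / 25600 = 5.6 us; an assumed
+  * PTEGroupSize of 2048 bytes adds 4 * 2048 / 25600 = 0.32 us when GPUVM is
+  * enabled. With a 600 MHz deep-sleep DCFCLK, TimeCalc = 24 / 600 = 0.04 us. */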
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k]) / locals->RequiredDISPCLK[i][j];
+ } else {
+ locals->WritebackDelay[i][k] = 0.0;
+ }
+ for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
+ if (mode_lib->vba.BlendingAndTiming[m] == k
+ && mode_lib->vba.WritebackEnable[m]
+ == true) {
+ locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k],
+ mode_lib->vba.WritebackLatency + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[m],
+ mode_lib->vba.WritebackHRatio[m],
+ mode_lib->vba.WritebackVRatio[m],
+ mode_lib->vba.WritebackLumaHTaps[m],
+ mode_lib->vba.WritebackLumaVTaps[m],
+ mode_lib->vba.WritebackChromaHTaps[m],
+ mode_lib->vba.WritebackChromaVTaps[m],
+ mode_lib->vba.WritebackDestinationWidth[m]) / locals->RequiredDISPCLK[i][j]);
+ }
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == m) {
+ locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m < locals->NumberOfCursors[k]; m++)
+ locals->cursor_bw[k] = locals->NumberOfCursors[k] * locals->CursorWidth[k][m] * locals->CursorBPP[k][m]
+ / 8 / (locals->HTotal[k] / locals->PixelClock[k]) * locals->VRatio[k];
+ }
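+ /* Worked example (illustrative only, assumed values): one 256-pixel-wide
+  * 32 bpp cursor on a plane with HTotal = 2200, PixelClock = 148.5 MHz and
+  * VRatio = 1 gives cursor_bw = 1 * 256 * 32 / 8 / (2200 / 148.5) * 1
+  * = 1024 bytes / 14.81 us, roughly 69 MB/s. */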
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
+ }
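+ /* Worked example (illustrative only, assumed values): for a timing with
+  * VTotal = 1125, VActive = 1080 and no writeback (WritebackDelay = 0, so the
+  * dml_max() term is 1), MaximumVStartup = 1125 - 1080 - 1 = 44 lines. */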
+
+ mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
+ do {
+ mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
+ mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
+
+ mode_lib->vba.TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode[i][j],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ locals->SwathWidthYPerState[i][j][k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+
+ CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
+ mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal,
+ mode_lib->vba.SwathWidthYPerState[i][j][k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k],
+ mode_lib->vba.SwathWidthYSingleDPP[k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.SwathHeightYThisState[k], mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]);
+
+ mode_lib->vba.IsErrorResult[i][j][k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.RequiredDPPCLK[i][j][k],
+ mode_lib->vba.RequiredDISPCLK[i][j],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ mode_lib->vba.NoOfDPP[i][j][k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ mode_lib->vba.MaximumVStartup[0][0][k],
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.PrefetchLinesY[0][0][k],
+ mode_lib->vba.SwathWidthYPerState[i][j][k],
+ mode_lib->vba.BytePerPixelInDETY[k],
+ mode_lib->vba.PrefillY[k],
+ mode_lib->vba.MaxNumSwY[k],
+ mode_lib->vba.PrefetchLinesC[0][0][k],
+ mode_lib->vba.BytePerPixelInDETC[k],
+ mode_lib->vba.PrefillC[k],
+ mode_lib->vba.MaxNumSwC[k],
+ mode_lib->vba.SwathHeightYPerState[i][j][k],
+ mode_lib->vba.SwathHeightCPerState[i][j][k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.DSTXAfterScaler[k],
+ mode_lib->vba.DSTYAfterScaler[k],
+ &mode_lib->vba.LineTimesForPrefetch[k],
+ &mode_lib->vba.PrefetchBW[k],
+ &mode_lib->vba.LinesForMetaPTE[k],
+ &mode_lib->vba.LinesForMetaAndDPTERow[k],
+ &mode_lib->vba.VRatioPreY[i][j][k],
+ &mode_lib->vba.VRatioPreC[i][j][k],
+ &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ }
+ mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
+ locals->prefetch_vm_bw_valid = true;
+ locals->prefetch_row_bw_valid = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
+ locals->prefetch_vm_bw[k] = 0;
+ else if (locals->LinesForMetaPTE[k] > 0)
+ locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
+ / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
+ else {
+ locals->prefetch_vm_bw[k] = 0;
+ locals->prefetch_vm_bw_valid = false;
+ }
+ if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
+ locals->prefetch_row_bw[k] = 0;
+ else if (locals->LinesForMetaAndDPTERow[k] > 0)
+ locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
+ / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
+ else {
+ locals->prefetch_row_bw[k] = 0;
+ locals->prefetch_row_bw_valid = false;
+ }
+
+ mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ + mode_lib->vba.cursor_bw[k] + mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k];
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch =
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max3(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ dml_max(mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
+ }
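+ /* Worked example (illustrative only, assumed values): a plane needing
+  * PDEAndMetaPTEBytesPerFrame = 4096 bytes fetched over LinesForMetaPTE = 2
+  * lines of 14.81 us each requires prefetch_vm_bw = 4096 / (2 * 14.81),
+  * roughly 138 MB/s; non-zero bytes with LinesForMetaPTE = 0 instead clears
+  * prefetch_vm_bw_valid. */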
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
+ }
+
+ locals->PrefetchSupported[i][j] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->PrefetchSupported[i][j] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->LineTimesForPrefetch[k] < 2.0
+ || locals->LinesForMetaPTE[k] >= 8.0
+ || locals->LinesForMetaAndDPTERow[k] >= 16.0
+ || mode_lib->vba.IsErrorResult[i][j][k] == true) {
+ locals->PrefetchSupported[i][j] = false;
+ }
+ }
+ locals->VRatioInPrefetchSupported[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->VRatioPreY[i][j][k] > 4.0
+ || locals->VRatioPreC[i][j][k] > 4.0
+ || mode_lib->vba.IsErrorResult[i][j][k] == true) {
+ locals->VRatioInPrefetchSupported[i][j] = false;
+ }
+ }
+ } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
+ && mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode);
+
+ if (mode_lib->vba.PrefetchSupported[i][j] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.ReturnBWPerState[i][0];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.qual_row_bw[k],
+ mode_lib->vba.PrefetchBW[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ + mode_lib->vba.MetaRowBytes[0][0][k]
+ + mode_lib->vba.DPTEBytesPerRow[0][0][k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + mode_lib->vba.ImmediateFlipBytes[k];
+ }
+ }
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &mode_lib->vba.final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.total_dcn_read_bw_with_flip =
+ mode_lib->vba.total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max3(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]));
+ }
+ mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
+ if (mode_lib->vba.total_dcn_read_bw_with_flip
+ > mode_lib->vba.ReturnBWPerState[i][0]) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
+ }
+ }
+ }
+
+ /*Vertical Active BW support*/
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
+ mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
+ mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
+ if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
+ else
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
+ }
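+ /* Worked example (illustrative only, assumed values): with ReturnBusWidth =
+  * 64 bytes, DCFCLKPerState = 1000 MHz, FabricAndDRAMBandwidthPerState =
+  * 50 GB/s and an 80% MaxAveragePercentOfIdealDRAMBW cap, the available
+  * bandwidth is min(64 * 1000, 50 * 1000) * 80 / 100 = 40000 MB/s, which
+  * MaxTotalVActiveRDBandwidth must not exceed. */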
+
+ /*PTE Buffer Size Check*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ locals->PTEBufferSizeNotExceeded[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->PTEBufferSizeNotExceededY[i][j][k] == false
+ || locals->PTEBufferSizeNotExceededC[i][j][k] == false) {
+ locals->PTEBufferSizeNotExceeded[i][j] = false;
+ }
+ }
+ }
+ }
+ /*Cursor Support Check*/
+ mode_lib->vba.CursorSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j < 2; j++) {
+ if (mode_lib->vba.CursorWidth[k][j] > 0.0) {
+ if (dml_floor(
+ dml_floor(
+ mode_lib->vba.CursorBufferSize
+ - mode_lib->vba.CursorChunkSize,
+ mode_lib->vba.CursorChunkSize) * 1024.0
+ / (mode_lib->vba.CursorWidth[k][j]
+ * mode_lib->vba.CursorBPP[k][j]
+ / 8.0),
+ 1.0)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatencyPixelDataOnly
+ || (mode_lib->vba.CursorBPP[k][j] == 64.0
+ && mode_lib->vba.Cursor64BppSupport == false)) {
+ mode_lib->vba.CursorSupport = false;
+ }
+ }
+ }
+ }
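+ /* Worked example (illustrative only, assumed values): with CursorBufferSize
+  * = 16 (KB), CursorChunkSize = 2 (KB), a 256-wide 32 bpp cursor, a 14.81 us
+  * line time and VRatio = 1, the buffer holds floor(16 - 2, 2) = 14 KB, i.e.
+  * 14336 / (256 * 32 / 8) = 14 cursor lines, covering 14 * 14.81 = ~207 us,
+  * comfortably above a few-microsecond UrgentLatencyPixelDataOnly, so the
+  * cursor is supported. */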
+ /*Valid Pitch Check*/
+
+ mode_lib->vba.PitchSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->AlignedYPitch[k] = dml_ceil(
+ dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
+ locals->MacroTileWidthY[k]);
+ if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ locals->AlignedDCCMetaPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.DCCMetaPitchY[k],
+ mode_lib->vba.ViewportWidth[k]),
+ 64.0 * locals->Read256BlockWidthY[k]);
+ } else {
+ locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
+ }
+ if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
+ locals->AlignedCPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.PitchC[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0),
+ locals->MacroTileWidthC[k]);
+ } else {
+ locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k];
+ }
+ if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ }
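+ /* Worked example (illustrative only, assumed values): with ViewportWidth =
+  * PitchY = 1920 and MacroTileWidthY = 128, AlignedYPitch = ceil(1920 / 128)
+  * * 128 = 1920 and the pitch passes; with a 256-wide macro tile the aligned
+  * pitch would be 2048 > 1920 and PitchSupport would be cleared. */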
+ /*Mode Support, Voltage State and SOC Configuration*/
+
+ for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
+ for (j = 0; j < 2; j++) {
+ enum dm_validation_status status = DML_VALIDATION_OK;
+
+ if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
+ status = DML_FAIL_SCALE_RATIO_TAP;
+ } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
+ status = DML_FAIL_SOURCE_PIXEL_FORMAT;
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
+ status = DML_FAIL_VIEWPORT_SIZE;
+ } else if (locals->DIOSupport[i] != true) {
+ status = DML_FAIL_DIO_SUPPORT;
+ } else if (locals->NotEnoughDSCUnits[i] != false) {
+ status = DML_FAIL_NOT_ENOUGH_DSC;
+ } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
+ status = DML_FAIL_DSC_CLK_REQUIRED;
+ } else if (locals->UrgentLatencySupport[i][j] != true) {
+ status = DML_FAIL_URGENT_LATENCY;
+ } else if (locals->ROBSupport[i][0] != true) {
+ status = DML_FAIL_REORDERING_BUFFER;
+ } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
+ status = DML_FAIL_DISPCLK_DPPCLK;
+ } else if (locals->TotalAvailablePipesSupport[i][j] != true) {
+ status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
+ } else if (mode_lib->vba.NumberOfOTGSupport != true) {
+ status = DML_FAIL_NUM_OTG;
+ } else if (mode_lib->vba.WritebackModeSupport != true) {
+ status = DML_FAIL_WRITEBACK_MODE;
+ } else if (mode_lib->vba.WritebackLatencySupport != true) {
+ status = DML_FAIL_WRITEBACK_LATENCY;
+ } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
+ status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
+ } else if (mode_lib->vba.CursorSupport != true) {
+ status = DML_FAIL_CURSOR_SUPPORT;
+ } else if (mode_lib->vba.PitchSupport != true) {
+ status = DML_FAIL_PITCH_SUPPORT;
+ } else if (locals->PrefetchSupported[i][j] != true) {
+ status = DML_FAIL_PREFETCH_SUPPORT;
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
+ status = DML_FAIL_TOTAL_V_ACTIVE_BW;
+ } else if (locals->VRatioInPrefetchSupported[i][j] != true) {
+ status = DML_FAIL_V_RATIO_PREFETCH;
+ } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
+ status = DML_FAIL_PTE_BUFFER_SIZE;
+ } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
+ status = DML_FAIL_DSC_INPUT_BPC;
+ }
+
+ if (status == DML_VALIDATION_OK) {
+ locals->ModeSupport[i][j] = true;
+ } else {
+ locals->ModeSupport[i][j] = false;
+ }
+ locals->ValidationStatus[i] = status;
+ }
+ }
+ {
+ unsigned int MaximumMPCCombine = 0;
+ mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states + 1;
+ for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) {
+ if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) {
+ mode_lib->vba.VoltageLevel = i;
+ if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false
+ || mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible)) {
+ MaximumMPCCombine = 1;
+ } else {
+ MaximumMPCCombine = 0;
+ }
+ break;
+ }
+ }
+ mode_lib->vba.ImmediateFlipSupport =
+ locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
+ locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
+ }
+ mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
+ mode_lib->vba.maxMpcComb = MaximumMPCCombine;
+ }
+ mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
+ mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.ODMCombineEnabled[k] =
+ locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ } else {
+ mode_lib->vba.ODMCombineEnabled[k] = 0;
+ }
+ mode_lib->vba.DSCEnabled[k] =
+ locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
+ mode_lib->vba.OutputBpp[k] =
+ locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h
new file mode 100644
index 000000000000..a989d3ca1e99
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN20V2_DISPLAY_MODE_VBA_H_
+#define _DCN20V2_DISPLAY_MODE_VBA_H_
+
+void dml20v2_recalculate(struct display_mode_lib *mode_lib);
+void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 878bf4782ce6..ca807846032f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -670,14 +670,14 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
- disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
- + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
@@ -959,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1654,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
new file mode 100644
index 000000000000..287b7a0ad108
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -0,0 +1,1701 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../display_mode_lib.h"
+#include "../display_mode_vba.h"
+#include "display_rq_dlg_calc_20v2.h"
+
+// Function: dml20v2_rq_dlg_get_rq_params
+// Calculate requestor related parameters that are register definition agnostic
+// (i.e. this layer tries to keep real values separate from the register definition)
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
+//
+static void dml20v2_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ display_rq_params_st * rq_param,
+ const display_pipe_source_params_st pipe_src_param);
+
+// Function: dml20v2_rq_dlg_get_dlg_params
+// Calculate deadline related parameters
+//
+static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en);
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
+static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp);
+
+#include "../dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ } else if (source_format == dm_444_8) {
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = false;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = true;
+
+ return ret_val;
+}
+
+static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ bool odm_combine,
+ unsigned int recout_width,
+ unsigned int hactive,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ if (odm_combine)
+ refcyc_per_delivery = (double) refclk_freq_in_mhz
+ * dml_min((double) recout_width, (double) hactive / 2.0)
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ else
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
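+	// Worked example (illustrative only, assumed values): for vratio <= 1.0
+	// without ODM combine, refclk_freq_in_mhz = 24, recout_width = 1920,
+	// pclk_freq_in_mhz = 300 and req_per_swath_ub = 120 give
+	// refcyc_per_delivery = 24 * 1920 / 300 / 120 = 1.28 refclk cycles per request.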
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
+ dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
+ dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
+ dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
+ display_data_rq_regs_st *rq_regs,
+ const display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+static void extract_rq_regs(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ 1) - 3;
+
+ if (rq_param.yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ 1) - 3;
+ }
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ // TODO: take the max between luma, chroma chunk size?
+ // okay for now, as we are setting chunk_bytes to 8kb anyways
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; // 2/3 to chroma
+ }
+ }
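+	// Worked example (illustrative only; the 164 KB DET size is assumed): for a
+	// yuv420 surface with a stored_swath_bytes ratio <= 1.5,
+	// detile_buf_plane1_addr = 164 * 1024 / 2 / 64 = 1312 (in 64-byte units,
+	// the midpoint of the buffer); for a ratio above 1.5 it becomes
+	// round_down(2 * 167936 / 3, 256) / 64 = 111872 / 64 = 1748.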
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
+static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
+ req128_l = false;
+ req128_c = false;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { //128b request (for luma only for yuv420 8bpc)
+ req128_l = true;
+ req128_c = false;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+		// Note: it is assumed that the config passed in will fit into
+		// the detiled buffer.
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = false;
+ else
+ req128_l = true;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
+ dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
+ __func__,
+ full_swath_bytes_packed_l);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
+ __func__,
+ full_swath_bytes_packed_c);
+}
+
+static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ unsigned int data_pitch,
+ unsigned int meta_pitch,
+ unsigned int source_format,
+ unsigned int tiling,
+ unsigned int macro_tile_size,
+ unsigned int source_scan,
+ unsigned int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element;
+ unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
+ false);
+ unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
+ true);
+
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int blk256_width_y = 0;
+ unsigned int blk256_height_y = 0;
+ unsigned int blk256_width_c = 0;
+ unsigned int blk256_height_c = 0;
+ unsigned int log2_bytes_per_element;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+
+ //full sized meta chunk width in unit of data elements
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
+ const unsigned int pde_proc_buffer_size_64k_reqs =
+ mode_lib->ip.pde_proc_buffer_size_64k_reqs;
+
+ unsigned int log2_vmpg_height = 0;
+ unsigned int log2_vmpg_width = 0;
+ unsigned int log2_dpte_req_height_ptes = 0;
+ unsigned int log2_dpte_req_height = 0;
+ unsigned int log2_dpte_req_width = 0;
+ unsigned int log2_dpte_row_height_linear = 0;
+ unsigned int log2_dpte_row_height = 0;
+ unsigned int log2_dpte_group_width = 0;
+ unsigned int dpte_row_width_ub = 0;
+ unsigned int dpte_req_height = 0;
+ unsigned int dpte_req_width = 0;
+ unsigned int dpte_group_width = 0;
+ unsigned int log2_dpte_group_bytes = 0;
+ unsigned int log2_dpte_group_length = 0;
+ unsigned int pde_buf_entries;
+ bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
+
+ Calculate256BBlockSizes((enum source_format_class)(source_format),
+ (enum dm_swizzle_mode)(tiling),
+ bytes_per_element_y,
+ bytes_per_element_c,
+ &blk256_height_y,
+ &blk256_height_c,
+ &blk256_width_y,
+ &blk256_width_c);
+
+ if (!is_chroma) {
+ blk256_width = blk256_width_y;
+ blk256_height = blk256_height_y;
+ bytes_per_element = bytes_per_element_y;
+ } else {
+ blk256_width = blk256_width_c;
+ blk256_height = blk256_height_c;
+ bytes_per_element = bytes_per_element_c;
+ }
+
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+
+ dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
+ dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
+ dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
+ dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ // remember log rule
+ // "+" in log is multiply
+ // "-" in log is divide
+ // "/2" is like square root
+ // blk is vertical biased
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; // blk height of 1
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
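+	// Worked example (illustrative only, assumed values): for a 32 bpp surface
+	// (log2_bytes_per_element = 2) with an 8x8-element 256B block
+	// (log2_blk256_height = 3) on a 64 KB macro tile (log2_blk_bytes = 16):
+	// log2_blk_height = 3 + ceil((16 - 8) / 2) = 7 and
+	// log2_blk_width = 16 - 2 - 7 = 7, i.e. a 128x128-element block
+	// (128 * 128 * 4 bytes = 64 KB).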
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
+ + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ // -------
+ // meta
+ // -------
+ log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
+
+ // each 64b meta request for dcn is 8x8 meta elements and
+	// a meta element covers one 256b block of the data surface.
+	log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 bytes, each byte represents 1 blk256
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
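+	// Worked example (illustrative only, continuing the assumed 32 bpp / 8x8
+	// blk256 case): log2_meta_req_height = 3 + 3 = 6 and
+	// log2_meta_req_width = 6 + 8 - 2 - 6 = 6, so one 64-byte meta request
+	// covers a 64x64-element region (8x8 meta bytes, each mapping an
+	// 8x8-element 256B block).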
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ // calculate upper bound of the meta_row_width
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ //full sized meta chunk width in unit of data elements
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
+ * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
+ dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
+ dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
+ dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_req_per_frame_ub);
+ dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ // ------
+ // dpte
+ // ------
+ if (surf_linear) {
+ log2_vmpg_height = 0; // one line high
+ } else {
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ }
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
+ if (surf_linear) { //one 64B PTE request returns 8 PTEs
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_vmpg_width + 3;
+ log2_dpte_req_height = 0;
+ } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
+ //one 64B req gives 8x1 PTEs for 4KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
+ //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
+ log2_dpte_req_height_ptes = 4;
+ log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
+ log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
+ } else { //64KB page size and must 64KB tile block
+ //one 64B req gives 8x1 PTEs for 64KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ }
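+	// Worked example (illustrative only, assumed values): a linear 32 bpp
+	// surface with 4 KB VM pages has log2_vmpg_width = 12 - 2 - 0 = 10, so a
+	// single 64B PTE request (8 PTEs) spans 2^(10 + 3) = 8192 elements of one
+	// row (dpte_req_height = 1).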
+
+ // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+	// log2_vmpg_width is how much 1 pte represents; now calculate how much a 64b pte req represents
+ // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ // calculate pitch dpte row buffer can hold
+ // round the result down to a power of two.
+ pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
+ if (surf_linear) {
+ unsigned int dpte_row_height;
+
+ log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
+ / bytes_per_element,
+ dpte_buf_in_pte_reqs
+ * dpte_req_width)
+ / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ dpte_row_height = 1 << log2_dpte_row_height;
+ dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ // the upper bound of the dpte_row_width without dependency on viewport position follows.
+ // for tiled mode, row height is the same as req height and row store up to vp size upper bound
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ }
+ if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
+ else
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
+
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ // the dpte_group_bytes is reduced for the specific case of vertical
+ // access of a tile surface that has dpte request of 8x1 ptes.
+ if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ //full size
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+	//since the pte request size is 64 bytes, the number of data pte requests per full sized group is as follows.
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
+
+ // full sized data pte group width in elements
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
+ if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
+ log2_dpte_group_width = log2_dpte_group_width - 1;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ // the upper bound for the dpte groups per row is as follows.
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
+ 1);
+}
+
+static void get_surf_rq_param(struct display_mode_lib *mode_lib,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ const display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = false;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+
+ // TODO check if ppe apply for both luma and chroma in 422 case
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ get_meta_and_pte_attr(mode_lib,
+ rq_dlg_param,
+ rq_misc_param,
+ rq_sizing_param,
+ vp_width,
+ vp_height,
+ data_pitch,
+ meta_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+}
+
+static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ // get param for luma surface
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ // get param for chroma surface
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ // calculate how to split the det buffer space between luma and chroma
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_params_st pipe_param)
+{
+ display_rq_params_st rq_param = {0};
+
+ memset(rq_regs, 0, sizeof(*rq_regs));
+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
+ extract_rq_regs(mode_lib, rq_regs, rq_param);
+
+ print__rq_regs_st(mode_lib, *rq_regs);
+}
+
+// Note: currently taken in as-is.
+// It would be nice to decouple the code from the hw register implementation and extract the code that is repeated for luma and chroma.
+static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en)
+{
+ const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
+ const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
+ const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+ const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+ const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
+ const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
+
+ // -------------------------
+ // Section 1.15.2.1: OTG dependent Params
+ // -------------------------
+ // Timing
+ unsigned int htotal = dst->htotal;
+// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
+ unsigned int hblank_end = dst->hblank_end;
+ unsigned int vblank_start = dst->vblank_start;
+ unsigned int vblank_end = dst->vblank_end;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double dppclk_freq_in_mhz = clks->dppclk_mhz;
+ double dispclk_freq_in_mhz = clks->dispclk_mhz;
+ double refclk_freq_in_mhz = clks->refclk_mhz;
+ double pclk_freq_in_mhz = dst->pixel_rate_mhz;
+ bool interlaced = dst->interlaced;
+
+ double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+
+ // Scaling
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratio_l;
+ double hratio_c;
+ double vratio_l;
+ double vratio_c;
+ bool scl_enable;
+
+ double line_time_in_us;
+ // double vinit_l;
+ // double vinit_c;
+ // double vinit_bot_l;
+ // double vinit_bot_c;
+
+ // unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ // unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ // unsigned int meta_pte_bytes_per_frame_ub_l;
+ // unsigned int meta_bytes_per_row_ub_l;
+
+ // unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ // unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int meta_chunks_per_row_ub_c;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double dst_y_prefetch;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double dst_y_per_vm_flip;
+ double dst_y_per_row_flip;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int meta_row_height_c;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+
+ unsigned int full_recout_width;
+ double xfc_transfer_delay;
+ double xfc_precharge_delay;
+ double xfc_remote_surface_flip_latency;
+ double xfc_dst_y_delta_drq_limit;
+ double xfc_prefetch_margin;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ double refcyc_per_req_delivery_pre_cur1;
+ double refcyc_per_req_delivery_cur1;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
+ dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
+
+ dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
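+ // illustrative fixed-point example: a refclk/pixclk ratio of 0.5 is programmed as 0.5 * 2^19 = 0x40000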
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
+ min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
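+ // illustrative example: dlg_vblank_start = 100 and min_dst_y_ttu_vblank = 2.5 give 102.5 * 2^2 = 410
+ // in this field with 2 fractional bits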
+
+ dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
+ __func__,
+ min_dcfclk_mhz);
+ dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
+ __func__,
+ min_ttu_vblank);
+ dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
+ __func__,
+ min_dst_y_ttu_vblank);
+ dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
+ __func__,
+ t_calc_us);
+ dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ // -------------------------
+ // Section 1.15.2.2: Prefetch, Active and TTU
+ // -------------------------
+ // Prefetch Calc
+ // Source
+// dcc_en = src.dcc;
+ dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
+ mode_422 = false; // TODO
+ access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical access
+// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
+// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+ vp_height_l = src->viewport_height;
+ vp_width_l = src->viewport_width;
+ vp_height_c = src->viewport_height_c;
+ vp_width_c = src->viewport_width_c;
+
+ // Scaling
+ htaps_l = taps->htaps;
+ htaps_c = taps->htaps_c;
+ hratio_l = scl->hscl_ratio;
+ hratio_c = scl->hscl_ratio_c;
+ vratio_l = scl->vscl_ratio;
+ vratio_c = scl->vscl_ratio_c;
+ scl_enable = scl->scl_enable;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+// vinit_l = scl.vinit;
+// vinit_c = scl.vinit_c;
+// vinit_bot_l = scl.vinit_bot;
+// vinit_bot_c = scl.vinit_bot_c;
+
+// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ vupdate_offset = dst->vupdate_offset;
+ vupdate_width = dst->vupdate_width;
+ vready_offset = dst->vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+
+ if (scl_enable)
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
+ else
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
+
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
+
+ if (dout->dsc_enable) {
+ double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dispclk_delay_subtotal += dsc_delay;
+ }
+
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
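+ // illustrative example: 50 dppclk cycles at dppclk = 600MHz and pclk = 300MHz contribute 50 * 300 / 600 = 25
+ // pixel clocks; the dispclk term scales the same way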
+
+ vstartup_start = dst->vstartup_start;
+ if (interlaced) {
+ if (vstartup_start / 2.0
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end / 2.0)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ } else {
+ if (vstartup_start
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ }
+
+ // TODO: Where is this coming from?
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
+ if (vstartup_start >= min_vblank) {
+ dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
+ __func__,
+ vblank_start,
+ vblank_end);
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
+ dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
+ __func__,
+ pixel_rate_delay_subtotal);
+ dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
+ __func__,
+ dst_x_after_scaler);
+ dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
+ __func__,
+ dst_y_after_scaler);
+
+ // Lwait
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
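+ // illustrative example: a 20us wait with a 10us line time gives line_wait = 2 lines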
+
+ dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
+
+ dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+
+ // magic!
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
+
+ vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
+ dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
+
+ // Active
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratio_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l * 2.0;
+ } else {
+ if (hratio_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l;
+ }
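+ // illustrative example: htaps_l = 4 and hratio_l = 1.5 give min_hratio_fact_l = min(2 * 1.5, 4.0) = 3.0,
+ // so hscale_pixel_rate_l below becomes 3.0 * dppclk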
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratio_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c * 2.0;
+ } else {
+ if (hratio_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+
+ full_recout_width = 0;
+ // In ODM
+ if (src->is_hsplit) {
+ // This "hack" is only allowed (and valid) for MPC combine. In ODM
+ // combine, you MUST specify the full_recout_width...according to Oswin
+ if (dst->full_recout_width == 0 && !dst->odm_combine) {
+ dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
+ __func__);
+ full_recout_width = dst->recout_width * 2; // assume half split for dcn1
+ } else
+ full_recout_width = dst->full_recout_width;
+ } else
+ full_recout_width = dst->recout_width;
+
+ // As of DCN2, mpc_combine and odm_combine are mutually exclusive
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: full_recout_width = %d\n",
+ __func__,
+ full_recout_width);
+ dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
+ __func__,
+ hscale_pixel_rate_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_c);
+ }
+
+ // TTU - Luma / Chroma
+ if (access_dir) { // vertical access
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ // XFC
+ xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
+ xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+
+ // TTU - Cursor
+ refcyc_per_req_delivery_pre_cur0 = 0.0;
+ refcyc_per_req_delivery_cur0 = 0.0;
+ if (src->num_cursors > 0) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur0,
+ &refcyc_per_req_delivery_cur0,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur0_src_width,
+ (enum cursor_bpp)(src->cur0_bpp));
+ }
+
+ refcyc_per_req_delivery_pre_cur1 = 0.0;
+ refcyc_per_req_delivery_cur1 = 0.0;
+ if (src->num_cursors > 1) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur1,
+ &refcyc_per_req_delivery_cur1,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur1_src_width,
+ (enum cursor_bpp)(src->cur1_bpp));
+ }
+
+ // TTU - Misc
+ // all hard-coded
+
+ // Assignment to register structures
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
+
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
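+ // illustrative example: dst_y_per_row_vblank = 16 lines, htotal = 2200, refclk/pixclk = 0.5 and 4 pte groups
+ // per row give 16 * 2200 * 0.5 / 4 = 4400 refclk cycles per group, within the 13-bit limit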
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ < (unsigned int) dml_pow(2, 13));
+ }
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
+ }
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ if (dual_plane) {
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
+ dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
+ __func__,
+ disp_dlg_regs->dst_y_per_pte_row_nom_c,
+ (unsigned int) dml_pow(2, 17) - 1);
+ }
+ }
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_nom_c =
+ (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ // TODO: Is this the right calculation? Does htotal need to be halved?
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
+ (unsigned int) ((double) meta_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
+ }
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+ disp_dlg_regs->dst_y_offset_cur0 = 0;
+ disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
+ disp_dlg_regs->dst_y_offset_cur1 = 0;
+
+ disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
+ disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
+ disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
+ disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
+ 1);
+
+ // slave has to have this value also set to off
+ if (src->xfc_enable && !src->xfc_slave)
+ disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
+ else
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
+ * dml_pow(2, 10));
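+ // these ttu fields carry 10 fractional bits, e.g. 3.25 refclk cycles per request is programmed as 3.25 * 2^10 = 3328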
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ /*ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));*/
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
+
+void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ display_rq_params_st rq_param = {0};
+ display_dlg_sys_params_st dlg_sys_param = {0};
+
+ // Get watermark and Tex.
+ dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
+ / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
+
+ print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+
+ // system parameter calculation done
+
+ dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml20v2_rq_dlg_get_dlg_params(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ cstate_en,
+ pstate_en);
+ dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
+}
+
+static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp)
+{
+ unsigned int cur_src_width = cur_width;
+ unsigned int cur_req_size = 0;
+ unsigned int cur_req_width = 0;
+ double cur_width_ub = 0.0;
+ double cur_req_per_width = 0.0;
+ double hactive_cur = 0.0;
+
+ ASSERT(cur_src_width <= 256);
+
+ *refcyc_per_req_delivery_pre_cur = 0.0;
+ *refcyc_per_req_delivery_cur = 0.0;
+ if (cur_src_width > 0) {
+ unsigned int cur_bit_per_pixel = 0;
+
+ if (cur_bpp == dm_cur_2bit) {
+ cur_req_size = 64; // byte
+ cur_bit_per_pixel = 2;
+ } else { // 32bit
+ cur_bit_per_pixel = 32;
+ if (cur_src_width >= 1 && cur_src_width <= 16)
+ cur_req_size = 64;
+ else if (cur_src_width >= 17 && cur_src_width <= 31)
+ cur_req_size = 128;
+ else
+ cur_req_size = 256;
+ }
+
+ cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
+ cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
+ * (double) cur_req_width;
+ cur_req_per_width = cur_width_ub / (double) cur_req_width;
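+ // illustrative example: a 64 pixel 32bpp cursor uses 256 byte requests, so cur_req_width = 256 / (32 / 8) = 64,
+ // cur_width_ub = 64 and cur_req_per_width = 1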
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
+
+ if (vratio_pre_l <= 1.0) {
+ *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ dml_print("DML_DLG: %s: cur_req_width = %d\n",
+ __func__,
+ cur_req_width);
+ dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
+ __func__,
+ cur_width_ub);
+ dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
+ __func__,
+ cur_req_per_width);
+ dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
+ __func__,
+ hactive_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_pre_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_cur);
+
+ ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
new file mode 100644
index 000000000000..0378406bf7e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__
+#define __DML20V2_DISPLAY_RQ_DLG_CALC_H__
+
+#include "../dml_common_defs.h"
+#include "../display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+
+// Function: dml20v2_rq_dlg_get_rq_reg
+// Main entry point for tests to get the register values out of this DML class.
+// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
+// and then populate the rq_regs struct
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_regs - struct that holds all the RQ register field values.
+// See also: <display_rq_regs_st>
+void dml20v2_rq_dlg_get_rq_reg(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_params_st pipe_param);
+
+
+// Function: dml20v2_rq_dlg_get_dlg_reg
+// Calculate and return the DLG and TTU register structs given the system settings
+// Output:
+// dlg_regs - output DLG register struct
+// ttu_regs - output DLG TTU register struct
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
+// cstate - 0: assume cstate is not required when calculating min_ttu_vblank. 1: normal mode, cstate is considered.
+// Added for legacy or unrealistic timing tests.
+void dml20v2_rq_dlg_get_dlg_reg(
+ struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
new file mode 100644
index 000000000000..e6617c958bb8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -0,0 +1,6134 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "../display_mode_lib.h"
+#include "../dml_inline_defs.h"
+#include "../display_mode_vba.h"
+#include "display_mode_vba_21.h"
+
+
+/*
+ * NOTE:
+ * This file is gcc-parsable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+typedef struct {
+ double DPPCLK;
+ double DISPCLK;
+ double PixelClock;
+ double DCFCLKDeepSleep;
+ unsigned int DPPPerPlane;
+ bool ScalerEnabled;
+ enum scan_direction_class SourceScan;
+ unsigned int BlockWidth256BytesY;
+ unsigned int BlockHeight256BytesY;
+ unsigned int BlockWidth256BytesC;
+ unsigned int BlockHeight256BytesC;
+ unsigned int InterlaceEnable;
+ unsigned int NumberOfCursors;
+ unsigned int VBlank;
+ unsigned int HTotal;
+} Pipe;
+
+typedef struct {
+ bool Enable;
+ unsigned int MaxPageTableLevels;
+ unsigned int CachedPageTableLevels;
+} HostVM;
+
+#define BPP_INVALID 0
+#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN21_MAX_420_IMAGE_WIDTH 4096
+
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib);
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat);
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
+// Super monster function with some 45 arguments
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ Pipe *myPipe,
+ unsigned int DSCDelay,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int MaxVStartup,
+ unsigned int GPUVMPageTableLevels,
+ bool GPUVMEnable,
+ HostVM *myHostVM,
+ bool DynamicMetadataEnable,
+ int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBWLuma,
+ double *RequiredPrefetchPixDataBWChroma,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ double *prefetch_vmrow_bw,
+ unsigned int *swath_width_luma_ub,
+ unsigned int *swath_width_chroma_ub,
+ unsigned int *VUpdateOffsetPix,
+ double *VUpdateWidthPix,
+ double *VReadyOffsetPix);
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
+static double CalculateDCCConfiguration(
+ bool DCCEnabled,
+ bool DCCProgrammingAssumesScanDirectionUnknown,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ double DETBufferSize,
+ unsigned int RequestHeight256Byte,
+ unsigned int SwathHeight,
+ enum dm_swizzle_mode TilingFormat,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanOrientation,
+ unsigned int *MaxUncompressedBlock,
+ unsigned int *MaxCompressedBlock,
+ unsigned int *Independent64ByteBlock);
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath);
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidthY,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ unsigned int HostVMMaxPageTableLevels,
+ unsigned int HostVMCachedPageTableLevels,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_width_ub,
+ unsigned int *dpte_row_height,
+ unsigned int *MetaRequestWidth,
+ unsigned int *MetaRequestHeight,
+ unsigned int *meta_row_width,
+ unsigned int *meta_row_height,
+ unsigned int *vm_group_bytes,
+ unsigned int *dpte_group_bytes,
+ unsigned int *PixelPTEReqWidth,
+ unsigned int *PixelPTEReqHeight,
+ unsigned int *PTERequestSize,
+ unsigned int *DPDE0BytesFrame,
+ unsigned int *MetaPTEBytesFrame);
+
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime);
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk);
+static void CalculateActiveRowBandwidth(
+ bool GPUVMEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw);
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int GPUVMMaxPageTableLevels,
+ bool HostVMEnable,
+ unsigned int HostVMMaxPageTableLevels,
+ unsigned int HostVMCachedPageTableLevels,
+ bool GPUVMEnable,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+ double DPTEBytesPerRow,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ double LineTime,
+ double VRatio,
+ double Tno_bw,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ unsigned int dpte_row_height_chroma,
+ unsigned int meta_row_height_chroma,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe);
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth);
+static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+ unsigned int NumberOfActivePlanes,
+ unsigned int MaxLineBufferLines,
+ unsigned int LineBufferSize,
+ unsigned int DPPOutputBufferPixels,
+ double DETBufferSizeInKByte,
+ unsigned int WritebackInterfaceLumaBufferSize,
+ unsigned int WritebackInterfaceChromaBufferSize,
+ double DCFCLK,
+ double UrgentOutOfOrderReturn,
+ double ReturnBW,
+ bool GPUVMEnable,
+ int dpte_group_bytes[],
+ unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+ double WritebackLatency,
+ double WritebackChunkSize,
+ double SOCCLK,
+ double DRAMClockChangeLatency,
+ double SRExitTime,
+ double SREnterPlusExitTime,
+ double DCFCLKDeepSleep,
+ int DPPPerPlane[],
+ bool DCCEnable[],
+ double DPPCLK[],
+ double SwathWidthSingleDPPY[],
+ unsigned int SwathHeightY[],
+ double ReadBandwidthPlaneLuma[],
+ unsigned int SwathHeightC[],
+ double ReadBandwidthPlaneChroma[],
+ unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double HRatio[],
+ unsigned int vtaps[],
+ unsigned int VTAPsChroma[],
+ double VRatio[],
+ unsigned int HTotal[],
+ double PixelClock[],
+ unsigned int BlendingAndTiming[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ bool WritebackEnable[],
+ enum source_format_class WritebackPixelFormat[],
+ double WritebackDestinationWidth[],
+ double WritebackDestinationHeight[],
+ double WritebackSourceHeight[],
+ enum clock_change_support *DRAMClockChangeSupport,
+ double *UrgentWatermark,
+ double *WritebackUrgentWatermark,
+ double *DRAMClockChangeWatermark,
+ double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *MinActiveDRAMClockChangeLatencySupported);
+static void CalculateDCFCLKDeepSleep(
+ struct display_mode_lib *mode_lib,
+ unsigned int NumberOfActivePlanes,
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ double VRatio[],
+ double SwathWidthY[],
+ int DPPPerPlane[],
+ double HRatio[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ double *DCFCLKDeepSleep);
+static void CalculateDETBufferSize(
+ double DETBufferSizeInKByte,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double *DETBufferSizeY,
+ double *DETBufferSizeC);
+static void CalculateUrgentBurstFactor(
+ unsigned int DETBufferSizeInKByte,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ unsigned int SwathWidthY,
+ double LineTime,
+ double UrgentLatency,
+ double CursorBufferSize,
+ unsigned int CursorWidth,
+ unsigned int CursorBPP,
+ double VRatio,
+ double VRatioPreY,
+ double VRatioPreC,
+ double BytePerPixelInDETY,
+ double BytePerPixelInDETC,
+ double *UrgentBurstFactorCursor,
+ double *UrgentBurstFactorCursorPre,
+ double *UrgentBurstFactorLuma,
+ double *UrgentBurstFactorLumaPre,
+ double *UrgentBurstFactorChroma,
+ double *UrgentBurstFactorChromaPre,
+ unsigned int *NotEnoughUrgentLatencyHiding,
+ unsigned int *NotEnoughUrgentLatencyHidingPre);
+
+static void CalculatePixelDeliveryTimes(
+ unsigned int NumberOfActivePlanes,
+ double VRatio[],
+ double VRatioPrefetchY[],
+ double VRatioPrefetchC[],
+ unsigned int swath_width_luma_ub[],
+ unsigned int swath_width_chroma_ub[],
+ int DPPPerPlane[],
+ double HRatio[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ double BytePerPixelDETC[],
+ enum scan_direction_class SourceScan[],
+ unsigned int BlockWidth256BytesY[],
+ unsigned int BlockHeight256BytesY[],
+ unsigned int BlockWidth256BytesC[],
+ unsigned int BlockHeight256BytesC[],
+ double DisplayPipeLineDeliveryTimeLuma[],
+ double DisplayPipeLineDeliveryTimeChroma[],
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[],
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeLuma[],
+ double DisplayPipeRequestDeliveryTimeChroma[],
+ double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeChromaPrefetch[]);
+
+static void CalculateMetaAndPTETimes(
+ unsigned int NumberOfActivePlanes,
+ bool GPUVMEnable,
+ unsigned int MetaChunkSize,
+ unsigned int MinMetaChunkSizeBytes,
+ unsigned int GPUVMMaxPageTableLevels,
+ unsigned int HTotal[],
+ double VRatio[],
+ double VRatioPrefetchY[],
+ double VRatioPrefetchC[],
+ double DestinationLinesToRequestRowInVBlank[],
+ double DestinationLinesToRequestRowInImmediateFlip[],
+ double DestinationLinesToRequestVMInVBlank[],
+ double DestinationLinesToRequestVMInImmediateFlip[],
+ bool DCCEnable[],
+ double PixelClock[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ enum scan_direction_class SourceScan[],
+ unsigned int dpte_row_height[],
+ unsigned int dpte_row_height_chroma[],
+ unsigned int meta_row_width[],
+ unsigned int meta_row_height[],
+ unsigned int meta_req_width[],
+ unsigned int meta_req_height[],
+ int dpte_group_bytes[],
+ unsigned int PTERequestSizeY[],
+ unsigned int PTERequestSizeC[],
+ unsigned int PixelPTEReqWidthY[],
+ unsigned int PixelPTEReqHeightY[],
+ unsigned int PixelPTEReqWidthC[],
+ unsigned int PixelPTEReqHeightC[],
+ unsigned int dpte_row_width_luma_ub[],
+ unsigned int dpte_row_width_chroma_ub[],
+ unsigned int vm_group_bytes[],
+ unsigned int dpde0_bytes_per_frame_ub_l[],
+ unsigned int dpde0_bytes_per_frame_ub_c[],
+ unsigned int meta_pte_bytes_per_frame_ub_l[],
+ unsigned int meta_pte_bytes_per_frame_ub_c[],
+ double DST_Y_PER_PTE_ROW_NOM_L[],
+ double DST_Y_PER_PTE_ROW_NOM_C[],
+ double DST_Y_PER_META_ROW_NOM_L[],
+ double TimePerMetaChunkNominal[],
+ double TimePerMetaChunkVBlank[],
+ double TimePerMetaChunkFlip[],
+ double time_per_pte_group_nom_luma[],
+ double time_per_pte_group_vblank_luma[],
+ double time_per_pte_group_flip_luma[],
+ double time_per_pte_group_nom_chroma[],
+ double time_per_pte_group_vblank_chroma[],
+ double time_per_pte_group_flip_chroma[],
+ double TimePerVMGroupVBlank[],
+ double TimePerVMGroupFlip[],
+ double TimePerVMRequestVBlank[],
+ double TimePerVMRequestFlip[]);
+
+static double CalculateExtraLatency(
+ double UrgentRoundTripAndOutOfOrderLatency,
+ int TotalNumberOfActiveDPP,
+ int PixelChunkSizeInKByte,
+ int TotalNumberOfDCCActiveDPP,
+ int MetaChunkSize,
+ double ReturnBW,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ int NumberOfDPP[],
+ int dpte_group_bytes[],
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ int HostVMMaxPageTableLevels,
+ int HostVMCachedPageTableLevels);
+
+void dml21_recalculate(struct display_mode_lib *mode_lib)
+{
+ ModeSupportAndSystemConfiguration(mode_lib);
+ PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
+ DisplayPipeConfiguration(mode_lib);
+ DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
+}
+
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat)
+{
+ // valid bpc = source bits per component in the set of {8, 10, 12}
+ // valid bpp = increments of 1/16 of a bit
+ // min = 6/7/8 in N420/N422/444, respectively
+ // max = such that compression is 1:1
+ //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
+ //valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
+ //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
+
+ // fixed value
+ unsigned int rcModelSize = 8192;
+
+ // N422/N420 operate at 2 pixels per clock
+ unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, S, ix, wx, p, l0, a, ax, l,
+ Delay, pixels;
+
+ if (pixelFormat == dm_n422 || pixelFormat == dm_420)
+ pixelsPerClock = 2;
+ // all other modes operate at 1 pixel per clock
+ else
+ pixelsPerClock = 1;
+
+ //initial transmit delay as per PPS
+ initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
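+ // illustrative example: at 8 bpp and 1 pixel per clock this is round(8192 / 2 / 8 / 1) = 512 clocks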
+
+ //compute ssm delay
+ if (bpc == 8)
+ D = 81;
+ else if (bpc == 10)
+ D = 89;
+ else
+ D = 113;
+
+ //divide by pixel per cycle to compute slice width as seen by DSC
+ w = sliceWidth / pixelsPerClock;
+
+ //422 mode has an additional cycle of delay
+ if (pixelFormat == dm_s422)
+ S = 1;
+ else
+ S = 0;
+
+ //main calculation for the dscce
+ ix = initalXmitDelay + 45;
+ wx = (w + 2) / 3;
+ p = 3 * wx - w;
+ l0 = ix / w;
+ a = ix + p * l0;
+ ax = (a + 2) / 3 + D + 6 + 1;
+ l = (ax + wx - 1) / wx;
+ if ((ix % w) == 0 && p != 0)
+ lstall = 1;
+ else
+ lstall = 0;
+ Delay = l * wx * (numSlices - 1) + ax + S + lstall + 22;
+
+ //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
+ pixels = Delay * 3 * pixelsPerClock;
+ return pixels;
+}
+
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
+{
+ unsigned int Delay = 0;
+
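+	// The fixed per-stage delays below add up to 48 cycles for 4:2:0 output,
+	// 49 for native 4:2:2 and 30 for every other format.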
+ if (pixelFormat == dm_420) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 2;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 13;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 3;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else if (pixelFormat == dm_n422) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 1;
+ // dscc - input deserializer
+ Delay = Delay + 5;
+ // dscc - input cdc fifo
+ Delay = Delay + 25;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 10;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // sft
+ Delay = Delay + 1;
+ }
+
+ return Delay;
+}
+
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ Pipe *myPipe,
+ unsigned int DSCDelay,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int MaxVStartup,
+ unsigned int GPUVMPageTableLevels,
+ bool GPUVMEnable,
+ HostVM *myHostVM,
+ bool DynamicMetadataEnable,
+ int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBWLuma,
+ double *RequiredPrefetchPixDataBWChroma,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ double *prefetch_vmrow_bw,
+ unsigned int *swath_width_luma_ub,
+ unsigned int *swath_width_chroma_ub,
+ unsigned int *VUpdateOffsetPix,
+ double *VUpdateWidthPix,
+ double *VReadyOffsetPix)
+{
+ bool MyError = false;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
+ double Tdm, LineTime, Tsetup;
+ double dst_y_prefetch_equ;
+ double Tsw_oto;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double Tsw_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ double HostVMInefficiencyFactor;
+ unsigned int HostVMDynamicLevels;
+
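+	// With both GPUVM and host VM enabled, VM traffic is inflated by the ratio of the
+	// mixed pixel/VM return bandwidth to the VM-only return bandwidth, and only the
+	// uncached host page table levels contribute to the dynamic walk latency.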
+ if (GPUVMEnable == true && myHostVM->Enable == true) {
+ HostVMInefficiencyFactor =
+ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevels = myHostVM->MaxPageTableLevels
+ - myHostVM->CachedPageTableLevels;
+ } else {
+ HostVMInefficiencyFactor = 1;
+ HostVMDynamicLevels = 0;
+ }
+
+ if (myPipe->ScalerEnabled)
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
+ else
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
+
+ DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+
+ DISPCLKCycles = DISPCLKDelaySubtotal;
+
+ if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
+ return true;
+
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK
+ + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK + DSCDelay;
+
+ if (myPipe->DPPPerPlane > 1)
+ *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
+
+ if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
+ *DSTYAfterScaler = 1;
+ else
+ *DSTYAfterScaler = 0;
+
+ DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * myPipe->HTotal)) + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+
+ *VUpdateOffsetPix = dml_ceil(myPipe->HTotal / 4.0, 1);
+ TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / myPipe->DPPCLK + 3.0 / myPipe->DISPCLK);
+ *VUpdateWidthPix = (14.0 / myPipe->DCFCLKDeepSleep + 12.0 / myPipe->DPPCLK + TotalRepeaterDelayTime)
+ * myPipe->PixelClock;
+
+ *VReadyOffsetPix = dml_max(
+ 150.0 / myPipe->DPPCLK,
+ TotalRepeaterDelayTime + 20.0 / myPipe->DCFCLKDeepSleep + 10.0 / myPipe->DPPCLK)
+ * myPipe->PixelClock;
+
+ Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / myPipe->PixelClock;
+
+ LineTime = (double) myPipe->HTotal / myPipe->PixelClock;
+
+ if (DynamicMetadataEnable) {
+ double Tdmbf, Tdmec, Tdmsks;
+
+ Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
+ Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / myPipe->DISPCLK;
+ Tdmec = LineTime;
+ if (DynamicMetadataLinesBeforeActiveRequired == -1)
+ Tdmsks = myPipe->VBlank * LineTime / 2.0;
+ else
+ Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
+ if (myPipe->InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
+ Tdmsks = Tdmsks / 2;
+ if (VStartup * LineTime
+ < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
+ MyError = true;
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
+ + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
+ } else
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
+ } else
+ Tdm = 0;
+
+ if (GPUVMEnable) {
+ if (GPUVMPageTableLevels >= 3)
+ *Tno_bw = UrgentExtraLatency + UrgentLatency * ((GPUVMPageTableLevels - 2) * (myHostVM->MaxPageTableLevels + 1) - 1);
+ else
+ *Tno_bw = 0;
+ } else if (!DCCEnable)
+ *Tno_bw = LineTime;
+ else
+ *Tno_bw = LineTime / 4;
+
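+	// Equation-based prefetch budget, in destination lines: VStartup minus the larger of
+	// the calc+wait and XFC flip delays, minus setup and dynamic-metadata time, minus the
+	// pipeline offset already consumed after the scaler.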
+ dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
+ - (Tsetup + Tdm) / LineTime
+ - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+
+ Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+
+ if (myPipe->SourceScan == dm_horz) {
+ *swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockWidth256BytesY) + myPipe->BlockWidth256BytesY;
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
+ } else {
+ *swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
+ }
+
+ prefetch_bw_oto = (PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) / Tsw_oto;
+
+ if (GPUVMEnable == true) {
+ Tvm_oto = dml_max(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ dml_max(UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1),
+ LineTime / 4.0));
+ } else
+ Tvm_oto = LineTime / 4.0;
+
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ Tr0_oto = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ dml_max(UrgentLatency * (HostVMDynamicLevels + 1), dml_max(LineTime - Tvm_oto, LineTime / 4)));
+ } else
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
+
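+	// Quantize the oto VM, row and swath times to quarter-line granularity; the oto
+	// prefetch length is one VM fetch, two row fetches and the swath transfer plus
+	// 0.75 lines of margin.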
+ Tvm_oto_lines = dml_ceil(4 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4 * Tr0_oto / LineTime, 1) / 4.0;
+ Tsw_oto_lines = dml_ceil(4 * Tsw_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Tsw_oto_lines + 0.75;
+
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ)
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ else
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+
+ dml_print("DML: VStartup: %d\n", VStartup);
+ dml_print("DML: TCalc: %f\n", TCalc);
+ dml_print("DML: TWait: %f\n", TWait);
+ dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
+ dml_print("DML: LineTime: %f\n", LineTime);
+ dml_print("DML: Tsetup: %f\n", Tsetup);
+ dml_print("DML: Tdm: %f\n", Tdm);
+ dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
+ dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
+ dml_print("DML: HTotal: %d\n", myPipe->HTotal);
+
+ *PrefetchBandwidth = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBWLuma = 0;
+ if (*DestinationLinesForPrefetch > 1) {
+ double PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ + PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2))
+ / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
+
+ double PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor + PrefetchSourceLinesY *
+ *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) +
+ PrefetchSourceLinesC * *swath_width_chroma_ub *
+ dml_ceil(BytePerPixelDETC, 2)) /
+ (*DestinationLinesForPrefetch * LineTime - *Tno_bw - 2 *
+ UrgentLatency * (1 + HostVMDynamicLevels));
+
+ double PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow
+ * HostVMInefficiencyFactor + PrefetchSourceLinesY *
+ *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) +
+ PrefetchSourceLinesC * *swath_width_chroma_ub *
+ dml_ceil(BytePerPixelDETC, 2)) /
+ (*DestinationLinesForPrefetch * LineTime -
+ UrgentExtraLatency - UrgentLatency * (GPUVMPageTableLevels
+ * (HostVMDynamicLevels + 1) - 1));
+
+ double PrefetchBandwidth4 = (PrefetchSourceLinesY * *swath_width_luma_ub *
+ dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC *
+ *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) /
+ (*DestinationLinesForPrefetch * LineTime -
+ UrgentExtraLatency - UrgentLatency * (GPUVMPageTableLevels
+ * (HostVMDynamicLevels + 1) - 1) - 2 * UrgentLatency *
+ (1 + HostVMDynamicLevels));
+
+ if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (*DestinationLinesForPrefetch - dml_ceil(Tsw_oto_lines, 1) / 4.0 - 0.75) * LineTime - *Tno_bw > 0) {
+ PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / ((*DestinationLinesForPrefetch - dml_ceil(Tsw_oto_lines, 1) / 4.0 - 0.75) * LineTime - *Tno_bw);
+ }
+ if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1 >= UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1) && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= UrgentLatency * (1 + HostVMDynamicLevels)) {
+ *PrefetchBandwidth = PrefetchBandwidth1;
+ } else if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2 >= UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1) && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < UrgentLatency * (1 + HostVMDynamicLevels)) {
+ *PrefetchBandwidth = PrefetchBandwidth2;
+ } else if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 < UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1) && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= UrgentLatency * (1 + HostVMDynamicLevels)) {
+ *PrefetchBandwidth = PrefetchBandwidth3;
+ } else {
+ *PrefetchBandwidth = PrefetchBandwidth4;
+ }
+
+ if (GPUVMEnable) {
+ TimeForFetchingMetaPTE = dml_max(*Tno_bw + (double) PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / *PrefetchBandwidth,
+ dml_max(UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1), LineTime / 4));
+ } else {
+// 5/30/2018 - This was an optimization requested from Sy but now NumberOfCursors is no longer a factor
+// so if this needs to be reinstated, then it should be officially done in the VBA code as well.
+// if (mode_lib->NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingMetaPTE = LineTime / 4;
+// else
+// TimeForFetchingMetaPTE = 0.0;
+ }
+
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlank =
+ dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentLatency * (1 + HostVMDynamicLevels),
+ dml_max(
+ (LineTime
+ - TimeForFetchingMetaPTE) / 2.0,
+ LineTime
+ / 4.0)));
+ } else {
+// See note above dated 5/30/2018
+// if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingRowInVBlank = (LineTime - TimeForFetchingMetaPTE) / 2.0;
+// else // TODO: Did someone else add this??
+// TimeForFetchingRowInVBlank = 0.0;
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
+
+ *DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
+
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch
+// See note above dated 5/30/2018
+// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
+ - ((GPUVMEnable || DCCEnable) ?
+ (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) :
+ 0.0); // TODO: Did someone else add this??
+
+ if (LinesToRequestPrefetchPixelData > 0) {
+
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ *VRatioPrefetchY =
+ dml_max(
+ (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData,
+ (double) MaxNumSwathY
+ * SwathHeightY
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillY
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ }
+ }
+
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+
+ if ((SwathHeightC > 4)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ *VRatioPrefetchC =
+ dml_max(
+ *VRatioPrefetchC,
+ (double) MaxNumSwathC
+ * SwathHeightC
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillC
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchC = 0;
+ }
+ }
+
+ *RequiredPrefetchPixDataBWLuma = myPipe->DPPPerPlane
+ * (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData
+ * dml_ceil(BytePerPixelDETY, 1)
+ * *swath_width_luma_ub / LineTime;
+ *RequiredPrefetchPixDataBWChroma = myPipe->DPPPerPlane
+ * (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData
+ * dml_ceil(BytePerPixelDETC, 2)
+ * *swath_width_chroma_ub / LineTime;
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBWLuma = 0;
+ *RequiredPrefetchPixDataBWChroma = 0;
+ }
+
+ dml_print("DML: Tvm: %fus\n", TimeForFetchingMetaPTE);
+ dml_print("DML: Tr0: %fus\n", TimeForFetchingRowInVBlank);
+ dml_print("DML: Tsw: %fus\n", (double)(*DestinationLinesForPrefetch) * LineTime - TimeForFetchingMetaPTE - TimeForFetchingRowInVBlank);
+ dml_print("DML: Tpre: %fus\n", (double)(*DestinationLinesForPrefetch) * LineTime);
+ dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
+
+ } else {
+ MyError = true;
+ }
+
+ {
+ double prefetch_vm_bw;
+ double prefetch_row_bw;
+
+ if (PDEAndMetaPTEBytesFrame == 0) {
+ prefetch_vm_bw = 0;
+ } else if (*DestinationLinesToRequestVMInVBlank > 0) {
+ prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInVBlank * LineTime);
+ } else {
+ prefetch_vm_bw = 0;
+ MyError = true;
+ }
+ if (MetaRowByte + PixelPTEBytesPerRow == 0) {
+ prefetch_row_bw = 0;
+ } else if (*DestinationLinesToRequestRowInVBlank > 0) {
+ prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInVBlank * LineTime);
+ } else {
+ prefetch_row_bw = 0;
+ MyError = true;
+ }
+
+ *prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
+ }
+
+ if (MyError) {
+ *PrefetchBandwidth = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *DestinationLinesForPrefetch = 0;
+ LinesToRequestPrefetchPixelData = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBWLuma = 0;
+ *RequiredPrefetchPixDataBWChroma = 0;
+ }
+
+ return MyError;
+}
+
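+// Both helpers below quantize a clock to a DFS post-divider step VCOSpeed * 4 / N;
+// e.g. (illustrative) VCOSpeed = 3600 MHz and Clock = 700 MHz give 14400 / 20 = 720 MHz
+// when rounding up and 14400 / 21 ~= 685.7 MHz when rounding down.
+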
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
+}
+
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
+}
+
+static double CalculateDCCConfiguration(
+ bool DCCEnabled,
+ bool DCCProgrammingAssumesScanDirectionUnknown,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ double DETBufferSize,
+ unsigned int RequestHeight256Byte,
+ unsigned int SwathHeight,
+ enum dm_swizzle_mode TilingFormat,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanOrientation,
+ unsigned int *MaxUncompressedBlock,
+ unsigned int *MaxCompressedBlock,
+ unsigned int *Independent64ByteBlock)
+{
+ double MaximumDCCCompressionSurface = 0.0;
+ enum {
+ REQ_256Bytes,
+ REQ_128BytesNonContiguous,
+ REQ_128BytesContiguous,
+ REQ_NA
+ } Request = REQ_NA;
+
+ if (DCCEnabled == true) {
+ if (DCCProgrammingAssumesScanDirectionUnknown == true) {
+ if (DETBufferSize >= RequestHeight256Byte * ViewportWidth * BytePerPixel
+ && DETBufferSize
+ >= 256 / RequestHeight256Byte
+ * ViewportHeight) {
+ Request = REQ_256Bytes;
+ } else if ((DETBufferSize
+ < RequestHeight256Byte * ViewportWidth * BytePerPixel
+ && (BytePerPixel == 2 || BytePerPixel == 4))
+ || (DETBufferSize
+ < 256 / RequestHeight256Byte
+ * ViewportHeight
+ && BytePerPixel == 8
+ && (TilingFormat == dm_sw_4kb_d
+ || TilingFormat
+ == dm_sw_4kb_d_x
+ || TilingFormat
+ == dm_sw_var_d
+ || TilingFormat
+ == dm_sw_var_d_x
+ || TilingFormat
+ == dm_sw_64kb_d
+ || TilingFormat
+ == dm_sw_64kb_d_x
+ || TilingFormat
+ == dm_sw_64kb_d_t
+ || TilingFormat
+ == dm_sw_64kb_r_x))) {
+ Request = REQ_128BytesNonContiguous;
+ } else {
+ Request = REQ_128BytesContiguous;
+ }
+ } else {
+ if (BytePerPixel == 1) {
+ if (ScanOrientation == dm_vert || SwathHeight == 16) {
+ Request = REQ_256Bytes;
+ } else {
+ Request = REQ_128BytesContiguous;
+ }
+ } else if (BytePerPixel == 2) {
+ if ((ScanOrientation == dm_vert && SwathHeight == 16) || (ScanOrientation != dm_vert && SwathHeight == 8)) {
+ Request = REQ_256Bytes;
+ } else if (ScanOrientation == dm_vert) {
+ Request = REQ_128BytesContiguous;
+ } else {
+ Request = REQ_128BytesNonContiguous;
+ }
+ } else if (BytePerPixel == 4) {
+ if (SwathHeight == 8) {
+ Request = REQ_256Bytes;
+ } else if (ScanOrientation == dm_vert) {
+ Request = REQ_128BytesContiguous;
+ } else {
+ Request = REQ_128BytesNonContiguous;
+ }
+ } else if (BytePerPixel == 8) {
+ if (TilingFormat == dm_sw_4kb_d || TilingFormat == dm_sw_4kb_d_x
+ || TilingFormat == dm_sw_var_d
+ || TilingFormat == dm_sw_var_d_x
+ || TilingFormat == dm_sw_64kb_d
+ || TilingFormat == dm_sw_64kb_d_x
+ || TilingFormat == dm_sw_64kb_d_t
+ || TilingFormat == dm_sw_64kb_r_x) {
+ if ((ScanOrientation == dm_vert && SwathHeight == 8)
+ || (ScanOrientation != dm_vert
+ && SwathHeight == 4)) {
+ Request = REQ_256Bytes;
+ } else if (ScanOrientation != dm_vert) {
+ Request = REQ_128BytesContiguous;
+ } else {
+ Request = REQ_128BytesNonContiguous;
+ }
+ } else {
+ if (ScanOrientation != dm_vert || SwathHeight == 8) {
+ Request = REQ_256Bytes;
+ } else {
+ Request = REQ_128BytesContiguous;
+ }
+ }
+ }
+ }
+ } else {
+ Request = REQ_NA;
+ }
+
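+	// Map the request class to DCC limits: full 256-byte requests allow up to 4:1
+	// surface compression, contiguous 128-byte requests 2:1, and non-contiguous
+	// 128-byte requests fall back to independently compressed 64-byte blocks (4:1).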
+ if (Request == REQ_256Bytes) {
+ *MaxUncompressedBlock = 256;
+ *MaxCompressedBlock = 256;
+ *Independent64ByteBlock = false;
+ MaximumDCCCompressionSurface = 4.0;
+ } else if (Request == REQ_128BytesContiguous) {
+ *MaxUncompressedBlock = 128;
+ *MaxCompressedBlock = 128;
+ *Independent64ByteBlock = false;
+ MaximumDCCCompressionSurface = 2.0;
+ } else if (Request == REQ_128BytesNonContiguous) {
+ *MaxUncompressedBlock = 256;
+ *MaxCompressedBlock = 64;
+ *Independent64ByteBlock = true;
+ MaximumDCCCompressionSurface = 4.0;
+ } else {
+ *MaxUncompressedBlock = 0;
+ *MaxCompressedBlock = 0;
+		*Independent64ByteBlock = false;
+ MaximumDCCCompressionSurface = 0.0;
+ }
+
+ return MaximumDCCCompressionSurface;
+}
+
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath)
+{
+ unsigned int MaxPartialSwath;
+
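+	// VInitPreFill = floor((VRatio + taps + 1) / 2); interlaced sources add a further
+	// 0.5 * VRatio to the numerator when the progressive-to-interlace conversion is
+	// not done in the OPP.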
+ if (ProgressiveToInterlaceUnitInOPP)
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
+ else
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
+
+ if (!mode_lib->vba.IgnoreViewportPositioning) {
+
+ *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
+ % SwathHeight;
+ MaxPartialSwath = dml_max(1U, MaxPartialSwath);
+
+ } else {
+
+ if (ViewportYStart != 0)
+ dml_print(
+ "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
+
+ *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
+ % SwathHeight;
+ }
+
+ return *MaxNumSwath * SwathHeight + MaxPartialSwath;
+}
+
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidth,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ unsigned int HostVMMaxPageTableLevels,
+ unsigned int HostVMCachedPageTableLevels,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_width_ub,
+ unsigned int *dpte_row_height,
+ unsigned int *MetaRequestWidth,
+ unsigned int *MetaRequestHeight,
+ unsigned int *meta_row_width,
+ unsigned int *meta_row_height,
+ unsigned int *vm_group_bytes,
+ unsigned int *dpte_group_bytes,
+ unsigned int *PixelPTEReqWidth,
+ unsigned int *PixelPTEReqHeight,
+ unsigned int *PTERequestSize,
+ unsigned int *DPDE0BytesFrame,
+ unsigned int *MetaPTEBytesFrame)
+{
+ unsigned int MPDEBytesFrame;
+ unsigned int DCCMetaSurfaceBytes;
+ unsigned int MacroTileSizeBytes;
+ unsigned int MacroTileHeight;
+ unsigned int ExtraDPDEBytesFrame;
+ unsigned int PDEAndMetaPTEBytesFrame;
+ unsigned int PixelPTEReqHeightPTEs;
+
+ if (DCCEnable == true) {
+ *MetaRequestHeight = 8 * BlockHeight256Bytes;
+ *MetaRequestWidth = 8 * BlockWidth256Bytes;
+ if (ScanDirection == dm_horz) {
+ *meta_row_height = *MetaRequestHeight;
+ *meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestWidth)
+ + *MetaRequestWidth;
+ *MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
+ } else {
+ *meta_row_height = *MetaRequestWidth;
+ *meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestHeight)
+ + *MetaRequestHeight;
+ *MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
+ }
+ if (ScanDirection == dm_horz) {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ } else {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(
+ (double) ViewportHeight - 1,
+ 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ }
+ if (GPUVMEnable == true) {
+ *MetaPTEBytesFrame = (dml_ceil(
+ (double) (DCCMetaSurfaceBytes - VMMPageSize)
+ / (8 * VMMPageSize),
+ 1) + 1) * 64;
+ MPDEBytesFrame = 128 * ((mode_lib->vba.GPUVMMaxPageTableLevels + 1) * (mode_lib->vba.HostVMMaxPageTableLevels + 1) - 2);
+ } else {
+ *MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ }
+ } else {
+ *MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ *MetaRowByte = 0;
+ }
+
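+	// Macro tile geometry by swizzle class: linear and thin surfaces use the bare
+	// 256-byte block, 4KB swizzles a 4 KiB tile (4x the 256-byte block height),
+	// 64KB swizzles a 64 KiB tile (16x) and everything else a 256 KiB tile (32x).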
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
+ MacroTileSizeBytes = 256;
+ MacroTileHeight = BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
+ || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
+ MacroTileSizeBytes = 4096;
+ MacroTileHeight = 4 * BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
+ || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
+ || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
+ || SurfaceTiling == dm_sw_64kb_r_x) {
+ MacroTileSizeBytes = 65536;
+ MacroTileHeight = 16 * BlockHeight256Bytes;
+ } else {
+ MacroTileSizeBytes = 262144;
+ MacroTileHeight = 32 * BlockHeight256Bytes;
+ }
+ *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
+
+ if (GPUVMEnable == true && (mode_lib->vba.GPUVMMaxPageTableLevels + 1) * (mode_lib->vba.HostVMMaxPageTableLevels + 1) > 2) {
+ if (ScanDirection == dm_horz) {
+ *DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
+ } else {
+ *DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil((double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
+ }
+ ExtraDPDEBytesFrame = 128 * ((mode_lib->vba.GPUVMMaxPageTableLevels + 1) * (mode_lib->vba.HostVMMaxPageTableLevels + 1) - 3);
+ } else {
+ *DPDE0BytesFrame = 0;
+ ExtraDPDEBytesFrame = 0;
+ }
+
+ PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame
+ + ExtraDPDEBytesFrame;
+
+ if (HostVMEnable == true) {
+ PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * (HostVMMaxPageTableLevels - HostVMCachedPageTableLevels));
+ }
+
+ if (GPUVMEnable == true) {
+ double FractionOfPTEReturnDrop;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ PixelPTEReqHeightPTEs = 1;
+ *PixelPTEReqHeight = 1;
+ *PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
+ *PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ } else if (MacroTileSizeBytes == 4096) {
+ PixelPTEReqHeightPTEs = 1;
+ *PixelPTEReqHeight = MacroTileHeight;
+ *PixelPTEReqWidth = 8 * *MacroTileWidth;
+ *PTERequestSize = 64;
+ if (ScanDirection == dm_horz)
+ FractionOfPTEReturnDrop = 0;
+ else
+				FractionOfPTEReturnDrop = 7.0 / 8.0;
+ } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
+ PixelPTEReqHeightPTEs = 16;
+ *PixelPTEReqHeight = 16 * BlockHeight256Bytes;
+ *PixelPTEReqWidth = 16 * BlockWidth256Bytes;
+ *PTERequestSize = 128;
+ FractionOfPTEReturnDrop = 0;
+ } else {
+ PixelPTEReqHeightPTEs = 1;
+ *PixelPTEReqHeight = MacroTileHeight;
+ *PixelPTEReqWidth = 8 * *MacroTileWidth;
+ *PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ *dpte_row_height = dml_min(128,
+ 1 << (unsigned int) dml_floor(
+ dml_log2(
+ (double) PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch),
+ 1));
+ *dpte_row_width_ub = (dml_ceil((double) (Pitch * *dpte_row_height - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
+ *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
+ } else if (ScanDirection == dm_horz) {
+ *dpte_row_height = *PixelPTEReqHeight;
+ *dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
+ *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
+ } else {
+ *dpte_row_height = dml_min(*PixelPTEReqWidth, *MacroTileWidth);
+ *dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1) * *PixelPTEReqHeight;
+ *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
+ }
+ if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
+ <= 64 * PTEBufferSizeInRequests) {
+ *PTEBufferSizeNotExceeded = true;
+ } else {
+ *PTEBufferSizeNotExceeded = false;
+ }
+ } else {
+ *PixelPTEBytesPerRow = 0;
+ *PTEBufferSizeNotExceeded = true;
+ }
+ dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %d\n", *MetaPTEBytesFrame);
+
+ if (HostVMEnable == true) {
+ *PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * (HostVMMaxPageTableLevels - HostVMCachedPageTableLevels));
+ }
+
+ if (HostVMEnable == true) {
+ *vm_group_bytes = 512;
+ *dpte_group_bytes = 512;
+ } else if (GPUVMEnable == true) {
+ *vm_group_bytes = 2048;
+ if (SurfaceTiling != dm_sw_linear && PixelPTEReqHeightPTEs == 1 && ScanDirection != dm_horz) {
+ *dpte_group_bytes = 512;
+ } else {
+ *dpte_group_bytes = 2048;
+ }
+ } else {
+ *vm_group_bytes = 0;
+ *dpte_group_bytes = 0;
+ }
+
+ return PDEAndMetaPTEBytesFrame;
+}
+
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib)
+{
+ struct vba_vars_st *locals = &mode_lib->vba;
+ unsigned int j, k;
+
+ mode_lib->vba.WritebackDISPCLK = 0.0;
+ mode_lib->vba.DISPCLKWithRamping = 0;
+ mode_lib->vba.DISPCLKWithoutRamping = 0;
+ mode_lib->vba.GlobalDPPCLK = 0.0;
+
+ // DISPCLK and DPPCLK Calculation
+ //
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k]) {
+ mode_lib->vba.WritebackDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ locals->PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1));
+ } else {
+ locals->PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / locals->PSCL_THROUGHPUT_LUMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPLuma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ locals->PSCL_THROUGHPUT_CHROMA[k] = 0.0;
+ locals->DPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma;
+ } else {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ locals->PSCL_THROUGHPUT_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ locals->PSCL_THROUGHPUT_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4
+ / locals->PSCL_THROUGHPUT_CHROMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPChroma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
+ * mode_lib->vba.PixelClock[k];
+ }
+
+ locals->DPPCLKUsingSingleDPP[k] = dml_max(
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma,
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma);
+ }
+ }
+
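+	// Required DISPCLK per timing: the pixel clock (halved when ODM combine splits the
+	// timing across two pipes), inflated by the DISPCLK/DPPCLK/DSCCLK downspread margin
+	// and, for the ramping variant, by the DISPCLK ramping margin as well.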
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k)
+ continue;
+ if (mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ }
+ }
+
+ mode_lib->vba.DISPCLKWithRamping = dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.WritebackDISPCLK);
+ mode_lib->vba.DISPCLKWithoutRamping = dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.WritebackDISPCLK);
+
+ ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states - 1].dispclk_mhz,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
+ } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
+ } else {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
+ }
+ DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = locals->DPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.DPPPerPlane[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ mode_lib->vba.GlobalDPPCLK = dml_max(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DPPCLK_calculated[k]);
+ }
+ mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
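+	// Snap each plane's DPPCLK up to the next 1/255th of the rounded global DPPCLK
+	// (presumably so the per-DPP ratio fits an 8-bit divider setting).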
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
+ * dml_ceil(
+ mode_lib->vba.DPPCLK_calculated[k] * 255
+ / mode_lib->vba.GlobalDPPCLK,
+ 1);
+ DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
+ }
+
+ // Urgent and B P-State/DRAM Clock Change Watermark
+ DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
+ DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
+ DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz)
+ locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
+ else
+ locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
+ MainPlaneDoesODMCombine = true;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
+ MainPlaneDoesODMCombine = true;
+
+ if (MainPlaneDoesODMCombine == true)
+ locals->SwathWidthY[k] = dml_min(
+ (double) locals->SwathWidthSingleDPPY[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]));
+ else
+ locals->SwathWidthY[k] = locals->SwathWidthSingleDPPY[k]
+ / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ locals->BytePerPixelDETY[k] = 8;
+ locals->BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ locals->BytePerPixelDETY[k] = 4;
+ locals->BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
+ locals->BytePerPixelDETY[k] = 2;
+ locals->BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
+ locals->BytePerPixelDETY[k] = 1;
+ locals->BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ locals->BytePerPixelDETY[k] = 1;
+ locals->BytePerPixelDETC[k] = 2;
+ } else { // dm_420_10
+ locals->BytePerPixelDETY[k] = 4.0 / 3.0;
+ locals->BytePerPixelDETC[k] = 8.0 / 3.0;
+ }
+ }
+
+ mode_lib->vba.TotalDataReadBandwidth = 0.0;
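+	// Active read bandwidth per plane: single-DPP swath width times bytes per pixel over
+	// the line time, scaled by VRatio; chroma uses half the width, half the VRatio and
+	// its own bytes per pixel.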
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ locals->ReadBandwidthPlaneLuma[k] = locals->SwathWidthSingleDPPY[k]
+ * dml_ceil(locals->BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ locals->ReadBandwidthPlaneChroma[k] = locals->SwathWidthSingleDPPY[k]
+ / 2 * dml_ceil(locals->BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k] / 2;
+ DTRACE(
+ " read_bw[%i] = %fBps",
+ k,
+ locals->ReadBandwidthPlaneLuma[k]
+ + locals->ReadBandwidthPlaneChroma[k]);
+ mode_lib->vba.TotalDataReadBandwidth += locals->ReadBandwidthPlaneLuma[k]
+ + locals->ReadBandwidthPlaneChroma[k];
+ }
+
+ // DCFCLK Deep Sleep
+ CalculateDCFCLKDeepSleep(
+ mode_lib,
+ mode_lib->vba.NumberOfActivePlanes,
+ locals->BytePerPixelDETY,
+ locals->BytePerPixelDETC,
+ mode_lib->vba.VRatio,
+ locals->SwathWidthY,
+ mode_lib->vba.DPPPerPlane,
+ mode_lib->vba.HRatio,
+ mode_lib->vba.PixelClock,
+ locals->PSCL_THROUGHPUT_LUMA,
+ locals->PSCL_THROUGHPUT_CHROMA,
+ locals->DPPCLK,
+ &mode_lib->vba.DCFCLKDeepSleep);
+
+ // DSCCLK
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
+ locals->DSCCLK_calculated[k] = 0.0;
+ } else {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k] == dm_n422)
+ mode_lib->vba.DSCFormatFactor = 2;
+ else
+ mode_lib->vba.DSCFormatFactor = 1;
+ if (mode_lib->vba.ODMCombineEnabled[k])
+ locals->DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 6
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ else
+ locals->DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 3
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ }
+ }
+
+ // DSC Delay
+ // TODO
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double bpp = mode_lib->vba.OutputBpp[k];
+ unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
+
+ if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
+ if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ locals->DSCDelay[k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ locals->DSCDelay[k] =
+ 2
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices / 2.0,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ locals->DSCDelay[k] = locals->DSCDelay[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ locals->DSCDelay[k] = 0;
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
+ if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.DSCEnabled[j])
+ locals->DSCDelay[k] = locals->DSCDelay[j];
+
+ // Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PixelPTEBytesPerRowY;
+ unsigned int MetaRowByteY;
+ unsigned int MetaRowByteC;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int PixelPTEBytesPerRowC;
+ bool PTEBufferSizeNotExceededY;
+ bool PTEBufferSizeNotExceededC;
+
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(locals->BytePerPixelDETY[k], 1),
+ dml_ceil(locals->BytePerPixelDETC[k], 2),
+ &locals->BlockHeight256BytesY[k],
+ &locals->BlockHeight256BytesC[k],
+ &locals->BlockWidth256BytesY[k],
+ &locals->BlockWidth256BytesC[k]);
+
+ locals->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.ViewportYStartY[k],
+ &locals->VInitPreFillY[k],
+ &locals->MaxNumSwathY[k]);
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
+ PDEAndMetaPTEBytesFrameC =
+ CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ locals->BlockHeight256BytesC[k],
+ locals->BlockWidth256BytesC[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(
+ locals->BytePerPixelDETC[k],
+ 2),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2,
+ mode_lib->vba.ViewportHeight[k] / 2,
+ locals->SwathWidthY[k] / 2,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequestsChroma,
+ mode_lib->vba.PitchC[k],
+ mode_lib->vba.DCCMetaPitchC[k],
+ &locals->MacroTileWidthC[k],
+ &MetaRowByteC,
+ &PixelPTEBytesPerRowC,
+ &PTEBufferSizeNotExceededC,
+ &locals->dpte_row_width_chroma_ub[k],
+ &locals->dpte_row_height_chroma[k],
+ &locals->meta_req_width_chroma[k],
+ &locals->meta_req_height_chroma[k],
+ &locals->meta_row_width_chroma[k],
+ &locals->meta_row_height_chroma[k],
+ &locals->vm_group_bytes_chroma,
+ &locals->dpte_group_bytes_chroma,
+ &locals->PixelPTEReqWidthC[k],
+ &locals->PixelPTEReqHeightC[k],
+ &locals->PTERequestSizeC[k],
+ &locals->dpde0_bytes_per_frame_ub_c[k],
+ &locals->meta_pte_bytes_per_frame_ub_c[k]);
+
+ locals->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightC[k],
+ mode_lib->vba.ViewportYStartC[k],
+ &locals->VInitPreFillC[k],
+ &locals->MaxNumSwathC[k]);
+ } else {
+ PixelPTEBytesPerRowC = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC = 0;
+ locals->MaxNumSwathC[k] = 0;
+ locals->PrefetchSourceLinesC[k] = 0;
+ locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
+ }
+
+ PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ locals->BlockHeight256BytesY[k],
+ locals->BlockWidth256BytesY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(locals->BytePerPixelDETY[k], 1),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ locals->SwathWidthY[k],
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels,
+ mode_lib->vba.VMMPageSize,
+ locals->PTEBufferSizeInRequestsForLuma,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &locals->MacroTileWidthY[k],
+ &MetaRowByteY,
+ &PixelPTEBytesPerRowY,
+ &PTEBufferSizeNotExceededY,
+ &locals->dpte_row_width_luma_ub[k],
+ &locals->dpte_row_height[k],
+ &locals->meta_req_width[k],
+ &locals->meta_req_height[k],
+ &locals->meta_row_width[k],
+ &locals->meta_row_height[k],
+ &locals->vm_group_bytes[k],
+ &locals->dpte_group_bytes[k],
+ &locals->PixelPTEReqWidthY[k],
+ &locals->PixelPTEReqHeightY[k],
+ &locals->PTERequestSizeY[k],
+ &locals->dpde0_bytes_per_frame_ub_l[k],
+ &locals->meta_pte_bytes_per_frame_ub_l[k]);
+
+ locals->PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
+ locals->PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ + PDEAndMetaPTEBytesFrameC;
+ locals->MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ MetaRowByteY,
+ MetaRowByteC,
+ locals->meta_row_height[k],
+ locals->meta_row_height_chroma[k],
+ PixelPTEBytesPerRowY,
+ PixelPTEBytesPerRowC,
+ locals->dpte_row_height[k],
+ locals->dpte_row_height_chroma[k],
+ &locals->meta_row_bw[k],
+ &locals->dpte_row_bw[k]);
+ }
+
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ mode_lib->vba.TotalActiveDPP = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ }
+
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel = dml_max3(
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly);
+
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.UrgentExtraLatency = CalculateExtraLatency(
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency,
+ mode_lib->vba.TotalActiveDPP,
+ mode_lib->vba.PixelChunkSizeInKByte,
+ mode_lib->vba.TotalDCCActiveDPP,
+ mode_lib->vba.MetaChunkSize,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.NumberOfActivePlanes,
+ mode_lib->vba.DPPPerPlane,
+ locals->dpte_group_bytes,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels);
+
+ mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.DISPCLK;
+ } else
+ locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j] == true) {
+ locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ dml_max(
+ locals->WritebackDelay[mode_lib->vba.VoltageLevel][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.DISPCLK);
+ }
+ }
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j)
+ locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ locals->WritebackDelay[mode_lib->vba.VoltageLevel][j];
+
+ mode_lib->vba.VStartupLines = 13;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ locals->MaxVStartupLines[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1));
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ locals->MaximumMaxVStartupLines = dml_max(locals->MaximumMaxVStartupLines, locals->MaxVStartupLines[k]);
+
+ // We don't really care to iterate between the various prefetch modes
+ //mode_lib->vba.PrefetchERROR = CalculateMinAndMaxPrefetchMode(mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &mode_lib->vba.MinPrefetchMode, &mode_lib->vba.MaxPrefetchMode);
+ mode_lib->vba.UrgentLatency = dml_max3(mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.UrgentLatencyPixelMixedWithVMData, mode_lib->vba.UrgentLatencyVMDataOnly);
+
+ do {
+ double MaxTotalRDBandwidth = 0;
+ double MaxTotalRDBandwidthNoUrgentBurst = 0;
+ bool DestinationLineTimesForPrefetchLessThan2 = false;
+ bool VRatioPrefetchMoreThan4 = false;
+ double TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ Pipe myPipe;
+ HostVM myHostVM;
+
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ locals->SwathWidthY[k],
+ dml_ceil(
+ locals->BytePerPixelDETY[k],
+ 1),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
+ }
+
+ myPipe.DPPCLK = locals->DPPCLK[k];
+ myPipe.DISPCLK = mode_lib->vba.DISPCLK;
+ myPipe.PixelClock = mode_lib->vba.PixelClock[k];
+ myPipe.DCFCLKDeepSleep = mode_lib->vba.DCFCLKDeepSleep;
+ myPipe.DPPPerPlane = mode_lib->vba.DPPPerPlane[k];
+ myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
+ myPipe.SourceScan = mode_lib->vba.SourceScan[k];
+ myPipe.BlockWidth256BytesY = locals->BlockWidth256BytesY[k];
+ myPipe.BlockHeight256BytesY = locals->BlockHeight256BytesY[k];
+ myPipe.BlockWidth256BytesC = locals->BlockWidth256BytesC[k];
+ myPipe.BlockHeight256BytesC = locals->BlockHeight256BytesC[k];
+ myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
+ myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
+ myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
+ myPipe.HTotal = mode_lib->vba.HTotal[k];
+
+ myHostVM.Enable = mode_lib->vba.HostVMEnable;
+ myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
+ myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
+
+ mode_lib->vba.ErrorResult[k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ &myPipe,
+ locals->DSCDelay[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ (unsigned int) (locals->SwathWidthY[k]
+ / mode_lib->vba.HRatio[k]),
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(mode_lib->vba.VStartupLines, locals->MaxVStartupLines[k]),
+ locals->MaxVStartupLines[k],
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ &myHostVM,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.TCalc,
+ locals->PDEAndMetaPTEBytesFrame[k],
+ locals->MetaRowByte[k],
+ locals->PixelPTEBytesPerRow[k],
+ locals->PrefetchSourceLinesY[k],
+ locals->SwathWidthY[k],
+ locals->BytePerPixelDETY[k],
+ locals->VInitPreFillY[k],
+ locals->MaxNumSwathY[k],
+ locals->PrefetchSourceLinesC[k],
+ locals->BytePerPixelDETC[k],
+ locals->VInitPreFillC[k],
+ locals->MaxNumSwathC[k],
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &locals->DSTXAfterScaler[k],
+ &locals->DSTYAfterScaler[k],
+ &locals->DestinationLinesForPrefetch[k],
+ &locals->PrefetchBandwidth[k],
+ &locals->DestinationLinesToRequestVMInVBlank[k],
+ &locals->DestinationLinesToRequestRowInVBlank[k],
+ &locals->VRatioPrefetchY[k],
+ &locals->VRatioPrefetchC[k],
+ &locals->RequiredPrefetchPixDataBWLuma[k],
+ &locals->RequiredPrefetchPixDataBWChroma[k],
+ &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &locals->Tno_bw[k],
+ &locals->prefetch_vmrow_bw[k],
+ &locals->swath_width_luma_ub[k],
+ &locals->swath_width_chroma_ub[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ locals->VStartup[k] = dml_min(
+ mode_lib->vba.VStartupLines,
+ locals->MaxVStartupLines[k]);
+ if (locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
+ != 0) {
+ locals->VStartup[k] =
+ locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+ }
+ } else {
+ locals->VStartup[k] =
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ locals->MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
+ }
+ }
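+		/* The pass above fixes the prefetch schedule and the VStartup choice per pipe
+		 * (slave pipes follow their BlendingAndTiming master); the pass below adds the
+		 * cursor and urgent-burst bandwidth terms for this VStartup candidate. */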
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int m;
+
+ locals->cursor_bw[k] = 0;
+ locals->cursor_bw_pre[k] = 0;
+ for (m = 0; m < mode_lib->vba.NumberOfCursors[k]; m++) {
+ locals->cursor_bw[k] += mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m] / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
+ locals->cursor_bw_pre[k] += mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m] / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * locals->VRatioPrefetchY[k];
+ }
+
+ CalculateUrgentBurstFactor(
+ mode_lib->vba.DETBufferSizeInKByte,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ locals->SwathWidthY[k],
+ mode_lib->vba.HTotal[k] /
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.CursorBufferSize,
+ mode_lib->vba.CursorWidth[k][0] + mode_lib->vba.CursorWidth[k][1],
+ dml_max(mode_lib->vba.CursorBPP[k][0], mode_lib->vba.CursorBPP[k][1]),
+ mode_lib->vba.VRatio[k],
+ locals->VRatioPrefetchY[k],
+ locals->VRatioPrefetchC[k],
+ locals->BytePerPixelDETY[k],
+ locals->BytePerPixelDETC[k],
+ &locals->UrgentBurstFactorCursor[k],
+ &locals->UrgentBurstFactorCursorPre[k],
+ &locals->UrgentBurstFactorLuma[k],
+ &locals->UrgentBurstFactorLumaPre[k],
+ &locals->UrgentBurstFactorChroma[k],
+ &locals->UrgentBurstFactorChromaPre[k],
+ &locals->NotEnoughUrgentLatencyHiding,
+ &locals->NotEnoughUrgentLatencyHidingPre);
+
+ if (mode_lib->vba.UseUrgentBurstBandwidth == false) {
+ locals->UrgentBurstFactorLuma[k] = 1;
+ locals->UrgentBurstFactorChroma[k] = 1;
+ locals->UrgentBurstFactorCursor[k] = 1;
+ locals->UrgentBurstFactorLumaPre[k] = 1;
+ locals->UrgentBurstFactorChromaPre[k] = 1;
+ locals->UrgentBurstFactorCursorPre[k] = 1;
+ }
+
+ MaxTotalRDBandwidth = MaxTotalRDBandwidth +
+ dml_max3(locals->prefetch_vmrow_bw[k],
+ locals->ReadBandwidthPlaneLuma[k] * locals->UrgentBurstFactorLuma[k]
+ + locals->ReadBandwidthPlaneChroma[k] * locals->UrgentBurstFactorChroma[k] + locals->cursor_bw[k]
+ * locals->UrgentBurstFactorCursor[k] + locals->meta_row_bw[k] + locals->dpte_row_bw[k],
+ locals->RequiredPrefetchPixDataBWLuma[k] * locals->UrgentBurstFactorLumaPre[k] + locals->RequiredPrefetchPixDataBWChroma[k]
+ * locals->UrgentBurstFactorChromaPre[k] + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
+
+ MaxTotalRDBandwidthNoUrgentBurst = MaxTotalRDBandwidthNoUrgentBurst +
+ dml_max3(locals->prefetch_vmrow_bw[k],
+ locals->ReadBandwidthPlaneLuma[k] + locals->ReadBandwidthPlaneChroma[k] + locals->cursor_bw[k]
+ + locals->meta_row_bw[k] + locals->dpte_row_bw[k],
+ locals->RequiredPrefetchPixDataBWLuma[k] + locals->RequiredPrefetchPixDataBWChroma[k] + locals->cursor_bw_pre[k]);
+
+ if (locals->DestinationLinesForPrefetch[k] < 2)
+ DestinationLineTimesForPrefetchLessThan2 = true;
+ if (locals->VRatioPrefetchY[k] > 4 || locals->VRatioPrefetchC[k] > 4)
+ VRatioPrefetchMoreThan4 = true;
+ }
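+		/* MaxTotalRDBandwidth takes, per plane, the worst of the VM/row prefetch bandwidth,
+		 * the active read bandwidth with urgent-burst factors applied, and the prefetch pixel
+		 * bandwidth; the NoUrgentBurst variant drops the burst factors and feeds
+		 * FractionOfUrgentBandwidth below. */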
+ mode_lib->vba.FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / mode_lib->vba.ReturnBW;
+
+ if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && locals->NotEnoughUrgentLatencyHiding == 0 && locals->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4
+ && !DestinationLineTimesForPrefetchLessThan2)
+ mode_lib->vba.PrefetchModeSupported = true;
+ else {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
+ }
+
+ if (mode_lib->vba.PrefetchModeSupported == true) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - dml_max(
+ locals->ReadBandwidthPlaneLuma[k] * locals->UrgentBurstFactorLuma[k]
+ + locals->ReadBandwidthPlaneChroma[k] * locals->UrgentBurstFactorChroma[k]
+ + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
+ locals->RequiredPrefetchPixDataBWLuma[k] * locals->UrgentBurstFactorLumaPre[k] +
+ locals->RequiredPrefetchPixDataBWChroma[k] * locals->UrgentBurstFactorChromaPre[k] +
+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
+ }
+
+ mode_lib->vba.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes + locals->PDEAndMetaPTEBytesFrame[k] + locals->MetaRowByte[k] + locals->PixelPTEBytesPerRow[k];
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ locals->PDEAndMetaPTEBytesFrame[k],
+ locals->MetaRowByte[k],
+ locals->PixelPTEBytesPerRow[k],
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ locals->Tno_bw[k],
+ mode_lib->vba.DCCEnable[k],
+ locals->dpte_row_height[k],
+ locals->meta_row_height[k],
+ locals->dpte_row_height_chroma[k],
+ locals->meta_row_height_chroma[k],
+ &locals->DestinationLinesToRequestVMInImmediateFlip[k],
+ &locals->DestinationLinesToRequestRowInImmediateFlip[k],
+ &locals->final_flip_bw[k],
+ &locals->ImmediateFlipSupportedForPipe[k]);
+ }
+ mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
+ mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.total_dcn_read_bw_with_flip =
+ mode_lib->vba.total_dcn_read_bw_with_flip + dml_max3(
+ locals->prefetch_vmrow_bw[k],
+ locals->final_flip_bw[k] + locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
+ + locals->ReadBandwidthChroma[k] * locals->UrgentBurstFactorChroma[k] + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
+ locals->final_flip_bw[k] + locals->RequiredPrefetchPixDataBWLuma[k] * locals->UrgentBurstFactorLumaPre[k]
+ + locals->RequiredPrefetchPixDataBWChroma[k] * locals->UrgentBurstFactorChromaPre[k]
+ + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
+ mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst =
+ mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst +
+ dml_max3(locals->prefetch_vmrow_bw[k],
+ locals->final_flip_bw[k] + locals->ReadBandwidthPlaneLuma[k] + locals->ReadBandwidthPlaneChroma[k] + locals->cursor_bw[k],
+ locals->final_flip_bw[k] + locals->RequiredPrefetchPixDataBWLuma[k] + locals->RequiredPrefetchPixDataBWChroma[k] + locals->cursor_bw_pre[k]);
+
+ }
+ mode_lib->vba.FractionOfUrgentBandwidthImmediateFlip = mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.ImmediateFlipSupported = true;
+ if (mode_lib->vba.total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (locals->ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ErrorResult[k]) {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
+ }
+ }
+
+ mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
+ } while (!((mode_lib->vba.PrefetchModeSupported
+ && ((!mode_lib->vba.ImmediateFlipSupport && !mode_lib->vba.HostVMEnable)
+ || mode_lib->vba.ImmediateFlipSupported))
+ || locals->MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
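+	/* Each iteration above retries with one more VStartup line until the prefetch schedule
+	 * (and, where immediate flips must be honoured, flip support) converges, or until
+	 * MaximumMaxVStartupLines is exhausted. */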
+
+	// Watermarks and NB P-State/DRAM Clock Change Support
+ {
+ enum clock_change_support DRAMClockChangeSupport; // dummy
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
+ mode_lib->vba.NumberOfActivePlanes,
+ mode_lib->vba.MaxLineBufferLines,
+ mode_lib->vba.LineBufferSize,
+ mode_lib->vba.DPPOutputBufferPixels,
+ mode_lib->vba.DETBufferSizeInKByte,
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ mode_lib->vba.WritebackInterfaceChromaBufferSize,
+ mode_lib->vba.DCFCLK,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.GPUVMEnable,
+ locals->dpte_group_bytes,
+ mode_lib->vba.MetaChunkSize,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.WritebackLatency,
+ mode_lib->vba.WritebackChunkSize,
+ mode_lib->vba.SOCCLK,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.SRExitTime,
+ mode_lib->vba.SREnterPlusExitTime,
+ mode_lib->vba.DCFCLKDeepSleep,
+ mode_lib->vba.DPPPerPlane,
+ mode_lib->vba.DCCEnable,
+ locals->DPPCLK,
+ locals->SwathWidthSingleDPPY,
+ mode_lib->vba.SwathHeightY,
+ locals->ReadBandwidthPlaneLuma,
+ mode_lib->vba.SwathHeightC,
+ locals->ReadBandwidthPlaneChroma,
+ mode_lib->vba.LBBitPerPixel,
+ locals->SwathWidthY,
+ mode_lib->vba.HRatio,
+ mode_lib->vba.vtaps,
+ mode_lib->vba.VTAPsChroma,
+ mode_lib->vba.VRatio,
+ mode_lib->vba.HTotal,
+ mode_lib->vba.PixelClock,
+ mode_lib->vba.BlendingAndTiming,
+ locals->BytePerPixelDETY,
+ locals->BytePerPixelDETC,
+ mode_lib->vba.WritebackEnable,
+ mode_lib->vba.WritebackPixelFormat,
+ mode_lib->vba.WritebackDestinationWidth,
+ mode_lib->vba.WritebackDestinationHeight,
+ mode_lib->vba.WritebackSourceHeight,
+ &DRAMClockChangeSupport,
+ &mode_lib->vba.UrgentWatermark,
+ &mode_lib->vba.WritebackUrgentWatermark,
+ &mode_lib->vba.DRAMClockChangeWatermark,
+ &mode_lib->vba.WritebackDRAMClockChangeWatermark,
+ &mode_lib->vba.StutterExitWatermark,
+ &mode_lib->vba.StutterEnterPlusExitWatermark,
+ &mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
+ }
+
+
+	// Display Pipeline Delivery Time in Prefetch, Groups
+ CalculatePixelDeliveryTimes(
+ mode_lib->vba.NumberOfActivePlanes,
+ mode_lib->vba.VRatio,
+ locals->VRatioPrefetchY,
+ locals->VRatioPrefetchC,
+ locals->swath_width_luma_ub,
+ locals->swath_width_chroma_ub,
+ mode_lib->vba.DPPPerPlane,
+ mode_lib->vba.HRatio,
+ mode_lib->vba.PixelClock,
+ locals->PSCL_THROUGHPUT_LUMA,
+ locals->PSCL_THROUGHPUT_CHROMA,
+ locals->DPPCLK,
+ locals->BytePerPixelDETC,
+ mode_lib->vba.SourceScan,
+ locals->BlockWidth256BytesY,
+ locals->BlockHeight256BytesY,
+ locals->BlockWidth256BytesC,
+ locals->BlockHeight256BytesC,
+ locals->DisplayPipeLineDeliveryTimeLuma,
+ locals->DisplayPipeLineDeliveryTimeChroma,
+ locals->DisplayPipeLineDeliveryTimeLumaPrefetch,
+ locals->DisplayPipeLineDeliveryTimeChromaPrefetch,
+ locals->DisplayPipeRequestDeliveryTimeLuma,
+ locals->DisplayPipeRequestDeliveryTimeChroma,
+ locals->DisplayPipeRequestDeliveryTimeLumaPrefetch,
+ locals->DisplayPipeRequestDeliveryTimeChromaPrefetch);
+
+ CalculateMetaAndPTETimes(
+ mode_lib->vba.NumberOfActivePlanes,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.MetaChunkSize,
+ mode_lib->vba.MinMetaChunkSizeBytes,
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.HTotal,
+ mode_lib->vba.VRatio,
+ locals->VRatioPrefetchY,
+ locals->VRatioPrefetchC,
+ locals->DestinationLinesToRequestRowInVBlank,
+ locals->DestinationLinesToRequestRowInImmediateFlip,
+ locals->DestinationLinesToRequestVMInVBlank,
+ locals->DestinationLinesToRequestVMInImmediateFlip,
+ mode_lib->vba.DCCEnable,
+ mode_lib->vba.PixelClock,
+ locals->BytePerPixelDETY,
+ locals->BytePerPixelDETC,
+ mode_lib->vba.SourceScan,
+ locals->dpte_row_height,
+ locals->dpte_row_height_chroma,
+ locals->meta_row_width,
+ locals->meta_row_height,
+ locals->meta_req_width,
+ locals->meta_req_height,
+ locals->dpte_group_bytes,
+ locals->PTERequestSizeY,
+ locals->PTERequestSizeC,
+ locals->PixelPTEReqWidthY,
+ locals->PixelPTEReqHeightY,
+ locals->PixelPTEReqWidthC,
+ locals->PixelPTEReqHeightC,
+ locals->dpte_row_width_luma_ub,
+ locals->dpte_row_width_chroma_ub,
+ locals->vm_group_bytes,
+ locals->dpde0_bytes_per_frame_ub_l,
+ locals->dpde0_bytes_per_frame_ub_c,
+ locals->meta_pte_bytes_per_frame_ub_l,
+ locals->meta_pte_bytes_per_frame_ub_c,
+ locals->DST_Y_PER_PTE_ROW_NOM_L,
+ locals->DST_Y_PER_PTE_ROW_NOM_C,
+ locals->DST_Y_PER_META_ROW_NOM_L,
+ locals->TimePerMetaChunkNominal,
+ locals->TimePerMetaChunkVBlank,
+ locals->TimePerMetaChunkFlip,
+ locals->time_per_pte_group_nom_luma,
+ locals->time_per_pte_group_vblank_luma,
+ locals->time_per_pte_group_flip_luma,
+ locals->time_per_pte_group_nom_chroma,
+ locals->time_per_pte_group_vblank_chroma,
+ locals->time_per_pte_group_flip_chroma,
+ locals->TimePerVMGroupVBlank,
+ locals->TimePerVMGroupFlip,
+ locals->TimePerVMRequestVBlank,
+ locals->TimePerVMRequestFlip);
+
+
+ // Min TTUVBlank
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ locals->AllowDRAMClockChangeDuringVBlank[k] = true;
+ locals->AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ locals->MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.DRAMClockChangeWatermark,
+ dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark));
+ } else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) {
+ locals->AllowDRAMClockChangeDuringVBlank[k] = false;
+ locals->AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ locals->MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark);
+ } else {
+ locals->AllowDRAMClockChangeDuringVBlank[k] = false;
+ locals->AllowDRAMSelfRefreshDuringVBlank[k] = false;
+ locals->MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
+ }
+ if (!mode_lib->vba.DynamicMetadataEnable[k])
+ locals->MinTTUVBlank[k] = mode_lib->vba.TCalc
+ + locals->MinTTUVBlank[k];
+ }
+
+ // DCC Configuration
+ mode_lib->vba.ActiveDPPs = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ locals->MaximumDCCCompressionYSurface[k] = CalculateDCCConfiguration(
+ mode_lib->vba.DCCEnable[k],
+			false, // DCCProgrammingAssumesScanDirectionUnknown: the scan direction is always known here
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.DETBufferSizeInKByte * 1024,
+ locals->BlockHeight256BytesY[k],
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SurfaceTiling[k],
+ locals->BytePerPixelDETY[k],
+ mode_lib->vba.SourceScan[k],
+ &locals->DCCYMaxUncompressedBlock[k],
+ &locals->DCCYMaxCompressedBlock[k],
+ &locals->DCCYIndependent64ByteBlock[k]);
+ }
+
+	// XFC Parameters:
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ double TWait;
+
+ locals->XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
+ locals->XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
+ locals->XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
+ TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ locals->SwathWidthY[k],
+ dml_ceil(locals->BytePerPixelDETY[k], 1),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ locals->XFCRemoteSurfaceFlipLatency[k] =
+ dml_floor(
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ locals->XFCTransferDelay[k] =
+ dml_ceil(
+ mode_lib->vba.XFCBusTransportTime
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ locals->XFCPrechargeDelay[k] =
+ dml_ceil(
+ (mode_lib->vba.XFCBusTransportTime
+ + mode_lib->vba.TInitXFill
+ + mode_lib->vba.TslvChk)
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
+ * mode_lib->vba.SrcActiveDrainRate;
+ mode_lib->vba.FinalFillMargin =
+ (locals->DestinationLinesToRequestVMInVBlank[k]
+ + locals->DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.XFCFillConstant;
+ mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.FinalFillMargin;
+ mode_lib->vba.RemainingFillLevel = dml_max(
+ 0.0,
+ mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
+ mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
+ / (mode_lib->vba.SrcActiveDrainRate
+ * mode_lib->vba.XFCFillBWOverhead / 100);
+ locals->XFCPrefetchMargin[k] =
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ + mode_lib->vba.TFinalxFill
+ + (locals->DestinationLinesToRequestVMInVBlank[k]
+ + locals->DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ locals->XFCSlaveVUpdateOffset[k] = 0;
+ locals->XFCSlaveVupdateWidth[k] = 0;
+ locals->XFCSlaveVReadyOffset[k] = 0;
+ locals->XFCRemoteSurfaceFlipLatency[k] = 0;
+ locals->XFCPrechargeDelay[k] = 0;
+ locals->XFCTransferDelay[k] = 0;
+ locals->XFCPrefetchMargin[k] = 0;
+ }
+ }
+
+ // Stutter Efficiency
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ CalculateDETBufferSize(
+ mode_lib->vba.DETBufferSizeInKByte,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ &locals->DETBufferSizeY[k],
+ &locals->DETBufferSizeC[k]);
+
+ locals->LinesInDETY[k] = locals->DETBufferSizeY[k]
+ / locals->BytePerPixelDETY[k] / locals->SwathWidthY[k];
+ locals->LinesInDETYRoundedDownToSwath[k] = dml_floor(
+ locals->LinesInDETY[k],
+ mode_lib->vba.SwathHeightY[k]);
+ locals->FullDETBufferingTimeY[k] =
+ locals->LinesInDETYRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k];
+ }
+
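+	/* The stutter period is the smallest full-DET buffering time over all planes; that plane
+	 * becomes the critical plane used for the burst-time lower bound further below. */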
+ mode_lib->vba.StutterPeriod = 999999.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (locals->FullDETBufferingTimeY[k] < mode_lib->vba.StutterPeriod) {
+ mode_lib->vba.StutterPeriod = locals->FullDETBufferingTimeY[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ locals->BytePerPixelYCriticalPlane = dml_ceil(locals->BytePerPixelDETY[k], 1);
+ locals->SwathWidthYCriticalPlane = locals->SwathWidthY[k];
+ locals->LinesToFinishSwathTransferStutterCriticalPlane =
+ mode_lib->vba.SwathHeightY[k] - (locals->LinesInDETY[k] - locals->LinesInDETYRoundedDownToSwath[k]);
+ }
+ }
+
+ mode_lib->vba.AverageReadBandwidth = 0.0;
+ mode_lib->vba.TotalRowReadBandwidth = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int DCCRateLimit;
+
+ if (mode_lib->vba.DCCEnable[k]) {
+ if (locals->DCCYMaxCompressedBlock[k] == 256)
+ DCCRateLimit = 4;
+ else
+ DCCRateLimit = 2;
+
+ mode_lib->vba.AverageReadBandwidth =
+ mode_lib->vba.AverageReadBandwidth
+ + (locals->ReadBandwidthPlaneLuma[k] + locals->ReadBandwidthPlaneChroma[k]) /
+ dml_min(mode_lib->vba.DCCRate[k], DCCRateLimit);
+ } else {
+ mode_lib->vba.AverageReadBandwidth =
+ mode_lib->vba.AverageReadBandwidth
+ + locals->ReadBandwidthPlaneLuma[k]
+ + locals->ReadBandwidthPlaneChroma[k];
+ }
+ mode_lib->vba.TotalRowReadBandwidth = mode_lib->vba.TotalRowReadBandwidth +
+ locals->meta_row_bw[k] + locals->dpte_row_bw[k];
+ }
+
+ mode_lib->vba.AverageDCCCompressionRate = mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.AverageReadBandwidth;
+
+ mode_lib->vba.PartOfBurstThatFitsInROB =
+ dml_min(
+ mode_lib->vba.StutterPeriod
+ * mode_lib->vba.TotalDataReadBandwidth,
+ mode_lib->vba.ROBBufferSizeInKByte * 1024
+ * mode_lib->vba.AverageDCCCompressionRate);
+ mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
+ / mode_lib->vba.AverageDCCCompressionRate / mode_lib->vba.ReturnBW
+ + (mode_lib->vba.StutterPeriod * mode_lib->vba.TotalDataReadBandwidth
+ - mode_lib->vba.PartOfBurstThatFitsInROB)
+ / (mode_lib->vba.DCFCLK * 64)
+ + mode_lib->vba.StutterPeriod * mode_lib->vba.TotalRowReadBandwidth / mode_lib->vba.ReturnBW;
+ mode_lib->vba.StutterBurstTime = dml_max(
+ mode_lib->vba.StutterBurstTime,
+ (locals->LinesToFinishSwathTransferStutterCriticalPlane * locals->BytePerPixelYCriticalPlane *
+ locals->SwathWidthYCriticalPlane / mode_lib->vba.ReturnBW)
+ );
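+	/* StutterBurstTime above combines draining the ROB-resident part of the burst at ReturnBW
+	 * (scaled by the average DCC compression rate), the remainder at DCFCLK * 64, and the
+	 * PTE/meta row traffic over the stutter period; it is clamped to at least the time needed
+	 * to finish the critical plane's remaining swath lines at ReturnBW. */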
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ }
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback == 0) {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
+ - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
+ / mode_lib->vba.StutterPeriod) * 100;
+ } else {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
+ }
+
+ mode_lib->vba.SmallestVBlank = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.VBlankTime = 0;
+ }
+ mode_lib->vba.SmallestVBlank = dml_min(
+ mode_lib->vba.SmallestVBlank,
+ mode_lib->vba.VBlankTime);
+ }
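+	/* The overall stutter efficiency below scales StutterEfficiencyNotIncludingVBlank over the
+	 * active portion of the minimum-full-DET frame and treats the smallest VBlank interval as
+	 * fully efficient. */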
+
+ mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
+ * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
+ - mode_lib->vba.SmallestVBlank)
+ + mode_lib->vba.SmallestVBlank)
+ / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
+}
+
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+{
+ // Display Pipe Configuration
+ double BytePerPixDETY;
+ double BytePerPixDETC;
+ double Read256BytesBlockHeightY;
+ double Read256BytesBlockHeightC;
+ double Read256BytesBlockWidthY;
+ double Read256BytesBlockWidthC;
+ double MaximumSwathHeightY;
+ double MaximumSwathHeightC;
+ double MinimumSwathHeightY;
+ double MinimumSwathHeightC;
+ double SwathWidth;
+ double SwathWidthGranularityY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesY;
+ double RoundedUpMaxSwathSizeBytesC;
+ unsigned int j, k;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ BytePerPixDETY = 8;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ BytePerPixDETY = 4;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ BytePerPixDETY = 2;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 2;
+ } else {
+ BytePerPixDETY = 4.0 / 3.0;
+ BytePerPixDETC = 8.0 / 3.0;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ Read256BytesBlockHeightY = 4;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ Read256BytesBlockHeightY = 8;
+ } else {
+ Read256BytesBlockHeightY = 16;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockHeightC = 0;
+ Read256BytesBlockWidthC = 0;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ Read256BytesBlockHeightC = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ Read256BytesBlockHeightY = 16;
+ Read256BytesBlockHeightC = 8;
+ } else {
+ Read256BytesBlockHeightY = 8;
+ Read256BytesBlockHeightC = 8;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
+ / Read256BytesBlockHeightC;
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ MaximumSwathHeightY = Read256BytesBlockHeightY;
+ MaximumSwathHeightC = Read256BytesBlockHeightC;
+ } else {
+ MaximumSwathHeightY = Read256BytesBlockWidthY;
+ MaximumSwathHeightC = Read256BytesBlockWidthC;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
+ && mode_lib->vba.SourceScan[k] != dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ }
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ }
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ SwathWidth = mode_lib->vba.ViewportWidth[k];
+ } else {
+ SwathWidth = mode_lib->vba.ViewportHeight[k];
+ }
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
+ MainPlaneDoesODMCombine = true;
+ }
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
+ MainPlaneDoesODMCombine = true;
+ }
+ }
+
+ if (MainPlaneDoesODMCombine == true) {
+ SwathWidth = dml_min(
+ SwathWidth,
+ mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
+ } else {
+ SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
+ RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ (double) (SwathWidth - 1),
+ SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
+ * MaximumSwathHeightY;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ + 256;
+ }
+ if (MaximumSwathHeightC > 0) {
+ SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
+ / MaximumSwathHeightC;
+ RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ (double) (SwathWidth / 2.0 - 1),
+ SwathWidthGranularityC) + SwathWidthGranularityC)
+ * BytePerPixDETC * MaximumSwathHeightC;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ RoundedUpMaxSwathSizeBytesC,
+ 256) + 256;
+ }
+ } else
+ RoundedUpMaxSwathSizeBytesC = 0.0;
+
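+		/* If the rounded-up luma + chroma swath fits in half of the DET buffer, the maximum
+		 * swath heights are used; otherwise fall back to the minimum swath heights. */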
+ if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
+ } else {
+ mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
+ }
+
+ CalculateDETBufferSize(
+ mode_lib->vba.DETBufferSizeInKByte,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ &mode_lib->vba.DETBufferSizeY[k],
+ &mode_lib->vba.DETBufferSizeC[k]);
+ }
+}
+
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime)
+{
+ if (PrefetchMode == 0) {
+ return dml_max(
+ DRAMClockChangeLatency + UrgentLatency,
+ dml_max(SREnterPlusExitTime, UrgentLatency));
+ } else if (PrefetchMode == 1) {
+ return dml_max(SREnterPlusExitTime, UrgentLatency);
+ } else {
+ return UrgentLatency;
+ }
+}
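+
+/* Illustrative sketch of CalculateTWait above (hypothetical numbers, not from any programming
+ * guide): with PrefetchMode == 0, DRAMClockChangeLatency = 23 us, UrgentLatency = 4 us and
+ * SREnterPlusExitTime = 20 us, TWait = max(23 + 4, max(20, 4)) = 27 us; with PrefetchMode == 1
+ * the same inputs give max(20, 4) = 20 us, and otherwise TWait is just the 4 us urgent latency. */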
+
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk)
+{
+ double TSlvSetup, AvgfillRate, result;
+
+ *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+ TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
+ *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
+ AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
+ *TslvChk = XFCSlvChunkSize / AvgfillRate;
+ dml_print(
+ "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
+ *SrcActiveDrainRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
+	result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match the programming guide
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
+ return result;
+}
+
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth)
+{
+ double CalculateWriteBackDelay =
+ dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
+ WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(1.0 / WritebackVRatio, 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1) + 4));
+
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDelay =
+ dml_max(
+ CalculateWriteBackDelay,
+ dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)));
+ }
+ return CalculateWriteBackDelay;
+}
+
+static void CalculateActiveRowBandwidth(
+ bool GPUVMEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw)
+{
+ if (DCCEnable != true) {
+ *meta_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ + VRatio / 2 * MetaRowByteChroma
+ / (meta_row_height_chroma * LineTime);
+ } else {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
+ }
+
+ if (GPUVMEnable != true) {
+ *dpte_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ + VRatio / 2 * PixelPTEBytesPerRowChroma
+ / (dpte_row_height_chroma * LineTime);
+ } else {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
+ }
+}
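+
+/* CalculateActiveRowBandwidth above spreads the per-row meta and PTE bytes over one row's worth
+ * of line time, scaled by VRatio, with an additional half-rate chroma term for the 4:2:0 formats;
+ * the meta and PTE terms are zero when DCC or GPUVM, respectively, is disabled. */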
+
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int GPUVMMaxPageTableLevels,
+ bool HostVMEnable,
+ unsigned int HostVMMaxPageTableLevels,
+ unsigned int HostVMCachedPageTableLevels,
+ bool GPUVMEnable,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+ double DPTEBytesPerRow,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ double LineTime,
+ double VRatio,
+ double Tno_bw,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ unsigned int dpte_row_height_chroma,
+ unsigned int meta_row_height_chroma,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe)
+{
+ double min_row_time = 0.0;
+ unsigned int HostVMDynamicLevels;
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+ double ImmediateFlipBW;
+ double HostVMInefficiencyFactor;
+
+ if (GPUVMEnable == true && HostVMEnable == true) {
+ HostVMInefficiencyFactor =
+ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevels = HostVMMaxPageTableLevels - HostVMCachedPageTableLevels;
+ } else {
+ HostVMInefficiencyFactor = 1;
+ HostVMDynamicLevels = 0;
+ }
+
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow)
+ * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+
+ if (GPUVMEnable == true) {
+ TimeForFetchingMetaPTEImmediateFlip = dml_max3(
+ Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevels + 1) - 1),
+ LineTime / 4.0);
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max3((MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / ImmediateFlipBW, UrgentLatency * (HostVMDynamicLevels + 1), LineTime / 4);
+ } else {
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ *final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ if (GPUVMEnable == true && DCCEnable != true) {
+ min_row_time = dml_min(
+ dpte_row_height * LineTime / VRatio,
+ dpte_row_height_chroma * LineTime / (VRatio / 2));
+ } else if (GPUVMEnable != true && DCCEnable == true) {
+ min_row_time = dml_min(
+ meta_row_height * LineTime / VRatio,
+ meta_row_height_chroma * LineTime / (VRatio / 2));
+ } else {
+ min_row_time = dml_min4(
+ dpte_row_height * LineTime / VRatio,
+ meta_row_height * LineTime / VRatio,
+ dpte_row_height_chroma * LineTime / (VRatio / 2),
+ meta_row_height_chroma * LineTime / (VRatio / 2));
+ }
+ } else {
+ if (GPUVMEnable == true && DCCEnable != true) {
+ min_row_time = dpte_row_height * LineTime / VRatio;
+ } else if (GPUVMEnable != true && DCCEnable == true) {
+ min_row_time = meta_row_height * LineTime / VRatio;
+ } else {
+ min_row_time = dml_min(
+ dpte_row_height * LineTime / VRatio,
+ meta_row_height * LineTime / VRatio);
+ }
+ }
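+	/* Immediate flip is rejected when the VM or row fetch would need too many destination lines
+	 * (>= 32 or >= 16 respectively), or when the PTE fetch plus twice the row fetch cannot fit
+	 * within the minimum row time computed above. */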
+
+ if (*DestinationLinesToRequestVMInImmediateFlip >= 32
+ || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
+ *ImmediateFlipSupportedForPipe = false;
+ } else {
+ *ImmediateFlipSupportedForPipe = true;
+ }
+}
+
+static unsigned int TruncToValidBPP(
+ double DecimalBPP,
+ double DesiredBPP,
+ bool DSCEnabled,
+ enum output_encoder_class Output,
+ enum output_format_class Format,
+ unsigned int DSCInputBitPerComponent)
+{
+ if (Output == dm_hdmi) {
+ if (Format == dm_420) {
+ if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
+ else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
+ return 15;
+ else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
+ return 12;
+ else
+ return BPP_INVALID;
+ } else if (Format == dm_444) {
+ if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
+ return 36;
+ else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
+ return 30;
+ else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
+ return 24;
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
+ else
+ return BPP_INVALID;
+ } else {
+ if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
+ return 24;
+ else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
+ return 20;
+ else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
+ return 16;
+ else
+ return BPP_INVALID;
+ }
+ } else {
+ if (DSCEnabled) {
+ if (Format == dm_420) {
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 6)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 6
+ || DesiredBPP < 6
+ || DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
+ } else if (Format == dm_n422) {
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 7)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 2 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 7
+ || DesiredBPP < 7
+ || DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
+ } else {
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 8)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 3 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 8
+ || DesiredBPP < 8
+ || DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
+ }
+ } else if (Format == dm_420) {
+ if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
+ else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
+ return 15;
+ else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
+ return 12;
+ else
+ return BPP_INVALID;
+ } else if (Format == dm_s422 || Format == dm_n422) {
+ if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
+ return 24;
+ else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
+ return 20;
+ else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
+ return 16;
+ else
+ return BPP_INVALID;
+ } else {
+ if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
+ return 36;
+ else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
+ return 30;
+ else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
+ return 24;
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
+ else
+ return BPP_INVALID;
+ }
+ }
+}
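+
+/* Example of the truncation above (hypothetical link budget): an HDMI 4:4:4 output offering
+ * DecimalBPP = 27.5 with no DesiredBPP constraint truncates to 24 bpp, while the same budget in
+ * 4:2:0 truncates to 18 bpp. */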
+
+void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
+{
+ struct vba_vars_st *locals = &mode_lib->vba;
+
+ int i;
+ unsigned int j, k, m;
+
+ /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+
+ /*Scale Ratio, taps Support Check*/
+
+ mode_lib->vba.ScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ScalerEnabled[k] == false
+ && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
+ || mode_lib->vba.HRatio[k] != 1.0
+ || mode_lib->vba.htaps[k] != 1.0
+ || mode_lib->vba.VRatio[k] != 1.0
+ || mode_lib->vba.vtaps[k] != 1.0)) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
+ || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
+ || (mode_lib->vba.htaps[k] > 1.0
+ && (mode_lib->vba.htaps[k] % 2) == 1)
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
+ || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
+ && (mode_lib->vba.HRatio[k] / 2.0
+ > mode_lib->vba.HTAPsChroma[k]
+ || mode_lib->vba.VRatio[k] / 2.0
+ > mode_lib->vba.VTAPsChroma[k]))) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ }
+ }
+ /*Source Format, Pixel Format and Scan Support Check*/
+
+ mode_lib->vba.SourceFormatPixelAndScanSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ && mode_lib->vba.SourceScan[k] != dm_horz)
+ || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
+ || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10))
+ || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_l_vp)
+ && !((mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_32)
+ && mode_lib->vba.SourceScan[k]
+ == dm_horz
+ && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
+ == true
+ && mode_lib->vba.DCCEnable[k]
+ == false))
+ || (mode_lib->vba.DCCEnable[k] == true
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_linear
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10)))) {
+ mode_lib->vba.SourceFormatPixelAndScanSupport = false;
+ }
+ }
+ /*Bandwidth Support Check*/
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ locals->BytePerPixelInDETY[k] = 8.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ locals->BytePerPixelInDETY[k] = 4.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
+ locals->BytePerPixelInDETY[k] = 2.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
+ locals->BytePerPixelInDETY[k] = 1.0;
+ locals->BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ locals->BytePerPixelInDETY[k] = 1.0;
+ locals->BytePerPixelInDETC[k] = 2.0;
+ } else {
+ locals->BytePerPixelInDETY[k] = 4.0 / 3;
+ locals->BytePerPixelInDETC[k] = 8.0 / 3;
+ }
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
+ } else {
+ locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
+ }
+ }
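+	/* Per-plane read bandwidth on a single DPP: swath width * ceil(bytes per pixel) / line time
+	 * * VRatio, with the chroma term at half the width and half the VRatio. */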
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
+ locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0;
+ locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k];
+ }
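+	/* Writeback bandwidth assumes 4 bytes/pixel for dm_444_32, 3 for dm_420_10 and
+	 * 1.5 bytes/pixel otherwise, spread over the writeback source line time. */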
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 4.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 3.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true) {
+ locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 1.5;
+ } else {
+ locals->WriteBandwidth[k] = 0.0;
+ }
+ }
+ mode_lib->vba.DCCEnabledInAnyPlane = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.DCCEnabledInAnyPlane = true;
+ }
+ }
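+	/* The ideal SDP port bandwidth per state is the minimum of the return-bus, DRAM and fabric
+	 * limits; it is then derated by the pixel-data-only or mixed-with-VM efficiency factor,
+	 * depending on whether HostVM is enabled. */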
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClockPerState[i]
+ * mode_lib->vba.FabricDatapathToDCNDataReturn);
+ if (mode_lib->vba.HostVMEnable == false) {
+ locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
+ * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0;
+ } else {
+ locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
+ * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0;
+ }
+ }
+ /*Writeback Latency support check*/
+
+ mode_lib->vba.WritebackLatencySupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ if (locals->WriteBandwidth[k]
+ > (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ } else {
+ if (locals->WriteBandwidth[k]
+ > 1.5
+ * dml_min(
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ }
+ }
+ }
+ /*Re-ordering Buffer Support Check*/
+
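+	/* ROB support requires that the time to drain (ROBBufferSizeInKByte - PixelChunkSizeInKByte)
+	 * at ReturnBW exceeds the urgent round-trip plus out-of-order return latency for that state. */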
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
+ + dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly)
+ * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
+ > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
+ locals->ROBSupport[i][0] = true;
+ } else {
+ locals->ROBSupport[i][0] = false;
+ }
+ }
+ /*Writeback Mode Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveWriteback = 0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0)
+ mode_lib->vba.ActiveWritebacksPerPlane[k] = 1;
+ mode_lib->vba.TotalNumberOfActiveWriteback =
+ mode_lib->vba.TotalNumberOfActiveWriteback
+ + mode_lib->vba.ActiveWritebacksPerPlane[k];
+ }
+ }
+ mode_lib->vba.WritebackModeSupport = true;
+ if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.Writeback10bpc420Supported != true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ }
+ /*Writeback Scale Ratio and Taps Support Check*/
+
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
+ && (mode_lib->vba.WritebackHRatio[k] != 1.0
+ || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackMaxVSCLRatio
+ || mode_lib->vba.WritebackHRatio[k]
+ < mode_lib->vba.WritebackMinHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ < mode_lib->vba.WritebackMinVSCLRatio
+ || mode_lib->vba.WritebackLumaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackLumaHTaps[k]
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackLumaVTaps[k]
+ || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
+ == 1))
+ || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
+ && (mode_lib->vba.WritebackChromaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || 2.0
+ * mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackChromaHTaps[k]
+ || 2.0
+ * mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackChromaVTaps[k]
+ || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
+ mode_lib->vba.WritebackLumaVExtra =
+ dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
+ } else {
+ mode_lib->vba.WritebackLumaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ + mode_lib->vba.WritebackLineBufferChromaBufferSize)
+ / 3.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
+ mode_lib->vba.WritebackChromaVExtra = 0.0;
+ } else {
+ mode_lib->vba.WritebackChromaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ }
+ }
+ /*Maximum DISPCLK/DPPCLK Support check*/
+
+ mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackRequiredDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackRequiredDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
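+ /* Per plane: PSCL throughput factors and the minimum DPPCLK needed if the plane
+  * were handled by a single DPP (chroma taps/ratios included when the format has a
+  * chroma plane; raised to 2x the pixel clock when any tap count exceeds 6). */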
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.HRatio[k] > 1.0) {
+ locals->PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1.0));
+ } else {
+ locals->PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ if (locals->BytePerPixelInDETC[k] == 0.0) {
+ locals->PSCL_FACTOR_CHROMA[k] = 0.0;
+ locals->MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max3(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / locals->PSCL_FACTOR[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
+ && locals->MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ locals->MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ } else {
+ if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
+ locals->PSCL_FACTOR_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ locals->PSCL_FACTOR_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ locals->MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max5(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / locals->PSCL_FACTOR[k],
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2.0),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4.0
+ / locals->PSCL_FACTOR_CHROMA[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
+ || mode_lib->vba.HTAPsChroma[k] > 6.0
+ || mode_lib->vba.VTAPsChroma[k] > 6.0)
+ && locals->MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ locals->MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ }
+ }
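+ /* Per plane: 256B request block dimensions, min/max swath heights, and the
+  * maximum swath width that fits in both the DET buffer and the line buffer
+  * (capped at 8192 for linear and 5120 for tiled surfaces). */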
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
+ &locals->Read256BlockHeightY[k],
+ &locals->Read256BlockHeightC[k],
+ &locals->Read256BlockWidthY[k],
+ &locals->Read256BlockWidthC[k]);
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k];
+ locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k];
+ } else {
+ locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k];
+ locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k];
+ }
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ } else {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
+ / 2.0;
+ }
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
+ / 2.0;
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]
+ / 2.0;
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ } else {
+ locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
+ locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
+ }
+ }
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
+ } else {
+ mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
+ }
+ mode_lib->vba.MaximumSwathWidthInDETBuffer =
+ dml_min(
+ mode_lib->vba.MaximumSwathWidthSupport,
+ mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
+ / (locals->BytePerPixelInDETY[k]
+ * locals->MinSwathHeightY[k]
+ + locals->BytePerPixelInDETC[k]
+ / 2.0
+ * locals->MinSwathHeightC[k]));
+ if (locals->BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ mode_lib->vba.LineBufferSize
+ * dml_max(mode_lib->vba.HRatio[k], 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0));
+ } else {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ dml_min(
+ mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0)),
+ 2.0 * mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.VTAPsChroma[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k]
+ / 2.0,
+ 1.0)
+ - 2,
+ 0.0)));
+ }
+ locals->MaximumSwathWidth[k] = dml_min(
+ mode_lib->vba.MaximumSwathWidthInDETBuffer,
+ mode_lib->vba.MaximumSwathWidthInLineBuffer);
+ }
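+ /* For each voltage state i and MPC combine option j: choose ODM combine and the
+  * DPP split per plane, accumulate the required DISPCLK/DPPCLK, and compare them
+  * against the DFS-granularity-rounded maximum clocks (DISPCLK_DPPCLK_Support). */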
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
+ for (j = 0; j < 2; j++) {
+ mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDppclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ locals->RequiredDISPCLK[i][j] = 0.0;
+ locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i]
+ && i == mode_lib->vba.soc.num_states)
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+
+ mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
+ if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i]
+ && i == mode_lib->vba.soc.num_states)
+ mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+
+ locals->ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ if (mode_lib->vba.ODMCapability) {
+ if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ }
+ }
+
+ if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+ locals->NoOfDPP[i][j][k] = 1;
+ locals->RequiredDPPCLK[i][j][k] =
+ locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ } else {
+ locals->NoOfDPP[i][j][k] = 2;
+ locals->RequiredDPPCLK[i][j][k] =
+ locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
+ }
+ locals->RequiredDISPCLK[i][j] = dml_max(
+ locals->RequiredDISPCLK[i][j],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ locals->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ locals->TotalNumberOfActiveDPP[i][j] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
+ locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
+ if (j == 1) {
+ while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP
+ && locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) {
+ double BWOfNonSplitPlaneOfMaximumBandwidth;
+ unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
+
+ BWOfNonSplitPlaneOfMaximumBandwidth = 0;
+ NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) {
+ BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k];
+ NumberOfNonSplitPlaneOfMaximumBandwidth = k;
+ }
+ }
+ locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
+ locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
+ locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
+ locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1;
+ }
+ }
+ if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) {
+ locals->RequiredDISPCLK[i][j] = 0.0;
+ locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->ODMCombineEnablePerState[i][k] = false;
+ if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ locals->NoOfDPP[i][j][k] = 1;
+ locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ } else {
+ locals->NoOfDPP[i][j][k] = 2;
+ locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
+ }
+ if (i != mode_lib->vba.soc.num_states) {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
+ } else {
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k]
+ * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+ locals->RequiredDISPCLK[i][j] = dml_max(
+ locals->RequiredDISPCLK[i][j],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ || mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)
+ locals->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ locals->TotalNumberOfActiveDPP[i][j] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
+ locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
+ }
+ locals->RequiredDISPCLK[i][j] = dml_max(
+ locals->RequiredDISPCLK[i][j],
+ mode_lib->vba.WritebackRequiredDISPCLK);
+ if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
+ < mode_lib->vba.WritebackRequiredDISPCLK) {
+ locals->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ }
+ /*Viewport Size Check*/
+
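+ /* Viewport size check: the swath width seen by one DPP (half the single-DPP width,
+  * or at most half the active width under ODM combine) must not exceed the plane's
+  * MaximumSwathWidth. */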
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->ViewportSizeSupport[i][0] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
+ > locals->MaximumSwathWidth[k]) {
+ locals->ViewportSizeSupport[i][0] = false;
+ }
+ } else {
+ if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
+ locals->ViewportSizeSupport[i][0] = false;
+ }
+ }
+ }
+ }
+ /*Total Available Pipes Support Check*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ if (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP)
+ locals->TotalAvailablePipesSupport[i][j] = true;
+ else
+ locals->TotalAvailablePipesSupport[i][j] = false;
+ }
+ }
+ /*Total Available OTG Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ + 1.0;
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
+ mode_lib->vba.NumberOfOTGSupport = true;
+ } else {
+ mode_lib->vba.NumberOfOTGSupport = false;
+ }
+ /*Display IO and DSC Support Check*/
+
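+ /* DSC input depth must be 8, 10 or 12 bpc. Output bpp per state: HDMI is limited
+  * to a 600 MHz TMDS clock; DP/eDP step through the 270/540/810 MHz (HBR/HBR2/HBR3)
+  * link rates, switching to the DSC bpp and requiring FEC (DP only) when DSC is
+  * enabled for the stream. */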
+ mode_lib->vba.NonsupportedDSCInputBPC = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
+ mode_lib->vba.NonsupportedDSCInputBPC = true;
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = 0;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.Output[k] == dm_hdmi) {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = 0;
+ locals->OutputBppPerState[i][k] = TruncToValidBPP(
+ dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ } else if (mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp) {
+ if (mode_lib->vba.Output[k] == dm_edp) {
+ mode_lib->vba.EffectiveFECOverhead = 0.0;
+ } else {
+ mode_lib->vba.EffectiveFECOverhead =
+ mode_lib->vba.FECOverhead;
+ }
+ if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
+ mode_lib->vba.Outbpp = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ locals->RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ locals->RequiresFEC[i][k] = true;
+ } else {
+ locals->RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
+ } else {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = false;
+ }
+ locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) {
+ mode_lib->vba.Outbpp = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ locals->RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ locals->RequiresFEC[i][k] = true;
+ } else {
+ locals->RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
+ } else {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = false;
+ }
+ locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == BPP_INVALID
+ && mode_lib->vba.PHYCLKPerState[i]
+ >= 810.0) {
+ mode_lib->vba.Outbpp = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC = TruncToValidBPP(
+ (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) {
+ locals->RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ locals->RequiresFEC[i][k] = true;
+ } else {
+ locals->RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
+ } else {
+ locals->RequiresDSC[i][k] = false;
+ locals->RequiresFEC[i][k] = false;
+ }
+ locals->OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ }
+ } else {
+ locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE;
+ }
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->DIOSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
+ && mode_lib->vba.Interlace[k] == true
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ locals->DIOSupport[i] = false;
+ }
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->DSCCLKRequiredMoreThanSupported[i] = false;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if ((mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp)) {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k]
+ == dm_n422) {
+ mode_lib->vba.DSCFormatFactor = 2;
+ } else {
+ mode_lib->vba.DSCFormatFactor = 1;
+ }
+ if (locals->RequiresDSC[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
+ > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
+ locals->DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ } else {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor
+ > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
+ locals->DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ locals->NotEnoughDSCUnits[i] = false;
+ mode_lib->vba.TotalDSCUnitsRequired = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->RequiresDSC[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 2.0;
+ } else {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 1.0;
+ }
+ }
+ }
+ if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
+ locals->NotEnoughDSCUnits[i] = true;
+ }
+ }
+ /*DSC Delay per state*/
+
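+ /* DSC delay: the slice count is derived from the back-end pixel clock, the delay
+  * from dscceComputeDelay() + dscComputeDelay() (doubled under ODM combine), scaled
+  * by the front-end/back-end pixel clock ratio, then copied from each timing master
+  * to the planes blended with it. */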
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k) {
+ mode_lib->vba.slices = 0;
+ } else if (locals->RequiresDSC[i][k] == false) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
+ mode_lib->vba.slices = dml_ceil(
+ mode_lib->vba.PixelClockBackEnd[k] / 400.0,
+ 4.0);
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
+ mode_lib->vba.slices = 8.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
+ mode_lib->vba.slices = 4.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
+ mode_lib->vba.slices = 2.0;
+ } else {
+ mode_lib->vba.slices = 1.0;
+ }
+ if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE
+ || locals->OutputBppPerState[i][k] == BPP_INVALID) {
+ mode_lib->vba.bpp = 0.0;
+ } else {
+ mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
+ }
+ if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+ locals->DSCDelayPerState[i][k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ locals->DSCDelayPerState[i][k] =
+ 2.0 * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0),
+ mode_lib->vba.slices / 2,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(mode_lib->vba.OutputFormat[k]));
+ }
+ locals->DSCDelayPerState[i][k] =
+ locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ locals->DSCDelayPerState[i][k] = 0.0;
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true)
+ locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][m];
+ }
+ }
+ }
+ }
+
+ /*Prefetch Check*/
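+ /* Prefetch check: count DCC-active DPPs per state/combine option, take the worst
+  * of the three urgent latencies, and resolve the min/max prefetch mode from the
+  * allowed self-refresh/DRAM-clock-change policy. */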
+ for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ locals->TotalNumberOfDCCActiveDPP[i][j] = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.DCCEnable[k] == true)
+ locals->TotalNumberOfDCCActiveDPP[i][j] = locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
+ }
+ }
+ }
+
+ mode_lib->vba.UrgentLatency = dml_max3(
+ mode_lib->vba.UrgentLatencyPixelDataOnly,
+ mode_lib->vba.UrgentLatencyPixelMixedWithVMData,
+ mode_lib->vba.UrgentLatencyVMDataOnly);
+ mode_lib->vba.PrefetchERROR = CalculateMinAndMaxPrefetchMode(
+ mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
+ &mode_lib->vba.MinPrefetchMode,
+ &mode_lib->vba.MaxPrefetchMode);
+
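+ /* For each state and combine option: recompute swath geometry for the chosen DPP
+  * split, size VM/meta/DPTE rows and prefetch source lines per plane, add the extra
+  * latency and writeback delays, then iterate prefetch mode and MaxVStartup until
+  * the prefetch schedule and prefetch VRatio limits are satisfied. */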
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
+ locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ locals->SwathWidthYThisState[k] =
+ dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]));
+ } else {
+ locals->SwathWidthYThisState[k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
+ }
+ mode_lib->vba.SwathWidthGranularityY = 256.0
+ / dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
+ / locals->MaxSwathHeightY[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY =
+ (dml_ceil(locals->SwathWidthYThisState[k] - 1.0, mode_lib->vba.SwathWidthGranularityY)
+ + mode_lib->vba.SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
+ 256.0) + 256;
+ }
+ if (locals->MaxSwathHeightC[k] > 0.0) {
+ mode_lib->vba.SwathWidthGranularityC = 256.0 / dml_ceil(locals->BytePerPixelInDETC[k], 2.0) / locals->MaxSwathHeightC[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYThisState[k] / 2.0 - 1.0, mode_lib->vba.SwathWidthGranularityC)
+ + mode_lib->vba.SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(mode_lib->vba.RoundedUpMaxSwathSizeBytesC, 256.0) + 256;
+ }
+ } else {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
+ }
+ if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ locals->SwathHeightYThisState[k] = locals->MaxSwathHeightY[k];
+ locals->SwathHeightCThisState[k] = locals->MaxSwathHeightC[k];
+ } else {
+ locals->SwathHeightYThisState[k] =
+ locals->MinSwathHeightY[k];
+ locals->SwathHeightCThisState[k] =
+ locals->MinSwathHeightC[k];
+ }
+ }
+
+ CalculateDCFCLKDeepSleep(
+ mode_lib,
+ mode_lib->vba.NumberOfActivePlanes,
+ locals->BytePerPixelInDETY,
+ locals->BytePerPixelInDETC,
+ mode_lib->vba.VRatio,
+ locals->SwathWidthYThisState,
+ locals->NoOfDPPThisState,
+ mode_lib->vba.HRatio,
+ mode_lib->vba.PixelClock,
+ locals->PSCL_FACTOR,
+ locals->PSCL_FACTOR_CHROMA,
+ locals->RequiredDPPCLKThisState,
+ &mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]);
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ locals->Read256BlockHeightC[k],
+ locals->Read256BlockWidthC[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0,
+ mode_lib->vba.ViewportHeight[k] / 2.0,
+ locals->SwathWidthYThisState[k] / 2.0,
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequestsChroma,
+ mode_lib->vba.PitchC[k],
+ 0.0,
+ &locals->MacroTileWidthC[k],
+ &mode_lib->vba.MetaRowBytesC,
+ &mode_lib->vba.DPTEBytesPerRowC,
+ &locals->PTEBufferSizeNotExceededC[i][j][k],
+ locals->dpte_row_width_chroma_ub,
+ &locals->dpte_row_height_chroma[k],
+ &locals->meta_req_width_chroma[k],
+ &locals->meta_req_height_chroma[k],
+ &locals->meta_row_width_chroma[k],
+ &locals->meta_row_height_chroma[k],
+ &locals->vm_group_bytes_chroma,
+ &locals->dpte_group_bytes_chroma,
+ locals->PixelPTEReqWidthC,
+ locals->PixelPTEReqHeightC,
+ locals->PTERequestSizeC,
+ locals->dpde0_bytes_per_frame_ub_c,
+ locals->meta_pte_bytes_per_frame_ub_c);
+ locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k]/2,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ locals->SwathHeightCThisState[k],
+ mode_lib->vba.ViewportYStartC[k],
+ &locals->PrefillC[k],
+ &locals->MaxNumSwC[k]);
+ locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma;
+ } else {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
+ mode_lib->vba.MetaRowBytesC = 0.0;
+ mode_lib->vba.DPTEBytesPerRowC = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
+ locals->PTEBufferSizeNotExceededC[i][j][k] = true;
+ locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
+ }
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ locals->Read256BlockHeightY[k],
+ locals->Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ locals->SwathWidthYThisState[k],
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels,
+ mode_lib->vba.VMMPageSize,
+ locals->PTEBufferSizeInRequestsForLuma,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &locals->MacroTileWidthY[k],
+ &mode_lib->vba.MetaRowBytesY,
+ &mode_lib->vba.DPTEBytesPerRowY,
+ &locals->PTEBufferSizeNotExceededY[i][j][k],
+ locals->dpte_row_width_luma_ub,
+ &locals->dpte_row_height[k],
+ &locals->meta_req_width[k],
+ &locals->meta_req_height[k],
+ &locals->meta_row_width[k],
+ &locals->meta_row_height[k],
+ &locals->vm_group_bytes[k],
+ &locals->dpte_group_bytes[k],
+ locals->PixelPTEReqWidthY,
+ locals->PixelPTEReqHeightY,
+ locals->PTERequestSizeY,
+ locals->dpde0_bytes_per_frame_ub_l,
+ locals->meta_pte_bytes_per_frame_ub_l);
+ locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ locals->SwathHeightYThisState[k],
+ mode_lib->vba.ViewportYStartY[k],
+ &locals->PrefillY[k],
+ &locals->MaxNumSwY[k]);
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] /
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.MetaRowBytesY,
+ mode_lib->vba.MetaRowBytesC,
+ locals->meta_row_height[k],
+ locals->meta_row_height_chroma[k],
+ mode_lib->vba.DPTEBytesPerRowY,
+ mode_lib->vba.DPTEBytesPerRowC,
+ locals->dpte_row_height[k],
+ locals->dpte_row_height_chroma[k],
+ &locals->meta_row_bw[k],
+ &locals->dpte_row_bw[k]);
+ }
+ mode_lib->vba.ExtraLatency = CalculateExtraLatency(
+ locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i],
+ locals->TotalNumberOfActiveDPP[i][j],
+ mode_lib->vba.PixelChunkSizeInKByte,
+ locals->TotalNumberOfDCCActiveDPP[i][j],
+ mode_lib->vba.MetaChunkSize,
+ locals->ReturnBWPerState[i][0],
+ mode_lib->vba.GPUVMEnable,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.NumberOfActivePlanes,
+ locals->NoOfDPPThisState,
+ locals->dpte_group_bytes,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels);
+
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k]) / locals->RequiredDISPCLK[i][j];
+ } else {
+ locals->WritebackDelay[i][k] = 0.0;
+ }
+ for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
+ if (mode_lib->vba.BlendingAndTiming[m] == k
+ && mode_lib->vba.WritebackEnable[m]
+ == true) {
+ locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k],
+ mode_lib->vba.WritebackLatency + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[m],
+ mode_lib->vba.WritebackHRatio[m],
+ mode_lib->vba.WritebackVRatio[m],
+ mode_lib->vba.WritebackLumaHTaps[m],
+ mode_lib->vba.WritebackLumaVTaps[m],
+ mode_lib->vba.WritebackChromaHTaps[m],
+ mode_lib->vba.WritebackChromaVTaps[m],
+ mode_lib->vba.WritebackDestinationWidth[m]) / locals->RequiredDISPCLK[i][j]);
+ }
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == m) {
+ locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m];
+ }
+ }
+ }
+ mode_lib->vba.MaxMaxVStartup[0][0] = 0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
+ mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]);
+ }
+
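+ /* Retry policy: start at MinPrefetchMode with the largest possible VStartup; on
+  * failure, step VStartup down while the VM/row prefetch lines are too large (and
+  * VStartup is above 13), otherwise reset VStartup and advance to the next prefetch
+  * mode. */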
+ mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
+ do {
+ mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
+ mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup;
+
+ mode_lib->vba.TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode[i][j],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ Pipe myPipe;
+ HostVM myHostVM;
+
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ locals->SwathWidthYThisState[k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+
+ myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
+ myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
+ myPipe.PixelClock = mode_lib->vba.PixelClock[k];
+ myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
+ myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
+ myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
+ myPipe.SourceScan = mode_lib->vba.SourceScan[k];
+ myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k];
+ myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k];
+ myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k];
+ myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k];
+ myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
+ myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
+ myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
+ myPipe.HTotal = mode_lib->vba.HTotal[k];
+
+
+ myHostVM.Enable = mode_lib->vba.HostVMEnable;
+ myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
+ myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
+
+
+ mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ &myPipe,
+ locals->DSCDelayPerState[i][k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
+ locals->MaximumVStartup[0][0][k],
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ &myHostVM,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
+ locals->PrefetchLinesY[0][0][k],
+ locals->SwathWidthYThisState[k],
+ locals->BytePerPixelInDETY[k],
+ locals->PrefillY[k],
+ locals->MaxNumSwY[k],
+ locals->PrefetchLinesC[0][0][k],
+ locals->BytePerPixelInDETC[k],
+ locals->PrefillC[k],
+ locals->MaxNumSwC[k],
+ locals->SwathHeightYThisState[k],
+ locals->SwathHeightCThisState[k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &locals->dst_x_after_scaler,
+ &locals->dst_y_after_scaler,
+ &locals->LineTimesForPrefetch[k],
+ &locals->PrefetchBW[k],
+ &locals->LinesForMetaPTE[k],
+ &locals->LinesForMetaAndDPTERow[k],
+ &locals->VRatioPreY[i][j][k],
+ &locals->VRatioPreC[i][j][k],
+ &locals->RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &locals->RequiredPrefetchPixelDataBWChroma[i][j][k],
+ &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &locals->Tno_bw[k],
+ &locals->prefetch_vmrow_bw[k],
+ locals->swath_width_luma_ub,
+ locals->swath_width_chroma_ub,
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ }
+ mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ unsigned int m;
+
+ locals->cursor_bw[k] = 0;
+ locals->cursor_bw_pre[k] = 0;
+ for (m = 0; m < mode_lib->vba.NumberOfCursors[k]; m++) {
+ locals->cursor_bw[k] = mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m]
+ / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
+ locals->cursor_bw_pre[k] = mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m]
+ / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * locals->VRatioPreY[i][j][k];
+ }
+
+ CalculateUrgentBurstFactor(
+ mode_lib->vba.DETBufferSizeInKByte,
+ locals->SwathHeightYThisState[k],
+ locals->SwathHeightCThisState[k],
+ locals->SwathWidthYThisState[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.CursorBufferSize,
+ mode_lib->vba.CursorWidth[k][0] + mode_lib->vba.CursorWidth[k][1],
+ dml_max(mode_lib->vba.CursorBPP[k][0], mode_lib->vba.CursorBPP[k][1]),
+ mode_lib->vba.VRatio[k],
+ locals->VRatioPreY[i][j][k],
+ locals->VRatioPreC[i][j][k],
+ locals->BytePerPixelInDETY[k],
+ locals->BytePerPixelInDETC[k],
+ &locals->UrgentBurstFactorCursor[k],
+ &locals->UrgentBurstFactorCursorPre[k],
+ &locals->UrgentBurstFactorLuma[k],
+ &locals->UrgentBurstFactorLumaPre[k],
+ &locals->UrgentBurstFactorChroma[k],
+ &locals->UrgentBurstFactorChromaPre[k],
+ &locals->NotEnoughUrgentLatencyHiding,
+ &locals->NotEnoughUrgentLatencyHidingPre);
+
+ if (mode_lib->vba.UseUrgentBurstBandwidth == false) {
+ locals->UrgentBurstFactorCursor[k] = 1;
+ locals->UrgentBurstFactorCursorPre[k] = 1;
+ locals->UrgentBurstFactorLuma[k] = 1;
+ locals->UrgentBurstFactorLumaPre[k] = 1;
+ locals->UrgentBurstFactorChroma[k] = 1;
+ locals->UrgentBurstFactorChromaPre[k] = 1;
+ }
+
+ mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithoutPrefetch
+ + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k] + locals->ReadBandwidthLuma[k]
+ * locals->UrgentBurstFactorLuma[k] + locals->ReadBandwidthChroma[k]
+ * locals->UrgentBurstFactorChroma[k] + locals->meta_row_bw[k] + locals->dpte_row_bw[k];
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ + dml_max3(locals->prefetch_vmrow_bw[k],
+ locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k] + locals->ReadBandwidthChroma[k]
+ * locals->UrgentBurstFactorChroma[k] + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k]
+ + locals->meta_row_bw[k] + locals->dpte_row_bw[k],
+ locals->RequiredPrefetchPixelDataBWLuma[i][j][k] * locals->UrgentBurstFactorLumaPre[k]
+ + locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
+ + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
+ }
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]
+ || locals->NotEnoughUrgentLatencyHiding == 1) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
+ }
+
+ locals->PrefetchSupported[i][j] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]
+ || locals->NotEnoughUrgentLatencyHiding == 1
+ || locals->NotEnoughUrgentLatencyHidingPre == 1) {
+ locals->PrefetchSupported[i][j] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->LineTimesForPrefetch[k] < 2.0
+ || locals->LinesForMetaPTE[k] >= 32.0
+ || locals->LinesForMetaAndDPTERow[k] >= 16.0
+ || mode_lib->vba.IsErrorResult[i][j][k] == true) {
+ locals->PrefetchSupported[i][j] = false;
+ }
+ }
+ locals->VRatioInPrefetchSupported[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->VRatioPreY[i][j][k] > 4.0
+ || locals->VRatioPreC[i][j][k] > 4.0
+ || mode_lib->vba.IsErrorResult[i][j][k] == true) {
+ locals->VRatioInPrefetchSupported[i][j] = false;
+ }
+ }
+ mode_lib->vba.AnyLinesForVMOrRowTooLarge = false;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (locals->LinesForMetaAndDPTERow[k] >= 16 || locals->LinesForMetaPTE[k] >= 32) {
+ mode_lib->vba.AnyLinesForVMOrRowTooLarge = true;
+ }
+ }
+
+ if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) {
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
+ mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
+ } else {
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;
+ }
+ } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
+ && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
+ || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
+
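+ /* With a passing prefetch schedule, compute the bandwidth left for immediate
+  * flips, run CalculateFlipSchedule() per plane, and accept immediate flip for the
+  * state only if the total read bandwidth with flip fits in ReturnBW and every pipe
+  * reports support. */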
+ if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
+ + locals->ReadBandwidthChroma[k] * locals->UrgentBurstFactorChroma[k]
+ + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
+ locals->RequiredPrefetchPixelDataBWLuma[i][j][k] * locals->UrgentBurstFactorLumaPre[k]
+ + locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
+ + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes
+ + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k];
+ }
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.HostVMEnable,
+ mode_lib->vba.HostVMMaxPageTableLevels,
+ mode_lib->vba.HostVMCachedPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ locals->Tno_bw[k],
+ mode_lib->vba.DCCEnable[k],
+ locals->dpte_row_height[k],
+ locals->meta_row_height[k],
+ locals->dpte_row_height_chroma[k],
+ locals->meta_row_height_chroma[k],
+ &locals->DestinationLinesToRequestVMInImmediateFlip[k],
+ &locals->DestinationLinesToRequestRowInImmediateFlip[k],
+ &locals->final_flip_bw[k],
+ &locals->ImmediateFlipSupportedForPipe[k]);
+ }
+ mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.total_dcn_read_bw_with_flip = mode_lib->vba.total_dcn_read_bw_with_flip + dml_max3(
+ locals->prefetch_vmrow_bw[k],
+ locals->final_flip_bw[k] + locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
+ + locals->ReadBandwidthChroma[k] * locals->UrgentBurstFactorChroma[k]
+ + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
+ locals->final_flip_bw[k] + locals->RequiredPrefetchPixelDataBWLuma[i][j][k]
+ * locals->UrgentBurstFactorLumaPre[k] + locals->RequiredPrefetchPixelDataBWChroma[i][j][k]
+ * locals->UrgentBurstFactorChromaPre[k] + locals->cursor_bw_pre[k]
+ * locals->UrgentBurstFactorCursorPre[k]);
+ }
+ locals->ImmediateFlipSupportedForState[i][j] = true;
+ if (mode_lib->vba.total_dcn_read_bw_with_flip
+ > locals->ReturnBWPerState[i][0]) {
+ locals->ImmediateFlipSupportedForState[i][j] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->ImmediateFlipSupportedForPipe[k] == false) {
+ locals->ImmediateFlipSupportedForState[i][j] = false;
+ }
+ }
+ } else {
+ locals->ImmediateFlipSupportedForState[i][j] = false;
+ }
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel = dml_max3(
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly);
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ mode_lib->vba.PrefetchMode[i][j],
+ mode_lib->vba.NumberOfActivePlanes,
+ mode_lib->vba.MaxLineBufferLines,
+ mode_lib->vba.LineBufferSize,
+ mode_lib->vba.DPPOutputBufferPixels,
+ mode_lib->vba.DETBufferSizeInKByte,
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ mode_lib->vba.WritebackInterfaceChromaBufferSize,
+ mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
+ locals->ReturnBWPerState[i][0],
+ mode_lib->vba.GPUVMEnable,
+ locals->dpte_group_bytes,
+ mode_lib->vba.MetaChunkSize,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.WritebackLatency,
+ mode_lib->vba.WritebackChunkSize,
+ mode_lib->vba.SOCCLKPerState[i],
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.SRExitTime,
+ mode_lib->vba.SREnterPlusExitTime,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
+ locals->NoOfDPPThisState,
+ mode_lib->vba.DCCEnable,
+ locals->RequiredDPPCLKThisState,
+ locals->SwathWidthYSingleDPP,
+ locals->SwathHeightYThisState,
+ locals->ReadBandwidthLuma,
+ locals->SwathHeightCThisState,
+ locals->ReadBandwidthChroma,
+ mode_lib->vba.LBBitPerPixel,
+ locals->SwathWidthYThisState,
+ mode_lib->vba.HRatio,
+ mode_lib->vba.vtaps,
+ mode_lib->vba.VTAPsChroma,
+ mode_lib->vba.VRatio,
+ mode_lib->vba.HTotal,
+ mode_lib->vba.PixelClock,
+ mode_lib->vba.BlendingAndTiming,
+ locals->BytePerPixelInDETY,
+ locals->BytePerPixelInDETC,
+ mode_lib->vba.WritebackEnable,
+ mode_lib->vba.WritebackPixelFormat,
+ mode_lib->vba.WritebackDestinationWidth,
+ mode_lib->vba.WritebackDestinationHeight,
+ mode_lib->vba.WritebackSourceHeight,
+ &locals->DRAMClockChangeSupport[i][j],
+ &mode_lib->vba.UrgentWatermark,
+ &mode_lib->vba.WritebackUrgentWatermark,
+ &mode_lib->vba.DRAMClockChangeWatermark,
+ &mode_lib->vba.WritebackDRAMClockChangeWatermark,
+ &mode_lib->vba.StutterExitWatermark,
+ &mode_lib->vba.StutterEnterPlusExitWatermark,
+ &mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
+ }
+ }
+
+ /*Vertical Active BW support*/
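+ /* Sum of plane read bandwidths must fit within the smaller of the SDP port and
+  * DRAM bandwidth budgets for the state. */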
+ {
+ double MaxTotalVActiveRDBandwidth = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
+ }
+ for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
+ locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(
+ locals->IdealSDPPortBandwidthPerState[i][0] *
+ mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation
+ / 100.0, mode_lib->vba.DRAMSpeedPerState[i] *
+ mode_lib->vba.NumberOfChannels *
+ mode_lib->vba.DRAMChannelWidth *
+ mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
+ / 100.0);
+
+ if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) {
+ locals->TotalVerticalActiveBandwidthSupport[i][0] = true;
+ } else {
+ locals->TotalVerticalActiveBandwidthSupport[i][0] = false;
+ }
+ }
+ }
+
+ /*PTE Buffer Size Check*/
+
+ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ locals->PTEBufferSizeNotExceeded[i][j] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (locals->PTEBufferSizeNotExceededY[i][j][k] == false
+ || locals->PTEBufferSizeNotExceededC[i][j][k] == false) {
+ locals->PTEBufferSizeNotExceeded[i][j] = false;
+ }
+ }
+ }
+ }
+ /*Cursor Support Check*/
+
+ mode_lib->vba.CursorSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
+ for (m = 0; m < mode_lib->vba.NumberOfCursors[k]; m++) {
+ if (mode_lib->vba.CursorBPP[k][m] == 64 && mode_lib->vba.Cursor64BppSupport == false) {
+ mode_lib->vba.CursorSupport = false;
+ }
+ }
+ }
+ }
+ /*Valid Pitch Check*/
+
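+ /* The supplied pitches must already be aligned: rounding max(pitch, viewport
+  * width) up to the macro-tile width (or, for DCC, the meta pitch up to 64x the
+  * 256B block width) must not exceed the supplied value, otherwise PitchSupport
+  * fails. */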
+ mode_lib->vba.PitchSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ locals->AlignedYPitch[k] = dml_ceil(
+ dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
+ locals->MacroTileWidthY[k]);
+ if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ locals->AlignedDCCMetaPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.DCCMetaPitchY[k],
+ mode_lib->vba.ViewportWidth[k]),
+ 64.0 * locals->Read256BlockWidthY[k]);
+ } else {
+ locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
+ }
+ if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
+ locals->AlignedCPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.PitchC[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0),
+ locals->MacroTileWidthC[k]);
+ } else {
+ locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k];
+ }
+ if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ }
+ /*Mode Support, Voltage State and SOC Configuration*/
+
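+ /* Fold all per-state checks into a validation status for each (state, MPC combine)
+  * pair, pick the lowest passing voltage level, decide whether MPC combine is used,
+  * and latch the resulting clocks, DPP counts, ODM/DSC settings and output bpp. */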
+ for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
+ for (j = 0; j < 2; j++) {
+ enum dm_validation_status status = DML_VALIDATION_OK;
+
+ if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
+ status = DML_FAIL_SCALE_RATIO_TAP;
+ } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
+ status = DML_FAIL_SOURCE_PIXEL_FORMAT;
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
+ status = DML_FAIL_VIEWPORT_SIZE;
+ } else if (locals->DIOSupport[i] != true) {
+ status = DML_FAIL_DIO_SUPPORT;
+ } else if (locals->NotEnoughDSCUnits[i] != false) {
+ status = DML_FAIL_NOT_ENOUGH_DSC;
+ } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
+ status = DML_FAIL_DSC_CLK_REQUIRED;
+ } else if (locals->ROBSupport[i][0] != true) {
+ status = DML_FAIL_REORDERING_BUFFER;
+ } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
+ status = DML_FAIL_DISPCLK_DPPCLK;
+ } else if (locals->TotalAvailablePipesSupport[i][j] != true) {
+ status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
+ } else if (mode_lib->vba.NumberOfOTGSupport != true) {
+ status = DML_FAIL_NUM_OTG;
+ } else if (mode_lib->vba.WritebackModeSupport != true) {
+ status = DML_FAIL_WRITEBACK_MODE;
+ } else if (mode_lib->vba.WritebackLatencySupport != true) {
+ status = DML_FAIL_WRITEBACK_LATENCY;
+ } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
+ status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
+ } else if (mode_lib->vba.CursorSupport != true) {
+ status = DML_FAIL_CURSOR_SUPPORT;
+ } else if (mode_lib->vba.PitchSupport != true) {
+ status = DML_FAIL_PITCH_SUPPORT;
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
+ status = DML_FAIL_TOTAL_V_ACTIVE_BW;
+ } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
+ status = DML_FAIL_PTE_BUFFER_SIZE;
+ } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
+ status = DML_FAIL_DSC_INPUT_BPC;
+ } else if ((mode_lib->vba.HostVMEnable != false
+ && locals->ImmediateFlipSupportedForState[i][j] != true)) {
+ status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
+ } else if (locals->PrefetchSupported[i][j] != true) {
+ status = DML_FAIL_PREFETCH_SUPPORT;
+ } else if (locals->VRatioInPrefetchSupported[i][j] != true) {
+ status = DML_FAIL_V_RATIO_PREFETCH;
+ }
+
+ if (status == DML_VALIDATION_OK) {
+ locals->ModeSupport[i][j] = true;
+ } else {
+ locals->ModeSupport[i][j] = false;
+ }
+ locals->ValidationStatus[i] = status;
+ }
+ }
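+	/*
+	 * Pick the lowest supported voltage state, starting from the override
+	 * level.  MPC combine is selected for that state when the single-DPP
+	 * configuration fails, when combining is always requested, or when it
+	 * improves DRAM clock change support under the
+	 * reduce-voltage-and-clocks policy.
+	 */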
+ {
+ unsigned int MaximumMPCCombine = 0;
+ mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states + 1;
+ for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) {
+ if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) {
+ mode_lib->vba.VoltageLevel = i;
+ if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false
+ || mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible
+ || (mode_lib->vba.WhenToDoMPCCombine == dm_mpc_reduce_voltage_and_clocks
+ && ((locals->DRAMClockChangeSupport[i][1] == dm_dram_clock_change_vactive
+ && locals->DRAMClockChangeSupport[i][0] != dm_dram_clock_change_vactive)
+ || (locals->DRAMClockChangeSupport[i][1] == dm_dram_clock_change_vblank
+ && locals->DRAMClockChangeSupport[i][0] == dm_dram_clock_change_unsupported))))) {
+ MaximumMPCCombine = 1;
+ } else {
+ MaximumMPCCombine = 0;
+ }
+ break;
+ }
+ }
+ mode_lib->vba.ImmediateFlipSupport =
+ locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
+ locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
+ }
+ mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
+ mode_lib->vba.maxMpcComb = MaximumMPCCombine;
+ }
+ mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.ODMCombineEnabled[k] =
+ locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ } else {
+ mode_lib->vba.ODMCombineEnabled[k] = false;
+ }
+ mode_lib->vba.DSCEnabled[k] =
+ locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
+ mode_lib->vba.OutputBpp[k] =
+ locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k];
+ }
+}
+
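+/*
+ * The urgent watermark is the urgent latency plus the extra (chunk/PTE fetch)
+ * latency; the DRAM clock change watermark adds the DRAM clock change latency
+ * on top of that.  A plane can absorb a DRAM clock change in the active region
+ * only if its combined DPP output buffer, line buffer and DET buffering time
+ * exceeds that watermark; otherwise the change must be hidden in vblank, or is
+ * reported as unsupported.
+ */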
+static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+ unsigned int NumberOfActivePlanes,
+ unsigned int MaxLineBufferLines,
+ unsigned int LineBufferSize,
+ unsigned int DPPOutputBufferPixels,
+ double DETBufferSizeInKByte,
+ unsigned int WritebackInterfaceLumaBufferSize,
+ unsigned int WritebackInterfaceChromaBufferSize,
+ double DCFCLK,
+ double UrgentOutOfOrderReturn,
+ double ReturnBW,
+ bool GPUVMEnable,
+ int dpte_group_bytes[],
+ unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+ double WritebackLatency,
+ double WritebackChunkSize,
+ double SOCCLK,
+ double DRAMClockChangeLatency,
+ double SRExitTime,
+ double SREnterPlusExitTime,
+ double DCFCLKDeepSleep,
+ int DPPPerPlane[],
+ bool DCCEnable[],
+ double DPPCLK[],
+ double SwathWidthSingleDPPY[],
+ unsigned int SwathHeightY[],
+ double ReadBandwidthPlaneLuma[],
+ unsigned int SwathHeightC[],
+ double ReadBandwidthPlaneChroma[],
+ unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double HRatio[],
+ unsigned int vtaps[],
+ unsigned int VTAPsChroma[],
+ double VRatio[],
+ unsigned int HTotal[],
+ double PixelClock[],
+ unsigned int BlendingAndTiming[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ bool WritebackEnable[],
+ enum source_format_class WritebackPixelFormat[],
+ double WritebackDestinationWidth[],
+ double WritebackDestinationHeight[],
+ double WritebackSourceHeight[],
+ enum clock_change_support *DRAMClockChangeSupport,
+ double *UrgentWatermark,
+ double *WritebackUrgentWatermark,
+ double *DRAMClockChangeWatermark,
+ double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *MinActiveDRAMClockChangeLatencySupported)
+{
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double DPPOutputBufferLinesY;
+ double DPPOutputBufferLinesC;
+ double DETBufferSizeY;
+ double DETBufferSizeC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC;
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath;
+ double FullDETBufferingTimeY[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeC;
+ double ActiveDRAMClockChangeLatencyMarginY;
+ double ActiveDRAMClockChangeLatencyMarginC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double PlaneWithMinActiveDRAMClockChangeMargin;
+ double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
+ double FullDETBufferingTimeYStutterCriticalPlane = 0;
+ double TimeToFinishSwathTransferStutterCriticalPlane = 0;
+ unsigned int k, j;
+
+ mode_lib->vba.TotalActiveDPP = 0;
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
+ if (DCCEnable[k] == true) {
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
+ }
+ }
+
+ mode_lib->vba.TotalDataReadBandwidth = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalDataReadBandwidth = mode_lib->vba.TotalDataReadBandwidth
+ + ReadBandwidthPlaneLuma[k] + ReadBandwidthPlaneChroma[k];
+ }
+
+ *UrgentWatermark = UrgentLatency + ExtraLatency;
+
+ *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (WritebackEnable[k] == true) {
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ }
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1) {
+ *WritebackUrgentWatermark = WritebackLatency;
+ } else {
+ *WritebackUrgentWatermark = WritebackLatency
+ + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1) {
+ *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ } else {
+ *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency
+ + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+
+ mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines,
+ dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1))
+ - (vtaps[k] - 1);
+
+ mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines,
+ dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / 2 / dml_max(HRatio[k] / 2, 1.0)), 1))
+ - (VTAPsChroma[k] - 1);
+
+ EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k]
+ * (HTotal[k] / PixelClock[k]);
+
+ EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
+ / (VRatio[k] / 2) * (HTotal[k] / PixelClock[k]);
+
+ if (SwathWidthY[k] > 2 * DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = (double) DPPOutputBufferPixels / SwathWidthY[k];
+ } else if (SwathWidthY[k] > DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = 0.5;
+ } else {
+ DPPOutputBufferLinesY = 1;
+ }
+
+ if (SwathWidthY[k] / 2.0 > 2 * DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = (double) DPPOutputBufferPixels
+ / (SwathWidthY[k] / 2.0);
+ } else if (SwathWidthY[k] / 2.0 > DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = 0.5;
+ } else {
+ DPPOutputBufferLinesC = 1;
+ }
+
+ CalculateDETBufferSize(
+ DETBufferSizeInKByte,
+ SwathHeightY[k],
+ SwathHeightC[k],
+ &DETBufferSizeY,
+ &DETBufferSizeC);
+
+ LinesInDETY[k] = DETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k]
+ * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ if (BytePerPixelDETC[k] > 0) {
+ LinesInDETC = DETBufferSizeC / BytePerPixelDETC[k] / (SwathWidthY[k] / 2.0);
+ LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath
+ * (HTotal[k] / PixelClock[k]) / (VRatio[k] / 2);
+ } else {
+ LinesInDETC = 0;
+ FullDETBufferingTimeC = 999999;
+ }
+
+ ActiveDRAMClockChangeLatencyMarginY = HTotal[k] / PixelClock[k]
+ * DPPOutputBufferLinesY + EffectiveLBLatencyHidingY
+ + FullDETBufferingTimeY[k] - *DRAMClockChangeWatermark;
+
+ if (NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
+ - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ }
+
+ if (BytePerPixelDETC[k] > 0) {
+ ActiveDRAMClockChangeLatencyMarginC = HTotal[k] / PixelClock[k]
+ * DPPOutputBufferLinesC + EffectiveLBLatencyHidingC
+ + FullDETBufferingTimeC - *DRAMClockChangeWatermark;
+ if (NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
+ - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / (VRatio[k] / 2);
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ ActiveDRAMClockChangeLatencyMarginY,
+ ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
+ }
+
+ if (WritebackEnable[k] == true) {
+ if (WritebackPixelFormat[k] == dm_444_32) {
+ WritebackDRAMClockChangeLatencyMargin = (WritebackInterfaceLumaBufferSize
+ + WritebackInterfaceChromaBufferSize) / (WritebackDestinationWidth[k]
+ * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k]
+ / PixelClock[k]) * 4) - *WritebackDRAMClockChangeWatermark;
+ } else {
+ WritebackDRAMClockChangeLatencyMargin = dml_min(
+ WritebackInterfaceLumaBufferSize * 8.0 / 10,
+ 2 * WritebackInterfaceChromaBufferSize * 8.0 / 10) / (WritebackDestinationWidth[k]
+ * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]))
+ - *WritebackDRAMClockChangeWatermark;
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ PlaneWithMinActiveDRAMClockChangeMargin = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ if (BlendingAndTiming[k] == k) {
+ PlaneWithMinActiveDRAMClockChangeMargin = k;
+ } else {
+ for (j = 0; j < NumberOfActivePlanes; ++j) {
+ if (BlendingAndTiming[k] == j) {
+ PlaneWithMinActiveDRAMClockChangeMargin = j;
+ }
+ }
+ }
+ }
+ }
+
+ *MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k))
+ && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
+ }
+ }
+
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
+ } else if (((mode_lib->vba.SynchronizedVBlank == true
+ || mode_lib->vba.TotalNumberOfActiveOTG == 1
+ || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0)
+ && PrefetchMode == 0)) {
+ *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
+ } else {
+ *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
+ }
+
+ FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
+ TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k]
+ - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k]))
+ * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ }
+ }
+
+ *StutterExitWatermark = SRExitTime + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = dml_max(
+ SREnterPlusExitTime + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + ExtraLatency + 10 / DCFCLKDeepSleep,
+ TimeToFinishSwathTransferStutterCriticalPlane);
+
+}
+
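+/*
+ * The deep-sleep DCFCLK floor is the per-plane request rate needed to drain
+ * each plane's swath within its line delivery time (with a 10% margin), but
+ * never less than PixelClock/16 per plane or 8 MHz overall.
+ */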
+static void CalculateDCFCLKDeepSleep(
+ struct display_mode_lib *mode_lib,
+ unsigned int NumberOfActivePlanes,
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ double VRatio[],
+ double SwathWidthY[],
+ int DPPPerPlane[],
+ double HRatio[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ double *DCFCLKDeepSleep)
+{
+ unsigned int k;
+ double DisplayPipeLineDeliveryTimeLuma;
+ double DisplayPipeLineDeliveryTimeChroma;
+ //double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (VRatio[k] <= 1) {
+ DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerPlane[k]
+ / HRatio[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k]
+ / DPPCLK[k];
+ }
+ if (BytePerPixelDETC[k] == 0) {
+ DisplayPipeLineDeliveryTimeChroma = 0;
+ } else {
+ if (VRatio[k] / 2 <= 1) {
+ DisplayPipeLineDeliveryTimeChroma = SwathWidthY[k] / 2.0
+ * DPPPerPlane[k] / (HRatio[k] / 2) / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeChroma = SwathWidthY[k] / 2.0
+ / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
+ }
+ }
+
+ if (BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
+ 1.1 * SwathWidthY[k] * dml_ceil(BytePerPixelDETY[k], 1)
+ / 32.0 / DisplayPipeLineDeliveryTimeLuma,
+ 1.1 * SwathWidthY[k] / 2.0
+ * dml_ceil(BytePerPixelDETC[k], 2) / 32.0
+ / DisplayPipeLineDeliveryTimeChroma);
+ } else {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * SwathWidthY[k]
+ * dml_ceil(BytePerPixelDETY[k], 1) / 64.0
+ / DisplayPipeLineDeliveryTimeLuma;
+ }
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k],
+ PixelClock[k] / 16);
+
+ }
+
+ *DCFCLKDeepSleep = 8;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ *DCFCLKDeepSleep = dml_max(
+ *DCFCLKDeepSleep,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
+ }
+}
+
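+/*
+ * DET buffer split between luma and chroma: luma-only surfaces get the whole
+ * buffer, planes where the luma swath is not taller than the chroma swath are
+ * split 50/50, otherwise luma gets 2/3 and chroma 1/3.  For illustration only
+ * (values assumed, not taken from a specific SoC): with a 164 KB DET the
+ * 2/3-1/3 case gives roughly 112 KB for luma and 56 KB for chroma.
+ */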
+static void CalculateDETBufferSize(
+ double DETBufferSizeInKByte,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double *DETBufferSizeY,
+ double *DETBufferSizeC)
+{
+ if (SwathHeightC == 0) {
+ *DETBufferSizeY = DETBufferSizeInKByte * 1024;
+ *DETBufferSizeC = 0;
+ } else if (SwathHeightY <= SwathHeightC) {
+ *DETBufferSizeY = DETBufferSizeInKByte * 1024 / 2;
+ *DETBufferSizeC = DETBufferSizeInKByte * 1024 / 2;
+ } else {
+ *DETBufferSizeY = DETBufferSizeInKByte * 1024 * 2 / 3;
+ *DETBufferSizeC = DETBufferSizeInKByte * 1024 / 3;
+ }
+}
+
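+/*
+ * The urgent burst factor is T / (T - UrgentLatency), where T is how long the
+ * cursor/DET buffering can hide demand at the current vratio.  It approaches 1
+ * when there is ample hiding and grows as T approaches the urgent latency; if
+ * T does not exceed the urgent latency the factor is forced to 0 and the
+ * NotEnoughUrgentLatencyHiding flag is raised instead.  Illustrative numbers
+ * (assumed): T = 8 us with a 4 us urgent latency gives a burst factor of 2.
+ */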
+static void CalculateUrgentBurstFactor(
+ unsigned int DETBufferSizeInKByte,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ unsigned int SwathWidthY,
+ double LineTime,
+ double UrgentLatency,
+ double CursorBufferSize,
+ unsigned int CursorWidth,
+ unsigned int CursorBPP,
+ double VRatio,
+ double VRatioPreY,
+ double VRatioPreC,
+ double BytePerPixelInDETY,
+ double BytePerPixelInDETC,
+ double *UrgentBurstFactorCursor,
+ double *UrgentBurstFactorCursorPre,
+ double *UrgentBurstFactorLuma,
+ double *UrgentBurstFactorLumaPre,
+ double *UrgentBurstFactorChroma,
+ double *UrgentBurstFactorChromaPre,
+ unsigned int *NotEnoughUrgentLatencyHiding,
+ unsigned int *NotEnoughUrgentLatencyHidingPre)
+{
+ double LinesInDETLuma;
+ double LinesInDETChroma;
+ unsigned int LinesInCursorBuffer;
+ double CursorBufferSizeInTime;
+ double CursorBufferSizeInTimePre;
+ double DETBufferSizeInTimeLuma;
+ double DETBufferSizeInTimeLumaPre;
+ double DETBufferSizeInTimeChroma;
+ double DETBufferSizeInTimeChromaPre;
+ double DETBufferSizeY;
+ double DETBufferSizeC;
+
+ *NotEnoughUrgentLatencyHiding = 0;
+ *NotEnoughUrgentLatencyHidingPre = 0;
+
+ if (CursorWidth > 0) {
+ LinesInCursorBuffer = 1 << (unsigned int) dml_floor(
+ dml_log2(CursorBufferSize * 1024.0 / (CursorWidth * CursorBPP / 8.0)), 1.0);
+ CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
+ if (CursorBufferSizeInTime - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHiding = 1;
+ *UrgentBurstFactorCursor = 0;
+ } else {
+ *UrgentBurstFactorCursor = CursorBufferSizeInTime
+ / (CursorBufferSizeInTime - UrgentLatency);
+ }
+ if (VRatioPreY > 0) {
+ CursorBufferSizeInTimePre = LinesInCursorBuffer * LineTime / VRatioPreY;
+ if (CursorBufferSizeInTimePre - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHidingPre = 1;
+ *UrgentBurstFactorCursorPre = 0;
+ } else {
+ *UrgentBurstFactorCursorPre = CursorBufferSizeInTimePre
+ / (CursorBufferSizeInTimePre - UrgentLatency);
+ }
+ } else {
+ *UrgentBurstFactorCursorPre = 1;
+ }
+ }
+
+ CalculateDETBufferSize(
+ DETBufferSizeInKByte,
+ SwathHeightY,
+ SwathHeightC,
+ &DETBufferSizeY,
+ &DETBufferSizeC);
+
+ LinesInDETLuma = DETBufferSizeY / BytePerPixelInDETY / SwathWidthY;
+ DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
+ if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHiding = 1;
+ *UrgentBurstFactorLuma = 0;
+ } else {
+ *UrgentBurstFactorLuma = DETBufferSizeInTimeLuma
+ / (DETBufferSizeInTimeLuma - UrgentLatency);
+ }
+ if (VRatioPreY > 0) {
+ DETBufferSizeInTimeLumaPre = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime
+ / VRatioPreY;
+ if (DETBufferSizeInTimeLumaPre - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHidingPre = 1;
+ *UrgentBurstFactorLumaPre = 0;
+ } else {
+ *UrgentBurstFactorLumaPre = DETBufferSizeInTimeLumaPre
+ / (DETBufferSizeInTimeLumaPre - UrgentLatency);
+ }
+ } else {
+ *UrgentBurstFactorLumaPre = 1;
+ }
+
+ if (BytePerPixelInDETC > 0) {
+ LinesInDETChroma = DETBufferSizeC / BytePerPixelInDETC / (SwathWidthY / 2);
+ DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime
+ / (VRatio / 2);
+ if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHiding = 1;
+ *UrgentBurstFactorChroma = 0;
+ } else {
+ *UrgentBurstFactorChroma = DETBufferSizeInTimeChroma
+ / (DETBufferSizeInTimeChroma - UrgentLatency);
+ }
+ if (VRatioPreC > 0) {
+ DETBufferSizeInTimeChromaPre = dml_floor(LinesInDETChroma, SwathHeightC)
+ * LineTime / VRatioPreC;
+ if (DETBufferSizeInTimeChromaPre - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHidingPre = 1;
+ *UrgentBurstFactorChromaPre = 0;
+ } else {
+ *UrgentBurstFactorChromaPre = DETBufferSizeInTimeChromaPre
+ / (DETBufferSizeInTimeChromaPre - UrgentLatency);
+ }
+ } else {
+ *UrgentBurstFactorChromaPre = 1;
+ }
+ }
+}
+
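+/*
+ * Line delivery time is the swath width divided by the effective pixel rate:
+ * display-limited (swath_width * DPPPerPlane / HRatio / PixelClock) when the
+ * vratio is <= 1, scaler-limited (swath_width / PSCL throughput / DPPCLK)
+ * otherwise.  Request delivery time then divides by the number of 256B
+ * requests per swath.
+ */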
+static void CalculatePixelDeliveryTimes(
+ unsigned int NumberOfActivePlanes,
+ double VRatio[],
+ double VRatioPrefetchY[],
+ double VRatioPrefetchC[],
+ unsigned int swath_width_luma_ub[],
+ unsigned int swath_width_chroma_ub[],
+ int DPPPerPlane[],
+ double HRatio[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ double BytePerPixelDETC[],
+ enum scan_direction_class SourceScan[],
+ unsigned int BlockWidth256BytesY[],
+ unsigned int BlockHeight256BytesY[],
+ unsigned int BlockWidth256BytesC[],
+ unsigned int BlockHeight256BytesC[],
+ double DisplayPipeLineDeliveryTimeLuma[],
+ double DisplayPipeLineDeliveryTimeChroma[],
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[],
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeLuma[],
+ double DisplayPipeRequestDeliveryTimeChroma[],
+ double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
+{
+ double req_per_swath_ub;
+ unsigned int k;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (VRatio[k] <= 1) {
+ DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * DPPPerPlane[k]
+ / HRatio[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k]
+ / PSCL_THROUGHPUT[k] / DPPCLK[k];
+ }
+
+ if (BytePerPixelDETC[k] == 0) {
+ DisplayPipeLineDeliveryTimeChroma[k] = 0;
+ } else {
+ if (VRatio[k] / 2 <= 1) {
+ DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k]
+ * DPPPerPlane[k] / (HRatio[k] / 2) / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k]
+ / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
+ }
+ }
+
+ if (VRatioPrefetchY[k] <= 1) {
+ DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k]
+ * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k]
+ / PSCL_THROUGHPUT[k] / DPPCLK[k];
+ }
+
+ if (BytePerPixelDETC[k] == 0) {
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (VRatioPrefetchC[k] <= 1) {
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ swath_width_chroma_ub[k] * DPPPerPlane[k]
+ / (HRatio[k] / 2) / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
+ }
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (SourceScan[k] == dm_horz) {
+ req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
+ } else {
+ req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
+ }
+ DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k]
+ / req_per_swath_ub;
+ DisplayPipeRequestDeliveryTimeLumaPrefetch[k] =
+ DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
+ if (BytePerPixelDETC[k] == 0) {
+ DisplayPipeRequestDeliveryTimeChroma[k] = 0;
+ DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (SourceScan[k] == dm_horz) {
+ req_per_swath_ub = swath_width_chroma_ub[k]
+ / BlockWidth256BytesC[k];
+ } else {
+ req_per_swath_ub = swath_width_chroma_ub[k]
+ / BlockHeight256BytesC[k];
+ }
+ DisplayPipeRequestDeliveryTimeChroma[k] =
+ DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
+ DisplayPipeRequestDeliveryTimeChromaPrefetch[k] =
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
+ }
+ }
+}
+
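+/*
+ * Spreads the time available to fetch a meta row or PTE row (nominal, vblank
+ * and immediate-flip cases) evenly over the number of meta chunks, PTE groups
+ * and VM requests in that row, and halves the VM times when more than two
+ * page table levels are in use.
+ */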
+static void CalculateMetaAndPTETimes(
+ unsigned int NumberOfActivePlanes,
+ bool GPUVMEnable,
+ unsigned int MetaChunkSize,
+ unsigned int MinMetaChunkSizeBytes,
+ unsigned int GPUVMMaxPageTableLevels,
+ unsigned int HTotal[],
+ double VRatio[],
+ double VRatioPrefetchY[],
+ double VRatioPrefetchC[],
+ double DestinationLinesToRequestRowInVBlank[],
+ double DestinationLinesToRequestRowInImmediateFlip[],
+ double DestinationLinesToRequestVMInVBlank[],
+ double DestinationLinesToRequestVMInImmediateFlip[],
+ bool DCCEnable[],
+ double PixelClock[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ enum scan_direction_class SourceScan[],
+ unsigned int dpte_row_height[],
+ unsigned int dpte_row_height_chroma[],
+ unsigned int meta_row_width[],
+ unsigned int meta_row_height[],
+ unsigned int meta_req_width[],
+ unsigned int meta_req_height[],
+ int dpte_group_bytes[],
+ unsigned int PTERequestSizeY[],
+ unsigned int PTERequestSizeC[],
+ unsigned int PixelPTEReqWidthY[],
+ unsigned int PixelPTEReqHeightY[],
+ unsigned int PixelPTEReqWidthC[],
+ unsigned int PixelPTEReqHeightC[],
+ unsigned int dpte_row_width_luma_ub[],
+ unsigned int dpte_row_width_chroma_ub[],
+ unsigned int vm_group_bytes[],
+ unsigned int dpde0_bytes_per_frame_ub_l[],
+ unsigned int dpde0_bytes_per_frame_ub_c[],
+ unsigned int meta_pte_bytes_per_frame_ub_l[],
+ unsigned int meta_pte_bytes_per_frame_ub_c[],
+ double DST_Y_PER_PTE_ROW_NOM_L[],
+ double DST_Y_PER_PTE_ROW_NOM_C[],
+ double DST_Y_PER_META_ROW_NOM_L[],
+ double TimePerMetaChunkNominal[],
+ double TimePerMetaChunkVBlank[],
+ double TimePerMetaChunkFlip[],
+ double time_per_pte_group_nom_luma[],
+ double time_per_pte_group_vblank_luma[],
+ double time_per_pte_group_flip_luma[],
+ double time_per_pte_group_nom_chroma[],
+ double time_per_pte_group_vblank_chroma[],
+ double time_per_pte_group_flip_chroma[],
+ double TimePerVMGroupVBlank[],
+ double TimePerVMGroupFlip[],
+ double TimePerVMRequestVBlank[],
+ double TimePerVMRequestFlip[])
+{
+ unsigned int meta_chunk_width;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_chunks_per_row_ub;
+ unsigned int dpte_group_width_luma;
+ unsigned int dpte_group_width_chroma;
+ unsigned int dpte_groups_per_row_luma_ub;
+ unsigned int dpte_groups_per_row_chroma_ub;
+ unsigned int num_group_per_lower_vm_stage;
+ unsigned int num_req_per_lower_vm_stage;
+ unsigned int k;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (GPUVMEnable == true) {
+ DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
+ if (BytePerPixelDETC[k] == 0) {
+ DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
+ } else {
+ DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / (VRatio[k] / 2);
+ }
+ } else {
+ DST_Y_PER_PTE_ROW_NOM_L[k] = 0;
+ DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
+ }
+ if (DCCEnable[k] == true) {
+ DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
+ } else {
+ DST_Y_PER_META_ROW_NOM_L[k] = 0;
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (DCCEnable[k] == true) {
+ meta_chunk_width = MetaChunkSize * 1024 * 256
+ / dml_ceil(BytePerPixelDETY[k], 1) / meta_row_height[k];
+ min_meta_chunk_width = MinMetaChunkSizeBytes * 256
+ / dml_ceil(BytePerPixelDETY[k], 1) / meta_row_height[k];
+ meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
+ meta_row_remainder = meta_row_width[k] % meta_chunk_width;
+ if (SourceScan[k] == dm_horz) {
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
+ } else {
+ meta_chunk_threshold = 2 * min_meta_chunk_width
+ - meta_req_height[k];
+ }
+ if (meta_row_remainder <= meta_chunk_threshold) {
+ meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ } else {
+ meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+ }
+ TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] * HTotal[k]
+ / PixelClock[k] / meta_chunks_per_row_ub;
+ TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k]
+ * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
+ TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k]
+ * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
+ } else {
+ TimePerMetaChunkNominal[k] = 0;
+ TimePerMetaChunkVBlank[k] = 0;
+ TimePerMetaChunkFlip[k] = 0;
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (GPUVMEnable == true) {
+ if (SourceScan[k] == dm_horz) {
+ dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k]
+ * PixelPTEReqWidthY[k];
+ } else {
+ dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k]
+ * PixelPTEReqHeightY[k];
+ }
+ dpte_groups_per_row_luma_ub = dml_ceil(
+ dpte_row_width_luma_ub[k] / dpte_group_width_luma,
+ 1);
+ time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
+ / PixelClock[k] / dpte_groups_per_row_luma_ub;
+ time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k]
+ * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
+ time_per_pte_group_flip_luma[k] =
+ DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k]
+ / PixelClock[k]
+ / dpte_groups_per_row_luma_ub;
+ if (BytePerPixelDETC[k] == 0) {
+ time_per_pte_group_nom_chroma[k] = 0;
+ time_per_pte_group_vblank_chroma[k] = 0;
+ time_per_pte_group_flip_chroma[k] = 0;
+ } else {
+ if (SourceScan[k] == dm_horz) {
+ dpte_group_width_chroma = dpte_group_bytes[k]
+ / PTERequestSizeC[k] * PixelPTEReqWidthC[k];
+ } else {
+ dpte_group_width_chroma = dpte_group_bytes[k]
+ / PTERequestSizeC[k]
+ * PixelPTEReqHeightC[k];
+ }
+ dpte_groups_per_row_chroma_ub = dml_ceil(
+ dpte_row_width_chroma_ub[k]
+ / dpte_group_width_chroma,
+ 1);
+ time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
+ * HTotal[k] / PixelClock[k]
+ / dpte_groups_per_row_chroma_ub;
+ time_per_pte_group_vblank_chroma[k] =
+ DestinationLinesToRequestRowInVBlank[k] * HTotal[k]
+ / PixelClock[k]
+ / dpte_groups_per_row_chroma_ub;
+ time_per_pte_group_flip_chroma[k] =
+ DestinationLinesToRequestRowInImmediateFlip[k]
+ * HTotal[k] / PixelClock[k]
+ / dpte_groups_per_row_chroma_ub;
+ }
+ } else {
+ time_per_pte_group_nom_luma[k] = 0;
+ time_per_pte_group_vblank_luma[k] = 0;
+ time_per_pte_group_flip_luma[k] = 0;
+ time_per_pte_group_nom_chroma[k] = 0;
+ time_per_pte_group_vblank_chroma[k] = 0;
+ time_per_pte_group_flip_chroma[k] = 0;
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
+ if (DCCEnable[k] == false) {
+ if (BytePerPixelDETC[k] > 0) {
+ num_group_per_lower_vm_stage =
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
+ } else {
+ num_group_per_lower_vm_stage =
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
+ }
+ } else {
+ if (GPUVMMaxPageTableLevels == 1) {
+ if (BytePerPixelDETC[k] > 0) {
+ num_group_per_lower_vm_stage =
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
+ } else {
+ num_group_per_lower_vm_stage =
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
+ }
+ } else {
+ if (BytePerPixelDETC[k] > 0) {
+ num_group_per_lower_vm_stage =
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
+ } else {
+ num_group_per_lower_vm_stage =
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
+ }
+ }
+ }
+
+ if (DCCEnable[k] == false) {
+ if (BytePerPixelDETC[k] > 0) {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k]
+ / 64 + dpde0_bytes_per_frame_ub_c[k] / 64;
+ } else {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k]
+ / 64;
+ }
+ } else {
+ if (GPUVMMaxPageTableLevels == 1) {
+ if (BytePerPixelDETC[k] > 0) {
+ num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64
+ + meta_pte_bytes_per_frame_ub_c[k] / 64;
+ } else {
+ num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
+ }
+ } else {
+ if (BytePerPixelDETC[k] > 0) {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ + dpde0_bytes_per_frame_ub_c[k] / 64
+ + meta_pte_bytes_per_frame_ub_l[k] / 64
+ + meta_pte_bytes_per_frame_ub_c[k] / 64;
+ } else {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ + meta_pte_bytes_per_frame_ub_l[k] / 64;
+ }
+ }
+ }
+
+ TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k]
+ / PixelClock[k] / num_group_per_lower_vm_stage;
+ TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k]
+ * HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
+ TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k]
+ * HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
+ TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k]
+ * HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
+
+ if (GPUVMMaxPageTableLevels > 2) {
+ TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
+ TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
+ TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
+ TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
+ }
+
+ } else {
+ TimePerVMGroupVBlank[k] = 0;
+ TimePerVMGroupFlip[k] = 0;
+ TimePerVMRequestVBlank[k] = 0;
+ TimePerVMRequestFlip[k] = 0;
+ }
+ }
+}
+
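+/*
+ * Extra latency = urgent round-trip/out-of-order latency plus the time to
+ * return one pixel chunk per active DPP (and one meta chunk per DCC-active
+ * DPP) at the return bandwidth; with GPUVM enabled, each plane additionally
+ * contributes its dpte group fetch time, scaled by the host VM inefficiency
+ * factor and the number of dynamic host VM page table levels.
+ */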
+static double CalculateExtraLatency(
+ double UrgentRoundTripAndOutOfOrderLatency,
+ int TotalNumberOfActiveDPP,
+ int PixelChunkSizeInKByte,
+ int TotalNumberOfDCCActiveDPP,
+ int MetaChunkSize,
+ double ReturnBW,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ int NumberOfDPP[],
+ int dpte_group_bytes[],
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ int HostVMMaxPageTableLevels,
+ int HostVMCachedPageTableLevels)
+{
+ double CalculateExtraLatency;
+ double HostVMInefficiencyFactor;
+ int HostVMDynamicLevels;
+
+ if (GPUVMEnable && HostVMEnable) {
+ HostVMInefficiencyFactor =
+ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevels = HostVMMaxPageTableLevels - HostVMCachedPageTableLevels;
+ } else {
+ HostVMInefficiencyFactor = 1;
+ HostVMDynamicLevels = 0;
+ }
+
+ CalculateExtraLatency = UrgentRoundTripAndOutOfOrderLatency
+ + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte
+ + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0
+ / ReturnBW;
+
+ if (GPUVMEnable) {
+ int k;
+
+ for (k = 0; k < NumberOfActivePlanes; k++) {
+ CalculateExtraLatency = CalculateExtraLatency
+ + NumberOfDPP[k] * dpte_group_bytes[k]
+ * (1 + 8 * HostVMDynamicLevels)
+ * HostVMInefficiencyFactor / ReturnBW;
+ }
+ }
+ return CalculateExtraLatency;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.h
new file mode 100644
index 000000000000..fb9548a2f894
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML21_DISPLAY_MODE_VBA_H__
+#define __DML21_DISPLAY_MODE_VBA_H__
+
+void dml21_recalculate(struct display_mode_lib *mode_lib);
+void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
+
+#endif /* __DML21_DISPLAY_MODE_VBA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
new file mode 100644
index 000000000000..a38baa73d484
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -0,0 +1,1821 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "../display_mode_lib.h"
+#include "../display_mode_vba.h"
+#include "../dml_inline_defs.h"
+#include "display_rq_dlg_calc_21.h"
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
+static void calculate_ttu_cursor(
+ struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp);
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ } else if (source_format == dm_444_8) {
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = false;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = true;
+
+ return ret_val;
+}
+
+static double get_refcyc_per_delivery(
+ struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ bool odm_combine,
+ unsigned int recout_width,
+ unsigned int hactive,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ if (odm_combine)
+ refcyc_per_delivery = (double) refclk_freq_in_mhz
+ * dml_min((double) recout_width, (double) hactive / 2.0)
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ else
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
+ dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
+ dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
+ dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(
+ struct display_mode_lib *mode_lib,
+ display_data_rq_regs_st *rq_regs,
+ const display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+static void extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(
+ dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ 1) - 3;
+
+ if (rq_param.yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(
+ dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ 1) - 3;
+ }
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ // FIXME: take the max between luma, chroma chunk size?
+ // okay for now, as we are setting chunk_bytes to 8kb anyways
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple(
+ (unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; // 2/3 to chroma
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
+static void handle_det_buf_split(
+ struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(
+ rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(
+ rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
+ req128_l = false;
+ req128_c = false;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { //128b request (for luma only for yuv420 8bpc)
+ req128_l = true;
+ req128_c = false;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+		// Note: assumption, the config that is passed in will fit into
+		//       the detile buffer.
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = false;
+ else
+ req128_l = true;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
+ dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
+ dml_print(
+ "DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
+ __func__,
+ full_swath_bytes_packed_l);
+ dml_print(
+ "DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
+ __func__,
+ full_swath_bytes_packed_c);
+}
+
+static void get_meta_and_pte_attr(
+ struct display_mode_lib *mode_lib,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ unsigned int data_pitch,
+ unsigned int meta_pitch,
+ unsigned int source_format,
+ unsigned int tiling,
+ unsigned int macro_tile_size,
+ unsigned int source_scan,
+ unsigned int hostvm_enable,
+ unsigned int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element;
+ unsigned int bytes_per_element_y = get_bytes_per_element(
+ (enum source_format_class) (source_format),
+ false);
+ unsigned int bytes_per_element_c = get_bytes_per_element(
+ (enum source_format_class) (source_format),
+ true);
+
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int blk256_width_y = 0;
+ unsigned int blk256_height_y = 0;
+ unsigned int blk256_width_c = 0;
+ unsigned int blk256_height_c = 0;
+ unsigned int log2_bytes_per_element;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+
+ //full sized meta chunk width in unit of data elements
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ const unsigned int dpte_buf_in_pte_reqs =
+ mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma + mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma;
+ const unsigned int pde_proc_buffer_size_64k_reqs =
+ mode_lib->ip.pde_proc_buffer_size_64k_reqs;
+
+ unsigned int log2_vmpg_height = 0;
+ unsigned int log2_vmpg_width = 0;
+ unsigned int log2_dpte_req_height_ptes = 0;
+ unsigned int log2_dpte_req_height = 0;
+ unsigned int log2_dpte_req_width = 0;
+ unsigned int log2_dpte_row_height_linear = 0;
+ unsigned int log2_dpte_row_height = 0;
+ unsigned int log2_dpte_group_width = 0;
+ unsigned int dpte_row_width_ub = 0;
+ unsigned int dpte_req_height = 0;
+ unsigned int dpte_req_width = 0;
+ unsigned int dpte_group_width = 0;
+ unsigned int log2_dpte_group_bytes = 0;
+ unsigned int log2_dpte_group_length = 0;
+ unsigned int pde_buf_entries;
+ bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
+
+ Calculate256BBlockSizes(
+ (enum source_format_class) (source_format),
+ (enum dm_swizzle_mode) (tiling),
+ bytes_per_element_y,
+ bytes_per_element_c,
+ &blk256_height_y,
+ &blk256_height_c,
+ &blk256_width_y,
+ &blk256_width_c);
+
+ if (!is_chroma) {
+ blk256_width = blk256_width_y;
+ blk256_height = blk256_height_y;
+ bytes_per_element = bytes_per_element_y;
+ } else {
+ blk256_width = blk256_width_c;
+ blk256_height = blk256_height_c;
+ bytes_per_element = bytes_per_element_c;
+ }
+
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+
+ dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
+ dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
+ dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
+ dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ // remember log rule
+ // "+" in log is multiply
+ // "-" in log is divide
+ // "/2" is like square root
+ // blk is vertical biased
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; // blk height of 1
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
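+	// Illustrative example (assumed values, not from HW): a 64KB macro tile
+	// with 4 bytes/element and an 8x8 256B block gives log2_blk_height =
+	// 3 + ceil((16 - 8) / 2) = 7 and log2_blk_width = 16 - 2 - 7 = 7,
+	// i.e. a 128x128-element block (128 * 128 * 4 = 64KB).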
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(
+ vp_height - 1,
+ blk256_height,
+ 1) + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ // -------
+ // meta
+ // -------
+ log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
+
+ // each 64b meta request for dcn is 8x8 meta elements and
+	// a meta element covers one 256b block of the data surface.
+	log2_meta_req_height = log2_blk256_height + 3;	// meta req is 8x8 byte, each byte represents 1 blk256
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
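+	// For the same illustrative 4 bytes/element case (8x8 256B block):
+	// log2_meta_req_height = 3 + 3 = 6 and log2_meta_req_width =
+	// 6 + 8 - 2 - 6 = 6, so one 64B meta request covers a 64x64-element
+	// region (8x8 meta bytes, each covering one 256B data block).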
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ // calculate upper bound of the meta_row_width
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ //full sized meta chunk width in unit of data elements
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1)
+ + meta_blk_height) * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(
+ meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
+ dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
+ dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
+ dml_print(
+ "DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_req_per_frame_ub);
+ dml_print(
+ "DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ // ------
+ // dpte
+ // ------
+ if (surf_linear) {
+ log2_vmpg_height = 0; // one line high
+ } else {
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ }
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
+ if (surf_linear) { //one 64B PTE request returns 8 PTEs
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_vmpg_width + 3;
+ log2_dpte_req_height = 0;
+ } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
+ //one 64B req gives 8x1 PTEs for 4KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
+ //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
+ log2_dpte_req_height_ptes = 4;
+ log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
+ log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
+	} else { //64KB page size and must be a 64KB tile block
+ //one 64B req gives 8x1 PTEs for 64KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ }
+
+ // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+	// log2_vmpg_width is how much 1 pte represents; now calculate how much a 64b pte req represents
+ // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ // calculate pitch dpte row buffer can hold
+ // round the result down to a power of two.
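+ // For YUV420 the PDE processing buffer is presumably shared between the luma and
+ // chroma planes, so only half of the entries are available per plane.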
+ pde_buf_entries =
+ yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
+ if (surf_linear) {
+ unsigned int dpte_row_height;
+
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(
+ dml_min(
+ 64 * 1024 * pde_buf_entries
+ / bytes_per_element,
+ dpte_buf_in_pte_reqs
+ * dpte_req_width)
+ / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
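+ // The dml_round_to_multiple(x - 1, req, 1) + req idiom below rounds the row span up
+ // to request granularity with extra slack, presumably for an unaligned start, which
+ // is why these values are tagged "_ub" (upper bound).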
+ dpte_row_height = 1 << log2_dpte_row_height;
+ dpte_row_width_ub = dml_round_to_multiple(
+ data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ // the upper bound of the dpte_row_width without dependency on viewport position follows.
+ // for tiled mode, the row height is the same as the req height and the row stores up to the vp size upper bound
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ }
+ if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
+ else
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
+
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ // dpte_group_bytes is reduced for the specific case of vertical
+ // access of a tiled surface whose dpte requests are 8x1 PTEs.
+
+ if (hostvm_enable)
+ rq_sizing_param->dpte_group_bytes = 512;
+ else {
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced: in this case a page fault can occur within a group
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ //full size
+ rq_sizing_param->dpte_group_bytes = 2048;
+ }
+
+ //since the pte request size is 64 bytes, the number of data pte requests per full-sized group is as follows.
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
+
+ // full sized data pte group width in elements
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
+ if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
+ log2_dpte_group_width = log2_dpte_group_width - 1;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ // the upper bound for the dpte groups per row is as follows.
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil(
+ (double) dpte_row_width_ub / dpte_group_width,
+ 1);
+}
+
+static void get_surf_rq_param(
+ struct display_mode_lib *mode_lib,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ const display_pipe_params_st pipe_param,
+ bool is_chroma)
+{
+ bool mode_422 = false;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+
+ // FIXME: check if ppe applies to both luma and chroma in the 422 case
+ if (is_chroma) {
+ vp_width = pipe_param.src.viewport_width_c / ppe;
+ vp_height = pipe_param.src.viewport_height_c;
+ data_pitch = pipe_param.src.data_pitch_c;
+ meta_pitch = pipe_param.src.meta_pitch_c;
+ } else {
+ vp_width = pipe_param.src.viewport_width / ppe;
+ vp_height = pipe_param.src.viewport_height;
+ data_pitch = pipe_param.src.data_pitch;
+ meta_pitch = pipe_param.src.meta_pitch;
+ }
+
+ if (pipe_param.dest.odm_combine) {
+ unsigned int access_dir;
+ unsigned int full_src_vp_width;
+ unsigned int hactive_half;
+ unsigned int src_hactive_half;
+ access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: 0 = horizontal, 1 = vertical
+ hactive_half = pipe_param.dest.hactive / 2;
+ if (is_chroma) {
+ full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
+ src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half;
+ } else {
+ full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
+ src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half;
+ }
+
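+ // With ODM combine each pipe fetches at most half of hactive, so clamp the viewport
+ // dimension along the access direction to the smaller of the full source recout
+ // width and the source-space equivalent of hactive/2.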
+ if (access_dir == 0) {
+ vp_width = dml_min(full_src_vp_width, src_hactive_half);
+ dml_print("DML_DLG: %s: vp_width = %d\n", __func__, vp_width);
+ } else {
+ vp_height = dml_min(full_src_vp_width, src_hactive_half);
+ dml_print("DML_DLG: %s: vp_height = %d\n", __func__, vp_height);
+
+ }
+ dml_print("DML_DLG: %s: full_src_vp_width = %d\n", __func__, full_src_vp_width);
+ dml_print("DML_DLG: %s: hactive_half = %d\n", __func__, hactive_half);
+ dml_print("DML_DLG: %s: src_hactive_half = %d\n", __func__, src_hactive_half);
+ }
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ if (pipe_param.src.hostvm)
+ rq_sizing_param->mpte_group_bytes = 512;
+ else
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ get_meta_and_pte_attr(
+ mode_lib,
+ rq_dlg_param,
+ rq_misc_param,
+ rq_sizing_param,
+ vp_width,
+ vp_height,
+ data_pitch,
+ meta_pitch,
+ pipe_param.src.source_format,
+ pipe_param.src.sw_mode,
+ pipe_param.src.macro_tile_size,
+ pipe_param.src.source_scan,
+ pipe_param.src.hostvm,
+ is_chroma);
+}
+
+static void dml_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_params_st pipe_param)
+{
+ // get param for luma surface
+ rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
+ || pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) {
+ // get param for chroma surface
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_param,
+ 1);
+ }
+
+ // calculate how to split the det buffer space between luma and chroma
+ handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
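+// Exposed to callers through the dml21_funcs table in display_mode_lib.c.
+// Illustrative call (variable names are hypothetical):
+//   display_rq_regs_st rq_regs = {0};
+//   dml21_rq_dlg_get_rq_reg(mode_lib, &rq_regs, pipe_param);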
+void dml21_rq_dlg_get_rq_reg(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_params_st pipe_param)
+{
+ display_rq_params_st rq_param = {0};
+
+ memset(rq_regs, 0, sizeof(*rq_regs));
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
+ extract_rq_regs(mode_lib, rq_regs, rq_param);
+
+ print__rq_regs_st(mode_lib, *rq_regs);
+}
+
+// Note: currently taken in as-is.
+// It would be nice to decouple this code from the hw register implementation and to
+// factor out the code that is repeated for luma and chroma.
+static void dml_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en)
+{
+ const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
+ const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
+ const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+ const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+ const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
+ const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
+
+ // -------------------------
+ // Section 1.15.2.1: OTG dependent Params
+ // -------------------------
+ // Timing
+ unsigned int htotal = dst->htotal;
+ // unsigned int hblank_start = dst.hblank_start; // TODO: Remove
+ unsigned int hblank_end = dst->hblank_end;
+ unsigned int vblank_start = dst->vblank_start;
+ unsigned int vblank_end = dst->vblank_end;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double dppclk_freq_in_mhz = clks->dppclk_mhz;
+ double dispclk_freq_in_mhz = clks->dispclk_mhz;
+ double refclk_freq_in_mhz = clks->refclk_mhz;
+ double pclk_freq_in_mhz = dst->pixel_rate_mhz;
+ bool interlaced = dst->interlaced;
+
+ double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+
+ // Scaling
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratio_l;
+ double hratio_c;
+ double vratio_l;
+ double vratio_c;
+ bool scl_enable;
+
+ double line_time_in_us;
+ // double vinit_l;
+ // double vinit_c;
+ // double vinit_bot_l;
+ // double vinit_bot_c;
+
+ // unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ // unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ // unsigned int meta_pte_bytes_per_frame_ub_l;
+ // unsigned int meta_bytes_per_row_ub_l;
+
+ // unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ // unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int meta_chunks_per_row_ub_c;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double dst_y_prefetch;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double dst_y_per_vm_flip;
+ double dst_y_per_row_flip;
+ double max_dst_y_per_vm_vblank;
+ double max_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int meta_row_height_c;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+
+ unsigned int full_recout_width;
+ double xfc_transfer_delay;
+ double xfc_precharge_delay;
+ double xfc_remote_surface_flip_latency;
+ double xfc_dst_y_delta_drq_limit;
+ double xfc_prefetch_margin;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ double refcyc_per_req_delivery_pre_cur1;
+ double refcyc_per_req_delivery_cur1;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
+ dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
+
+ dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+
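+ // The dml_pow(2, N) factors below scale values into the registers' fixed-point
+ // encodings with N fractional bits (e.g. 19 for ref_freq_to_pix_freq, 8 for
+ // refcyc_per_htotal).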
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
+
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
+ min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
+
+ dml_print(
+ "DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
+ __func__,
+ min_dcfclk_mhz);
+ dml_print(
+ "DML_DLG: %s: min_ttu_vblank = %3.2f\n",
+ __func__,
+ min_ttu_vblank);
+ dml_print(
+ "DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
+ __func__,
+ min_dst_y_ttu_vblank);
+ dml_print(
+ "DML_DLG: %s: t_calc_us = %3.2f\n",
+ __func__,
+ t_calc_us);
+ dml_print(
+ "DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ dml_print(
+ "DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ // -------------------------
+ // Section 1.15.2.2: Prefetch, Active and TTU
+ // -------------------------
+ // Prefetch Calc
+ // Source
+ // dcc_en = src.dcc;
+ dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
+ mode_422 = false; // FIXME
+ access_dir = (src->source_scan == dm_vert); // vp access direction: 0 = horizontal, 1 = vertical
+ // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
+ // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+ vp_height_l = src->viewport_height;
+ vp_width_l = src->viewport_width;
+ vp_height_c = src->viewport_height_c;
+ vp_width_c = src->viewport_width_c;
+
+ // Scaling
+ htaps_l = taps->htaps;
+ htaps_c = taps->htaps_c;
+ hratio_l = scl->hscl_ratio;
+ hratio_c = scl->hscl_ratio_c;
+ vratio_l = scl->vscl_ratio;
+ vratio_c = scl->vscl_ratio_c;
+ scl_enable = scl->scl_enable;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ vupdate_offset = dst->vupdate_offset;
+ vupdate_width = dst->vupdate_width;
+ vready_offset = dst->vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+
+ if (scl_enable)
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
+ else
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
+
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
+
+ if (dout->dsc_enable) {
+ double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dispclk_delay_subtotal += dsc_delay;
+ }
+
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
+
+ vstartup_start = dst->vstartup_start;
+ if (interlaced) {
+ if (vstartup_start / 2.0
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end / 2.0)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ } else {
+ if (vstartup_start
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ }
+
+ // TODO: Where is this coming from?
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
+ if (vstartup_start >= min_vblank) {
+ dml_print(
+ "WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
+ __func__,
+ vblank_start,
+ vblank_end);
+ dml_print(
+ "WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ dml_print(
+ "WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
+ dml_print(
+ "DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
+ __func__,
+ pixel_rate_delay_subtotal);
+ dml_print(
+ "DML_DLG: %s: dst_x_after_scaler = %d\n",
+ __func__,
+ dst_x_after_scaler);
+ dml_print(
+ "DML_DLG: %s: dst_y_after_scaler = %d\n",
+ __func__,
+ dst_y_after_scaler);
+
+ // Lwait
+ // TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
+ line_wait = mode_lib->soc.urgent_latency_pixel_data_only_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(
+ mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_pixel_data_only_us, // TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
+
+ dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
+
+ dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_row_vblank = get_dst_y_per_row_vblank(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ max_dst_y_per_vm_vblank = 32.0;
+ max_dst_y_per_row_vblank = 16.0;
+
+ // magic!
+ if (htotal <= 75) {
+ min_vblank = 300;
+ max_dst_y_per_vm_vblank = 100.0;
+ max_dst_y_per_row_vblank = 100.0;
+ }
+
+ dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
+ dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
+ dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
+
+ ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
+
+ vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
+ dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
+
+ // Active
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixels per element
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
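+ // Derive an effective horizontal scaling factor: doubled for low tap counts and
+ // clamped to 4.0; hscale_pixel_rate below is this factor times the dppclk.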
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratio_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l * 2.0;
+ } else {
+ if (hratio_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratio_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c * 2.0;
+ } else {
+ if (hratio_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+
+ full_recout_width = 0;
+ // In hsplit (MPC or ODM combine), determine the full recout width
+ if (src->is_hsplit) {
+ // This "hack" is only allowed (and valid) for MPC combine. In ODM
+ // combine, you MUST specify the full_recout_width...according to Oswin
+ if (dst->full_recout_width == 0 && !dst->odm_combine) {
+ dml_print(
+ "DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
+ __func__);
+ full_recout_width = dst->recout_width * 2; // assume half split for dcn1
+ } else
+ full_recout_width = dst->full_recout_width;
+ } else
+ full_recout_width = dst->recout_width;
+
+ // As of DCN2, mpc_combine and odm_combine are mutually exclusive
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: full_recout_width = %d\n", __func__, full_recout_width);
+ dml_print(
+ "DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
+ __func__,
+ hscale_pixel_rate_l);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ dml_print(
+ "DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_c);
+ }
+
+ // TTU - Luma / Chroma
+ if (access_dir) { // vertical access
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+
+ dml_print(
+ "DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+
+ dml_print(
+ "DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ // XFC
+ xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ xfc_precharge_delay = get_xfc_precharge_delay(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
+ xfc_prefetch_margin = get_xfc_prefetch_margin(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+
+ // TTU - Cursor
+ refcyc_per_req_delivery_pre_cur0 = 0.0;
+ refcyc_per_req_delivery_cur0 = 0.0;
+ if (src->num_cursors > 0) {
+ calculate_ttu_cursor(
+ mode_lib,
+ &refcyc_per_req_delivery_pre_cur0,
+ &refcyc_per_req_delivery_cur0,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur0_src_width,
+ (enum cursor_bpp) (src->cur0_bpp));
+ }
+
+ refcyc_per_req_delivery_pre_cur1 = 0.0;
+ refcyc_per_req_delivery_cur1 = 0.0;
+ if (src->num_cursors > 1) {
+ calculate_ttu_cursor(
+ mode_lib,
+ &refcyc_per_req_delivery_pre_cur1,
+ &refcyc_per_req_delivery_cur1,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur1_src_width,
+ (enum cursor_bpp) (src->cur1_bpp));
+ }
+
+ // TTU - Misc
+ // all hard-coded
+
+ // Assignment to register structures
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
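+ // The dst_y_* registers below are programmed in quarter-line units (2 fractional
+ // bits), matching the U15.2-style register formats mentioned in the warning further down.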
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
+
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ < (unsigned int)dml_pow(2, 13));
+ }
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0; assigned the same value as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
+ }
+
+ disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+
+ // Clamp to max for now
+ if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
+
+ if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
+
+ if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
+
+ if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
+
+ if (dual_plane) {
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
+ dml_print(
+ "DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
+ __func__,
+ disp_dlg_regs->dst_y_per_pte_row_nom_c,
+ (unsigned int)dml_pow(2, 17) - 1);
+ }
+ }
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0; assigned the same value as _l for now
+
+ dml_print(
+ "DML: Trow: %fus\n",
+ line_time_in_us * (double)dpte_row_height_l / (double)vratio_l);
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_nom_c =
+ (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ // TODO: Is this the right calculation? Does htotal need to be halved?
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
+ (unsigned int) ((double) meta_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
+ }
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_l, 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_l, 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_c, 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_c, 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
+
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+ disp_dlg_regs->dst_y_offset_cur0 = 0;
+ disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
+ disp_dlg_regs->dst_y_offset_cur1 = 0;
+
+ disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
+ disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
+ disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
+ disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(
+ xfc_prefetch_margin * refclk_freq_in_mhz, 1);
+
+ // the slave also has to have this value set to off
+ if (src->xfc_enable && !src->xfc_slave)
+ disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
+ else
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+
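+ // The refcyc_per_req_delivery* TTU registers carry 10 fractional bits, hence the
+ // dml_pow(2, 10) scaling below.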
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
+ * dml_pow(2, 10));
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
+
+void dml21_rq_dlg_get_dlg_reg(
+ struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ display_rq_params_st rq_param = {0};
+ display_dlg_sys_params_st dlg_sys_param = {0};
+
+ // Get the watermarks and Tex (t_extra).
+ dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
+ / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
+
+ print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+
+ // system parameter calculation done
+
+ dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_dlg_params(
+ mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ cstate_en,
+ pstate_en);
+ dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
+}
+
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
+{
+ memset(arb_param, 0, sizeof(*arb_param));
+ arb_param->max_req_outstanding = 256;
+ arb_param->min_req_outstanding = 68;
+ arb_param->sat_level_us = 60;
+}
+
+static void calculate_ttu_cursor(
+ struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp)
+{
+ unsigned int cur_src_width = cur_width;
+ unsigned int cur_req_size = 0;
+ unsigned int cur_req_width = 0;
+ double cur_width_ub = 0.0;
+ double cur_req_per_width = 0.0;
+ double hactive_cur = 0.0;
+
+ ASSERT(cur_src_width <= 256);
+
+ *refcyc_per_req_delivery_pre_cur = 0.0;
+ *refcyc_per_req_delivery_cur = 0.0;
+ if (cur_src_width > 0) {
+ unsigned int cur_bit_per_pixel = 0;
+
+ if (cur_bpp == dm_cur_2bit) {
+ cur_req_size = 64; // byte
+ cur_bit_per_pixel = 2;
+ } else { // 32bit
+ cur_bit_per_pixel = 32;
+ if (cur_src_width >= 1 && cur_src_width <= 16)
+ cur_req_size = 64;
+ else if (cur_src_width >= 17 && cur_src_width <= 31)
+ cur_req_size = 128;
+ else
+ cur_req_size = 256;
+ }
+
+ cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
+ cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
+ * (double) cur_req_width;
+ cur_req_per_width = cur_width_ub / (double) cur_req_width;
+ hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+
+ if (vratio_pre_l <= 1.0) {
+ *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ dml_print(
+ "DML_DLG: %s: cur_req_width = %d\n",
+ __func__,
+ cur_req_width);
+ dml_print(
+ "DML_DLG: %s: cur_width_ub = %3.2f\n",
+ __func__,
+ cur_width_ub);
+ dml_print(
+ "DML_DLG: %s: cur_req_per_width = %3.2f\n",
+ __func__,
+ cur_req_per_width);
+ dml_print(
+ "DML_DLG: %s: hactive_cur = %3.2f\n",
+ __func__,
+ hactive_cur);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_pre_cur);
+ dml_print(
+ "DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_cur);
+
+ ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
new file mode 100644
index 000000000000..83e95f8cbff2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__
+#define __DML21_DISPLAY_RQ_DLG_CALC_H__
+
+#include "../dml_common_defs.h"
+#include "../display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+
+// Function: dml21_rq_dlg_get_rq_reg
+// Main entry point for tests to get the register values out of this DML class.
+// This function calls the <get_rq_param> and <extract_rq_regs> functions to calculate
+// and then populate the rq_regs struct.
+// Input:
+// pipe_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_regs - struct that holds all the RQ register field values.
+// See also: <display_rq_regs_st>
+void dml21_rq_dlg_get_rq_reg(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_params_st pipe_param);
+
+// Function: dml21_rq_dlg_get_dlg_reg
+// Calculate and return the DLG and TTU register structs given the system setting.
+// Output:
+// dlg_regs - output DLG register struct
+// ttu_regs - output DLG TTU register struct
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param structs
+// num_pipes - number of active "pipes" or "routes"
+// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
+// cstate - 0: when calculating min_ttu_vblank it is assumed cstate is not required. 1: normal mode, cstate is considered.
+// Added for legacy or unrealistic timing tests.
+void dml21_rq_dlg_get_dlg_reg(
+ struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 0c2fab1e93b6..bfc2f39bd1ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -37,11 +37,14 @@ enum source_format_class {
dm_444_64 = 2,
dm_420_8 = 3,
dm_420_10 = 4,
- dm_422_8 = 5,
- dm_422_10 = 6,
- dm_444_8 = 7,
+ dm_420_12 = 5,
+ dm_422_8 = 6,
+ dm_422_10 = 7,
+ dm_444_8 = 8,
dm_mono_8 = dm_444_8,
- dm_mono_16 = dm_444_16
+ dm_mono_16 = dm_444_16,
+ dm_rgbe = 9,
+ dm_rgbe_alpha = 10,
};
enum output_bpc_class {
dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4
@@ -82,8 +85,8 @@ enum dm_swizzle_mode {
dm_sw_var_s_x = 29,
dm_sw_var_d_x = 30,
dm_sw_64kb_r_x,
- dm_sw_gfx7_2d_thin_lvp,
- dm_sw_gfx7_2d_thin_gl
+ dm_sw_gfx7_2d_thin_l_vp,
+ dm_sw_gfx7_2d_thin_gl,
};
enum lb_depth {
dm_lb_10 = 0, dm_lb_8 = 1, dm_lb_6 = 2, dm_lb_12 = 3, dm_lb_16 = 4,
@@ -112,7 +115,12 @@ enum output_standard {
enum mpc_combine_affinity {
dm_mpc_always_when_possible,
dm_mpc_reduce_voltage,
- dm_mpc_reduce_voltage_and_clocks
+ dm_mpc_reduce_voltage_and_clocks,
+ dm_mpc_never
+};
+
+enum RequestType {
+ REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
};
enum self_refresh_affinity {
@@ -131,9 +139,7 @@ enum dm_validation_status {
DML_FAIL_DIO_SUPPORT,
DML_FAIL_NOT_ENOUGH_DSC,
DML_FAIL_DSC_CLK_REQUIRED,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DML_FAIL_DSC_VALIDATION_FAILURE,
-#endif
DML_FAIL_URGENT_LATENCY,
DML_FAIL_REORDERING_BUFFER,
DML_FAIL_DISPCLK_DPPCLK,
@@ -157,4 +163,22 @@ enum writeback_config {
dm_whole_buffer_for_single_stream_interleave,
};
+enum odm_combine_mode {
+ dm_odm_combine_mode_disabled,
+ dm_odm_combine_mode_2to1,
+ dm_odm_combine_mode_4to1,
+};
+
+enum odm_combine_policy {
+ dm_odm_combine_policy_dal,
+ dm_odm_combine_policy_none,
+ dm_odm_combine_policy_2to1,
+ dm_odm_combine_policy_4to1,
+};
+
+enum immediate_flip_requirement {
+ dm_immediate_flip_not_required,
+ dm_immediate_flip_required,
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 91810c7d5cf5..2689401a03a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -25,19 +25,33 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/display_mode_vba_20.h"
#include "dcn20/display_rq_dlg_calc_20.h"
-#endif
+#include "dcn20/display_mode_vba_20v2.h"
+#include "dcn20/display_rq_dlg_calc_20v2.h"
+#include "dcn21/display_mode_vba_21.h"
+#include "dcn21/display_rq_dlg_calc_21.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull,
.recalculate = dml20_recalculate,
.rq_dlg_get_dlg_reg = dml20_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml20_rq_dlg_get_rq_reg
};
-#endif
+
+const struct dml_funcs dml20v2_funcs = {
+ .validate = dml20v2_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml20v2_recalculate,
+ .rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
+};
+
+const struct dml_funcs dml21_funcs = {
+ .validate = dml21_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml21_recalculate,
+ .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
+};
void dml_init_instance(struct display_mode_lib *lib,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
@@ -48,11 +62,16 @@ void dml_init_instance(struct display_mode_lib *lib,
lib->ip = *ip_params;
lib->project = project;
switch (project) {
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case DML_PROJECT_NAVI10:
lib->funcs = dml20_funcs;
break;
-#endif
+ case DML_PROJECT_NAVI10v2:
+ lib->funcs = dml20v2_funcs;
+ break;
+ case DML_PROJECT_DCN21:
+ lib->funcs = dml21_funcs;
+ break;
+
default:
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 5bf13d67f289..cf2758ca5b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,16 +27,14 @@
#include "dml_common_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "display_mode_vba.h"
-#endif
enum dml_project {
DML_PROJECT_UNDEFINED,
DML_PROJECT_RAVEN1,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
DML_PROJECT_NAVI10,
-#endif
+ DML_PROJECT_NAVI10v2,
+ DML_PROJECT_DCN21,
};
struct display_mode_lib;
@@ -66,9 +64,7 @@ struct display_mode_lib {
struct _vcs_dpi_ip_params_st ip;
struct _vcs_dpi_soc_bounding_box_st soc;
enum dml_project project;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct vba_vars_st vba;
-#endif
struct dal_logger *logger;
struct dml_funcs funcs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 5678472546ab..658f81e757e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st {
double dispclk_mhz;
double phyclk_mhz;
double dppclk_mhz;
+ double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
@@ -99,7 +100,9 @@ struct _vcs_dpi_soc_bounding_box_st {
unsigned int num_chans;
unsigned int vmm_page_size_bytes;
unsigned int hostvm_min_page_size_bytes;
+ unsigned int gpuvm_min_page_size_bytes;
double dram_clock_change_latency_us;
+ double dummy_pstate_latency_us;
double writeback_dram_clock_change_latency_us;
unsigned int return_bus_width_bytes;
unsigned int voltage_override;
@@ -108,6 +111,10 @@ struct _vcs_dpi_soc_bounding_box_st {
int use_urgent_burst_bw;
unsigned int num_states;
struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ bool do_urgent_latency_adjustment;
+ double urgent_latency_adjustment_fabric_clock_component_us;
+ double urgent_latency_adjustment_fabric_clock_reference_mhz;
+ bool disable_dram_clock_change_vactive_support;
};
struct _vcs_dpi_ip_params_st {
@@ -141,7 +148,6 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int writeback_line_buffer_buffer_size;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
unsigned int writeback_10bpc420_supported;
double writeback_max_hscl_ratio;
double writeback_max_vscl_ratio;
@@ -151,7 +157,6 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_max_vscl_taps;
unsigned int writeback_line_buffer_luma_buffer_size;
unsigned int writeback_line_buffer_chroma_buffer_size;
-#endif
unsigned int max_page_table_levels;
unsigned int max_num_dpp;
@@ -210,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_format;
unsigned char dcc;
unsigned int dcc_rate;
+ unsigned int dcc_rate_chroma;
unsigned char dcc_use_global;
unsigned char vm;
bool gpuvm; // gpuvm enabled
@@ -221,6 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_scan;
int sw_mode;
int macro_tile_size;
+ unsigned int surface_width_y;
+ unsigned int surface_height_y;
+ unsigned int surface_width_c;
+ unsigned int surface_height_c;
unsigned int viewport_width;
unsigned int viewport_height;
unsigned int viewport_y_y;
@@ -265,7 +275,7 @@ struct writeback_st {
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
- int output_bpp;
+ double output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
@@ -273,6 +283,7 @@ struct _vcs_dpi_display_output_params_st {
int output_type;
int output_format;
int dsc_slices;
+ int max_audio_sample_rate;
struct writeback_st wb;
};
@@ -314,10 +325,11 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
+ unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
- unsigned char odm_combine;
+ unsigned int odm_combine;
unsigned char use_maximum_vstartup;
unsigned int vtotal_max;
unsigned int vtotal_min;
@@ -396,6 +408,7 @@ struct _vcs_dpi_display_rq_misc_params_st {
struct _vcs_dpi_display_rq_params_st {
unsigned char yuv420;
unsigned char yuv420_10bpc;
+ unsigned char rgbe_alpha;
display_rq_misc_params_st misc;
display_rq_sizing_params_st sizing;
display_rq_dlg_params_st dlg;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 4d2a1262d9db..b3c96d9b472f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "display_mode_lib.h"
#include "display_mode_vba.h"
@@ -222,13 +221,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
+ mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
+ mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
+ mode_lib->vba.DummyPStateCheck;
+
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
- mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024;
+ mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;
mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024;
// Set the voltage scaling clocks as the defaults. Most of these will
// be set to different values by the test
@@ -261,7 +264,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts;
//mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+ mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
+ mode_lib->vba.MinVoltageLevel = 0;
+ mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
+
+ mode_lib->vba.DoUrgentLatencyAdjustment =
+ soc->do_urgent_latency_adjustment;
+ mode_lib->vba.UrgentLatencyAdjustmentFabricClockComponent =
+ soc->urgent_latency_adjustment_fabric_clock_component_us;
+ mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference =
+ soc->urgent_latency_adjustment_fabric_clock_reference_mhz;
}
static void fetch_ip_params(struct display_mode_lib *mode_lib)
@@ -296,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes;
mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size;
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = 5;
mode_lib->vba.WritebackChromaLineBufferWidth =
ip->writeback_chroma_line_buffer_width_pixels;
@@ -368,6 +379,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -385,8 +397,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
src->viewport_y_c;
mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height;
+ mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width;
mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c;
+ mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c;
mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
@@ -410,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
ip->dcc_supported : src->dcc && ip->dcc_supported;
mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */
- mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0;
- mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0;
+ mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+ mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;
mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum source_format_class) (src->source_format);
@@ -425,6 +439,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
@@ -437,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->dp_lanes;
/* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */
mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] =
- 44.1 * 1000;
+ dout->max_audio_sample_rate;
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
@@ -457,6 +473,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->wb.wb_dst_width;
mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_dst_height;
+ mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_hratio;
+ mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vratio;
mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum source_format_class) (dout->wb.wb_pixel_format);
mode_lib->vba.WritebackHTaps[mode_lib->vba.NumberOfActivePlanes] =
@@ -568,6 +588,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
if (src->is_hsplit) {
for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
+ display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest;
+ display_output_params_st *dout_k = &pipes[j].dout;
if (src_k->is_hsplit && !visited[k]
&& src->hsplit_grp == src_k->hsplit_grp) {
@@ -575,12 +597,21 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
- == dm_horz)
+ == dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_width;
- else
+ mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_width;
+ mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=
+ dst_k->recout_width;
+ } else {
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_height;
+ mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_height;
+ }
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+ dout_k->dsc_slices;
visited[k] = true;
}
@@ -786,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
unsigned int total_pipes = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
- mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
+ if (mode_lib->vba.ReturnBW == 0)
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
fetch_socbb_params(mode_lib);
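ReturnBWPerState is now indexed by both voltage state and MPC-combine setting, so the lookup above falls back to the no-combine column when the combined entry was never populated. A standalone sketch of that selection (function name hypothetical):

static double pick_return_bw(double bw_per_state[][2], unsigned int voltage_level,
			     unsigned int max_mpc_comb)
{
	double bw = bw_per_state[voltage_level][max_mpc_comb];

	if (bw == 0)	/* entry not filled in for this combine mode */
		bw = bw_per_state[voltage_level][0];
	return bw;
}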
@@ -836,4 +869,3 @@ double CalculateWriteBackDISPCLK(
return CalculateWriteBackDISPCLK;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 0347f74cda3a..2875efd85467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#ifndef __DML2_DISPLAY_MODE_VBA_H__
#define __DML2_DISPLAY_MODE_VBA_H__
@@ -155,7 +154,10 @@ struct vba_vars_st {
double UrgentLatencySupportUsChroma;
unsigned int DSCFormatFactor;
+ bool DummyPStateCheck;
+ bool DRAMClockChangeSupportsVActive;
bool PrefetchModeSupported;
+ bool PrefetchAndImmediateFlipSupported;
enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only
double XFCRemoteSurfaceFlipDelay;
double TInitXFill;
@@ -290,6 +292,7 @@ struct vba_vars_st {
double PixelClock[DC__NUM_DPP__MAX];
double PixelClockBackEnd[DC__NUM_DPP__MAX];
bool DCCEnable[DC__NUM_DPP__MAX];
+ bool FECEnable[DC__NUM_DPP__MAX];
unsigned int DCCMetaPitchY[DC__NUM_DPP__MAX];
unsigned int DCCMetaPitchC[DC__NUM_DPP__MAX];
enum scan_direction_class SourceScan[DC__NUM_DPP__MAX];
@@ -316,7 +319,7 @@ struct vba_vars_st {
unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
double DCCRate[DC__NUM_DPP__MAX];
double AverageDCCCompressionRate;
- bool ODMCombineEnabled[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX];
double OutputBpp[DC__NUM_DPP__MAX];
bool DSCEnabled[DC__NUM_DPP__MAX];
unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
@@ -344,6 +347,7 @@ struct vba_vars_st {
unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
double BandwidthAvailableForImmediateFlip;
unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2];
+ unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2];
unsigned int MinPrefetchMode;
unsigned int MaxPrefetchMode;
bool AnyLinesForVMOrRowTooLarge;
@@ -385,6 +389,7 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
+ bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
@@ -392,9 +397,11 @@ struct vba_vars_st {
bool WritebackLumaAndChromaScalingSupported;
bool Cursor64BppSupport;
double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+ double DCFCLKState[DC__VOLTAGE_STATES + 1][2];
double FabricClockPerState[DC__VOLTAGE_STATES + 1];
double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
+ double DTBCLKPerState[DC__VOLTAGE_STATES + 1];
double MaxDppclk[DC__VOLTAGE_STATES + 1];
double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
@@ -439,7 +446,7 @@ struct vba_vars_st {
double OutputLinkDPLanes[DC__NUM_DPP__MAX];
double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only
double ImmediateFlipBW[DC__NUM_DPP__MAX];
- double MaxMaxVStartup;
+ double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2];
double WritebackLumaVExtra;
double WritebackChromaVExtra;
@@ -466,7 +473,7 @@ struct vba_vars_st {
double RoundedUpMaxSwathSizeBytesC;
double EffectiveDETLBLinesLuma;
double EffectiveDETLBLinesChroma;
- double ProjectedDCFCLKDeepSleep;
+ double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2];
double PDEAndMetaPTEBytesPerFrameY;
double PDEAndMetaPTEBytesPerFrameC;
unsigned int MetaRowBytesY;
@@ -484,11 +491,11 @@ struct vba_vars_st {
double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output
/* ms locals */
- double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1];
+ double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2];
unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
int NoOfDPPThisState[DC__NUM_DPP__MAX];
- bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathWidthYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX];
@@ -500,7 +507,7 @@ struct vba_vars_st {
double RequiredDPPCLKThisState[DC__NUM_DPP__MAX];
bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2];
bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2];
@@ -509,21 +516,22 @@ struct vba_vars_st {
unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2];
unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2];
bool ModeSupport[DC__VOLTAGE_STATES + 1][2];
- double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+ double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];
bool DIOSupport[DC__VOLTAGE_STATES + 1];
bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
+ bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
- bool ROBSupport[DC__VOLTAGE_STATES + 1];
+ bool ROBSupport[DC__VOLTAGE_STATES + 1][2];
bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2];
- bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1];
- double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1];
+ bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2];
+ double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2];
double PrefetchBW[DC__NUM_DPP__MAX];
- double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
- double MetaRowBytes[DC__NUM_DPP__MAX];
- double DPTEBytesPerRow[DC__NUM_DPP__MAX];
- double PrefetchLinesY[DC__NUM_DPP__MAX];
- double PrefetchLinesC[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
double PrefillY[DC__NUM_DPP__MAX];
@@ -532,7 +540,7 @@ struct vba_vars_st {
double LinesForMetaPTE[DC__NUM_DPP__MAX];
double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
- unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
double BytePerPixelInDETY[DC__NUM_DPP__MAX];
double BytePerPixelInDETC[DC__NUM_DPP__MAX];
bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
@@ -540,7 +548,7 @@ struct vba_vars_st {
double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2];
unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
@@ -555,7 +563,7 @@ struct vba_vars_st {
double WriteBandwidth[DC__NUM_DPP__MAX];
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
- double MaximumVStartup[DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
@@ -572,7 +580,7 @@ struct vba_vars_st {
bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2];
double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
unsigned int vm_group_bytes[DC__NUM_DPP__MAX];
- long dpte_group_bytes[DC__NUM_DPP__MAX];
+ unsigned int dpte_group_bytes[DC__NUM_DPP__MAX];
unsigned int dpte_row_height[DC__NUM_DPP__MAX];
unsigned int meta_req_height[DC__NUM_DPP__MAX];
unsigned int meta_req_width[DC__NUM_DPP__MAX];
@@ -598,13 +606,14 @@ struct vba_vars_st {
double UrgentBurstFactorChroma[DC__NUM_DPP__MAX];
double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX];
+
bool MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
double SwathWidthCSingleDPP[DC__NUM_DPP__MAX];
double MaximumSwathWidthInLineBufferLuma;
double MaximumSwathWidthInLineBufferChroma;
double MaximumSwathWidthLuma[DC__NUM_DPP__MAX];
double MaximumSwathWidthChroma[DC__NUM_DPP__MAX];
- bool odm_combine_dummy[DC__NUM_DPP__MAX];
+ enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX];
double dummy1[DC__NUM_DPP__MAX];
double dummy2[DC__NUM_DPP__MAX];
double dummy3[DC__NUM_DPP__MAX];
@@ -614,9 +623,9 @@ struct vba_vars_st {
double dummy7[DC__NUM_DPP__MAX];
double dummy8[DC__NUM_DPP__MAX];
unsigned int dummyinteger1ms[DC__NUM_DPP__MAX];
- unsigned int dummyinteger2ms[DC__NUM_DPP__MAX];
+ double dummyinteger2ms[DC__NUM_DPP__MAX];
unsigned int dummyinteger3[DC__NUM_DPP__MAX];
- unsigned int dummyinteger4;
+ unsigned int dummyinteger4[DC__NUM_DPP__MAX];
unsigned int dummyinteger5;
unsigned int dummyinteger6;
unsigned int dummyinteger7;
@@ -625,6 +634,10 @@ struct vba_vars_st {
unsigned int dummyinteger10;
unsigned int dummyinteger11;
unsigned int dummyinteger12;
+ unsigned int dummyintegerarr1[DC__NUM_DPP__MAX];
+ unsigned int dummyintegerarr2[DC__NUM_DPP__MAX];
+ unsigned int dummyintegerarr3[DC__NUM_DPP__MAX];
+ unsigned int dummyintegerarr4[DC__NUM_DPP__MAX];
bool dummysinglestring;
bool SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double PlaneRequiredDISPCLKWithODMCombine2To1;
@@ -632,18 +645,19 @@ struct vba_vars_st {
unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2];
bool LinkDSCEnable;
bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1];
- bool ODMCombineEnableThisState[DC__NUM_DPP__MAX];
- unsigned int SwathWidthCThisState[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX];
+ double SwathWidthCThisState[DC__NUM_DPP__MAX];
bool ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitchY[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitchC[DC__NUM_DPP__MAX];
unsigned int NotEnoughUrgentLatencyHiding;
unsigned int NotEnoughUrgentLatencyHidingPre;
- long PTEBufferSizeInRequestsForLuma;
+ int PTEBufferSizeInRequestsForLuma;
+ int PTEBufferSizeInRequestsForChroma;
// Missing from VBA
- long dpte_group_bytes_chroma;
+ int dpte_group_bytes_chroma;
unsigned int vm_group_bytes_chroma;
double dst_x_after_scaler;
double dst_y_after_scaler;
@@ -668,8 +682,8 @@ struct vba_vars_st {
double MinTTUVBlank[DC__NUM_DPP__MAX];
double BytePerPixelDETY[DC__NUM_DPP__MAX];
double BytePerPixelDETC[DC__NUM_DPP__MAX];
- unsigned int SwathWidthY[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+ double SwathWidthY[DC__NUM_DPP__MAX];
+ double SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
double CursorRequestDeliveryTime[DC__NUM_DPP__MAX];
double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX];
double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
@@ -745,12 +759,12 @@ struct vba_vars_st {
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
- unsigned int SwathWidthC[DC__NUM_DPP__MAX];
+ double SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
+ double SwathWidthC[DC__NUM_DPP__MAX];
unsigned int BytePerPixelY[DC__NUM_DPP__MAX];
unsigned int BytePerPixelC[DC__NUM_DPP__MAX];
- long dummyinteger1;
- long dummyinteger2;
+ unsigned int dummyinteger1;
+ unsigned int dummyinteger2;
double FinalDRAMClockChangeLatency;
double Tdmdl_vm[DC__NUM_DPP__MAX];
double Tdmdl[DC__NUM_DPP__MAX];
@@ -764,6 +778,7 @@ struct vba_vars_st {
unsigned int DCCCMaxCompressedBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX];
double VStartupMargin;
+ bool NotEnoughTimeForDynamicMetadata;
/* Missing from VBA */
unsigned int MaximumMaxVStartupLines;
@@ -787,6 +802,9 @@ struct vba_vars_st {
unsigned int PDEProcessingBufIn64KBReqs;
double MaxTotalVActiveRDBandwidth;
+ bool DoUrgentLatencyAdjustment;
+ double UrgentLatencyAdjustmentFabricClockComponent;
+ double UrgentLatencyAdjustmentFabricClockReference;
double MinUrgentLatencySupportUs;
double MinFullDETBufferingTime;
double AverageReadBandwidthGBytePerSecond;
@@ -796,11 +814,13 @@ struct vba_vars_st {
unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX];
double HRatioChroma[DC__NUM_DPP__MAX];
double VRatioChroma[DC__NUM_DPP__MAX];
- long WritebackSourceWidth[DC__NUM_DPP__MAX];
+ int WritebackSourceWidth[DC__NUM_DPP__MAX];
bool ModeIsSupported;
bool ODMCombine4To1Supported;
+ unsigned int SurfaceWidthY[DC__NUM_DPP__MAX];
+ unsigned int SurfaceWidthC[DC__NUM_DPP__MAX];
unsigned int SurfaceHeightY[DC__NUM_DPP__MAX];
unsigned int SurfaceHeightC[DC__NUM_DPP__MAX];
unsigned int WritebackHTaps[DC__NUM_DPP__MAX];
@@ -830,6 +850,58 @@ struct vba_vars_st {
unsigned int MaxNumHDMIFRLOutputs;
int AudioSampleRate[DC__NUM_DPP__MAX];
int AudioSampleLayout[DC__NUM_DPP__MAX];
+
+ int PercentMarginOverMinimumRequiredDCFCLK;
+ bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2];
+ enum immediate_flip_requirement ImmediateFlipRequirement;
+ double DETBufferSizeYThisState[DC__NUM_DPP__MAX];
+ double DETBufferSizeCThisState[DC__NUM_DPP__MAX];
+ bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX];
+ bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX];
+ double UrgLatency[DC__VOLTAGE_STATES + 1];
+ double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2];
+ unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
+ bool UseMinimumRequiredDCFCLK;
+ double WritebackDelayTime[DC__NUM_DPP__MAX];
+ unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
+ unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
+ unsigned int dummyinteger15;
+ unsigned int dummyinteger16;
+ unsigned int dummyinteger17;
+ unsigned int dummyinteger18;
+ unsigned int dummyinteger19;
+ unsigned int dummyinteger20;
+ unsigned int dummyinteger21;
+ unsigned int dummyinteger22;
+ unsigned int dummyinteger23;
+ unsigned int dummyinteger24;
+ unsigned int dummyinteger25;
+ unsigned int dummyinteger26;
+ unsigned int dummyinteger27;
+ unsigned int dummyinteger28;
+ unsigned int dummyinteger29;
+ bool dummystring[DC__NUM_DPP__MAX];
+ double BPP;
+ enum odm_combine_policy ODMCombinePolicy;
};
bool CalculateMinAndMaxPrefetchMode(
@@ -851,4 +923,3 @@ double CalculateWriteBackDISPCLK(
unsigned int WritebackChromaLineBufferWidth);
#endif /* _DML2_DISPLAY_MODE_VBA_H_ */
-#endif
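Most of the per-state tables in vba_vars_st grew a second dimension of size 2; the extra index selects between the two MPC-combine configurations evaluated for each voltage state. A hedged sketch of scanning such a table for the first supported entry (the loop is illustrative, not the driver's actual mode-support walk):

static int first_supported_state(bool mode_support[DC__VOLTAGE_STATES + 1][2],
				 int num_states, int *mpc_comb_out)
{
	int state, comb;

	for (state = 0; state < num_states; state++)
		for (comb = 0; comb < 2; comb++)
			if (mode_support[state][comb]) {
				*mpc_comb_out = comb;
				return state;
			}
	return -1;	/* no voltage state supports the mode */
}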
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index ad8571f5a142..4c3e9cc30167 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -243,7 +243,7 @@ void dml1_extract_rq_regs(
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- /* FIXME: take the max between luma, chroma chunk size?
+ /* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
@@ -602,7 +602,7 @@ static void get_surf_rq_param(
unsigned int log2_dpte_group_length;
unsigned int func_meta_row_height, func_dpte_row_height;
- /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ /* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params(
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
- prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
@@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params(
dcc_en = e2e_pipe_param.pipe.src.dcc;
dual_plane = is_dual_plane(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format);
- mode_422 = 0; /* FIXME */
+ mode_422 = 0; /* TODO */
access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format,
@@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params(
cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
* (double) cur0_req_width;
cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
- hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */
if (vratio_pre_l <= 1.0) {
refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
index b953b02a1512..723af0b2dda0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -24,7 +24,7 @@
*/
#include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
#include "dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index eca140da13d8..ded71ea82413 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -27,7 +27,7 @@
#define __DML_INLINE_DEFS_H__
#include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
#include "dml_logger.h"
static inline double dml_min(double a, double b)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index e019cd9447e8..3f66868df171 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,18 +1,35 @@
+# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
+ifdef CONFIG_X86
+dsc_ccflags := -mhard-float -msse
endif
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_PPC64
+dsc_ccflags := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
+dsc_ccflags += -msse2
+endif
+endif
-CFLAGS_rc_calc.o := $(dsc_ccflags)
-CFLAGS_rc_calc_dpi.o := $(dsc_ccflags)
-CFLAGS_codec_main_amd.o := $(dsc_ccflags)
-CFLAGS_dc_dsc.o := $(dsc_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dsc/dc_dsc.o := $(dsc_ccflags)
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index ef5f84a144c3..87d682d25278 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -22,14 +22,69 @@
* Author: AMD
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dc.h"
-#include "core_types.h"
+#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
+#include "dc.h"
/* This module's internal functions */
+/* default DSC policy target bitrate limit is 16bpp */
+static uint32_t dsc_policy_max_target_bpp_limit = 16;
+
+static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC) {
+ kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
+ kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ return kbps;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-Only reduces bandwidth to 1/3 compared to RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
+
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -161,16 +216,19 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
}
static void get_dsc_enc_caps(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
// This is a static HW query, so we can use any DSC
- struct display_stream_compressor *dsc = dc->res_pool->dscs[0];
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc)
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ if (dsc) {
+ if (!dsc->ctx->dc->debug.disable_dsc)
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ }
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -241,14 +299,6 @@ static bool intersect_dsc_caps(
return true;
}
-struct dc_dsc_policy {
- bool use_min_slices_h;
- int max_slices_h; // Maximum available if 0
- int num_slices_v;
- int max_target_bpp;
- int min_target_bpp; // Minimum target bits per pixel
-};
-
static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
{
return (value + 9) / 10;
@@ -270,19 +320,6 @@ static inline uint32_t calc_dsc_bpp_x16(uint32_t stream_bandwidth_kbps, uint32_t
return dsc_target_bpp_x16;
}
-const struct dc_dsc_policy dsc_policy = {
- .use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock
- .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode)
- /* DSC Policy: Number of vertical slices set to 2 for no particular reason.
- * Seems small enough to not affect the quality too much, while still providing some error
- * propagation control (which may also help debugging).
- */
- .num_slices_v = 16,
- .max_target_bpp = 16,
- .min_target_bpp = 8,
-};
-
-
/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock
* and uncompressed bandwidth.
*/
@@ -294,7 +331,7 @@ static void get_dsc_bandwidth_range(
struct dc_dsc_bw_range *range)
{
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing);
/* max dsc target bpp */
range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz);
@@ -516,6 +553,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ int min_slice_height_override,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -528,11 +566,13 @@ static bool setup_dsc_config(
int sink_per_slice_throughput_mps;
int branch_max_throughput_mps = 0;
bool is_dsc_possible = false;
- int num_slices_v;
int pic_height;
+ int slice_height;
+ struct dc_dsc_policy policy;
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
+ dc_dsc_get_policy_for_timing(timing, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -548,7 +588,12 @@ static bool setup_dsc_config(
goto done;
if (target_bandwidth_kbps > 0) {
- is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp);
+ is_dsc_possible = decide_dsc_target_bpp_x16(
+ &policy,
+ &dsc_common_caps,
+ target_bandwidth_kbps,
+ timing,
+ &target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
if (!is_dsc_possible)
@@ -615,7 +660,7 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- // DSC slicing
+ // Slice width (i.e. number of slices per line)
max_slices_h = get_max_dsc_slices(dsc_common_caps.slice_caps);
while (max_slices_h > 0) {
@@ -650,20 +695,20 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- if (dsc_policy.use_min_slices_h) {
+ if (policy.use_min_slices_h) {
if (min_slices_h > 0)
num_slices_h = min_slices_h;
else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out
- if (dsc_policy.max_slices_h)
- num_slices_h = min(dsc_policy.max_slices_h, max_slices_h);
+ if (policy.max_slices_h)
+ num_slices_h = min(policy.max_slices_h, max_slices_h);
else
num_slices_h = max_slices_h;
} else
is_dsc_possible = false;
} else {
if (max_slices_h > 0) {
- if (dsc_policy.max_slices_h)
- num_slices_h = min(dsc_policy.max_slices_h, max_slices_h);
+ if (policy.max_slices_h)
+ num_slices_h = min(policy.max_slices_h, max_slices_h);
else
num_slices_h = max_slices_h;
} else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible
@@ -678,29 +723,29 @@ static bool setup_dsc_config(
dsc_cfg->num_slices_h = num_slices_h;
slice_width = pic_width / num_slices_h;
- // Vertical number of slices: start from policy and pick the first one that height is divisible by.
+ is_dsc_possible = slice_width <= dsc_common_caps.max_slice_width;
+ if (!is_dsc_possible)
+ goto done;
+
+ // Slice height (i.e. number of slices per column): start from the policy minimum and pick the first slice height that the picture height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- num_slices_v = dsc_policy.num_slices_v;
- if (num_slices_v < 1)
- num_slices_v = 1;
-
- while (num_slices_v >= 1) {
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- int slice_height = pic_height / num_slices_v;
- if (pic_height % num_slices_v == 0 && slice_height % 2 == 0)
- break;
- } else if (pic_height % num_slices_v == 0)
- break;
+ if (min_slice_height_override == 0)
+ slice_height = min(policy.min_slice_height, pic_height);
+ else
+ slice_height = min(min_slice_height_override, pic_height);
- num_slices_v--;
- }
+ while (slice_height < pic_height && (pic_height % slice_height != 0 ||
+ (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
+ slice_height++;
- dsc_cfg->num_slices_v = num_slices_v;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) // For the case when pic_height < dsc_policy.min_slice_height
+ is_dsc_possible = (slice_height % 2 == 0);
- is_dsc_possible = slice_width <= dsc_common_caps.max_slice_width;
if (!is_dsc_possible)
goto done;
+ dsc_cfg->num_slices_v = pic_height/slice_height;
+
// Final decision: can we do DSC or not?
if (is_dsc_possible) {
// Fill out the rest of DSC settings
@@ -716,7 +761,7 @@ done:
return is_dsc_possible;
}
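The reworked vertical slicing above starts from the policy minimum slice height (108 lines by default, or the caller's override) and walks up until the picture height divides evenly; for a 2160-line mode the default already divides, giving 2160 / 108 = 20 vertical slices. A simplified sketch of the search (4:2:0 parity check included, names hypothetical):

static int pick_slice_height(int pic_height, int min_slice_height, bool is_420)
{
	int h = min_slice_height < pic_height ? min_slice_height : pic_height;

	while (h < pic_height &&
	       (pic_height % h != 0 || (is_420 && (h % 2) != 0)))
		h++;
	return h;	/* num_slices_v = pic_height / h */
}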
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
{
if (!dpcd_dsc_basic_data)
return false;
@@ -769,6 +814,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div))
return false;
+ if (dc->debug.dsc_bpp_increment_div) {
+ /* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid
+ * values we accept them all and round the override into range. This makes the check against 0
+ * above redundant, but that check makes it explicit that the override only applies when non-zero.
+ */
+ if (dc->debug.dsc_bpp_increment_div >= 1)
+ dsc_sink_caps->bpp_increment_div = 1;
+ if (dc->debug.dsc_bpp_increment_div >= 2)
+ dsc_sink_caps->bpp_increment_div = 2;
+ if (dc->debug.dsc_bpp_increment_div >= 4)
+ dsc_sink_caps->bpp_increment_div = 4;
+ if (dc->debug.dsc_bpp_increment_div >= 8)
+ dsc_sink_caps->bpp_increment_div = 8;
+ if (dc->debug.dsc_bpp_increment_div >= 16)
+ dsc_sink_caps->bpp_increment_div = 16;
+ }
+
/* Extended caps */
if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
@@ -809,7 +871,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
* If DSC is not possible, leave '*range' untouched.
*/
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_bpp,
const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -821,16 +884,14 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
- is_dsc_possible = setup_dsc_config(dsc_sink_caps,
- &dsc_enc_caps,
- 0,
- timing, &config);
+ is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
+ dsc_min_slice_height_override, &config);
if (is_dsc_possible)
get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range);
@@ -839,8 +900,9 @@ bool dc_dsc_compute_bandwidth_range(
}
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -848,11 +910,74 @@ bool dc_dsc_compute_config(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_cfg);
+ timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
}
-#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
+
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy)
+{
+ uint32_t bpc = 0;
+
+ policy->min_target_bpp = 0;
+ policy->max_target_bpp = 0;
+
+ /* DSC Policy: Use minimum number of slices that fits the pixel clock */
+ policy->use_min_slices_h = true;
+
+ /* DSC Policy: Use max available slices
+ * (in our case 4 or 8, depending on the mode)
+ */
+ policy->max_slices_h = 0;
+
+ /* DSC Policy: Use slice height recommended
+ * by VESA DSC Spreadsheet user guide
+ */
+ policy->min_slice_height = 108;
+
+ /* DSC Policy: follow DP specs with an internal upper limit of 16 bpp
+ * for better interoperability
+ */
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bpc = 12;
+ break;
+ default:
+ return;
+ }
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */
+ /* DP spec limits this to 8 */
+ policy->min_target_bpp = 8;
+ /* DP spec limits this to 3 x bpc */
+ policy->max_target_bpp = 3 * bpc;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ /* DP spec limits this to 6 */
+ policy->min_target_bpp = 6;
+ /* DP spec limits this to 1.5 x bpc; assume bpc is an even number */
+ policy->max_target_bpp = bpc * 3 / 2;
+ break;
+ default:
+ return;
+ }
+ /* internal upper limit, default 16 bpp */
+ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
+ policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
+}
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
+{
+ dsc_policy_max_target_bpp_limit = limit;
+}
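Putting the new policy helper to work: for a 12 bpc RGB timing the DP-derived ceiling (3 x bpc = 36) is clamped by the 16 bpp default limit, while a 10 bpc 4:2:0 timing ends up with a 6 to 15 bpp window. A small illustrative caller, assuming the declarations above are visible through the usual headers:

static void example_dsc_policy(const struct dc_crtc_timing *timing)
{
	struct dc_dsc_policy policy;

	dc_dsc_get_policy_for_timing(timing, &policy);
	/* 12 bpc RGB:         min 8, max 3 * 12 = 36    -> clamped to 16     */
	/* 10 bpc YCbCr 4:2:0: min 6, max 10 * 3 / 2 = 15 (under the 16 cap) */
}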
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
deleted file mode 100644
index 340ef4d41ebd..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
+++ /dev/null
@@ -1,388 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corp
- *
- * Author:
- * Manasi Navare <manasi.d.navare@intel.com>
- */
-
-/* DC versions of linux includes */
-#include <include/drm_dsc_dc.h>
-
-#define EXPORT_SYMBOL(symbol) /* nothing */
-#define BUILD_BUG_ON(cond) /* nothing */
-#define DIV_ROUND_UP(a, b) (((b) + (a) - 1) / (b))
-#define ERANGE -1
-#define DRM_DEBUG_KMS(msg) /* nothing */
-#define cpu_to_be16(__x) little_to_big(__x)
-
-static unsigned short little_to_big(int data)
-{
- /* Swap lower and upper byte. DMCU uses big endian format. */
- return (0xff & (data >> 8)) + ((data & 0xff) << 8);
-}
-
-/*
- * Everything below this comment was copied directly from drm_dsc.c.
- * Only the functions needed in DC are included.
- * Please keep this file synced with upstream.
- */
-
-/**
- * DOC: dsc helpers
- *
- * These functions contain some common logic and helpers to deal with VESA
- * Display Stream Compression standard required for DSC on Display Port/eDP or
- * MIPI display interfaces.
- */
-
-/**
- * drm_dsc_pps_payload_pack() - Populates the DSC PPS
- *
- * @pps_payload:
- * Bitwise struct for DSC Picture Parameter Set. This is defined
- * by &struct drm_dsc_picture_parameter_set
- * @dsc_cfg:
- * DSC Configuration data filled by driver as defined by
- * &struct drm_dsc_config
- *
- * DSC source device sends a picture parameter set (PPS) containing the
- * information required by the sink to decode the compressed frame. Driver
- * populates the DSC PPS struct using the DSC configuration parameters in
- * the order expected by the DSC Display Sink device. For the DSC, the sink
- * device expects the PPS payload in big endian format for fields
- * that span more than 1 byte.
- */
-void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
- const struct drm_dsc_config *dsc_cfg)
-{
- int i;
-
- /* Protect against someone accidently changing struct size */
- BUILD_BUG_ON(sizeof(*pps_payload) !=
- DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
-
- memset(pps_payload, 0, sizeof(*pps_payload));
-
- /* PPS 0 */
- pps_payload->dsc_version =
- dsc_cfg->dsc_version_minor |
- dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
-
- /* PPS 1, 2 is 0 */
-
- /* PPS 3 */
- pps_payload->pps_3 =
- dsc_cfg->line_buf_depth |
- dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
-
- /* PPS 4 */
- pps_payload->pps_4 =
- ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
- DSC_PPS_MSB_SHIFT) |
- dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
- dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
- dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
- dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
-
- /* PPS 5 */
- pps_payload->bits_per_pixel_low =
- (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
-
- /*
- * The DSC panel expects the PPS packet to have big endian format
- * for data spanning 2 bytes. Use a macro cpu_to_be16() to convert
- * to big endian format. If format is little endian, it will swap
- * bytes to convert to Big endian else keep it unchanged.
- */
-
- /* PPS 6, 7 */
- pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
-
- /* PPS 8, 9 */
- pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
-
- /* PPS 10, 11 */
- pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
-
- /* PPS 12, 13 */
- pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
-
- /* PPS 14, 15 */
- pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
-
- /* PPS 16 */
- pps_payload->initial_xmit_delay_high =
- ((dsc_cfg->initial_xmit_delay &
- DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
- DSC_PPS_MSB_SHIFT);
-
- /* PPS 17 */
- pps_payload->initial_xmit_delay_low =
- (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
-
- /* PPS 18, 19 */
- pps_payload->initial_dec_delay =
- cpu_to_be16(dsc_cfg->initial_dec_delay);
-
- /* PPS 20 is 0 */
-
- /* PPS 21 */
- pps_payload->initial_scale_value =
- dsc_cfg->initial_scale_value;
-
- /* PPS 22, 23 */
- pps_payload->scale_increment_interval =
- cpu_to_be16(dsc_cfg->scale_increment_interval);
-
- /* PPS 24 */
- pps_payload->scale_decrement_interval_high =
- ((dsc_cfg->scale_decrement_interval &
- DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
- DSC_PPS_MSB_SHIFT);
-
- /* PPS 25 */
- pps_payload->scale_decrement_interval_low =
- (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
-
- /* PPS 26[7:0], PPS 27[7:5] RESERVED */
-
- /* PPS 27 */
- pps_payload->first_line_bpg_offset =
- dsc_cfg->first_line_bpg_offset;
-
- /* PPS 28, 29 */
- pps_payload->nfl_bpg_offset =
- cpu_to_be16(dsc_cfg->nfl_bpg_offset);
-
- /* PPS 30, 31 */
- pps_payload->slice_bpg_offset =
- cpu_to_be16(dsc_cfg->slice_bpg_offset);
-
- /* PPS 32, 33 */
- pps_payload->initial_offset =
- cpu_to_be16(dsc_cfg->initial_offset);
-
- /* PPS 34, 35 */
- pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
-
- /* PPS 36 */
- pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
-
- /* PPS 37 */
- pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
-
- /* PPS 38, 39 */
- pps_payload->rc_model_size =
- cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
-
- /* PPS 40 */
- pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
-
- /* PPS 41 */
- pps_payload->rc_quant_incr_limit0 =
- dsc_cfg->rc_quant_incr_limit0;
-
- /* PPS 42 */
- pps_payload->rc_quant_incr_limit1 =
- dsc_cfg->rc_quant_incr_limit1;
-
- /* PPS 43 */
- pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
- DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
-
- /* PPS 44 - 57 */
- for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
- pps_payload->rc_buf_thresh[i] =
- dsc_cfg->rc_buf_thresh[i];
-
- /* PPS 58 - 87 */
- /*
- * For DSC sink programming the RC Range parameter fields
- * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
- */
- for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- pps_payload->rc_range_parameters[i] =
- ((dsc_cfg->rc_range_params[i].range_min_qp <<
- DSC_PPS_RC_RANGE_MINQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_max_qp <<
- DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_payload->rc_range_parameters[i] =
- cpu_to_be16(pps_payload->rc_range_parameters[i]);
- }
-
- /* PPS 88 */
- pps_payload->native_422_420 = dsc_cfg->native_422 |
- dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
-
- /* PPS 89 */
- pps_payload->second_line_bpg_offset =
- dsc_cfg->second_line_bpg_offset;
-
- /* PPS 90, 91 */
- pps_payload->nsl_bpg_offset =
- cpu_to_be16(dsc_cfg->nsl_bpg_offset);
-
- /* PPS 92, 93 */
- pps_payload->second_line_offset_adj =
- cpu_to_be16(dsc_cfg->second_line_offset_adj);
-
- /* PPS 94 - 127 are O */
-}
-EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
-
-/**
- * drm_dsc_compute_rc_parameters() - Write rate control
- * parameters to the dsc configuration defined in
- * &struct drm_dsc_config in accordance with the DSC 1.2
- * specification. Some configuration fields must be present
- * beforehand.
- *
- * @vdsc_cfg:
- * DSC Configuration data partially filled by driver
- */
-int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
-{
- unsigned long groups_per_line = 0;
- unsigned long groups_total = 0;
- unsigned long num_extra_mux_bits = 0;
- unsigned long slice_bits = 0;
- unsigned long hrd_delay = 0;
- unsigned long final_scale = 0;
- unsigned long rbs_min = 0;
-
- if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
- /* Number of groups used to code each line of a slice */
- groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
- DSC_RC_PIXELS_PER_GROUP);
-
- /* chunksize in Bytes */
- vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
- vdsc_cfg->bits_per_pixel,
- (8 * 16));
- } else {
- /* Number of groups used to code each line of a slice */
- groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
- DSC_RC_PIXELS_PER_GROUP);
-
- /* chunksize in Bytes */
- vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
- vdsc_cfg->bits_per_pixel,
- (8 * 16));
- }
-
- if (vdsc_cfg->convert_rgb)
- num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4)
- - 2);
- else if (vdsc_cfg->native_422)
- num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4) +
- 3 * (4 * vdsc_cfg->bits_per_component) - 2;
- else
- num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4) +
- 2 * (4 * vdsc_cfg->bits_per_component) - 2;
- /* Number of bits in one Slice */
- slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
-
- while ((num_extra_mux_bits > 0) &&
- ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
- num_extra_mux_bits--;
-
- if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
- vdsc_cfg->initial_scale_value = groups_per_line + 8;
-
- /* scale_decrement_interval calculation according to DSC spec 1.11 */
- if (vdsc_cfg->initial_scale_value > 8)
- vdsc_cfg->scale_decrement_interval = groups_per_line /
- (vdsc_cfg->initial_scale_value - 8);
- else
- vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
-
- vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
- (vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
-
- if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
- DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
- return -ERANGE;
- }
-
- final_scale = (vdsc_cfg->rc_model_size * 8) /
- (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
- if (vdsc_cfg->slice_height > 1)
- /*
- * NflBpgOffset is 16 bit value with 11 fractional bits
- * hence we multiply by 2^11 for preserving the
- * fractional part
- */
- vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
- (vdsc_cfg->slice_height - 1));
- else
- vdsc_cfg->nfl_bpg_offset = 0;
-
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
- /* Number of groups used to code the entire slice */
- groups_total = groups_per_line * vdsc_cfg->slice_height;
-
- /* slice_bpg_offset is 16 bit value with 11 fractional bits */
- vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
- vdsc_cfg->initial_offset +
- num_extra_mux_bits) << 11),
- groups_total);
-
- if (final_scale > 9) {
- /*
- * ScaleIncrementInterval =
- * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
- * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
- * we need divide by 2^11 from pstDscCfg values
- */
- vdsc_cfg->scale_increment_interval =
- (vdsc_cfg->final_offset * (1 << 11)) /
- ((vdsc_cfg->nfl_bpg_offset +
- vdsc_cfg->slice_bpg_offset) *
- (final_scale - 9));
- } else {
- /*
- * If finalScaleValue is less than or equal to 9, a value of 0 should
- * be used to disable the scale increment at the end of the slice
- */
- vdsc_cfg->scale_increment_interval = 0;
- }
-
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
- /*
- * DSC spec mentions that bits_per_pixel specifies the target
- * bits/pixel (bpp) rate that is used by the encoder,
- * in steps of 1/16 of a bit per pixel
- */
- rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
- DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel, 16) +
- groups_per_line * vdsc_cfg->first_line_bpg_offset;
-
- hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
- vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
- vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
-
- /* As per DSC spec v1.2a recommendation: */
- if (vdsc_cfg->native_420)
- vdsc_cfg->second_line_offset_adj = 512;
- else
- vdsc_cfg->second_line_offset_adj = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
index 020ad8f685ea..9f70e87b3ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
index f66d006eac5d..e5fac9f4181d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -703,4 +702,3 @@ const qp_table qp_table_422_8bpc_max = {
{ 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }
};
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index ca51e83f8764..03ae15946c6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -177,7 +176,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
{
float bpp_group;
float initial_xmit_delay_factor;
- int source_bpp;
int padding_pixels;
int i;
@@ -217,8 +215,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->initial_xmit_delay++;
}
- source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5);
-
rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_det_thresh = 2 << (bpc - 8);
@@ -255,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->rc_buf_thresh[13] = 8064;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index f1d6e793bc61..b6b1f09c2009 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 73172fd0b529..1f6e63b71456 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
*
@@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index c3d92878875d..202baa210cc8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -24,7 +24,7 @@
# It provides the control and status of HW GPIO pins.
GPIO = gpio_base.o gpio_service.o hw_factory.o \
- hw_gpio.o hw_hpd.o hw_ddc.o hw_translate.o
+ hw_gpio.o hw_hpd.o hw_ddc.o hw_generic.o hw_translate.o
AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO))
@@ -61,26 +61,32 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10)
-endif
###############################################################################
# DCN 2
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o
AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20)
-endif
###############################################################################
+# DCN 21
+###############################################################################
+GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o
+
+AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN21)
+endif
+###############################################################################
# Diagnostics on FPGA
###############################################################################
GPIO_DIAG_FPGA = hw_translate_diag.o hw_factory_diag.o
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
index 20d81bca119c..66e4841f41e4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
@@ -24,9 +24,15 @@
*/
#include "dm_services.h"
+
#include "include/gpio_types.h"
#include "../hw_factory.h"
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+#include "../hw_generic.h"
+
#include "hw_factory_dce110.h"
#include "dce/dce_11_0_d.h"
@@ -143,12 +149,12 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
}
static const struct hw_factory_funcs funcs = {
- .create_ddc_data = dal_hw_ddc_create,
- .create_ddc_clock = dal_hw_ddc_create,
- .create_generic = NULL,
- .create_hpd = dal_hw_hpd_create,
- .create_sync = NULL,
- .create_gsl = NULL,
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = NULL,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
index ea3f888e5c65..cf98aa827a9a 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -27,10 +27,10 @@
#include "include/gpio_types.h"
#include "../hw_factory.h"
-
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
+#include "../hw_generic.h"
#include "hw_factory_dce120.h"
@@ -164,12 +164,12 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
/* fucntion table */
static const struct hw_factory_funcs funcs = {
- .create_ddc_data = dal_hw_ddc_create,
- .create_ddc_clock = dal_hw_ddc_create,
- .create_generic = NULL,
- .create_hpd = dal_hw_hpd_create,
- .create_sync = NULL,
- .create_gsl = NULL,
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = NULL,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
index 48b67866377e..496d3ffb74bb 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
@@ -32,10 +32,12 @@
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
+#include "../hw_generic.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
+
#define REG(reg_name)\
mm ## reg_name
@@ -147,12 +149,12 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
}
static const struct hw_factory_funcs funcs = {
- .create_ddc_data = dal_hw_ddc_create,
- .create_ddc_clock = dal_hw_ddc_create,
- .create_generic = NULL,
- .create_hpd = dal_hw_hpd_create,
- .create_sync = NULL,
- .create_gsl = NULL,
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = NULL,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
index 32aa47a04a0d..b38c96c9fed3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -31,6 +31,7 @@
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
+#include "../hw_generic.h"
#include "hw_factory_dcn10.h"
@@ -121,6 +122,42 @@ static const struct ddc_sh_mask ddc_mask = {
DDC_MASK_SH_LIST(_MASK)
};
+#include "../generic_regs.h"
+
+/* set field name */
+#define SF_GENERIC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define generic_regs(id) \
+{\
+ GENERIC_REG_LIST(id)\
+}
+
+static const struct generic_registers generic_regs[] = {
+ generic_regs(A),
+ generic_regs(B),
+};
+
+static const struct generic_sh_mask generic_shift[] = {
+ GENERIC_MASK_SH_LIST(__SHIFT, A),
+ GENERIC_MASK_SH_LIST(__SHIFT, B),
+};
+
+static const struct generic_sh_mask generic_mask[] = {
+ GENERIC_MASK_SH_LIST(_MASK, A),
+ GENERIC_MASK_SH_LIST(_MASK, B),
+};
+
+static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
+
+ generic->regs = &generic_regs[en];
+ generic->shifts = &generic_shift[en];
+ generic->masks = &generic_mask[en];
+ generic->base.regs = &generic_regs[en].gpio;
+}
+
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
@@ -159,14 +196,15 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
/* fucntion table */
static const struct hw_factory_funcs funcs = {
- .create_ddc_data = dal_hw_ddc_create,
- .create_ddc_clock = dal_hw_ddc_create,
- .create_generic = NULL,
- .create_hpd = dal_hw_hpd_create,
- .create_sync = NULL,
- .create_gsl = NULL,
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = dal_hw_generic_init,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
- .define_ddc_registers = define_ddc_registers
+ .define_ddc_registers = define_ddc_registers,
+ .define_generic_registers = define_generic_registers
};
/*
* dal_hw_factory_dcn10_init
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index abd76d855375..83f798cb8b21 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
@@ -31,6 +30,7 @@
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
+#include "../hw_generic.h"
#include "hw_factory_dcn20.h"
@@ -109,6 +109,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
ddc_data_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(DATA),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
@@ -118,6 +124,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
ddc_clk_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(CLK),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_sh_mask ddc_shift[] = {
@@ -138,6 +150,32 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 6)
};
+#include "../generic_regs.h"
+
+/* set field name */
+#define SF_GENERIC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define generic_regs(id) \
+{\
+ GENERIC_REG_LIST(id)\
+}
+
+static const struct generic_registers generic_regs[] = {
+ generic_regs(A),
+ generic_regs(B),
+};
+
+static const struct generic_sh_mask generic_shift[] = {
+ GENERIC_MASK_SH_LIST(__SHIFT, A),
+ GENERIC_MASK_SH_LIST(__SHIFT, B),
+};
+
+static const struct generic_sh_mask generic_mask[] = {
+ GENERIC_MASK_SH_LIST(_MASK, A),
+ GENERIC_MASK_SH_LIST(_MASK, B),
+};
+
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
@@ -173,17 +211,27 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
hpd->base.regs = &hpd_regs[en].gpio;
}
+static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
+
+ generic->regs = &generic_regs[en];
+ generic->shifts = &generic_shift[en];
+ generic->masks = &generic_mask[en];
+ generic->base.regs = &generic_regs[en].gpio;
+}
/* fucntion table */
static const struct hw_factory_funcs funcs = {
- .create_ddc_data = dal_hw_ddc_create,
- .create_ddc_clock = dal_hw_ddc_create,
- .create_generic = NULL,
- .create_hpd = dal_hw_hpd_create,
- .create_sync = NULL,
- .create_gsl = NULL,
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = dal_hw_generic_init,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
- .define_ddc_registers = define_ddc_registers
+ .define_ddc_registers = define_ddc_registers,
+ .define_generic_registers = define_generic_registers,
};
/*
* dal_hw_factory_dcn10_init
@@ -209,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory)
factory->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
index 43a4ce7aa3bf..0fd9b315bd7a 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifndef __DAL_HW_FACTORY_DCN20_H__
#define __DAL_HW_FACTORY_DCN20_H__
@@ -30,4 +29,3 @@
void dal_hw_factory_dcn20_init(struct hw_factory *factory);
#endif /* __DAL_HW_FACTORY_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
index b393cc13298a..52ba62b3b5e4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
@@ -26,7 +26,6 @@
/*
* Pre-requisites: headers required by header of this unit
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "hw_translate_dcn20.h"
#include "dm_services.h"
@@ -71,7 +70,7 @@ static bool offset_to_id(
{
switch (offset) {
/* GENERIC */
- case REG(DC_GENERICA):
+ case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
@@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr)
tr->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
index 01f52c7bed86..5f7a35530e26 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifndef __DAL_HW_TRANSLATE_DCN20_H__
#define __DAL_HW_TRANSLATE_DCN20_H__
@@ -32,4 +31,3 @@ struct hw_translate;
void dal_hw_translate_dcn20_init(struct hw_translate *tr);
#endif /* __DAL_HW_TRANSLATE_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
new file mode 100644
index 000000000000..907c5911eb9e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+#include "../hw_generic.h"
+
+#include "hw_factory_dcn21.h"
+
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+/* DCN */
+#define block HPD
+#define reg_num 0
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
+
+
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs_dcn[] = {
+ ddc_data_regs_dcn2(1),
+ ddc_data_regs_dcn2(2),
+ ddc_data_regs_dcn2(3),
+ ddc_data_regs_dcn2(4),
+ ddc_data_regs_dcn2(5),
+};
+
+static const struct ddc_registers ddc_clk_regs_dcn[] = {
+ ddc_clk_regs_dcn2(1),
+ ddc_clk_regs_dcn2(2),
+ ddc_clk_regs_dcn2(3),
+ ddc_clk_regs_dcn2(4),
+ ddc_clk_regs_dcn2(5),
+};
+
+static const struct ddc_sh_mask ddc_shift[] = {
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+};
+
+static const struct ddc_sh_mask ddc_mask[] = {
+ DDC_MASK_SH_LIST_DCN2(_MASK, 1),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 2),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+};
+
+#include "../generic_regs.h"
+
+/* set field name */
+#define SF_GENERIC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define generic_regs(id) \
+{\
+ GENERIC_REG_LIST(id)\
+}
+
+static const struct generic_registers generic_regs[] = {
+ generic_regs(A),
+};
+
+static const struct generic_sh_mask generic_shift[] = {
+ GENERIC_MASK_SH_LIST(__SHIFT, A),
+};
+
+static const struct generic_sh_mask generic_mask[] = {
+ GENERIC_MASK_SH_LIST(_MASK, A),
+};
+
+static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
+
+ generic->regs = &generic_regs[en];
+ generic->shifts = &generic_shift[en];
+ generic->masks = &generic_mask[en];
+ generic->base.regs = &generic_regs[en].gpio;
+}
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs_dcn[en];
+ ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs_dcn[en];
+ ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift[en];
+ ddc->masks = &ddc_mask[en];
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = dal_hw_generic_init,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = dal_hw_generic_get_pin,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers,
+ .define_generic_registers = define_generic_registers
+};
+/*
+ * dal_hw_factory_dcn21_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dcn21_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 4;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 0;
+ factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
+
+ factory->funcs = &funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
new file mode 100644
index 000000000000..4949e0c7fa06
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_HW_FACTORY_DCN21_H__
+#define __DAL_HW_FACTORY_DCN21_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dcn21_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
new file mode 100644
index 000000000000..291966efe63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "hw_translate_dcn21.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+
+
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+/* DCN */
+#define block HPD
+#define reg_num 0
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#undef REG
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
+
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+#ifdef PALLADIUM_SUPPORTED
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+#endif
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+	/* REG(DC_GPIO_GENLK_MASK) */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+	/* we don't care about the GPIO_ID for DDC;
+	 * DdcHandle will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+	 * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+
+// case REG(DC_GPIO_I2CPAD_A): does not exist
+// case REG(DC_GPIO_PWRSEQ_A):
+// case REG(DC_GPIO_PAD_STRENGTH_1):
+// case REG(DC_GPIO_PAD_STRENGTH_2):
+// case REG(DC_GPIO_DEBUG):
+ /* UNEXPECTED */
+ default:
+// case REG(DC_GPIO_SYNCA_A): does not exist
+#ifdef PALLADIUM_SUPPORTED
+ *id = GPIO_ID_HPD;
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+#endif
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+#ifdef PALLADIUM_SUPPORTED
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ result = true;
+#endif
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+			/* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+			/* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+			/* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+			/* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dcn21_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dcn21_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
+
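
The two static callbacks above are effectively inverses for the pins they support. As a sanity sketch only (a hypothetical helper, not part of the patch), something like the following could sit at the bottom of this same translation unit, since offset_to_id()/id_to_offset() are file-local:

/* Hypothetical round-trip check for the HPD1 pin (illustrative only). */
static bool dcn21_translate_roundtrip_hpd1(void)
{
	enum gpio_id id;
	uint32_t en;
	struct gpio_pin_info info = { 0 };

	if (!offset_to_id(REG(DC_GPIO_HPD_A),
			  DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK, &id, &en))
		return false;

	/* expect id == GPIO_ID_HPD and en == GPIO_HPD_1 */
	if (!id_to_offset(id, en, &info))
		return false;

	/* id_to_offset() also derives offset_en/offset_y/offset_mask */
	return info.offset == REG(DC_GPIO_HPD_A) &&
	       info.mask == DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
}
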
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
index 7f40f9cd5fb9..9462b0a65200 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2014-2017 Intel Corporation
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -8,27 +8,26 @@
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
*
*/
+#ifndef __DAL_HW_TRANSLATE_DCN21_H__
+#define __DAL_HW_TRANSLATE_DCN21_H__
-#ifndef _INTEL_GUC_ADS_H_
-#define _INTEL_GUC_ADS_H_
-
-struct intel_guc;
+struct hw_translate;
-int intel_guc_ads_create(struct intel_guc *guc);
-void intel_guc_ads_destroy(struct intel_guc *guc);
-void intel_guc_ads_reset(struct intel_guc *guc);
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dcn21_init(struct hw_translate *tr);
-#endif
+#endif /* __DAL_HW_TRANSLATE_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index f91e85b04956..308a543178a5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -48,13 +48,11 @@
DDC_GPIO_REG_LIST(cd,id),\
.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DDC_REG_LIST_DCN2(cd, id) \
DDC_GPIO_REG_LIST(cd, id),\
.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\
.phy_aux_cntl = REG(PHY_AUX_CNTL), \
.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#endif
#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
.type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
@@ -90,13 +88,11 @@
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DDC_I2C_REG_LIST_DCN2(cd) \
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0,\
.phy_aux_cntl = REG(PHY_AUX_CNTL), \
.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#endif
#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
@@ -110,22 +106,18 @@
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \
{DDC_MASK_SH_LIST_COMMON(mask_sh),\
0,\
0,\
(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
-#endif
struct ddc_registers {
struct gpio_registers gpio;
uint32_t ddc_setup;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t phy_aux_cntl;
uint32_t dc_gpio_aux_ctrl_5;
-#endif
};
struct ddc_sh_mask {
@@ -140,11 +132,9 @@ struct ddc_sh_mask {
/* i2cpad_mask */
uint32_t DC_GPIO_SDA_PD_DIS;
uint32_t DC_GPIO_SCL_PD_DIS;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
//phy_aux_cntl
uint32_t AUX_PAD_RXSEL;
uint32_t DDC_PAD_I2CMODE;
-#endif
};
@@ -180,7 +170,6 @@ struct ddc_sh_mask {
{\
DDC_I2C_REG_LIST(SCL)\
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define ddc_data_regs_dcn2(id) \
{\
DDC_REG_LIST_DCN2(DATA, id)\
@@ -200,7 +189,6 @@ struct ddc_sh_mask {
{\
DDC_REG_LIST_DCN2(SCL)\
}
-#endif
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
index 26695b963c58..df68430aeb0c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -38,15 +38,13 @@
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
+#include "../hw_generic.h"
/* function table */
static const struct hw_factory_funcs funcs = {
- .create_ddc_data = NULL,
- .create_ddc_clock = NULL,
- .create_generic = NULL,
- .create_hpd = NULL,
- .create_sync = NULL,
- .create_gsl = NULL,
+ .init_ddc_data = NULL,
+ .init_generic = NULL,
+ .init_hpd = NULL,
};
void dal_hw_factory_diag_fpga_init(struct hw_factory *factory)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h
new file mode 100644
index 000000000000..8c05295c05c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GENERIC_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GENERIC_REGS_H_
+
+#include "gpio_regs.h"
+
+#define GENERIC_GPIO_REG_LIST_ENTRY(type, cd, id) \
+ .type ## _reg = REG(DC_GPIO_GENERIC_## type),\
+ .type ## _mask = DC_GPIO_GENERIC_ ## type ## __DC_GPIO_GENERIC ## id ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_GENERIC_ ## type ## __DC_GPIO_GENERIC ## id ## _ ## type ## __SHIFT
+
+#define GENERIC_GPIO_REG_LIST(id) \
+ {\
+ GENERIC_GPIO_REG_LIST_ENTRY(MASK, cd, id),\
+ GENERIC_GPIO_REG_LIST_ENTRY(A, cd, id),\
+ GENERIC_GPIO_REG_LIST_ENTRY(EN, cd, id),\
+ GENERIC_GPIO_REG_LIST_ENTRY(Y, cd, id)\
+ }
+
+#define GENERIC_REG_LIST(id) \
+ GENERIC_GPIO_REG_LIST(id), \
+ .mux = REG(DC_GENERIC ## id),\
+
+#define GENERIC_MASK_SH_LIST(mask_sh, cd) \
+ {(DC_GENERIC ## cd ##__GENERIC ## cd ##_EN## mask_sh),\
+ (DC_GENERIC ## cd ##__GENERIC ## cd ##_SEL## mask_sh)}
+
+struct generic_registers {
+ struct gpio_registers gpio;
+ uint32_t mux;
+};
+
+struct generic_sh_mask {
+ /* enable */
+ uint32_t GENERIC_EN;
+ /* select */
+ uint32_t GENERIC_SEL;
+
+};
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GENERIC_REGS_H_ */
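
To make the X-macro plumbing above easier to follow, this is roughly what the generic_regs(A) table entry used by the DCN hw_factory files expands to. The register and field tokens are produced mechanically by GENERIC_GPIO_REG_LIST_ENTRY()/GENERIC_REG_LIST(), and the MASK_reg/MASK_mask/MASK_shift field names are implied by the designated initializers; the expanded variable name is for illustration only:

/* Approximate expansion of generic_regs(A), shown for illustration only */
static const struct generic_registers generic_regs_a_expanded = {
	.gpio = {
		.MASK_reg   = REG(DC_GPIO_GENERIC_MASK),
		.MASK_mask  = DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK,
		.MASK_shift = DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT,
		/* the A, EN and Y entries follow the same pattern */
	},
	.mux = REG(DC_GENERICA),	/* from GENERIC_REG_LIST(A) */
};
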
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index d03b38e80d9b..f67c18375bfd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -67,10 +67,14 @@ enum gpio_result dal_gpio_open_ex(
return GPIO_RESULT_ALREADY_OPENED;
}
+ // No action if allocation failed during gpio construct
+ if (!gpio->hw_container.ddc) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
gpio->mode = mode;
- return dal_gpio_service_open(
- gpio->service, gpio->id, gpio->en, mode, &gpio->pin);
+ return dal_gpio_service_open(gpio);
}
enum gpio_result dal_gpio_get_value(
@@ -231,6 +235,21 @@ enum gpio_pin_output_state dal_gpio_get_output_state(
return gpio->output_state;
}
+struct hw_ddc *dal_gpio_get_ddc(struct gpio *gpio)
+{
+ return gpio->hw_container.ddc;
+}
+
+struct hw_hpd *dal_gpio_get_hpd(struct gpio *gpio)
+{
+ return gpio->hw_container.hpd;
+}
+
+struct hw_generic *dal_gpio_get_generic(struct gpio *gpio)
+{
+ return gpio->hw_container.generic;
+}
+
void dal_gpio_close(
struct gpio *gpio)
{
@@ -267,6 +286,30 @@ struct gpio *dal_gpio_create(
gpio->mode = GPIO_MODE_UNKNOWN;
gpio->output_state = output_state;
+ //initialize hw_container union based on id
+ switch (gpio->id) {
+ case GPIO_ID_DDC_DATA:
+ gpio->service->factory.funcs->init_ddc_data(&gpio->hw_container.ddc, service->ctx, id, en);
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ gpio->service->factory.funcs->init_ddc_data(&gpio->hw_container.ddc, service->ctx, id, en);
+ break;
+ case GPIO_ID_GENERIC:
+ gpio->service->factory.funcs->init_generic(&gpio->hw_container.generic, service->ctx, id, en);
+ break;
+ case GPIO_ID_HPD:
+ gpio->service->factory.funcs->init_hpd(&gpio->hw_container.hpd, service->ctx, id, en);
+ break;
+	// TODO: GPIOs for sync and gsl are not created yet; they might be needed later
+ case GPIO_ID_SYNC:
+ break;
+ case GPIO_ID_GSL:
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ gpio->pin = NULL;
+ }
+
return gpio;
}
@@ -278,7 +321,32 @@ void dal_gpio_destroy(
return;
}
- dal_gpio_close(*gpio);
+ switch ((*gpio)->id) {
+ case GPIO_ID_DDC_DATA:
+ kfree((*gpio)->hw_container.ddc);
+ (*gpio)->hw_container.ddc = NULL;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ //TODO: might want to change it to init_ddc_clock
+ kfree((*gpio)->hw_container.ddc);
+ (*gpio)->hw_container.ddc = NULL;
+ break;
+ case GPIO_ID_GENERIC:
+ kfree((*gpio)->hw_container.generic);
+ (*gpio)->hw_container.generic = NULL;
+ break;
+ case GPIO_ID_HPD:
+ kfree((*gpio)->hw_container.hpd);
+ (*gpio)->hw_container.hpd = NULL;
+ break;
+	// TODO: GPIOs for sync and gsl are not created yet; they might be needed later
+ case GPIO_ID_SYNC:
+ break;
+ case GPIO_ID_GSL:
+ break;
+ default:
+ break;
+ }
kfree(*gpio);
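
With this rework, dal_gpio_create() allocates the hw_ddc/hw_hpd/hw_generic container up front through the factory init_* callbacks, dal_gpio_open_ex() only binds the pre-allocated pin, and dal_gpio_destroy() frees the container. A minimal caller sketch (the example_* helper is hypothetical, the dal_gpio_open_ex()/dal_gpio_get_value() signatures are assumed from the hunks above, and error handling is trimmed):

/* Sketch: read an HPD line with the reworked gpio lifecycle. */
static uint32_t example_read_hpd1(struct gpio_service *service)
{
	uint32_t value = 0;
	struct gpio *hpd = dal_gpio_create(service, GPIO_ID_HPD, GPIO_HPD_1,
					   GPIO_PIN_OUTPUT_STATE_DEFAULT);

	if (!hpd)
		return 0;

	/* the hw_hpd container already exists; open just fetches the pin */
	if (dal_gpio_open_ex(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
		dal_gpio_get_value(hpd, &value);
		dal_gpio_close(hpd);
	}

	/* frees hw_container.hpd and the gpio itself; no pin destroy callback */
	dal_gpio_destroy(&hpd);
	return value;
}
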
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index a7fab44f66b6..92280cc05e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -141,6 +141,57 @@ struct gpio *dal_gpio_service_create_irq(
return dal_gpio_create_irq(service, id, en);
}
+struct gpio *dal_gpio_service_create_generic_mux(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask)
+{
+ enum gpio_id id;
+ uint32_t en;
+ struct gpio *generic;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ generic = dal_gpio_create(
+ service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ return generic;
+}
+
+void dal_gpio_destroy_generic_mux(
+ struct gpio **mux)
+{
+ if (!mux || !*mux) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_destroy(mux);
+ kfree(*mux);
+
+ *mux = NULL;
+}
+
+struct gpio_pin_info dal_gpio_get_generic_pin_info(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct gpio_pin_info pin;
+
+ if (service->translate.funcs->id_to_offset) {
+ service->translate.funcs->id_to_offset(id, en, &pin);
+ } else {
+ pin.mask = 0xFFFFFFFF;
+ pin.offset = 0xFFFFFFFF;
+ }
+
+ return pin;
+}
+
void dal_gpio_service_destroy(
struct gpio_service **ptr)
{
@@ -165,6 +216,21 @@ void dal_gpio_service_destroy(
*ptr = NULL;
}
+enum gpio_result dal_mux_setup_config(
+ struct gpio *mux,
+ struct gpio_generic_mux_config *config)
+{
+ struct gpio_config_data config_data;
+
+ if (!config)
+ return GPIO_RESULT_INVALID_DATA;
+
+ config_data.config.generic_mux = *config;
+ config_data.type = GPIO_CONFIG_TYPE_GENERIC_MUX;
+
+ return dal_gpio_set_config(mux, &config_data);
+}
+
/*
* @brief
* Private API.
@@ -223,13 +289,15 @@ enum gpio_result dal_gpio_service_unlock(
}
enum gpio_result dal_gpio_service_open(
- struct gpio_service *service,
- enum gpio_id id,
- uint32_t en,
- enum gpio_mode mode,
- struct hw_gpio_pin **ptr)
+ struct gpio *gpio)
{
- struct hw_gpio_pin *pin;
+ struct gpio_service *service = gpio->service;
+ enum gpio_id id = gpio->id;
+ uint32_t en = gpio->en;
+ enum gpio_mode mode = gpio->mode;
+
+ struct hw_gpio_pin **pin = &gpio->pin;
+
if (!service->busyness[id]) {
ASSERT_CRITICAL(false);
@@ -243,50 +311,43 @@ enum gpio_result dal_gpio_service_open(
switch (id) {
case GPIO_ID_DDC_DATA:
- pin = service->factory.funcs->create_ddc_data(
- service->ctx, id, en);
- service->factory.funcs->define_ddc_registers(pin, en);
+ *pin = service->factory.funcs->get_ddc_pin(gpio);
+ service->factory.funcs->define_ddc_registers(*pin, en);
break;
case GPIO_ID_DDC_CLOCK:
- pin = service->factory.funcs->create_ddc_clock(
- service->ctx, id, en);
- service->factory.funcs->define_ddc_registers(pin, en);
+ *pin = service->factory.funcs->get_ddc_pin(gpio);
+ service->factory.funcs->define_ddc_registers(*pin, en);
break;
case GPIO_ID_GENERIC:
- pin = service->factory.funcs->create_generic(
- service->ctx, id, en);
+ *pin = service->factory.funcs->get_generic_pin(gpio);
+ service->factory.funcs->define_generic_registers(*pin, en);
break;
case GPIO_ID_HPD:
- pin = service->factory.funcs->create_hpd(
- service->ctx, id, en);
- service->factory.funcs->define_hpd_registers(pin, en);
+ *pin = service->factory.funcs->get_hpd_pin(gpio);
+ service->factory.funcs->define_hpd_registers(*pin, en);
break;
+
+ //TODO: gsl and sync support? create_sync and create_gsl are NULL
case GPIO_ID_SYNC:
- pin = service->factory.funcs->create_sync(
- service->ctx, id, en);
- break;
case GPIO_ID_GSL:
- pin = service->factory.funcs->create_gsl(
- service->ctx, id, en);
break;
default:
ASSERT_CRITICAL(false);
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
- if (!pin) {
+ if (!*pin) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
- if (!pin->funcs->open(pin, mode)) {
+ if (!(*pin)->funcs->open(*pin, mode)) {
ASSERT_CRITICAL(false);
- dal_gpio_service_close(service, &pin);
+ dal_gpio_service_close(service, pin);
return GPIO_RESULT_OPEN_FAILED;
}
set_pin_busy(service, id, en);
- *ptr = pin;
return GPIO_RESULT_OK;
}
@@ -308,11 +369,10 @@ void dal_gpio_service_close(
pin->funcs->close(pin);
- pin->funcs->destroy(ptr);
+ *ptr = NULL;
}
}
-
enum dc_irq_source dal_irq_get_source(
const struct gpio *irq)
{
@@ -399,7 +459,6 @@ void dal_gpio_destroy_irq(
return;
}
- dal_gpio_close(*irq);
dal_gpio_destroy(irq);
kfree(*irq);
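
The new service-level helpers let a caller drive a GENERIC pin as a mux without touching the factory directly. A usage sketch (the example_* helper, the chosen gpio mode and the mux_select value are illustrative; the gpio_generic_mux_config field names come from the hw_generic set_config() implementation further below):

/* Sketch: program a generic mux found at a given register offset/mask. */
static enum gpio_result example_setup_generic_mux(struct gpio_service *service,
						  uint32_t offset, uint32_t mask)
{
	struct gpio_generic_mux_config cfg = {
		.enable_output_from_mux = 1,
		.mux_select = 0,	/* board-specific routing, illustrative */
	};
	struct gpio *mux = dal_gpio_service_create_generic_mux(service, offset, mask);
	enum gpio_result res;

	if (!mux)
		return GPIO_RESULT_NON_SPECIFIC_ERROR;

	res = dal_gpio_open_ex(mux, GPIO_MODE_OUTPUT);
	if (res == GPIO_RESULT_OK) {
		/* ends up in the hw_generic set_config() REG_UPDATE_2 */
		res = dal_mux_setup_config(mux, &cfg);
		dal_gpio_close(mux);
	}

	dal_gpio_destroy_generic_mux(&mux);
	return res;
}
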
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
index 0c678af75331..b9775a131ecd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -42,11 +42,7 @@ struct gpio_service {
};
enum gpio_result dal_gpio_service_open(
- struct gpio_service *service,
- enum gpio_id id,
- uint32_t en,
- enum gpio_mode mode,
- struct hw_gpio_pin **ptr);
+ struct gpio *gpio);
void dal_gpio_service_close(
struct gpio_service *service,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 408857d19c84..1ae153eab31d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -28,6 +28,7 @@
#include "dm_services.h"
+#include "include/gpio_interface.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "hw_ddc.h"
@@ -45,18 +46,20 @@
#define REG(reg)\
(ddc->regs->reg)
-static void destruct(
+struct gpio;
+
+static void dal_hw_ddc_destruct(
struct hw_ddc *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
-static void destroy(
+static void dal_hw_ddc_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
- destruct(pin);
+ dal_hw_ddc_destruct(pin);
kfree(pin);
@@ -147,7 +150,6 @@ static enum gpio_result set_config(
AUX_PAD1_MODE, 0);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1);
}
@@ -155,7 +157,6 @@ static enum gpio_result set_config(
if (ddc->regs->phy_aux_cntl != 0) {
REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1);
}
-#endif
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
/* set the AUX pad mode */
@@ -163,12 +164,10 @@ static enum gpio_result set_config(
REG_SET(gpio.MASK_reg, regval,
AUX_PAD1_MODE, 1);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
REG_UPDATE(dc_gpio_aux_ctrl_5,
DDC_PAD_I2CMODE, 0);
}
-#endif
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
@@ -208,7 +207,7 @@ static enum gpio_result set_config(
}
static const struct hw_gpio_pin_funcs funcs = {
- .destroy = destroy,
+ .destroy = dal_hw_ddc_destroy,
.open = dal_hw_gpio_open,
.get_value = dal_hw_gpio_get_value,
.set_value = dal_hw_gpio_set_value,
@@ -217,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = {
.close = dal_hw_gpio_close,
};
-static void construct(
+static void dal_hw_ddc_construct(
struct hw_ddc *ddc,
enum gpio_id id,
uint32_t en,
@@ -227,24 +226,29 @@ static void construct(
ddc->base.base.funcs = &funcs;
}
-struct hw_gpio_pin *dal_hw_ddc_create(
+void dal_hw_ddc_init(
+ struct hw_ddc **hw_ddc,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en)
{
- struct hw_ddc *pin;
-
if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
ASSERT_CRITICAL(false);
- return NULL;
+ *hw_ddc = NULL;
}
- pin = kzalloc(sizeof(struct hw_ddc), GFP_KERNEL);
- if (!pin) {
+ *hw_ddc = kzalloc(sizeof(struct hw_ddc), GFP_KERNEL);
+ if (!*hw_ddc) {
ASSERT_CRITICAL(false);
- return NULL;
+ return;
}
- construct(pin, id, en, ctx);
- return &pin->base.base;
+ dal_hw_ddc_construct(*hw_ddc, id, en, ctx);
+}
+
+struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio)
+{
+ struct hw_ddc *hw_ddc = dal_gpio_get_ddc(gpio);
+
+ return &hw_ddc->base.base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
index 9690e2a885d7..cc30e65df431 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
@@ -38,9 +38,12 @@ struct hw_ddc {
#define HW_DDC_FROM_BASE(hw_gpio) \
container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_ddc, base)
-struct hw_gpio_pin *dal_hw_ddc_create(
+void dal_hw_ddc_init(
+ struct hw_ddc **hw_ddc,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en);
+struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 78f528f92907..d2d36d48caaa 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -45,12 +45,11 @@
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/hw_factory_dcn20.h"
-#endif
+#include "dcn21/hw_factory_dcn21.h"
#include "diagnostics/hw_factory_diag.h"
@@ -87,17 +86,18 @@ bool dal_hw_factory_init(
case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_factory_dcn10_init(factory);
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
dal_hw_factory_dcn20_init(factory);
return true;
+ case DCN_VERSION_2_1:
+ dal_hw_factory_dcn21_init(factory);
+ return true;
#endif
default:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
index 6e4dd3521935..e15b037f3bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
@@ -28,41 +28,44 @@
struct hw_gpio_pin;
struct hw_hpd;
+struct hw_ddc;
+struct hw_generic;
+struct gpio;
struct hw_factory {
uint32_t number_of_pins[GPIO_ID_COUNT];
const struct hw_factory_funcs {
- struct hw_gpio_pin *(*create_ddc_data)(
- struct dc_context *ctx,
- enum gpio_id id,
- uint32_t en);
- struct hw_gpio_pin *(*create_ddc_clock)(
- struct dc_context *ctx,
- enum gpio_id id,
- uint32_t en);
- struct hw_gpio_pin *(*create_generic)(
- struct dc_context *ctx,
- enum gpio_id id,
- uint32_t en);
- struct hw_gpio_pin *(*create_hpd)(
- struct dc_context *ctx,
- enum gpio_id id,
- uint32_t en);
- struct hw_gpio_pin *(*create_sync)(
- struct dc_context *ctx,
- enum gpio_id id,
- uint32_t en);
- struct hw_gpio_pin *(*create_gsl)(
- struct dc_context *ctx,
- enum gpio_id id,
- uint32_t en);
+ void (*init_ddc_data)(
+ struct hw_ddc **hw_ddc,
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ void (*init_generic)(
+ struct hw_generic **hw_generic,
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ void (*init_hpd)(
+ struct hw_hpd **hw_hpd,
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*get_hpd_pin)(
+ struct gpio *gpio);
+ struct hw_gpio_pin *(*get_ddc_pin)(
+ struct gpio *gpio);
+ struct hw_gpio_pin *(*get_generic_pin)(
+ struct gpio *gpio);
void (*define_hpd_registers)(
struct hw_gpio_pin *pin,
uint32_t en);
void (*define_ddc_registers)(
struct hw_gpio_pin *pin,
uint32_t en);
+ void (*define_generic_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
} *funcs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
new file mode 100644
index 000000000000..f9e847e6555d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+
+#include "include/gpio_interface.h"
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_generic.h"
+
+#include "reg_helper.h"
+#include "generic_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ generic->shifts->field_name, generic->masks->field_name
+
+#define CTX \
+ generic->base.base.ctx
+#define REG(reg)\
+ (generic->regs->reg)
+
+struct gpio;
+
+static void dal_hw_generic_destruct(
+ struct hw_generic *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+static void dal_hw_generic_destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr);
+
+ dal_hw_generic_destruct(generic);
+
+ kfree(generic);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(ptr);
+
+ if (!config_data)
+ return GPIO_RESULT_INVALID_DATA;
+
+ REG_UPDATE_2(mux,
+ GENERIC_EN, config_data->config.generic_mux.enable_output_from_mux,
+ GENERIC_SEL, config_data->config.generic_mux.mux_select);
+
+ return GPIO_RESULT_OK;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = dal_hw_generic_destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = dal_hw_gpio_get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void dal_hw_generic_construct(
+ struct hw_generic *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+ pin->base.base.funcs = &funcs;
+}
+
+void dal_hw_generic_init(
+ struct hw_generic **hw_generic,
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ ASSERT_CRITICAL(false);
+ *hw_generic = NULL;
+ }
+
+ *hw_generic = kzalloc(sizeof(struct hw_generic), GFP_KERNEL);
+ if (!*hw_generic) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_hw_generic_construct(*hw_generic, id, en, ctx);
+}
+
+
+struct hw_gpio_pin *dal_hw_generic_get_pin(struct gpio *gpio)
+{
+ struct hw_generic *hw_generic = dal_gpio_get_generic(gpio);
+
+ return &hw_generic->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h
new file mode 100644
index 000000000000..bd6ffeb5e9df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_generic_H__
+#define __DAL_HW_generic_H__
+
+#include "generic_regs.h"
+#include "hw_gpio.h"
+
+struct hw_generic {
+ struct hw_gpio base;
+ const struct generic_registers *regs;
+ const struct generic_sh_mask *shifts;
+ const struct generic_sh_mask *masks;
+};
+
+#define HW_GENERIC_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_generic, base)
+
+void dal_hw_generic_init(
+ struct hw_generic **hw_generic,
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+struct hw_gpio_pin *dal_hw_generic_get_pin(struct gpio *gpio);
+
+#endif
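
The HW_GENERIC_FROM_BASE() macro above is what lets the shared hw_gpio_pin_funcs callbacks (dal_hw_generic_destroy() and friends) recover the containing hw_generic from the base pointer they receive; HW_HPD_FROM_BASE() in hw_hpd.h works the same way. A stand-alone sketch of that container_of pattern, using made-up stand-in types rather than the real DC structures:

/* Stand-alone sketch of the container_of downcast used by
 * HW_GENERIC_FROM_BASE(); the types are illustrative stand-ins,
 * not the real DC structures. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pin_base {		/* plays the role of hw_gpio_pin/hw_gpio */
	int id;
};

struct pin_generic {		/* plays the role of hw_generic */
	struct pin_base base;
	int mux_select;
};

static void destroy(struct pin_base *ptr)
{
	/* Recover the derived object from the embedded base member. */
	struct pin_generic *generic =
		container_of(ptr, struct pin_generic, base);

	printf("destroying generic pin, mux_select=%d\n", generic->mux_select);
}

int main(void)
{
	struct pin_generic g = { .base = { .id = 1 }, .mux_select = 3 };

	destroy(&g.base);	/* callers only ever see the base pointer */
	return 0;
}
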
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 5e11d748e6f3..692f29de7797 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -27,6 +27,7 @@
#include "dm_services.h"
+#include "include/gpio_interface.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "hw_hpd.h"
@@ -43,14 +44,7 @@
#define REG(reg)\
(hpd->regs->reg)
-static void dal_hw_hpd_construct(
- struct hw_hpd *pin,
- enum gpio_id id,
- uint32_t en,
- struct dc_context *ctx)
-{
- dal_hw_gpio_construct(&pin->base, id, en, ctx);
-}
+struct gpio;
static void dal_hw_hpd_destruct(
struct hw_hpd *pin)
@@ -58,19 +52,12 @@ static void dal_hw_hpd_destruct(
dal_hw_gpio_destruct(&pin->base);
}
-
-static void destruct(
- struct hw_hpd *hpd)
-{
- dal_hw_hpd_destruct(hpd);
-}
-
-static void destroy(
+static void dal_hw_hpd_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
- destruct(hpd);
+ dal_hw_hpd_destruct(hpd);
kfree(hpd);
@@ -117,7 +104,7 @@ static enum gpio_result set_config(
}
static const struct hw_gpio_pin_funcs funcs = {
- .destroy = destroy,
+ .destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
.get_value = get_value,
.set_value = dal_hw_gpio_set_value,
@@ -126,39 +113,39 @@ static const struct hw_gpio_pin_funcs funcs = {
.close = dal_hw_gpio_close,
};
-static void construct(
- struct hw_hpd *hpd,
+static void dal_hw_hpd_construct(
+ struct hw_hpd *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
- dal_hw_hpd_construct(hpd, id, en, ctx);
- hpd->base.base.funcs = &funcs;
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+ pin->base.base.funcs = &funcs;
}
-struct hw_gpio_pin *dal_hw_hpd_create(
+void dal_hw_hpd_init(
+ struct hw_hpd **hw_hpd,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en)
{
- struct hw_hpd *hpd;
-
- if (id != GPIO_ID_HPD) {
+ if ((en < GPIO_HPD_MIN) || (en > GPIO_HPD_MAX)) {
ASSERT_CRITICAL(false);
- return NULL;
+ *hw_hpd = NULL;
+ return;
}
- if ((en < GPIO_HPD_MIN) || (en > GPIO_HPD_MAX)) {
+ *hw_hpd = kzalloc(sizeof(struct hw_hpd), GFP_KERNEL);
+ if (!*hw_hpd) {
ASSERT_CRITICAL(false);
- return NULL;
+ return;
}
- hpd = kzalloc(sizeof(struct hw_hpd), GFP_KERNEL);
- if (!hpd) {
- ASSERT_CRITICAL(false);
- return NULL;
- }
+ dal_hw_hpd_construct(*hw_hpd, id, en, ctx);
+}
+
+struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio)
+{
+ struct hw_hpd *hw_hpd = dal_gpio_get_hpd(gpio);
- construct(hpd, id, en, ctx);
- return &hpd->base.base;
+ return &hw_hpd->base.base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
index 4ab7a208f781..e7d8b3bb016c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
@@ -38,9 +38,12 @@ struct hw_hpd {
#define HW_HPD_FROM_BASE(hw_gpio) \
container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_hpd, base)
-struct hw_gpio_pin *dal_hw_hpd_create(
+void dal_hw_hpd_init(
+ struct hw_hpd **hw_hpd,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en);
+struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index c35fe201d335..5d396657a1ee 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,12 +43,11 @@
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/hw_translate_dcn20.h"
-#endif
+#include "dcn21/hw_translate_dcn21.h"
#include "diagnostics/hw_translate_diag.h"
@@ -82,17 +81,18 @@ bool dal_hw_translate_init(
case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_translate_dcn10_init(translate);
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
dal_hw_translate_dcn20_init(translate);
return true;
+ case DCN_VERSION_2_1:
+ dal_hw_translate_dcn21_init(translate);
+ return true;
#endif
default:
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
new file mode 100644
index 000000000000..4170b6eb9ec0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
@@ -0,0 +1,28 @@
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'hdcp' sub-component of DAL.
+#
+
+HDCP_MSG = hdcp_msg.o
+
+AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG)
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
new file mode 100644
index 000000000000..6f730b5bfe42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "include/hdcp_types.h"
+#include "include/i2caux_interface.h"
+#include "include/signal_types.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define HDCP14_KSV_SIZE 5
+#define HDCP14_MAX_KSV_FIFO_SIZE (127 * HDCP14_KSV_SIZE)
+
+static const bool hdcp_cmd_is_read[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = true,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ [HDCP_MESSAGE_ID_READ_PJ] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = false,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = false,
+ [HDCP_MESSAGE_ID_WRITE_AN] = false,
+ [HDCP_MESSAGE_ID_READ_VH_X] = true,
+ [HDCP_MESSAGE_ID_READ_VH_0] = true,
+ [HDCP_MESSAGE_ID_READ_VH_1] = true,
+ [HDCP_MESSAGE_ID_READ_VH_2] = true,
+ [HDCP_MESSAGE_ID_READ_VH_3] = true,
+ [HDCP_MESSAGE_ID_READ_VH_4] = true,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = true,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = true,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true,
+ [HDCP_MESSAGE_ID_READ_BINFO] = true,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = true,
+ [HDCP_MESSAGE_ID_RX_CAPS] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = true,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
+};
+
+struct protection_properties {
+ bool supported;
+ bool (*process_transaction)(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+};
+
+static const struct protection_properties non_supported_protection = {
+ .supported = false
+};
+
+static bool hdmi_14_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ uint8_t *buff = NULL;
+ bool result;
+ const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
+ const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
+ struct i2c_command i2c_command;
+ uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
+ struct i2c_payload i2c_payloads[] = {
+ { true, 0, 1, &offset },
+ /* actual hdcp payload, will be filled later, zeroed for now */
+ { 0 }
+ };
+
+ switch (message_info->link) {
+ case HDCP_LINK_SECONDARY:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_secondary;
+ break;
+ case HDCP_LINK_PRIMARY:
+ default:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_primary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_primary;
+ break;
+ }
+
+ if (hdcp_cmd_is_read[message_info->msg_id]) {
+ i2c_payloads[1].write = false;
+ i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads);
+ i2c_payloads[1].length = message_info->length;
+ i2c_payloads[1].data = message_info->data;
+ } else {
+ i2c_command.number_of_payloads = 1;
+ buff = kzalloc(message_info->length + 1, GFP_KERNEL);
+
+ if (!buff)
+ return false;
+
+ buff[0] = offset;
+ memmove(&buff[1], message_info->data, message_info->length);
+ i2c_payloads[0].length = message_info->length + 1;
+ i2c_payloads[0].data = buff;
+ }
+
+ i2c_command.payloads = i2c_payloads;
+ i2c_command.engine = I2C_COMMAND_ENGINE_HW; /* only HW */
+ i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ result = dm_helpers_submit_i2c(
+ link->ctx,
+ link,
+ &i2c_command);
+ kfree(buff);
+
+ return result;
+}
+
+static const struct protection_properties hdmi_14_protection = {
+ .supported = true,
+ .process_transaction = hdmi_14_process_transaction
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+ [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
+};
+
+static bool dpcd_access_helper(
+ struct dc_link *link,
+ uint32_t length,
+ uint8_t *data,
+ uint32_t dpcd_addr,
+ bool is_read)
+{
+ enum dc_status status;
+ uint32_t cur_length = 0;
+ uint32_t offset = 0;
+ uint32_t ksv_read_size = 0x6803b - 0x6802c;
+
+ /* Read KSV, need repeatedly handle */
+ if (dpcd_addr == 0x6802c) {
+ if (length % HDCP14_KSV_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_KSV_SIZE);
+ }
+ if (length > HDCP14_MAX_KSV_FIFO_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_MAX_KSV_FIFO_SIZE);
+ }
+
+ DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n",
+ __func__,
+ length / HDCP14_KSV_SIZE);
+
+ while (length > 0) {
+ if (length > ksv_read_size) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ ksv_read_size);
+
+ data += ksv_read_size;
+ length -= ksv_read_size;
+ } else {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ length);
+
+ data += length;
+ length = 0;
+ }
+
+ if (status != DC_OK)
+ return false;
+ }
+ } else {
+ while (length > 0) {
+ if (length > DEFAULT_AUX_MAX_DATA_SIZE)
+ cur_length = DEFAULT_AUX_MAX_DATA_SIZE;
+ else
+ cur_length = length;
+
+ if (is_read) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ } else {
+ status = core_link_write_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ }
+
+ if (status != DC_OK)
+ return false;
+
+ length -= cur_length;
+ offset += cur_length;
+ }
+ }
+ return true;
+}
+
+static bool dp_11_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ return dpcd_access_helper(
+ link,
+ message_info->length,
+ message_info->data,
+ hdcp_dpcd_addrs[message_info->msg_id],
+ hdcp_cmd_is_read[message_info->msg_id]);
+}
+
+static const struct protection_properties dp_11_protection = {
+ .supported = true,
+ .process_transaction = dp_11_process_transaction
+};
+
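
Two details in hdcp_msg.c are worth spelling out: the I2C addresses 0x3a and 0x3b are just the 8-bit HDCP DDC addresses 0x74 and 0x76 shifted into the 7-bit form the i2c payloads expect, and dpcd_access_helper() splits large messages into AUX-sized chunks (DEFAULT_AUX_MAX_DATA_SIZE is presumably the 16-byte DP AUX payload limit), while the KSV FIFO is read in 0x6803b - 0x6802c = 15-byte slices, i.e. three 5-byte KSVs per transaction. A minimal user-space sketch of the chunking loop, with the 16-byte limit hard-coded as an assumption:

/* Stand-alone sketch of the chunking done in dpcd_access_helper();
 * the 16-byte limit mirrors the DP AUX maximum payload and is an
 * assumption here, not taken from the patch. */
#include <stdint.h>
#include <stdio.h>

#define AUX_MAX_DATA_SIZE 16

static void chunked_access(uint32_t dpcd_addr, uint32_t length)
{
	uint32_t offset = 0;

	while (length > 0) {
		uint32_t cur = length > AUX_MAX_DATA_SIZE ?
			       AUX_MAX_DATA_SIZE : length;

		/* one core_link_read_dpcd()/write_dpcd() call per chunk */
		printf("access 0x%05x, %u bytes\n",
		       (unsigned int)(dpcd_addr + offset),
		       (unsigned int)cur);

		length -= cur;
		offset += cur;
	}
}

int main(void)
{
	chunked_access(0x69000, 50);	/* e.g. a 50-byte HDCP 2.x message */
	return 0;
}
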
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 0a094d7c9380..4ead89dd7c41 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,11 +43,12 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
DC_FAIL_DSC_VALIDATE = 16,
DC_NO_DSC_RESOURCE = 17,
-#endif
DC_FAIL_UNSUPPORTED_1 = 18,
+ DC_FAIL_CLK_EXCEED_MAX = 21,
+ DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/
+ DC_FAIL_CLK_BELOW_CFG_REQUIRED = 23, /*THIS IS hard_min in PPLIB*/
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index a148ffde8b12..f285b76888fb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,13 +33,11 @@
#include "dc_bios_types.h"
#include "mem_input.h"
#include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "mpc.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dwb.h"
#include "mcif_wb.h"
-#endif
#define MAX_CLOCK_SOURCES 7
@@ -52,7 +50,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
/************ link *****************/
struct link_init_data {
@@ -63,11 +63,6 @@ struct link_init_data {
TODO: remove it when DC is complete. */
};
-enum {
- FREE_ACQUIRED_RESOURCE = 0,
- KEEP_ACQUIRED_RESOURCE = 1,
-};
-
struct dc_link *link_create(const struct link_init_data *init_params);
void link_destroy(struct dc_link **link);
@@ -82,7 +77,7 @@ void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
-void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx);
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
@@ -92,6 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
struct resource_pool;
struct dc_state;
struct resource_context;
+struct clk_bw_params;
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
@@ -105,7 +101,7 @@ struct resource_funcs {
int (*populate_dml_pipes)(
struct dc *dc,
- struct resource_context *res_ctx,
+ struct dc_state *context,
display_e2e_pipe_params_st *pipes);
enum dc_status (*validate_global)(
@@ -135,7 +131,6 @@ struct resource_funcs {
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*populate_dml_writeback_from_context)(
struct dc *dc,
struct resource_context *res_ctx,
@@ -146,7 +141,9 @@ struct resource_funcs {
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-#endif
+ void (*update_bw_bounding_box)(
+ struct dc *dc,
+ struct clk_bw_params *bw_params);
};
@@ -175,7 +172,6 @@ struct resource_pool {
struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
bool i2c_hw_buffer_in_use;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dwbc *dwbc[MAX_DWB_PIPES];
struct mcif_wb *mcif_wb[MAX_DWB_PIPES];
struct {
@@ -183,11 +179,8 @@ struct resource_pool {
unsigned int gsl_1:1;
unsigned int gsl_2:1;
} gsl_groups;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dscs[MAX_PIPES];
-#endif
unsigned int pipe_count;
unsigned int underlay_pipe_index;
@@ -201,9 +194,7 @@ struct resource_pool {
unsigned int timing_generator_count;
unsigned int mpcc_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
unsigned int writeback_pipe_count;
-#endif
/*
* reserved clock source for DP
*/
@@ -221,9 +212,12 @@ struct resource_pool {
struct abm *abm;
struct dmcu *dmcu;
+ struct dmub_psr *psr;
const struct resource_funcs *funcs;
const struct resource_caps *res_cap;
+
+ struct ddc_service *oem_device;
};
struct dcn_fe_bandwidth {
@@ -233,10 +227,7 @@ struct dcn_fe_bandwidth {
struct stream_resource {
struct output_pixel_processor *opp;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dsc;
- int dscclk_khz;
-#endif
struct timing_generator *tg;
struct stream_encoder *stream_enc;
struct audio *audio;
@@ -245,12 +236,10 @@ struct stream_resource {
struct encoder_info_frame encoder_info_frame;
struct abm *abm;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* There are only (num_pipes+1)/2 groups. 0 means unassigned,
* otherwise it's using group number 'gsl_group-1'
*/
uint8_t gsl_group;
-#endif
};
struct plane_resource {
@@ -299,18 +288,18 @@ struct pipe_ctx {
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
+ struct pipe_ctx *next_odm_pipe;
+ struct pipe_ctx *prev_odm_pipe;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct _vcs_dpi_display_dlg_regs_st dlg_regs;
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
#endif
union pipe_update_flags update_flags;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
-#endif
};
struct resource_context {
@@ -319,9 +308,7 @@ struct resource_context {
bool is_audio_acquired[MAX_PIPES];
uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
uint8_t dp_clock_source_ref_count;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool is_dsc_acquired[MAX_PIPES];
-#endif
};
struct dce_bw_output {
@@ -341,18 +328,14 @@ struct dce_bw_output {
int blackout_recovery_time_us;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dcn_bw_writeback {
struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES];
};
-#endif
struct dcn_bw_output {
struct dc_clocks clk;
struct dcn_watermark_set watermarks;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dcn_bw_writeback bw_writeback;
-#endif
};
union bw_output {
@@ -386,16 +369,12 @@ struct dc_state {
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
struct clk_mgr *clk_mgr;
- struct {
- bool full_update_needed : 1;
- } commit_hints;
-
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index b1fab251c09b..de2d160114db 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_channel_operation_result *operation_result);
@@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
+uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
+
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2d95eff94239..8b1f0ce6c2a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,6 +28,8 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
struct dc_link;
struct dc_stream_state;
@@ -38,6 +40,14 @@ bool dp_verify_link_cap(
struct dc_link_settings *known_limit_link_setting,
int *fail_count);
+bool dp_verify_link_cap_with_retries(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int attempts);
+
+bool dp_verify_mst_link_cap(
+ struct dc_link *link);
+
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
@@ -47,10 +57,11 @@ void decide_link_settings(
struct dc_link_settings *link_setting);
bool perform_link_training_with_retries(
- struct dc_link *link,
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
- int attempts);
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal);
bool is_mst_supported(struct dc_link *link);
@@ -62,11 +73,16 @@ bool is_dp_active_dongle(const struct dc_link *link);
void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
+
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
+void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
-#endif
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
index 45a07eeffbb6..45a07eeffbb6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index e79cd4e92919..e77b3a76766d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -140,6 +140,9 @@ struct write_command_context {
struct aux_engine_funcs {
+ bool (*configure_timeout)(
+ struct ddc_service *ddc,
+ uint32_t timeout);
void (*destroy)(
struct aux_engine **ptr);
bool (*acquire_engine)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 36ebd5bc7863..ac530c057ddd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -28,6 +28,129 @@
#include "dc.h"
+#define DCN_MINIMUM_DISPCLK_Khz 100000
+#define DCN_MINIMUM_DPPCLK_Khz 100000
+
+/* Constants */
+#define DDR4_DRAM_WIDTH 64
+#define WM_A 0
+#define WM_B 1
+#define WM_C 2
+#define WM_D 3
+#define WM_SET_COUNT 4
+
+/* Will these bw structures be ASIC specific? */
+
+#define MAX_NUM_DPM_LVL 8
+
+struct clk_limit_table_entry {
+ unsigned int voltage; /* millivolts with 2 fractional bits */
+ unsigned int dcfclk_mhz;
+ unsigned int fclk_mhz;
+ unsigned int memclk_mhz;
+ unsigned int socclk_mhz;
+};
+
+/* This table is contiguous */
+struct clk_limit_table {
+ struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
+ unsigned int num_entries;
+};
+
+struct wm_range_table_entry {
+ unsigned int wm_inst;
+ unsigned int wm_type;
+ double pstate_latency_us;
+ double sr_exit_time_us;
+ double sr_enter_plus_exit_time_us;
+ bool valid;
+};
+
+
+struct clk_log_info {
+ bool enabled;
+ char *pBuf;
+ unsigned int bufSize;
+ unsigned int *sum_chars_printed;
+};
+
+struct clk_state_registers_and_bypass {
+ uint32_t dcfclk;
+ uint32_t dcf_deep_sleep_divider;
+ uint32_t dcf_deep_sleep_allow;
+ uint32_t dprefclk;
+ uint32_t dispclk;
+ uint32_t dppclk;
+
+ uint32_t dppclk_bypass;
+ uint32_t dcfclk_bypass;
+ uint32_t dprefclk_bypass;
+ uint32_t dispclk_bypass;
+};
+
+struct rv1_clk_internal {
+ uint32_t CLK0_CLK8_CURRENT_CNT; //dcfclk
+ uint32_t CLK0_CLK8_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK0_CLK8_ALLOW_DS; //dcf_deep_sleep_allow
+ uint32_t CLK0_CLK10_CURRENT_CNT; //dprefclk
+ uint32_t CLK0_CLK11_CURRENT_CNT; //dispclk
+
+ uint32_t CLK0_CLK8_BYPASS_CNTL; //dcfclk bypass
+ uint32_t CLK0_CLK10_BYPASS_CNTL; //dprefclk bypass
+ uint32_t CLK0_CLK11_BYPASS_CNTL; //dispclk bypass
+};
+
+struct rn_clk_internal {
+ uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
+ uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk
+ uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk
+ uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk
+ uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow
+
+ uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass
+ uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass
+ uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass
+ uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass
+
+};
+
+/* For dtn logging and debugging */
+struct clk_state_registers {
+ uint32_t CLK0_CLK8_CURRENT_CNT; //dcfclk
+ uint32_t CLK0_CLK8_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK0_CLK8_ALLOW_DS; //dcf_deep_sleep_allow
+ uint32_t CLK0_CLK10_CURRENT_CNT; //dprefclk
+ uint32_t CLK0_CLK11_CURRENT_CNT; //dispclk
+};
+
+/* TODO: combine this with the above */
+struct clk_bypass {
+ uint32_t dcfclk_bypass;
+ uint32_t dispclk_pypass;
+ uint32_t dprefclk_bypass;
+};
+/*
+ * This table is not contiguous and can have holes; each
+ * entry corresponds to one set of WM. For example, with
+ * 2 DPM levels and LPDDR, WM sets A, B and D will be
+ * occupied and C will be empty.
+ */
+struct wm_table {
+ struct wm_range_table_entry entries[WM_SET_COUNT];
+};
+
+struct clk_bw_params {
+ unsigned int vram_type;
+ unsigned int num_channels;
+ struct clk_limit_table clk_table;
+ struct wm_table wm_table;
+};
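
As the wm_table comment notes, watermark sets occupy fixed slots A through D and unused slots are simply marked invalid rather than compacted away. A stand-alone illustration of that sparse layout (the entry values are made up):

/* Stand-alone illustration of a sparse wm_table: slots A, B and D
 * populated, slot C left invalid. Values are made up. */
#include <stdbool.h>
#include <stdio.h>

#define WM_A 0
#define WM_B 1
#define WM_C 2
#define WM_D 3
#define WM_SET_COUNT 4

struct wm_entry {			/* trimmed wm_range_table_entry */
	unsigned int wm_inst;
	double pstate_latency_us;
	bool valid;
};

int main(void)
{
	struct wm_entry table[WM_SET_COUNT] = {
		[WM_A] = { .wm_inst = WM_A, .pstate_latency_us = 11.72, .valid = true },
		[WM_B] = { .wm_inst = WM_B, .pstate_latency_us = 11.72, .valid = true },
		[WM_C] = { .valid = false },			/* hole */
		[WM_D] = { .wm_inst = WM_D, .pstate_latency_us = 11.72, .valid = true },
	};

	for (int i = 0; i < WM_SET_COUNT; i++)
		if (table[i].valid)
			printf("WM set %c in use\n", 'A' + i);

	return 0;
}
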
/* Public interfaces */
struct clk_states {
@@ -51,13 +174,25 @@ struct clk_mgr_funcs {
void (*init_clocks)(struct clk_mgr *clk_mgr);
void (*enable_pme_wa) (struct clk_mgr *clk_mgr);
+ void (*get_clock)(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg);
+
+ bool (*are_clock_states_equal) (struct dc_clocks *a,
+ struct dc_clocks *b);
+ void (*notify_wm_ranges)(struct clk_mgr *clk_mgr);
};
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
+ bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
+ int dentist_vco_freq_khz;
+ struct clk_state_registers_and_bypass boot_snapshot;
+ struct clk_bw_params *bw_params;
};
/* forward declarations */
@@ -67,4 +202,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 0835ac041acf..862952c0286a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -64,6 +64,8 @@ enum dentist_divider_range {
***************************************************************************************
*/
+/* Macros */
+
#define TO_CLK_MGR_INTERNAL(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_internal, base)
@@ -94,12 +96,10 @@ enum dentist_divider_range {
.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define CLK_REG_LIST_NV10() \
SR(DENTIST_DISPCLK_CNTL), \
CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \
CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0)
-#endif
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -118,7 +118,6 @@ enum dentist_divider_range {
CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\
CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh),
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
@@ -128,7 +127,6 @@ enum dentist_divider_range {
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh)
-#endif
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
@@ -141,30 +139,24 @@ enum dentist_divider_range {
****************** Clock Manager Private Structures ***********************************
***************************************************************************************
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define CLK20_REG_FIELD_LIST(type) \
type DENTIST_DPPCLK_WDIVIDER; \
type DENTIST_DPPCLK_CHG_DONE; \
type FbMult_int; \
type FbMult_frac;
-#endif
#define VBIOS_SMU_REG_FIELD_LIST(type) \
type CONTENT;
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
CLK20_REG_FIELD_LIST(uint8_t)
-#endif
VBIOS_SMU_REG_FIELD_LIST(uint32_t)
};
struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
CLK20_REG_FIELD_LIST(uint32_t)
-#endif
VBIOS_SMU_REG_FIELD_LIST(uint32_t)
};
@@ -172,16 +164,29 @@ struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
uint32_t CLK3_CLK2_DFS_CNTL;
uint32_t CLK3_CLK_PLL_REQ;
-#endif
uint32_t MP1_SMN_C2PMSG_67;
uint32_t MP1_SMN_C2PMSG_83;
uint32_t MP1_SMN_C2PMSG_91;
};
+enum clock_type {
+ clock_type_dispclk = 1,
+ clock_type_dcfclk,
+ clock_type_socclk,
+ clock_type_pixelclk,
+ clock_type_phyclk,
+ clock_type_dppclk,
+ clock_type_fclk,
+ clock_type_dcfdsclk,
+ clock_type_dscclk,
+ clock_type_uclk,
+ clock_type_dramclk,
+};
+
+
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
@@ -189,6 +194,7 @@ struct state_dependent_clocks {
struct clk_mgr_internal {
struct clk_mgr base;
+ int smu_ver;
struct pp_smu_funcs *pp_smu;
struct clk_mgr_internal_funcs *funcs;
@@ -207,12 +213,12 @@ struct clk_mgr_internal {
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
/*TODO: figure out which of the below fields should be here vs in asic specific portion */
- int dentist_vco_freq_khz;
-
/* Cache the status of DFS-bypass feature*/
bool dfs_bypass_enabled;
/* True if the DFS-bypass feature is enabled and active. */
bool dfs_bypass_active;
+
+ uint32_t dfs_ref_freq_khz;
/*
* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
* This is basically "Crystal Frequency In KHz" (XTALIN) frequency
@@ -276,8 +282,14 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_support, bool cur_support)
{
- // Whenever we are transitioning pstate support, we always want to notify prior to committing state
- return (calc_support != cur_support) ? !safe_to_lower : false;
+ if (cur_support != calc_support) {
+ if (calc_support == true && safe_to_lower)
+ return true;
+ else if (calc_support == false && !safe_to_lower)
+ return true;
+ }
+
+ return false;
}
int clk_mgr_helper_get_active_display_cnt(
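
The rewritten should_update_pstate_support() only reports a p-state support change in the direction that matches the current pass: newly gained support is applied when safe_to_lower is set and lost support when it is not, so the more restrictive setting is always programmed first (reading safe_to_lower as the post-commit pass is an assumption based on the usual raise-early/lower-late clock sequencing). A stand-alone truth table of the new logic:

/* Stand-alone truth table for the new should_update_pstate_support()
 * logic, copied from the patch and renamed for the sketch. */
#include <stdbool.h>
#include <stdio.h>

static bool should_update(bool safe_to_lower, bool calc, bool cur)
{
	if (cur != calc) {
		if (calc && safe_to_lower)
			return true;
		else if (!calc && !safe_to_lower)
			return true;
	}
	return false;
}

int main(void)
{
	for (int s = 0; s <= 1; s++)
		for (int c = 0; c <= 1; c++)
			for (int k = 0; k <= 1; k++)
				printf("safe_to_lower=%d calc=%d cur=%d -> %d\n",
				       s, c, k, should_update(s, c, k));
	return 0;
}
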
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 9502478c4a1b..c0dc1d0f5cae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -52,7 +52,6 @@ struct dcn_hubbub_wm {
struct dcn_hubbub_wm_set sets[4];
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dcn_hubbub_page_table_depth {
DCN_PAGE_TABLE_DEPTH_1_LEVEL,
DCN_PAGE_TABLE_DEPTH_2_LEVEL,
@@ -80,6 +79,8 @@ struct dcn_hubbub_phys_addr_config {
uint64_t page_table_end_addr;
uint64_t page_table_base_addr;
} gart_config;
+
+ uint64_t page_table_default_page_addr;
};
struct dcn_hubbub_virt_addr_config {
@@ -99,13 +100,11 @@ struct hubbub_addr_config {
} default_addrs;
};
-#endif
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
struct dchub_init_data *dh_data);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
int (*init_dchub_sys_ctx)(
struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
@@ -114,7 +113,6 @@ struct hubbub_funcs {
struct dcn_hubbub_virt_addr_config *va_config,
int vmid);
-#endif
bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output);
@@ -141,6 +139,11 @@ struct hubbub_funcs {
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
+
+ bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
+ void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
+
+ void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index c68f0ce346c7..5315f1f86b21 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -52,6 +52,8 @@ struct dmcu {
enum dmcu_state dmcu_state;
struct dmcu_version dmcu_version;
unsigned int cached_wait_loop_number;
+ uint32_t psp_version;
+ bool auto_load_dmcu;
};
struct dmcu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 60c671fcf186..45ef390ae052 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -36,13 +36,10 @@ struct dpp {
struct dpp_caps *caps;
struct pwl_params regamma_params;
struct pwl_params degamma_params;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dpp_cursor_attributes cur_attr;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pwl_params shaper_params;
-#endif
+ bool cm_bypass_mode;
};
struct dpp_input_csc_matrix {
@@ -50,12 +47,31 @@ struct dpp_input_csc_matrix {
uint16_t regval[12];
};
+static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
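
The dpp_input_csc_matrix entries above decode cleanly if read as signed fixed-point values with 13 fractional bits (1.0 == 0x2000): the RGB rows form identity matrices and the YCbCr rows reproduce the familiar BT.601/BT.709 conversion coefficients. The exact register format is an assumption here, but the arithmetic is easy to verify:

/* Decode a few dpp_input_csc_matrix values, assuming signed 16-bit
 * entries with 13 fractional bits (1.0 == 0x2000). */
#include <stdint.h>
#include <stdio.h>

static double fx(uint16_t v)
{
	return (double)(int16_t)v / 0x2000;
}

int main(void)
{
	printf("0x2000 -> %+.4f\n", fx(0x2000));	/* 1.0000, identity gain   */
	printf("0x2cdd -> %+.4f\n", fx(0x2cdd));	/* ~+1.402, BT.601 R<-Cr   */
	printf("0xe991 -> %+.4f\n", fx(0xe991));	/* ~-0.701 = -1.402 * 0.5  */
	printf("0x3265 -> %+.4f\n", fx(0x3265));	/* ~+1.575, BT.709 R<-Cr   */
	return 0;
}
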
struct dpp_grph_csc_adjustment {
struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
enum graphics_gamut_adjust_type gamut_adjust_type;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct cnv_color_keyer_params {
int color_keyer_en;
int color_keyer_mode;
@@ -81,7 +97,6 @@ struct cnv_alpha_2bit_lut {
int lut2;
int lut3;
};
-#endif
struct dcn_dpp_state {
uint32_t is_enabled;
@@ -189,18 +204,14 @@ struct dpp_funcs {
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-#else
- enum dc_color_space input_color_space);
-#endif
void (*dpp_full_bypass)(struct dpp *dpp_base);
void (*set_cursor_attributes)(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format);
+ struct dc_cursor_attributes *cursor_attributes);
void (*set_cursor_position)(
struct dpp *dpp_base,
@@ -223,7 +234,6 @@ struct dpp_funcs {
bool dppclk_div,
bool enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool (*dpp_program_blnd_lut)(
struct dpp *dpp,
const struct pwl_params *params);
@@ -236,7 +246,6 @@ struct dpp_funcs {
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
-#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c905d020b59e..c59740084ebc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -22,13 +22,16 @@
* Authors: AMD
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef __DAL_DSC_H__
#define __DAL_DSC_H__
#include "dc_dsc.h"
#include "dc_hw_types.h"
-#include "dc_dp_types.h"
+#include "dc_types.h"
+/* do not include any other headers
+ * or else it might break Edid Utility functionality.
+ */
+
/* Input parameters for configuring DSC from the outside of DSC */
struct dsc_config {
@@ -81,21 +84,16 @@ struct dsc_enc_caps {
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
};
-struct display_stream_compressor {
- const struct dsc_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg, uint8_t *dsc_packed_pps);
+ struct dsc_optc_config *dsc_optc_cfg);
+ bool (*dsc_get_packed_pps)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+ uint8_t *dsc_packed_pps);
void (*dsc_enable)(struct display_stream_compressor *dsc, int opp_pipe);
void (*dsc_disable)(struct display_stream_compressor *dsc);
};
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index a3409294ae0c..459f95f52486 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -45,38 +45,21 @@ enum dwb_source {
dwb_src_scl = 0, /* for DCE7x/9x, DCN won't support. */
dwb_src_blnd, /* for DCE7x/9x */
dwb_src_fmt, /* for DCE7x/9x */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
dwb_src_otg0 = 0x100, /* for DCN1.x/DCN2.x, register: mmDWB_SOURCE_SELECT */
dwb_src_otg1, /* for DCN1.x/DCN2.x */
dwb_src_otg2, /* for DCN1.x/DCN2.x */
dwb_src_otg3, /* for DCN1.x/DCN2.x */
-#else
- dwb_src_otg0 = 0x100, /* for DCN1.x, register: mmDWB_SOURCE_SELECT */
- dwb_src_otg1, /* for DCN1.x */
- dwb_src_otg2, /* for DCN1.x */
- dwb_src_otg3, /* for DCN1.x */
-#endif
- dwb_src_mpc0 = 0x200, /* for DCN2, register: mmMPC_DWB0_MUX, mmMPC_DWB1_MUX, mmMPC_DWB2_MUX */
- dwb_src_mpc1, /* for DCN2 */
- dwb_src_mpc2, /* for DCN2 */
- dwb_src_mpc3, /* for DCN2 */
- dwb_src_mpc4, /* for DCN2 */
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* DCN1.x, DCN2.x support 2 pipes */
-#else
-/* DCN1.x supports 2 pipes */
-#endif
enum dwb_pipe {
dwb_pipe0 = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
dwb_pipe1,
#endif
dwb_pipe_max_num,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
enum dwb_frame_capture_enable {
DWB_FRAME_CAPTURE_DISABLE = 0,
DWB_FRAME_CAPTURE_ENABLE = 1,
@@ -89,9 +72,7 @@ enum wbscl_coef_filter_type_sel {
WBSCL_COEF_CHROMA_HORZ_FILTER = 3
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dwb_warmup_params {
bool warmup_en; /* false: normal mode, true: enable pattern generator */
bool warmup_mode; /* false: 420, true: 444 */
@@ -100,7 +81,6 @@ struct dwb_warmup_params {
int warmup_width; /* Pattern width (pixels) */
int warmup_height; /* Pattern height (lines) */
};
-#endif
struct dwb_caps {
enum dce_version hw_version; /* DCN engine version. */
@@ -133,7 +113,8 @@ struct dwbc {
int wb_src_plane_inst;/*hubp, mpcc, inst*/
bool update_privacymask;
uint32_t mask_id;
-
+ int otg_inst;
+ bool mvc_cfg;
};
struct dwbc_funcs {
@@ -162,13 +143,11 @@ struct dwbc_funcs {
struct dwbc *dwbc,
bool is_new_content);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*set_warmup)(
struct dwbc *dwbc,
struct dwb_warmup_params *warmup_params);
-#endif
bool (*get_dwb_status)(
struct dwbc *dwbc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
index 90d0148430fb..5253dc8b15f8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
@@ -28,12 +28,22 @@
#include "gpio_types.h"
+
+union gpio_hw_container {
+ struct hw_ddc *ddc;
+ struct hw_generic *generic;
+ struct hw_hpd *hpd;
+};
+
struct gpio {
struct gpio_service *service;
struct hw_gpio_pin *pin;
enum gpio_id id;
uint32_t en;
+
+ union gpio_hw_container hw_container;
enum gpio_mode mode;
+
/* when GPIO comes from VBIOS, it has defined output state */
enum gpio_pin_output_state output_state;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 51bff8717cc9..2cb8466e657b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -38,15 +38,18 @@ enum cursor_pitch {
};
enum cursor_lines_per_chunk {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */
-#endif
CURSOR_LINE_PER_CHUNK_2 = 1,
CURSOR_LINE_PER_CHUNK_4,
CURSOR_LINE_PER_CHUNK_8,
CURSOR_LINE_PER_CHUNK_16
};
+enum hubp_ind_block_size {
+ hubp_ind_block_unconstrained = 0,
+ hubp_ind_block_64b,
+};
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -60,6 +63,26 @@ struct hubp {
bool power_gated;
};
+struct surface_flip_registers {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+ bool tmz_surface;
+ bool immediate;
+ uint8_t vmid;
+ bool grph_stereo;
+};
+
struct hubp_funcs {
void (*hubp_setup)(
struct hubp *hubp,
@@ -74,12 +97,16 @@ struct hubp_funcs {
struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
void (*dcc_control)(struct hubp *hubp, bool enable,
- bool independent_64b_blks);
+ enum hubp_ind_block_size blk_size);
+
void (*mem_program_viewport)(
struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c);
+ void (*apply_PLAT_54186_wa)(struct hubp *hubp,
+ const struct dc_plane_address *address);
+
bool (*hubp_program_surface_flip_and_addr)(
struct hubp *hubp,
const struct dc_plane_address *address,
@@ -103,7 +130,7 @@ struct hubp_funcs {
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
@@ -111,9 +138,6 @@ struct hubp_funcs {
bool (*hubp_is_flip_pending)(struct hubp *hubp);
- void (*hubp_update_dchub)(struct hubp *hubp,
- struct dchub_init_data *dh_data);
-
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
@@ -136,7 +160,6 @@ struct hubp_funcs {
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
void (*hubp_init)(struct hubp *hubp);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*dmdata_set_attributes)(
struct hubp *hubp,
const struct dc_dmdata_attributes *attr);
@@ -156,7 +179,13 @@ struct hubp_funcs {
void (*hubp_set_flip_control_surface_gsl)(
struct hubp *hubp,
bool enable);
-#endif
+
+ void (*validate_dml_output)(
+ struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index f82365e2d03c..75d419081e76 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -36,9 +36,7 @@
#define MAX_AUDIOS 7
#define MAX_PIPES 6
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define MAX_DWB_PIPES 1
-#endif
struct gamma_curve {
uint32_t offset;
@@ -81,7 +79,6 @@ struct pwl_result_data {
uint32_t delta_blue_reg;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_rgb {
uint32_t red;
uint32_t green;
@@ -110,7 +107,6 @@ struct tetrahedral_params {
bool use_12bits;
};
-#endif
/* arr_curve_points - regamma regions/segments specification
* arr_points - beginning and end point specified separately (only one on DCE)
@@ -195,13 +191,11 @@ enum opp_regamma {
OPP_REGAMMA_USER
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
enum optc_dsc_mode {
OPTC_DSC_DISABLED = 0,
OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */
OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */
};
-#endif
struct dc_bias_and_scale {
uint16_t scale_red;
@@ -224,12 +218,8 @@ enum test_pattern_mode {
TEST_PATTERN_MODE_VERTICALBARS,
TEST_PATTERN_MODE_HORIZONTALBARS,
TEST_PATTERN_MODE_SINGLERAMP_RGB,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
TEST_PATTERN_MODE_DUALRAMP_RGB,
TEST_PATTERN_MODE_XR_BIAS_RGB
-#else
- TEST_PATTERN_MODE_DUALRAMP_RGB
-#endif
};
enum test_pattern_color_format {
@@ -255,6 +245,13 @@ enum controller_dp_test_pattern {
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
};
+enum controller_dp_color_space {
+ CONTROLLER_DP_COLOR_SPACE_RGB,
+ CONTROLLER_DP_COLOR_SPACE_YCBCR601,
+ CONTROLLER_DP_COLOR_SPACE_YCBCR709,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED
+};
+
enum dc_lut_mode {
LUT_BYPASS,
LUT_RAM_A,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index e5e8640a9ef3..fb748f082c56 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -113,26 +113,21 @@ struct link_encoder {
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool usbc_combo_phy;
-#endif
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct link_enc_state {
uint32_t dphy_fec_en;
uint32_t dphy_fec_ready_shadow;
uint32_t dphy_fec_active_status;
+ uint32_t dp_link_training_complete;
};
-#endif
struct link_encoder_funcs {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*read_state)(
struct link_encoder *enc, struct link_enc_state *s);
-#endif
bool (*validate_output_with_stream)(
struct link_encoder *enc, const struct dc_stream_state *stream);
void (*hw_init)(struct link_encoder *enc);
@@ -174,7 +169,6 @@ struct link_encoder_funcs {
unsigned int (*get_dig_frontend)(struct link_encoder *enc);
void (*destroy)(struct link_encoder **enc);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*fec_set_enable)(struct link_encoder *enc,
bool enable);
@@ -182,7 +176,13 @@ struct link_encoder_funcs {
bool ready);
bool (*fec_is_active)(struct link_encoder *enc);
-#endif
+ bool (*is_in_alt_mode) (struct link_encoder *enc);
+
+ void (*get_max_link_cap)(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
+ enum signal_type (*get_dig_mode)(
+ struct link_encoder *enc);
};
#endif /* LINK_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index da89c2edb07c..2e2310f1901a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -40,6 +40,9 @@ struct cstate_pstate_watermarks_st {
struct dcn_watermarks {
uint32_t pte_meta_urgent_ns;
uint32_t urgent_ns;
+ uint32_t frac_urg_bw_nom;
+ uint32_t frac_urg_bw_flip;
+ int32_t urgent_latency_ns;
struct cstate_pstate_watermarks_st cstate_pstate;
};
@@ -149,7 +152,7 @@ struct mem_input_funcs {
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 45b94e319cd4..094afc4c8173 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -31,9 +31,7 @@
#define MAX_MPCC 6
#define MAX_OPP 6
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define MAX_DWB 1
-#endif
enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_DISABLE = 0,
@@ -66,14 +64,12 @@ struct mpcc_blnd_cfg {
int global_alpha;
bool overlap_only;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* MPCC top/bottom gain settings */
int bottom_gain_mode;
int background_color_bpc;
int top_gain;
int bottom_inside_gain;
int bottom_outside_gain;
-#endif
};
struct mpcc_sm_cfg {
@@ -90,7 +86,6 @@ struct mpcc_sm_cfg {
int force_next_field_polarity;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct mpc_denorm_clamp {
int clamp_max_r_cr;
int clamp_min_r_cr;
@@ -99,7 +94,6 @@ struct mpc_denorm_clamp {
int clamp_max_b_cb;
int clamp_min_b_cb;
};
-#endif
/*
* MPCC connection and blending configuration for a single MPCC instance.
@@ -126,9 +120,8 @@ struct mpc {
struct dc_context *ctx;
struct mpcc mpcc_array[MAX_MPCC];
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pwl_params blender_params;
-#endif
+ bool cm_bypass_mode;
};
struct mpcc_state {
@@ -198,6 +191,9 @@ struct mpc_funcs {
* Return: void
*/
void (*mpc_init)(struct mpc *mpc);
+ void (*mpc_init_single_inst)(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
/*
* Update the blending configuration for a specified MPCC.
@@ -226,7 +222,6 @@ struct mpc_funcs {
struct mpc *mpc,
struct mpc_tree *tree);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*set_denorm)(struct mpc *mpc,
int opp_id,
enum dc_color_depth output_depth);
@@ -250,7 +245,10 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_id,
const struct pwl_params *params);
-#endif
+ void (*power_on_mpc_mem_pwr)(
+ struct mpc *mpc,
+ int mpcc_id,
+ bool power_on);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 5d8a7bcccc6f..7575564b2265 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -208,6 +208,7 @@ struct output_pixel_processor {
struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
+ uint32_t dyn_expansion;
};
enum fmt_stereo_action {
@@ -262,9 +263,7 @@ struct oppbuf_params {
enum oppbuf_display_segmentation mso_segmentation;
uint32_t mso_overlap_pixel_num;
uint32_t pixel_repetition;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t num_segment_padded_pixels;
-#endif
};
struct opp_funcs {
@@ -304,10 +303,10 @@ struct opp_funcs {
struct output_pixel_processor *opp,
bool enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*opp_set_disp_pattern_generator)(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
@@ -316,11 +315,6 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
- void (*opp_convert_pti)(
- struct output_pixel_processor *opp,
- bool enable,
- bool polarity);
-
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
@@ -328,7 +322,6 @@ struct opp_funcs {
void (*opp_program_left_edge_extra_pixel)(
struct output_pixel_processor *opp,
bool count);
-#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index ed7d9588b309..351b387ad606 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -65,13 +65,11 @@ struct audio_clock_info {
uint32_t cts_48khz;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
enum dynamic_metadata_mode {
dmdata_dp,
dmdata_hdmi,
dmdata_dolby_vision
};
-#endif
struct encoder_info_frame {
/* auxiliary video information */
@@ -90,9 +88,7 @@ struct encoder_info_frame {
struct encoder_unblank_param {
struct dc_link_settings link_settings;
struct dc_crtc_timing timing;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
- bool odm;
-#endif
+ int opp_cnt;
};
struct encoder_set_dp_phy_pattern_param {
@@ -109,7 +105,6 @@ struct stream_encoder {
enum engine_id id;
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct enc_state {
uint32_t dsc_mode; // DISABLED 0; 1 or 2 indicate enabled state.
uint32_t dsc_slice_width;
@@ -119,16 +114,13 @@ struct enc_state {
uint32_t sec_gsp_pps_enable;
uint32_t sec_stream_enable;
};
-#endif
struct stream_encoder_funcs {
- #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
- #endif
void (*dp_set_stream_attribute)(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void (*hdmi_set_stream_attribute)(
@@ -211,15 +203,28 @@ struct stream_encoder_funcs {
struct stream_encoder *enc,
int tg_inst);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ void (*hdmi_reset_stream_attribute)(
+ struct stream_encoder *enc);
+
+ unsigned int (*dig_source_otg)(
+ struct stream_encoder *enc);
+
+ bool (*dp_get_pixel_format)(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
+ void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
+
void (*dp_set_dsc_config)(
struct stream_encoder *enc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
- uint32_t dsc_slice_width,
- uint8_t *dsc_packed_pps);
-#endif
+ uint32_t dsc_slice_width);
+
+ void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps);
void (*set_dynamic_metadata)(struct stream_encoder *enc,
bool enable,
@@ -229,7 +234,6 @@ struct stream_encoder_funcs {
void (*dp_set_odm_combine)(
struct stream_encoder *enc,
bool odm_combine);
-#endif
};
#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 5e93bc0e8ff9..e5e7d94026fc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -59,6 +59,8 @@ struct gsl_params {
struct drr_params {
uint32_t vertical_total_min;
uint32_t vertical_total_max;
+ uint32_t vertical_total_mid;
+ uint32_t vertical_total_mid_frame_num;
bool immediate_flip;
};
@@ -96,6 +98,11 @@ enum crc_selection {
INTERSECT_WINDOW_NOT_A_NOT_B,
};
+enum h_timing_div_mode {
+ H_TIMING_NO_DIV,
+ H_TIMING_DIV_BY2,
+};
+
struct crc_params {
/* Regions used to calculate CRC*/
uint16_t windowa_x_start;
@@ -184,16 +191,12 @@ struct timing_generator_funcs {
bool (*did_triggered_reset_occur)(struct timing_generator *tg);
void (*setup_global_swap_lock)(struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params);
- void (*setup_global_lock)(struct timing_generator *tg);
void (*unlock)(struct timing_generator *tg);
void (*lock)(struct timing_generator *tg);
- void (*lock_global)(struct timing_generator *tg);
void (*lock_doublebuffer_disable)(struct timing_generator *tg);
void (*lock_doublebuffer_enable)(struct timing_generator *tg);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void(*triplebuffer_unlock)(struct timing_generator *tg);
void(*triplebuffer_lock)(struct timing_generator *tg);
-#endif
void (*enable_reset_trigger)(struct timing_generator *tg,
int source_tg_inst);
void (*enable_crtc_reset)(struct timing_generator *tg,
@@ -205,7 +208,8 @@ struct timing_generator_funcs {
bool enable, const struct dc_crtc_timing *timing);
void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
void (*set_static_screen_control)(struct timing_generator *tg,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void (*set_test_pattern)(
struct timing_generator *tg,
enum controller_dp_test_pattern test_pattern,
@@ -230,7 +234,6 @@ struct timing_generator_funcs {
bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
void (*clear_optc_underflow)(struct timing_generator *tg);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*set_dwb_source)(struct timing_generator *optc,
uint32_t dwb_pipe_inst);
@@ -238,7 +241,6 @@ struct timing_generator_funcs {
uint32_t *num_of_input_segments,
uint32_t *seg0_src_sel,
uint32_t *seg1_src_sel);
-#endif
/**
* Configure CRCs for the given timing generator. Return false if TG is
@@ -256,25 +258,23 @@ struct timing_generator_funcs {
void (*program_manual_trigger)(struct timing_generator *optc);
void (*setup_manual_trigger)(struct timing_generator *optc);
+ bool (*get_hw_timing)(struct timing_generator *optc,
+ struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*set_dsc_config)(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
-#endif
- void (*set_odm_bypass)(struct timing_generator *tg, const struct dc_crtc_timing *dc_crtc_timing);
- void (*set_odm_combine)(struct timing_generator *tg, int combine_opp_id,
- int mpcc_hactive, enum dc_pixel_encoding pixel_encoding);
+ void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);
+ void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
void (*set_gsl)(struct timing_generator *optc, const struct gsl_params *params);
void (*set_gsl_source_select)(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
-#endif
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 4d56d48a3179..209118f9f193 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,268 +32,160 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum pipe_gating_control {
- PIPE_GATING_CONTROL_DISABLE = 0,
- PIPE_GATING_CONTROL_ENABLE,
- PIPE_GATING_CONTROL_INIT
-};
-
enum vline_select {
VLINE0,
VLINE1
};
-struct dce_hwseq_wa {
- bool blnd_crtc_trigger;
- bool DEGVIDCN10_253;
- bool false_optc_underflow;
- bool DEGVIDCN10_254;
-};
-
-struct hwseq_wa_state {
- bool DEGVIDCN10_253_applied;
-};
-
-struct dce_hwseq {
- struct dc_context *ctx;
- const struct dce_hwseq_registers *regs;
- const struct dce_hwseq_shift *shifts;
- const struct dce_hwseq_mask *masks;
- struct dce_hwseq_wa wa;
- struct hwseq_wa_state wa_state;
-};
-
struct pipe_ctx;
struct dc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_stream_status;
struct dc_writeback_info;
-#endif
struct dchub_init_data;
-struct dc_static_screen_events;
+struct dc_static_screen_params;
struct resource_pool;
-struct resource_context;
-struct stream_resource;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
-#endif
+struct dpp;
+struct dce_hwseq;
struct hw_sequencer_funcs {
+ /* Embedded Display Related */
+ void (*edp_power_control)(struct dc_link *link, bool enable);
+ void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
- void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
- void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
+ /* Pipe Programming Related */
void (*init_hw)(struct dc *dc);
-
- void (*init_pipes)(struct dc *dc, struct dc_state *context);
-
- enum dc_status (*apply_ctx_to_hw)(
- struct dc *dc, struct dc_state *context);
-
- void (*reset_hw_ctx_wrap)(
- struct dc *dc, struct dc_state *context);
-
- void (*apply_ctx_for_surface)(
- struct dc *dc,
+ void (*enable_accelerated_mode)(struct dc *dc,
+ struct dc_state *context);
+ enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
+ struct dc_state *context);
+ void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
- int num_planes,
+ int num_planes, struct dc_state *context);
+ void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
-
- void (*program_gamut_remap)(
+ void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
-
- void (*program_output_csc)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- void (*program_triplebuffer)(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer);
- void (*set_flip_control_gsl)(
- struct pipe_ctx *pipe_ctx,
- bool flip_immediate);
-#endif
-
- void (*update_plane_addr)(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx);
-
- void (*plane_atomic_disconnect)(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx);
-
- void (*update_dchub)(
- struct dce_hwseq *hws,
- struct dchub_init_data *dh_data);
-
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
- int (*init_sys_ctx)(
- struct dce_hwseq *hws,
- struct dc *dc,
- struct dc_phy_addr_space_config *pa_config);
- void (*init_vm_ctx)(
- struct dce_hwseq *hws,
- struct dc *dc,
- struct dc_virtual_addr_space_config *va_config,
- int vmid);
-#endif
- void (*update_mpcc)(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx);
-
- void (*update_pending_status)(
+ void (*update_dchub)(struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data);
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+ struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
-
- bool (*set_input_transfer_func)(
- struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state);
-
- bool (*set_output_transfer_func)(
- struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream);
-
- void (*power_down)(struct dc *dc);
-
- void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context);
-
- void (*enable_timing_synchronization)(
- struct dc *dc,
- int group_index,
- int group_size,
- struct pipe_ctx *grouped_pipes[]);
-
- void (*enable_per_frame_crtc_position_reset)(
- struct dc *dc,
- int group_size,
+ void (*program_triplebuffer)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+ void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+
+ /* Pipe Lock Related */
+ void (*pipe_control_lock_global)(struct dc *dc,
+ struct pipe_ctx *pipe, bool lock);
+ void (*pipe_control_lock)(struct dc *dc,
+ struct pipe_ctx *pipe, bool lock);
+ void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
+ bool flip_immediate);
+
+ /* Timing Related */
+ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ struct crtc_position *position);
+ int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+ void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
+ int group_size, struct pipe_ctx *grouped_pipes[]);
+ void (*enable_timing_synchronization)(struct dc *dc,
+ int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
+ void (*setup_periodic_interrupt)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline);
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ unsigned int vmin, unsigned int vmax,
+ unsigned int vmid, unsigned int vmid_frame_number);
+ void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ const struct dc_static_screen_params *events);
- void (*enable_display_pipe_clock_gating)(
- struct dc_context *ctx,
- bool clock_gating);
-
- bool (*enable_display_power_gating)(
- struct dc *dc,
- uint8_t controller_id,
- struct dc_bios *dcb,
- enum pipe_gating_control power_gating);
-
- void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
- void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
-
- void (*send_immediate_sdp_message)(
- struct pipe_ctx *pipe_ctx,
- const uint8_t *custom_sdp_message,
- unsigned int sdp_message_size);
-
+ /* Stream Related */
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
-
- void (*disable_stream)(struct pipe_ctx *pipe_ctx,
- int option);
-
+ void (*disable_stream)(struct pipe_ctx *pipe_ctx);
+ void (*blank_stream)(struct pipe_ctx *pipe_ctx);
void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
- void (*blank_stream)(struct pipe_ctx *pipe_ctx);
-
- void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+ /* Bandwidth Related */
+ void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context);
+ bool (*update_bandwidth)(struct dc *dc, struct dc_state *context);
+ void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context);
- void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
+ /* Infopacket Related */
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+ void (*send_immediate_sdp_message)(
+ struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+ void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+ void (*set_dmdata_attributes)(struct pipe_ctx *pipe);
+ void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx);
+ bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx);
- void (*pipe_control_lock)(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
+ /* Cursor Related */
+ void (*set_cursor_position)(struct pipe_ctx *pipe);
+ void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+ void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
- void (*pipe_control_lock_global)(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
- void (*blank_pixel_data)(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool blank);
+ /* Colour Related */
+ void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
+ void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix, int opp_id);
- void (*prepare_bandwidth)(
+ /* VM Related */
+ int (*init_sys_ctx)(struct dce_hwseq *hws,
struct dc *dc,
- struct dc_state *context);
- void (*optimize_bandwidth)(
+ struct dc_phy_addr_space_config *pa_config);
+ void (*init_vm_ctx)(struct dce_hwseq *hws,
struct dc *dc,
- struct dc_state *context);
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- bool (*update_bandwidth)(
- struct dc *dc,
+ /* Writeback Related */
+ void (*update_writeback)(struct dc *dc,
+ struct dc_writeback_info *wb_info,
struct dc_state *context);
- bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx);
-#endif
-
- void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
- int vmin, int vmax);
-
- void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
- struct crtc_position *position);
+ void (*enable_writeback)(struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
+ void (*disable_writeback)(struct dc *dc,
+ unsigned int dwb_pipe_inst);
- void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events);
+ bool (*mmhubbub_warmup)(struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info);
- enum dc_status (*enable_stream_timing)(
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context,
- struct dc *dc);
+ /* Clock Related */
+ enum dc_status (*set_clock)(struct dc *dc,
+ enum dc_clock_type clock_type,
+ uint32_t clk_khz, uint32_t stepping);
+ void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg);
+ void (*optimize_pwr_state)(const struct dc *dc,
+ struct dc_state *context);
+ void (*exit_optimized_pwr_state)(const struct dc *dc,
+ struct dc_state *context);
- void (*setup_stereo)(
- struct pipe_ctx *pipe_ctx,
- struct dc *dc);
+ /* Audio Related */
+ void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+ void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx);
- void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+ /* Stereo 3D Related */
+ void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc);
- void (*log_hw_state)(struct dc *dc,
- struct dc_log_buffer_ctx *log_ctx);
- void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+ /* HW State Logging Related */
+ void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+ void (*get_hw_state)(struct dc *dc, char *pBuf,
+ unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
- void (*wait_for_mpcc_disconnect)(struct dc *dc,
- struct resource_pool *res_pool,
- struct pipe_ctx *pipe_ctx);
-
- void (*edp_power_control)(
- struct dc_link *link,
- bool enable);
- void (*edp_backlight_control)(
- struct dc_link *link,
- bool enable);
- void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
- void (*set_cursor_position)(struct pipe_ctx *pipe);
- void (*set_cursor_attribute)(struct pipe_ctx *pipe);
- void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
-
- void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline);
- void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx);
- bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
- void (*program_all_writeback_pipes_in_tree)(
- struct dc *dc,
- const struct dc_stream_state *stream,
- struct dc_state *context);
- void (*update_writeback)(struct dc *dc,
- const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
- void (*enable_writeback)(struct dc *dc,
- const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
- void (*disable_writeback)(struct dc *dc,
- unsigned int dwb_pipe_inst);
-#endif
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
new file mode 100644
index 000000000000..ecf566378ccd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HW_SEQUENCER_PRIVATE_H__
+#define __DC_HW_SEQUENCER_PRIVATE_H__
+
+#include "dc_types.h"
+
+enum pipe_gating_control {
+ PIPE_GATING_CONTROL_DISABLE = 0,
+ PIPE_GATING_CONTROL_ENABLE,
+ PIPE_GATING_CONTROL_INIT
+};
+
+struct dce_hwseq_wa {
+ bool blnd_crtc_trigger;
+ bool DEGVIDCN10_253;
+ bool false_optc_underflow;
+ bool DEGVIDCN10_254;
+ bool DEGVIDCN21;
+};
+
+struct hwseq_wa_state {
+ bool DEGVIDCN10_253_applied;
+};
+
+struct pipe_ctx;
+struct dc_state;
+struct dc_stream_status;
+struct dc_writeback_info;
+struct dchub_init_data;
+struct dc_static_screen_params;
+struct resource_pool;
+struct resource_context;
+struct stream_resource;
+struct dc_phy_addr_space_config;
+struct dc_virtual_addr_space_config;
+struct hubp;
+struct dpp;
+struct dce_hwseq;
+struct timing_generator;
+struct tg_color;
+struct output_pixel_processor;
+
+struct hwseq_private_funcs {
+
+ void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*init_pipes)(struct dc *dc, struct dc_state *context);
+ void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context);
+ void (*update_plane_addr)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ bool (*set_input_transfer_func)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+ bool (*set_output_transfer_func)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+ void (*power_down)(struct dc *dc);
+ void (*enable_display_pipe_clock_gating)(struct dc_context *ctx,
+ bool clock_gating);
+ bool (*enable_display_power_gating)(struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+ void (*blank_pixel_data)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+ enum dc_status (*enable_stream_timing)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+ void (*edp_backlight_control)(struct dc_link *link,
+ bool enable);
+ void (*setup_vupdate_interrupt)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+ bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*init_blank)(struct dc *dc, struct timing_generator *tg);
+ void (*disable_vga)(struct dce_hwseq *hws);
+ void (*bios_golden_init)(struct dc *dc);
+ void (*plane_atomic_power_down)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
+ void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*enable_power_gating_plane)(struct dce_hwseq *hws,
+ bool enable);
+ void (*dpp_pg_control)(struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+ void (*hubp_pg_control)(struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+ void (*dsc_pg_control)(struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+ void (*update_odm)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+ void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context);
+ bool (*s0i3_golden_init_wa)(struct dc *dc);
+ void (*get_surface_visual_confirm_color)(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+ void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+ void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*program_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
+ void (*dccg_init)(struct dce_hwseq *hws);
+ bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+ bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+};
+
+struct dce_hwseq {
+ struct dc_context *ctx;
+ const struct dce_hwseq_registers *regs;
+ const struct dce_hwseq_shift *shifts;
+ const struct dce_hwseq_mask *masks;
+ struct dce_hwseq_wa wa;
+ struct hwseq_wa_state wa_state;
+ struct hwseq_private_funcs funcs;
+
+};
+
+#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 30be7bb4a01a..9af7ee5bc8ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
bool dp_set_hw_training_pattern(
struct dc_link *link,
- enum hw_dp_training_pattern pattern);
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset);
void dp_set_hw_lane_settings(
struct dc_link *link,
- const struct link_training_settings *link_settings);
+ const struct link_training_settings *link_settings,
+ uint32_t offset);
void dp_set_hw_test_pattern(
struct dc_link *link,
@@ -72,8 +74,6 @@ void dp_set_hw_test_pattern(
uint8_t *custom_pattern,
uint32_t custom_pattern_size);
-enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
-
void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 8503d9cc4763..2470405e996b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
#define IX_REG_READ(index_reg_name, data_reg_name, index) \
generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) \
+ generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \
+ IND_REG(index), \
+ n, __VA_ARGS__)
+#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \
+ IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \
+ FN(data_reg_name, field), val)
#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
generic_indirect_reg_update_ex(CTX, \
@@ -479,10 +486,35 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
uint32_t index);
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ ...);
+
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
uint32_t index, uint32_t reg_val, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
...);
+/* register offload macros
+ *
+ * Instead of doing MMIO to a register directly, in some cases we want
+ * to gather a register sequence and execute it from another thread,
+ * so that the time required for lengthy operations is optimized.
+ */
+
+/* start gathering register sequence */
+#define REG_SEQ_START() \
+ reg_sequence_start_gather(CTX)
+
+/* start execution of register sequence gathered since REG_SEQ_START */
+#define REG_SEQ_SUBMIT() \
+ reg_sequence_start_execute(CTX)
+
+/* wait for the last REG_SEQ_SUBMIT to finish */
+#define REG_SEQ_WAIT_DONE() \
+ reg_sequence_wait_done(CTX)
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
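For reference, the register offload macros above (REG_SEQ_START/REG_SEQ_SUBMIT/REG_SEQ_WAIT_DONE) are meant to bracket a run of register programming. The sketch below is an editor's illustration rather than part of the patch: it assumes a DC hardware-block file where CTX and the existing REG_UPDATE helper are already set up, and the function, register and field names are hypothetical.

/* Minimal usage sketch; names below are assumed, not from the patch. */
static void example_program_long_sequence(void)
{
	/* Gather register writes instead of issuing each MMIO immediately. */
	REG_SEQ_START();

	REG_UPDATE(EXAMPLE_REG_A, EXAMPLE_FIELD_A, 1);
	REG_UPDATE(EXAMPLE_REG_B, EXAMPLE_FIELD_B, 0);
	/* ... many more updates gathered here ... */

	/* Hand the gathered sequence to the offload thread... */
	REG_SEQ_SUBMIT();
	/* ...and wait for it to complete before touching the block again. */
	REG_SEQ_WAIT_DONE();
}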
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 47f81072d7e9..5ae8ada154ef 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -31,6 +31,8 @@
#include "dm_pp_smu.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
+#define MEMORY_TYPE_HBM 2
+
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
@@ -44,12 +46,8 @@ struct resource_caps {
int num_pll;
int num_dwb;
int num_ddc;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
int num_vmid;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
int num_dsc;
-#endif
-#endif
};
struct resource_straps {
@@ -179,7 +177,6 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
-struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx);
-bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx);
-
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *aud_chk);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index ad87c2f093e2..0f682ac53bb2 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,20 +60,26 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
IRQ_DCN1 = irq_service_dcn10.o
AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1)
-endif
###############################################################################
# DCN 20
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
IRQ_DCN2 = irq_service_dcn20.o
AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2)
+###############################################################################
+# DCN 21
+###############################################################################
+IRQ_DCN21 = irq_service_dcn21.o
+
+AMD_DAL_IRQ_DCN21 = $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN21)
endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 1a581c464345..378cc11aa047 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,
bool enable)
{
struct dc_context *dc_ctx = irq_service->ctx;
- struct dc *core_dc = irq_service->ctx->dc;
+ struct dc *dc = irq_service->ctx->dc;
enum dc_irq_source dal_irq_src =
dc_interrupt_to_irq_source(irq_service->ctx->dc,
info->src_id,
@@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
struct timing_generator *tg =
- core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
@@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(struct irq_service *irq_service,
+static void dce110_irq_construct(struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
@@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dce110_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 15380336cb51..2fe4703395f3 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(
+static void dce120_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dce120_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 281fee8ad1e5..17e426b80a00 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(
+static void dce80_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dce80_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index cc8e7dedccce..f956b3bde680 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = {
.to_dal_irq_source = to_dal_irq_source_dcn10
};
-static void construct(
+static void dcn10_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dcn10_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 3cc0f2a1f77c..2a1fea501f8c 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -167,6 +167,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -221,12 +226,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@@ -333,12 +341,12 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
@@ -351,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = {
.to_dal_irq_source = to_dal_irq_source_dcn20
};
-static void construct(
+static void dcn20_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -370,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dcn20_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
new file mode 100644
index 000000000000..1b971265418b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+
+#include "irq_service_dcn21.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+enum dc_irq_source to_dal_irq_source_dcn21(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return DC_IRQ_SOURCE_INVALID;
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn21 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn21
+};
+
+static void dcn21_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn21;
+ irq_service->funcs = &irq_service_funcs_dcn21;
+}
+
+struct irq_service *dal_irq_service_dcn21_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn21_irq_construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
new file mode 100644
index 000000000000..da2bd0e93d7a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCN21_H__
+#define __DAL_IRQ_SERVICE_DCN21_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn21_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 0878550a8178..33053b9fe6bd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -38,7 +38,7 @@
#include "dce120/irq_service_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/irq_service_dcn10.h"
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 30ec80ac6fc8..c34eba19860a 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -1,5 +1,6 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,6 +30,7 @@
#include <linux/kgdb.h>
#include <linux/kref.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
@@ -48,8 +50,39 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_X86)
#include <asm/fpu/api.h>
+#define DC_FP_START() kernel_fpu_begin()
+#define DC_FP_END() kernel_fpu_end()
+#elif defined(CONFIG_PPC64)
+#include <asm/switch_to.h>
+#include <asm/cputable.h>
+#define DC_FP_START() { \
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+ preempt_disable(); \
+ enable_kernel_vsx(); \
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+ preempt_disable(); \
+ enable_kernel_altivec(); \
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+ preempt_disable(); \
+ enable_kernel_fp(); \
+ } \
+}
+#define DC_FP_END() { \
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+ disable_kernel_vsx(); \
+ preempt_enable(); \
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+ disable_kernel_altivec(); \
+ preempt_enable(); \
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+ disable_kernel_fp(); \
+ preempt_enable(); \
+ } \
+}
+#endif
#endif
/*
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index c9a6dd878d9b..b8040da94b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting) {}
static void virtual_stream_encoder_hdmi_set_stream_attribute(
@@ -77,22 +78,18 @@ static void virtual_audio_mute_control(
struct stream_encoder *enc,
bool mute) {}
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+static void virtual_stream_encoder_reset_hdmi_stream_attribute(
+ struct stream_encoder *enc)
+{}
+
static void virtual_enc_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
{}
-#endif
-#endif
static const struct stream_encoder_funcs virtual_str_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.dp_set_odm_combine =
virtual_enc_dp_set_odm_combine,
-#endif
-#endif
.dp_set_stream_attribute =
virtual_stream_encoder_dp_set_stream_attribute,
.hdmi_set_stream_attribute =
@@ -116,6 +113,7 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
.audio_mute_control = virtual_audio_mute_control,
.set_avmute = virtual_stream_encoder_set_avmute,
+ .hdmi_reset_stream_attribute = virtual_stream_encoder_reset_hdmi_stream_attribute,
};
bool virtual_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
new file mode 100644
index 000000000000..cd9532b4f14d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_H_
+#define _DMUB_CMD_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd_dal.h"
+#include "dmub_cmd_vbios.h"
+#include "atomfirmware.h"
+
+#define DMUB_RB_CMD_SIZE 64
+#define DMUB_RB_MAX_ENTRY 128
+#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
+#define REG_SET_MASK 0xFFFF
+
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_type {
+ DMUB_CMD__NULL = 0,
+ DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1,
+ DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
+ DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
+ DMUB_CMD__REG_REG_WAIT = 4,
+ DMUB_CMD__PLAT_54186_WA = 5,
+ DMUB_CMD__PSR = 64,
+ DMUB_CMD__VBIOS = 128,
+};
+
+#pragma pack(push, 1)
+
+struct dmub_cmd_header {
+ unsigned int type : 8;
+ unsigned int sub_type : 8;
+ unsigned int reserved0 : 8;
+ unsigned int payload_bytes : 6; /* up to 60 bytes */
+ unsigned int reserved1 : 2;
+};
+
+/*
+ * Read modify write
+ *
+ * 60 payload bytes can hold up to 5 sets of read-modify-writes,
+ * each taking 3 dwords.
+ *
+ * number of sequences = header.payload_bytes / sizeof(struct dmub_cmd_read_modify_write_sequence)
+ *
+ * modify_mask = 0xffff'ffff means all fields are going to be updated. In this
+ * case the command parser will skip the read, so modify_mask = 0xffff'ffff can
+ * be used as a plain register write.
+ */
+struct dmub_cmd_read_modify_write_sequence {
+ uint32_t addr;
+ uint32_t modify_mask;
+ uint32_t modify_value;
+};
+
+#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5
+struct dmub_rb_cmd_read_modify_write {
+ struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE
+ struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX];
+};
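
/*
 * Editor's sketch (not part of the patch): how a caller might size and fill
 * this command. The arithmetic mirrors the comment above: payload_bytes is a
 * multiple of sizeof(struct dmub_cmd_read_modify_write_sequence) (12 bytes),
 * so at most 60 / 12 = 5 sequences fit in one 64-byte ring buffer entry.
 * Submission to the ring buffer is omitted; the helper name is hypothetical.
 */
static inline void example_fill_rmw_cmd(struct dmub_rb_cmd_read_modify_write *cmd,
					const uint32_t *addrs,
					const uint32_t *masks,
					const uint32_t *values,
					int count)
{
	int i;

	if (count > DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		count = DMUB_READ_MODIFY_WRITE_SEQ__MAX;

	cmd->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd->header.payload_bytes =
		count * sizeof(struct dmub_cmd_read_modify_write_sequence);

	for (i = 0; i < count; i++) {
		cmd->seq[i].addr = addrs[i];
		cmd->seq[i].modify_mask = masks[i];	/* 0xffffffff skips the read */
		cmd->seq[i].modify_value = values[i];
	}
}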
+
+/*
+ * Update a register with a specified sequence of masks and values
+ *
+ * 60 payload bytes can hold the address + up to 7 sets of mask/value combos,
+ * each taking 2 dwords.
+ *
+ * number of field update sequences = (header.payload_bytes - sizeof(addr)) / sizeof(struct dmub_cmd_reg_field_update_sequence)
+ *
+ *
+ * USE CASE:
+ * 1. auto-increment register where additional read would update pointer and produce wrong result
+ * 2. toggle a bit without read in the middle
+ */
+
+struct dmub_cmd_reg_field_update_sequence {
+ uint32_t modify_mask; // 0xffff'ffff to skip initial read
+ uint32_t modify_value;
+};
+
+#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7
+
+struct dmub_rb_cmd_reg_field_update_sequence {
+ struct dmub_cmd_header header;
+ uint32_t addr;
+ struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX];
+};
+
+
+/*
+ * Burst write
+ *
+ * Supports use cases such as writing out LUTs.
+ *
+ * 60 payload bytes can hold the address + up to 14 values to write to that address
+ *
+ * number of values = (header.payload_bytes - sizeof(addr)) / sizeof(uint32_t)
+ */
+#define DMUB_BURST_WRITE_VALUES__MAX 14
+struct dmub_rb_cmd_burst_write {
+ struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_BURST_WRITE
+ uint32_t addr;
+ uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX];
+};
+
+
+struct dmub_rb_cmd_common {
+ struct dmub_cmd_header header;
+ uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)];
+};
+
+struct dmub_cmd_reg_wait_data {
+ uint32_t addr;
+ uint32_t mask;
+ uint32_t condition_field_value;
+ uint32_t time_out_us;
+};
+
+struct dmub_rb_cmd_reg_wait {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_reg_wait_data reg_wait;
+};
+
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+struct dmub_cmd_PLAT_54186_wa {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ struct {
+ uint8_t hubp_inst : 4;
+ uint8_t tmz_surface : 1;
+ uint8_t immediate :1;
+ uint8_t vmid : 4;
+ uint8_t grph_stereo : 1;
+ uint32_t reserved : 21;
+ } flip_params;
+ uint32_t reserved[9];
+};
+
+struct dmub_rb_cmd_PLAT_54186_wa {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_PLAT_54186_wa flip;
+};
+
+struct dmub_cmd_digx_encoder_control_data {
+ union dig_encoder_control_parameters_v1_5 dig;
+};
+
+struct dmub_rb_cmd_digx_encoder_control {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_digx_encoder_control_data encoder_control;
+};
+
+struct dmub_cmd_set_pixel_clock_data {
+ struct set_pixel_clock_parameter_v1_7 clk;
+};
+
+struct dmub_rb_cmd_set_pixel_clock {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_set_pixel_clock_data pixel_clock;
+};
+
+struct dmub_cmd_enable_disp_power_gating_data {
+ struct enable_disp_power_gating_parameters_v2_1 pwr;
+};
+
+struct dmub_rb_cmd_enable_disp_power_gating {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_enable_disp_power_gating_data power_gating;
+};
+
+struct dmub_cmd_dig1_transmitter_control_data {
+ struct dig_transmitter_control_parameters_v1_6 dig;
+};
+
+struct dmub_rb_cmd_dig1_transmitter_control {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+};
+
+struct dmub_rb_cmd_dpphy_init {
+ struct dmub_cmd_header header;
+ uint8_t reserved[60];
+};
+
+struct dmub_cmd_psr_copy_settings_data {
+ uint16_t psr_level;
+ uint8_t hubp_inst;
+ uint8_t dpp_inst;
+ uint8_t mpcc_inst;
+ uint8_t opp_inst;
+ uint8_t otg_inst;
+ uint8_t digfe_inst;
+ uint8_t digbe_inst;
+ uint8_t dpphy_inst;
+ uint8_t aux_inst;
+ uint8_t hyst_frames;
+ uint8_t hyst_lines;
+ uint8_t phy_num;
+ uint8_t phy_type;
+ uint8_t aux_repeat;
+ uint8_t smu_optimizations_en;
+ uint8_t skip_wait_for_pll_lock;
+ uint8_t frame_delay;
+ uint8_t smu_phy_id;
+ uint8_t num_of_controllers;
+ uint8_t link_rate;
+ uint8_t frame_cap_ind;
+};
+
+struct dmub_rb_cmd_psr_copy_settings {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_psr_copy_settings_data psr_copy_settings_data;
+};
+
+struct dmub_cmd_psr_set_level_data {
+ uint16_t psr_level;
+};
+
+struct dmub_rb_cmd_psr_set_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_psr_set_level_data psr_set_level_data;
+};
+
+struct dmub_rb_cmd_psr_enable {
+ struct dmub_cmd_header header;
+};
+
+struct dmub_cmd_psr_setup_data {
+ enum psr_version version; // PSR version 1 or 2
+};
+
+struct dmub_rb_cmd_psr_setup {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_psr_setup_data psr_setup_data;
+};
+
+union dmub_rb_cmd {
+ struct dmub_rb_cmd_read_modify_write read_modify_write;
+ struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
+ struct dmub_rb_cmd_burst_write burst_write;
+ struct dmub_rb_cmd_reg_wait reg_wait;
+ struct dmub_rb_cmd_common cmd_common;
+ struct dmub_rb_cmd_digx_encoder_control digx_encoder_control;
+ struct dmub_rb_cmd_set_pixel_clock set_pixel_clock;
+ struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
+ struct dmub_rb_cmd_dpphy_init dpphy_init;
+ struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
+ struct dmub_rb_cmd_psr_enable psr_enable;
+ struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+ struct dmub_rb_cmd_psr_set_level psr_set_level;
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
+ struct dmub_rb_cmd_psr_setup psr_setup;
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_CMD_H_ */
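
As a hedged illustration of the command layout documented above (not part of the patch), a caller might fill a read-modify-write entry as follows; the register addresses and field values are placeholders:

static void example_fill_rmw(union dmub_rb_cmd *cmd)
{
        /* Two sequences: payload_bytes counts only the sequence data. */
        cmd->read_modify_write.header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
        cmd->read_modify_write.header.payload_bytes =
                2 * sizeof(struct dmub_cmd_read_modify_write_sequence);

        cmd->read_modify_write.seq[0].addr = 0x1000;            /* placeholder */
        cmd->read_modify_write.seq[0].modify_mask = 0x0000000f; /* update bits [3:0] */
        cmd->read_modify_write.seq[0].modify_value = 0x3;

        /* 0xffffffff mask: the parser skips the read, acts as a plain write. */
        cmd->read_modify_write.seq[1].addr = 0x1004;            /* placeholder */
        cmd->read_modify_write.seq[1].modify_mask = 0xffffffff;
        cmd->read_modify_write.seq[1].modify_value = 0x12345678;
}
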
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
new file mode 100644
index 000000000000..7b69eb37f762
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_DAL_H_
+#define _DMUB_CMD_DAL_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_psr_type {
+ DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
+};
+
+enum psr_version {
+ PSR_VERSION_1 = 0x10, // PSR Version 1
+ PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
+ PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
new file mode 100644
index 000000000000..b6deb8e2590f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_VBIOS_H_
+#define _DMUB_CMD_VBIOS_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_vbios_type {
+ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
+ DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
+ DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
+ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+};
+
+#endif /* _DMUB_CMD_VBIOS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
new file mode 100644
index 000000000000..242ec257998c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_META_H_
+#define _DMUB_META_H_
+
+#include "dmub_types.h"
+
+#pragma pack(push, 1)
+
+/* Magic value for identifying dmub_fw_meta_info */
+#define DMUB_FW_META_MAGIC 0x444D5542
+
+/* Offset from the end of the file to the dmub_fw_meta_info */
+#define DMUB_FW_META_OFFSET 0x24
+
+/**
+ * struct dmub_fw_meta_info - metadata associated with fw binary
+ *
+ * NOTE: This should be considered a stable API. Fields should
+ * not be repurposed or reordered. New fields should be
+ * added instead to extend the structure.
+ *
+ * @magic_value: magic value identifying DMUB firmware meta info
+ * @fw_region_size: size of the firmware state region
+ * @trace_buffer_size: size of the tracebuffer region
+ */
+struct dmub_fw_meta_info {
+ uint32_t magic_value;
+ uint32_t fw_region_size;
+ uint32_t trace_buffer_size;
+};
+
+/* Ensure that the structure remains 64 bytes. */
+union dmub_fw_meta {
+ struct dmub_fw_meta_info info;
+ uint8_t reserved[64];
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_META_H_ */
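
A minimal sketch (not part of the patch) of how a loader might locate the metadata in a firmware blob. It assumes, based on the comment above, that the 64-byte union ends DMUB_FW_META_OFFSET bytes before the end of the image; fw_data and fw_size are illustrative names:

static const struct dmub_fw_meta_info *
example_find_meta(const uint8_t *fw_data, uint32_t fw_size)
{
        const union dmub_fw_meta *meta;

        if (fw_size < DMUB_FW_META_OFFSET + sizeof(*meta))
                return NULL;

        /* Assumed layout: metadata sits DMUB_FW_META_OFFSET bytes back from
         * the end of the firmware image. */
        meta = (const union dmub_fw_meta *)(fw_data + fw_size -
                                            DMUB_FW_META_OFFSET - sizeof(*meta));

        if (meta->info.magic_value != DMUB_FW_META_MAGIC)
                return NULL;

        return &meta->info;
}
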
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
new file mode 100644
index 000000000000..df875fdd2ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_RB_H_
+#define _DMUB_RB_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct dmub_cmd_header;
+
+struct dmub_rb_init_params {
+ void *ctx;
+ void *base_address;
+ uint32_t capacity;
+};
+
+struct dmub_rb {
+ void *base_address;
+ uint32_t data_count;
+ uint32_t rptr;
+ uint32_t wrpt;
+ uint32_t capacity;
+
+ void *ctx;
+ void *dmub;
+};
+
+
+static inline bool dmub_rb_empty(struct dmub_rb *rb)
+{
+ return (rb->wrpt == rb->rptr);
+}
+
+static inline bool dmub_rb_full(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
+}
+
+static inline bool dmub_rb_push_front(struct dmub_rb *rb,
+ const struct dmub_cmd_header *cmd)
+{
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ int i;
+
+ if (dmub_rb_full(rb))
+ return false;
+
+ // copying data
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
+ rb->wrpt += DMUB_RB_CMD_SIZE;
+
+ if (rb->wrpt >= rb->capacity)
+ rb->wrpt %= rb->capacity;
+
+ return true;
+}
+
+static inline bool dmub_rb_front(struct dmub_rb *rb,
+ struct dmub_cmd_header *cmd)
+{
+ uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
+
+ if (dmub_rb_empty(rb))
+ return false;
+
+ dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE);
+
+ return true;
+}
+
+static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
+{
+ if (dmub_rb_empty(rb))
+ return false;
+
+ rb->rptr += DMUB_RB_CMD_SIZE;
+
+ if (rb->rptr >= rb->capacity)
+ rb->rptr %= rb->capacity;
+
+ return true;
+}
+
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+ uint32_t rptr = rb->rptr;
+ uint32_t wptr = rb->wrpt;
+
+ while (rptr != wptr) {
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ int i;
+
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
+
+ rptr += DMUB_RB_CMD_SIZE;
+ if (rptr >= rb->capacity)
+ rptr %= rb->capacity;
+ }
+}
+
+static inline void dmub_rb_init(struct dmub_rb *rb,
+ struct dmub_rb_init_params *init_params)
+{
+ rb->base_address = init_params->base_address;
+ rb->capacity = init_params->capacity;
+ rb->rptr = 0;
+ rb->wrpt = 0;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_RB_H_ */
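
A hedged usage sketch for the inline ring buffer helpers above (not part of the patch). It assumes dmub_rb.h is included and that the caller supplies a suitably aligned backing buffer of DMUB_RB_SIZE bytes:

static void example_rb_usage(void *backing /* DMUB_RB_SIZE bytes */)
{
        struct dmub_rb_init_params init_params = { 0 };
        struct dmub_rb rb = { 0 };
        union dmub_rb_cmd cmd = { 0 };

        init_params.base_address = backing;
        init_params.capacity = DMUB_RB_SIZE;
        dmub_rb_init(&rb, &init_params);

        cmd.cmd_common.header.type = DMUB_CMD__NULL;

        /* Push copies the full 64-byte command; it fails once only one
         * free slot remains (the full condition checked above). */
        if (dmub_rb_push_front(&rb, &cmd.cmd_common.header))
                dmub_rb_pop_front(&rb);
}
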
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
new file mode 100644
index 000000000000..f8917594036a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_SRV_H_
+#define _DMUB_SRV_H_
+
+/**
+ * DOC: DMUB interface and operation
+ *
+ * DMUB is the interface to the display DMCUB microcontroller on DCN hardware.
+ * It delegates hardware initialization and command submission to the
+ * microcontroller. DMUB is the short name for DMCUB.
+ *
+ * This interface is not thread-safe. Ensure that all access to the interface
+ * is properly synchronized by the caller.
+ *
+ * Initialization and usage of the DMUB service should be done in the
+ * steps given below:
+ *
+ * 1. dmub_srv_create()
+ * 2. dmub_srv_has_hw_support()
+ * 3. dmub_srv_calc_region_info()
+ * 4. dmub_srv_hw_init()
+ *
+ * The call to dmub_srv_create() is required to use the server.
+ *
+ * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info()
+ * are helpers to query cache window size and allocate framebuffer(s)
+ * for the cache windows.
+ *
+ * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
+ * for command submission. Commands can be queued via dmub_srv_cmd_queue()
+ * and executed via dmub_srv_cmd_execute().
+ *
+ * If the queue is full the dmub_srv_wait_for_idle() call can be used to
+ * wait until the queue has been cleared.
+ *
+ * Destroying the DMUB service can be done by calling dmub_srv_destroy().
+ * This does not clear DMUB hardware state, only software state.
+ *
+ * The interface is intended to be standalone and should not depend on any
+ * other component within DAL.
+ */
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+#include "dmub_rb.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Forward declarations */
+struct dmub_srv;
+struct dmub_cmd_header;
+struct dmub_srv_common_regs;
+
+/* enum dmub_status - return code for dmcub functions */
+enum dmub_status {
+ DMUB_STATUS_OK = 0,
+ DMUB_STATUS_NO_CTX,
+ DMUB_STATUS_QUEUE_FULL,
+ DMUB_STATUS_TIMEOUT,
+ DMUB_STATUS_INVALID,
+};
+
+/* enum dmub_asic - dmub asic identifier */
+enum dmub_asic {
+ DMUB_ASIC_NONE = 0,
+ DMUB_ASIC_DCN20,
+ DMUB_ASIC_DCN21,
+ DMUB_ASIC_MAX,
+};
+
+/* enum dmub_window_id - dmub window identifier */
+enum dmub_window_id {
+ DMUB_WINDOW_0_INST_CONST = 0,
+ DMUB_WINDOW_1_STACK,
+ DMUB_WINDOW_2_BSS_DATA,
+ DMUB_WINDOW_3_VBIOS,
+ DMUB_WINDOW_4_MAILBOX,
+ DMUB_WINDOW_5_TRACEBUFF,
+ DMUB_WINDOW_6_FW_STATE,
+ DMUB_WINDOW_7_RESERVED,
+ DMUB_WINDOW_TOTAL,
+};
+
+/**
+ * struct dmub_region - dmub hw memory region
+ * @base: base address for region, must be 256 byte aligned
+ * @top: top address for region
+ */
+struct dmub_region {
+ uint32_t base;
+ uint32_t top;
+};
+
+/**
+ * struct dmub_window - dmub hw cache window
+ * @offset: offset to the fb memory in gpu address space
+ * @region: region in uc address space for cache window
+ */
+struct dmub_window {
+ union dmub_addr offset;
+ struct dmub_region region;
+};
+
+/**
+ * struct dmub_fb - defines a dmub framebuffer memory region
+ * @cpu_addr: cpu virtual address for the region, NULL if invalid
+ * @gpu_addr: gpu virtual address for the region, zero if invalid
+ * @size: size of the region in bytes, zero if invalid
+ */
+struct dmub_fb {
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ uint32_t size;
+};
+
+/**
+ * struct dmub_srv_region_params - params used for calculating dmub regions
+ * @inst_const_size: size of the fw inst const section
+ * @bss_data_size: size of the fw bss data section
+ * @vbios_size: size of the vbios data
+ * @fw_bss_data: raw firmware bss data section
+ */
+struct dmub_srv_region_params {
+ uint32_t inst_const_size;
+ uint32_t bss_data_size;
+ uint32_t vbios_size;
+ const uint8_t *fw_bss_data;
+};
+
+/**
+ * struct dmub_srv_region_info - output region info from the dmub service
+ * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
+ * @num_regions: number of regions used by the dmub service
+ * @regions: region info
+ *
+ * The regions are aligned such that they can all be placed within the
+ * same framebuffer, but they can also be placed into different framebuffers.
+ *
+ * The size of each region can be calculated by the caller:
+ * size = reg.top - reg.base
+ *
+ * Care must be taken when performing custom allocations to ensure that each
+ * region base address is 256 byte aligned.
+ */
+struct dmub_srv_region_info {
+ uint32_t fb_size;
+ uint8_t num_regions;
+ struct dmub_region regions[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+ * @cpu_addr: base cpu address for the framebuffer
+ * @gpu_addr: base gpu virtual address for the framebuffer
+ */
+struct dmub_srv_fb_params {
+ const struct dmub_srv_region_info *region_info;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+};
+
+/**
+ * struct dmub_srv_fb_info - output fb info from the dmub service
+ * @num_fb: number of required dmub framebuffers
+ * @fb: fb data for each region
+ *
+ * Output from the dmub service helper that can be used by the
+ * driver to prepare dmub_fb that can be passed into the dmub
+ * hw init service.
+ *
+ * Assumes that all regions are within the same framebuffer
+ * and have been set up according to the region_info generated
+ * by the dmub service.
+ */
+struct dmub_srv_fb_info {
+ uint8_t num_fb;
+ struct dmub_fb fb[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_base_funcs - Driver specific base callbacks
+ */
+struct dmub_srv_base_funcs {
+ /**
+ * @reg_read:
+ *
+ * Hook for reading a register.
+ *
+ * Return: The 32-bit register value from the given address.
+ */
+ uint32_t (*reg_read)(void *ctx, uint32_t address);
+
+ /**
+ * @reg_write:
+ *
+ * Hook for writing a value to the register specified by address.
+ */
+ void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
+};
+
+/**
+ * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
+ */
+struct dmub_srv_hw_funcs {
+ /* private: internal use only */
+
+ void (*init)(struct dmub_srv *dmub);
+
+ void (*reset)(struct dmub_srv *dmub);
+
+ void (*reset_release)(struct dmub_srv *dmub);
+
+ void (*backdoor_load)(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1);
+
+ void (*setup_windows)(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6);
+
+ void (*setup_mailbox)(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
+ uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+
+ void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+ bool (*is_supported)(struct dmub_srv *dmub);
+
+ bool (*is_hw_init)(struct dmub_srv *dmub);
+
+ bool (*is_phy_init)(struct dmub_srv *dmub);
+
+ bool (*is_auto_load_done)(struct dmub_srv *dmub);
+};
+
+/**
+ * struct dmub_srv_create_params - params for dmub service creation
+ * @funcs: driver supplied base routines
+ * @hw_funcs: optional overrides for hw funcs
+ * @user_ctx: context data for callback funcs
+ * @asic: driver supplied asic
+ * @is_virtual: false for hw support only
+ */
+struct dmub_srv_create_params {
+ struct dmub_srv_base_funcs funcs;
+ struct dmub_srv_hw_funcs *hw_funcs;
+ void *user_ctx;
+ enum dmub_asic asic;
+ bool is_virtual;
+};
+
+/**
+ * struct dmub_srv_hw_params - params for dmub hardware initialization
+ * @fb: framebuffer info for each region
+ * @fb_base: base of the framebuffer aperture
+ * @fb_offset: offset of the framebuffer aperture
+ * @psp_version: psp version to pass for DMCU init
+ * @load_inst_const: true if DMUB should load inst const fw
+ */
+struct dmub_srv_hw_params {
+ struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint32_t psp_version;
+ bool load_inst_const;
+};
+
+/**
+ * struct dmub_srv - software state for dmcub
+ * @asic: dmub asic identifier
+ * @user_ctx: user provided context for the dmub_srv
+ * @is_virtual: false if hardware support only
+ * @fw_state: dmub firmware state pointer
+ */
+struct dmub_srv {
+ enum dmub_asic asic;
+ void *user_ctx;
+ bool is_virtual;
+ volatile const struct dmub_fw_state *fw_state;
+
+ /* private: internal use only */
+ const struct dmub_srv_common_regs *regs;
+
+ struct dmub_srv_base_funcs funcs;
+ struct dmub_srv_hw_funcs hw_funcs;
+ struct dmub_rb inbox1_rb;
+
+ bool sw_init;
+ bool hw_init;
+
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint32_t psp_version;
+};
+
+/**
+ * dmub_srv_create() - creates the DMUB service.
+ * @dmub: the dmub service
+ * @params: creation parameters for the service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+ const struct dmub_srv_create_params *params);
+
+/**
+ * dmub_srv_destroy() - destroys the DMUB service.
+ * @dmub: the dmub service
+ */
+void dmub_srv_destroy(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_calc_region_info() - retrieves region info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate region locations
+ * @out: the output region info from dmub
+ *
+ * Calculates the base and top address for all relevant dmub regions
+ * using the parameters given (if any).
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out);
+
+/**
+ * dmub_srv_calc_fb_info() - retrieves fb info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate fb locations
+ * @out: the output fb info from dmub
+ *
+ * Calculates the cpu and gpu address for each dmub framebuffer region
+ * using the region info and base addresses provided in @params.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ const struct dmub_srv_fb_params *params,
+ struct dmub_srv_fb_info *out);
+
+/**
+ * dmub_srv_has_hw_support() - returns hw support state for dmcub
+ * @dmub: the dmub service
+ * @is_supported: hw support state
+ *
+ * Queries the hardware for DMCUB support and returns the result.
+ *
+ * Can be called before dmub_srv_hw_init().
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+ bool *is_supported);
+
+/**
+ * dmub_srv_is_hw_init() - returns hardware init state
+ * @dmub: the dmub service
+ * @is_hw_init: true if the hardware has been initialized
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
+
+/**
+ * dmub_srv_hw_init() - initializes the underlying DMUB hardware
+ * @dmub: the dmub service
+ * @params: params for hardware initialization
+ *
+ * Resets the DMUB hardware and performs backdoor loading of the
+ * required cache regions based on the input framebuffer regions.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_NO_CTX - dmcub context not initialized
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ const struct dmub_srv_hw_params *params);
+
+/**
+ * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized
+ * @dmub: the dmub service
+ *
+ * Before destroying the DMUB service or releasing the backing framebuffer
+ * memory we'll need to put the DMCUB into reset first.
+ *
+ * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * @dmub: the dmub service
+ * @cmd: the command to queue
+ *
+ * Queues a command to the DMUB service but does not begin execution
+ * immediately.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_QUEUE_FULL - no remaining room in queue
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ const struct dmub_cmd_header *cmd);
+
+/**
+ * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * @dmub: the dmub service
+ *
+ * Begins execution of queued commands on the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until firmware has been autoloaded by the DMCUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without firmware autoload support this function will return
+ * immediately.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for auto load timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the PHY has been initialized by the DMUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without PHY init support this function will return
+ * immediately.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for phy init timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the DMUB buffer is empty and all commands have
+ * finished processing. The maximum wait time is given in
+ * microseconds to prevent spinning forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_SRV_H_ */
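
A condensed, hedged sketch of the initialization sequence described in the DOC comment above, followed by one command submission (not part of the patch). It assumes dmub_srv.h is included; the register callbacks, firmware section sizes and the example_alloc_fb() helper are placeholders standing in for driver-specific code:

static uint32_t example_reg_read(void *ctx, uint32_t address)
{
        return 0; /* placeholder: a real driver reads the MMIO register */
}

static void example_reg_write(void *ctx, uint32_t address, uint32_t value)
{
        /* placeholder: a real driver writes the MMIO register */
}

static void *example_alloc_fb(uint32_t size, uint64_t *gpu_addr)
{
        *gpu_addr = 0; /* placeholder: a real driver allocates fb memory */
        return NULL;
}

static enum dmub_status example_bring_up(struct dmub_srv *dmub,
                                         const uint8_t *fw_bss_data,
                                         uint32_t inst_size, uint32_t bss_size)
{
        struct dmub_srv_create_params create_params = { 0 };
        struct dmub_srv_region_params region_params = { 0 };
        struct dmub_srv_region_info region_info = { 0 };
        struct dmub_srv_fb_params fb_params = { 0 };
        struct dmub_srv_fb_info fb_info = { 0 };
        struct dmub_srv_hw_params hw_params = { 0 };
        union dmub_rb_cmd cmd = { 0 };
        enum dmub_status status;
        bool supported = false;
        int i;

        create_params.funcs.reg_read = example_reg_read;
        create_params.funcs.reg_write = example_reg_write;
        create_params.asic = DMUB_ASIC_DCN21;

        status = dmub_srv_create(dmub, &create_params);                 /* step 1 */
        if (status != DMUB_STATUS_OK)
                return status;

        dmub_srv_has_hw_support(dmub, &supported);                      /* step 2 */
        if (!supported)
                return DMUB_STATUS_INVALID;

        region_params.inst_const_size = inst_size;
        region_params.bss_data_size = bss_size;
        region_params.fw_bss_data = fw_bss_data;
        dmub_srv_calc_region_info(dmub, &region_params, &region_info);  /* step 3 */

        fb_params.region_info = &region_info;
        fb_params.cpu_addr = example_alloc_fb(region_info.fb_size,
                                              &fb_params.gpu_addr);
        dmub_srv_calc_fb_info(dmub, &fb_params, &fb_info);

        for (i = 0; i < fb_info.num_fb; i++)
                hw_params.fb[i] = &fb_info.fb[i];

        status = dmub_srv_hw_init(dmub, &hw_params);                    /* step 4 */
        if (status != DMUB_STATUS_OK)
                return status;

        cmd.cmd_common.header.type = DMUB_CMD__NULL;
        dmub_srv_cmd_queue(dmub, &cmd.cmd_common.header);
        dmub_srv_cmd_execute(dmub);

        return dmub_srv_wait_for_idle(dmub, 100000);
}
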
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
new file mode 100644
index 000000000000..6b3ee42db350
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_TRACE_BUFFER_H_
+#define _DMUB_TRACE_BUFFER_H_
+
+#include "dmub_types.h"
+
+#define LOAD_DMCU_FW 1
+#define LOAD_PHY_FW 2
+
+
+enum dmucb_trace_code {
+ DMCUB__UNKNOWN,
+ DMCUB__MAIN_BEGIN,
+ DMCUB__PHY_INIT_BEGIN,
+ DMCUB__PHY_FW_SRAM_LOAD_BEGIN,
+ DMCUB__PHY_FW_SRAM_LOAD_END,
+ DMCUB__PHY_INIT_POLL_DONE,
+ DMCUB__PHY_INIT_END,
+ DMCUB__DMCU_ERAM_LOAD_BEGIN,
+ DMCUB__DMCU_ERAM_LOAD_END,
+ DMCUB__DMCU_ISR_LOAD_BEGIN,
+ DMCUB__DMCU_ISR_LOAD_END,
+ DMCUB__MAIN_IDLE,
+ DMCUB__PERF_TRACE,
+ DMCUB__PG_DONE,
+};
+
+struct dmcub_trace_buf_entry {
+ enum dmucb_trace_code trace_code;
+ uint32_t tick_count;
+ uint32_t param0;
+ uint32_t param1;
+};
+
+#define TRACE_BUF_SIZE (1024) //1 kB
+#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry))
+
+
+struct dmcub_trace_buf {
+ uint32_t entry_count;
+ uint32_t clk_freq;
+ struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY];
+};
+
+
+#endif /* _DMUB_TRACE_BUFFER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
new file mode 100644
index 000000000000..41d524b0db2f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_TYPES_H_
+#define _DMUB_TYPES_H_
+
+/* Basic type definitions. */
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <stdarg.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef dmub_memcpy
+#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
+#endif
+
+#ifndef dmub_memset
+#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
+#endif
+
+#ifndef dmub_udelay
+#define dmub_udelay(microseconds) udelay(microseconds)
+#endif
+
+union dmub_addr {
+ struct {
+ uint32_t low_part;
+ uint32_t high_part;
+ } u;
+ uint64_t quad_part;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_TYPES_H_ */
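
A minimal illustration (not part of the patch) of how union dmub_addr splits a 64-bit address, assuming the usual little-endian layout on these platforms:

        union dmub_addr addr;

        addr.quad_part = 0x0000000123456000ULL;
        /* addr.u.low_part  == 0x23456000, addr.u.high_part == 0x00000001 */
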
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
new file mode 100644
index 000000000000..e08dfeea24b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -0,0 +1,27 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
+
+AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DMUB)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
new file mode 100644
index 000000000000..b2ca8e0dbac9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn20.h"
+
+#include "dcn/dcn_2_0_0_offset.h"
+#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
+ uint64_t *fb_base,
+ uint64_t *fb_offset)
+{
+ uint32_t tmp;
+
+ REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
+ *fb_base = (uint64_t)tmp << 24;
+
+ REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
+ *fb_offset = (uint64_t)tmp << 24;
+}
+
+static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
+ uint64_t fb_base,
+ uint64_t fb_offset,
+ union dmub_addr *addr_out)
+{
+ addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
+}
+
+void dmub_dcn20_reset(struct dmub_srv *dmub)
+{
+ REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+ REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+}
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub)
+{
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
+ REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
+ REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0);
+}
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1)
+{
+ union dmub_addr offset;
+ uint64_t fb_base, fb_offset;
+
+ dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
+ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3,
+ DMCUB_MEM_WRITE_SPACE, 0x3);
+
+ dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
+ REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
+ DMCUB_REGION3_CW0_ENABLE, 1);
+
+ dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
+ REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+ DMCUB_REGION3_CW1_ENABLE, 1);
+
+ REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+ 0x20);
+}
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6)
+{
+ union dmub_addr offset;
+ uint64_t fb_base, fb_offset;
+
+ dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
+ dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+ REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+ DMCUB_REGION3_CW2_ENABLE, 1);
+
+ dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
+ REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
+ DMCUB_REGION3_CW3_ENABLE, 1);
+
+ /* TODO: Move this to CW4. */
+ dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
+ cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
+ 1);
+
+ dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
+ REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
+ DMCUB_REGION3_CW5_ENABLE, 1);
+
+ dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
+ REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
+ DMCUB_REGION3_CW6_ENABLE, 1);
+}
+
+void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1)
+{
+ /* TODO: Use CW4 instead of region 4. */
+
+ REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
+ REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+}
+
+uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_RPTR);
+}
+
+void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
+{
+ REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
+}
+
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0;
+}
+
+bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
+{
+ uint32_t supported = 0;
+
+ REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);
+
+ return supported;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
new file mode 100644
index 000000000000..04b0fa13153d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN20_H_
+#define _DMUB_DCN20_H_
+
+#include "../inc/dmub_types.h"
+
+struct dmub_srv;
+
+/* DCN20 register definitions. */
+
+#define DMUB_COMMON_REGS() \
+ DMUB_SR(DMCUB_CNTL) \
+ DMUB_SR(DMCUB_MEM_CNTL) \
+ DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_INBOX1_SIZE) \
+ DMUB_SR(DMCUB_INBOX1_RPTR) \
+ DMUB_SR(DMCUB_INBOX1_WPTR) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION4_OFFSET) \
+ DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_SCRATCH0) \
+ DMUB_SR(DMCUB_SCRATCH1) \
+ DMUB_SR(DMCUB_SCRATCH2) \
+ DMUB_SR(DMCUB_SCRATCH3) \
+ DMUB_SR(DMCUB_SCRATCH4) \
+ DMUB_SR(DMCUB_SCRATCH5) \
+ DMUB_SR(DMCUB_SCRATCH6) \
+ DMUB_SR(DMCUB_SCRATCH7) \
+ DMUB_SR(DMCUB_SCRATCH8) \
+ DMUB_SR(DMCUB_SCRATCH9) \
+ DMUB_SR(DMCUB_SCRATCH10) \
+ DMUB_SR(DMCUB_SCRATCH11) \
+ DMUB_SR(DMCUB_SCRATCH12) \
+ DMUB_SR(DMCUB_SCRATCH13) \
+ DMUB_SR(DMCUB_SCRATCH14) \
+ DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(CC_DC_PIPE_DIS) \
+ DMUB_SR(MMHUBBUB_SOFT_RESET) \
+ DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
+ DMUB_SR(DCN_VM_FB_OFFSET)
+
+#define DMUB_COMMON_FIELDS() \
+ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \
+ DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \
+ DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
+ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
+ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
+ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
+ DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
+
+struct dmub_srv_common_reg_offset {
+#define DMUB_SR(reg) uint32_t reg;
+ DMUB_COMMON_REGS()
+#undef DMUB_SR
+};
+
+struct dmub_srv_common_reg_shift {
+#define DMUB_SF(reg, field) uint8_t reg##__##field;
+ DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_reg_mask {
+#define DMUB_SF(reg, field) uint32_t reg##__##field;
+ DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_regs {
+ const struct dmub_srv_common_reg_offset offset;
+ const struct dmub_srv_common_reg_mask mask;
+ const struct dmub_srv_common_reg_shift shift;
+};
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs;
+
+/* Hardware functions. */
+
+void dmub_dcn20_init(struct dmub_srv *dmub);
+
+void dmub_dcn20_reset(struct dmub_srv *dmub);
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub);
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1);
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6);
+
+void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
+uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
+
+void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
+
+bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN20_H_ */
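
As a brief, hedged illustration of the X-macro pattern above (not part of the patch): each DMUB_SR()/DMUB_SF() entry expands to one member per generated table, so a field lookup reads the register offset, mask and shift by name, e.g.:

        /* DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) generates, among others:
         *   regs->mask.DMCUB_CNTL__DMCUB_ENABLE   (uint32_t field mask)
         *   regs->shift.DMCUB_CNTL__DMCUB_ENABLE  (uint8_t field shift)
         * and DMUB_SR(DMCUB_CNTL) generates:
         *   regs->offset.DMCUB_CNTL               (uint32_t register offset)
         * which the REG_* helpers in dmub_reg.h are expected to consume.
         */
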
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
new file mode 100644
index 000000000000..5bed9fcd6b5c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn21.h"
+
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub)
+{
+ return (REG_READ(DMCUB_SCRATCH0) == 3);
+}
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_SCRATCH10) == 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
new file mode 100644
index 000000000000..2bbea237137b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN21_H_
+#define _DMUB_DCN21_H_
+
+#include "dmub_dcn20.h"
+
+/* Registers. */
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
+
+/* Hardware functions. */
+
+bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub);
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
new file mode 100644
index 000000000000..4094eca212f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_reg.h"
+#include "../inc/dmub_srv.h"
+
+struct dmub_reg_value_masks {
+ uint32_t value;
+ uint32_t mask;
+};
+
+static inline void
+set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask,
+ uint32_t value, uint32_t mask, uint8_t shift)
+{
+ field_value_mask->value =
+ (field_value_mask->value & ~mask) | (mask & (value << shift));
+ field_value_mask->mask = field_value_mask->mask | mask;
+}
+
+static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask,
+ uint32_t addr, int n, uint8_t shift1,
+ uint32_t mask1, uint32_t field_value1,
+ va_list ap)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ /* gather all bits value/mask getting updated in this register */
+ set_reg_field_value_masks(field_value_mask, field_value1, mask1,
+ shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ set_reg_field_value_masks(field_value_mask, field_value, mask,
+ shift);
+ i++;
+ }
+}
+
+static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask,
+ uint8_t shift)
+{
+ return (mask & reg_value) >> shift;
+}
+
+void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1,
+ uint32_t mask1, uint32_t field_value1, ...)
+{
+ struct dmub_reg_value_masks field_value_mask = { 0 };
+ uint32_t reg_val;
+ va_list ap;
+
+ va_start(ap, field_value1);
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+ va_end(ap);
+
+ reg_val = srv->funcs.reg_read(srv->user_ctx, addr);
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ srv->funcs.reg_write(srv->user_ctx, addr, reg_val);
+}
+
+void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...)
+{
+ struct dmub_reg_value_masks field_value_mask = { 0 };
+ va_list ap;
+
+ va_start(ap, field_value1);
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+ va_end(ap);
+
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ srv->funcs.reg_write(srv->user_ctx, addr, reg_val);
+}
+
+void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift,
+ uint32_t mask, uint32_t *field_value)
+{
+ uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr);
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+}
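
The update path above is a plain read-modify-write: the varargs are consumed as (shift, mask, value) triples, OR-accumulated into one combined mask/value pair, and applied to the current register contents so untouched bits are preserved. A worked example with plain integers (the register layout is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit register with an ENABLE bit at bit 0 and a
 * 4-bit MODE field at bits 7:4, mirroring what dmub_reg_update() does. */
#define MODE_MASK    0x000000f0u
#define MODE_SHIFT   4
#define ENABLE_MASK  0x00000001u
#define ENABLE_SHIFT 0

int main(void)
{
	uint32_t reg = 0x0000abcdu;	/* value read back from hardware */
	uint32_t mask = 0, value = 0;

	/* Accumulate two field updates, as set_reg_field_value_masks() does. */
	value = (value & ~MODE_MASK)   | (MODE_MASK   & (0x7u << MODE_SHIFT));
	mask |= MODE_MASK;
	value = (value & ~ENABLE_MASK) | (ENABLE_MASK & (0x1u << ENABLE_SHIFT));
	mask |= ENABLE_MASK;

	/* Read-modify-write: bits outside the accumulated mask keep their value. */
	reg = (reg & ~mask) | value;

	printf("0x%08x\n", reg);	/* prints 0x0000ab7d */
	return 0;
}
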
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
new file mode 100644
index 000000000000..c1f4030929a4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_REG_H_
+#define _DMUB_REG_H_
+
+#include "../inc/dmub_types.h"
+
+struct dmub_srv;
+
+/* Register offset and field lookup. */
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name)
+
+#define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT
+
+#define FD_MASK(reg_name, field) reg_name##__##field##_MASK
+
+#define REG(reg) (REGS)->offset.reg
+
+#define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field
+
+#define FN(reg_name, field) FD(reg_name##__##field)
+
+/* Register reads and writes. */
+
+#define REG_READ(reg) ((CTX)->funcs.reg_read((CTX)->user_ctx, REG(reg)))
+
+#define REG_WRITE(reg, val) \
+ ((CTX)->funcs.reg_write((CTX)->user_ctx, REG(reg), (val)))
+
+/* Register field setting. */
+
+#define REG_SET_N(reg_name, n, initial_val, ...) \
+ dmub_reg_set(CTX, REG(reg_name), initial_val, n, __VA_ARGS__)
+
+#define REG_SET(reg_name, initial_val, field, val) \
+ REG_SET_N(reg_name, 1, initial_val, \
+ FN(reg_name, field), val)
+
+#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
+ REG_SET_N(reg, 2, init_value, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2)
+
+#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
+ REG_SET_N(reg, 3, init_value, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_SET_N(reg, 4, init_value, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+/* Register field updating. */
+
+#define REG_UPDATE_N(reg_name, n, ...)\
+ dmub_reg_update(CTX, REG(reg_name), n, __VA_ARGS__)
+
+#define REG_UPDATE(reg_name, field, val) \
+ REG_UPDATE_N(reg_name, 1, \
+ FN(reg_name, field), val)
+
+#define REG_UPDATE_2(reg, f1, v1, f2, v2) \
+ REG_UPDATE_N(reg, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
+ REG_UPDATE_N(reg, 3, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_UPDATE_N(reg, 4, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+/* Register field getting. */
+
+#define REG_GET(reg_name, field, val) \
+ dmub_reg_get(CTX, REG(reg_name), FN(reg_name, field), val)
+
+void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
+void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1,
+ uint32_t mask1, uint32_t field_value1, ...);
+
+void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift,
+ uint32_t mask, uint32_t *field_value);
+
+#endif /* _DMUB_REG_H_ */
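
To see how these convenience macros line up with the varargs helpers, here is a traced expansion of a two-field update. The register and field names are picked for illustration only; the real names come from the generated DCN headers, and CTX/REGS are defined per source file (as in dmub_dcn21.c above).

/*
 * REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_SOFT_RESET, 0)
 *
 * -> REG_UPDATE_N(DMCUB_CNTL, 2,
 *                 FN(DMCUB_CNTL, DMCUB_ENABLE), 1,
 *                 FN(DMCUB_CNTL, DMCUB_SOFT_RESET), 0)
 *
 * -> dmub_reg_update(CTX, (REGS)->offset.DMCUB_CNTL, 2,
 *                    (REGS)->shift.DMCUB_CNTL__DMCUB_ENABLE,
 *                    (REGS)->mask.DMCUB_CNTL__DMCUB_ENABLE, 1,
 *                    (REGS)->shift.DMCUB_CNTL__DMCUB_SOFT_RESET,
 *                    (REGS)->mask.DMCUB_CNTL__DMCUB_SOFT_RESET, 0);
 *
 * i.e. each FN() contributes a (shift, mask) pair from the per-ASIC table,
 * followed by the caller's field value, which is exactly the triple format
 * dmub_reg_update() walks with va_arg().
 */
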
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
new file mode 100644
index 000000000000..85a518bf8a76
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_dcn20.h"
+#include "dmub_dcn21.h"
+#include "dmub_fw_meta.h"
+#include "os_types.h"
+/*
+ * Note: the DMUB service is standalone. No additional headers should be
+ * added below or above this line unless they reside within the DMUB
+ * folder.
+ */
+
+/* Alignment for framebuffer memory. */
+#define DMUB_FB_ALIGNMENT (1024 * 1024)
+
+/* Stack size. */
+#define DMUB_STACK_SIZE (128 * 1024)
+
+/* Context size. */
+#define DMUB_CONTEXT_SIZE (512 * 1024)
+
+/* Mailbox size */
+#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
+
+/* Default state size if meta is absent. */
+#define DMUB_FW_STATE_SIZE (1024)
+
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (1024)
+
+/* Number of windows in use. */
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+/* Base addresses. */
+
+#define DMUB_CW0_BASE (0x60000000)
+#define DMUB_CW1_BASE (0x61000000)
+#define DMUB_CW3_BASE (0x63000000)
+#define DMUB_CW5_BASE (0x65000000)
+#define DMUB_CW6_BASE (0x66000000)
+
+static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
+{
+ return (val + factor - 1) / factor * factor;
+}
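
dmub_align() is the usual round-up-to-a-multiple idiom done with integer division. A couple of quick sanity checks with plain numbers:

#include <assert.h>
#include <stdint.h>

/* Same round-up formula as dmub_align() above. */
static uint32_t align_up(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}

int main(void)
{
	assert(align_up(1000, 256) == 1024);	/* rounded up to next multiple */
	assert(align_up(1024, 256) == 1024);	/* already aligned: unchanged */
	assert(align_up(1, 4096) == 4096);
	return 0;
}
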
+
+static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+{
+ const uint8_t *base = (const uint8_t *)fb->cpu_addr;
+ uint8_t buf[64];
+ uint32_t pos, end;
+
+ /**
+ * Read 64-byte chunks since we don't want to store a
+ * large temporary buffer for this purpose.
+ */
+ end = fb->size / sizeof(buf) * sizeof(buf);
+
+ for (pos = 0; pos < end; pos += sizeof(buf))
+ dmub_memcpy(buf, base + pos, sizeof(buf));
+
+ /* Read anything leftover into the buffer. */
+ if (end < fb->size)
+ dmub_memcpy(buf, base + pos, fb->size - end);
+}
+
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+{
+ const union dmub_fw_meta *meta;
+
+ if (fw_bss_data == NULL)
+ return NULL;
+
+ if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+ return NULL;
+
+ meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
+ DMUB_FW_META_OFFSET -
+ sizeof(union dmub_fw_meta));
+
+ if (meta->info.magic_value != DMUB_FW_META_MAGIC)
+ return NULL;
+
+ return &meta->info;
+}
+
+static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+{
+ struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+
+ switch (asic) {
+ case DMUB_ASIC_DCN20:
+ case DMUB_ASIC_DCN21:
+ dmub->regs = &dmub_srv_dcn20_regs;
+
+ funcs->reset = dmub_dcn20_reset;
+ funcs->reset_release = dmub_dcn20_reset_release;
+ funcs->backdoor_load = dmub_dcn20_backdoor_load;
+ funcs->setup_windows = dmub_dcn20_setup_windows;
+ funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
+ funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
+ funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
+ funcs->is_supported = dmub_dcn20_is_supported;
+ funcs->is_hw_init = dmub_dcn20_is_hw_init;
+
+ if (asic == DMUB_ASIC_DCN21) {
+ dmub->regs = &dmub_srv_dcn21_regs;
+
+ funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
+ funcs->is_phy_init = dmub_dcn21_is_phy_init;
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+ const struct dmub_srv_create_params *params)
+{
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ dmub_memset(dmub, 0, sizeof(*dmub));
+
+ dmub->funcs = params->funcs;
+ dmub->user_ctx = params->user_ctx;
+ dmub->asic = params->asic;
+ dmub->is_virtual = params->is_virtual;
+
+ /* Setup asic dependent hardware funcs. */
+ if (!dmub_srv_hw_setup(dmub, params->asic)) {
+ status = DMUB_STATUS_INVALID;
+ goto cleanup;
+ }
+
+ /* Override (some) hardware funcs based on user params. */
+ if (params->hw_funcs) {
+ if (params->hw_funcs->get_inbox1_rptr)
+ dmub->hw_funcs.get_inbox1_rptr =
+ params->hw_funcs->get_inbox1_rptr;
+
+ if (params->hw_funcs->set_inbox1_wptr)
+ dmub->hw_funcs.set_inbox1_wptr =
+ params->hw_funcs->set_inbox1_wptr;
+
+ if (params->hw_funcs->is_supported)
+ dmub->hw_funcs.is_supported =
+ params->hw_funcs->is_supported;
+ }
+
+ /* Sanity checks for required hw func pointers. */
+ if (!dmub->hw_funcs.get_inbox1_rptr ||
+ !dmub->hw_funcs.set_inbox1_wptr) {
+ status = DMUB_STATUS_INVALID;
+ goto cleanup;
+ }
+
+cleanup:
+ if (status == DMUB_STATUS_OK)
+ dmub->sw_init = true;
+ else
+ dmub_srv_destroy(dmub);
+
+ return status;
+}
+
+void dmub_srv_destroy(struct dmub_srv *dmub)
+{
+ dmub_memset(dmub, 0, sizeof(*dmub));
+}
+
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out)
+{
+ struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
+ struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
+ struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
+ struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
+ struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
+ struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
+ struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ const struct dmub_fw_meta_info *fw_info;
+ uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ memset(out, 0, sizeof(*out));
+
+ out->num_regions = DMUB_NUM_WINDOWS;
+
+ inst->base = 0x0;
+ inst->top = inst->base + params->inst_const_size;
+
+ data->base = dmub_align(inst->top, 256);
+ data->top = data->base + params->bss_data_size;
+
+ /*
+ * All cache windows below should be aligned to the size
+ * of the DMCUB cache line, 64 bytes.
+ */
+
+ stack->base = dmub_align(data->top, 256);
+ stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+
+ bios->base = dmub_align(stack->top, 256);
+ bios->top = bios->base + params->vbios_size;
+
+ mail->base = dmub_align(bios->top, 256);
+ mail->top = mail->base + DMUB_MAILBOX_SIZE;
+
+ fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
+ params->bss_data_size);
+
+ if (fw_info) {
+ fw_state_size = fw_info->fw_region_size;
+ trace_buffer_size = fw_info->trace_buffer_size;
+ }
+
+ trace_buff->base = dmub_align(mail->top, 256);
+ trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+
+ fw_state->base = dmub_align(trace_buff->top, 256);
+ fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
+
+ out->fb_size = dmub_align(fw_state->top, 4096);
+
+ return DMUB_STATUS_OK;
+}
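
The region calculation lays the windows out back to back in address order (inst, data, stack+context, vbios, mailbox, trace buffer, firmware state), each base aligned up to 256 bytes and the whole frame buffer padded to a 4 KiB multiple. A stand-alone sketch of that arithmetic with made-up input sizes; DMUB_RB_SIZE is assumed to be 8 KiB here purely for illustration, since the ring-buffer header is not part of this hunk:

#include <stdint.h>
#include <stdio.h>

static uint32_t align_up(uint32_t v, uint32_t f)
{
	return (v + f - 1) / f * f;
}

int main(void)
{
	const char *name[] = { "inst", "data", "stack", "bios", "mail",
			       "trace", "state" };
	uint32_t size[] = { 256 * 1024,			/* inst_const_size (hypothetical) */
			    100 * 1024,			/* bss_data_size (hypothetical) */
			    (128 + 512) * 1024,		/* DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE */
			    64 * 1024,			/* vbios_size (hypothetical) */
			    8 * 1024,			/* assumed mailbox size */
			    align_up(1024, 64),		/* default trace buffer size */
			    align_up(1024, 64) };	/* default fw state size */
	uint32_t base = 0, top = 0;

	for (int i = 0; i < 7; i++) {
		base = align_up(top, 256);	/* every window starts 256-byte aligned */
		top = base + size[i];
		printf("%-5s 0x%08x - 0x%08x\n", name[i], base, top);
	}
	printf("fb_size = 0x%08x\n", align_up(top, 4096));
	return 0;
}
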
+
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ const struct dmub_srv_fb_params *params,
+ struct dmub_srv_fb_info *out)
+{
+ uint8_t *cpu_base;
+ uint64_t gpu_base;
+ uint32_t i;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ memset(out, 0, sizeof(*out));
+
+ if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ return DMUB_STATUS_INVALID;
+
+ cpu_base = (uint8_t *)params->cpu_addr;
+ gpu_base = params->gpu_addr;
+
+ for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ const struct dmub_region *reg =
+ &params->region_info->regions[i];
+
+ out->fb[i].cpu_addr = cpu_base + reg->base;
+ out->fb[i].gpu_addr = gpu_base + reg->base;
+ out->fb[i].size = reg->top - reg->base;
+ }
+
+ out->num_fb = DMUB_NUM_WINDOWS;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+ bool *is_supported)
+{
+ *is_supported = false;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.is_supported)
+ *is_supported = dmub->hw_funcs.is_supported(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
+{
+ *is_hw_init = false;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_OK;
+
+ if (dmub->hw_funcs.is_hw_init)
+ *is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ const struct dmub_srv_hw_params *params)
+{
+ struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
+ struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
+ struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
+ struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
+ struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
+ struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
+ struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+
+ struct dmub_rb_init_params rb_params;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_region inbox1;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ dmub->fb_base = params->fb_base;
+ dmub->fb_offset = params->fb_offset;
+ dmub->psp_version = params->psp_version;
+
+ if (inst_fb && data_fb) {
+ cw0.offset.quad_part = inst_fb->gpu_addr;
+ cw0.region.base = DMUB_CW0_BASE;
+ cw0.region.top = cw0.region.base + inst_fb->size - 1;
+
+ cw1.offset.quad_part = stack_fb->gpu_addr;
+ cw1.region.base = DMUB_CW1_BASE;
+ cw1.region.top = cw1.region.base + stack_fb->size - 1;
+
+ /**
+ * Read back all the instruction memory so we don't hang the
+ * DMCUB when backdoor loading if the write from x86 hasn't been
+ * flushed yet. This only occurs in backdoor loading.
+ */
+ dmub_flush_buffer_mem(inst_fb);
+
+ if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
+ dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
+ }
+
+ if (dmub->hw_funcs.reset)
+ dmub->hw_funcs.reset(dmub);
+
+ if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
+ fw_state_fb) {
+ cw2.offset.quad_part = data_fb->gpu_addr;
+ cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+ cw2.region.top = cw2.region.base + data_fb->size;
+
+ cw3.offset.quad_part = bios_fb->gpu_addr;
+ cw3.region.base = DMUB_CW3_BASE;
+ cw3.region.top = cw3.region.base + bios_fb->size;
+
+ cw4.offset.quad_part = mail_fb->gpu_addr;
+ cw4.region.base = cw3.region.top + 1;
+ cw4.region.top = cw4.region.base + mail_fb->size;
+
+ inbox1.base = cw4.region.base;
+ inbox1.top = cw4.region.top;
+
+ cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+ cw5.region.base = DMUB_CW5_BASE;
+ cw5.region.top = cw5.region.base + tracebuff_fb->size;
+
+ cw6.offset.quad_part = fw_state_fb->gpu_addr;
+ cw6.region.base = DMUB_CW6_BASE;
+ cw6.region.top = cw6.region.base + fw_state_fb->size;
+
+ dmub->fw_state = fw_state_fb->cpu_addr;
+
+ if (dmub->hw_funcs.setup_windows)
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
+ &cw5, &cw6);
+
+ if (dmub->hw_funcs.setup_mailbox)
+ dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+ }
+
+ if (mail_fb) {
+ dmub_memset(&rb_params, 0, sizeof(rb_params));
+ rb_params.ctx = dmub;
+ rb_params.base_address = mail_fb->cpu_addr;
+ rb_params.capacity = DMUB_RB_SIZE;
+
+ dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ }
+
+ if (dmub->hw_funcs.reset_release)
+ dmub->hw_funcs.reset_release(dmub);
+
+ dmub->hw_init = true;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_init == false)
+ return DMUB_STATUS_OK;
+
+ if (dmub->hw_funcs.reset)
+ dmub->hw_funcs.reset(dmub);
+
+ dmub->hw_init = false;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ const struct dmub_cmd_header *cmd)
+{
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ return DMUB_STATUS_OK;
+
+ return DMUB_STATUS_QUEUE_FULL;
+}
+
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ /**
+ * Read back all the queued commands to ensure that they've
+ * been flushed to framebuffer memory. Otherwise DMCUB might
+ * read back stale, fully invalid or partially invalid data.
+ */
+ dmub_rb_flush_pending(&dmub->inbox1_rb);
+
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_auto_load_done)
+ return DMUB_STATUS_OK;
+
+ for (i = 0; i <= timeout_us; i += 100) {
+ if (dmub->hw_funcs.is_auto_load_done(dmub))
+ return DMUB_STATUS_OK;
+
+ udelay(100);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_phy_init)
+ return DMUB_STATUS_OK;
+
+ for (i = 0; i <= timeout_us; i += 10) {
+ if (dmub->hw_funcs.is_phy_init(dmub))
+ return DMUB_STATUS_OK;
+
+ udelay(10);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; ++i) {
+ dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ if (dmub_rb_empty(&dmub->inbox1_rb))
+ return DMUB_STATUS_OK;
+
+ udelay(1);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
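
Taken together, a client is expected to drive the service in roughly the following order. This is a hedged sketch, not driver code: allocation, the register callbacks, the firmware copy and the command payload are placeholders, and only the dmub_srv_* entry points and structures come from the file above.

/* Sketch of the intended bring-up and command flow. */
static void example_bringup(struct dmub_srv *dmub,
			    const struct dmub_srv_create_params *create,
			    struct dmub_srv_region_params *rparams,
			    struct dmub_srv_hw_params *hw_params,
			    const struct dmub_cmd_header *cmd)
{
	struct dmub_srv_region_info rinfo;
	struct dmub_srv_fb_params fbparams;
	struct dmub_srv_fb_info fbinfo;

	dmub_srv_create(dmub, create);			/* software state only */
	dmub_srv_calc_region_info(dmub, rparams, &rinfo); /* how big an fb to allocate */

	/* ...allocate rinfo.fb_size bytes of frame buffer memory here... */
	fbparams.cpu_addr = NULL;			/* placeholder CPU mapping */
	fbparams.gpu_addr = 0;				/* placeholder GPU address */
	fbparams.region_info = &rinfo;
	dmub_srv_calc_fb_info(dmub, &fbparams, &fbinfo); /* per-window addresses */

	/* ...copy firmware, bss/data and vbios into the matching fbinfo.fb[]... */
	dmub_srv_hw_init(dmub, hw_params);		/* programs CW0..CW6 and inbox1 */
	dmub_srv_wait_for_auto_load(dmub, 100 * 1000);

	dmub_srv_cmd_queue(dmub, cmd);			/* push into the ring buffer */
	dmub_srv_cmd_execute(dmub);			/* flush and bump the write pointer */
	dmub_srv_wait_for_idle(dmub, 100 * 1000);
}
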
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
index 6364fbc24cfe..66a54da0641c 100644
--- a/drivers/gpu/drm/amd/display/include/audio_types.h
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -38,8 +38,8 @@ struct audio_crtc_info {
uint32_t h_active;
uint32_t v_active;
uint32_t pixel_repetition;
- uint32_t requested_pixel_clock; /* in KHz */
- uint32_t calculated_pixel_clock; /* in KHz */
+ uint32_t requested_pixel_clock_100Hz; /* in 100Hz */
+ uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */
uint32_t refresh_rate;
enum dc_color_depth color_depth;
bool interlaced;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 887e6a8597c4..a2903985b9e8 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,24 +134,50 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
+#define RAVEN2_15D8_REV_94 0x94
+#define RAVEN2_15D8_REV_95 0x95
+#define RAVEN2_15D8_REV_E3 0xE3
+#define RAVEN2_15D8_REV_E4 0xE4
+#define RAVEN2_15D8_REV_E9 0xE9
+#define RAVEN2_15D8_REV_EA 0xEA
+#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
-
+#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
-#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
-
+#endif
+#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
+#ifndef ASICREV_IS_RAVEN2
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-
+#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
+ || (eChipRev == RAVEN2_15D8_REV_E4))
+#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
+ || eChipRev == RAVEN2_15D8_REV_95 \
+ || eChipRev == RAVEN2_15D8_REV_E9 \
+ || eChipRev == RAVEN2_15D8_REV_EA \
+ || eChipRev == RAVEN2_15D8_REV_EB)
#define FAMILY_RV 142 /* DCN 1*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define FAMILY_NV 143 /* DCN 2*/
-#endif
+enum {
+ NV_NAVI10_P_A0 = 1,
+ NV_NAVI12_P_A0 = 10,
+ NV_NAVI14_M_A0 = 20,
+ NV_UNKNOWN = 0xFF
+};
+
+#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
+#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
+#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
+#define RENOIR_A0 0x91
+#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
+#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 1e3ce4d847ae..0b6859189ca7 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -46,9 +46,8 @@ enum dce_version {
DCE_VERSION_MAX,
DCN_VERSION_1_0,
DCN_VERSION_1_01,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
DCN_VERSION_2_0,
-#endif
+ DCN_VERSION_2_1,
DCN_VERSION_MAX
};
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index d968956a10cd..9ad49da50a17 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -25,10 +25,14 @@
#ifndef __DAL_DDC_SERVICE_TYPES_H__
#define __DAL_DDC_SERVICE_TYPES_H__
-#define DP_BRANCH_DEVICE_ID_1 0x0010FA
-#define DP_BRANCH_DEVICE_ID_2 0x0022B9
-#define DP_BRANCH_DEVICE_ID_3 0x00001A
-#define DP_BRANCH_DEVICE_ID_4 0x0080e1
+/* 0010FA dongles (ST Micro) external converter chip id */
+#define DP_BRANCH_DEVICE_ID_0010FA 0x0010FA
+/* 0022B9 external converter chip id */
+#define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9
+#define DP_BRANCH_DEVICE_ID_00001A 0x00001A
+#define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1
+#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
+#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
diff --git a/drivers/gpu/drm/amd/display/include/gpio_interface.h b/drivers/gpu/drm/amd/display/include/gpio_interface.h
index 7de64195dc33..5e888a093c16 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_interface.h
@@ -93,8 +93,17 @@ enum sync_source dal_gpio_get_sync_source(
enum gpio_pin_output_state dal_gpio_get_output_state(
const struct gpio *gpio);
+struct hw_ddc *dal_gpio_get_ddc(struct gpio *gpio);
+
+struct hw_hpd *dal_gpio_get_hpd(struct gpio *gpio);
+
+struct hw_generic *dal_gpio_get_generic(struct gpio *gpio);
+
/* Close the handle */
void dal_gpio_close(
struct gpio *gpio);
+
+
+
#endif
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index f40259bade40..9c55d247227e 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -51,13 +51,29 @@ struct gpio *dal_gpio_service_create_irq(
uint32_t offset,
uint32_t mask);
+struct gpio *dal_gpio_service_create_generic_mux(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask);
+
+void dal_gpio_destroy_generic_mux(
+ struct gpio **mux);
+
+enum gpio_result dal_mux_setup_config(
+ struct gpio *mux,
+ struct gpio_generic_mux_config *config);
+
+struct gpio_pin_info dal_gpio_get_generic_pin_info(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en);
+
struct ddc *dal_gpio_create_ddc(
struct gpio_service *service,
uint32_t offset,
uint32_t mask,
struct gpio_ddc_hw_info *info);
-
void dal_gpio_destroy_ddc(
struct ddc **ddc);
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index f312834fef50..d51de94e4bc3 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -178,7 +178,8 @@ struct dc_firmware_info {
uint32_t default_engine_clk; /* in KHz */
uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
-
+ bool oem_i2c_present;
+ uint8_t oem_i2c_obj_id;
};
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
new file mode 100644
index 000000000000..f31e6befc8d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -0,0 +1,96 @@
+/*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*
+* Authors: AMD
+*
+*/
+
+#ifndef __DC_HDCP_TYPES_H__
+#define __DC_HDCP_TYPES_H__
+
+enum hdcp_message_id {
+ HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ HDCP_MESSAGE_ID_READ_BKSV = 0,
+ /* HDMI is called Ri', DP is called R0' */
+ HDCP_MESSAGE_ID_READ_RI_R0,
+ HDCP_MESSAGE_ID_READ_PJ,
+ HDCP_MESSAGE_ID_WRITE_AKSV,
+ HDCP_MESSAGE_ID_WRITE_AINFO,
+ HDCP_MESSAGE_ID_WRITE_AN,
+ HDCP_MESSAGE_ID_READ_VH_X,
+ HDCP_MESSAGE_ID_READ_VH_0,
+ HDCP_MESSAGE_ID_READ_VH_1,
+ HDCP_MESSAGE_ID_READ_VH_2,
+ HDCP_MESSAGE_ID_READ_VH_3,
+ HDCP_MESSAGE_ID_READ_VH_4,
+ HDCP_MESSAGE_ID_READ_BCAPS,
+ HDCP_MESSAGE_ID_READ_BSTATUS,
+ HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ HDCP_MESSAGE_ID_READ_BINFO,
+
+ /* HDCP 2.2 */
+
+ HDCP_MESSAGE_ID_HDCP2VERSION,
+ HDCP_MESSAGE_ID_RX_CAPS,
+ HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ HDCP_MESSAGE_ID_READ_RXSTATUS,
+ HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
+ HDCP_MESSAGE_ID_MAX
+};
+
+enum hdcp_version {
+ HDCP_Unknown = 0,
+ HDCP_VERSION_14,
+ HDCP_VERSION_22,
+};
+
+enum hdcp_link {
+ HDCP_LINK_PRIMARY,
+ HDCP_LINK_SECONDARY
+};
+
+struct hdcp_protection_message {
+ enum hdcp_version version;
+ /* relevant only for DVI */
+ enum hdcp_link link;
+ enum hdcp_message_id msg_id;
+ uint32_t length;
+ uint8_t max_retries;
+ uint8_t *data;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
index bb012cb1a9f5..c7fbb9c3ad6b 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -42,7 +42,7 @@ struct aux_payload {
bool write;
bool mot;
uint32_t address;
- uint8_t length;
+ uint32_t length;
uint8_t *data;
/*
* used to return the reply type of the transaction
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 80f0d93cfd94..4869d4562e4d 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -71,14 +71,17 @@ enum link_training_result {
struct link_training_settings {
struct dc_link_settings link_settings;
struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
- bool allow_invalid_msa_timing_param;
-};
-enum hw_dp_training_pattern {
- HW_DP_TRAINING_PATTERN_1 = 0,
- HW_DP_TRAINING_PATTERN_2,
- HW_DP_TRAINING_PATTERN_3,
- HW_DP_TRAINING_PATTERN_4
+ enum dc_voltage_swing *voltage_swing;
+ enum dc_pre_emphasis *pre_emphasis;
+ enum dc_post_cursor2 *post_cursor2;
+
+ uint16_t cr_pattern_time;
+ uint16_t eq_pattern_time;
+ enum dc_dp_training_pattern pattern_for_eq;
+
+ bool enhanced_framing;
+ bool allow_invalid_msa_timing_param;
};
/*TODO: Move this enum test harness*/
@@ -120,6 +123,13 @@ enum dp_test_pattern {
DP_TEST_PATTERN_UNSUPPORTED
};
+enum dp_test_pattern_color_space {
+ DP_TEST_PATTERN_COLOR_SPACE_RGB,
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709,
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED
+};
+
enum dp_panel_mode {
/* not required */
DP_PANEL_MODE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index a0b68c266dab..6e008de25629 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -155,4 +155,6 @@ void context_clock_trace(
#define DISPLAY_STATS_END(entry) (void)(entry)
+#define LOG_GAMMA_WRITE(msg, ...)
+
#endif /* __DAL_LOGGER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index ea8d445816b8..89a709267019 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -63,12 +63,11 @@
#define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__)
#define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_RETIMER_REDRIVER(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#define DC_LOG_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)
+#define DC_LOG_ALL_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)
+#define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0) || defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
struct dal_logger;
@@ -113,10 +112,12 @@ enum dc_log_type {
LOG_PERF_TRACE,
LOG_DISPLAYSTATS,
LOG_HDMI_RETIMER_REDRIVER,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
LOG_DSC,
-#endif
LOG_DWB,
+ LOG_GAMMA_DEBUG,
+ LOG_MAX_HW_POINTS,
+ LOG_ALL_TF_CHANNELS,
+ LOG_SAMPLE_1DLUT,
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 88898935a5e6..cac09d500fda 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -30,7 +30,6 @@
#include "opp.h"
#include "color_gamma.h"
-
#define NUM_PTS_IN_REGION 16
#define NUM_REGIONS 32
#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
@@ -40,6 +39,33 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
+// these are helpers for calculations to reduce stack usage
+// do not depend on these being preserved across calls
+static struct fixed31_32 scratch_1;
+static struct fixed31_32 scratch_2;
+static struct translate_from_linear_space_args scratch_gamma_args;
+
+/* Helper to optimize gamma calculation; only used by translate_from_linear_space,
+ * in particular to avoid the very expensive dc_fixpt_pow function.
+ * The regions for the X points are exponential: they all use the same number of
+ * points (NUM_PTS_IN_REGION), and every point in a region is exactly 2x the point
+ * at the same index in the previous region. In other words
+ * X[i] = 2 * X[i-NUM_PTS_IN_REGION] for i >= 16.
+ * The other fact is that (2x)^gamma = 2^gamma * x^gamma.
+ * So we compute and save x^gamma for the first region of 16 points, and for every
+ * following region we just multiply by 2^gamma (computed once) and save the
+ * result, recursively building up all the values.
+ */
+static struct fixed31_32 pow_buffer[NUM_PTS_IN_REGION];
+static struct fixed31_32 gamma_of_2; // 2^gamma
+int pow_buffer_ptr = -1;
+ /*sRGB 709 2.2 2.4 P3*/
+static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0};
+static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0};
+static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0};
+static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0};
+static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600};
+
static bool pq_initialized; /* = false; */
static bool de_pq_initialized; /* = false; */
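
The pow_buffer optimization above boils down to one identity: every X point in a region is exactly twice the point at the same index in the previous region, and (2x)^p = 2^p * x^p, so the expensive dc_fixpt_pow() only needs to run for the first NUM_PTS_IN_REGION points; every later point is one multiply by the cached 2^(1/gamma). A plain-double sketch of that recurrence, used only to check the math (the 16-entry ring and fixed-point types are the kernel's):

#include <math.h>
#include <stdio.h>

#define PTS_PER_REGION 16

int main(void)
{
	double gamma = 2.4, p = 1.0 / gamma;
	double x[64], pow_cache[PTS_PER_REGION], two_to_p = pow(2.0, p);

	/* Regions of 16 points; each region doubles the previous one,
	 * mirroring how coordinates_x is laid out. */
	for (int i = 0; i < 64; i++)
		x[i] = (i < PTS_PER_REGION) ? (i + 1) / 16.0
					    : 2.0 * x[i - PTS_PER_REGION];

	for (int i = 0; i < 64; i++) {
		double direct = pow(x[i], p);
		double cached;

		if (i < PTS_PER_REGION)
			cached = pow_cache[i] = direct;	/* seed the first region */
		else					/* reuse: one multiply per point */
			cached = pow_cache[i % PTS_PER_REGION] =
				two_to_p * pow_cache[i % PTS_PER_REGION];

		if (fabs(direct - cached) > 1e-9)
			printf("mismatch at %d\n", i);
	}
	printf("recurrence matches direct pow()\n");
	return 0;
}
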
@@ -71,6 +97,18 @@ void setup_x_points_distribution(void)
}
}
+void log_x_points_distribution(struct dal_logger *logger)
+{
+ int i = 0;
+
+ if (logger != NULL) {
+ LOG_GAMMA_WRITE("Log X Distribution\n");
+
+ for (i = 0; i < MAX_HW_POINTS; i++)
+ LOG_GAMMA_WRITE("%llu\n", coordinates_x[i].x.value);
+ }
+}
+
static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
{
/* consts for PQ gamma formula. */
@@ -116,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
struct fixed31_32 l_pow_m1;
struct fixed31_32 base, div;
+ struct fixed31_32 base2;
if (dc_fixpt_lt(in_x, dc_fixpt_zero))
@@ -125,69 +164,80 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
dc_fixpt_div(dc_fixpt_one, m2));
base = dc_fixpt_sub(l_pow_m1, c1);
- if (dc_fixpt_lt(base, dc_fixpt_zero))
- base = dc_fixpt_zero;
-
div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
- *out_y = dc_fixpt_pow(dc_fixpt_div(base, div),
- dc_fixpt_div(dc_fixpt_one, m1));
+ base2 = dc_fixpt_div(base, div);
+ //avoid complex numbers
+ if (dc_fixpt_lt(base2, dc_fixpt_zero))
+ base2 = dc_fixpt_sub(dc_fixpt_zero, base2);
+
+
+ *out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1));
}
+
/*de gamma, none linear to linear*/
-static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+static void compute_hlg_eotf(struct fixed31_32 in_x,
+ struct fixed31_32 *out_y,
+ uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
struct fixed31_32 a;
struct fixed31_32 b;
struct fixed31_32 c;
struct fixed31_32 threshold;
- struct fixed31_32 reference_white_level;
+ struct fixed31_32 x;
+ struct fixed31_32 scaling_factor =
+ dc_fixpt_from_fraction(max_luminance_nits, sdr_white_level);
a = dc_fixpt_from_fraction(17883277, 100000000);
- if (is_light0_12) {
- /*light 0-12*/
- b = dc_fixpt_from_fraction(28466892, 100000000);
- c = dc_fixpt_from_fraction(55991073, 100000000);
- threshold = dc_fixpt_one;
- reference_white_level = dc_fixpt_half;
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ threshold = dc_fixpt_from_fraction(1, 2);
+
+ if (dc_fixpt_lt(in_x, threshold)) {
+ x = dc_fixpt_mul(in_x, in_x);
+ x = dc_fixpt_div_int(x, 3);
} else {
- /*light 0-1*/
- b = dc_fixpt_from_fraction(2372241, 100000000);
- c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
- threshold = dc_fixpt_from_fraction(1, 12);
- reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
+ x = dc_fixpt_sub(in_x, c);
+ x = dc_fixpt_div(x, a);
+ x = dc_fixpt_exp(x);
+ x = dc_fixpt_add(x, b);
+ x = dc_fixpt_div_int(x, 12);
}
- if (dc_fixpt_lt(threshold, in_x))
- *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
- else
- *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
+ *out_y = dc_fixpt_mul(x, scaling_factor);
+
}
/*re gamma, linear to none linear*/
-static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+static void compute_hlg_oetf(struct fixed31_32 in_x, struct fixed31_32 *out_y,
+ uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
struct fixed31_32 a;
struct fixed31_32 b;
struct fixed31_32 c;
- struct fixed31_32 reference_white_level;
+ struct fixed31_32 threshold;
+ struct fixed31_32 x;
+ struct fixed31_32 scaling_factor =
+ dc_fixpt_from_fraction(sdr_white_level, max_luminance_nits);
a = dc_fixpt_from_fraction(17883277, 100000000);
- if (is_light0_12) {
- /*light 0-12*/
- b = dc_fixpt_from_fraction(28466892, 100000000);
- c = dc_fixpt_from_fraction(55991073, 100000000);
- reference_white_level = dc_fixpt_from_fraction(4, 1);
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ threshold = dc_fixpt_from_fraction(1, 12);
+ x = dc_fixpt_mul(in_x, scaling_factor);
+
+
+ if (dc_fixpt_lt(x, threshold)) {
+ x = dc_fixpt_mul(x, dc_fixpt_from_fraction(3, 1));
+ *out_y = dc_fixpt_pow(x, dc_fixpt_half);
} else {
- /*light 0-1*/
- b = dc_fixpt_from_fraction(2372241, 100000000);
- c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
- reference_white_level = dc_fixpt_from_fraction(1, 3);
+ x = dc_fixpt_mul(x, dc_fixpt_from_fraction(12, 1));
+ x = dc_fixpt_sub(x, b);
+ x = dc_fixpt_log(x);
+ x = dc_fixpt_mul(a, x);
+ *out_y = dc_fixpt_add(x, c);
}
- if (dc_fixpt_lt(dc_fixpt_half, in_x))
- *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
- else
- *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
}
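
The rewritten helpers use the BT.2100 HLG constants: the OETF is sqrt(3x) below x = 1/12 and a*ln(12x - b) + c above it, and the EOTF is its inverse, with an extra nits scaling factor derived from sdr_white_level and max_luminance_nits. A plain-double sketch of the two piecewise curves; the luminance scaling done in the kernel helpers is deliberately left out so the round trip is exact:

#include <math.h>
#include <stdio.h>

/* Same constants as the fixed-point fractions used above. */
static const double a = 0.17883277, b = 0.28466892, c = 0.55991073;

/* Linear scene light (0..1) -> non-linear signal, as compute_hlg_oetf(). */
static double hlg_oetf(double x)
{
	return (x < 1.0 / 12.0) ? sqrt(3.0 * x) : a * log(12.0 * x - b) + c;
}

/* Non-linear signal -> linear light, as compute_hlg_eotf(). */
static double hlg_eotf(double v)
{
	return (v < 0.5) ? v * v / 3.0 : (exp((v - c) / a) + b) / 12.0;
}

int main(void)
{
	/* Round-tripping a few samples returns the input, confirming the
	 * two branches meet at the 1/12 (OETF) and 1/2 (EOTF) thresholds. */
	for (double x = 0.05; x <= 1.0; x += 0.15)
		printf("%.3f -> %.6f -> %.6f\n",
		       x, hlg_oetf(x), hlg_eotf(hlg_oetf(x)));
	return 0;
}
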
@@ -243,95 +293,144 @@ struct dividers {
struct fixed31_32 divider3;
};
-enum gamma_type_index {
- gamma_type_index_2_4,
- gamma_type_index_2_2,
- gamma_type_index_2_2_flat
-};
-static void build_coefficients(struct gamma_coefficients *coefficients, enum gamma_type_index type)
+static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_transfer_func_predefined type)
{
- static const int32_t numerator01[] = { 31308, 180000, 0};
- static const int32_t numerator02[] = { 12920, 4500, 0};
- static const int32_t numerator03[] = { 55, 99, 0};
- static const int32_t numerator04[] = { 55, 99, 0};
- static const int32_t numerator05[] = { 2400, 2200, 2200};
uint32_t i = 0;
uint32_t index = 0;
+ bool ret = true;
- if (type == gamma_type_index_2_2)
+ if (type == TRANSFER_FUNCTION_SRGB)
+ index = 0;
+ else if (type == TRANSFER_FUNCTION_BT709)
index = 1;
- else if (type == gamma_type_index_2_2_flat)
+ else if (type == TRANSFER_FUNCTION_GAMMA22)
index = 2;
+ else if (type == TRANSFER_FUNCTION_GAMMA24)
+ index = 3;
+ else if (type == TRANSFER_FUNCTION_GAMMA26)
+ index = 4;
+ else {
+ ret = false;
+ goto release;
+ }
do {
coefficients->a0[i] = dc_fixpt_from_fraction(
- numerator01[index], 10000000);
+ gamma_numerator01[index], 10000000);
coefficients->a1[i] = dc_fixpt_from_fraction(
- numerator02[index], 1000);
+ gamma_numerator02[index], 1000);
coefficients->a2[i] = dc_fixpt_from_fraction(
- numerator03[index], 1000);
+ gamma_numerator03[index], 1000);
coefficients->a3[i] = dc_fixpt_from_fraction(
- numerator04[index], 1000);
+ gamma_numerator04[index], 1000);
coefficients->user_gamma[i] = dc_fixpt_from_fraction(
- numerator05[index], 1000);
+ gamma_numerator05[index], 1000);
++i;
} while (i != ARRAY_SIZE(coefficients->a0));
+release:
+ return ret;
}
static struct fixed31_32 translate_from_linear_space(
- struct fixed31_32 arg,
- struct fixed31_32 a0,
- struct fixed31_32 a1,
- struct fixed31_32 a2,
- struct fixed31_32 a3,
- struct fixed31_32 gamma)
+ struct translate_from_linear_space_args *args)
{
const struct fixed31_32 one = dc_fixpt_from_int(1);
- if (dc_fixpt_lt(one, arg))
+ if (dc_fixpt_le(one, args->arg))
return one;
- if (dc_fixpt_le(arg, dc_fixpt_neg(a0)))
+ if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) {
+ scratch_1 = dc_fixpt_add(one, args->a3);
+ scratch_2 = dc_fixpt_pow(
+ dc_fixpt_neg(args->arg),
+ dc_fixpt_recip(args->gamma));
+ scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
+ scratch_1 = dc_fixpt_sub(args->a2, scratch_1);
+
+ return scratch_1;
+ } else if (dc_fixpt_le(args->a0, args->arg)) {
+ if (pow_buffer_ptr == 0) {
+ gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_recip(args->gamma));
+ }
+ scratch_1 = dc_fixpt_add(one, args->a3);
+ if (pow_buffer_ptr < 16)
+ scratch_2 = dc_fixpt_pow(args->arg,
+ dc_fixpt_recip(args->gamma));
+ else
+ scratch_2 = dc_fixpt_mul(gamma_of_2,
+ pow_buffer[pow_buffer_ptr%16]);
+
+ if (pow_buffer_ptr != -1) {
+ pow_buffer[pow_buffer_ptr%16] = scratch_2;
+ pow_buffer_ptr++;
+ }
+
+ scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
+ scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
+
+ return scratch_1;
+ }
+ else
+ return dc_fixpt_mul(args->arg, args->a1);
+}
+
+
+static struct fixed31_32 translate_from_linear_space_long(
+ struct translate_from_linear_space_args *args)
+{
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+ if (dc_fixpt_lt(one, args->arg))
+ return one;
+
+ if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
return dc_fixpt_sub(
- a2,
+ args->a2,
dc_fixpt_mul(
dc_fixpt_add(
one,
- a3),
+ args->a3),
dc_fixpt_pow(
- dc_fixpt_neg(arg),
- dc_fixpt_recip(gamma))));
- else if (dc_fixpt_le(a0, arg))
+ dc_fixpt_neg(args->arg),
+ dc_fixpt_recip(args->gamma))));
+ else if (dc_fixpt_le(args->a0, args->arg))
return dc_fixpt_sub(
dc_fixpt_mul(
dc_fixpt_add(
one,
- a3),
+ args->a3),
dc_fixpt_pow(
- arg,
- dc_fixpt_recip(gamma))),
- a2);
+ args->arg,
+ dc_fixpt_recip(args->gamma))),
+ args->a2);
else
return dc_fixpt_mul(
- arg,
- a1);
+ args->arg,
+ args->a1);
}
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf)
{
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
- return translate_from_linear_space(arg,
- dc_fixpt_zero,
- dc_fixpt_zero,
- dc_fixpt_zero,
- dc_fixpt_zero,
- gamma);
+ scratch_gamma_args.arg = arg;
+ scratch_gamma_args.a0 = dc_fixpt_zero;
+ scratch_gamma_args.a1 = dc_fixpt_zero;
+ scratch_gamma_args.a2 = dc_fixpt_zero;
+ scratch_gamma_args.a3 = dc_fixpt_zero;
+ scratch_gamma_args.gamma = gamma;
+
+ if (use_eetf)
+ return translate_from_linear_space_long(&scratch_gamma_args);
+
+ return translate_from_linear_space(&scratch_gamma_args);
}
+
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -365,18 +464,19 @@ static struct fixed31_32 translate_to_linear_space(
return linear;
}
-static inline struct fixed31_32 translate_from_linear_space_ex(
+static struct fixed31_32 translate_from_linear_space_ex(
struct fixed31_32 arg,
struct gamma_coefficients *coeff,
uint32_t color_index)
{
- return translate_from_linear_space(
- arg,
- coeff->a0[color_index],
- coeff->a1[color_index],
- coeff->a2[color_index],
- coeff->a3[color_index],
- coeff->user_gamma[color_index]);
+ scratch_gamma_args.arg = arg;
+ scratch_gamma_args.a0 = coeff->a0[color_index];
+ scratch_gamma_args.a1 = coeff->a1[color_index];
+ scratch_gamma_args.a2 = coeff->a2[color_index];
+ scratch_gamma_args.a3 = coeff->a3[color_index];
+ scratch_gamma_args.gamma = coeff->user_gamma[color_index];
+
+ return translate_from_linear_space(&scratch_gamma_args);
}
@@ -709,30 +809,42 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
}
}
-static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
+static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, enum gamma_type_index type)
+ const struct hw_x_point *coordinate_x, enum dc_transfer_func_predefined type)
{
uint32_t i;
+ bool ret = false;
- struct gamma_coefficients coeff;
+ struct gamma_coefficients *coeff;
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinate_x;
- build_coefficients(&coeff, type);
+ coeff = kvzalloc(sizeof(*coeff), GFP_KERNEL);
+ if (!coeff)
+ goto release;
+
+ if (!build_coefficients(coeff, type))
+ goto release;
+ memset(pow_buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32));
+ pow_buffer_ptr = 0; // see variable definition for more info
i = 0;
-
- while (i != hw_points_num + 1) {
+ while (i <= hw_points_num) {
/*TODO use y vs r,g,b*/
rgb->r = translate_from_linear_space_ex(
- coord_x->x, &coeff, 0);
+ coord_x->x, coeff, 0);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
++rgb;
++i;
}
+ pow_buffer_ptr = -1; // reset back to no optimize
+ ret = true;
+release:
+ kfree(coeff);
+ return ret;
}
static void hermite_spline_eetf(struct fixed31_32 input_x,
@@ -830,7 +942,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
struct fixed31_32 max_display;
struct fixed31_32 min_display;
struct fixed31_32 max_content;
- struct fixed31_32 min_content;
struct fixed31_32 clip = dc_fixpt_one;
struct fixed31_32 output;
bool use_eetf = false;
@@ -844,7 +955,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
max_display = dc_fixpt_from_int(fs_params->max_display);
min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
max_content = dc_fixpt_from_int(fs_params->max_content);
- min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
@@ -852,16 +962,14 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (fs_params->max_display < 100) // cap at 100 at the top
max_display = dc_fixpt_from_int(100);
- if (fs_params->min_content < fs_params->min_display)
- use_eetf = true;
- else
- min_content = min_display;
-
+ // only max used, we don't adjust min luminance
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
max_content = max_display;
+ if (!use_eetf)
+ pow_buffer_ptr = 0; // see var definition for more info
rgb += 32; // first 32 points have problems with fixed point, too small
coord_x += 32;
for (i = 32; i <= hw_points_num; i++) {
@@ -880,7 +988,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
- output = calculate_gamma22(scaledX);
+ output = calculate_gamma22(scaledX, use_eetf);
rgb->r = output;
rgb->g = output;
@@ -900,19 +1008,23 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
++coord_x;
++rgb;
}
+ pow_buffer_ptr = -1;
return true;
}
-static void build_degamma(struct pwl_float_data_ex *curve,
+static bool build_degamma(struct pwl_float_data_ex *curve,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, enum gamma_type_index type)
+ const struct hw_x_point *coordinate_x, enum dc_transfer_func_predefined type)
{
uint32_t i;
struct gamma_coefficients coeff;
uint32_t begin_index, end_index;
+ bool ret = false;
+
+ if (!build_coefficients(&coeff, type))
+ goto release;
- build_coefficients(&coeff, type);
i = 0;
/* X points is 2^-25 to 2^7
@@ -941,11 +1053,19 @@ static void build_degamma(struct pwl_float_data_ex *curve,
curve[i].b = dc_fixpt_one;
i++;
}
+ ret = true;
+release:
+ return ret;
}
+
+
+
+
static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, bool is_light0_12)
+ const struct hw_x_point *coordinate_x,
+ uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
uint32_t i;
@@ -953,9 +1073,9 @@ static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
const struct hw_x_point *coord_x = coordinate_x;
i = 0;
-
+ //check when i == 434
while (i != hw_points_num + 1) {
- compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
+ compute_hlg_eotf(coord_x->x, &rgb->r, sdr_white_level, max_luminance_nits);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
@@ -964,9 +1084,11 @@ static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
}
}
+
static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, bool is_light0_12)
+ const struct hw_x_point *coordinate_x,
+ uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
uint32_t i;
@@ -975,8 +1097,9 @@ static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
i = 0;
+ //when i == 471
while (i != hw_points_num + 1) {
- compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
+ compute_hlg_oetf(coord_x->x, &rgb->r, sdr_white_level, max_luminance_nits);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
@@ -1550,124 +1673,6 @@ static bool map_regamma_hw_to_x_user(
#define _EXTRA_POINTS 3
-bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct freesync_hdr_tf_params *fs_params)
-{
- struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
- struct dividers dividers;
-
- struct pwl_float_data *rgb_user = NULL;
- struct pwl_float_data_ex *rgb_regamma = NULL;
- struct gamma_pixel *axis_x = NULL;
- struct pixel_gamma_point *coeff = NULL;
- enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
- bool ret = false;
-
- if (output_tf->type == TF_TYPE_BYPASS)
- return false;
-
- /* we can use hardcoded curve for plain SRGB TF */
- if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
- output_tf->tf == TRANSFER_FUNCTION_SRGB) {
- if (ramp == NULL)
- return true;
- if ((ramp->is_logical_identity) ||
- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
- return true;
- }
-
- output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-
- if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
- rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
- sizeof(*rgb_user),
- GFP_KERNEL);
- if (!rgb_user)
- goto rgb_user_alloc_fail;
-
- axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
- GFP_KERNEL);
- if (!axis_x)
- goto axis_x_alloc_fail;
-
- dividers.divider1 = dc_fixpt_from_fraction(3, 2);
- dividers.divider2 = dc_fixpt_from_int(2);
- dividers.divider3 = dc_fixpt_from_fraction(5, 2);
-
- build_evenly_distributed_points(
- axis_x,
- ramp->num_entries,
- dividers);
-
- if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
- scale_gamma(rgb_user, ramp, dividers);
- else if (ramp->type == GAMMA_RGB_FLOAT_1024)
- scale_gamma_dx(rgb_user, ramp, dividers);
- }
-
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
-
- coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
- GFP_KERNEL);
- if (!coeff)
- goto coeff_alloc_fail;
-
- tf = output_tf->tf;
- if (tf == TRANSFER_FUNCTION_PQ) {
- tf_pts->end_exponent = 7;
- tf_pts->x_point_at_y1_red = 125;
- tf_pts->x_point_at_y1_green = 125;
- tf_pts->x_point_at_y1_blue = 125;
-
- build_pq(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x,
- output_tf->sdr_ref_white_level);
- } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
- fs_params != NULL && fs_params->skip_tm == 0) {
- build_freesync_hdr(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x,
- fs_params);
- } else {
- tf_pts->end_exponent = 0;
- tf_pts->x_point_at_y1_red = 1;
- tf_pts->x_point_at_y1_green = 1;
- tf_pts->x_point_at_y1_blue = 1;
-
- build_regamma(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? gamma_type_index_2_4 :
- tf == TRANSFER_FUNCTION_GAMMA22 ?
- gamma_type_index_2_2_flat : gamma_type_index_2_2);
- }
- map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axis_x, rgb_regamma,
- MAX_HW_POINTS, tf_pts,
- (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
- (ramp && ramp->type != GAMMA_CS_TFM_1D));
-
- if (ramp && ramp->type == GAMMA_CS_TFM_1D)
- apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
-
- ret = true;
-
- kvfree(coeff);
-coeff_alloc_fail:
- kvfree(rgb_regamma);
-rgb_regamma_alloc_fail:
- kvfree(axis_x);
-axis_x_alloc_fail:
- kvfree(rgb_user);
-rgb_user_alloc_fail:
- return ret;
-}
-
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
const struct regamma_lut *regamma)
{
@@ -1845,13 +1850,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
MAX_HW_POINTS,
coordinates_x);
else if (tf == TRANSFER_FUNCTION_SRGB ||
- tf == TRANSFER_FUNCTION_BT709)
+ tf == TRANSFER_FUNCTION_BT709 ||
+ tf == TRANSFER_FUNCTION_GAMMA22 ||
+ tf == TRANSFER_FUNCTION_GAMMA24 ||
+ tf == TRANSFER_FUNCTION_GAMMA26)
build_degamma(curve,
MAX_HW_POINTS,
coordinates_x,
- tf == TRANSFER_FUNCTION_SRGB ?
- gamma_type_index_2_4 : tf == TRANSFER_FUNCTION_GAMMA22 ?
- gamma_type_index_2_2_flat : gamma_type_index_2_2);
+ tf);
+ else if (tf == TRANSFER_FUNCTION_HLG)
+ build_hlg_degamma(curve,
+ MAX_HW_POINTS,
+ coordinates_x,
+ 80, 1000);
else if (tf == TRANSFER_FUNCTION_LINEAR) {
// just copy coordinates_x into curve
i = 0;
@@ -1869,10 +1880,28 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_green = 1;
tf_pts->x_point_at_y1_blue = 1;
- map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axis_x, curve,
- MAX_HW_POINTS, tf_pts,
- mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ if (input_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* just copy the precomputed degamma curve into tf_pts */
+ struct pwl_float_data_ex *curvePt = curve;
+ int i = 0;
+
+ while (i <= MAX_HW_POINTS) {
+ tf_pts->red[i] = curvePt->r;
+ tf_pts->green[i] = curvePt->g;
+ tf_pts->blue[i] = curvePt->b;
+ ++curvePt;
+ ++i;
+ }
+ } else {
+ //clamps to 0-1
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axis_x, curve,
+ MAX_HW_POINTS, tf_pts,
+ mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ }
+
+
+
if (ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
@@ -1891,14 +1920,14 @@ rgb_user_alloc_fail:
return ret;
}
-
-bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+static bool calculate_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points,
+ struct pwl_float_data_ex *rgb_regamma,
+ const struct freesync_hdr_tf_params *fs_params,
uint32_t sdr_ref_white_level)
{
uint32_t i;
bool ret = false;
- struct pwl_float_data_ex *rgb_regamma = NULL;
if (trans == TRANSFER_FUNCTION_UNITY ||
trans == TRANSFER_FUNCTION_LINEAR) {
@@ -1908,42 +1937,50 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
points->x_point_at_y1_blue = 1;
for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = coordinates_x[i].x;
- points->green[i] = coordinates_x[i].x;
- points->blue[i] = coordinates_x[i].x;
+ rgb_regamma[i].r = coordinates_x[i].x;
+ rgb_regamma[i].g = coordinates_x[i].x;
+ rgb_regamma[i].b = coordinates_x[i].x;
}
+
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
points->end_exponent = 7;
points->x_point_at_y1_red = 125;
points->x_point_at_y1_green = 125;
points->x_point_at_y1_blue = 125;
-
build_pq(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
sdr_ref_white_level);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_regamma[i].r;
- points->green[i] = rgb_regamma[i].g;
- points->blue[i] = rgb_regamma[i].b;
- }
+
ret = true;
+ } else if (trans == TRANSFER_FUNCTION_GAMMA22 &&
+ fs_params != NULL && fs_params->skip_tm == 0) {
+ build_freesync_hdr(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ fs_params);
- kvfree(rgb_regamma);
- } else if (trans == TRANSFER_FUNCTION_SRGB ||
- trans == TRANSFER_FUNCTION_BT709) {
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
+ ret = true;
+ } else if (trans == TRANSFER_FUNCTION_HLG) {
+ points->end_exponent = 4;
+ points->x_point_at_y1_red = 12;
+ points->x_point_at_y1_green = 12;
+ points->x_point_at_y1_blue = 12;
+
+ build_hlg_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ 80, 1000);
+
+ ret = true;
+ } else {
+ // trans == TRANSFER_FUNCTION_SRGB
+ // trans == TRANSFER_FUNCTION_BT709
+ // trans == TRANSFER_FUNCTION_GAMMA22
+ // trans == TRANSFER_FUNCTION_GAMMA24
+ // trans == TRANSFER_FUNCTION_GAMMA26
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
points->x_point_at_y1_green = 1;
@@ -1952,42 +1989,112 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
build_regamma(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
- trans == TRANSFER_FUNCTION_SRGB ?
- gamma_type_index_2_4 : trans == TRANSFER_FUNCTION_GAMMA22 ?
- gamma_type_index_2_2_flat : gamma_type_index_2_2);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_regamma[i].r;
- points->green[i] = rgb_regamma[i].g;
- points->blue[i] = rgb_regamma[i].b;
- }
+ trans);
+
ret = true;
+ }
- kvfree(rgb_regamma);
- } else if (trans == TRANSFER_FUNCTION_HLG ||
- trans == TRANSFER_FUNCTION_HLG12) {
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
+ return ret;
+}
- build_hlg_regamma(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x,
- trans == TRANSFER_FUNCTION_HLG12 ? true:false);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_regamma[i].r;
- points->green[i] = rgb_regamma[i].g;
- points->blue[i] = rgb_regamma[i].b;
- }
- ret = true;
- kvfree(rgb_regamma);
+bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+ const struct freesync_hdr_tf_params *fs_params)
+{
+ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+ struct dividers dividers;
+
+ struct pwl_float_data *rgb_user = NULL;
+ struct pwl_float_data_ex *rgb_regamma = NULL;
+ struct gamma_pixel *axis_x = NULL;
+ struct pixel_gamma_point *coeff = NULL;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ bool ret = false;
+
+ if (output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ /* we can use hardcoded curve for plain SRGB TF */
+ if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+ output_tf->tf == TRANSFER_FUNCTION_SRGB) {
+ if (ramp == NULL)
+ return true;
+ if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
+ (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ return true;
+ }
+
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+ if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
+ (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
+ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+
+ axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
+ GFP_KERNEL);
+ if (!axis_x)
+ goto axis_x_alloc_fail;
+
+ dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+ dividers.divider2 = dc_fixpt_from_int(2);
+ dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+ build_evenly_distributed_points(
+ axis_x,
+ ramp->num_entries,
+ dividers);
+
+ if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+ scale_gamma(rgb_user, ramp, dividers);
+ else if (ramp->type == GAMMA_RGB_FLOAT_1024)
+ scale_gamma_dx(rgb_user, ramp, dividers);
+ }
+
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+ coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+ GFP_KERNEL);
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+ tf = output_tf->tf;
+
+ ret = calculate_curve(tf,
+ tf_pts,
+ rgb_regamma,
+ fs_params,
+ output_tf->sdr_ref_white_level);
+
+ if (ret) {
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axis_x, rgb_regamma,
+ MAX_HW_POINTS, tf_pts,
+ (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
+ (ramp && ramp->type != GAMMA_CS_TFM_1D));
+
+ if (ramp && ramp->type == GAMMA_CS_TFM_1D)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
}
+
+ kvfree(coeff);
+coeff_alloc_fail:
+ kvfree(rgb_regamma);
rgb_regamma_alloc_fail:
+ kvfree(axis_x);
+axis_x_alloc_fail:
+ kvfree(rgb_user);
+rgb_user_alloc_fail:
return ret;
}
-
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points)
{
@@ -2024,8 +2131,10 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
kvfree(rgb_degamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
- trans == TRANSFER_FUNCTION_BT709 ||
- trans == TRANSFER_FUNCTION_GAMMA22) {
+ trans == TRANSFER_FUNCTION_BT709 ||
+ trans == TRANSFER_FUNCTION_GAMMA22 ||
+ trans == TRANSFER_FUNCTION_GAMMA24 ||
+ trans == TRANSFER_FUNCTION_GAMMA26) {
rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
sizeof(*rgb_degamma),
GFP_KERNEL);
@@ -2035,9 +2144,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
build_degamma(rgb_degamma,
MAX_HW_POINTS,
coordinates_x,
- trans == TRANSFER_FUNCTION_SRGB ?
- gamma_type_index_2_4 : trans == TRANSFER_FUNCTION_GAMMA22 ?
- gamma_type_index_2_2_flat : gamma_type_index_2_2);
+ trans);
for (i = 0; i <= MAX_HW_POINTS ; i++) {
points->red[i] = rgb_degamma[i].r;
points->green[i] = rgb_degamma[i].g;
@@ -2046,8 +2153,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_degamma);
- } else if (trans == TRANSFER_FUNCTION_HLG ||
- trans == TRANSFER_FUNCTION_HLG12) {
+ } else if (trans == TRANSFER_FUNCTION_HLG) {
rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
sizeof(*rgb_degamma),
GFP_KERNEL);
@@ -2057,7 +2163,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
build_hlg_degamma(rgb_degamma,
MAX_HW_POINTS,
coordinates_x,
- trans == TRANSFER_FUNCTION_HLG12 ? true:false);
+ 80, 1000);
for (i = 0; i <= MAX_HW_POINTS ; i++) {
points->red[i] = rgb_degamma[i].r;
points->green[i] = rgb_degamma[i].g;
@@ -2074,5 +2180,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
rgb_degamma_alloc_fail:
return ret;
}
-
-
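For orientation, the predefined output curves that calculate_curve() now dispatches on are ordinary power-law and PQ/HLG shapes; below is a minimal floating-point sketch of a gamma 2.2 regamma LUT over a normalized axis (illustrative only: the driver builds its curves in fixed31_32 arithmetic over the coordinates_x[] point distribution, so this is not the in-tree build_regamma()).

#include <math.h>
#include <stdio.h>

/* Illustrative gamma 2.2 regamma curve over a normalized [0, 1] axis.
 * Not the driver implementation: color_gamma.c uses fixed31_32 math and
 * a logarithmic point distribution (coordinates_x) instead of a linear one.
 */
static void build_gamma22_lut(double *lut, int num_points)
{
    for (int i = 0; i < num_points; i++) {
        double linear = (double)i / (num_points - 1); /* linear light in   */
        lut[i] = pow(linear, 1.0 / 2.2);              /* encoded value out */
    }
}

int main(void)
{
    double lut[16];
    int i;

    build_gamma22_lut(lut, 16);
    for (i = 0; i < 16; i++)
        printf("%2d: %.4f\n", i, lut[i]);
    return 0;
}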
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 369953fafadf..9994817a9a03 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -82,7 +82,17 @@ struct freesync_hdr_tf_params {
unsigned int skip_tm; // skip tm
};
+struct translate_from_linear_space_args {
+ struct fixed31_32 arg;
+ struct fixed31_32 a0;
+ struct fixed31_32 a1;
+ struct fixed31_32 a2;
+ struct fixed31_32 a3;
+ struct fixed31_32 gamma;
+};
+
void setup_x_points_distribution(void);
+void log_x_points_distribution(struct dal_logger *logger);
void precompute_pq(void);
void precompute_de_pq(void);
@@ -93,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
-bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
- struct dc_transfer_func_distributed_points *points,
- uint32_t sdr_ref_white_level);
-
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points);
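Since mod_color_calculate_curve() is removed from the header, callers that only need a predefined output curve now go through mod_color_calculate_regamma_params(); a hedged sketch of such a call site follows (the dc_transfer_func setup and the field values are assumptions for illustration, not code from this patch).

/* Sketch of a caller migrating from the removed mod_color_calculate_curve()
 * to mod_color_calculate_regamma_params(). Field values here are assumed
 * placeholders; only the call signature comes from this patch.
 */
static bool fill_pq_output_curve(struct dc_transfer_func *out_tf)
{
    out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
    out_tf->tf = TRANSFER_FUNCTION_PQ;
    out_tf->sdr_ref_white_level = 80; /* assumed SDR white level in nits */

    /* ramp == NULL: no user LUT; fs_params == NULL: no FreeSync HDR tone map */
    return mod_color_calculate_regamma_params(out_tf, NULL, false, false, NULL);
}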
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 7c20171a3b6d..b9992ebf77a6 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -52,93 +52,6 @@ struct core_freesync {
struct dc *dc;
};
-void setFieldWithMask(unsigned char *dest, unsigned int mask, unsigned int value)
-{
- unsigned int shift = 0;
-
- if (!mask || !dest)
- return;
-
- while (!((mask >> shift) & 1))
- shift++;
-
- //reset
- *dest = *dest & ~mask;
- //set
- //dont let value span past mask
- value = value & (mask >> shift);
- //insert value
- *dest = *dest | (value << shift);
-}
-
-// VTEM Byte Offset
-#define VRR_VTEM_PB0 0
-#define VRR_VTEM_PB1 1
-#define VRR_VTEM_PB2 2
-#define VRR_VTEM_PB3 3
-#define VRR_VTEM_PB4 4
-#define VRR_VTEM_PB5 5
-#define VRR_VTEM_PB6 6
-
-#define VRR_VTEM_MD0 7
-#define VRR_VTEM_MD1 8
-#define VRR_VTEM_MD2 9
-#define VRR_VTEM_MD3 10
-
-
-// VTEM Byte Masks
-//PB0
-#define MASK__VRR_VTEM_PB0__RESERVED0 0x01
-#define MASK__VRR_VTEM_PB0__SYNC 0x02
-#define MASK__VRR_VTEM_PB0__VFR 0x04
-#define MASK__VRR_VTEM_PB0__AFR 0x08
-#define MASK__VRR_VTEM_PB0__DS_TYPE 0x30
- //0: Periodic pseudo-static EM Data Set
- //1: Periodic dynamic EM Data Set
- //2: Unique EM Data Set
- //3: Reserved
-#define MASK__VRR_VTEM_PB0__END 0x40
-#define MASK__VRR_VTEM_PB0__NEW 0x80
-
-//PB1
-#define MASK__VRR_VTEM_PB1__RESERVED1 0xFF
-
-//PB2
-#define MASK__VRR_VTEM_PB2__ORGANIZATION_ID 0xFF
- //0: This is a Vendor Specific EM Data Set
- //1: This EM Data Set is defined by This Specification (HDMI 2.1 r102.clean)
- //2: This EM Data Set is defined by CTA-861-G
- //3: This EM Data Set is defined by VESA
-//PB3
-#define MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB 0xFF
-//PB4
-#define MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB 0xFF
-//PB5
-#define MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB 0xFF
-//PB6
-#define MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB 0xFF
-
-
-
-//PB7-27 (20 bytes):
-//PB7 = MD0
-#define MASK__VRR_VTEM_MD0__VRR_EN 0x01
-#define MASK__VRR_VTEM_MD0__M_CONST 0x02
-#define MASK__VRR_VTEM_MD0__RESERVED2 0x0C
-#define MASK__VRR_VTEM_MD0__FVA_FACTOR_M1 0xF0
-
-//MD1
-#define MASK__VRR_VTEM_MD1__BASE_VFRONT 0xFF
-
-//MD2
-#define MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
-#define MASK__VRR_VTEM_MD2__RB 0x04
-#define MASK__VRR_VTEM_MD2__RESERVED3 0xF8
-
-//MD3
-#define MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
-
-
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
container_of(mod_freesync, struct core_freesync, public)
@@ -209,7 +122,7 @@ static unsigned int calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
{
- unsigned int v_total = stream->timing.v_total;
+ unsigned int v_total;
unsigned int frame_duration_in_ns;
frame_duration_in_ns =
@@ -321,6 +234,10 @@ static void update_v_total_for_static_ramp(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total)
+ v_total = stream->timing.v_total;
+
in_out_vrr->adjust.v_total_min = v_total;
in_out_vrr->adjust.v_total_max = v_total;
}
@@ -337,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
@@ -410,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -426,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -435,6 +352,12 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
+ if (last_render_time_in_us / frames_to_insert <
+ in_out_vrr->min_duration_in_us){
+ frames_to_insert -= (frames_to_insert > 1) ?
+ 1 : 0;
+ }
+
if (frames_to_insert > 0)
inserted_frame_duration_in_us = last_render_time_in_us /
frames_to_insert;
@@ -458,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
- //Compute the exit refresh rate and exit frame duration
+ /* Compute the exit refresh rate and exit frame duration */
unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
+ (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
@@ -568,22 +491,64 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
return false;
}
-static void build_vrr_infopacket_header_vtem(enum signal_type signal,
+static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+ struct dc_info_packet *infopacket)
+{
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ infopacket->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ infopacket->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ infopacket->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+
+ /* PB5 = Reserved */
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ if (vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x01;
+
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x02;
+
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED)
+ infopacket->sb[6] |= 0x04;
+
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
+
+ /* PB8 = FreeSync Maximum refresh rate (Hz)
+ * Note: We should never go above the field rate of the mode timing set.
+ */
+ infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+
+
+ //FreeSync HDR
+ infopacket->sb[9] = 0;
+ infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
- // HEADER
-
- // HB0, HB1, HB2 indicates PacketType VTEMPacket
- infopacket->hb0 = 0x7F;
- infopacket->hb1 = 0xC0;
- infopacket->hb2 = 0x00; //sequence_index
-
- setFieldWithMask(&infopacket->sb[VRR_VTEM_PB0], MASK__VRR_VTEM_PB0__VFR, 1);
- setFieldWithMask(&infopacket->sb[VRR_VTEM_PB2], MASK__VRR_VTEM_PB2__ORGANIZATION_ID, 1);
- setFieldWithMask(&infopacket->sb[VRR_VTEM_PB3], MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB, 0);
- setFieldWithMask(&infopacket->sb[VRR_VTEM_PB4], MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB, 1);
- setFieldWithMask(&infopacket->sb[VRR_VTEM_PB5], MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB, 0);
- setFieldWithMask(&infopacket->sb[VRR_VTEM_PB6], MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB, 4);
+ if (app_tf != TRANSFER_FUNC_UNKNOWN) {
+ infopacket->valid = true;
+
+ infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
+
+ if (app_tf == TRANSFER_FUNC_GAMMA_22) {
+ infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 EOTF Active]
+ }
+ }
}
static void build_vrr_infopacket_header_v1(enum signal_type signal,
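For a concrete picture of the FS1/FS2 payload that build_vrr_infopacket_data() assembles above, the sketch below fills the same bytes for an assumed 48-144 Hz panel with VRR active (the buffer size and refresh range are illustrative assumptions, not values from this patch).

#include <stdio.h>

int main(void)
{
    unsigned char sb[30] = { 0 };
    unsigned int min_refresh_in_uhz = 48000000;   /* assumed 48 Hz  */
    unsigned int max_refresh_in_uhz = 144000000;  /* assumed 144 Hz */

    sb[1] = 0x1A;   /* AMD IEEE OUI (0x00001A), byte 0 */
    sb[2] = 0x00;   /* OUI byte 1 */
    sb[3] = 0x00;   /* OUI byte 2 */
    sb[6] |= 0x01;  /* FreeSync supported */
    sb[6] |= 0x02;  /* FreeSync enabled   */
    sb[6] |= 0x04;  /* FreeSync active    */
    sb[7] = (unsigned char)(min_refresh_in_uhz / 1000000);  /* 48  */
    sb[8] = (unsigned char)(max_refresh_in_uhz / 1000000);  /* 144 */

    printf("PB6=0x%02x PB7=%u PB8=%u\n", sb[6], sb[7], sb[8]);
    return 0;
}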
@@ -684,105 +649,6 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
}
}
-static void build_vrr_vtem_infopacket_data(const struct dc_stream_state *stream,
- const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
-{
- unsigned int fieldRateInHz;
-
- if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
- vrr->state == VRR_STATE_ACTIVE_FIXED) {
- setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 1);
- } else {
- setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 0);
- }
-
- if (!stream->timing.vic) {
- setFieldWithMask(&infopacket->sb[VRR_VTEM_MD1], MASK__VRR_VTEM_MD1__BASE_VFRONT,
- stream->timing.v_front_porch);
-
-
- /* TODO: In dal2, we check mode flags for a reduced blanking timing.
- * Need a way to relay that information to this function.
- * if("ReducedBlanking")
- * {
- * setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__RB, 1;
- * }
- */
-
- //TODO: DAL2 does FixPoint and rounding. Here we might need to account for that
- fieldRateInHz = (stream->timing.pix_clk_100hz * 100)/
- (stream->timing.h_total * stream->timing.v_total);
-
- setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98,
- fieldRateInHz >> 8);
- setFieldWithMask(&infopacket->sb[VRR_VTEM_MD3], MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07,
- fieldRateInHz);
-
- }
- infopacket->valid = true;
-}
-
-static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
-{
- /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
- infopacket->sb[1] = 0x1A;
-
- /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
- infopacket->sb[2] = 0x00;
-
- /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
- infopacket->sb[3] = 0x00;
-
- /* PB4 = Reserved */
-
- /* PB5 = Reserved */
-
- /* PB6 = [Bits 7:3 = Reserved] */
-
- /* PB6 = [Bit 0 = FreeSync Supported] */
- if (vrr->state != VRR_STATE_UNSUPPORTED)
- infopacket->sb[6] |= 0x01;
-
- /* PB6 = [Bit 1 = FreeSync Enabled] */
- if (vrr->state != VRR_STATE_DISABLED &&
- vrr->state != VRR_STATE_UNSUPPORTED)
- infopacket->sb[6] |= 0x02;
-
- /* PB6 = [Bit 2 = FreeSync Active] */
- if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
- vrr->state == VRR_STATE_ACTIVE_FIXED)
- infopacket->sb[6] |= 0x04;
-
- /* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
-
- /* PB8 = FreeSync Maximum refresh rate (Hz)
- * Note: We should never go above the field rate of the mode timing set.
- */
- infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
-
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
-}
-
-static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
- struct dc_info_packet *infopacket)
-{
- if (app_tf != TRANSFER_FUNC_UNKNOWN) {
- infopacket->valid = true;
-
- infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
-
- if (app_tf == TRANSFER_FUNC_GAMMA_22) {
- infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
- }
- }
-}
-
static void build_vrr_infopacket_checksum(unsigned int *payload_size,
struct dc_info_packet *infopacket)
{
@@ -835,21 +701,6 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
infopacket->valid = true;
}
-static void build_vrr_infopacket_vtem(const struct dc_stream_state *stream,
- const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
-{
- //VTEM info packet for HdmiVrr
-
- memset(infopacket, 0, sizeof(struct dc_info_packet));
-
- //VTEM Packet is structured differently
- build_vrr_infopacket_header_vtem(stream->signal, infopacket);
- build_vrr_vtem_infopacket_data(stream, vrr, infopacket);
-
- infopacket->valid = true;
-}
-
void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
@@ -862,16 +713,13 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
* Check if Freesync is supported. Return if false. If true,
* set the corresponding bit in the info packet
*/
- if (!vrr->supported || (!vrr->send_info_frame && packet_type != PACKET_TYPE_VTEM))
+ if (!vrr->supported || (!vrr->send_info_frame))
return;
switch (packet_type) {
case PACKET_TYPE_FS2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
- case PACKET_TYPE_VTEM:
- build_vrr_infopacket_vtem(stream, vrr, infopacket);
- break;
case PACKET_TYPE_VRR:
case PACKET_TYPE_FS1:
default:
@@ -887,8 +735,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
struct core_freesync *core_freesync = NULL;
unsigned long long nominal_field_rate_in_uhz = 0;
unsigned int refresh_range = 0;
- unsigned int min_refresh_in_uhz = 0;
- unsigned int max_refresh_in_uhz = 0;
+ unsigned long long min_refresh_in_uhz = 0;
+ unsigned long long max_refresh_in_uhz = 0;
if (mod_freesync == NULL)
return;
@@ -899,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
+ /* Rounded to the nearest Hz */
+ nominal_field_rate_in_uhz = 1000000ULL *
+ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
+
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
@@ -915,7 +767,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
min_refresh_in_uhz = nominal_field_rate_in_uhz;
if (!vrr_settings_require_update(core_freesync,
- in_config, min_refresh_in_uhz, max_refresh_in_uhz,
+ in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
in_out_vrr))
return;
@@ -931,36 +783,45 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
return;
} else {
- in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz;
+ in_out_vrr->min_refresh_in_uhz = (unsigned int)min_refresh_in_uhz;
in_out_vrr->max_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
- min_refresh_in_uhz);
+ (unsigned int)min_refresh_in_uhz);
- in_out_vrr->max_refresh_in_uhz = max_refresh_in_uhz;
+ in_out_vrr->max_refresh_in_uhz = (unsigned int)max_refresh_in_uhz;
in_out_vrr->min_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
- max_refresh_in_uhz);
+ (unsigned int)max_refresh_in_uhz);
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
in_out_vrr->fixed.ramping_active = in_config->ramping;
in_out_vrr->btr.btr_enabled = in_config->btr;
+
if (in_out_vrr->max_refresh_in_uhz <
2 * in_out_vrr->min_refresh_in_uhz)
in_out_vrr->btr.btr_enabled = false;
+
in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+ in_out_vrr->fixed.fixed_active = false;
+ in_out_vrr->fixed.target_refresh_in_uhz = 0;
+
in_out_vrr->btr.mid_point_in_us =
- in_out_vrr->min_duration_in_us +
- (in_out_vrr->max_duration_in_us -
- in_out_vrr->min_duration_in_us) / 2;
+ (in_out_vrr->min_duration_in_us +
+ in_out_vrr->max_duration_in_us) / 2;
if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) {
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
@@ -973,6 +834,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
refresh_range >= MIN_REFRESH_RANGE_IN_US) {
+
in_out_vrr->adjust.v_total_min =
calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
@@ -1130,13 +992,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
- struct core_freesync *core_freesync = NULL;
-
if (mod_freesync == NULL)
return;
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
if (vrr->supported) {
*v_total_min = vrr->adjust.v_total_min;
*v_total_max = vrr->adjust.v_total_max;
@@ -1151,14 +1009,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int total = stream->timing.h_total * stream->timing.v_total;
- /* Calculate nominal field rate for stream */
+ /* Calculate nominal field rate for stream, rounded up to nearest integer */
nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.h_total);
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.v_total);
+
+ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
return nominal_field_rate_in_uhz;
}
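The new BTR margin and the nearest-Hz rounding of the nominal field rate are simple integer arithmetic and can be checked on the side; the sketch below reproduces both for an assumed 48-144 Hz range (example values only, not taken from this patch).

#include <stdio.h>

#define BTR_MAX_MARGIN 2500

/* Frame duration in microseconds from a refresh rate in micro-Hertz,
 * mirroring calc_duration_in_us_from_refresh_in_uhz().
 */
static unsigned int duration_us_from_refresh_uhz(unsigned int refresh_in_uhz)
{
    return (unsigned int)(1000000000ULL * 1000ULL / refresh_in_uhz);
}

int main(void)
{
    unsigned int min_refresh_in_uhz = 48000000;   /* assumed 48 Hz  */
    unsigned int max_refresh_in_uhz = 144000000;  /* assumed 144 Hz */
    unsigned int max_duration_in_us = duration_us_from_refresh_uhz(min_refresh_in_uhz);
    unsigned int min_duration_in_us = duration_us_from_refresh_uhz(max_refresh_in_uhz);
    unsigned int margin_in_us = max_duration_in_us - 2 * min_duration_in_us;
    unsigned long long nominal_uhz = 143999560ULL; /* assumed raw field rate */

    if (margin_in_us > BTR_MAX_MARGIN)
        margin_in_us = BTR_MAX_MARGIN;

    /* Nearest-Hz rounding as done in mod_freesync_build_vrr_params(). */
    nominal_uhz = 1000000ULL * ((nominal_uhz + 500000ULL) / 1000000ULL);

    /* Expect: max=20833 us, min=6944 us, margin clamped to 2500 us,
     * nominal rounded to 144000000 uHz.
     */
    printf("max=%u us min=%u us margin=%u us nominal=%llu uHz\n",
           max_duration_in_us, min_duration_in_us, margin_in_us, nominal_uhz);
    return 0;
}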
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
new file mode 100644
index 000000000000..904424da01b5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'hdcp' sub-module of DAL.
+#
+
+HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
+ hdcp1_execution.o hdcp1_transition.o \
+ hdcp2_execution.o hdcp2_transition.o
+
+AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
+#$(info ************ DAL-HDCP_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
new file mode 100644
index 000000000000..8aa528e874c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+static void push_error_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_status status)
+{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
+ if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
+ trace->errors[trace->error_count].status = status;
+ trace->errors[trace->error_count].state_id = hdcp->state.id;
+ trace->error_count++;
+ HDCP_ERROR_TRACE(hdcp, status);
+ }
+
+ if (is_hdcp1(hdcp)) {
+ hdcp->connection.hdcp1_retry_count++;
+ } else if (is_hdcp2(hdcp)) {
+ hdcp->connection.hdcp2_retry_count++;
+ }
+}
+
+static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
+{
+ int i, is_auth_needed = 0;
+
+ /* if all displays on the link don't need authentication,
+ * hdcp is not desired
+ */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ is_auth_needed = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ is_auth_needed &&
+ !hdcp->connection.link.adjust.hdcp1.disable;
+}
+
+static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
+{
+ int i, is_auth_needed = 0;
+
+ /* if all displays on the link don't need authentication,
+ * hdcp is not desired
+ */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ is_auth_needed = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ is_auth_needed &&
+ !hdcp->connection.link.adjust.hdcp2.disable &&
+ !hdcp->connection.is_hdcp2_revoked;
+}
+
+static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* initialize transition input */
+ memset(input, 0, sizeof(union mod_hdcp_transition_input));
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* update topology event if hdcp is not desired */
+ status = mod_hdcp_add_display_topology(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_execution(hdcp,
+ event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp2_states(hdcp)) {
+ status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2);
+ } else if (is_in_hdcp2_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp2_dp_execution(hdcp,
+ event_ctx, &input->hdcp2);
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->unexpected_event)
+ goto out;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (is_dp_hdcp(hdcp))
+ if (is_cp_desired_hdcp2(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (is_cp_desired_hdcp2(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX);
+ } else if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ increment_stay_counter(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp2_states(hdcp)) {
+ status = mod_hdcp_hdcp2_transition(hdcp,
+ event_ctx, &input->hdcp2, output);
+ } else if (is_in_hdcp2_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp2_dp_transition(hdcp,
+ event_ctx, &input->hdcp2, output);
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_hdcp1(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) {
+ /* TODO - update psp to unify create session failure
+ * recovery between hdcp1 and 2.
+ */
+ mod_hdcp_hdcp1_destroy_session(hdcp);
+
+ }
+ if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_hdcp2(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp2.create_session == PASS) {
+ status = mod_hdcp_hdcp2_destroy_session(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ }
+
+out:
+ /* stop callback and watchdog requests from previous authentication*/
+ output->watchdog_timer_stop = 1;
+ output->callback_stop = 1;
+ return status;
+}
+
+static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (current_state(hdcp) != HDCP_UNINITIALIZED) {
+ HDCP_TOP_RESET_CONN_TRACE(hdcp);
+ set_state_id(hdcp, output, HDCP_UNINITIALIZED);
+ }
+ memset(&hdcp->connection, 0, sizeof(hdcp->connection));
+out:
+ return status;
+}
+
+/*
+ * Implementation of functions in mod_hdcp.h
+ */
+size_t mod_hdcp_get_memory_size(void)
+{
+ return sizeof(struct mod_hdcp);
+}
+
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config)
+{
+ struct mod_hdcp_output output;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ memset(&output, 0, sizeof(output));
+ hdcp->config = *config;
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, &output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_output output;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ memset(&output, 0, sizeof(output));
+ status = reset_connection(hdcp, &output);
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ else
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display_container = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* skip inactive display */
+ if (display->state != MOD_HDCP_DISPLAY_ACTIVE) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* check existing display container */
+ if (get_active_display_at_index(hdcp, display->index)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* find an empty display container */
+ display_container = get_empty_display_container(hdcp);
+ if (!display_container) {
+ status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND;
+ goto out;
+ }
+
+ /* reset existing authentication status */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+
+ /* reset retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication */
+ if (current_state(hdcp) != HDCP_INITIALIZED)
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* stop current authentication */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* remove display */
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+
+ /* clear retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication for remaining displays*/
+ if (get_active_display_count(hdcp) > 0)
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
+ output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ goto out;
+ }
+
+ /* populate query */
+ query->link = &hdcp->connection.link;
+ query->display = display;
+ query->trace = &hdcp->connection.trace;
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ if (is_display_encryption_enabled(display)) {
+ if (is_hdcp1(hdcp)) {
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+ } else if (is_hdcp2(hdcp)) {
+ if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
+ else if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
+ else
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON;
+ }
+ } else {
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status exec_status, trans_status, reset_status, status;
+ struct mod_hdcp_event_context event_ctx;
+
+ HDCP_EVENT_TRACE(hdcp, event);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+ memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context));
+ event_ctx.event = event;
+
+ /* execute and transition */
+ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input);
+ trans_status = transition(
+ hdcp, &event_ctx, &hdcp->auth.trans_input, output);
+ if (trans_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE;
+ push_error_status(hdcp, status);
+ } else {
+ status = exec_status;
+ push_error_status(hdcp, status);
+ }
+
+ /* reset authentication if needed */
+ if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
+ HDCP_FULL_DDC_TRACE(hdcp);
+ reset_status = reset_authentication(hdcp, output);
+ if (reset_status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, reset_status);
+ }
+ return status;
+}
+
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal)
+{
+ enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ mode = MOD_HDCP_MODE_DEFAULT;
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ mode = MOD_HDCP_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ mode = MOD_HDCP_MODE_DP_MST;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
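The module exposes no threads or timers of its own: the display manager allocates the opaque state with mod_hdcp_get_memory_size(), initializes it through mod_hdcp_setup() and mod_hdcp_add_display(), and re-enters it via mod_hdcp_process_event() whenever an output requests a callback or watchdog. A hedged caller-side sketch follows (the allocation and all config/link/display values are assumptions, not part of this patch).

#include <linux/slab.h>
#include "mod_hdcp.h"

/* Sketch of a display-manager-side driver for the new HDCP module.
 * Only the call sequence mirrors the interfaces added here; the values
 * placed in config/link/display are placeholders supplied by the caller.
 */
static struct mod_hdcp *start_hdcp(struct mod_hdcp_config *config,
                                   struct mod_hdcp_link *link,
                                   struct mod_hdcp_display *display)
{
    struct mod_hdcp_output output;
    struct mod_hdcp *hdcp;

    /* The module owns no memory: the caller allocates the opaque blob. */
    hdcp = kzalloc(mod_hdcp_get_memory_size(), GFP_KERNEL);
    if (!hdcp)
        return NULL;

    if (mod_hdcp_setup(hdcp, config) != MOD_HDCP_STATUS_SUCCESS)
        goto fail;

    if (mod_hdcp_add_display(hdcp, link, display, &output) !=
        MOD_HDCP_STATUS_SUCCESS)
        goto fail;

    /* output.callback_needed / output.watchdog_timer_needed ask the caller
     * to schedule mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK, ...)
     * or the watchdog event after the delays requested in output.
     */
    return hdcp;

fail:
    kfree(hdcp);
    return NULL;
}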
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
new file mode 100644
index 000000000000..af78e4f1be68
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef HDCP_H_
+#define HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp_log.h"
+
+#include <drm/drm_hdcp.h>
+#include <drm/drm_dp_helper.h>
+
+enum mod_hdcp_trans_input_result {
+ UNKNOWN = 0,
+ PASS,
+ FAIL
+};
+
+struct mod_hdcp_transition_input_hdcp1 {
+ uint8_t bksv_read;
+ uint8_t bksv_validation;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t an_write;
+ uint8_t aksv_write;
+ uint8_t ainfo_write;
+ uint8_t bcaps_read;
+ uint8_t r0p_read;
+ uint8_t rx_validation;
+ uint8_t encryption;
+ uint8_t link_maintenance;
+ uint8_t ready_check;
+ uint8_t bstatus_read;
+ uint8_t max_cascade_check;
+ uint8_t max_devs_check;
+ uint8_t device_count_check;
+ uint8_t ksvlist_read;
+ uint8_t vp_read;
+ uint8_t ksvlist_vp_validation;
+
+ uint8_t hdcp_capable_dp;
+ uint8_t binfo_read_dp;
+ uint8_t r0p_available_dp;
+ uint8_t link_integrity_check;
+ uint8_t reauth_request_check;
+ uint8_t stream_encryption_dp;
+};
+
+struct mod_hdcp_transition_input_hdcp2 {
+ uint8_t hdcp2version_read;
+ uint8_t hdcp2_capable_check;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t ake_init_prepare;
+ uint8_t ake_init_write;
+ uint8_t rxstatus_read;
+ uint8_t ake_cert_available;
+ uint8_t ake_cert_read;
+ uint8_t ake_cert_validation;
+ uint8_t stored_km_write;
+ uint8_t no_stored_km_write;
+ uint8_t h_prime_available;
+ uint8_t h_prime_read;
+ uint8_t pairing_available;
+ uint8_t pairing_info_read;
+ uint8_t h_prime_validation;
+ uint8_t lc_init_prepare;
+ uint8_t lc_init_write;
+ uint8_t l_prime_available_poll;
+ uint8_t l_prime_read;
+ uint8_t l_prime_validation;
+ uint8_t eks_prepare;
+ uint8_t eks_write;
+ uint8_t enable_encryption;
+ uint8_t reauth_request_check;
+ uint8_t rx_id_list_read;
+ uint8_t device_count_check;
+ uint8_t rx_id_list_validation;
+ uint8_t repeater_auth_ack_write;
+ uint8_t prepare_stream_manage;
+ uint8_t stream_manage_write;
+ uint8_t stream_ready_available;
+ uint8_t stream_ready_read;
+ uint8_t stream_ready_validation;
+
+ uint8_t rx_caps_read_dp;
+ uint8_t content_stream_type_write;
+ uint8_t link_integrity_check_dp;
+ uint8_t stream_encryption_dp;
+};
+
+union mod_hdcp_transition_input {
+ struct mod_hdcp_transition_input_hdcp1 hdcp1;
+ struct mod_hdcp_transition_input_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_message_hdcp1 {
+ uint8_t an[8];
+ uint8_t aksv[5];
+ uint8_t ainfo;
+ uint8_t bksv[5];
+ uint16_t r0p;
+ uint8_t bcaps;
+ uint16_t bstatus;
+ uint8_t ksvlist[635];
+ uint16_t ksvlist_size;
+ uint8_t vp[20];
+
+ uint16_t binfo_dp;
+};
+
+struct mod_hdcp_message_hdcp2 {
+ uint8_t hdcp2version_hdmi;
+ uint8_t rxcaps_dp[3];
+ uint8_t rxstatus[2];
+
+ uint8_t ake_init[12];
+ uint8_t ake_cert[534];
+ uint8_t ake_no_stored_km[129];
+ uint8_t ake_stored_km[33];
+ uint8_t ake_h_prime[33];
+ uint8_t ake_pairing_info[17];
+ uint8_t lc_init[9];
+ uint8_t lc_l_prime[33];
+ uint8_t ske_eks[25];
+ uint8_t rx_id_list[177]; // 22 + 5 * 31
+ uint16_t rx_id_list_size;
+ uint8_t repeater_auth_ack[17];
+ uint8_t repeater_auth_stream_manage[68]; // 6 + 2 * 31
+ uint16_t stream_manage_size;
+ uint8_t repeater_auth_stream_ready[33];
+ uint8_t rxstatus_dp;
+ uint8_t content_stream_type_dp[2];
+};
+
+union mod_hdcp_message {
+ struct mod_hdcp_message_hdcp1 hdcp1;
+ struct mod_hdcp_message_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_auth_counters {
+ uint8_t stream_management_retry_count;
+};
+
+/* contains values per connection */
+struct mod_hdcp_connection {
+ struct mod_hdcp_link link;
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
+ uint8_t is_repeater;
+ uint8_t is_km_stored;
+ uint8_t is_hdcp2_revoked;
+ struct mod_hdcp_trace trace;
+ uint8_t hdcp1_retry_count;
+ uint8_t hdcp2_retry_count;
+};
+
+/* contains values per authentication cycle */
+struct mod_hdcp_authentication {
+ uint32_t id;
+ union mod_hdcp_message msg;
+ union mod_hdcp_transition_input trans_input;
+ struct mod_hdcp_auth_counters count;
+};
+
+/* contains values per state change */
+struct mod_hdcp_state {
+ uint8_t id;
+ uint32_t stay_count;
+};
+
+/* per event in a state */
+struct mod_hdcp_event_context {
+ enum mod_hdcp_event event;
+ uint8_t rx_id_list_ready;
+ uint8_t unexpected_event;
+};
+
+struct mod_hdcp {
+ /* per link */
+ struct mod_hdcp_config config;
+ /* per connection */
+ struct mod_hdcp_connection connection;
+ /* per authentication attempt */
+ struct mod_hdcp_authentication auth;
+ /* per state in an authentication */
+ struct mod_hdcp_state state;
+ /* reserved memory buffer */
+ uint8_t buf[2025];
+};
+
+enum mod_hdcp_initial_state_id {
+ HDCP_UNINITIALIZED = 0x0,
+ HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED,
+ HDCP_INITIALIZED,
+ HDCP_CP_NOT_DESIRED,
+ HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED
+};
+
+enum mod_hdcp_hdcp1_state_id {
+ HDCP1_STATE_START = HDCP_INITIAL_STATE_END,
+ H1_A0_WAIT_FOR_ACTIVE_RX,
+ H1_A1_EXCHANGE_KSVS,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER,
+ H1_A45_AUTHENTICATED,
+ H1_A8_WAIT_FOR_READY,
+ H1_A9_READ_KSV_LIST,
+ HDCP1_STATE_END = H1_A9_READ_KSV_LIST
+};
+
+enum mod_hdcp_hdcp1_dp_state_id {
+ HDCP1_DP_STATE_START = HDCP1_STATE_END,
+ D1_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D1_A1_EXCHANGE_KSVS,
+ D1_A23_WAIT_FOR_R0_PRIME,
+ D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER,
+ D1_A4_AUTHENTICATED,
+ D1_A6_WAIT_FOR_READY,
+ D1_A7_READ_KSV_LIST,
+ HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
+};
+
+enum mod_hdcp_hdcp2_state_id {
+ HDCP2_STATE_START = HDCP1_DP_STATE_END,
+ H2_A0_KNOWN_HDCP2_CAPABLE_RX,
+ H2_A1_SEND_AKE_INIT,
+ H2_A1_VALIDATE_AKE_CERT,
+ H2_A1_SEND_NO_STORED_KM,
+ H2_A1_READ_H_PRIME,
+ H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME,
+ H2_A1_SEND_STORED_KM,
+ H2_A1_VALIDATE_H_PRIME,
+ H2_A2_LOCALITY_CHECK,
+ H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER,
+ H2_ENABLE_ENCRYPTION,
+ H2_A5_AUTHENTICATED,
+ H2_A6_WAIT_FOR_RX_ID_LIST,
+ H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK,
+ H2_A9_SEND_STREAM_MANAGEMENT,
+ H2_A9_VALIDATE_STREAM_READY,
+ HDCP2_STATE_END = H2_A9_VALIDATE_STREAM_READY,
+};
+
+enum mod_hdcp_hdcp2_dp_state_id {
+ HDCP2_DP_STATE_START = HDCP2_STATE_END,
+ D2_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D2_A1_SEND_AKE_INIT,
+ D2_A1_VALIDATE_AKE_CERT,
+ D2_A1_SEND_NO_STORED_KM,
+ D2_A1_READ_H_PRIME,
+ D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME,
+ D2_A1_SEND_STORED_KM,
+ D2_A1_VALIDATE_H_PRIME,
+ D2_A2_LOCALITY_CHECK,
+ D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER,
+ D2_SEND_CONTENT_STREAM_TYPE,
+ D2_ENABLE_ENCRYPTION,
+ D2_A5_AUTHENTICATED,
+ D2_A6_WAIT_FOR_RX_ID_LIST,
+ D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK,
+ D2_A9_SEND_STREAM_MANAGEMENT,
+ D2_A9_VALIDATE_STREAM_READY,
+ HDCP2_DP_STATE_END = D2_A9_VALIDATE_STREAM_READY,
+ HDCP_STATE_END = HDCP2_DP_STATE_END,
+};
+
+/* hdcp1 executions and transitions */
+typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str);
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+
+/* hdcp2 executions and transitions */
+enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input);
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input);
+enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output);
+
+/* log functions */
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size);
+/* TODO: add adjustment log */
+
+/* psp functions */
+enum mod_hdcp_status mod_hdcp_add_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_remove_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+
+/* ddc functions */
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+
+/* hdcp version helpers */
+static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
+ hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT);
+}
+
+/* hdcp state helpers */
+static inline uint8_t current_state(struct mod_hdcp *hdcp)
+{
+ return hdcp->state.id;
+}
+
+static inline void set_state_id(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output, uint8_t id)
+{
+ memset(&hdcp->state, 0, sizeof(hdcp->state));
+ hdcp->state.id = id;
+ /* callback timer should be reset per state */
+ output->callback_stop = 1;
+ output->watchdog_timer_stop = 1;
+ HDCP_NEXT_STATE_TRACE(hdcp, id, output);
+}
+
+static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_STATE_START &&
+ current_state(hdcp) <= HDCP1_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_DP_STATE_START &&
+ current_state(hdcp) <= HDCP1_DP_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp2_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP2_STATE_START &&
+ current_state(hdcp) <= HDCP2_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP2_DP_STATE_START &&
+ current_state(hdcp) <= HDCP2_DP_STATE_END);
+}
+
+static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
+}
+
+static inline uint8_t is_hdcp2(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp2_states(hdcp) || is_in_hdcp2_dp_states(hdcp));
+}
+
+static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
+}
+
+static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_INITIALIZED;
+}
+
+/* transition operation helpers */
+static inline void increment_stay_counter(struct mod_hdcp *hdcp)
+{
+ hdcp->state.stay_count++;
+}
+
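+/*
+ * schedule a delayed retry: request a callback after 'time' ms, cancel the
+ * watchdog timer and flag the state machine for reset.
+ */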
+static inline void fail_and_restart_in_ms(uint16_t time,
+ enum mod_hdcp_status *status,
+ struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+ output->watchdog_timer_needed = 0;
+ output->watchdog_timer_delay = 0;
+ *status = MOD_HDCP_STATUS_RESET_NEEDED;
+}
+
+static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+}
+
+static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
+ struct mod_hdcp_output *output)
+{
+ output->watchdog_timer_needed = 1;
+ output->watchdog_timer_delay = time;
+}
+
+/* connection topology helpers */
+static inline uint8_t is_display_active(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
+}
+
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
+static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+}
+
+static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t active_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_active(&hdcp->connection.displays[i]))
+ active_count++;
+ return active_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline struct mod_hdcp_display *get_first_added_display(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_active_display_at_index(
+ struct mod_hdcp *hdcp, uint8_t index)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (hdcp->connection.displays[i].index == index &&
+ is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_empty_display_container(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (!is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline void reset_retry_counts(struct mod_hdcp *hdcp)
+{
+ hdcp->connection.hdcp1_retry_count = 0;
+ hdcp->connection.hdcp2_retry_count = 0;
+}
+
+#endif /* HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
new file mode 100644
index 000000000000..37670db64855
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
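+/* a valid bksv contains exactly 20 ones and 20 zeros (HDCP 1.x spec) */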
+static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
+{
+ uint64_t n = 0;
+ uint8_t count = 0;
+
+ memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+
+ while (n) {
+ count++;
+ n &= (n - 1);
+ }
+ return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+}
+
+static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
+{
+ if (is_dp_hdcp(hdcp))
+ return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+ return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
+}
+
+static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ DP_BSTATUS_R0_PRIME_READY) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ }
+ return status;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus &
+ DP_BSTATUS_LINK_FAILURE) ?
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ?
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8)
+ ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
+ : MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8)
+ ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
+ : MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
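+/* DEVICE_COUNT is carried in Binfo for DP and in Bstatus for HDMI */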
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return is_dp_hdcp(hdcp) ?
+ DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) :
+ DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus);
+}
+
+static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_an,
+ &input->an_write, &status,
+ hdcp, "an_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv,
+ &input->aksv_write, &status,
+ hdcp, "aksv_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(validate_bksv,
+ &input->bksv_validation, &status,
+ hdcp, "bksv_validation"))
+ goto out;
+ if (hdcp->auth.msg.hdcp1.ainfo) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo,
+ &input->ainfo_write, &status,
+ hdcp, "ainfo_write"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status computations_validate_rx_test_for_repeater(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p,
+ &input->r0p_read, &status,
+ hdcp, "r0p_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx,
+ &input->rx_validation, &status,
+ hdcp, "rx_validation"))
+ goto out;
+ if (hdcp->connection.is_repeater) {
+ if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption)
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+ &input->link_maintenance, &status,
+ hdcp, "link_maintenance"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_ksv_ready,
+ &input->ready_check, &status,
+ hdcp, "ready_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ uint8_t device_count;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo,
+ &input->binfo_read_dp, &status,
+ hdcp, "binfo_read_dp"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_no_max_cascade,
+ &input->max_cascade_check, &status,
+ hdcp, "max_cascade_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_max_devs,
+ &input->max_devs_check, &status,
+ hdcp, "max_devs_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check, &status,
+ hdcp, "device_count_check"))
+ goto out;
+ device_count = get_device_count(hdcp);
+ hdcp->auth.msg.hdcp1.ksvlist_size = device_count * 5;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist,
+ &input->ksvlist_read, &status,
+ hdcp, "ksvlist_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp,
+ &input->vp_read, &status,
+ hdcp, "vp_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp,
+ &input->ksvlist_vp_validation, &status,
+ hdcp, "ksvlist_vp_validation"))
+ goto out;
+ if (input->encryption != PASS)
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp,
+ &input->hdcp_capable_dp, &status,
+ hdcp, "hdcp_capable_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_r0p_available_dp,
+ &input->r0p_available_dp, &status,
+ hdcp, "r0p_available_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+out:
+ return status;
+}
+
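+/*
+ * run one authentication action, latch its first PASS/FAIL result into the
+ * transition input flag (with a trace) and return nonzero on success.
+ */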
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
+{
+ *status = func(hdcp);
+ if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, str);
+ *flag = PASS;
+ } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
+ HDCP_INPUT_FAIL_TRACE(hdcp, str);
+ *flag = FAIL;
+ }
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ status = wait_for_active_rx(hdcp, event_ctx, input);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case H1_A45_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ status = wait_for_r0_prime_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(
+ hdcp, event_ctx, input);
+ break;
+ case D1_A4_AUTHENTICATED:
+ status = authenticated_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
new file mode 100644
index 000000000000..76edcbe51f71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
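+/*
+ * transition tables: the PASS/FAIL flags latched by the execution step
+ * select the next state; callback and watchdog delays are in milliseconds.
+ */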
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ if (input->bksv_read != PASS || input->bcaps_read != PASS) {
+ /* 1A-04: keep retrying on port access failure */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(300, output);
+ set_state_id(hdcp, output,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ if (input->bcaps_read != PASS ||
+ input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
+ /* 1A-06: consider invalid r0' a failure */
+ /* 1A-08: consider bksv listed in SRM a failure */
+ /*
+ * a slow RX may fail rx validation when it is not
+ * ready. give it more time to react before retrying.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (!conn->is_repeater && input->encryption != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case H1_A45_AUTHENTICATED:
+ if (input->link_maintenance != PASS) {
+ /* 1A-07: consider invalid ri' a failure */
+ /* 1A-07a: consider read ri' not returned a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-03: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ /* continue ksv list READY polling */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A9_READ_KSV_LIST);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ if (input->bstatus_read != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS ||
+ input->device_count_check != PASS ||
+ input->ksvlist_read != PASS ||
+ input->vp_read != PASS ||
+ input->ksvlist_vp_validation != PASS ||
+ input->encryption != PASS) {
+ /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */
+ /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-04: consider invalid v' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->bcaps_read != PASS) {
+ /* 1A-04: no authentication on bcaps read failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->hdcp_capable_dp != PASS) {
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ if (input->bstatus_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->r0p_available_dp != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ if (input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1A-06: consider invalid r0' a failure
+ * after 3 attempts.
+ * 1A-08: consider bksv listed in SRM a failure
+ */
+ /*
+ * a slow RX may fail rx validation when it is not
+ * ready. give it more time to react before retrying.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
+ }
+ break;
+ } else if ((!conn->is_repeater && input->encryption != PASS) ||
+ (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
+ } else {
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case D1_A4_AUTHENTICATED:
+ if (input->link_integrity_check != PASS ||
+ input->reauth_request_check != PASS) {
+ /* 1A-07: restart hdcp on a link integrity failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ if (input->link_integrity_check == FAIL ||
+ input->reauth_request_check == FAIL) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-04: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A7_READ_KSV_LIST);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ if (input->binfo_read_dp != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS) {
+ /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->device_count_check != PASS) {
+ /*
+ * some slow dongles don't update the device count
+ * as soon as downstream is connected.
+ * give them more time to react.
+ */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (input->ksvlist_read != PASS ||
+ input->vp_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ksvlist_vp_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1B-05: consider invalid v' a failure
+ * after 3 attempts.
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if (input->encryption != PASS ||
+ (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
new file mode 100644
index 000000000000..f730b94ac3c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "hdcp.h"
+
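+/*
+ * HDMI RxStatus is two bytes: byte 0 carries the low message size bits,
+ * byte 1 carries the high bits plus the READY and REAUTH_REQ flags.
+ */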
+static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp)
+{
+ uint8_t is_ready = 0;
+
+ if (is_dp_hdcp(hdcp))
+ is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
+ else
+ is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
+ (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
+ return is_ready ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) &&
+ HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ else
+ status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_reauthentication_request(
+ struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status ret = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_dp_hdcp(hdcp))
+ ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
+ MOD_HDCP_STATUS_SUCCESS;
+ return ret;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_failure_dp(
+ struct mod_hdcp *hdcp)
+{
+ return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint16_t size;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else {
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status == MOD_HDCP_STATUS_SUCCESS) {
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING;
+ }
+ }
+ return status;
+}
+
+static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint8_t size;
+
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
+ } else {
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint8_t size;
+
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
+ } else {
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
+ }
+out:
+ return status;
+}
+
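+/* HDMI only: poll RxStatus up to ~20 ms for the expected L' message size */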
+static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint8_t size;
+ uint16_t max_wait = 20; // units of ms
+ uint16_t num_polls = 5;
+ uint16_t wait_time = max_wait / num_polls;
+
+ if (is_dp_hdcp(hdcp))
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ else
+ for (; num_polls; num_polls--) {
+ msleep(wait_time);
+
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING;
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ break;
+ }
+ return status;
+}
+
+static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint8_t size;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ } else {
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING;
+ }
+out:
+ return status;
+}
+
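+/* the 5-bit DEVICE_COUNT in RxInfo is split across two bytes of the rx id list */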
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+ (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+}
+
+static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
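+/*
+ * read RxStatus and record reauthentication requests, DP link integrity
+ * failures and rx id list readiness for the transition tables.
+ */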
+static uint8_t process_rxstatus(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ enum mod_hdcp_status *status)
+{
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus,
+ &input->rxstatus_read, status,
+ hdcp, "rxstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_reauthentication_request,
+ &input->reauth_request_check, status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp,
+ &input->link_integrity_check_dp, status,
+ hdcp, "link_integrity_check_dp"))
+ goto out;
+ }
+ if (hdcp->connection.is_repeater)
+ if (check_receiver_id_list_ready(hdcp) ==
+ MOD_HDCP_STATUS_SUCCESS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready");
+ event_ctx->rx_id_list_ready = 1;
+ if (is_dp_hdcp(hdcp))
+ hdcp->auth.msg.hdcp2.rx_id_list_size =
+ sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+ else
+ hdcp->auth.msg.hdcp2.rx_id_list_size =
+ HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ }
+out:
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
+static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
+ &input->hdcp2version_read, &status,
+ hdcp, "hdcp2version_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
+ &input->hdcp2_capable_check, &status,
+ hdcp, "hdcp2_capable"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init,
+ &input->ake_init_prepare, &status,
+ hdcp, "ake_init_prepare"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init,
+ &input->ake_init_write, &status,
+ hdcp, "ake_init_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(check_ake_cert_available,
+ &input->ake_cert_available, &status,
+ hdcp, "ake_cert_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert,
+ &input->ake_cert_read, &status,
+ hdcp, "ake_cert_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert,
+ &input->ake_cert_validation, &status,
+ hdcp, "ake_cert_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km,
+ &input->no_stored_km_write, &status,
+ hdcp, "no_stored_km_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(check_h_prime_available,
+ &input->h_prime_available, &status,
+ hdcp, "h_prime_available"))
+ goto out;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
+ &input->h_prime_read, &status,
+ hdcp, "h_prime_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_pairing_info_and_validate_h_prime(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(check_pairing_info_available,
+ &input->pairing_available, &status,
+ hdcp, "pairing_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info,
+ &input->pairing_info_read, &status,
+ hdcp, "pairing_info_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
+ &input->h_prime_validation, &status,
+ hdcp, "h_prime_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km,
+ &input->stored_km_write, &status,
+ hdcp, "stored_km_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(check_h_prime_available,
+ &input->h_prime_available, &status,
+ hdcp, "h_prime_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
+ &input->h_prime_read, &status,
+ hdcp, "h_prime_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
+ &input->h_prime_validation, &status,
+ hdcp, "h_prime_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+ &input->lc_init_prepare, &status,
+ hdcp, "lc_init_prepare"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
+ &input->lc_init_write, &status,
+ hdcp, "lc_init_write"))
+ goto out;
+ if (is_dp_hdcp(hdcp))
+ msleep(16);
+ else
+ if (!mod_hdcp_execute_and_set(poll_l_prime_available,
+ &input->l_prime_available_poll, &status,
+ hdcp, "l_prime_available_poll"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
+ &input->l_prime_validation, &status,
+ hdcp, "l_prime_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks,
+ &input->eks_prepare, &status,
+ hdcp, "eks_prepare"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks,
+ &input->eks_write, &status,
+ hdcp, "eks_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption,
+ &input->enable_encryption, &status,
+ hdcp, "enable_encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp2_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (!event_ctx->rx_id_list_ready) {
+ status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list,
+ &input->rx_id_list_read,
+ &status, hdcp, "receiver_id_list_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check,
+ &status, hdcp, "device_count_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list,
+ &input->rx_id_list_validation,
+ &status, hdcp, "rx_id_list_validation"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack,
+ &input->repeater_auth_ack_write,
+ &status, hdcp, "repeater_auth_ack_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management,
+ &input->prepare_stream_manage,
+ &status, hdcp, "prepare_stream_manage"))
+ goto out;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage,
+ &input->stream_manage_write,
+ &status, hdcp, "stream_manage_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+ }
+ if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(check_stream_ready_available,
+ &input->stream_ready_available,
+ &status, hdcp, "stream_ready_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready,
+ &input->stream_ready_read,
+ &status, hdcp, "stream_ready_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready,
+ &input->stream_ready_validation,
+ &status, hdcp, "stream_ready_validation"))
+ goto out;
+
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps,
+ &input->rx_caps_read_dp,
+ &status, hdcp, "rx_caps_read_dp"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
+ &input->hdcp2_capable_check, &status,
+ hdcp, "hdcp2_capable_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type,
+ &input->content_stream_type_write, &status,
+ hdcp, "content_stream_type_write"))
+ goto out;
+out:
+ return status;
+}
+
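+/*
+ * HDMI/DVI HDCP 2.2 execution dispatcher: run the handler for the current
+ * authentication state and record each step's pass/fail result in 'input'
+ * for the transition logic to act on.
+ */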
+enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ status = known_hdcp2_capable_rx(hdcp, event_ctx, input);
+ break;
+ case H2_A1_SEND_AKE_INIT:
+ status = send_ake_init(hdcp, event_ctx, input);
+ break;
+ case H2_A1_VALIDATE_AKE_CERT:
+ status = validate_ake_cert(hdcp, event_ctx, input);
+ break;
+ case H2_A1_SEND_NO_STORED_KM:
+ status = send_no_stored_km(hdcp, event_ctx, input);
+ break;
+ case H2_A1_READ_H_PRIME:
+ status = read_h_prime(hdcp, event_ctx, input);
+ break;
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ status = read_pairing_info_and_validate_h_prime(hdcp,
+ event_ctx, input);
+ break;
+ case H2_A1_SEND_STORED_KM:
+ status = send_stored_km(hdcp, event_ctx, input);
+ break;
+ case H2_A1_VALIDATE_H_PRIME:
+ status = validate_h_prime(hdcp, event_ctx, input);
+ break;
+ case H2_A2_LOCALITY_CHECK:
+ status = locality_check(hdcp, event_ctx, input);
+ break;
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ status = exchange_ks_and_test_for_repeater(hdcp, event_ctx, input);
+ break;
+ case H2_ENABLE_ENCRYPTION:
+ status = enable_encryption(hdcp, event_ctx, input);
+ break;
+ case H2_A5_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ status = wait_for_rx_id_list(hdcp, event_ctx, input);
+ break;
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
+ break;
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ status = send_stream_management(hdcp, event_ctx, input);
+ break;
+ case H2_A9_VALIDATE_STREAM_READY:
+ status = validate_stream_ready(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D2_A1_SEND_AKE_INIT:
+ status = send_ake_init(hdcp, event_ctx, input);
+ break;
+ case D2_A1_VALIDATE_AKE_CERT:
+ status = validate_ake_cert(hdcp, event_ctx, input);
+ break;
+ case D2_A1_SEND_NO_STORED_KM:
+ status = send_no_stored_km(hdcp, event_ctx, input);
+ break;
+ case D2_A1_READ_H_PRIME:
+ status = read_h_prime(hdcp, event_ctx, input);
+ break;
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ status = read_pairing_info_and_validate_h_prime(hdcp,
+ event_ctx, input);
+ break;
+ case D2_A1_SEND_STORED_KM:
+ status = send_stored_km(hdcp, event_ctx, input);
+ break;
+ case D2_A1_VALIDATE_H_PRIME:
+ status = validate_h_prime(hdcp, event_ctx, input);
+ break;
+ case D2_A2_LOCALITY_CHECK:
+ status = locality_check(hdcp, event_ctx, input);
+ break;
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ status = exchange_ks_and_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ status = send_content_stream_type_dp(hdcp, event_ctx, input);
+ break;
+ case D2_ENABLE_ENCRYPTION:
+ status = enable_encryption(hdcp, event_ctx, input);
+ break;
+ case D2_A5_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ status = wait_for_rx_id_list(hdcp, event_ctx, input);
+ break;
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
+ break;
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ status = send_stream_management(hdcp, event_ctx, input);
+ break;
+ case D2_A9_VALIDATE_STREAM_READY:
+ status = validate_stream_ready(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
new file mode 100644
index 000000000000..8cae3e3aacd5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
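+/*
+ * The transition handlers consume the per-step results collected during
+ * execution, pick the next authentication state, and schedule callbacks or
+ * watchdog timeouts through 'output'; unrecoverable steps restart
+ * authentication via fail_and_restart_in_ms().
+ */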
+enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ if (input->hdcp2version_read != PASS ||
+ input->hdcp2_capable_check != PASS) {
+ adjust->hdcp2.disable = 1;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT);
+ }
+ break;
+ case H2_A1_SEND_AKE_INIT:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS ||
+ input->ake_init_prepare != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp2.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ake_init_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT);
+ break;
+ case H2_A1_VALIDATE_AKE_CERT:
+ if (input->ake_cert_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-08: consider ake timeout a failure */
+ /* some HDMI receivers are not ready for HDCP
+ * immediately after video becomes active, so
+ * delay 1s before retrying on the first HDCP
+ * message timeout.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
+ } else {
+ /* continue ake cert polling */
+ callback_in_ms(10, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->ake_cert_read != PASS ||
+ input->ake_cert_validation != PASS) {
+ /*
+ * 1A-09: consider invalid ake cert a failure
+ * 1A-10: consider receiver id listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_km_stored &&
+ !adjust->hdcp2.force_no_stored_km) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_SEND_STORED_KM);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM);
+ }
+ break;
+ case H2_A1_SEND_NO_STORED_KM:
+ if (input->no_stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (adjust->hdcp2.increase_h_prime_timeout)
+ set_watchdog_in_ms(hdcp, 2000, output);
+ else
+ set_watchdog_in_ms(hdcp, 1000, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_READ_H_PRIME);
+ break;
+ case H2_A1_READ_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-11-3: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ } else {
+ /* continue h' polling */
+ callback_in_ms(100, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
+ break;
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ if (input->pairing_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-12: consider pairing info timeout
+ * a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ /* continue pairing info polling */
+ callback_in_ms(20, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->pairing_info_read != PASS ||
+ input->h_prime_validation != PASS) {
+ /* 1A-11-1: consider invalid h' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
+ break;
+ case H2_A1_SEND_STORED_KM:
+ if (input->stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME);
+ break;
+ case H2_A1_VALIDATE_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-11-2: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ } else {
+ /* continue h' polling */
+ callback_in_ms(20, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->h_prime_validation != PASS) {
+ /* 1A-11-1: consider invalid h' a failure */
+ adjust->hdcp2.force_no_stored_km = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
+ break;
+ case H2_A2_LOCALITY_CHECK:
+ if (hdcp->state.stay_count > 10 ||
+ input->lc_init_prepare != PASS ||
+ input->lc_init_write != PASS ||
+ input->l_prime_available_poll != PASS ||
+ input->l_prime_read != PASS) {
+ /*
+ * 1A-05: consider disconnection after LC init a failure
+ * 1A-13-1: consider invalid l' a failure
+ * 1A-13-2: consider l' timeout a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_validation != PASS) {
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
+ break;
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ if (input->eks_prepare != PASS ||
+ input->eks_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 3000, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST);
+ } else {
+ /* some CTS equipment requires a delay GREATER than
+ * 200 ms, so delay 210 ms instead of 200 ms
+ */
+ callback_in_ms(210, output);
+ set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
+ }
+ break;
+ case H2_ENABLE_ENCRYPTION:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ /*
+ * 1A-07: restart hdcp on REAUTH_REQ
+ * 1B-08: restart hdcp on REAUTH_REQ
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->enable_encryption != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ case H2_A5_AUTHENTICATED:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!event_ctx->rx_id_list_ready) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-02: consider rx id list timeout a failure */
+ /* some CTS equipment's actual timeout
+ * measurement is slightly greater than 3000 ms.
+ * Delay 100 ms to ensure the timeout has fully
+ * elapsed before re-authentication.
+ */
+ fail_and_restart_in_ms(100, &status, output);
+ } else {
+ callback_in_ms(300, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->rx_id_list_read != PASS ||
+ input->device_count_check != PASS ||
+ input->rx_id_list_validation != PASS ||
+ input->repeater_auth_ack_write != PASS) {
+ /* 1B-03: consider invalid v' a failure
+ * 1B-04: consider MAX_DEVS_EXCEEDED a failure
+ * 1B-05: consider MAX_CASCADE_EXCEEDED a failure
+ * 1B-06: consider invalid seq_num_V a failure
+ * 1B-09: consider seq_num_V rollover a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+ break;
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->prepare_stream_manage != PASS ||
+ input->stream_manage_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY);
+ break;
+ case H2_A9_VALIDATE_STREAM_READY:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->stream_ready_available != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-10-2: restart content stream management on
+ * stream ready timeout
+ */
+ hdcp->auth.count.stream_management_retry_count++;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+ } else {
+ callback_in_ms(10, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->stream_ready_read != PASS ||
+ input->stream_ready_validation != PASS) {
+ /*
+ * 1B-10-1: restart content stream management
+ * on invalid M'
+ */
+ if (hdcp->auth.count.stream_management_retry_count > 10) {
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ hdcp->auth.count.stream_management_retry_count++;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+ }
+ break;
+ }
+ callback_in_ms(200, output);
+ set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->rx_caps_read_dp != PASS ||
+ input->hdcp2_capable_check != PASS) {
+ adjust->hdcp2.disable = 1;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT);
+ }
+ break;
+ case D2_A1_SEND_AKE_INIT:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS ||
+ input->ake_init_prepare != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp2.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ake_init_write != PASS) {
+ /* possibly display not ready */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(100, output);
+ set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT);
+ break;
+ case D2_A1_VALIDATE_AKE_CERT:
+ if (input->ake_cert_read != PASS ||
+ input->ake_cert_validation != PASS) {
+ /*
+ * 1A-08: consider invalid ake cert a failure
+ * 1A-09: consider receiver id listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_km_stored &&
+ !adjust->hdcp2.force_no_stored_km) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A1_SEND_STORED_KM);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM);
+ }
+ break;
+ case D2_A1_SEND_NO_STORED_KM:
+ if (input->no_stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (adjust->hdcp2.increase_h_prime_timeout)
+ set_watchdog_in_ms(hdcp, 2000, output);
+ else
+ set_watchdog_in_ms(hdcp, 1000, output);
+ set_state_id(hdcp, output, D2_A1_READ_H_PRIME);
+ break;
+ case D2_A1_READ_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /* 1A-10-3: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
+ break;
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ if (input->pairing_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /*
+ * 1A-11: consider pairing info timeout
+ * a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ } else if (input->pairing_info_read != PASS ||
+ input->h_prime_validation != PASS) {
+ /* 1A-10-1: consider invalid h' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
+ break;
+ case D2_A1_SEND_STORED_KM:
+ if (input->stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME);
+ break;
+ case D2_A1_VALIDATE_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /* 1A-10-2: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->h_prime_validation != PASS) {
+ /* 1A-10-1: consider invalid h' a failure */
+ adjust->hdcp2.force_no_stored_km = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
+ break;
+ case D2_A2_LOCALITY_CHECK:
+ if (hdcp->state.stay_count > 10 ||
+ input->lc_init_prepare != PASS ||
+ input->lc_init_write != PASS ||
+ input->l_prime_read != PASS) {
+ /* 1A-12: consider invalid l' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_validation != PASS) {
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
+ break;
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ if (input->eks_prepare != PASS ||
+ input->eks_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 3000, output);
+ set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE);
+ }
+ break;
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS ||
+ input->content_stream_type_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(210, output);
+ set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
+ break;
+ case D2_ENABLE_ENCRYPTION:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ /*
+ * 1A-07: restart hdcp on REAUTH_REQ
+ * 1B-08: restart hdcp on REAUTH_REQ
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->enable_encryption != PASS ||
+ (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ case D2_A5_AUTHENTICATED:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->link_integrity_check_dp != PASS) {
+ if (hdcp->connection.hdcp2_retry_count >= 1)
+ adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ }
+ increment_stay_counter(hdcp);
+ break;
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!event_ctx->rx_id_list_ready) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /* 1B-02: consider rx id list timeout a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS ||
+ input->rx_id_list_read != PASS ||
+ input->device_count_check != PASS ||
+ input->rx_id_list_validation != PASS ||
+ input->repeater_auth_ack_write != PASS) {
+ /*
+ * 1B-03: consider invalid v' a failure
+ * 1B-04: consider MAX_DEVS_EXCEEDED a failure
+ * 1B-05: consider MAX_CASCADE_EXCEEDED a failure
+ * 1B-06: consider invalid seq_num_V a failure
+ * 1B-09: consider seq_num_V rollover a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+ break;
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->prepare_stream_manage != PASS ||
+ input->stream_manage_write != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(100, output);
+ set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY);
+ break;
+ case D2_A9_VALIDATE_STREAM_READY:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->stream_ready_read != PASS ||
+ input->stream_ready_validation != PASS) {
+ /*
+ * 1B-10-1: restart content stream management
+ * on invalid M'
+ * 1B-10-2: consider stream ready timeout a failure
+ */
+ if (hdcp->auth.count.stream_management_retry_count > 10) {
+ fail_and_restart_in_ms(0, &status, output);
+ } else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) {
+ hdcp->auth.count.stream_management_retry_count++;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(200, output);
+ set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
new file mode 100644
index 000000000000..ff9d54812e62
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1 */
+#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
+#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
+
+enum mod_hdcp_ddc_message_id {
+ MOD_HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ MOD_HDCP_MESSAGE_ID_READ_BKSV = 0,
+ MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ MOD_HDCP_MESSAGE_ID_READ_VH_X,
+ MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ MOD_HDCP_MESSAGE_ID_READ_BINFO,
+
+ /* HDCP 2.2 */
+
+ MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
+ MOD_HDCP_MESSAGE_ID_RX_CAPS,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+ MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
+ MOD_HDCP_MESSAGE_ID_MAX
+};
+
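+/*
+ * Per-message transport addresses, indexed by mod_hdcp_ddc_message_id:
+ * I2C register offsets for HDMI (HDCP port at I2C address 0x74) and DPCD
+ * addresses for DisplayPort.
+ */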
+static const uint8_t hdcp_i2c_offsets[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+ [MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
+ [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+ [MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
+};
+
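+/*
+ * Read one HDCP message from the receiver. Over DP the DPCD address is
+ * read in chunks of at most HDCP_MAX_AUX_TRANSACTION_SIZE bytes; over
+ * HDMI the whole message is fetched with a single I2C read at the
+ * message's register offset.
+ */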
+static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ success = hdcp->config.ddc.funcs.read_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[msg_id],
+ buf,
+ (uint32_t)buf_len);
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
+static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len,
+ uint8_t read_size)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, read_size);
+ status = read(hdcp, msg_id, buf + data_offset, cur_size);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+
+ return status;
+}
+
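+/*
+ * Write one HDCP message to the receiver. DP writes are chunked like
+ * reads, while the I2C path prepends the register offset to the payload
+ * in hdcp->buf and sends everything in one transaction.
+ */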
+static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.write_dpcd(
+ hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ memmove(&hdcp->buf[1], buf, buf_len);
+ success = hdcp->config.ddc.funcs.write_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp->buf,
+ (uint32_t)(buf_len+1));
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV,
+ hdcp->auth.msg.hdcp1.bksv,
+ sizeof(hdcp->auth.msg.hdcp1.bksv));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ &hdcp->auth.msg.hdcp1.bcaps,
+ sizeof(hdcp->auth.msg.hdcp1.bcaps));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ 1);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ sizeof(hdcp->auth.msg.hdcp1.bstatus));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+ sizeof(hdcp->auth.msg.hdcp1.r0p));
+}
+
+/* special case, reading repeatedly at the same address, don't use read() */
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size,
+ KSV_READ_SIZE);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ &hdcp->auth.msg.hdcp1.vp[0], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ &hdcp->auth.msg.hdcp1.vp[4], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ &hdcp->auth.msg.hdcp1.vp[8], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ &hdcp->auth.msg.hdcp1.vp[12], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ &hdcp->auth.msg.hdcp1.vp[16], 4);
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ hdcp->auth.msg.hdcp1.aksv,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+}
+
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ &hdcp->auth.msg.hdcp1.ainfo,
+ sizeof(hdcp->auth.msg.hdcp1.ainfo));
+}
+
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ hdcp->auth.msg.hdcp1.an,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+}
+
+enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
+ &hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
+ sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (!is_dp_hdcp(hdcp))
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS,
+ hdcp->auth.msg.hdcp2.rxcaps_dp,
+ sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+ &hdcp->auth.msg.hdcp2.rxstatus_dp,
+ 1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
+ sizeof(hdcp->auth.msg.hdcp2.rxstatus));
+ }
+ return status;
+}
+
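+/*
+ * For the HDCP 2.2 reads below: DP does not carry the leading msg_id byte
+ * that prefixes each HDMI message body, so the DP paths fill it in locally
+ * (e.g. 3 for AKE_Send_Cert) and read the payload into the buffer starting
+ * at offset 1, keeping the message layout identical for both transports.
+ */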
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ hdcp->auth.msg.hdcp2.ake_cert+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
+
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ hdcp->auth.msg.hdcp2.ake_cert,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ hdcp->auth.msg.hdcp2.ake_h_prime+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
+
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ hdcp->auth.msg.hdcp2.ake_h_prime,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ hdcp->auth.msg.hdcp2.ake_pairing_info+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
+
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ hdcp->auth.msg.hdcp2.ake_pairing_info,
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ hdcp->auth.msg.hdcp2.lc_l_prime+1,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
+
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ hdcp->auth.msg.hdcp2.lc_l_prime,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list+1,
+ sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list,
+ hdcp->auth.msg.hdcp2.rx_id_list_size);
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
+
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+ }
+ return status;
+}
+
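+/*
+ * The HDCP 2.2 writes mirror the reads: on DP the leading msg_id byte is
+ * skipped and only the payload is written to the DPCD address, while
+ * HDMI/I2C sends the full message including the id.
+ */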
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ hdcp->auth.msg.hdcp2.ake_init+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_init)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ hdcp->auth.msg.hdcp2.ake_init,
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_no_stored_km+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_no_stored_km,
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_stored_km+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_stored_km,
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ hdcp->auth.msg.hdcp2.lc_init+1,
+ sizeof(hdcp->auth.msg.hdcp2.lc_init)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ hdcp->auth.msg.hdcp2.lc_init,
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ hdcp->auth.msg.hdcp2.ske_eks+1,
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1);
+ else
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ hdcp->auth.msg.hdcp2.ske_eks,
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ hdcp->auth.msg.hdcp2.repeater_auth_ack+1,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ hdcp->auth.msg.hdcp2.repeater_auth_ack,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1,
+ hdcp->auth.msg.hdcp2.stream_manage_size-1);
+ else
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+ hdcp->auth.msg.hdcp2.stream_manage_size);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+ hdcp->auth.msg.hdcp2.content_stream_type_dp+1,
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1);
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
new file mode 100644
index 000000000000..724ebcee9a19
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "hdcp.h"
+
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size)
+{
+ const uint8_t bytes_per_line = 16,
+ byte_size = 3,
+ newline_size = 1,
+ terminator_size = 1;
+ uint32_t line_count = msg_size / bytes_per_line,
+ trailing_bytes = msg_size % bytes_per_line;
+ uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count +
+ byte_size * trailing_bytes + newline_size + terminator_size;
+ uint32_t buf_pos = 0;
+ uint32_t i = 0;
+
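+ /* only format the dump when the destination can hold every byte as
+ * "XX " plus line breaks and the terminator; otherwise leave the
+ * buffer untouched
+ */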
+ if (buf_size >= target_size) {
+ for (i = 0; i < msg_size; i++) {
+ if (i % bytes_per_line == 0)
+ buf[buf_pos++] = '\n';
+ sprintf(&buf[buf_pos], "%02X ", msg[i]);
+ buf_pos += byte_size;
+ }
+ buf[buf_pos++] = '\0';
+ }
+}
+
+char *mod_hdcp_status_to_str(int32_t status)
+{
+ switch (status) {
+ case MOD_HDCP_STATUS_SUCCESS:
+ return "MOD_HDCP_STATUS_SUCCESS";
+ case MOD_HDCP_STATUS_FAILURE:
+ return "MOD_HDCP_STATUS_FAILURE";
+ case MOD_HDCP_STATUS_RESET_NEEDED:
+ return "MOD_HDCP_STATUS_RESET_NEEDED";
+ case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
+ case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
+ case MOD_HDCP_STATUS_INVALID_STATE:
+ return "MOD_HDCP_STATUS_INVALID_STATE";
+ case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
+ return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
+ case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
+ return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
+ case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
+ return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
+ case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
+ case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
+ return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
+ case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
+ return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
+ case MOD_HDCP_STATUS_DDC_FAILURE:
+ return "MOD_HDCP_STATUS_DDC_FAILURE";
+ case MOD_HDCP_STATUS_INVALID_OPERATION:
+ return "MOD_HDCP_STATUS_INVALID_OPERATION";
+ case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
+ case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
+ return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
+ case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
+ default:
+ return "MOD_HDCP_STATUS_UNKNOWN";
+ }
+}
+
+char *mod_hdcp_state_id_to_str(int32_t id)
+{
+ switch (id) {
+ case HDCP_UNINITIALIZED:
+ return "HDCP_UNINITIALIZED";
+ case HDCP_INITIALIZED:
+ return "HDCP_INITIALIZED";
+ case HDCP_CP_NOT_DESIRED:
+ return "HDCP_CP_NOT_DESIRED";
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ return "H1_A0_WAIT_FOR_ACTIVE_RX";
+ case H1_A1_EXCHANGE_KSVS:
+ return "H1_A1_EXCHANGE_KSVS";
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
+ case H1_A45_AUTHENTICATED:
+ return "H1_A45_AUTHENTICATED";
+ case H1_A8_WAIT_FOR_READY:
+ return "H1_A8_WAIT_FOR_READY";
+ case H1_A9_READ_KSV_LIST:
+ return "H1_A9_READ_KSV_LIST";
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D1_A1_EXCHANGE_KSVS:
+ return "D1_A1_EXCHANGE_KSVS";
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ return "D1_A23_WAIT_FOR_R0_PRIME";
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
+ case D1_A4_AUTHENTICATED:
+ return "D1_A4_AUTHENTICATED";
+ case D1_A6_WAIT_FOR_READY:
+ return "D1_A6_WAIT_FOR_READY";
+ case D1_A7_READ_KSV_LIST:
+ return "D1_A7_READ_KSV_LIST";
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ return "H2_A0_KNOWN_HDCP2_CAPABLE_RX";
+ case H2_A1_SEND_AKE_INIT:
+ return "H2_A1_SEND_AKE_INIT";
+ case H2_A1_VALIDATE_AKE_CERT:
+ return "H2_A1_VALIDATE_AKE_CERT";
+ case H2_A1_SEND_NO_STORED_KM:
+ return "H2_A1_SEND_NO_STORED_KM";
+ case H2_A1_READ_H_PRIME:
+ return "H2_A1_READ_H_PRIME";
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+ case H2_A1_SEND_STORED_KM:
+ return "H2_A1_SEND_STORED_KM";
+ case H2_A1_VALIDATE_H_PRIME:
+ return "H2_A1_VALIDATE_H_PRIME";
+ case H2_A2_LOCALITY_CHECK:
+ return "H2_A2_LOCALITY_CHECK";
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+ case H2_ENABLE_ENCRYPTION:
+ return "H2_ENABLE_ENCRYPTION";
+ case H2_A5_AUTHENTICATED:
+ return "H2_A5_AUTHENTICATED";
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ return "H2_A6_WAIT_FOR_RX_ID_LIST";
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ return "H2_A9_SEND_STREAM_MANAGEMENT";
+ case H2_A9_VALIDATE_STREAM_READY:
+ return "H2_A9_VALIDATE_STREAM_READY";
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D2_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D2_A1_SEND_AKE_INIT:
+ return "D2_A1_SEND_AKE_INIT";
+ case D2_A1_VALIDATE_AKE_CERT:
+ return "D2_A1_VALIDATE_AKE_CERT";
+ case D2_A1_SEND_NO_STORED_KM:
+ return "D2_A1_SEND_NO_STORED_KM";
+ case D2_A1_READ_H_PRIME:
+ return "D2_A1_READ_H_PRIME";
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+ case D2_A1_SEND_STORED_KM:
+ return "D2_A1_SEND_STORED_KM";
+ case D2_A1_VALIDATE_H_PRIME:
+ return "D2_A1_VALIDATE_H_PRIME";
+ case D2_A2_LOCALITY_CHECK:
+ return "D2_A2_LOCALITY_CHECK";
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ return "D2_SEND_CONTENT_STREAM_TYPE";
+ case D2_ENABLE_ENCRYPTION:
+ return "D2_ENABLE_ENCRYPTION";
+ case D2_A5_AUTHENTICATED:
+ return "D2_A5_AUTHENTICATED";
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ return "D2_A6_WAIT_FOR_RX_ID_LIST";
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ return "D2_A9_SEND_STREAM_MANAGEMENT";
+ case D2_A9_VALIDATE_STREAM_READY:
+ return "D2_A9_VALIDATE_STREAM_READY";
+ default:
+ return "UNKNOWN_STATE_ID";
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
new file mode 100644
index 000000000000..ff91373ebada
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_LOG_H_
+#define MOD_HDCP_LOG_H_
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
+#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#endif
+
+/* default logs */
+#define HDCP_ERROR_TRACE(hdcp, status) \
+ HDCP_LOG_ERR(hdcp, \
+ "[Link %d] WARNING %s IN STATE %s", \
+ hdcp->config.index, \
+ mod_hdcp_status_to_str(status), \
+ mod_hdcp_state_id_to_str(hdcp->state.id))
+#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 enabled on display %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 2.2 enabled on display %d", \
+ hdcp->config.index, displayIndex)
+/* state machine logs */
+#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] HDCP_REMOVE_DISPLAY index %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_INPUT_PASS_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tPASS %s", \
+ hdcp->config.index, str)
+#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tFAIL %s", \
+ hdcp->config.index, str)
+#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \
+ if (output->watchdog_timer_needed) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s with %d ms watchdog", \
+ hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \
+ else \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s", hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id)); \
+} while (0)
+#define HDCP_TIMEOUT_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index)
+#define HDCP_CPIRQ_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
+#define HDCP_EVENT_TRACE(hdcp, event) do { \
+		if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+			HDCP_TIMEOUT_TRACE(hdcp); \
+		else if (event == MOD_HDCP_EVENT_CPIRQ) \
+			HDCP_CPIRQ_TRACE(hdcp); \
+} while (0)
+/* TODO: find some way to tell if logging is off to save time */
+#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \
+ msg_name, hdcp->buf); \
+} while (0)
+#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \
+ hdcp->config.index, msg_name,\
+ hdcp->buf); \
+} while (0)
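+/* dump every authentication message exchanged so far, choosing the HDCP 1.4
+ * or HDCP 2.2 message set based on the active version
+ */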
+#define HDCP_FULL_DDC_TRACE(hdcp) do { \
+ if (is_hdcp1(hdcp)) { \
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+ sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+ sizeof(hdcp->auth.msg.hdcp1.an)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+ sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+ sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+ sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+ hdcp->auth.msg.hdcp1.ksvlist_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+ sizeof(hdcp->auth.msg.hdcp1.vp)); \
+ } else { \
+ HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \
+ &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \
+ sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \
+ sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_init)); \
+ HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \
+ hdcp->auth.msg.hdcp2.ake_stored_km, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \
+ hdcp->auth.msg.hdcp2.ake_no_stored_km, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \
+ HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \
+ hdcp->auth.msg.hdcp2.ake_pairing_info, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \
+ sizeof(hdcp->auth.msg.hdcp2.lc_init)); \
+ HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \
+ (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \
+ sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \
+ hdcp->auth.msg.hdcp2.rx_id_list, \
+ hdcp->auth.msg.hdcp2.rx_id_list_size); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \
+ hdcp->auth.msg.hdcp2.repeater_auth_ack, \
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \
+ hdcp->auth.msg.hdcp2.stream_manage_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \
+ hdcp->auth.msg.hdcp2.content_stream_type_dp, \
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \
+ } \
+} while (0)
+#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
+ hdcp->config.index)
+#define HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp2 session", \
+ hdcp->config.index)
+#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
+#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index)
+#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \
+} while (0)
+#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
+} while (0)
+
+#endif // MOD_HDCP_LOG_H_
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
new file mode 100644
index 000000000000..7911dc157d5a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#define MAX_NUM_DISPLAYS 24
+
+#include "hdcp.h"
+
+#include "amdgpu.h"
+#include "hdcp_psp.h"
+
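+/*
+ * Reset every message descriptor in the shared prepare/process command to
+ * NULL_MESSAGE so the TA only acts on the descriptors a caller fills in.
+ */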
+static void hdcp2_message_init(struct mod_hdcp *hdcp,
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in)
+{
+ in->session_handle = hdcp->auth.id;
+ in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg1_desc.msg_size = 0;
+ in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg2_desc.msg_size = 0;
+ in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg3_desc.msg_size = 0;
+}
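+
+/*
+ * Tell the display topology manager (DTM) TA that each added display is no
+ * longer active, then drop the display back to the ACTIVE (not added) state.
+ */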
+enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ uint8_t i;
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (is_display_added(&(hdcp->connection.displays[i]))) {
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ display = &hdcp->connection.displays[i];
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
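+/*
+ * Register every active display with the DTM TA, passing its routing
+ * (controller, DDC line, DIG BE/FE, MST VCID) and the highest supported HDCP
+ * version; displays added successfully move to ACTIVE_AND_ADDED.
+ */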
+enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ struct mod_hdcp_link *link = &hdcp->connection.link;
+ uint8_t i;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
+ display = &hdcp->connection.displays[i];
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	/* bail out when no display has been added yet, as the HDCP 2.x path does */
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+	hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv,
+ TA_HDCP__HDCP1_KSV_SIZE);
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p;
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+ if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
+ /* needs second part of authentication */
+ hdcp->connection.is_repeater = 1;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ hdcp->connection.is_repeater = 0;
+	} else {
+		return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+	}
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
+ }
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size;
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp,
+ sizeof(hdcp->auth.msg.hdcp1.vp));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo =
+ is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
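+/*
+ * Enable HDCP 1.4 stream encryption for each added, non-disabled display on
+ * the link, issuing one TA call per display (DP MST case).
+ */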
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ int i = 0;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
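+/*
+ * Poll the HDCP 1.4 encryption status from the TA; protection level 1 means
+ * the link is still encrypted, anything else is a link maintenance failure.
+ */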
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+
+ return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
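+/*
+ * Open an HDCP 2.2 TA session for the first added display and translate the
+ * driver's content type policy (force type 0, force type 1 or highest
+ * supported) into the TA's negotiation setting.
+ */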
+enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
+
+ if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0;
+ else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1;
+ else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX)
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+
+ memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT;
+ msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT;
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM;
+ msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+
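+	/*
+	 * The TA prepares AKE_No_Stored_km and AKE_Stored_km back to back in
+	 * transmitter_message; the state machine sends whichever one matches
+	 * is_km_stored below.
+	 */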
+ memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+ memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+ if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
+
+ return MOD_HDCP_STATUS_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME;
+ msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME;
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+
+ if (!hdcp->connection.is_km_stored) {
+ msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO;
+ msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO;
+ memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)],
+ hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+ }
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+
+ if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ else if (!hdcp->connection.is_km_stored &&
+ msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+
+ memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME;
+ msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME;
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+ if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
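+/*
+ * Prepare SKE_Send_Eks and, on DP links, the Content Stream Type write that
+ * follows it; both messages come back in the TA's transmitter buffer.
+ */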
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS;
+
+ if (is_dp_hdcp(hdcp))
+ msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+
+ memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+ if (is_dp_hdcp(hdcp)) {
+ memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+	hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
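+/*
+ * Pass the received receiver ID list to the TA for validation and collect
+ * the prepared RepeaterAuth_Send_Ack to write back to the receiver.
+ */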
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST;
+ msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list,
+ sizeof(hdcp->auth.msg.hdcp2.rx_id_list));
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+ if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
+
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ uint8_t i;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ break;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+
+ hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
+
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY;
+
+ msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready);
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
+ (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
+ hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
+ if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
+ if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
+ else
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
new file mode 100644
index 000000000000..82a5e997d573
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_HDCP_HDCP_PSP_H_
+#define MODULES_HDCP_HDCP_PSP_H_
+
+/*
+ * NOTE: These parameters are a one-to-one copy of the
+ * parameters required by PSP
+ */
+enum bgd_security_hdcp_encryption_level {
+ HDCP_ENCRYPTION_LEVEL__INVALID = 0,
+ HDCP_ENCRYPTION_LEVEL__OFF,
+ HDCP_ENCRYPTION_LEVEL__ON
+};
+
+enum bgd_security_hdcp2_content_type {
+ HDCP2_CONTENT_TYPE__INVALID = 0,
+ HDCP2_CONTENT_TYPE__TYPE0,
+ HDCP2_CONTENT_TYPE__TYPE1
+};
+enum ta_dtm_command {
+ TA_DTM_COMMAND__UNUSED_1 = 1,
+ TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
+ TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE
+};
+
+/* DTM related enumerations */
+/**********************************************************/
+
+enum ta_dtm_status {
+ TA_DTM_STATUS__SUCCESS = 0x00,
+ TA_DTM_STATUS__GENERIC_FAILURE = 0x01,
+ TA_DTM_STATUS__INVALID_PARAMETER = 0x02,
+ TA_DTM_STATUS__NULL_POINTER = 0x3
+};
+
+/* input/output structures for DTM commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+enum ta_dtm_hdcp_version_max_supported {
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23
+};
+
+struct ta_dtm_topology_update_input_v2 {
+ /* display handle is unique across the driver and is used to identify a display */
+ /* for all security interfaces which reference displays such as HDCP */
+ uint32_t display_handle;
+ uint32_t is_active;
+ uint32_t is_miracast;
+ uint32_t controller;
+ uint32_t ddc_line;
+ uint32_t dig_be;
+ uint32_t dig_fe;
+ uint32_t dp_mst_vcid;
+ uint32_t is_assr;
+ uint32_t max_hdcp_supported_version;
+};
+
+struct ta_dtm_topology_assr_enable {
+ uint32_t display_topology_dig_be_index;
+};
+
+/**
+ * Output structures
+ */
+
+/* No output structures yet */
+
+union ta_dtm_cmd_input {
+ struct ta_dtm_topology_update_input_v2 topology_update_v2;
+ struct ta_dtm_topology_assr_enable topology_assr_enable;
+};
+
+union ta_dtm_cmd_output {
+ uint32_t reserved;
+};
+
+struct ta_dtm_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_dtm_status dtm_status;
+ uint32_t reserved;
+ union ta_dtm_cmd_input dtm_in_message;
+ union ta_dtm_cmd_output dtm_out_message;
+};
+
+int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
+ uint64_t fence_mc_addr);
+
+enum ta_hdcp_command {
+ TA_HDCP_COMMAND__INITIALIZE,
+ TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
+ TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION,
+ TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+ TA_HDCP_COMMAND__UNUSED_1,
+ TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION,
+ TA_HDCP_COMMAND__UNUSED_2,
+ TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS,
+ TA_HDCP_COMMAND__UNUSED_3,
+ TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
+ TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
+ TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+};
+
+enum ta_hdcp2_msg_id {
+ TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE = 1,
+ TA_HDCP_HDCP2_MSG_ID__AKE_INIT = 2,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT = 3,
+ TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM = 4,
+ TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM = 5,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_RRX = 6,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME = 7,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO = 8,
+ TA_HDCP_HDCP2_MSG_ID__LC_INIT = 9,
+ TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME = 10,
+ TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS = 11,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST = 12,
+ TA_HDCP_HDCP2_MSG_ID__RTT_READY = 13,
+ TA_HDCP_HDCP2_MSG_ID__RTT_CHALLENGE = 14,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK = 15,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE = 16,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY = 17,
+ TA_HDCP_HDCP2_MSG_ID__RECEIVER_AUTH_STATUS = 18,
+ TA_HDCP_HDCP2_MSG_ID__AKE_TRANSMITTER_INFO = 19,
+ TA_HDCP_HDCP2_MSG_ID__AKE_RECEIVER_INFO = 20,
+ TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP = 129
+};
+
+enum ta_hdcp2_hdcp2_msg_id_max_size {
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__NULL_MESSAGE = 0,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_INIT = 12,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT = 534,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM = 129,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_RRX = 9,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO = 17,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_INIT = 9,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SKE_SEND_EKS = 25,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RECEIVERID_LIST = 181,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_READY = 1,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_CHALLENGE = 17,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RACK = 17,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_MANAGE = 13,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_READY = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RECEIVER_AUTH_STATUS = 4,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_TRANSMITTER_INFO = 6,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO = 6,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SIGNAL_CONTENT_STREAM_TYPE_DP = 1
+};
+
+/* HDCP related enumerations */
+/**********************************************************/
+#define TA_HDCP__INVALID_SESSION 0xFFFF
+#define TA_HDCP__HDCP1_AN_SIZE 8
+#define TA_HDCP__HDCP1_KSV_SIZE 5
+#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
+#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+/* shared TX/RX message buffers, sized for the largest message combinations
+ * and padded up to a 64-bit boundary
+ */
+#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \
+	(TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6)
+
+#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \
+	(TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4)
+
+enum ta_hdcp_status {
+ TA_HDCP_STATUS__SUCCESS = 0x00,
+ TA_HDCP_STATUS__GENERIC_FAILURE = 0x01,
+ TA_HDCP_STATUS__NULL_POINTER = 0x02,
+ TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03,
+ TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04,
+ TA_HDCP_STATUS__INVALID_PARAMETER = 0x05,
+ TA_HDCP_STATUS__VHX_ERROR = 0x06,
+ TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07,
+ TA_HDCP_STATUS__SRM_FAILURE = 0x08,
+ TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09,
+ TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A,
+ TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B,
+ TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C,
+ TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D,
+ TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E,
+ TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F,
+ TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10,
+ TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11,
+ TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12,
+ TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13,
+ TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14,
+ TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15,
+ TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16,
+ TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17
+};
+
+enum ta_hdcp_authentication_status {
+ TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+};
+
+enum ta_hdcp2_msg_authentication_status {
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS = 0,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__KM_NOT_AVAILABLE,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNUSED,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID = 100, // everything above does not fail the request
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_ENOUGH_MEMORY,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_EXPECTED_MSG,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SIGNATURE_CERTIFICAT_ERROR,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INCORRECT_HDCP_VERSION,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNKNOWN_MESSAGE,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_HMAC,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_TOPOLOGY,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+};
+
+enum ta_hdcp_content_type {
+ TA_HDCP2_CONTENT_TYPE__TYPE0 = 1,
+ TA_HDCP2_CONTENT_TYPE__TYPE1,
+};
+
+enum ta_hdcp_content_type_negotiation_type {
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0 = 1,
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1,
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED
+};
+
+enum ta_hdcp2_version {
+ TA_HDCP2_VERSION_UNKNOWN = 0,
+ TA_HDCP2_VERSION_2_0 = 20,
+ TA_HDCP2_VERSION_2_1 = 21,
+ TA_HDCP2_VERSION_2_2 = 22
+};
+
+/* input/output structures for HDCP commands */
+/**********************************************************/
+struct ta_hdcp_cmd_hdcp1_create_session_input {
+ uint8_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_create_session_output {
+ uint32_t session_handle;
+ uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_primary;
+ uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_input {
+ uint32_t session_handle;
+ uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bcaps;
+ uint16_t r0_prime_primary;
+ uint16_t r0_prime_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_input {
+ uint32_t session_handle;
+ uint16_t bstatus_binfo;
+ uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE];
+ uint32_t ksv_list_size;
+ uint8_t pj_prime;
+ uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
+ uint32_t protection_level;
+};
+
+struct ta_hdcp_cmd_hdcp2_create_session_input_v2 {
+ uint32_t display_handle;
+ enum ta_hdcp_content_type_negotiation_type negotiate_content_type;
+};
+
+struct ta_hdcp_cmd_hdcp2_create_session_output_v2 {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_authentication_message_v2 {
+ enum ta_hdcp2_msg_id msg_id;
+ uint32_t msg_size;
+};
+
+struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 {
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg3_desc;
+ uint8_t receiver_message[TA_HDCP__HDCP2_RX_BUF_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 {
+ uint32_t hdcp_version;
+ uint32_t is_km_stored;
+ uint32_t is_locality_precompute_support;
+ uint32_t is_repeater;
+ enum ta_hdcp2_msg_authentication_status msg1_status;
+ enum ta_hdcp2_msg_authentication_status msg2_status;
+ enum ta_hdcp2_msg_authentication_status msg3_status;
+};
+
+struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 {
+ enum ta_hdcp2_msg_id msg1_id;
+ enum ta_hdcp2_msg_id msg2_id;
+};
+
+struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 {
+ enum ta_hdcp2_msg_authentication_status msg1_status;
+ enum ta_hdcp2_msg_authentication_status msg2_status;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc;
+ uint8_t transmitter_message[TA_HDCP__HDCP2_TX_BUF_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 {
+ uint32_t session_handle;
+ struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 process;
+ struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 prepare;
+};
+
+struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 {
+ uint32_t authentication_status;
+ struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 process;
+ struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 prepare;
+};
+
+struct ta_hdcp_cmd_hdcp2_set_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_get_encryption_status_output {
+ enum ta_hdcp_content_type hdcp2_type;
+ uint32_t protection_level;
+};
+
+struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
+/**********************************************************/
+/* Common input structure for HDCP callbacks */
+union ta_hdcp_cmd_input {
+ struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
+ struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_destroy_session_input hdcp2_destroy_session;
+ struct ta_hdcp_cmd_hdcp2_set_encryption_input hdcp2_set_encryption;
+ struct ta_hdcp_cmd_hdcp2_get_encryption_status_input hdcp2_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_create_session_input_v2 hdcp2_create_session_v2;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
+ hdcp2_prepare_process_authentication_message_v2;
+ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
+};
+
+/* Common output structure for HDCP callbacks */
+union ta_hdcp_cmd_output {
+ struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_get_encryption_status_output hdcp2_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
+ hdcp2_prepare_process_authentication_message_v2;
+};
+/**********************************************************/
+
+struct ta_hdcp_shared_memory {
+ uint32_t cmd_id;
+ enum ta_hdcp_status hdcp_status;
+ uint32_t reserved;
+ union ta_hdcp_cmd_input in_msg;
+ union ta_hdcp_cmd_output out_msg;
+};
+
+enum psp_status {
+ PSP_STATUS__SUCCESS = 0,
+ PSP_STATUS__ERROR_INVALID_PARAMS,
+ PSP_STATUS__ERROR_GENERIC,
+ PSP_STATUS__ERROR_OUT_OF_MEMORY,
+ PSP_STATUS__ERROR_UNSUPPORTED_FEATURE
+};
+
+#endif /* MODULES_HDCP_HDCP_PSP_H_ */
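
The shared memory block above is the entire command interface to the HDCP trusted application: the caller fills cmd_id and the matching in_msg member, asks the PSP to invoke the TA, then checks hdcp_status and reads the matching out_msg member. A minimal sketch of that round trip, assuming a TA_HDCP_COMMAND__HDCP1_CREATE_SESSION command id, a TA_HDCP_STATUS__SUCCESS status value and a psp_hdcp_invoke() helper, none of which are defined in this header:

        /* hedged sketch; the real command ids and invoke path live outside this header */
        static int hdcp1_create_session(struct ta_hdcp_shared_memory *shared,
                                        uint8_t display_handle,
                                        uint32_t *session_handle)
        {
                memset(shared, 0, sizeof(*shared));

                shared->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;  /* assumed id */
                shared->in_msg.hdcp1_create_session.display_handle = display_handle;

                psp_hdcp_invoke(shared);                                 /* assumed helper */

                if (shared->hdcp_status != TA_HDCP_STATUS__SUCCESS)      /* assumed value */
                        return -EINVAL;

                *session_handle = shared->out_msg.hdcp1_create_session.session_handle;
                return 0;
        }
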
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dcef85994c45..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
@@ -173,4 +174,6 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
uint32_t min_refresh_request_in_uhz,
uint32_t max_refresh_request_in_uhz);
+
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
new file mode 100644
index 000000000000..f2a0e1a064da
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_H_
+#define MOD_HDCP_H_
+
+#include "os_types.h"
+#include "signal_types.h"
+
+/* Forward Declarations */
+struct mod_hdcp;
+
+#define MAX_NUM_OF_DISPLAYS 6
+#define MAX_NUM_OF_ATTEMPTS 4
+#define MAX_NUM_OF_ERROR_TRACE 10
+
+/* detailed return status */
+enum mod_hdcp_status {
+ MOD_HDCP_STATUS_SUCCESS = 0,
+ MOD_HDCP_STATUS_FAILURE,
+ MOD_HDCP_STATUS_RESET_NEEDED,
+ MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
+ MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
+ MOD_HDCP_STATUS_INVALID_STATE,
+ MOD_HDCP_STATUS_NOT_IMPLEMENTED,
+ MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
+ MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
+ MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
+ MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
+ MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
+ MOD_HDCP_STATUS_INVALID_OPERATION,
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
+};
+
+struct mod_hdcp_displayport {
+ uint8_t rev;
+ uint8_t assr_supported;
+};
+
+struct mod_hdcp_hdmi {
+ uint8_t reserved;
+};
+
+enum mod_hdcp_operation_mode {
+ MOD_HDCP_MODE_OFF,
+ MOD_HDCP_MODE_DEFAULT,
+ MOD_HDCP_MODE_DP,
+ MOD_HDCP_MODE_DP_MST
+};
+
+enum mod_hdcp_display_state {
+ MOD_HDCP_DISPLAY_INACTIVE = 0,
+ MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
+ MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
+};
+
+struct mod_hdcp_ddc {
+ void *handle;
+ struct {
+ bool (*read_i2c)(void *handle,
+ uint32_t address,
+ uint8_t offset,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_i2c)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ bool (*read_dpcd)(void *handle,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_dpcd)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ } funcs;
+};
+
+struct mod_hdcp_psp {
+ void *handle;
+ void *funcs;
+};
+
+struct mod_hdcp_display_adjustment {
+ uint8_t disable : 1;
+ uint8_t reserved : 7;
+};
+
+struct mod_hdcp_link_adjustment_hdcp1 {
+ uint8_t disable : 1;
+ uint8_t postpone_encryption : 1;
+ uint8_t reserved : 6;
+};
+
+enum mod_hdcp_force_hdcp_type {
+ MOD_HDCP_FORCE_TYPE_MAX = 0,
+ MOD_HDCP_FORCE_TYPE_0,
+ MOD_HDCP_FORCE_TYPE_1
+};
+
+struct mod_hdcp_link_adjustment_hdcp2 {
+ uint8_t disable : 1;
+ uint8_t force_type : 2;
+ uint8_t force_no_stored_km : 1;
+ uint8_t increase_h_prime_timeout: 1;
+ uint8_t reserved : 3;
+};
+
+struct mod_hdcp_link_adjustment {
+ uint8_t auth_delay;
+ struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
+ struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_error {
+ enum mod_hdcp_status status;
+ uint8_t state_id;
+};
+
+struct mod_hdcp_trace {
+ struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
+ uint8_t error_count;
+};
+
+enum mod_hdcp_encryption_status {
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON
+};
+
+/* per-link events the DM has to notify the HDCP module of */
+enum mod_hdcp_event {
+ MOD_HDCP_EVENT_CALLBACK = 0,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ MOD_HDCP_EVENT_CPIRQ
+};
+
+/* output flags from module requesting timer operations */
+struct mod_hdcp_output {
+ uint8_t callback_needed;
+ uint8_t callback_stop;
+ uint8_t watchdog_timer_needed;
+ uint8_t watchdog_timer_stop;
+ uint16_t callback_delay;
+ uint16_t watchdog_timer_delay;
+};
+
+/* used to represent per-display info */
+struct mod_hdcp_display {
+ enum mod_hdcp_display_state state;
+ uint8_t index;
+ uint8_t controller;
+ uint8_t dig_fe;
+ union {
+ uint8_t vc_id;
+ };
+ struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per-link info */
+/* if a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+ enum mod_hdcp_operation_mode mode;
+ uint8_t dig_be;
+ uint8_t ddc_line;
+ union {
+ struct mod_hdcp_displayport dp;
+ struct mod_hdcp_hdmi hdmi;
+ };
+ struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+ const struct mod_hdcp_display *display;
+ const struct mod_hdcp_link *link;
+ const struct mod_hdcp_trace *trace;
+ enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* contains values provided on external display configuration change */
+struct mod_hdcp_config {
+ struct mod_hdcp_psp psp;
+ struct mod_hdcp_ddc ddc;
+ uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates one mod_hdcp per dc_link on dm init, sized by this query */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output);
+
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output);
+
+/* called per link on events (i.e. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to a C string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert state id to a C string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
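
Taken together, the entry points above give the display manager a small, non-blocking state machine: it allocates mod_hdcp_get_memory_size() bytes per link, calls mod_hdcp_setup() once, adds displays as cp_desired turns on, and re-enters the module whenever the requested callback or watchdog timer fires. A hedged sketch of that flow, with config, link and display assumed to be already-populated mod_hdcp_config, mod_hdcp_link and mod_hdcp_display structures, and schedule_callback()/schedule_watchdog() standing in for whatever timer mechanism the caller actually uses:

        struct mod_hdcp *hdcp = kzalloc(mod_hdcp_get_memory_size(), GFP_KERNEL);
        struct mod_hdcp_output output = { 0 };
        enum mod_hdcp_status status;

        status = mod_hdcp_setup(hdcp, &config);                 /* once per link */
        if (status == MOD_HDCP_STATUS_SUCCESS)
                status = mod_hdcp_add_display(hdcp, &link, &display, &output);

        /* the module never sleeps; it asks the caller to re-enter it later */
        if (output.callback_needed)
                schedule_callback(output.callback_delay);       /* placeholder */
        if (output.watchdog_timer_needed)
                schedule_watchdog(output.watchdog_timer_delay); /* placeholder */

        /* when a scheduled timer fires, feed the event back into the module */
        status = mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK, &output);
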
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 5b1c9a4c7643..42cbeffac640 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,13 +26,18 @@
#ifndef MOD_INFO_PACKET_H_
#define MOD_INFO_PACKET_H_
+#include "dm_services.h"
#include "mod_shared.h"
-
//Forward Declarations
struct dc_stream_state;
struct dc_info_packet;
+struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet);
+ struct dc_info_packet *info_packet,
+ bool *use_vsc_sdp_for_colorimetry);
+
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index b45f7d65e76a..fe2117904329 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -45,7 +45,6 @@ enum vrr_packet_type {
PACKET_TYPE_VTEM
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
union lut3d_control_flags {
unsigned int raw;
struct {
@@ -104,6 +103,5 @@ struct lut3d_settings {
enum lut3d_control_gamut_map map2;
enum lut3d_control_rotation_mode rotation2;
};
-#endif
#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index bc13c552797f..6a8a056424b8 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -27,8 +27,92 @@
#include "core_types.h"
#include "dc_types.h"
#include "mod_shared.h"
+#include "mod_freesync.h"
+#include "dc.h"
+
+enum vsc_packet_revision {
+ vsc_packet_undefined = 0,
+ //01h = VSC SDP supports only 3D stereo.
+ vsc_packet_rev1 = 1,
+ //02h = 3D stereo + PSR.
+ vsc_packet_rev2 = 2,
+ //03h = 3D stereo + PSR2.
+ vsc_packet_rev3 = 3,
+ //04h = 3D stereo + PSR/PSR2 + Y-coordinate.
+ vsc_packet_rev4 = 4,
+ //05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format
+ vsc_packet_rev5 = 5,
+};
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HF_VSIF_VERSION 1
+
+// VTEM Byte Offset
+#define VTEM_PB0 0
+#define VTEM_PB1 1
+#define VTEM_PB2 2
+#define VTEM_PB3 3
+#define VTEM_PB4 4
+#define VTEM_PB5 5
+#define VTEM_PB6 6
+
+#define VTEM_MD0 7
+#define VTEM_MD1 8
+#define VTEM_MD2 9
+#define VTEM_MD3 10
+
+
+// VTEM Byte Masks
+//PB0
+#define MASK_VTEM_PB0__RESERVED0 0x01
+#define MASK_VTEM_PB0__SYNC 0x02
+#define MASK_VTEM_PB0__VFR 0x04
+#define MASK_VTEM_PB0__AFR 0x08
+#define MASK_VTEM_PB0__DS_TYPE 0x30
+ //0: Periodic pseudo-static EM Data Set
+ //1: Periodic dynamic EM Data Set
+ //2: Unique EM Data Set
+ //3: Reserved
+#define MASK_VTEM_PB0__END 0x40
+#define MASK_VTEM_PB0__NEW 0x80
+
+//PB1
+#define MASK_VTEM_PB1__RESERVED1 0xFF
+
+//PB2
+#define MASK_VTEM_PB2__ORGANIZATION_ID 0xFF
+ //0: This is a Vendor Specific EM Data Set
+ //1: This EM Data Set is defined by This Specification (HDMI 2.1 r102.clean)
+ //2: This EM Data Set is defined by CTA-861-G
+ //3: This EM Data Set is defined by VESA
+//PB3
+#define MASK_VTEM_PB3__DATA_SET_TAG_MSB 0xFF
+//PB4
+#define MASK_VTEM_PB4__DATA_SET_TAG_LSB 0xFF
+//PB5
+#define MASK_VTEM_PB5__DATA_SET_LENGTH_MSB 0xFF
+//PB6
+#define MASK_VTEM_PB6__DATA_SET_LENGTH_LSB 0xFF
+
+
+
+//PB7-27 (20 bytes):
+//PB7 = MD0
+#define MASK_VTEM_MD0__VRR_EN 0x01
+#define MASK_VTEM_MD0__M_CONST 0x02
+#define MASK_VTEM_MD0__RESERVED2 0x0C
+#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
+
+//MD1
+#define MASK_VTEM_MD1__BASE_VFRONT 0xFF
+
+//MD2
+#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
+#define MASK_VTEM_MD2__RB 0x04
+#define MASK_VTEM_MD2__RESERVED3 0xF8
+
+//MD3
+#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
enum ColorimetryRGBDP {
ColorimetryRGB_DP_sRGB = 0,
@@ -46,35 +130,41 @@ enum ColorimetryYCCDP {
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet)
+ struct dc_info_packet *info_packet,
+ bool *use_vsc_sdp_for_colorimetry)
{
- unsigned int vscPacketRevision = 0;
+ unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
unsigned int pixelEncoding = 0;
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
+ /* Initialize to false; if the infopacket is valid, determine later whether
+ * VSC SDP should be used to signal colorimetry format and pixel encoding.
+ */
+ *use_vsc_sdp_for_colorimetry = false;
+
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
- vscPacketRevision = 1;
+ vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
}
/*VSC packet set to 2 when DP revision >= 1.2*/
if (stream->psr_version != 0)
- vscPacketRevision = 2;
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- vscPacketRevision = 5;
+ vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
* supported by this DP display
*/
- if (vscPacketRevision == 0)
+ if (vsc_packet_revision == vsc_packet_undefined)
return;
- if (vscPacketRevision == 0x2) {
+ if (vsc_packet_revision == vsc_packet_rev2) {
/* Secondary-data Packet ID = 0*/
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video
@@ -96,7 +186,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
}
- if (vscPacketRevision == 0x1) {
+ if (vsc_packet_revision == vsc_packet_rev1) {
info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0
info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet
@@ -167,7 +257,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
* the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
* MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
*/
- if (vscPacketRevision == 0x5) {
+ if (vsc_packet_revision == vsc_packet_rev5) {
/* Secondary-data Packet ID = 0 */
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video Stream Configuration packet */
@@ -179,6 +269,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
+ /* If we are using VSC SDP revision 05h, use this to signal for
+ * colorimetry format and pixel encoding. HW should later be
+ * programmed to set MSA MISC1 bit 6 to indicate ignore
+ * colorimetry format and pixel encoding in the MSA.
+ */
+ *use_vsc_sdp_for_colorimetry = true;
+
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
@@ -323,6 +420,102 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
*/
info_packet->sb[18] = 0;
}
+}
+
+/**
+ *****************************************************************************
+ * Function: mod_build_hf_vsif_infopacket
+ *
+ * @brief
+ * Prepare the HDMI Vendor Specific InfoFrame (VSIF), following the HDMI
+ * specification.
+ *
+ * @param [in] stream: stream data used to construct the VSIF (e.g. timing_3d_format)
+ * @param [in] ALLMEnabled: nonzero if ALLM signaling should be included
+ * @param [in] ALLMValue: ALLM bit value to signal when ALLMEnabled is set
+ * @param [out] info_packet: output structure where the VSIF is stored
+ *****************************************************************************
+ */
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+{
+ unsigned int length = 5;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ bool bALLM = (bool)ALLMEnabled;
+ bool bALLMVal = (bool)ALLMValue;
+
+ info_packet->valid = false;
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160
+ && format == TIMING_3D_FORMAT_NONE)
+ hdmi_vic_mode = true;
+
+ if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+ return;
+
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+ if (bALLM) {
+ info_packet->sb[1] = 0xD8;
+ info_packet->sb[2] = 0x5D;
+ info_packet->sb[3] = 0xC4;
+ info_packet->sb[4] = HF_VSIF_VERSION;
+ }
+
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = (uint8_t) (length);
+
+ if (bALLM)
+ info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
}
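
The checksum arithmetic at the end of mod_build_hf_vsif_infopacket is worth restating on its own: sb[0] is chosen so that the byte sum of the three header bytes and the first length body bytes wraps to zero modulo 256. A standalone restatement, using only the dc_info_packet fields already used above:

        /* sb[0] makes (hb0 + hb1 + hb2 + sb[1..length] + sb[0]) == 0 (mod 256) */
        static uint8_t infoframe_checksum(const struct dc_info_packet *p, unsigned int length)
        {
                uint8_t sum = p->hb0 + p->hb1 + p->hb2;
                unsigned int i;

                for (i = 1; i <= length; i++)
                        sum += p->sb[i];

                return (uint8_t)(0x100 - sum);
        }
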
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index b3810b864676..e75a4bb94488 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -66,6 +66,39 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
{ 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */
};
+struct abm_parameters {
+ unsigned char min_reduction;
+ unsigned char max_reduction;
+ unsigned char bright_pos_gain;
+ unsigned char dark_pos_gain;
+ unsigned char brightness_gain;
+ unsigned char contrast_factor;
+ unsigned char deviation_gain;
+ unsigned char min_knee;
+ unsigned char max_knee;
+};
+
+static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
+// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+};
+
+static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
+// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+};
+
+static const struct abm_parameters * const abm_settings[] = {
+ abm_settings_config0,
+ abm_settings_config1,
+};
+
#define NUM_AMBI_LEVEL 5
#define NUM_AGGR_LEVEL 4
#define NUM_POWER_FN_SEGS 8
@@ -82,7 +115,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
- uint16_t flags; /* 0x00 U16 */
+ uint16_t min_abm_backlight; /* 0x00 U16 */
/* parameters for ABM2.0 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
@@ -107,10 +140,10 @@ struct iram_table_v_2 {
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint16_t blRampReduction; /* 0xf7 */
@@ -131,40 +164,43 @@ struct iram_table_v_2_2 {
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
- uint8_t hybridFactor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
- uint8_t contrastFactor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
- uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
- uint8_t pad[29]; /* 0x63 U0.8 */
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
+ uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
+ uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
+ uint16_t min_abm_backlight; /* 0x6b U16 */
+ uint8_t pad[19]; /* 0x6d U0.8 */
/* parameters for crgb conversion */
- uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
- uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
- uint8_t dmcu_state; /* 0xf6 */
-
- uint8_t dummy1; /* 0xf7 */
- uint8_t dummy2; /* 0xf8 */
- uint8_t dummy3; /* 0xf9 */
- uint8_t dummy4; /* 0xfa */
- uint8_t dummy5; /* 0xfb */
- uint8_t dummy6; /* 0xfc */
- uint8_t dummy7; /* 0xfd */
- uint8_t dummy8; /* 0xfe */
- uint8_t dummy9; /* 0xff */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint8_t dummy1; /* 0xf7 */
+ uint8_t dummy2; /* 0xf8 */
+ uint8_t dummy3; /* 0xf9 */
+ uint8_t dummy4; /* 0xfa */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
@@ -236,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
{
unsigned int set = params.set;
- ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain = 0xb3;
ram_table->blRampReduction =
@@ -410,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
ram_table->deviation_gain[0] = 0xb3;
ram_table->deviation_gain[1] = 0xa8;
ram_table->deviation_gain[2] = 0x98;
@@ -501,15 +541,76 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->dark_pos_gain[4][2] = 0x00;
ram_table->dark_pos_gain[4][3] = 0x00;
- ram_table->hybridFactor[0] = 0xff;
- ram_table->hybridFactor[1] = 0xff;
- ram_table->hybridFactor[2] = 0xff;
- ram_table->hybridFactor[3] = 0xc0;
+ ram_table->hybrid_factor[0] = 0xff;
+ ram_table->hybrid_factor[1] = 0xff;
+ ram_table->hybrid_factor[2] = 0xff;
+ ram_table->hybrid_factor[3] = 0xc0;
- ram_table->contrastFactor[0] = 0x99;
- ram_table->contrastFactor[1] = 0x99;
- ram_table->contrastFactor[2] = 0x90;
- ram_table->contrastFactor[3] = 0x80;
+ ram_table->contrast_factor[0] = 0x99;
+ ram_table->contrast_factor[1] = 0x99;
+ ram_table->contrast_factor[2] = 0x90;
+ ram_table->contrast_factor[3] = 0x80;
+
+ ram_table->iir_curve[0] = 0x65;
+ ram_table->iir_curve[1] = 0x65;
+ ram_table->iir_curve[2] = 0x65;
+ ram_table->iir_curve[3] = 0x65;
+ ram_table->iir_curve[4] = 0x65;
+
+ //Gamma 2.2
+ ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
+ ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
+ ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
+ ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
+ ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
+ ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
+ ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
+ ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
+ ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
+ ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
+ ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
+ ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
+ ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
+ ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
+ ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
+ ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
+ ram_table->crgb_slope[0] = cpu_to_be16(0x3609);
+ ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa);
+ ram_table->crgb_slope[2] = cpu_to_be16(0x27ea);
+ ram_table->crgb_slope[3] = cpu_to_be16(0x235d);
+ ram_table->crgb_slope[4] = cpu_to_be16(0x2042);
+ ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3);
+ ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a);
+ ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
+
+ fill_backlight_transform_table_v_2_2(
+ params, ram_table);
+}
+
+void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+{
+ unsigned int i, j;
+ unsigned int set = params.set;
+
+ ram_table->flags = 0x0;
+
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
+ ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
+ ram_table->deviation_gain[i] = abm_settings[set][i].deviation_gain;
+ ram_table->min_knee[i] = abm_settings[set][i].min_knee;
+ ram_table->max_knee[i] = abm_settings[set][i].max_knee;
+
+ for (j = 0; j < NUM_AMBI_LEVEL; j++) {
+ ram_table->min_reduction[j][i] = abm_settings[set][i].min_reduction;
+ ram_table->max_reduction[j][i] = abm_settings[set][i].max_reduction;
+ ram_table->bright_pos_gain[j][i] = abm_settings[set][i].bright_pos_gain;
+ ram_table->dark_pos_gain[j][i] = abm_settings[set][i].dark_pos_gain;
+ }
+ }
ram_table->iir_curve[0] = 0x65;
ram_table->iir_curve[1] = 0x65;
@@ -561,7 +662,16 @@ bool dmcu_load_iram(struct dmcu *dmcu,
memset(&ram_table, 0, sizeof(ram_table));
- if (dmcu->dmcu_version.abm_version == 0x22) {
+ if (dmcu->dmcu_version.abm_version == 0x24) {
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ } else if (dmcu->dmcu_version.abm_version == 0x23) {
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ } else if (dmcu->dmcu_version.abm_version == 0x22) {
fill_iram_v_2_2((struct iram_table_v_2_2 *)ram_table, params);
result = dmcu->funcs->load_iram(
@@ -581,3 +691,4 @@ bool dmcu_load_iram(struct dmcu *dmcu,
return result;
}
+
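
The v2.3 fill loop above replaces the hand-written per-entry assignments of fill_iram_v_2_2 with a table lookup: each aggressiveness level i takes one row of abm_settings[set], and the per-ambient-level arrays simply repeat that row across all NUM_AMBI_LEVEL columns. For example, with set 0 and i = 2, abm_settings_config0[2] supplies min_reduction 0xb0 and max_reduction 0x50 for every ambient level, hybrid_factor 0xc0 (taken from brightness_gain), contrast_factor 0x88, and knee points 0x70/0xa0. One iteration of the loops is equivalent to:

        /* equivalent restatement of one iteration of the fill_iram_v_2_3 loops */
        const struct abm_parameters *row = &abm_settings[set][i];

        ram_table->hybrid_factor[i]   = row->brightness_gain;
        ram_table->contrast_factor[i] = row->contrast_factor;
        for (j = 0; j < NUM_AMBI_LEVEL; j++)
                ram_table->min_reduction[j][i] = row->min_reduction;  /* same row for all ambient levels */
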
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index da5df00fedce..e54157026330 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
unsigned int backlight_lut_array_size;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
+ unsigned int min_abm_backlight;
unsigned int set;
};
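
With min_abm_backlight added, a caller populates the parameter block and hands it to dmcu_load_iram(), which selects the iRAM layout from dmcu->dmcu_version.abm_version (0x23 and 0x24 now route to fill_iram_v_2_3, as in the hunk above). A hedged sketch, assuming dmcu_load_iram() takes the parameters by value, dmcu is an already-initialised struct dmcu pointer, and the backlight lookup-table fields not shown here are filled elsewhere:

        struct dmcu_iram_parameters params = { 0 };
        bool loaded;

        params.set = 0;                         /* row index into the abm_settings tables */
        params.min_abm_backlight = 0x00C8;      /* example floor; stored big-endian in iRAM */
        params.backlight_ramping_reduction = 0;
        params.backlight_ramping_start = 0;
        params.backlight_lut_array_size = 0;    /* no custom backlight curve in this sketch */

        loaded = dmcu_load_iram(dmcu, params);  /* result of programming the DMCU iRAM */
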
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index a0a7211438f2..d655a76bedc6 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -53,7 +53,8 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_VCE,
AMD_IP_BLOCK_TYPE_ACP,
AMD_IP_BLOCK_TYPE_VCN,
- AMD_IP_BLOCK_TYPE_MES
+ AMD_IP_BLOCK_TYPE_MES,
+ AMD_IP_BLOCK_TYPE_JPEG
};
enum amd_clockgating_state {
@@ -99,6 +100,7 @@ enum amd_powergating_state {
#define AMD_CG_SUPPORT_IH_CG (1 << 27)
#define AMD_CG_SUPPORT_ATHUB_LS (1 << 28)
#define AMD_CG_SUPPORT_ATHUB_MGCG (1 << 29)
+#define AMD_CG_SUPPORT_JPEG_MGCG (1 << 30)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
@@ -117,6 +119,7 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_VCN (1 << 14)
#define AMD_PG_SUPPORT_VCN_DPG (1 << 15)
#define AMD_PG_SUPPORT_ATHUB (1 << 16)
+#define AMD_PG_SUPPORT_JPEG (1 << 17)
enum PP_FEATURE_MASK {
PP_SCLK_DPM_MASK = 0x1,
@@ -142,6 +145,9 @@ enum PP_FEATURE_MASK {
enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
+ DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
+ DC_PSR_MASK = 0x8,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/arct_ip_offset.h b/drivers/gpu/drm/amd/include/arct_ip_offset.h
new file mode 100644
index 000000000000..a7791a9e1f90
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/arct_ip_offset.h
@@ -0,0 +1,1650 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _arct_ip_offset_HEADER
+#define _arct_ip_offset_HEADER
+
+#define MAX_INSTANCE 8
+#define MAX_SEGMENT 6
+
+
+struct IP_BASE_INSTANCE
+{
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE
+{
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0x00012460, 0x00408C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE ={ { { { 0x000120C0, 0x00016C00, 0x00401800, 0, 0, 0 } },
+ { { 0x000120E0, 0x00016E00, 0x00401C00, 0, 0, 0 } },
+ { { 0x00012100, 0x00017000, 0x00402000, 0, 0, 0 } },
+ { { 0x00012120, 0x00017200, 0x00402400, 0, 0, 0 } },
+ { { 0x000136C0, 0x0001B000, 0x0042D800, 0, 0, 0 } },
+ { { 0x00013720, 0x0001B200, 0x0042E400, 0, 0, 0 } },
+ { { 0x000125E0, 0x00017E00, 0x0040BC00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x000125C0, 0x0040B800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE ={ { { { 0x000120A0, 0x00017400, 0x00401400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0x00012160, 0x00402C00, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x00012520, 0x0040A400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MMHUB_BASE ={ { { { 0x00012440, 0x0001A000, 0x00408800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x00012D80, 0x0041B000 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x00012500, 0x0040A000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE PCIE0_BASE ={ { { { 0x000128C0, 0x00411800, 0x04440000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0x00012540, 0x0040A800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0x00012560, 0x0040AC00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA2_BASE ={ { { { 0x00013760, 0x0001E000, 0x0042EC00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA3_BASE ={ { { { 0x00013780, 0x0001E400, 0x0042F000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA4_BASE ={ { { { 0x000137A0, 0x0001E800, 0x0042F400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA5_BASE ={ { { { 0x000137C0, 0x0001EC00, 0x0042F800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA6_BASE ={ { { { 0x000137E0, 0x0001F000, 0x0042FC00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA7_BASE ={ { { { 0x00013800, 0x0001F400, 0x00430000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UMC_BASE ={ { { { 0x000132C0, 0x00014000, 0x00425800, 0, 0, 0 } },
+ { { 0x000132E0, 0x00054000, 0x00425C00, 0, 0, 0 } },
+ { { 0x00013300, 0x00094000, 0x00426000, 0, 0, 0 } },
+ { { 0x00013320, 0x000D4000, 0x00426400, 0, 0, 0 } },
+ { { 0x00013340, 0x00114000, 0x00426800, 0, 0, 0 } },
+ { { 0x00013360, 0x00154000, 0x00426C00, 0, 0, 0 } },
+ { { 0x00013380, 0x00194000, 0x00427000, 0, 0, 0 } },
+ { { 0x000133A0, 0x001D4000, 0x00427400, 0, 0, 0 } } } };
+static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0x00012180, 0x00403000, 0, 0 } },
+ { { 0x00007A00, 0x00009000, 0x000136E0, 0x0042DC00, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DBGU_IO_BASE ={ { { { 0x000001E0, 0x000125A0, 0x0040B400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+
+
+
+#define ATHUB_BASE__INST0_SEG0 0x00000C20
+#define ATHUB_BASE__INST0_SEG1 0x00012460
+#define ATHUB_BASE__INST0_SEG2 0x00408C00
+#define ATHUB_BASE__INST0_SEG3 0
+#define ATHUB_BASE__INST0_SEG4 0
+#define ATHUB_BASE__INST0_SEG5 0
+
+#define ATHUB_BASE__INST1_SEG0 0
+#define ATHUB_BASE__INST1_SEG1 0
+#define ATHUB_BASE__INST1_SEG2 0
+#define ATHUB_BASE__INST1_SEG3 0
+#define ATHUB_BASE__INST1_SEG4 0
+#define ATHUB_BASE__INST1_SEG5 0
+
+#define ATHUB_BASE__INST2_SEG0 0
+#define ATHUB_BASE__INST2_SEG1 0
+#define ATHUB_BASE__INST2_SEG2 0
+#define ATHUB_BASE__INST2_SEG3 0
+#define ATHUB_BASE__INST2_SEG4 0
+#define ATHUB_BASE__INST2_SEG5 0
+
+#define ATHUB_BASE__INST3_SEG0 0
+#define ATHUB_BASE__INST3_SEG1 0
+#define ATHUB_BASE__INST3_SEG2 0
+#define ATHUB_BASE__INST3_SEG3 0
+#define ATHUB_BASE__INST3_SEG4 0
+#define ATHUB_BASE__INST3_SEG5 0
+
+#define ATHUB_BASE__INST4_SEG0 0
+#define ATHUB_BASE__INST4_SEG1 0
+#define ATHUB_BASE__INST4_SEG2 0
+#define ATHUB_BASE__INST4_SEG3 0
+#define ATHUB_BASE__INST4_SEG4 0
+#define ATHUB_BASE__INST4_SEG5 0
+
+#define ATHUB_BASE__INST5_SEG0 0
+#define ATHUB_BASE__INST5_SEG1 0
+#define ATHUB_BASE__INST5_SEG2 0
+#define ATHUB_BASE__INST5_SEG3 0
+#define ATHUB_BASE__INST5_SEG4 0
+#define ATHUB_BASE__INST5_SEG5 0
+
+#define ATHUB_BASE__INST6_SEG0 0
+#define ATHUB_BASE__INST6_SEG1 0
+#define ATHUB_BASE__INST6_SEG2 0
+#define ATHUB_BASE__INST6_SEG3 0
+#define ATHUB_BASE__INST6_SEG4 0
+#define ATHUB_BASE__INST6_SEG5 0
+
+#define ATHUB_BASE__INST7_SEG0 0
+#define ATHUB_BASE__INST7_SEG1 0
+#define ATHUB_BASE__INST7_SEG2 0
+#define ATHUB_BASE__INST7_SEG3 0
+#define ATHUB_BASE__INST7_SEG4 0
+#define ATHUB_BASE__INST7_SEG5 0
+
+#define CLK_BASE__INST0_SEG0 0x000120C0
+#define CLK_BASE__INST0_SEG1 0x00016C00
+#define CLK_BASE__INST0_SEG2 0x00401800
+#define CLK_BASE__INST0_SEG3 0
+#define CLK_BASE__INST0_SEG4 0
+#define CLK_BASE__INST0_SEG5 0
+
+#define CLK_BASE__INST1_SEG0 0x000120E0
+#define CLK_BASE__INST1_SEG1 0x00016E00
+#define CLK_BASE__INST1_SEG2 0x00401C00
+#define CLK_BASE__INST1_SEG3 0
+#define CLK_BASE__INST1_SEG4 0
+#define CLK_BASE__INST1_SEG5 0
+
+#define CLK_BASE__INST2_SEG0 0x00012100
+#define CLK_BASE__INST2_SEG1 0x00017000
+#define CLK_BASE__INST2_SEG2 0x00402000
+#define CLK_BASE__INST2_SEG3 0
+#define CLK_BASE__INST2_SEG4 0
+#define CLK_BASE__INST2_SEG5 0
+
+#define CLK_BASE__INST3_SEG0 0x00012120
+#define CLK_BASE__INST3_SEG1 0x00017200
+#define CLK_BASE__INST3_SEG2 0x00402400
+#define CLK_BASE__INST3_SEG3 0
+#define CLK_BASE__INST3_SEG4 0
+#define CLK_BASE__INST3_SEG5 0
+
+#define CLK_BASE__INST4_SEG0 0x000136C0
+#define CLK_BASE__INST4_SEG1 0x0001B000
+#define CLK_BASE__INST4_SEG2 0x0042D800
+#define CLK_BASE__INST4_SEG3 0
+#define CLK_BASE__INST4_SEG4 0
+#define CLK_BASE__INST4_SEG5 0
+
+#define CLK_BASE__INST5_SEG0 0x00013720
+#define CLK_BASE__INST5_SEG1 0x0001B200
+#define CLK_BASE__INST5_SEG2 0x0042E400
+#define CLK_BASE__INST5_SEG3 0
+#define CLK_BASE__INST5_SEG4 0
+#define CLK_BASE__INST5_SEG5 0
+
+#define CLK_BASE__INST6_SEG0 0x000125E0
+#define CLK_BASE__INST6_SEG1 0x00017E00
+#define CLK_BASE__INST6_SEG2 0x0040BC00
+#define CLK_BASE__INST6_SEG3 0
+#define CLK_BASE__INST6_SEG4 0
+#define CLK_BASE__INST6_SEG5 0
+
+#define CLK_BASE__INST7_SEG0 0
+#define CLK_BASE__INST7_SEG1 0
+#define CLK_BASE__INST7_SEG2 0
+#define CLK_BASE__INST7_SEG3 0
+#define CLK_BASE__INST7_SEG4 0
+#define CLK_BASE__INST7_SEG5 0
+
+#define DF_BASE__INST0_SEG0 0x00007000
+#define DF_BASE__INST0_SEG1 0x000125C0
+#define DF_BASE__INST0_SEG2 0x0040B800
+#define DF_BASE__INST0_SEG3 0
+#define DF_BASE__INST0_SEG4 0
+#define DF_BASE__INST0_SEG5 0
+
+#define DF_BASE__INST1_SEG0 0
+#define DF_BASE__INST1_SEG1 0
+#define DF_BASE__INST1_SEG2 0
+#define DF_BASE__INST1_SEG3 0
+#define DF_BASE__INST1_SEG4 0
+#define DF_BASE__INST1_SEG5 0
+
+#define DF_BASE__INST2_SEG0 0
+#define DF_BASE__INST2_SEG1 0
+#define DF_BASE__INST2_SEG2 0
+#define DF_BASE__INST2_SEG3 0
+#define DF_BASE__INST2_SEG4 0
+#define DF_BASE__INST2_SEG5 0
+
+#define DF_BASE__INST3_SEG0 0
+#define DF_BASE__INST3_SEG1 0
+#define DF_BASE__INST3_SEG2 0
+#define DF_BASE__INST3_SEG3 0
+#define DF_BASE__INST3_SEG4 0
+#define DF_BASE__INST3_SEG5 0
+
+#define DF_BASE__INST4_SEG0 0
+#define DF_BASE__INST4_SEG1 0
+#define DF_BASE__INST4_SEG2 0
+#define DF_BASE__INST4_SEG3 0
+#define DF_BASE__INST4_SEG4 0
+#define DF_BASE__INST4_SEG5 0
+
+#define DF_BASE__INST5_SEG0 0
+#define DF_BASE__INST5_SEG1 0
+#define DF_BASE__INST5_SEG2 0
+#define DF_BASE__INST5_SEG3 0
+#define DF_BASE__INST5_SEG4 0
+#define DF_BASE__INST5_SEG5 0
+
+#define DF_BASE__INST6_SEG0 0
+#define DF_BASE__INST6_SEG1 0
+#define DF_BASE__INST6_SEG2 0
+#define DF_BASE__INST6_SEG3 0
+#define DF_BASE__INST6_SEG4 0
+#define DF_BASE__INST6_SEG5 0
+
+#define DF_BASE__INST7_SEG0 0
+#define DF_BASE__INST7_SEG1 0
+#define DF_BASE__INST7_SEG2 0
+#define DF_BASE__INST7_SEG3 0
+#define DF_BASE__INST7_SEG4 0
+#define DF_BASE__INST7_SEG5 0
+
+#define FUSE_BASE__INST0_SEG0 0x000120A0
+#define FUSE_BASE__INST0_SEG1 0x00017400
+#define FUSE_BASE__INST0_SEG2 0x00401400
+#define FUSE_BASE__INST0_SEG3 0
+#define FUSE_BASE__INST0_SEG4 0
+#define FUSE_BASE__INST0_SEG5 0
+
+#define FUSE_BASE__INST1_SEG0 0
+#define FUSE_BASE__INST1_SEG1 0
+#define FUSE_BASE__INST1_SEG2 0
+#define FUSE_BASE__INST1_SEG3 0
+#define FUSE_BASE__INST1_SEG4 0
+#define FUSE_BASE__INST1_SEG5 0
+
+#define FUSE_BASE__INST2_SEG0 0
+#define FUSE_BASE__INST2_SEG1 0
+#define FUSE_BASE__INST2_SEG2 0
+#define FUSE_BASE__INST2_SEG3 0
+#define FUSE_BASE__INST2_SEG4 0
+#define FUSE_BASE__INST2_SEG5 0
+
+#define FUSE_BASE__INST3_SEG0 0
+#define FUSE_BASE__INST3_SEG1 0
+#define FUSE_BASE__INST3_SEG2 0
+#define FUSE_BASE__INST3_SEG3 0
+#define FUSE_BASE__INST3_SEG4 0
+#define FUSE_BASE__INST3_SEG5 0
+
+#define FUSE_BASE__INST4_SEG0 0
+#define FUSE_BASE__INST4_SEG1 0
+#define FUSE_BASE__INST4_SEG2 0
+#define FUSE_BASE__INST4_SEG3 0
+#define FUSE_BASE__INST4_SEG4 0
+#define FUSE_BASE__INST4_SEG5 0
+
+#define FUSE_BASE__INST5_SEG0 0
+#define FUSE_BASE__INST5_SEG1 0
+#define FUSE_BASE__INST5_SEG2 0
+#define FUSE_BASE__INST5_SEG3 0
+#define FUSE_BASE__INST5_SEG4 0
+#define FUSE_BASE__INST5_SEG5 0
+
+#define FUSE_BASE__INST6_SEG0 0
+#define FUSE_BASE__INST6_SEG1 0
+#define FUSE_BASE__INST6_SEG2 0
+#define FUSE_BASE__INST6_SEG3 0
+#define FUSE_BASE__INST6_SEG4 0
+#define FUSE_BASE__INST6_SEG5 0
+
+#define FUSE_BASE__INST7_SEG0 0
+#define FUSE_BASE__INST7_SEG1 0
+#define FUSE_BASE__INST7_SEG2 0
+#define FUSE_BASE__INST7_SEG3 0
+#define FUSE_BASE__INST7_SEG4 0
+#define FUSE_BASE__INST7_SEG5 0
+
+#define GC_BASE__INST0_SEG0 0x00002000
+#define GC_BASE__INST0_SEG1 0x0000A000
+#define GC_BASE__INST0_SEG2 0x00012160
+#define GC_BASE__INST0_SEG3 0x00402C00
+#define GC_BASE__INST0_SEG4 0
+#define GC_BASE__INST0_SEG5 0
+
+#define GC_BASE__INST1_SEG0 0
+#define GC_BASE__INST1_SEG1 0
+#define GC_BASE__INST1_SEG2 0
+#define GC_BASE__INST1_SEG3 0
+#define GC_BASE__INST1_SEG4 0
+#define GC_BASE__INST1_SEG5 0
+
+#define GC_BASE__INST2_SEG0 0
+#define GC_BASE__INST2_SEG1 0
+#define GC_BASE__INST2_SEG2 0
+#define GC_BASE__INST2_SEG3 0
+#define GC_BASE__INST2_SEG4 0
+#define GC_BASE__INST2_SEG5 0
+
+#define GC_BASE__INST3_SEG0 0
+#define GC_BASE__INST3_SEG1 0
+#define GC_BASE__INST3_SEG2 0
+#define GC_BASE__INST3_SEG3 0
+#define GC_BASE__INST3_SEG4 0
+#define GC_BASE__INST3_SEG5 0
+
+#define GC_BASE__INST4_SEG0 0
+#define GC_BASE__INST4_SEG1 0
+#define GC_BASE__INST4_SEG2 0
+#define GC_BASE__INST4_SEG3 0
+#define GC_BASE__INST4_SEG4 0
+#define GC_BASE__INST4_SEG5 0
+
+#define GC_BASE__INST5_SEG0 0
+#define GC_BASE__INST5_SEG1 0
+#define GC_BASE__INST5_SEG2 0
+#define GC_BASE__INST5_SEG3 0
+#define GC_BASE__INST5_SEG4 0
+#define GC_BASE__INST5_SEG5 0
+
+#define GC_BASE__INST6_SEG0 0
+#define GC_BASE__INST6_SEG1 0
+#define GC_BASE__INST6_SEG2 0
+#define GC_BASE__INST6_SEG3 0
+#define GC_BASE__INST6_SEG4 0
+#define GC_BASE__INST6_SEG5 0
+
+#define GC_BASE__INST7_SEG0 0
+#define GC_BASE__INST7_SEG1 0
+#define GC_BASE__INST7_SEG2 0
+#define GC_BASE__INST7_SEG3 0
+#define GC_BASE__INST7_SEG4 0
+#define GC_BASE__INST7_SEG5 0
+
+#define HDP_BASE__INST0_SEG0 0x00000F20
+#define HDP_BASE__INST0_SEG1 0x00012520
+#define HDP_BASE__INST0_SEG2 0x0040A400
+#define HDP_BASE__INST0_SEG3 0
+#define HDP_BASE__INST0_SEG4 0
+#define HDP_BASE__INST0_SEG5 0
+
+#define HDP_BASE__INST1_SEG0 0
+#define HDP_BASE__INST1_SEG1 0
+#define HDP_BASE__INST1_SEG2 0
+#define HDP_BASE__INST1_SEG3 0
+#define HDP_BASE__INST1_SEG4 0
+#define HDP_BASE__INST1_SEG5 0
+
+#define HDP_BASE__INST2_SEG0 0
+#define HDP_BASE__INST2_SEG1 0
+#define HDP_BASE__INST2_SEG2 0
+#define HDP_BASE__INST2_SEG3 0
+#define HDP_BASE__INST2_SEG4 0
+#define HDP_BASE__INST2_SEG5 0
+
+#define HDP_BASE__INST3_SEG0 0
+#define HDP_BASE__INST3_SEG1 0
+#define HDP_BASE__INST3_SEG2 0
+#define HDP_BASE__INST3_SEG3 0
+#define HDP_BASE__INST3_SEG4 0
+#define HDP_BASE__INST3_SEG5 0
+
+#define HDP_BASE__INST4_SEG0 0
+#define HDP_BASE__INST4_SEG1 0
+#define HDP_BASE__INST4_SEG2 0
+#define HDP_BASE__INST4_SEG3 0
+#define HDP_BASE__INST4_SEG4 0
+#define HDP_BASE__INST4_SEG5 0
+
+#define HDP_BASE__INST5_SEG0 0
+#define HDP_BASE__INST5_SEG1 0
+#define HDP_BASE__INST5_SEG2 0
+#define HDP_BASE__INST5_SEG3 0
+#define HDP_BASE__INST5_SEG4 0
+#define HDP_BASE__INST5_SEG5 0
+
+#define HDP_BASE__INST6_SEG0 0
+#define HDP_BASE__INST6_SEG1 0
+#define HDP_BASE__INST6_SEG2 0
+#define HDP_BASE__INST6_SEG3 0
+#define HDP_BASE__INST6_SEG4 0
+#define HDP_BASE__INST6_SEG5 0
+
+#define HDP_BASE__INST7_SEG0 0
+#define HDP_BASE__INST7_SEG1 0
+#define HDP_BASE__INST7_SEG2 0
+#define HDP_BASE__INST7_SEG3 0
+#define HDP_BASE__INST7_SEG4 0
+#define HDP_BASE__INST7_SEG5 0
+
+#define MMHUB_BASE__INST0_SEG0 0x00012440
+#define MMHUB_BASE__INST0_SEG1 0x0001A000
+#define MMHUB_BASE__INST0_SEG2 0x00408800
+#define MMHUB_BASE__INST0_SEG3 0
+#define MMHUB_BASE__INST0_SEG4 0
+#define MMHUB_BASE__INST0_SEG5 0
+
+#define MMHUB_BASE__INST1_SEG0 0
+#define MMHUB_BASE__INST1_SEG1 0
+#define MMHUB_BASE__INST1_SEG2 0
+#define MMHUB_BASE__INST1_SEG3 0
+#define MMHUB_BASE__INST1_SEG4 0
+#define MMHUB_BASE__INST1_SEG5 0
+
+#define MMHUB_BASE__INST2_SEG0 0
+#define MMHUB_BASE__INST2_SEG1 0
+#define MMHUB_BASE__INST2_SEG2 0
+#define MMHUB_BASE__INST2_SEG3 0
+#define MMHUB_BASE__INST2_SEG4 0
+#define MMHUB_BASE__INST2_SEG5 0
+
+#define MMHUB_BASE__INST3_SEG0 0
+#define MMHUB_BASE__INST3_SEG1 0
+#define MMHUB_BASE__INST3_SEG2 0
+#define MMHUB_BASE__INST3_SEG3 0
+#define MMHUB_BASE__INST3_SEG4 0
+#define MMHUB_BASE__INST3_SEG5 0
+
+#define MMHUB_BASE__INST4_SEG0 0
+#define MMHUB_BASE__INST4_SEG1 0
+#define MMHUB_BASE__INST4_SEG2 0
+#define MMHUB_BASE__INST4_SEG3 0
+#define MMHUB_BASE__INST4_SEG4 0
+#define MMHUB_BASE__INST4_SEG5 0
+
+#define MMHUB_BASE__INST5_SEG0 0
+#define MMHUB_BASE__INST5_SEG1 0
+#define MMHUB_BASE__INST5_SEG2 0
+#define MMHUB_BASE__INST5_SEG3 0
+#define MMHUB_BASE__INST5_SEG4 0
+#define MMHUB_BASE__INST5_SEG5 0
+
+#define MMHUB_BASE__INST6_SEG0 0
+#define MMHUB_BASE__INST6_SEG1 0
+#define MMHUB_BASE__INST6_SEG2 0
+#define MMHUB_BASE__INST6_SEG3 0
+#define MMHUB_BASE__INST6_SEG4 0
+#define MMHUB_BASE__INST6_SEG5 0
+
+#define MMHUB_BASE__INST7_SEG0 0
+#define MMHUB_BASE__INST7_SEG1 0
+#define MMHUB_BASE__INST7_SEG2 0
+#define MMHUB_BASE__INST7_SEG3 0
+#define MMHUB_BASE__INST7_SEG4 0
+#define MMHUB_BASE__INST7_SEG5 0
+
+#define MP0_BASE__INST0_SEG0 0x00013FE0
+#define MP0_BASE__INST0_SEG1 0x00016000
+#define MP0_BASE__INST0_SEG2 0x0043FC00
+#define MP0_BASE__INST0_SEG3 0x00DC0000
+#define MP0_BASE__INST0_SEG4 0x00E00000
+#define MP0_BASE__INST0_SEG5 0x00E40000
+
+#define MP0_BASE__INST1_SEG0 0
+#define MP0_BASE__INST1_SEG1 0
+#define MP0_BASE__INST1_SEG2 0
+#define MP0_BASE__INST1_SEG3 0
+#define MP0_BASE__INST1_SEG4 0
+#define MP0_BASE__INST1_SEG5 0
+
+#define MP0_BASE__INST2_SEG0 0
+#define MP0_BASE__INST2_SEG1 0
+#define MP0_BASE__INST2_SEG2 0
+#define MP0_BASE__INST2_SEG3 0
+#define MP0_BASE__INST2_SEG4 0
+#define MP0_BASE__INST2_SEG5 0
+
+#define MP0_BASE__INST3_SEG0 0
+#define MP0_BASE__INST3_SEG1 0
+#define MP0_BASE__INST3_SEG2 0
+#define MP0_BASE__INST3_SEG3 0
+#define MP0_BASE__INST3_SEG4 0
+#define MP0_BASE__INST3_SEG5 0
+
+#define MP0_BASE__INST4_SEG0 0
+#define MP0_BASE__INST4_SEG1 0
+#define MP0_BASE__INST4_SEG2 0
+#define MP0_BASE__INST4_SEG3 0
+#define MP0_BASE__INST4_SEG4 0
+#define MP0_BASE__INST4_SEG5 0
+
+#define MP0_BASE__INST5_SEG0 0
+#define MP0_BASE__INST5_SEG1 0
+#define MP0_BASE__INST5_SEG2 0
+#define MP0_BASE__INST5_SEG3 0
+#define MP0_BASE__INST5_SEG4 0
+#define MP0_BASE__INST5_SEG5 0
+
+#define MP0_BASE__INST6_SEG0 0
+#define MP0_BASE__INST6_SEG1 0
+#define MP0_BASE__INST6_SEG2 0
+#define MP0_BASE__INST6_SEG3 0
+#define MP0_BASE__INST6_SEG4 0
+#define MP0_BASE__INST6_SEG5 0
+
+#define MP0_BASE__INST7_SEG0 0
+#define MP0_BASE__INST7_SEG1 0
+#define MP0_BASE__INST7_SEG2 0
+#define MP0_BASE__INST7_SEG3 0
+#define MP0_BASE__INST7_SEG4 0
+#define MP0_BASE__INST7_SEG5 0
+
+#define MP1_BASE__INST0_SEG0 0x00012020
+#define MP1_BASE__INST0_SEG1 0x00016200
+#define MP1_BASE__INST0_SEG2 0x00400400
+#define MP1_BASE__INST0_SEG3 0x00E80000
+#define MP1_BASE__INST0_SEG4 0x00EC0000
+#define MP1_BASE__INST0_SEG5 0x00F00000
+
+#define MP1_BASE__INST1_SEG0 0
+#define MP1_BASE__INST1_SEG1 0
+#define MP1_BASE__INST1_SEG2 0
+#define MP1_BASE__INST1_SEG3 0
+#define MP1_BASE__INST1_SEG4 0
+#define MP1_BASE__INST1_SEG5 0
+
+#define MP1_BASE__INST2_SEG0 0
+#define MP1_BASE__INST2_SEG1 0
+#define MP1_BASE__INST2_SEG2 0
+#define MP1_BASE__INST2_SEG3 0
+#define MP1_BASE__INST2_SEG4 0
+#define MP1_BASE__INST2_SEG5 0
+
+#define MP1_BASE__INST3_SEG0 0
+#define MP1_BASE__INST3_SEG1 0
+#define MP1_BASE__INST3_SEG2 0
+#define MP1_BASE__INST3_SEG3 0
+#define MP1_BASE__INST3_SEG4 0
+#define MP1_BASE__INST3_SEG5 0
+
+#define MP1_BASE__INST4_SEG0 0
+#define MP1_BASE__INST4_SEG1 0
+#define MP1_BASE__INST4_SEG2 0
+#define MP1_BASE__INST4_SEG3 0
+#define MP1_BASE__INST4_SEG4 0
+#define MP1_BASE__INST4_SEG5 0
+
+#define MP1_BASE__INST5_SEG0 0
+#define MP1_BASE__INST5_SEG1 0
+#define MP1_BASE__INST5_SEG2 0
+#define MP1_BASE__INST5_SEG3 0
+#define MP1_BASE__INST5_SEG4 0
+#define MP1_BASE__INST5_SEG5 0
+
+#define MP1_BASE__INST6_SEG0 0
+#define MP1_BASE__INST6_SEG1 0
+#define MP1_BASE__INST6_SEG2 0
+#define MP1_BASE__INST6_SEG3 0
+#define MP1_BASE__INST6_SEG4 0
+#define MP1_BASE__INST6_SEG5 0
+
+#define MP1_BASE__INST7_SEG0 0
+#define MP1_BASE__INST7_SEG1 0
+#define MP1_BASE__INST7_SEG2 0
+#define MP1_BASE__INST7_SEG3 0
+#define MP1_BASE__INST7_SEG4 0
+#define MP1_BASE__INST7_SEG5 0
+
+#define NBIF0_BASE__INST0_SEG0 0x00000000
+#define NBIF0_BASE__INST0_SEG1 0x00000014
+#define NBIF0_BASE__INST0_SEG2 0x00000D20
+#define NBIF0_BASE__INST0_SEG3 0x00010400
+#define NBIF0_BASE__INST0_SEG4 0x00012D80
+#define NBIF0_BASE__INST0_SEG5 0x0041B000
+
+#define NBIF0_BASE__INST1_SEG0 0
+#define NBIF0_BASE__INST1_SEG1 0
+#define NBIF0_BASE__INST1_SEG2 0
+#define NBIF0_BASE__INST1_SEG3 0
+#define NBIF0_BASE__INST1_SEG4 0
+#define NBIF0_BASE__INST1_SEG5 0
+
+#define NBIF0_BASE__INST2_SEG0 0
+#define NBIF0_BASE__INST2_SEG1 0
+#define NBIF0_BASE__INST2_SEG2 0
+#define NBIF0_BASE__INST2_SEG3 0
+#define NBIF0_BASE__INST2_SEG4 0
+#define NBIF0_BASE__INST2_SEG5 0
+
+#define NBIF0_BASE__INST3_SEG0 0
+#define NBIF0_BASE__INST3_SEG1 0
+#define NBIF0_BASE__INST3_SEG2 0
+#define NBIF0_BASE__INST3_SEG3 0
+#define NBIF0_BASE__INST3_SEG4 0
+#define NBIF0_BASE__INST3_SEG5 0
+
+#define NBIF0_BASE__INST4_SEG0 0
+#define NBIF0_BASE__INST4_SEG1 0
+#define NBIF0_BASE__INST4_SEG2 0
+#define NBIF0_BASE__INST4_SEG3 0
+#define NBIF0_BASE__INST4_SEG4 0
+#define NBIF0_BASE__INST4_SEG5 0
+
+#define NBIF0_BASE__INST5_SEG0 0
+#define NBIF0_BASE__INST5_SEG1 0
+#define NBIF0_BASE__INST5_SEG2 0
+#define NBIF0_BASE__INST5_SEG3 0
+#define NBIF0_BASE__INST5_SEG4 0
+#define NBIF0_BASE__INST5_SEG5 0
+
+#define NBIF0_BASE__INST6_SEG0 0
+#define NBIF0_BASE__INST6_SEG1 0
+#define NBIF0_BASE__INST6_SEG2 0
+#define NBIF0_BASE__INST6_SEG3 0
+#define NBIF0_BASE__INST6_SEG4 0
+#define NBIF0_BASE__INST6_SEG5 0
+
+#define NBIF0_BASE__INST7_SEG0 0
+#define NBIF0_BASE__INST7_SEG1 0
+#define NBIF0_BASE__INST7_SEG2 0
+#define NBIF0_BASE__INST7_SEG3 0
+#define NBIF0_BASE__INST7_SEG4 0
+#define NBIF0_BASE__INST7_SEG5 0
+
+#define OSSSYS_BASE__INST0_SEG0 0x000010A0
+#define OSSSYS_BASE__INST0_SEG1 0x00012500
+#define OSSSYS_BASE__INST0_SEG2 0x0040A000
+#define OSSSYS_BASE__INST0_SEG3 0
+#define OSSSYS_BASE__INST0_SEG4 0
+#define OSSSYS_BASE__INST0_SEG5 0
+
+#define OSSSYS_BASE__INST1_SEG0 0
+#define OSSSYS_BASE__INST1_SEG1 0
+#define OSSSYS_BASE__INST1_SEG2 0
+#define OSSSYS_BASE__INST1_SEG3 0
+#define OSSSYS_BASE__INST1_SEG4 0
+#define OSSSYS_BASE__INST1_SEG5 0
+
+#define OSSSYS_BASE__INST2_SEG0 0
+#define OSSSYS_BASE__INST2_SEG1 0
+#define OSSSYS_BASE__INST2_SEG2 0
+#define OSSSYS_BASE__INST2_SEG3 0
+#define OSSSYS_BASE__INST2_SEG4 0
+#define OSSSYS_BASE__INST2_SEG5 0
+
+#define OSSSYS_BASE__INST3_SEG0 0
+#define OSSSYS_BASE__INST3_SEG1 0
+#define OSSSYS_BASE__INST3_SEG2 0
+#define OSSSYS_BASE__INST3_SEG3 0
+#define OSSSYS_BASE__INST3_SEG4 0
+#define OSSSYS_BASE__INST3_SEG5 0
+
+#define OSSSYS_BASE__INST4_SEG0 0
+#define OSSSYS_BASE__INST4_SEG1 0
+#define OSSSYS_BASE__INST4_SEG2 0
+#define OSSSYS_BASE__INST4_SEG3 0
+#define OSSSYS_BASE__INST4_SEG4 0
+#define OSSSYS_BASE__INST4_SEG5 0
+
+#define OSSSYS_BASE__INST5_SEG0 0
+#define OSSSYS_BASE__INST5_SEG1 0
+#define OSSSYS_BASE__INST5_SEG2 0
+#define OSSSYS_BASE__INST5_SEG3 0
+#define OSSSYS_BASE__INST5_SEG4 0
+#define OSSSYS_BASE__INST5_SEG5 0
+
+#define OSSSYS_BASE__INST6_SEG0 0
+#define OSSSYS_BASE__INST6_SEG1 0
+#define OSSSYS_BASE__INST6_SEG2 0
+#define OSSSYS_BASE__INST6_SEG3 0
+#define OSSSYS_BASE__INST6_SEG4 0
+#define OSSSYS_BASE__INST6_SEG5 0
+
+#define OSSSYS_BASE__INST7_SEG0 0
+#define OSSSYS_BASE__INST7_SEG1 0
+#define OSSSYS_BASE__INST7_SEG2 0
+#define OSSSYS_BASE__INST7_SEG3 0
+#define OSSSYS_BASE__INST7_SEG4 0
+#define OSSSYS_BASE__INST7_SEG5 0
+
+#define PCIE0_BASE__INST0_SEG0 0x000128C0
+#define PCIE0_BASE__INST0_SEG1 0x00411800
+#define PCIE0_BASE__INST0_SEG2 0x04440000
+#define PCIE0_BASE__INST0_SEG3 0
+#define PCIE0_BASE__INST0_SEG4 0
+#define PCIE0_BASE__INST0_SEG5 0
+
+#define PCIE0_BASE__INST1_SEG0 0
+#define PCIE0_BASE__INST1_SEG1 0
+#define PCIE0_BASE__INST1_SEG2 0
+#define PCIE0_BASE__INST1_SEG3 0
+#define PCIE0_BASE__INST1_SEG4 0
+#define PCIE0_BASE__INST1_SEG5 0
+
+#define PCIE0_BASE__INST2_SEG0 0
+#define PCIE0_BASE__INST2_SEG1 0
+#define PCIE0_BASE__INST2_SEG2 0
+#define PCIE0_BASE__INST2_SEG3 0
+#define PCIE0_BASE__INST2_SEG4 0
+#define PCIE0_BASE__INST2_SEG5 0
+
+#define PCIE0_BASE__INST3_SEG0 0
+#define PCIE0_BASE__INST3_SEG1 0
+#define PCIE0_BASE__INST3_SEG2 0
+#define PCIE0_BASE__INST3_SEG3 0
+#define PCIE0_BASE__INST3_SEG4 0
+#define PCIE0_BASE__INST3_SEG5 0
+
+#define PCIE0_BASE__INST4_SEG0 0
+#define PCIE0_BASE__INST4_SEG1 0
+#define PCIE0_BASE__INST4_SEG2 0
+#define PCIE0_BASE__INST4_SEG3 0
+#define PCIE0_BASE__INST4_SEG4 0
+#define PCIE0_BASE__INST4_SEG5 0
+
+#define PCIE0_BASE__INST5_SEG0 0
+#define PCIE0_BASE__INST5_SEG1 0
+#define PCIE0_BASE__INST5_SEG2 0
+#define PCIE0_BASE__INST5_SEG3 0
+#define PCIE0_BASE__INST5_SEG4 0
+#define PCIE0_BASE__INST5_SEG5 0
+
+#define PCIE0_BASE__INST6_SEG0 0
+#define PCIE0_BASE__INST6_SEG1 0
+#define PCIE0_BASE__INST6_SEG2 0
+#define PCIE0_BASE__INST6_SEG3 0
+#define PCIE0_BASE__INST6_SEG4 0
+#define PCIE0_BASE__INST6_SEG5 0
+
+#define PCIE0_BASE__INST7_SEG0 0
+#define PCIE0_BASE__INST7_SEG1 0
+#define PCIE0_BASE__INST7_SEG2 0
+#define PCIE0_BASE__INST7_SEG3 0
+#define PCIE0_BASE__INST7_SEG4 0
+#define PCIE0_BASE__INST7_SEG5 0
+
+#define SDMA0_BASE__INST0_SEG0 0x00001260
+#define SDMA0_BASE__INST0_SEG1 0x00012540
+#define SDMA0_BASE__INST0_SEG2 0x0040A800
+#define SDMA0_BASE__INST0_SEG3 0
+#define SDMA0_BASE__INST0_SEG4 0
+#define SDMA0_BASE__INST0_SEG5 0
+
+#define SDMA0_BASE__INST1_SEG0 0
+#define SDMA0_BASE__INST1_SEG1 0
+#define SDMA0_BASE__INST1_SEG2 0
+#define SDMA0_BASE__INST1_SEG3 0
+#define SDMA0_BASE__INST1_SEG4 0
+#define SDMA0_BASE__INST1_SEG5 0
+
+#define SDMA0_BASE__INST2_SEG0 0
+#define SDMA0_BASE__INST2_SEG1 0
+#define SDMA0_BASE__INST2_SEG2 0
+#define SDMA0_BASE__INST2_SEG3 0
+#define SDMA0_BASE__INST2_SEG4 0
+#define SDMA0_BASE__INST2_SEG5 0
+
+#define SDMA0_BASE__INST3_SEG0 0
+#define SDMA0_BASE__INST3_SEG1 0
+#define SDMA0_BASE__INST3_SEG2 0
+#define SDMA0_BASE__INST3_SEG3 0
+#define SDMA0_BASE__INST3_SEG4 0
+#define SDMA0_BASE__INST3_SEG5 0
+
+#define SDMA0_BASE__INST4_SEG0 0
+#define SDMA0_BASE__INST4_SEG1 0
+#define SDMA0_BASE__INST4_SEG2 0
+#define SDMA0_BASE__INST4_SEG3 0
+#define SDMA0_BASE__INST4_SEG4 0
+#define SDMA0_BASE__INST4_SEG5 0
+
+#define SDMA0_BASE__INST5_SEG0 0
+#define SDMA0_BASE__INST5_SEG1 0
+#define SDMA0_BASE__INST5_SEG2 0
+#define SDMA0_BASE__INST5_SEG3 0
+#define SDMA0_BASE__INST5_SEG4 0
+#define SDMA0_BASE__INST5_SEG5 0
+
+#define SDMA0_BASE__INST6_SEG0 0
+#define SDMA0_BASE__INST6_SEG1 0
+#define SDMA0_BASE__INST6_SEG2 0
+#define SDMA0_BASE__INST6_SEG3 0
+#define SDMA0_BASE__INST6_SEG4 0
+#define SDMA0_BASE__INST6_SEG5 0
+
+#define SDMA1_BASE__INST0_SEG0 0x00001860
+#define SDMA1_BASE__INST0_SEG1 0x00012560
+#define SDMA1_BASE__INST0_SEG2 0x0040AC00
+#define SDMA1_BASE__INST0_SEG3 0
+#define SDMA1_BASE__INST0_SEG4 0
+#define SDMA1_BASE__INST0_SEG5 0
+
+#define SDMA1_BASE__INST1_SEG0 0
+#define SDMA1_BASE__INST1_SEG1 0
+#define SDMA1_BASE__INST1_SEG2 0
+#define SDMA1_BASE__INST1_SEG3 0
+#define SDMA1_BASE__INST1_SEG4 0
+#define SDMA1_BASE__INST1_SEG5 0
+
+#define SDMA1_BASE__INST2_SEG0 0
+#define SDMA1_BASE__INST2_SEG1 0
+#define SDMA1_BASE__INST2_SEG2 0
+#define SDMA1_BASE__INST2_SEG3 0
+#define SDMA1_BASE__INST2_SEG4 0
+#define SDMA1_BASE__INST2_SEG5 0
+
+#define SDMA1_BASE__INST3_SEG0 0
+#define SDMA1_BASE__INST3_SEG1 0
+#define SDMA1_BASE__INST3_SEG2 0
+#define SDMA1_BASE__INST3_SEG3 0
+#define SDMA1_BASE__INST3_SEG4 0
+#define SDMA1_BASE__INST3_SEG5 0
+
+#define SDMA1_BASE__INST4_SEG0 0
+#define SDMA1_BASE__INST4_SEG1 0
+#define SDMA1_BASE__INST4_SEG2 0
+#define SDMA1_BASE__INST4_SEG3 0
+#define SDMA1_BASE__INST4_SEG4 0
+#define SDMA1_BASE__INST4_SEG5 0
+
+#define SDMA1_BASE__INST5_SEG0 0
+#define SDMA1_BASE__INST5_SEG1 0
+#define SDMA1_BASE__INST5_SEG2 0
+#define SDMA1_BASE__INST5_SEG3 0
+#define SDMA1_BASE__INST5_SEG4 0
+#define SDMA1_BASE__INST5_SEG5 0
+
+#define SDMA1_BASE__INST6_SEG0 0
+#define SDMA1_BASE__INST6_SEG1 0
+#define SDMA1_BASE__INST6_SEG2 0
+#define SDMA1_BASE__INST6_SEG3 0
+#define SDMA1_BASE__INST6_SEG4 0
+#define SDMA1_BASE__INST6_SEG5 0
+
+#define SDMA2_BASE__INST0_SEG0 0x00013760
+#define SDMA2_BASE__INST0_SEG1 0x0001E000
+#define SDMA2_BASE__INST0_SEG2 0x0042EC00
+#define SDMA2_BASE__INST0_SEG3 0
+#define SDMA2_BASE__INST0_SEG4 0
+#define SDMA2_BASE__INST0_SEG5 0
+
+#define SDMA2_BASE__INST1_SEG0 0
+#define SDMA2_BASE__INST1_SEG1 0
+#define SDMA2_BASE__INST1_SEG2 0
+#define SDMA2_BASE__INST1_SEG3 0
+#define SDMA2_BASE__INST1_SEG4 0
+#define SDMA2_BASE__INST1_SEG5 0
+
+#define SDMA2_BASE__INST2_SEG0 0
+#define SDMA2_BASE__INST2_SEG1 0
+#define SDMA2_BASE__INST2_SEG2 0
+#define SDMA2_BASE__INST2_SEG3 0
+#define SDMA2_BASE__INST2_SEG4 0
+#define SDMA2_BASE__INST2_SEG5 0
+
+#define SDMA2_BASE__INST3_SEG0 0
+#define SDMA2_BASE__INST3_SEG1 0
+#define SDMA2_BASE__INST3_SEG2 0
+#define SDMA2_BASE__INST3_SEG3 0
+#define SDMA2_BASE__INST3_SEG4 0
+#define SDMA2_BASE__INST3_SEG5 0
+
+#define SDMA2_BASE__INST4_SEG0 0
+#define SDMA2_BASE__INST4_SEG1 0
+#define SDMA2_BASE__INST4_SEG2 0
+#define SDMA2_BASE__INST4_SEG3 0
+#define SDMA2_BASE__INST4_SEG4 0
+#define SDMA2_BASE__INST4_SEG5 0
+
+#define SDMA2_BASE__INST5_SEG0 0
+#define SDMA2_BASE__INST5_SEG1 0
+#define SDMA2_BASE__INST5_SEG2 0
+#define SDMA2_BASE__INST5_SEG3 0
+#define SDMA2_BASE__INST5_SEG4 0
+#define SDMA2_BASE__INST5_SEG5 0
+
+#define SDMA2_BASE__INST6_SEG0 0
+#define SDMA2_BASE__INST6_SEG1 0
+#define SDMA2_BASE__INST6_SEG2 0
+#define SDMA2_BASE__INST6_SEG3 0
+#define SDMA2_BASE__INST6_SEG4 0
+#define SDMA2_BASE__INST6_SEG5 0
+
+#define SDMA3_BASE__INST0_SEG0 0x00013780
+#define SDMA3_BASE__INST0_SEG1 0x0001E400
+#define SDMA3_BASE__INST0_SEG2 0x0042F000
+#define SDMA3_BASE__INST0_SEG3 0
+#define SDMA3_BASE__INST0_SEG4 0
+#define SDMA3_BASE__INST0_SEG5 0
+
+#define SDMA3_BASE__INST1_SEG0 0
+#define SDMA3_BASE__INST1_SEG1 0
+#define SDMA3_BASE__INST1_SEG2 0
+#define SDMA3_BASE__INST1_SEG3 0
+#define SDMA3_BASE__INST1_SEG4 0
+#define SDMA3_BASE__INST1_SEG5 0
+
+#define SDMA3_BASE__INST2_SEG0 0
+#define SDMA3_BASE__INST2_SEG1 0
+#define SDMA3_BASE__INST2_SEG2 0
+#define SDMA3_BASE__INST2_SEG3 0
+#define SDMA3_BASE__INST2_SEG4 0
+#define SDMA3_BASE__INST2_SEG5 0
+
+#define SDMA3_BASE__INST3_SEG0 0
+#define SDMA3_BASE__INST3_SEG1 0
+#define SDMA3_BASE__INST3_SEG2 0
+#define SDMA3_BASE__INST3_SEG3 0
+#define SDMA3_BASE__INST3_SEG4 0
+#define SDMA3_BASE__INST3_SEG5 0
+
+#define SDMA3_BASE__INST4_SEG0 0
+#define SDMA3_BASE__INST4_SEG1 0
+#define SDMA3_BASE__INST4_SEG2 0
+#define SDMA3_BASE__INST4_SEG3 0
+#define SDMA3_BASE__INST4_SEG4 0
+#define SDMA3_BASE__INST4_SEG5 0
+
+#define SDMA3_BASE__INST5_SEG0 0
+#define SDMA3_BASE__INST5_SEG1 0
+#define SDMA3_BASE__INST5_SEG2 0
+#define SDMA3_BASE__INST5_SEG3 0
+#define SDMA3_BASE__INST5_SEG4 0
+#define SDMA3_BASE__INST5_SEG5 0
+
+#define SDMA3_BASE__INST6_SEG0 0
+#define SDMA3_BASE__INST6_SEG1 0
+#define SDMA3_BASE__INST6_SEG2 0
+#define SDMA3_BASE__INST6_SEG3 0
+#define SDMA3_BASE__INST6_SEG4 0
+#define SDMA3_BASE__INST6_SEG5 0
+
+#define SDMA4_BASE__INST0_SEG0 0x000137A0
+#define SDMA4_BASE__INST0_SEG1 0x0001E800
+#define SDMA4_BASE__INST0_SEG2 0x0042F400
+#define SDMA4_BASE__INST0_SEG3 0
+#define SDMA4_BASE__INST0_SEG4 0
+#define SDMA4_BASE__INST0_SEG5 0
+
+#define SDMA4_BASE__INST1_SEG0 0
+#define SDMA4_BASE__INST1_SEG1 0
+#define SDMA4_BASE__INST1_SEG2 0
+#define SDMA4_BASE__INST1_SEG3 0
+#define SDMA4_BASE__INST1_SEG4 0
+#define SDMA4_BASE__INST1_SEG5 0
+
+#define SDMA4_BASE__INST2_SEG0 0
+#define SDMA4_BASE__INST2_SEG1 0
+#define SDMA4_BASE__INST2_SEG2 0
+#define SDMA4_BASE__INST2_SEG3 0
+#define SDMA4_BASE__INST2_SEG4 0
+#define SDMA4_BASE__INST2_SEG5 0
+
+#define SDMA4_BASE__INST3_SEG0 0
+#define SDMA4_BASE__INST3_SEG1 0
+#define SDMA4_BASE__INST3_SEG2 0
+#define SDMA4_BASE__INST3_SEG3 0
+#define SDMA4_BASE__INST3_SEG4 0
+#define SDMA4_BASE__INST3_SEG5 0
+
+#define SDMA4_BASE__INST4_SEG0 0
+#define SDMA4_BASE__INST4_SEG1 0
+#define SDMA4_BASE__INST4_SEG2 0
+#define SDMA4_BASE__INST4_SEG3 0
+#define SDMA4_BASE__INST4_SEG4 0
+#define SDMA4_BASE__INST4_SEG5 0
+
+#define SDMA4_BASE__INST5_SEG0 0
+#define SDMA4_BASE__INST5_SEG1 0
+#define SDMA4_BASE__INST5_SEG2 0
+#define SDMA4_BASE__INST5_SEG3 0
+#define SDMA4_BASE__INST5_SEG4 0
+#define SDMA4_BASE__INST5_SEG5 0
+
+#define SDMA4_BASE__INST6_SEG0 0
+#define SDMA4_BASE__INST6_SEG1 0
+#define SDMA4_BASE__INST6_SEG2 0
+#define SDMA4_BASE__INST6_SEG3 0
+#define SDMA4_BASE__INST6_SEG4 0
+#define SDMA4_BASE__INST6_SEG5 0
+
+#define SDMA5_BASE__INST0_SEG0 0x000137C0
+#define SDMA5_BASE__INST0_SEG1 0x0001EC00
+#define SDMA5_BASE__INST0_SEG2 0x0042F800
+#define SDMA5_BASE__INST0_SEG3 0
+#define SDMA5_BASE__INST0_SEG4 0
+#define SDMA5_BASE__INST0_SEG5 0
+
+#define SDMA5_BASE__INST1_SEG0 0
+#define SDMA5_BASE__INST1_SEG1 0
+#define SDMA5_BASE__INST1_SEG2 0
+#define SDMA5_BASE__INST1_SEG3 0
+#define SDMA5_BASE__INST1_SEG4 0
+#define SDMA5_BASE__INST1_SEG5 0
+
+#define SDMA5_BASE__INST2_SEG0 0
+#define SDMA5_BASE__INST2_SEG1 0
+#define SDMA5_BASE__INST2_SEG2 0
+#define SDMA5_BASE__INST2_SEG3 0
+#define SDMA5_BASE__INST2_SEG4 0
+#define SDMA5_BASE__INST2_SEG5 0
+
+#define SDMA5_BASE__INST3_SEG0 0
+#define SDMA5_BASE__INST3_SEG1 0
+#define SDMA5_BASE__INST3_SEG2 0
+#define SDMA5_BASE__INST3_SEG3 0
+#define SDMA5_BASE__INST3_SEG4 0
+#define SDMA5_BASE__INST3_SEG5 0
+
+#define SDMA5_BASE__INST4_SEG0 0
+#define SDMA5_BASE__INST4_SEG1 0
+#define SDMA5_BASE__INST4_SEG2 0
+#define SDMA5_BASE__INST4_SEG3 0
+#define SDMA5_BASE__INST4_SEG4 0
+#define SDMA5_BASE__INST4_SEG5 0
+
+#define SDMA5_BASE__INST5_SEG0 0
+#define SDMA5_BASE__INST5_SEG1 0
+#define SDMA5_BASE__INST5_SEG2 0
+#define SDMA5_BASE__INST5_SEG3 0
+#define SDMA5_BASE__INST5_SEG4 0
+#define SDMA5_BASE__INST5_SEG5 0
+
+#define SDMA5_BASE__INST6_SEG0 0
+#define SDMA5_BASE__INST6_SEG1 0
+#define SDMA5_BASE__INST6_SEG2 0
+#define SDMA5_BASE__INST6_SEG3 0
+#define SDMA5_BASE__INST6_SEG4 0
+#define SDMA5_BASE__INST6_SEG5 0
+
+#define SDMA6_BASE__INST0_SEG0 0x000137E0
+#define SDMA6_BASE__INST0_SEG1 0x0001F000
+#define SDMA6_BASE__INST0_SEG2 0x0042FC00
+#define SDMA6_BASE__INST0_SEG3 0
+#define SDMA6_BASE__INST0_SEG4 0
+#define SDMA6_BASE__INST0_SEG5 0
+
+#define SDMA6_BASE__INST1_SEG0 0
+#define SDMA6_BASE__INST1_SEG1 0
+#define SDMA6_BASE__INST1_SEG2 0
+#define SDMA6_BASE__INST1_SEG3 0
+#define SDMA6_BASE__INST1_SEG4 0
+#define SDMA6_BASE__INST1_SEG5 0
+
+#define SDMA6_BASE__INST2_SEG0 0
+#define SDMA6_BASE__INST2_SEG1 0
+#define SDMA6_BASE__INST2_SEG2 0
+#define SDMA6_BASE__INST2_SEG3 0
+#define SDMA6_BASE__INST2_SEG4 0
+#define SDMA6_BASE__INST2_SEG5 0
+
+#define SDMA6_BASE__INST3_SEG0 0
+#define SDMA6_BASE__INST3_SEG1 0
+#define SDMA6_BASE__INST3_SEG2 0
+#define SDMA6_BASE__INST3_SEG3 0
+#define SDMA6_BASE__INST3_SEG4 0
+#define SDMA6_BASE__INST3_SEG5 0
+
+#define SDMA6_BASE__INST4_SEG0 0
+#define SDMA6_BASE__INST4_SEG1 0
+#define SDMA6_BASE__INST4_SEG2 0
+#define SDMA6_BASE__INST4_SEG3 0
+#define SDMA6_BASE__INST4_SEG4 0
+#define SDMA6_BASE__INST4_SEG5 0
+
+#define SDMA6_BASE__INST5_SEG0 0
+#define SDMA6_BASE__INST5_SEG1 0
+#define SDMA6_BASE__INST5_SEG2 0
+#define SDMA6_BASE__INST5_SEG3 0
+#define SDMA6_BASE__INST5_SEG4 0
+#define SDMA6_BASE__INST5_SEG5 0
+
+#define SDMA6_BASE__INST6_SEG0 0
+#define SDMA6_BASE__INST6_SEG1 0
+#define SDMA6_BASE__INST6_SEG2 0
+#define SDMA6_BASE__INST6_SEG3 0
+#define SDMA6_BASE__INST6_SEG4 0
+#define SDMA6_BASE__INST6_SEG5 0
+
+#define SDMA7_BASE__INST0_SEG0 0x00013800
+#define SDMA7_BASE__INST0_SEG1 0x0001F400
+#define SDMA7_BASE__INST0_SEG2 0x00430000
+#define SDMA7_BASE__INST0_SEG3 0
+#define SDMA7_BASE__INST0_SEG4 0
+#define SDMA7_BASE__INST0_SEG5 0
+
+#define SDMA7_BASE__INST1_SEG0 0
+#define SDMA7_BASE__INST1_SEG1 0
+#define SDMA7_BASE__INST1_SEG2 0
+#define SDMA7_BASE__INST1_SEG3 0
+#define SDMA7_BASE__INST1_SEG4 0
+#define SDMA7_BASE__INST1_SEG5 0
+
+#define SDMA7_BASE__INST2_SEG0 0
+#define SDMA7_BASE__INST2_SEG1 0
+#define SDMA7_BASE__INST2_SEG2 0
+#define SDMA7_BASE__INST2_SEG3 0
+#define SDMA7_BASE__INST2_SEG4 0
+#define SDMA7_BASE__INST2_SEG5 0
+
+#define SDMA7_BASE__INST3_SEG0 0
+#define SDMA7_BASE__INST3_SEG1 0
+#define SDMA7_BASE__INST3_SEG2 0
+#define SDMA7_BASE__INST3_SEG3 0
+#define SDMA7_BASE__INST3_SEG4 0
+#define SDMA7_BASE__INST3_SEG5 0
+
+#define SDMA7_BASE__INST4_SEG0 0
+#define SDMA7_BASE__INST4_SEG1 0
+#define SDMA7_BASE__INST4_SEG2 0
+#define SDMA7_BASE__INST4_SEG3 0
+#define SDMA7_BASE__INST4_SEG4 0
+#define SDMA7_BASE__INST4_SEG5 0
+
+#define SDMA7_BASE__INST5_SEG0 0
+#define SDMA7_BASE__INST5_SEG1 0
+#define SDMA7_BASE__INST5_SEG2 0
+#define SDMA7_BASE__INST5_SEG3 0
+#define SDMA7_BASE__INST5_SEG4 0
+#define SDMA7_BASE__INST5_SEG5 0
+
+#define SDMA7_BASE__INST6_SEG0 0
+#define SDMA7_BASE__INST6_SEG1 0
+#define SDMA7_BASE__INST6_SEG2 0
+#define SDMA7_BASE__INST6_SEG3 0
+#define SDMA7_BASE__INST6_SEG4 0
+#define SDMA7_BASE__INST6_SEG5 0
+
+#define SMUIO_BASE__INST0_SEG0 0x00012080
+#define SMUIO_BASE__INST0_SEG1 0x00016800
+#define SMUIO_BASE__INST0_SEG2 0x00016A00
+#define SMUIO_BASE__INST0_SEG3 0x00401000
+#define SMUIO_BASE__INST0_SEG4 0x00440000
+#define SMUIO_BASE__INST0_SEG5 0
+
+#define SMUIO_BASE__INST1_SEG0 0
+#define SMUIO_BASE__INST1_SEG1 0
+#define SMUIO_BASE__INST1_SEG2 0
+#define SMUIO_BASE__INST1_SEG3 0
+#define SMUIO_BASE__INST1_SEG4 0
+#define SMUIO_BASE__INST1_SEG5 0
+
+#define SMUIO_BASE__INST2_SEG0 0
+#define SMUIO_BASE__INST2_SEG1 0
+#define SMUIO_BASE__INST2_SEG2 0
+#define SMUIO_BASE__INST2_SEG3 0
+#define SMUIO_BASE__INST2_SEG4 0
+#define SMUIO_BASE__INST2_SEG5 0
+
+#define SMUIO_BASE__INST3_SEG0 0
+#define SMUIO_BASE__INST3_SEG1 0
+#define SMUIO_BASE__INST3_SEG2 0
+#define SMUIO_BASE__INST3_SEG3 0
+#define SMUIO_BASE__INST3_SEG4 0
+#define SMUIO_BASE__INST3_SEG5 0
+
+#define SMUIO_BASE__INST4_SEG0 0
+#define SMUIO_BASE__INST4_SEG1 0
+#define SMUIO_BASE__INST4_SEG2 0
+#define SMUIO_BASE__INST4_SEG3 0
+#define SMUIO_BASE__INST4_SEG4 0
+#define SMUIO_BASE__INST4_SEG5 0
+
+#define SMUIO_BASE__INST5_SEG0 0
+#define SMUIO_BASE__INST5_SEG1 0
+#define SMUIO_BASE__INST5_SEG2 0
+#define SMUIO_BASE__INST5_SEG3 0
+#define SMUIO_BASE__INST5_SEG4 0
+#define SMUIO_BASE__INST5_SEG5 0
+
+#define SMUIO_BASE__INST6_SEG0 0
+#define SMUIO_BASE__INST6_SEG1 0
+#define SMUIO_BASE__INST6_SEG2 0
+#define SMUIO_BASE__INST6_SEG3 0
+#define SMUIO_BASE__INST6_SEG4 0
+#define SMUIO_BASE__INST6_SEG5 0
+
+#define SMUIO_BASE__INST7_SEG0 0
+#define SMUIO_BASE__INST7_SEG1 0
+#define SMUIO_BASE__INST7_SEG2 0
+#define SMUIO_BASE__INST7_SEG3 0
+#define SMUIO_BASE__INST7_SEG4 0
+#define SMUIO_BASE__INST7_SEG5 0
+
+#define THM_BASE__INST0_SEG0 0x00012060
+#define THM_BASE__INST0_SEG1 0x00016600
+#define THM_BASE__INST0_SEG2 0x00400C00
+#define THM_BASE__INST0_SEG3 0
+#define THM_BASE__INST0_SEG4 0
+#define THM_BASE__INST0_SEG5 0
+
+#define THM_BASE__INST1_SEG0 0
+#define THM_BASE__INST1_SEG1 0
+#define THM_BASE__INST1_SEG2 0
+#define THM_BASE__INST1_SEG3 0
+#define THM_BASE__INST1_SEG4 0
+#define THM_BASE__INST1_SEG5 0
+
+#define THM_BASE__INST2_SEG0 0
+#define THM_BASE__INST2_SEG1 0
+#define THM_BASE__INST2_SEG2 0
+#define THM_BASE__INST2_SEG3 0
+#define THM_BASE__INST2_SEG4 0
+#define THM_BASE__INST2_SEG5 0
+
+#define THM_BASE__INST3_SEG0 0
+#define THM_BASE__INST3_SEG1 0
+#define THM_BASE__INST3_SEG2 0
+#define THM_BASE__INST3_SEG3 0
+#define THM_BASE__INST3_SEG4 0
+#define THM_BASE__INST3_SEG5 0
+
+#define THM_BASE__INST4_SEG0 0
+#define THM_BASE__INST4_SEG1 0
+#define THM_BASE__INST4_SEG2 0
+#define THM_BASE__INST4_SEG3 0
+#define THM_BASE__INST4_SEG4 0
+#define THM_BASE__INST4_SEG5 0
+
+#define THM_BASE__INST5_SEG0 0
+#define THM_BASE__INST5_SEG1 0
+#define THM_BASE__INST5_SEG2 0
+#define THM_BASE__INST5_SEG3 0
+#define THM_BASE__INST5_SEG4 0
+#define THM_BASE__INST5_SEG5 0
+
+#define THM_BASE__INST6_SEG0 0
+#define THM_BASE__INST6_SEG1 0
+#define THM_BASE__INST6_SEG2 0
+#define THM_BASE__INST6_SEG3 0
+#define THM_BASE__INST6_SEG4 0
+#define THM_BASE__INST6_SEG5 0
+
+#define THM_BASE__INST7_SEG0 0
+#define THM_BASE__INST7_SEG1 0
+#define THM_BASE__INST7_SEG2 0
+#define THM_BASE__INST7_SEG3 0
+#define THM_BASE__INST7_SEG4 0
+#define THM_BASE__INST7_SEG5 0
+
+#define UMC_BASE__INST0_SEG0 0x000132C0
+#define UMC_BASE__INST0_SEG1 0x00014000
+#define UMC_BASE__INST0_SEG2 0x00425800
+#define UMC_BASE__INST0_SEG3 0
+#define UMC_BASE__INST0_SEG4 0
+#define UMC_BASE__INST0_SEG5 0
+
+#define UMC_BASE__INST1_SEG0 0x000132E0
+#define UMC_BASE__INST1_SEG1 0x00054000
+#define UMC_BASE__INST1_SEG2 0x00425C00
+#define UMC_BASE__INST1_SEG3 0
+#define UMC_BASE__INST1_SEG4 0
+#define UMC_BASE__INST1_SEG5 0
+
+#define UMC_BASE__INST2_SEG0 0x00013300
+#define UMC_BASE__INST2_SEG1 0x00094000
+#define UMC_BASE__INST2_SEG2 0x00426000
+#define UMC_BASE__INST2_SEG3 0
+#define UMC_BASE__INST2_SEG4 0
+#define UMC_BASE__INST2_SEG5 0
+
+#define UMC_BASE__INST3_SEG0 0x00013320
+#define UMC_BASE__INST3_SEG1 0x000D4000
+#define UMC_BASE__INST3_SEG2 0x00426400
+#define UMC_BASE__INST3_SEG3 0
+#define UMC_BASE__INST3_SEG4 0
+#define UMC_BASE__INST3_SEG5 0
+
+#define UMC_BASE__INST4_SEG0 0x00013340
+#define UMC_BASE__INST4_SEG1 0x00114000
+#define UMC_BASE__INST4_SEG2 0x00426800
+#define UMC_BASE__INST4_SEG3 0
+#define UMC_BASE__INST4_SEG4 0
+#define UMC_BASE__INST4_SEG5 0
+
+#define UMC_BASE__INST5_SEG0 0x00013360
+#define UMC_BASE__INST5_SEG1 0x00154000
+#define UMC_BASE__INST5_SEG2 0x00426C00
+#define UMC_BASE__INST5_SEG3 0
+#define UMC_BASE__INST5_SEG4 0
+#define UMC_BASE__INST5_SEG5 0
+
+#define UMC_BASE__INST6_SEG0 0x00013380
+#define UMC_BASE__INST6_SEG1 0x00194000
+#define UMC_BASE__INST6_SEG2 0x00427000
+#define UMC_BASE__INST6_SEG3 0
+#define UMC_BASE__INST6_SEG4 0
+#define UMC_BASE__INST6_SEG5 0
+
+#define UMC_BASE__INST7_SEG0 0x000133A0
+#define UMC_BASE__INST7_SEG1 0x001D4000
+#define UMC_BASE__INST7_SEG2 0x00427400
+#define UMC_BASE__INST7_SEG3 0
+#define UMC_BASE__INST7_SEG4 0
+#define UMC_BASE__INST7_SEG5 0
+
+#define UVD_BASE__INST0_SEG0 0x00007800
+#define UVD_BASE__INST0_SEG1 0x00007E00
+#define UVD_BASE__INST0_SEG2 0x00012180
+#define UVD_BASE__INST0_SEG3 0x00403000
+#define UVD_BASE__INST0_SEG4 0
+#define UVD_BASE__INST0_SEG5 0
+
+#define UVD_BASE__INST1_SEG0 0x00007A00
+#define UVD_BASE__INST1_SEG1 0x00009000
+#define UVD_BASE__INST1_SEG2 0x000136E0
+#define UVD_BASE__INST1_SEG3 0x0042DC00
+#define UVD_BASE__INST1_SEG4 0
+#define UVD_BASE__INST1_SEG5 0
+
+#define UVD_BASE__INST2_SEG0 0
+#define UVD_BASE__INST2_SEG1 0
+#define UVD_BASE__INST2_SEG2 0
+#define UVD_BASE__INST2_SEG3 0
+#define UVD_BASE__INST2_SEG4 0
+#define UVD_BASE__INST2_SEG5 0
+
+#define UVD_BASE__INST3_SEG0 0
+#define UVD_BASE__INST3_SEG1 0
+#define UVD_BASE__INST3_SEG2 0
+#define UVD_BASE__INST3_SEG3 0
+#define UVD_BASE__INST3_SEG4 0
+#define UVD_BASE__INST3_SEG5 0
+
+#define UVD_BASE__INST4_SEG0 0
+#define UVD_BASE__INST4_SEG1 0
+#define UVD_BASE__INST4_SEG2 0
+#define UVD_BASE__INST4_SEG3 0
+#define UVD_BASE__INST4_SEG4 0
+#define UVD_BASE__INST4_SEG5 0
+
+#define UVD_BASE__INST5_SEG0 0
+#define UVD_BASE__INST5_SEG1 0
+#define UVD_BASE__INST5_SEG2 0
+#define UVD_BASE__INST5_SEG3 0
+#define UVD_BASE__INST5_SEG4 0
+#define UVD_BASE__INST5_SEG5 0
+
+#define UVD_BASE__INST6_SEG0 0
+#define UVD_BASE__INST6_SEG1 0
+#define UVD_BASE__INST6_SEG2 0
+#define UVD_BASE__INST6_SEG3 0
+#define UVD_BASE__INST6_SEG4 0
+#define UVD_BASE__INST6_SEG5 0
+
+#define UVD_BASE__INST7_SEG0 0
+#define UVD_BASE__INST7_SEG1 0
+#define UVD_BASE__INST7_SEG2 0
+#define UVD_BASE__INST7_SEG3 0
+#define UVD_BASE__INST7_SEG4 0
+#define UVD_BASE__INST7_SEG5 0
+
+#define DBGU_IO_BASE__INST0_SEG0 0x000001E0
+#define DBGU_IO_BASE__INST0_SEG1 0x000125A0
+#define DBGU_IO_BASE__INST0_SEG2 0x0040B400
+#define DBGU_IO_BASE__INST0_SEG3 0
+#define DBGU_IO_BASE__INST0_SEG4 0
+#define DBGU_IO_BASE__INST0_SEG5 0
+
+#define DBGU_IO_BASE__INST1_SEG0 0
+#define DBGU_IO_BASE__INST1_SEG1 0
+#define DBGU_IO_BASE__INST1_SEG2 0
+#define DBGU_IO_BASE__INST1_SEG3 0
+#define DBGU_IO_BASE__INST1_SEG4 0
+#define DBGU_IO_BASE__INST1_SEG5 0
+
+#define DBGU_IO_BASE__INST2_SEG0 0
+#define DBGU_IO_BASE__INST2_SEG1 0
+#define DBGU_IO_BASE__INST2_SEG2 0
+#define DBGU_IO_BASE__INST2_SEG3 0
+#define DBGU_IO_BASE__INST2_SEG4 0
+#define DBGU_IO_BASE__INST2_SEG5 0
+
+#define DBGU_IO_BASE__INST3_SEG0 0
+#define DBGU_IO_BASE__INST3_SEG1 0
+#define DBGU_IO_BASE__INST3_SEG2 0
+#define DBGU_IO_BASE__INST3_SEG3 0
+#define DBGU_IO_BASE__INST3_SEG4 0
+#define DBGU_IO_BASE__INST3_SEG5 0
+
+#define DBGU_IO_BASE__INST4_SEG0 0
+#define DBGU_IO_BASE__INST4_SEG1 0
+#define DBGU_IO_BASE__INST4_SEG2 0
+#define DBGU_IO_BASE__INST4_SEG3 0
+#define DBGU_IO_BASE__INST4_SEG4 0
+#define DBGU_IO_BASE__INST4_SEG5 0
+
+#define DBGU_IO_BASE__INST5_SEG0 0
+#define DBGU_IO_BASE__INST5_SEG1 0
+#define DBGU_IO_BASE__INST5_SEG2 0
+#define DBGU_IO_BASE__INST5_SEG3 0
+#define DBGU_IO_BASE__INST5_SEG4 0
+#define DBGU_IO_BASE__INST5_SEG5 0
+
+#define DBGU_IO_BASE__INST6_SEG0 0
+#define DBGU_IO_BASE__INST6_SEG1 0
+#define DBGU_IO_BASE__INST6_SEG2 0
+#define DBGU_IO_BASE__INST6_SEG3 0
+#define DBGU_IO_BASE__INST6_SEG4 0
+#define DBGU_IO_BASE__INST6_SEG5 0
+
+#define DBGU_IO_BASE__INST7_SEG0 0
+#define DBGU_IO_BASE__INST7_SEG1 0
+#define DBGU_IO_BASE__INST7_SEG2 0
+#define DBGU_IO_BASE__INST7_SEG3 0
+#define DBGU_IO_BASE__INST7_SEG4 0
+#define DBGU_IO_BASE__INST7_SEG5 0
+
+#define RSMU_BASE__INST0_SEG0 0x00012000
+#define RSMU_BASE__INST0_SEG1 0
+#define RSMU_BASE__INST0_SEG2 0
+#define RSMU_BASE__INST0_SEG3 0
+#define RSMU_BASE__INST0_SEG4 0
+#define RSMU_BASE__INST0_SEG5 0
+
+#define RSMU_BASE__INST1_SEG0 0
+#define RSMU_BASE__INST1_SEG1 0
+#define RSMU_BASE__INST1_SEG2 0
+#define RSMU_BASE__INST1_SEG3 0
+#define RSMU_BASE__INST1_SEG4 0
+#define RSMU_BASE__INST1_SEG5 0
+
+#define RSMU_BASE__INST2_SEG0 0
+#define RSMU_BASE__INST2_SEG1 0
+#define RSMU_BASE__INST2_SEG2 0
+#define RSMU_BASE__INST2_SEG3 0
+#define RSMU_BASE__INST2_SEG4 0
+#define RSMU_BASE__INST2_SEG5 0
+
+#define RSMU_BASE__INST3_SEG0 0
+#define RSMU_BASE__INST3_SEG1 0
+#define RSMU_BASE__INST3_SEG2 0
+#define RSMU_BASE__INST3_SEG3 0
+#define RSMU_BASE__INST3_SEG4 0
+#define RSMU_BASE__INST3_SEG5 0
+
+#define RSMU_BASE__INST4_SEG0 0
+#define RSMU_BASE__INST4_SEG1 0
+#define RSMU_BASE__INST4_SEG2 0
+#define RSMU_BASE__INST4_SEG3 0
+#define RSMU_BASE__INST4_SEG4 0
+#define RSMU_BASE__INST4_SEG5 0
+
+#define RSMU_BASE__INST5_SEG0 0
+#define RSMU_BASE__INST5_SEG1 0
+#define RSMU_BASE__INST5_SEG2 0
+#define RSMU_BASE__INST5_SEG3 0
+#define RSMU_BASE__INST5_SEG4 0
+#define RSMU_BASE__INST5_SEG5 0
+
+#define RSMU_BASE__INST6_SEG0 0
+#define RSMU_BASE__INST6_SEG1 0
+#define RSMU_BASE__INST6_SEG2 0
+#define RSMU_BASE__INST6_SEG3 0
+#define RSMU_BASE__INST6_SEG4 0
+#define RSMU_BASE__INST6_SEG5 0
+
+#define RSMU_BASE__INST7_SEG0 0
+#define RSMU_BASE__INST7_SEG1 0
+#define RSMU_BASE__INST7_SEG2 0
+#define RSMU_BASE__INST7_SEG3 0
+#define RSMU_BASE__INST7_SEG4 0
+#define RSMU_BASE__INST7_SEG5 0
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
index a761ba07f937..fce965984e76 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmBUS_CNTL 0x1508
#define mmCONFIG_CNTL 0x1509
#define mmCONFIG_MEMSIZE 0x150a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
index 8fbfd0261d27..39cc4880beb4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0
#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 809759f7bb81..8d05d6ca1c8d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
index adc71b01f793..73435687d049 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
#define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_offset.h
new file mode 100644
index 000000000000..2de450361fb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_offset.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _clk_10_0_2_OFFSET_HEADER
+#define _clk_10_0_2_OFFSET_HEADER
+
+
+
+// addressBlock: clk_clk1_0_SmuClkDec
+// base address: 0x5b800
+#define mmCLK1_CLK_PLL_REQ 0x000f
+#define mmCLK1_CLK_PLL_REQ_BASE_IDX 1
+#define mmCLK1_CLK0_BYPASS_CNTL 0x0049
+#define mmCLK1_CLK0_BYPASS_CNTL_BASE_IDX 1
+#define mmCLK1_CLK1_BYPASS_CNTL 0x0053
+#define mmCLK1_CLK1_BYPASS_CNTL_BASE_IDX 1
+#define mmCLK1_CLK2_BYPASS_CNTL 0x005d
+#define mmCLK1_CLK2_BYPASS_CNTL_BASE_IDX 1
+#define mmCLK1_CLK2_STATUS 0x005e
+#define mmCLK1_CLK2_STATUS_BASE_IDX 1
+#define mmCLK1_CLK3_DFS_CNTL 0x005f
+#define mmCLK1_CLK3_DFS_CNTL_BASE_IDX 1
+#define mmCLK1_CLK3_DS_CNTL 0x0060
+#define mmCLK1_CLK3_DS_CNTL_BASE_IDX 1
+#define mmCLK1_CLK3_ALLOW_DS 0x0061
+#define mmCLK1_CLK3_ALLOW_DS_BASE_IDX 1
+#define mmCLK1_CLK3_BYPASS_CNTL 0x0067
+#define mmCLK1_CLK3_BYPASS_CNTL_BASE_IDX 1
+#define mmCLK1_CLK0_CURRENT_CNT 0x008a
+#define mmCLK1_CLK0_CURRENT_CNT_BASE_IDX 1
+#define mmCLK1_CLK1_CURRENT_CNT 0x008b
+#define mmCLK1_CLK1_CURRENT_CNT_BASE_IDX 1
+#define mmCLK1_CLK2_CURRENT_CNT 0x008c
+#define mmCLK1_CLK2_CURRENT_CNT_BASE_IDX 1
+#define mmCLK1_CLK3_CURRENT_CNT 0x008d
+#define mmCLK1_CLK3_CURRENT_CNT_BASE_IDX 1
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_sh_mask.h
new file mode 100644
index 000000000000..c949d0e662db
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_sh_mask.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _clk_10_0_2_SH_MASK_HEADER
+#define _clk_10_0_2_SH_MASK_HEADER
+
+
+// addressBlock: clk_clk1_0_SmuClkDec
+//CLK1_CLK_PLL_REQ
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+//CLK1_CLK0_BYPASS_CNTL
+#define CLK1_CLK0_BYPASS_CNTL__CLK0_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK0_BYPASS_CNTL__CLK0_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK0_BYPASS_CNTL__CLK0_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK0_BYPASS_CNTL__CLK0_BYPASS_DIV_MASK 0x000F0000L
+//CLK1_CLK1_BYPASS_CNTL
+#define CLK1_CLK1_BYPASS_CNTL__CLK1_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK1_BYPASS_CNTL__CLK1_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK1_BYPASS_CNTL__CLK1_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK1_BYPASS_CNTL__CLK1_BYPASS_DIV_MASK 0x000F0000L
+//CLK1_CLK2_BYPASS_CNTL
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+//CLK1_CLK3_DS_CNTL
+#define CLK1_CLK3_DS_CNTL__CLK3_DS_DIV_ID__SHIFT 0x0
+#define CLK1_CLK3_DS_CNTL__CLK3_DS_DIV_ID_MASK 0x00000007L
+//CLK1_CLK3_ALLOW_DS
+#define CLK1_CLK3_ALLOW_DS__CLK3_ALLOW_DS__SHIFT 0x0
+#define CLK1_CLK3_ALLOW_DS__CLK3_ALLOW_DS_MASK 0x00000001L
+//CLK1_CLK3_BYPASS_CNTL
+#define CLK1_CLK3_BYPASS_CNTL__CLK3_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK3_BYPASS_CNTL__CLK3_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK3_BYPASS_CNTL__CLK3_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK3_BYPASS_CNTL__CLK3_BYPASS_DIV_MASK 0x000F0000L
+//CLK1_CLK0_CURRENT_CNT
+#define CLK1_CLK0_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0
+#define CLK1_CLK0_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL
+//CLK1_CLK1_CURRENT_CNT
+#define CLK1_CLK1_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0
+#define CLK1_CLK1_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL
+//CLK1_CLK2_CURRENT_CNT
+#define CLK1_CLK2_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0
+#define CLK1_CLK2_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL
+//CLK1_CLK3_CURRENT_CNT
+#define CLK1_CLK3_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0
+#define CLK1_CLK3_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
index cff8f91555d3..e9b2bd84cfed 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
@@ -8134,6 +8134,10 @@
#define mmMPC_OUT5_CSC_C33_C34_B 0x1604
#define mmMPC_OUT5_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
// base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
index 10c83fecd147..dc8ce7aaa0cf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
@@ -28263,7 +28263,14 @@
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
-
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON17_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
new file mode 100644
index 000000000000..7cd0ee61c030
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -0,0 +1,13875 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dcn_2_1_0_OFFSET_HEADER
+#define _dcn_2_1_0_OFFSET_HEADER
+
+
+
+// addressBlock: dce_dc_mmhubbub_vga_dispdec[72..76]
+// base address: 0x48
+#define mmVGA_MEM_WRITE_PAGE_ADDR 0x0000
+#define mmVGA_MEM_WRITE_PAGE_ADDR_BASE_IDX 0
+#define mmVGA_MEM_READ_PAGE_ADDR 0x0001
+#define mmVGA_MEM_READ_PAGE_ADDR_BASE_IDX 0
+
+
+// addressBlock: dce_dc_mmhubbub_vga_dispdec[948..986]
+// base address: 0x3b4
+#define mmCRTC8_IDX 0x002d
+#define mmCRTC8_IDX_BASE_IDX 1
+#define mmCRTC8_DATA 0x002d
+#define mmCRTC8_DATA_BASE_IDX 1
+#define mmGENFC_WT 0x002e
+#define mmGENFC_WT_BASE_IDX 1
+#define mmGENS1 0x002e
+#define mmGENS1_BASE_IDX 1
+#define mmATTRDW 0x0030
+#define mmATTRDW_BASE_IDX 1
+#define mmATTRX 0x0030
+#define mmATTRX_BASE_IDX 1
+#define mmATTRDR 0x0030
+#define mmATTRDR_BASE_IDX 1
+#define mmGENMO_WT 0x0030
+#define mmGENMO_WT_BASE_IDX 1
+#define mmGENS0 0x0030
+#define mmGENS0_BASE_IDX 1
+#define mmGENENB 0x0030
+#define mmGENENB_BASE_IDX 1
+#define mmSEQ8_IDX 0x0031
+#define mmSEQ8_IDX_BASE_IDX 1
+#define mmSEQ8_DATA 0x0031
+#define mmSEQ8_DATA_BASE_IDX 1
+#define mmDAC_MASK 0x0031
+#define mmDAC_MASK_BASE_IDX 1
+#define mmDAC_R_INDEX 0x0031
+#define mmDAC_R_INDEX_BASE_IDX 1
+#define mmDAC_W_INDEX 0x0032
+#define mmDAC_W_INDEX_BASE_IDX 1
+#define mmDAC_DATA 0x0032
+#define mmDAC_DATA_BASE_IDX 1
+#define mmGENFC_RD 0x0032
+#define mmGENFC_RD_BASE_IDX 1
+#define mmGENMO_RD 0x0033
+#define mmGENMO_RD_BASE_IDX 1
+#define mmGRPH8_IDX 0x0033
+#define mmGRPH8_IDX_BASE_IDX 1
+#define mmGRPH8_DATA 0x0033
+#define mmGRPH8_DATA_BASE_IDX 1
+#define mmCRTC8_IDX_1 0x0035
+#define mmCRTC8_IDX_1_BASE_IDX 1
+#define mmCRTC8_DATA_1 0x0035
+#define mmCRTC8_DATA_1_BASE_IDX 1
+#define mmGENFC_WT_1 0x0036
+#define mmGENFC_WT_1_BASE_IDX 1
+#define mmGENS1_1 0x0036
+#define mmGENS1_1_BASE_IDX 1
+
+
+// addressBlock: dce_dc_mmhubbub_vga_dispdec
+// base address: 0x0
+#define mmVGA_RENDER_CONTROL 0x0000
+#define mmVGA_RENDER_CONTROL_BASE_IDX 1
+#define mmVGA_SEQUENCER_RESET_CONTROL 0x0001
+#define mmVGA_SEQUENCER_RESET_CONTROL_BASE_IDX 1
+#define mmVGA_MODE_CONTROL 0x0002
+#define mmVGA_MODE_CONTROL_BASE_IDX 1
+#define mmVGA_SURFACE_PITCH_SELECT 0x0003
+#define mmVGA_SURFACE_PITCH_SELECT_BASE_IDX 1
+#define mmVGA_MEMORY_BASE_ADDRESS 0x0004
+#define mmVGA_MEMORY_BASE_ADDRESS_BASE_IDX 1
+#define mmVGA_TEST_DEBUG_INDEX 0x0005
+#define mmVGA_TEST_DEBUG_INDEX_BASE_IDX 1
+#define mmVGA_DISPBUF1_SURFACE_ADDR 0x0006
+#define mmVGA_DISPBUF1_SURFACE_ADDR_BASE_IDX 1
+#define mmVGA_TEST_DEBUG_DATA 0x0007
+#define mmVGA_TEST_DEBUG_DATA_BASE_IDX 1
+#define mmVGA_DISPBUF2_SURFACE_ADDR 0x0008
+#define mmVGA_DISPBUF2_SURFACE_ADDR_BASE_IDX 1
+#define mmVGA_MEMORY_BASE_ADDRESS_HIGH 0x0009
+#define mmVGA_MEMORY_BASE_ADDRESS_HIGH_BASE_IDX 1
+#define mmVGA_HDP_CONTROL 0x000a
+#define mmVGA_HDP_CONTROL_BASE_IDX 1
+#define mmVGA_CACHE_CONTROL 0x000b
+#define mmVGA_CACHE_CONTROL_BASE_IDX 1
+#define mmD1VGA_CONTROL 0x000c
+#define mmD1VGA_CONTROL_BASE_IDX 1
+#define mmVGA_SECURITY_LEVEL 0x000d
+#define mmVGA_SECURITY_LEVEL_BASE_IDX 1
+#define mmD2VGA_CONTROL 0x000e
+#define mmD2VGA_CONTROL_BASE_IDX 1
+#define mmVGA_HW_DEBUG 0x000f
+#define mmVGA_HW_DEBUG_BASE_IDX 1
+#define mmVGA_STATUS 0x0010
+#define mmVGA_STATUS_BASE_IDX 1
+#define mmVGA_INTERRUPT_CONTROL 0x0011
+#define mmVGA_INTERRUPT_CONTROL_BASE_IDX 1
+#define mmVGA_STATUS_CLEAR 0x0012
+#define mmVGA_STATUS_CLEAR_BASE_IDX 1
+#define mmVGA_INTERRUPT_STATUS 0x0013
+#define mmVGA_INTERRUPT_STATUS_BASE_IDX 1
+#define mmVGA_MAIN_CONTROL 0x0014
+#define mmVGA_MAIN_CONTROL_BASE_IDX 1
+#define mmVGA_TEST_CONTROL 0x0015
+#define mmVGA_TEST_CONTROL_BASE_IDX 1
+#define mmVGA_DEBUG_READBACK_INDEX 0x0016
+#define mmVGA_DEBUG_READBACK_INDEX_BASE_IDX 1
+#define mmVGA_DEBUG_READBACK_DATA 0x0017
+#define mmVGA_DEBUG_READBACK_DATA_BASE_IDX 1
+#define mmVGA_QOS_CTRL 0x0018
+#define mmVGA_QOS_CTRL_BASE_IDX 1
+#define mmD3VGA_CONTROL 0x0038
+#define mmD3VGA_CONTROL_BASE_IDX 1
+#define mmD4VGA_CONTROL 0x0039
+#define mmD4VGA_CONTROL_BASE_IDX 1
+#define mmD5VGA_CONTROL 0x003a
+#define mmD5VGA_CONTROL_BASE_IDX 1
+#define mmD6VGA_CONTROL 0x003b
+#define mmD6VGA_CONTROL_BASE_IDX 1
+#define mmVGA_SOURCE_SELECT 0x003c
+#define mmVGA_SOURCE_SELECT_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+// base address: 0x0
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmPHYPLLC_PIXCLK_RESYNC_CNTL 0x0042
+#define mmPHYPLLC_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmPHYPLLD_PIXCLK_RESYNC_CNTL 0x0043
+#define mmPHYPLLD_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmDP_DTO_DBUF_EN 0x0044
+#define mmDP_DTO_DBUF_EN_BASE_IDX 1
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG 0x0048
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmREFCLK_CNTL 0x0049
+#define mmREFCLK_CNTL_BASE_IDX 1
+#define mmREFCLK_CGTT_BLK_CTRL_REG 0x004b
+#define mmREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmPHYPLLE_PIXCLK_RESYNC_CNTL 0x004c
+#define mmPHYPLLE_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmDCCG_PERFMON_CNTL2 0x004e
+#define mmDCCG_PERFMON_CNTL2_BASE_IDX 1
+#define mmDCCG_DS_DTO_INCR 0x0053
+#define mmDCCG_DS_DTO_INCR_BASE_IDX 1
+#define mmDCCG_DS_DTO_MODULO 0x0054
+#define mmDCCG_DS_DTO_MODULO_BASE_IDX 1
+#define mmDCCG_DS_CNTL 0x0055
+#define mmDCCG_DS_CNTL_BASE_IDX 1
+#define mmDCCG_DS_HW_CAL_INTERVAL 0x0056
+#define mmDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1
+#define mmDPREFCLK_CNTL 0x0058
+#define mmDPREFCLK_CNTL_BASE_IDX 1
+#define mmDCE_VERSION 0x005e
+#define mmDCE_VERSION_BASE_IDX 1
+#define mmDCCG_GTC_CNTL 0x0060
+#define mmDCCG_GTC_CNTL_BASE_IDX 1
+#define mmDCCG_GTC_DTO_INCR 0x0061
+#define mmDCCG_GTC_DTO_INCR_BASE_IDX 1
+#define mmDCCG_GTC_DTO_MODULO 0x0062
+#define mmDCCG_GTC_DTO_MODULO_BASE_IDX 1
+#define mmDCCG_GTC_CURRENT 0x0063
+#define mmDCCG_GTC_CURRENT_BASE_IDX 1
+#define mmDSCCLK0_DTO_PARAM 0x006c
+#define mmDSCCLK0_DTO_PARAM_BASE_IDX 1
+#define mmDSCCLK1_DTO_PARAM 0x006d
+#define mmDSCCLK1_DTO_PARAM_BASE_IDX 1
+#define mmDSCCLK2_DTO_PARAM 0x006e
+#define mmDSCCLK2_DTO_PARAM_BASE_IDX 1
+#define mmMILLISECOND_TIME_BASE_DIV 0x0070
+#define mmMILLISECOND_TIME_BASE_DIV_BASE_IDX 1
+#define mmDISPCLK_FREQ_CHANGE_CNTL 0x0071
+#define mmDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1
+#define mmDCCG_PERFMON_CNTL 0x0073
+#define mmDCCG_PERFMON_CNTL_BASE_IDX 1
+#define mmDCCG_GATE_DISABLE_CNTL 0x0074
+#define mmDCCG_GATE_DISABLE_CNTL_BASE_IDX 1
+#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x0075
+#define mmDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmSOCCLK_CGTT_BLK_CTRL_REG 0x0076
+#define mmSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_CAC_STATUS 0x0077
+#define mmDCCG_CAC_STATUS_BASE_IDX 1
+#define mmMICROSECOND_TIME_BASE_DIV 0x007b
+#define mmMICROSECOND_TIME_BASE_DIV_BASE_IDX 1
+#define mmDCCG_GATE_DISABLE_CNTL2 0x007c
+#define mmDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1
+#define mmSYMCLK_CGTT_BLK_CTRL_REG 0x007d
+#define mmSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_DISP_CNTL_REG 0x007f
+#define mmDCCG_DISP_CNTL_REG_BASE_IDX 1
+#define mmOTG0_PIXEL_RATE_CNTL 0x0080
+#define mmOTG0_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO0_PHASE 0x0081
+#define mmDP_DTO0_PHASE_BASE_IDX 1
+#define mmDP_DTO0_MODULO 0x0082
+#define mmDP_DTO0_MODULO_BASE_IDX 1
+#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083
+#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmOTG1_PIXEL_RATE_CNTL 0x0084
+#define mmOTG1_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO1_PHASE 0x0085
+#define mmDP_DTO1_PHASE_BASE_IDX 1
+#define mmDP_DTO1_MODULO 0x0086
+#define mmDP_DTO1_MODULO_BASE_IDX 1
+#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087
+#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmOTG2_PIXEL_RATE_CNTL 0x0088
+#define mmOTG2_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO2_PHASE 0x0089
+#define mmDP_DTO2_PHASE_BASE_IDX 1
+#define mmDP_DTO2_MODULO 0x008a
+#define mmDP_DTO2_MODULO_BASE_IDX 1
+#define mmOTG2_PHYPLL_PIXEL_RATE_CNTL 0x008b
+#define mmOTG2_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmOTG3_PIXEL_RATE_CNTL 0x008c
+#define mmOTG3_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO3_PHASE 0x008d
+#define mmDP_DTO3_PHASE_BASE_IDX 1
+#define mmDP_DTO3_MODULO 0x008e
+#define mmDP_DTO3_MODULO_BASE_IDX 1
+#define mmOTG3_PHYPLL_PIXEL_RATE_CNTL 0x008f
+#define mmOTG3_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDPPCLK_CGTT_BLK_CTRL_REG 0x0098
+#define mmDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDPPCLK0_DTO_PARAM 0x0099
+#define mmDPPCLK0_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK1_DTO_PARAM 0x009a
+#define mmDPPCLK1_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK2_DTO_PARAM 0x009b
+#define mmDPPCLK2_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK3_DTO_PARAM 0x009c
+#define mmDPPCLK3_DTO_PARAM_BASE_IDX 1
+#define mmDCCG_CAC_STATUS2 0x009f
+#define mmDCCG_CAC_STATUS2_BASE_IDX 1
+#define mmSYMCLKA_CLOCK_ENABLE 0x00a0
+#define mmSYMCLKA_CLOCK_ENABLE_BASE_IDX 1
+#define mmSYMCLKB_CLOCK_ENABLE 0x00a1
+#define mmSYMCLKB_CLOCK_ENABLE_BASE_IDX 1
+#define mmSYMCLKC_CLOCK_ENABLE 0x00a2
+#define mmSYMCLKC_CLOCK_ENABLE_BASE_IDX 1
+#define mmSYMCLKD_CLOCK_ENABLE 0x00a3
+#define mmSYMCLKD_CLOCK_ENABLE_BASE_IDX 1
+#define mmSYMCLKE_CLOCK_ENABLE 0x00a4
+#define mmSYMCLKE_CLOCK_ENABLE_BASE_IDX 1
+#define mmDCCG_SOFT_RESET 0x00a6
+#define mmDCCG_SOFT_RESET_BASE_IDX 1
+#define mmDSCCLK_DTO_CTRL 0x00a7
+#define mmDSCCLK_DTO_CTRL_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO_SOURCE 0x00ab
+#define mmDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO0_PHASE 0x00ac
+#define mmDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO0_MODULE 0x00ad
+#define mmDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO1_PHASE 0x00ae
+#define mmDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO1_MODULE 0x00af
+#define mmDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0
+#define mmDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1
+#define mmDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2
+#define mmDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3
+#define mmDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4
+#define mmDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5
+#define mmDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1
+#define mmDPPCLK_DTO_CTRL 0x00b6
+#define mmDPPCLK_DTO_CTRL_BASE_IDX 1
+#define mmDCCG_VSYNC_CNT_CTRL 0x00b8
+#define mmDCCG_VSYNC_CNT_CTRL_BASE_IDX 1
+#define mmDCCG_VSYNC_CNT_INT_CTRL 0x00b9
+#define mmDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1
+#define mmFORCE_SYMCLK_DISABLE 0x00ba
+#define mmFORCE_SYMCLK_DISABLE_BASE_IDX 1
+#define mmDCCG_TEST_CLK_SEL 0x00be
+#define mmDCCG_TEST_CLK_SEL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+// base address: 0x0
+#define mmDENTIST_DISPCLK_CNTL 0x0064
+#define mmDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+// base address: 0x0
+#define mmDC_PERFMON0_PERFCOUNTER_CNTL 0x0000
+#define mmDC_PERFMON0_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON0_PERFCOUNTER_CNTL2 0x0001
+#define mmDC_PERFMON0_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON0_PERFCOUNTER_STATE 0x0002
+#define mmDC_PERFMON0_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON0_PERFMON_CNTL 0x0003
+#define mmDC_PERFMON0_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON0_PERFMON_CNTL2 0x0004
+#define mmDC_PERFMON0_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON0_PERFMON_CVALUE_INT_MISC 0x0005
+#define mmDC_PERFMON0_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON0_PERFMON_CVALUE_LOW 0x0006
+#define mmDC_PERFMON0_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON0_PERFMON_HI 0x0007
+#define mmDC_PERFMON0_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON0_PERFMON_LOW 0x0008
+#define mmDC_PERFMON0_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+// base address: 0x30
+#define mmDC_PERFMON1_PERFCOUNTER_CNTL 0x000c
+#define mmDC_PERFMON1_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON1_PERFCOUNTER_CNTL2 0x000d
+#define mmDC_PERFMON1_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON1_PERFCOUNTER_STATE 0x000e
+#define mmDC_PERFMON1_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON1_PERFMON_CNTL 0x000f
+#define mmDC_PERFMON1_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON1_PERFMON_CNTL2 0x0010
+#define mmDC_PERFMON1_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON1_PERFMON_CVALUE_INT_MISC 0x0011
+#define mmDC_PERFMON1_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON1_PERFMON_CVALUE_LOW 0x0012
+#define mmDC_PERFMON1_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON1_PERFMON_HI 0x0013
+#define mmDC_PERFMON1_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON1_PERFMON_LOW 0x0014
+#define mmDC_PERFMON1_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_pll_dispdec
+// base address: 0x0
+#define mmPLL_MACRO_CNTL_RESERVED0 0x0018
+#define mmPLL_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED1 0x0019
+#define mmPLL_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED2 0x001a
+#define mmPLL_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED3 0x001b
+#define mmPLL_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED4 0x001c
+#define mmPLL_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED5 0x001d
+#define mmPLL_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED6 0x001e
+#define mmPLL_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED7 0x001f
+#define mmPLL_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED8 0x0020
+#define mmPLL_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED9 0x0021
+#define mmPLL_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED10 0x0022
+#define mmPLL_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED11 0x0023
+#define mmPLL_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED12 0x0024
+#define mmPLL_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED13 0x0025
+#define mmPLL_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED14 0x0026
+#define mmPLL_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED15 0x0027
+#define mmPLL_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED16 0x0028
+#define mmPLL_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED17 0x0029
+#define mmPLL_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED18 0x002a
+#define mmPLL_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED19 0x002b
+#define mmPLL_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED20 0x002c
+#define mmPLL_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED21 0x002d
+#define mmPLL_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED22 0x002e
+#define mmPLL_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED23 0x002f
+#define mmPLL_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED24 0x0030
+#define mmPLL_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED25 0x0031
+#define mmPLL_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED26 0x0032
+#define mmPLL_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED27 0x0033
+#define mmPLL_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED28 0x0034
+#define mmPLL_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED29 0x0035
+#define mmPLL_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED30 0x0036
+#define mmPLL_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED31 0x0037
+#define mmPLL_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED32 0x0038
+#define mmPLL_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED33 0x0039
+#define mmPLL_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED34 0x003a
+#define mmPLL_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED35 0x003b
+#define mmPLL_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED36 0x003c
+#define mmPLL_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED37 0x003d
+#define mmPLL_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED38 0x003e
+#define mmPLL_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED39 0x003f
+#define mmPLL_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED40 0x0040
+#define mmPLL_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define mmPLL_MACRO_CNTL_RESERVED41 0x0041
+#define mmPLL_MACRO_CNTL_RESERVED41_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+// base address: 0x0
+#define mmRBBMIF_TIMEOUT 0x005b
+#define mmRBBMIF_TIMEOUT_BASE_IDX 2
+#define mmRBBMIF_STATUS 0x005c
+#define mmRBBMIF_STATUS_BASE_IDX 2
+#define mmRBBMIF_STATUS_2 0x005d
+#define mmRBBMIF_STATUS_2_BASE_IDX 2
+#define mmRBBMIF_INT_STATUS 0x005e
+#define mmRBBMIF_INT_STATUS_BASE_IDX 2
+#define mmRBBMIF_TIMEOUT_DIS 0x005f
+#define mmRBBMIF_TIMEOUT_DIS_BASE_IDX 2
+#define mmRBBMIF_TIMEOUT_DIS_2 0x0060
+#define mmRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2
+#define mmRBBMIF_STATUS_FLAG 0x0061
+#define mmRBBMIF_STATUS_FLAG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+// base address: 0x0
+#define mmDOMAIN0_PG_CONFIG 0x0080
+#define mmDOMAIN0_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN0_PG_STATUS 0x0081
+#define mmDOMAIN0_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN1_PG_CONFIG 0x0082
+#define mmDOMAIN1_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN1_PG_STATUS 0x0083
+#define mmDOMAIN1_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN2_PG_CONFIG 0x0084
+#define mmDOMAIN2_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN2_PG_STATUS 0x0085
+#define mmDOMAIN2_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN3_PG_CONFIG 0x0086
+#define mmDOMAIN3_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN3_PG_STATUS 0x0087
+#define mmDOMAIN3_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN4_PG_CONFIG 0x0088
+#define mmDOMAIN4_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN4_PG_STATUS 0x0089
+#define mmDOMAIN4_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN5_PG_CONFIG 0x008a
+#define mmDOMAIN5_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN5_PG_STATUS 0x008b
+#define mmDOMAIN5_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN6_PG_CONFIG 0x008c
+#define mmDOMAIN6_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN6_PG_STATUS 0x008d
+#define mmDOMAIN6_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN7_PG_CONFIG 0x008e
+#define mmDOMAIN7_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN7_PG_STATUS 0x008f
+#define mmDOMAIN7_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN16_PG_CONFIG 0x00a1
+#define mmDOMAIN16_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN16_PG_STATUS 0x00a2
+#define mmDOMAIN16_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN17_PG_CONFIG 0x00a3
+#define mmDOMAIN17_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN17_PG_STATUS 0x00a4
+#define mmDOMAIN17_PG_STATUS_BASE_IDX 2
+#define mmDOMAIN18_PG_CONFIG 0x00a5
+#define mmDOMAIN18_PG_CONFIG_BASE_IDX 2
+#define mmDOMAIN18_PG_STATUS 0x00a6
+#define mmDOMAIN18_PG_STATUS_BASE_IDX 2
+#define mmDCPG_INTERRUPT_STATUS 0x00ad
+#define mmDCPG_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDCPG_INTERRUPT_STATUS_2 0x00ae
+#define mmDCPG_INTERRUPT_STATUS_2_BASE_IDX 2
+#define mmDCPG_INTERRUPT_CONTROL_1 0x00af
+#define mmDCPG_INTERRUPT_CONTROL_1_BASE_IDX 2
+#define mmDCPG_INTERRUPT_CONTROL_2 0x00b0
+#define mmDCPG_INTERRUPT_CONTROL_2_BASE_IDX 2
+#define mmDCPG_INTERRUPT_CONTROL_3 0x00b1
+#define mmDCPG_INTERRUPT_CONTROL_3_BASE_IDX 2
+#define mmDC_IP_REQUEST_CNTL 0x00b2
+#define mmDC_IP_REQUEST_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2f8
+#define mmDC_PERFMON2_PERFCOUNTER_CNTL 0x00be
+#define mmDC_PERFMON2_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON2_PERFCOUNTER_CNTL2 0x00bf
+#define mmDC_PERFMON2_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON2_PERFCOUNTER_STATE 0x00c0
+#define mmDC_PERFMON2_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON2_PERFMON_CNTL 0x00c1
+#define mmDC_PERFMON2_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON2_PERFMON_CNTL2 0x00c2
+#define mmDC_PERFMON2_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON2_PERFMON_CVALUE_INT_MISC 0x00c3
+#define mmDC_PERFMON2_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON2_PERFMON_CVALUE_LOW 0x00c4
+#define mmDC_PERFMON2_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON2_PERFMON_HI 0x00c5
+#define mmDC_PERFMON2_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON2_PERFMON_LOW 0x00c6
+#define mmDC_PERFMON2_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+// base address: 0x0
+#define mmCC_DC_PIPE_DIS 0x00ca
+#define mmCC_DC_PIPE_DIS_BASE_IDX 2
+#define mmDMU_CLK_CNTL 0x00cb
+#define mmDMU_CLK_CNTL_BASE_IDX 2
+#define mmDMU_MEM_PWR_CNTL 0x00cc
+#define mmDMU_MEM_PWR_CNTL_BASE_IDX 2
+#define mmDMCU_SMU_INTERRUPT_CNTL 0x00cd
+#define mmDMCU_SMU_INTERRUPT_CNTL_BASE_IDX 2
+#define mmSMU_INTERRUPT_CONTROL 0x00ce
+#define mmSMU_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDMU_MISC_ALLOW_DS_FORCE 0x00d6
+#define mmDMU_MISC_ALLOW_DS_FORCE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmcu_dispdec
+// base address: 0x0
+#define mmDMCU_CTRL 0x00da
+#define mmDMCU_CTRL_BASE_IDX 2
+#define mmDMCU_STATUS 0x00db
+#define mmDMCU_STATUS_BASE_IDX 2
+#define mmDMCU_PC_START_ADDR 0x00dc
+#define mmDMCU_PC_START_ADDR_BASE_IDX 2
+#define mmDMCU_FW_START_ADDR 0x00dd
+#define mmDMCU_FW_START_ADDR_BASE_IDX 2
+#define mmDMCU_FW_END_ADDR 0x00de
+#define mmDMCU_FW_END_ADDR_BASE_IDX 2
+#define mmDMCU_FW_ISR_START_ADDR 0x00df
+#define mmDMCU_FW_ISR_START_ADDR_BASE_IDX 2
+#define mmDMCU_FW_CS_HI 0x00e0
+#define mmDMCU_FW_CS_HI_BASE_IDX 2
+#define mmDMCU_FW_CS_LO 0x00e1
+#define mmDMCU_FW_CS_LO_BASE_IDX 2
+#define mmDMCU_RAM_ACCESS_CTRL 0x00e2
+#define mmDMCU_RAM_ACCESS_CTRL_BASE_IDX 2
+#define mmDMCU_ERAM_WR_CTRL 0x00e3
+#define mmDMCU_ERAM_WR_CTRL_BASE_IDX 2
+#define mmDMCU_ERAM_WR_DATA 0x00e4
+#define mmDMCU_ERAM_WR_DATA_BASE_IDX 2
+#define mmDMCU_ERAM_RD_CTRL 0x00e5
+#define mmDMCU_ERAM_RD_CTRL_BASE_IDX 2
+#define mmDMCU_ERAM_RD_DATA 0x00e6
+#define mmDMCU_ERAM_RD_DATA_BASE_IDX 2
+#define mmDMCU_IRAM_WR_CTRL 0x00e7
+#define mmDMCU_IRAM_WR_CTRL_BASE_IDX 2
+#define mmDMCU_IRAM_WR_DATA 0x00e8
+#define mmDMCU_IRAM_WR_DATA_BASE_IDX 2
+#define mmDMCU_IRAM_RD_CTRL 0x00e9
+#define mmDMCU_IRAM_RD_CTRL_BASE_IDX 2
+#define mmDMCU_IRAM_RD_DATA 0x00ea
+#define mmDMCU_IRAM_RD_DATA_BASE_IDX 2
+#define mmDMCU_EVENT_TRIGGER 0x00eb
+#define mmDMCU_EVENT_TRIGGER_BASE_IDX 2
+#define mmDMCU_UC_INTERNAL_INT_STATUS 0x00ec
+#define mmDMCU_UC_INTERNAL_INT_STATUS_BASE_IDX 2
+#define mmDMCU_SS_INTERRUPT_CNTL_STATUS 0x00ed
+#define mmDMCU_SS_INTERRUPT_CNTL_STATUS_BASE_IDX 2
+#define mmDMCU_INTERRUPT_STATUS 0x00ee
+#define mmDMCU_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDMCU_INTERRUPT_STATUS_1 0x00ef
+#define mmDMCU_INTERRUPT_STATUS_1_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK 0x00f0
+#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK 0x00f1
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_1 0x00f2
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_1_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL 0x00f3
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1 0x00f4
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1_BASE_IDX 2
+#define mmDC_DMCU_SCRATCH 0x00f5
+#define mmDC_DMCU_SCRATCH_BASE_IDX 2
+#define mmDMCU_INT_CNT 0x00f6
+#define mmDMCU_INT_CNT_BASE_IDX 2
+#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS 0x00f7
+#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS_BASE_IDX 2
+#define mmDMCU_UC_CLK_GATING_CNTL 0x00f8
+#define mmDMCU_UC_CLK_GATING_CNTL_BASE_IDX 2
+#define mmMASTER_COMM_DATA_REG1 0x00f9
+#define mmMASTER_COMM_DATA_REG1_BASE_IDX 2
+#define mmMASTER_COMM_DATA_REG2 0x00fa
+#define mmMASTER_COMM_DATA_REG2_BASE_IDX 2
+#define mmMASTER_COMM_DATA_REG3 0x00fb
+#define mmMASTER_COMM_DATA_REG3_BASE_IDX 2
+#define mmMASTER_COMM_CMD_REG 0x00fc
+#define mmMASTER_COMM_CMD_REG_BASE_IDX 2
+#define mmMASTER_COMM_CNTL_REG 0x00fd
+#define mmMASTER_COMM_CNTL_REG_BASE_IDX 2
+#define mmSLAVE_COMM_DATA_REG1 0x00fe
+#define mmSLAVE_COMM_DATA_REG1_BASE_IDX 2
+#define mmSLAVE_COMM_DATA_REG2 0x00ff
+#define mmSLAVE_COMM_DATA_REG2_BASE_IDX 2
+#define mmSLAVE_COMM_DATA_REG3 0x0100
+#define mmSLAVE_COMM_DATA_REG3_BASE_IDX 2
+#define mmSLAVE_COMM_CMD_REG 0x0101
+#define mmSLAVE_COMM_CMD_REG_BASE_IDX 2
+#define mmSLAVE_COMM_CNTL_REG 0x0102
+#define mmSLAVE_COMM_CNTL_REG_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_STATUS1 0x0105
+#define mmDMCU_PERFMON_INTERRUPT_STATUS1_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_STATUS2 0x0106
+#define mmDMCU_PERFMON_INTERRUPT_STATUS2_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_STATUS3 0x0107
+#define mmDMCU_PERFMON_INTERRUPT_STATUS3_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_STATUS4 0x0108
+#define mmDMCU_PERFMON_INTERRUPT_STATUS4_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_STATUS5 0x0109
+#define mmDMCU_PERFMON_INTERRUPT_STATUS5_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1 0x010a
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2 0x010b
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3 0x010c
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4 0x010d
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5 0x010e
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1 0x010f
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2 0x0110
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3 0x0111
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4 0x0112
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4_BASE_IDX 2
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5 0x0113
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5_BASE_IDX 2
+#define mmDMCU_DPRX_INTERRUPT_STATUS1 0x0114
+#define mmDMCU_DPRX_INTERRUPT_STATUS1_BASE_IDX 2
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1 0x0115
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1_BASE_IDX 2
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1 0x0116
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1_BASE_IDX 2
+#define mmDMCU_INTERRUPT_STATUS_CONTINUE 0x0119
+#define mmDMCU_INTERRUPT_STATUS_CONTINUE_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE 0x011a
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE 0x011b
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE_BASE_IDX 2
+#define mmDMCU_INT_CNT_CONTINUE 0x011c
+#define mmDMCU_INT_CNT_CONTINUE_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2 0x011d
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2_BASE_IDX 2
+#define mmDMCU_INTERRUPT_STATUS_2 0x011e
+#define mmDMCU_INTERRUPT_STATUS_2_BASE_IDX 2
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_2 0x011f
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+// base address: 0x0
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE 0x0126
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE_BASE_IDX 2
+#define mmDC_GPU_TIMER_START_POSITION_VSTARTUP 0x0127
+#define mmDC_GPU_TIMER_START_POSITION_VSTARTUP_BASE_IDX 2
+#define mmDC_GPU_TIMER_READ 0x0128
+#define mmDC_GPU_TIMER_READ_BASE_IDX 2
+#define mmDC_GPU_TIMER_READ_CNTL 0x0129
+#define mmDC_GPU_TIMER_READ_CNTL_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS 0x012a
+#define mmDISP_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE 0x012b
+#define mmDISP_INTERRUPT_STATUS_CONTINUE_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE2 0x012c
+#define mmDISP_INTERRUPT_STATUS_CONTINUE2_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE3 0x012d
+#define mmDISP_INTERRUPT_STATUS_CONTINUE3_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE4 0x012e
+#define mmDISP_INTERRUPT_STATUS_CONTINUE4_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE5 0x012f
+#define mmDISP_INTERRUPT_STATUS_CONTINUE5_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE6 0x0130
+#define mmDISP_INTERRUPT_STATUS_CONTINUE6_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE7 0x0131
+#define mmDISP_INTERRUPT_STATUS_CONTINUE7_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE8 0x0132
+#define mmDISP_INTERRUPT_STATUS_CONTINUE8_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE9 0x0133
+#define mmDISP_INTERRUPT_STATUS_CONTINUE9_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE10 0x0134
+#define mmDISP_INTERRUPT_STATUS_CONTINUE10_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE11 0x0135
+#define mmDISP_INTERRUPT_STATUS_CONTINUE11_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE12 0x0136
+#define mmDISP_INTERRUPT_STATUS_CONTINUE12_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE13 0x0137
+#define mmDISP_INTERRUPT_STATUS_CONTINUE13_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE14 0x0138
+#define mmDISP_INTERRUPT_STATUS_CONTINUE14_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE15 0x0139
+#define mmDISP_INTERRUPT_STATUS_CONTINUE15_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE16 0x013a
+#define mmDISP_INTERRUPT_STATUS_CONTINUE16_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE17 0x013b
+#define mmDISP_INTERRUPT_STATUS_CONTINUE17_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE18 0x013c
+#define mmDISP_INTERRUPT_STATUS_CONTINUE18_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE19 0x013d
+#define mmDISP_INTERRUPT_STATUS_CONTINUE19_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE20 0x013e
+#define mmDISP_INTERRUPT_STATUS_CONTINUE20_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE21 0x013f
+#define mmDISP_INTERRUPT_STATUS_CONTINUE21_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE22 0x0140
+#define mmDISP_INTERRUPT_STATUS_CONTINUE22_BASE_IDX 2
+#define mmDC_GPU_TIMER_START_POSITION_VREADY 0x0141
+#define mmDC_GPU_TIMER_START_POSITION_VREADY_BASE_IDX 2
+#define mmDC_GPU_TIMER_START_POSITION_FLIP 0x0142
+#define mmDC_GPU_TIMER_START_POSITION_FLIP_BASE_IDX 2
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK 0x0143
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK_BASE_IDX 2
+#define mmDC_GPU_TIMER_START_POSITION_FLIP_AWAY 0x0144
+#define mmDC_GPU_TIMER_START_POSITION_FLIP_AWAY_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE23 0x0145
+#define mmDISP_INTERRUPT_STATUS_CONTINUE23_BASE_IDX 2
+#define mmDISP_INTERRUPT_STATUS_CONTINUE24 0x0146
+#define mmDISP_INTERRUPT_STATUS_CONTINUE24_BASE_IDX 2
+#define mmDCCG_INTERRUPT_DEST 0x0147
+#define mmDCCG_INTERRUPT_DEST_BASE_IDX 2
+#define mmDMU_INTERRUPT_DEST 0x0148
+#define mmDMU_INTERRUPT_DEST_BASE_IDX 2
+#define mmDCPG_INTERRUPT_DEST 0x0149
+#define mmDCPG_INTERRUPT_DEST_BASE_IDX 2
+#define mmDCPG_INTERRUPT_DEST2 0x014a
+#define mmDCPG_INTERRUPT_DEST2_BASE_IDX 2
+#define mmMMHUBBUB_INTERRUPT_DEST 0x014b
+#define mmMMHUBBUB_INTERRUPT_DEST_BASE_IDX 2
+#define mmWB_INTERRUPT_DEST 0x014c
+#define mmWB_INTERRUPT_DEST_BASE_IDX 2
+#define mmDCHUB_INTERRUPT_DEST 0x014d
+#define mmDCHUB_INTERRUPT_DEST_BASE_IDX 2
+#define mmDCHUB_PERFCOUNTER_INTERRUPT_DEST 0x014e
+#define mmDCHUB_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define mmDCHUB_INTERRUPT_DEST2 0x014f
+#define mmDCHUB_INTERRUPT_DEST2_BASE_IDX 2
+#define mmDPP_PERFCOUNTER_INTERRUPT_DEST 0x0150
+#define mmDPP_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define mmMPC_INTERRUPT_DEST 0x0151
+#define mmMPC_INTERRUPT_DEST_BASE_IDX 2
+#define mmOPP_INTERRUPT_DEST 0x0152
+#define mmOPP_INTERRUPT_DEST_BASE_IDX 2
+#define mmOPTC_INTERRUPT_DEST 0x0153
+#define mmOPTC_INTERRUPT_DEST_BASE_IDX 2
+#define mmOTG0_INTERRUPT_DEST 0x0154
+#define mmOTG0_INTERRUPT_DEST_BASE_IDX 2
+#define mmOTG1_INTERRUPT_DEST 0x0155
+#define mmOTG1_INTERRUPT_DEST_BASE_IDX 2
+#define mmOTG2_INTERRUPT_DEST 0x0156
+#define mmOTG2_INTERRUPT_DEST_BASE_IDX 2
+#define mmOTG3_INTERRUPT_DEST 0x0157
+#define mmOTG3_INTERRUPT_DEST_BASE_IDX 2
+#define mmOTG4_INTERRUPT_DEST 0x0158
+#define mmOTG4_INTERRUPT_DEST_BASE_IDX 2
+#define mmOTG5_INTERRUPT_DEST 0x0159
+#define mmOTG5_INTERRUPT_DEST_BASE_IDX 2
+#define mmDIG_INTERRUPT_DEST 0x015a
+#define mmDIG_INTERRUPT_DEST_BASE_IDX 2
+#define mmI2C_DDC_HPD_INTERRUPT_DEST 0x015b
+#define mmI2C_DDC_HPD_INTERRUPT_DEST_BASE_IDX 2
+#define mmDIO_INTERRUPT_DEST 0x015d
+#define mmDIO_INTERRUPT_DEST_BASE_IDX 2
+#define mmDCIO_INTERRUPT_DEST 0x015e
+#define mmDCIO_INTERRUPT_DEST_BASE_IDX 2
+#define mmHPD_INTERRUPT_DEST 0x015f
+#define mmHPD_INTERRUPT_DEST_BASE_IDX 2
+#define mmAZ_INTERRUPT_DEST 0x0160
+#define mmAZ_INTERRUPT_DEST_BASE_IDX 2
+#define mmAUX_INTERRUPT_DEST 0x0161
+#define mmAUX_INTERRUPT_DEST_BASE_IDX 2
+#define mmDSC_INTERRUPT_DEST 0x0162
+#define mmDSC_INTERRUPT_DEST_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_cnv_dispdec
+// base address: 0x0
+#define mmWB_ENABLE 0x01da
+#define mmWB_ENABLE_BASE_IDX 2
+#define mmWB_EC_CONFIG 0x01db
+#define mmWB_EC_CONFIG_BASE_IDX 2
+#define mmCNV_MODE 0x01dc
+#define mmCNV_MODE_BASE_IDX 2
+#define mmCNV_WINDOW_START 0x01dd
+#define mmCNV_WINDOW_START_BASE_IDX 2
+#define mmCNV_WINDOW_SIZE 0x01de
+#define mmCNV_WINDOW_SIZE_BASE_IDX 2
+#define mmCNV_UPDATE 0x01df
+#define mmCNV_UPDATE_BASE_IDX 2
+#define mmCNV_SOURCE_SIZE 0x01e0
+#define mmCNV_SOURCE_SIZE_BASE_IDX 2
+#define mmCNV_TEST_CNTL 0x01ee
+#define mmCNV_TEST_CNTL_BASE_IDX 2
+#define mmCNV_TEST_CRC_RED 0x01ef
+#define mmCNV_TEST_CRC_RED_BASE_IDX 2
+#define mmCNV_TEST_CRC_GREEN 0x01f0
+#define mmCNV_TEST_CRC_GREEN_BASE_IDX 2
+#define mmCNV_TEST_CRC_BLUE 0x01f1
+#define mmCNV_TEST_CRC_BLUE_BASE_IDX 2
+#define mmWB_DEBUG_CTRL 0x01f2
+#define mmWB_DEBUG_CTRL_BASE_IDX 2
+#define mmWB_DBG_MODE 0x01f3
+#define mmWB_DBG_MODE_BASE_IDX 2
+#define mmWB_HW_DEBUG 0x01f4
+#define mmWB_HW_DEBUG_BASE_IDX 2
+#define mmWB_SOFT_RESET 0x01f5
+#define mmWB_SOFT_RESET_BASE_IDX 2
+#define mmWB_WARM_UP_MODE_CTL1 0x01f6
+#define mmWB_WARM_UP_MODE_CTL1_BASE_IDX 2
+#define mmWB_WARM_UP_MODE_CTL2 0x01f7
+#define mmWB_WARM_UP_MODE_CTL2_BASE_IDX 2
+#define mmCNV_TEST_DEBUG_INDEX 0x01f8
+#define mmCNV_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCNV_TEST_DEBUG_DATA 0x01f9
+#define mmCNV_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_wbscl_dispdec
+// base address: 0x0
+#define mmWBSCL_COEF_RAM_SELECT 0x020a
+#define mmWBSCL_COEF_RAM_SELECT_BASE_IDX 2
+#define mmWBSCL_COEF_RAM_TAP_DATA 0x020b
+#define mmWBSCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmWBSCL_MODE 0x020c
+#define mmWBSCL_MODE_BASE_IDX 2
+#define mmWBSCL_TAP_CONTROL 0x020d
+#define mmWBSCL_TAP_CONTROL_BASE_IDX 2
+#define mmWBSCL_DEST_SIZE 0x020e
+#define mmWBSCL_DEST_SIZE_BASE_IDX 2
+#define mmWBSCL_HORZ_FILTER_SCALE_RATIO 0x020f
+#define mmWBSCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmWBSCL_HORZ_FILTER_INIT_Y_RGB 0x0210
+#define mmWBSCL_HORZ_FILTER_INIT_Y_RGB_BASE_IDX 2
+#define mmWBSCL_HORZ_FILTER_INIT_CBCR 0x0211
+#define mmWBSCL_HORZ_FILTER_INIT_CBCR_BASE_IDX 2
+#define mmWBSCL_VERT_FILTER_SCALE_RATIO 0x0212
+#define mmWBSCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmWBSCL_VERT_FILTER_INIT_Y_RGB 0x0213
+#define mmWBSCL_VERT_FILTER_INIT_Y_RGB_BASE_IDX 2
+#define mmWBSCL_VERT_FILTER_INIT_CBCR 0x0214
+#define mmWBSCL_VERT_FILTER_INIT_CBCR_BASE_IDX 2
+#define mmWBSCL_ROUND_OFFSET 0x0215
+#define mmWBSCL_ROUND_OFFSET_BASE_IDX 2
+#define mmWBSCL_OVERFLOW_STATUS 0x0216
+#define mmWBSCL_OVERFLOW_STATUS_BASE_IDX 2
+#define mmWBSCL_COEF_RAM_CONFLICT_STATUS 0x0217
+#define mmWBSCL_COEF_RAM_CONFLICT_STATUS_BASE_IDX 2
+#define mmWBSCL_TEST_CNTL 0x0218
+#define mmWBSCL_TEST_CNTL_BASE_IDX 2
+#define mmWBSCL_TEST_CRC_RED 0x0219
+#define mmWBSCL_TEST_CRC_RED_BASE_IDX 2
+#define mmWBSCL_TEST_CRC_GREEN 0x021a
+#define mmWBSCL_TEST_CRC_GREEN_BASE_IDX 2
+#define mmWBSCL_TEST_CRC_BLUE 0x021b
+#define mmWBSCL_TEST_CRC_BLUE_BASE_IDX 2
+#define mmWBSCL_BACKPRESSURE_CNT_EN 0x021c
+#define mmWBSCL_BACKPRESSURE_CNT_EN_BASE_IDX 2
+#define mmWB_MCIF_BACKPRESSURE_CNT 0x021d
+#define mmWB_MCIF_BACKPRESSURE_CNT_BASE_IDX 2
+#define mmWBSCL_CLAMP_Y_RGB 0x021e
+#define mmWBSCL_CLAMP_Y_RGB_BASE_IDX 2
+#define mmWBSCL_CLAMP_CBCR 0x021f
+#define mmWBSCL_CLAMP_CBCR_BASE_IDX 2
+#define mmWBSCL_OUTSIDE_PIX_STRATEGY 0x0220
+#define mmWBSCL_OUTSIDE_PIX_STRATEGY_BASE_IDX 2
+#define mmWBSCL_OUTSIDE_PIX_STRATEGY_CBCR 0x0221
+#define mmWBSCL_OUTSIDE_PIX_STRATEGY_CBCR_BASE_IDX 2
+#define mmWBSCL_DEBUG 0x0222
+#define mmWBSCL_DEBUG_BASE_IDX 2
+#define mmWBSCL_TEST_DEBUG_INDEX 0x0223
+#define mmWBSCL_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmWBSCL_TEST_DEBUG_DATA 0x0224
+#define mmWBSCL_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+// base address: 0x8e8
+#define mmDC_PERFMON3_PERFCOUNTER_CNTL 0x023a
+#define mmDC_PERFMON3_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON3_PERFCOUNTER_CNTL2 0x023b
+#define mmDC_PERFMON3_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON3_PERFCOUNTER_STATE 0x023c
+#define mmDC_PERFMON3_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON3_PERFMON_CNTL 0x023d
+#define mmDC_PERFMON3_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON3_PERFMON_CNTL2 0x023e
+#define mmDC_PERFMON3_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON3_PERFMON_CVALUE_INT_MISC 0x023f
+#define mmDC_PERFMON3_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON3_PERFMON_CVALUE_LOW 0x0240
+#define mmDC_PERFMON3_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON3_PERFMON_HI 0x0241
+#define mmDC_PERFMON3_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON3_PERFMON_LOW 0x0242
+#define mmDC_PERFMON3_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+// base address: 0x0
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL 0x02b2
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R 0x02b3
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_STATUS 0x02b4
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_STATUS_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_PITCH 0x02b5
+#define mmMCIF_WB0_MCIF_WB_BUF_PITCH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS 0x02b6
+#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS2 0x02b7
+#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS2_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS 0x02b8
+#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS2 0x02b9
+#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS2_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS 0x02ba
+#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS2 0x02bb
+#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS2_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS 0x02bc
+#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS2 0x02bd
+#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS2_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_ARBITRATION_CONTROL 0x02be
+#define mmMCIF_WB0_MCIF_WB_ARBITRATION_CONTROL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_SCLK_CHANGE 0x02bf
+#define mmMCIF_WB0_MCIF_WB_SCLK_CHANGE_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX 0x02c0
+#define mmMCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_TEST_DEBUG_DATA 0x02c1
+#define mmMCIF_WB0_MCIF_WB_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y 0x02c2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET 0x02c3
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C 0x02c4
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET 0x02c5
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y 0x02c6
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET 0x02c7
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C 0x02c8
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET 0x02c9
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y 0x02ca
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET 0x02cb
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C 0x02cc
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET 0x02cd
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y 0x02ce
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET 0x02cf
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C 0x02d0
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET 0x02d1
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL 0x02d2
+#define mmMCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK 0x02d3
+#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL 0x02d4
+#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_WATERMARK 0x02d5
+#define mmMCIF_WB0_MCIF_WB_WATERMARK_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL 0x02d6
+#define mmMCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_WARM_UP_CNTL 0x02d7
+#define mmMCIF_WB0_MCIF_WB_WARM_UP_CNTL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL 0x02d8
+#define mmMCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL_BASE_IDX 2
+#define mmMCIF_WB0_MULTI_LEVEL_QOS_CTRL 0x02d9
+#define mmMCIF_WB0_MULTI_LEVEL_QOS_CTRL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_SECURITY_LEVEL 0x02da
+#define mmMCIF_WB0_MCIF_WB_SECURITY_LEVEL_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_LUMA_SIZE 0x02db
+#define mmMCIF_WB0_MCIF_WB_BUF_LUMA_SIZE_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE 0x02dc
+#define mmMCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH 0x02dd
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH 0x02de
+#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH 0x02df
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH 0x02e0
+#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH 0x02e1
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH 0x02e2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH 0x02e3
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH 0x02e4
+#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_1_RESOLUTION 0x02e5
+#define mmMCIF_WB0_MCIF_WB_BUF_1_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_2_RESOLUTION 0x02e6
+#define mmMCIF_WB0_MCIF_WB_BUF_2_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_3_RESOLUTION 0x02e7
+#define mmMCIF_WB0_MCIF_WB_BUF_3_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB0_MCIF_WB_BUF_4_RESOLUTION 0x02e8
+#define mmMCIF_WB0_MCIF_WB_BUF_4_RESOLUTION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb1_dispdec
+// base address: 0x100
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL 0x02f2
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R 0x02f3
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_STATUS 0x02f4
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_STATUS_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_PITCH 0x02f5
+#define mmMCIF_WB1_MCIF_WB_BUF_PITCH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS 0x02f6
+#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS2 0x02f7
+#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS2_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS 0x02f8
+#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS2 0x02f9
+#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS2_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS 0x02fa
+#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS2 0x02fb
+#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS2_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS 0x02fc
+#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS2 0x02fd
+#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS2_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_ARBITRATION_CONTROL 0x02fe
+#define mmMCIF_WB1_MCIF_WB_ARBITRATION_CONTROL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_SCLK_CHANGE 0x02ff
+#define mmMCIF_WB1_MCIF_WB_SCLK_CHANGE_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX 0x0300
+#define mmMCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_TEST_DEBUG_DATA 0x0301
+#define mmMCIF_WB1_MCIF_WB_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y 0x0302
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET 0x0303
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C 0x0304
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET 0x0305
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y 0x0306
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET 0x0307
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C 0x0308
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET 0x0309
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y 0x030a
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET 0x030b
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C 0x030c
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET 0x030d
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y 0x030e
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET 0x030f
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C 0x0310
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET 0x0311
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL 0x0312
+#define mmMCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK 0x0313
+#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL 0x0314
+#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_WATERMARK 0x0315
+#define mmMCIF_WB1_MCIF_WB_WATERMARK_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL 0x0316
+#define mmMCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_WARM_UP_CNTL 0x0317
+#define mmMCIF_WB1_MCIF_WB_WARM_UP_CNTL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL 0x0318
+#define mmMCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL_BASE_IDX 2
+#define mmMCIF_WB1_MULTI_LEVEL_QOS_CTRL 0x0319
+#define mmMCIF_WB1_MULTI_LEVEL_QOS_CTRL_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_LUMA_SIZE 0x031b
+#define mmMCIF_WB1_MCIF_WB_BUF_LUMA_SIZE_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE 0x031c
+#define mmMCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_HIGH 0x031d
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_HIGH 0x031e
+#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_HIGH 0x031f
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_HIGH 0x0320
+#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_HIGH 0x0321
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_HIGH 0x0322
+#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_HIGH 0x0323
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_HIGH 0x0324
+#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_1_RESOLUTION 0x0325
+#define mmMCIF_WB1_MCIF_WB_BUF_1_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_2_RESOLUTION 0x0326
+#define mmMCIF_WB1_MCIF_WB_BUF_2_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_3_RESOLUTION 0x0327
+#define mmMCIF_WB1_MCIF_WB_BUF_3_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB1_MCIF_WB_BUF_4_RESOLUTION 0x0328
+#define mmMCIF_WB1_MCIF_WB_BUF_4_RESOLUTION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+// base address: 0x0
+#define mmWBIF0_MISC_CTRL 0x0333
+#define mmWBIF0_MISC_CTRL_BASE_IDX 2
+#define mmWBIF0_SMU_WM_CONTROL 0x0334
+#define mmWBIF0_SMU_WM_CONTROL_BASE_IDX 2
+#define mmWBIF0_PHASE0_OUTSTANDING_COUNTER 0x0335
+#define mmWBIF0_PHASE0_OUTSTANDING_COUNTER_BASE_IDX 2
+#define mmWBIF0_PHASE1_OUTSTANDING_COUNTER 0x0336
+#define mmWBIF0_PHASE1_OUTSTANDING_COUNTER_BASE_IDX 2
+#define mmVGA_SRC_SPLIT_CNTL 0x033f
+#define mmVGA_SRC_SPLIT_CNTL_BASE_IDX 2
+#define mmMMHUBBUB_MEM_PWR_STATUS 0x0340
+#define mmMMHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define mmMMHUBBUB_MEM_PWR_CNTL 0x0341
+#define mmMMHUBBUB_MEM_PWR_CNTL_BASE_IDX 2
+#define mmMMHUBBUB_CLOCK_CNTL 0x0342
+#define mmMMHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define mmMMHUBBUB_SOFT_RESET 0x0343
+#define mmMMHUBBUB_SOFT_RESET_BASE_IDX 2
+#define mmDMU_IF_ERR_STATUS 0x0347
+#define mmDMU_IF_ERR_STATUS_BASE_IDX 2
+#define mmMMHUBBUB_CLIENT_UNIT_ID 0x0348
+#define mmMMHUBBUB_CLIENT_UNIT_ID_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_vgaif_dispdec
+// base address: 0x0
+#define mmMCIF_CONTROL 0x034a
+#define mmMCIF_CONTROL_BASE_IDX 2
+#define mmMCIF_WRITE_COMBINE_CONTROL 0x034b
+#define mmMCIF_WRITE_COMBINE_CONTROL_BASE_IDX 2
+#define mmMCIF_PHASE0_OUTSTANDING_COUNTER 0x034e
+#define mmMCIF_PHASE0_OUTSTANDING_COUNTER_BASE_IDX 2
+#define mmMCIF_PHASE1_OUTSTANDING_COUNTER 0x034f
+#define mmMCIF_PHASE1_OUTSTANDING_COUNTER_BASE_IDX 2
+#define mmMCIF_PHASE2_OUTSTANDING_COUNTER 0x0350
+#define mmMCIF_PHASE2_OUTSTANDING_COUNTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0xd48
+#define mmDC_PERFMON4_PERFCOUNTER_CNTL 0x0352
+#define mmDC_PERFMON4_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON4_PERFCOUNTER_CNTL2 0x0353
+#define mmDC_PERFMON4_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON4_PERFCOUNTER_STATE 0x0354
+#define mmDC_PERFMON4_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON4_PERFMON_CNTL 0x0355
+#define mmDC_PERFMON4_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON4_PERFMON_CNTL2 0x0356
+#define mmDC_PERFMON4_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON4_PERFMON_CVALUE_INT_MISC 0x0357
+#define mmDC_PERFMON4_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON4_PERFMON_CVALUE_LOW 0x0358
+#define mmDC_PERFMON4_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON4_PERFMON_HI 0x0359
+#define mmDC_PERFMON4_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON4_PERFMON_LOW 0x035a
+#define mmDC_PERFMON4_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+// base address: 0x0
+#define mmAZF0STREAM0_AZALIA_STREAM_INDEX 0x035e
+#define mmAZF0STREAM0_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM0_AZALIA_STREAM_DATA 0x035f
+#define mmAZF0STREAM0_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+// base address: 0x8
+#define mmAZF0STREAM1_AZALIA_STREAM_INDEX 0x0360
+#define mmAZF0STREAM1_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM1_AZALIA_STREAM_DATA 0x0361
+#define mmAZF0STREAM1_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+// base address: 0x10
+#define mmAZF0STREAM2_AZALIA_STREAM_INDEX 0x0362
+#define mmAZF0STREAM2_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM2_AZALIA_STREAM_DATA 0x0363
+#define mmAZF0STREAM2_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+// base address: 0x18
+#define mmAZF0STREAM3_AZALIA_STREAM_INDEX 0x0364
+#define mmAZF0STREAM3_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM3_AZALIA_STREAM_DATA 0x0365
+#define mmAZF0STREAM3_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+// base address: 0x20
+#define mmAZF0STREAM4_AZALIA_STREAM_INDEX 0x0366
+#define mmAZF0STREAM4_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM4_AZALIA_STREAM_DATA 0x0367
+#define mmAZF0STREAM4_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+// base address: 0x28
+#define mmAZF0STREAM5_AZALIA_STREAM_INDEX 0x0368
+#define mmAZF0STREAM5_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM5_AZALIA_STREAM_DATA 0x0369
+#define mmAZF0STREAM5_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+// base address: 0x30
+#define mmAZF0STREAM6_AZALIA_STREAM_INDEX 0x036a
+#define mmAZF0STREAM6_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM6_AZALIA_STREAM_DATA 0x036b
+#define mmAZF0STREAM6_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+// base address: 0x38
+#define mmAZF0STREAM7_AZALIA_STREAM_INDEX 0x036c
+#define mmAZF0STREAM7_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM7_AZALIA_STREAM_DATA 0x036d
+#define mmAZF0STREAM7_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+// base address: 0x0
+#define mmAZ_CLOCK_CNTL 0x0372
+#define mmAZ_CLOCK_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+// base address: 0xde8
+#define mmDC_PERFMON5_PERFCOUNTER_CNTL 0x037a
+#define mmDC_PERFMON5_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON5_PERFCOUNTER_CNTL2 0x037b
+#define mmDC_PERFMON5_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON5_PERFCOUNTER_STATE 0x037c
+#define mmDC_PERFMON5_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON5_PERFMON_CNTL 0x037d
+#define mmDC_PERFMON5_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON5_PERFMON_CNTL2 0x037e
+#define mmDC_PERFMON5_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON5_PERFMON_CVALUE_INT_MISC 0x037f
+#define mmDC_PERFMON5_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON5_PERFMON_CVALUE_LOW 0x0380
+#define mmDC_PERFMON5_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON5_PERFMON_HI 0x0381
+#define mmDC_PERFMON5_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON5_PERFMON_LOW 0x0382
+#define mmDC_PERFMON5_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+// base address: 0x0
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+// base address: 0x18
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+// base address: 0x30
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0392
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0393
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+// base address: 0x48
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0398
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0399
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+// base address: 0x60
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x039e
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x039f
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+// base address: 0x78
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03a4
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03a5
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+// base address: 0x90
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03aa
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03ab
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+// base address: 0xa8
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03b0
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03b1
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+// base address: 0x0
+#define mmAZALIA_CONTROLLER_CLOCK_GATING 0x03c2
+#define mmAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2
+#define mmAZALIA_AUDIO_DTO 0x03c3
+#define mmAZALIA_AUDIO_DTO_BASE_IDX 2
+#define mmAZALIA_AUDIO_DTO_CONTROL 0x03c4
+#define mmAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2
+#define mmAZALIA_SOCCLK_CONTROL 0x03c5
+#define mmAZALIA_SOCCLK_CONTROL_BASE_IDX 2
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2
+#define mmAZALIA_DATA_DMA_CONTROL 0x03c7
+#define mmAZALIA_DATA_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_BDL_DMA_CONTROL 0x03c8
+#define mmAZALIA_BDL_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_RIRB_AND_DP_CONTROL 0x03c9
+#define mmAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2
+#define mmAZALIA_CORB_DMA_CONTROL 0x03ca
+#define mmAZALIA_CORB_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x03d1
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER_BASE_IDX 2
+#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x03d2
+#define mmAZALIA_CYCLIC_BUFFER_SYNC_BASE_IDX 2
+#define mmAZALIA_GLOBAL_CAPABILITIES 0x03d3
+#define mmAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL0 0x03d9
+#define mmAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL1 0x03da
+#define mmAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL2 0x03db
+#define mmAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL3 0x03dc
+#define mmAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_RESULT 0x03dd
+#define mmAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL0 0x03de
+#define mmAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL1 0x03df
+#define mmAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL2 0x03e0
+#define mmAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL3 0x03e1
+#define mmAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_RESULT 0x03e2
+#define mmAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2
+#define mmAZALIA_CRC0_CONTROL0 0x03e3
+#define mmAZALIA_CRC0_CONTROL0_BASE_IDX 2
+#define mmAZALIA_CRC0_CONTROL1 0x03e4
+#define mmAZALIA_CRC0_CONTROL1_BASE_IDX 2
+#define mmAZALIA_CRC0_CONTROL2 0x03e5
+#define mmAZALIA_CRC0_CONTROL2_BASE_IDX 2
+#define mmAZALIA_CRC0_CONTROL3 0x03e6
+#define mmAZALIA_CRC0_CONTROL3_BASE_IDX 2
+#define mmAZALIA_CRC0_RESULT 0x03e7
+#define mmAZALIA_CRC0_RESULT_BASE_IDX 2
+#define mmAZALIA_CRC1_CONTROL0 0x03e8
+#define mmAZALIA_CRC1_CONTROL0_BASE_IDX 2
+#define mmAZALIA_CRC1_CONTROL1 0x03e9
+#define mmAZALIA_CRC1_CONTROL1_BASE_IDX 2
+#define mmAZALIA_CRC1_CONTROL2 0x03ea
+#define mmAZALIA_CRC1_CONTROL2_BASE_IDX 2
+#define mmAZALIA_CRC1_CONTROL3 0x03eb
+#define mmAZALIA_CRC1_CONTROL3_BASE_IDX 2
+#define mmAZALIA_CRC1_RESULT 0x03ec
+#define mmAZALIA_CRC1_RESULT_BASE_IDX 2
+#define mmAZALIA_MEM_PWR_CTRL 0x03ee
+#define mmAZALIA_MEM_PWR_CTRL_BASE_IDX 2
+#define mmAZALIA_MEM_PWR_STATUS 0x03ef
+#define mmAZALIA_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+// base address: 0x0
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2
+#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x0412
+#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define mmCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x0413
+#define mmCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET0 0x0415
+#define mmAZALIA_F0_GTC_GROUP_OFFSET0_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET1 0x0416
+#define mmAZALIA_F0_GTC_GROUP_OFFSET1_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET2 0x0417
+#define mmAZALIA_F0_GTC_GROUP_OFFSET2_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET3 0x0418
+#define mmAZALIA_F0_GTC_GROUP_OFFSET3_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET4 0x0419
+#define mmAZALIA_F0_GTC_GROUP_OFFSET4_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET5 0x041a
+#define mmAZALIA_F0_GTC_GROUP_OFFSET5_BASE_IDX 2
+#define mmAZALIA_F0_GTC_GROUP_OFFSET6 0x041b
+#define mmAZALIA_F0_GTC_GROUP_OFFSET6_BASE_IDX 2
+#define mmREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c
+#define mmREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d
+#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+// base address: 0x320
+#define mmAZF0STREAM8_AZALIA_STREAM_INDEX 0x0426
+#define mmAZF0STREAM8_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM8_AZALIA_STREAM_DATA 0x0427
+#define mmAZF0STREAM8_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+// base address: 0x328
+#define mmAZF0STREAM9_AZALIA_STREAM_INDEX 0x0428
+#define mmAZF0STREAM9_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM9_AZALIA_STREAM_DATA 0x0429
+#define mmAZF0STREAM9_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+// base address: 0x330
+#define mmAZF0STREAM10_AZALIA_STREAM_INDEX 0x042a
+#define mmAZF0STREAM10_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM10_AZALIA_STREAM_DATA 0x042b
+#define mmAZF0STREAM10_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+// base address: 0x338
+#define mmAZF0STREAM11_AZALIA_STREAM_INDEX 0x042c
+#define mmAZF0STREAM11_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM11_AZALIA_STREAM_DATA 0x042d
+#define mmAZF0STREAM11_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+// base address: 0x340
+#define mmAZF0STREAM12_AZALIA_STREAM_INDEX 0x042e
+#define mmAZF0STREAM12_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM12_AZALIA_STREAM_DATA 0x042f
+#define mmAZF0STREAM12_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+// base address: 0x348
+#define mmAZF0STREAM13_AZALIA_STREAM_INDEX 0x0430
+#define mmAZF0STREAM13_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM13_AZALIA_STREAM_DATA 0x0431
+#define mmAZF0STREAM13_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+// base address: 0x350
+#define mmAZF0STREAM14_AZALIA_STREAM_INDEX 0x0432
+#define mmAZF0STREAM14_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM14_AZALIA_STREAM_DATA 0x0433
+#define mmAZF0STREAM14_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+// base address: 0x358
+#define mmAZF0STREAM15_AZALIA_STREAM_INDEX 0x0434
+#define mmAZF0STREAM15_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define mmAZF0STREAM15_AZALIA_STREAM_DATA 0x0435
+#define mmAZF0STREAM15_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+// base address: 0x0
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+// base address: 0x10
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+// base address: 0x20
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0442
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0443
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+// base address: 0x30
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0446
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0447
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+// base address: 0x40
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044a
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044b
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+// base address: 0x50
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044e
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044f
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+// base address: 0x60
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0452
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0453
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+// base address: 0x70
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0456
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0457
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_sdpif_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_SDPIF_CFG0 0x048f
+#define mmDCHUBBUB_SDPIF_CFG0_BASE_IDX 2
+#define mmVM_REQUEST_PHYSICAL 0x0490
+#define mmVM_REQUEST_PHYSICAL_BASE_IDX 2
+#define mmDCHUBBUB_FORCE_IO_STATUS_0 0x0491
+#define mmDCHUBBUB_FORCE_IO_STATUS_0_BASE_IDX 2
+#define mmDCHUBBUB_FORCE_IO_STATUS_1 0x0492
+#define mmDCHUBBUB_FORCE_IO_STATUS_1_BASE_IDX 2
+#define mmDCN_VM_FB_LOCATION_BASE 0x0493
+#define mmDCN_VM_FB_LOCATION_BASE_BASE_IDX 2
+#define mmDCN_VM_FB_LOCATION_TOP 0x0494
+#define mmDCN_VM_FB_LOCATION_TOP_BASE_IDX 2
+#define mmDCN_VM_FB_OFFSET 0x0495
+#define mmDCN_VM_FB_OFFSET_BASE_IDX 2
+#define mmDCN_VM_AGP_BOT 0x0496
+#define mmDCN_VM_AGP_BOT_BASE_IDX 2
+#define mmDCN_VM_AGP_TOP 0x0497
+#define mmDCN_VM_AGP_TOP_BASE_IDX 2
+#define mmDCN_VM_AGP_BASE 0x0498
+#define mmDCN_VM_AGP_BASE_BASE_IDX 2
+#define mmDCN_VM_LOCAL_HBM_ADDRESS_START 0x0499
+#define mmDCN_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 2
+#define mmDCN_VM_LOCAL_HBM_ADDRESS_END 0x049a
+#define mmDCN_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 2
+#define mmDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x049b
+#define mmDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_PIPE_SEC_LVL 0x04b8
+#define mmDCHUBBUB_SDPIF_PIPE_SEC_LVL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL 0x04b9
+#define mmDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_MEM_PWR_CTRL 0x04ba
+#define mmDCHUBBUB_SDPIF_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_MEM_PWR_STATUS 0x04bb
+#define mmDCHUBBUB_SDPIF_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_CFG1 0x04bf
+#define mmDCHUBBUB_SDPIF_CFG1_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_CFG2 0x04c0
+#define mmDCHUBBUB_SDPIF_CFG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_RET_PATH_DCC_CFG 0x04cf
+#define mmDCHUBBUB_RET_PATH_DCC_CFG_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0 0x04d0
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1 0x04d1
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0 0x04d2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1 0x04d3
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0 0x04d4
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1 0x04d5
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0 0x04d6
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1 0x04d7
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0 0x04d8
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1 0x04d9
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0 0x04da
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1 0x04db
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0 0x04dc
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1 0x04dd
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0 0x04de
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1 0x04df
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04ef
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04f0
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_CRC_CTRL 0x04f1
+#define mmDCHUBBUB_CRC_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_CRC0_VAL_R_G 0x04f2
+#define mmDCHUBBUB_CRC0_VAL_R_G_BASE_IDX 2
+#define mmDCHUBBUB_CRC0_VAL_B_A 0x04f3
+#define mmDCHUBBUB_CRC0_VAL_B_A_BASE_IDX 2
+#define mmDCHUBBUB_CRC1_VAL_R_G 0x04f4
+#define mmDCHUBBUB_CRC1_VAL_R_G_BASE_IDX 2
+#define mmDCHUBBUB_CRC1_VAL_B_A 0x04f5
+#define mmDCHUBBUB_CRC1_VAL_B_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x0505
+#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2
+#define mmDCHUBBUB_ARB_SAT_LEVEL 0x0506
+#define mmDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_QOS_FORCE 0x0507
+#define mmDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL 0x0508
+#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x0509
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A 0x050a
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A 0x050b
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A 0x050c
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A 0x050d
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x050e
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B 0x050f
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B 0x0510
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B 0x0511
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B 0x0512
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0513
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C 0x0514
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C 0x0515
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C 0x0516
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C 0x0517
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x0518
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D 0x0519
+#define mmDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D 0x051a
+#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D 0x051b
+#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D 0x051c
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x051d
+#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE 0x051e
+#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2
+#define mmDCHUBBUB_GLOBAL_TIMER_CNTL 0x051f
+#define mmDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2
+#define mmSURFACE_CHECK0_ADDRESS_LSB 0x0520
+#define mmSURFACE_CHECK0_ADDRESS_LSB_BASE_IDX 2
+#define mmSURFACE_CHECK0_ADDRESS_MSB 0x0521
+#define mmSURFACE_CHECK0_ADDRESS_MSB_BASE_IDX 2
+#define mmSURFACE_CHECK1_ADDRESS_LSB 0x0522
+#define mmSURFACE_CHECK1_ADDRESS_LSB_BASE_IDX 2
+#define mmSURFACE_CHECK1_ADDRESS_MSB 0x0523
+#define mmSURFACE_CHECK1_ADDRESS_MSB_BASE_IDX 2
+#define mmSURFACE_CHECK2_ADDRESS_LSB 0x0524
+#define mmSURFACE_CHECK2_ADDRESS_LSB_BASE_IDX 2
+#define mmSURFACE_CHECK2_ADDRESS_MSB 0x0525
+#define mmSURFACE_CHECK2_ADDRESS_MSB_BASE_IDX 2
+#define mmSURFACE_CHECK3_ADDRESS_LSB 0x0526
+#define mmSURFACE_CHECK3_ADDRESS_LSB_BASE_IDX 2
+#define mmSURFACE_CHECK3_ADDRESS_MSB 0x0527
+#define mmSURFACE_CHECK3_ADDRESS_MSB_BASE_IDX 2
+#define mmVTG0_CONTROL 0x0528
+#define mmVTG0_CONTROL_BASE_IDX 2
+#define mmVTG1_CONTROL 0x0529
+#define mmVTG1_CONTROL_BASE_IDX 2
+#define mmVTG2_CONTROL 0x052a
+#define mmVTG2_CONTROL_BASE_IDX 2
+#define mmVTG3_CONTROL 0x052b
+#define mmVTG3_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SOFT_RESET 0x052e
+#define mmDCHUBBUB_SOFT_RESET_BASE_IDX 2
+#define mmDCHUBBUB_CLOCK_CNTL 0x052f
+#define mmDCHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define mmDCFCLK_CNTL 0x0530
+#define mmDCFCLK_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL 0x0531
+#define mmDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2 0x0532
+#define mmDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2_BASE_IDX 2
+#define mmDCHUBBUB_VLINE_SNAPSHOT 0x0533
+#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
+#define mmDCHUBBUB_CTRL_STATUS 0x0534
+#define mmDCHUBBUB_CTRL_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x053a
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x053b
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x053c
+#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053d
+#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053e
+#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_A 0x053f
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A 0x0540
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_B 0x0541
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B 0x0542
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_C 0x0543
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C 0x0544
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_D 0x0545
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_NOM_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D 0x0546
+#define mmDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_HOSTVM_CNTL 0x0547
+#define mmDCHUBBUB_ARB_HOSTVM_CNTL_BASE_IDX 2
+#define mmFMON_CTRL 0x0548
+#define mmFMON_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_dchubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1534
+#define mmDC_PERFMON6_PERFCOUNTER_CNTL 0x054d
+#define mmDC_PERFMON6_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON6_PERFCOUNTER_CNTL2 0x054e
+#define mmDC_PERFMON6_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON6_PERFCOUNTER_STATE 0x054f
+#define mmDC_PERFMON6_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON6_PERFMON_CNTL 0x0550
+#define mmDC_PERFMON6_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON6_PERFMON_CNTL2 0x0551
+#define mmDC_PERFMON6_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON6_PERFMON_CVALUE_INT_MISC 0x0552
+#define mmDC_PERFMON6_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON6_PERFMON_CVALUE_LOW 0x0553
+#define mmDC_PERFMON6_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON6_PERFMON_HI 0x0554
+#define mmDC_PERFMON6_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON6_PERFMON_LOW 0x0555
+#define mmDC_PERFMON6_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_vmrq_if_dispdec
+// base address: 0x0
+#define mmDCN_VM_CONTEXT0_CNTL 0x0559
+#define mmDCN_VM_CONTEXT0_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x055a
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x055b
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x055c
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x055d
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x055e
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x055f
+#define mmDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_CNTL 0x0560
+#define mmDCN_VM_CONTEXT1_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0561
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0562
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0563
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0564
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0565
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0566
+#define mmDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_CNTL 0x0567
+#define mmDCN_VM_CONTEXT2_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0568
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0569
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x056a
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x056b
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x056c
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x056d
+#define mmDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_CNTL 0x056e
+#define mmDCN_VM_CONTEXT3_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x056f
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0570
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0571
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0572
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0573
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0574
+#define mmDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_CNTL 0x0575
+#define mmDCN_VM_CONTEXT4_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0576
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0577
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0578
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0579
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x057a
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x057b
+#define mmDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_CNTL 0x057c
+#define mmDCN_VM_CONTEXT5_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x057d
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x057e
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x057f
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0580
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0581
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0582
+#define mmDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_CNTL 0x0583
+#define mmDCN_VM_CONTEXT6_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0584
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0585
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0586
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0587
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0588
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0589
+#define mmDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_CNTL 0x058a
+#define mmDCN_VM_CONTEXT7_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x058b
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x058c
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x058d
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x058e
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x058f
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0590
+#define mmDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_CNTL 0x0591
+#define mmDCN_VM_CONTEXT8_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0592
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0593
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0594
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0595
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0596
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0597
+#define mmDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_CNTL 0x0598
+#define mmDCN_VM_CONTEXT9_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0599
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x059a
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x059b
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x059c
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x059d
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x059e
+#define mmDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_CNTL 0x059f
+#define mmDCN_VM_CONTEXT10_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x05a0
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x05a1
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x05a2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x05a3
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x05a4
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x05a5
+#define mmDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_CNTL 0x05a6
+#define mmDCN_VM_CONTEXT11_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x05a7
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x05a8
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x05a9
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x05aa
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x05ab
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x05ac
+#define mmDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_CNTL 0x05ad
+#define mmDCN_VM_CONTEXT12_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x05ae
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x05af
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x05b0
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x05b1
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x05b2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x05b3
+#define mmDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_CNTL 0x05b4
+#define mmDCN_VM_CONTEXT13_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x05b5
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x05b6
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x05b7
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x05b8
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x05b9
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x05ba
+#define mmDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_CNTL 0x05bb
+#define mmDCN_VM_CONTEXT14_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x05bc
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x05bd
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x05be
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x05bf
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x05c0
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x05c1
+#define mmDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_CNTL 0x05c2
+#define mmDCN_VM_CONTEXT15_CNTL_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x05c3
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x05c4
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x05c5
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x05c6
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x05c7
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x05c8
+#define mmDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define mmDCN_VM_DEFAULT_ADDR_MSB 0x05c9
+#define mmDCN_VM_DEFAULT_ADDR_MSB_BASE_IDX 2
+#define mmDCN_VM_DEFAULT_ADDR_LSB 0x05ca
+#define mmDCN_VM_DEFAULT_ADDR_LSB_BASE_IDX 2
+#define mmDCN_VM_FAULT_CNTL 0x05cb
+#define mmDCN_VM_FAULT_CNTL_BASE_IDX 2
+#define mmDCN_VM_FAULT_STATUS 0x05cc
+#define mmDCN_VM_FAULT_STATUS_BASE_IDX 2
+#define mmDCN_VM_FAULT_ADDR_MSB 0x05cd
+#define mmDCN_VM_FAULT_ADDR_MSB_BASE_IDX 2
+#define mmDCN_VM_FAULT_ADDR_LSB 0x05ce
+#define mmDCN_VM_FAULT_ADDR_LSB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+// base address: 0x0
+#define mmHUBP0_DCSURF_SURFACE_CONFIG 0x05e5
+#define mmHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_ADDR_CONFIG 0x05e6
+#define mmHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_TILING_CONFIG 0x05e7
+#define mmHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP0_DCHUBP_CNTL 0x05f3
+#define mmHUBP0_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP0_HUBP_CLK_CNTL 0x05f4
+#define mmHUBP0_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP0_DCHUBP_VMPG_CONFIG 0x05f5
+#define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG_DB 0x05f6
+#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG 0x05f7
+#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fb
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fc
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+// base address: 0x0
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ0_VMID_SETTINGS_0 0x0609
+#define mmHUBPREQ0_VMID_SETTINGS_0_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a
+#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x0620
+#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE 0x0621
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0622
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0623
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0624
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0625
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0626
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0627
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0628
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCN_EXPANSION_MODE 0x062c
+#define mmHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ0_DCN_TTU_QOS_WM 0x062d
+#define mmHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062e
+#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062f
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x0630
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x0631
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x0632
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x0633
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0634
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR1_TTU_CNTL0 0x0635
+#define mmHUBPREQ0_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR1_TTU_CNTL1 0x0636
+#define mmHUBPREQ0_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0637
+#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0638
+#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define mmHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL 0x0645
+#define mmHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define mmHUBPREQ0_BLANK_OFFSET_0 0x0646
+#define mmHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ0_BLANK_OFFSET_1 0x0647
+#define mmHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ0_DST_DIMENSIONS 0x0648
+#define mmHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ0_DST_AFTER_SCALER 0x0649
+#define mmHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ0_PREFETCH_SETTINGS 0x064a
+#define mmHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ0_PREFETCH_SETTINGS_C 0x064b
+#define mmHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_0 0x064c
+#define mmHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_1 0x064d
+#define mmHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_2 0x064e
+#define mmHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_3 0x064f
+#define mmHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_4 0x0650
+#define mmHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_0 0x0651
+#define mmHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_1 0x0652
+#define mmHUBPREQ0_FLIP_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_2 0x0653
+#define mmHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_0 0x0654
+#define mmHUBPREQ0_NOM_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_1 0x0655
+#define mmHUBPREQ0_NOM_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_2 0x0656
+#define mmHUBPREQ0_NOM_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_3 0x0657
+#define mmHUBPREQ0_NOM_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_4 0x0658
+#define mmHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_5 0x0659
+#define mmHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_6 0x065a
+#define mmHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_7 0x065b
+#define mmHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE 0x065c
+#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ0_PER_LINE_DELIVERY 0x065d
+#define mmHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ0_CURSOR_SETTINGS 0x065e
+#define mmHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065f
+#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x0660
+#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x0661
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x0662
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_5 0x0665
+#define mmHUBPREQ0_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_6 0x0666
+#define mmHUBPREQ0_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_3 0x0667
+#define mmHUBPREQ0_FLIP_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_4 0x0668
+#define mmHUBPREQ0_FLIP_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_5 0x0669
+#define mmHUBPREQ0_FLIP_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_6 0x066a
+#define mmHUBPREQ0_FLIP_PARAMETERS_6_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+// base address: 0x0
+#define mmHUBPRET0_HUBPRET_CONTROL 0x066c
+#define mmHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066d
+#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066e
+#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066f
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x0670
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE0 0x0671
+#define mmHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE1 0x0672
+#define mmHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_INTERRUPT 0x0673
+#define mmHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0674
+#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0675
+#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+// base address: 0x0
+#define mmCURSOR0_0_CURSOR_CONTROL 0x0678
+#define mmCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SIZE 0x067b
+#define mmCURSOR0_0_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_POSITION 0x067c
+#define mmCURSOR0_0_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_HOT_SPOT 0x067d
+#define mmCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e
+#define mmCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_DST_OFFSET 0x067f
+#define mmCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680
+#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681
+#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682
+#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683
+#define mmCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_CNTL 0x0684
+#define mmCURSOR0_0_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_QOS_CNTL 0x0685
+#define mmCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_STATUS 0x0686
+#define mmCURSOR0_0_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_SW_CNTL 0x0687
+#define mmCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_SW_DATA 0x0688
+#define mmCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a74
+#define mmDC_PERFMON7_PERFCOUNTER_CNTL 0x069d
+#define mmDC_PERFMON7_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON7_PERFCOUNTER_CNTL2 0x069e
+#define mmDC_PERFMON7_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON7_PERFCOUNTER_STATE 0x069f
+#define mmDC_PERFMON7_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON7_PERFMON_CNTL 0x06a0
+#define mmDC_PERFMON7_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON7_PERFMON_CNTL2 0x06a1
+#define mmDC_PERFMON7_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON7_PERFMON_CVALUE_INT_MISC 0x06a2
+#define mmDC_PERFMON7_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON7_PERFMON_CVALUE_LOW 0x06a3
+#define mmDC_PERFMON7_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON7_PERFMON_HI 0x06a4
+#define mmDC_PERFMON7_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON7_PERFMON_LOW 0x06a5
+#define mmDC_PERFMON7_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+// base address: 0x370
+#define mmHUBP1_DCSURF_SURFACE_CONFIG 0x06c1
+#define mmHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_ADDR_CONFIG 0x06c2
+#define mmHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_TILING_CONFIG 0x06c3
+#define mmHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP1_DCHUBP_CNTL 0x06cf
+#define mmHUBP1_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP1_HUBP_CLK_CNTL 0x06d0
+#define mmHUBP1_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP1_DCHUBP_VMPG_CONFIG 0x06d1
+#define mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG_DB 0x06d2
+#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG 0x06d3
+#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d7
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06d8
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+// base address: 0x370
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ1_VMID_SETTINGS_0 0x06e5
+#define mmHUBPREQ1_VMID_SETTINGS_0_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fc
+#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fd
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fe
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06ff
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x0700
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0701
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0702
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0703
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0704
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCN_EXPANSION_MODE 0x0708
+#define mmHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ1_DCN_TTU_QOS_WM 0x0709
+#define mmHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x070a
+#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x070b
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x070c
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x070d
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070e
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070f
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x0710
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x0711
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x0712
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0713
+#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0714
+#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define mmHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL 0x0721
+#define mmHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define mmHUBPREQ1_BLANK_OFFSET_0 0x0722
+#define mmHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ1_BLANK_OFFSET_1 0x0723
+#define mmHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ1_DST_DIMENSIONS 0x0724
+#define mmHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ1_DST_AFTER_SCALER 0x0725
+#define mmHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ1_PREFETCH_SETTINGS 0x0726
+#define mmHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ1_PREFETCH_SETTINGS_C 0x0727
+#define mmHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_0 0x0728
+#define mmHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_1 0x0729
+#define mmHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_2 0x072a
+#define mmHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_3 0x072b
+#define mmHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_4 0x072c
+#define mmHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_0 0x072d
+#define mmHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_1 0x072e
+#define mmHUBPREQ1_FLIP_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_2 0x072f
+#define mmHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_0 0x0730
+#define mmHUBPREQ1_NOM_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_1 0x0731
+#define mmHUBPREQ1_NOM_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_2 0x0732
+#define mmHUBPREQ1_NOM_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_3 0x0733
+#define mmHUBPREQ1_NOM_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_4 0x0734
+#define mmHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_5 0x0735
+#define mmHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_6 0x0736
+#define mmHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_7 0x0737
+#define mmHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0738
+#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ1_PER_LINE_DELIVERY 0x0739
+#define mmHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ1_CURSOR_SETTINGS 0x073a
+#define mmHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x073b
+#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x073c
+#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073d
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073e
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_5 0x0741
+#define mmHUBPREQ1_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_6 0x0742
+#define mmHUBPREQ1_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_3 0x0743
+#define mmHUBPREQ1_FLIP_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_4 0x0744
+#define mmHUBPREQ1_FLIP_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_5 0x0745
+#define mmHUBPREQ1_FLIP_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_6 0x0746
+#define mmHUBPREQ1_FLIP_PARAMETERS_6_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+// base address: 0x370
+#define mmHUBPRET1_HUBPRET_CONTROL 0x0748
+#define mmHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0749
+#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x074a
+#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x074b
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074c
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE0 0x074d
+#define mmHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE1 0x074e
+#define mmHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_INTERRUPT 0x074f
+#define mmHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE 0x0750
+#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS 0x0751
+#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+// base address: 0x370
+#define mmCURSOR0_1_CURSOR_CONTROL 0x0754
+#define mmCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SIZE 0x0757
+#define mmCURSOR0_1_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_POSITION 0x0758
+#define mmCURSOR0_1_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_HOT_SPOT 0x0759
+#define mmCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a
+#define mmCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_DST_OFFSET 0x075b
+#define mmCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c
+#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d
+#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e
+#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f
+#define mmCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_CNTL 0x0760
+#define mmCURSOR0_1_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_QOS_CNTL 0x0761
+#define mmCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_STATUS 0x0762
+#define mmCURSOR0_1_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_SW_CNTL 0x0763
+#define mmCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_SW_DATA 0x0764
+#define mmCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1de4
+#define mmDC_PERFMON8_PERFCOUNTER_CNTL 0x0779
+#define mmDC_PERFMON8_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON8_PERFCOUNTER_CNTL2 0x077a
+#define mmDC_PERFMON8_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON8_PERFCOUNTER_STATE 0x077b
+#define mmDC_PERFMON8_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON8_PERFMON_CNTL 0x077c
+#define mmDC_PERFMON8_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON8_PERFMON_CNTL2 0x077d
+#define mmDC_PERFMON8_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON8_PERFMON_CVALUE_INT_MISC 0x077e
+#define mmDC_PERFMON8_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON8_PERFMON_CVALUE_LOW 0x077f
+#define mmDC_PERFMON8_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON8_PERFMON_HI 0x0780
+#define mmDC_PERFMON8_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON8_PERFMON_LOW 0x0781
+#define mmDC_PERFMON8_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+// base address: 0x6e0
+#define mmHUBP2_DCSURF_SURFACE_CONFIG 0x079d
+#define mmHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_ADDR_CONFIG 0x079e
+#define mmHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_TILING_CONFIG 0x079f
+#define mmHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP2_DCHUBP_CNTL 0x07ab
+#define mmHUBP2_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP2_HUBP_CLK_CNTL 0x07ac
+#define mmHUBP2_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP2_DCHUBP_VMPG_CONFIG 0x07ad
+#define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG_DB 0x07ae
+#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG 0x07af
+#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b3
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b4
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+// base address: 0x6e0
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ2_VMID_SETTINGS_0 0x07c1
+#define mmHUBPREQ2_VMID_SETTINGS_0_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2
+#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d8
+#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d9
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07da
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07db
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07dc
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dd
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07de
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07df
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07e0
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCN_EXPANSION_MODE 0x07e4
+#define mmHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ2_DCN_TTU_QOS_WM 0x07e5
+#define mmHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e6
+#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e7
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e8
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e9
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07ea
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07eb
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07ec
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07ed
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ee
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x07ef
+#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x07f0
+#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define mmHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL 0x07fd
+#define mmHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define mmHUBPREQ2_BLANK_OFFSET_0 0x07fe
+#define mmHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ2_BLANK_OFFSET_1 0x07ff
+#define mmHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ2_DST_DIMENSIONS 0x0800
+#define mmHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ2_DST_AFTER_SCALER 0x0801
+#define mmHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ2_PREFETCH_SETTINGS 0x0802
+#define mmHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ2_PREFETCH_SETTINGS_C 0x0803
+#define mmHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_0 0x0804
+#define mmHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_1 0x0805
+#define mmHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_2 0x0806
+#define mmHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_3 0x0807
+#define mmHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_4 0x0808
+#define mmHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_0 0x0809
+#define mmHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_1 0x080a
+#define mmHUBPREQ2_FLIP_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_2 0x080b
+#define mmHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_0 0x080c
+#define mmHUBPREQ2_NOM_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_1 0x080d
+#define mmHUBPREQ2_NOM_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_2 0x080e
+#define mmHUBPREQ2_NOM_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_3 0x080f
+#define mmHUBPREQ2_NOM_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_4 0x0810
+#define mmHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_5 0x0811
+#define mmHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_6 0x0812
+#define mmHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_7 0x0813
+#define mmHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0814
+#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ2_PER_LINE_DELIVERY 0x0815
+#define mmHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ2_CURSOR_SETTINGS 0x0816
+#define mmHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0817
+#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0818
+#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0819
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x081a
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_5 0x081d
+#define mmHUBPREQ2_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_6 0x081e
+#define mmHUBPREQ2_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_3 0x081f
+#define mmHUBPREQ2_FLIP_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_4 0x0820
+#define mmHUBPREQ2_FLIP_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_5 0x0821
+#define mmHUBPREQ2_FLIP_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_6 0x0822
+#define mmHUBPREQ2_FLIP_PARAMETERS_6_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+// base address: 0x6e0
+#define mmHUBPRET2_HUBPRET_CONTROL 0x0824
+#define mmHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0825
+#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0826
+#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0827
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0828
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE0 0x0829
+#define mmHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE1 0x082a
+#define mmHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_INTERRUPT 0x082b
+#define mmHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082c
+#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082d
+#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+// base address: 0x6e0
+#define mmCURSOR0_2_CURSOR_CONTROL 0x0830
+#define mmCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SIZE 0x0833
+#define mmCURSOR0_2_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_POSITION 0x0834
+#define mmCURSOR0_2_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_HOT_SPOT 0x0835
+#define mmCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836
+#define mmCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_DST_OFFSET 0x0837
+#define mmCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838
+#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839
+#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a
+#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b
+#define mmCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_CNTL 0x083c
+#define mmCURSOR0_2_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_QOS_CNTL 0x083d
+#define mmCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_STATUS 0x083e
+#define mmCURSOR0_2_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_SW_CNTL 0x083f
+#define mmCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_SW_DATA 0x0840
+#define mmCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2154
+#define mmDC_PERFMON9_PERFCOUNTER_CNTL 0x0855
+#define mmDC_PERFMON9_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON9_PERFCOUNTER_CNTL2 0x0856
+#define mmDC_PERFMON9_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON9_PERFCOUNTER_STATE 0x0857
+#define mmDC_PERFMON9_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON9_PERFMON_CNTL 0x0858
+#define mmDC_PERFMON9_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON9_PERFMON_CNTL2 0x0859
+#define mmDC_PERFMON9_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON9_PERFMON_CVALUE_INT_MISC 0x085a
+#define mmDC_PERFMON9_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON9_PERFMON_CVALUE_LOW 0x085b
+#define mmDC_PERFMON9_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON9_PERFMON_HI 0x085c
+#define mmDC_PERFMON9_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON9_PERFMON_LOW 0x085d
+#define mmDC_PERFMON9_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+// base address: 0xa50
+#define mmHUBP3_DCSURF_SURFACE_CONFIG 0x0879
+#define mmHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_ADDR_CONFIG 0x087a
+#define mmHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_TILING_CONFIG 0x087b
+#define mmHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP3_DCHUBP_CNTL 0x0887
+#define mmHUBP3_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP3_HUBP_CLK_CNTL 0x0888
+#define mmHUBP3_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP3_DCHUBP_VMPG_CONFIG 0x0889
+#define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG_DB 0x088a
+#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG 0x088b
+#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x088f
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0890
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+// base address: 0xa50
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ3_VMID_SETTINGS_0 0x089d
+#define mmHUBPREQ3_VMID_SETTINGS_0_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae
+#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b4
+#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b5
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b6
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b7
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b8
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b9
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08ba
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08bb
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bc
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCN_EXPANSION_MODE 0x08c0
+#define mmHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ3_DCN_TTU_QOS_WM 0x08c1
+#define mmHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08c2
+#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08c3
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c4
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c5
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c6
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c7
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c8
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR1_TTU_CNTL0 0x08c9
+#define mmHUBPREQ3_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR1_TTU_CNTL1 0x08ca
+#define mmHUBPREQ3_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x08cb
+#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x08cc
+#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define mmHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL 0x08d9
+#define mmHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define mmHUBPREQ3_BLANK_OFFSET_0 0x08da
+#define mmHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ3_BLANK_OFFSET_1 0x08db
+#define mmHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ3_DST_DIMENSIONS 0x08dc
+#define mmHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ3_DST_AFTER_SCALER 0x08dd
+#define mmHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ3_PREFETCH_SETTINGS 0x08de
+#define mmHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ3_PREFETCH_SETTINGS_C 0x08df
+#define mmHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_0 0x08e0
+#define mmHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_1 0x08e1
+#define mmHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_2 0x08e2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_3 0x08e3
+#define mmHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_4 0x08e4
+#define mmHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_0 0x08e5
+#define mmHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_1 0x08e6
+#define mmHUBPREQ3_FLIP_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_2 0x08e7
+#define mmHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_0 0x08e8
+#define mmHUBPREQ3_NOM_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_1 0x08e9
+#define mmHUBPREQ3_NOM_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_2 0x08ea
+#define mmHUBPREQ3_NOM_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_3 0x08eb
+#define mmHUBPREQ3_NOM_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_4 0x08ec
+#define mmHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_5 0x08ed
+#define mmHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_6 0x08ee
+#define mmHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_7 0x08ef
+#define mmHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08f0
+#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ3_PER_LINE_DELIVERY 0x08f1
+#define mmHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ3_CURSOR_SETTINGS 0x08f2
+#define mmHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f3
+#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f4
+#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f5
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f6
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_5 0x08f9
+#define mmHUBPREQ3_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_6 0x08fa
+#define mmHUBPREQ3_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_3 0x08fb
+#define mmHUBPREQ3_FLIP_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_4 0x08fc
+#define mmHUBPREQ3_FLIP_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_5 0x08fd
+#define mmHUBPREQ3_FLIP_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_6 0x08fe
+#define mmHUBPREQ3_FLIP_PARAMETERS_6_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+// base address: 0xa50
+#define mmHUBPRET3_HUBPRET_CONTROL 0x0900
+#define mmHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x0901
+#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0902
+#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0903
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0904
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE0 0x0905
+#define mmHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE1 0x0906
+#define mmHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_INTERRUPT 0x0907
+#define mmHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0908
+#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0909
+#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+// base address: 0xa50
+#define mmCURSOR0_3_CURSOR_CONTROL 0x090c
+#define mmCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SIZE 0x090f
+#define mmCURSOR0_3_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_POSITION 0x0910
+#define mmCURSOR0_3_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_HOT_SPOT 0x0911
+#define mmCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912
+#define mmCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_DST_OFFSET 0x0913
+#define mmCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914
+#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915
+#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916
+#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917
+#define mmCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_CNTL 0x0918
+#define mmCURSOR0_3_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_QOS_CNTL 0x0919
+#define mmCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_STATUS 0x091a
+#define mmCURSOR0_3_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_SW_CNTL 0x091b
+#define mmCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_SW_DATA 0x091c
+#define mmCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x24c4
+#define mmDC_PERFMON10_PERFCOUNTER_CNTL 0x0931
+#define mmDC_PERFMON10_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON10_PERFCOUNTER_CNTL2 0x0932
+#define mmDC_PERFMON10_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON10_PERFCOUNTER_STATE 0x0933
+#define mmDC_PERFMON10_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON10_PERFMON_CNTL 0x0934
+#define mmDC_PERFMON10_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON10_PERFMON_CNTL2 0x0935
+#define mmDC_PERFMON10_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON10_PERFMON_CVALUE_INT_MISC 0x0936
+#define mmDC_PERFMON10_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON10_PERFMON_CVALUE_LOW 0x0937
+#define mmDC_PERFMON10_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON10_PERFMON_HI 0x0938
+#define mmDC_PERFMON10_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON10_PERFMON_LOW 0x0939
+#define mmDC_PERFMON10_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+// base address: 0x0
+#define mmDPP_TOP0_DPP_CONTROL 0x0cc5
+#define mmDPP_TOP0_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP0_DPP_SOFT_RESET 0x0cc6
+#define mmDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_VAL_R_G 0x0cc7
+#define mmDPP_TOP0_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_VAL_B_A 0x0cc8
+#define mmDPP_TOP0_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_CTRL 0x0cc9
+#define mmDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP0_HOST_READ_CONTROL 0x0cca
+#define mmDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+// base address: 0x0
+#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf
+#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG0_FORMAT_CONTROL 0x0cd0
+#define mmCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1
+#define mmCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3
+#define mmCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4
+#define mmCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5
+#define mmCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6
+#define mmCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7
+#define mmCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8
+#define mmCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_RED 0x0cd9
+#define mmCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda
+#define mmCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb
+#define mmCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd
+#define mmCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+// base address: 0x0
+#define mmCNVC_CUR0_CURSOR0_CONTROL 0x0ce0
+#define mmCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_COLOR0 0x0ce1
+#define mmCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_COLOR1 0x0ce2
+#define mmCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0ce3
+#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+// base address: 0x0
+#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cea
+#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL0_SCL_COEF_RAM_TAP_DATA 0x0ceb
+#define mmDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL0_SCL_MODE 0x0cec
+#define mmDSCL0_SCL_MODE_BASE_IDX 2
+#define mmDSCL0_SCL_TAP_CONTROL 0x0ced
+#define mmDSCL0_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL0_DSCL_CONTROL 0x0cee
+#define mmDSCL0_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL0_DSCL_2TAP_CONTROL 0x0cef
+#define mmDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cf0
+#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0cf1
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT 0x0cf2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0cf3
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_C 0x0cf4
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0cf5
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT 0x0cf6
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0cf7
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0cf8
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_C 0x0cf9
+#define mmDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0cfa
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL0_SCL_BLACK_OFFSET 0x0cfb
+#define mmDSCL0_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL0_DSCL_UPDATE 0x0cfc
+#define mmDSCL0_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL0_DSCL_AUTOCAL 0x0cfd
+#define mmDSCL0_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0cfe
+#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0cff
+#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL0_OTG_H_BLANK 0x0d00
+#define mmDSCL0_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL0_OTG_V_BLANK 0x0d01
+#define mmDSCL0_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL0_RECOUT_START 0x0d02
+#define mmDSCL0_RECOUT_START_BASE_IDX 2
+#define mmDSCL0_RECOUT_SIZE 0x0d03
+#define mmDSCL0_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL0_MPC_SIZE 0x0d04
+#define mmDSCL0_MPC_SIZE_BASE_IDX 2
+#define mmDSCL0_LB_DATA_FORMAT 0x0d05
+#define mmDSCL0_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL0_LB_MEMORY_CTRL 0x0d06
+#define mmDSCL0_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL0_LB_V_COUNTER 0x0d07
+#define mmDSCL0_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL0_DSCL_MEM_PWR_CTRL 0x0d08
+#define mmDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL0_DSCL_MEM_PWR_STATUS 0x0d09
+#define mmDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL0_OBUF_CONTROL 0x0d0a
+#define mmDSCL0_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL0_OBUF_MEM_PWR_CTRL 0x0d0b
+#define mmDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+// base address: 0x0
+#define mmCM0_CM_CONTROL 0x0d1a
+#define mmCM0_CM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_ICSC_CONTROL 0x0d1b
+#define mmCM0_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM0_CM_ICSC_C11_C12 0x0d1c
+#define mmCM0_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM0_CM_ICSC_C13_C14 0x0d1d
+#define mmCM0_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM0_CM_ICSC_C21_C22 0x0d1e
+#define mmCM0_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM0_CM_ICSC_C23_C24 0x0d1f
+#define mmCM0_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM0_CM_ICSC_C31_C32 0x0d20
+#define mmCM0_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM0_CM_ICSC_C33_C34 0x0d21
+#define mmCM0_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C11_C12 0x0d22
+#define mmCM0_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C13_C14 0x0d23
+#define mmCM0_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C21_C22 0x0d24
+#define mmCM0_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C23_C24 0x0d25
+#define mmCM0_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C31_C32 0x0d26
+#define mmCM0_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C33_C34 0x0d27
+#define mmCM0_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_CONTROL 0x0d28
+#define mmCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C11_C12 0x0d29
+#define mmCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C13_C14 0x0d2a
+#define mmCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C21_C22 0x0d2b
+#define mmCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C23_C24 0x0d2c
+#define mmCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C31_C32 0x0d2d
+#define mmCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C33_C34 0x0d2e
+#define mmCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d2f
+#define mmCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d30
+#define mmCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d31
+#define mmCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d32
+#define mmCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d33
+#define mmCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d34
+#define mmCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM0_CM_BIAS_CR_R 0x0d35
+#define mmCM0_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM0_CM_BIAS_Y_G_CB_B 0x0d36
+#define mmCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_CONTROL 0x0d37
+#define mmCM0_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_INDEX 0x0d38
+#define mmCM0_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_DATA 0x0d39
+#define mmCM0_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK 0x0d3a
+#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_B 0x0d3b
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_G 0x0d3c
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_R 0x0d3d
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0d3e
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0d3f
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0d40
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B 0x0d41
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B 0x0d42
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G 0x0d43
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G 0x0d44
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R 0x0d45
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R 0x0d46
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_0_1 0x0d47
+#define mmCM0_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_2_3 0x0d48
+#define mmCM0_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_4_5 0x0d49
+#define mmCM0_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_6_7 0x0d4a
+#define mmCM0_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_8_9 0x0d4b
+#define mmCM0_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_10_11 0x0d4c
+#define mmCM0_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_12_13 0x0d4d
+#define mmCM0_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_14_15 0x0d4e
+#define mmCM0_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_B 0x0d4f
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_G 0x0d50
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_R 0x0d51
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0d52
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0d53
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0d54
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B 0x0d55
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B 0x0d56
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G 0x0d57
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G 0x0d58
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R 0x0d59
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R 0x0d5a
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_0_1 0x0d5b
+#define mmCM0_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_2_3 0x0d5c
+#define mmCM0_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_4_5 0x0d5d
+#define mmCM0_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_6_7 0x0d5e
+#define mmCM0_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_8_9 0x0d5f
+#define mmCM0_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_10_11 0x0d60
+#define mmCM0_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_12_13 0x0d61
+#define mmCM0_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_14_15 0x0d62
+#define mmCM0_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_CONTROL 0x0d63
+#define mmCM0_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_INDEX 0x0d64
+#define mmCM0_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_DATA 0x0d65
+#define mmCM0_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0d66
+#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B 0x0d67
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G 0x0d68
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R 0x0d69
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0d6a
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0d6b
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0d6c
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0d6d
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0d6e
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0d6f
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0d70
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0d71
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0d72
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1 0x0d73
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3 0x0d74
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5 0x0d75
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7 0x0d76
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9 0x0d77
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11 0x0d78
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13 0x0d79
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15 0x0d7a
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17 0x0d7b
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19 0x0d7c
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21 0x0d7d
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23 0x0d7e
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25 0x0d7f
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27 0x0d80
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29 0x0d81
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31 0x0d82
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33 0x0d83
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B 0x0d84
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G 0x0d85
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R 0x0d86
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0d87
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0d88
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0d89
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0d8a
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0d8b
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0d8c
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0d8d
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0d8e
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0d8f
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1 0x0d90
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3 0x0d91
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5 0x0d92
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7 0x0d93
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9 0x0d94
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11 0x0d95
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13 0x0d96
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15 0x0d97
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17 0x0d98
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19 0x0d99
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21 0x0d9a
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23 0x0d9b
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25 0x0d9c
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27 0x0d9d
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29 0x0d9e
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31 0x0d9f
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33 0x0da0
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_HDR_MULT_COEF 0x0da1
+#define mmCM0_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_CTRL 0x0da2
+#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_STATUS 0x0da3
+#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM0_CM_DEALPHA 0x0da5
+#define mmCM0_CM_DEALPHA_BASE_IDX 2
+#define mmCM0_CM_COEF_FORMAT 0x0da6
+#define mmCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM0_CM_SHAPER_CONTROL 0x0da7
+#define mmCM0_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_R 0x0da8
+#define mmCM0_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_G 0x0da9
+#define mmCM0_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_B 0x0daa
+#define mmCM0_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_SCALE_R 0x0dab
+#define mmCM0_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_SCALE_G_B 0x0dac
+#define mmCM0_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_INDEX 0x0dad
+#define mmCM0_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_DATA 0x0dae
+#define mmCM0_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK 0x0daf
+#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B 0x0db0
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G 0x0db1
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R 0x0db2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B 0x0db3
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G 0x0db4
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R 0x0db5
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_0_1 0x0db6
+#define mmCM0_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_2_3 0x0db7
+#define mmCM0_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_4_5 0x0db8
+#define mmCM0_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_6_7 0x0db9
+#define mmCM0_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_8_9 0x0dba
+#define mmCM0_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_10_11 0x0dbb
+#define mmCM0_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_12_13 0x0dbc
+#define mmCM0_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_14_15 0x0dbd
+#define mmCM0_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_16_17 0x0dbe
+#define mmCM0_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_18_19 0x0dbf
+#define mmCM0_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_20_21 0x0dc0
+#define mmCM0_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_22_23 0x0dc1
+#define mmCM0_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_24_25 0x0dc2
+#define mmCM0_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_26_27 0x0dc3
+#define mmCM0_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_28_29 0x0dc4
+#define mmCM0_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_30_31 0x0dc5
+#define mmCM0_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_32_33 0x0dc6
+#define mmCM0_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B 0x0dc7
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G 0x0dc8
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R 0x0dc9
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B 0x0dca
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G 0x0dcb
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R 0x0dcc
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_0_1 0x0dcd
+#define mmCM0_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_2_3 0x0dce
+#define mmCM0_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_4_5 0x0dcf
+#define mmCM0_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_6_7 0x0dd0
+#define mmCM0_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_8_9 0x0dd1
+#define mmCM0_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_10_11 0x0dd2
+#define mmCM0_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_12_13 0x0dd3
+#define mmCM0_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_14_15 0x0dd4
+#define mmCM0_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_16_17 0x0dd5
+#define mmCM0_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_18_19 0x0dd6
+#define mmCM0_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_20_21 0x0dd7
+#define mmCM0_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_22_23 0x0dd8
+#define mmCM0_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_24_25 0x0dd9
+#define mmCM0_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_26_27 0x0dda
+#define mmCM0_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_28_29 0x0ddb
+#define mmCM0_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_30_31 0x0ddc
+#define mmCM0_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_32_33 0x0ddd
+#define mmCM0_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_CTRL2 0x0dde
+#define mmCM0_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_STATUS2 0x0ddf
+#define mmCM0_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM0_CM_3DLUT_MODE 0x0de0
+#define mmCM0_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM0_CM_3DLUT_INDEX 0x0de1
+#define mmCM0_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_3DLUT_DATA 0x0de2
+#define mmCM0_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM0_CM_3DLUT_DATA_30BIT 0x0de3
+#define mmCM0_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL 0x0de4
+#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR 0x0de5
+#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_R 0x0de6
+#define mmCM0_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G 0x0de7
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0de8
+#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0de9
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0dea
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3890
+#define mmDC_PERFMON11_PERFCOUNTER_CNTL 0x0e24
+#define mmDC_PERFMON11_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON11_PERFCOUNTER_CNTL2 0x0e25
+#define mmDC_PERFMON11_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON11_PERFCOUNTER_STATE 0x0e26
+#define mmDC_PERFMON11_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON11_PERFMON_CNTL 0x0e27
+#define mmDC_PERFMON11_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON11_PERFMON_CNTL2 0x0e28
+#define mmDC_PERFMON11_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON11_PERFMON_CVALUE_INT_MISC 0x0e29
+#define mmDC_PERFMON11_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON11_PERFMON_CVALUE_LOW 0x0e2a
+#define mmDC_PERFMON11_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON11_PERFMON_HI 0x0e2b
+#define mmDC_PERFMON11_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON11_PERFMON_LOW 0x0e2c
+#define mmDC_PERFMON11_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+// base address: 0x5ac
+#define mmDPP_TOP1_DPP_CONTROL 0x0e30
+#define mmDPP_TOP1_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP1_DPP_SOFT_RESET 0x0e31
+#define mmDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_VAL_R_G 0x0e32
+#define mmDPP_TOP1_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_VAL_B_A 0x0e33
+#define mmDPP_TOP1_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_CTRL 0x0e34
+#define mmDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP1_HOST_READ_CONTROL 0x0e35
+#define mmDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+// base address: 0x5ac
+#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a
+#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG1_FORMAT_CONTROL 0x0e3b
+#define mmCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c
+#define mmCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d
+#define mmCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e
+#define mmCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f
+#define mmCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40
+#define mmCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41
+#define mmCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42
+#define mmCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43
+#define mmCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_RED 0x0e44
+#define mmCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45
+#define mmCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46
+#define mmCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48
+#define mmCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+// base address: 0x5ac
+#define mmCNVC_CUR1_CURSOR0_CONTROL 0x0e4b
+#define mmCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_COLOR0 0x0e4c
+#define mmCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_COLOR1 0x0e4d
+#define mmCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e4e
+#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+// base address: 0x5ac
+#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e55
+#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e56
+#define mmDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL1_SCL_MODE 0x0e57
+#define mmDSCL1_SCL_MODE_BASE_IDX 2
+#define mmDSCL1_SCL_TAP_CONTROL 0x0e58
+#define mmDSCL1_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL1_DSCL_CONTROL 0x0e59
+#define mmDSCL1_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL1_DSCL_2TAP_CONTROL 0x0e5a
+#define mmDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e5b
+#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e5c
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_INIT 0x0e5d
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e5e
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e5f
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e60
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT 0x0e61
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e62
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e63
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_C 0x0e64
+#define mmDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e65
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL1_SCL_BLACK_OFFSET 0x0e66
+#define mmDSCL1_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL1_DSCL_UPDATE 0x0e67
+#define mmDSCL1_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL1_DSCL_AUTOCAL 0x0e68
+#define mmDSCL1_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e69
+#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e6a
+#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL1_OTG_H_BLANK 0x0e6b
+#define mmDSCL1_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL1_OTG_V_BLANK 0x0e6c
+#define mmDSCL1_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL1_RECOUT_START 0x0e6d
+#define mmDSCL1_RECOUT_START_BASE_IDX 2
+#define mmDSCL1_RECOUT_SIZE 0x0e6e
+#define mmDSCL1_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL1_MPC_SIZE 0x0e6f
+#define mmDSCL1_MPC_SIZE_BASE_IDX 2
+#define mmDSCL1_LB_DATA_FORMAT 0x0e70
+#define mmDSCL1_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL1_LB_MEMORY_CTRL 0x0e71
+#define mmDSCL1_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL1_LB_V_COUNTER 0x0e72
+#define mmDSCL1_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL1_DSCL_MEM_PWR_CTRL 0x0e73
+#define mmDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL1_DSCL_MEM_PWR_STATUS 0x0e74
+#define mmDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL1_OBUF_CONTROL 0x0e75
+#define mmDSCL1_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL1_OBUF_MEM_PWR_CTRL 0x0e76
+#define mmDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+// base address: 0x5ac
+#define mmCM1_CM_CONTROL 0x0e85
+#define mmCM1_CM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_ICSC_CONTROL 0x0e86
+#define mmCM1_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM1_CM_ICSC_C11_C12 0x0e87
+#define mmCM1_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM1_CM_ICSC_C13_C14 0x0e88
+#define mmCM1_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM1_CM_ICSC_C21_C22 0x0e89
+#define mmCM1_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM1_CM_ICSC_C23_C24 0x0e8a
+#define mmCM1_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM1_CM_ICSC_C31_C32 0x0e8b
+#define mmCM1_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM1_CM_ICSC_C33_C34 0x0e8c
+#define mmCM1_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C11_C12 0x0e8d
+#define mmCM1_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C13_C14 0x0e8e
+#define mmCM1_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C21_C22 0x0e8f
+#define mmCM1_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C23_C24 0x0e90
+#define mmCM1_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C31_C32 0x0e91
+#define mmCM1_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C33_C34 0x0e92
+#define mmCM1_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_CONTROL 0x0e93
+#define mmCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C11_C12 0x0e94
+#define mmCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C13_C14 0x0e95
+#define mmCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C21_C22 0x0e96
+#define mmCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C23_C24 0x0e97
+#define mmCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C31_C32 0x0e98
+#define mmCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C33_C34 0x0e99
+#define mmCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C11_C12 0x0e9a
+#define mmCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C13_C14 0x0e9b
+#define mmCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C21_C22 0x0e9c
+#define mmCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C23_C24 0x0e9d
+#define mmCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C31_C32 0x0e9e
+#define mmCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C33_C34 0x0e9f
+#define mmCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM1_CM_BIAS_CR_R 0x0ea0
+#define mmCM1_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM1_CM_BIAS_Y_G_CB_B 0x0ea1
+#define mmCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_CONTROL 0x0ea2
+#define mmCM1_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_INDEX 0x0ea3
+#define mmCM1_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_DATA 0x0ea4
+#define mmCM1_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK 0x0ea5
+#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_B 0x0ea6
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_G 0x0ea7
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_R 0x0ea8
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0ea9
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0eaa
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0eab
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B 0x0eac
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B 0x0ead
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G 0x0eae
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G 0x0eaf
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R 0x0eb0
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R 0x0eb1
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_0_1 0x0eb2
+#define mmCM1_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_2_3 0x0eb3
+#define mmCM1_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_4_5 0x0eb4
+#define mmCM1_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_6_7 0x0eb5
+#define mmCM1_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_8_9 0x0eb6
+#define mmCM1_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_10_11 0x0eb7
+#define mmCM1_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_12_13 0x0eb8
+#define mmCM1_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_14_15 0x0eb9
+#define mmCM1_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_B 0x0eba
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_G 0x0ebb
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_R 0x0ebc
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0ebd
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0ebe
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0ebf
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B 0x0ec0
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B 0x0ec1
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G 0x0ec2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G 0x0ec3
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R 0x0ec4
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R 0x0ec5
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_0_1 0x0ec6
+#define mmCM1_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_2_3 0x0ec7
+#define mmCM1_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_4_5 0x0ec8
+#define mmCM1_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_6_7 0x0ec9
+#define mmCM1_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_8_9 0x0eca
+#define mmCM1_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_10_11 0x0ecb
+#define mmCM1_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_12_13 0x0ecc
+#define mmCM1_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_14_15 0x0ecd
+#define mmCM1_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_CONTROL 0x0ece
+#define mmCM1_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_INDEX 0x0ecf
+#define mmCM1_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_DATA 0x0ed0
+#define mmCM1_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0ed1
+#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B 0x0ed2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G 0x0ed3
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R 0x0ed4
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0ed5
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0ed6
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0ed7
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0ed8
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0ed9
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0eda
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0edb
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0edc
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0edd
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1 0x0ede
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3 0x0edf
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5 0x0ee0
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7 0x0ee1
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9 0x0ee2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11 0x0ee3
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13 0x0ee4
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15 0x0ee5
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17 0x0ee6
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19 0x0ee7
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21 0x0ee8
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23 0x0ee9
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25 0x0eea
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27 0x0eeb
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29 0x0eec
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31 0x0eed
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33 0x0eee
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B 0x0eef
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G 0x0ef0
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R 0x0ef1
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0ef2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0ef3
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0ef4
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0ef5
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0ef6
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0ef7
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0ef8
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0ef9
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0efa
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1 0x0efb
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3 0x0efc
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5 0x0efd
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7 0x0efe
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9 0x0eff
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11 0x0f00
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13 0x0f01
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15 0x0f02
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17 0x0f03
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19 0x0f04
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21 0x0f05
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23 0x0f06
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25 0x0f07
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27 0x0f08
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29 0x0f09
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31 0x0f0a
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33 0x0f0b
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_HDR_MULT_COEF 0x0f0c
+#define mmCM1_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_CTRL 0x0f0d
+#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_STATUS 0x0f0e
+#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM1_CM_DEALPHA 0x0f10
+#define mmCM1_CM_DEALPHA_BASE_IDX 2
+#define mmCM1_CM_COEF_FORMAT 0x0f11
+#define mmCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM1_CM_SHAPER_CONTROL 0x0f12
+#define mmCM1_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_R 0x0f13
+#define mmCM1_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_G 0x0f14
+#define mmCM1_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_B 0x0f15
+#define mmCM1_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_SCALE_R 0x0f16
+#define mmCM1_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_SCALE_G_B 0x0f17
+#define mmCM1_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_INDEX 0x0f18
+#define mmCM1_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_DATA 0x0f19
+#define mmCM1_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK 0x0f1a
+#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B 0x0f1b
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G 0x0f1c
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R 0x0f1d
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B 0x0f1e
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G 0x0f1f
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R 0x0f20
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_0_1 0x0f21
+#define mmCM1_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_2_3 0x0f22
+#define mmCM1_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_4_5 0x0f23
+#define mmCM1_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_6_7 0x0f24
+#define mmCM1_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_8_9 0x0f25
+#define mmCM1_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_10_11 0x0f26
+#define mmCM1_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_12_13 0x0f27
+#define mmCM1_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_14_15 0x0f28
+#define mmCM1_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_16_17 0x0f29
+#define mmCM1_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_18_19 0x0f2a
+#define mmCM1_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_20_21 0x0f2b
+#define mmCM1_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_22_23 0x0f2c
+#define mmCM1_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_24_25 0x0f2d
+#define mmCM1_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_26_27 0x0f2e
+#define mmCM1_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_28_29 0x0f2f
+#define mmCM1_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_30_31 0x0f30
+#define mmCM1_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_32_33 0x0f31
+#define mmCM1_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B 0x0f32
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G 0x0f33
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R 0x0f34
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B 0x0f35
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G 0x0f36
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R 0x0f37
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_0_1 0x0f38
+#define mmCM1_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_2_3 0x0f39
+#define mmCM1_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_4_5 0x0f3a
+#define mmCM1_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_6_7 0x0f3b
+#define mmCM1_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_8_9 0x0f3c
+#define mmCM1_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_10_11 0x0f3d
+#define mmCM1_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_12_13 0x0f3e
+#define mmCM1_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_14_15 0x0f3f
+#define mmCM1_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_16_17 0x0f40
+#define mmCM1_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_18_19 0x0f41
+#define mmCM1_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_20_21 0x0f42
+#define mmCM1_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_22_23 0x0f43
+#define mmCM1_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_24_25 0x0f44
+#define mmCM1_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_26_27 0x0f45
+#define mmCM1_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_28_29 0x0f46
+#define mmCM1_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_30_31 0x0f47
+#define mmCM1_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_32_33 0x0f48
+#define mmCM1_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_CTRL2 0x0f49
+#define mmCM1_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_STATUS2 0x0f4a
+#define mmCM1_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM1_CM_3DLUT_MODE 0x0f4b
+#define mmCM1_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM1_CM_3DLUT_INDEX 0x0f4c
+#define mmCM1_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_3DLUT_DATA 0x0f4d
+#define mmCM1_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM1_CM_3DLUT_DATA_30BIT 0x0f4e
+#define mmCM1_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL 0x0f4f
+#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR 0x0f50
+#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_R 0x0f51
+#define mmCM1_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_G 0x0f52
+#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f53
+#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f54
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f55
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
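[Editor's note, not part of the generated header] Each register in these blocks is emitted as an offset/BASE_IDX pair: the offset is a dword offset inside one register segment of the DCN IP block, and BASE_IDX selects which per-ASIC segment base that offset is added to at runtime (the driver resolves this through its IP base tables, with macros along the lines of SOC15_REG_OFFSET). A minimal, self-contained sketch of that resolution is below; the segment-base values and the reg_abs() helper are hypothetical placeholders for illustration only, while the CNVC_CFG1 defines are taken from the block above.

    /* Illustrative sketch only: not the amdgpu API.
     * Shows how an offset/BASE_IDX pair maps to an absolute dword
     * register index once a segment base table is known.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical dword bases for segments 0..2 of the DCN block. */
    static const uint32_t dcn_seg_base[] = { 0x000012f0, 0x000000c0, 0x000034c0 };

    /* Pair taken from the generated header above. */
    #define mmCNVC_CFG1_COLOR_KEYER_RED           0x0e44
    #define mmCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX  2

    /* Resolve offset + segment base into an absolute dword register index. */
    static uint32_t reg_abs(uint32_t base_idx, uint32_t offset)
    {
            return dcn_seg_base[base_idx] + offset;
    }

    int main(void)
    {
            printf("CNVC_CFG1_COLOR_KEYER_RED -> 0x%08x\n",
                   reg_abs(mmCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX,
                           mmCNVC_CFG1_COLOR_KEYER_RED));
            return 0;
    }

The same pattern applies to every define in this file, which is why each register carries a matching _BASE_IDX companion define.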
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3e3c
+#define mmDC_PERFMON12_PERFCOUNTER_CNTL 0x0f8f
+#define mmDC_PERFMON12_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON12_PERFCOUNTER_CNTL2 0x0f90
+#define mmDC_PERFMON12_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON12_PERFCOUNTER_STATE 0x0f91
+#define mmDC_PERFMON12_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON12_PERFMON_CNTL 0x0f92
+#define mmDC_PERFMON12_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON12_PERFMON_CNTL2 0x0f93
+#define mmDC_PERFMON12_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON12_PERFMON_CVALUE_INT_MISC 0x0f94
+#define mmDC_PERFMON12_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON12_PERFMON_CVALUE_LOW 0x0f95
+#define mmDC_PERFMON12_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON12_PERFMON_HI 0x0f96
+#define mmDC_PERFMON12_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON12_PERFMON_LOW 0x0f97
+#define mmDC_PERFMON12_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+// base address: 0xb58
+#define mmDPP_TOP2_DPP_CONTROL 0x0f9b
+#define mmDPP_TOP2_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP2_DPP_SOFT_RESET 0x0f9c
+#define mmDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_VAL_R_G 0x0f9d
+#define mmDPP_TOP2_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_VAL_B_A 0x0f9e
+#define mmDPP_TOP2_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_CTRL 0x0f9f
+#define mmDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP2_HOST_READ_CONTROL 0x0fa0
+#define mmDPP_TOP2_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+// base address: 0xb58
+#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5
+#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG2_FORMAT_CONTROL 0x0fa6
+#define mmCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7
+#define mmCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8
+#define mmCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9
+#define mmCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa
+#define mmCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab
+#define mmCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac
+#define mmCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad
+#define mmCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae
+#define mmCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_RED 0x0faf
+#define mmCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0
+#define mmCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1
+#define mmCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3
+#define mmCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+// base address: 0xb58
+#define mmCNVC_CUR2_CURSOR0_CONTROL 0x0fb6
+#define mmCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_COLOR0 0x0fb7
+#define mmCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_COLOR1 0x0fb8
+#define mmCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fb9
+#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+// base address: 0xb58
+#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fc0
+#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fc1
+#define mmDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL2_SCL_MODE 0x0fc2
+#define mmDSCL2_SCL_MODE_BASE_IDX 2
+#define mmDSCL2_SCL_TAP_CONTROL 0x0fc3
+#define mmDSCL2_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL2_DSCL_CONTROL 0x0fc4
+#define mmDSCL2_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL2_DSCL_2TAP_CONTROL 0x0fc5
+#define mmDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fc6
+#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fc7
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_INIT 0x0fc8
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fc9
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fca
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fcb
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT 0x0fcc
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fcd
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fce
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_C 0x0fcf
+#define mmDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fd0
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL2_SCL_BLACK_OFFSET 0x0fd1
+#define mmDSCL2_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL2_DSCL_UPDATE 0x0fd2
+#define mmDSCL2_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL2_DSCL_AUTOCAL 0x0fd3
+#define mmDSCL2_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fd4
+#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fd5
+#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL2_OTG_H_BLANK 0x0fd6
+#define mmDSCL2_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL2_OTG_V_BLANK 0x0fd7
+#define mmDSCL2_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL2_RECOUT_START 0x0fd8
+#define mmDSCL2_RECOUT_START_BASE_IDX 2
+#define mmDSCL2_RECOUT_SIZE 0x0fd9
+#define mmDSCL2_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL2_MPC_SIZE 0x0fda
+#define mmDSCL2_MPC_SIZE_BASE_IDX 2
+#define mmDSCL2_LB_DATA_FORMAT 0x0fdb
+#define mmDSCL2_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL2_LB_MEMORY_CTRL 0x0fdc
+#define mmDSCL2_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL2_LB_V_COUNTER 0x0fdd
+#define mmDSCL2_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL2_DSCL_MEM_PWR_CTRL 0x0fde
+#define mmDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL2_DSCL_MEM_PWR_STATUS 0x0fdf
+#define mmDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL2_OBUF_CONTROL 0x0fe0
+#define mmDSCL2_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL2_OBUF_MEM_PWR_CTRL 0x0fe1
+#define mmDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+// base address: 0xb58
+#define mmCM2_CM_CONTROL 0x0ff0
+#define mmCM2_CM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_ICSC_CONTROL 0x0ff1
+#define mmCM2_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM2_CM_ICSC_C11_C12 0x0ff2
+#define mmCM2_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM2_CM_ICSC_C13_C14 0x0ff3
+#define mmCM2_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM2_CM_ICSC_C21_C22 0x0ff4
+#define mmCM2_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM2_CM_ICSC_C23_C24 0x0ff5
+#define mmCM2_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM2_CM_ICSC_C31_C32 0x0ff6
+#define mmCM2_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM2_CM_ICSC_C33_C34 0x0ff7
+#define mmCM2_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C11_C12 0x0ff8
+#define mmCM2_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C13_C14 0x0ff9
+#define mmCM2_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C21_C22 0x0ffa
+#define mmCM2_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C23_C24 0x0ffb
+#define mmCM2_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C31_C32 0x0ffc
+#define mmCM2_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C33_C34 0x0ffd
+#define mmCM2_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_CONTROL 0x0ffe
+#define mmCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C11_C12 0x0fff
+#define mmCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C13_C14 0x1000
+#define mmCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C21_C22 0x1001
+#define mmCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C23_C24 0x1002
+#define mmCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C31_C32 0x1003
+#define mmCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C33_C34 0x1004
+#define mmCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C11_C12 0x1005
+#define mmCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C13_C14 0x1006
+#define mmCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C21_C22 0x1007
+#define mmCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C23_C24 0x1008
+#define mmCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C31_C32 0x1009
+#define mmCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C33_C34 0x100a
+#define mmCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM2_CM_BIAS_CR_R 0x100b
+#define mmCM2_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM2_CM_BIAS_Y_G_CB_B 0x100c
+#define mmCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_CONTROL 0x100d
+#define mmCM2_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_INDEX 0x100e
+#define mmCM2_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_DATA 0x100f
+#define mmCM2_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK 0x1010
+#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_B 0x1011
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_G 0x1012
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_R 0x1013
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B 0x1014
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1015
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1016
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B 0x1017
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B 0x1018
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G 0x1019
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G 0x101a
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R 0x101b
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R 0x101c
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_0_1 0x101d
+#define mmCM2_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_2_3 0x101e
+#define mmCM2_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_4_5 0x101f
+#define mmCM2_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_6_7 0x1020
+#define mmCM2_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_8_9 0x1021
+#define mmCM2_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_10_11 0x1022
+#define mmCM2_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_12_13 0x1023
+#define mmCM2_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_14_15 0x1024
+#define mmCM2_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_B 0x1025
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_G 0x1026
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_R 0x1027
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1028
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1029
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R 0x102a
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B 0x102b
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B 0x102c
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G 0x102d
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G 0x102e
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R 0x102f
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R 0x1030
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_0_1 0x1031
+#define mmCM2_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_2_3 0x1032
+#define mmCM2_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_4_5 0x1033
+#define mmCM2_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_6_7 0x1034
+#define mmCM2_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_8_9 0x1035
+#define mmCM2_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_10_11 0x1036
+#define mmCM2_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_12_13 0x1037
+#define mmCM2_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_14_15 0x1038
+#define mmCM2_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_CONTROL 0x1039
+#define mmCM2_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_INDEX 0x103a
+#define mmCM2_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_DATA 0x103b
+#define mmCM2_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x103c
+#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B 0x103d
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G 0x103e
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R 0x103f
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x1040
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x1041
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x1042
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B 0x1043
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B 0x1044
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G 0x1045
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G 0x1046
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R 0x1047
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R 0x1048
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1 0x1049
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3 0x104a
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5 0x104b
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7 0x104c
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9 0x104d
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11 0x104e
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13 0x104f
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15 0x1050
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17 0x1051
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19 0x1052
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21 0x1053
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23 0x1054
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25 0x1055
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27 0x1056
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29 0x1057
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31 0x1058
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33 0x1059
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B 0x105a
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G 0x105b
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R 0x105c
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x105d
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x105e
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x105f
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B 0x1060
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B 0x1061
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G 0x1062
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G 0x1063
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R 0x1064
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R 0x1065
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1 0x1066
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3 0x1067
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5 0x1068
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7 0x1069
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9 0x106a
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11 0x106b
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13 0x106c
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15 0x106d
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17 0x106e
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19 0x106f
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21 0x1070
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23 0x1071
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25 0x1072
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27 0x1073
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29 0x1074
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31 0x1075
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33 0x1076
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_HDR_MULT_COEF 0x1077
+#define mmCM2_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_CTRL 0x1078
+#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_STATUS 0x1079
+#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM2_CM_DEALPHA 0x107b
+#define mmCM2_CM_DEALPHA_BASE_IDX 2
+#define mmCM2_CM_COEF_FORMAT 0x107c
+#define mmCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM2_CM_SHAPER_CONTROL 0x107d
+#define mmCM2_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_R 0x107e
+#define mmCM2_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_G 0x107f
+#define mmCM2_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_B 0x1080
+#define mmCM2_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_SCALE_R 0x1081
+#define mmCM2_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_SCALE_G_B 0x1082
+#define mmCM2_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_INDEX 0x1083
+#define mmCM2_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_DATA 0x1084
+#define mmCM2_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK 0x1085
+#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B 0x1086
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G 0x1087
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R 0x1088
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B 0x1089
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G 0x108a
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R 0x108b
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_0_1 0x108c
+#define mmCM2_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_2_3 0x108d
+#define mmCM2_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_4_5 0x108e
+#define mmCM2_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_6_7 0x108f
+#define mmCM2_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_8_9 0x1090
+#define mmCM2_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_10_11 0x1091
+#define mmCM2_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_12_13 0x1092
+#define mmCM2_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_14_15 0x1093
+#define mmCM2_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_16_17 0x1094
+#define mmCM2_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_18_19 0x1095
+#define mmCM2_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_20_21 0x1096
+#define mmCM2_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_22_23 0x1097
+#define mmCM2_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_24_25 0x1098
+#define mmCM2_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_26_27 0x1099
+#define mmCM2_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_28_29 0x109a
+#define mmCM2_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_30_31 0x109b
+#define mmCM2_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_32_33 0x109c
+#define mmCM2_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B 0x109d
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G 0x109e
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R 0x109f
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B 0x10a0
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G 0x10a1
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R 0x10a2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_0_1 0x10a3
+#define mmCM2_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_2_3 0x10a4
+#define mmCM2_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_4_5 0x10a5
+#define mmCM2_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_6_7 0x10a6
+#define mmCM2_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_8_9 0x10a7
+#define mmCM2_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_10_11 0x10a8
+#define mmCM2_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_12_13 0x10a9
+#define mmCM2_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_14_15 0x10aa
+#define mmCM2_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_16_17 0x10ab
+#define mmCM2_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_18_19 0x10ac
+#define mmCM2_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_20_21 0x10ad
+#define mmCM2_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_22_23 0x10ae
+#define mmCM2_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_24_25 0x10af
+#define mmCM2_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_26_27 0x10b0
+#define mmCM2_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_28_29 0x10b1
+#define mmCM2_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_30_31 0x10b2
+#define mmCM2_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_32_33 0x10b3
+#define mmCM2_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_CTRL2 0x10b4
+#define mmCM2_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_STATUS2 0x10b5
+#define mmCM2_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM2_CM_3DLUT_MODE 0x10b6
+#define mmCM2_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM2_CM_3DLUT_INDEX 0x10b7
+#define mmCM2_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_3DLUT_DATA 0x10b8
+#define mmCM2_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM2_CM_3DLUT_DATA_30BIT 0x10b9
+#define mmCM2_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL 0x10ba
+#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR 0x10bb
+#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_R 0x10bc
+#define mmCM2_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_G 0x10bd
+#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10be
+#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10bf
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10c0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x43e8
+#define mmDC_PERFMON13_PERFCOUNTER_CNTL 0x10fa
+#define mmDC_PERFMON13_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON13_PERFCOUNTER_CNTL2 0x10fb
+#define mmDC_PERFMON13_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON13_PERFCOUNTER_STATE 0x10fc
+#define mmDC_PERFMON13_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON13_PERFMON_CNTL 0x10fd
+#define mmDC_PERFMON13_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON13_PERFMON_CNTL2 0x10fe
+#define mmDC_PERFMON13_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON13_PERFMON_CVALUE_INT_MISC 0x10ff
+#define mmDC_PERFMON13_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON13_PERFMON_CVALUE_LOW 0x1100
+#define mmDC_PERFMON13_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON13_PERFMON_HI 0x1101
+#define mmDC_PERFMON13_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON13_PERFMON_LOW 0x1102
+#define mmDC_PERFMON13_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+// base address: 0x1104
+#define mmDPP_TOP3_DPP_CONTROL 0x1106
+#define mmDPP_TOP3_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP3_DPP_SOFT_RESET 0x1107
+#define mmDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_VAL_R_G 0x1108
+#define mmDPP_TOP3_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_VAL_B_A 0x1109
+#define mmDPP_TOP3_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_CTRL 0x110a
+#define mmDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP3_HOST_READ_CONTROL 0x110b
+#define mmDPP_TOP3_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+// base address: 0x1104
+#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110
+#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG3_FORMAT_CONTROL 0x1111
+#define mmCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define mmCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define mmCNVC_CUR3_CURSOR0_CONTROL 0x1121
+#define mmCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR0 0x1122
+#define mmCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR1 0x1123
+#define mmCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1124
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT 0x112b
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA 0x112c
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL3_SCL_MODE 0x112d
+#define mmDSCL3_SCL_MODE_BASE_IDX 2
+#define mmDSCL3_SCL_TAP_CONTROL 0x112e
+#define mmDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_CONTROL 0x112f
+#define mmDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_2TAP_CONTROL 0x1130
+#define mmDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1131
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1132
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT 0x1133
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1134
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C 0x1135
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1136
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT 0x1137
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1138
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1139
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C 0x113a
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x113b
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL3_SCL_BLACK_OFFSET 0x113c
+#define mmDSCL3_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL3_DSCL_UPDATE 0x113d
+#define mmDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL3_DSCL_AUTOCAL 0x113e
+#define mmDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x113f
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x1140
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL3_OTG_H_BLANK 0x1141
+#define mmDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL3_OTG_V_BLANK 0x1142
+#define mmDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL3_RECOUT_START 0x1143
+#define mmDSCL3_RECOUT_START_BASE_IDX 2
+#define mmDSCL3_RECOUT_SIZE 0x1144
+#define mmDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL3_MPC_SIZE 0x1145
+#define mmDSCL3_MPC_SIZE_BASE_IDX 2
+#define mmDSCL3_LB_DATA_FORMAT 0x1146
+#define mmDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL3_LB_MEMORY_CTRL 0x1147
+#define mmDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL3_LB_V_COUNTER 0x1148
+#define mmDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_CTRL 0x1149
+#define mmDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_STATUS 0x114a
+#define mmDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL3_OBUF_CONTROL 0x114b
+#define mmDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL3_OBUF_MEM_PWR_CTRL 0x114c
+#define mmDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define mmCM3_CM_CONTROL 0x115b
+#define mmCM3_CM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_CONTROL 0x115c
+#define mmCM3_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_C11_C12 0x115d
+#define mmCM3_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_C13_C14 0x115e
+#define mmCM3_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_C21_C22 0x115f
+#define mmCM3_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_C23_C24 0x1160
+#define mmCM3_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_C31_C32 0x1161
+#define mmCM3_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_C33_C34 0x1162
+#define mmCM3_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C11_C12 0x1163
+#define mmCM3_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C13_C14 0x1164
+#define mmCM3_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C21_C22 0x1165
+#define mmCM3_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C23_C24 0x1166
+#define mmCM3_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C31_C32 0x1167
+#define mmCM3_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C33_C34 0x1168
+#define mmCM3_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_CONTROL 0x1169
+#define mmCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C11_C12 0x116a
+#define mmCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C13_C14 0x116b
+#define mmCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C21_C22 0x116c
+#define mmCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C23_C24 0x116d
+#define mmCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C31_C32 0x116e
+#define mmCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C33_C34 0x116f
+#define mmCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12 0x1170
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14 0x1171
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22 0x1172
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24 0x1173
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32 0x1174
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34 0x1175
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_BIAS_CR_R 0x1176
+#define mmCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM3_CM_BIAS_Y_G_CB_B 0x1177
+#define mmCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_CONTROL 0x1178
+#define mmCM3_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_INDEX 0x1179
+#define mmCM3_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_DATA 0x117a
+#define mmCM3_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK 0x117b
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B 0x117c
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G 0x117d
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R 0x117e
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B 0x117f
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1180
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1181
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B 0x1182
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B 0x1183
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G 0x1184
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G 0x1185
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R 0x1186
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R 0x1187
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1 0x1188
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3 0x1189
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5 0x118a
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7 0x118b
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9 0x118c
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11 0x118d
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13 0x118e
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15 0x118f
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B 0x1190
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G 0x1191
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R 0x1192
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1193
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1194
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R 0x1195
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B 0x1196
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B 0x1197
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G 0x1198
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G 0x1199
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R 0x119a
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R 0x119b
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1 0x119c
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3 0x119d
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5 0x119e
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7 0x119f
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9 0x11a0
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11 0x11a1
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13 0x11a2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15 0x11a3
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_CONTROL 0x11a4
+#define mmCM3_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_INDEX 0x11a5
+#define mmCM3_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_DATA 0x11a6
+#define mmCM3_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x11a7
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B 0x11a8
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G 0x11a9
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R 0x11aa
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x11ab
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x11ac
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x11ad
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B 0x11ae
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B 0x11af
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G 0x11b0
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G 0x11b1
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R 0x11b2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R 0x11b3
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1 0x11b4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3 0x11b5
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5 0x11b6
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7 0x11b7
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9 0x11b8
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11 0x11b9
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13 0x11ba
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15 0x11bb
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17 0x11bc
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19 0x11bd
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21 0x11be
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23 0x11bf
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25 0x11c0
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27 0x11c1
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29 0x11c2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31 0x11c3
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33 0x11c4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B 0x11c5
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G 0x11c6
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R 0x11c7
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x11c8
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x11c9
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x11ca
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B 0x11cb
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B 0x11cc
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G 0x11cd
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G 0x11ce
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R 0x11cf
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R 0x11d0
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1 0x11d1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3 0x11d2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5 0x11d3
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7 0x11d4
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9 0x11d5
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11 0x11d6
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13 0x11d7
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15 0x11d8
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17 0x11d9
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19 0x11da
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21 0x11db
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23 0x11dc
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25 0x11dd
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27 0x11de
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29 0x11df
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31 0x11e0
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33 0x11e1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_HDR_MULT_COEF 0x11e2
+#define mmCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL 0x11e3
+#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS 0x11e4
+#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM3_CM_DEALPHA 0x11e6
+#define mmCM3_CM_DEALPHA_BASE_IDX 2
+#define mmCM3_CM_COEF_FORMAT 0x11e7
+#define mmCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM3_CM_SHAPER_CONTROL 0x11e8
+#define mmCM3_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_R 0x11e9
+#define mmCM3_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_G 0x11ea
+#define mmCM3_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_B 0x11eb
+#define mmCM3_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_R 0x11ec
+#define mmCM3_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_G_B 0x11ed
+#define mmCM3_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_INDEX 0x11ee
+#define mmCM3_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_DATA 0x11ef
+#define mmCM3_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK 0x11f0
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B 0x11f1
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G 0x11f2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R 0x11f3
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B 0x11f4
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G 0x11f5
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R 0x11f6
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1 0x11f7
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3 0x11f8
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5 0x11f9
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7 0x11fa
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9 0x11fb
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11 0x11fc
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13 0x11fd
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15 0x11fe
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17 0x11ff
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19 0x1200
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21 0x1201
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23 0x1202
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25 0x1203
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27 0x1204
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29 0x1205
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31 0x1206
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33 0x1207
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B 0x1208
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G 0x1209
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R 0x120a
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B 0x120b
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G 0x120c
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R 0x120d
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1 0x120e
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3 0x120f
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5 0x1210
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7 0x1211
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9 0x1212
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11 0x1213
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13 0x1214
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15 0x1215
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17 0x1216
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19 0x1217
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21 0x1218
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23 0x1219
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25 0x121a
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27 0x121b
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29 0x121c
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31 0x121d
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33 0x121e
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL2 0x121f
+#define mmCM3_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS2 0x1220
+#define mmCM3_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM3_CM_3DLUT_MODE 0x1221
+#define mmCM3_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM3_CM_3DLUT_INDEX 0x1222
+#define mmCM3_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA 0x1223
+#define mmCM3_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA_30BIT 0x1224
+#define mmCM3_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL 0x1225
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR 0x1226
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R 0x1227
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G 0x1228
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1229
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x122a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x122b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x4994
+#define mmDC_PERFMON14_PERFCOUNTER_CNTL 0x1265
+#define mmDC_PERFMON14_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON14_PERFCOUNTER_CNTL2 0x1266
+#define mmDC_PERFMON14_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON14_PERFCOUNTER_STATE 0x1267
+#define mmDC_PERFMON14_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON14_PERFMON_CNTL 0x1268
+#define mmDC_PERFMON14_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON14_PERFMON_CNTL2 0x1269
+#define mmDC_PERFMON14_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON14_PERFMON_CVALUE_INT_MISC 0x126a
+#define mmDC_PERFMON14_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON14_PERFMON_CVALUE_LOW 0x126b
+#define mmDC_PERFMON14_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON14_PERFMON_HI 0x126c
+#define mmDC_PERFMON14_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON14_PERFMON_LOW 0x126d
+#define mmDC_PERFMON14_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define mmMPCC0_MPCC_TOP_SEL 0x1271
+#define mmMPCC0_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_SEL 0x1272
+#define mmMPCC0_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_OPP_ID 0x1273
+#define mmMPCC0_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC0_MPCC_CONTROL 0x1274
+#define mmMPCC0_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_SM_CONTROL 0x1275
+#define mmMPCC0_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL 0x1276
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_TOP_GAIN 0x1277
+#define mmMPCC0_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE 0x1278
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x1279
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_R_CR 0x127a
+#define mmMPCC0_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_G_Y 0x127b
+#define mmMPCC0_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_B_CB 0x127c
+#define mmMPCC0_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC0_MPCC_MEM_PWR_CTRL 0x127d
+#define mmMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC0_MPCC_STALL_STATUS 0x127e
+#define mmMPCC0_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC0_MPCC_STATUS 0x127f
+#define mmMPCC0_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x6c
+#define mmMPCC1_MPCC_TOP_SEL 0x128c
+#define mmMPCC1_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_SEL 0x128d
+#define mmMPCC1_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_OPP_ID 0x128e
+#define mmMPCC1_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC1_MPCC_CONTROL 0x128f
+#define mmMPCC1_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_SM_CONTROL 0x1290
+#define mmMPCC1_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL 0x1291
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_TOP_GAIN 0x1292
+#define mmMPCC1_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE 0x1293
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x1294
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_R_CR 0x1295
+#define mmMPCC1_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_G_Y 0x1296
+#define mmMPCC1_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_B_CB 0x1297
+#define mmMPCC1_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC1_MPCC_MEM_PWR_CTRL 0x1298
+#define mmMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC1_MPCC_STALL_STATUS 0x1299
+#define mmMPCC1_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC1_MPCC_STATUS 0x129a
+#define mmMPCC1_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xd8
+#define mmMPCC2_MPCC_TOP_SEL 0x12a7
+#define mmMPCC2_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_SEL 0x12a8
+#define mmMPCC2_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_OPP_ID 0x12a9
+#define mmMPCC2_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC2_MPCC_CONTROL 0x12aa
+#define mmMPCC2_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_SM_CONTROL 0x12ab
+#define mmMPCC2_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL 0x12ac
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_TOP_GAIN 0x12ad
+#define mmMPCC2_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE 0x12ae
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x12af
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_R_CR 0x12b0
+#define mmMPCC2_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_G_Y 0x12b1
+#define mmMPCC2_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_B_CB 0x12b2
+#define mmMPCC2_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC2_MPCC_MEM_PWR_CTRL 0x12b3
+#define mmMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC2_MPCC_STALL_STATUS 0x12b4
+#define mmMPCC2_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC2_MPCC_STATUS 0x12b5
+#define mmMPCC2_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0x144
+#define mmMPCC3_MPCC_TOP_SEL 0x12c2
+#define mmMPCC3_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_SEL 0x12c3
+#define mmMPCC3_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_OPP_ID 0x12c4
+#define mmMPCC3_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC3_MPCC_CONTROL 0x12c5
+#define mmMPCC3_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_SM_CONTROL 0x12c6
+#define mmMPCC3_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL 0x12c7
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_TOP_GAIN 0x12c8
+#define mmMPCC3_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE 0x12c9
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x12ca
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_R_CR 0x12cb
+#define mmMPCC3_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_G_Y 0x12cc
+#define mmMPCC3_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_B_CB 0x12cd
+#define mmMPCC3_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC3_MPCC_MEM_PWR_CTRL 0x12ce
+#define mmMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC3_MPCC_STALL_STATUS 0x12cf
+#define mmMPCC3_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC3_MPCC_STATUS 0x12d0
+#define mmMPCC3_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+// base address: 0x1b0
+#define mmMPCC4_MPCC_TOP_SEL 0x12dd
+#define mmMPCC4_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_SEL 0x12de
+#define mmMPCC4_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_OPP_ID 0x12df
+#define mmMPCC4_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC4_MPCC_CONTROL 0x12e0
+#define mmMPCC4_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_SM_CONTROL 0x12e1
+#define mmMPCC4_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL 0x12e2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_TOP_GAIN 0x12e3
+#define mmMPCC4_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE 0x12e4
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE 0x12e5
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_R_CR 0x12e6
+#define mmMPCC4_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_G_Y 0x12e7
+#define mmMPCC4_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_B_CB 0x12e8
+#define mmMPCC4_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC4_MPCC_MEM_PWR_CTRL 0x12e9
+#define mmMPCC4_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC4_MPCC_STALL_STATUS 0x12ea
+#define mmMPCC4_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC4_MPCC_STATUS 0x12eb
+#define mmMPCC4_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc5_dispdec
+// base address: 0x21c
+#define mmMPCC5_MPCC_TOP_SEL 0x12f8
+#define mmMPCC5_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC5_MPCC_BOT_SEL 0x12f9
+#define mmMPCC5_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC5_MPCC_OPP_ID 0x12fa
+#define mmMPCC5_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC5_MPCC_CONTROL 0x12fb
+#define mmMPCC5_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC5_MPCC_SM_CONTROL 0x12fc
+#define mmMPCC5_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC5_MPCC_UPDATE_LOCK_SEL 0x12fd
+#define mmMPCC5_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC5_MPCC_TOP_GAIN 0x12fe
+#define mmMPCC5_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC5_MPCC_BOT_GAIN_INSIDE 0x12ff
+#define mmMPCC5_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC5_MPCC_BOT_GAIN_OUTSIDE 0x1300
+#define mmMPCC5_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC5_MPCC_BG_R_CR 0x1301
+#define mmMPCC5_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC5_MPCC_BG_G_Y 0x1302
+#define mmMPCC5_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC5_MPCC_BG_B_CB 0x1303
+#define mmMPCC5_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC5_MPCC_MEM_PWR_CTRL 0x1304
+#define mmMPCC5_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC5_MPCC_STALL_STATUS 0x1305
+#define mmMPCC5_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC5_MPCC_STATUS 0x1306
+#define mmMPCC5_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc6_dispdec
+// base address: 0x288
+#define mmMPCC6_MPCC_TOP_SEL 0x1313
+#define mmMPCC6_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC6_MPCC_BOT_SEL 0x1314
+#define mmMPCC6_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC6_MPCC_OPP_ID 0x1315
+#define mmMPCC6_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC6_MPCC_CONTROL 0x1316
+#define mmMPCC6_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC6_MPCC_SM_CONTROL 0x1317
+#define mmMPCC6_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC6_MPCC_UPDATE_LOCK_SEL 0x1318
+#define mmMPCC6_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC6_MPCC_TOP_GAIN 0x1319
+#define mmMPCC6_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC6_MPCC_BOT_GAIN_INSIDE 0x131a
+#define mmMPCC6_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC6_MPCC_BOT_GAIN_OUTSIDE 0x131b
+#define mmMPCC6_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC6_MPCC_BG_R_CR 0x131c
+#define mmMPCC6_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC6_MPCC_BG_G_Y 0x131d
+#define mmMPCC6_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC6_MPCC_BG_B_CB 0x131e
+#define mmMPCC6_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC6_MPCC_MEM_PWR_CTRL 0x131f
+#define mmMPCC6_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC6_MPCC_STALL_STATUS 0x1320
+#define mmMPCC6_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC6_MPCC_STATUS 0x1321
+#define mmMPCC6_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc7_dispdec
+// base address: 0x2f4
+#define mmMPCC7_MPCC_TOP_SEL 0x132e
+#define mmMPCC7_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC7_MPCC_BOT_SEL 0x132f
+#define mmMPCC7_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC7_MPCC_OPP_ID 0x1330
+#define mmMPCC7_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC7_MPCC_CONTROL 0x1331
+#define mmMPCC7_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC7_MPCC_SM_CONTROL 0x1332
+#define mmMPCC7_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC7_MPCC_UPDATE_LOCK_SEL 0x1333
+#define mmMPCC7_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC7_MPCC_TOP_GAIN 0x1334
+#define mmMPCC7_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC7_MPCC_BOT_GAIN_INSIDE 0x1335
+#define mmMPCC7_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC7_MPCC_BOT_GAIN_OUTSIDE 0x1336
+#define mmMPCC7_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC7_MPCC_BG_R_CR 0x1337
+#define mmMPCC7_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC7_MPCC_BG_G_Y 0x1338
+#define mmMPCC7_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC7_MPCC_BG_B_CB 0x1339
+#define mmMPCC7_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC7_MPCC_MEM_PWR_CTRL 0x133a
+#define mmMPCC7_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC7_MPCC_STALL_STATUS 0x133b
+#define mmMPCC7_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC7_MPCC_STATUS 0x133c
+#define mmMPCC7_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define mmMPC_CLOCK_CONTROL 0x1349
+#define mmMPC_CLOCK_CONTROL_BASE_IDX 2
+#define mmMPC_SOFT_RESET 0x134a
+#define mmMPC_SOFT_RESET_BASE_IDX 2
+#define mmMPC_CRC_CTRL 0x134b
+#define mmMPC_CRC_CTRL_BASE_IDX 2
+#define mmMPC_CRC_SEL_CONTROL 0x134c
+#define mmMPC_CRC_SEL_CONTROL_BASE_IDX 2
+#define mmMPC_CRC_RESULT_AR 0x134d
+#define mmMPC_CRC_RESULT_AR_BASE_IDX 2
+#define mmMPC_CRC_RESULT_GB 0x134e
+#define mmMPC_CRC_RESULT_GB_BASE_IDX 2
+#define mmMPC_CRC_RESULT_C 0x134f
+#define mmMPC_CRC_RESULT_C_BASE_IDX 2
+#define mmMPC_PERFMON_EVENT_CTRL 0x1352
+#define mmMPC_PERFMON_EVENT_CTRL_BASE_IDX 2
+#define mmMPC_BYPASS_BG_AR 0x1353
+#define mmMPC_BYPASS_BG_AR_BASE_IDX 2
+#define mmMPC_BYPASS_BG_GB 0x1354
+#define mmMPC_BYPASS_BG_GB_BASE_IDX 2
+#define mmMPC_STALL_GRACE_WINDOW 0x1355
+#define mmMPC_STALL_GRACE_WINDOW_BASE_IDX 2
+#define mmMPC_HOST_READ_CONTROL 0x1356
+#define mmMPC_HOST_READ_CONTROL_BASE_IDX 2
+#define mmMPC_PENDING_TAKEN_STATUS_REG1 0x1357
+#define mmMPC_PENDING_TAKEN_STATUS_REG1_BASE_IDX 2
+#define mmMPC_PENDING_TAKEN_STATUS_REG3 0x1359
+#define mmMPC_PENDING_TAKEN_STATUS_REG3_BASE_IDX 2
+#define mmMPC_UPDATE_ACK_REG5 0x135b
+#define mmMPC_UPDATE_ACK_REG5_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0 0x135d
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET0 0x135e
+#define mmADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET0 0x135f
+#define mmADR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET0 0x1360
+#define mmCFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET0 0x1361
+#define mmCUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1 0x1362
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET1 0x1363
+#define mmADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET1 0x1364
+#define mmADR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET1 0x1365
+#define mmCFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET1 0x1366
+#define mmCUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2 0x1367
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET2 0x1368
+#define mmADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET2 0x1369
+#define mmADR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET2 0x136a
+#define mmCFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET2 0x136b
+#define mmCUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3 0x136c
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET3 0x136d
+#define mmADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET3 0x136e
+#define mmADR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET3 0x136f
+#define mmCFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET3 0x1370
+#define mmCUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmMPC_OUT0_MUX 0x1385
+#define mmMPC_OUT0_MUX_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CONTROL 0x1386
+#define mmMPC_OUT0_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y 0x1387
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB 0x1388
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 2
+#define mmMPC_OUT1_MUX 0x1389
+#define mmMPC_OUT1_MUX_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CONTROL 0x138a
+#define mmMPC_OUT1_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y 0x138b
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB 0x138c
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 2
+#define mmMPC_OUT2_MUX 0x138d
+#define mmMPC_OUT2_MUX_BASE_IDX 2
+#define mmMPC_OUT2_DENORM_CONTROL 0x138e
+#define mmMPC_OUT2_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT2_DENORM_CLAMP_G_Y 0x138f
+#define mmMPC_OUT2_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT2_DENORM_CLAMP_B_CB 0x1390
+#define mmMPC_OUT2_DENORM_CLAMP_B_CB_BASE_IDX 2
+#define mmMPC_OUT3_MUX 0x1391
+#define mmMPC_OUT3_MUX_BASE_IDX 2
+#define mmMPC_OUT3_DENORM_CONTROL 0x1392
+#define mmMPC_OUT3_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT3_DENORM_CLAMP_G_Y 0x1393
+#define mmMPC_OUT3_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT3_DENORM_CLAMP_B_CB 0x1394
+#define mmMPC_OUT3_DENORM_CLAMP_B_CB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE 0x13ae
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x13af
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x13b0
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL 0x13b1
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x13b2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x13b3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x13b4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13b5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13b6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13b7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x13b8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x13b9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x13ba
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x13bb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x13bc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x13bd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x13be
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x13bf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x13c0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x13c1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x13c2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x13c3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x13c4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x13c5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x13c6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x13c7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x13c8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x13c9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x13ca
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x13cb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x13cc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x13cd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x13ce
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x13cf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x13d0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x13d1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x13d2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x13d3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x13d4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x13d5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x13d6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x13d7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x13d8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x13d9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x13da
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x13db
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x13dc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x13dd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x13de
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x13df
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x13e0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x13e1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x13e2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x13e3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x13e4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x13e5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x13e6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x13e7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x13e8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x13e9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x13ea
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x13eb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x104
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE 0x13ef
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x13f0
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x13f1
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL 0x13f2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x13f3
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x13f4
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x13f5
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13f6
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13f7
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13f8
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x13f9
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x13fa
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x13fb
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x13fc
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x13fd
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x13fe
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x13ff
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x1400
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x1401
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x1402
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x1403
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x1404
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x1405
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x1406
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x1407
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x1408
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x1409
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x140a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x140b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x140c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x140d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x140e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x140f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x1410
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x1411
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x1412
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1413
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1414
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1415
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x1416
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x1417
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x1418
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x1419
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x141a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x141b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x141c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x141d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x141e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x141f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x1420
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x1421
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x1422
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x1423
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x1424
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x1425
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x1426
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x1427
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x1428
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x1429
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x142a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x142b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x142c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x208
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE 0x1430
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x1431
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x1432
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL 0x1433
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x1434
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x1435
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x1436
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1437
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1438
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x1439
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x143a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x143b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x143c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x143d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x143e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x143f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x1440
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x1441
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x1442
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x1443
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x1444
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x1445
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x1446
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x1447
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x1448
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x1449
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x144a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x144b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x144c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x144d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x144e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x144f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x1450
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x1451
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x1452
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x1453
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1454
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1455
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1456
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x1457
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x1458
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x1459
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x145a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x145b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x145c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x145d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x145e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x145f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x1460
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x1461
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x1462
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x1463
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x1464
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x1465
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x1466
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x1467
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x1468
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x1469
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x146a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x146b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x146c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x146d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x30c
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE 0x1471
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x1472
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x1473
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL 0x1474
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x1475
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x1476
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x1477
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1478
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1479
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x147a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x147b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x147c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x147d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x147e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x147f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x1480
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x1481
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x1482
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x1483
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x1484
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x1485
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x1486
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x1487
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x1488
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x1489
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x148a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x148b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x148c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x148d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x148e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x148f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x1490
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x1491
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x1492
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x1493
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x1494
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1495
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1496
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1497
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x1498
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x1499
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x149a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x149b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x149c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x149d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x149e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x149f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x14a0
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x14a1
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x14a2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x14a3
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x14a4
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x14a5
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x14a6
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x14a7
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x14a8
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x14a9
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x14aa
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x14ab
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x14ac
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x14ad
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x14ae
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+// base address: 0x410
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE 0x14b2
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX 0x14b3
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA 0x14b4
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL 0x14b5
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B 0x14b6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G 0x14b7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R 0x14b8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x14b9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x14ba
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x14bb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B 0x14bc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B 0x14bd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G 0x14be
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G 0x14bf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R 0x14c0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R 0x14c1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1 0x14c2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3 0x14c3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5 0x14c4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7 0x14c5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9 0x14c6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11 0x14c7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13 0x14c8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15 0x14c9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17 0x14ca
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19 0x14cb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21 0x14cc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23 0x14cd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25 0x14ce
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27 0x14cf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29 0x14d0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31 0x14d1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33 0x14d2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B 0x14d3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G 0x14d4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R 0x14d5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x14d6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x14d7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x14d8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B 0x14d9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B 0x14da
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G 0x14db
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G 0x14dc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R 0x14dd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R 0x14de
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1 0x14df
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3 0x14e0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5 0x14e1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7 0x14e2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9 0x14e3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11 0x14e4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13 0x14e5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15 0x14e6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17 0x14e7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19 0x14e8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21 0x14e9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23 0x14ea
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25 0x14eb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27 0x14ec
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29 0x14ed
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31 0x14ee
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33 0x14ef
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam5_dispdec
+// base address: 0x514
+#define mmMPCC_OGAM5_MPCC_OGAM_MODE 0x14f3
+#define mmMPCC_OGAM5_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_LUT_INDEX 0x14f4
+#define mmMPCC_OGAM5_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_LUT_DATA 0x14f5
+#define mmMPCC_OGAM5_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL 0x14f6
+#define mmMPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B 0x14f7
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G 0x14f8
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R 0x14f9
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x14fa
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x14fb
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x14fc
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_B 0x14fd
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B 0x14fe
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_G 0x14ff
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G 0x1500
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_R 0x1501
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R 0x1502
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1 0x1503
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3 0x1504
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5 0x1505
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7 0x1506
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9 0x1507
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11 0x1508
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13 0x1509
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15 0x150a
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17 0x150b
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19 0x150c
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21 0x150d
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23 0x150e
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25 0x150f
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27 0x1510
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29 0x1511
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31 0x1512
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33 0x1513
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B 0x1514
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G 0x1515
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R 0x1516
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1517
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1518
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1519
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_B 0x151a
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B 0x151b
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_G 0x151c
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G 0x151d
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_R 0x151e
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R 0x151f
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1 0x1520
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3 0x1521
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5 0x1522
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7 0x1523
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9 0x1524
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11 0x1525
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13 0x1526
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15 0x1527
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17 0x1528
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19 0x1529
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21 0x152a
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23 0x152b
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25 0x152c
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27 0x152d
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29 0x152e
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31 0x152f
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33 0x1530
+#define mmMPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam6_dispdec
+// base address: 0x618
+#define mmMPCC_OGAM6_MPCC_OGAM_MODE 0x1534
+#define mmMPCC_OGAM6_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_LUT_INDEX 0x1535
+#define mmMPCC_OGAM6_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_LUT_DATA 0x1536
+#define mmMPCC_OGAM6_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL 0x1537
+#define mmMPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B 0x1538
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G 0x1539
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R 0x153a
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x153b
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x153c
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x153d
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_B 0x153e
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B 0x153f
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_G 0x1540
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G 0x1541
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_R 0x1542
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R 0x1543
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1 0x1544
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3 0x1545
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5 0x1546
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7 0x1547
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9 0x1548
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11 0x1549
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13 0x154a
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15 0x154b
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17 0x154c
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19 0x154d
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21 0x154e
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23 0x154f
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25 0x1550
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27 0x1551
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29 0x1552
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31 0x1553
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33 0x1554
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B 0x1555
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G 0x1556
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R 0x1557
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1558
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1559
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x155a
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_B 0x155b
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B 0x155c
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_G 0x155d
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G 0x155e
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_R 0x155f
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R 0x1560
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1 0x1561
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3 0x1562
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5 0x1563
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7 0x1564
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9 0x1565
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11 0x1566
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13 0x1567
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15 0x1568
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17 0x1569
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19 0x156a
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21 0x156b
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23 0x156c
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25 0x156d
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27 0x156e
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29 0x156f
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31 0x1570
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33 0x1571
+#define mmMPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam7_dispdec
+// base address: 0x71c
+#define mmMPCC_OGAM7_MPCC_OGAM_MODE 0x1575
+#define mmMPCC_OGAM7_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_LUT_INDEX 0x1576
+#define mmMPCC_OGAM7_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_LUT_DATA 0x1577
+#define mmMPCC_OGAM7_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL 0x1578
+#define mmMPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B 0x1579
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G 0x157a
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R 0x157b
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x157c
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x157d
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x157e
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_B 0x157f
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B 0x1580
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_G 0x1581
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G 0x1582
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_R 0x1583
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R 0x1584
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1 0x1585
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3 0x1586
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5 0x1587
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7 0x1588
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9 0x1589
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11 0x158a
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13 0x158b
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15 0x158c
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17 0x158d
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19 0x158e
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21 0x158f
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23 0x1590
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25 0x1591
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27 0x1592
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29 0x1593
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31 0x1594
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33 0x1595
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B 0x1596
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G 0x1597
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R 0x1598
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1599
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x159a
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x159b
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_B 0x159c
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B 0x159d
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_G 0x159e
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G 0x159f
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_R 0x15a0
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R 0x15a1
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1 0x15a2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3 0x15a3
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5 0x15a4
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7 0x15a5
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9 0x15a6
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11 0x15a7
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13 0x15a8
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15 0x15a9
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17 0x15aa
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19 0x15ab
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21 0x15ac
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23 0x15ad
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25 0x15ae
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27 0x15af
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29 0x15b0
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31 0x15b1
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33 0x15b2
+#define mmMPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
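(Reader's note, not part of the patch: each register macro above is a dword offset paired with a
 _BASE_IDX that selects the MMIO segment added at runtime, and the per-instance "base address"
 comments are byte offsets — e.g. mmMPCC_OGAM4_MPCC_OGAM_MODE (0x14b2) sits 0x41 dwords = 0x104
 bytes past mmMPCC_OGAM3_MPCC_OGAM_MODE (0x1471), matching the 0x410 - 0x30c base-address delta.
 A minimal sketch of how such a pair is typically resolved follows; the segment table is
 hypothetical, whereas the real driver looks the base up in its per-IP base tables, e.g. through
 the SOC15_REG_OFFSET() helper.)

#include <stdint.h>

/* Hypothetical segment table for illustration only; real values come from the ASIC base tables. */
static const uint32_t dc_segment_base[] = { 0x0000, 0x0010, 0x34c0 };

static inline uint32_t dc_reg_index(uint32_t base_idx, uint32_t reg_offset)
{
	/* Final MMIO dword index = segment selected by the _BASE_IDX value + register offset. */
	return dc_segment_base[base_idx] + reg_offset;
}

/* Usage sketch: dc_reg_index(mmMPCC_OGAM3_MPCC_OGAM_MODE_BASE_IDX, mmMPCC_OGAM3_MPCC_OGAM_MODE) */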
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define mmMPC_OUT_CSC_COEF_FORMAT 0x15b6
+#define mmMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 2
+#define mmMPC_OUT0_CSC_MODE 0x15b7
+#define mmMPC_OUT0_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_A 0x15b8
+#define mmMPC_OUT0_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_A 0x15b9
+#define mmMPC_OUT0_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_A 0x15ba
+#define mmMPC_OUT0_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_A 0x15bb
+#define mmMPC_OUT0_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_A 0x15bc
+#define mmMPC_OUT0_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_A 0x15bd
+#define mmMPC_OUT0_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_B 0x15be
+#define mmMPC_OUT0_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_B 0x15bf
+#define mmMPC_OUT0_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_B 0x15c0
+#define mmMPC_OUT0_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_B 0x15c1
+#define mmMPC_OUT0_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_B 0x15c2
+#define mmMPC_OUT0_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_B 0x15c3
+#define mmMPC_OUT0_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_MODE 0x15c4
+#define mmMPC_OUT1_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_A 0x15c5
+#define mmMPC_OUT1_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_A 0x15c6
+#define mmMPC_OUT1_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_A 0x15c7
+#define mmMPC_OUT1_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_A 0x15c8
+#define mmMPC_OUT1_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_A 0x15c9
+#define mmMPC_OUT1_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_A 0x15ca
+#define mmMPC_OUT1_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_B 0x15cb
+#define mmMPC_OUT1_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_B 0x15cc
+#define mmMPC_OUT1_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_B 0x15cd
+#define mmMPC_OUT1_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_B 0x15ce
+#define mmMPC_OUT1_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_B 0x15cf
+#define mmMPC_OUT1_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_B 0x15d0
+#define mmMPC_OUT1_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OUT2_CSC_MODE 0x15d1
+#define mmMPC_OUT2_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C11_C12_A 0x15d2
+#define mmMPC_OUT2_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C13_C14_A 0x15d3
+#define mmMPC_OUT2_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C21_C22_A 0x15d4
+#define mmMPC_OUT2_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C23_C24_A 0x15d5
+#define mmMPC_OUT2_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C31_C32_A 0x15d6
+#define mmMPC_OUT2_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C33_C34_A 0x15d7
+#define mmMPC_OUT2_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C11_C12_B 0x15d8
+#define mmMPC_OUT2_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C13_C14_B 0x15d9
+#define mmMPC_OUT2_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C21_C22_B 0x15da
+#define mmMPC_OUT2_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C23_C24_B 0x15db
+#define mmMPC_OUT2_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C31_C32_B 0x15dc
+#define mmMPC_OUT2_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT2_CSC_C33_C34_B 0x15dd
+#define mmMPC_OUT2_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OUT3_CSC_MODE 0x15de
+#define mmMPC_OUT3_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C11_C12_A 0x15df
+#define mmMPC_OUT3_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C13_C14_A 0x15e0
+#define mmMPC_OUT3_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C21_C22_A 0x15e1
+#define mmMPC_OUT3_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C23_C24_A 0x15e2
+#define mmMPC_OUT3_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C31_C32_A 0x15e3
+#define mmMPC_OUT3_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C33_C34_A 0x15e4
+#define mmMPC_OUT3_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C11_C12_B 0x15e5
+#define mmMPC_OUT3_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C13_C14_B 0x15e6
+#define mmMPC_OUT3_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C21_C22_B 0x15e7
+#define mmMPC_OUT3_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C23_C24_B 0x15e8
+#define mmMPC_OUT3_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C31_C32_B 0x15e9
+#define mmMPC_OUT3_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT3_CSC_C33_C34_B 0x15ea
+#define mmMPC_OUT3_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x5964
+#define mmDC_PERFMON15_PERFCOUNTER_CNTL 0x1659
+#define mmDC_PERFMON15_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON15_PERFCOUNTER_CNTL2 0x165a
+#define mmDC_PERFMON15_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON15_PERFCOUNTER_STATE 0x165b
+#define mmDC_PERFMON15_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON15_PERFMON_CNTL 0x165c
+#define mmDC_PERFMON15_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON15_PERFMON_CNTL2 0x165d
+#define mmDC_PERFMON15_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON15_PERFMON_CVALUE_INT_MISC 0x165e
+#define mmDC_PERFMON15_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON15_PERFMON_CVALUE_LOW 0x165f
+#define mmDC_PERFMON15_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON15_PERFMON_HI 0x1660
+#define mmDC_PERFMON15_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON15_PERFMON_LOW 0x1661
+#define mmDC_PERFMON15_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+// base address: 0x0
+#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL 0x17b0
+#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 2
+#define mmBL1_PWM_USER_LEVEL 0x17b1
+#define mmBL1_PWM_USER_LEVEL_BASE_IDX 2
+#define mmBL1_PWM_TARGET_ABM_LEVEL 0x17b2
+#define mmBL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 2
+#define mmBL1_PWM_CURRENT_ABM_LEVEL 0x17b3
+#define mmBL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 2
+#define mmBL1_PWM_FINAL_DUTY_CYCLE 0x17b4
+#define mmBL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 2
+#define mmBL1_PWM_MINIMUM_DUTY_CYCLE 0x17b5
+#define mmBL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 2
+#define mmBL1_PWM_ABM_CNTL 0x17b6
+#define mmBL1_PWM_ABM_CNTL_BASE_IDX 2
+#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE 0x17b7
+#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 2
+#define mmBL1_PWM_GRP2_REG_LOCK 0x17b8
+#define mmBL1_PWM_GRP2_REG_LOCK_BASE_IDX 2
+#define mmDC_ABM1_CNTL 0x17b9
+#define mmDC_ABM1_CNTL_BASE_IDX 2
+#define mmDC_ABM1_IPCSC_COEFF_SEL 0x17ba
+#define mmDC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 2
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_0 0x17bb
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 2
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_1 0x17bc
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 2
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_2 0x17bd
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 2
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_3 0x17be
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 2
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_4 0x17bf
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 2
+#define mmDC_ABM1_ACE_THRES_12 0x17c0
+#define mmDC_ABM1_ACE_THRES_12_BASE_IDX 2
+#define mmDC_ABM1_ACE_THRES_34 0x17c1
+#define mmDC_ABM1_ACE_THRES_34_BASE_IDX 2
+#define mmDC_ABM1_ACE_CNTL_MISC 0x17c2
+#define mmDC_ABM1_ACE_CNTL_MISC_BASE_IDX 2
+#define mmDC_ABM1_HGLS_REG_READ_PROGRESS 0x17c4
+#define mmDC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 2
+#define mmDC_ABM1_HG_MISC_CTRL 0x17c5
+#define mmDC_ABM1_HG_MISC_CTRL_BASE_IDX 2
+#define mmDC_ABM1_LS_SUM_OF_LUMA 0x17c6
+#define mmDC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 2
+#define mmDC_ABM1_LS_MIN_MAX_LUMA 0x17c7
+#define mmDC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 2
+#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x17c8
+#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 2
+#define mmDC_ABM1_LS_PIXEL_COUNT 0x17c9
+#define mmDC_ABM1_LS_PIXEL_COUNT_BASE_IDX 2
+#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x17ca
+#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 2
+#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x17cb
+#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 2
+#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x17cc
+#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 2
+#define mmDC_ABM1_HG_SAMPLE_RATE 0x17cd
+#define mmDC_ABM1_HG_SAMPLE_RATE_BASE_IDX 2
+#define mmDC_ABM1_LS_SAMPLE_RATE 0x17ce
+#define mmDC_ABM1_LS_SAMPLE_RATE_BASE_IDX 2
+#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x17cf
+#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 2
+#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x17d0
+#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 2
+#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x17d1
+#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 2
+#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x17d2
+#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 2
+#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x17d3
+#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_1 0x17d4
+#define mmDC_ABM1_HG_RESULT_1_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_2 0x17d5
+#define mmDC_ABM1_HG_RESULT_2_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_3 0x17d6
+#define mmDC_ABM1_HG_RESULT_3_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_4 0x17d7
+#define mmDC_ABM1_HG_RESULT_4_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_5 0x17d8
+#define mmDC_ABM1_HG_RESULT_5_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_6 0x17d9
+#define mmDC_ABM1_HG_RESULT_6_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_7 0x17da
+#define mmDC_ABM1_HG_RESULT_7_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_8 0x17db
+#define mmDC_ABM1_HG_RESULT_8_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_9 0x17dc
+#define mmDC_ABM1_HG_RESULT_9_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_10 0x17dd
+#define mmDC_ABM1_HG_RESULT_10_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_11 0x17de
+#define mmDC_ABM1_HG_RESULT_11_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_12 0x17df
+#define mmDC_ABM1_HG_RESULT_12_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_13 0x17e0
+#define mmDC_ABM1_HG_RESULT_13_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_14 0x17e1
+#define mmDC_ABM1_HG_RESULT_14_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_15 0x17e2
+#define mmDC_ABM1_HG_RESULT_15_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_16 0x17e3
+#define mmDC_ABM1_HG_RESULT_16_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_17 0x17e4
+#define mmDC_ABM1_HG_RESULT_17_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_18 0x17e5
+#define mmDC_ABM1_HG_RESULT_18_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_19 0x17e6
+#define mmDC_ABM1_HG_RESULT_19_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_20 0x17e7
+#define mmDC_ABM1_HG_RESULT_20_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_21 0x17e8
+#define mmDC_ABM1_HG_RESULT_21_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_22 0x17e9
+#define mmDC_ABM1_HG_RESULT_22_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_23 0x17ea
+#define mmDC_ABM1_HG_RESULT_23_BASE_IDX 2
+#define mmDC_ABM1_HG_RESULT_24 0x17eb
+#define mmDC_ABM1_HG_RESULT_24_BASE_IDX 2
+#define mmDC_ABM1_BL_MASTER_LOCK 0x17ec
+#define mmDC_ABM1_BL_MASTER_LOCK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define mmFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define mmFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define mmFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define mmFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT0_FMT_CONTROL 0x1840
+#define mmFMT0_FMT_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1841
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1842
+#define mmFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1843
+#define mmFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1844
+#define mmFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_CNTL 0x1845
+#define mmFMT0_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847
+#define mmFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_422_CONTROL 0x1849
+#define mmFMT0_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+// base address: 0x0
+#define mmDPG0_DPG_CONTROL 0x1854
+#define mmDPG0_DPG_CONTROL_BASE_IDX 2
+#define mmDPG0_DPG_RAMP_CONTROL 0x1855
+#define mmDPG0_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG0_DPG_DIMENSIONS 0x1856
+#define mmDPG0_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_R_CR 0x1857
+#define mmDPG0_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_G_Y 0x1858
+#define mmDPG0_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_B_CB 0x1859
+#define mmDPG0_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG0_DPG_OFFSET_SEGMENT 0x185a
+#define mmDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG0_DPG_STATUS 0x185b
+#define mmDPG0_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+// base address: 0x0
+#define mmOPPBUF0_OPPBUF_CONTROL 0x1884
+#define mmOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_CONTROL1 0x1889
+#define mmOPPBUF0_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+// base address: 0x0
+#define mmOPP_PIPE0_OPP_PIPE_CONTROL 0x188c
+#define mmOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+// base address: 0x0
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0 0x1893
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1 0x1894
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2 0x1895
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+// base address: 0x168
+#define mmFMT1_FMT_CLAMP_COMPONENT_R 0x1896
+#define mmFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_COMPONENT_G 0x1897
+#define mmFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_COMPONENT_B 0x1898
+#define mmFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT1_FMT_CONTROL 0x189a
+#define mmFMT1_FMT_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x189b
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x189c
+#define mmFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x189d
+#define mmFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_B_SEED 0x189e
+#define mmFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_CNTL 0x189f
+#define mmFMT1_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1
+#define mmFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_422_CONTROL 0x18a3
+#define mmFMT1_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+// base address: 0x168
+#define mmDPG1_DPG_CONTROL 0x18ae
+#define mmDPG1_DPG_CONTROL_BASE_IDX 2
+#define mmDPG1_DPG_RAMP_CONTROL 0x18af
+#define mmDPG1_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG1_DPG_DIMENSIONS 0x18b0
+#define mmDPG1_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_R_CR 0x18b1
+#define mmDPG1_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_G_Y 0x18b2
+#define mmDPG1_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_B_CB 0x18b3
+#define mmDPG1_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG1_DPG_OFFSET_SEGMENT 0x18b4
+#define mmDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG1_DPG_STATUS 0x18b5
+#define mmDPG1_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+// base address: 0x168
+#define mmOPPBUF1_OPPBUF_CONTROL 0x18de
+#define mmOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_CONTROL1 0x18e3
+#define mmOPPBUF1_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+// base address: 0x168
+#define mmOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6
+#define mmOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+// base address: 0x168
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0 0x18ed
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1 0x18ee
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2 0x18ef
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+// base address: 0x2d0
+#define mmFMT2_FMT_CLAMP_COMPONENT_R 0x18f0
+#define mmFMT2_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT2_FMT_CLAMP_COMPONENT_G 0x18f1
+#define mmFMT2_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT2_FMT_CLAMP_COMPONENT_B 0x18f2
+#define mmFMT2_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT2_FMT_DYNAMIC_EXP_CNTL 0x18f3
+#define mmFMT2_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT2_FMT_CONTROL 0x18f4
+#define mmFMT2_FMT_CONTROL_BASE_IDX 2
+#define mmFMT2_FMT_BIT_DEPTH_CONTROL 0x18f5
+#define mmFMT2_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT2_FMT_DITHER_RAND_R_SEED 0x18f6
+#define mmFMT2_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT2_FMT_DITHER_RAND_G_SEED 0x18f7
+#define mmFMT2_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT2_FMT_DITHER_RAND_B_SEED 0x18f8
+#define mmFMT2_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT2_FMT_CLAMP_CNTL 0x18f9
+#define mmFMT2_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18fa
+#define mmFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT2_FMT_MAP420_MEMORY_CONTROL 0x18fb
+#define mmFMT2_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT2_FMT_422_CONTROL 0x18fd
+#define mmFMT2_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+// base address: 0x2d0
+#define mmDPG2_DPG_CONTROL 0x1908
+#define mmDPG2_DPG_CONTROL_BASE_IDX 2
+#define mmDPG2_DPG_RAMP_CONTROL 0x1909
+#define mmDPG2_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG2_DPG_DIMENSIONS 0x190a
+#define mmDPG2_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG2_DPG_COLOUR_R_CR 0x190b
+#define mmDPG2_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG2_DPG_COLOUR_G_Y 0x190c
+#define mmDPG2_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG2_DPG_COLOUR_B_CB 0x190d
+#define mmDPG2_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG2_DPG_OFFSET_SEGMENT 0x190e
+#define mmDPG2_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG2_DPG_STATUS 0x190f
+#define mmDPG2_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+// base address: 0x2d0
+#define mmOPPBUF2_OPPBUF_CONTROL 0x1938
+#define mmOPPBUF2_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF2_OPPBUF_3D_PARAMETERS_0 0x1939
+#define mmOPPBUF2_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF2_OPPBUF_3D_PARAMETERS_1 0x193a
+#define mmOPPBUF2_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define mmOPPBUF2_OPPBUF_CONTROL1 0x193d
+#define mmOPPBUF2_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+// base address: 0x2d0
+#define mmOPP_PIPE2_OPP_PIPE_CONTROL 0x1940
+#define mmOPP_PIPE2_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+// base address: 0x2d0
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL 0x1945
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK 0x1946
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0 0x1947
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1 0x1948
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2 0x1949
+#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+// base address: 0x438
+#define mmFMT3_FMT_CLAMP_COMPONENT_R 0x194a
+#define mmFMT3_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT3_FMT_CLAMP_COMPONENT_G 0x194b
+#define mmFMT3_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT3_FMT_CLAMP_COMPONENT_B 0x194c
+#define mmFMT3_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT3_FMT_DYNAMIC_EXP_CNTL 0x194d
+#define mmFMT3_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT3_FMT_CONTROL 0x194e
+#define mmFMT3_FMT_CONTROL_BASE_IDX 2
+#define mmFMT3_FMT_BIT_DEPTH_CONTROL 0x194f
+#define mmFMT3_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT3_FMT_DITHER_RAND_R_SEED 0x1950
+#define mmFMT3_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT3_FMT_DITHER_RAND_G_SEED 0x1951
+#define mmFMT3_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT3_FMT_DITHER_RAND_B_SEED 0x1952
+#define mmFMT3_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT3_FMT_CLAMP_CNTL 0x1953
+#define mmFMT3_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1954
+#define mmFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT3_FMT_MAP420_MEMORY_CONTROL 0x1955
+#define mmFMT3_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT3_FMT_422_CONTROL 0x1957
+#define mmFMT3_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+// base address: 0x438
+#define mmDPG3_DPG_CONTROL 0x1962
+#define mmDPG3_DPG_CONTROL_BASE_IDX 2
+#define mmDPG3_DPG_RAMP_CONTROL 0x1963
+#define mmDPG3_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG3_DPG_DIMENSIONS 0x1964
+#define mmDPG3_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG3_DPG_COLOUR_R_CR 0x1965
+#define mmDPG3_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG3_DPG_COLOUR_G_Y 0x1966
+#define mmDPG3_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG3_DPG_COLOUR_B_CB 0x1967
+#define mmDPG3_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG3_DPG_OFFSET_SEGMENT 0x1968
+#define mmDPG3_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG3_DPG_STATUS 0x1969
+#define mmDPG3_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+// base address: 0x438
+#define mmOPPBUF3_OPPBUF_CONTROL 0x1992
+#define mmOPPBUF3_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF3_OPPBUF_3D_PARAMETERS_0 0x1993
+#define mmOPPBUF3_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF3_OPPBUF_3D_PARAMETERS_1 0x1994
+#define mmOPPBUF3_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define mmOPPBUF3_OPPBUF_CONTROL1 0x1997
+#define mmOPPBUF3_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+// base address: 0x438
+#define mmOPP_PIPE3_OPP_PIPE_CONTROL 0x199a
+#define mmOPP_PIPE3_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+// base address: 0x438
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL 0x199f
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK 0x19a0
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0 0x19a1
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1 0x19a2
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2 0x19a3
+#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt4_dispdec
+// base address: 0x5a0
+#define mmFMT4_FMT_CLAMP_COMPONENT_R 0x19a4
+#define mmFMT4_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT4_FMT_CLAMP_COMPONENT_G 0x19a5
+#define mmFMT4_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT4_FMT_CLAMP_COMPONENT_B 0x19a6
+#define mmFMT4_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT4_FMT_DYNAMIC_EXP_CNTL 0x19a7
+#define mmFMT4_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT4_FMT_CONTROL 0x19a8
+#define mmFMT4_FMT_CONTROL_BASE_IDX 2
+#define mmFMT4_FMT_BIT_DEPTH_CONTROL 0x19a9
+#define mmFMT4_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT4_FMT_DITHER_RAND_R_SEED 0x19aa
+#define mmFMT4_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT4_FMT_DITHER_RAND_G_SEED 0x19ab
+#define mmFMT4_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT4_FMT_DITHER_RAND_B_SEED 0x19ac
+#define mmFMT4_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT4_FMT_CLAMP_CNTL 0x19ad
+#define mmFMT4_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x19ae
+#define mmFMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT4_FMT_MAP420_MEMORY_CONTROL 0x19af
+#define mmFMT4_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT4_FMT_422_CONTROL 0x19b1
+#define mmFMT4_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg4_dispdec
+// base address: 0x5a0
+#define mmDPG4_DPG_CONTROL 0x19bc
+#define mmDPG4_DPG_CONTROL_BASE_IDX 2
+#define mmDPG4_DPG_RAMP_CONTROL 0x19bd
+#define mmDPG4_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG4_DPG_DIMENSIONS 0x19be
+#define mmDPG4_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG4_DPG_COLOUR_R_CR 0x19bf
+#define mmDPG4_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG4_DPG_COLOUR_G_Y 0x19c0
+#define mmDPG4_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG4_DPG_COLOUR_B_CB 0x19c1
+#define mmDPG4_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG4_DPG_OFFSET_SEGMENT 0x19c2
+#define mmDPG4_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG4_DPG_STATUS 0x19c3
+#define mmDPG4_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf4_dispdec
+// base address: 0x5a0
+#define mmOPPBUF4_OPPBUF_CONTROL 0x19ec
+#define mmOPPBUF4_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF4_OPPBUF_3D_PARAMETERS_0 0x19ed
+#define mmOPPBUF4_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF4_OPPBUF_3D_PARAMETERS_1 0x19ee
+#define mmOPPBUF4_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define mmOPPBUF4_OPPBUF_CONTROL1 0x19f1
+#define mmOPPBUF4_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe4_dispdec
+// base address: 0x5a0
+#define mmOPP_PIPE4_OPP_PIPE_CONTROL 0x19f4
+#define mmOPP_PIPE4_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc4_dispdec
+// base address: 0x5a0
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL 0x19f9
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_MASK 0x19fa
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0 0x19fb
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1 0x19fc
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT2 0x19fd
+#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt5_dispdec
+// base address: 0x708
+#define mmFMT5_FMT_CLAMP_COMPONENT_R 0x19fe
+#define mmFMT5_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT5_FMT_CLAMP_COMPONENT_G 0x19ff
+#define mmFMT5_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT5_FMT_CLAMP_COMPONENT_B 0x1a00
+#define mmFMT5_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT5_FMT_DYNAMIC_EXP_CNTL 0x1a01
+#define mmFMT5_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT5_FMT_CONTROL 0x1a02
+#define mmFMT5_FMT_CONTROL_BASE_IDX 2
+#define mmFMT5_FMT_BIT_DEPTH_CONTROL 0x1a03
+#define mmFMT5_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT5_FMT_DITHER_RAND_R_SEED 0x1a04
+#define mmFMT5_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT5_FMT_DITHER_RAND_G_SEED 0x1a05
+#define mmFMT5_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT5_FMT_DITHER_RAND_B_SEED 0x1a06
+#define mmFMT5_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT5_FMT_CLAMP_CNTL 0x1a07
+#define mmFMT5_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1a08
+#define mmFMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT5_FMT_MAP420_MEMORY_CONTROL 0x1a09
+#define mmFMT5_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT5_FMT_422_CONTROL 0x1a0b
+#define mmFMT5_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg5_dispdec
+// base address: 0x708
+#define mmDPG5_DPG_CONTROL 0x1a16
+#define mmDPG5_DPG_CONTROL_BASE_IDX 2
+#define mmDPG5_DPG_RAMP_CONTROL 0x1a17
+#define mmDPG5_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG5_DPG_DIMENSIONS 0x1a18
+#define mmDPG5_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG5_DPG_COLOUR_R_CR 0x1a19
+#define mmDPG5_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG5_DPG_COLOUR_G_Y 0x1a1a
+#define mmDPG5_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG5_DPG_COLOUR_B_CB 0x1a1b
+#define mmDPG5_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG5_DPG_OFFSET_SEGMENT 0x1a1c
+#define mmDPG5_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG5_DPG_STATUS 0x1a1d
+#define mmDPG5_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf5_dispdec
+// base address: 0x708
+#define mmOPPBUF5_OPPBUF_CONTROL 0x1a46
+#define mmOPPBUF5_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF5_OPPBUF_3D_PARAMETERS_0 0x1a47
+#define mmOPPBUF5_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF5_OPPBUF_3D_PARAMETERS_1 0x1a48
+#define mmOPPBUF5_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define mmOPPBUF5_OPPBUF_CONTROL1 0x1a4b
+#define mmOPPBUF5_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe5_dispdec
+// base address: 0x708
+#define mmOPP_PIPE5_OPP_PIPE_CONTROL 0x1a4e
+#define mmOPP_PIPE5_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc5_dispdec
+// base address: 0x708
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL 0x1a53
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_MASK 0x1a54
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0 0x1a55
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1 0x1a56
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT2 0x1a57
+#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+// base address: 0x0
+#define mmOPP_TOP_CLK_CONTROL 0x1a5e
+#define mmOPP_TOP_CLK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+// base address: 0x0
+#define mmDSCRM0_DSCRM_DSC_FORWARD_CONFIG 0x1a64
+#define mmDSCRM0_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+// base address: 0x4
+#define mmDSCRM1_DSCRM_DSC_FORWARD_CONFIG 0x1a65
+#define mmDSCRM1_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+// base address: 0x8
+#define mmDSCRM2_DSCRM_DSC_FORWARD_CONFIG 0x1a66
+#define mmDSCRM2_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+// base address: 0xc
+#define mmDSCRM3_DSCRM_DSC_FORWARD_CONFIG 0x1a67
+#define mmDSCRM3_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm4_dispdec
+// base address: 0x10
+#define mmDSCRM4_DSCRM_DSC_FORWARD_CONFIG 0x1a68
+#define mmDSCRM4_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm5_dispdec
+// base address: 0x14
+#define mmDSCRM5_DSCRM_DSC_FORWARD_CONFIG 0x1a69
+#define mmDSCRM5_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x6af8
+#define mmDC_PERFMON16_PERFCOUNTER_CNTL 0x1abe
+#define mmDC_PERFMON16_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON16_PERFCOUNTER_CNTL2 0x1abf
+#define mmDC_PERFMON16_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON16_PERFCOUNTER_STATE 0x1ac0
+#define mmDC_PERFMON16_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON16_PERFMON_CNTL 0x1ac1
+#define mmDC_PERFMON16_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON16_PERFMON_CNTL2 0x1ac2
+#define mmDC_PERFMON16_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON16_PERFMON_CVALUE_INT_MISC 0x1ac3
+#define mmDC_PERFMON16_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON16_PERFMON_CVALUE_LOW 0x1ac4
+#define mmDC_PERFMON16_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON16_PERFMON_HI 0x1ac5
+#define mmDC_PERFMON16_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON16_PERFMON_LOW 0x1ac6
+#define mmDC_PERFMON16_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+// base address: 0x0
+#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
+#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_DATA_SOURCE_SELECT 0x1acb
+#define mmODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc
+#define mmODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_BYTES_PER_PIXEL 0x1acd
+#define mmODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM0_OPTC_WIDTH_CONTROL 0x1ace
+#define mmODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf
+#define mmODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_MEMORY_CONFIG 0x1ad0
+#define mmODM0_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define mmODM0_OPTC_INPUT_SPARE_REGISTER 0x1ad1
+#define mmODM0_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+// base address: 0x40
+#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
+#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_DATA_SOURCE_SELECT 0x1adb
+#define mmODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc
+#define mmODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_BYTES_PER_PIXEL 0x1add
+#define mmODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM1_OPTC_WIDTH_CONTROL 0x1ade
+#define mmODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf
+#define mmODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_MEMORY_CONFIG 0x1ae0
+#define mmODM1_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define mmODM1_OPTC_INPUT_SPARE_REGISTER 0x1ae1
+#define mmODM1_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+// base address: 0x80
+#define mmODM2_OPTC_INPUT_GLOBAL_CONTROL 0x1aea
+#define mmODM2_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM2_OPTC_DATA_SOURCE_SELECT 0x1aeb
+#define mmODM2_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM2_OPTC_DATA_FORMAT_CONTROL 0x1aec
+#define mmODM2_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM2_OPTC_BYTES_PER_PIXEL 0x1aed
+#define mmODM2_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM2_OPTC_WIDTH_CONTROL 0x1aee
+#define mmODM2_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM2_OPTC_INPUT_CLOCK_CONTROL 0x1aef
+#define mmODM2_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM2_OPTC_MEMORY_CONFIG 0x1af0
+#define mmODM2_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define mmODM2_OPTC_INPUT_SPARE_REGISTER 0x1af1
+#define mmODM2_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+// base address: 0xc0
+#define mmODM3_OPTC_INPUT_GLOBAL_CONTROL 0x1afa
+#define mmODM3_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM3_OPTC_DATA_SOURCE_SELECT 0x1afb
+#define mmODM3_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM3_OPTC_DATA_FORMAT_CONTROL 0x1afc
+#define mmODM3_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM3_OPTC_BYTES_PER_PIXEL 0x1afd
+#define mmODM3_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM3_OPTC_WIDTH_CONTROL 0x1afe
+#define mmODM3_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM3_OPTC_INPUT_CLOCK_CONTROL 0x1aff
+#define mmODM3_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM3_OPTC_MEMORY_CONFIG 0x1b00
+#define mmODM3_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define mmODM3_OPTC_INPUT_SPARE_REGISTER 0x1b01
+#define mmODM3_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm4_dispdec
+// base address: 0x100
+#define mmODM4_OPTC_INPUT_GLOBAL_CONTROL 0x1b0a
+#define mmODM4_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM4_OPTC_DATA_SOURCE_SELECT 0x1b0b
+#define mmODM4_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM4_OPTC_DATA_FORMAT_CONTROL 0x1b0c
+#define mmODM4_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM4_OPTC_BYTES_PER_PIXEL 0x1b0d
+#define mmODM4_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM4_OPTC_WIDTH_CONTROL 0x1b0e
+#define mmODM4_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM4_OPTC_INPUT_CLOCK_CONTROL 0x1b0f
+#define mmODM4_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM4_OPTC_MEMORY_CONFIG 0x1b10
+#define mmODM4_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define mmODM4_OPTC_INPUT_SPARE_REGISTER 0x1b11
+#define mmODM4_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm5_dispdec
+// base address: 0x140
+#define mmODM5_OPTC_INPUT_GLOBAL_CONTROL 0x1b1a
+#define mmODM5_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM5_OPTC_DATA_SOURCE_SELECT 0x1b1b
+#define mmODM5_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM5_OPTC_DATA_FORMAT_CONTROL 0x1b1c
+#define mmODM5_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM5_OPTC_BYTES_PER_PIXEL 0x1b1d
+#define mmODM5_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM5_OPTC_WIDTH_CONTROL 0x1b1e
+#define mmODM5_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM5_OPTC_INPUT_CLOCK_CONTROL 0x1b1f
+#define mmODM5_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM5_OPTC_MEMORY_CONFIG 0x1b20
+#define mmODM5_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define mmODM5_OPTC_INPUT_SPARE_REGISTER 0x1b21
+#define mmODM5_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+// base address: 0x0
+#define mmOTG0_OTG_H_TOTAL 0x1b2a
+#define mmOTG0_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG0_OTG_H_BLANK_START_END 0x1b2b
+#define mmOTG0_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG0_OTG_H_SYNC_A 0x1b2c
+#define mmOTG0_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG0_OTG_H_SYNC_A_CNTL 0x1b2d
+#define mmOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_H_TIMING_CNTL 0x1b2e
+#define mmOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL 0x1b2f
+#define mmOTG0_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MIN 0x1b30
+#define mmOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MAX 0x1b31
+#define mmOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MID 0x1b32
+#define mmOTG0_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_CONTROL 0x1b33
+#define mmOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_INT_STATUS 0x1b34
+#define mmOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b35
+#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_V_BLANK_START_END 0x1b36
+#define mmOTG0_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG0_OTG_V_SYNC_A 0x1b37
+#define mmOTG0_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG0_OTG_V_SYNC_A_CNTL 0x1b38
+#define mmOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGA_CNTL 0x1b39
+#define mmOTG0_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3a
+#define mmOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG0_OTG_TRIGB_CNTL 0x1b3b
+#define mmOTG0_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3c
+#define mmOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3d
+#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_FLOW_CONTROL 0x1b3e
+#define mmOTG0_OTG_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b3f
+#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG0_OTG_CONTROL 0x1b41
+#define mmOTG0_OTG_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_CONTROL 0x1b42
+#define mmOTG0_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_PIPE_ABORT_CONTROL 0x1b43
+#define mmOTG0_OTG_PIPE_ABORT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_INTERLACE_CONTROL 0x1b44
+#define mmOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_INTERLACE_STATUS 0x1b45
+#define mmOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47
+#define mmOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48
+#define mmOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG0_OTG_STATUS 0x1b49
+#define mmOTG0_OTG_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_POSITION 0x1b4a
+#define mmOTG0_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_NOM_VERT_POSITION 0x1b4b
+#define mmOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_FRAME_COUNT 0x1b4c
+#define mmOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_VF_COUNT 0x1b4d
+#define mmOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_HV_COUNT 0x1b4e
+#define mmOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_COUNT_CONTROL 0x1b4f
+#define mmOTG0_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_COUNT_RESET 0x1b50
+#define mmOTG0_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b51
+#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG0_OTG_VERT_SYNC_CONTROL 0x1b52
+#define mmOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_STATUS 0x1b53
+#define mmOTG0_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_CONTROL 0x1b54
+#define mmOTG0_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_STATUS 0x1b55
+#define mmOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_CONTROL 0x1b56
+#define mmOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_POSITION 0x1b57
+#define mmOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_FRAME 0x1b58
+#define mmOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG0_OTG_INTERRUPT_CONTROL 0x1b59
+#define mmOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_UPDATE_LOCK 0x1b5a
+#define mmOTG0_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5b
+#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_EN 0x1b5c
+#define mmOTG0_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_DATA_COLOR 0x1b5e
+#define mmOTG0_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT 0x1b5f
+#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG0_OTG_BLACK_COLOR 0x1b60
+#define mmOTG0_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG0_OTG_BLACK_COLOR_EXT 0x1b61
+#define mmOTG0_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b62
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b63
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b64
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b65
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b66
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b67
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC_CNTL 0x1b68
+#define mmOTG0_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_CRC_CNTL2 0x1b69
+#define mmOTG0_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b6a
+#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b6b
+#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b6c
+#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b6d
+#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_DATA_RG 0x1b6e
+#define mmOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_DATA_B 0x1b6f
+#define mmOTG0_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b70
+#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b71
+#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b72
+#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b73
+#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_DATA_RG 0x1b74
+#define mmOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_DATA_B 0x1b75
+#define mmOTG0_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC2_DATA_RG 0x1b76
+#define mmOTG0_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC2_DATA_B 0x1b77
+#define mmOTG0_OTG_CRC2_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC3_DATA_RG 0x1b78
+#define mmOTG0_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC3_DATA_B 0x1b79
+#define mmOTG0_OTG_CRC3_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b7a
+#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b7b
+#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b82
+#define mmOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b83
+#define mmOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_GSL_VSYNC_GAP 0x1b84
+#define mmOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_UPDATE_MODE 0x1b85
+#define mmOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG0_OTG_CLOCK_CONTROL 0x1b86
+#define mmOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VSTARTUP_PARAM 0x1b87
+#define mmOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_VUPDATE_PARAM 0x1b88
+#define mmOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_VREADY_PARAM 0x1b89
+#define mmOTG0_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b8a
+#define mmOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_UPDATE_LOCK 0x1b8b
+#define mmOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG0_OTG_GSL_CONTROL 0x1b8c
+#define mmOTG0_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_GSL_WINDOW_X 0x1b8d
+#define mmOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG0_OTG_GSL_WINDOW_Y 0x1b8e
+#define mmOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG0_OTG_VUPDATE_KEEPOUT 0x1b8f
+#define mmOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL0 0x1b90
+#define mmOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL1 0x1b91
+#define mmOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL2 0x1b92
+#define mmOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL3 0x1b93
+#define mmOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b94
+#define mmOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_MANUAL_FLOW_CONTROL 0x1b95
+#define mmOTG0_OTG_MANUAL_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_RANGE_TIMING_INT_STATUS 0x1b96
+#define mmOTG0_OTG_RANGE_TIMING_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_DRR_CONTROL 0x1b97
+#define mmOTG0_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_REQUEST_CONTROL 0x1b98
+#define mmOTG0_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_DSC_START_POSITION 0x1b99
+#define mmOTG0_OTG_DSC_START_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_PIPE_UPDATE_STATUS 0x1b9a
+#define mmOTG0_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_SPARE_REGISTER 0x1b9c
+#define mmOTG0_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+// base address: 0x200
+#define mmOTG1_OTG_H_TOTAL 0x1baa
+#define mmOTG1_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG1_OTG_H_BLANK_START_END 0x1bab
+#define mmOTG1_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG1_OTG_H_SYNC_A 0x1bac
+#define mmOTG1_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG1_OTG_H_SYNC_A_CNTL 0x1bad
+#define mmOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_H_TIMING_CNTL 0x1bae
+#define mmOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL 0x1baf
+#define mmOTG1_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MIN 0x1bb0
+#define mmOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MAX 0x1bb1
+#define mmOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MID 0x1bb2
+#define mmOTG1_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_CONTROL 0x1bb3
+#define mmOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb4
+#define mmOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb5
+#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_V_BLANK_START_END 0x1bb6
+#define mmOTG1_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG1_OTG_V_SYNC_A 0x1bb7
+#define mmOTG1_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG1_OTG_V_SYNC_A_CNTL 0x1bb8
+#define mmOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGA_CNTL 0x1bb9
+#define mmOTG1_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bba
+#define mmOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG1_OTG_TRIGB_CNTL 0x1bbb
+#define mmOTG1_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbc
+#define mmOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbd
+#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_FLOW_CONTROL 0x1bbe
+#define mmOTG1_OTG_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bbf
+#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG1_OTG_CONTROL 0x1bc1
+#define mmOTG1_OTG_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_CONTROL 0x1bc2
+#define mmOTG1_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_PIPE_ABORT_CONTROL 0x1bc3
+#define mmOTG1_OTG_PIPE_ABORT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_INTERLACE_CONTROL 0x1bc4
+#define mmOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_INTERLACE_STATUS 0x1bc5
+#define mmOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7
+#define mmOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8
+#define mmOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG1_OTG_STATUS 0x1bc9
+#define mmOTG1_OTG_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_POSITION 0x1bca
+#define mmOTG1_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_NOM_VERT_POSITION 0x1bcb
+#define mmOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_FRAME_COUNT 0x1bcc
+#define mmOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_VF_COUNT 0x1bcd
+#define mmOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_HV_COUNT 0x1bce
+#define mmOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_COUNT_CONTROL 0x1bcf
+#define mmOTG1_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_COUNT_RESET 0x1bd0
+#define mmOTG1_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd1
+#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG1_OTG_VERT_SYNC_CONTROL 0x1bd2
+#define mmOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_STATUS 0x1bd3
+#define mmOTG1_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_CONTROL 0x1bd4
+#define mmOTG1_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_STATUS 0x1bd5
+#define mmOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_CONTROL 0x1bd6
+#define mmOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_POSITION 0x1bd7
+#define mmOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_FRAME 0x1bd8
+#define mmOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG1_OTG_INTERRUPT_CONTROL 0x1bd9
+#define mmOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_UPDATE_LOCK 0x1bda
+#define mmOTG1_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdb
+#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_EN 0x1bdc
+#define mmOTG1_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_DATA_COLOR 0x1bde
+#define mmOTG1_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT 0x1bdf
+#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG1_OTG_BLACK_COLOR 0x1be0
+#define mmOTG1_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG1_OTG_BLACK_COLOR_EXT 0x1be1
+#define mmOTG1_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1be2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be3
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be4
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be5
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be6
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be7
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC_CNTL 0x1be8
+#define mmOTG1_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_CRC_CNTL2 0x1be9
+#define mmOTG1_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1bea
+#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1beb
+#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1bec
+#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1bed
+#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_DATA_RG 0x1bee
+#define mmOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_DATA_B 0x1bef
+#define mmOTG1_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bf0
+#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bf1
+#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bf2
+#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bf3
+#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_DATA_RG 0x1bf4
+#define mmOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_DATA_B 0x1bf5
+#define mmOTG1_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC2_DATA_RG 0x1bf6
+#define mmOTG1_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC2_DATA_B 0x1bf7
+#define mmOTG1_OTG_CRC2_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC3_DATA_RG 0x1bf8
+#define mmOTG1_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC3_DATA_B 0x1bf9
+#define mmOTG1_OTG_CRC3_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bfa
+#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bfb
+#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c02
+#define mmOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c03
+#define mmOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_GSL_VSYNC_GAP 0x1c04
+#define mmOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_UPDATE_MODE 0x1c05
+#define mmOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG1_OTG_CLOCK_CONTROL 0x1c06
+#define mmOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VSTARTUP_PARAM 0x1c07
+#define mmOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_VUPDATE_PARAM 0x1c08
+#define mmOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_VREADY_PARAM 0x1c09
+#define mmOTG1_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c0a
+#define mmOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_UPDATE_LOCK 0x1c0b
+#define mmOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG1_OTG_GSL_CONTROL 0x1c0c
+#define mmOTG1_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_GSL_WINDOW_X 0x1c0d
+#define mmOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG1_OTG_GSL_WINDOW_Y 0x1c0e
+#define mmOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG1_OTG_VUPDATE_KEEPOUT 0x1c0f
+#define mmOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL0 0x1c10
+#define mmOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL1 0x1c11
+#define mmOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL2 0x1c12
+#define mmOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL3 0x1c13
+#define mmOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c14
+#define mmOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_MANUAL_FLOW_CONTROL 0x1c15
+#define mmOTG1_OTG_MANUAL_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_RANGE_TIMING_INT_STATUS 0x1c16
+#define mmOTG1_OTG_RANGE_TIMING_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_DRR_CONTROL 0x1c17
+#define mmOTG1_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_REQUEST_CONTROL 0x1c18
+#define mmOTG1_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_DSC_START_POSITION 0x1c19
+#define mmOTG1_OTG_DSC_START_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_PIPE_UPDATE_STATUS 0x1c1a
+#define mmOTG1_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_SPARE_REGISTER 0x1c1c
+#define mmOTG1_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+// base address: 0x400
+#define mmOTG2_OTG_H_TOTAL 0x1c2a
+#define mmOTG2_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG2_OTG_H_BLANK_START_END 0x1c2b
+#define mmOTG2_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG2_OTG_H_SYNC_A 0x1c2c
+#define mmOTG2_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG2_OTG_H_SYNC_A_CNTL 0x1c2d
+#define mmOTG2_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_H_TIMING_CNTL 0x1c2e
+#define mmOTG2_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_V_TOTAL 0x1c2f
+#define mmOTG2_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG2_OTG_V_TOTAL_MIN 0x1c30
+#define mmOTG2_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG2_OTG_V_TOTAL_MAX 0x1c31
+#define mmOTG2_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG2_OTG_V_TOTAL_MID 0x1c32
+#define mmOTG2_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG2_OTG_V_TOTAL_CONTROL 0x1c33
+#define mmOTG2_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_V_TOTAL_INT_STATUS 0x1c34
+#define mmOTG2_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_VSYNC_NOM_INT_STATUS 0x1c35
+#define mmOTG2_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_V_BLANK_START_END 0x1c36
+#define mmOTG2_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG2_OTG_V_SYNC_A 0x1c37
+#define mmOTG2_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG2_OTG_V_SYNC_A_CNTL 0x1c38
+#define mmOTG2_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_TRIGA_CNTL 0x1c39
+#define mmOTG2_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_TRIGA_MANUAL_TRIG 0x1c3a
+#define mmOTG2_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG2_OTG_TRIGB_CNTL 0x1c3b
+#define mmOTG2_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_TRIGB_MANUAL_TRIG 0x1c3c
+#define mmOTG2_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG2_OTG_FORCE_COUNT_NOW_CNTL 0x1c3d
+#define mmOTG2_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_FLOW_CONTROL 0x1c3e
+#define mmOTG2_OTG_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_STEREO_FORCE_NEXT_EYE 0x1c3f
+#define mmOTG2_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG2_OTG_CONTROL 0x1c41
+#define mmOTG2_OTG_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_BLANK_CONTROL 0x1c42
+#define mmOTG2_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_PIPE_ABORT_CONTROL 0x1c43
+#define mmOTG2_OTG_PIPE_ABORT_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_INTERLACE_CONTROL 0x1c44
+#define mmOTG2_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_INTERLACE_STATUS 0x1c45
+#define mmOTG2_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_PIXEL_DATA_READBACK0 0x1c47
+#define mmOTG2_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG2_OTG_PIXEL_DATA_READBACK1 0x1c48
+#define mmOTG2_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG2_OTG_STATUS 0x1c49
+#define mmOTG2_OTG_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_STATUS_POSITION 0x1c4a
+#define mmOTG2_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_NOM_VERT_POSITION 0x1c4b
+#define mmOTG2_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_STATUS_FRAME_COUNT 0x1c4c
+#define mmOTG2_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG2_OTG_STATUS_VF_COUNT 0x1c4d
+#define mmOTG2_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG2_OTG_STATUS_HV_COUNT 0x1c4e
+#define mmOTG2_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG2_OTG_COUNT_CONTROL 0x1c4f
+#define mmOTG2_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_COUNT_RESET 0x1c50
+#define mmOTG2_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1c51
+#define mmOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG2_OTG_VERT_SYNC_CONTROL 0x1c52
+#define mmOTG2_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_STEREO_STATUS 0x1c53
+#define mmOTG2_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_STEREO_CONTROL 0x1c54
+#define mmOTG2_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_SNAPSHOT_STATUS 0x1c55
+#define mmOTG2_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_SNAPSHOT_CONTROL 0x1c56
+#define mmOTG2_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_SNAPSHOT_POSITION 0x1c57
+#define mmOTG2_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_SNAPSHOT_FRAME 0x1c58
+#define mmOTG2_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG2_OTG_INTERRUPT_CONTROL 0x1c59
+#define mmOTG2_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_UPDATE_LOCK 0x1c5a
+#define mmOTG2_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG2_OTG_DOUBLE_BUFFER_CONTROL 0x1c5b
+#define mmOTG2_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_MASTER_EN 0x1c5c
+#define mmOTG2_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG2_OTG_BLANK_DATA_COLOR 0x1c5e
+#define mmOTG2_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG2_OTG_BLANK_DATA_COLOR_EXT 0x1c5f
+#define mmOTG2_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG2_OTG_BLACK_COLOR 0x1c60
+#define mmOTG2_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG2_OTG_BLACK_COLOR_EXT 0x1c61
+#define mmOTG2_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG2_OTG_VERTICAL_INTERRUPT0_POSITION 0x1c62
+#define mmOTG2_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1c63
+#define mmOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_VERTICAL_INTERRUPT1_POSITION 0x1c64
+#define mmOTG2_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1c65
+#define mmOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_VERTICAL_INTERRUPT2_POSITION 0x1c66
+#define mmOTG2_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1c67
+#define mmOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC_CNTL 0x1c68
+#define mmOTG2_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG2_OTG_CRC_CNTL2 0x1c69
+#define mmOTG2_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG2_OTG_CRC0_WINDOWA_X_CONTROL 0x1c6a
+#define mmOTG2_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC0_WINDOWA_Y_CONTROL 0x1c6b
+#define mmOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC0_WINDOWB_X_CONTROL 0x1c6c
+#define mmOTG2_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC0_WINDOWB_Y_CONTROL 0x1c6d
+#define mmOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC0_DATA_RG 0x1c6e
+#define mmOTG2_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG2_OTG_CRC0_DATA_B 0x1c6f
+#define mmOTG2_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG2_OTG_CRC1_WINDOWA_X_CONTROL 0x1c70
+#define mmOTG2_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC1_WINDOWA_Y_CONTROL 0x1c71
+#define mmOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC1_WINDOWB_X_CONTROL 0x1c72
+#define mmOTG2_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC1_WINDOWB_Y_CONTROL 0x1c73
+#define mmOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_CRC1_DATA_RG 0x1c74
+#define mmOTG2_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG2_OTG_CRC1_DATA_B 0x1c75
+#define mmOTG2_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG2_OTG_CRC2_DATA_RG 0x1c76
+#define mmOTG2_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define mmOTG2_OTG_CRC2_DATA_B 0x1c77
+#define mmOTG2_OTG_CRC2_DATA_B_BASE_IDX 2
+#define mmOTG2_OTG_CRC3_DATA_RG 0x1c78
+#define mmOTG2_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define mmOTG2_OTG_CRC3_DATA_B 0x1c79
+#define mmOTG2_OTG_CRC3_DATA_B_BASE_IDX 2
+#define mmOTG2_OTG_CRC_SIG_RED_GREEN_MASK 0x1c7a
+#define mmOTG2_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1c7b
+#define mmOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG2_OTG_STATIC_SCREEN_CONTROL 0x1c82
+#define mmOTG2_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_3D_STRUCTURE_CONTROL 0x1c83
+#define mmOTG2_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_GSL_VSYNC_GAP 0x1c84
+#define mmOTG2_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG2_OTG_MASTER_UPDATE_MODE 0x1c85
+#define mmOTG2_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG2_OTG_CLOCK_CONTROL 0x1c86
+#define mmOTG2_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_VSTARTUP_PARAM 0x1c87
+#define mmOTG2_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG2_OTG_VUPDATE_PARAM 0x1c88
+#define mmOTG2_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG2_OTG_VREADY_PARAM 0x1c89
+#define mmOTG2_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG2_OTG_GLOBAL_SYNC_STATUS 0x1c8a
+#define mmOTG2_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_MASTER_UPDATE_LOCK 0x1c8b
+#define mmOTG2_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG2_OTG_GSL_CONTROL 0x1c8c
+#define mmOTG2_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_GSL_WINDOW_X 0x1c8d
+#define mmOTG2_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG2_OTG_GSL_WINDOW_Y 0x1c8e
+#define mmOTG2_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG2_OTG_VUPDATE_KEEPOUT 0x1c8f
+#define mmOTG2_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG2_OTG_GLOBAL_CONTROL0 0x1c90
+#define mmOTG2_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG2_OTG_GLOBAL_CONTROL1 0x1c91
+#define mmOTG2_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG2_OTG_GLOBAL_CONTROL2 0x1c92
+#define mmOTG2_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG2_OTG_GLOBAL_CONTROL3 0x1c93
+#define mmOTG2_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG2_OTG_TRIG_MANUAL_CONTROL 0x1c94
+#define mmOTG2_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_MANUAL_FLOW_CONTROL 0x1c95
+#define mmOTG2_OTG_MANUAL_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_RANGE_TIMING_INT_STATUS 0x1c96
+#define mmOTG2_OTG_RANGE_TIMING_INT_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_DRR_CONTROL 0x1c97
+#define mmOTG2_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_REQUEST_CONTROL 0x1c98
+#define mmOTG2_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define mmOTG2_OTG_DSC_START_POSITION 0x1c99
+#define mmOTG2_OTG_DSC_START_POSITION_BASE_IDX 2
+#define mmOTG2_OTG_PIPE_UPDATE_STATUS 0x1c9a
+#define mmOTG2_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define mmOTG2_OTG_SPARE_REGISTER 0x1c9c
+#define mmOTG2_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+// base address: 0x600
+#define mmOTG3_OTG_H_TOTAL 0x1caa
+#define mmOTG3_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG3_OTG_H_BLANK_START_END 0x1cab
+#define mmOTG3_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG3_OTG_H_SYNC_A 0x1cac
+#define mmOTG3_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG3_OTG_H_SYNC_A_CNTL 0x1cad
+#define mmOTG3_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_H_TIMING_CNTL 0x1cae
+#define mmOTG3_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_V_TOTAL 0x1caf
+#define mmOTG3_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG3_OTG_V_TOTAL_MIN 0x1cb0
+#define mmOTG3_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG3_OTG_V_TOTAL_MAX 0x1cb1
+#define mmOTG3_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG3_OTG_V_TOTAL_MID 0x1cb2
+#define mmOTG3_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG3_OTG_V_TOTAL_CONTROL 0x1cb3
+#define mmOTG3_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_V_TOTAL_INT_STATUS 0x1cb4
+#define mmOTG3_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_VSYNC_NOM_INT_STATUS 0x1cb5
+#define mmOTG3_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_V_BLANK_START_END 0x1cb6
+#define mmOTG3_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG3_OTG_V_SYNC_A 0x1cb7
+#define mmOTG3_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG3_OTG_V_SYNC_A_CNTL 0x1cb8
+#define mmOTG3_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_TRIGA_CNTL 0x1cb9
+#define mmOTG3_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_TRIGA_MANUAL_TRIG 0x1cba
+#define mmOTG3_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG3_OTG_TRIGB_CNTL 0x1cbb
+#define mmOTG3_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_TRIGB_MANUAL_TRIG 0x1cbc
+#define mmOTG3_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG3_OTG_FORCE_COUNT_NOW_CNTL 0x1cbd
+#define mmOTG3_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_FLOW_CONTROL 0x1cbe
+#define mmOTG3_OTG_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_STEREO_FORCE_NEXT_EYE 0x1cbf
+#define mmOTG3_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG3_OTG_CONTROL 0x1cc1
+#define mmOTG3_OTG_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_BLANK_CONTROL 0x1cc2
+#define mmOTG3_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_PIPE_ABORT_CONTROL 0x1cc3
+#define mmOTG3_OTG_PIPE_ABORT_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_INTERLACE_CONTROL 0x1cc4
+#define mmOTG3_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_INTERLACE_STATUS 0x1cc5
+#define mmOTG3_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_PIXEL_DATA_READBACK0 0x1cc7
+#define mmOTG3_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG3_OTG_PIXEL_DATA_READBACK1 0x1cc8
+#define mmOTG3_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG3_OTG_STATUS 0x1cc9
+#define mmOTG3_OTG_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_STATUS_POSITION 0x1cca
+#define mmOTG3_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_NOM_VERT_POSITION 0x1ccb
+#define mmOTG3_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_STATUS_FRAME_COUNT 0x1ccc
+#define mmOTG3_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG3_OTG_STATUS_VF_COUNT 0x1ccd
+#define mmOTG3_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG3_OTG_STATUS_HV_COUNT 0x1cce
+#define mmOTG3_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG3_OTG_COUNT_CONTROL 0x1ccf
+#define mmOTG3_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_COUNT_RESET 0x1cd0
+#define mmOTG3_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1cd1
+#define mmOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG3_OTG_VERT_SYNC_CONTROL 0x1cd2
+#define mmOTG3_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_STEREO_STATUS 0x1cd3
+#define mmOTG3_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_STEREO_CONTROL 0x1cd4
+#define mmOTG3_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_SNAPSHOT_STATUS 0x1cd5
+#define mmOTG3_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_SNAPSHOT_CONTROL 0x1cd6
+#define mmOTG3_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_SNAPSHOT_POSITION 0x1cd7
+#define mmOTG3_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_SNAPSHOT_FRAME 0x1cd8
+#define mmOTG3_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG3_OTG_INTERRUPT_CONTROL 0x1cd9
+#define mmOTG3_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_UPDATE_LOCK 0x1cda
+#define mmOTG3_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG3_OTG_DOUBLE_BUFFER_CONTROL 0x1cdb
+#define mmOTG3_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_MASTER_EN 0x1cdc
+#define mmOTG3_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG3_OTG_BLANK_DATA_COLOR 0x1cde
+#define mmOTG3_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG3_OTG_BLANK_DATA_COLOR_EXT 0x1cdf
+#define mmOTG3_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG3_OTG_BLACK_COLOR 0x1ce0
+#define mmOTG3_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG3_OTG_BLACK_COLOR_EXT 0x1ce1
+#define mmOTG3_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT0_POSITION 0x1ce2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1ce3
+#define mmOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT1_POSITION 0x1ce4
+#define mmOTG3_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1ce5
+#define mmOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT2_POSITION 0x1ce6
+#define mmOTG3_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1ce7
+#define mmOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC_CNTL 0x1ce8
+#define mmOTG3_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG3_OTG_CRC_CNTL2 0x1ce9
+#define mmOTG3_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG3_OTG_CRC0_WINDOWA_X_CONTROL 0x1cea
+#define mmOTG3_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC0_WINDOWA_Y_CONTROL 0x1ceb
+#define mmOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC0_WINDOWB_X_CONTROL 0x1cec
+#define mmOTG3_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC0_WINDOWB_Y_CONTROL 0x1ced
+#define mmOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC0_DATA_RG 0x1cee
+#define mmOTG3_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG3_OTG_CRC0_DATA_B 0x1cef
+#define mmOTG3_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG3_OTG_CRC1_WINDOWA_X_CONTROL 0x1cf0
+#define mmOTG3_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC1_WINDOWA_Y_CONTROL 0x1cf1
+#define mmOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC1_WINDOWB_X_CONTROL 0x1cf2
+#define mmOTG3_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC1_WINDOWB_Y_CONTROL 0x1cf3
+#define mmOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_CRC1_DATA_RG 0x1cf4
+#define mmOTG3_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG3_OTG_CRC1_DATA_B 0x1cf5
+#define mmOTG3_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG3_OTG_CRC2_DATA_RG 0x1cf6
+#define mmOTG3_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define mmOTG3_OTG_CRC2_DATA_B 0x1cf7
+#define mmOTG3_OTG_CRC2_DATA_B_BASE_IDX 2
+#define mmOTG3_OTG_CRC3_DATA_RG 0x1cf8
+#define mmOTG3_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define mmOTG3_OTG_CRC3_DATA_B 0x1cf9
+#define mmOTG3_OTG_CRC3_DATA_B_BASE_IDX 2
+#define mmOTG3_OTG_CRC_SIG_RED_GREEN_MASK 0x1cfa
+#define mmOTG3_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1cfb
+#define mmOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG3_OTG_STATIC_SCREEN_CONTROL 0x1d02
+#define mmOTG3_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_3D_STRUCTURE_CONTROL 0x1d03
+#define mmOTG3_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_GSL_VSYNC_GAP 0x1d04
+#define mmOTG3_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG3_OTG_MASTER_UPDATE_MODE 0x1d05
+#define mmOTG3_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG3_OTG_CLOCK_CONTROL 0x1d06
+#define mmOTG3_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_VSTARTUP_PARAM 0x1d07
+#define mmOTG3_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG3_OTG_VUPDATE_PARAM 0x1d08
+#define mmOTG3_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG3_OTG_VREADY_PARAM 0x1d09
+#define mmOTG3_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG3_OTG_GLOBAL_SYNC_STATUS 0x1d0a
+#define mmOTG3_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_MASTER_UPDATE_LOCK 0x1d0b
+#define mmOTG3_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG3_OTG_GSL_CONTROL 0x1d0c
+#define mmOTG3_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_GSL_WINDOW_X 0x1d0d
+#define mmOTG3_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG3_OTG_GSL_WINDOW_Y 0x1d0e
+#define mmOTG3_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG3_OTG_VUPDATE_KEEPOUT 0x1d0f
+#define mmOTG3_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG3_OTG_GLOBAL_CONTROL0 0x1d10
+#define mmOTG3_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG3_OTG_GLOBAL_CONTROL1 0x1d11
+#define mmOTG3_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG3_OTG_GLOBAL_CONTROL2 0x1d12
+#define mmOTG3_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG3_OTG_GLOBAL_CONTROL3 0x1d13
+#define mmOTG3_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG3_OTG_TRIG_MANUAL_CONTROL 0x1d14
+#define mmOTG3_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_MANUAL_FLOW_CONTROL 0x1d15
+#define mmOTG3_OTG_MANUAL_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_RANGE_TIMING_INT_STATUS 0x1d16
+#define mmOTG3_OTG_RANGE_TIMING_INT_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_DRR_CONTROL 0x1d17
+#define mmOTG3_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_REQUEST_CONTROL 0x1d18
+#define mmOTG3_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define mmOTG3_OTG_DSC_START_POSITION 0x1d19
+#define mmOTG3_OTG_DSC_START_POSITION_BASE_IDX 2
+#define mmOTG3_OTG_PIPE_UPDATE_STATUS 0x1d1a
+#define mmOTG3_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define mmOTG3_OTG_SPARE_REGISTER 0x1d1c
+#define mmOTG3_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg4_dispdec
+// base address: 0x800
+#define mmOTG4_OTG_H_TOTAL 0x1d2a
+#define mmOTG4_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG4_OTG_H_BLANK_START_END 0x1d2b
+#define mmOTG4_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG4_OTG_H_SYNC_A 0x1d2c
+#define mmOTG4_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG4_OTG_H_SYNC_A_CNTL 0x1d2d
+#define mmOTG4_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_H_TIMING_CNTL 0x1d2e
+#define mmOTG4_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_V_TOTAL 0x1d2f
+#define mmOTG4_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG4_OTG_V_TOTAL_MIN 0x1d30
+#define mmOTG4_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG4_OTG_V_TOTAL_MAX 0x1d31
+#define mmOTG4_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG4_OTG_V_TOTAL_MID 0x1d32
+#define mmOTG4_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG4_OTG_V_TOTAL_CONTROL 0x1d33
+#define mmOTG4_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_V_TOTAL_INT_STATUS 0x1d34
+#define mmOTG4_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_VSYNC_NOM_INT_STATUS 0x1d35
+#define mmOTG4_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_V_BLANK_START_END 0x1d36
+#define mmOTG4_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG4_OTG_V_SYNC_A 0x1d37
+#define mmOTG4_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG4_OTG_V_SYNC_A_CNTL 0x1d38
+#define mmOTG4_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_TRIGA_CNTL 0x1d39
+#define mmOTG4_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_TRIGA_MANUAL_TRIG 0x1d3a
+#define mmOTG4_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG4_OTG_TRIGB_CNTL 0x1d3b
+#define mmOTG4_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_TRIGB_MANUAL_TRIG 0x1d3c
+#define mmOTG4_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG4_OTG_FORCE_COUNT_NOW_CNTL 0x1d3d
+#define mmOTG4_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_FLOW_CONTROL 0x1d3e
+#define mmOTG4_OTG_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_STEREO_FORCE_NEXT_EYE 0x1d3f
+#define mmOTG4_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG4_OTG_CONTROL 0x1d41
+#define mmOTG4_OTG_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_BLANK_CONTROL 0x1d42
+#define mmOTG4_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_PIPE_ABORT_CONTROL 0x1d43
+#define mmOTG4_OTG_PIPE_ABORT_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_INTERLACE_CONTROL 0x1d44
+#define mmOTG4_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_INTERLACE_STATUS 0x1d45
+#define mmOTG4_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_PIXEL_DATA_READBACK0 0x1d47
+#define mmOTG4_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG4_OTG_PIXEL_DATA_READBACK1 0x1d48
+#define mmOTG4_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG4_OTG_STATUS 0x1d49
+#define mmOTG4_OTG_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_STATUS_POSITION 0x1d4a
+#define mmOTG4_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_NOM_VERT_POSITION 0x1d4b
+#define mmOTG4_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_STATUS_FRAME_COUNT 0x1d4c
+#define mmOTG4_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG4_OTG_STATUS_VF_COUNT 0x1d4d
+#define mmOTG4_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG4_OTG_STATUS_HV_COUNT 0x1d4e
+#define mmOTG4_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG4_OTG_COUNT_CONTROL 0x1d4f
+#define mmOTG4_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_COUNT_RESET 0x1d50
+#define mmOTG4_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG4_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1d51
+#define mmOTG4_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG4_OTG_VERT_SYNC_CONTROL 0x1d52
+#define mmOTG4_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_STEREO_STATUS 0x1d53
+#define mmOTG4_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_STEREO_CONTROL 0x1d54
+#define mmOTG4_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_SNAPSHOT_STATUS 0x1d55
+#define mmOTG4_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_SNAPSHOT_CONTROL 0x1d56
+#define mmOTG4_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_SNAPSHOT_POSITION 0x1d57
+#define mmOTG4_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_SNAPSHOT_FRAME 0x1d58
+#define mmOTG4_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG4_OTG_INTERRUPT_CONTROL 0x1d59
+#define mmOTG4_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_UPDATE_LOCK 0x1d5a
+#define mmOTG4_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG4_OTG_DOUBLE_BUFFER_CONTROL 0x1d5b
+#define mmOTG4_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_MASTER_EN 0x1d5c
+#define mmOTG4_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG4_OTG_BLANK_DATA_COLOR 0x1d5e
+#define mmOTG4_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG4_OTG_BLANK_DATA_COLOR_EXT 0x1d5f
+#define mmOTG4_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG4_OTG_BLACK_COLOR 0x1d60
+#define mmOTG4_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG4_OTG_BLACK_COLOR_EXT 0x1d61
+#define mmOTG4_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG4_OTG_VERTICAL_INTERRUPT0_POSITION 0x1d62
+#define mmOTG4_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1d63
+#define mmOTG4_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_VERTICAL_INTERRUPT1_POSITION 0x1d64
+#define mmOTG4_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1d65
+#define mmOTG4_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_VERTICAL_INTERRUPT2_POSITION 0x1d66
+#define mmOTG4_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1d67
+#define mmOTG4_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC_CNTL 0x1d68
+#define mmOTG4_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG4_OTG_CRC_CNTL2 0x1d69
+#define mmOTG4_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG4_OTG_CRC0_WINDOWA_X_CONTROL 0x1d6a
+#define mmOTG4_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC0_WINDOWA_Y_CONTROL 0x1d6b
+#define mmOTG4_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC0_WINDOWB_X_CONTROL 0x1d6c
+#define mmOTG4_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC0_WINDOWB_Y_CONTROL 0x1d6d
+#define mmOTG4_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC0_DATA_RG 0x1d6e
+#define mmOTG4_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG4_OTG_CRC0_DATA_B 0x1d6f
+#define mmOTG4_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG4_OTG_CRC1_WINDOWA_X_CONTROL 0x1d70
+#define mmOTG4_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC1_WINDOWA_Y_CONTROL 0x1d71
+#define mmOTG4_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC1_WINDOWB_X_CONTROL 0x1d72
+#define mmOTG4_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC1_WINDOWB_Y_CONTROL 0x1d73
+#define mmOTG4_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_CRC1_DATA_RG 0x1d74
+#define mmOTG4_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG4_OTG_CRC1_DATA_B 0x1d75
+#define mmOTG4_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG4_OTG_CRC2_DATA_RG 0x1d76
+#define mmOTG4_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define mmOTG4_OTG_CRC2_DATA_B 0x1d77
+#define mmOTG4_OTG_CRC2_DATA_B_BASE_IDX 2
+#define mmOTG4_OTG_CRC3_DATA_RG 0x1d78
+#define mmOTG4_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define mmOTG4_OTG_CRC3_DATA_B 0x1d79
+#define mmOTG4_OTG_CRC3_DATA_B_BASE_IDX 2
+#define mmOTG4_OTG_CRC_SIG_RED_GREEN_MASK 0x1d7a
+#define mmOTG4_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1d7b
+#define mmOTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG4_OTG_STATIC_SCREEN_CONTROL 0x1d82
+#define mmOTG4_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_3D_STRUCTURE_CONTROL 0x1d83
+#define mmOTG4_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_GSL_VSYNC_GAP 0x1d84
+#define mmOTG4_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG4_OTG_MASTER_UPDATE_MODE 0x1d85
+#define mmOTG4_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG4_OTG_CLOCK_CONTROL 0x1d86
+#define mmOTG4_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_VSTARTUP_PARAM 0x1d87
+#define mmOTG4_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG4_OTG_VUPDATE_PARAM 0x1d88
+#define mmOTG4_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG4_OTG_VREADY_PARAM 0x1d89
+#define mmOTG4_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG4_OTG_GLOBAL_SYNC_STATUS 0x1d8a
+#define mmOTG4_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_MASTER_UPDATE_LOCK 0x1d8b
+#define mmOTG4_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG4_OTG_GSL_CONTROL 0x1d8c
+#define mmOTG4_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_GSL_WINDOW_X 0x1d8d
+#define mmOTG4_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG4_OTG_GSL_WINDOW_Y 0x1d8e
+#define mmOTG4_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG4_OTG_VUPDATE_KEEPOUT 0x1d8f
+#define mmOTG4_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG4_OTG_GLOBAL_CONTROL0 0x1d90
+#define mmOTG4_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG4_OTG_GLOBAL_CONTROL1 0x1d91
+#define mmOTG4_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG4_OTG_GLOBAL_CONTROL2 0x1d92
+#define mmOTG4_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG4_OTG_GLOBAL_CONTROL3 0x1d93
+#define mmOTG4_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG4_OTG_TRIG_MANUAL_CONTROL 0x1d94
+#define mmOTG4_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_MANUAL_FLOW_CONTROL 0x1d95
+#define mmOTG4_OTG_MANUAL_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_RANGE_TIMING_INT_STATUS 0x1d96
+#define mmOTG4_OTG_RANGE_TIMING_INT_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_DRR_CONTROL 0x1d97
+#define mmOTG4_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_REQUEST_CONTROL 0x1d98
+#define mmOTG4_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define mmOTG4_OTG_DSC_START_POSITION 0x1d99
+#define mmOTG4_OTG_DSC_START_POSITION_BASE_IDX 2
+#define mmOTG4_OTG_PIPE_UPDATE_STATUS 0x1d9a
+#define mmOTG4_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define mmOTG4_OTG_SPARE_REGISTER 0x1d9c
+#define mmOTG4_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg5_dispdec
+// base address: 0xa00
+#define mmOTG5_OTG_H_TOTAL 0x1daa
+#define mmOTG5_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG5_OTG_H_BLANK_START_END 0x1dab
+#define mmOTG5_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG5_OTG_H_SYNC_A 0x1dac
+#define mmOTG5_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG5_OTG_H_SYNC_A_CNTL 0x1dad
+#define mmOTG5_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_H_TIMING_CNTL 0x1dae
+#define mmOTG5_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_V_TOTAL 0x1daf
+#define mmOTG5_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG5_OTG_V_TOTAL_MIN 0x1db0
+#define mmOTG5_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG5_OTG_V_TOTAL_MAX 0x1db1
+#define mmOTG5_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG5_OTG_V_TOTAL_MID 0x1db2
+#define mmOTG5_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG5_OTG_V_TOTAL_CONTROL 0x1db3
+#define mmOTG5_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_V_TOTAL_INT_STATUS 0x1db4
+#define mmOTG5_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_VSYNC_NOM_INT_STATUS 0x1db5
+#define mmOTG5_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_V_BLANK_START_END 0x1db6
+#define mmOTG5_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG5_OTG_V_SYNC_A 0x1db7
+#define mmOTG5_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG5_OTG_V_SYNC_A_CNTL 0x1db8
+#define mmOTG5_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_TRIGA_CNTL 0x1db9
+#define mmOTG5_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_TRIGA_MANUAL_TRIG 0x1dba
+#define mmOTG5_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG5_OTG_TRIGB_CNTL 0x1dbb
+#define mmOTG5_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_TRIGB_MANUAL_TRIG 0x1dbc
+#define mmOTG5_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG5_OTG_FORCE_COUNT_NOW_CNTL 0x1dbd
+#define mmOTG5_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_FLOW_CONTROL 0x1dbe
+#define mmOTG5_OTG_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_STEREO_FORCE_NEXT_EYE 0x1dbf
+#define mmOTG5_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG5_OTG_CONTROL 0x1dc1
+#define mmOTG5_OTG_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_BLANK_CONTROL 0x1dc2
+#define mmOTG5_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_PIPE_ABORT_CONTROL 0x1dc3
+#define mmOTG5_OTG_PIPE_ABORT_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_INTERLACE_CONTROL 0x1dc4
+#define mmOTG5_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_INTERLACE_STATUS 0x1dc5
+#define mmOTG5_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_PIXEL_DATA_READBACK0 0x1dc7
+#define mmOTG5_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG5_OTG_PIXEL_DATA_READBACK1 0x1dc8
+#define mmOTG5_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG5_OTG_STATUS 0x1dc9
+#define mmOTG5_OTG_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_STATUS_POSITION 0x1dca
+#define mmOTG5_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_NOM_VERT_POSITION 0x1dcb
+#define mmOTG5_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_STATUS_FRAME_COUNT 0x1dcc
+#define mmOTG5_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG5_OTG_STATUS_VF_COUNT 0x1dcd
+#define mmOTG5_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG5_OTG_STATUS_HV_COUNT 0x1dce
+#define mmOTG5_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG5_OTG_COUNT_CONTROL 0x1dcf
+#define mmOTG5_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_COUNT_RESET 0x1dd0
+#define mmOTG5_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG5_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1dd1
+#define mmOTG5_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG5_OTG_VERT_SYNC_CONTROL 0x1dd2
+#define mmOTG5_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_STEREO_STATUS 0x1dd3
+#define mmOTG5_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_STEREO_CONTROL 0x1dd4
+#define mmOTG5_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_SNAPSHOT_STATUS 0x1dd5
+#define mmOTG5_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_SNAPSHOT_CONTROL 0x1dd6
+#define mmOTG5_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_SNAPSHOT_POSITION 0x1dd7
+#define mmOTG5_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_SNAPSHOT_FRAME 0x1dd8
+#define mmOTG5_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG5_OTG_INTERRUPT_CONTROL 0x1dd9
+#define mmOTG5_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_UPDATE_LOCK 0x1dda
+#define mmOTG5_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG5_OTG_DOUBLE_BUFFER_CONTROL 0x1ddb
+#define mmOTG5_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_MASTER_EN 0x1ddc
+#define mmOTG5_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG5_OTG_BLANK_DATA_COLOR 0x1dde
+#define mmOTG5_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG5_OTG_BLANK_DATA_COLOR_EXT 0x1ddf
+#define mmOTG5_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG5_OTG_BLACK_COLOR 0x1de0
+#define mmOTG5_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG5_OTG_BLACK_COLOR_EXT 0x1de1
+#define mmOTG5_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT0_POSITION 0x1de2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1de3
+#define mmOTG5_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT1_POSITION 0x1de4
+#define mmOTG5_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1de5
+#define mmOTG5_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT2_POSITION 0x1de6
+#define mmOTG5_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1de7
+#define mmOTG5_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC_CNTL 0x1de8
+#define mmOTG5_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG5_OTG_CRC_CNTL2 0x1de9
+#define mmOTG5_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG5_OTG_CRC0_WINDOWA_X_CONTROL 0x1dea
+#define mmOTG5_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC0_WINDOWA_Y_CONTROL 0x1deb
+#define mmOTG5_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC0_WINDOWB_X_CONTROL 0x1dec
+#define mmOTG5_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC0_WINDOWB_Y_CONTROL 0x1ded
+#define mmOTG5_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC0_DATA_RG 0x1dee
+#define mmOTG5_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG5_OTG_CRC0_DATA_B 0x1def
+#define mmOTG5_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG5_OTG_CRC1_WINDOWA_X_CONTROL 0x1df0
+#define mmOTG5_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC1_WINDOWA_Y_CONTROL 0x1df1
+#define mmOTG5_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC1_WINDOWB_X_CONTROL 0x1df2
+#define mmOTG5_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC1_WINDOWB_Y_CONTROL 0x1df3
+#define mmOTG5_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_CRC1_DATA_RG 0x1df4
+#define mmOTG5_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG5_OTG_CRC1_DATA_B 0x1df5
+#define mmOTG5_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG5_OTG_CRC2_DATA_RG 0x1df6
+#define mmOTG5_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define mmOTG5_OTG_CRC2_DATA_B 0x1df7
+#define mmOTG5_OTG_CRC2_DATA_B_BASE_IDX 2
+#define mmOTG5_OTG_CRC3_DATA_RG 0x1df8
+#define mmOTG5_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define mmOTG5_OTG_CRC3_DATA_B 0x1df9
+#define mmOTG5_OTG_CRC3_DATA_B_BASE_IDX 2
+#define mmOTG5_OTG_CRC_SIG_RED_GREEN_MASK 0x1dfa
+#define mmOTG5_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1dfb
+#define mmOTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG5_OTG_STATIC_SCREEN_CONTROL 0x1e02
+#define mmOTG5_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_3D_STRUCTURE_CONTROL 0x1e03
+#define mmOTG5_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_GSL_VSYNC_GAP 0x1e04
+#define mmOTG5_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG5_OTG_MASTER_UPDATE_MODE 0x1e05
+#define mmOTG5_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG5_OTG_CLOCK_CONTROL 0x1e06
+#define mmOTG5_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_VSTARTUP_PARAM 0x1e07
+#define mmOTG5_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG5_OTG_VUPDATE_PARAM 0x1e08
+#define mmOTG5_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG5_OTG_VREADY_PARAM 0x1e09
+#define mmOTG5_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG5_OTG_GLOBAL_SYNC_STATUS 0x1e0a
+#define mmOTG5_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_MASTER_UPDATE_LOCK 0x1e0b
+#define mmOTG5_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG5_OTG_GSL_CONTROL 0x1e0c
+#define mmOTG5_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_GSL_WINDOW_X 0x1e0d
+#define mmOTG5_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG5_OTG_GSL_WINDOW_Y 0x1e0e
+#define mmOTG5_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG5_OTG_VUPDATE_KEEPOUT 0x1e0f
+#define mmOTG5_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG5_OTG_GLOBAL_CONTROL0 0x1e10
+#define mmOTG5_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG5_OTG_GLOBAL_CONTROL1 0x1e11
+#define mmOTG5_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG5_OTG_GLOBAL_CONTROL2 0x1e12
+#define mmOTG5_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG5_OTG_GLOBAL_CONTROL3 0x1e13
+#define mmOTG5_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG5_OTG_TRIG_MANUAL_CONTROL 0x1e14
+#define mmOTG5_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_MANUAL_FLOW_CONTROL 0x1e15
+#define mmOTG5_OTG_MANUAL_FLOW_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_RANGE_TIMING_INT_STATUS 0x1e16
+#define mmOTG5_OTG_RANGE_TIMING_INT_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_DRR_CONTROL 0x1e17
+#define mmOTG5_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_REQUEST_CONTROL 0x1e18
+#define mmOTG5_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define mmOTG5_OTG_DSC_START_POSITION 0x1e19
+#define mmOTG5_OTG_DSC_START_POSITION_BASE_IDX 2
+#define mmOTG5_OTG_PIPE_UPDATE_STATUS 0x1e1a
+#define mmOTG5_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define mmOTG5_OTG_SPARE_REGISTER 0x1e1c
+#define mmOTG5_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+// base address: 0x0
+#define mmDWB_SOURCE_SELECT 0x1e2a
+#define mmDWB_SOURCE_SELECT_BASE_IDX 2
+#define mmGSL_SOURCE_SELECT 0x1e2b
+#define mmGSL_SOURCE_SELECT_BASE_IDX 2
+#define mmOPTC_CLOCK_CONTROL 0x1e2c
+#define mmOPTC_CLOCK_CONTROL_BASE_IDX 2
+#define mmODM_MEM_PWR_CTRL 0x1e2d
+#define mmODM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmODM_MEM_PWR_CTRL2 0x1e2e
+#define mmODM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmODM_MEM_PWR_CTRL3 0x1e2f
+#define mmODM_MEM_PWR_CTRL3_BASE_IDX 2
+#define mmODM_MEM_PWR_STATUS 0x1e30
+#define mmODM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmOPTC_MISC_SPARE_REGISTER 0x1e31
+#define mmOPTC_MISC_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x79a8
+#define mmDC_PERFMON17_PERFCOUNTER_CNTL 0x1e6a
+#define mmDC_PERFMON17_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON17_PERFCOUNTER_CNTL2 0x1e6b
+#define mmDC_PERFMON17_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON17_PERFCOUNTER_STATE 0x1e6c
+#define mmDC_PERFMON17_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON17_PERFMON_CNTL 0x1e6d
+#define mmDC_PERFMON17_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON17_PERFMON_CNTL2 0x1e6e
+#define mmDC_PERFMON17_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON17_PERFMON_CVALUE_INT_MISC 0x1e6f
+#define mmDC_PERFMON17_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON17_PERFMON_CVALUE_LOW 0x1e70
+#define mmDC_PERFMON17_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON17_PERFMON_HI 0x1e71
+#define mmDC_PERFMON17_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON17_PERFMON_LOW 0x1e72
+#define mmDC_PERFMON17_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+// base address: 0x0
+#define mmDC_I2C_CONTROL 0x1e98
+#define mmDC_I2C_CONTROL_BASE_IDX 2
+#define mmDC_I2C_ARBITRATION 0x1e99
+#define mmDC_I2C_ARBITRATION_BASE_IDX 2
+#define mmDC_I2C_INTERRUPT_CONTROL 0x1e9a
+#define mmDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDC_I2C_SW_STATUS 0x1e9b
+#define mmDC_I2C_SW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC1_HW_STATUS 0x1e9c
+#define mmDC_I2C_DDC1_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC2_HW_STATUS 0x1e9d
+#define mmDC_I2C_DDC2_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC3_HW_STATUS 0x1e9e
+#define mmDC_I2C_DDC3_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC4_HW_STATUS 0x1e9f
+#define mmDC_I2C_DDC4_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC5_HW_STATUS 0x1ea0
+#define mmDC_I2C_DDC5_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC1_SPEED 0x1ea2
+#define mmDC_I2C_DDC1_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC1_SETUP 0x1ea3
+#define mmDC_I2C_DDC1_SETUP_BASE_IDX 2
+#define mmDC_I2C_DDC2_SPEED 0x1ea4
+#define mmDC_I2C_DDC2_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC2_SETUP 0x1ea5
+#define mmDC_I2C_DDC2_SETUP_BASE_IDX 2
+#define mmDC_I2C_DDC3_SPEED 0x1ea6
+#define mmDC_I2C_DDC3_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC3_SETUP 0x1ea7
+#define mmDC_I2C_DDC3_SETUP_BASE_IDX 2
+#define mmDC_I2C_DDC4_SPEED 0x1ea8
+#define mmDC_I2C_DDC4_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC4_SETUP 0x1ea9
+#define mmDC_I2C_DDC4_SETUP_BASE_IDX 2
+#define mmDC_I2C_DDC5_SPEED 0x1eaa
+#define mmDC_I2C_DDC5_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC5_SETUP 0x1eab
+#define mmDC_I2C_DDC5_SETUP_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION0 0x1eae
+#define mmDC_I2C_TRANSACTION0_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION1 0x1eaf
+#define mmDC_I2C_TRANSACTION1_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION2 0x1eb0
+#define mmDC_I2C_TRANSACTION2_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION3 0x1eb1
+#define mmDC_I2C_TRANSACTION3_BASE_IDX 2
+#define mmDC_I2C_DATA 0x1eb2
+#define mmDC_I2C_DATA_BASE_IDX 2
+#define mmDC_I2C_EDID_DETECT_CTRL 0x1eb6
+#define mmDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2
+#define mmDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7
+#define mmDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+// base address: 0x0
+#define mmDIO_SCRATCH0 0x1eca
+#define mmDIO_SCRATCH0_BASE_IDX 2
+#define mmDIO_SCRATCH1 0x1ecb
+#define mmDIO_SCRATCH1_BASE_IDX 2
+#define mmDIO_SCRATCH2 0x1ecc
+#define mmDIO_SCRATCH2_BASE_IDX 2
+#define mmDIO_SCRATCH3 0x1ecd
+#define mmDIO_SCRATCH3_BASE_IDX 2
+#define mmDIO_SCRATCH4 0x1ece
+#define mmDIO_SCRATCH4_BASE_IDX 2
+#define mmDIO_SCRATCH5 0x1ecf
+#define mmDIO_SCRATCH5_BASE_IDX 2
+#define mmDIO_SCRATCH6 0x1ed0
+#define mmDIO_SCRATCH6_BASE_IDX 2
+#define mmDIO_SCRATCH7 0x1ed1
+#define mmDIO_SCRATCH7_BASE_IDX 2
+#define mmDCE_VCE_CONTROL 0x1ed2
+#define mmDCE_VCE_CONTROL_BASE_IDX 2
+#define mmDIO_MEM_PWR_STATUS 0x1edd
+#define mmDIO_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL 0x1ede
+#define mmDIO_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL2 0x1edf
+#define mmDIO_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmDIO_CLK_CNTL 0x1ee0
+#define mmDIO_CLK_CNTL_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL3 0x1ee1
+#define mmDIO_MEM_PWR_CTRL3_BASE_IDX 2
+#define mmDIO_POWER_MANAGEMENT_CNTL 0x1ee4
+#define mmDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2
+#define mmDIG_SOFT_RESET 0x1eee
+#define mmDIG_SOFT_RESET_BASE_IDX 2
+#define mmDIO_MEM_PWR_STATUS1 0x1ef0
+#define mmDIO_MEM_PWR_STATUS1_BASE_IDX 2
+#define mmDIO_CLK_CNTL2 0x1ef2
+#define mmDIO_CLK_CNTL2_BASE_IDX 2
+#define mmDIO_CLK_CNTL3 0x1ef3
+#define mmDIO_CLK_CNTL3_BASE_IDX 2
+#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff
+#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2
+#define mmDIO_PSP_INTERRUPT_STATUS 0x1f00
+#define mmDIO_PSP_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIO_PSP_INTERRUPT_CLEAR 0x1f01
+#define mmDIO_PSP_INTERRUPT_CLEAR_BASE_IDX 2
+#define mmDIO_GENERIC_INTERRUPT_MESSAGE 0x1f02
+#define mmDIO_GENERIC_INTERRUPT_MESSAGE_BASE_IDX 2
+#define mmDIO_GENERIC_INTERRUPT_CLEAR 0x1f03
+#define mmDIO_GENERIC_INTERRUPT_CLEAR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+// base address: 0x0
+#define mmHPD0_DC_HPD_INT_STATUS 0x1f14
+#define mmHPD0_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD0_DC_HPD_INT_CONTROL 0x1f15
+#define mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_CONTROL 0x1f16
+#define mmHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define mmHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define mmHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define mmHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_CONTROL 0x1f1e
+#define mmHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+// base address: 0x40
+#define mmHPD2_DC_HPD_INT_STATUS 0x1f24
+#define mmHPD2_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD2_DC_HPD_INT_CONTROL 0x1f25
+#define mmHPD2_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD2_DC_HPD_CONTROL 0x1f26
+#define mmHPD2_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD2_DC_HPD_FAST_TRAIN_CNTL 0x1f27
+#define mmHPD2_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD2_DC_HPD_TOGGLE_FILT_CNTL 0x1f28
+#define mmHPD2_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+// base address: 0x60
+#define mmHPD3_DC_HPD_INT_STATUS 0x1f2c
+#define mmHPD3_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD3_DC_HPD_INT_CONTROL 0x1f2d
+#define mmHPD3_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD3_DC_HPD_CONTROL 0x1f2e
+#define mmHPD3_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD3_DC_HPD_FAST_TRAIN_CNTL 0x1f2f
+#define mmHPD3_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD3_DC_HPD_TOGGLE_FILT_CNTL 0x1f30
+#define mmHPD3_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+// base address: 0x80
+#define mmHPD4_DC_HPD_INT_STATUS 0x1f34
+#define mmHPD4_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD4_DC_HPD_INT_CONTROL 0x1f35
+#define mmHPD4_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD4_DC_HPD_CONTROL 0x1f36
+#define mmHPD4_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD4_DC_HPD_FAST_TRAIN_CNTL 0x1f37
+#define mmHPD4_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD4_DC_HPD_TOGGLE_FILT_CNTL 0x1f38
+#define mmHPD4_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+// base address: 0x7d10
+#define mmDC_PERFMON18_PERFCOUNTER_CNTL 0x1f44
+#define mmDC_PERFMON18_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON18_PERFCOUNTER_CNTL2 0x1f45
+#define mmDC_PERFMON18_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON18_PERFCOUNTER_STATE 0x1f46
+#define mmDC_PERFMON18_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON18_PERFMON_CNTL 0x1f47
+#define mmDC_PERFMON18_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON18_PERFMON_CNTL2 0x1f48
+#define mmDC_PERFMON18_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON18_PERFMON_CVALUE_INT_MISC 0x1f49
+#define mmDC_PERFMON18_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON18_PERFMON_CVALUE_LOW 0x1f4a
+#define mmDC_PERFMON18_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON18_PERFMON_HI 0x1f4b
+#define mmDC_PERFMON18_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON18_PERFMON_LOW 0x1f4c
+#define mmDC_PERFMON18_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define mmDP_AUX0_AUX_CONTROL 0x1f50
+#define mmDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define mmDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define mmDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_STATUS 0x1f54
+#define mmDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_STATUS 0x1f55
+#define mmDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_DATA 0x1f56
+#define mmDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_DATA 0x1f57
+#define mmDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROL 0x1f5e
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL 0x1f5f
+#define mmDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f60
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_GTC_SYNC_STATUS 0x1f61
+#define mmDP_AUX0_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_PHY_WAKE_CNTL 0x1f66
+#define mmDP_AUX0_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define mmDP_AUX1_AUX_CONTROL 0x1f6c
+#define mmDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define mmDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define mmDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_STATUS 0x1f70
+#define mmDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_STATUS 0x1f71
+#define mmDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_DATA 0x1f72
+#define mmDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_DATA 0x1f73
+#define mmDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROL 0x1f7a
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL 0x1f7b
+#define mmDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f7c
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_GTC_SYNC_STATUS 0x1f7d
+#define mmDP_AUX1_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_PHY_WAKE_CNTL 0x1f82
+#define mmDP_AUX1_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+// base address: 0xe0
+#define mmDP_AUX2_AUX_CONTROL 0x1f88
+#define mmDP_AUX2_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_SW_CONTROL 0x1f89
+#define mmDP_AUX2_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_ARB_CONTROL 0x1f8a
+#define mmDP_AUX2_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_INTERRUPT_CONTROL 0x1f8b
+#define mmDP_AUX2_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_SW_STATUS 0x1f8c
+#define mmDP_AUX2_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX2_AUX_LS_STATUS 0x1f8d
+#define mmDP_AUX2_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX2_AUX_SW_DATA 0x1f8e
+#define mmDP_AUX2_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX2_AUX_LS_DATA 0x1f8f
+#define mmDP_AUX2_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x1f90
+#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_DPHY_TX_CONTROL 0x1f91
+#define mmDP_AUX2_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0 0x1f92
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1 0x1f93
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX2_AUX_DPHY_TX_STATUS 0x1f94
+#define mmDP_AUX2_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX2_AUX_DPHY_RX_STATUS 0x1f95
+#define mmDP_AUX2_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROL 0x1f96
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL 0x1f97
+#define mmDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f98
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define mmDP_AUX2_AUX_GTC_SYNC_STATUS 0x1f99
+#define mmDP_AUX2_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define mmDP_AUX2_AUX_PHY_WAKE_CNTL 0x1f9e
+#define mmDP_AUX2_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+// base address: 0x150
+#define mmDP_AUX3_AUX_CONTROL 0x1fa4
+#define mmDP_AUX3_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_SW_CONTROL 0x1fa5
+#define mmDP_AUX3_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_ARB_CONTROL 0x1fa6
+#define mmDP_AUX3_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_INTERRUPT_CONTROL 0x1fa7
+#define mmDP_AUX3_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_SW_STATUS 0x1fa8
+#define mmDP_AUX3_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX3_AUX_LS_STATUS 0x1fa9
+#define mmDP_AUX3_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX3_AUX_SW_DATA 0x1faa
+#define mmDP_AUX3_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX3_AUX_LS_DATA 0x1fab
+#define mmDP_AUX3_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x1fac
+#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_DPHY_TX_CONTROL 0x1fad
+#define mmDP_AUX3_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0 0x1fae
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1 0x1faf
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX3_AUX_DPHY_TX_STATUS 0x1fb0
+#define mmDP_AUX3_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX3_AUX_DPHY_RX_STATUS 0x1fb1
+#define mmDP_AUX3_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROL 0x1fb2
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL 0x1fb3
+#define mmDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fb4
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define mmDP_AUX3_AUX_GTC_SYNC_STATUS 0x1fb5
+#define mmDP_AUX3_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define mmDP_AUX3_AUX_PHY_WAKE_CNTL 0x1fba
+#define mmDP_AUX3_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+// base address: 0x1c0
+#define mmDP_AUX4_AUX_CONTROL 0x1fc0
+#define mmDP_AUX4_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_SW_CONTROL 0x1fc1
+#define mmDP_AUX4_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_ARB_CONTROL 0x1fc2
+#define mmDP_AUX4_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_INTERRUPT_CONTROL 0x1fc3
+#define mmDP_AUX4_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_SW_STATUS 0x1fc4
+#define mmDP_AUX4_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX4_AUX_LS_STATUS 0x1fc5
+#define mmDP_AUX4_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX4_AUX_SW_DATA 0x1fc6
+#define mmDP_AUX4_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX4_AUX_LS_DATA 0x1fc7
+#define mmDP_AUX4_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x1fc8
+#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_DPHY_TX_CONTROL 0x1fc9
+#define mmDP_AUX4_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0 0x1fca
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1 0x1fcb
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX4_AUX_DPHY_TX_STATUS 0x1fcc
+#define mmDP_AUX4_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX4_AUX_DPHY_RX_STATUS 0x1fcd
+#define mmDP_AUX4_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROL 0x1fce
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL 0x1fcf
+#define mmDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fd0
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define mmDP_AUX4_AUX_GTC_SYNC_STATUS 0x1fd1
+#define mmDP_AUX4_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define mmDP_AUX4_AUX_PHY_WAKE_CNTL 0x1fd6
+#define mmDP_AUX4_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define mmDIG0_DIG_FE_CNTL 0x2068
+#define mmDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x2069
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x206a
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_DIG_CLOCK_PATTERN 0x206b
+#define mmDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_TEST_PATTERN 0x206c
+#define mmDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x206d
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG0_DIG_FIFO_STATUS 0x206e
+#define mmDIG0_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL 0x206f
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x2070
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG0_HDMI_CONTROL 0x2071
+#define mmDIG0_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_STATUS 0x2072
+#define mmDIG0_HDMI_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x2073
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x2074
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x2075
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x2076
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x2077
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x2078
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x2079
+#define mmDIG0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_GC 0x207b
+#define mmDIG0_HDMI_GC_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x207c
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_0 0x207d
+#define mmDIG0_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_1 0x207e
+#define mmDIG0_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_2 0x207f
+#define mmDIG0_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_3 0x2080
+#define mmDIG0_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_4 0x2081
+#define mmDIG0_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_0 0x2082
+#define mmDIG0_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_1 0x2083
+#define mmDIG0_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_2 0x2084
+#define mmDIG0_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_3 0x2085
+#define mmDIG0_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x2086
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x2087
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG0_HDMI_DB_CONTROL 0x2088
+#define mmDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG0_DME_CONTROL 0x2089
+#define mmDIG0_DME_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO0 0x208a
+#define mmDIG0_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO1 0x208b
+#define mmDIG0_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_HDR 0x208c
+#define mmDIG0_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_0 0x208d
+#define mmDIG0_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_1 0x208e
+#define mmDIG0_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_2 0x208f
+#define mmDIG0_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_3 0x2090
+#define mmDIG0_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_4 0x2091
+#define mmDIG0_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_5 0x2092
+#define mmDIG0_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_6 0x2093
+#define mmDIG0_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_7 0x2094
+#define mmDIG0_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x2095
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_0 0x2096
+#define mmDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_1 0x2097
+#define mmDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_0 0x2098
+#define mmDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_1 0x2099
+#define mmDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_0 0x209a
+#define mmDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_1 0x209b
+#define mmDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_0 0x209c
+#define mmDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_1 0x209d
+#define mmDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO0 0x209e
+#define mmDIG0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO1 0x209f
+#define mmDIG0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_60958_0 0x20a0
+#define mmDIG0_AFMT_60958_0_BASE_IDX 2
+#define mmDIG0_AFMT_60958_1 0x20a1
+#define mmDIG0_AFMT_60958_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x20a2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x20a3
+#define mmDIG0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x20a4
+#define mmDIG0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x20a5
+#define mmDIG0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x20a6
+#define mmDIG0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG0_AFMT_60958_2 0x20a7
+#define mmDIG0_AFMT_60958_2_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x20a8
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_AFMT_STATUS 0x20a9
+#define mmDIG0_AFMT_STATUS_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x20aa
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x20ab
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x20ac
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x20ad
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG0_DIG_BE_CNTL 0x20af
+#define mmDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_BE_EN_CNTL 0x20b0
+#define mmDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CNTL 0x20d3
+#define mmDIG0_TMDS_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL_CHAR 0x20d4
+#define mmDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x20d5
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20d6
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20d7
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20d8
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG0_TMDS_CTL_BITS 0x20da
+#define mmDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x20db
+#define mmDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20dc
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x20dd
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x20de
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_VERSION 0x20e0
+#define mmDIG0_DIG_VERSION_BASE_IDX 2
+#define mmDIG0_DIG_LANE_ENABLE 0x20e1
+#define mmDIG0_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG0_AFMT_CNTL 0x20e6
+#define mmDIG0_AFMT_CNTL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1 0x20e7
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20f6
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define mmDIG0_FORCE_DIG_DISABLE 0x20f7
+#define mmDIG0_FORCE_DIG_DISABLE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define mmDP0_DP_LINK_CNTL 0x2108
+#define mmDP0_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP0_DP_PIXEL_FORMAT 0x2109
+#define mmDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP0_DP_MSA_COLORIMETRY 0x210a
+#define mmDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP0_DP_CONFIG 0x210b
+#define mmDP0_DP_CONFIG_BASE_IDX 2
+#define mmDP0_DP_VID_STREAM_CNTL 0x210c
+#define mmDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_STEER_FIFO 0x210d
+#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP0_DP_MSA_MISC 0x210e
+#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define mmDP0_DP_VID_TIMING 0x2110
+#define mmDP0_DP_VID_TIMING_BASE_IDX 2
+#define mmDP0_DP_VID_N 0x2111
+#define mmDP0_DP_VID_N_BASE_IDX 2
+#define mmDP0_DP_VID_M 0x2112
+#define mmDP0_DP_VID_M_BASE_IDX 2
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x2113
+#define mmDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x2114
+#define mmDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP0_DP_VID_MSA_VBID 0x2115
+#define mmDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x2116
+#define mmDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CNTL 0x2117
+#define mmDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x2118
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM0 0x2119
+#define mmDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM1 0x211a
+#define mmDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM2 0x211b
+#define mmDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x211c
+#define mmDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x211d
+#define mmDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x211e
+#define mmDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_EN 0x211f
+#define mmDP0_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_CNTL 0x2120
+#define mmDP0_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_RESULT 0x2121
+#define mmDP0_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x2122
+#define mmDP0_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x2123
+#define mmDP0_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x2124
+#define mmDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x2125
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL 0x212b
+#define mmDP0_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL1 0x212c
+#define mmDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING1 0x212d
+#define mmDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING2 0x212e
+#define mmDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING3 0x212f
+#define mmDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING4 0x2130
+#define mmDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N 0x2131
+#define mmDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x2132
+#define mmDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M 0x2133
+#define mmDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x2134
+#define mmDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_TIMESTAMP 0x2135
+#define mmDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP0_DP_SEC_PACKET_CNTL 0x2136
+#define mmDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_CNTL 0x2137
+#define mmDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_UPDATE 0x2139
+#define mmDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0 0x213a
+#define mmDP0_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1 0x213b
+#define mmDP0_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2 0x213c
+#define mmDP0_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT_UPDATE 0x213d
+#define mmDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_LINK_TIMING 0x213e
+#define mmDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP0_DP_MSE_MISC_CNTL 0x213f
+#define mmDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x2144
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x2145
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0_STATUS 0x2147
+#define mmDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1_STATUS 0x2148
+#define mmDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2_STATUS 0x2149
+#define mmDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM1 0x214c
+#define mmDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM2 0x214d
+#define mmDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM3 0x214e
+#define mmDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM4 0x214f
+#define mmDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP0_DP_MSO_CNTL 0x2150
+#define mmDP0_DP_MSO_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSO_CNTL1 0x2151
+#define mmDP0_DP_MSO_CNTL1_BASE_IDX 2
+#define mmDP0_DP_DSC_CNTL 0x2152
+#define mmDP0_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL2 0x2153
+#define mmDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL3 0x2154
+#define mmDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL4 0x2155
+#define mmDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL5 0x2156
+#define mmDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL6 0x2157
+#define mmDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL7 0x2158
+#define mmDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP0_DP_DB_CNTL 0x2159
+#define mmDP0_DP_DB_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSA_VBID_MISC 0x215a
+#define mmDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION 0x215b
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL 0x215c
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmDP0_DP_ALPM_CNTL 0x215d
+#define mmDP0_DP_ALPM_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x400
+#define mmDIG1_DIG_FE_CNTL 0x2168
+#define mmDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x2169
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x216a
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_DIG_CLOCK_PATTERN 0x216b
+#define mmDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_TEST_PATTERN 0x216c
+#define mmDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x216d
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG1_DIG_FIFO_STATUS 0x216e
+#define mmDIG1_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL 0x216f
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x2170
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG1_HDMI_CONTROL 0x2171
+#define mmDIG1_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_STATUS 0x2172
+#define mmDIG1_HDMI_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x2173
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x2174
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x2175
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x2176
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x2177
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x2178
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x2179
+#define mmDIG1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_GC 0x217b
+#define mmDIG1_HDMI_GC_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x217c
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_0 0x217d
+#define mmDIG1_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_1 0x217e
+#define mmDIG1_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_2 0x217f
+#define mmDIG1_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_3 0x2180
+#define mmDIG1_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_4 0x2181
+#define mmDIG1_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_0 0x2182
+#define mmDIG1_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_1 0x2183
+#define mmDIG1_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_2 0x2184
+#define mmDIG1_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_3 0x2185
+#define mmDIG1_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x2186
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x2187
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG1_HDMI_DB_CONTROL 0x2188
+#define mmDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG1_DME_CONTROL 0x2189
+#define mmDIG1_DME_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO0 0x218a
+#define mmDIG1_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO1 0x218b
+#define mmDIG1_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_HDR 0x218c
+#define mmDIG1_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_0 0x218d
+#define mmDIG1_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_1 0x218e
+#define mmDIG1_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_2 0x218f
+#define mmDIG1_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_3 0x2190
+#define mmDIG1_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_4 0x2191
+#define mmDIG1_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_5 0x2192
+#define mmDIG1_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_6 0x2193
+#define mmDIG1_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_7 0x2194
+#define mmDIG1_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x2195
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_0 0x2196
+#define mmDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_1 0x2197
+#define mmDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_0 0x2198
+#define mmDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_1 0x2199
+#define mmDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_0 0x219a
+#define mmDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_1 0x219b
+#define mmDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_0 0x219c
+#define mmDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_1 0x219d
+#define mmDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO0 0x219e
+#define mmDIG1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO1 0x219f
+#define mmDIG1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_60958_0 0x21a0
+#define mmDIG1_AFMT_60958_0_BASE_IDX 2
+#define mmDIG1_AFMT_60958_1 0x21a1
+#define mmDIG1_AFMT_60958_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x21a2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x21a3
+#define mmDIG1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x21a4
+#define mmDIG1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x21a5
+#define mmDIG1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x21a6
+#define mmDIG1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG1_AFMT_60958_2 0x21a7
+#define mmDIG1_AFMT_60958_2_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x21a8
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_AFMT_STATUS 0x21a9
+#define mmDIG1_AFMT_STATUS_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x21aa
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x21ab
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x21ac
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x21ad
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG1_DIG_BE_CNTL 0x21af
+#define mmDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_BE_EN_CNTL 0x21b0
+#define mmDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CNTL 0x21d3
+#define mmDIG1_TMDS_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL_CHAR 0x21d4
+#define mmDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x21d5
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x21d6
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x21d7
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x21d8
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG1_TMDS_CTL_BITS 0x21da
+#define mmDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x21db
+#define mmDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x21dc
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x21dd
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x21de
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_VERSION 0x21e0
+#define mmDIG1_DIG_VERSION_BASE_IDX 2
+#define mmDIG1_DIG_LANE_ENABLE 0x21e1
+#define mmDIG1_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG1_AFMT_CNTL 0x21e6
+#define mmDIG1_AFMT_CNTL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1 0x21e7
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21f6
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define mmDIG1_FORCE_DIG_DISABLE 0x21f7
+#define mmDIG1_FORCE_DIG_DISABLE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x400
+#define mmDP1_DP_LINK_CNTL 0x2208
+#define mmDP1_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP1_DP_PIXEL_FORMAT 0x2209
+#define mmDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP1_DP_MSA_COLORIMETRY 0x220a
+#define mmDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP1_DP_CONFIG 0x220b
+#define mmDP1_DP_CONFIG_BASE_IDX 2
+#define mmDP1_DP_VID_STREAM_CNTL 0x220c
+#define mmDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_STEER_FIFO 0x220d
+#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP1_DP_MSA_MISC 0x220e
+#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define mmDP1_DP_VID_TIMING 0x2210
+#define mmDP1_DP_VID_TIMING_BASE_IDX 2
+#define mmDP1_DP_VID_N 0x2211
+#define mmDP1_DP_VID_N_BASE_IDX 2
+#define mmDP1_DP_VID_M 0x2212
+#define mmDP1_DP_VID_M_BASE_IDX 2
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x2213
+#define mmDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x2214
+#define mmDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP1_DP_VID_MSA_VBID 0x2215
+#define mmDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x2216
+#define mmDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CNTL 0x2217
+#define mmDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2218
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM0 0x2219
+#define mmDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM1 0x221a
+#define mmDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM2 0x221b
+#define mmDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x221c
+#define mmDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x221d
+#define mmDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x221e
+#define mmDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_EN 0x221f
+#define mmDP1_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_CNTL 0x2220
+#define mmDP1_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_RESULT 0x2221
+#define mmDP1_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x2222
+#define mmDP1_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x2223
+#define mmDP1_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x2224
+#define mmDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x2225
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL 0x222b
+#define mmDP1_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL1 0x222c
+#define mmDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING1 0x222d
+#define mmDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING2 0x222e
+#define mmDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING3 0x222f
+#define mmDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING4 0x2230
+#define mmDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N 0x2231
+#define mmDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x2232
+#define mmDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M 0x2233
+#define mmDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x2234
+#define mmDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_TIMESTAMP 0x2235
+#define mmDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP1_DP_SEC_PACKET_CNTL 0x2236
+#define mmDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_CNTL 0x2237
+#define mmDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_UPDATE 0x2239
+#define mmDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0 0x223a
+#define mmDP1_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1 0x223b
+#define mmDP1_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2 0x223c
+#define mmDP1_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT_UPDATE 0x223d
+#define mmDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_LINK_TIMING 0x223e
+#define mmDP1_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP1_DP_MSE_MISC_CNTL 0x223f
+#define mmDP1_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x2244
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x2245
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0_STATUS 0x2247
+#define mmDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1_STATUS 0x2248
+#define mmDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2_STATUS 0x2249
+#define mmDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM1 0x224c
+#define mmDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM2 0x224d
+#define mmDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM3 0x224e
+#define mmDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM4 0x224f
+#define mmDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP1_DP_MSO_CNTL 0x2250
+#define mmDP1_DP_MSO_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSO_CNTL1 0x2251
+#define mmDP1_DP_MSO_CNTL1_BASE_IDX 2
+#define mmDP1_DP_DSC_CNTL 0x2252
+#define mmDP1_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL2 0x2253
+#define mmDP1_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL3 0x2254
+#define mmDP1_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL4 0x2255
+#define mmDP1_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL5 0x2256
+#define mmDP1_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL6 0x2257
+#define mmDP1_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL7 0x2258
+#define mmDP1_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP1_DP_DB_CNTL 0x2259
+#define mmDP1_DP_DB_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSA_VBID_MISC 0x225a
+#define mmDP1_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP1_DP_SEC_METADATA_TRANSMISSION 0x225b
+#define mmDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP1_DP_DSC_BYTES_PER_PIXEL 0x225c
+#define mmDP1_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmDP1_DP_ALPM_CNTL 0x225d
+#define mmDP1_DP_ALPM_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+// base address: 0x800
+#define mmDIG2_DIG_FE_CNTL 0x2268
+#define mmDIG2_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG2_DIG_OUTPUT_CRC_CNTL 0x2269
+#define mmDIG2_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG2_DIG_OUTPUT_CRC_RESULT 0x226a
+#define mmDIG2_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG2_DIG_CLOCK_PATTERN 0x226b
+#define mmDIG2_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG2_DIG_TEST_PATTERN 0x226c
+#define mmDIG2_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG2_DIG_RANDOM_PATTERN_SEED 0x226d
+#define mmDIG2_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG2_DIG_FIFO_STATUS 0x226e
+#define mmDIG2_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG2_HDMI_METADATA_PACKET_CONTROL 0x226f
+#define mmDIG2_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL4 0x2270
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG2_HDMI_CONTROL 0x2271
+#define mmDIG2_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG2_HDMI_STATUS 0x2272
+#define mmDIG2_HDMI_STATUS_BASE_IDX 2
+#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL 0x2273
+#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_PACKET_CONTROL 0x2274
+#define mmDIG2_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG2_HDMI_VBI_PACKET_CONTROL 0x2275
+#define mmDIG2_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG2_HDMI_INFOFRAME_CONTROL0 0x2276
+#define mmDIG2_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG2_HDMI_INFOFRAME_CONTROL1 0x2277
+#define mmDIG2_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x2278
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG2_AFMT_INTERRUPT_STATUS 0x2279
+#define mmDIG2_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG2_HDMI_GC 0x227b
+#define mmDIG2_HDMI_GC_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2 0x227c
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC1_0 0x227d
+#define mmDIG2_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC1_1 0x227e
+#define mmDIG2_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC1_2 0x227f
+#define mmDIG2_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC1_3 0x2280
+#define mmDIG2_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC1_4 0x2281
+#define mmDIG2_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC2_0 0x2282
+#define mmDIG2_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC2_1 0x2283
+#define mmDIG2_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC2_2 0x2284
+#define mmDIG2_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG2_AFMT_ISRC2_3 0x2285
+#define mmDIG2_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL2 0x2286
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL3 0x2287
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG2_HDMI_DB_CONTROL 0x2288
+#define mmDIG2_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG2_DME_CONTROL 0x2289
+#define mmDIG2_DME_CONTROL_BASE_IDX 2
+#define mmDIG2_AFMT_MPEG_INFO0 0x228a
+#define mmDIG2_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG2_AFMT_MPEG_INFO1 0x228b
+#define mmDIG2_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_HDR 0x228c
+#define mmDIG2_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_0 0x228d
+#define mmDIG2_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_1 0x228e
+#define mmDIG2_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_2 0x228f
+#define mmDIG2_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_3 0x2290
+#define mmDIG2_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_4 0x2291
+#define mmDIG2_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_5 0x2292
+#define mmDIG2_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_6 0x2293
+#define mmDIG2_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG2_AFMT_GENERIC_7 0x2294
+#define mmDIG2_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x2295
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_32_0 0x2296
+#define mmDIG2_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_32_1 0x2297
+#define mmDIG2_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_44_0 0x2298
+#define mmDIG2_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_44_1 0x2299
+#define mmDIG2_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_48_0 0x229a
+#define mmDIG2_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_48_1 0x229b
+#define mmDIG2_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_STATUS_0 0x229c
+#define mmDIG2_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG2_HDMI_ACR_STATUS_1 0x229d
+#define mmDIG2_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_INFO0 0x229e
+#define mmDIG2_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_INFO1 0x229f
+#define mmDIG2_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG2_AFMT_60958_0 0x22a0
+#define mmDIG2_AFMT_60958_0_BASE_IDX 2
+#define mmDIG2_AFMT_60958_1 0x22a1
+#define mmDIG2_AFMT_60958_1_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_CRC_CONTROL 0x22a2
+#define mmDIG2_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG2_AFMT_RAMP_CONTROL0 0x22a3
+#define mmDIG2_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG2_AFMT_RAMP_CONTROL1 0x22a4
+#define mmDIG2_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG2_AFMT_RAMP_CONTROL2 0x22a5
+#define mmDIG2_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG2_AFMT_RAMP_CONTROL3 0x22a6
+#define mmDIG2_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG2_AFMT_60958_2 0x22a7
+#define mmDIG2_AFMT_60958_2_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_CRC_RESULT 0x22a8
+#define mmDIG2_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG2_AFMT_STATUS 0x22a9
+#define mmDIG2_AFMT_STATUS_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL 0x22aa
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL 0x22ab
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG2_AFMT_INFOFRAME_CONTROL0 0x22ac
+#define mmDIG2_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG2_AFMT_AUDIO_SRC_CONTROL 0x22ad
+#define mmDIG2_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG2_DIG_BE_CNTL 0x22af
+#define mmDIG2_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG2_DIG_BE_EN_CNTL 0x22b0
+#define mmDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG2_TMDS_CNTL 0x22d3
+#define mmDIG2_TMDS_CNTL_BASE_IDX 2
+#define mmDIG2_TMDS_CONTROL_CHAR 0x22d4
+#define mmDIG2_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG2_TMDS_CONTROL0_FEEDBACK 0x22d5
+#define mmDIG2_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL 0x22d6
+#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x22d7
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x22d8
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG2_TMDS_CTL_BITS 0x22da
+#define mmDIG2_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG2_TMDS_DCBALANCER_CONTROL 0x22db
+#define mmDIG2_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG2_TMDS_SYNC_DCBALANCE_CHAR 0x22dc
+#define mmDIG2_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG2_TMDS_CTL0_1_GEN_CNTL 0x22dd
+#define mmDIG2_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG2_TMDS_CTL2_3_GEN_CNTL 0x22de
+#define mmDIG2_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG2_DIG_VERSION 0x22e0
+#define mmDIG2_DIG_VERSION_BASE_IDX 2
+#define mmDIG2_DIG_LANE_ENABLE 0x22e1
+#define mmDIG2_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG2_AFMT_CNTL 0x22e6
+#define mmDIG2_AFMT_CNTL_BASE_IDX 2
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL1 0x22e7
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL5 0x22f6
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define mmDIG2_FORCE_DIG_DISABLE 0x22f7
+#define mmDIG2_FORCE_DIG_DISABLE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp2_dispdec
+// base address: 0x800
+#define mmDP2_DP_LINK_CNTL 0x2308
+#define mmDP2_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP2_DP_PIXEL_FORMAT 0x2309
+#define mmDP2_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP2_DP_MSA_COLORIMETRY 0x230a
+#define mmDP2_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP2_DP_CONFIG 0x230b
+#define mmDP2_DP_CONFIG_BASE_IDX 2
+#define mmDP2_DP_VID_STREAM_CNTL 0x230c
+#define mmDP2_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP2_DP_STEER_FIFO 0x230d
+#define mmDP2_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP2_DP_MSA_MISC 0x230e
+#define mmDP2_DP_MSA_MISC_BASE_IDX 2
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define mmDP2_DP_VID_TIMING 0x2310
+#define mmDP2_DP_VID_TIMING_BASE_IDX 2
+#define mmDP2_DP_VID_N 0x2311
+#define mmDP2_DP_VID_N_BASE_IDX 2
+#define mmDP2_DP_VID_M 0x2312
+#define mmDP2_DP_VID_M_BASE_IDX 2
+#define mmDP2_DP_LINK_FRAMING_CNTL 0x2313
+#define mmDP2_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP2_DP_HBR2_EYE_PATTERN 0x2314
+#define mmDP2_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP2_DP_VID_MSA_VBID 0x2315
+#define mmDP2_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP2_DP_VID_INTERRUPT_CNTL 0x2316
+#define mmDP2_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_CNTL 0x2317
+#define mmDP2_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x2318
+#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP2_DP_DPHY_SYM0 0x2319
+#define mmDP2_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP2_DP_DPHY_SYM1 0x231a
+#define mmDP2_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP2_DP_DPHY_SYM2 0x231b
+#define mmDP2_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP2_DP_DPHY_8B10B_CNTL 0x231c
+#define mmDP2_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_PRBS_CNTL 0x231d
+#define mmDP2_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_SCRAM_CNTL 0x231e
+#define mmDP2_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_CRC_EN 0x231f
+#define mmDP2_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP2_DP_DPHY_CRC_CNTL 0x2320
+#define mmDP2_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_CRC_RESULT 0x2321
+#define mmDP2_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP2_DP_DPHY_CRC_MST_CNTL 0x2322
+#define mmDP2_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_CRC_MST_STATUS 0x2323
+#define mmDP2_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP2_DP_DPHY_FAST_TRAINING 0x2324
+#define mmDP2_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS 0x2325
+#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL 0x232b
+#define mmDP2_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL1 0x232c
+#define mmDP2_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP2_DP_SEC_FRAMING1 0x232d
+#define mmDP2_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP2_DP_SEC_FRAMING2 0x232e
+#define mmDP2_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP2_DP_SEC_FRAMING3 0x232f
+#define mmDP2_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP2_DP_SEC_FRAMING4 0x2330
+#define mmDP2_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP2_DP_SEC_AUD_N 0x2331
+#define mmDP2_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP2_DP_SEC_AUD_N_READBACK 0x2332
+#define mmDP2_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP2_DP_SEC_AUD_M 0x2333
+#define mmDP2_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP2_DP_SEC_AUD_M_READBACK 0x2334
+#define mmDP2_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP2_DP_SEC_TIMESTAMP 0x2335
+#define mmDP2_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP2_DP_SEC_PACKET_CNTL 0x2336
+#define mmDP2_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP2_DP_MSE_RATE_CNTL 0x2337
+#define mmDP2_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP2_DP_MSE_RATE_UPDATE 0x2339
+#define mmDP2_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT0 0x233a
+#define mmDP2_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT1 0x233b
+#define mmDP2_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT2 0x233c
+#define mmDP2_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT_UPDATE 0x233d
+#define mmDP2_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP2_DP_MSE_LINK_TIMING 0x233e
+#define mmDP2_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP2_DP_MSE_MISC_CNTL 0x233f
+#define mmDP2_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x2344
+#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP2_DP_DPHY_HBR2_PATTERN_CONTROL 0x2345
+#define mmDP2_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT0_STATUS 0x2347
+#define mmDP2_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT1_STATUS 0x2348
+#define mmDP2_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP2_DP_MSE_SAT2_STATUS 0x2349
+#define mmDP2_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP2_DP_MSA_TIMING_PARAM1 0x234c
+#define mmDP2_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP2_DP_MSA_TIMING_PARAM2 0x234d
+#define mmDP2_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP2_DP_MSA_TIMING_PARAM3 0x234e
+#define mmDP2_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP2_DP_MSA_TIMING_PARAM4 0x234f
+#define mmDP2_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP2_DP_MSO_CNTL 0x2350
+#define mmDP2_DP_MSO_CNTL_BASE_IDX 2
+#define mmDP2_DP_MSO_CNTL1 0x2351
+#define mmDP2_DP_MSO_CNTL1_BASE_IDX 2
+#define mmDP2_DP_DSC_CNTL 0x2352
+#define mmDP2_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL2 0x2353
+#define mmDP2_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL3 0x2354
+#define mmDP2_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL4 0x2355
+#define mmDP2_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL5 0x2356
+#define mmDP2_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL6 0x2357
+#define mmDP2_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP2_DP_SEC_CNTL7 0x2358
+#define mmDP2_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP2_DP_DB_CNTL 0x2359
+#define mmDP2_DP_DB_CNTL_BASE_IDX 2
+#define mmDP2_DP_MSA_VBID_MISC 0x235a
+#define mmDP2_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP2_DP_SEC_METADATA_TRANSMISSION 0x235b
+#define mmDP2_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP2_DP_DSC_BYTES_PER_PIXEL 0x235c
+#define mmDP2_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmDP2_DP_ALPM_CNTL 0x235d
+#define mmDP2_DP_ALPM_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+// base address: 0xc00
+#define mmDIG3_DIG_FE_CNTL 0x2368
+#define mmDIG3_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG3_DIG_OUTPUT_CRC_CNTL 0x2369
+#define mmDIG3_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG3_DIG_OUTPUT_CRC_RESULT 0x236a
+#define mmDIG3_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG3_DIG_CLOCK_PATTERN 0x236b
+#define mmDIG3_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG3_DIG_TEST_PATTERN 0x236c
+#define mmDIG3_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG3_DIG_RANDOM_PATTERN_SEED 0x236d
+#define mmDIG3_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG3_DIG_FIFO_STATUS 0x236e
+#define mmDIG3_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG3_HDMI_METADATA_PACKET_CONTROL 0x236f
+#define mmDIG3_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL4 0x2370
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG3_HDMI_CONTROL 0x2371
+#define mmDIG3_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG3_HDMI_STATUS 0x2372
+#define mmDIG3_HDMI_STATUS_BASE_IDX 2
+#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL 0x2373
+#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_PACKET_CONTROL 0x2374
+#define mmDIG3_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG3_HDMI_VBI_PACKET_CONTROL 0x2375
+#define mmDIG3_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG3_HDMI_INFOFRAME_CONTROL0 0x2376
+#define mmDIG3_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG3_HDMI_INFOFRAME_CONTROL1 0x2377
+#define mmDIG3_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x2378
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG3_AFMT_INTERRUPT_STATUS 0x2379
+#define mmDIG3_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG3_HDMI_GC 0x237b
+#define mmDIG3_HDMI_GC_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2 0x237c
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC1_0 0x237d
+#define mmDIG3_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC1_1 0x237e
+#define mmDIG3_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC1_2 0x237f
+#define mmDIG3_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC1_3 0x2380
+#define mmDIG3_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC1_4 0x2381
+#define mmDIG3_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC2_0 0x2382
+#define mmDIG3_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC2_1 0x2383
+#define mmDIG3_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC2_2 0x2384
+#define mmDIG3_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG3_AFMT_ISRC2_3 0x2385
+#define mmDIG3_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL2 0x2386
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL3 0x2387
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG3_HDMI_DB_CONTROL 0x2388
+#define mmDIG3_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG3_DME_CONTROL 0x2389
+#define mmDIG3_DME_CONTROL_BASE_IDX 2
+#define mmDIG3_AFMT_MPEG_INFO0 0x238a
+#define mmDIG3_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG3_AFMT_MPEG_INFO1 0x238b
+#define mmDIG3_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_HDR 0x238c
+#define mmDIG3_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_0 0x238d
+#define mmDIG3_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_1 0x238e
+#define mmDIG3_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_2 0x238f
+#define mmDIG3_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_3 0x2390
+#define mmDIG3_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_4 0x2391
+#define mmDIG3_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_5 0x2392
+#define mmDIG3_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_6 0x2393
+#define mmDIG3_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG3_AFMT_GENERIC_7 0x2394
+#define mmDIG3_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x2395
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_32_0 0x2396
+#define mmDIG3_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_32_1 0x2397
+#define mmDIG3_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_44_0 0x2398
+#define mmDIG3_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_44_1 0x2399
+#define mmDIG3_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_48_0 0x239a
+#define mmDIG3_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_48_1 0x239b
+#define mmDIG3_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_STATUS_0 0x239c
+#define mmDIG3_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG3_HDMI_ACR_STATUS_1 0x239d
+#define mmDIG3_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_INFO0 0x239e
+#define mmDIG3_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_INFO1 0x239f
+#define mmDIG3_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG3_AFMT_60958_0 0x23a0
+#define mmDIG3_AFMT_60958_0_BASE_IDX 2
+#define mmDIG3_AFMT_60958_1 0x23a1
+#define mmDIG3_AFMT_60958_1_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_CRC_CONTROL 0x23a2
+#define mmDIG3_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG3_AFMT_RAMP_CONTROL0 0x23a3
+#define mmDIG3_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG3_AFMT_RAMP_CONTROL1 0x23a4
+#define mmDIG3_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG3_AFMT_RAMP_CONTROL2 0x23a5
+#define mmDIG3_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG3_AFMT_RAMP_CONTROL3 0x23a6
+#define mmDIG3_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG3_AFMT_60958_2 0x23a7
+#define mmDIG3_AFMT_60958_2_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_CRC_RESULT 0x23a8
+#define mmDIG3_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG3_AFMT_STATUS 0x23a9
+#define mmDIG3_AFMT_STATUS_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL 0x23aa
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL 0x23ab
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG3_AFMT_INFOFRAME_CONTROL0 0x23ac
+#define mmDIG3_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG3_AFMT_AUDIO_SRC_CONTROL 0x23ad
+#define mmDIG3_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG3_DIG_BE_CNTL 0x23af
+#define mmDIG3_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG3_DIG_BE_EN_CNTL 0x23b0
+#define mmDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG3_TMDS_CNTL 0x23d3
+#define mmDIG3_TMDS_CNTL_BASE_IDX 2
+#define mmDIG3_TMDS_CONTROL_CHAR 0x23d4
+#define mmDIG3_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG3_TMDS_CONTROL0_FEEDBACK 0x23d5
+#define mmDIG3_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL 0x23d6
+#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x23d7
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x23d8
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG3_TMDS_CTL_BITS 0x23da
+#define mmDIG3_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG3_TMDS_DCBALANCER_CONTROL 0x23db
+#define mmDIG3_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG3_TMDS_SYNC_DCBALANCE_CHAR 0x23dc
+#define mmDIG3_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG3_TMDS_CTL0_1_GEN_CNTL 0x23dd
+#define mmDIG3_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG3_TMDS_CTL2_3_GEN_CNTL 0x23de
+#define mmDIG3_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG3_DIG_VERSION 0x23e0
+#define mmDIG3_DIG_VERSION_BASE_IDX 2
+#define mmDIG3_DIG_LANE_ENABLE 0x23e1
+#define mmDIG3_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG3_AFMT_CNTL 0x23e6
+#define mmDIG3_AFMT_CNTL_BASE_IDX 2
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL1 0x23e7
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL5 0x23f6
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define mmDIG3_FORCE_DIG_DISABLE 0x23f7
+#define mmDIG3_FORCE_DIG_DISABLE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp3_dispdec
+// base address: 0xc00
+#define mmDP3_DP_LINK_CNTL 0x2408
+#define mmDP3_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP3_DP_PIXEL_FORMAT 0x2409
+#define mmDP3_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP3_DP_MSA_COLORIMETRY 0x240a
+#define mmDP3_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP3_DP_CONFIG 0x240b
+#define mmDP3_DP_CONFIG_BASE_IDX 2
+#define mmDP3_DP_VID_STREAM_CNTL 0x240c
+#define mmDP3_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP3_DP_STEER_FIFO 0x240d
+#define mmDP3_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP3_DP_MSA_MISC 0x240e
+#define mmDP3_DP_MSA_MISC_BASE_IDX 2
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define mmDP3_DP_VID_TIMING 0x2410
+#define mmDP3_DP_VID_TIMING_BASE_IDX 2
+#define mmDP3_DP_VID_N 0x2411
+#define mmDP3_DP_VID_N_BASE_IDX 2
+#define mmDP3_DP_VID_M 0x2412
+#define mmDP3_DP_VID_M_BASE_IDX 2
+#define mmDP3_DP_LINK_FRAMING_CNTL 0x2413
+#define mmDP3_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP3_DP_HBR2_EYE_PATTERN 0x2414
+#define mmDP3_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP3_DP_VID_MSA_VBID 0x2415
+#define mmDP3_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP3_DP_VID_INTERRUPT_CNTL 0x2416
+#define mmDP3_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_CNTL 0x2417
+#define mmDP3_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x2418
+#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP3_DP_DPHY_SYM0 0x2419
+#define mmDP3_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP3_DP_DPHY_SYM1 0x241a
+#define mmDP3_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP3_DP_DPHY_SYM2 0x241b
+#define mmDP3_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP3_DP_DPHY_8B10B_CNTL 0x241c
+#define mmDP3_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_PRBS_CNTL 0x241d
+#define mmDP3_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_SCRAM_CNTL 0x241e
+#define mmDP3_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_CRC_EN 0x241f
+#define mmDP3_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP3_DP_DPHY_CRC_CNTL 0x2420
+#define mmDP3_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_CRC_RESULT 0x2421
+#define mmDP3_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP3_DP_DPHY_CRC_MST_CNTL 0x2422
+#define mmDP3_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_CRC_MST_STATUS 0x2423
+#define mmDP3_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP3_DP_DPHY_FAST_TRAINING 0x2424
+#define mmDP3_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS 0x2425
+#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL 0x242b
+#define mmDP3_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL1 0x242c
+#define mmDP3_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP3_DP_SEC_FRAMING1 0x242d
+#define mmDP3_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP3_DP_SEC_FRAMING2 0x242e
+#define mmDP3_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP3_DP_SEC_FRAMING3 0x242f
+#define mmDP3_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP3_DP_SEC_FRAMING4 0x2430
+#define mmDP3_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP3_DP_SEC_AUD_N 0x2431
+#define mmDP3_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP3_DP_SEC_AUD_N_READBACK 0x2432
+#define mmDP3_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP3_DP_SEC_AUD_M 0x2433
+#define mmDP3_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP3_DP_SEC_AUD_M_READBACK 0x2434
+#define mmDP3_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP3_DP_SEC_TIMESTAMP 0x2435
+#define mmDP3_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP3_DP_SEC_PACKET_CNTL 0x2436
+#define mmDP3_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP3_DP_MSE_RATE_CNTL 0x2437
+#define mmDP3_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP3_DP_MSE_RATE_UPDATE 0x2439
+#define mmDP3_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT0 0x243a
+#define mmDP3_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT1 0x243b
+#define mmDP3_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT2 0x243c
+#define mmDP3_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT_UPDATE 0x243d
+#define mmDP3_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP3_DP_MSE_LINK_TIMING 0x243e
+#define mmDP3_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP3_DP_MSE_MISC_CNTL 0x243f
+#define mmDP3_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x2444
+#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP3_DP_DPHY_HBR2_PATTERN_CONTROL 0x2445
+#define mmDP3_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT0_STATUS 0x2447
+#define mmDP3_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT1_STATUS 0x2448
+#define mmDP3_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP3_DP_MSE_SAT2_STATUS 0x2449
+#define mmDP3_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP3_DP_MSA_TIMING_PARAM1 0x244c
+#define mmDP3_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP3_DP_MSA_TIMING_PARAM2 0x244d
+#define mmDP3_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP3_DP_MSA_TIMING_PARAM3 0x244e
+#define mmDP3_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP3_DP_MSA_TIMING_PARAM4 0x244f
+#define mmDP3_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP3_DP_MSO_CNTL 0x2450
+#define mmDP3_DP_MSO_CNTL_BASE_IDX 2
+#define mmDP3_DP_MSO_CNTL1 0x2451
+#define mmDP3_DP_MSO_CNTL1_BASE_IDX 2
+#define mmDP3_DP_DSC_CNTL 0x2452
+#define mmDP3_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL2 0x2453
+#define mmDP3_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL3 0x2454
+#define mmDP3_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL4 0x2455
+#define mmDP3_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL5 0x2456
+#define mmDP3_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL6 0x2457
+#define mmDP3_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP3_DP_SEC_CNTL7 0x2458
+#define mmDP3_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP3_DP_DB_CNTL 0x2459
+#define mmDP3_DP_DB_CNTL_BASE_IDX 2
+#define mmDP3_DP_MSA_VBID_MISC 0x245a
+#define mmDP3_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP3_DP_SEC_METADATA_TRANSMISSION 0x245b
+#define mmDP3_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP3_DP_DSC_BYTES_PER_PIXEL 0x245c
+#define mmDP3_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmDP3_DP_ALPM_CNTL 0x245d
+#define mmDP3_DP_ALPM_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dispdec
+// base address: 0x1000
+#define mmDIG4_DIG_FE_CNTL 0x2468
+#define mmDIG4_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG4_DIG_OUTPUT_CRC_CNTL 0x2469
+#define mmDIG4_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG4_DIG_OUTPUT_CRC_RESULT 0x246a
+#define mmDIG4_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG4_DIG_CLOCK_PATTERN 0x246b
+#define mmDIG4_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG4_DIG_TEST_PATTERN 0x246c
+#define mmDIG4_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG4_DIG_RANDOM_PATTERN_SEED 0x246d
+#define mmDIG4_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG4_DIG_FIFO_STATUS 0x246e
+#define mmDIG4_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG4_HDMI_METADATA_PACKET_CONTROL 0x246f
+#define mmDIG4_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL4 0x2470
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG4_HDMI_CONTROL 0x2471
+#define mmDIG4_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG4_HDMI_STATUS 0x2472
+#define mmDIG4_HDMI_STATUS_BASE_IDX 2
+#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL 0x2473
+#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_PACKET_CONTROL 0x2474
+#define mmDIG4_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG4_HDMI_VBI_PACKET_CONTROL 0x2475
+#define mmDIG4_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG4_HDMI_INFOFRAME_CONTROL0 0x2476
+#define mmDIG4_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG4_HDMI_INFOFRAME_CONTROL1 0x2477
+#define mmDIG4_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x2478
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG4_AFMT_INTERRUPT_STATUS 0x2479
+#define mmDIG4_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG4_HDMI_GC 0x247b
+#define mmDIG4_HDMI_GC_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2 0x247c
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC1_0 0x247d
+#define mmDIG4_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC1_1 0x247e
+#define mmDIG4_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC1_2 0x247f
+#define mmDIG4_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC1_3 0x2480
+#define mmDIG4_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC1_4 0x2481
+#define mmDIG4_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC2_0 0x2482
+#define mmDIG4_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC2_1 0x2483
+#define mmDIG4_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC2_2 0x2484
+#define mmDIG4_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG4_AFMT_ISRC2_3 0x2485
+#define mmDIG4_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL2 0x2486
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL3 0x2487
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG4_HDMI_DB_CONTROL 0x2488
+#define mmDIG4_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG4_DME_CONTROL 0x2489
+#define mmDIG4_DME_CONTROL_BASE_IDX 2
+#define mmDIG4_AFMT_MPEG_INFO0 0x248a
+#define mmDIG4_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG4_AFMT_MPEG_INFO1 0x248b
+#define mmDIG4_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_HDR 0x248c
+#define mmDIG4_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_0 0x248d
+#define mmDIG4_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_1 0x248e
+#define mmDIG4_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_2 0x248f
+#define mmDIG4_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_3 0x2490
+#define mmDIG4_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_4 0x2491
+#define mmDIG4_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_5 0x2492
+#define mmDIG4_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_6 0x2493
+#define mmDIG4_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG4_AFMT_GENERIC_7 0x2494
+#define mmDIG4_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x2495
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_32_0 0x2496
+#define mmDIG4_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_32_1 0x2497
+#define mmDIG4_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_44_0 0x2498
+#define mmDIG4_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_44_1 0x2499
+#define mmDIG4_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_48_0 0x249a
+#define mmDIG4_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_48_1 0x249b
+#define mmDIG4_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_STATUS_0 0x249c
+#define mmDIG4_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG4_HDMI_ACR_STATUS_1 0x249d
+#define mmDIG4_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_INFO0 0x249e
+#define mmDIG4_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_INFO1 0x249f
+#define mmDIG4_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG4_AFMT_60958_0 0x24a0
+#define mmDIG4_AFMT_60958_0_BASE_IDX 2
+#define mmDIG4_AFMT_60958_1 0x24a1
+#define mmDIG4_AFMT_60958_1_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_CRC_CONTROL 0x24a2
+#define mmDIG4_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG4_AFMT_RAMP_CONTROL0 0x24a3
+#define mmDIG4_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG4_AFMT_RAMP_CONTROL1 0x24a4
+#define mmDIG4_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG4_AFMT_RAMP_CONTROL2 0x24a5
+#define mmDIG4_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG4_AFMT_RAMP_CONTROL3 0x24a6
+#define mmDIG4_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG4_AFMT_60958_2 0x24a7
+#define mmDIG4_AFMT_60958_2_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_CRC_RESULT 0x24a8
+#define mmDIG4_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG4_AFMT_STATUS 0x24a9
+#define mmDIG4_AFMT_STATUS_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL 0x24aa
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL 0x24ab
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG4_AFMT_INFOFRAME_CONTROL0 0x24ac
+#define mmDIG4_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG4_AFMT_AUDIO_SRC_CONTROL 0x24ad
+#define mmDIG4_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG4_DIG_BE_CNTL 0x24af
+#define mmDIG4_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG4_DIG_BE_EN_CNTL 0x24b0
+#define mmDIG4_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG4_TMDS_CNTL 0x24d3
+#define mmDIG4_TMDS_CNTL_BASE_IDX 2
+#define mmDIG4_TMDS_CONTROL_CHAR 0x24d4
+#define mmDIG4_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG4_TMDS_CONTROL0_FEEDBACK 0x24d5
+#define mmDIG4_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL 0x24d6
+#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x24d7
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x24d8
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG4_TMDS_CTL_BITS 0x24da
+#define mmDIG4_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG4_TMDS_DCBALANCER_CONTROL 0x24db
+#define mmDIG4_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG4_TMDS_SYNC_DCBALANCE_CHAR 0x24dc
+#define mmDIG4_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG4_TMDS_CTL0_1_GEN_CNTL 0x24dd
+#define mmDIG4_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG4_TMDS_CTL2_3_GEN_CNTL 0x24de
+#define mmDIG4_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG4_DIG_VERSION 0x24e0
+#define mmDIG4_DIG_VERSION_BASE_IDX 2
+#define mmDIG4_DIG_LANE_ENABLE 0x24e1
+#define mmDIG4_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG4_AFMT_CNTL 0x24e6
+#define mmDIG4_AFMT_CNTL_BASE_IDX 2
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL1 0x24e7
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL5 0x24f6
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define mmDIG4_FORCE_DIG_DISABLE 0x24f7
+#define mmDIG4_FORCE_DIG_DISABLE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp4_dispdec
+// base address: 0x1000
+#define mmDP4_DP_LINK_CNTL 0x2508
+#define mmDP4_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP4_DP_PIXEL_FORMAT 0x2509
+#define mmDP4_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP4_DP_MSA_COLORIMETRY 0x250a
+#define mmDP4_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP4_DP_CONFIG 0x250b
+#define mmDP4_DP_CONFIG_BASE_IDX 2
+#define mmDP4_DP_VID_STREAM_CNTL 0x250c
+#define mmDP4_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP4_DP_STEER_FIFO 0x250d
+#define mmDP4_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP4_DP_MSA_MISC 0x250e
+#define mmDP4_DP_MSA_MISC_BASE_IDX 2
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define mmDP4_DP_VID_TIMING 0x2510
+#define mmDP4_DP_VID_TIMING_BASE_IDX 2
+#define mmDP4_DP_VID_N 0x2511
+#define mmDP4_DP_VID_N_BASE_IDX 2
+#define mmDP4_DP_VID_M 0x2512
+#define mmDP4_DP_VID_M_BASE_IDX 2
+#define mmDP4_DP_LINK_FRAMING_CNTL 0x2513
+#define mmDP4_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP4_DP_HBR2_EYE_PATTERN 0x2514
+#define mmDP4_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP4_DP_VID_MSA_VBID 0x2515
+#define mmDP4_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP4_DP_VID_INTERRUPT_CNTL 0x2516
+#define mmDP4_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_CNTL 0x2517
+#define mmDP4_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x2518
+#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP4_DP_DPHY_SYM0 0x2519
+#define mmDP4_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP4_DP_DPHY_SYM1 0x251a
+#define mmDP4_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP4_DP_DPHY_SYM2 0x251b
+#define mmDP4_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP4_DP_DPHY_8B10B_CNTL 0x251c
+#define mmDP4_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_PRBS_CNTL 0x251d
+#define mmDP4_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_SCRAM_CNTL 0x251e
+#define mmDP4_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_CRC_EN 0x251f
+#define mmDP4_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP4_DP_DPHY_CRC_CNTL 0x2520
+#define mmDP4_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_CRC_RESULT 0x2521
+#define mmDP4_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP4_DP_DPHY_CRC_MST_CNTL 0x2522
+#define mmDP4_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_CRC_MST_STATUS 0x2523
+#define mmDP4_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP4_DP_DPHY_FAST_TRAINING 0x2524
+#define mmDP4_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS 0x2525
+#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL 0x252b
+#define mmDP4_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL1 0x252c
+#define mmDP4_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP4_DP_SEC_FRAMING1 0x252d
+#define mmDP4_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP4_DP_SEC_FRAMING2 0x252e
+#define mmDP4_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP4_DP_SEC_FRAMING3 0x252f
+#define mmDP4_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP4_DP_SEC_FRAMING4 0x2530
+#define mmDP4_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP4_DP_SEC_AUD_N 0x2531
+#define mmDP4_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP4_DP_SEC_AUD_N_READBACK 0x2532
+#define mmDP4_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP4_DP_SEC_AUD_M 0x2533
+#define mmDP4_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP4_DP_SEC_AUD_M_READBACK 0x2534
+#define mmDP4_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP4_DP_SEC_TIMESTAMP 0x2535
+#define mmDP4_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP4_DP_SEC_PACKET_CNTL 0x2536
+#define mmDP4_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP4_DP_MSE_RATE_CNTL 0x2537
+#define mmDP4_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP4_DP_MSE_RATE_UPDATE 0x2539
+#define mmDP4_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT0 0x253a
+#define mmDP4_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT1 0x253b
+#define mmDP4_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT2 0x253c
+#define mmDP4_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT_UPDATE 0x253d
+#define mmDP4_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP4_DP_MSE_LINK_TIMING 0x253e
+#define mmDP4_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP4_DP_MSE_MISC_CNTL 0x253f
+#define mmDP4_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x2544
+#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP4_DP_DPHY_HBR2_PATTERN_CONTROL 0x2545
+#define mmDP4_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT0_STATUS 0x2547
+#define mmDP4_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT1_STATUS 0x2548
+#define mmDP4_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP4_DP_MSE_SAT2_STATUS 0x2549
+#define mmDP4_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP4_DP_MSA_TIMING_PARAM1 0x254c
+#define mmDP4_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP4_DP_MSA_TIMING_PARAM2 0x254d
+#define mmDP4_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP4_DP_MSA_TIMING_PARAM3 0x254e
+#define mmDP4_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP4_DP_MSA_TIMING_PARAM4 0x254f
+#define mmDP4_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP4_DP_MSO_CNTL 0x2550
+#define mmDP4_DP_MSO_CNTL_BASE_IDX 2
+#define mmDP4_DP_MSO_CNTL1 0x2551
+#define mmDP4_DP_MSO_CNTL1_BASE_IDX 2
+#define mmDP4_DP_DSC_CNTL 0x2552
+#define mmDP4_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL2 0x2553
+#define mmDP4_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL3 0x2554
+#define mmDP4_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL4 0x2555
+#define mmDP4_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL5 0x2556
+#define mmDP4_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL6 0x2557
+#define mmDP4_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP4_DP_SEC_CNTL7 0x2558
+#define mmDP4_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP4_DP_DB_CNTL 0x2559
+#define mmDP4_DP_DB_CNTL_BASE_IDX 2
+#define mmDP4_DP_MSA_VBID_MISC 0x255a
+#define mmDP4_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP4_DP_SEC_METADATA_TRANSMISSION 0x255b
+#define mmDP4_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP4_DP_DSC_BYTES_PER_PIXEL 0x255c
+#define mmDP4_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmDP4_DP_ALPM_CNTL 0x255d
+#define mmDP4_DP_ALPM_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+// base address: 0x0
+#define mmDC_GENERICA 0x2868
+#define mmDC_GENERICA_BASE_IDX 2
+#define mmDC_GENERICB 0x2869
+#define mmDC_GENERICB_BASE_IDX 2
+#define mmDC_REF_CLK_CNTL 0x286b
+#define mmDC_REF_CLK_CNTL_BASE_IDX 2
+#define mmUNIPHYA_LINK_CNTL 0x286d
+#define mmUNIPHYA_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL 0x286e
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmUNIPHYB_LINK_CNTL 0x286f
+#define mmUNIPHYB_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL 0x2870
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmUNIPHYC_LINK_CNTL 0x2871
+#define mmUNIPHYC_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYC_CHANNEL_XBAR_CNTL 0x2872
+#define mmUNIPHYC_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmUNIPHYD_LINK_CNTL 0x2873
+#define mmUNIPHYD_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYD_CHANNEL_XBAR_CNTL 0x2874
+#define mmUNIPHYD_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmUNIPHYE_LINK_CNTL 0x2875
+#define mmUNIPHYE_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYE_CHANNEL_XBAR_CNTL 0x2876
+#define mmUNIPHYE_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmDCIO_WRCMD_DELAY 0x287e
+#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2880
+#define mmDC_PINSTRAPS_BASE_IDX 2
+#define mmLVTMA_PWRSEQ_CNTL 0x2883
+#define mmLVTMA_PWRSEQ_CNTL_BASE_IDX 2
+#define mmLVTMA_PWRSEQ_STATE 0x2884
+#define mmLVTMA_PWRSEQ_STATE_BASE_IDX 2
+#define mmLVTMA_PWRSEQ_REF_DIV 0x2885
+#define mmLVTMA_PWRSEQ_REF_DIV_BASE_IDX 2
+#define mmLVTMA_PWRSEQ_DELAY1 0x2886
+#define mmLVTMA_PWRSEQ_DELAY1_BASE_IDX 2
+#define mmLVTMA_PWRSEQ_DELAY2 0x2887
+#define mmLVTMA_PWRSEQ_DELAY2_BASE_IDX 2
+#define mmBL_PWM_CNTL 0x2888
+#define mmBL_PWM_CNTL_BASE_IDX 2
+#define mmBL_PWM_CNTL2 0x2889
+#define mmBL_PWM_CNTL2_BASE_IDX 2
+#define mmBL_PWM_PERIOD_CNTL 0x288a
+#define mmBL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define mmBL_PWM_GRP1_REG_LOCK 0x288b
+#define mmBL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define mmDCIO_GSL_GENLK_PAD_CNTL 0x288c
+#define mmDCIO_GSL_GENLK_PAD_CNTL_BASE_IDX 2
+#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL 0x288d
+#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL_BASE_IDX 2
+#define mmDCIO_CLOCK_CNTL 0x2895
+#define mmDCIO_CLOCK_CNTL_BASE_IDX 2
+#define mmDCIO_SOFT_RESET 0x289e
+#define mmDCIO_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+// base address: 0x0
+#define mmDC_GPIO_GENERIC_MASK 0x28c8
+#define mmDC_GPIO_GENERIC_MASK_BASE_IDX 2
+#define mmDC_GPIO_GENERIC_A 0x28c9
+#define mmDC_GPIO_GENERIC_A_BASE_IDX 2
+#define mmDC_GPIO_GENERIC_EN 0x28ca
+#define mmDC_GPIO_GENERIC_EN_BASE_IDX 2
+#define mmDC_GPIO_GENERIC_Y 0x28cb
+#define mmDC_GPIO_GENERIC_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC1_MASK 0x28d0
+#define mmDC_GPIO_DDC1_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC1_A 0x28d1
+#define mmDC_GPIO_DDC1_A_BASE_IDX 2
+#define mmDC_GPIO_DDC1_EN 0x28d2
+#define mmDC_GPIO_DDC1_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC1_Y 0x28d3
+#define mmDC_GPIO_DDC1_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC2_MASK 0x28d4
+#define mmDC_GPIO_DDC2_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC2_A 0x28d5
+#define mmDC_GPIO_DDC2_A_BASE_IDX 2
+#define mmDC_GPIO_DDC2_EN 0x28d6
+#define mmDC_GPIO_DDC2_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC2_Y 0x28d7
+#define mmDC_GPIO_DDC2_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC3_MASK 0x28d8
+#define mmDC_GPIO_DDC3_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC3_A 0x28d9
+#define mmDC_GPIO_DDC3_A_BASE_IDX 2
+#define mmDC_GPIO_DDC3_EN 0x28da
+#define mmDC_GPIO_DDC3_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC3_Y 0x28db
+#define mmDC_GPIO_DDC3_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC4_MASK 0x28dc
+#define mmDC_GPIO_DDC4_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC4_A 0x28dd
+#define mmDC_GPIO_DDC4_A_BASE_IDX 2
+#define mmDC_GPIO_DDC4_EN 0x28de
+#define mmDC_GPIO_DDC4_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC4_Y 0x28df
+#define mmDC_GPIO_DDC4_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC5_MASK 0x28e0
+#define mmDC_GPIO_DDC5_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC5_A 0x28e1
+#define mmDC_GPIO_DDC5_A_BASE_IDX 2
+#define mmDC_GPIO_DDC5_EN 0x28e2
+#define mmDC_GPIO_DDC5_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC5_Y 0x28e3
+#define mmDC_GPIO_DDC5_Y_BASE_IDX 2
+#define mmDC_GPIO_DDCVGA_MASK 0x28e8
+#define mmDC_GPIO_DDCVGA_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDCVGA_A 0x28e9
+#define mmDC_GPIO_DDCVGA_A_BASE_IDX 2
+#define mmDC_GPIO_DDCVGA_EN 0x28ea
+#define mmDC_GPIO_DDCVGA_EN_BASE_IDX 2
+#define mmDC_GPIO_DDCVGA_Y 0x28eb
+#define mmDC_GPIO_DDCVGA_Y_BASE_IDX 2
+#define mmDC_GPIO_GENLK_MASK 0x28f0
+#define mmDC_GPIO_GENLK_MASK_BASE_IDX 2
+#define mmDC_GPIO_GENLK_A 0x28f1
+#define mmDC_GPIO_GENLK_A_BASE_IDX 2
+#define mmDC_GPIO_GENLK_EN 0x28f2
+#define mmDC_GPIO_GENLK_EN_BASE_IDX 2
+#define mmDC_GPIO_GENLK_Y 0x28f3
+#define mmDC_GPIO_GENLK_Y_BASE_IDX 2
+#define mmDC_GPIO_HPD_MASK 0x28f4
+#define mmDC_GPIO_HPD_MASK_BASE_IDX 2
+#define mmDC_GPIO_HPD_A 0x28f5
+#define mmDC_GPIO_HPD_A_BASE_IDX 2
+#define mmDC_GPIO_HPD_EN 0x28f6
+#define mmDC_GPIO_HPD_EN_BASE_IDX 2
+#define mmDC_GPIO_HPD_Y 0x28f7
+#define mmDC_GPIO_HPD_Y_BASE_IDX 2
+#define mmDC_GPIO_PWRSEQ_MASK 0x28f8
+#define mmDC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define mmDC_GPIO_PWRSEQ_A 0x28f9
+#define mmDC_GPIO_PWRSEQ_A_BASE_IDX 2
+#define mmDC_GPIO_PWRSEQ_EN 0x28fa
+#define mmDC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define mmDC_GPIO_PWRSEQ_Y 0x28fb
+#define mmDC_GPIO_PWRSEQ_Y_BASE_IDX 2
+#define mmDC_GPIO_PAD_STRENGTH_1 0x28fc
+#define mmDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2
+#define mmDC_GPIO_PAD_STRENGTH_2 0x28fd
+#define mmDC_GPIO_PAD_STRENGTH_2_BASE_IDX 2
+#define mmPHY_AUX_CNTL 0x28ff
+#define mmPHY_AUX_CNTL_BASE_IDX 2
+#define mmDC_GPIO_TX12_EN 0x2915
+#define mmDC_GPIO_TX12_EN_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_0 0x2916
+#define mmDC_GPIO_AUX_CTRL_0_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_1 0x2917
+#define mmDC_GPIO_AUX_CTRL_1_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_2 0x2918
+#define mmDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+#define mmDC_GPIO_RXEN 0x2919
+#define mmDC_GPIO_RXEN_BASE_IDX 2
+#define mmDC_GPIO_PULLUPEN 0x291a
+#define mmDC_GPIO_PULLUPEN_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_3 0x291b
+#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_4 0x291c
+#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_5 0x291d
+#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX 2
+#define mmAUXI2C_PAD_ALL_PWR_OK 0x291e
+#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+// base address: 0x0
+#define mmDSC_TOP0_DSC_TOP_CONTROL 0x3000
+#define mmDSC_TOP0_DSC_TOP_CONTROL_BASE_IDX 2
+#define mmDSC_TOP0_DSC_DEBUG_CONTROL 0x3001
+#define mmDSC_TOP0_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+// base address: 0x0
+#define mmDSCCIF0_DSCCIF_CONFIG0 0x3005
+#define mmDSCCIF0_DSCCIF_CONFIG0_BASE_IDX 2
+#define mmDSCCIF0_DSCCIF_CONFIG1 0x3006
+#define mmDSCCIF0_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+// base address: 0x0
+#define mmDSCC0_DSCC_CONFIG0 0x300a
+#define mmDSCC0_DSCC_CONFIG0_BASE_IDX 2
+#define mmDSCC0_DSCC_CONFIG1 0x300b
+#define mmDSCC0_DSCC_CONFIG1_BASE_IDX 2
+#define mmDSCC0_DSCC_STATUS 0x300c
+#define mmDSCC0_DSCC_STATUS_BASE_IDX 2
+#define mmDSCC0_DSCC_INTERRUPT_CONTROL_STATUS 0x300d
+#define mmDSCC0_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG0 0x300e
+#define mmDSCC0_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG1 0x300f
+#define mmDSCC0_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG2 0x3010
+#define mmDSCC0_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG3 0x3011
+#define mmDSCC0_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG4 0x3012
+#define mmDSCC0_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG5 0x3013
+#define mmDSCC0_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG6 0x3014
+#define mmDSCC0_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG7 0x3015
+#define mmDSCC0_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG8 0x3016
+#define mmDSCC0_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG9 0x3017
+#define mmDSCC0_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG10 0x3018
+#define mmDSCC0_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG11 0x3019
+#define mmDSCC0_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG12 0x301a
+#define mmDSCC0_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG13 0x301b
+#define mmDSCC0_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG14 0x301c
+#define mmDSCC0_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG15 0x301d
+#define mmDSCC0_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG16 0x301e
+#define mmDSCC0_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG17 0x301f
+#define mmDSCC0_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG18 0x3020
+#define mmDSCC0_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG19 0x3021
+#define mmDSCC0_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG20 0x3022
+#define mmDSCC0_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG21 0x3023
+#define mmDSCC0_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define mmDSCC0_DSCC_PPS_CONFIG22 0x3024
+#define mmDSCC0_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define mmDSCC0_DSCC_MEM_POWER_CONTROL 0x3025
+#define mmDSCC0_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define mmDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3026
+#define mmDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3027
+#define mmDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3028
+#define mmDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3029
+#define mmDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER 0x302a
+#define mmDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER 0x302b
+#define mmDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC0_DSCC_MAX_ABS_ERROR0 0x302c
+#define mmDSCC0_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define mmDSCC0_DSCC_MAX_ABS_ERROR1 0x302d
+#define mmDSCC0_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x302e
+#define mmDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x302f
+#define mmDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3030
+#define mmDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3031
+#define mmDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3032
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3033
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3034
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
+#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc140
+#define mmDC_PERFMON19_PERFCOUNTER_CNTL 0x3050
+#define mmDC_PERFMON19_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON19_PERFCOUNTER_CNTL2 0x3051
+#define mmDC_PERFMON19_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON19_PERFCOUNTER_STATE 0x3052
+#define mmDC_PERFMON19_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON19_PERFMON_CNTL 0x3053
+#define mmDC_PERFMON19_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON19_PERFMON_CNTL2 0x3054
+#define mmDC_PERFMON19_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON19_PERFMON_CVALUE_INT_MISC 0x3055
+#define mmDC_PERFMON19_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON19_PERFMON_CVALUE_LOW 0x3056
+#define mmDC_PERFMON19_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON19_PERFMON_HI 0x3057
+#define mmDC_PERFMON19_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON19_PERFMON_LOW 0x3058
+#define mmDC_PERFMON19_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+// base address: 0x170
+#define mmDSC_TOP1_DSC_TOP_CONTROL 0x305c
+#define mmDSC_TOP1_DSC_TOP_CONTROL_BASE_IDX 2
+#define mmDSC_TOP1_DSC_DEBUG_CONTROL 0x305d
+#define mmDSC_TOP1_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+// base address: 0x170
+#define mmDSCCIF1_DSCCIF_CONFIG0 0x3061
+#define mmDSCCIF1_DSCCIF_CONFIG0_BASE_IDX 2
+#define mmDSCCIF1_DSCCIF_CONFIG1 0x3062
+#define mmDSCCIF1_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+// base address: 0x170
+#define mmDSCC1_DSCC_CONFIG0 0x3066
+#define mmDSCC1_DSCC_CONFIG0_BASE_IDX 2
+#define mmDSCC1_DSCC_CONFIG1 0x3067
+#define mmDSCC1_DSCC_CONFIG1_BASE_IDX 2
+#define mmDSCC1_DSCC_STATUS 0x3068
+#define mmDSCC1_DSCC_STATUS_BASE_IDX 2
+#define mmDSCC1_DSCC_INTERRUPT_CONTROL_STATUS 0x3069
+#define mmDSCC1_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG0 0x306a
+#define mmDSCC1_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG1 0x306b
+#define mmDSCC1_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG2 0x306c
+#define mmDSCC1_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG3 0x306d
+#define mmDSCC1_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG4 0x306e
+#define mmDSCC1_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG5 0x306f
+#define mmDSCC1_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG6 0x3070
+#define mmDSCC1_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG7 0x3071
+#define mmDSCC1_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG8 0x3072
+#define mmDSCC1_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG9 0x3073
+#define mmDSCC1_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG10 0x3074
+#define mmDSCC1_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG11 0x3075
+#define mmDSCC1_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG12 0x3076
+#define mmDSCC1_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG13 0x3077
+#define mmDSCC1_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG14 0x3078
+#define mmDSCC1_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG15 0x3079
+#define mmDSCC1_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG16 0x307a
+#define mmDSCC1_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG17 0x307b
+#define mmDSCC1_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG18 0x307c
+#define mmDSCC1_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG19 0x307d
+#define mmDSCC1_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG20 0x307e
+#define mmDSCC1_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG21 0x307f
+#define mmDSCC1_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define mmDSCC1_DSCC_PPS_CONFIG22 0x3080
+#define mmDSCC1_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define mmDSCC1_DSCC_MEM_POWER_CONTROL 0x3081
+#define mmDSCC1_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define mmDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3082
+#define mmDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3083
+#define mmDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3084
+#define mmDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3085
+#define mmDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER 0x3086
+#define mmDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER 0x3087
+#define mmDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC1_DSCC_MAX_ABS_ERROR0 0x3088
+#define mmDSCC1_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define mmDSCC1_DSCC_MAX_ABS_ERROR1 0x3089
+#define mmDSCC1_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x308a
+#define mmDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x308b
+#define mmDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x308c
+#define mmDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x308d
+#define mmDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x308e
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x308f
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3090
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
+#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc2b0
+#define mmDC_PERFMON20_PERFCOUNTER_CNTL 0x30ac
+#define mmDC_PERFMON20_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON20_PERFCOUNTER_CNTL2 0x30ad
+#define mmDC_PERFMON20_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON20_PERFCOUNTER_STATE 0x30ae
+#define mmDC_PERFMON20_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON20_PERFMON_CNTL 0x30af
+#define mmDC_PERFMON20_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON20_PERFMON_CNTL2 0x30b0
+#define mmDC_PERFMON20_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON20_PERFMON_CVALUE_INT_MISC 0x30b1
+#define mmDC_PERFMON20_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON20_PERFMON_CVALUE_LOW 0x30b2
+#define mmDC_PERFMON20_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON20_PERFMON_HI 0x30b3
+#define mmDC_PERFMON20_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON20_PERFMON_LOW 0x30b4
+#define mmDC_PERFMON20_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+// base address: 0x2e0
+#define mmDSC_TOP2_DSC_TOP_CONTROL 0x30b8
+#define mmDSC_TOP2_DSC_TOP_CONTROL_BASE_IDX 2
+#define mmDSC_TOP2_DSC_DEBUG_CONTROL 0x30b9
+#define mmDSC_TOP2_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+// base address: 0x2e0
+#define mmDSCCIF2_DSCCIF_CONFIG0 0x30bd
+#define mmDSCCIF2_DSCCIF_CONFIG0_BASE_IDX 2
+#define mmDSCCIF2_DSCCIF_CONFIG1 0x30be
+#define mmDSCCIF2_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+// base address: 0x2e0
+#define mmDSCC2_DSCC_CONFIG0 0x30c2
+#define mmDSCC2_DSCC_CONFIG0_BASE_IDX 2
+#define mmDSCC2_DSCC_CONFIG1 0x30c3
+#define mmDSCC2_DSCC_CONFIG1_BASE_IDX 2
+#define mmDSCC2_DSCC_STATUS 0x30c4
+#define mmDSCC2_DSCC_STATUS_BASE_IDX 2
+#define mmDSCC2_DSCC_INTERRUPT_CONTROL_STATUS 0x30c5
+#define mmDSCC2_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG0 0x30c6
+#define mmDSCC2_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG1 0x30c7
+#define mmDSCC2_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG2 0x30c8
+#define mmDSCC2_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG3 0x30c9
+#define mmDSCC2_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG4 0x30ca
+#define mmDSCC2_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG5 0x30cb
+#define mmDSCC2_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG6 0x30cc
+#define mmDSCC2_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG7 0x30cd
+#define mmDSCC2_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG8 0x30ce
+#define mmDSCC2_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG9 0x30cf
+#define mmDSCC2_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG10 0x30d0
+#define mmDSCC2_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG11 0x30d1
+#define mmDSCC2_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG12 0x30d2
+#define mmDSCC2_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG13 0x30d3
+#define mmDSCC2_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG14 0x30d4
+#define mmDSCC2_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG15 0x30d5
+#define mmDSCC2_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG16 0x30d6
+#define mmDSCC2_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG17 0x30d7
+#define mmDSCC2_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG18 0x30d8
+#define mmDSCC2_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG19 0x30d9
+#define mmDSCC2_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG20 0x30da
+#define mmDSCC2_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG21 0x30db
+#define mmDSCC2_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define mmDSCC2_DSCC_PPS_CONFIG22 0x30dc
+#define mmDSCC2_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define mmDSCC2_DSCC_MEM_POWER_CONTROL 0x30dd
+#define mmDSCC2_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define mmDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER 0x30de
+#define mmDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER 0x30df
+#define mmDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER 0x30e0
+#define mmDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER 0x30e1
+#define mmDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER 0x30e2
+#define mmDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER 0x30e3
+#define mmDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC2_DSCC_MAX_ABS_ERROR0 0x30e4
+#define mmDSCC2_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define mmDSCC2_DSCC_MAX_ABS_ERROR1 0x30e5
+#define mmDSCC2_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x30e6
+#define mmDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x30e7
+#define mmDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x30e8
+#define mmDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x30e9
+#define mmDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x30ea
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x30eb
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x30ec
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
+#define mmDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define mmDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc420
+#define mmDC_PERFMON21_PERFCOUNTER_CNTL 0x3108
+#define mmDC_PERFMON21_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON21_PERFCOUNTER_CNTL2 0x3109
+#define mmDC_PERFMON21_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON21_PERFCOUNTER_STATE 0x310a
+#define mmDC_PERFMON21_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON21_PERFMON_CNTL 0x310b
+#define mmDC_PERFMON21_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON21_PERFMON_CNTL2 0x310c
+#define mmDC_PERFMON21_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON21_PERFMON_CVALUE_INT_MISC 0x310d
+#define mmDC_PERFMON21_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON21_PERFMON_CVALUE_LOW 0x310e
+#define mmDC_PERFMON21_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON21_PERFMON_HI 0x310f
+#define mmDC_PERFMON21_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON21_PERFMON_LOW 0x3110
+#define mmDC_PERFMON21_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+// base address: 0x450
+#define mmDSC_TOP3_DSC_TOP_CONTROL 0x3114
+#define mmDSC_TOP3_DSC_TOP_CONTROL_BASE_IDX 2
+#define mmDSC_TOP3_DSC_DEBUG_CONTROL 0x3115
+#define mmDSC_TOP3_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+// base address: 0x450
+#define mmDSCCIF3_DSCCIF_CONFIG0 0x3119
+#define mmDSCCIF3_DSCCIF_CONFIG0_BASE_IDX 2
+#define mmDSCCIF3_DSCCIF_CONFIG1 0x311a
+#define mmDSCCIF3_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+// base address: 0x450
+#define mmDSCC3_DSCC_CONFIG0 0x311e
+#define mmDSCC3_DSCC_CONFIG0_BASE_IDX 2
+#define mmDSCC3_DSCC_CONFIG1 0x311f
+#define mmDSCC3_DSCC_CONFIG1_BASE_IDX 2
+#define mmDSCC3_DSCC_STATUS 0x3120
+#define mmDSCC3_DSCC_STATUS_BASE_IDX 2
+#define mmDSCC3_DSCC_INTERRUPT_CONTROL_STATUS 0x3121
+#define mmDSCC3_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG0 0x3122
+#define mmDSCC3_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG1 0x3123
+#define mmDSCC3_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG2 0x3124
+#define mmDSCC3_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG3 0x3125
+#define mmDSCC3_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG4 0x3126
+#define mmDSCC3_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG5 0x3127
+#define mmDSCC3_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG6 0x3128
+#define mmDSCC3_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG7 0x3129
+#define mmDSCC3_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG8 0x312a
+#define mmDSCC3_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG9 0x312b
+#define mmDSCC3_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG10 0x312c
+#define mmDSCC3_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG11 0x312d
+#define mmDSCC3_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG12 0x312e
+#define mmDSCC3_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG13 0x312f
+#define mmDSCC3_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG14 0x3130
+#define mmDSCC3_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG15 0x3131
+#define mmDSCC3_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG16 0x3132
+#define mmDSCC3_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG17 0x3133
+#define mmDSCC3_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG18 0x3134
+#define mmDSCC3_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG19 0x3135
+#define mmDSCC3_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG20 0x3136
+#define mmDSCC3_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG21 0x3137
+#define mmDSCC3_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define mmDSCC3_DSCC_PPS_CONFIG22 0x3138
+#define mmDSCC3_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define mmDSCC3_DSCC_MEM_POWER_CONTROL 0x3139
+#define mmDSCC3_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define mmDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER 0x313a
+#define mmDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER 0x313b
+#define mmDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER 0x313c
+#define mmDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER 0x313d
+#define mmDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER 0x313e
+#define mmDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER 0x313f
+#define mmDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC3_DSCC_MAX_ABS_ERROR0 0x3140
+#define mmDSCC3_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define mmDSCC3_DSCC_MAX_ABS_ERROR1 0x3141
+#define mmDSCC3_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x3142
+#define mmDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x3143
+#define mmDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3144
+#define mmDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3145
+#define mmDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3146
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3147
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3148
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
+#define mmDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define mmDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc590
+#define mmDC_PERFMON22_PERFCOUNTER_CNTL 0x3164
+#define mmDC_PERFMON22_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON22_PERFCOUNTER_CNTL2 0x3165
+#define mmDC_PERFMON22_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON22_PERFCOUNTER_STATE 0x3166
+#define mmDC_PERFMON22_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON22_PERFMON_CNTL 0x3167
+#define mmDC_PERFMON22_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON22_PERFMON_CNTL2 0x3168
+#define mmDC_PERFMON22_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON22_PERFMON_CVALUE_INT_MISC 0x3169
+#define mmDC_PERFMON22_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON22_PERFMON_CVALUE_LOW 0x316a
+#define mmDC_PERFMON22_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON22_PERFMON_HI 0x316b
+#define mmDC_PERFMON22_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON22_PERFMON_LOW 0x316c
+#define mmDC_PERFMON22_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc4_dispdec_dsc_top_dispdec
+// base address: 0x5c0
+#define mmDSC_TOP4_DSC_TOP_CONTROL 0x3170
+#define mmDSC_TOP4_DSC_TOP_CONTROL_BASE_IDX 2
+#define mmDSC_TOP4_DSC_DEBUG_CONTROL 0x3171
+#define mmDSC_TOP4_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc4_dispdec_dsccif_dispdec
+// base address: 0x5c0
+#define mmDSCCIF4_DSCCIF_CONFIG0 0x3175
+#define mmDSCCIF4_DSCCIF_CONFIG0_BASE_IDX 2
+#define mmDSCCIF4_DSCCIF_CONFIG1 0x3176
+#define mmDSCCIF4_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc4_dispdec_dscc_dispdec
+// base address: 0x5c0
+#define mmDSCC4_DSCC_CONFIG0 0x317a
+#define mmDSCC4_DSCC_CONFIG0_BASE_IDX 2
+#define mmDSCC4_DSCC_CONFIG1 0x317b
+#define mmDSCC4_DSCC_CONFIG1_BASE_IDX 2
+#define mmDSCC4_DSCC_STATUS 0x317c
+#define mmDSCC4_DSCC_STATUS_BASE_IDX 2
+#define mmDSCC4_DSCC_INTERRUPT_CONTROL_STATUS 0x317d
+#define mmDSCC4_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG0 0x317e
+#define mmDSCC4_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG1 0x317f
+#define mmDSCC4_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG2 0x3180
+#define mmDSCC4_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG3 0x3181
+#define mmDSCC4_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG4 0x3182
+#define mmDSCC4_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG5 0x3183
+#define mmDSCC4_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG6 0x3184
+#define mmDSCC4_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG7 0x3185
+#define mmDSCC4_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG8 0x3186
+#define mmDSCC4_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG9 0x3187
+#define mmDSCC4_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG10 0x3188
+#define mmDSCC4_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG11 0x3189
+#define mmDSCC4_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG12 0x318a
+#define mmDSCC4_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG13 0x318b
+#define mmDSCC4_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG14 0x318c
+#define mmDSCC4_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG15 0x318d
+#define mmDSCC4_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG16 0x318e
+#define mmDSCC4_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG17 0x318f
+#define mmDSCC4_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG18 0x3190
+#define mmDSCC4_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG19 0x3191
+#define mmDSCC4_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG20 0x3192
+#define mmDSCC4_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG21 0x3193
+#define mmDSCC4_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define mmDSCC4_DSCC_PPS_CONFIG22 0x3194
+#define mmDSCC4_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define mmDSCC4_DSCC_MEM_POWER_CONTROL 0x3195
+#define mmDSCC4_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define mmDSCC4_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3196
+#define mmDSCC4_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC4_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3197
+#define mmDSCC4_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC4_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3198
+#define mmDSCC4_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC4_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3199
+#define mmDSCC4_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC4_DSCC_B_CR_SQUARED_ERROR_LOWER 0x319a
+#define mmDSCC4_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC4_DSCC_B_CR_SQUARED_ERROR_UPPER 0x319b
+#define mmDSCC4_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC4_DSCC_MAX_ABS_ERROR0 0x319c
+#define mmDSCC4_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define mmDSCC4_DSCC_MAX_ABS_ERROR1 0x319d
+#define mmDSCC4_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x319e
+#define mmDSCC4_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x319f
+#define mmDSCC4_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x31a0
+#define mmDSCC4_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x31a1
+#define mmDSCC4_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x31a2
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x31a3
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x31a4
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x31a5
+#define mmDSCC4_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC4_DSCC_TEST_DEBUG_BUS_ROTATE 0x31aa
+#define mmDSCC4_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+// addressBlock: dce_dc_dsc4_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc700
+#define mmDC_PERFMON23_PERFCOUNTER_CNTL 0x31c0
+#define mmDC_PERFMON23_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON23_PERFCOUNTER_CNTL2 0x31c1
+#define mmDC_PERFMON23_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON23_PERFCOUNTER_STATE 0x31c2
+#define mmDC_PERFMON23_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON23_PERFMON_CNTL 0x31c3
+#define mmDC_PERFMON23_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON23_PERFMON_CNTL2 0x31c4
+#define mmDC_PERFMON23_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON23_PERFMON_CVALUE_INT_MISC 0x31c5
+#define mmDC_PERFMON23_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON23_PERFMON_CVALUE_LOW 0x31c6
+#define mmDC_PERFMON23_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON23_PERFMON_HI 0x31c7
+#define mmDC_PERFMON23_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON23_PERFMON_LOW 0x31c8
+#define mmDC_PERFMON23_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dsc_top_dispdec
+// base address: 0x730
+#define mmDSC_TOP5_DSC_TOP_CONTROL 0x31cc
+#define mmDSC_TOP5_DSC_TOP_CONTROL_BASE_IDX 2
+#define mmDSC_TOP5_DSC_DEBUG_CONTROL 0x31cd
+#define mmDSC_TOP5_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dsccif_dispdec
+// base address: 0x730
+#define mmDSCCIF5_DSCCIF_CONFIG0 0x31d1
+#define mmDSCCIF5_DSCCIF_CONFIG0_BASE_IDX 2
+#define mmDSCCIF5_DSCCIF_CONFIG1 0x31d2
+#define mmDSCCIF5_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dscc_dispdec
+// base address: 0x730
+#define mmDSCC5_DSCC_CONFIG0 0x31d6
+#define mmDSCC5_DSCC_CONFIG0_BASE_IDX 2
+#define mmDSCC5_DSCC_CONFIG1 0x31d7
+#define mmDSCC5_DSCC_CONFIG1_BASE_IDX 2
+#define mmDSCC5_DSCC_STATUS 0x31d8
+#define mmDSCC5_DSCC_STATUS_BASE_IDX 2
+#define mmDSCC5_DSCC_INTERRUPT_CONTROL_STATUS 0x31d9
+#define mmDSCC5_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG0 0x31da
+#define mmDSCC5_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG1 0x31db
+#define mmDSCC5_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG2 0x31dc
+#define mmDSCC5_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG3 0x31dd
+#define mmDSCC5_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG4 0x31de
+#define mmDSCC5_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG5 0x31df
+#define mmDSCC5_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG6 0x31e0
+#define mmDSCC5_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG7 0x31e1
+#define mmDSCC5_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG8 0x31e2
+#define mmDSCC5_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG9 0x31e3
+#define mmDSCC5_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG10 0x31e4
+#define mmDSCC5_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG11 0x31e5
+#define mmDSCC5_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG12 0x31e6
+#define mmDSCC5_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG13 0x31e7
+#define mmDSCC5_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG14 0x31e8
+#define mmDSCC5_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG15 0x31e9
+#define mmDSCC5_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG16 0x31ea
+#define mmDSCC5_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG17 0x31eb
+#define mmDSCC5_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG18 0x31ec
+#define mmDSCC5_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG19 0x31ed
+#define mmDSCC5_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG20 0x31ee
+#define mmDSCC5_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG21 0x31ef
+#define mmDSCC5_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define mmDSCC5_DSCC_PPS_CONFIG22 0x31f0
+#define mmDSCC5_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define mmDSCC5_DSCC_MEM_POWER_CONTROL 0x31f1
+#define mmDSCC5_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define mmDSCC5_DSCC_R_Y_SQUARED_ERROR_LOWER 0x31f2
+#define mmDSCC5_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC5_DSCC_R_Y_SQUARED_ERROR_UPPER 0x31f3
+#define mmDSCC5_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC5_DSCC_G_CB_SQUARED_ERROR_LOWER 0x31f4
+#define mmDSCC5_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC5_DSCC_G_CB_SQUARED_ERROR_UPPER 0x31f5
+#define mmDSCC5_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC5_DSCC_B_CR_SQUARED_ERROR_LOWER 0x31f6
+#define mmDSCC5_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define mmDSCC5_DSCC_B_CR_SQUARED_ERROR_UPPER 0x31f7
+#define mmDSCC5_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define mmDSCC5_DSCC_MAX_ABS_ERROR0 0x31f8
+#define mmDSCC5_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define mmDSCC5_DSCC_MAX_ABS_ERROR1 0x31f9
+#define mmDSCC5_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x31fa
+#define mmDSCC5_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x31fb
+#define mmDSCC5_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x31fc
+#define mmDSCC5_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x31fd
+#define mmDSCC5_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x31fe
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x31ff
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3200
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3201
+#define mmDSCC5_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC5_DSCC_TEST_DEBUG_BUS_ROTATE 0x3206
+#define mmDSCC5_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc870
+#define mmDC_PERFMON24_PERFCOUNTER_CNTL 0x321c
+#define mmDC_PERFMON24_PERFCOUNTER_CNTL_BASE_IDX 2
+#define mmDC_PERFMON24_PERFCOUNTER_CNTL2 0x321d
+#define mmDC_PERFMON24_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON24_PERFCOUNTER_STATE 0x321e
+#define mmDC_PERFMON24_PERFCOUNTER_STATE_BASE_IDX 2
+#define mmDC_PERFMON24_PERFMON_CNTL 0x321f
+#define mmDC_PERFMON24_PERFMON_CNTL_BASE_IDX 2
+#define mmDC_PERFMON24_PERFMON_CNTL2 0x3220
+#define mmDC_PERFMON24_PERFMON_CNTL2_BASE_IDX 2
+#define mmDC_PERFMON24_PERFMON_CVALUE_INT_MISC 0x3221
+#define mmDC_PERFMON24_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define mmDC_PERFMON24_PERFMON_CVALUE_LOW 0x3222
+#define mmDC_PERFMON24_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define mmDC_PERFMON24_PERFMON_HI 0x3223
+#define mmDC_PERFMON24_PERFMON_HI_BASE_IDX 2
+#define mmDC_PERFMON24_PERFMON_LOW 0x3224
+#define mmDC_PERFMON24_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+// base address: 0x0
+#define mmDMCUB_REGION0_OFFSET 0x3238
+#define mmDMCUB_REGION0_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION0_OFFSET_HIGH 0x3239
+#define mmDMCUB_REGION0_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION1_OFFSET 0x323a
+#define mmDMCUB_REGION1_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION1_OFFSET_HIGH 0x323b
+#define mmDMCUB_REGION1_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION2_OFFSET 0x323c
+#define mmDMCUB_REGION2_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION2_OFFSET_HIGH 0x323d
+#define mmDMCUB_REGION2_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION4_OFFSET 0x3240
+#define mmDMCUB_REGION4_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION4_OFFSET_HIGH 0x3241
+#define mmDMCUB_REGION4_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION5_OFFSET 0x3242
+#define mmDMCUB_REGION5_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION5_OFFSET_HIGH 0x3243
+#define mmDMCUB_REGION5_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION6_OFFSET 0x3244
+#define mmDMCUB_REGION6_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION6_OFFSET_HIGH 0x3245
+#define mmDMCUB_REGION6_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION7_OFFSET 0x3246
+#define mmDMCUB_REGION7_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION7_OFFSET_HIGH 0x3247
+#define mmDMCUB_REGION7_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION0_TOP_ADDRESS 0x3248
+#define mmDMCUB_REGION0_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION1_TOP_ADDRESS 0x3249
+#define mmDMCUB_REGION1_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION2_TOP_ADDRESS 0x324a
+#define mmDMCUB_REGION2_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION4_TOP_ADDRESS 0x324b
+#define mmDMCUB_REGION4_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION5_TOP_ADDRESS 0x324c
+#define mmDMCUB_REGION5_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION6_TOP_ADDRESS 0x324d
+#define mmDMCUB_REGION6_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION7_TOP_ADDRESS 0x324e
+#define mmDMCUB_REGION7_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW0_BASE_ADDRESS 0x324f
+#define mmDMCUB_REGION3_CW0_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW1_BASE_ADDRESS 0x3250
+#define mmDMCUB_REGION3_CW1_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW2_BASE_ADDRESS 0x3251
+#define mmDMCUB_REGION3_CW2_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW3_BASE_ADDRESS 0x3252
+#define mmDMCUB_REGION3_CW3_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW4_BASE_ADDRESS 0x3253
+#define mmDMCUB_REGION3_CW4_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW5_BASE_ADDRESS 0x3254
+#define mmDMCUB_REGION3_CW5_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW6_BASE_ADDRESS 0x3255
+#define mmDMCUB_REGION3_CW6_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW7_BASE_ADDRESS 0x3256
+#define mmDMCUB_REGION3_CW7_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW0_TOP_ADDRESS 0x3257
+#define mmDMCUB_REGION3_CW0_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW1_TOP_ADDRESS 0x3258
+#define mmDMCUB_REGION3_CW1_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW2_TOP_ADDRESS 0x3259
+#define mmDMCUB_REGION3_CW2_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW3_TOP_ADDRESS 0x325a
+#define mmDMCUB_REGION3_CW3_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW4_TOP_ADDRESS 0x325b
+#define mmDMCUB_REGION3_CW4_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW5_TOP_ADDRESS 0x325c
+#define mmDMCUB_REGION3_CW5_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW6_TOP_ADDRESS 0x325d
+#define mmDMCUB_REGION3_CW6_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW7_TOP_ADDRESS 0x325e
+#define mmDMCUB_REGION3_CW7_TOP_ADDRESS_BASE_IDX 2
+#define mmDMCUB_REGION3_CW0_OFFSET 0x325f
+#define mmDMCUB_REGION3_CW0_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW0_OFFSET_HIGH 0x3260
+#define mmDMCUB_REGION3_CW0_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW1_OFFSET 0x3261
+#define mmDMCUB_REGION3_CW1_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW1_OFFSET_HIGH 0x3262
+#define mmDMCUB_REGION3_CW1_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW2_OFFSET 0x3263
+#define mmDMCUB_REGION3_CW2_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW2_OFFSET_HIGH 0x3264
+#define mmDMCUB_REGION3_CW2_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW3_OFFSET 0x3265
+#define mmDMCUB_REGION3_CW3_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW3_OFFSET_HIGH 0x3266
+#define mmDMCUB_REGION3_CW3_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW4_OFFSET 0x3267
+#define mmDMCUB_REGION3_CW4_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW4_OFFSET_HIGH 0x3268
+#define mmDMCUB_REGION3_CW4_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW5_OFFSET 0x3269
+#define mmDMCUB_REGION3_CW5_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW5_OFFSET_HIGH 0x326a
+#define mmDMCUB_REGION3_CW5_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW6_OFFSET 0x326b
+#define mmDMCUB_REGION3_CW6_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW6_OFFSET_HIGH 0x326c
+#define mmDMCUB_REGION3_CW6_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_REGION3_CW7_OFFSET 0x326d
+#define mmDMCUB_REGION3_CW7_OFFSET_BASE_IDX 2
+#define mmDMCUB_REGION3_CW7_OFFSET_HIGH 0x326e
+#define mmDMCUB_REGION3_CW7_OFFSET_HIGH_BASE_IDX 2
+#define mmDMCUB_INTERRUPT_ENABLE 0x326f
+#define mmDMCUB_INTERRUPT_ENABLE_BASE_IDX 2
+#define mmDMCUB_INTERRUPT_ACK 0x3270
+#define mmDMCUB_INTERRUPT_ACK_BASE_IDX 2
+#define mmDMCUB_INTERRUPT_STATUS 0x3271
+#define mmDMCUB_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDMCUB_INTERRUPT_TYPE 0x3272
+#define mmDMCUB_INTERRUPT_TYPE_BASE_IDX 2
+#define mmDMCUB_EXT_INTERRUPT_STATUS 0x3273
+#define mmDMCUB_EXT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDMCUB_EXT_INTERRUPT_CTXID 0x3274
+#define mmDMCUB_EXT_INTERRUPT_CTXID_BASE_IDX 2
+#define mmDMCUB_EXT_INTERRUPT_ACK 0x3275
+#define mmDMCUB_EXT_INTERRUPT_ACK_BASE_IDX 2
+#define mmDMCUB_INST_FETCH_FAULT_ADDR 0x3276
+#define mmDMCUB_INST_FETCH_FAULT_ADDR_BASE_IDX 2
+#define mmDMCUB_DATA_WRITE_FAULT_ADDR 0x3277
+#define mmDMCUB_DATA_WRITE_FAULT_ADDR_BASE_IDX 2
+#define mmDMCUB_SEC_CNTL 0x3278
+#define mmDMCUB_SEC_CNTL_BASE_IDX 2
+#define mmDMCUB_MEM_CNTL 0x3279
+#define mmDMCUB_MEM_CNTL_BASE_IDX 2
+#define mmDMCUB_INBOX0_BASE_ADDRESS 0x327a
+#define mmDMCUB_INBOX0_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_INBOX0_SIZE 0x327b
+#define mmDMCUB_INBOX0_SIZE_BASE_IDX 2
+#define mmDMCUB_INBOX0_WPTR 0x327c
+#define mmDMCUB_INBOX0_WPTR_BASE_IDX 2
+#define mmDMCUB_INBOX0_RPTR 0x327d
+#define mmDMCUB_INBOX0_RPTR_BASE_IDX 2
+#define mmDMCUB_INBOX1_BASE_ADDRESS 0x327e
+#define mmDMCUB_INBOX1_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_INBOX1_SIZE 0x327f
+#define mmDMCUB_INBOX1_SIZE_BASE_IDX 2
+#define mmDMCUB_INBOX1_WPTR 0x3280
+#define mmDMCUB_INBOX1_WPTR_BASE_IDX 2
+#define mmDMCUB_INBOX1_RPTR 0x3281
+#define mmDMCUB_INBOX1_RPTR_BASE_IDX 2
+#define mmDMCUB_OUTBOX0_BASE_ADDRESS 0x3282
+#define mmDMCUB_OUTBOX0_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_OUTBOX0_SIZE 0x3283
+#define mmDMCUB_OUTBOX0_SIZE_BASE_IDX 2
+#define mmDMCUB_OUTBOX0_WPTR 0x3284
+#define mmDMCUB_OUTBOX0_WPTR_BASE_IDX 2
+#define mmDMCUB_OUTBOX0_RPTR 0x3285
+#define mmDMCUB_OUTBOX0_RPTR_BASE_IDX 2
+#define mmDMCUB_OUTBOX1_BASE_ADDRESS 0x3286
+#define mmDMCUB_OUTBOX1_BASE_ADDRESS_BASE_IDX 2
+#define mmDMCUB_OUTBOX1_SIZE 0x3287
+#define mmDMCUB_OUTBOX1_SIZE_BASE_IDX 2
+#define mmDMCUB_OUTBOX1_WPTR 0x3288
+#define mmDMCUB_OUTBOX1_WPTR_BASE_IDX 2
+#define mmDMCUB_OUTBOX1_RPTR 0x3289
+#define mmDMCUB_OUTBOX1_RPTR_BASE_IDX 2
+#define mmDMCUB_TIMER_TRIGGER0 0x328a
+#define mmDMCUB_TIMER_TRIGGER0_BASE_IDX 2
+#define mmDMCUB_TIMER_TRIGGER1 0x328b
+#define mmDMCUB_TIMER_TRIGGER1_BASE_IDX 2
+#define mmDMCUB_TIMER_WINDOW 0x328c
+#define mmDMCUB_TIMER_WINDOW_BASE_IDX 2
+#define mmDMCUB_SCRATCH0 0x328d
+#define mmDMCUB_SCRATCH0_BASE_IDX 2
+#define mmDMCUB_SCRATCH1 0x328e
+#define mmDMCUB_SCRATCH1_BASE_IDX 2
+#define mmDMCUB_SCRATCH2 0x328f
+#define mmDMCUB_SCRATCH2_BASE_IDX 2
+#define mmDMCUB_SCRATCH3 0x3290
+#define mmDMCUB_SCRATCH3_BASE_IDX 2
+#define mmDMCUB_SCRATCH4 0x3291
+#define mmDMCUB_SCRATCH4_BASE_IDX 2
+#define mmDMCUB_SCRATCH5 0x3292
+#define mmDMCUB_SCRATCH5_BASE_IDX 2
+#define mmDMCUB_SCRATCH6 0x3293
+#define mmDMCUB_SCRATCH6_BASE_IDX 2
+#define mmDMCUB_SCRATCH7 0x3294
+#define mmDMCUB_SCRATCH7_BASE_IDX 2
+#define mmDMCUB_SCRATCH8 0x3295
+#define mmDMCUB_SCRATCH8_BASE_IDX 2
+#define mmDMCUB_SCRATCH9 0x3296
+#define mmDMCUB_SCRATCH9_BASE_IDX 2
+#define mmDMCUB_SCRATCH10 0x3297
+#define mmDMCUB_SCRATCH10_BASE_IDX 2
+#define mmDMCUB_SCRATCH11 0x3298
+#define mmDMCUB_SCRATCH11_BASE_IDX 2
+#define mmDMCUB_SCRATCH12 0x3299
+#define mmDMCUB_SCRATCH12_BASE_IDX 2
+#define mmDMCUB_SCRATCH13 0x329a
+#define mmDMCUB_SCRATCH13_BASE_IDX 2
+#define mmDMCUB_SCRATCH14 0x329b
+#define mmDMCUB_SCRATCH14_BASE_IDX 2
+#define mmDMCUB_SCRATCH15 0x329c
+#define mmDMCUB_SCRATCH15_BASE_IDX 2
+#define mmDMCUB_CNTL 0x32a0
+#define mmDMCUB_CNTL_BASE_IDX 2
+#define mmDMCUB_GPINT_DATAIN0 0x32a1
+#define mmDMCUB_GPINT_DATAIN0_BASE_IDX 2
+#define mmDMCUB_GPINT_DATAIN1 0x32a2
+#define mmDMCUB_GPINT_DATAIN1_BASE_IDX 2
+#define mmDMCUB_GPINT_DATAOUT 0x32a3
+#define mmDMCUB_GPINT_DATAOUT_BASE_IDX 2
+#define mmDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR 0x32a4
+#define mmDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_BASE_IDX 2
+#define mmDMCUB_LS_WAKE_INT_ENABLE 0x32a5
+#define mmDMCUB_LS_WAKE_INT_ENABLE_BASE_IDX 2
+#define mmDMCUB_MEM_PWR_CNTL 0x32a6
+#define mmDMCUB_MEM_PWR_CNTL_BASE_IDX 2
+#define mmDMCUB_TIMER_CURRENT 0x32a7
+#define mmDMCUB_TIMER_CURRENT_BASE_IDX 2
+#define mmDMCUB_PROC_ID 0x32a9
+#define mmDMCUB_PROC_ID_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb2_dispdec
+// base address: 0xc6b8
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL 0x3460
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_CUR_LINE_R 0x3461
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_CUR_LINE_R_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_STATUS 0x3462
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_STATUS_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_PITCH 0x3463
+#define mmMCIF_WB2_MCIF_WB_BUF_PITCH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_STATUS 0x3464
+#define mmMCIF_WB2_MCIF_WB_BUF_1_STATUS_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_STATUS2 0x3465
+#define mmMCIF_WB2_MCIF_WB_BUF_1_STATUS2_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_STATUS 0x3466
+#define mmMCIF_WB2_MCIF_WB_BUF_2_STATUS_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_STATUS2 0x3467
+#define mmMCIF_WB2_MCIF_WB_BUF_2_STATUS2_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_STATUS 0x3468
+#define mmMCIF_WB2_MCIF_WB_BUF_3_STATUS_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_STATUS2 0x3469
+#define mmMCIF_WB2_MCIF_WB_BUF_3_STATUS2_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_STATUS 0x346a
+#define mmMCIF_WB2_MCIF_WB_BUF_4_STATUS_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_STATUS2 0x346b
+#define mmMCIF_WB2_MCIF_WB_BUF_4_STATUS2_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_ARBITRATION_CONTROL 0x346c
+#define mmMCIF_WB2_MCIF_WB_ARBITRATION_CONTROL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_SCLK_CHANGE 0x346d
+#define mmMCIF_WB2_MCIF_WB_SCLK_CHANGE_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX 0x346e
+#define mmMCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_TEST_DEBUG_DATA 0x346f
+#define mmMCIF_WB2_MCIF_WB_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y 0x3470
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_OFFSET 0x3471
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C 0x3472
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_OFFSET 0x3473
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y 0x3474
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_OFFSET 0x3475
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C 0x3476
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_OFFSET 0x3477
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y 0x3478
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_OFFSET 0x3479
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C 0x347a
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_OFFSET 0x347b
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y 0x347c
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_OFFSET 0x347d
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C 0x347e
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_OFFSET 0x347f
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_OFFSET_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL 0x3480
+#define mmMCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK 0x3481
+#define mmMCIF_WB2_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL 0x3482
+#define mmMCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_WATERMARK 0x3483
+#define mmMCIF_WB2_MCIF_WB_WATERMARK_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_CLOCK_GATER_CONTROL 0x3484
+#define mmMCIF_WB2_MCIF_WB_CLOCK_GATER_CONTROL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_WARM_UP_CNTL 0x3485
+#define mmMCIF_WB2_MCIF_WB_WARM_UP_CNTL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL 0x3486
+#define mmMCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL_BASE_IDX 2
+#define mmMCIF_WB2_MULTI_LEVEL_QOS_CTRL 0x3487
+#define mmMCIF_WB2_MULTI_LEVEL_QOS_CTRL_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_LUMA_SIZE 0x3489
+#define mmMCIF_WB2_MCIF_WB_BUF_LUMA_SIZE_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_CHROMA_SIZE 0x348a
+#define mmMCIF_WB2_MCIF_WB_BUF_CHROMA_SIZE_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_HIGH 0x348b
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_HIGH 0x348c
+#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_HIGH 0x348d
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_HIGH 0x348e
+#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_HIGH 0x348f
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_HIGH 0x3490
+#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_HIGH 0x3491
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_HIGH 0x3492
+#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_HIGH_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_1_RESOLUTION 0x3493
+#define mmMCIF_WB2_MCIF_WB_BUF_1_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_2_RESOLUTION 0x3494
+#define mmMCIF_WB2_MCIF_WB_BUF_2_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_3_RESOLUTION 0x3495
+#define mmMCIF_WB2_MCIF_WB_BUF_3_RESOLUTION_BASE_IDX 2
+#define mmMCIF_WB2_MCIF_WB_BUF_4_RESOLUTION 0x3496
+#define mmMCIF_WB2_MCIF_WB_BUF_4_RESOLUTION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+// base address: 0x0
+#define mmDCHVM_CTRL0 0x016b
+#define mmDCHVM_CTRL0_BASE_IDX 3
+#define mmDCHVM_CTRL1 0x016c
+#define mmDCHVM_CTRL1_BASE_IDX 3
+#define mmDCHVM_CLK_CTRL 0x016d
+#define mmDCHVM_CLK_CTRL_BASE_IDX 3
+#define mmDCHVM_MEM_CTRL 0x016e
+#define mmDCHVM_MEM_CTRL_BASE_IDX 3
+#define mmDCHVM_RIOMMU_CTRL0 0x016f
+#define mmDCHVM_RIOMMU_CTRL0_BASE_IDX 3
+#define mmDCHVM_RIOMMU_STAT0 0x0170
+#define mmDCHVM_RIOMMU_STAT0_BASE_IDX 3
+
+
+// addressBlock: vga_vgaseqind
+// base address: 0x0
+#define ixSEQ00 0x0000
+#define ixSEQ01 0x0001
+#define ixSEQ02 0x0002
+#define ixSEQ03 0x0003
+#define ixSEQ04 0x0004
+
+
+// addressBlock: vga_vgacrtind
+// base address: 0x0
+#define ixCRT00 0x0000
+#define ixCRT01 0x0001
+#define ixCRT02 0x0002
+#define ixCRT03 0x0003
+#define ixCRT04 0x0004
+#define ixCRT05 0x0005
+#define ixCRT06 0x0006
+#define ixCRT07 0x0007
+#define ixCRT08 0x0008
+#define ixCRT09 0x0009
+#define ixCRT0A 0x000a
+#define ixCRT0B 0x000b
+#define ixCRT0C 0x000c
+#define ixCRT0D 0x000d
+#define ixCRT0E 0x000e
+#define ixCRT0F 0x000f
+#define ixCRT10 0x0010
+#define ixCRT11 0x0011
+#define ixCRT12 0x0012
+#define ixCRT13 0x0013
+#define ixCRT14 0x0014
+#define ixCRT15 0x0015
+#define ixCRT16 0x0016
+#define ixCRT17 0x0017
+#define ixCRT18 0x0018
+#define ixCRT1E 0x001e
+#define ixCRT1F 0x001f
+#define ixCRT22 0x0022
+
+
+// addressBlock: vga_vgagrphind
+// base address: 0x0
+#define ixGRA00 0x0000
+#define ixGRA01 0x0001
+#define ixGRA02 0x0002
+#define ixGRA03 0x0003
+#define ixGRA04 0x0004
+#define ixGRA05 0x0005
+#define ixGRA06 0x0006
+#define ixGRA07 0x0007
+#define ixGRA08 0x0008
+
+
+// addressBlock: vga_vgaattrind
+// base address: 0x0
+#define ixATTR00 0x0000
+#define ixATTR01 0x0001
+#define ixATTR02 0x0002
+#define ixATTR03 0x0003
+#define ixATTR04 0x0004
+#define ixATTR05 0x0005
+#define ixATTR06 0x0006
+#define ixATTR07 0x0007
+#define ixATTR08 0x0008
+#define ixATTR09 0x0009
+#define ixATTR0A 0x000a
+#define ixATTR0B 0x000b
+#define ixATTR0C 0x000c
+#define ixATTR0D 0x000d
+#define ixATTR0E 0x000e
+#define ixATTR0F 0x000f
+#define ixATTR10 0x0010
+#define ixATTR11 0x0011
+#define ixATTR12 0x0012
+#define ixATTR13 0x0013
+#define ixATTR14 0x0014
+
+
+// addressBlock: azendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270d
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270e
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273e
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x2771
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2f09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2f0a
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2f0b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371f
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378a
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378b
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378c
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378d
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378e
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378f
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x3797
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x3798
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB 0x3799
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x379a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE 0x379b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x379c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x379d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x379e
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3f09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3f0c
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3f0e
+
+
+// addressBlock: azendpoint_descriptorind
+// base address: 0x0
+#define ixAUDIO_DESCRIPTOR0 0x0001
+#define ixAUDIO_DESCRIPTOR1 0x0002
+#define ixAUDIO_DESCRIPTOR2 0x0003
+#define ixAUDIO_DESCRIPTOR3 0x0004
+#define ixAUDIO_DESCRIPTOR4 0x0005
+#define ixAUDIO_DESCRIPTOR5 0x0006
+#define ixAUDIO_DESCRIPTOR6 0x0007
+#define ixAUDIO_DESCRIPTOR7 0x0008
+#define ixAUDIO_DESCRIPTOR8 0x0009
+#define ixAUDIO_DESCRIPTOR9 0x000a
+#define ixAUDIO_DESCRIPTOR10 0x000b
+#define ixAUDIO_DESCRIPTOR11 0x000c
+#define ixAUDIO_DESCRIPTOR12 0x000d
+#define ixAUDIO_DESCRIPTOR13 0x000e
+
+
+// addressBlock: azendpoint_sinkinfoind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0000
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x0001
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x0002
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x0003
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x0004
+#define ixSINK_DESCRIPTION0 0x0005
+#define ixSINK_DESCRIPTION1 0x0006
+#define ixSINK_DESCRIPTION2 0x0007
+#define ixSINK_DESCRIPTION3 0x0008
+#define ixSINK_DESCRIPTION4 0x0009
+#define ixSINK_DESCRIPTION5 0x000a
+#define ixSINK_DESCRIPTION6 0x000b
+#define ixSINK_DESCRIPTION7 0x000c
+#define ixSINK_DESCRIPTION8 0x000d
+#define ixSINK_DESCRIPTION9 0x000e
+#define ixSINK_DESCRIPTION10 0x000f
+#define ixSINK_DESCRIPTION11 0x0010
+#define ixSINK_DESCRIPTION12 0x0011
+#define ixSINK_DESCRIPTION13 0x0012
+#define ixSINK_DESCRIPTION14 0x0013
+#define ixSINK_DESCRIPTION15 0x0014
+#define ixSINK_DESCRIPTION16 0x0015
+#define ixSINK_DESCRIPTION17 0x0016
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc0resultind
+// base address: 0x0
+#define ixAZALIA_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc1resultind
+// base address: 0x0
+#define ixAZALIA_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azinputendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x6200
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x6706
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x670d
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x6f09
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6f0a
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x6f0b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x7707
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x7708
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE 0x7709
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x771c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x771d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x771e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x771f
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x7771
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE 0x7777
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE 0x7778
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE 0x7779
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE 0x777a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR 0x777c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x7785
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x7786
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x7787
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x7788
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x7798
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB 0x7799
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x779a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x779b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x779c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L 0x779d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H 0x779e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x7f09
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x7f0c
+
+
+// addressBlock: azroot_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0f00
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0x0f02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0x0f04
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17ff
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1f04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1f05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1f0a
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1f0b
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1f0f
+
+
+// addressBlock: azf0stream0_streamind
+// base address: 0x0
+#define ixAZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream1_streamind
+// base address: 0x0
+#define ixAZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream2_streamind
+// base address: 0x0
+#define ixAZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream3_streamind
+// base address: 0x0
+#define ixAZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream4_streamind
+// base address: 0x0
+#define ixAZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream5_streamind
+// base address: 0x0
+#define ixAZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream6_streamind
+// base address: 0x0
+#define ixAZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream7_streamind
+// base address: 0x0
+#define ixAZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream8_streamind
+// base address: 0x0
+#define ixAZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream9_streamind
+// base address: 0x0
+#define ixAZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream10_streamind
+// base address: 0x0
+#define ixAZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream11_streamind
+// base address: 0x0
+#define ixAZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream12_streamind
+// base address: 0x0
+#define ixAZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream13_streamind
+// base address: 0x0
+#define ixAZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream14_streamind
+// base address: 0x0
+#define ixAZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream15_streamind
+// base address: 0x0
+#define ixAZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0endpoint0_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint1_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint2_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint3_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint4_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint5_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint6_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint7_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint2_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint3_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint4_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint5_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
new file mode 100644
index 000000000000..2f780aefc722
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
@@ -0,0 +1,56646 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dcn_2_1_0_SH_MASK_HEADER
+#define _dcn_2_1_0_SH_MASK_HEADER
+
+
+// addressBlock: dce_dc_mmhubbub_vga_dispdec[72..76]
+//VGA_MEM_WRITE_PAGE_ADDR
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR__SHIFT 0x0
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR__SHIFT 0x10
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR_MASK 0x000003FFL
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR_MASK 0x03FF0000L
+//VGA_MEM_READ_PAGE_ADDR
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR__SHIFT 0x0
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR__SHIFT 0x10
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR_MASK 0x000003FFL
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR_MASK 0x03FF0000L
+// addressBlock: dce_dc_mmhubbub_vga_dispdec
+//VGA_RENDER_CONTROL
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE__SHIFT 0x0
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE__SHIFT 0x5
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT__SHIFT 0x7
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE__SHIFT 0x8
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL__SHIFT 0x10
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT__SHIFT 0x18
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL__SHIFT 0x19
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE_MASK 0x0000001FL
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE_MASK 0x00000060L
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT_MASK 0x00000080L
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE_MASK 0x00000100L
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK 0x00030000L
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT_MASK 0x01000000L
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL_MASK 0x02000000L
+//VGA_SEQUENCER_RESET_CONTROL
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x0
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x1
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x2
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x3
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x4
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x5
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x9
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xa
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xb
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xc
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xd
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE__SHIFT 0x10
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT__SHIFT 0x11
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT__SHIFT 0x12
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000001L
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000002L
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000004L
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000008L
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000010L
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000020L
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000100L
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000200L
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000400L
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000800L
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00001000L
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00002000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE_MASK 0x00010000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT_MASK 0x00020000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT_MASK 0x00FC0000L
+//VGA_MODE_CONTROL
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR__SHIFT 0x0
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE__SHIFT 0x4
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING__SHIFT 0x8
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN__SHIFT 0x10
+#define VGA_MODE_CONTROL__VGA_DEEP_SLEEP_FORCE_EXIT__SHIFT 0x18
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR_MASK 0x00000001L
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE_MASK 0x00000030L
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING_MASK 0x00000100L
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN_MASK 0x00010000L
+#define VGA_MODE_CONTROL__VGA_DEEP_SLEEP_FORCE_EXIT_MASK 0x01000000L
+//VGA_SURFACE_PITCH_SELECT
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT__SHIFT 0x0
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT__SHIFT 0x8
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT_MASK 0x00000003L
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT_MASK 0x00000300L
+//VGA_MEMORY_BASE_ADDRESS
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS__SHIFT 0x0
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//VGA_DISPBUF1_SURFACE_ADDR
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR__SHIFT 0x0
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR_MASK 0x01FFFFFFL
+//VGA_DISPBUF2_SURFACE_ADDR
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR__SHIFT 0x0
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR_MASK 0x01FFFFFFL
+//VGA_MEMORY_BASE_ADDRESS_HIGH
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH__SHIFT 0x0
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//VGA_HDP_CONTROL
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN__SHIFT 0x0
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE__SHIFT 0x4
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE__SHIFT 0x8
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET__SHIFT 0x10
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL__SHIFT 0x18
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN_MASK 0x00000001L
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK 0x00000010L
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE_MASK 0x00000100L
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET_MASK 0x00010000L
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL_MASK 0x01000000L
+//VGA_CACHE_CONTROL
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS__SHIFT 0x0
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE__SHIFT 0x8
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE__SHIFT 0x10
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY__SHIFT 0x14
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT__SHIFT 0x18
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS_MASK 0x00000001L
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE_MASK 0x00000100L
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE_MASK 0x00010000L
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY_MASK 0x00100000L
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT_MASK 0x3F000000L
+//D1VGA_CONTROL
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE__SHIFT 0x0
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT__SHIFT 0x8
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D1VGA_CONTROL__D1VGA_ROTATE__SHIFT 0x18
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK 0x00000001L
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK 0x00000100L
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D1VGA_CONTROL__D1VGA_ROTATE_MASK 0x03000000L
+//D2VGA_CONTROL
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE__SHIFT 0x0
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT__SHIFT 0x8
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D2VGA_CONTROL__D2VGA_ROTATE__SHIFT 0x18
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK 0x00000001L
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK 0x00000100L
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D2VGA_CONTROL__D2VGA_ROTATE_MASK 0x03000000L
+//VGA_STATUS
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS__SHIFT 0x0
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS__SHIFT 0x1
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS__SHIFT 0x2
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS__SHIFT 0x3
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS_MASK 0x00000001L
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS_MASK 0x00000002L
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS_MASK 0x00000004L
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS_MASK 0x00000008L
+//VGA_INTERRUPT_CONTROL
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK__SHIFT 0x0
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK__SHIFT 0x8
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK__SHIFT 0x10
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK__SHIFT 0x18
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK_MASK 0x00000001L
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK_MASK 0x00000100L
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK_MASK 0x00010000L
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK_MASK 0x01000000L
+//VGA_STATUS_CLEAR
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR__SHIFT 0x0
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR__SHIFT 0x8
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR__SHIFT 0x10
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR__SHIFT 0x18
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR_MASK 0x00000001L
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR_MASK 0x00000100L
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR_MASK 0x00010000L
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR_MASK 0x01000000L
+//VGA_INTERRUPT_STATUS
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS__SHIFT 0x0
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS__SHIFT 0x1
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS__SHIFT 0x2
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS__SHIFT 0x3
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS_MASK 0x00000001L
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS_MASK 0x00000002L
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS_MASK 0x00000004L
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS_MASK 0x00000008L
+//VGA_MAIN_CONTROL
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT__SHIFT 0x0
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT__SHIFT 0x3
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION__SHIFT 0x5
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT__SHIFT 0x8
+#define VGA_MAIN_CONTROL__VGA_MC_WRITE_CLEAN_WAIT_DELAY__SHIFT 0xc
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT__SHIFT 0x10
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT__SHIFT 0x18
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT__SHIFT 0x1a
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE__SHIFT 0x1d
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT__SHIFT 0x1f
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT_MASK 0x00000003L
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT_MASK 0x00000018L
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION_MASK 0x000000E0L
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT_MASK 0x00000300L
+#define VGA_MAIN_CONTROL__VGA_MC_WRITE_CLEAN_WAIT_DELAY_MASK 0x0000F000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT_MASK 0x00030000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT_MASK 0x03000000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT_MASK 0x04000000L
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE_MASK 0x20000000L
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT_MASK 0x80000000L
+//VGA_TEST_CONTROL
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE__SHIFT 0x0
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START__SHIFT 0x8
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE__SHIFT 0x10
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT__SHIFT 0x18
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE_MASK 0x00000001L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START_MASK 0x00000100L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE_MASK 0x00010000L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT_MASK 0x01000000L
+//VGA_QOS_CTRL
+#define VGA_QOS_CTRL__VGA_READ_QOS__SHIFT 0x0
+#define VGA_QOS_CTRL__VGA_WRITE_QOS__SHIFT 0x4
+#define VGA_QOS_CTRL__VGA_READ_QOS_MASK 0x0000000FL
+#define VGA_QOS_CTRL__VGA_WRITE_QOS_MASK 0x000000F0L
+//D3VGA_CONTROL
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE__SHIFT 0x0
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT__SHIFT 0x8
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D3VGA_CONTROL__D3VGA_ROTATE__SHIFT 0x18
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE_MASK 0x00000001L
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT_MASK 0x00000100L
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D3VGA_CONTROL__D3VGA_ROTATE_MASK 0x03000000L
+//D4VGA_CONTROL
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE__SHIFT 0x0
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT__SHIFT 0x8
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D4VGA_CONTROL__D4VGA_ROTATE__SHIFT 0x18
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE_MASK 0x00000001L
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT_MASK 0x00000100L
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D4VGA_CONTROL__D4VGA_ROTATE_MASK 0x03000000L
+//D5VGA_CONTROL
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE__SHIFT 0x0
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT__SHIFT 0x8
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D5VGA_CONTROL__D5VGA_ROTATE__SHIFT 0x18
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE_MASK 0x00000001L
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT_MASK 0x00000100L
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D5VGA_CONTROL__D5VGA_ROTATE_MASK 0x03000000L
+//D6VGA_CONTROL
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE__SHIFT 0x0
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT__SHIFT 0x8
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D6VGA_CONTROL__D6VGA_ROTATE__SHIFT 0x18
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE_MASK 0x00000001L
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT_MASK 0x00000100L
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D6VGA_CONTROL__D6VGA_ROTATE_MASK 0x03000000L
+//VGA_SOURCE_SELECT
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A__SHIFT 0x0
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B__SHIFT 0x8
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A_MASK 0x00000007L
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B_MASK 0x00000700L
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+//PHYPLLA_PIXCLK_RESYNC_CNTL
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//PHYPLLB_PIXCLK_RESYNC_CNTL
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//PHYPLLC_PIXCLK_RESYNC_CNTL
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//PHYPLLD_PIXCLK_RESYNC_CNTL
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//DP_DTO_DBUF_EN
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN__SHIFT 0x2
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN__SHIFT 0x3
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN__SHIFT 0x4
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN__SHIFT 0x5
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN__SHIFT 0x6
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN__SHIFT 0x7
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN_MASK 0x00000004L
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN_MASK 0x00000008L
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN_MASK 0x00000010L
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN_MASK 0x00000020L
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN_MASK 0x00000040L
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN_MASK 0x00000080L
+//DPREFCLK_CGTT_BLK_CTRL_REG
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//REFCLK_CNTL
+#define REFCLK_CNTL__REFCLK_CLOCK_EN__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_SRC_SEL__SHIFT 0x1
+#define REFCLK_CNTL__REFCLK_CLOCK_EN_MASK 0x00000001L
+#define REFCLK_CNTL__REFCLK_SRC_SEL_MASK 0x00000002L
+//REFCLK_CGTT_BLK_CTRL_REG
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//PHYPLLE_PIXCLK_RESYNC_CNTL
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//DCCG_PERFMON_CNTL2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE__SHIFT 0x5
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE__SHIFT 0x6
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE__SHIFT 0x7
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE__SHIFT 0x8
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE_MASK 0x00000100L
+//DCCG_DS_DTO_INCR
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_DS_DTO_MODULO
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_DS_CNTL
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L
+//DCCG_DS_HW_CAL_INTERVAL
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL
+//DPREFCLK_CNTL
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L
+//DCE_VERSION
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL
+#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+//DCCG_GTC_CNTL
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+//DCCG_GTC_DTO_INCR
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_GTC_DTO_MODULO
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_GTC_CURRENT
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL
+//DSCCLK0_DTO_PARAM
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE__SHIFT 0x0
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO__SHIFT 0x10
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DSCCLK1_DTO_PARAM
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE__SHIFT 0x0
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO__SHIFT 0x10
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DSCCLK2_DTO_PARAM
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE__SHIFT 0x0
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO__SHIFT 0x10
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO_MASK 0x00FF0000L
+//MILLISECOND_TIME_BASE_DIV
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DISPCLK_FREQ_CHANGE_CNTL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+//DC_MEM_GLOBAL_PWR_REQ_CNTL
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+//DCCG_PERFMON_CNTL
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L
+//DCCG_GATE_DISABLE_CNTL
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L
+//DISPCLK_CGTT_BLK_CTRL_REG
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//SOCCLK_CGTT_BLK_CTRL_REG
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_CAC_STATUS
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL
+//MICROSECOND_TIME_BASE_DIV
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DCCG_GATE_DISABLE_CNTL2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L
+//SYMCLK_CGTT_BLK_CTRL_REG
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_DISP_CNTL_REG
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L
+//OTG0_PIXEL_RATE_CNTL
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO0_PHASE
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO0_MODULO
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL
+//OTG0_PHYPLL_PIXEL_RATE_CNTL
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG1_PIXEL_RATE_CNTL
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO1_PHASE
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO1_MODULO
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL
+//OTG1_PHYPLL_PIXEL_RATE_CNTL
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG2_PIXEL_RATE_CNTL
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x4
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE__SHIFT 0x5
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL__SHIFT 0x8
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL__SHIFT 0x9
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x00000010L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE_MASK 0x00000020L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL_MASK 0x00000100L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL_MASK 0x00000200L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO2_PHASE
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x0
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO2_MODULO
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x0
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xFFFFFFFFL
+//OTG2_PHYPLL_PIXEL_RATE_CNTL
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG3_PIXEL_RATE_CNTL
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x4
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE__SHIFT 0x5
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL__SHIFT 0x8
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL__SHIFT 0x9
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x00000010L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE_MASK 0x00000020L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL_MASK 0x00000100L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL_MASK 0x00000200L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO3_PHASE
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x0
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO3_MODULO
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x0
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xFFFFFFFFL
+//OTG3_PHYPLL_PIXEL_RATE_CNTL
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//DPPCLK_CGTT_BLK_CTRL_REG
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DPPCLK0_DTO_PARAM
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK1_DTO_PARAM
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK2_DTO_PARAM
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK3_DTO_PARAM
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DCCG_CAC_STATUS2
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0000007FL
+//SYMCLKA_CLOCK_ENABLE
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x00000700L
+//SYMCLKB_CLOCK_ENABLE
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x00000700L
+//SYMCLKC_CLOCK_ENABLE
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC_MASK 0x00000700L
+//SYMCLKD_CLOCK_ENABLE
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC_MASK 0x00000700L
+//SYMCLKE_CLOCK_ENABLE
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC_MASK 0x00000700L
+//DCCG_SOFT_RESET
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x0
+#define DCCG_SOFT_RESET__PCIE_REFCLK_SOFT_RESET__SHIFT 0x1
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x2
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x3
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET__SHIFT 0x4
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET__SHIFT 0x8
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET__SHIFT 0xc
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET__SHIFT 0xd
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET__SHIFT 0xe
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET__SHIFT 0xf
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET__SHIFT 0x10
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET__SHIFT 0x11
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET__SHIFT 0x12
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET__SHIFT 0x13
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET__SHIFT 0x14
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET__SHIFT 0x15
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x00000001L
+#define DCCG_SOFT_RESET__PCIE_REFCLK_SOFT_RESET_MASK 0x00000002L
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO_MASK 0x00000004L
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST_MASK 0x00000008L
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET_MASK 0x00000010L
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET_MASK 0x00000100L
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET_MASK 0x00001000L
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET_MASK 0x00002000L
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET_MASK 0x00004000L
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET_MASK 0x00008000L
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET_MASK 0x00010000L
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET_MASK 0x00020000L
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET_MASK 0x00040000L
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET_MASK 0x00080000L
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET_MASK 0x00100000L
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET_MASK 0x00200000L
+//DSCCLK_DTO_CTRL
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_ENABLE__SHIFT 0x0
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_ENABLE__SHIFT 0x1
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_ENABLE__SHIFT 0x2
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_ENABLE__SHIFT 0x3
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_ENABLE__SHIFT 0x4
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_ENABLE__SHIFT 0x5
+#define DSCCLK_DTO_CTRL__DSCCLK6_DTO_ENABLE__SHIFT 0x6
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN__SHIFT 0x8
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN__SHIFT 0x9
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN__SHIFT 0xa
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN__SHIFT 0xb
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN__SHIFT 0xc
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN__SHIFT 0xd
+#define DSCCLK_DTO_CTRL__DSCCLK6_DTO_DB_EN__SHIFT 0xe
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_ENABLE_MASK 0x00000001L
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_ENABLE_MASK 0x00000002L
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_ENABLE_MASK 0x00000004L
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_ENABLE_MASK 0x00000008L
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_ENABLE_MASK 0x00000010L
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_ENABLE_MASK 0x00000020L
+#define DSCCLK_DTO_CTRL__DSCCLK6_DTO_ENABLE_MASK 0x00000040L
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN_MASK 0x00000100L
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN_MASK 0x00000200L
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN_MASK 0x00000400L
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN_MASK 0x00000800L
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN_MASK 0x00001000L
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN_MASK 0x00002000L
+#define DSCCLK_DTO_CTRL__DSCCLK6_DTO_DB_EN_MASK 0x00004000L
+//DCCG_AUDIO_DTO_SOURCE
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL__SHIFT 0xc
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN__SHIFT 0x10
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000030L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL_MASK 0x00003000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN_MASK 0x00010000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L
+//DCCG_AUDIO_DTO0_PHASE
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO0_MODULE
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_PHASE
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_MODULE
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG0_LATCH_VALUE
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG1_LATCH_VALUE
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG2_LATCH_VALUE
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG3_LATCH_VALUE
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG4_LATCH_VALUE
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG5_LATCH_VALUE
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DPPCLK_DTO_CTRL
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE__SHIFT 0x0
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE__SHIFT 0x4
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE__SHIFT 0x8
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE__SHIFT 0xc
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_ENABLE__SHIFT 0x10
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN__SHIFT 0x11
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_ENABLE__SHIFT 0x14
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN__SHIFT 0x15
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE_MASK 0x00000001L
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE_MASK 0x00000010L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE_MASK 0x00000100L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE_MASK 0x00001000L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_ENABLE_MASK 0x00010000L
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN_MASK 0x00020000L
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_ENABLE_MASK 0x00100000L
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN_MASK 0x00200000L
+//DCCG_VSYNC_CNT_CTRL
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL__SHIFT 0x1
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN__SHIFT 0x12
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN__SHIFT 0x13
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN__SHIFT 0x14
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN__SHIFT 0x15
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL__SHIFT 0x1a
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL__SHIFT 0x1b
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL__SHIFT 0x1c
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL__SHIFT 0x1d
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN_MASK 0x00040000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN_MASK 0x00080000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN_MASK 0x00100000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN_MASK 0x00200000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL_MASK 0x04000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL_MASK 0x08000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL_MASK 0x10000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL_MASK 0x20000000L
+//DCCG_VSYNC_CNT_INT_CTRL
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK__SHIFT 0xa
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK__SHIFT 0xb
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK__SHIFT 0xc
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK__SHIFT 0xd
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK_MASK 0x00000400L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK_MASK 0x00000800L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK_MASK 0x00001000L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK_MASK 0x00002000L
+//FORCE_SYMCLK_DISABLE
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE__SHIFT 0x0
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE__SHIFT 0x1
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE__SHIFT 0x2
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE__SHIFT 0x3
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE__SHIFT 0x4
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE__SHIFT 0x5
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE__SHIFT 0x6
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE_MASK 0x00000001L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE_MASK 0x00000002L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE_MASK 0x00000004L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE_MASK 0x00000008L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE_MASK 0x00000010L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE_MASK 0x00000020L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE_MASK 0x00000040L
+//DCCG_TEST_CLK_SEL
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL__SHIFT 0x0
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV__SHIFT 0xc
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_DIV_SEL__SHIFT 0xe
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL__SHIFT 0x10
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV__SHIFT 0x1c
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL_MASK 0x000001FFL
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV_MASK 0x00001000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_DIV_SEL_MASK 0x0000C000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL_MASK 0x01FF0000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV_MASK 0x10000000L
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+//DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+//DC_PERFMON0_PERFCOUNTER_CNTL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFCOUNTER_CNTL2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFCOUNTER_STATE
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON0_PERFMON_CNTL
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON0_PERFMON_CNTL2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON0_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON0_PERFMON_CVALUE_LOW
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON0_PERFMON_HI
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFMON_LOW
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+//DC_PERFMON1_PERFCOUNTER_CNTL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFCOUNTER_CNTL2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFCOUNTER_STATE
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON1_PERFMON_CNTL
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON1_PERFMON_CNTL2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON1_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON1_PERFMON_CVALUE_LOW
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON1_PERFMON_HI
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFMON_LOW
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dccg_dccg_pll_dispdec
+//PLL_MACRO_CNTL_RESERVED0
+#define PLL_MACRO_CNTL_RESERVED0__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED0__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED1
+#define PLL_MACRO_CNTL_RESERVED1__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED1__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED2
+#define PLL_MACRO_CNTL_RESERVED2__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED2__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED3
+#define PLL_MACRO_CNTL_RESERVED3__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED3__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED4
+#define PLL_MACRO_CNTL_RESERVED4__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED4__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED5
+#define PLL_MACRO_CNTL_RESERVED5__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED5__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED6
+#define PLL_MACRO_CNTL_RESERVED6__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED6__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED7
+#define PLL_MACRO_CNTL_RESERVED7__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED7__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED8
+#define PLL_MACRO_CNTL_RESERVED8__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED8__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED9
+#define PLL_MACRO_CNTL_RESERVED9__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED9__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED10
+#define PLL_MACRO_CNTL_RESERVED10__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED10__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED11
+#define PLL_MACRO_CNTL_RESERVED11__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED11__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED12
+#define PLL_MACRO_CNTL_RESERVED12__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED12__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED13
+#define PLL_MACRO_CNTL_RESERVED13__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED13__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED14
+#define PLL_MACRO_CNTL_RESERVED14__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED14__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED15
+#define PLL_MACRO_CNTL_RESERVED15__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED15__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED16
+#define PLL_MACRO_CNTL_RESERVED16__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED16__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED17
+#define PLL_MACRO_CNTL_RESERVED17__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED17__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED18
+#define PLL_MACRO_CNTL_RESERVED18__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED18__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED19
+#define PLL_MACRO_CNTL_RESERVED19__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED19__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED20
+#define PLL_MACRO_CNTL_RESERVED20__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED20__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED21
+#define PLL_MACRO_CNTL_RESERVED21__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED21__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED22
+#define PLL_MACRO_CNTL_RESERVED22__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED22__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED23
+#define PLL_MACRO_CNTL_RESERVED23__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED23__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED24
+#define PLL_MACRO_CNTL_RESERVED24__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED24__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED25
+#define PLL_MACRO_CNTL_RESERVED25__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED25__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED26
+#define PLL_MACRO_CNTL_RESERVED26__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED26__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED27
+#define PLL_MACRO_CNTL_RESERVED27__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED27__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED28
+#define PLL_MACRO_CNTL_RESERVED28__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED28__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED29
+#define PLL_MACRO_CNTL_RESERVED29__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED29__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED30
+#define PLL_MACRO_CNTL_RESERVED30__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED30__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED31
+#define PLL_MACRO_CNTL_RESERVED31__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED31__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED32
+#define PLL_MACRO_CNTL_RESERVED32__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED32__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED33
+#define PLL_MACRO_CNTL_RESERVED33__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED33__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED34
+#define PLL_MACRO_CNTL_RESERVED34__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED34__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED35
+#define PLL_MACRO_CNTL_RESERVED35__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED35__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED36
+#define PLL_MACRO_CNTL_RESERVED36__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED36__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED37
+#define PLL_MACRO_CNTL_RESERVED37__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED37__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED38
+#define PLL_MACRO_CNTL_RESERVED38__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED38__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED39
+#define PLL_MACRO_CNTL_RESERVED39__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED39__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED40
+#define PLL_MACRO_CNTL_RESERVED40__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED40__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//PLL_MACRO_CNTL_RESERVED41
+#define PLL_MACRO_CNTL_RESERVED41__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED41__PLL_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+//RBBMIF_TIMEOUT
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD__SHIFT 0x14
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD_MASK 0xFFF00000L
+//RBBMIF_STATUS
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC__SHIFT 0x0
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC_MASK 0xFFFFFFFFL
+//RBBMIF_STATUS_2
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2__SHIFT 0x0
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2_MASK 0x0000003FL
+//RBBMIF_INT_STATUS
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP__SHIFT 0x1c
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS__SHIFT 0x1d
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK__SHIFT 0x1e
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK__SHIFT 0x1f
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR_MASK 0x0003FFFCL
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP_MASK 0x10000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS_MASK 0x20000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK_MASK 0x40000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK_MASK 0x80000000L
+//RBBMIF_TIMEOUT_DIS
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS__SHIFT 0x7
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS__SHIFT 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS__SHIFT 0x9
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS__SHIFT 0xa
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS__SHIFT 0xb
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS__SHIFT 0xc
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS__SHIFT 0xd
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS__SHIFT 0xe
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS__SHIFT 0xf
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS__SHIFT 0x10
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS__SHIFT 0x11
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS__SHIFT 0x12
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS__SHIFT 0x13
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS__SHIFT 0x14
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS__SHIFT 0x15
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS__SHIFT 0x16
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS__SHIFT 0x17
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS__SHIFT 0x18
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS__SHIFT 0x19
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS__SHIFT 0x1a
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS__SHIFT 0x1b
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS__SHIFT 0x1c
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS__SHIFT 0x1d
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS__SHIFT 0x1e
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS__SHIFT 0x1f
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS_MASK 0x00000040L
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS_MASK 0x00000080L
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS_MASK 0x00000100L
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS_MASK 0x00000200L
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS_MASK 0x00000400L
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS_MASK 0x00000800L
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS_MASK 0x00001000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS_MASK 0x00002000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS_MASK 0x00004000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS_MASK 0x00008000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS_MASK 0x00010000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS_MASK 0x00020000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS_MASK 0x00040000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS_MASK 0x00080000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS_MASK 0x00100000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS_MASK 0x00200000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS_MASK 0x00400000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS_MASK 0x00800000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS_MASK 0x01000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS_MASK 0x02000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS_MASK 0x04000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS_MASK 0x08000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS_MASK 0x10000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS_MASK 0x20000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS_MASK 0x40000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS_MASK 0x80000000L
+//RBBMIF_TIMEOUT_DIS_2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS_MASK 0x00000020L
+//RBBMIF_STATUS_FLAG
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE__SHIFT 0x0
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT__SHIFT 0x4
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY__SHIFT 0x5
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL__SHIFT 0x6
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x8
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x9
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x10
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE_MASK 0x00000003L
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT_MASK 0x00000010L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY_MASK 0x00000020L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL_MASK 0x00000040L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000100L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000E00L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+//DOMAIN0_PG_CONFIG
+#define DOMAIN0_PG_CONFIG__DOMAIN0_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN0_PG_CONFIG__DOMAIN0_POWER_GATE__SHIFT 0x8
+#define DOMAIN0_PG_CONFIG__DOMAIN0_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN0_PG_CONFIG__DOMAIN0_POWER_GATE_MASK 0x00000100L
+//DOMAIN0_PG_STATUS
+#define DOMAIN0_PG_STATUS__DOMAIN0_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN0_PG_STATUS__DOMAIN0_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN0_PG_STATUS__DOMAIN0_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN0_PG_STATUS__DOMAIN0_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN1_PG_CONFIG
+#define DOMAIN1_PG_CONFIG__DOMAIN1_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN1_PG_CONFIG__DOMAIN1_POWER_GATE__SHIFT 0x8
+#define DOMAIN1_PG_CONFIG__DOMAIN1_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN1_PG_CONFIG__DOMAIN1_POWER_GATE_MASK 0x00000100L
+//DOMAIN1_PG_STATUS
+#define DOMAIN1_PG_STATUS__DOMAIN1_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN1_PG_STATUS__DOMAIN1_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN1_PG_STATUS__DOMAIN1_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN1_PG_STATUS__DOMAIN1_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN2_PG_CONFIG
+#define DOMAIN2_PG_CONFIG__DOMAIN2_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN2_PG_CONFIG__DOMAIN2_POWER_GATE__SHIFT 0x8
+#define DOMAIN2_PG_CONFIG__DOMAIN2_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN2_PG_CONFIG__DOMAIN2_POWER_GATE_MASK 0x00000100L
+//DOMAIN2_PG_STATUS
+#define DOMAIN2_PG_STATUS__DOMAIN2_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN2_PG_STATUS__DOMAIN2_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN2_PG_STATUS__DOMAIN2_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN2_PG_STATUS__DOMAIN2_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN3_PG_CONFIG
+#define DOMAIN3_PG_CONFIG__DOMAIN3_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN3_PG_CONFIG__DOMAIN3_POWER_GATE__SHIFT 0x8
+#define DOMAIN3_PG_CONFIG__DOMAIN3_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN3_PG_CONFIG__DOMAIN3_POWER_GATE_MASK 0x00000100L
+//DOMAIN3_PG_STATUS
+#define DOMAIN3_PG_STATUS__DOMAIN3_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN3_PG_STATUS__DOMAIN3_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN3_PG_STATUS__DOMAIN3_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN3_PG_STATUS__DOMAIN3_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN4_PG_CONFIG
+#define DOMAIN4_PG_CONFIG__DOMAIN4_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN4_PG_CONFIG__DOMAIN4_POWER_GATE__SHIFT 0x8
+#define DOMAIN4_PG_CONFIG__DOMAIN4_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN4_PG_CONFIG__DOMAIN4_POWER_GATE_MASK 0x00000100L
+//DOMAIN4_PG_STATUS
+#define DOMAIN4_PG_STATUS__DOMAIN4_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN4_PG_STATUS__DOMAIN4_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN4_PG_STATUS__DOMAIN4_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN4_PG_STATUS__DOMAIN4_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN5_PG_CONFIG
+#define DOMAIN5_PG_CONFIG__DOMAIN5_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN5_PG_CONFIG__DOMAIN5_POWER_GATE__SHIFT 0x8
+#define DOMAIN5_PG_CONFIG__DOMAIN5_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN5_PG_CONFIG__DOMAIN5_POWER_GATE_MASK 0x00000100L
+//DOMAIN5_PG_STATUS
+#define DOMAIN5_PG_STATUS__DOMAIN5_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN5_PG_STATUS__DOMAIN5_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN5_PG_STATUS__DOMAIN5_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN5_PG_STATUS__DOMAIN5_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN6_PG_CONFIG
+#define DOMAIN6_PG_CONFIG__DOMAIN6_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN6_PG_CONFIG__DOMAIN6_POWER_GATE__SHIFT 0x8
+#define DOMAIN6_PG_CONFIG__DOMAIN6_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN6_PG_CONFIG__DOMAIN6_POWER_GATE_MASK 0x00000100L
+//DOMAIN6_PG_STATUS
+#define DOMAIN6_PG_STATUS__DOMAIN6_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN6_PG_STATUS__DOMAIN6_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN6_PG_STATUS__DOMAIN6_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN6_PG_STATUS__DOMAIN6_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN7_PG_CONFIG
+#define DOMAIN7_PG_CONFIG__DOMAIN7_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN7_PG_CONFIG__DOMAIN7_POWER_GATE__SHIFT 0x8
+#define DOMAIN7_PG_CONFIG__DOMAIN7_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN7_PG_CONFIG__DOMAIN7_POWER_GATE_MASK 0x00000100L
+//DOMAIN7_PG_STATUS
+#define DOMAIN7_PG_STATUS__DOMAIN7_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN7_PG_STATUS__DOMAIN7_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN7_PG_STATUS__DOMAIN7_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN7_PG_STATUS__DOMAIN7_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN16_PG_CONFIG
+#define DOMAIN16_PG_CONFIG__DOMAIN16_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN16_PG_CONFIG__DOMAIN16_POWER_GATE__SHIFT 0x8
+#define DOMAIN16_PG_CONFIG__DOMAIN16_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN16_PG_CONFIG__DOMAIN16_POWER_GATE_MASK 0x00000100L
+//DOMAIN16_PG_STATUS
+#define DOMAIN16_PG_STATUS__DOMAIN16_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN16_PG_STATUS__DOMAIN16_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN16_PG_STATUS__DOMAIN16_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN16_PG_STATUS__DOMAIN16_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN17_PG_CONFIG
+#define DOMAIN17_PG_CONFIG__DOMAIN17_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN17_PG_CONFIG__DOMAIN17_POWER_GATE__SHIFT 0x8
+#define DOMAIN17_PG_CONFIG__DOMAIN17_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN17_PG_CONFIG__DOMAIN17_POWER_GATE_MASK 0x00000100L
+//DOMAIN17_PG_STATUS
+#define DOMAIN17_PG_STATUS__DOMAIN17_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN17_PG_STATUS__DOMAIN17_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN17_PG_STATUS__DOMAIN17_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN17_PG_STATUS__DOMAIN17_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN18_PG_CONFIG
+#define DOMAIN18_PG_CONFIG__DOMAIN18_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN18_PG_CONFIG__DOMAIN18_POWER_GATE__SHIFT 0x8
+#define DOMAIN18_PG_CONFIG__DOMAIN18_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN18_PG_CONFIG__DOMAIN18_POWER_GATE_MASK 0x00000100L
+//DOMAIN18_PG_STATUS
+#define DOMAIN18_PG_STATUS__DOMAIN18_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN18_PG_STATUS__DOMAIN18_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN18_PG_STATUS__DOMAIN18_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN18_PG_STATUS__DOMAIN18_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DCPG_INTERRUPT_STATUS
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS__DOMAIN4_POWER_UP_INT_OCCURRED__SHIFT 0x8
+#define DCPG_INTERRUPT_STATUS__DOMAIN4_POWER_DOWN_INT_OCCURRED__SHIFT 0x9
+#define DCPG_INTERRUPT_STATUS__DOMAIN5_POWER_UP_INT_OCCURRED__SHIFT 0xa
+#define DCPG_INTERRUPT_STATUS__DOMAIN5_POWER_DOWN_INT_OCCURRED__SHIFT 0xb
+#define DCPG_INTERRUPT_STATUS__DOMAIN6_POWER_UP_INT_OCCURRED__SHIFT 0xc
+#define DCPG_INTERRUPT_STATUS__DOMAIN6_POWER_DOWN_INT_OCCURRED__SHIFT 0xd
+#define DCPG_INTERRUPT_STATUS__DOMAIN7_POWER_UP_INT_OCCURRED__SHIFT 0xe
+#define DCPG_INTERRUPT_STATUS__DOMAIN7_POWER_DOWN_INT_OCCURRED__SHIFT 0xf
+#define DCPG_INTERRUPT_STATUS__DOMAIN8_POWER_UP_INT_OCCURRED__SHIFT 0x10
+#define DCPG_INTERRUPT_STATUS__DOMAIN8_POWER_DOWN_INT_OCCURRED__SHIFT 0x11
+#define DCPG_INTERRUPT_STATUS__DOMAIN9_POWER_UP_INT_OCCURRED__SHIFT 0x12
+#define DCPG_INTERRUPT_STATUS__DOMAIN9_POWER_DOWN_INT_OCCURRED__SHIFT 0x13
+#define DCPG_INTERRUPT_STATUS__DOMAIN10_POWER_UP_INT_OCCURRED__SHIFT 0x14
+#define DCPG_INTERRUPT_STATUS__DOMAIN10_POWER_DOWN_INT_OCCURRED__SHIFT 0x15
+#define DCPG_INTERRUPT_STATUS__DOMAIN11_POWER_UP_INT_OCCURRED__SHIFT 0x16
+#define DCPG_INTERRUPT_STATUS__DOMAIN11_POWER_DOWN_INT_OCCURRED__SHIFT 0x17
+#define DCPG_INTERRUPT_STATUS__DOMAIN12_POWER_UP_INT_OCCURRED__SHIFT 0x18
+#define DCPG_INTERRUPT_STATUS__DOMAIN12_POWER_DOWN_INT_OCCURRED__SHIFT 0x19
+#define DCPG_INTERRUPT_STATUS__DOMAIN13_POWER_UP_INT_OCCURRED__SHIFT 0x1a
+#define DCPG_INTERRUPT_STATUS__DOMAIN13_POWER_DOWN_INT_OCCURRED__SHIFT 0x1b
+#define DCPG_INTERRUPT_STATUS__DOMAIN14_POWER_UP_INT_OCCURRED__SHIFT 0x1c
+#define DCPG_INTERRUPT_STATUS__DOMAIN14_POWER_DOWN_INT_OCCURRED__SHIFT 0x1d
+#define DCPG_INTERRUPT_STATUS__DOMAIN15_POWER_UP_INT_OCCURRED__SHIFT 0x1e
+#define DCPG_INTERRUPT_STATUS__DOMAIN15_POWER_DOWN_INT_OCCURRED__SHIFT 0x1f
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+#define DCPG_INTERRUPT_STATUS__DOMAIN4_POWER_UP_INT_OCCURRED_MASK 0x00000100L
+#define DCPG_INTERRUPT_STATUS__DOMAIN4_POWER_DOWN_INT_OCCURRED_MASK 0x00000200L
+#define DCPG_INTERRUPT_STATUS__DOMAIN5_POWER_UP_INT_OCCURRED_MASK 0x00000400L
+#define DCPG_INTERRUPT_STATUS__DOMAIN5_POWER_DOWN_INT_OCCURRED_MASK 0x00000800L
+#define DCPG_INTERRUPT_STATUS__DOMAIN6_POWER_UP_INT_OCCURRED_MASK 0x00001000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN6_POWER_DOWN_INT_OCCURRED_MASK 0x00002000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN7_POWER_UP_INT_OCCURRED_MASK 0x00004000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN7_POWER_DOWN_INT_OCCURRED_MASK 0x00008000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN8_POWER_UP_INT_OCCURRED_MASK 0x00010000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN8_POWER_DOWN_INT_OCCURRED_MASK 0x00020000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN9_POWER_UP_INT_OCCURRED_MASK 0x00040000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN9_POWER_DOWN_INT_OCCURRED_MASK 0x00080000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN10_POWER_UP_INT_OCCURRED_MASK 0x00100000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN10_POWER_DOWN_INT_OCCURRED_MASK 0x00200000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN11_POWER_UP_INT_OCCURRED_MASK 0x00400000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN11_POWER_DOWN_INT_OCCURRED_MASK 0x00800000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN12_POWER_UP_INT_OCCURRED_MASK 0x01000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN12_POWER_DOWN_INT_OCCURRED_MASK 0x02000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN13_POWER_UP_INT_OCCURRED_MASK 0x04000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN13_POWER_DOWN_INT_OCCURRED_MASK 0x08000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN14_POWER_UP_INT_OCCURRED_MASK 0x10000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN14_POWER_DOWN_INT_OCCURRED_MASK 0x20000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN15_POWER_UP_INT_OCCURRED_MASK 0x40000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN15_POWER_DOWN_INT_OCCURRED_MASK 0x80000000L
+//DCPG_INTERRUPT_STATUS_2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN20_POWER_UP_INT_OCCURRED__SHIFT 0x8
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN20_POWER_DOWN_INT_OCCURRED__SHIFT 0x9
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN21_POWER_UP_INT_OCCURRED__SHIFT 0xa
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN21_POWER_DOWN_INT_OCCURRED__SHIFT 0xb
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN20_POWER_UP_INT_OCCURRED_MASK 0x00000100L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN20_POWER_DOWN_INT_OCCURRED_MASK 0x00000200L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN21_POWER_UP_INT_OCCURRED_MASK 0x00000400L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN21_POWER_DOWN_INT_OCCURRED_MASK 0x00000800L
+//DCPG_INTERRUPT_CONTROL_1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_UP_INT_MASK__SHIFT 0x10
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_DOWN_INT_MASK__SHIFT 0x12
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_UP_INT_MASK__SHIFT 0x14
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_UP_INT_CLEAR__SHIFT 0x15
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_DOWN_INT_MASK__SHIFT 0x16
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_UP_INT_MASK__SHIFT 0x18
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_UP_INT_CLEAR__SHIFT 0x19
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_DOWN_INT_MASK__SHIFT 0x1a
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_DOWN_INT_CLEAR__SHIFT 0x1b
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_UP_INT_MASK__SHIFT 0x1c
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_UP_INT_CLEAR__SHIFT 0x1d
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_DOWN_INT_MASK__SHIFT 0x1e
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_DOWN_INT_CLEAR__SHIFT 0x1f
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_UP_INT_MASK_MASK 0x00010000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_UP_INT_CLEAR_MASK 0x00020000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_DOWN_INT_MASK_MASK 0x00040000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN4_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_UP_INT_MASK_MASK 0x00100000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_UP_INT_CLEAR_MASK 0x00200000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_DOWN_INT_MASK_MASK 0x00400000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN5_POWER_DOWN_INT_CLEAR_MASK 0x00800000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_UP_INT_MASK_MASK 0x01000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_UP_INT_CLEAR_MASK 0x02000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_DOWN_INT_MASK_MASK 0x04000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN6_POWER_DOWN_INT_CLEAR_MASK 0x08000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_UP_INT_MASK_MASK 0x10000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_UP_INT_CLEAR_MASK 0x20000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_DOWN_INT_MASK_MASK 0x40000000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN7_POWER_DOWN_INT_CLEAR_MASK 0x80000000L
+//DCPG_INTERRUPT_CONTROL_2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_UP_INT_MASK__SHIFT 0x10
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_DOWN_INT_MASK__SHIFT 0x12
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_UP_INT_MASK__SHIFT 0x14
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_UP_INT_CLEAR__SHIFT 0x15
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_DOWN_INT_MASK__SHIFT 0x16
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_UP_INT_MASK__SHIFT 0x18
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_UP_INT_CLEAR__SHIFT 0x19
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_DOWN_INT_MASK__SHIFT 0x1a
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_DOWN_INT_CLEAR__SHIFT 0x1b
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_UP_INT_MASK__SHIFT 0x1c
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_UP_INT_CLEAR__SHIFT 0x1d
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_DOWN_INT_MASK__SHIFT 0x1e
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_DOWN_INT_CLEAR__SHIFT 0x1f
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN8_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN9_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN10_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN11_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_UP_INT_MASK_MASK 0x00010000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_UP_INT_CLEAR_MASK 0x00020000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_DOWN_INT_MASK_MASK 0x00040000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN12_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_UP_INT_MASK_MASK 0x00100000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_UP_INT_CLEAR_MASK 0x00200000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_DOWN_INT_MASK_MASK 0x00400000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN13_POWER_DOWN_INT_CLEAR_MASK 0x00800000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_UP_INT_MASK_MASK 0x01000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_UP_INT_CLEAR_MASK 0x02000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_DOWN_INT_MASK_MASK 0x04000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN14_POWER_DOWN_INT_CLEAR_MASK 0x08000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_UP_INT_MASK_MASK 0x10000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_UP_INT_CLEAR_MASK 0x20000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_DOWN_INT_MASK_MASK 0x40000000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN15_POWER_DOWN_INT_CLEAR_MASK 0x80000000L
+//DCPG_INTERRUPT_CONTROL_3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_UP_INT_MASK__SHIFT 0x10
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_DOWN_INT_MASK__SHIFT 0x12
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_UP_INT_MASK__SHIFT 0x14
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_UP_INT_CLEAR__SHIFT 0x15
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_DOWN_INT_MASK__SHIFT 0x16
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN16_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN17_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN18_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN19_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_UP_INT_MASK_MASK 0x00010000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_UP_INT_CLEAR_MASK 0x00020000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_DOWN_INT_MASK_MASK 0x00040000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN20_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_UP_INT_MASK_MASK 0x00100000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_UP_INT_CLEAR_MASK 0x00200000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_DOWN_INT_MASK_MASK 0x00400000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN21_POWER_DOWN_INT_CLEAR_MASK 0x00800000L
+//DC_IP_REQUEST_CNTL
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN__SHIFT 0x0
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON2_PERFCOUNTER_CNTL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFCOUNTER_CNTL2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFCOUNTER_STATE
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON2_PERFMON_CNTL
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON2_PERFMON_CNTL2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON2_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON2_PERFMON_CVALUE_LOW
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON2_PERFMON_HI
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFMON_LOW
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+//CC_DC_PIPE_DIS
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x0
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE__SHIFT 0x10
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x000000FFL
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE_MASK 0x00010000L
+//DMU_CLK_CNTL
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL__SHIFT 0x0
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS__SHIFT 0x4
+#define DMU_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS__SHIFT 0x5
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS__SHIFT 0x6
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON__SHIFT 0x8
+#define DMU_CLK_CNTL__DISPCLK_G_DMCU_CLOCK_ON__SHIFT 0x9
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON__SHIFT 0xa
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL_MASK 0x0000000FL
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS_MASK 0x00000010L
+#define DMU_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS_MASK 0x00000020L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS_MASK 0x00000040L
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON_MASK 0x00000100L
+#define DMU_CLK_CNTL__DISPCLK_G_DMCU_CLOCK_ON_MASK 0x00000200L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON_MASK 0x00000400L
+//DMU_MEM_PWR_CNTL
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_FORCE__SHIFT 0x1
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_DIS__SHIFT 0x3
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_STATE__SHIFT 0x4
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_FORCE__SHIFT 0x8
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_DIS__SHIFT 0x9
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xa
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_MODE_SEL_MASK 0x00000001L
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_FORCE_MASK 0x00000006L
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_DIS_MASK 0x00000008L
+#define DMU_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_STATE_MASK 0x00000030L
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_FORCE_MASK 0x00000100L
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_DIS_MASK 0x00000200L
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK 0x00000400L
+//DMCU_SMU_INTERRUPT_CNTL
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_INT__SHIFT 0x0
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_STATUS__SHIFT 0x10
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_INT_MASK 0x00000001L
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_STATUS_MASK 0xFFFF0000L
+//SMU_INTERRUPT_CONTROL
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE__SHIFT 0x0
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS__SHIFT 0x4
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT__SHIFT 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE_MASK 0x00000001L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS_MASK 0x00000010L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT_MASK 0xFFFF0000L
+//DMU_MISC_ALLOW_DS_FORCE
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN__SHIFT 0x0
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE__SHIFT 0x4
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN_MASK 0x00000001L
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dmu_dmcu_dispdec
+//DMCU_CTRL
+#define DMCU_CTRL__RESET_UC__SHIFT 0x0
+#define DMCU_CTRL__IGNORE_PWRMGT__SHIFT 0x1
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC__SHIFT 0x2
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC__SHIFT 0x3
+#define DMCU_CTRL__DMCU_ENABLE__SHIFT 0x4
+#define DMCU_CTRL__DMCU_DYN_CLK_GATING_EN__SHIFT 0x8
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT__SHIFT 0x10
+#define DMCU_CTRL__RESET_UC_MASK 0x00000001L
+#define DMCU_CTRL__IGNORE_PWRMGT_MASK 0x00000002L
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC_MASK 0x00000004L
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC_MASK 0x00000008L
+#define DMCU_CTRL__DMCU_ENABLE_MASK 0x00000010L
+#define DMCU_CTRL__DMCU_DYN_CLK_GATING_EN_MASK 0x00000100L
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT_MASK 0xFFFF0000L
+//DMCU_STATUS
+#define DMCU_STATUS__UC_IN_RESET__SHIFT 0x0
+#define DMCU_STATUS__UC_IN_WAIT_MODE__SHIFT 0x1
+#define DMCU_STATUS__UC_IN_STOP_MODE__SHIFT 0x2
+#define DMCU_STATUS__UC_IN_RESET_MASK 0x00000001L
+#define DMCU_STATUS__UC_IN_WAIT_MODE_MASK 0x00000002L
+#define DMCU_STATUS__UC_IN_STOP_MODE_MASK 0x00000004L
+//DMCU_PC_START_ADDR
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB_MASK 0x000000FFL
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB_MASK 0x0000FF00L
+//DMCU_FW_START_ADDR
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB_MASK 0x000000FFL
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB_MASK 0x0000FF00L
+//DMCU_FW_END_ADDR
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB_MASK 0x000000FFL
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB_MASK 0x0000FF00L
+//DMCU_FW_ISR_START_ADDR
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB_MASK 0x000000FFL
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB_MASK 0x0000FF00L
+//DMCU_FW_CS_HI
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI__SHIFT 0x0
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI_MASK 0xFFFFFFFFL
+//DMCU_FW_CS_LO
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO__SHIFT 0x0
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO_MASK 0xFFFFFFFFL
+//DMCU_RAM_ACCESS_CTRL
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC__SHIFT 0x0
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC__SHIFT 0x1
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC__SHIFT 0x2
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC__SHIFT 0x3
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN__SHIFT 0x4
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN__SHIFT 0x5
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC_MASK 0x00000001L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC_MASK 0x00000002L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC_MASK 0x00000004L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC_MASK 0x00000008L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN_MASK 0x00000010L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN_MASK 0x00000020L
+//DMCU_ERAM_WR_CTRL
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR__SHIFT 0x0
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE__SHIFT 0x10
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE__SHIFT 0x14
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR_MASK 0x0000FFFFL
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE_MASK 0x000F0000L
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE_MASK 0x00100000L
+//DMCU_ERAM_WR_DATA
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA__SHIFT 0x0
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA_MASK 0xFFFFFFFFL
+//DMCU_ERAM_RD_CTRL
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR__SHIFT 0x0
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE__SHIFT 0x10
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE__SHIFT 0x14
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR_MASK 0x0000FFFFL
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE_MASK 0x000F0000L
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE_MASK 0x00100000L
+//DMCU_ERAM_RD_DATA
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA__SHIFT 0x0
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA_MASK 0xFFFFFFFFL
+//DMCU_IRAM_WR_CTRL
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR__SHIFT 0x0
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR_MASK 0x000003FFL
+//DMCU_IRAM_WR_DATA
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA__SHIFT 0x0
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA_MASK 0x000000FFL
+//DMCU_IRAM_RD_CTRL
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR__SHIFT 0x0
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR_MASK 0x000003FFL
+//DMCU_IRAM_RD_DATA
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA__SHIFT 0x0
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA_MASK 0x000000FFL
+//DMCU_EVENT_TRIGGER
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC__SHIFT 0x0
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE__SHIFT 0x10
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST__SHIFT 0x17
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC_MASK 0x00000001L
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE_MASK 0x007F0000L
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST_MASK 0x00800000L
+//DMCU_UC_INTERNAL_INT_STATUS
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN__SHIFT 0x0
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN__SHIFT 0x1
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT__SHIFT 0x2
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP__SHIFT 0x3
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4__SHIFT 0x4
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3__SHIFT 0x5
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2__SHIFT 0x6
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1__SHIFT 0x7
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW__SHIFT 0x8
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT__SHIFT 0x9
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5__SHIFT 0xa
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3__SHIFT 0xb
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2__SHIFT 0xc
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1__SHIFT 0xd
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE__SHIFT 0xe
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW__SHIFT 0xf
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN_MASK 0x00000001L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN_MASK 0x00000002L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT_MASK 0x00000004L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP_MASK 0x00000008L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4_MASK 0x00000010L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3_MASK 0x00000020L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2_MASK 0x00000040L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1_MASK 0x00000080L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW_MASK 0x00000100L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT_MASK 0x00000200L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5_MASK 0x00000400L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3_MASK 0x00000800L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2_MASK 0x00001000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1_MASK 0x00002000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE_MASK 0x00004000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW_MASK 0x00008000L
+//DMCU_SS_INTERRUPT_CNTL_STATUS
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_STATUS__SHIFT 0xd
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_OCCURRED__SHIFT 0xe
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_CLEAR__SHIFT 0xe
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_STATUS__SHIFT 0xf
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_OCCURRED__SHIFT 0x10
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_CLEAR__SHIFT 0x10
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_STATUS__SHIFT 0x11
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_OCCURRED__SHIFT 0x12
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_CLEAR__SHIFT 0x12
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_STATUS__SHIFT 0x13
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_CLEAR__SHIFT 0x14
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_STATUS__SHIFT 0x15
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_OCCURRED__SHIFT 0x16
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_CLEAR__SHIFT 0x16
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_STATUS__SHIFT 0x17
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_OCCURRED__SHIFT 0x18
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_CLEAR__SHIFT 0x18
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_STATUS_MASK 0x00002000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_OCCURRED_MASK 0x00004000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_CLEAR_MASK 0x00004000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_STATUS_MASK 0x00008000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_CLEAR_MASK 0x00010000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_STATUS_MASK 0x00020000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_CLEAR_MASK 0x00040000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_STATUS_MASK 0x00080000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_OCCURRED_MASK 0x00100000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_CLEAR_MASK 0x00100000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_STATUS_MASK 0x00200000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_OCCURRED_MASK 0x00400000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_CLEAR_MASK 0x00400000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_STATUS_MASK 0x00800000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_OCCURRED_MASK 0x01000000L
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_CLEAR_MASK 0x01000000L
+//DMCU_INTERRUPT_STATUS
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_UP_INT_OCCURRED__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_UP_INT_CLEAR__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_UP_INT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_UP_INT_OCCURRED__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_UP_INT_CLEAR__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_UP_INT_OCCURRED__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_UP_INT_CLEAR__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_UP_INT_OCCURRED__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_UP_INT_CLEAR__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_UP_INT_OCCURRED__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_CLEAR__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_OCCURRED__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_OCCURRED__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_CLEAR__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_OCCURRED__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_CLEAR__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_OCCURRED__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_CLEAR__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_OCCURRED__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED__SHIFT 0x1d
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR__SHIFT 0x1d
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_UP_INT_OCCURRED_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_UP_INT_CLEAR_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_UP_INT_OCCURRED_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_UP_INT_OCCURRED_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_UP_INT_CLEAR_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_UP_INT_OCCURRED_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_UP_INT_CLEAR_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_UP_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_UP_INT_CLEAR_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_UP_INT_OCCURRED_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_UP_INT_CLEAR_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_CLEAR_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_OCCURRED_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_OCCURRED_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_CLEAR_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_OCCURRED_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_CLEAR_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_OCCURRED_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_CLEAR_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_OCCURRED_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_CLEAR_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED_MASK 0x20000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR_MASK 0x20000000L
+//DMCU_INTERRUPT_STATUS_1
+#define DMCU_INTERRUPT_STATUS_1__OTG0_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x6
+#define DMCU_INTERRUPT_STATUS_1__OTG0_RANGE_TIMING_UPDATE_CLEAR__SHIFT 0x6
+#define DMCU_INTERRUPT_STATUS_1__OTG1_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x7
+#define DMCU_INTERRUPT_STATUS_1__OTG1_RANGE_TIMING_UPDATE_CLEAR__SHIFT 0x7
+#define DMCU_INTERRUPT_STATUS_1__OTG2_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS_1__OTG2_RANGE_TIMING_UPDATE_CLEAR__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS_1__OTG3_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS_1__OTG3_RANGE_TIMING_UPDATE_CLEAR__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS_1__OTG4_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS_1__OTG4_RANGE_TIMING_UPDATE_CLEAR__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS_1__OTG5_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS_1__OTG5_RANGE_TIMING_UPDATE_CLEAR__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS_1__OTG0_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000040L
+#define DMCU_INTERRUPT_STATUS_1__OTG0_RANGE_TIMING_UPDATE_CLEAR_MASK 0x00000040L
+#define DMCU_INTERRUPT_STATUS_1__OTG1_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000080L
+#define DMCU_INTERRUPT_STATUS_1__OTG1_RANGE_TIMING_UPDATE_CLEAR_MASK 0x00000080L
+#define DMCU_INTERRUPT_STATUS_1__OTG2_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS_1__OTG2_RANGE_TIMING_UPDATE_CLEAR_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS_1__OTG3_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS_1__OTG3_RANGE_TIMING_UPDATE_CLEAR_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS_1__OTG4_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS_1__OTG4_RANGE_TIMING_UPDATE_CLEAR_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS_1__OTG5_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS_1__OTG5_RANGE_TIMING_UPDATE_CLEAR_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_OCCURRED_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_CLEAR_MASK 0x00002000L
+//DMCU_INTERRUPT_TO_HOST_EN_MASK
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM0_HG_READY_INT_MASK__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM0_LS_READY_INT_MASK__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM0_BL_UPDATE_INT_MASK__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM0_HG_READY_INT_MASK_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM0_LS_READY_INT_MASK_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM0_BL_UPDATE_INT_MASK_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK_MASK 0x00000010L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK_MASK 0x00000020L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK_MASK 0x00000800L
+//DMCU_INTERRUPT_TO_UC_EN_MASK
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN1_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN2_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN3_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN4_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN5_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN0_POWER_UP_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN1_POWER_UP_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN2_POWER_UP_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN3_POWER_UP_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN4_POWER_UP_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN5_POWER_UP_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN__SHIFT 0x1d
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN6_INT_TO_UC_EN__SHIFT 0x1e
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN1_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN2_INT_TO_UC_EN_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN3_INT_TO_UC_EN_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN4_INT_TO_UC_EN_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN5_INT_TO_UC_EN_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN0_POWER_UP_INT_TO_UC_EN_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN1_POWER_UP_INT_TO_UC_EN_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN2_POWER_UP_INT_TO_UC_EN_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN3_POWER_UP_INT_TO_UC_EN_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN4_POWER_UP_INT_TO_UC_EN_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN5_POWER_UP_INT_TO_UC_EN_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_TO_UC_EN_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_TO_UC_EN_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_TO_UC_EN_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_TO_UC_EN_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_TO_UC_EN_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_TO_UC_EN_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN_MASK 0x10000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN_MASK 0x20000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN6_INT_TO_UC_EN_MASK 0x40000000L
+//DMCU_INTERRUPT_TO_UC_EN_MASK_1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG0_RANGE_TIMING_UPDATE_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG1_RANGE_TIMING_UPDATE_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG2_RANGE_TIMING_UPDATE_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG3_RANGE_TIMING_UPDATE_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG4_RANGE_TIMING_UPDATE_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG5_RANGE_TIMING_UPDATE_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DMCU_GENERIC_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG0_RANGE_TIMING_UPDATE_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG1_RANGE_TIMING_UPDATE_INT_TO_UC_EN_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG2_RANGE_TIMING_UPDATE_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG3_RANGE_TIMING_UPDATE_INT_TO_UC_EN_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG4_RANGE_TIMING_UPDATE_INT_TO_UC_EN_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__OTG5_RANGE_TIMING_UPDATE_INT_TO_UC_EN_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DMCU_GENERIC_INT_TO_UC_EN_MASK 0x00002000L
+//DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN1_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN2_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN3_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN4_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN5_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN2_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN3_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN4_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN5_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL__SHIFT 0x1d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN6_INT_XIRQ_IRQ_SEL__SHIFT 0x1e
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN1_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN2_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN3_INT_XIRQ_IRQ_SEL_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN4_INT_XIRQ_IRQ_SEL_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN5_INT_XIRQ_IRQ_SEL_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN2_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN3_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN4_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN5_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN2_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN3_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN4_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DOMAIN5_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL_MASK 0x10000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL_MASK 0x20000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN6_INT_XIRQ_IRQ_SEL_MASK 0x40000000L
+//DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG0_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG1_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG2_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG3_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG4_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG5_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DMCU_GENERIC_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG0_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG1_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG2_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG3_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG4_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__OTG5_RANGE_TIMING_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DMCU_GENERIC_INT_XIRQ_IRQ_SEL_MASK 0x00002000L
+//DC_DMCU_SCRATCH
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH__SHIFT 0x0
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH_MASK 0xFFFFFFFFL
+//DMCU_INT_CNT
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT__SHIFT 0x0
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT__SHIFT 0x8
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT__SHIFT 0x10
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT_MASK 0x000000FFL
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT_MASK 0x0000FF00L
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT_MASK 0x00FF0000L
+//DMCU_FW_CHECKSUM_SMPL_BYTE_POS
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS__SHIFT 0x0
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS__SHIFT 0x2
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS_MASK 0x00000003L
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS_MASK 0x0000000CL
+//DMCU_UC_CLK_GATING_CNTL
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY__SHIFT 0x0
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY__SHIFT 0x8
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN__SHIFT 0x10
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY_MASK 0x00000007L
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY_MASK 0x00000700L
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN_MASK 0x00010000L
+//MASTER_COMM_DATA_REG1
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0_MASK 0x000000FFL
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1_MASK 0x0000FF00L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2_MASK 0x00FF0000L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3_MASK 0xFF000000L
+//MASTER_COMM_DATA_REG2
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0_MASK 0x000000FFL
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1_MASK 0x0000FF00L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2_MASK 0x00FF0000L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3_MASK 0xFF000000L
+//MASTER_COMM_DATA_REG3
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0_MASK 0x000000FFL
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1_MASK 0x0000FF00L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2_MASK 0x00FF0000L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3_MASK 0xFF000000L
+//MASTER_COMM_CMD_REG
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0__SHIFT 0x0
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1__SHIFT 0x8
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2__SHIFT 0x10
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3__SHIFT 0x18
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0_MASK 0x000000FFL
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1_MASK 0x0000FF00L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2_MASK 0x00FF0000L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3_MASK 0xFF000000L
+//MASTER_COMM_CNTL_REG
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT__SHIFT 0x0
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+//SLAVE_COMM_DATA_REG1
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0_MASK 0x000000FFL
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1_MASK 0x0000FF00L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2_MASK 0x00FF0000L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3_MASK 0xFF000000L
+//SLAVE_COMM_DATA_REG2
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0_MASK 0x000000FFL
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1_MASK 0x0000FF00L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2_MASK 0x00FF0000L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3_MASK 0xFF000000L
+//SLAVE_COMM_DATA_REG3
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0_MASK 0x000000FFL
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1_MASK 0x0000FF00L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2_MASK 0x00FF0000L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3_MASK 0xFF000000L
+//SLAVE_COMM_CMD_REG
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0_MASK 0x000000FFL
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1_MASK 0x0000FF00L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2_MASK 0x00FF0000L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3_MASK 0xFF000000L
+//SLAVE_COMM_CNTL_REG
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT__SHIFT 0x0
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS__SHIFT 0x8
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT_MASK 0x00000001L
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS_MASK 0x00000100L
+//DMCU_PERFMON_INTERRUPT_STATUS1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DMU_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DMU_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DIO_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DIO_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DMU_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DMU_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DIO_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DIO_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000004L
+//DMCU_PERFMON_INTERRUPT_STATUS2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP0_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP0_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP1_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP1_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP2_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP2_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP3_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP3_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP4_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP4_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP5_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP5_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP6_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP6_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP7_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP7_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBBUB_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBBUB_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP0_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP0_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP1_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP1_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP2_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP2_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP3_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP3_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP4_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP4_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP5_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP5_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP6_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP6_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP7_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBP7_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBBUB_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_PERFMON_INTERRUPT_STATUS2__HUBBUB_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000100L
+//DMCU_PERFMON_INTERRUPT_STATUS3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP0_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP0_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP1_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP1_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP2_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP2_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP3_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP3_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP4_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP4_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP5_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP5_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP6_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP6_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP7_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP7_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP0_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP0_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP1_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP1_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP2_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP2_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP3_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP3_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP4_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP4_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP5_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP5_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP6_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP6_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP7_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DPP7_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000080L
+//DMCU_PERFMON_INTERRUPT_STATUS4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB0_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB0_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB1_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB1_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__MMHUBBUB_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS4__MMHUBBUB_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB2_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB2_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB0_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB0_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB1_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB1_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_INT_CLEAR_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__MMHUBBUB_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__MMHUBBUB_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB2_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB2_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000010L
+//DMCU_PERFMON_INTERRUPT_STATUS5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__MPC_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS5__MPC_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPP_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPP_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPTC_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPTC_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__HDA_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS5__HDA_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC0_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC0_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC1_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC1_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC2_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC2_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC3_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC3_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC4_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC4_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC5_PERFMON_COUNTER_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC5_PERFMON_COUNTER_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS5__MPC_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__MPC_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPP_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPP_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPTC_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__OPTC_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__HDA_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__HDA_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC0_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC0_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC1_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC1_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC2_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC2_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC3_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC3_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC4_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC4_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000100L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC5_PERFMON_COUNTER_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DSC5_PERFMON_COUNTER_INT_CLEAR_MASK 0x00000200L
+//DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DMU_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DIO_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DMU_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DIO_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000004L
+//DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP0_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP1_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP2_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP3_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP4_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP5_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP6_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP7_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBBUB_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP0_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP1_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP2_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP3_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP4_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP5_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP6_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBP7_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__HUBBUB_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000100L
+//DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP0_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP1_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP2_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP3_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP4_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP5_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP6_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP7_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP0_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP1_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP2_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP3_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP4_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP5_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP6_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DPP7_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000080L
+//DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB0_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB1_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__MMHUBBUB_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB2_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB0_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB1_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__MMHUBBUB_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB2_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000010L
+//DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__MPC_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__OPP_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__OPTC_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__HDA_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC0_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC1_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC2_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC3_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC4_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC5_PERFMON_COUNTER_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__MPC_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__OPP_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__OPTC_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__HDA_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC0_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC1_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC2_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC3_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC4_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DSC5_PERFMON_COUNTER_INT_TO_UC_EN_MASK 0x00000200L
+//DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DMU_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DIO_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DMU_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DIO_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+//DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP3_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP4_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP5_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP6_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP7_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBBUB_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP3_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP4_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP5_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP6_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBP7_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__HUBBUB_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+//DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP3_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP4_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP5_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP6_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP7_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP3_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP4_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP5_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP6_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DPP7_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+//DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__MMHUBBUB_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__MMHUBBUB_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+//DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__MPC_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__OPTC_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__OPP_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__HDA_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC3_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC4_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC5_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__MPC_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__OPTC_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__OPP_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__HDA_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC0_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC1_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000020L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC2_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC3_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC4_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DSC5_PERFMON_COUNTER_INT_XIRQ_IRQ_SEL_MASK 0x00000200L
+//DMCU_DPRX_INTERRUPT_STATUS1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_OCCURRED__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_CLEAR__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_OCCURRED__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_CLEAR__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_OCCURRED__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_CLEAR__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_OCCURRED__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_CLEAR__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_OCCURRED__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_CLEAR__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_OCCURRED__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_CLEAR__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_OCCURRED__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_CLEAR__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_OCCURRED__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_CLEAR__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_OCCURRED__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_CLEAR__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_CLEAR__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_OCCURRED__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_CLEAR__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_OCCURRED__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_CLEAR__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_OCCURRED__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_CLEAR__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_OCCURRED__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_CLEAR__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_OCCURRED__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_CLEAR__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_OCCURRED__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_CLEAR__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_OCCURRED__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_CLEAR__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_CLEAR__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_CLEAR__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_CLEAR__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_CLEAR_MASK 0x00000001L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR_MASK 0x00000002L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_OCCURRED_MASK 0x00000004L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_CLEAR_MASK 0x00000004L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_OCCURRED_MASK 0x00000008L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_CLEAR_MASK 0x00000008L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_CLEAR_MASK 0x00000010L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_OCCURRED_MASK 0x00000020L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_CLEAR_MASK 0x00000020L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED_MASK 0x00000040L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR_MASK 0x00000040L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_OCCURRED_MASK 0x00000080L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_CLEAR_MASK 0x00000080L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_OCCURRED_MASK 0x00000100L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_CLEAR_MASK 0x00000100L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_CLEAR_MASK 0x00000200L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00000400L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00000400L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00000800L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00000800L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00001000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00001000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00002000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00002000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00004000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00004000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00008000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00008000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x00010000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_OCCURRED_MASK 0x00020000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_CLEAR_MASK 0x00020000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_CLEAR_MASK 0x00040000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_OCCURRED_MASK 0x00080000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_CLEAR_MASK 0x00080000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_OCCURRED_MASK 0x00100000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_CLEAR_MASK 0x00100000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_OCCURRED_MASK 0x00200000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_CLEAR_MASK 0x00200000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_OCCURRED_MASK 0x00400000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_CLEAR_MASK 0x00400000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_OCCURRED_MASK 0x00800000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_CLEAR_MASK 0x00800000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_OCCURRED_MASK 0x01000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_CLEAR_MASK 0x01000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_OCCURRED_MASK 0x02000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_CLEAR_MASK 0x02000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_OCCURRED_MASK 0x04000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_CLEAR_MASK 0x04000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_OCCURRED_MASK 0x08000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_CLEAR_MASK 0x08000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_OCCURRED_MASK 0x10000000L
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_CLEAR_MASK 0x10000000L
+//DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_MSA_RECEIVED_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT0_TO_UC_EN__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT1_TO_UC_EN__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_SDP_RECEIVED_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_MSA_RECEIVED_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT0_TO_UC_EN__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT1_TO_UC_EN__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_SDP_RECEIVED_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_AUX_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_I2C_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_CPU_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_MSA_RECEIVED_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT0_TO_UC_EN_MASK 0x00000004L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT1_TO_UC_EN_MASK 0x00000008L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_SDP_RECEIVED_INT_TO_UC_EN_MASK 0x00000010L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_MSA_RECEIVED_INT_TO_UC_EN_MASK 0x00000020L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT0_TO_UC_EN_MASK 0x00000080L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT1_TO_UC_EN_MASK 0x00000100L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_SDP_RECEIVED_INT_TO_UC_EN_MASK 0x00000200L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00000400L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00000800L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00001000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00002000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00004000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00008000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x00010000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_TO_UC_EN_MASK 0x00020000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_TO_UC_EN_MASK 0x00040000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_TO_UC_EN_MASK 0x00080000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_TO_UC_EN_MASK 0x00100000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_TO_UC_EN_MASK 0x00200000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_AUX_INT_TO_UC_EN_MASK 0x00400000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_I2C_INT_TO_UC_EN_MASK 0x00800000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_CPU_INT_TO_UC_EN_MASK 0x01000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_TO_UC_EN_MASK 0x02000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_TO_UC_EN_MASK 0x04000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_TO_UC_EN_MASK 0x08000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_TO_UC_EN_MASK 0x10000000L
+//DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT0_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT1_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT0_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT1_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_AUX_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_I2C_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_CPU_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT0_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT1_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x00000020L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT0_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT1_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x00000200L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00000400L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00000800L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00001000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00002000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00004000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00008000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x00010000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_XIRQ_IRQ_SEL_MASK 0x00020000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_XIRQ_IRQ_SEL_MASK 0x00040000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_XIRQ_IRQ_SEL_MASK 0x00080000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_XIRQ_IRQ_SEL_MASK 0x00100000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_XIRQ_IRQ_SEL_MASK 0x00200000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_AUX_INT_XIRQ_IRQ_SEL_MASK 0x00400000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_I2C_INT_XIRQ_IRQ_SEL_MASK 0x00800000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_CPU_INT_XIRQ_IRQ_SEL_MASK 0x01000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x02000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x04000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x08000000L
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x10000000L
+//DMCU_INTERRUPT_STATUS_CONTINUE
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_CLEAR__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_CLEAR__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_OCCURRED__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_CLEAR__SHIFT 0x6
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_OCCURRED__SHIFT 0x7
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_CLEAR__SHIFT 0x7
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_OCCURRED__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_CLEAR__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_OCCURRED__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_OCCURRED__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_CLEAR__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_OCCURRED__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_OCCURRED__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_CLEAR__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_OCCURRED__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_CLEAR__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_OCCURRED__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_OCCURRED__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_CLEAR__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_OCCURRED__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_CLEAR__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_CLEAR__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_OCCURRED__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_OCCURRED__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_CLEAR__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_OCCURRED__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_CLEAR__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_OCCURRED__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_CLEAR__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_OCCURRED__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_CLEAR__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_OCCURRED__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_CLEAR__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_OCCURRED__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_CLEAR__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_HG_READY_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_HG_READY_INT_CLEAR__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_LS_READY_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_LS_READY_INT_CLEAR__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_BL_UPDATE_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_BL_UPDATE_INT_CLEAR__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_CLEAR_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_CLEAR_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_CLEAR_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_CLEAR_MASK 0x00000010L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_OCCURRED_MASK 0x00000020L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_CLEAR_MASK 0x00000040L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_OCCURRED_MASK 0x00000080L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_CLEAR_MASK 0x00000080L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_CLEAR_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_OCCURRED_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_CLEAR_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_OCCURRED_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_OCCURRED_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_CLEAR_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_OCCURRED_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_CLEAR_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_OCCURRED_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_CLEAR_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_OCCURRED_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_CLEAR_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_OCCURRED_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_CLEAR_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_CLEAR_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_OCCURRED_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_OCCURRED_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_CLEAR_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_OCCURRED_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_CLEAR_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_OCCURRED_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_CLEAR_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_OCCURRED_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_CLEAR_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_OCCURRED_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_CLEAR_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_OCCURRED_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_CLEAR_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_HG_READY_INT_OCCURRED_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_HG_READY_INT_CLEAR_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_LS_READY_INT_OCCURRED_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_LS_READY_INT_CLEAR_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_BL_UPDATE_INT_OCCURRED_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS_CONTINUE__ABM0_BL_UPDATE_INT_CLEAR_MASK 0x10000000L
+//DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_TO_UC_EN__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_TO_UC_EN__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_TO_UC_EN__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_TO_UC_EN__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_TO_UC_EN__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_TO_UC_EN__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__ABM0_HG_READY_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__ABM0_LS_READY_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__ABM0_BL_UPDATE_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_TO_UC_EN_MASK 0x00000010L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_TO_UC_EN_MASK 0x00000020L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_TO_UC_EN_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_TO_UC_EN_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_TO_UC_EN_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_TO_UC_EN_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_TO_UC_EN_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_TO_UC_EN_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_TO_UC_EN_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_TO_UC_EN_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_TO_UC_EN_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_TO_UC_EN_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_TO_UC_EN_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_TO_UC_EN_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_TO_UC_EN_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_TO_UC_EN_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_TO_UC_EN_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_TO_UC_EN_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__ABM0_HG_READY_INT_TO_UC_EN_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__ABM0_LS_READY_INT_TO_UC_EN_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE__ABM0_BL_UPDATE_INT_TO_UC_EN_MASK 0x10000000L
+//DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__ABM0_HG_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__ABM0_LS_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__ABM0_BL_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN6_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN7_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN8_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN9_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN10_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN11_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000020L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN12_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN13_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN14_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN15_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN6_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN7_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN8_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN9_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN10_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN11_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN12_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN13_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN14_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCPG_IHC_DOMAIN15_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG0_XIRQ_IRQ_SEL_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG1_XIRQ_IRQ_SEL_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG2_XIRQ_IRQ_SEL_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG3_XIRQ_IRQ_SEL_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG4_XIRQ_IRQ_SEL_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__DCCG_DMCU_INT_VSYNC_CNT_OTG5_XIRQ_IRQ_SEL_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__ABM0_HG_READY_INT_XIRQ_IRQ_SEL_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__ABM0_LS_READY_INT_XIRQ_IRQ_SEL_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE__ABM0_BL_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x10000000L
+//DMCU_INT_CNT_CONTINUE
+#define DMCU_INT_CNT_CONTINUE__DMCU_ABM0_HG_READY_INT_CNT__SHIFT 0x0
+#define DMCU_INT_CNT_CONTINUE__DMCU_ABM0_LS_READY_INT_CNT__SHIFT 0x8
+#define DMCU_INT_CNT_CONTINUE__DMCU_ABM0_BL_UPDATE_INT_CNT__SHIFT 0x10
+#define DMCU_INT_CNT_CONTINUE__DMCU_ABM0_HG_READY_INT_CNT_MASK 0x000000FFL
+#define DMCU_INT_CNT_CONTINUE__DMCU_ABM0_LS_READY_INT_CNT_MASK 0x0000FF00L
+#define DMCU_INT_CNT_CONTINUE__DMCU_ABM0_BL_UPDATE_INT_CNT_MASK 0x00FF0000L
+//DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN16_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN17_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN18_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN19_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN20_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN21_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXA_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXB_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXC_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXD_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXE_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXF_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXG_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN16_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN17_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN18_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN19_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN20_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000010L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN21_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00000020L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXA_INT_XIRQ_IRQ_SEL_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXB_INT_XIRQ_IRQ_SEL_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXC_INT_XIRQ_IRQ_SEL_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXD_INT_XIRQ_IRQ_SEL_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXE_INT_XIRQ_IRQ_SEL_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXF_INT_XIRQ_IRQ_SEL_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONT2__DCIO_DPCS_TXG_INT_XIRQ_IRQ_SEL_MASK 0x00400000L
+//DMCU_INTERRUPT_STATUS_2
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_UP_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_UP_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_UP_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_UP_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_UP_INT_CLEAR__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_UP_INT_CLEAR__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_UP_INT_OCCURRED__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_OCCURRED__SHIFT 0x6
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_CLEAR__SHIFT 0x6
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_OCCURRED__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_CLEAR__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_OCCURRED__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_CLEAR__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_OCCURRED__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_CLEAR__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_OCCURRED__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXA_INT_OCCURRED__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXA_INT_CLEAR__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXB_INT_OCCURRED__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXB_INT_CLEAR__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXC_INT_OCCURRED__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXC_INT_CLEAR__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXD_INT_OCCURRED__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXD_INT_CLEAR__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXE_INT_OCCURRED__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXE_INT_CLEAR__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXF_INT_OCCURRED__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXF_INT_CLEAR__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXG_INT_OCCURRED__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXG_INT_CLEAR__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_UP_INT_CLEAR_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_UP_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_UP_INT_CLEAR_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_UP_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_UP_INT_CLEAR_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_UP_INT_CLEAR_MASK 0x00000010L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_UP_INT_OCCURRED_MASK 0x00000020L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_OCCURRED_MASK 0x00000040L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_CLEAR_MASK 0x00000040L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_CLEAR_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_CLEAR_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_OCCURRED_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_CLEAR_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_OCCURRED_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS_2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXA_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXA_INT_CLEAR_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXB_INT_OCCURRED_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXB_INT_CLEAR_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXC_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXC_INT_CLEAR_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXD_INT_OCCURRED_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXD_INT_CLEAR_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXE_INT_OCCURRED_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXE_INT_CLEAR_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXF_INT_OCCURRED_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXF_INT_CLEAR_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXG_INT_OCCURRED_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS_2__DCIO_DPCS_TXG_INT_CLEAR_MASK 0x00400000L
+//DMCU_INTERRUPT_TO_UC_EN_MASK_2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN16_POWER_UP_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN17_POWER_UP_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN18_POWER_UP_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN19_POWER_UP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN20_POWER_UP_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN21_POWER_UP_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXA_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXB_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXC_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXD_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXE_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXF_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXG_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN16_POWER_UP_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN17_POWER_UP_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN18_POWER_UP_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN19_POWER_UP_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN20_POWER_UP_INT_TO_UC_EN_MASK 0x00000010L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN21_POWER_UP_INT_TO_UC_EN_MASK 0x00000020L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN16_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000040L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN17_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000080L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN18_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN19_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN20_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCPG_IHC_DOMAIN21_POWER_DOWN_INT_TO_UC_EN_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXA_INT_TO_UC_EN_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXB_INT_TO_UC_EN_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXC_INT_TO_UC_EN_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXD_INT_TO_UC_EN_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXE_INT_TO_UC_EN_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXF_INT_TO_UC_EN_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_2__DCIO_DPCS_TXG_INT_TO_UC_EN_MASK 0x00400000L
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+//DC_GPU_TIMER_START_POSITION_V_UPDATE
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_VSTARTUP
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP_MASK 0x00700000L
+//DC_GPU_TIMER_READ
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x0
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xFFFFFFFFL
+//DC_GPU_TIMER_READ_CNTL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x8
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0xb
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0xe
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x11
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x14
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x17
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x0000007FL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x00000700L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x00003800L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x0001C000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0x000E0000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x00700000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x03800000L
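A short illustrative sketch, assuming a value has already been read from DC_GPU_TIMER_READ_CNTL into a local u32: a field is decoded by masking with its _MASK macro and shifting right by the paired __SHIFT macro. The helper name and the omitted MMIO read are assumptions, not taken from the patch.

#include <stdint.h>

/* Decode the 7-bit READ_SELECT field from a raw DC_GPU_TIMER_READ_CNTL value. */
static uint32_t dc_gpu_timer_read_select(uint32_t cntl)
{
	return (cntl & DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK) >>
	       DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT;
}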
+//DISP_INTERRUPT_STATUS
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
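A hedged sketch of how these status bits might be consumed, assuming "status" already holds a raw DISP_INTERRUPT_STATUS value; the helpers below are hypothetical, and the reading of bit 31 (that further pending bits live in the next DISP_INTERRUPT_STATUS_CONTINUE* register) is inferred from the field name rather than stated in the patch.

#include <stdbool.h>
#include <stdint.h>

/* True if the HPD1 interrupt bit is set in a raw DISP_INTERRUPT_STATUS value. */
static bool disp_status_hpd1_pending(uint32_t status)
{
	return (status & DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK) != 0;
}

/* True if bit 31 indicates the DISP_INTERRUPT_STATUS_CONTINUE register should also be read. */
static bool disp_status_has_continue(uint32_t status)
{
	return (status & DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK) != 0;
}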
+//DISP_INTERRUPT_STATUS_CONTINUE
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_EXT_TIMING_SYNC_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_EXT_TIMING_SYNC_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE3
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_HOST_CONFLICT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_HOST_CONFLICT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_EXT_TIMING_SYNC_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_EXT_TIMING_SYNC_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_EXT_TIMING_SYNC_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_EXT_TIMING_SYNC_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE6
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE7
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_HOST_CONFLICT_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_HOST_CONFLICT_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_HOST_CONFLICT_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_HOST_CONFLICT_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_RANGE_TIMING_UPDATE__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_RANGE_TIMING_UPDATE__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_RANGE_TIMING_UPDATE__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_RANGE_TIMING_UPDATE__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_RANGE_TIMING_UPDATE__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_RANGE_TIMING_UPDATE__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_RANGE_TIMING_UPDATE_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_RANGE_TIMING_UPDATE_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_RANGE_TIMING_UPDATE_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_RANGE_TIMING_UPDATE_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_RANGE_TIMING_UPDATE_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_RANGE_TIMING_UPDATE_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE11__VGA_IHC_VGA_CRT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__VGA_IHC_VGA_CRT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE14
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE15
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_IHC_RXSENSE_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_IHC_RXSENSE_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE19
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE20
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE21
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE21__GENERIC_I2C_DDC_READ_REUEST_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__GENERIC_I2C_DDC_READ_REUEST_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE22
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN8_POWER_UP_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN9_POWER_UP_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN10_POWER_UP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN11_POWER_UP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN12_POWER_UP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN13_POWER_UP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN14_POWER_UP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN15_POWER_UP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN8_POWER_DOWN_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN9_POWER_DOWN_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN10_POWER_DOWN_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN11_POWER_DOWN_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN12_POWER_DOWN_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN13_POWER_DOWN_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN14_POWER_DOWN_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN15_POWER_DOWN_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE22__ABM0_HG_READY_INT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE22__ABM0_LS_READY_INT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE22__ABM0_BL_UPDATE_INT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN8_POWER_UP_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN9_POWER_UP_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN10_POWER_UP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN11_POWER_UP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN12_POWER_UP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN13_POWER_UP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN14_POWER_UP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN15_POWER_UP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN8_POWER_DOWN_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN9_POWER_DOWN_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN10_POWER_DOWN_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN11_POWER_DOWN_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN12_POWER_DOWN_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN13_POWER_DOWN_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN14_POWER_DOWN_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DCPG_IHC_DOMAIN15_POWER_DOWN_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__ABM0_HG_READY_INT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__ABM0_LS_READY_INT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__ABM0_BL_UPDATE_INT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23_MASK 0x80000000L
+//DC_GPU_TIMER_START_POSITION_VREADY
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_FLIP
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP_MASK 0x70000000L
+//DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_FLIP_AWAY
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY_MASK 0x70000000L
+//DISP_INTERRUPT_STATUS_CONTINUE23
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC0_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC1_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC2_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC3_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC4_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC5_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC0_IHC_CORE_ERROR_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC1_IHC_CORE_ERROR_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC2_IHC_CORE_ERROR_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC3_IHC_CORE_ERROR_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC4_IHC_CORE_ERROR_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DSC5_IHC_CORE_ERROR_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE24
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT_MASK 0x02000000L
+//DCCG_INTERRUPT_DEST
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST__SHIFT 0x0
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST__SHIFT 0x1
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST__SHIFT 0x2
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST__SHIFT 0x3
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST__SHIFT 0x4
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST__SHIFT 0x5
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST_MASK 0x00000001L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST_MASK 0x00000002L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST_MASK 0x00000004L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST_MASK 0x00000008L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST_MASK 0x00000010L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST_MASK 0x00000020L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+//DMU_INTERRUPT_DEST
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST__SHIFT 0x0
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST__SHIFT 0x1
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST__SHIFT 0x2
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST__SHIFT 0x3
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST__SHIFT 0x4
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST__SHIFT 0x5
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST__SHIFT 0x6
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST__SHIFT 0x7
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST__SHIFT 0x8
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST__SHIFT 0x9
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST__SHIFT 0xa
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST__SHIFT 0xb
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM0_HG_READY_INTERRUPT_DEST__SHIFT 0xe
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM0_LS_READY_INTERRUPT_DEST__SHIFT 0xf
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM0_BL_UPDATE_INTERRUPT_DEST__SHIFT 0x10
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM1_HG_READY_INTERRUPT_DEST__SHIFT 0x11
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM1_LS_READY_INTERRUPT_DEST__SHIFT 0x12
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM1_BL_UPDATE_INTERRUPT_DEST__SHIFT 0x13
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST__SHIFT 0x18
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST__SHIFT 0x19
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1a
+#define DMU_INTERRUPT_DEST__DMCU_IHC_DMCU_INTERNAL_INTERRUPT_DEST__SHIFT 0x1b
+#define DMU_INTERRUPT_DEST__DMCU_IHC_SCP_INTERRUPT_DEST__SHIFT 0x1c
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST_MASK 0x00000001L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST_MASK 0x00000002L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST_MASK 0x00000004L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST_MASK 0x00000008L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST_MASK 0x00000010L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST_MASK 0x00000020L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST_MASK 0x00000040L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST_MASK 0x00000080L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST_MASK 0x00000100L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST_MASK 0x00000200L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST_MASK 0x00000400L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST_MASK 0x00000800L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM0_HG_READY_INTERRUPT_DEST_MASK 0x00004000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM0_LS_READY_INTERRUPT_DEST_MASK 0x00008000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM0_BL_UPDATE_INTERRUPT_DEST_MASK 0x00010000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM1_HG_READY_INTERRUPT_DEST_MASK 0x00020000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM1_LS_READY_INTERRUPT_DEST_MASK 0x00040000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_ABM1_BL_UPDATE_INTERRUPT_DEST_MASK 0x00080000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST_MASK 0x01000000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST_MASK 0x02000000L
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x04000000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_DMCU_INTERNAL_INTERRUPT_DEST_MASK 0x08000000L
+#define DMU_INTERRUPT_DEST__DMCU_IHC_SCP_INTERRUPT_DEST_MASK 0x10000000L
+//DCPG_INTERRUPT_DEST
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN8_POWER_UP_INTERRUPT_DEST__SHIFT 0x8
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN9_POWER_UP_INTERRUPT_DEST__SHIFT 0x9
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN10_POWER_UP_INTERRUPT_DEST__SHIFT 0xa
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN11_POWER_UP_INTERRUPT_DEST__SHIFT 0xb
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN12_POWER_UP_INTERRUPT_DEST__SHIFT 0xc
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN13_POWER_UP_INTERRUPT_DEST__SHIFT 0xd
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN14_POWER_UP_INTERRUPT_DEST__SHIFT 0xe
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN15_POWER_UP_INTERRUPT_DEST__SHIFT 0xf
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x14
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x15
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x16
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x17
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN8_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x18
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN9_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x19
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN10_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x1a
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN11_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x1b
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN12_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x1c
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN13_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x1d
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN14_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x1e
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN15_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x1f
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN8_POWER_UP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN9_POWER_UP_INTERRUPT_DEST_MASK 0x00000200L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN10_POWER_UP_INTERRUPT_DEST_MASK 0x00000400L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN11_POWER_UP_INTERRUPT_DEST_MASK 0x00000800L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN12_POWER_UP_INTERRUPT_DEST_MASK 0x00001000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN13_POWER_UP_INTERRUPT_DEST_MASK 0x00002000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN14_POWER_UP_INTERRUPT_DEST_MASK 0x00004000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN15_POWER_UP_INTERRUPT_DEST_MASK 0x00008000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST_MASK 0x00100000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST_MASK 0x00200000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST_MASK 0x00400000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST_MASK 0x00800000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN8_POWER_DOWN_INTERRUPT_DEST_MASK 0x01000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN9_POWER_DOWN_INTERRUPT_DEST_MASK 0x02000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN10_POWER_DOWN_INTERRUPT_DEST_MASK 0x04000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN11_POWER_DOWN_INTERRUPT_DEST_MASK 0x08000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN12_POWER_DOWN_INTERRUPT_DEST_MASK 0x10000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN13_POWER_DOWN_INTERRUPT_DEST_MASK 0x20000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN14_POWER_DOWN_INTERRUPT_DEST_MASK 0x40000000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN15_POWER_DOWN_INTERRUPT_DEST_MASK 0x80000000L
+//DCPG_INTERRUPT_DEST2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x8
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x9
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xa
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xb
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000100L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000200L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000400L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000800L
+//MMHUBBUB_INTERRUPT_DEST
+#define MMHUBBUB_INTERRUPT_DEST__VGA_IHC_VGA_CRT_INTERRUPT_DEST__SHIFT 0x0
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST__SHIFT 0x1
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST__SHIFT 0x2
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST__SHIFT 0x3
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST__SHIFT 0x4
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST__SHIFT 0x5
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MMHUBBUB_INTERRUPT_DEST__VGA_IHC_VGA_CRT_INTERRUPT_DEST_MASK 0x00000001L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST_MASK 0x00000002L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST_MASK 0x00000004L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST_MASK 0x00000008L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST_MASK 0x00000010L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST_MASK 0x00000020L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//WB_INTERRUPT_DEST
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_HOST_CONFLICT_INTERRUPT_DEST__SHIFT 0x0
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x1
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_HOST_CONFLICT_INTERRUPT_DEST__SHIFT 0x8
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x9
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_HOST_CONFLICT_INTERRUPT_DEST__SHIFT 0xa
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0xb
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_HOST_CONFLICT_INTERRUPT_DEST_MASK 0x00000001L
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000002L
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_HOST_CONFLICT_INTERRUPT_DEST_MASK 0x00000100L
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000200L
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_HOST_CONFLICT_INTERRUPT_DEST_MASK 0x00000400L
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000800L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+//DCHUB_INTERRUPT_DEST
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1e
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1f
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST_MASK 0x20000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST_MASK 0x40000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x80000000L
+//DCHUB_PERFCOUNTER_INTERRUPT_DEST
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x20000000L
+//DCHUB_INTERRUPT_DEST2
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x02000000L
+//DPP_PERFCOUNTER_INTERRUPT_DEST
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+//MPC_INTERRUPT_DEST
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST__SHIFT 0x0
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST__SHIFT 0x1
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST__SHIFT 0x2
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST__SHIFT 0x3
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST__SHIFT 0x4
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST__SHIFT 0x5
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST__SHIFT 0x6
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST__SHIFT 0x7
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST_MASK 0x00000001L
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST_MASK 0x00000002L
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST_MASK 0x00000004L
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST_MASK 0x00000008L
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST_MASK 0x00000010L
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST_MASK 0x00000020L
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST_MASK 0x00000040L
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST_MASK 0x00000080L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//OPP_INTERRUPT_DEST
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//OPTC_INTERRUPT_DEST
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x18
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x19
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1a
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1b
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1c
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1d
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x01000000L
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x02000000L
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x04000000L
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x08000000L
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x10000000L
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x20000000L
+//OTG0_INTERRUPT_DEST
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_RANGE_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST__SHIFT 0xc
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST__SHIFT 0xd
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST__SHIFT 0xe
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_RANGE_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST_MASK 0x00001000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST_MASK 0x00002000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST_MASK 0x00004000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+//OTG1_INTERRUPT_DEST
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_RANGE_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST__SHIFT 0xc
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST__SHIFT 0xd
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST__SHIFT 0xe
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_RANGE_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST_MASK 0x00001000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST_MASK 0x00002000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST_MASK 0x00004000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+//OTG2_INTERRUPT_DEST
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_RANGE_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST__SHIFT 0xc
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST__SHIFT 0xd
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST__SHIFT 0xe
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_RANGE_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST_MASK 0x00001000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST_MASK 0x00002000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST_MASK 0x00004000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+//OTG3_INTERRUPT_DEST
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_RANGE_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST__SHIFT 0xc
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST__SHIFT 0xd
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST__SHIFT 0xe
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_RANGE_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST_MASK 0x00001000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST_MASK 0x00002000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST_MASK 0x00004000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+//OTG4_INTERRUPT_DEST
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_RANGE_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST__SHIFT 0xc
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST__SHIFT 0xd
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST__SHIFT 0xe
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_RANGE_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST_MASK 0x00001000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST_MASK 0x00002000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST_MASK 0x00004000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+//OTG5_INTERRUPT_DEST
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_RANGE_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST__SHIFT 0xc
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST__SHIFT 0xd
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST__SHIFT 0xe
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_RANGE_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_EXT_TIMING_SYNC_LOSS_INTERRUPT_DEST_MASK 0x00001000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_EXT_TIMING_SYNC_INTERRUPT_DEST_MASK 0x00002000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_DEST_MASK 0x00004000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+//DIG_INTERRUPT_DEST
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x0
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x1
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x2
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x3
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x4
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x5
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x6
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x7
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x8
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x9
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xa
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xb
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xc
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xd
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xe
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xf
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000001L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000002L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000004L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000008L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000010L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000020L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000040L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000080L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000100L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000200L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000400L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000800L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00001000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00002000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00004000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00008000L
+//I2C_DDC_HPD_INTERRUPT_DEST
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x10
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x11
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x12
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x13
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x14
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x15
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST__SHIFT 0x16
+#define I2C_DDC_HPD_INTERRUPT_DEST__GENERIC_I2C_DDC_READ_REQUEST_INTERRPUT_DEST__SHIFT 0x17
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST_MASK 0x00010000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST_MASK 0x00020000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST_MASK 0x00040000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST_MASK 0x00080000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST_MASK 0x00100000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST_MASK 0x00200000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST_MASK 0x00400000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__GENERIC_I2C_DDC_READ_REQUEST_INTERRPUT_DEST_MASK 0x00800000L
+//DIO_INTERRUPT_DEST
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//DCIO_INTERRUPT_DEST
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x0
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x2
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x3
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x4
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x6
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x10
+#define DCIO_INTERRUPT_DEST__DCIO_IHC_RXSENSE_INTERRUPT_DEST__SHIFT 0x18
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000001L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000004L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000008L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000010L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000040L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00010000L
+#define DCIO_INTERRUPT_DEST__DCIO_IHC_RXSENSE_INTERRUPT_DEST_MASK 0x01000000L
+//HPD_INTERRUPT_DEST
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST__SHIFT 0x0
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST__SHIFT 0x1
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST__SHIFT 0x2
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST__SHIFT 0x3
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST__SHIFT 0x4
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST__SHIFT 0x5
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST__SHIFT 0x8
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST__SHIFT 0x9
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST__SHIFT 0xa
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST__SHIFT 0xb
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST__SHIFT 0xc
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST__SHIFT 0xd
+#define HPD_INTERRUPT_DEST__DOUT_IHC_DACA_AUTODETECT_GENERATE_INTERRUPT_DEST__SHIFT 0xe
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST_MASK 0x00000001L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST_MASK 0x00000002L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST_MASK 0x00000004L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST_MASK 0x00000008L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST_MASK 0x00000010L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST_MASK 0x00000020L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST_MASK 0x00000100L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST_MASK 0x00000200L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST_MASK 0x00000400L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST_MASK 0x00000800L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST_MASK 0x00001000L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST_MASK 0x00002000L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_DACA_AUTODETECT_GENERATE_INTERRUPT_DEST_MASK 0x00004000L
+//AZ_INTERRUPT_DEST
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x0
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x1
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x2
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x3
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x4
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x5
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x6
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x7
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST__SHIFT 0x8
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST__SHIFT 0x9
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST__SHIFT 0xa
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST__SHIFT 0xb
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST__SHIFT 0xc
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST__SHIFT 0xd
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST__SHIFT 0xe
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST__SHIFT 0xf
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST__SHIFT 0x10
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST__SHIFT 0x11
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST__SHIFT 0x12
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST__SHIFT 0x13
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST__SHIFT 0x14
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST__SHIFT 0x15
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST__SHIFT 0x16
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST__SHIFT 0x17
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1e
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1f
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000001L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000002L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000004L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000008L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000010L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000020L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000040L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000080L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST_MASK 0x00000100L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST_MASK 0x00000200L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST_MASK 0x00000400L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST_MASK 0x00000800L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST_MASK 0x00001000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST_MASK 0x00002000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST_MASK 0x00004000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST_MASK 0x00008000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST_MASK 0x00010000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST_MASK 0x00020000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST_MASK 0x00040000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST_MASK 0x00080000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST_MASK 0x00100000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST_MASK 0x00200000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST_MASK 0x00400000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST_MASK 0x00800000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x40000000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x80000000L
+//AUX_INTERRUPT_DEST
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST__SHIFT 0x8
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST__SHIFT 0x9
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST__SHIFT 0xa
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST__SHIFT 0xb
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x10
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x12
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x13
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x14
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x16
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x17
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x18
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x19
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x1a
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x1b
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST_MASK 0x00000100L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST_MASK 0x00000200L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST_MASK 0x00000400L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST_MASK 0x00000800L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00010000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00040000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00080000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00100000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00400000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00800000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x01000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x02000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x04000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x08000000L
+//DSC_INTERRUPT_DEST
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x0
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x4
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x6
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x7
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x8
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x9
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xa
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xb
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0xc
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0xd
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x10
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x14
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000001L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000010L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000040L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000080L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000100L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000200L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000400L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000800L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00001000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00002000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00010000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00100000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+
+
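Every register block in this generated header follows the same convention: each field is described by a <REG>__<FIELD>__SHIFT bit offset together with a matching <REG>__<FIELD>_MASK. The short, self-contained sketch below shows how such a shift/mask pair is typically consumed; the FIELD_*/GET_FIELD/SET_FIELD helpers are illustrative stand-ins for the driver's token-pasting accessors and are not part of this patch, while the two field definitions are copied verbatim from the DSC_INTERRUPT_DEST block above.

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the DSC_INTERRUPT_DEST block above. */
#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x1
#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST_MASK   0x00000002L
#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x5
#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST_MASK   0x00000020L

/* Illustrative helpers mirroring the reg##__##field##__SHIFT / _MASK naming pattern. */
#define FIELD_SHIFT(reg, field)  reg##__##field##__SHIFT
#define FIELD_MASK(reg, field)   reg##__##field##_MASK
#define GET_FIELD(val, reg, field) \
	(((val) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~(uint32_t)FIELD_MASK(reg, field)) | \
	 ((uint32_t)FIELD_MASK(reg, field) & ((uint32_t)(fv) << FIELD_SHIFT(reg, field))))

int main(void)
{
	uint32_t regval = 0;

	/* Route DSC1's core-error interrupt to destination 1; DSC0 stays at 0. */
	regval = SET_FIELD(regval, DSC_INTERRUPT_DEST, DSC1_IHC_CORE_ERROR_INTERRUPT_DEST, 1);

	printf("regval=0x%08x dsc0=%u dsc1=%u\n", (unsigned)regval,
	       (unsigned)GET_FIELD(regval, DSC_INTERRUPT_DEST, DSC0_IHC_CORE_ERROR_INTERRUPT_DEST),
	       (unsigned)GET_FIELD(regval, DSC_INTERRUPT_DEST, DSC1_IHC_CORE_ERROR_INTERRUPT_DEST));
	return 0;
}

The two-level macro indirection is what makes the field name pastable: GET_FIELD and SET_FIELD only forward the register and field tokens, and the inner FIELD_SHIFT/FIELD_MASK macros concatenate them into the generated identifiers defined in this header.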
+// addressBlock: dce_dc_wb0_dispdec_cnv_dispdec
+//WB_ENABLE
+#define WB_ENABLE__WB_ENABLE__SHIFT 0x0
+#define WB_ENABLE__WB_ENABLE_MASK 0x00000001L
+//WB_EC_CONFIG
+#define WB_EC_CONFIG__DISPCLK_R_WB_GATE_DIS__SHIFT 0x0
+#define WB_EC_CONFIG__DISPCLK_G_WB_GATE_DIS__SHIFT 0x1
+#define WB_EC_CONFIG__DISPCLK_G_WBSCL_GATE_DIS__SHIFT 0x2
+#define WB_EC_CONFIG__WB_TEST_CLK_SEL__SHIFT 0x3
+#define WB_EC_CONFIG__WB_LB_LS_DIS__SHIFT 0x7
+#define WB_EC_CONFIG__WB_LB_SD_DIS__SHIFT 0x8
+#define WB_EC_CONFIG__WB_LUT_LS_DIS__SHIFT 0x9
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_MODE_SEL__SHIFT 0xc
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_DIS__SHIFT 0xe
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_FORCE__SHIFT 0xf
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE__SHIFT 0x15
+#define WB_EC_CONFIG__WB_RAM_PW_SAVE_MODE__SHIFT 0x17
+#define WB_EC_CONFIG__WBSCL_LUT_MEM_PWR_STATE__SHIFT 0x18
+#define WB_EC_CONFIG__DISPCLK_R_WB_GATE_DIS_MASK 0x00000001L
+#define WB_EC_CONFIG__DISPCLK_G_WB_GATE_DIS_MASK 0x00000002L
+#define WB_EC_CONFIG__DISPCLK_G_WBSCL_GATE_DIS_MASK 0x00000004L
+#define WB_EC_CONFIG__WB_TEST_CLK_SEL_MASK 0x00000078L
+#define WB_EC_CONFIG__WB_LB_LS_DIS_MASK 0x00000080L
+#define WB_EC_CONFIG__WB_LB_SD_DIS_MASK 0x00000100L
+#define WB_EC_CONFIG__WB_LUT_LS_DIS_MASK 0x00000200L
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_MODE_SEL_MASK 0x00003000L
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_DIS_MASK 0x00004000L
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_FORCE_MASK 0x00018000L
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_MASK 0x00600000L
+#define WB_EC_CONFIG__WB_RAM_PW_SAVE_MODE_MASK 0x00800000L
+#define WB_EC_CONFIG__WBSCL_LUT_MEM_PWR_STATE_MASK 0x03000000L
+//CNV_MODE
+#define CNV_MODE__CNV_OUT_BPC__SHIFT 0x4
+#define CNV_MODE__CNV_FRAME_CAPTURE_RATE__SHIFT 0x8
+#define CNV_MODE__CNV_WINDOW_CROP_EN__SHIFT 0xc
+#define CNV_MODE__CNV_STEREO_TYPE__SHIFT 0xd
+#define CNV_MODE__CNV_INTERLACED_MODE__SHIFT 0xf
+#define CNV_MODE__CNV_EYE_SELECTION__SHIFT 0x10
+#define CNV_MODE__CNV_STEREO_POLARITY__SHIFT 0x12
+#define CNV_MODE__CNV_INTERLACED_FIELD_ORDER__SHIFT 0x13
+#define CNV_MODE__CNV_STEREO_SPLIT__SHIFT 0x14
+#define CNV_MODE__CNV_NEW_CONTENT__SHIFT 0x18
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN_CURRENT__SHIFT 0x1e
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN__SHIFT 0x1f
+#define CNV_MODE__CNV_OUT_BPC_MASK 0x00000010L
+#define CNV_MODE__CNV_FRAME_CAPTURE_RATE_MASK 0x00000300L
+#define CNV_MODE__CNV_WINDOW_CROP_EN_MASK 0x00001000L
+#define CNV_MODE__CNV_STEREO_TYPE_MASK 0x00006000L
+#define CNV_MODE__CNV_INTERLACED_MODE_MASK 0x00008000L
+#define CNV_MODE__CNV_EYE_SELECTION_MASK 0x00030000L
+#define CNV_MODE__CNV_STEREO_POLARITY_MASK 0x00040000L
+#define CNV_MODE__CNV_INTERLACED_FIELD_ORDER_MASK 0x00080000L
+#define CNV_MODE__CNV_STEREO_SPLIT_MASK 0x00100000L
+#define CNV_MODE__CNV_NEW_CONTENT_MASK 0x01000000L
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN_CURRENT_MASK 0x40000000L
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN_MASK 0x80000000L
+//CNV_WINDOW_START
+#define CNV_WINDOW_START__CNV_WINDOW_START_X__SHIFT 0x0
+#define CNV_WINDOW_START__CNV_WINDOW_START_Y__SHIFT 0x10
+#define CNV_WINDOW_START__CNV_WINDOW_START_X_MASK 0x00000FFFL
+#define CNV_WINDOW_START__CNV_WINDOW_START_Y_MASK 0x0FFF0000L
+//CNV_WINDOW_SIZE
+#define CNV_WINDOW_SIZE__CNV_WINDOW_WIDTH__SHIFT 0x0
+#define CNV_WINDOW_SIZE__CNV_WINDOW_HEIGHT__SHIFT 0x10
+#define CNV_WINDOW_SIZE__CNV_WINDOW_WIDTH_MASK 0x00000FFFL
+#define CNV_WINDOW_SIZE__CNV_WINDOW_HEIGHT_MASK 0x0FFF0000L
+//CNV_UPDATE
+#define CNV_UPDATE__CNV_UPDATE_PENDING__SHIFT 0x0
+#define CNV_UPDATE__CNV_UPDATE_TAKEN__SHIFT 0x8
+#define CNV_UPDATE__CNV_UPDATE_LOCK__SHIFT 0x10
+#define CNV_UPDATE__CNV_UPDATE_PENDING_MASK 0x00000001L
+#define CNV_UPDATE__CNV_UPDATE_TAKEN_MASK 0x00000100L
+#define CNV_UPDATE__CNV_UPDATE_LOCK_MASK 0x00010000L
+//CNV_SOURCE_SIZE
+#define CNV_SOURCE_SIZE__CNV_SOURCE_WIDTH__SHIFT 0x0
+#define CNV_SOURCE_SIZE__CNV_SOURCE_HEIGHT__SHIFT 0x10
+#define CNV_SOURCE_SIZE__CNV_SOURCE_WIDTH_MASK 0x00007FFFL
+#define CNV_SOURCE_SIZE__CNV_SOURCE_HEIGHT_MASK 0x7FFF0000L
+//CNV_TEST_CNTL
+#define CNV_TEST_CNTL__CNV_TEST_CRC_EN__SHIFT 0x4
+#define CNV_TEST_CNTL__CNV_TEST_CRC_CONT_EN__SHIFT 0x8
+#define CNV_TEST_CNTL__CNV_TEST_CRC_EN_MASK 0x00000010L
+#define CNV_TEST_CNTL__CNV_TEST_CRC_CONT_EN_MASK 0x00000100L
+//CNV_TEST_CRC_RED
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_RED_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_SIG_RED__SHIFT 0x10
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_RED_MASK_MASK 0x0000FFF0L
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_SIG_RED_MASK 0xFFFF0000L
+//CNV_TEST_CRC_GREEN
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_GREEN_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_SIG_GREEN__SHIFT 0x10
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_GREEN_MASK_MASK 0x0000FFF0L
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_SIG_GREEN_MASK 0xFFFF0000L
+//CNV_TEST_CRC_BLUE
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_BLUE_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_SIG_BLUE__SHIFT 0x10
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_BLUE_MASK_MASK 0x0000FFF0L
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_SIG_BLUE_MASK 0xFFFF0000L
+//WB_DEBUG_CTRL
+#define WB_DEBUG_CTRL__WB_DEBUG_EN__SHIFT 0x0
+#define WB_DEBUG_CTRL__WB_DEBUG_SEL__SHIFT 0x6
+#define WB_DEBUG_CTRL__WB_DEBUG_EN_MASK 0x00000001L
+#define WB_DEBUG_CTRL__WB_DEBUG_SEL_MASK 0x000000C0L
+//WB_DBG_MODE
+#define WB_DBG_MODE__WB_DBG_MODE_EN__SHIFT 0x0
+#define WB_DBG_MODE__WB_DBG_DIN_FMT__SHIFT 0x1
+#define WB_DBG_MODE__WB_DBG_36MODE__SHIFT 0x2
+#define WB_DBG_MODE__WB_DBG_CMAP__SHIFT 0x3
+#define WB_DBG_MODE__WB_DBG_PXLRATE_ERROR__SHIFT 0x8
+#define WB_DBG_MODE__WB_DBG_SOURCE_WIDTH__SHIFT 0x10
+#define WB_DBG_MODE__WB_DBG_MODE_EN_MASK 0x00000001L
+#define WB_DBG_MODE__WB_DBG_DIN_FMT_MASK 0x00000002L
+#define WB_DBG_MODE__WB_DBG_36MODE_MASK 0x00000004L
+#define WB_DBG_MODE__WB_DBG_CMAP_MASK 0x00000008L
+#define WB_DBG_MODE__WB_DBG_PXLRATE_ERROR_MASK 0x00000100L
+#define WB_DBG_MODE__WB_DBG_SOURCE_WIDTH_MASK 0x7FFF0000L
+//WB_HW_DEBUG
+#define WB_HW_DEBUG__WB_HW_DEBUG__SHIFT 0x0
+#define WB_HW_DEBUG__WB_HW_DEBUG_MASK 0xFFFFFFFFL
+//WB_SOFT_RESET
+#define WB_SOFT_RESET__WB_SOFT_RESET__SHIFT 0x0
+#define WB_SOFT_RESET__WB_SOFT_RESET_MASK 0x00000001L
+//WB_WARM_UP_MODE_CTL1
+#define WB_WARM_UP_MODE_CTL1__WIDTH_WARMUP__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL1__HEIGHT_WARMUP__SHIFT 0x10
+#define WB_WARM_UP_MODE_CTL1__GMC_WARM_UP_ENABLE__SHIFT 0x1f
+#define WB_WARM_UP_MODE_CTL1__WIDTH_WARMUP_MASK 0x00007FFFL
+#define WB_WARM_UP_MODE_CTL1__HEIGHT_WARMUP_MASK 0x7FFF0000L
+#define WB_WARM_UP_MODE_CTL1__GMC_WARM_UP_ENABLE_MASK 0x80000000L
+//WB_WARM_UP_MODE_CTL2
+#define WB_WARM_UP_MODE_CTL2__DATA_VALUE_WARMUP__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL2__MODE_WARMUP__SHIFT 0x10
+#define WB_WARM_UP_MODE_CTL2__DATA_DEPTH_WARMUP__SHIFT 0x14
+#define WB_WARM_UP_MODE_CTL2__DATA_VALUE_WARMUP_MASK 0x000003FFL
+#define WB_WARM_UP_MODE_CTL2__MODE_WARMUP_MASK 0x00010000L
+#define WB_WARM_UP_MODE_CTL2__DATA_DEPTH_WARMUP_MASK 0x00100000L
+//CNV_TEST_DEBUG_INDEX
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CNV_TEST_DEBUG_DATA
+#define CNV_TEST_DEBUG_DATA__CNV_TEST_DEBUG_DATA__SHIFT 0x0
+#define CNV_TEST_DEBUG_DATA__CNV_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
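For orientation only, the following is a minimal, self-contained sketch (not part of the generated header) of how a SHIFT/MASK pair such as the CNV_WINDOW_START definitions above is normally used to pack and unpack a register word. The macro values are copied from the block above so the snippet compiles on its own, and the register contents are made up for illustration; the driver's REG_SET_FIELD()/REG_GET_FIELD() helpers build the same expressions from these names.

/*
 * Illustrative sketch, assuming only the SHIFT/MASK pairs defined above.
 * The register value is hypothetical, not a real hardware readout.
 */
#include <stdint.h>
#include <stdio.h>

#define CNV_WINDOW_START__CNV_WINDOW_START_X__SHIFT 0x0
#define CNV_WINDOW_START__CNV_WINDOW_START_Y__SHIFT 0x10
#define CNV_WINDOW_START__CNV_WINDOW_START_X_MASK 0x00000FFFL
#define CNV_WINDOW_START__CNV_WINDOW_START_Y_MASK 0x0FFF0000L

int main(void)
{
	uint32_t reg = 0;

	/* pack: window start X = 64, Y = 32 (example values) */
	reg |= (64u << CNV_WINDOW_START__CNV_WINDOW_START_X__SHIFT) &
	       CNV_WINDOW_START__CNV_WINDOW_START_X_MASK;
	reg |= (32u << CNV_WINDOW_START__CNV_WINDOW_START_Y__SHIFT) &
	       CNV_WINDOW_START__CNV_WINDOW_START_Y_MASK;

	/* unpack: mask out the field, then shift it down */
	uint32_t x = (reg & CNV_WINDOW_START__CNV_WINDOW_START_X_MASK) >>
		     CNV_WINDOW_START__CNV_WINDOW_START_X__SHIFT;
	uint32_t y = (reg & CNV_WINDOW_START__CNV_WINDOW_START_Y_MASK) >>
		     CNV_WINDOW_START__CNV_WINDOW_START_Y__SHIFT;

	printf("reg=0x%08x x=%u y=%u\n", reg, x, y);
	return 0;
}
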
+// addressBlock: dce_dc_wb0_dispdec_wbscl_dispdec
+//WBSCL_COEF_RAM_SELECT
+#define WBSCL_COEF_RAM_SELECT__WBSCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define WBSCL_COEF_RAM_SELECT__WBSCL_COEF_RAM_PHASE__SHIFT 0x8
+#define WBSCL_COEF_RAM_SELECT__WBSCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define WBSCL_COEF_RAM_SELECT__WBSCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000007L
+#define WBSCL_COEF_RAM_SELECT__WBSCL_COEF_RAM_PHASE_MASK 0x00000F00L
+#define WBSCL_COEF_RAM_SELECT__WBSCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//WBSCL_COEF_RAM_TAP_DATA
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define WBSCL_COEF_RAM_TAP_DATA__WBSCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//WBSCL_MODE
+#define WBSCL_MODE__WBSCL_MODE__SHIFT 0x0
+#define WBSCL_MODE__WBSCL_OUT_BIT_DEPTH__SHIFT 0x4
+#define WBSCL_MODE__WBSCL_MODE_MASK 0x00000003L
+#define WBSCL_MODE__WBSCL_OUT_BIT_DEPTH_MASK 0x00000010L
+//WBSCL_TAP_CONTROL
+#define WBSCL_TAP_CONTROL__WBSCL_V_NUM_OF_TAPS_Y_RGB__SHIFT 0x0
+#define WBSCL_TAP_CONTROL__WBSCL_V_NUM_OF_TAPS_CBCR__SHIFT 0x4
+#define WBSCL_TAP_CONTROL__WBSCL_H_NUM_OF_TAPS_Y_RGB__SHIFT 0x8
+#define WBSCL_TAP_CONTROL__WBSCL_H_NUM_OF_TAPS_CBCR__SHIFT 0xc
+#define WBSCL_TAP_CONTROL__WBSCL_V_NUM_OF_TAPS_Y_RGB_MASK 0x0000000FL
+#define WBSCL_TAP_CONTROL__WBSCL_V_NUM_OF_TAPS_CBCR_MASK 0x000000F0L
+#define WBSCL_TAP_CONTROL__WBSCL_H_NUM_OF_TAPS_Y_RGB_MASK 0x00000F00L
+#define WBSCL_TAP_CONTROL__WBSCL_H_NUM_OF_TAPS_CBCR_MASK 0x0000F000L
+//WBSCL_DEST_SIZE
+#define WBSCL_DEST_SIZE__WBSCL_DEST_HEIGHT__SHIFT 0x0
+#define WBSCL_DEST_SIZE__WBSCL_DEST_WIDTH__SHIFT 0x10
+#define WBSCL_DEST_SIZE__WBSCL_DEST_HEIGHT_MASK 0x00007FFFL
+#define WBSCL_DEST_SIZE__WBSCL_DEST_WIDTH_MASK 0x7FFF0000L
+//WBSCL_HORZ_FILTER_SCALE_RATIO
+#define WBSCL_HORZ_FILTER_SCALE_RATIO__WBSCL_H_SCALE_RATIO__SHIFT 0x0
+#define WBSCL_HORZ_FILTER_SCALE_RATIO__WBSCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//WBSCL_HORZ_FILTER_INIT_Y_RGB
+#define WBSCL_HORZ_FILTER_INIT_Y_RGB__WBSCL_H_INIT_FRAC_Y_RGB__SHIFT 0x0
+#define WBSCL_HORZ_FILTER_INIT_Y_RGB__WBSCL_H_INIT_INT_Y_RGB__SHIFT 0x18
+#define WBSCL_HORZ_FILTER_INIT_Y_RGB__WBSCL_H_INIT_FRAC_Y_RGB_MASK 0x00FFFFFFL
+#define WBSCL_HORZ_FILTER_INIT_Y_RGB__WBSCL_H_INIT_INT_Y_RGB_MASK 0x1F000000L
+//WBSCL_HORZ_FILTER_INIT_CBCR
+#define WBSCL_HORZ_FILTER_INIT_CBCR__WBSCL_H_INIT_FRAC_CBCR__SHIFT 0x0
+#define WBSCL_HORZ_FILTER_INIT_CBCR__WBSCL_H_INIT_INT_CBCR__SHIFT 0x18
+#define WBSCL_HORZ_FILTER_INIT_CBCR__WBSCL_H_INIT_FRAC_CBCR_MASK 0x00FFFFFFL
+#define WBSCL_HORZ_FILTER_INIT_CBCR__WBSCL_H_INIT_INT_CBCR_MASK 0x1F000000L
+//WBSCL_VERT_FILTER_SCALE_RATIO
+#define WBSCL_VERT_FILTER_SCALE_RATIO__WBSCL_V_SCALE_RATIO__SHIFT 0x0
+#define WBSCL_VERT_FILTER_SCALE_RATIO__WBSCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//WBSCL_VERT_FILTER_INIT_Y_RGB
+#define WBSCL_VERT_FILTER_INIT_Y_RGB__WBSCL_V_INIT_FRAC_Y_RGB__SHIFT 0x0
+#define WBSCL_VERT_FILTER_INIT_Y_RGB__WBSCL_V_INIT_INT_Y_RGB__SHIFT 0x18
+#define WBSCL_VERT_FILTER_INIT_Y_RGB__WBSCL_V_INIT_FRAC_Y_RGB_MASK 0x00FFFFFFL
+#define WBSCL_VERT_FILTER_INIT_Y_RGB__WBSCL_V_INIT_INT_Y_RGB_MASK 0x1F000000L
+//WBSCL_VERT_FILTER_INIT_CBCR
+#define WBSCL_VERT_FILTER_INIT_CBCR__WBSCL_V_INIT_FRAC_CBCR__SHIFT 0x0
+#define WBSCL_VERT_FILTER_INIT_CBCR__WBSCL_V_INIT_INT_CBCR__SHIFT 0x18
+#define WBSCL_VERT_FILTER_INIT_CBCR__WBSCL_V_INIT_FRAC_CBCR_MASK 0x00FFFFFFL
+#define WBSCL_VERT_FILTER_INIT_CBCR__WBSCL_V_INIT_INT_CBCR_MASK 0x1F000000L
+//WBSCL_ROUND_OFFSET
+#define WBSCL_ROUND_OFFSET__WBSCL_ROUND_OFFSET_Y_RGB__SHIFT 0x0
+#define WBSCL_ROUND_OFFSET__WBSCL_ROUND_OFFSET_CBCR__SHIFT 0x10
+#define WBSCL_ROUND_OFFSET__WBSCL_ROUND_OFFSET_Y_RGB_MASK 0x000003FFL
+#define WBSCL_ROUND_OFFSET__WBSCL_ROUND_OFFSET_CBCR_MASK 0x03FF0000L
+//WBSCL_OVERFLOW_STATUS
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_FLAG__SHIFT 0x0
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_ACK__SHIFT 0x8
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_MASK__SHIFT 0xc
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_INT_STATUS__SHIFT 0x10
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_INT_TYPE__SHIFT 0x14
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_FLAG_MASK 0x00000001L
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_ACK_MASK 0x00000100L
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_MASK_MASK 0x00001000L
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_INT_STATUS_MASK 0x00010000L
+#define WBSCL_OVERFLOW_STATUS__WBSCL_DATA_OVERFLOW_INT_TYPE_MASK 0x00100000L
+//WBSCL_COEF_RAM_CONFLICT_STATUS
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_FLAG__SHIFT 0x0
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_ACK__SHIFT 0x8
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_MASK__SHIFT 0xc
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_INT_STATUS__SHIFT 0x10
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_INT_TYPE__SHIFT 0x14
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_FLAG_MASK 0x00000001L
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_ACK_MASK 0x00000100L
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_MASK_MASK 0x00001000L
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_INT_STATUS_MASK 0x00010000L
+#define WBSCL_COEF_RAM_CONFLICT_STATUS__WBSCL_HOST_CONFLICT_INT_TYPE_MASK 0x00100000L
+//WBSCL_TEST_CNTL
+#define WBSCL_TEST_CNTL__WBSCL_TEST_CRC_EN__SHIFT 0x4
+#define WBSCL_TEST_CNTL__WBSCL_TEST_CRC_CONT_EN__SHIFT 0x8
+#define WBSCL_TEST_CNTL__WBSCL_TEST_CRC_EN_MASK 0x00000010L
+#define WBSCL_TEST_CNTL__WBSCL_TEST_CRC_CONT_EN_MASK 0x00000100L
+//WBSCL_TEST_CRC_RED
+#define WBSCL_TEST_CRC_RED__WBSCL_TEST_CRC_RED_MASK__SHIFT 0x0
+#define WBSCL_TEST_CRC_RED__WBSCL_TEST_CRC_SIG_RED__SHIFT 0x10
+#define WBSCL_TEST_CRC_RED__WBSCL_TEST_CRC_RED_MASK_MASK 0x000003FFL
+#define WBSCL_TEST_CRC_RED__WBSCL_TEST_CRC_SIG_RED_MASK 0xFFFF0000L
+//WBSCL_TEST_CRC_GREEN
+#define WBSCL_TEST_CRC_GREEN__WBSCL_TEST_CRC_GREEN_MASK__SHIFT 0x0
+#define WBSCL_TEST_CRC_GREEN__WBSCL_TEST_CRC_SIG_GREEN__SHIFT 0x10
+#define WBSCL_TEST_CRC_GREEN__WBSCL_TEST_CRC_GREEN_MASK_MASK 0x0000FFFFL
+#define WBSCL_TEST_CRC_GREEN__WBSCL_TEST_CRC_SIG_GREEN_MASK 0xFFFF0000L
+//WBSCL_TEST_CRC_BLUE
+#define WBSCL_TEST_CRC_BLUE__WBSCL_TEST_CRC_BLUE_MASK__SHIFT 0x0
+#define WBSCL_TEST_CRC_BLUE__WBSCL_TEST_CRC_SIG_BLUE__SHIFT 0x10
+#define WBSCL_TEST_CRC_BLUE__WBSCL_TEST_CRC_BLUE_MASK_MASK 0x000003FFL
+#define WBSCL_TEST_CRC_BLUE__WBSCL_TEST_CRC_SIG_BLUE_MASK 0xFFFF0000L
+//WBSCL_BACKPRESSURE_CNT_EN
+#define WBSCL_BACKPRESSURE_CNT_EN__WBSCL_BACKPRESSURE_CNT_EN__SHIFT 0x0
+#define WBSCL_BACKPRESSURE_CNT_EN__WBSCL_BACKPRESSURE_CNT_EN_MASK 0x00000001L
+//WB_MCIF_BACKPRESSURE_CNT
+#define WB_MCIF_BACKPRESSURE_CNT__WB_MCIF_Y_MAX_BACKPRESSURE__SHIFT 0x0
+#define WB_MCIF_BACKPRESSURE_CNT__WB_MCIF_C_MAX_BACKPRESSURE__SHIFT 0x10
+#define WB_MCIF_BACKPRESSURE_CNT__WB_MCIF_Y_MAX_BACKPRESSURE_MASK 0x0000FFFFL
+#define WB_MCIF_BACKPRESSURE_CNT__WB_MCIF_C_MAX_BACKPRESSURE_MASK 0xFFFF0000L
+//WBSCL_CLAMP_Y_RGB
+#define WBSCL_CLAMP_Y_RGB__WBSCL_CLAMP_UPPER_Y_RGB__SHIFT 0x0
+#define WBSCL_CLAMP_Y_RGB__WBSCL_CLAMP_LOWER_Y_RGB__SHIFT 0x10
+#define WBSCL_CLAMP_Y_RGB__WBSCL_CLAMP_UPPER_Y_RGB_MASK 0x000003FFL
+#define WBSCL_CLAMP_Y_RGB__WBSCL_CLAMP_LOWER_Y_RGB_MASK 0x03FF0000L
+//WBSCL_CLAMP_CBCR
+#define WBSCL_CLAMP_CBCR__WBSCL_CLAMP_UPPER_CBCR__SHIFT 0x0
+#define WBSCL_CLAMP_CBCR__WBSCL_CLAMP_LOWER_CBCR__SHIFT 0x10
+#define WBSCL_CLAMP_CBCR__WBSCL_CLAMP_UPPER_CBCR_MASK 0x000003FFL
+#define WBSCL_CLAMP_CBCR__WBSCL_CLAMP_LOWER_CBCR_MASK 0x03FF0000L
+//WBSCL_OUTSIDE_PIX_STRATEGY
+#define WBSCL_OUTSIDE_PIX_STRATEGY__WBSCL_OUTSIDE_PIX_STRATEGY__SHIFT 0x0
+#define WBSCL_OUTSIDE_PIX_STRATEGY__WBSCL_BLACK_COLOR_G_Y__SHIFT 0x10
+#define WBSCL_OUTSIDE_PIX_STRATEGY__WBSCL_OUTSIDE_PIX_STRATEGY_MASK 0x00000001L
+#define WBSCL_OUTSIDE_PIX_STRATEGY__WBSCL_BLACK_COLOR_G_Y_MASK 0x03FF0000L
+//WBSCL_OUTSIDE_PIX_STRATEGY_CBCR
+#define WBSCL_OUTSIDE_PIX_STRATEGY_CBCR__WBSCL_BLACK_COLOR_B_CB__SHIFT 0x0
+#define WBSCL_OUTSIDE_PIX_STRATEGY_CBCR__WBSCL_BLACK_COLOR_R_CR__SHIFT 0x10
+#define WBSCL_OUTSIDE_PIX_STRATEGY_CBCR__WBSCL_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define WBSCL_OUTSIDE_PIX_STRATEGY_CBCR__WBSCL_BLACK_COLOR_R_CR_MASK 0x03FF0000L
+//WBSCL_DEBUG
+#define WBSCL_DEBUG__WBSCL_DEBUG__SHIFT 0x0
+#define WBSCL_DEBUG__WBSCL_DEBUG_MASK 0xFFFFFFFFL
+//WBSCL_TEST_DEBUG_INDEX
+#define WBSCL_TEST_DEBUG_INDEX__WBSCL_TEST_DEBUG_INDEX__SHIFT 0x0
+#define WBSCL_TEST_DEBUG_INDEX__WBSCL_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define WBSCL_TEST_DEBUG_INDEX__WBSCL_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define WBSCL_TEST_DEBUG_INDEX__WBSCL_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//WBSCL_TEST_DEBUG_DATA
+#define WBSCL_TEST_DEBUG_DATA__WBSCL_TEST_DEBUG_DATA__SHIFT 0x0
+#define WBSCL_TEST_DEBUG_DATA__WBSCL_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON3_PERFCOUNTER_CNTL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFCOUNTER_CNTL2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFCOUNTER_STATE
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON3_PERFMON_CNTL
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON3_PERFMON_CNTL2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON3_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON3_PERFMON_CVALUE_LOW
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON3_PERFMON_HI
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFMON_LOW
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+//MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_DUALSIZE_REQ__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK__SHIFT 0x5
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN__SHIFT 0x7
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_P_VMID__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN__SHIFT 0x18
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_DUALSIZE_REQ_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN_MASK 0x00000010L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK_MASK 0x00000020L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN_MASK 0x00000080L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_P_VMID_MASK 0x000F0000L
+#define MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN_MASK 0x01000000L
+//MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R
+#define MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R__MCIF_WB_BUFMGR_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R__MCIF_WB_BUFMGR_CUR_LINE_R_MASK 0x00001FFFL
+//MCIF_WB0_MCIF_WB_BUFMGR_STATUS
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_VCE_INT_STATUS__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS__SHIFT 0x2
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS__SHIFT 0x7
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L__SHIFT 0xc
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF__SHIFT 0x1c
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_VCE_INT_STATUS_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS_MASK 0x00000004L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF_MASK 0x00000070L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS_MASK 0x00000080L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L_MASK 0x01FFF000L
+#define MCIF_WB0_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF_MASK 0x70000000L
+//MCIF_WB0_MCIF_WB_BUF_PITCH
+#define MCIF_WB0_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH__SHIFT 0x18
+#define MCIF_WB0_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH_MASK 0x0000FF00L
+#define MCIF_WB0_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH_MASK 0xFF000000L
+//MCIF_WB0_MCIF_WB_BUF_1_STATUS
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW__SHIFT 0x3
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE__SHIFT 0x5
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF__SHIFT 0xc
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FIELD__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE_MASK 0x00000010L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE_MASK 0x000000E0L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FIELD_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB0_MCIF_WB_BUF_1_STATUS2
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_MASK 0x00010000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB0_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB0_MCIF_WB_BUF_2_STATUS
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW__SHIFT 0x3
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE__SHIFT 0x5
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF__SHIFT 0xc
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FIELD__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE_MASK 0x00000010L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE_MASK 0x000000E0L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FIELD_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB0_MCIF_WB_BUF_2_STATUS2
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_MASK 0x00010000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB0_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB0_MCIF_WB_BUF_3_STATUS
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW__SHIFT 0x3
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE__SHIFT 0x5
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF__SHIFT 0xc
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FIELD__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE_MASK 0x00000010L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE_MASK 0x000000E0L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FIELD_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB0_MCIF_WB_BUF_3_STATUS2
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_MASK 0x00010000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB0_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB0_MCIF_WB_BUF_4_STATUS
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW__SHIFT 0x3
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE__SHIFT 0x5
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF__SHIFT 0xc
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FIELD__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE_MASK 0x00000010L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE_MASK 0x000000E0L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FIELD_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB0_MCIF_WB_BUF_4_STATUS2
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_MASK 0x00010000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB0_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL
+#define MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL__SHIFT 0x16
+#define MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE_MASK 0x00000003L
+#define MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL_MASK 0xFFC00000L
+//MCIF_WB0_MCIF_WB_SCLK_CHANGE
+#define MCIF_WB0_MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_SCLK_CHANGE__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_SCLK_CHANGE__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x0000000EL
+//MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX
+#define MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA
+#define MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA__MCIF_WB_TEST_DEBUG_DATA__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA__MCIF_WB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET__MCIF_WB_BUF_1_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET__MCIF_WB_BUF_1_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_1_ADDR_C
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET__MCIF_WB_BUF_1_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET__MCIF_WB_BUF_1_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET__MCIF_WB_BUF_2_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET__MCIF_WB_BUF_2_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_2_ADDR_C
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET__MCIF_WB_BUF_2_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET__MCIF_WB_BUF_2_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET__MCIF_WB_BUF_3_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET__MCIF_WB_BUF_3_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_3_ADDR_C
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET__MCIF_WB_BUF_3_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET__MCIF_WB_BUF_3_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET__MCIF_WB_BUF_4_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET__MCIF_WB_BUF_4_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUF_4_ADDR_C
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET__MCIF_WB_BUF_4_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET__MCIF_WB_BUF_4_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_EN__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_ACK__SHIFT 0x5
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_EN_MASK 0x00000010L
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_ACK_MASK 0x00000020L
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_MASK 0x00000F00L
+#define MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE_MASK 0x1FFF0000L
+//MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK_MASK 0x0007FFFFL
+//MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0x2
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x4
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000002L
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x00000004L
+#define MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x00000070L
+//MCIF_WB0_MCIF_WB_WATERMARK
+#define MCIF_WB0_MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK 0x0000FFFFL
+//MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL
+#define MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE_MASK 0x00000001L
+//MCIF_WB0_MCIF_WB_WARM_UP_CNTL
+#define MCIF_WB0_MCIF_WB_WARM_UP_CNTL__MCIF_WB_PITCH_SIZE_WARMUP__SHIFT 0x8
+#define MCIF_WB0_MCIF_WB_WARM_UP_CNTL__MCIF_WB_PITCH_SIZE_WARMUP_MASK 0x0000FF00L
+//MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL
+#define MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL__DIS_REFRESH_UNDER_NBPREQ__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH__SHIFT 0x1
+#define MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL__DIS_REFRESH_UNDER_NBPREQ_MASK 0x00000001L
+#define MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH_MASK 0x00000002L
+//MCIF_WB0_MULTI_LEVEL_QOS_CTRL
+#define MCIF_WB0_MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT__SHIFT 0x0
+#define MCIF_WB0_MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT_MASK 0x003FFFFFL
+//MCIF_WB0_MCIF_WB_SECURITY_LEVEL
+#define MCIF_WB0_MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL_MASK 0x00000007L
+//MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE
+#define MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE
+#define MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION
+#define MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION
+#define MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION
+#define MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION
+#define MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb1_dispdec
+//MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_DUALSIZE_REQ__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK__SHIFT 0x5
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN__SHIFT 0x7
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_P_VMID__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN__SHIFT 0x18
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_DUALSIZE_REQ_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN_MASK 0x00000010L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK_MASK 0x00000020L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN_MASK 0x00000080L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_P_VMID_MASK 0x000F0000L
+#define MCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN_MASK 0x01000000L
+//MCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R
+#define MCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R__MCIF_WB_BUFMGR_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R__MCIF_WB_BUFMGR_CUR_LINE_R_MASK 0x00001FFFL
+//MCIF_WB1_MCIF_WB_BUFMGR_STATUS
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_VCE_INT_STATUS__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS__SHIFT 0x2
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS__SHIFT 0x7
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L__SHIFT 0xc
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF__SHIFT 0x1c
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_VCE_INT_STATUS_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS_MASK 0x00000004L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF_MASK 0x00000070L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS_MASK 0x00000080L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L_MASK 0x01FFF000L
+#define MCIF_WB1_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF_MASK 0x70000000L
+//MCIF_WB1_MCIF_WB_BUF_PITCH
+#define MCIF_WB1_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH__SHIFT 0x18
+#define MCIF_WB1_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH_MASK 0x0000FF00L
+#define MCIF_WB1_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH_MASK 0xFF000000L
+//MCIF_WB1_MCIF_WB_BUF_1_STATUS
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW__SHIFT 0x3
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE__SHIFT 0x5
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF__SHIFT 0xc
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FIELD__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE_MASK 0x00000010L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE_MASK 0x000000E0L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FIELD_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB1_MCIF_WB_BUF_1_STATUS2
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_MASK 0x00010000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB1_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB1_MCIF_WB_BUF_2_STATUS
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW__SHIFT 0x3
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE__SHIFT 0x5
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF__SHIFT 0xc
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FIELD__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE_MASK 0x00000010L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE_MASK 0x000000E0L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FIELD_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB1_MCIF_WB_BUF_2_STATUS2
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_MASK 0x00010000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB1_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB1_MCIF_WB_BUF_3_STATUS
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW__SHIFT 0x3
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE__SHIFT 0x5
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF__SHIFT 0xc
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FIELD__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE_MASK 0x00000010L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE_MASK 0x000000E0L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FIELD_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB1_MCIF_WB_BUF_3_STATUS2
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_MASK 0x00010000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB1_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB1_MCIF_WB_BUF_4_STATUS
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW__SHIFT 0x3
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE__SHIFT 0x5
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF__SHIFT 0xc
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FIELD__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE_MASK 0x00000010L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE_MASK 0x000000E0L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FIELD_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB1_MCIF_WB_BUF_4_STATUS2
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_MASK 0x00010000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB1_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB1_MCIF_WB_ARBITRATION_CONTROL
+#define MCIF_WB1_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL__SHIFT 0x16
+#define MCIF_WB1_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE_MASK 0x00000003L
+#define MCIF_WB1_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL_MASK 0xFFC00000L
+//MCIF_WB1_MCIF_WB_SCLK_CHANGE
+#define MCIF_WB1_MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_SCLK_CHANGE__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_SCLK_CHANGE__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x0000000EL
+//MCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX
+#define MCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MCIF_WB1_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MCIF_WB1_MCIF_WB_TEST_DEBUG_DATA
+#define MCIF_WB1_MCIF_WB_TEST_DEBUG_DATA__MCIF_WB_TEST_DEBUG_DATA__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_TEST_DEBUG_DATA__MCIF_WB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET__MCIF_WB_BUF_1_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET__MCIF_WB_BUF_1_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_1_ADDR_C
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET__MCIF_WB_BUF_1_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET__MCIF_WB_BUF_1_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET__MCIF_WB_BUF_2_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET__MCIF_WB_BUF_2_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_2_ADDR_C
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET__MCIF_WB_BUF_2_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET__MCIF_WB_BUF_2_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET__MCIF_WB_BUF_3_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET__MCIF_WB_BUF_3_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_3_ADDR_C
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET__MCIF_WB_BUF_3_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET__MCIF_WB_BUF_3_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET__MCIF_WB_BUF_4_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET__MCIF_WB_BUF_4_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUF_4_ADDR_C
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET__MCIF_WB_BUF_4_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET__MCIF_WB_BUF_4_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_EN__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_ACK__SHIFT 0x5
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_EN_MASK 0x00000010L
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_ACK_MASK 0x00000020L
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_MASK 0x00000F00L
+#define MCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE_MASK 0x1FFF0000L
+//MCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK_MASK 0x0007FFFFL
+//MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0x2
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x4
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000002L
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x00000004L
+#define MCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x00000070L
+//MCIF_WB1_MCIF_WB_WATERMARK
+#define MCIF_WB1_MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK 0x0000FFFFL
+//MCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL
+#define MCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE_MASK 0x00000001L
+//MCIF_WB1_MCIF_WB_WARM_UP_CNTL
+#define MCIF_WB1_MCIF_WB_WARM_UP_CNTL__MCIF_WB_PITCH_SIZE_WARMUP__SHIFT 0x8
+#define MCIF_WB1_MCIF_WB_WARM_UP_CNTL__MCIF_WB_PITCH_SIZE_WARMUP_MASK 0x0000FF00L
+//MCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL
+#define MCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL__DIS_REFRESH_UNDER_NBPREQ__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH__SHIFT 0x1
+#define MCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL__DIS_REFRESH_UNDER_NBPREQ_MASK 0x00000001L
+#define MCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH_MASK 0x00000002L
+//MCIF_WB1_MULTI_LEVEL_QOS_CTRL
+#define MCIF_WB1_MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT__SHIFT 0x0
+#define MCIF_WB1_MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT_MASK 0x003FFFFFL
+//MCIF_WB1_MCIF_WB_BUF_LUMA_SIZE
+#define MCIF_WB1_MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE
+#define MCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_1_ADDR_C_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_2_ADDR_C_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_3_ADDR_C_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_4_ADDR_C_HIGH
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB1_MCIF_WB_BUF_1_RESOLUTION
+#define MCIF_WB1_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB1_MCIF_WB_BUF_2_RESOLUTION
+#define MCIF_WB1_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB1_MCIF_WB_BUF_3_RESOLUTION
+#define MCIF_WB1_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB1_MCIF_WB_BUF_4_RESOLUTION
+#define MCIF_WB1_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB1_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB1_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB1_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+//WBIF0_MISC_CTRL
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH__SHIFT 0x0
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE__SHIFT 0x10
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH_MASK 0x000003FFL
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE_MASK 0x00010000L
+//WBIF0_SMU_WM_CONTROL
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_SEL__SHIFT 0x14
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_REQ__SHIFT 0x16
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_ACK_INT_DIS__SHIFT 0x18
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_ACK_INT_STATUS__SHIFT 0x19
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_SEL_MASK 0x00300000L
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_REQ_MASK 0x00400000L
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_ACK_INT_DIS_MASK 0x01000000L
+#define WBIF0_SMU_WM_CONTROL__MCIF_WB0_WM_CHG_ACK_INT_STATUS_MASK 0x02000000L
+//WBIF0_PHASE0_OUTSTANDING_COUNTER
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//WBIF0_PHASE1_OUTSTANDING_COUNTER
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//VGA_SRC_SPLIT_CNTL
+#define VGA_SRC_SPLIT_CNTL__VGA_SPLIT_SEL__SHIFT 0x0
+#define VGA_SRC_SPLIT_CNTL__VGA_SPLIT_SEL_MASK 0x00000003L
+//MMHUBBUB_MEM_PWR_STATUS
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE__SHIFT 0x0
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE__SHIFT 0x6
+#define MMHUBBUB_MEM_PWR_STATUS__VGA_MEM_PWR_STATE__SHIFT 0x1f
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE_MASK 0x00000003L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE_MASK 0x00000030L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE_MASK 0x000000C0L
+#define MMHUBBUB_MEM_PWR_STATUS__VGA_MEM_PWR_STATE_MASK 0x80000000L
+//MMHUBBUB_MEM_PWR_CNTL
+#define MMHUBBUB_MEM_PWR_CNTL__VGA_MEM_PWR_FORCE__SHIFT 0x0
+#define MMHUBBUB_MEM_PWR_CNTL__VGA_MEM_PWR_DIS__SHIFT 0x1
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL__SHIFT 0x5
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM__SHIFT 0x7
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM__SHIFT 0x8
+#define MMHUBBUB_MEM_PWR_CNTL__VGA_MEM_PWR_FORCE_MASK 0x00000001L
+#define MMHUBBUB_MEM_PWR_CNTL__VGA_MEM_PWR_DIS_MASK 0x00000002L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS_MASK 0x00000010L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL_MASK 0x00000060L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM_MASK 0x00000080L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM_MASK 0x00000100L
+//MMHUBBUB_CLOCK_CNTL
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS__SHIFT 0x5
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_VGAIF_GATE_DIS__SHIFT 0x6
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_VGAIF_GATE_DIS__SHIFT 0x7
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_VGA_GATE_DIS__SHIFT 0x8
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS__SHIFT 0x9
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS__SHIFT 0xa
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS_MASK 0x00000020L
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_VGAIF_GATE_DIS_MASK 0x00000040L
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_VGAIF_GATE_DIS_MASK 0x00000080L
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_VGA_GATE_DIS_MASK 0x00000100L
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS_MASK 0x00000200L
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS_MASK 0x00000400L
+//MMHUBBUB_SOFT_RESET
+#define MMHUBBUB_SOFT_RESET__VGA_SOFT_RESET__SHIFT 0x0
+#define MMHUBBUB_SOFT_RESET__VGAIF_SOFT_RESET__SHIFT 0x1
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET__SHIFT 0x2
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET__SHIFT 0x8
+#define MMHUBBUB_SOFT_RESET__VGA_SOFT_RESET_MASK 0x00000001L
+#define MMHUBBUB_SOFT_RESET__VGAIF_SOFT_RESET_MASK 0x00000002L
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET_MASK 0x00000004L
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET_MASK 0x00000100L
+//DMU_IF_ERR_STATUS
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR__SHIFT 0x0
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR__SHIFT 0x4
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_MASK 0x00000001L
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR_MASK 0x00000010L
+//MMHUBBUB_CLIENT_UNIT_ID
+#define MMHUBBUB_CLIENT_UNIT_ID__VGA_UNIT_ID__SHIFT 0x0
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID__SHIFT 0x8
+#define MMHUBBUB_CLIENT_UNIT_ID__VGA_UNIT_ID_MASK 0x0000003FL
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID_MASK 0x00003F00L
+
+
+// addressBlock: dce_dc_mmhubbub_vgaif_dispdec
+//MCIF_CONTROL
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x1e
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x1f
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x40000000L
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x80000000L
+//MCIF_WRITE_COMBINE_CONTROL
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT__SHIFT 0x0
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT_MASK 0x000003FFL
+//MCIF_PHASE0_OUTSTANDING_COUNTER
+#define MCIF_PHASE0_OUTSTANDING_COUNTER__MCIF_PHASE0_OUTSTANDING_COUNTER__SHIFT 0x0
+#define MCIF_PHASE0_OUTSTANDING_COUNTER__MCIF_PHASE0_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//MCIF_PHASE1_OUTSTANDING_COUNTER
+#define MCIF_PHASE1_OUTSTANDING_COUNTER__MCIF_PHASE1_OUTSTANDING_COUNTER__SHIFT 0x0
+#define MCIF_PHASE1_OUTSTANDING_COUNTER__MCIF_PHASE1_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//MCIF_PHASE2_OUTSTANDING_COUNTER
+#define MCIF_PHASE2_OUTSTANDING_COUNTER__MCIF_PHASE2_OUTSTANDING_COUNTER__SHIFT 0x0
+#define MCIF_PHASE2_OUTSTANDING_COUNTER__MCIF_PHASE2_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON4_PERFCOUNTER_CNTL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFCOUNTER_CNTL2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFCOUNTER_STATE
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON4_PERFMON_CNTL
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON4_PERFMON_CNTL2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON4_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON4_PERFMON_CVALUE_LOW
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON4_PERFMON_HI
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFMON_LOW
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+//AZF0STREAM0_AZALIA_STREAM_INDEX
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM0_AZALIA_STREAM_DATA
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+//AZF0STREAM1_AZALIA_STREAM_INDEX
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM1_AZALIA_STREAM_DATA
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+//AZF0STREAM2_AZALIA_STREAM_INDEX
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM2_AZALIA_STREAM_DATA
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+//AZF0STREAM3_AZALIA_STREAM_INDEX
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM3_AZALIA_STREAM_DATA
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+//AZF0STREAM4_AZALIA_STREAM_INDEX
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM4_AZALIA_STREAM_DATA
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+//AZF0STREAM5_AZALIA_STREAM_INDEX
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM5_AZALIA_STREAM_DATA
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+//AZF0STREAM6_AZALIA_STREAM_INDEX
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM6_AZALIA_STREAM_DATA
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+//AZF0STREAM7_AZALIA_STREAM_INDEX
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM7_AZALIA_STREAM_DATA
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+//AZ_CLOCK_CNTL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL__SHIFT 0x18
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00010000L
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL_MASK 0x1F000000L
+
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON5_PERFCOUNTER_CNTL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFCOUNTER_CNTL2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFCOUNTER_STATE
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON5_PERFMON_CNTL
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON5_PERFMON_CNTL2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON5_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON5_PERFMON_CVALUE_LOW
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON5_PERFMON_HI
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFMON_LOW
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+//AZALIA_AUDIO_DTO
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+//AZALIA_AUDIO_DTO_CONTROL
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+//AZALIA_SOCCLK_CONTROL
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+//AZALIA_UNDERFLOW_FILLER_SAMPLE
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+//AZALIA_DATA_DMA_CONTROL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+//AZALIA_BDL_DMA_CONTROL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+//AZALIA_RIRB_AND_DP_CONTROL
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+//AZALIA_CORB_DMA_CONTROL
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+//AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER__SHIFT 0x0
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER_MASK 0xFFFFFFFFL
+//AZALIA_CYCLIC_BUFFER_SYNC
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE__SHIFT 0x0
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE_MASK 0x00000001L
+//AZALIA_GLOBAL_CAPABILITIES
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x00000006L
+//AZALIA_OUTPUT_PAYLOAD_CAPABILITY
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x10
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFF0000L
+//AZALIA_OUTPUT_STREAM_ARBITER_CONTROL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x0
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x8
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL__SHIFT 0x10
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0x000000FFL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x00000100L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL_MASK 0x00FF0000L
+//AZALIA_INPUT_PAYLOAD_CAPABILITY
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x10
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFF0000L
+//AZALIA_INPUT_CRC0_CONTROL0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC0_CONTROL1
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CONTROL2
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_INPUT_CRC0_CONTROL3
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC0_RESULT
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CONTROL0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC1_CONTROL1
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CONTROL2
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_INPUT_CRC1_CONTROL3
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC1_RESULT
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CONTROL0
+#define AZALIA_CRC0_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC0_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+//AZALIA_CRC0_CONTROL1
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CONTROL2
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_CRC0_CONTROL3
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_CRC0_RESULT
+#define AZALIA_CRC0_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC0_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CONTROL0
+#define AZALIA_CRC1_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC1_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+//AZALIA_CRC1_CONTROL1
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CONTROL2
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_CRC1_CONTROL3
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_CRC1_RESULT
+#define AZALIA_CRC1_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC1_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_MEM_PWR_CTRL
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS__SHIFT 0x2
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE__SHIFT 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS__SHIFT 0x5
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE__SHIFT 0x6
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS__SHIFT 0x8
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE__SHIFT 0x9
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS__SHIFT 0xb
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE__SHIFT 0xc
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS__SHIFT 0xe
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE__SHIFT 0xf
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS__SHIFT 0x11
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE__SHIFT 0x12
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS__SHIFT 0x14
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL__SHIFT 0x1c
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS_MASK 0x00000004L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE_MASK 0x00000018L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS_MASK 0x00000020L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS_MASK 0x00000100L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE_MASK 0x00000600L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS_MASK 0x00000800L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE_MASK 0x00003000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS_MASK 0x00004000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE_MASK 0x00018000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS_MASK 0x00020000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE_MASK 0x000C0000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS_MASK 0x00100000L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL_MASK 0x30000000L
+//AZALIA_MEM_PWR_STATUS
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE__SHIFT 0x0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE__SHIFT 0x2
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE__SHIFT 0x4
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE__SHIFT 0x6
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE__SHIFT 0x8
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE__SHIFT 0xa
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE__SHIFT 0xc
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE_MASK 0x0000000CL
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE_MASK 0x00000030L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE_MASK 0x00000300L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE_MASK 0x00000C00L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+//AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//CC_RCU_DC_AUDIO_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//AZALIA_F0_GTC_GROUP_OFFSET0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET1
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET2
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET3
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET4
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET5
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET6
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6_MASK 0xFFFFFFFFL
+//REG_DC_AUDIO_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+//AZF0STREAM8_AZALIA_STREAM_INDEX
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM8_AZALIA_STREAM_DATA
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+//AZF0STREAM9_AZALIA_STREAM_INDEX
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM9_AZALIA_STREAM_DATA
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+//AZF0STREAM10_AZALIA_STREAM_INDEX
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM10_AZALIA_STREAM_DATA
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+//AZF0STREAM11_AZALIA_STREAM_INDEX
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM11_AZALIA_STREAM_DATA
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+//AZF0STREAM12_AZALIA_STREAM_INDEX
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM12_AZALIA_STREAM_DATA
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+//AZF0STREAM13_AZALIA_STREAM_INDEX
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM13_AZALIA_STREAM_DATA
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+//AZF0STREAM14_AZALIA_STREAM_INDEX
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM14_AZALIA_STREAM_DATA
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+//AZF0STREAM15_AZALIA_STREAM_INDEX
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM15_AZALIA_STREAM_DATA
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_sdpif_dispdec
+//DCHUBBUB_SDPIF_CFG0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+//VM_REQUEST_PHYSICAL
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL__SHIFT 0x0
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL__SHIFT 0x3
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL_MASK 0x00000001L
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL_MASK 0x00000008L
+//DCHUBBUB_FORCE_IO_STATUS_0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY__SHIFT 0x1
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID__SHIFT 0x3
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE__SHIFT 0x7
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO__SHIFT 0xa
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_MASK 0x00000001L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY_MASK 0x00000002L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID_MASK 0x00000078L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE_MASK 0x00000380L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO_MASK 0xFFFFFC00L
+//DCHUBBUB_FORCE_IO_STATUS_1
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI_MASK 0x001FFFFFL
+//DCN_VM_FB_LOCATION_BASE
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//DCN_VM_FB_LOCATION_TOP
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//DCN_VM_FB_OFFSET
+#define DCN_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define DCN_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//DCN_VM_AGP_BOT
+#define DCN_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define DCN_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//DCN_VM_AGP_TOP
+#define DCN_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define DCN_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//DCN_VM_AGP_BASE
+#define DCN_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define DCN_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_START
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START_MASK 0x000FFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_END
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END_MASK 0x000FFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//DCHUBBUB_SDPIF_PIPE_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_MEM_PWR_CTRL
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_SDPIF_MEM_PWR_STATUS
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_SDPIF_CFG1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+//DCHUBBUB_SDPIF_CFG2
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL_MASK 0x00000700L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK_MASK 0x01FF0000L
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+//DCHUBBUB_RET_PATH_DCC_CFG
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN_MASK 0x00000001L
+//DCHUBBUB_RET_PATH_DCC_CFG0_0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG0_1
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_1
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_1
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_1
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_1
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_1
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_1
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_1
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_MEM_PWR_CTRL
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_RET_PATH_MEM_PWR_STATUS
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_CRC_CTRL
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN__SHIFT 0x0
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN__SHIFT 0x1
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING__SHIFT 0x2
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING__SHIFT 0x3
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL__SHIFT 0x4
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL__SHIFT 0x6
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL__SHIFT 0x8
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL__SHIFT 0xc
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_MASK_SURF_SEL_MSB__SHIFT 0xf
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL__SHIFT 0x14
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN_MASK 0x00000001L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN_MASK 0x00000002L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING_MASK 0x00000008L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL_MASK 0x00000030L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL_MASK 0x000000C0L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL_MASK 0x00000F00L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL_MASK 0x00007000L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_MASK_SURF_SEL_MSB_MASK 0x00008000L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL_MASK 0x00100000L
+//DCHUBBUB_CRC0_VAL_R_G
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_G_Y__SHIFT 0x10
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_R_CR_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_G_Y_MASK 0xFFFF0000L
+//DCHUBBUB_CRC0_VAL_B_A
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_ALPHA__SHIFT 0x10
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_B_CB_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_ALPHA_MASK 0xFFFF0000L
+//DCHUBBUB_CRC1_VAL_R_G
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_G_Y__SHIFT 0x10
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_R_CR_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_G_Y_MASK 0xFFFF0000L
+//DCHUBBUB_CRC1_VAL_B_A
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_ALPHA__SHIFT 0x10
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_B_CB_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_ALPHA_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+//DCHUBBUB_ARB_DF_REQ_OUTSTAND
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0xc
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD__SHIFT 0x17
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x001FF000L
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD_MASK 0xFF800000L
+//DCHUBBUB_ARB_SAT_LEVEL
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_QOS_FORCE
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+//DCHUBBUB_ARB_DRAM_STATE_CNTL
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL__SHIFT 0x9
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL_MASK 0x00000200L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A__SHIFT 0x10
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A_MASK 0x3FFF0000L
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B__SHIFT 0x10
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B_MASK 0x3FFF0000L
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C__SHIFT 0x10
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C_MASK 0x3FFF0000L
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D__SHIFT 0x10
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D_MASK 0x3FFF0000L
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__SHIFT 0x10
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_MASK 0xFFFF0000L
+//DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+//DCHUBBUB_ARB_TIMEOUT_ENABLE
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+//DCHUBBUB_GLOBAL_TIMER_CNTL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+//SURFACE_CHECK0_ADDRESS_LSB
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK0_ADDRESS_MSB
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK1_ADDRESS_LSB
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK1_ADDRESS_MSB
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK2_ADDRESS_LSB
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK2_ADDRESS_MSB
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK3_ADDRESS_LSB
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK3_ADDRESS_MSB
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+//VTG0_CONTROL
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+//VTG1_CONTROL
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+//VTG2_CONTROL
+#define VTG2_CONTROL__VTG2_FP2__SHIFT 0x0
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT__SHIFT 0x10
+#define VTG2_CONTROL__VTG2_ENABLE__SHIFT 0x1f
+#define VTG2_CONTROL__VTG2_FP2_MASK 0x00007FFFL
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG2_CONTROL__VTG2_ENABLE_MASK 0x80000000L
+//VTG3_CONTROL
+#define VTG3_CONTROL__VTG3_FP2__SHIFT 0x0
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT__SHIFT 0x10
+#define VTG3_CONTROL__VTG3_ENABLE__SHIFT 0x1f
+#define VTG3_CONTROL__VTG3_FP2_MASK 0x00007FFFL
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG3_CONTROL__VTG3_ENABLE_MASK 0x80000000L
+//DCHUBBUB_SOFT_RESET
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+//DCHUBBUB_CLOCK_CNTL
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+//DCFCLK_CNTL
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+//DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL__SHIFT 0x3
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL__SHIFT 0x7
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY__SHIFT 0xa
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL__SHIFT 0xb
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL_MASK 0x00000078L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL_MASK 0x00000380L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY_MASK 0x00000400L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL_MASK 0x007FF800L
+//DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR__SHIFT 0x4
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL__SHIFT 0xc
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL__SHIFT 0x13
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET__SHIFT 0x1f
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL_MASK 0x0000000EL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR_MASK 0x00000FF0L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL_MASK 0x00007000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_MASK 0x7FF80000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET_MASK 0x80000000L
+//DCHUBBUB_VLINE_SNAPSHOT
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+//DCHUBBUB_CTRL_STATUS
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL2
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+//DCHUBBUB_TIMEOUT_INTERRUPT_STATUS
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
+//DCHUBBUB_TEST_DEBUG_INDEX
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+//DCHUBBUB_TEST_DEBUG_DATA
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_A
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_B
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_C
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_D
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_MASK 0x000003FFL
+//DCHUBBUB_ARB_HOSTVM_CNTL
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE__SHIFT 0x0
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE__SHIFT 0x1
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK__SHIFT 0x2
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS__SHIFT 0x3
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS__SHIFT 0x4
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS__SHIFT 0x6
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS__SHIFT 0x8
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES__SHIFT 0x10
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS__SHIFT 0x18
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD__SHIFT 0x1c
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE_MASK 0x00000001L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE_MASK 0x00000002L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK_MASK 0x00000004L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS_MASK 0x00000008L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS_MASK 0x00000010L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS_MASK 0x00000040L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS_MASK 0x00003F00L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES_MASK 0x00FF0000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS_MASK 0x0F000000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD_MASK 0xF0000000L
+//FMON_CTRL
+#define FMON_CTRL__FMON_START__SHIFT 0x0
+#define FMON_CTRL__FMON_MODE__SHIFT 0x1
+#define FMON_CTRL__FMON_PSTATE_IGNORE__SHIFT 0x4
+#define FMON_CTRL__FMON_STATUS_IGNORE__SHIFT 0x5
+#define FMON_CTRL__FMON_URG_MODE_GREATER__SHIFT 0x6
+#define FMON_CTRL__FMON_FILTER_UID_EN__SHIFT 0x7
+#define FMON_CTRL__FMON_STATE__SHIFT 0x9
+#define FMON_CTRL__FMON_URG_FILTER__SHIFT 0xc
+#define FMON_CTRL__FMON_URG_THRESHOLD__SHIFT 0xd
+#define FMON_CTRL__FMON_FILTER_UID_1__SHIFT 0x11
+#define FMON_CTRL__FMON_FILTER_UID_2__SHIFT 0x16
+#define FMON_CTRL__FMON_SOF_SEL__SHIFT 0x1b
+#define FMON_CTRL__FMON_START_MASK 0x00000001L
+#define FMON_CTRL__FMON_MODE_MASK 0x00000006L
+#define FMON_CTRL__FMON_PSTATE_IGNORE_MASK 0x00000010L
+#define FMON_CTRL__FMON_STATUS_IGNORE_MASK 0x00000020L
+#define FMON_CTRL__FMON_URG_MODE_GREATER_MASK 0x00000040L
+#define FMON_CTRL__FMON_FILTER_UID_EN_MASK 0x00000180L
+#define FMON_CTRL__FMON_STATE_MASK 0x00000600L
+#define FMON_CTRL__FMON_URG_FILTER_MASK 0x00001000L
+#define FMON_CTRL__FMON_URG_THRESHOLD_MASK 0x0001E000L
+#define FMON_CTRL__FMON_FILTER_UID_1_MASK 0x003E0000L
+#define FMON_CTRL__FMON_FILTER_UID_2_MASK 0x07C00000L
+#define FMON_CTRL__FMON_SOF_SEL_MASK 0x38000000L
+
+
+// addressBlock: dce_dc_dchubbub_dchubbub_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON6_PERFCOUNTER_CNTL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFCOUNTER_CNTL2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFCOUNTER_STATE
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON6_PERFMON_CNTL
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON6_PERFMON_CNTL2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON6_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON6_PERFMON_CVALUE_LOW
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON6_PERFMON_HI
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFMON_LOW
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_vmrq_if_dispdec
+//DCN_VM_CONTEXT0_CNTL
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_CNTL
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_CNTL
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_CNTL
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_CNTL
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_CNTL
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_CNTL
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_CNTL
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_CNTL
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_CNTL
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_CNTL
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_CNTL
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_CNTL
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_CNTL
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_CNTL
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_CNTL
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_DEFAULT_ADDR_MSB
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA__SHIFT 0x1c
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP__SHIFT 0x1d
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB_MASK 0x0000000FL
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA_MASK 0x10000000L
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP_MASK 0x20000000L
+//DCN_VM_DEFAULT_ADDR_LSB
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+//DCN_VM_FAULT_CNTL
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR__SHIFT 0x0
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE__SHIFT 0x1
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE__SHIFT 0x2
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE__SHIFT 0x8
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE__SHIFT 0x9
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR_MASK 0x00000001L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE_MASK 0x00000002L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE_MASK 0x00000004L
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE_MASK 0x00000100L
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE_MASK 0x00000200L
+//DCN_VM_FAULT_STATUS
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS__SHIFT 0x0
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID__SHIFT 0x10
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL__SHIFT 0x14
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE__SHIFT 0x18
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS__SHIFT 0x1f
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS_MASK 0x0000FFFFL
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID_MASK 0x000F0000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL_MASK 0x00300000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE_MASK 0x0F000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS_MASK 0x80000000L
+//DCN_VM_FAULT_ADDR_MSB
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB_MASK 0x0000000FL
+//DCN_VM_FAULT_ADDR_LSB
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+
+
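/*
 * Illustrative sketch, not part of this patch: the *__SHIFT / *_MASK pairs
 * above describe register bit-fields, and a driver typically consumes them
 * as "(reg & MASK) >> SHIFT" (amdgpu wraps this pattern in helpers such as
 * REG_GET_FIELD()).  The function names and the raw fault_status value
 * below are hypothetical; u32 assumes <linux/types.h>.
 */
static inline u32 dcn_vm_fault_vmid(u32 fault_status)
{
	/* Extract the VMID field from a raw DCN_VM_FAULT_STATUS read. */
	return (fault_status & DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID_MASK) >>
	       DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID__SHIFT;
}

static inline u32 dcn_vm_fault_table_level(u32 fault_status)
{
	/* Extract the page-table level at which the fault was reported. */
	return (fault_status & DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL_MASK) >>
	       DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL__SHIFT;
}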
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+//HUBP0_DCSURF_SURFACE_CONFIG
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP0_DCSURF_ADDR_CONFIG
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP0_DCSURF_TILING_CONFIG
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP0_DCSURF_PRI_VIEWPORT_START
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP0_DCHUBP_CNTL
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP0_HUBP_CLK_CNTL
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP0_DCHUBP_VMPG_CONFIG
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+//HUBP0_HUBPREQ_DEBUG_DB
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
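/*
 * Illustrative sketch, not part of this patch: a two-field register such as
 * HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION is programmed by shifting each value
 * into place and bounding it with its mask.  The helper name and the
 * width/height arguments are hypothetical; amdgpu normally expresses this
 * through REG_SET_FIELD() against the offset from the matching *_offset.h.
 */
static inline u32 hubp0_pack_pri_viewport_dimension(u32 width, u32 height)
{
	u32 v = 0;

	/* Bits 13:0 carry the viewport width. */
	v |= (width << HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT) &
	     HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK;
	/* Bits 29:16 carry the viewport height. */
	v |= (height << HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT) &
	     HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK;

	return v;
}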
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+//HUBPREQ0_DCSURF_SURFACE_PITCH
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ0_VMID_SETTINGS_0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL2
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCN_EXPANSION_MODE
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ0_DCN_TTU_QOS_WM
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ0_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ0_BLANK_OFFSET_0
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ0_BLANK_OFFSET_1
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ0_DST_DIMENSIONS
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ0_DST_AFTER_SCALER
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ0_PREFETCH_SETTINGS
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ0_PREFETCH_SETTINGS_C
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ0_VBLANK_PARAMETERS_1
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_2
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_3
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_4
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ0_FLIP_PARAMETERS_1
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_2
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_1
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_2
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_3
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_4
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_5
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_6
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_7
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_PER_LINE_DELIVERY_PRE
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ0_PER_LINE_DELIVERY
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ0_CURSOR_SETTINGS
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ0_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ0_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ0_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ0_VBLANK_PARAMETERS_5
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_6
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_3
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_4
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_5
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_6
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+
+
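/*
 * Illustrative sketch, not part of this patch: a 64-bit surface address is
 * split across the 32-bit *_PRIMARY_SURFACE_ADDRESS register and the 16-bit
 * *_PRIMARY_SURFACE_ADDRESS_HIGH register defined above.  The struct and
 * helper are hypothetical and only show how the masks bound each half;
 * lower_32_bits()/upper_32_bits() assume <linux/kernel.h>.
 */
struct hubpreq0_surface_addr_regs {
	u32 lo;	/* value for HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS */
	u32 hi;	/* value for HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH */
};

static inline struct hubpreq0_surface_addr_regs
hubpreq0_split_primary_address(u64 addr)
{
	struct hubpreq0_surface_addr_regs r;

	r.lo = lower_32_bits(addr) &
	       HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK;
	r.hi = upper_32_bits(addr) &
	       HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK;

	return r;
}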
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+//HUBPRET0_HUBPRET_CONTROL
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET0_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET0_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET0_HUBPRET_READ_LINE0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE1
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_INTERRUPT
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET0_HUBPRET_READ_LINE_VALUE
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_STATUS
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
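/*
 * Illustrative sketch, not part of this patch: status/clear pairs such as
 * PIPE_VBLANK_INT_STATUS and PIPE_VBLANK_INT_CLEAR in
 * HUBPRET0_HUBPRET_INTERRUPT are serviced by testing the status bit and
 * writing the register back with the clear bit set.  The helpers below are
 * hypothetical and operate on a raw register value; in the driver the read
 * and write would go through its RREG32/WREG32-style accessors.
 */
static inline bool hubpret0_vblank_int_pending(u32 intr)
{
	return (intr & HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK) != 0;
}

static inline u32 hubpret0_vblank_int_ack(u32 intr)
{
	/* Set the clear bit to acknowledge a latched vblank interrupt. */
	return intr | HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK;
}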
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+//CURSOR0_0_CURSOR_CONTROL
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_0_CURSOR_SIZE
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_0_CURSOR_POSITION
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_0_CURSOR_HOT_SPOT
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_0_CURSOR_STEREO_CONTROL
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_0_CURSOR_DST_OFFSET
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_0_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_0_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_0_DMDATA_ADDRESS_HIGH
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_0_DMDATA_ADDRESS_LOW
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_0_DMDATA_CNTL
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_QOS_CNTL
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_0_DMDATA_STATUS
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_0_DMDATA_SW_CNTL
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_SW_DATA
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
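+/*
+ * Minimal usage sketch (illustrative only, assuming a hypothetical helper
+ * named set_field() that is not defined in this header): each register
+ * field above is described by a <FIELD>__SHIFT / <FIELD>_MASK pair, which
+ * callers typically combine in a read-modify-write of the 32-bit register
+ * value, e.g. with CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT and
+ * CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK.
+ */
+static inline unsigned int set_field(unsigned int reg_val,
+				     unsigned int mask,
+				     unsigned int shift,
+				     unsigned int field_val)
+{
+	/* clear the field's bits, then insert the new value at its position */
+	return (reg_val & ~mask) | ((field_val << shift) & mask);
+}
+/*
+ * Example:
+ *   val = set_field(val, CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK,
+ *                   CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT, 2);
+ */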
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON7_PERFCOUNTER_CNTL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFCOUNTER_CNTL2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFCOUNTER_STATE
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON7_PERFMON_CNTL
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON7_PERFMON_CNTL2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON7_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON7_PERFMON_CVALUE_LOW
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON7_PERFMON_HI
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFMON_LOW
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+//HUBP1_DCSURF_SURFACE_CONFIG
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP1_DCSURF_ADDR_CONFIG
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP1_DCSURF_TILING_CONFIG
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP1_DCSURF_PRI_VIEWPORT_START
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP1_DCHUBP_CNTL
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP1_HUBP_CLK_CNTL
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP1_DCHUBP_VMPG_CONFIG
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+//HUBP1_HUBPREQ_DEBUG_DB
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+//HUBPREQ1_DCSURF_SURFACE_PITCH
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ1_VMID_SETTINGS_0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_CONTROL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL2
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCN_EXPANSION_MODE
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ1_DCN_TTU_QOS_WM
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ1_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ1_BLANK_OFFSET_0
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ1_BLANK_OFFSET_1
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ1_DST_DIMENSIONS
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ1_DST_AFTER_SCALER
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ1_PREFETCH_SETTINGS
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ1_PREFETCH_SETTINGS_C
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ1_VBLANK_PARAMETERS_1
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_2
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_3
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_4
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ1_FLIP_PARAMETERS_1
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_2
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_1
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_2
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_3
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_4
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_5
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_6
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_7
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_PER_LINE_DELIVERY_PRE
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ1_PER_LINE_DELIVERY
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ1_CURSOR_SETTINGS
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ1_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ1_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ1_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ1_VBLANK_PARAMETERS_5
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_6
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_3
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_4
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_5
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_6
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+//HUBPRET1_HUBPRET_CONTROL
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET1_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET1_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET1_HUBPRET_READ_LINE0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE1
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_INTERRUPT
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET1_HUBPRET_READ_LINE_VALUE
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_STATUS
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+//CURSOR0_1_CURSOR_CONTROL
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_1_CURSOR_SIZE
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_1_CURSOR_POSITION
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_1_CURSOR_HOT_SPOT
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_1_CURSOR_STEREO_CONTROL
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_1_CURSOR_DST_OFFSET
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_1_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_1_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_1_DMDATA_ADDRESS_HIGH
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_1_DMDATA_ADDRESS_LOW
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_1_DMDATA_CNTL
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_QOS_CNTL
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_1_DMDATA_STATUS
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_1_DMDATA_SW_CNTL
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_SW_DATA
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON8_PERFCOUNTER_CNTL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFCOUNTER_CNTL2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFCOUNTER_STATE
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON8_PERFMON_CNTL
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON8_PERFMON_CNTL2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON8_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON8_PERFMON_CVALUE_LOW
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON8_PERFMON_HI
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFMON_LOW
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+//HUBP2_DCSURF_SURFACE_CONFIG
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP2_DCSURF_ADDR_CONFIG
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP2_DCSURF_TILING_CONFIG
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP2_DCSURF_PRI_VIEWPORT_START
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP2_DCHUBP_CNTL
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP2_HUBP_CLK_CNTL
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP2_DCHUBP_VMPG_CONFIG
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+//HUBP2_HUBPREQ_DEBUG_DB
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+//HUBPREQ2_DCSURF_SURFACE_PITCH
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ2_VMID_SETTINGS_0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_CONTROL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL2
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCN_EXPANSION_MODE
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ2_DCN_TTU_QOS_WM
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ2_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ2_BLANK_OFFSET_0
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ2_BLANK_OFFSET_1
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ2_DST_DIMENSIONS
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ2_DST_AFTER_SCALER
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ2_PREFETCH_SETTINGS
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ2_PREFETCH_SETTINGS_C
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ2_VBLANK_PARAMETERS_1
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_2
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_3
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_4
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ2_FLIP_PARAMETERS_1
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_2
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_1
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_2
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_3
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_4
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_5
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_6
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_7
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_PER_LINE_DELIVERY_PRE
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ2_PER_LINE_DELIVERY
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ2_CURSOR_SETTINGS
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ2_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ2_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ2_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ2_VBLANK_PARAMETERS_5
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_6
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_3
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_4
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_5
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_6
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+//HUBPRET2_HUBPRET_CONTROL
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET2_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET2_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET2_HUBPRET_READ_LINE0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE1
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_INTERRUPT
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET2_HUBPRET_READ_LINE_VALUE
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_STATUS
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+//CURSOR0_2_CURSOR_CONTROL
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_2_CURSOR_SIZE
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_2_CURSOR_POSITION
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_2_CURSOR_HOT_SPOT
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_2_CURSOR_STEREO_CONTROL
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_2_CURSOR_DST_OFFSET
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_2_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_2_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_2_DMDATA_ADDRESS_HIGH
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_2_DMDATA_ADDRESS_LOW
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_2_DMDATA_CNTL
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_QOS_CNTL
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_2_DMDATA_STATUS
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_2_DMDATA_SW_CNTL
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_SW_DATA
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON9_PERFCOUNTER_CNTL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFCOUNTER_CNTL2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFCOUNTER_STATE
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON9_PERFMON_CNTL
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON9_PERFMON_CNTL2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON9_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON9_PERFMON_CVALUE_LOW
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON9_PERFMON_HI
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFMON_LOW
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+//HUBP3_DCSURF_SURFACE_CONFIG
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP3_DCSURF_ADDR_CONFIG
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP3_DCSURF_TILING_CONFIG
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP3_DCSURF_PRI_VIEWPORT_START
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP3_DCHUBP_CNTL
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP3_HUBP_CLK_CNTL
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP3_DCHUBP_VMPG_CONFIG
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+//HUBP3_HUBPREQ_DEBUG_DB
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+//HUBPREQ3_DCSURF_SURFACE_PITCH
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ3_VMID_SETTINGS_0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_CONTROL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL2
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCN_EXPANSION_MODE
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ3_DCN_TTU_QOS_WM
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ3_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ3_BLANK_OFFSET_0
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ3_BLANK_OFFSET_1
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ3_DST_DIMENSIONS
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ3_DST_AFTER_SCALER
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ3_PREFETCH_SETTINGS
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ3_PREFETCH_SETTINGS_C
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ3_VBLANK_PARAMETERS_1
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_2
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_3
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_4
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ3_FLIP_PARAMETERS_1
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_2
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_1
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_2
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_3
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_4
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_5
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_6
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_7
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_PER_LINE_DELIVERY_PRE
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ3_PER_LINE_DELIVERY
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ3_CURSOR_SETTINGS
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ3_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ3_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ3_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ3_VBLANK_PARAMETERS_5
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_6
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_3
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_4
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_5
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_6
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+//HUBPRET3_HUBPRET_CONTROL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET3_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET3_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET3_HUBPRET_READ_LINE0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE1
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_INTERRUPT
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET3_HUBPRET_READ_LINE_VALUE
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_STATUS
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+//CURSOR0_3_CURSOR_CONTROL
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_3_CURSOR_SIZE
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_3_CURSOR_POSITION
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_3_CURSOR_HOT_SPOT
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_3_CURSOR_STEREO_CONTROL
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_3_CURSOR_DST_OFFSET
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_3_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_3_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_3_DMDATA_ADDRESS_HIGH
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_3_DMDATA_ADDRESS_LOW
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_3_DMDATA_CNTL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_QOS_CNTL
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_3_DMDATA_STATUS
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_3_DMDATA_SW_CNTL
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_SW_DATA
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
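+/*
+ * Editor's note (illustrative only, not part of the generated register
+ * tables): every field above is described by a matching *__SHIFT / *_MASK
+ * pair, so a field is read by masking the 32-bit register value and shifting
+ * it down. A minimal sketch, assuming a hypothetical 'regval' holding a
+ * CURSOR0_3_CURSOR_SIZE readback:
+ *
+ *   uint32_t width  = (regval & CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK) >>
+ *                     CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT;
+ *   uint32_t height = (regval & CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK) >>
+ *                     CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT;
+ *
+ * Driver code typically wraps this pattern in the amdgpu REG_GET_FIELD()/
+ * REG_SET_FIELD() helpers instead of open-coding the mask and shift.
+ */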
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON10_PERFCOUNTER_CNTL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFCOUNTER_CNTL2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFCOUNTER_STATE
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON10_PERFMON_CNTL
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON10_PERFMON_CNTL2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON10_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON10_PERFMON_CVALUE_LOW
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON10_PERFMON_HI
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFMON_LOW
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+//DPP_TOP0_DPP_CONTROL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0xF0000000L
+//DPP_TOP0_DPP_SOFT_RESET
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP0_DPP_CRC_VAL_R_G
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_VAL_B_A
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_CTRL
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP0_HOST_READ_CONTROL
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG0_FORMAT_CONTROL
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG0_FCNV_FP_BIAS_R
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_G
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_B
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_R
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_G
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_B
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG0_COLOR_KEYER_CONTROL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG0_COLOR_KEYER_ALPHA
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_RED
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_GREEN
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_BLUE
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_ALPHA_2BIT_LUT
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+//CNVC_CUR0_CURSOR0_CONTROL
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR0_CURSOR0_COLOR0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_COLOR1
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+//DSCL0_SCL_COEF_RAM_TAP_SELECT
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL0_SCL_COEF_RAM_TAP_DATA
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL0_SCL_MODE
+#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL0_SCL_TAP_CONTROL
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL0_DSCL_CONTROL
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL0_DSCL_2TAP_CONTROL
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL0_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT_C
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL0_SCL_BLACK_OFFSET
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL0_DSCL_UPDATE
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL0_DSCL_AUTOCAL
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL0_OTG_H_BLANK
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_OTG_V_BLANK
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_RECOUT_START
+#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL0_RECOUT_SIZE
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_MPC_SIZE
+#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_LB_DATA_FORMAT
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL0_LB_MEMORY_CTRL
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL0_LB_V_COUNTER
+#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL0_DSCL_MEM_PWR_CTRL
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL0_DSCL_MEM_PWR_STATUS
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL0_OBUF_CONTROL
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL0_OBUF_MEM_PWR_CTRL
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
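Every field in this generated header is described by a matched __SHIFT/_MASK pair, and consuming code combines the two to read or update a single field of a 32-bit register value without disturbing its neighbours. The sketch below is illustrative only; the GET_FIELD/SET_FIELD helpers and the small wrapper functions are assumptions of this example (the driver has its own field helpers), but the field macros are the DSCL0 memory-power definitions listed above.

	#include <stdint.h>

	/* Generic helpers built from a field's _MASK and __SHIFT macros. */
	#define GET_FIELD(val, field) \
		(((val) & field##_MASK) >> field##__SHIFT)
	#define SET_FIELD(val, field, x) \
		(((val) & ~field##_MASK) | \
		 (((uint32_t)(x) << field##__SHIFT) & field##_MASK))

	/* Set LB_G1_MEM_PWR_FORCE (2-bit field at bits 5:4) to 2 while
	 * leaving every other field of DSCL0_DSCL_MEM_PWR_CTRL unchanged. */
	static uint32_t force_lb_g1_power(uint32_t mem_pwr_ctrl)
	{
		return SET_FIELD(mem_pwr_ctrl,
				 DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE, 2);
	}

	/* Extract the 2-bit LUT power state from DSCL0_DSCL_MEM_PWR_STATUS. */
	static uint32_t lut_power_state(uint32_t mem_pwr_status)
	{
		return GET_FIELD(mem_pwr_status,
				 DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE);
	}

The same pattern applies to every register block in this file; only the macro names change.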
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+//CM0_CM_CONTROL
+#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM0_CM_ICSC_CONTROL
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM0_CM_ICSC_C11_C12
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C13_C14
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C21_C22
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C23_C24
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C31_C32
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C33_C34
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C11_C12
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C13_C14
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C21_C22
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C23_C24
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C31_C32
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C33_C34
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
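As the pairs above show, each input color space conversion (ICSC) coefficient register packs two 16-bit matrix entries into one 32-bit word, one in the low half and one in the high half. A minimal packing sketch, assuming the raw 16-bit coefficient encodings have already been computed elsewhere (the function itself is hypothetical, not driver code):

	#include <stdint.h>

	/* Build a CM0_CM_ICSC_C11_C12 register value from coefficients
	 * C11 (bits 15:0) and C12 (bits 31:16). */
	static uint32_t pack_icsc_c11_c12(uint16_t c11, uint16_t c12)
	{
		uint32_t v = 0;

		v |= ((uint32_t)c11 << CM0_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT) &
		     CM0_CM_ICSC_C11_C12__CM_ICSC_C11_MASK;
		v |= ((uint32_t)c12 << CM0_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT) &
		     CM0_CM_ICSC_C11_C12__CM_ICSC_C12_MASK;
		return v;
	}

The gamut remap coefficient registers that follow use the same two-coefficients-per-register layout.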
+//CM0_CM_GAMUT_REMAP_CONTROL
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM0_CM_GAMUT_REMAP_C11_C12
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C13_C14
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C21_C22
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C23_C24
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C31_C32
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C33_C34
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C11_C12
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C13_C14
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C21_C22
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C23_C24
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C31_C32
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C33_C34
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM0_CM_BIAS_CR_R
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM0_CM_BIAS_Y_G_CB_B
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_CONTROL
+#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM0_CM_DGAM_LUT_INDEX
+#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_DGAM_LUT_DATA
+#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM0_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM0_CM_DGAM_RAMA_START_CNTL_B
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_START_CNTL_G
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_START_CNTL_R
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL1_B
+#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_B
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_END_CNTL1_G
+#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_G
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_END_CNTL1_R
+#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_R
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_REGION_0_1
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_2_3
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_4_5
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_6_7
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_8_9
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_10_11
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_12_13
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_14_15
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_START_CNTL_B
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_START_CNTL_G
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_START_CNTL_R
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL1_B
+#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_B
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_END_CNTL1_G
+#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_G
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_END_CNTL1_R
+#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_R
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_REGION_0_1
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_2_3
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_4_5
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_6_7
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_8_9
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_10_11
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_12_13
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_14_15
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_CONTROL
+#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM0_CM_BLNDGAM_LUT_INDEX
+#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_BLNDGAM_LUT_DATA
+#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_HDR_MULT_COEF
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM0_CM_MEM_PWR_CTRL
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM0_CM_MEM_PWR_STATUS
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM0_CM_DEALPHA
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM0_CM_COEF_FORMAT
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_SHAPER_CONTROL
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM0_CM_SHAPER_OFFSET_R
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_G
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_B
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_SCALE_R
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM0_CM_SHAPER_SCALE_G_B
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_SHAPER_LUT_INDEX
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM0_CM_SHAPER_LUT_DATA
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM0_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM0_CM_SHAPER_RAMA_START_CNTL_B
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_G
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_R
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_B
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_G
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_R
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_REGION_0_1
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_2_3
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_4_5
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_6_7
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_8_9
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_10_11
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_12_13
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_14_15
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_16_17
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_18_19
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_20_21
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_22_23
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_24_25
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_26_27
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_28_29
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_30_31
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_32_33
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_B
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_G
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_R
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_B
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_G
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_R
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_REGION_0_1
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_2_3
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_4_5
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_6_7
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_8_9
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_10_11
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_12_13
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_14_15
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_16_17
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_18_19
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_20_21
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_22_23
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_24_25
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_26_27
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_28_29
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_30_31
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_32_33
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_MEM_PWR_CTRL2
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM0_CM_MEM_PWR_STATUS2
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM0_CM_3DLUT_MODE
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM0_CM_3DLUT_INDEX
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM0_CM_3DLUT_DATA
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_DATA_30BIT
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM0_CM_3DLUT_READ_WRITE_CONTROL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM0_CM_3DLUT_OUT_NORM_FACTOR
+#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM0_CM_3DLUT_OUT_OFFSET_R
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_OUT_OFFSET_G
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_OUT_OFFSET_B
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON11_PERFCOUNTER_CNTL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFCOUNTER_CNTL2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFCOUNTER_STATE
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON11_PERFMON_CNTL
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON11_PERFMON_CNTL2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON11_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON11_PERFMON_CVALUE_LOW
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON11_PERFMON_HI
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFMON_LOW
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
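As a rough illustrative sketch (not taken from the driver sources), the SHIFT/MASK pairs above are typically consumed by masking the raw 32-bit register value and shifting the field down. The helper below, with an assumed name, shows how the 16-bit PERFMON_CVALUE_HI field and the 32-bit PERFMON_CVALUE_LOW register could be combined, on the assumption that CVALUE_HI carries the upper bits of the counter.

```c
#include <stdint.h>

/*
 * Hypothetical helper (illustration only): combine the raw values of
 * DC_PERFMON11_PERFMON_CVALUE_LOW and DC_PERFMON11_PERFMON_CVALUE_INT_MISC
 * into one counter value, using the SHIFT/MASK pairs defined above.
 */
static inline uint64_t dc_perfmon11_cvalue(uint32_t cvalue_low,
					    uint32_t cvalue_int_misc)
{
	/* Upper 16 bits of the counter live in the CVALUE_INT_MISC register. */
	uint64_t hi = (cvalue_int_misc &
		       DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK) >>
		      DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT;
	/* Lower 32 bits come straight from CVALUE_LOW. */
	uint64_t lo = (cvalue_low &
		       DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK) >>
		      DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT;

	return (hi << 32) | lo;
}
```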
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+//DPP_TOP1_DPP_CONTROL
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0xF0000000L
+//DPP_TOP1_DPP_SOFT_RESET
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP1_DPP_CRC_VAL_R_G
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP1_DPP_CRC_VAL_B_A
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP1_DPP_CRC_CTRL
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP1_HOST_READ_CONTROL
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
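The CRC result registers above pack two 16-bit per-channel values each. As a minimal sketch (names assumed, not part of this patch), the four components could be unpacked from the raw register values like this:

```c
#include <stdint.h>

/* Hypothetical container for the four per-channel CRCs (illustration only). */
struct dpp_crc {
	uint16_t r_cr;
	uint16_t g_y;
	uint16_t b_cb;
	uint16_t alpha;
};

/*
 * Hypothetical helper: unpack the 16-bit CRC fields from the raw
 * DPP_TOP1_DPP_CRC_VAL_R_G / DPP_TOP1_DPP_CRC_VAL_B_A values, using the
 * SHIFT/MASK pairs defined above.
 */
static inline struct dpp_crc dpp_top1_read_crc(uint32_t val_r_g, uint32_t val_b_a)
{
	struct dpp_crc crc;

	crc.r_cr  = (val_r_g & DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK) >>
		    DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT;
	crc.g_y   = (val_r_g & DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK) >>
		    DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT;
	crc.b_cb  = (val_b_a & DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK) >>
		    DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT;
	crc.alpha = (val_b_a & DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK) >>
		    DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT;

	return crc;
}
```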
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG1_FORMAT_CONTROL
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG1_FCNV_FP_BIAS_R
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_G
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_B
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_R
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_G
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_B
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG1_COLOR_KEYER_CONTROL
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG1_COLOR_KEYER_ALPHA
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_RED
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_GREEN
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_BLUE
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_ALPHA_2BIT_LUT
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+//CNVC_CUR1_CURSOR0_CONTROL
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR1_CURSOR0_COLOR0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_COLOR1
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+//DSCL1_SCL_COEF_RAM_TAP_SELECT
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL1_SCL_COEF_RAM_TAP_DATA
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL1_SCL_MODE
+#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL1_SCL_TAP_CONTROL
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL1_DSCL_CONTROL
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL1_DSCL_2TAP_CONTROL
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL1_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT_C
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL1_SCL_BLACK_OFFSET
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL1_DSCL_UPDATE
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL1_DSCL_AUTOCAL
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL1_OTG_H_BLANK
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_OTG_V_BLANK
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_RECOUT_START
+#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL1_RECOUT_SIZE
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_MPC_SIZE
+#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_LB_DATA_FORMAT
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL1_LB_MEMORY_CTRL
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL1_LB_V_COUNTER
+#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL1_DSCL_MEM_PWR_CTRL
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL1_DSCL_MEM_PWR_STATUS
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL1_OBUF_CONTROL
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL1_OBUF_MEM_PWR_CTRL
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
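The write direction uses the same pairs in reverse: shift the field value up, then mask it to keep it within the field. A minimal sketch (helper name assumed, illustration only) composing the DSCL1_SCL_TAP_CONTROL value from tap counts:

```c
#include <stdint.h>

/*
 * Hypothetical helper (illustration only): build a DSCL1_SCL_TAP_CONTROL
 * register value for the given luma tap counts, using the SHIFT/MASK pairs
 * defined above. Chroma tap fields are left at zero here.
 */
static inline uint32_t dscl1_tap_control(uint32_t v_taps, uint32_t h_taps)
{
	uint32_t val = 0;

	/* Shift each field into place, then clamp it to its mask. */
	val |= (v_taps << DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT) &
	       DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK;
	val |= (h_taps << DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT) &
	       DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK;

	return val;
}
```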
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+//CM1_CM_CONTROL
+#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM1_CM_ICSC_CONTROL
+#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM1_CM_ICSC_C11_C12
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C13_C14
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C21_C22
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C23_C24
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C31_C32
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C33_C34
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C11_C12
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C13_C14
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C21_C22
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C23_C24
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C31_C32
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C33_C34
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_CONTROL
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM1_CM_GAMUT_REMAP_C11_C12
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C13_C14
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C21_C22
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C23_C24
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C31_C32
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C33_C34
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C11_C12
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C13_C14
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C21_C22
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C23_C24
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C31_C32
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C33_C34
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM1_CM_BIAS_CR_R
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM1_CM_BIAS_Y_G_CB_B
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_CONTROL
+#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM1_CM_DGAM_LUT_INDEX
+#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_DGAM_LUT_DATA
+#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM1_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM1_CM_DGAM_RAMA_START_CNTL_B
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_START_CNTL_G
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_START_CNTL_R
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL1_B
+#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_B
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_END_CNTL1_G
+#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_G
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_END_CNTL1_R
+#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_R
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_REGION_0_1
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_2_3
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_4_5
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_6_7
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_8_9
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_10_11
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_12_13
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_14_15
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_START_CNTL_B
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_START_CNTL_G
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_START_CNTL_R
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL1_B
+#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_B
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_END_CNTL1_G
+#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_G
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_END_CNTL1_R
+#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_R
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_REGION_0_1
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_2_3
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_4_5
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_6_7
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_8_9
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_10_11
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_12_13
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_14_15
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_CONTROL
+#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM1_CM_BLNDGAM_LUT_INDEX
+#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_BLNDGAM_LUT_DATA
+#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_HDR_MULT_COEF
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM1_CM_MEM_PWR_CTRL
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM1_CM_MEM_PWR_STATUS
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM1_CM_DEALPHA
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM1_CM_COEF_FORMAT
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM1_CM_SHAPER_CONTROL
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM1_CM_SHAPER_OFFSET_R
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_G
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_B
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_SCALE_R
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM1_CM_SHAPER_SCALE_G_B
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM1_CM_SHAPER_LUT_INDEX
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM1_CM_SHAPER_LUT_DATA
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM1_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM1_CM_SHAPER_RAMA_START_CNTL_B
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_G
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_R
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_B
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_G
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_R
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_REGION_0_1
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_2_3
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_4_5
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_6_7
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_8_9
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_10_11
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_12_13
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_14_15
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_16_17
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_18_19
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_20_21
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_22_23
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_24_25
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_26_27
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_28_29
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_30_31
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_32_33
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_B
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_G
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_R
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_B
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_G
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_R
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_REGION_0_1
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_2_3
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_4_5
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_6_7
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_8_9
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_10_11
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_12_13
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_14_15
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_16_17
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_18_19
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_20_21
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_22_23
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_24_25
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_26_27
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_28_29
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_30_31
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_32_33
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_MEM_PWR_CTRL2
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM1_CM_MEM_PWR_STATUS2
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM1_CM_3DLUT_MODE
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM1_CM_3DLUT_INDEX
+#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM1_CM_3DLUT_DATA
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_DATA_30BIT
+#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM1_CM_3DLUT_READ_WRITE_CONTROL
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM1_CM_3DLUT_OUT_NORM_FACTOR
+#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM1_CM_3DLUT_OUT_OFFSET_R
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_OUT_OFFSET_G
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_OUT_OFFSET_B
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM1_CM_TEST_DEBUG_INDEX
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM1_CM_TEST_DEBUG_DATA
+#define CM1_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM1_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON12_PERFCOUNTER_CNTL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFCOUNTER_CNTL2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFCOUNTER_STATE
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON12_PERFMON_CNTL
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON12_PERFMON_CNTL2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON12_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON12_PERFMON_CVALUE_LOW
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON12_PERFMON_HI
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFMON_LOW
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+//DPP_TOP2_DPP_CONTROL
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0xF0000000L
+//DPP_TOP2_DPP_SOFT_RESET
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP2_DPP_CRC_VAL_R_G
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP2_DPP_CRC_VAL_B_A
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP2_DPP_CRC_CTRL
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP2_HOST_READ_CONTROL
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG2_FORMAT_CONTROL
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG2_FCNV_FP_BIAS_R
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_G
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_B
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_R
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_G
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_B
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG2_COLOR_KEYER_CONTROL
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG2_COLOR_KEYER_ALPHA
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_RED
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_GREEN
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_BLUE
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_ALPHA_2BIT_LUT
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+//CNVC_CUR2_CURSOR0_CONTROL
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR2_CURSOR0_COLOR0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_COLOR1
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+//DSCL2_SCL_COEF_RAM_TAP_SELECT
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL2_SCL_COEF_RAM_TAP_DATA
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL2_SCL_MODE
+#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL2_SCL_TAP_CONTROL
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL2_DSCL_CONTROL
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL2_DSCL_2TAP_CONTROL
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL2_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT_C
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL2_SCL_BLACK_OFFSET
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL2_DSCL_UPDATE
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL2_DSCL_AUTOCAL
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL2_OTG_H_BLANK
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_OTG_V_BLANK
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_RECOUT_START
+#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL2_RECOUT_SIZE
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_MPC_SIZE
+#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_LB_DATA_FORMAT
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL2_LB_MEMORY_CTRL
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL2_LB_V_COUNTER
+#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL2_DSCL_MEM_PWR_CTRL
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL2_DSCL_MEM_PWR_STATUS
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL2_OBUF_CONTROL
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL2_OBUF_MEM_PWR_CTRL
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+//CM2_CM_CONTROL
+#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM2_CM_ICSC_CONTROL
+#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM2_CM_ICSC_C11_C12
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C13_C14
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C21_C22
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C23_C24
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C31_C32
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C33_C34
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C11_C12
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C13_C14
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C21_C22
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C23_C24
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C31_C32
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C33_C34
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_CONTROL
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM2_CM_GAMUT_REMAP_C11_C12
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C13_C14
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C21_C22
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C23_C24
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C31_C32
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C33_C34
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C11_C12
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C13_C14
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C21_C22
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C23_C24
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C31_C32
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C33_C34
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM2_CM_BIAS_CR_R
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM2_CM_BIAS_Y_G_CB_B
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_CONTROL
+#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM2_CM_DGAM_LUT_INDEX
+#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_DGAM_LUT_DATA
+#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM2_CM_DGAM_RAMA_START_CNTL_B
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_START_CNTL_G
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_START_CNTL_R
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL1_B
+#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_B
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_END_CNTL1_G
+#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_G
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_END_CNTL1_R
+#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_R
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_REGION_0_1
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_2_3
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_4_5
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_6_7
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_8_9
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_10_11
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_12_13
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_14_15
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_START_CNTL_B
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_G
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_R
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL1_B
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_B
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_G
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_G
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_R
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_R
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_REGION_0_1
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_2_3
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_4_5
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_6_7
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_8_9
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_10_11
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_12_13
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_14_15
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_CONTROL
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM2_CM_BLNDGAM_LUT_INDEX
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_BLNDGAM_LUT_DATA
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_HDR_MULT_COEF
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM2_CM_MEM_PWR_CTRL
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM2_CM_MEM_PWR_STATUS
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM2_CM_DEALPHA
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM2_CM_COEF_FORMAT
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM2_CM_SHAPER_CONTROL
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM2_CM_SHAPER_OFFSET_R
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_G
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_B
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_SCALE_R
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM2_CM_SHAPER_SCALE_G_B
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM2_CM_SHAPER_LUT_INDEX
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM2_CM_SHAPER_LUT_DATA
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM2_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_SHAPER_RAMA_START_CNTL_B
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_G
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_R
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_B
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_G
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_R
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_REGION_0_1
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_2_3
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_4_5
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_6_7
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_8_9
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_10_11
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_12_13
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_14_15
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_16_17
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_18_19
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_20_21
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_22_23
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_24_25
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_26_27
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_28_29
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_30_31
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_32_33
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_B
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_G
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_R
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_B
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_G
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_R
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_REGION_0_1
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_2_3
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_4_5
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_6_7
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_8_9
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_10_11
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_12_13
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_14_15
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_16_17
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_18_19
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_20_21
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_22_23
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_24_25
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_26_27
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_28_29
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_30_31
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_32_33
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_MEM_PWR_CTRL2
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM2_CM_MEM_PWR_STATUS2
+#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM2_CM_3DLUT_MODE
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM2_CM_3DLUT_INDEX
+#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM2_CM_3DLUT_DATA
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_DATA_30BIT
+#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM2_CM_3DLUT_READ_WRITE_CONTROL
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM2_CM_3DLUT_OUT_NORM_FACTOR
+#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM2_CM_3DLUT_OUT_OFFSET_R
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_OUT_OFFSET_G
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_OUT_OFFSET_B
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM2_CM_TEST_DEBUG_INDEX
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM2_CM_TEST_DEBUG_DATA
+#define CM2_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM2_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON13_PERFCOUNTER_CNTL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFCOUNTER_CNTL2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFCOUNTER_STATE
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON13_PERFMON_CNTL
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON13_PERFMON_CNTL2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON13_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON13_PERFMON_CVALUE_LOW
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON13_PERFMON_HI
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFMON_LOW
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+//DPP_TOP3_DPP_CONTROL
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0xF0000000L
+//DPP_TOP3_DPP_SOFT_RESET
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP3_DPP_CRC_VAL_R_G
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP3_DPP_CRC_VAL_B_A
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP3_DPP_CRC_CTRL
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP3_HOST_READ_CONTROL
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG3_FORMAT_CONTROL
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG3_FCNV_FP_BIAS_R
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_G
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_B
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_R
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_G
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_B
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG3_COLOR_KEYER_CONTROL
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG3_COLOR_KEYER_ALPHA
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_RED
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_GREEN
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_BLUE
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_ALPHA_2BIT_LUT
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+//CNVC_CUR3_CURSOR0_CONTROL
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR3_CURSOR0_COLOR0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_COLOR1
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+//DSCL3_SCL_COEF_RAM_TAP_SELECT
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL3_SCL_COEF_RAM_TAP_DATA
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL3_SCL_MODE
+#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL3_SCL_TAP_CONTROL
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL3_DSCL_CONTROL
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL3_DSCL_2TAP_CONTROL
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL3_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT_C
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x03FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x03FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL3_SCL_BLACK_OFFSET
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL3_DSCL_UPDATE
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL3_DSCL_AUTOCAL
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL3_OTG_H_BLANK
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_OTG_V_BLANK
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_RECOUT_START
+#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL3_RECOUT_SIZE
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_MPC_SIZE
+#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_LB_DATA_FORMAT
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL3_LB_MEMORY_CTRL
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL3_LB_V_COUNTER
+#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL3_DSCL_MEM_PWR_CTRL
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL3_DSCL_MEM_PWR_STATUS
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL3_OBUF_CONTROL
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL3_OBUF_MEM_PWR_CTRL
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+//CM3_CM_CONTROL
+#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM3_CM_ICSC_CONTROL
+#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM3_CM_ICSC_C11_C12
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C13_C14
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C21_C22
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C23_C24
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C31_C32
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C33_C34
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C11_C12
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C13_C14
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C21_C22
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C23_C24
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C31_C32
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C33_C34
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_CONTROL
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM3_CM_GAMUT_REMAP_C11_C12
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C13_C14
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C21_C22
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C23_C24
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C31_C32
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C33_C34
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C11_C12
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C13_C14
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C21_C22
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C23_C24
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C31_C32
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C33_C34
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM3_CM_BIAS_CR_R
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM3_CM_BIAS_Y_G_CB_B
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_CONTROL
+#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM3_CM_DGAM_LUT_INDEX
+#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_DGAM_LUT_DATA
+#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM3_CM_DGAM_RAMA_START_CNTL_B
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_START_CNTL_G
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_START_CNTL_R
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL1_B
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_B
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_G
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_G
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_R
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_R
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_REGION_0_1
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_2_3
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_4_5
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_6_7
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_8_9
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_10_11
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_12_13
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_14_15
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_START_CNTL_B
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_G
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_R
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL1_B
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_B
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_G
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_G
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_R
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_R
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_REGION_0_1
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_2_3
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_4_5
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_6_7
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_8_9
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_10_11
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_12_13
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_14_15
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_CONTROL
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM3_CM_BLNDGAM_LUT_INDEX
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_BLNDGAM_LUT_DATA
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_HDR_MULT_COEF
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM3_CM_MEM_PWR_CTRL
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM3_CM_MEM_PWR_STATUS
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM3_CM_DEALPHA
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM3_CM_COEF_FORMAT
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM3_CM_SHAPER_CONTROL
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM3_CM_SHAPER_OFFSET_R
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_G
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_B
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_SCALE_R
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM3_CM_SHAPER_SCALE_G_B
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_SHAPER_LUT_INDEX
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM3_CM_SHAPER_LUT_DATA
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM3_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_SHAPER_RAMA_START_CNTL_B
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_G
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_R
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_B
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_G
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_R
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_REGION_0_1
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_2_3
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_4_5
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_6_7
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_8_9
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_10_11
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_12_13
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_14_15
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_16_17
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_18_19
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_20_21
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_22_23
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_24_25
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_26_27
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_28_29
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_30_31
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_32_33
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_B
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_G
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_R
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_B
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_G
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_R
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_REGION_0_1
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_2_3
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_4_5
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_6_7
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_8_9
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_10_11
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_12_13
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_14_15
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_16_17
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_18_19
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_20_21
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_22_23
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_24_25
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_26_27
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_28_29
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_30_31
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_32_33
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_MEM_PWR_CTRL2
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM3_CM_MEM_PWR_STATUS2
+#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM3_CM_3DLUT_MODE
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM3_CM_3DLUT_INDEX
+#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM3_CM_3DLUT_DATA
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_DATA_30BIT
+#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM3_CM_3DLUT_READ_WRITE_CONTROL
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM3_CM_3DLUT_OUT_NORM_FACTOR
+#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM3_CM_3DLUT_OUT_OFFSET_R
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_OUT_OFFSET_G
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_OUT_OFFSET_B
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_TEST_DEBUG_INDEX
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_DATA
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON14_PERFCOUNTER_CNTL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFCOUNTER_CNTL2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFCOUNTER_STATE
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON14_PERFMON_CNTL
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON14_PERFMON_CNTL2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON14_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON14_PERFMON_CVALUE_LOW
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON14_PERFMON_HI
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFMON_LOW
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+//MPCC0_MPCC_TOP_SEL
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_BOT_SEL
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_OPP_ID
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC0_MPCC_CONTROL
+#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC0_MPCC_SM_CONTROL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC0_MPCC_UPDATE_LOCK_SEL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC0_MPCC_TOP_GAIN
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_INSIDE
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BG_R_CR
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_G_Y
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_B_CB
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC0_MPCC_MEM_PWR_CTRL
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC0_MPCC_STALL_STATUS
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC0_MPCC_STATUS
+#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+//MPCC1_MPCC_TOP_SEL
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_BOT_SEL
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_OPP_ID
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC1_MPCC_CONTROL
+#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC1_MPCC_SM_CONTROL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC1_MPCC_UPDATE_LOCK_SEL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC1_MPCC_TOP_GAIN
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_INSIDE
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BG_R_CR
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_G_Y
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_B_CB
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC1_MPCC_MEM_PWR_CTRL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC1_MPCC_STALL_STATUS
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC1_MPCC_STATUS
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+//MPCC2_MPCC_TOP_SEL
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_BOT_SEL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_OPP_ID
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC2_MPCC_CONTROL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC2_MPCC_SM_CONTROL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC2_MPCC_UPDATE_LOCK_SEL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC2_MPCC_TOP_GAIN
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_INSIDE
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BG_R_CR
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_G_Y
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_B_CB
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC2_MPCC_MEM_PWR_CTRL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC2_MPCC_STALL_STATUS
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC2_MPCC_STATUS
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+//MPCC3_MPCC_TOP_SEL
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_BOT_SEL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_OPP_ID
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC3_MPCC_CONTROL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC3_MPCC_SM_CONTROL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC3_MPCC_UPDATE_LOCK_SEL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC3_MPCC_TOP_GAIN
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_INSIDE
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BG_R_CR
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_G_Y
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_B_CB
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC3_MPCC_MEM_PWR_CTRL
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC3_MPCC_STALL_STATUS
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC3_MPCC_STATUS
+#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+//MPCC4_MPCC_TOP_SEL
+#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC4_MPCC_BOT_SEL
+#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC4_MPCC_OPP_ID
+#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC4_MPCC_CONTROL
+#define MPCC4_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC4_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC4_MPCC_SM_CONTROL
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC4_MPCC_UPDATE_LOCK_SEL
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC4_MPCC_TOP_GAIN
+#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC4_MPCC_BOT_GAIN_INSIDE
+#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC4_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC4_MPCC_BG_R_CR
+#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC4_MPCC_BG_G_Y
+#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC4_MPCC_BG_B_CB
+#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC4_MPCC_MEM_PWR_CTRL
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC4_MPCC_STALL_STATUS
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC4_MPCC_STATUS
+#define MPCC4_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC4_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC4_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC4_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC4_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC4_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc5_dispdec
+//MPCC5_MPCC_TOP_SEL
+#define MPCC5_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC5_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC5_MPCC_BOT_SEL
+#define MPCC5_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC5_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC5_MPCC_OPP_ID
+#define MPCC5_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC5_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC5_MPCC_CONTROL
+#define MPCC5_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC5_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC5_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC5_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC5_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC5_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC5_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC5_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC5_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC5_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC5_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC5_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC5_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC5_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC5_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC5_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC5_MPCC_SM_CONTROL
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC5_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC5_MPCC_UPDATE_LOCK_SEL
+#define MPCC5_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC5_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC5_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC5_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC5_MPCC_TOP_GAIN
+#define MPCC5_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC5_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC5_MPCC_BOT_GAIN_INSIDE
+#define MPCC5_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC5_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC5_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC5_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC5_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC5_MPCC_BG_R_CR
+#define MPCC5_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC5_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC5_MPCC_BG_G_Y
+#define MPCC5_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC5_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC5_MPCC_BG_B_CB
+#define MPCC5_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC5_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC5_MPCC_MEM_PWR_CTRL
+#define MPCC5_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC5_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC5_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC5_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC5_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC5_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC5_MPCC_STALL_STATUS
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC5_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC5_MPCC_STATUS
+#define MPCC5_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC5_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC5_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC5_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC5_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC5_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC5_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC5_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC5_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC5_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC5_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC5_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc6_dispdec
+//MPCC6_MPCC_TOP_SEL
+#define MPCC6_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC6_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC6_MPCC_BOT_SEL
+#define MPCC6_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC6_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC6_MPCC_OPP_ID
+#define MPCC6_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC6_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC6_MPCC_CONTROL
+#define MPCC6_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC6_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC6_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC6_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC6_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC6_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC6_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC6_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC6_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC6_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC6_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC6_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC6_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC6_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC6_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC6_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC6_MPCC_SM_CONTROL
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC6_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC6_MPCC_UPDATE_LOCK_SEL
+#define MPCC6_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC6_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC6_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC6_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC6_MPCC_TOP_GAIN
+#define MPCC6_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC6_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC6_MPCC_BOT_GAIN_INSIDE
+#define MPCC6_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC6_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC6_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC6_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC6_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC6_MPCC_BG_R_CR
+#define MPCC6_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC6_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC6_MPCC_BG_G_Y
+#define MPCC6_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC6_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC6_MPCC_BG_B_CB
+#define MPCC6_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC6_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC6_MPCC_MEM_PWR_CTRL
+#define MPCC6_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC6_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC6_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC6_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC6_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC6_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC6_MPCC_STALL_STATUS
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC6_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC6_MPCC_STATUS
+#define MPCC6_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC6_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC6_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC6_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC6_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC6_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC6_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC6_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC6_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC6_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC6_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC6_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc7_dispdec
+//MPCC7_MPCC_TOP_SEL
+#define MPCC7_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC7_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC7_MPCC_BOT_SEL
+#define MPCC7_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC7_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC7_MPCC_OPP_ID
+#define MPCC7_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC7_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC7_MPCC_CONTROL
+#define MPCC7_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC7_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC7_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC7_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC7_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC7_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC7_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC7_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC7_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC7_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC7_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC7_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC7_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC7_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC7_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC7_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC7_MPCC_SM_CONTROL
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC7_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC7_MPCC_UPDATE_LOCK_SEL
+#define MPCC7_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC7_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC7_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC7_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC7_MPCC_TOP_GAIN
+#define MPCC7_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC7_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC7_MPCC_BOT_GAIN_INSIDE
+#define MPCC7_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC7_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC7_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC7_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC7_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC7_MPCC_BG_R_CR
+#define MPCC7_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC7_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC7_MPCC_BG_G_Y
+#define MPCC7_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC7_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC7_MPCC_BG_B_CB
+#define MPCC7_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC7_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC7_MPCC_MEM_PWR_CTRL
+#define MPCC7_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC7_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC7_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC7_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC7_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC7_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC7_MPCC_STALL_STATUS
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC7_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC7_MPCC_STATUS
+#define MPCC7_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC7_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC7_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC7_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC7_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC7_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC7_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC7_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC7_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC7_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC7_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC7_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+//MPC_CLOCK_CONTROL
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL__SHIFT 0x4
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL_MASK 0x00000030L
+//MPC_SOFT_RESET
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET__SHIFT 0x16
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET__SHIFT 0x17
+#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET_MASK 0x00400000L
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET_MASK 0x00800000L
+#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L
+//MPC_CRC_CTRL
+#define MPC_CRC_CTRL__MPC_CRC_EN__SHIFT 0x0
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN__SHIFT 0x4
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE__SHIFT 0x8
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN__SHIFT 0xa
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL__SHIFT 0x18
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED__SHIFT 0x1e
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK__SHIFT 0x1f
+#define MPC_CRC_CTRL__MPC_CRC_EN_MASK 0x00000001L
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN_MASK 0x00000010L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE_MASK 0x00000300L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN_MASK 0x00000400L
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL_MASK 0x03000000L
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED_MASK 0x40000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK_MASK 0x80000000L
+//MPC_CRC_SEL_CONTROL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL__SHIFT 0x0
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL__SHIFT 0x4
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK__SHIFT 0x10
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL_MASK 0x0000000FL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL_MASK 0x000000F0L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK_MASK 0xFFFF0000L
+//MPC_CRC_RESULT_AR
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_A__SHIFT 0x0
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_R__SHIFT 0x10
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_A_MASK 0x0000FFFFL
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_R_MASK 0xFFFF0000L
+//MPC_CRC_RESULT_GB
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_G__SHIFT 0x0
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_B__SHIFT 0x10
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_G_MASK 0x0000FFFFL
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_B_MASK 0xFFFF0000L
+//MPC_CRC_RESULT_C
+#define MPC_CRC_RESULT_C__MPC_CRC_RESULT_C__SHIFT 0x0
+#define MPC_CRC_RESULT_C__MPC_CRC_RESULT_C_MASK 0x0000FFFFL
+//MPC_PERFMON_EVENT_CTRL
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN__SHIFT 0x0
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN_MASK 0x00000001L
+//MPC_BYPASS_BG_AR
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L
+//MPC_BYPASS_BG_GB
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L
+//MPC_STALL_GRACE_WINDOW
+#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD__SHIFT 0x0
+#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD_MASK 0x000000FFL
+//MPC_HOST_READ_CONTROL
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//MPC_PENDING_TAKEN_STATUS_REG1
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_SURFACE_UPDATE_PENDING__SHIFT 0x0
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_SURFACE_UPDATE_TAKEN__SHIFT 0x1
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CONFIG_UPDATE_PENDING__SHIFT 0x2
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CONFIG_UPDATE_TAKEN__SHIFT 0x3
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CURSOR_UPDATE_PENDING__SHIFT 0x4
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CURSOR_UPDATE_TAKEN__SHIFT 0x5
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_SURFACE_UPDATE_PENDING__SHIFT 0x6
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_SURFACE_UPDATE_TAKEN__SHIFT 0x7
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CONFIG_UPDATE_PENDING__SHIFT 0x8
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CONFIG_UPDATE_TAKEN__SHIFT 0x9
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CURSOR_UPDATE_PENDING__SHIFT 0xa
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CURSOR_UPDATE_TAKEN__SHIFT 0xb
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_SURFACE_UPDATE_PENDING__SHIFT 0xc
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_SURFACE_UPDATE_TAKEN__SHIFT 0xd
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CONFIG_UPDATE_PENDING__SHIFT 0xe
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CONFIG_UPDATE_TAKEN__SHIFT 0xf
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CURSOR_UPDATE_PENDING__SHIFT 0x10
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CURSOR_UPDATE_TAKEN__SHIFT 0x11
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_SURFACE_UPDATE_PENDING__SHIFT 0x12
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_SURFACE_UPDATE_TAKEN__SHIFT 0x13
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CONFIG_UPDATE_PENDING__SHIFT 0x14
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CONFIG_UPDATE_TAKEN__SHIFT 0x15
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CURSOR_UPDATE_PENDING__SHIFT 0x16
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CURSOR_UPDATE_TAKEN__SHIFT 0x17
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_SURFACE_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_SURFACE_UPDATE_TAKEN_MASK 0x00000002L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CONFIG_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CONFIG_UPDATE_TAKEN_MASK 0x00000008L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CURSOR_UPDATE_PENDING_MASK 0x00000010L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP0_CURSOR_UPDATE_TAKEN_MASK 0x00000020L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_SURFACE_UPDATE_PENDING_MASK 0x00000040L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_SURFACE_UPDATE_TAKEN_MASK 0x00000080L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CONFIG_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CONFIG_UPDATE_TAKEN_MASK 0x00000200L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CURSOR_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP1_CURSOR_UPDATE_TAKEN_MASK 0x00000800L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_SURFACE_UPDATE_PENDING_MASK 0x00001000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_SURFACE_UPDATE_TAKEN_MASK 0x00002000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CONFIG_UPDATE_PENDING_MASK 0x00004000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CONFIG_UPDATE_TAKEN_MASK 0x00008000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CURSOR_UPDATE_PENDING_MASK 0x00010000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP2_CURSOR_UPDATE_TAKEN_MASK 0x00020000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_SURFACE_UPDATE_PENDING_MASK 0x00040000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_SURFACE_UPDATE_TAKEN_MASK 0x00080000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CONFIG_UPDATE_PENDING_MASK 0x00100000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CONFIG_UPDATE_TAKEN_MASK 0x00200000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CURSOR_UPDATE_PENDING_MASK 0x00400000L
+#define MPC_PENDING_TAKEN_STATUS_REG1__IN_DPP3_CURSOR_UPDATE_TAKEN_MASK 0x00800000L
+//MPC_PENDING_TAKEN_STATUS_REG3
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP0_CONFIG_UPDATE_PENDING__SHIFT 0x0
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP0_CONFIG_UPDATE_TAKEN__SHIFT 0x1
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP1_CONFIG_UPDATE_PENDING__SHIFT 0x2
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP1_CONFIG_UPDATE_TAKEN__SHIFT 0x3
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP2_CONFIG_UPDATE_PENDING__SHIFT 0x4
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP2_CONFIG_UPDATE_TAKEN__SHIFT 0x5
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP3_CONFIG_UPDATE_PENDING__SHIFT 0x6
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP3_CONFIG_UPDATE_TAKEN__SHIFT 0x7
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC0_CONFIG_UPDATE_PENDING__SHIFT 0xc
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC0_CONFIG_UPDATE_TAKEN__SHIFT 0xd
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC1_CONFIG_UPDATE_PENDING__SHIFT 0xe
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC1_CONFIG_UPDATE_TAKEN__SHIFT 0xf
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC2_CONFIG_UPDATE_PENDING__SHIFT 0x10
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC2_CONFIG_UPDATE_TAKEN__SHIFT 0x11
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC3_CONFIG_UPDATE_PENDING__SHIFT 0x12
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC3_CONFIG_UPDATE_TAKEN__SHIFT 0x13
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP0_CONFIG_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP0_CONFIG_UPDATE_TAKEN_MASK 0x00000002L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP1_CONFIG_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP1_CONFIG_UPDATE_TAKEN_MASK 0x00000008L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP2_CONFIG_UPDATE_PENDING_MASK 0x00000010L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP2_CONFIG_UPDATE_TAKEN_MASK 0x00000020L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP3_CONFIG_UPDATE_PENDING_MASK 0x00000040L
+#define MPC_PENDING_TAKEN_STATUS_REG3__OUT_OPP3_CONFIG_UPDATE_TAKEN_MASK 0x00000080L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC0_CONFIG_UPDATE_PENDING_MASK 0x00001000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC0_CONFIG_UPDATE_TAKEN_MASK 0x00002000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC1_CONFIG_UPDATE_PENDING_MASK 0x00004000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC1_CONFIG_UPDATE_TAKEN_MASK 0x00008000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC2_CONFIG_UPDATE_PENDING_MASK 0x00010000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC2_CONFIG_UPDATE_TAKEN_MASK 0x00020000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC3_CONFIG_UPDATE_PENDING_MASK 0x00040000L
+#define MPC_PENDING_TAKEN_STATUS_REG3__MPCC3_CONFIG_UPDATE_TAKEN_MASK 0x00080000L
+//MPC_UPDATE_ACK_REG5
+#define MPC_UPDATE_ACK_REG5__IN_DPP0_SURFACE_UPDATE_ACK__SHIFT 0x0
+#define MPC_UPDATE_ACK_REG5__IN_DPP0_CONFIG_UPDATE_ACK__SHIFT 0x1
+#define MPC_UPDATE_ACK_REG5__IN_DPP0_CURSOR_UPDATE_ACK__SHIFT 0x2
+#define MPC_UPDATE_ACK_REG5__IN_DPP1_SURFACE_UPDATE_ACK__SHIFT 0x3
+#define MPC_UPDATE_ACK_REG5__IN_DPP1_CONFIG_UPDATE_ACK__SHIFT 0x4
+#define MPC_UPDATE_ACK_REG5__IN_DPP1_CURSOR_UPDATE_ACK__SHIFT 0x5
+#define MPC_UPDATE_ACK_REG5__IN_DPP2_SURFACE_UPDATE_ACK__SHIFT 0x6
+#define MPC_UPDATE_ACK_REG5__IN_DPP2_CONFIG_UPDATE_ACK__SHIFT 0x7
+#define MPC_UPDATE_ACK_REG5__IN_DPP2_CURSOR_UPDATE_ACK__SHIFT 0x8
+#define MPC_UPDATE_ACK_REG5__IN_DPP3_SURFACE_UPDATE_ACK__SHIFT 0x9
+#define MPC_UPDATE_ACK_REG5__IN_DPP3_CONFIG_UPDATE_ACK__SHIFT 0xa
+#define MPC_UPDATE_ACK_REG5__IN_DPP3_CURSOR_UPDATE_ACK__SHIFT 0xb
+#define MPC_UPDATE_ACK_REG5__MPCC0_CONFIG_UPDATE_ACK__SHIFT 0xf
+#define MPC_UPDATE_ACK_REG5__MPCC1_CONFIG_UPDATE_ACK__SHIFT 0x10
+#define MPC_UPDATE_ACK_REG5__MPCC2_CONFIG_UPDATE_ACK__SHIFT 0x11
+#define MPC_UPDATE_ACK_REG5__MPCC3_CONFIG_UPDATE_ACK__SHIFT 0x12
+#define MPC_UPDATE_ACK_REG5__OUT_OPP0_CONFIG_UPDATE_ACK__SHIFT 0x14
+#define MPC_UPDATE_ACK_REG5__OUT_OPP1_CONFIG_UPDATE_ACK__SHIFT 0x15
+#define MPC_UPDATE_ACK_REG5__OUT_OPP2_CONFIG_UPDATE_ACK__SHIFT 0x16
+#define MPC_UPDATE_ACK_REG5__OUT_OPP3_CONFIG_UPDATE_ACK__SHIFT 0x17
+#define MPC_UPDATE_ACK_REG5__IN_DPP0_SURFACE_UPDATE_ACK_MASK 0x00000001L
+#define MPC_UPDATE_ACK_REG5__IN_DPP0_CONFIG_UPDATE_ACK_MASK 0x00000002L
+#define MPC_UPDATE_ACK_REG5__IN_DPP0_CURSOR_UPDATE_ACK_MASK 0x00000004L
+#define MPC_UPDATE_ACK_REG5__IN_DPP1_SURFACE_UPDATE_ACK_MASK 0x00000008L
+#define MPC_UPDATE_ACK_REG5__IN_DPP1_CONFIG_UPDATE_ACK_MASK 0x00000010L
+#define MPC_UPDATE_ACK_REG5__IN_DPP1_CURSOR_UPDATE_ACK_MASK 0x00000020L
+#define MPC_UPDATE_ACK_REG5__IN_DPP2_SURFACE_UPDATE_ACK_MASK 0x00000040L
+#define MPC_UPDATE_ACK_REG5__IN_DPP2_CONFIG_UPDATE_ACK_MASK 0x00000080L
+#define MPC_UPDATE_ACK_REG5__IN_DPP2_CURSOR_UPDATE_ACK_MASK 0x00000100L
+#define MPC_UPDATE_ACK_REG5__IN_DPP3_SURFACE_UPDATE_ACK_MASK 0x00000200L
+#define MPC_UPDATE_ACK_REG5__IN_DPP3_CONFIG_UPDATE_ACK_MASK 0x00000400L
+#define MPC_UPDATE_ACK_REG5__IN_DPP3_CURSOR_UPDATE_ACK_MASK 0x00000800L
+#define MPC_UPDATE_ACK_REG5__MPCC0_CONFIG_UPDATE_ACK_MASK 0x00008000L
+#define MPC_UPDATE_ACK_REG5__MPCC1_CONFIG_UPDATE_ACK_MASK 0x00010000L
+#define MPC_UPDATE_ACK_REG5__MPCC2_CONFIG_UPDATE_ACK_MASK 0x00020000L
+#define MPC_UPDATE_ACK_REG5__MPCC3_CONFIG_UPDATE_ACK_MASK 0x00040000L
+#define MPC_UPDATE_ACK_REG5__OUT_OPP0_CONFIG_UPDATE_ACK_MASK 0x00100000L
+#define MPC_UPDATE_ACK_REG5__OUT_OPP1_CONFIG_UPDATE_ACK_MASK 0x00200000L
+#define MPC_UPDATE_ACK_REG5__OUT_OPP2_CONFIG_UPDATE_ACK_MASK 0x00400000L
+#define MPC_UPDATE_ACK_REG5__OUT_OPP3_CONFIG_UPDATE_ACK_MASK 0x00800000L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET1
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET1
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET1
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET1
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET1
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET2
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET2
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET2
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET2
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET2
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET3
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET3
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET3
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET3
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET3
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//MPC_OUT0_MUX
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+//MPC_OUT0_DENORM_CONTROL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT0_DENORM_CLAMP_G_Y
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT0_DENORM_CLAMP_B_CB
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT1_MUX
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+//MPC_OUT1_DENORM_CONTROL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT1_DENORM_CLAMP_G_Y
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT1_DENORM_CLAMP_B_CB
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT2_MUX
+#define MPC_OUT2_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT2_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+//MPC_OUT2_DENORM_CONTROL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT2_DENORM_CLAMP_G_Y
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT2_DENORM_CLAMP_B_CB
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT3_MUX
+#define MPC_OUT3_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT3_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+//MPC_OUT3_DENORM_CONTROL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT3_DENORM_CLAMP_G_Y
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT3_DENORM_CLAMP_B_CB
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+//MPCC_OGAM0_MPCC_OGAM_MODE
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM0_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+//MPCC_OGAM1_MPCC_OGAM_MODE
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM1_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+//MPCC_OGAM2_MPCC_OGAM_MODE
+#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM2_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+//MPCC_OGAM3_MPCC_OGAM_MODE
+#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM3_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+//MPCC_OGAM4_MPCC_OGAM_MODE
+#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM4_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM4_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
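
Editorial aside, not part of the patch: every addressBlock in this header follows the same encoding — each register field gets a paired __SHIFT and _MASK constant, and callers combine the two to extract or pack the field within a 32-bit register value. The minimal C sketch below illustrates that pattern with two OGAM4 fields defined in the hunk above. The helper names, the example values, and the #include path are illustrative assumptions only; the real amdgpu register-offset macros and MMIO accessors are defined elsewhere and are not shown in this hunk.

	#include <stdint.h>
	#include <stdio.h>
	/* #include "..._sh_mask.h" -- placeholder; the actual header path added
	 * by this patch is not visible in this hunk. */

	/* Extract the 2-bit MPCC_OGAM_CONFIG_STATUS field from a raw
	 * MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL register value. */
	static uint32_t ogam4_config_status(uint32_t reg)
	{
		return (reg & MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK)
			>> MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT;
	}

	/* Pack a LUT index into MPCC_OGAM4_MPCC_OGAM_LUT_INDEX, clearing the
	 * previous field contents first. */
	static uint32_t ogam4_set_lut_index(uint32_t reg, uint32_t index)
	{
		reg &= ~MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK;
		reg |= (index << MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT)
			& MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK;
		return reg;
	}

	int main(void)
	{
		uint32_t ctrl = 0x00000025;	/* example raw register value */

		printf("CONFIG_STATUS = %u\n", ogam4_config_status(ctrl));
		printf("LUT_INDEX reg = 0x%08x\n", ogam4_set_lut_index(0, 0x1ff));
		return 0;
	}
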
+// addressBlock: dce_dc_mpc_mpcc_ogam5_dispdec
+//MPCC_OGAM5_MPCC_OGAM_MODE
+#define MPCC_OGAM5_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM5_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM5_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM5_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM5_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM5_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM5_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam6_dispdec
+//MPCC_OGAM6_MPCC_OGAM_MODE
+#define MPCC_OGAM6_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM6_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM6_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM6_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM6_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM6_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM6_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam7_dispdec
+//MPCC_OGAM7_MPCC_OGAM_MODE
+#define MPCC_OGAM7_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM7_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM7_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM7_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM7_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM7_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM7_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+//MPC_OUT_CSC_COEF_FORMAT
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT__SHIFT 0x2
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT__SHIFT 0x3
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT_MASK 0x00000004L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT_MASK 0x00000008L
+//MPC_OUT0_CSC_MODE
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT0_CSC_C11_C12_A
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_A
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_A
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_A
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_A
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_A
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C11_C12_B
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_B
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_B
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_B
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_B
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_B
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_MODE
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT1_CSC_C11_C12_A
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_A
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_A
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_A
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_A
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_A
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C11_C12_B
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_B
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_B
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_B
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_B
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_B
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_MODE
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT2_CSC_C11_C12_A
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C13_C14_A
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C21_C22_A
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C23_C24_A
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C31_C32_A
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C33_C34_A
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C11_C12_B
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C13_C14_B
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C21_C22_B
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C23_C24_B
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C31_C32_B
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C33_C34_B
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_MODE
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT3_CSC_C11_C12_A
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C13_C14_A
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C21_C22_A
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C23_C24_A
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C31_C32_A
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C33_C34_A
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C11_C12_B
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C13_C14_B
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C21_C22_B
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C23_C24_B
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C31_C32_B
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C33_C34_B
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON15_PERFCOUNTER_CNTL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFCOUNTER_CNTL2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFCOUNTER_STATE
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON15_PERFMON_CNTL
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON15_PERFMON_CNTL2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON15_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON15_PERFMON_CVALUE_LOW
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON15_PERFMON_HI
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFMON_LOW
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+//BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//BL1_PWM_USER_LEVEL
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//BL1_PWM_TARGET_ABM_LEVEL
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//BL1_PWM_CURRENT_ABM_LEVEL
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//BL1_PWM_FINAL_DUTY_CYCLE
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//BL1_PWM_MINIMUM_DUTY_CYCLE
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//BL1_PWM_ABM_CNTL
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//BL1_PWM_GRP2_REG_LOCK
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//DC_ABM1_CNTL
+#define DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT__SHIFT 0x8
+#define DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT_MASK 0x00000700L
+//DC_ABM1_IPCSC_COEFF_SEL
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_OFFSET_SLOPE_0
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_OFFSET_SLOPE_1
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_OFFSET_SLOPE_2
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_OFFSET_SLOPE_3
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_OFFSET_SLOPE_4
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_THRES_12
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_THRES_34
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//DC_ABM1_ACE_CNTL_MISC
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//DC_ABM1_HGLS_REG_READ_PROGRESS
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//DC_ABM1_HG_MISC_CTRL
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//DC_ABM1_LS_SUM_OF_LUMA
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//DC_ABM1_LS_MIN_MAX_LUMA
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//DC_ABM1_LS_PIXEL_COUNT
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//DC_ABM1_HG_SAMPLE_RATE
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//DC_ABM1_LS_SAMPLE_RATE
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_1
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_2
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_3
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_4
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_5
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_6
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_7
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_8
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_9
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_10
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_11
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_12
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_13
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_14
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_15
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_16
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_17
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_18
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_19
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_20
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_21
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_22
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_23
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//DC_ABM1_HG_RESULT_24
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//DC_ABM1_BL_MASTER_LOCK
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+//FMT0_FMT_CLAMP_COMPONENT_R
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_G
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_B
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT0_FMT_DYNAMIC_EXP_CNTL
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT0_FMT_CONTROL
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT0_FMT_CONTROL__FMT_PTI_FIELD_POLARITY__SHIFT 0x4
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT0_FMT_CONTROL__FMT_PTI_ENABLE__SHIFT 0x1c
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT0_FMT_CONTROL__FMT_PTI_FIELD_POLARITY_MASK 0x00000010L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT0_FMT_CONTROL__FMT_PTI_ENABLE_MASK 0x10000000L
+//FMT0_FMT_BIT_DEPTH_CONTROL
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT0_FMT_DITHER_RAND_R_SEED
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_G_SEED
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_B_SEED
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_CNTL
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT0_FMT_MAP420_MEMORY_CONTROL
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT0_FMT_422_CONTROL
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+//DPG0_DPG_CONTROL
+#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG0_DPG_RAMP_CONTROL
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG0_DPG_DIMENSIONS
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_COLOUR_R_CR
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_G_Y
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_B_CB
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG0_DPG_OFFSET_SEGMENT
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_STATUS
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+//OPPBUF0_OPPBUF_CONTROL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF0_OPPBUF_CONTROL1
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+//OPP_PIPE0_OPP_PIPE_CONTROL
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+//FMT1_FMT_CLAMP_COMPONENT_R
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_G
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_B
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT1_FMT_DYNAMIC_EXP_CNTL
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT1_FMT_CONTROL
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT1_FMT_CONTROL__FMT_PTI_FIELD_POLARITY__SHIFT 0x4
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT1_FMT_CONTROL__FMT_PTI_ENABLE__SHIFT 0x1c
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT1_FMT_CONTROL__FMT_PTI_FIELD_POLARITY_MASK 0x00000010L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT1_FMT_CONTROL__FMT_PTI_ENABLE_MASK 0x10000000L
+//FMT1_FMT_BIT_DEPTH_CONTROL
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT1_FMT_DITHER_RAND_R_SEED
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_G_SEED
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_B_SEED
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_CNTL
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT1_FMT_MAP420_MEMORY_CONTROL
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT1_FMT_422_CONTROL
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+//DPG1_DPG_CONTROL
+#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG1_DPG_RAMP_CONTROL
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG1_DPG_DIMENSIONS
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_COLOUR_R_CR
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_G_Y
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_B_CB
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG1_DPG_OFFSET_SEGMENT
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_STATUS
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+//OPPBUF1_OPPBUF_CONTROL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF1_OPPBUF_CONTROL1
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+//OPP_PIPE1_OPP_PIPE_CONTROL
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+//FMT2_FMT_CLAMP_COMPONENT_R
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_COMPONENT_G
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_COMPONENT_B
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT2_FMT_DYNAMIC_EXP_CNTL
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT2_FMT_CONTROL
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT2_FMT_CONTROL__FMT_PTI_FIELD_POLARITY__SHIFT 0x4
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT2_FMT_CONTROL__FMT_PTI_ENABLE__SHIFT 0x1c
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT2_FMT_CONTROL__FMT_PTI_FIELD_POLARITY_MASK 0x00000010L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT2_FMT_CONTROL__FMT_PTI_ENABLE_MASK 0x10000000L
+//FMT2_FMT_BIT_DEPTH_CONTROL
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT2_FMT_DITHER_RAND_R_SEED
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT2_FMT_DITHER_RAND_G_SEED
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT2_FMT_DITHER_RAND_B_SEED
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_CNTL
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT2_FMT_MAP420_MEMORY_CONTROL
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT2_FMT_422_CONTROL
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+//DPG2_DPG_CONTROL
+#define DPG2_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG2_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG2_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG2_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG2_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG2_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG2_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG2_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG2_DPG_RAMP_CONTROL
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG2_DPG_DIMENSIONS
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG2_DPG_COLOUR_R_CR
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG2_DPG_COLOUR_G_Y
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG2_DPG_COLOUR_B_CB
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG2_DPG_OFFSET_SEGMENT
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG2_DPG_STATUS
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+//OPPBUF2_OPPBUF_CONTROL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF2_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF2_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF2_OPPBUF_CONTROL1
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+//OPP_PIPE2_OPP_PIPE_CONTROL
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+//FMT3_FMT_CLAMP_COMPONENT_R
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_COMPONENT_G
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_COMPONENT_B
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT3_FMT_DYNAMIC_EXP_CNTL
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT3_FMT_CONTROL
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT3_FMT_CONTROL__FMT_PTI_FIELD_POLARITY__SHIFT 0x4
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT3_FMT_CONTROL__FMT_PTI_ENABLE__SHIFT 0x1c
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT3_FMT_CONTROL__FMT_PTI_FIELD_POLARITY_MASK 0x00000010L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT3_FMT_CONTROL__FMT_PTI_ENABLE_MASK 0x10000000L
+//FMT3_FMT_BIT_DEPTH_CONTROL
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT3_FMT_DITHER_RAND_R_SEED
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT3_FMT_DITHER_RAND_G_SEED
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT3_FMT_DITHER_RAND_B_SEED
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_CNTL
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT3_FMT_MAP420_MEMORY_CONTROL
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT3_FMT_422_CONTROL
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+//DPG3_DPG_CONTROL
+#define DPG3_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG3_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG3_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG3_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG3_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG3_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG3_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG3_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG3_DPG_RAMP_CONTROL
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG3_DPG_DIMENSIONS
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG3_DPG_COLOUR_R_CR
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG3_DPG_COLOUR_G_Y
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG3_DPG_COLOUR_B_CB
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG3_DPG_OFFSET_SEGMENT
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG3_DPG_STATUS
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+//OPPBUF3_OPPBUF_CONTROL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF3_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF3_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF3_OPPBUF_CONTROL1
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+//OPP_PIPE3_OPP_PIPE_CONTROL
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_fmt4_dispdec
+//FMT4_FMT_CLAMP_COMPONENT_R
+#define FMT4_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT4_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT4_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT4_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT4_FMT_CLAMP_COMPONENT_G
+#define FMT4_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT4_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT4_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT4_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT4_FMT_CLAMP_COMPONENT_B
+#define FMT4_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT4_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT4_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT4_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT4_FMT_DYNAMIC_EXP_CNTL
+#define FMT4_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT4_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT4_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT4_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT4_FMT_CONTROL
+#define FMT4_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT4_FMT_CONTROL__FMT_PTI_FIELD_POLARITY__SHIFT 0x4
+#define FMT4_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT4_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT4_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT4_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT4_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT4_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT4_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT4_FMT_CONTROL__FMT_PTI_ENABLE__SHIFT 0x1c
+#define FMT4_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT4_FMT_CONTROL__FMT_PTI_FIELD_POLARITY_MASK 0x00000010L
+#define FMT4_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT4_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT4_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT4_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT4_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT4_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT4_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT4_FMT_CONTROL__FMT_PTI_ENABLE_MASK 0x10000000L
+//FMT4_FMT_BIT_DEPTH_CONTROL
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT4_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT4_FMT_DITHER_RAND_R_SEED
+#define FMT4_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT4_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT4_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT4_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT4_FMT_DITHER_RAND_G_SEED
+#define FMT4_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT4_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT4_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT4_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT4_FMT_DITHER_RAND_B_SEED
+#define FMT4_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT4_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT4_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT4_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT4_FMT_CLAMP_CNTL
+#define FMT4_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT4_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT4_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT4_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT4_FMT_MAP420_MEMORY_CONTROL
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT4_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT4_FMT_422_CONTROL
+#define FMT4_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT4_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg4_dispdec
+//DPG4_DPG_CONTROL
+#define DPG4_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG4_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG4_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG4_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG4_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG4_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG4_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG4_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG4_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG4_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG4_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG4_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG4_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG4_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG4_DPG_RAMP_CONTROL
+#define DPG4_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG4_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG4_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG4_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG4_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG4_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG4_DPG_DIMENSIONS
+#define DPG4_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG4_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG4_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG4_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG4_DPG_COLOUR_R_CR
+#define DPG4_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG4_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG4_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG4_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG4_DPG_COLOUR_G_Y
+#define DPG4_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG4_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG4_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG4_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG4_DPG_COLOUR_B_CB
+#define DPG4_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG4_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG4_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG4_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG4_DPG_OFFSET_SEGMENT
+#define DPG4_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG4_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG4_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG4_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG4_DPG_STATUS
+#define DPG4_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG4_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf4_dispdec
+//OPPBUF4_OPPBUF_CONTROL
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF4_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF4_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF4_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF4_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF4_OPPBUF_CONTROL1
+#define OPPBUF4_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF4_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe4_dispdec
+//OPP_PIPE4_OPP_PIPE_CONTROL
+#define OPP_PIPE4_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE4_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE4_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE4_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE4_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE4_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc4_dispdec
+//OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC4_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+//OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+//OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+//OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT2
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_fmt5_dispdec
+//FMT5_FMT_CLAMP_COMPONENT_R
+#define FMT5_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT5_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT5_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT5_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT5_FMT_CLAMP_COMPONENT_G
+#define FMT5_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT5_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT5_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT5_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT5_FMT_CLAMP_COMPONENT_B
+#define FMT5_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT5_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT5_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT5_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT5_FMT_DYNAMIC_EXP_CNTL
+#define FMT5_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT5_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT5_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT5_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT5_FMT_CONTROL
+#define FMT5_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT5_FMT_CONTROL__FMT_PTI_FIELD_POLARITY__SHIFT 0x4
+#define FMT5_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT5_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT5_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT5_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT5_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT5_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT5_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT5_FMT_CONTROL__FMT_PTI_ENABLE__SHIFT 0x1c
+#define FMT5_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT5_FMT_CONTROL__FMT_PTI_FIELD_POLARITY_MASK 0x00000010L
+#define FMT5_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT5_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT5_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT5_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT5_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT5_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT5_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT5_FMT_CONTROL__FMT_PTI_ENABLE_MASK 0x10000000L
+//FMT5_FMT_BIT_DEPTH_CONTROL
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT5_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT5_FMT_DITHER_RAND_R_SEED
+#define FMT5_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT5_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT5_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT5_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT5_FMT_DITHER_RAND_G_SEED
+#define FMT5_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT5_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT5_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT5_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT5_FMT_DITHER_RAND_B_SEED
+#define FMT5_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT5_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT5_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT5_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT5_FMT_CLAMP_CNTL
+#define FMT5_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT5_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT5_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT5_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT5_FMT_MAP420_MEMORY_CONTROL
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT5_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT5_FMT_422_CONTROL
+#define FMT5_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT5_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg5_dispdec
+//DPG5_DPG_CONTROL
+#define DPG5_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG5_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG5_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG5_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG5_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG5_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG5_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG5_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG5_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG5_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG5_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG5_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG5_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG5_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG5_DPG_RAMP_CONTROL
+#define DPG5_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG5_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG5_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG5_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG5_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG5_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG5_DPG_DIMENSIONS
+#define DPG5_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG5_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG5_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG5_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG5_DPG_COLOUR_R_CR
+#define DPG5_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG5_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG5_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG5_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG5_DPG_COLOUR_G_Y
+#define DPG5_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG5_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG5_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG5_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG5_DPG_COLOUR_B_CB
+#define DPG5_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG5_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG5_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG5_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG5_DPG_OFFSET_SEGMENT
+#define DPG5_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG5_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG5_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG5_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG5_DPG_STATUS
+#define DPG5_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG5_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf5_dispdec
+//OPPBUF5_OPPBUF_CONTROL
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF5_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF5_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF5_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF5_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF5_OPPBUF_CONTROL1
+#define OPPBUF5_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF5_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe5_dispdec
+//OPP_PIPE5_OPP_PIPE_CONTROL
+#define OPP_PIPE5_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE5_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE5_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE5_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE5_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE5_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc5_dispdec
+//OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC5_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+//OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+//OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+//OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT2
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+//OPP_TOP_CLK_CONTROL
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL__SHIFT 0x8
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+//DSCRM0_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+//DSCRM1_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+//DSCRM2_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+//DSCRM3_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm4_dispdec
+//DSCRM4_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM4_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm5_dispdec
+//DSCRM5_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM5_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON16_PERFCOUNTER_CNTL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFCOUNTER_CNTL2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFCOUNTER_STATE
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON16_PERFMON_CNTL
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON16_PERFMON_CNTL2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON16_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON16_PERFMON_CVALUE_LOW
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON16_PERFMON_HI
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFMON_LOW
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+//ODM0_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_DATA_SOURCE_SELECT
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x2
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0xc
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000001L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x0000000CL
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x0000F000L
+//ODM0_OPTC_DATA_FORMAT_CONTROL
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM0_OPTC_BYTES_PER_PIXEL
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM0_OPTC_WIDTH_CONTROL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM0_OPTC_INPUT_CLOCK_CONTROL
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM0_OPTC_MEMORY_CONFIG
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+//ODM0_OPTC_INPUT_SPARE_REGISTER
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+//ODM1_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM1_OPTC_DATA_SOURCE_SELECT
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x2
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0xc
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000001L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x0000000CL
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x0000F000L
+//ODM1_OPTC_DATA_FORMAT_CONTROL
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM1_OPTC_BYTES_PER_PIXEL
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM1_OPTC_WIDTH_CONTROL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM1_OPTC_INPUT_CLOCK_CONTROL
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM1_OPTC_MEMORY_CONFIG
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+//ODM1_OPTC_INPUT_SPARE_REGISTER
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+//ODM2_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM2_OPTC_DATA_SOURCE_SELECT
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x2
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0xc
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000001L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x0000000CL
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x0000F000L
+//ODM2_OPTC_DATA_FORMAT_CONTROL
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM2_OPTC_BYTES_PER_PIXEL
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM2_OPTC_WIDTH_CONTROL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM2_OPTC_INPUT_CLOCK_CONTROL
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM2_OPTC_MEMORY_CONFIG
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+//ODM2_OPTC_INPUT_SPARE_REGISTER
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+//ODM3_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM3_OPTC_DATA_SOURCE_SELECT
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x2
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0xc
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000001L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x0000000CL
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x0000F000L
+//ODM3_OPTC_DATA_FORMAT_CONTROL
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM3_OPTC_BYTES_PER_PIXEL
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM3_OPTC_WIDTH_CONTROL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM3_OPTC_INPUT_CLOCK_CONTROL
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM3_OPTC_MEMORY_CONFIG
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+//ODM3_OPTC_INPUT_SPARE_REGISTER
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm4_dispdec
+//ODM4_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM4_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM4_OPTC_DATA_SOURCE_SELECT
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x2
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0xc
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000001L
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x0000000CL
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+#define ODM4_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x0000F000L
+//ODM4_OPTC_DATA_FORMAT_CONTROL
+#define ODM4_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM4_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM4_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM4_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM4_OPTC_BYTES_PER_PIXEL
+#define ODM4_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM4_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM4_OPTC_WIDTH_CONTROL
+#define ODM4_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM4_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM4_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM4_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM4_OPTC_INPUT_CLOCK_CONTROL
+#define ODM4_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM4_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM4_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM4_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM4_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM4_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM4_OPTC_MEMORY_CONFIG
+#define ODM4_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM4_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+//ODM4_OPTC_INPUT_SPARE_REGISTER
+#define ODM4_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM4_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm5_dispdec
+//ODM5_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM5_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM5_OPTC_DATA_SOURCE_SELECT
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x2
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0xc
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000001L
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x0000000CL
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+#define ODM5_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x0000F000L
+//ODM5_OPTC_DATA_FORMAT_CONTROL
+#define ODM5_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM5_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM5_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM5_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM5_OPTC_BYTES_PER_PIXEL
+#define ODM5_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM5_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM5_OPTC_WIDTH_CONTROL
+#define ODM5_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM5_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM5_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM5_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM5_OPTC_INPUT_CLOCK_CONTROL
+#define ODM5_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM5_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM5_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM5_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM5_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM5_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM5_OPTC_MEMORY_CONFIG
+#define ODM5_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM5_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+//ODM5_OPTC_INPUT_SPARE_REGISTER
+#define ODM5_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM5_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+//OTG0_OTG_H_TOTAL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_H_BLANK_START_END
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A_CNTL
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG0_OTG_H_TIMING_CNTL
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG0_OTG_V_TOTAL
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MIN
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MAX
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MID
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_CONTROL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_V_TOTAL_INT_STATUS
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG0_OTG_VSYNC_NOM_INT_STATUS
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG0_OTG_V_BLANK_START_END
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A_CNTL
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG0_OTG_TRIGA_CNTL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGA_MANUAL_TRIG
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_TRIGB_CNTL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGB_MANUAL_TRIG
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG0_OTG_FLOW_CONTROL
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define OTG0_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+//OTG0_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG0_OTG_CONTROL
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG0_OTG_BLANK_CONTROL
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG0_OTG_PIPE_ABORT_CONTROL
+#define OTG0_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT__SHIFT 0x0
+#define OTG0_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE__SHIFT 0x8
+#define OTG0_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_MASK 0x00000001L
+#define OTG0_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE_MASK 0x00000100L
+//OTG0_OTG_INTERLACE_CONTROL
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG0_OTG_INTERLACE_STATUS
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG0_OTG_PIXEL_DATA_READBACK0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG0_OTG_PIXEL_DATA_READBACK1
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG0_OTG_STATUS
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG0_OTG_STATUS_POSITION
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_NOM_VERT_POSITION
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG0_OTG_STATUS_FRAME_COUNT
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_STATUS_VF_COUNT
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_STATUS_HV_COUNT
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_COUNT_CONTROL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG0_OTG_COUNT_RESET
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG0_OTG_VERT_SYNC_CONTROL
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG0_OTG_STEREO_STATUS
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG0_OTG_STEREO_CONTROL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG0_OTG_SNAPSHOT_STATUS
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG0_OTG_SNAPSHOT_CONTROL
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG0_OTG_SNAPSHOT_POSITION
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_SNAPSHOT_FRAME
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_INTERRUPT_CONTROL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG0_OTG_UPDATE_LOCK
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG0_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG0_OTG_MASTER_EN
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG0_OTG_BLANK_DATA_COLOR
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLANK_DATA_COLOR_EXT
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_BLACK_COLOR
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLACK_COLOR_EXT
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_CRC_CNTL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG0_OTG_CRC_CNTL2
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_DATA_RG
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_DATA_B
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_DATA_RG
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_DATA_B
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC2_DATA_RG
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC2_DATA_B
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC3_DATA_RG
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC3_DATA_B
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_STATIC_SCREEN_CONTROL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG0_OTG_3D_STRUCTURE_CONTROL
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG0_OTG_GSL_VSYNC_GAP
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG0_OTG_MASTER_UPDATE_MODE
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG0_OTG_CLOCK_CONTROL
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG0_OTG_VSTARTUP_PARAM
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG0_OTG_VUPDATE_PARAM
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG0_OTG_VREADY_PARAM
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG0_OTG_GLOBAL_SYNC_STATUS
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG0_OTG_MASTER_UPDATE_LOCK
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG0_OTG_GSL_CONTROL
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG0_OTG_GSL_WINDOW_X
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_GSL_WINDOW_Y
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG0_OTG_VUPDATE_KEEPOUT
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG0_OTG_GLOBAL_CONTROL1
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL2
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL__SHIFT 0x1d
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL_MASK 0x20000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL3
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG0_OTG_TRIG_MANUAL_CONTROL
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FLOW_CONTROL
+#define OTG0_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL_MASK 0x00000001L
+//OTG0_OTG_RANGE_TIMING_INT_STATUS
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG0_OTG_DRR_CONTROL
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG0_OTG_REQUEST_CONTROL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG0_OTG_DSC_START_POSITION
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG0_OTG_PIPE_UPDATE_STATUS
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN__SHIFT 0x1
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR__SHIFT 0x2
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN__SHIFT 0x5
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR__SHIFT 0x6
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN__SHIFT 0x9
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR__SHIFT 0xa
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_MASK 0x00000002L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR_MASK 0x00000004L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_MASK 0x00000020L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR_MASK 0x00000040L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_MASK 0x00000200L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG0_OTG_SPARE_REGISTER
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+//OTG1_OTG_H_TOTAL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_H_BLANK_START_END
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A_CNTL
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG1_OTG_H_TIMING_CNTL
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG1_OTG_V_TOTAL
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MIN
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MAX
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MID
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_CONTROL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_V_TOTAL_INT_STATUS
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG1_OTG_VSYNC_NOM_INT_STATUS
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG1_OTG_V_BLANK_START_END
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A_CNTL
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG1_OTG_TRIGA_CNTL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGA_MANUAL_TRIG
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_TRIGB_CNTL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGB_MANUAL_TRIG
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG1_OTG_FLOW_CONTROL
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define OTG1_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+//OTG1_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG1_OTG_CONTROL
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG1_OTG_BLANK_CONTROL
+#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG1_OTG_PIPE_ABORT_CONTROL
+#define OTG1_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT__SHIFT 0x0
+#define OTG1_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE__SHIFT 0x8
+#define OTG1_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_MASK 0x00000001L
+#define OTG1_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE_MASK 0x00000100L
+//OTG1_OTG_INTERLACE_CONTROL
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG1_OTG_INTERLACE_STATUS
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG1_OTG_PIXEL_DATA_READBACK0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG1_OTG_PIXEL_DATA_READBACK1
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG1_OTG_STATUS
+#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG1_OTG_STATUS_POSITION
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_NOM_VERT_POSITION
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG1_OTG_STATUS_FRAME_COUNT
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_STATUS_VF_COUNT
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_STATUS_HV_COUNT
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_COUNT_CONTROL
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG1_OTG_COUNT_RESET
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG1_OTG_VERT_SYNC_CONTROL
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG1_OTG_STEREO_STATUS
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG1_OTG_STEREO_CONTROL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG1_OTG_SNAPSHOT_STATUS
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG1_OTG_SNAPSHOT_CONTROL
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG1_OTG_SNAPSHOT_POSITION
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_SNAPSHOT_FRAME
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_INTERRUPT_CONTROL
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG1_OTG_UPDATE_LOCK
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG1_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG1_OTG_MASTER_EN
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG1_OTG_BLANK_DATA_COLOR
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG1_OTG_BLANK_DATA_COLOR_EXT
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG1_OTG_BLACK_COLOR
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG1_OTG_BLACK_COLOR_EXT
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_CRC_CNTL
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG1_OTG_CRC_CNTL2
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_DATA_RG
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_DATA_B
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_DATA_RG
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_DATA_B
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC2_DATA_RG
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC2_DATA_B
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC3_DATA_RG
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC3_DATA_B
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_STATIC_SCREEN_CONTROL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG1_OTG_3D_STRUCTURE_CONTROL
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG1_OTG_GSL_VSYNC_GAP
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG1_OTG_MASTER_UPDATE_MODE
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG1_OTG_CLOCK_CONTROL
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG1_OTG_VSTARTUP_PARAM
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG1_OTG_VUPDATE_PARAM
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG1_OTG_VREADY_PARAM
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG1_OTG_GLOBAL_SYNC_STATUS
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG1_OTG_MASTER_UPDATE_LOCK
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG1_OTG_GSL_CONTROL
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG1_OTG_GSL_WINDOW_X
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_GSL_WINDOW_Y
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG1_OTG_VUPDATE_KEEPOUT
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL0
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG1_OTG_GLOBAL_CONTROL1
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL2
+#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL__SHIFT 0x1d
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG1_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL_MASK 0x20000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL3
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG1_OTG_TRIG_MANUAL_CONTROL
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG1_OTG_MANUAL_FLOW_CONTROL
+#define OTG1_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL_MASK 0x00000001L
+//OTG1_OTG_RANGE_TIMING_INT_STATUS
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG1_OTG_DRR_CONTROL
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG1_OTG_REQUEST_CONTROL
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG1_OTG_DSC_START_POSITION
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG1_OTG_PIPE_UPDATE_STATUS
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN__SHIFT 0x1
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR__SHIFT 0x2
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN__SHIFT 0x5
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR__SHIFT 0x6
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN__SHIFT 0x9
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR__SHIFT 0xa
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_MASK 0x00000002L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR_MASK 0x00000004L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_MASK 0x00000020L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR_MASK 0x00000040L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_MASK 0x00000200L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG1_OTG_SPARE_REGISTER
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+//OTG2_OTG_H_TOTAL
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG2_OTG_H_BLANK_START_END
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG2_OTG_H_SYNC_A
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG2_OTG_H_SYNC_A_CNTL
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG2_OTG_H_TIMING_CNTL
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG2_OTG_V_TOTAL
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MIN
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MAX
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MID
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_CONTROL
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_V_TOTAL_INT_STATUS
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG2_OTG_VSYNC_NOM_INT_STATUS
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG2_OTG_V_BLANK_START_END
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG2_OTG_V_SYNC_A
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG2_OTG_V_SYNC_A_CNTL
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG2_OTG_TRIGA_CNTL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG2_OTG_TRIGA_MANUAL_TRIG
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG2_OTG_TRIGB_CNTL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG2_OTG_TRIGB_MANUAL_TRIG
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG2_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG2_OTG_FLOW_CONTROL
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define OTG2_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+//OTG2_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG2_OTG_CONTROL
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG2_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG2_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG2_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG2_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG2_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG2_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG2_OTG_BLANK_CONTROL
+#define OTG2_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG2_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG2_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG2_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG2_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG2_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG2_OTG_PIPE_ABORT_CONTROL
+#define OTG2_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT__SHIFT 0x0
+#define OTG2_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE__SHIFT 0x8
+#define OTG2_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_MASK 0x00000001L
+#define OTG2_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE_MASK 0x00000100L
+//OTG2_OTG_INTERLACE_CONTROL
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG2_OTG_INTERLACE_STATUS
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG2_OTG_PIXEL_DATA_READBACK0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG2_OTG_PIXEL_DATA_READBACK1
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG2_OTG_STATUS
+#define OTG2_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG2_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG2_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG2_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG2_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG2_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG2_OTG_STATUS_POSITION
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG2_OTG_NOM_VERT_POSITION
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG2_OTG_STATUS_FRAME_COUNT
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG2_OTG_STATUS_VF_COUNT
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG2_OTG_STATUS_HV_COUNT
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG2_OTG_COUNT_CONTROL
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG2_OTG_COUNT_RESET
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG2_OTG_VERT_SYNC_CONTROL
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG2_OTG_STEREO_STATUS
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG2_OTG_STEREO_CONTROL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG2_OTG_SNAPSHOT_STATUS
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG2_OTG_SNAPSHOT_CONTROL
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG2_OTG_SNAPSHOT_POSITION
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG2_OTG_SNAPSHOT_FRAME
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG2_OTG_INTERRUPT_CONTROL
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG2_OTG_UPDATE_LOCK
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG2_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG2_OTG_MASTER_EN
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG2_OTG_BLANK_DATA_COLOR
+#define OTG2_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG2_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG2_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG2_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG2_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG2_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG2_OTG_BLANK_DATA_COLOR_EXT
+#define OTG2_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG2_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG2_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG2_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG2_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG2_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG2_OTG_BLACK_COLOR
+#define OTG2_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG2_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG2_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG2_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG2_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG2_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG2_OTG_BLACK_COLOR_EXT
+#define OTG2_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG2_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG2_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG2_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG2_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG2_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG2_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_CRC_CNTL
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG2_OTG_CRC_CNTL2
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG2_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG2_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_DATA_RG
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC0_DATA_B
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_DATA_RG
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC1_DATA_B
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC2_DATA_RG
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC2_DATA_B
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC3_DATA_RG
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC3_DATA_B
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_STATIC_SCREEN_CONTROL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG2_OTG_3D_STRUCTURE_CONTROL
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG2_OTG_GSL_VSYNC_GAP
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG2_OTG_MASTER_UPDATE_MODE
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG2_OTG_CLOCK_CONTROL
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG2_OTG_VSTARTUP_PARAM
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG2_OTG_VUPDATE_PARAM
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG2_OTG_VREADY_PARAM
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG2_OTG_GLOBAL_SYNC_STATUS
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG2_OTG_MASTER_UPDATE_LOCK
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG2_OTG_GSL_CONTROL
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG2_OTG_GSL_WINDOW_X
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG2_OTG_GSL_WINDOW_Y
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG2_OTG_VUPDATE_KEEPOUT
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL0
+#define OTG2_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG2_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG2_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG2_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG2_OTG_GLOBAL_CONTROL1
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL2
+#define OTG2_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL__SHIFT 0x1d
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG2_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL_MASK 0x20000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL3
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG2_OTG_TRIG_MANUAL_CONTROL
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG2_OTG_MANUAL_FLOW_CONTROL
+#define OTG2_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL__SHIFT 0x0
+#define OTG2_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL_MASK 0x00000001L
+//OTG2_OTG_RANGE_TIMING_INT_STATUS
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG2_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG2_OTG_DRR_CONTROL
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG2_OTG_REQUEST_CONTROL
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG2_OTG_DSC_START_POSITION
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG2_OTG_PIPE_UPDATE_STATUS
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN__SHIFT 0x1
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR__SHIFT 0x2
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN__SHIFT 0x5
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR__SHIFT 0x6
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN__SHIFT 0x9
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR__SHIFT 0xa
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_MASK 0x00000002L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR_MASK 0x00000004L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_MASK 0x00000020L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR_MASK 0x00000040L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_MASK 0x00000200L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR_MASK 0x00000400L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG2_OTG_SPARE_REGISTER
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+//OTG3_OTG_H_TOTAL
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG3_OTG_H_BLANK_START_END
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG3_OTG_H_SYNC_A
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG3_OTG_H_SYNC_A_CNTL
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG3_OTG_H_TIMING_CNTL
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG3_OTG_V_TOTAL
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MIN
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MAX
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MID
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_CONTROL
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_V_TOTAL_INT_STATUS
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG3_OTG_VSYNC_NOM_INT_STATUS
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG3_OTG_V_BLANK_START_END
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG3_OTG_V_SYNC_A
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG3_OTG_V_SYNC_A_CNTL
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG3_OTG_TRIGA_CNTL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG3_OTG_TRIGA_MANUAL_TRIG
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG3_OTG_TRIGB_CNTL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG3_OTG_TRIGB_MANUAL_TRIG
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG3_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG3_OTG_FLOW_CONTROL
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define OTG3_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+//OTG3_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG3_OTG_CONTROL
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG3_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG3_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG3_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG3_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG3_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG3_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG3_OTG_BLANK_CONTROL
+#define OTG3_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG3_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG3_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG3_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG3_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG3_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG3_OTG_PIPE_ABORT_CONTROL
+#define OTG3_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT__SHIFT 0x0
+#define OTG3_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE__SHIFT 0x8
+#define OTG3_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_MASK 0x00000001L
+#define OTG3_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE_MASK 0x00000100L
+//OTG3_OTG_INTERLACE_CONTROL
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG3_OTG_INTERLACE_STATUS
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG3_OTG_PIXEL_DATA_READBACK0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG3_OTG_PIXEL_DATA_READBACK1
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG3_OTG_STATUS
+#define OTG3_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG3_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG3_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG3_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG3_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG3_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG3_OTG_STATUS_POSITION
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG3_OTG_NOM_VERT_POSITION
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG3_OTG_STATUS_FRAME_COUNT
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG3_OTG_STATUS_VF_COUNT
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG3_OTG_STATUS_HV_COUNT
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG3_OTG_COUNT_CONTROL
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG3_OTG_COUNT_RESET
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG3_OTG_VERT_SYNC_CONTROL
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG3_OTG_STEREO_STATUS
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG3_OTG_STEREO_CONTROL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG3_OTG_SNAPSHOT_STATUS
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG3_OTG_SNAPSHOT_CONTROL
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG3_OTG_SNAPSHOT_POSITION
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG3_OTG_SNAPSHOT_FRAME
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG3_OTG_INTERRUPT_CONTROL
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG3_OTG_UPDATE_LOCK
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG3_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG3_OTG_MASTER_EN
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG3_OTG_BLANK_DATA_COLOR
+#define OTG3_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG3_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG3_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG3_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG3_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG3_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG3_OTG_BLANK_DATA_COLOR_EXT
+#define OTG3_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG3_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG3_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG3_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG3_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG3_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG3_OTG_BLACK_COLOR
+#define OTG3_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG3_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG3_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG3_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG3_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG3_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG3_OTG_BLACK_COLOR_EXT
+#define OTG3_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG3_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG3_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG3_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG3_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG3_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG3_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_CRC_CNTL
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG3_OTG_CRC_CNTL2
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG3_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG3_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_DATA_RG
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC0_DATA_B
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_DATA_RG
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC1_DATA_B
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC2_DATA_RG
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC2_DATA_B
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC3_DATA_RG
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC3_DATA_B
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_STATIC_SCREEN_CONTROL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG3_OTG_3D_STRUCTURE_CONTROL
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG3_OTG_GSL_VSYNC_GAP
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG3_OTG_MASTER_UPDATE_MODE
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG3_OTG_CLOCK_CONTROL
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG3_OTG_VSTARTUP_PARAM
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG3_OTG_VUPDATE_PARAM
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG3_OTG_VREADY_PARAM
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG3_OTG_GLOBAL_SYNC_STATUS
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG3_OTG_MASTER_UPDATE_LOCK
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG3_OTG_GSL_CONTROL
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG3_OTG_GSL_WINDOW_X
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG3_OTG_GSL_WINDOW_Y
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG3_OTG_VUPDATE_KEEPOUT
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL0
+#define OTG3_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG3_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG3_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG3_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG3_OTG_GLOBAL_CONTROL1
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL2
+#define OTG3_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL__SHIFT 0x1d
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG3_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL_MASK 0x20000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL3
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG3_OTG_TRIG_MANUAL_CONTROL
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG3_OTG_MANUAL_FLOW_CONTROL
+#define OTG3_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL__SHIFT 0x0
+#define OTG3_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL_MASK 0x00000001L
+//OTG3_OTG_RANGE_TIMING_INT_STATUS
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG3_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG3_OTG_DRR_CONTROL
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG3_OTG_REQUEST_CONTROL
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG3_OTG_DSC_START_POSITION
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG3_OTG_PIPE_UPDATE_STATUS
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN__SHIFT 0x1
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR__SHIFT 0x2
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN__SHIFT 0x5
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR__SHIFT 0x6
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN__SHIFT 0x9
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR__SHIFT 0xa
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_MASK 0x00000002L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR_MASK 0x00000004L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_MASK 0x00000020L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR_MASK 0x00000040L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_MASK 0x00000200L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR_MASK 0x00000400L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG3_OTG_SPARE_REGISTER
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg4_dispdec
+//OTG4_OTG_H_TOTAL
+#define OTG4_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG4_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG4_OTG_H_BLANK_START_END
+#define OTG4_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG4_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG4_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG4_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG4_OTG_H_SYNC_A
+#define OTG4_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG4_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG4_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG4_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG4_OTG_H_SYNC_A_CNTL
+#define OTG4_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG4_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG4_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG4_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG4_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG4_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG4_OTG_H_TIMING_CNTL
+#define OTG4_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG4_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG4_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG4_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG4_OTG_V_TOTAL
+#define OTG4_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG4_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG4_OTG_V_TOTAL_MIN
+#define OTG4_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG4_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG4_OTG_V_TOTAL_MAX
+#define OTG4_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG4_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG4_OTG_V_TOTAL_MID
+#define OTG4_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG4_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG4_OTG_V_TOTAL_CONTROL
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG4_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG4_OTG_V_TOTAL_INT_STATUS
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG4_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG4_OTG_VSYNC_NOM_INT_STATUS
+#define OTG4_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG4_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG4_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG4_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG4_OTG_V_BLANK_START_END
+#define OTG4_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG4_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG4_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG4_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG4_OTG_V_SYNC_A
+#define OTG4_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG4_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG4_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG4_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG4_OTG_V_SYNC_A_CNTL
+#define OTG4_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG4_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG4_OTG_TRIGA_CNTL
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG4_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG4_OTG_TRIGA_MANUAL_TRIG
+#define OTG4_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG4_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG4_OTG_TRIGB_CNTL
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG4_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG4_OTG_TRIGB_MANUAL_TRIG
+#define OTG4_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG4_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG4_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG4_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG4_OTG_FLOW_CONTROL
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define OTG4_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+//OTG4_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG4_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG4_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG4_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG4_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG4_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG4_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG4_OTG_CONTROL
+#define OTG4_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG4_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG4_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG4_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG4_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG4_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG4_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG4_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG4_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG4_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG4_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG4_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG4_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG4_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG4_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG4_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG4_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG4_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG4_OTG_BLANK_CONTROL
+#define OTG4_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG4_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG4_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG4_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG4_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG4_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG4_OTG_PIPE_ABORT_CONTROL
+#define OTG4_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT__SHIFT 0x0
+#define OTG4_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE__SHIFT 0x8
+#define OTG4_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_MASK 0x00000001L
+#define OTG4_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE_MASK 0x00000100L
+//OTG4_OTG_INTERLACE_CONTROL
+#define OTG4_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG4_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG4_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG4_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG4_OTG_INTERLACE_STATUS
+#define OTG4_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG4_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG4_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG4_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG4_OTG_PIXEL_DATA_READBACK0
+#define OTG4_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG4_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG4_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG4_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG4_OTG_PIXEL_DATA_READBACK1
+#define OTG4_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG4_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG4_OTG_STATUS
+#define OTG4_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG4_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG4_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG4_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG4_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG4_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG4_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG4_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG4_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG4_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG4_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG4_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG4_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG4_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG4_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG4_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG4_OTG_STATUS_POSITION
+#define OTG4_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG4_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG4_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG4_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG4_OTG_NOM_VERT_POSITION
+#define OTG4_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG4_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG4_OTG_STATUS_FRAME_COUNT
+#define OTG4_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG4_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG4_OTG_STATUS_VF_COUNT
+#define OTG4_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG4_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG4_OTG_STATUS_HV_COUNT
+#define OTG4_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG4_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG4_OTG_COUNT_CONTROL
+#define OTG4_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG4_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG4_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG4_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG4_OTG_COUNT_RESET
+#define OTG4_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG4_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG4_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG4_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG4_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG4_OTG_VERT_SYNC_CONTROL
+#define OTG4_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG4_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG4_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG4_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG4_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG4_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG4_OTG_STEREO_STATUS
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG4_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG4_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG4_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG4_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG4_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG4_OTG_STEREO_CONTROL
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG4_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG4_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG4_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG4_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG4_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG4_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG4_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG4_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG4_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG4_OTG_SNAPSHOT_STATUS
+#define OTG4_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG4_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG4_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG4_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG4_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG4_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG4_OTG_SNAPSHOT_CONTROL
+#define OTG4_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG4_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG4_OTG_SNAPSHOT_POSITION
+#define OTG4_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG4_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG4_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG4_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG4_OTG_SNAPSHOT_FRAME
+#define OTG4_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG4_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG4_OTG_INTERRUPT_CONTROL
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG4_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG4_OTG_UPDATE_LOCK
+#define OTG4_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG4_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG4_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG4_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG4_OTG_MASTER_EN
+#define OTG4_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG4_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG4_OTG_BLANK_DATA_COLOR
+#define OTG4_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG4_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG4_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG4_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG4_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG4_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG4_OTG_BLANK_DATA_COLOR_EXT
+#define OTG4_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG4_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG4_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG4_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG4_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG4_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG4_OTG_BLACK_COLOR
+#define OTG4_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG4_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG4_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG4_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG4_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG4_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG4_OTG_BLACK_COLOR_EXT
+#define OTG4_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG4_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG4_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG4_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG4_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG4_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG4_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG4_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG4_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG4_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG4_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG4_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG4_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG4_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG4_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG4_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG4_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG4_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG4_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG4_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG4_OTG_CRC_CNTL
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG4_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG4_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG4_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG4_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG4_OTG_CRC_CNTL2
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG4_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG4_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG4_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG4_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG4_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG4_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG4_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG4_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG4_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG4_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG4_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG4_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG4_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG4_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC0_DATA_RG
+#define OTG4_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG4_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG4_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG4_OTG_CRC0_DATA_B
+#define OTG4_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG4_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG4_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG4_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG4_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG4_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG4_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG4_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG4_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG4_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG4_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG4_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG4_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG4_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG4_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG4_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG4_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG4_OTG_CRC1_DATA_RG
+#define OTG4_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG4_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG4_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG4_OTG_CRC1_DATA_B
+#define OTG4_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG4_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG4_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG4_OTG_CRC2_DATA_RG
+#define OTG4_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG4_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG4_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG4_OTG_CRC2_DATA_B
+#define OTG4_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG4_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG4_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG4_OTG_CRC3_DATA_RG
+#define OTG4_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG4_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG4_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG4_OTG_CRC3_DATA_B
+#define OTG4_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG4_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG4_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG4_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG4_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG4_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG4_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG4_OTG_STATIC_SCREEN_CONTROL
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG4_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG4_OTG_3D_STRUCTURE_CONTROL
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG4_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG4_OTG_GSL_VSYNC_GAP
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG4_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG4_OTG_MASTER_UPDATE_MODE
+#define OTG4_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG4_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG4_OTG_CLOCK_CONTROL
+#define OTG4_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG4_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG4_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG4_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG4_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG4_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG4_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG4_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG4_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG4_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG4_OTG_VSTARTUP_PARAM
+#define OTG4_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG4_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG4_OTG_VUPDATE_PARAM
+#define OTG4_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG4_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG4_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG4_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG4_OTG_VREADY_PARAM
+#define OTG4_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG4_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG4_OTG_GLOBAL_SYNC_STATUS
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG4_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG4_OTG_MASTER_UPDATE_LOCK
+#define OTG4_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG4_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG4_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG4_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG4_OTG_GSL_CONTROL
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG4_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG4_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG4_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG4_OTG_GSL_WINDOW_X
+#define OTG4_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG4_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG4_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG4_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG4_OTG_GSL_WINDOW_Y
+#define OTG4_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG4_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG4_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG4_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG4_OTG_VUPDATE_KEEPOUT
+#define OTG4_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG4_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG4_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG4_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG4_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG4_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG4_OTG_GLOBAL_CONTROL0
+#define OTG4_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG4_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG4_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG4_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG4_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG4_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG4_OTG_GLOBAL_CONTROL1
+#define OTG4_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG4_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG4_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG4_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG4_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG4_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG4_OTG_GLOBAL_CONTROL2
+#define OTG4_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG4_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG4_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG4_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL__SHIFT 0x1d
+#define OTG4_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG4_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG4_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG4_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG4_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG4_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL_MASK 0x20000000L
+#define OTG4_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG4_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG4_OTG_GLOBAL_CONTROL3
+#define OTG4_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG4_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG4_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG4_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG4_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG4_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG4_OTG_TRIG_MANUAL_CONTROL
+#define OTG4_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG4_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG4_OTG_MANUAL_FLOW_CONTROL
+#define OTG4_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL__SHIFT 0x0
+#define OTG4_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL_MASK 0x00000001L
+//OTG4_OTG_RANGE_TIMING_INT_STATUS
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG4_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG4_OTG_DRR_CONTROL
+#define OTG4_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG4_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG4_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG4_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG4_OTG_REQUEST_CONTROL
+#define OTG4_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG4_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG4_OTG_DSC_START_POSITION
+#define OTG4_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG4_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG4_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG4_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG4_OTG_PIPE_UPDATE_STATUS
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN__SHIFT 0x1
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR__SHIFT 0x2
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN__SHIFT 0x5
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR__SHIFT 0x6
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN__SHIFT 0x9
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR__SHIFT 0xa
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_MASK 0x00000002L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR_MASK 0x00000004L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_MASK 0x00000020L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR_MASK 0x00000040L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_MASK 0x00000200L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR_MASK 0x00000400L
+#define OTG4_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG4_OTG_SPARE_REGISTER
+#define OTG4_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG4_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg5_dispdec
+//OTG5_OTG_H_TOTAL
+#define OTG5_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG5_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG5_OTG_H_BLANK_START_END
+#define OTG5_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG5_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG5_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG5_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG5_OTG_H_SYNC_A
+#define OTG5_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG5_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG5_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG5_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG5_OTG_H_SYNC_A_CNTL
+#define OTG5_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG5_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG5_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG5_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG5_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG5_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG5_OTG_H_TIMING_CNTL
+#define OTG5_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG5_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG5_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG5_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG5_OTG_V_TOTAL
+#define OTG5_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG5_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG5_OTG_V_TOTAL_MIN
+#define OTG5_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG5_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG5_OTG_V_TOTAL_MAX
+#define OTG5_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG5_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG5_OTG_V_TOTAL_MID
+#define OTG5_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG5_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG5_OTG_V_TOTAL_CONTROL
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG5_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG5_OTG_V_TOTAL_INT_STATUS
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG5_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG5_OTG_VSYNC_NOM_INT_STATUS
+#define OTG5_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG5_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG5_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG5_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG5_OTG_V_BLANK_START_END
+#define OTG5_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG5_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG5_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG5_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG5_OTG_V_SYNC_A
+#define OTG5_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG5_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG5_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG5_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG5_OTG_V_SYNC_A_CNTL
+#define OTG5_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG5_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG5_OTG_TRIGA_CNTL
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG5_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG5_OTG_TRIGA_MANUAL_TRIG
+#define OTG5_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG5_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG5_OTG_TRIGB_CNTL
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG5_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG5_OTG_TRIGB_MANUAL_TRIG
+#define OTG5_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG5_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG5_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG5_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG5_OTG_FLOW_CONTROL
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define OTG5_OTG_FLOW_CONTROL__OTG_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+//OTG5_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG5_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG5_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG5_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG5_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG5_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG5_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG5_OTG_CONTROL
+#define OTG5_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG5_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG5_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG5_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG5_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG5_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG5_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG5_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG5_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG5_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG5_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG5_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG5_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG5_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG5_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG5_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG5_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG5_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG5_OTG_BLANK_CONTROL
+#define OTG5_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG5_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG5_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG5_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG5_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG5_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG5_OTG_PIPE_ABORT_CONTROL
+#define OTG5_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT__SHIFT 0x0
+#define OTG5_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE__SHIFT 0x8
+#define OTG5_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_MASK 0x00000001L
+#define OTG5_OTG_PIPE_ABORT_CONTROL__OTG_PIPE_ABORT_DONE_MASK 0x00000100L
+//OTG5_OTG_INTERLACE_CONTROL
+#define OTG5_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG5_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG5_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG5_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG5_OTG_INTERLACE_STATUS
+#define OTG5_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG5_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG5_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG5_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG5_OTG_PIXEL_DATA_READBACK0
+#define OTG5_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG5_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG5_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG5_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG5_OTG_PIXEL_DATA_READBACK1
+#define OTG5_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG5_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG5_OTG_STATUS
+#define OTG5_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG5_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG5_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG5_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG5_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG5_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG5_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG5_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG5_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG5_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG5_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG5_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG5_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG5_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG5_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG5_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG5_OTG_STATUS_POSITION
+#define OTG5_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG5_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG5_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG5_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG5_OTG_NOM_VERT_POSITION
+#define OTG5_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG5_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG5_OTG_STATUS_FRAME_COUNT
+#define OTG5_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG5_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG5_OTG_STATUS_VF_COUNT
+#define OTG5_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG5_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG5_OTG_STATUS_HV_COUNT
+#define OTG5_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG5_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG5_OTG_COUNT_CONTROL
+#define OTG5_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG5_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG5_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG5_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG5_OTG_COUNT_RESET
+#define OTG5_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG5_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG5_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG5_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG5_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG5_OTG_VERT_SYNC_CONTROL
+#define OTG5_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG5_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG5_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG5_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG5_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG5_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG5_OTG_STEREO_STATUS
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG5_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG5_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG5_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG5_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG5_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG5_OTG_STEREO_CONTROL
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG5_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG5_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG5_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG5_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG5_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG5_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG5_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG5_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG5_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG5_OTG_SNAPSHOT_STATUS
+#define OTG5_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG5_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG5_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG5_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG5_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG5_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG5_OTG_SNAPSHOT_CONTROL
+#define OTG5_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG5_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG5_OTG_SNAPSHOT_POSITION
+#define OTG5_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG5_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG5_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG5_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG5_OTG_SNAPSHOT_FRAME
+#define OTG5_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG5_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG5_OTG_INTERRUPT_CONTROL
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG5_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG5_OTG_UPDATE_LOCK
+#define OTG5_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG5_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG5_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG5_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG5_OTG_MASTER_EN
+#define OTG5_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG5_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG5_OTG_BLANK_DATA_COLOR
+#define OTG5_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG5_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG5_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG5_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG5_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG5_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG5_OTG_BLANK_DATA_COLOR_EXT
+#define OTG5_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG5_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG5_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG5_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG5_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG5_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG5_OTG_BLACK_COLOR
+#define OTG5_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG5_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG5_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG5_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG5_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG5_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG5_OTG_BLACK_COLOR_EXT
+#define OTG5_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG5_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG5_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG5_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG5_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG5_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG5_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG5_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG5_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG5_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG5_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG5_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG5_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG5_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG5_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG5_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG5_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG5_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG5_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG5_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG5_OTG_CRC_CNTL
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG5_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG5_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG5_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG5_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG5_OTG_CRC_CNTL2
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG5_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG5_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG5_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG5_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG5_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG5_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG5_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG5_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG5_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG5_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG5_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG5_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG5_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG5_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC0_DATA_RG
+#define OTG5_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG5_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG5_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG5_OTG_CRC0_DATA_B
+#define OTG5_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG5_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG5_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG5_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG5_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG5_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG5_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG5_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG5_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG5_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG5_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG5_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG5_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG5_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG5_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG5_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG5_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG5_OTG_CRC1_DATA_RG
+#define OTG5_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG5_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG5_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG5_OTG_CRC1_DATA_B
+#define OTG5_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG5_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG5_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG5_OTG_CRC2_DATA_RG
+#define OTG5_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG5_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG5_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG5_OTG_CRC2_DATA_B
+#define OTG5_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG5_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG5_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG5_OTG_CRC3_DATA_RG
+#define OTG5_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG5_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG5_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG5_OTG_CRC3_DATA_B
+#define OTG5_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG5_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG5_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG5_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG5_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG5_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG5_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG5_OTG_STATIC_SCREEN_CONTROL
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG5_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG5_OTG_3D_STRUCTURE_CONTROL
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG5_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG5_OTG_GSL_VSYNC_GAP
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG5_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG5_OTG_MASTER_UPDATE_MODE
+#define OTG5_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG5_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG5_OTG_CLOCK_CONTROL
+#define OTG5_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG5_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG5_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG5_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG5_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG5_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG5_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG5_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG5_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG5_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG5_OTG_VSTARTUP_PARAM
+#define OTG5_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG5_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG5_OTG_VUPDATE_PARAM
+#define OTG5_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG5_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG5_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG5_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG5_OTG_VREADY_PARAM
+#define OTG5_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG5_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG5_OTG_GLOBAL_SYNC_STATUS
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG5_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG5_OTG_MASTER_UPDATE_LOCK
+#define OTG5_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG5_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG5_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG5_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG5_OTG_GSL_CONTROL
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG5_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG5_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG5_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG5_OTG_GSL_WINDOW_X
+#define OTG5_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG5_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG5_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG5_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG5_OTG_GSL_WINDOW_Y
+#define OTG5_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG5_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG5_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG5_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG5_OTG_VUPDATE_KEEPOUT
+#define OTG5_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG5_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG5_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG5_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG5_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG5_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG5_OTG_GLOBAL_CONTROL0
+#define OTG5_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG5_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG5_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG5_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG5_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG5_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG5_OTG_GLOBAL_CONTROL1
+#define OTG5_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG5_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG5_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG5_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG5_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG5_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG5_OTG_GLOBAL_CONTROL2
+#define OTG5_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG5_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG5_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG5_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL__SHIFT 0x1d
+#define OTG5_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG5_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG5_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG5_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG5_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG5_OTG_GLOBAL_CONTROL2__MASTER_UPDATE_LOCK_WINDOW_SEL_MASK 0x20000000L
+#define OTG5_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG5_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG5_OTG_GLOBAL_CONTROL3
+#define OTG5_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG5_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG5_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG5_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG5_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG5_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG5_OTG_TRIG_MANUAL_CONTROL
+#define OTG5_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG5_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG5_OTG_MANUAL_FLOW_CONTROL
+#define OTG5_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL__SHIFT 0x0
+#define OTG5_OTG_MANUAL_FLOW_CONTROL__MANUAL_FLOW_CONTROL_MASK 0x00000001L
+//OTG5_OTG_RANGE_TIMING_INT_STATUS
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG5_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG5_OTG_DRR_CONTROL
+#define OTG5_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG5_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG5_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG5_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG5_OTG_REQUEST_CONTROL
+#define OTG5_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG5_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG5_OTG_DSC_START_POSITION
+#define OTG5_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG5_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG5_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG5_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG5_OTG_PIPE_UPDATE_STATUS
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN__SHIFT 0x1
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR__SHIFT 0x2
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN__SHIFT 0x5
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR__SHIFT 0x6
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN__SHIFT 0x9
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR__SHIFT 0xa
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_MASK 0x00000002L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_TAKEN_CLEAR_MASK 0x00000004L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_MASK 0x00000020L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_TAKEN_CLEAR_MASK 0x00000040L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_MASK 0x00000200L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_TAKEN_CLEAR_MASK 0x00000400L
+#define OTG5_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG5_OTG_SPARE_REGISTER
+#define OTG5_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG5_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+//DWB_SOURCE_SELECT
+#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT__SHIFT 0x0
+#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT__SHIFT 0x3
+#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT__SHIFT 0x6
+#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT_MASK 0x00000007L
+#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT_MASK 0x00000038L
+#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT_MASK 0x000001C0L
+//GSL_SOURCE_SELECT
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L
+//OPTC_CLOCK_CONTROL
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL__SHIFT 0x8
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL_MASK 0x00000F00L
+//ODM_MEM_PWR_CTRL
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE__SHIFT 0x4
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS__SHIFT 0x6
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE__SHIFT 0x8
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS__SHIFT 0xa
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE__SHIFT 0xc
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS__SHIFT 0xe
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE__SHIFT 0x10
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS__SHIFT 0x12
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE__SHIFT 0x14
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS__SHIFT 0x16
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE__SHIFT 0x18
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS__SHIFT 0x1a
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE__SHIFT 0x1c
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS__SHIFT 0x1e
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS_MASK 0x00000004L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE_MASK 0x00000030L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS_MASK 0x00000040L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE_MASK 0x00000300L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS_MASK 0x00000400L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE_MASK 0x00003000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS_MASK 0x00004000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE_MASK 0x00030000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS_MASK 0x00040000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE_MASK 0x00300000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS_MASK 0x00400000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE_MASK 0x03000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS_MASK 0x04000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE_MASK 0x30000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS_MASK 0x40000000L
+//ODM_MEM_PWR_CTRL2
+#define ODM_MEM_PWR_CTRL2__ODM_MEM8_PWR_FORCE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL2__ODM_MEM8_PWR_DIS__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL2__ODM_MEM9_PWR_FORCE__SHIFT 0x4
+#define ODM_MEM_PWR_CTRL2__ODM_MEM9_PWR_DIS__SHIFT 0x6
+#define ODM_MEM_PWR_CTRL2__ODM_MEM10_PWR_FORCE__SHIFT 0x8
+#define ODM_MEM_PWR_CTRL2__ODM_MEM10_PWR_DIS__SHIFT 0xa
+#define ODM_MEM_PWR_CTRL2__ODM_MEM11_PWR_FORCE__SHIFT 0xc
+#define ODM_MEM_PWR_CTRL2__ODM_MEM11_PWR_DIS__SHIFT 0xe
+#define ODM_MEM_PWR_CTRL2__ODM_MEM8_PWR_FORCE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM8_PWR_DIS_MASK 0x00000004L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM9_PWR_FORCE_MASK 0x00000030L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM9_PWR_DIS_MASK 0x00000040L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM10_PWR_FORCE_MASK 0x00000300L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM10_PWR_DIS_MASK 0x00000400L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM11_PWR_FORCE_MASK 0x00003000L
+#define ODM_MEM_PWR_CTRL2__ODM_MEM11_PWR_DIS_MASK 0x00004000L
+//ODM_MEM_PWR_CTRL3
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE_MASK 0x0000000CL
+//ODM_MEM_PWR_STATUS
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE__SHIFT 0x0
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE__SHIFT 0x2
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE__SHIFT 0x4
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE__SHIFT 0x6
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE__SHIFT 0x8
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE__SHIFT 0xa
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE__SHIFT 0xc
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE__SHIFT 0xe
+#define ODM_MEM_PWR_STATUS__ODM_MEM8_PWR_STATE__SHIFT 0x10
+#define ODM_MEM_PWR_STATUS__ODM_MEM9_PWR_STATE__SHIFT 0x12
+#define ODM_MEM_PWR_STATUS__ODM_MEM10_PWR_STATE__SHIFT 0x14
+#define ODM_MEM_PWR_STATUS__ODM_MEM11_PWR_STATE__SHIFT 0x16
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE_MASK 0x00000003L
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE_MASK 0x0000000CL
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE_MASK 0x00000030L
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE_MASK 0x000000C0L
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE_MASK 0x00000300L
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE_MASK 0x00000C00L
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE_MASK 0x00003000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE_MASK 0x0000C000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM8_PWR_STATE_MASK 0x00030000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM9_PWR_STATE_MASK 0x000C0000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM10_PWR_STATE_MASK 0x00300000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM11_PWR_STATE_MASK 0x00C00000L
+//OPTC_MISC_SPARE_REGISTER
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG__SHIFT 0x0
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON17_PERFCOUNTER_CNTL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFCOUNTER_CNTL2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFCOUNTER_STATE
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON17_PERFMON_CNTL
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON17_PERFMON_CNTL2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON17_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON17_PERFMON_CVALUE_LOW
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON17_PERFMON_HI
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFMON_LOW
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+//DC_I2C_CONTROL
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+//DC_I2C_ARBITRATION
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+//DC_I2C_INTERRUPT_CONTROL
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0xc
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0xd
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0xe
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x11
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x12
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x14
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x15
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x16
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x18
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x1a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x1b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x1c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x1d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x00001000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x00002000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x00004000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x00010000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x00020000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x00040000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x00100000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x00200000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x00400000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x01000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x04000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x08000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000L
+//DC_I2C_SW_STATUS
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+//DC_I2C_DDC1_HW_STATUS
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC2_HW_STATUS
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC3_HW_STATUS
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC4_HW_STATUS
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC5_HW_STATUS
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC1_SPEED
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC1_SETUP
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC2_SPEED
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC2_SETUP
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC3_SPEED
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC3_SETUP
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC4_SPEED
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC4_SETUP
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC5_SPEED
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC5_SETUP
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_TRANSACTION0
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION2
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION3
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L
+//DC_I2C_DATA
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+//DC_I2C_EDID_DETECT_CTRL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x0
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x14
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x1c
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0x0000FFFFL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0x00F00000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000L
+//DC_I2C_READ_REQUEST_INTERRUPT
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED__SHIFT 0x0
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT__SHIFT 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK__SHIFT 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK__SHIFT 0x3
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED__SHIFT 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT__SHIFT 0x5
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK__SHIFT 0x6
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK__SHIFT 0x7
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT__SHIFT 0x9
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK__SHIFT 0xa
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK__SHIFT 0xb
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED__SHIFT 0xc
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT__SHIFT 0xd
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK__SHIFT 0xe
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK__SHIFT 0xf
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED__SHIFT 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT__SHIFT 0x11
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK__SHIFT 0x12
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK__SHIFT 0x13
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED__SHIFT 0x14
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT__SHIFT 0x15
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK__SHIFT 0x16
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK__SHIFT 0x17
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED__SHIFT 0x18
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT__SHIFT 0x19
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK__SHIFT 0x1a
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK__SHIFT 0x1b
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE__SHIFT 0x1e
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0x1f
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED_MASK 0x00000001L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT_MASK 0x00000002L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK_MASK 0x00000004L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK_MASK 0x00000008L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED_MASK 0x00000010L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT_MASK 0x00000020L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK_MASK 0x00000040L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK_MASK 0x00000080L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED_MASK 0x00000100L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT_MASK 0x00000200L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK_MASK 0x00000400L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK_MASK 0x00000800L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED_MASK 0x00001000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT_MASK 0x00002000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK_MASK 0x00004000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK_MASK 0x00008000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED_MASK 0x00010000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT_MASK 0x00020000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK_MASK 0x00040000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK_MASK 0x00080000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED_MASK 0x00100000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT_MASK 0x00200000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK_MASK 0x00400000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK_MASK 0x00800000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED_MASK 0x01000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT_MASK 0x02000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK_MASK 0x04000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK_MASK 0x08000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE_MASK 0x40000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x80000000L
+
+
+//DIG_SOFT_RESET
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET__SHIFT 0x0
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET__SHIFT 0x1
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET__SHIFT 0x4
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET__SHIFT 0x5
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET__SHIFT 0x8
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET__SHIFT 0x9
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET__SHIFT 0xc
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET__SHIFT 0xd
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET__SHIFT 0x10
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET__SHIFT 0x11
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET__SHIFT 0x14
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET__SHIFT 0x15
+#define DIG_SOFT_RESET__DIGG_FE_SOFT_RESET__SHIFT 0x18
+#define DIG_SOFT_RESET__DIGG_BE_SOFT_RESET__SHIFT 0x19
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET_MASK 0x00000001L
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET_MASK 0x00000002L
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET_MASK 0x00000010L
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET_MASK 0x00000100L
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET_MASK 0x00000200L
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET_MASK 0x00001000L
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET_MASK 0x00002000L
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET_MASK 0x00010000L
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET_MASK 0x00020000L
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET_MASK 0x00100000L
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET_MASK 0x00200000L
+#define DIG_SOFT_RESET__DIGG_FE_SOFT_RESET_MASK 0x01000000L
+#define DIG_SOFT_RESET__DIGG_BE_SOFT_RESET_MASK 0x02000000L
+//DIO_MEM_PWR_STATUS1
+#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE__SHIFT 0x2
+#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE__SHIFT 0xa
+#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE__SHIFT 0x10
+#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE__SHIFT 0x12
+#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE__SHIFT 0x14
+#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE__SHIFT 0x16
+#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE__SHIFT 0x18
+#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE__SHIFT 0x1a
+#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE_MASK 0x00000004L
+#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE_MASK 0x00000400L
+#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE_MASK 0x00030000L
+#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE_MASK 0x000C0000L
+#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE_MASK 0x00300000L
+#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE_MASK 0x00C00000L
+#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE_MASK 0x03000000L
+#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE_MASK 0x0C000000L
+//DIO_CLK_CNTL2
+#define DIO_CLK_CNTL2__DIO_TEST_CLK_SEL__SHIFT 0x0
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS__SHIFT 0x7
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS__SHIFT 0x8
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS__SHIFT 0x9
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS__SHIFT 0x11
+#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS__SHIFT 0x12
+#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS__SHIFT 0x13
+#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL2__DIO_TEST_CLK_SEL_MASK 0x0000007FL
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS_MASK 0x00000080L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS_MASK 0x00000100L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS_MASK 0x00000200L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS_MASK 0x00020000L
+#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS_MASK 0x00040000L
+#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS_MASK 0x00080000L
+#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS_MASK 0x00100000L
+#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS_MASK 0x00800000L
+//DIO_CLK_CNTL3
+#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS__SHIFT 0x0
+#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS__SHIFT 0x1
+#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS__SHIFT 0x2
+#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS__SHIFT 0x3
+#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS__SHIFT 0x4
+#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS__SHIFT 0x5
+#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS__SHIFT 0x6
+#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS__SHIFT 0xe
+#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS__SHIFT 0xf
+#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS__SHIFT 0x10
+#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS_MASK 0x00000001L
+#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS_MASK 0x00000002L
+#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS_MASK 0x00000004L
+#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS_MASK 0x00000008L
+#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS_MASK 0x00000010L
+#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS_MASK 0x00000020L
+#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS_MASK 0x00000040L
+#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS_MASK 0x00004000L
+#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS_MASK 0x00008000L
+#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS_MASK 0x00010000L
+//DIO_HDMI_RXSTATUS_TIMER_CONTROL
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L
+//DIO_PSP_INTERRUPT_STATUS
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+//DIO_PSP_INTERRUPT_CLEAR
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR_MASK 0x00000001L
+//DIO_GENERIC_INTERRUPT_MESSAGE
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+//DIO_GENERIC_INTERRUPT_CLEAR
+#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD0_DC_HPD_INT_CONTROL
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD0_DC_HPD_CONTROL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD0_DC_HPD_FAST_TRAIN_CNTL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD0_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+//HPD1_DC_HPD_INT_STATUS
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD1_DC_HPD_INT_CONTROL
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD1_DC_HPD_CONTROL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD1_DC_HPD_FAST_TRAIN_CNTL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD1_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+//HPD2_DC_HPD_INT_STATUS
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD2_DC_HPD_INT_CONTROL
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD2_DC_HPD_CONTROL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD2_DC_HPD_FAST_TRAIN_CNTL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD2_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+//HPD3_DC_HPD_INT_STATUS
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD3_DC_HPD_INT_CONTROL
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD3_DC_HPD_CONTROL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD3_DC_HPD_FAST_TRAIN_CNTL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD3_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+//HPD4_DC_HPD_INT_STATUS
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD4_DC_HPD_INT_CONTROL
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD4_DC_HPD_CONTROL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD4_DC_HPD_FAST_TRAIN_CNTL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD4_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON18_PERFCOUNTER_CNTL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFCOUNTER_CNTL2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFCOUNTER_STATE
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON18_PERFMON_CNTL
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON18_PERFMON_CNTL2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON18_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON18_PERFMON_CVALUE_LOW
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON18_PERFMON_HI
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFMON_LOW
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+//DP_AUX0_AUX_CONTROL
+#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX0_AUX_SW_CONTROL
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX0_AUX_ARB_CONTROL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX0_AUX_INTERRUPT_CONTROL
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX0_AUX_SW_STATUS
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX0_AUX_LS_STATUS
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX0_AUX_SW_DATA
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX0_AUX_LS_DATA
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX0_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_TX_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL1
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX0_AUX_DPHY_TX_STATUS
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_RX_STATUS
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX0_AUX_GTC_SYNC_CONTROL
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX0_AUX_GTC_SYNC_STATUS
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX0_AUX_PHY_WAKE_CNTL
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+//DP_AUX1_AUX_CONTROL
+#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX1_AUX_SW_CONTROL
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX1_AUX_ARB_CONTROL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX1_AUX_INTERRUPT_CONTROL
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX1_AUX_SW_STATUS
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX1_AUX_LS_STATUS
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX1_AUX_SW_DATA
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX1_AUX_LS_DATA
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX1_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_TX_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL1
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX1_AUX_DPHY_TX_STATUS
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_RX_STATUS
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX1_AUX_GTC_SYNC_CONTROL
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX1_AUX_GTC_SYNC_STATUS
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX1_AUX_PHY_WAKE_CNTL
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+//DP_AUX2_AUX_CONTROL
+#define DP_AUX2_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX2_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX2_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX2_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX2_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX2_AUX_SW_CONTROL
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX2_AUX_ARB_CONTROL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX2_AUX_INTERRUPT_CONTROL
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX2_AUX_SW_STATUS
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX2_AUX_LS_STATUS
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX2_AUX_SW_DATA
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX2_AUX_LS_DATA
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX2_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX2_AUX_DPHY_TX_CONTROL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX2_AUX_DPHY_RX_CONTROL0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX2_AUX_DPHY_RX_CONTROL1
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX2_AUX_DPHY_TX_STATUS
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX2_AUX_DPHY_RX_STATUS
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX2_AUX_GTC_SYNC_CONTROL
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX2_AUX_GTC_SYNC_STATUS
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX2_AUX_PHY_WAKE_CNTL
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+//DP_AUX3_AUX_CONTROL
+#define DP_AUX3_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX3_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX3_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX3_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX3_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX3_AUX_SW_CONTROL
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX3_AUX_ARB_CONTROL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX3_AUX_INTERRUPT_CONTROL
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX3_AUX_SW_STATUS
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX3_AUX_LS_STATUS
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX3_AUX_SW_DATA
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX3_AUX_LS_DATA
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX3_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX3_AUX_DPHY_TX_CONTROL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX3_AUX_DPHY_RX_CONTROL0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX3_AUX_DPHY_RX_CONTROL1
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX3_AUX_DPHY_TX_STATUS
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX3_AUX_DPHY_RX_STATUS
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX3_AUX_GTC_SYNC_CONTROL
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX3_AUX_GTC_SYNC_STATUS
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX3_AUX_PHY_WAKE_CNTL
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
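The block above follows the standard convention for these generated headers: each register field is described by a <REG>__<FIELD>__SHIFT define and a matching <REG>__<FIELD>_MASK define, and amdgpu's generic REG_GET_FIELD()/REG_SET_FIELD() helpers are built on exactly this naming. As a minimal, self-contained sketch of a read (the GET_FIELD helper and the raw status value are illustrative only, not part of this patch), extracting the reply byte count from a DP_AUX3_AUX_SW_STATUS readback could look like:

#include <stdint.h>
#include <stdio.h>

/* Field defines copied from the header above. */
#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK   0x1F000000L
#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE_MASK               0x00000001L

/* Illustrative helper in the style of amdgpu's REG_GET_FIELD():
 * mask out the field, then shift it down to bit 0. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
	uint32_t sw_status = 0x04000001;	/* hypothetical readback value */

	if (sw_status & DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE_MASK)
		printf("AUX transaction done, %lu reply bytes\n",
		       (unsigned long)GET_FIELD(sw_status, DP_AUX3_AUX_SW_STATUS,
						 AUX_SW_REPLY_BYTE_COUNT));
	return 0;
}
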
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+//DP_AUX4_AUX_CONTROL
+#define DP_AUX4_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX4_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX4_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX4_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX4_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX4_AUX_SW_CONTROL
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX4_AUX_ARB_CONTROL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX4_AUX_INTERRUPT_CONTROL
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX4_AUX_SW_STATUS
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX4_AUX_LS_STATUS
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX4_AUX_SW_DATA
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX4_AUX_LS_DATA
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX4_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX4_AUX_DPHY_TX_CONTROL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX4_AUX_DPHY_RX_CONTROL0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX4_AUX_DPHY_RX_CONTROL1
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX4_AUX_DPHY_TX_STATUS
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX4_AUX_DPHY_RX_STATUS
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX4_AUX_GTC_SYNC_CONTROL
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX4_AUX_GTC_SYNC_STATUS
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX4_AUX_PHY_WAKE_CNTL
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
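Writes use the same defines in the other direction: clear the field under its mask, then OR in the new value shifted into place, which is the pattern amdgpu's REG_SET_FIELD() implements. A short self-contained sketch using the DP_AUX4_AUX_CONTROL fields defined above (the SET_FIELD helper, the starting register value, and the chosen HPD source are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Field defines copied from the header above. */
#define DP_AUX4_AUX_CONTROL__AUX_EN__SHIFT      0x0
#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
#define DP_AUX4_AUX_CONTROL__AUX_EN_MASK        0x00000001L
#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL_MASK   0x00700000L

/* Illustrative read-modify-write of one field, in the style of
 * amdgpu's REG_SET_FIELD(). */
#define SET_FIELD(orig, reg, field, val)                                  \
	(((orig) & ~reg##__##field##_MASK) |                              \
	 (((val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t aux_cntl = 0;	/* hypothetical current register value */

	/* Hypothetical setup: pick HPD source 2, then enable the channel. */
	aux_cntl = SET_FIELD(aux_cntl, DP_AUX4_AUX_CONTROL, AUX_HPD_SEL, 2);
	aux_cntl = SET_FIELD(aux_cntl, DP_AUX4_AUX_CONTROL, AUX_EN, 1);

	printf("DP_AUX4_AUX_CONTROL = 0x%08lx\n", (unsigned long)aux_cntl);
	return 0;
}
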
+// addressBlock: dce_dc_dio_dig0_dispdec
+//DIG0_DIG_FE_CNTL
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG0_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG0_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG0_DIG_OUTPUT_CRC_CNTL
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG0_DIG_OUTPUT_CRC_RESULT
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG0_DIG_CLOCK_PATTERN
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG0_DIG_TEST_PATTERN
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG0_DIG_RANDOM_PATTERN_SEED
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG0_DIG_FIFO_STATUS
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG0_HDMI_METADATA_PACKET_CONTROL
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_CONTROL
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG0_HDMI_STATUS
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG0_HDMI_AUDIO_PACKET_CONTROL
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG0_HDMI_ACR_PACKET_CONTROL
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG0_HDMI_VBI_PACKET_CONTROL
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG0_HDMI_INFOFRAME_CONTROL0
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG0_HDMI_INFOFRAME_CONTROL1
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG0_HDMI_GC
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG0_AFMT_ISRC1_0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG0_AFMT_ISRC1_1
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_2
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_3
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_4
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_1
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_2
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_3
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_DB_CONTROL
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG0_DME_CONTROL
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG0_AFMT_MPEG_INFO0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG0_AFMT_MPEG_INFO1
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG0_AFMT_GENERIC_HDR
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_1
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_2
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_3
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_4
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_5
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_6
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_7
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_ACR_32_0
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_32_1
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_44_0
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_44_1
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_48_0
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_48_1
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_STATUS_0
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_STATUS_1
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG0_AFMT_AUDIO_INFO0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG0_AFMT_AUDIO_INFO1
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG0_AFMT_60958_0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG0_AFMT_60958_1
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_CONTROL
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG0_AFMT_RAMP_CONTROL0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG0_AFMT_RAMP_CONTROL1
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG0_AFMT_RAMP_CONTROL2
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_RAMP_CONTROL3
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_60958_2
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_RESULT
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG0_AFMT_STATUS
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG0_AFMT_VBI_PACKET_CONTROL
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG0_AFMT_INFOFRAME_CONTROL0
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG0_AFMT_AUDIO_SRC_CONTROL
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG0_DIG_BE_CNTL
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG0_DIG_BE_EN_CNTL
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG0_TMDS_CNTL
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG0_TMDS_CONTROL_CHAR
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG0_TMDS_CONTROL0_FEEDBACK
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG0_TMDS_STEREOSYNC_CTL_SEL
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG0_TMDS_CTL_BITS
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG0_TMDS_DCBALANCER_CONTROL
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG0_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG0_TMDS_CTL0_1_GEN_CNTL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG0_TMDS_CTL2_3_GEN_CNTL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG0_DIG_VERSION
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG0_DIG_LANE_ENABLE
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG0_AFMT_CNTL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG0_AFMT_VBI_PACKET_CONTROL1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+//DIG0_FORCE_DIG_DISABLE
+#define DIG0_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE__SHIFT 0x0
+#define DIG0_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+//DP0_DP_LINK_CNTL
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP0_DP_PIXEL_FORMAT
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP0_DP_MSA_COLORIMETRY
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP0_DP_CONFIG
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP0_DP_VID_STREAM_CNTL
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP0_DP_STEER_FIFO
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP0_DP_MSA_MISC
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP0_DP_VID_TIMING
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP0_DP_VID_N
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP0_DP_VID_M
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP0_DP_LINK_FRAMING_CNTL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP0_DP_HBR2_EYE_PATTERN
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP0_DP_VID_MSA_VBID
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP0_DP_VID_INTERRUPT_CNTL
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP0_DP_DPHY_CNTL
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP0_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP0_DP_DPHY_SYM0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM1
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM2
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP0_DP_DPHY_8B10B_CNTL
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP0_DP_DPHY_PRBS_CNTL
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP0_DP_DPHY_SCRAM_CNTL
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP0_DP_DPHY_CRC_EN
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP0_DP_DPHY_CRC_CNTL
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP0_DP_DPHY_CRC_RESULT
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP0_DP_DPHY_CRC_MST_CNTL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP0_DP_DPHY_CRC_MST_STATUS
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP0_DP_DPHY_FAST_TRAINING
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP0_DP_DPHY_FAST_TRAINING_STATUS
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP0_DP_SEC_CNTL
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP0_DP_SEC_CNTL1
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING1
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING2
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING3
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING4
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP0_DP_SEC_AUD_N
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_N_READBACK
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M_READBACK
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_TIMESTAMP
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP0_DP_SEC_PACKET_CNTL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP0_DP_MSE_RATE_CNTL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP0_DP_MSE_RATE_UPDATE
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP0_DP_MSE_SAT0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP0_DP_MSE_SAT_UPDATE
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP0_DP_MSE_LINK_TIMING
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP0_DP_MSE_MISC_CNTL
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP0_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP0_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP0_DP_MSE_SAT0_STATUS
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1_STATUS
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2_STATUS
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP0_DP_MSA_TIMING_PARAM1
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM2
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM3
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP0_DP_MSA_TIMING_PARAM4
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP0_DP_MSO_CNTL
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP0_DP_MSO_CNTL1
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP0_DP_DSC_CNTL
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP0_DP_SEC_CNTL2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP0_DP_SEC_CNTL3
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL4
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL5
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL6
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP0_DP_SEC_CNTL7
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP0_DP_DB_CNTL
+#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP0_DP_MSA_VBID_MISC
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_METADATA_TRANSMISSION
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP0_DP_DSC_BYTES_PER_PIXEL
+#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//DP0_DP_ALPM_CNTL
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+//DIG1_DIG_FE_CNTL
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG1_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG1_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG1_DIG_OUTPUT_CRC_CNTL
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG1_DIG_OUTPUT_CRC_RESULT
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG1_DIG_CLOCK_PATTERN
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG1_DIG_TEST_PATTERN
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG1_DIG_RANDOM_PATTERN_SEED
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG1_DIG_FIFO_STATUS
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG1_HDMI_METADATA_PACKET_CONTROL
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_CONTROL
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG1_HDMI_STATUS
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG1_HDMI_AUDIO_PACKET_CONTROL
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG1_HDMI_ACR_PACKET_CONTROL
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG1_HDMI_VBI_PACKET_CONTROL
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG1_HDMI_INFOFRAME_CONTROL0
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG1_HDMI_INFOFRAME_CONTROL1
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG1_HDMI_GC
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG1_AFMT_ISRC1_0
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG1_AFMT_ISRC1_1
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_2
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_3
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_4
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_0
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_1
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_2
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_3
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_DB_CONTROL
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG1_DME_CONTROL
+#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG1_AFMT_MPEG_INFO0
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG1_AFMT_MPEG_INFO1
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG1_AFMT_GENERIC_HDR
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_0
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_1
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_2
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_3
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_4
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_5
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_6
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_7
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_ACR_32_0
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_32_1
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_44_0
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_44_1
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_48_0
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_48_1
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_STATUS_0
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_STATUS_1
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG1_AFMT_AUDIO_INFO0
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG1_AFMT_AUDIO_INFO1
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG1_AFMT_60958_0
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG1_AFMT_60958_1
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_CONTROL
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG1_AFMT_RAMP_CONTROL0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG1_AFMT_RAMP_CONTROL1
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG1_AFMT_RAMP_CONTROL2
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_RAMP_CONTROL3
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_60958_2
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_RESULT
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG1_AFMT_STATUS
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG1_AFMT_VBI_PACKET_CONTROL
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG1_AFMT_INFOFRAME_CONTROL0
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG1_AFMT_AUDIO_SRC_CONTROL
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG1_DIG_BE_CNTL
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG1_DIG_BE_EN_CNTL
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG1_TMDS_CNTL
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG1_TMDS_CONTROL_CHAR
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG1_TMDS_CONTROL0_FEEDBACK
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG1_TMDS_STEREOSYNC_CTL_SEL
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG1_TMDS_CTL_BITS
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG1_TMDS_DCBALANCER_CONTROL
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG1_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG1_TMDS_CTL0_1_GEN_CNTL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG1_TMDS_CTL2_3_GEN_CNTL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG1_DIG_VERSION
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG1_DIG_LANE_ENABLE
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG1_AFMT_CNTL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG1_AFMT_VBI_PACKET_CONTROL1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+//DIG1_FORCE_DIG_DISABLE
+#define DIG1_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE__SHIFT 0x0
+#define DIG1_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE_MASK 0x00000001L
+
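Each register above is described by paired defines: a __SHIFT giving the field's bit position and a _MASK covering the contiguous bits the field occupies, so the mask is always the field-width worth of ones shifted to that position. A minimal sketch of that pairing convention, not part of the patch itself, using one DIG1_DIG_BE_CNTL field from above (the _Static_assert is illustrative only):

/* Illustrative only: checks the shift/mask pairing convention used by the
 * generated defines above (7-bit DIG_FE_SOURCE_SELECT field at bit 8).
 */
#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK   0x00007F00L

_Static_assert((0x7FL << DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT) ==
	       DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK,
	       "mask must be the field width shifted to the field position");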
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+//DP1_DP_LINK_CNTL
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP1_DP_PIXEL_FORMAT
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP1_DP_MSA_COLORIMETRY
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP1_DP_CONFIG
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP1_DP_VID_STREAM_CNTL
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP1_DP_STEER_FIFO
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP1_DP_MSA_MISC
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP1_DP_VID_TIMING
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP1_DP_VID_N
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP1_DP_VID_M
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP1_DP_LINK_FRAMING_CNTL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP1_DP_HBR2_EYE_PATTERN
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP1_DP_VID_MSA_VBID
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP1_DP_VID_INTERRUPT_CNTL
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP1_DP_DPHY_CNTL
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP1_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP1_DP_DPHY_SYM0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM1
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM2
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP1_DP_DPHY_8B10B_CNTL
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP1_DP_DPHY_PRBS_CNTL
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP1_DP_DPHY_SCRAM_CNTL
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP1_DP_DPHY_CRC_EN
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP1_DP_DPHY_CRC_CNTL
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP1_DP_DPHY_CRC_RESULT
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP1_DP_DPHY_CRC_MST_CNTL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP1_DP_DPHY_CRC_MST_STATUS
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP1_DP_DPHY_FAST_TRAINING
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP1_DP_DPHY_FAST_TRAINING_STATUS
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP1_DP_SEC_CNTL
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP1_DP_SEC_CNTL1
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING1
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING2
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING3
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING4
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP1_DP_SEC_AUD_N
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_N_READBACK
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M_READBACK
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_TIMESTAMP
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP1_DP_SEC_PACKET_CNTL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP1_DP_MSE_RATE_CNTL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP1_DP_MSE_RATE_UPDATE
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP1_DP_MSE_SAT0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP1_DP_MSE_SAT_UPDATE
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP1_DP_MSE_LINK_TIMING
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP1_DP_MSE_MISC_CNTL
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP1_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP1_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP1_DP_MSE_SAT0_STATUS
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1_STATUS
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2_STATUS
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP1_DP_MSA_TIMING_PARAM1
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM2
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM3
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP1_DP_MSA_TIMING_PARAM4
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP1_DP_MSO_CNTL
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP1_DP_MSO_CNTL1
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP1_DP_DSC_CNTL
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP1_DP_SEC_CNTL2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP1_DP_SEC_CNTL3
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL4
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL5
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL6
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP1_DP_SEC_CNTL7
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP1_DP_DB_CNTL
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP1_DP_MSA_VBID_MISC
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_METADATA_TRANSMISSION
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP1_DP_DSC_BYTES_PER_PIXEL
+#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//DP1_DP_ALPM_CNTL
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+//DIG2_DIG_FE_CNTL
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG2_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG2_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG2_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG2_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG2_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG2_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG2_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG2_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG2_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG2_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG2_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG2_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG2_DIG_OUTPUT_CRC_CNTL
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG2_DIG_OUTPUT_CRC_RESULT
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG2_DIG_CLOCK_PATTERN
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG2_DIG_TEST_PATTERN
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG2_DIG_RANDOM_PATTERN_SEED
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG2_DIG_FIFO_STATUS
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG2_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG2_HDMI_METADATA_PACKET_CONTROL
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_CONTROL
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG2_HDMI_STATUS
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG2_HDMI_AUDIO_PACKET_CONTROL
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG2_HDMI_ACR_PACKET_CONTROL
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG2_HDMI_VBI_PACKET_CONTROL
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG2_HDMI_INFOFRAME_CONTROL0
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG2_HDMI_INFOFRAME_CONTROL1
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG2_HDMI_GC
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG2_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG2_AFMT_ISRC1_0
+#define DIG2_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG2_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG2_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG2_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG2_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG2_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG2_AFMT_ISRC1_1
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG2_AFMT_ISRC1_2
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG2_AFMT_ISRC1_3
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG2_AFMT_ISRC1_4
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG2_AFMT_ISRC2_0
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG2_AFMT_ISRC2_1
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG2_AFMT_ISRC2_2
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG2_AFMT_ISRC2_3
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG2_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_DB_CONTROL
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG2_DME_CONTROL
+#define DIG2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG2_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG2_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG2_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG2_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG2_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG2_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG2_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG2_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG2_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG2_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG2_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG2_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG2_AFMT_MPEG_INFO0
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG2_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG2_AFMT_MPEG_INFO1
+#define DIG2_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG2_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG2_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG2_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG2_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG2_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG2_AFMT_GENERIC_HDR
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_0
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_1
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_2
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_3
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_4
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_5
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_6
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG2_AFMT_GENERIC_7
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG2_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_ACR_32_0
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_32_1
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_44_0
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_44_1
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_48_0
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_48_1
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_STATUS_0
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_STATUS_1
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG2_AFMT_AUDIO_INFO0
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG2_AFMT_AUDIO_INFO1
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG2_AFMT_60958_0
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG2_AFMT_60958_1
+#define DIG2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG2_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG2_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG2_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG2_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG2_AFMT_AUDIO_CRC_CONTROL
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG2_AFMT_RAMP_CONTROL0
+#define DIG2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG2_AFMT_RAMP_CONTROL1
+#define DIG2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG2_AFMT_RAMP_CONTROL2
+#define DIG2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG2_AFMT_RAMP_CONTROL3
+#define DIG2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG2_AFMT_60958_2
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG2_AFMT_AUDIO_CRC_RESULT
+#define DIG2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG2_AFMT_STATUS
+#define DIG2_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG2_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG2_AFMT_AUDIO_PACKET_CONTROL
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG2_AFMT_VBI_PACKET_CONTROL
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG2_AFMT_INFOFRAME_CONTROL0
+#define DIG2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG2_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG2_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG2_AFMT_AUDIO_SRC_CONTROL
+#define DIG2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG2_DIG_BE_CNTL
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG2_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG2_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG2_DIG_BE_EN_CNTL
+#define DIG2_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG2_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG2_TMDS_CNTL
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG2_TMDS_CONTROL_CHAR
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG2_TMDS_CONTROL0_FEEDBACK
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG2_TMDS_STEREOSYNC_CTL_SEL
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG2_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG2_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG2_TMDS_CTL_BITS
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG2_TMDS_DCBALANCER_CONTROL
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG2_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG2_TMDS_CTL0_1_GEN_CNTL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG2_TMDS_CTL2_3_GEN_CNTL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG2_DIG_VERSION
+#define DIG2_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG2_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG2_DIG_LANE_ENABLE
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG2_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG2_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG2_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG2_AFMT_CNTL
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG2_AFMT_VBI_PACKET_CONTROL1
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG2_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+//DIG2_FORCE_DIG_DISABLE
+#define DIG2_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE__SHIFT 0x0
+#define DIG2_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp2_dispdec
+//DP2_DP_LINK_CNTL
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP2_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP2_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP2_DP_PIXEL_FORMAT
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP2_DP_MSA_COLORIMETRY
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP2_DP_CONFIG
+#define DP2_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP2_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP2_DP_VID_STREAM_CNTL
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP2_DP_STEER_FIFO
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP2_DP_MSA_MISC
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP2_DP_VID_TIMING
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP2_DP_VID_N
+#define DP2_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP2_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP2_DP_VID_M
+#define DP2_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP2_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP2_DP_LINK_FRAMING_CNTL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP2_DP_HBR2_EYE_PATTERN
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP2_DP_VID_MSA_VBID
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP2_DP_VID_INTERRUPT_CNTL
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP2_DP_DPHY_CNTL
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP2_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP2_DP_DPHY_SYM0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP2_DP_DPHY_SYM1
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP2_DP_DPHY_SYM2
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP2_DP_DPHY_8B10B_CNTL
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP2_DP_DPHY_PRBS_CNTL
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP2_DP_DPHY_SCRAM_CNTL
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP2_DP_DPHY_CRC_EN
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP2_DP_DPHY_CRC_CNTL
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP2_DP_DPHY_CRC_RESULT
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP2_DP_DPHY_CRC_MST_CNTL
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP2_DP_DPHY_CRC_MST_STATUS
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP2_DP_DPHY_FAST_TRAINING
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP2_DP_DPHY_FAST_TRAINING_STATUS
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP2_DP_SEC_CNTL
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP2_DP_SEC_CNTL1
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING1
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING2
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING3
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING4
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP2_DP_SEC_AUD_N
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_N_READBACK
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_M
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_M_READBACK
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP2_DP_SEC_TIMESTAMP
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP2_DP_SEC_PACKET_CNTL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP2_DP_MSE_RATE_CNTL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP2_DP_MSE_RATE_UPDATE
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP2_DP_MSE_SAT0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP2_DP_MSE_SAT1
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP2_DP_MSE_SAT2
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP2_DP_MSE_SAT_UPDATE
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP2_DP_MSE_LINK_TIMING
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP2_DP_MSE_MISC_CNTL
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP2_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP2_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP2_DP_MSE_SAT0_STATUS
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP2_DP_MSE_SAT1_STATUS
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP2_DP_MSE_SAT2_STATUS
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP2_DP_MSA_TIMING_PARAM1
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP2_DP_MSA_TIMING_PARAM2
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP2_DP_MSA_TIMING_PARAM3
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP2_DP_MSA_TIMING_PARAM4
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP2_DP_MSO_CNTL
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP2_DP_MSO_CNTL1
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP2_DP_DSC_CNTL
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP2_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP2_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP2_DP_SEC_CNTL2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP2_DP_SEC_CNTL3
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL4
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL5
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL6
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP2_DP_SEC_CNTL7
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP2_DP_DB_CNTL
+#define DP2_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP2_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP2_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP2_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP2_DP_MSA_VBID_MISC
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_METADATA_TRANSMISSION
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP2_DP_DSC_BYTES_PER_PIXEL
+#define DP2_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP2_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//DP2_DP_ALPM_CNTL
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+//DIG3_DIG_FE_CNTL
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG3_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG3_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG3_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG3_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG3_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG3_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG3_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG3_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG3_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG3_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG3_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG3_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG3_DIG_OUTPUT_CRC_CNTL
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG3_DIG_OUTPUT_CRC_RESULT
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG3_DIG_CLOCK_PATTERN
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG3_DIG_TEST_PATTERN
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG3_DIG_RANDOM_PATTERN_SEED
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG3_DIG_FIFO_STATUS
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG3_HDMI_METADATA_PACKET_CONTROL
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_CONTROL
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG3_HDMI_STATUS
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG3_HDMI_AUDIO_PACKET_CONTROL
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG3_HDMI_ACR_PACKET_CONTROL
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG3_HDMI_VBI_PACKET_CONTROL
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG3_HDMI_INFOFRAME_CONTROL0
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG3_HDMI_INFOFRAME_CONTROL1
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG3_HDMI_GC
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG3_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG3_AFMT_ISRC1_0
+#define DIG3_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG3_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG3_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG3_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG3_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG3_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG3_AFMT_ISRC1_1
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG3_AFMT_ISRC1_2
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG3_AFMT_ISRC1_3
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG3_AFMT_ISRC1_4
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG3_AFMT_ISRC2_0
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG3_AFMT_ISRC2_1
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG3_AFMT_ISRC2_2
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG3_AFMT_ISRC2_3
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG3_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_DB_CONTROL
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG3_DME_CONTROL
+#define DIG3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG3_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG3_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG3_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG3_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG3_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG3_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG3_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG3_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG3_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG3_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG3_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG3_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG3_AFMT_MPEG_INFO0
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG3_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG3_AFMT_MPEG_INFO1
+#define DIG3_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG3_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG3_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG3_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG3_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG3_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG3_AFMT_GENERIC_HDR
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_0
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_1
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_2
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_3
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_4
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_5
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_6
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG3_AFMT_GENERIC_7
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG3_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_ACR_32_0
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_32_1
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_44_0
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_44_1
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_48_0
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_48_1
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_STATUS_0
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_STATUS_1
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG3_AFMT_AUDIO_INFO0
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG3_AFMT_AUDIO_INFO1
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG3_AFMT_60958_0
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG3_AFMT_60958_1
+#define DIG3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG3_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG3_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG3_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG3_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG3_AFMT_AUDIO_CRC_CONTROL
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG3_AFMT_RAMP_CONTROL0
+#define DIG3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG3_AFMT_RAMP_CONTROL1
+#define DIG3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG3_AFMT_RAMP_CONTROL2
+#define DIG3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG3_AFMT_RAMP_CONTROL3
+#define DIG3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG3_AFMT_60958_2
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG3_AFMT_AUDIO_CRC_RESULT
+#define DIG3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG3_AFMT_STATUS
+#define DIG3_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG3_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG3_AFMT_AUDIO_PACKET_CONTROL
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG3_AFMT_VBI_PACKET_CONTROL
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG3_AFMT_INFOFRAME_CONTROL0
+#define DIG3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG3_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG3_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG3_AFMT_AUDIO_SRC_CONTROL
+#define DIG3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG3_DIG_BE_CNTL
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG3_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG3_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG3_DIG_BE_EN_CNTL
+#define DIG3_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG3_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG3_TMDS_CNTL
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG3_TMDS_CONTROL_CHAR
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG3_TMDS_CONTROL0_FEEDBACK
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG3_TMDS_STEREOSYNC_CTL_SEL
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG3_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG3_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG3_TMDS_CTL_BITS
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG3_TMDS_DCBALANCER_CONTROL
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG3_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG3_TMDS_CTL0_1_GEN_CNTL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG3_TMDS_CTL2_3_GEN_CNTL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG3_DIG_VERSION
+#define DIG3_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG3_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG3_DIG_LANE_ENABLE
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG3_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG3_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG3_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG3_AFMT_CNTL
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG3_AFMT_VBI_PACKET_CONTROL1
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG3_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+//DIG3_FORCE_DIG_DISABLE
+#define DIG3_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE__SHIFT 0x0
+#define DIG3_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE_MASK 0x00000001L
+
+
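+/*
+ * Aside (illustrative sketch, not part of the generated header): each field in
+ * this file is described by a paired __SHIFT and _MASK define, and drivers
+ * consume them by masking and shifting a 32-bit register word. The helper
+ * names below (example_get_field/example_set_field) are hypothetical and exist
+ * only for this sketch; amdgpu itself provides REG_GET_FIELD()/REG_SET_FIELD()
+ * macros built on the same naming convention.
+ *
+ *	#include <stdint.h>
+ *
+ *	// Duplicated from the DIG3_HDMI_GC defines above so the sketch stands alone.
+ *	#define EX_HDMI_GC_AVMUTE__SHIFT 0x0
+ *	#define EX_HDMI_GC_AVMUTE_MASK   0x00000001L
+ *
+ *	// Extract a field from a cached 32-bit register value.
+ *	static inline uint32_t example_get_field(uint32_t reg, uint32_t mask,
+ *						 uint32_t shift)
+ *	{
+ *		return (reg & mask) >> shift;
+ *	}
+ *
+ *	// Replace a field inside a cached 32-bit register value.
+ *	static inline uint32_t example_set_field(uint32_t reg, uint32_t mask,
+ *						 uint32_t shift, uint32_t val)
+ *	{
+ *		return (reg & ~mask) | ((val << shift) & mask);
+ *	}
+ *
+ *	// e.g. assert AVMUTE in a cached DIG3_HDMI_GC value before writing it back:
+ *	//   gc = example_set_field(gc, EX_HDMI_GC_AVMUTE_MASK,
+ *	//			     EX_HDMI_GC_AVMUTE__SHIFT, 1);
+ */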
+// addressBlock: dce_dc_dio_dp3_dispdec
+//DP3_DP_LINK_CNTL
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP3_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP3_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP3_DP_PIXEL_FORMAT
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP3_DP_MSA_COLORIMETRY
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP3_DP_CONFIG
+#define DP3_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP3_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP3_DP_VID_STREAM_CNTL
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP3_DP_STEER_FIFO
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP3_DP_MSA_MISC
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP3_DP_VID_TIMING
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP3_DP_VID_N
+#define DP3_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP3_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP3_DP_VID_M
+#define DP3_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP3_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP3_DP_LINK_FRAMING_CNTL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP3_DP_HBR2_EYE_PATTERN
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP3_DP_VID_MSA_VBID
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP3_DP_VID_INTERRUPT_CNTL
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP3_DP_DPHY_CNTL
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP3_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP3_DP_DPHY_SYM0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP3_DP_DPHY_SYM1
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP3_DP_DPHY_SYM2
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP3_DP_DPHY_8B10B_CNTL
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP3_DP_DPHY_PRBS_CNTL
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP3_DP_DPHY_SCRAM_CNTL
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP3_DP_DPHY_CRC_EN
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP3_DP_DPHY_CRC_CNTL
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP3_DP_DPHY_CRC_RESULT
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP3_DP_DPHY_CRC_MST_CNTL
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP3_DP_DPHY_CRC_MST_STATUS
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP3_DP_DPHY_FAST_TRAINING
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP3_DP_DPHY_FAST_TRAINING_STATUS
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP3_DP_SEC_CNTL
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP3_DP_SEC_CNTL1
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING1
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING2
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING3
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING4
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP3_DP_SEC_AUD_N
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_N_READBACK
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_M
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_M_READBACK
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP3_DP_SEC_TIMESTAMP
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP3_DP_SEC_PACKET_CNTL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP3_DP_MSE_RATE_CNTL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP3_DP_MSE_RATE_UPDATE
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP3_DP_MSE_SAT0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP3_DP_MSE_SAT1
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP3_DP_MSE_SAT2
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP3_DP_MSE_SAT_UPDATE
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP3_DP_MSE_LINK_TIMING
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP3_DP_MSE_MISC_CNTL
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP3_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP3_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP3_DP_MSE_SAT0_STATUS
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP3_DP_MSE_SAT1_STATUS
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP3_DP_MSE_SAT2_STATUS
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP3_DP_MSA_TIMING_PARAM1
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP3_DP_MSA_TIMING_PARAM2
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP3_DP_MSA_TIMING_PARAM3
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP3_DP_MSA_TIMING_PARAM4
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP3_DP_MSO_CNTL
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP3_DP_MSO_CNTL1
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP3_DP_DSC_CNTL
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP3_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP3_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP3_DP_SEC_CNTL2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP3_DP_SEC_CNTL3
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL4
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL5
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL6
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP3_DP_SEC_CNTL7
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP3_DP_DB_CNTL
+#define DP3_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP3_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP3_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP3_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP3_DP_MSA_VBID_MISC
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_METADATA_TRANSMISSION
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP3_DP_DSC_BYTES_PER_PIXEL
+#define DP3_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP3_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//DP3_DP_ALPM_CNTL
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+
+
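(Editorial note, not part of the patch: the DP3_* definitions above follow the header's usual <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pairing. The sketch below shows how such a pair is typically combined to read or update one field of a 32-bit register value. The helpers dp_get_field/dp_set_field and the example field values are illustrative assumptions only, not the driver's own accessors; the snippet assumes it is compiled together with the DP3_DP_MSE_SAT0 definitions above.)

/* Illustrative only: standalone helpers modelled on the SHIFT/MASK pairs
 * in this header; they are not part of the header or of the amdgpu API. */
#include <stdint.h>
#include <stdio.h>

/* Extract a field: mask off its bits, then shift them down to bit 0. */
static inline uint32_t dp_get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Update a field: clear its bits, then OR in the new value, shifted up. */
static inline uint32_t dp_set_field(uint32_t reg_val, uint32_t mask, uint32_t shift,
				    uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

int main(void)
{
	/* Hypothetical readback value of DP3_DP_MSE_SAT0 (example only). */
	uint32_t sat0 = 0;

	/* Program stream source 1 = 3 and slot count 1 = 16 (example values). */
	sat0 = dp_set_field(sat0, DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK,
			    DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT, 3);
	sat0 = dp_set_field(sat0, DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK,
			    DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT, 16);

	/* Prints: SAT0 = 0x10030000, SRC1 = 3 */
	printf("SAT0 = 0x%08X, SRC1 = %u\n", sat0,
	       dp_get_field(sat0, DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK,
			    DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT));
	return 0;
}
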
+// addressBlock: dce_dc_dio_dig4_dispdec
+//DIG4_DIG_FE_CNTL
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG4_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG4_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG4_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG4_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG4_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG4_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG4_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG4_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG4_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG4_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG4_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG4_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG4_DIG_OUTPUT_CRC_CNTL
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG4_DIG_OUTPUT_CRC_RESULT
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG4_DIG_CLOCK_PATTERN
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG4_DIG_TEST_PATTERN
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG4_DIG_RANDOM_PATTERN_SEED
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG4_DIG_FIFO_STATUS
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG4_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG4_HDMI_METADATA_PACKET_CONTROL
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_CONTROL
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG4_HDMI_STATUS
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG4_HDMI_AUDIO_PACKET_CONTROL
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG4_HDMI_ACR_PACKET_CONTROL
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG4_HDMI_VBI_PACKET_CONTROL
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG4_HDMI_INFOFRAME_CONTROL0
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG4_HDMI_INFOFRAME_CONTROL1
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG4_HDMI_GC
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG4_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG4_AFMT_ISRC1_0
+#define DIG4_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG4_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG4_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG4_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG4_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG4_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG4_AFMT_ISRC1_1
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG4_AFMT_ISRC1_2
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG4_AFMT_ISRC1_3
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG4_AFMT_ISRC1_4
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG4_AFMT_ISRC2_0
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG4_AFMT_ISRC2_1
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG4_AFMT_ISRC2_2
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG4_AFMT_ISRC2_3
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG4_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_DB_CONTROL
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG4_DME_CONTROL
+#define DIG4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG4_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG4_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG4_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG4_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG4_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG4_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG4_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG4_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG4_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG4_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG4_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG4_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG4_AFMT_MPEG_INFO0
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG4_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG4_AFMT_MPEG_INFO1
+#define DIG4_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG4_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG4_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG4_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG4_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG4_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG4_AFMT_GENERIC_HDR
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_0
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_1
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_2
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_3
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_4
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_5
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_6
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG4_AFMT_GENERIC_7
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG4_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_ACR_32_0
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_32_1
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_44_0
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_44_1
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_48_0
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_48_1
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_STATUS_0
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_STATUS_1
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG4_AFMT_AUDIO_INFO0
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG4_AFMT_AUDIO_INFO1
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG4_AFMT_60958_0
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG4_AFMT_60958_1
+#define DIG4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG4_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG4_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG4_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG4_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG4_AFMT_AUDIO_CRC_CONTROL
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG4_AFMT_RAMP_CONTROL0
+#define DIG4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG4_AFMT_RAMP_CONTROL1
+#define DIG4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG4_AFMT_RAMP_CONTROL2
+#define DIG4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG4_AFMT_RAMP_CONTROL3
+#define DIG4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG4_AFMT_60958_2
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG4_AFMT_AUDIO_CRC_RESULT
+#define DIG4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG4_AFMT_STATUS
+#define DIG4_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG4_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG4_AFMT_AUDIO_PACKET_CONTROL
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG4_AFMT_VBI_PACKET_CONTROL
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG4_AFMT_INFOFRAME_CONTROL0
+#define DIG4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG4_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG4_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG4_AFMT_AUDIO_SRC_CONTROL
+#define DIG4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG4_DIG_BE_CNTL
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG4_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG4_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG4_DIG_BE_EN_CNTL
+#define DIG4_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG4_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG4_TMDS_CNTL
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG4_TMDS_CONTROL_CHAR
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG4_TMDS_CONTROL0_FEEDBACK
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG4_TMDS_STEREOSYNC_CTL_SEL
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG4_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG4_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG4_TMDS_CTL_BITS
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG4_TMDS_DCBALANCER_CONTROL
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG4_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG4_TMDS_CTL0_1_GEN_CNTL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG4_TMDS_CTL2_3_GEN_CNTL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG4_DIG_VERSION
+#define DIG4_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG4_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG4_DIG_LANE_ENABLE
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG4_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG4_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG4_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG4_AFMT_CNTL
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG4_AFMT_VBI_PACKET_CONTROL1
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG4_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+//DIG4_FORCE_DIG_DISABLE
+#define DIG4_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE__SHIFT 0x0
+#define DIG4_FORCE_DIG_DISABLE__FORCE_DIG_DISABLE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp4_dispdec
+//DP4_DP_LINK_CNTL
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP4_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP4_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP4_DP_PIXEL_FORMAT
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP4_DP_MSA_COLORIMETRY
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP4_DP_CONFIG
+#define DP4_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP4_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP4_DP_VID_STREAM_CNTL
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP4_DP_STEER_FIFO
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP4_DP_MSA_MISC
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP4_DP_VID_TIMING
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP4_DP_VID_N
+#define DP4_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP4_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP4_DP_VID_M
+#define DP4_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP4_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP4_DP_LINK_FRAMING_CNTL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP4_DP_HBR2_EYE_PATTERN
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP4_DP_VID_MSA_VBID
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP4_DP_VID_INTERRUPT_CNTL
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP4_DP_DPHY_CNTL
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP4_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP4_DP_DPHY_SYM0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP4_DP_DPHY_SYM1
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP4_DP_DPHY_SYM2
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP4_DP_DPHY_8B10B_CNTL
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP4_DP_DPHY_PRBS_CNTL
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP4_DP_DPHY_SCRAM_CNTL
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP4_DP_DPHY_CRC_EN
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP4_DP_DPHY_CRC_CNTL
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP4_DP_DPHY_CRC_RESULT
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP4_DP_DPHY_CRC_MST_CNTL
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP4_DP_DPHY_CRC_MST_STATUS
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP4_DP_DPHY_FAST_TRAINING
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP4_DP_DPHY_FAST_TRAINING_STATUS
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP4_DP_SEC_CNTL
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP4_DP_SEC_CNTL1
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING1
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING2
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING3
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING4
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP4_DP_SEC_AUD_N
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_N_READBACK
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_M
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_M_READBACK
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP4_DP_SEC_TIMESTAMP
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP4_DP_SEC_PACKET_CNTL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP4_DP_MSE_RATE_CNTL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP4_DP_MSE_RATE_UPDATE
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP4_DP_MSE_SAT0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP4_DP_MSE_SAT1
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP4_DP_MSE_SAT2
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP4_DP_MSE_SAT_UPDATE
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP4_DP_MSE_LINK_TIMING
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP4_DP_MSE_MISC_CNTL
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP4_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP4_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP4_DP_MSE_SAT0_STATUS
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP4_DP_MSE_SAT1_STATUS
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP4_DP_MSE_SAT2_STATUS
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP4_DP_MSA_TIMING_PARAM1
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP4_DP_MSA_TIMING_PARAM2
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP4_DP_MSA_TIMING_PARAM3
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP4_DP_MSA_TIMING_PARAM4
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP4_DP_MSO_CNTL
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP4_DP_MSO_CNTL1
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP4_DP_DSC_CNTL
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP4_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP4_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP4_DP_SEC_CNTL2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP4_DP_SEC_CNTL3
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL4
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL5
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL6
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP4_DP_SEC_CNTL7
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP4_DP_DB_CNTL
+#define DP4_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP4_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP4_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP4_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP4_DP_MSA_VBID_MISC
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_METADATA_TRANSMISSION
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP4_DP_DSC_BYTES_PER_PIXEL
+#define DP4_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP4_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//DP4_DP_ALPM_CNTL
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+//DC_GENERICA
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0x0000F000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0x000F0000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00F00000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x0F000000L
+//DC_GENERICB
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x0
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x8
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_GENERICB__GENERICB_EN_MASK 0x00000001L
+#define DC_GENERICB__GENERICB_SEL_MASK 0x00000F00L
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL_MASK 0x0000F000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL_MASK 0x000F0000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00F00000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x0F000000L
+//DC_REF_CLK_CNTL
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL__SHIFT 0x0
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x8
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL_MASK 0x00000003L
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x00000300L
+//UNIPHYA_LINK_CNTL
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYA_CHANNEL_XBAR_CNTL
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//UNIPHYB_LINK_CNTL
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYB_CHANNEL_XBAR_CNTL
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//UNIPHYC_LINK_CNTL
+#define UNIPHYC_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYC_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYC_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYC_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYC_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYC_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYC_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYC_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYC_CHANNEL_XBAR_CNTL
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//UNIPHYD_LINK_CNTL
+#define UNIPHYD_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYD_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYD_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYD_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYD_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYD_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYD_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYD_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYD_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYD_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYD_CHANNEL_XBAR_CNTL
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//UNIPHYE_LINK_CNTL
+#define UNIPHYE_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYE_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYE_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYE_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYE_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYE_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYE_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYE_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYE_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYE_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYE_CHANNEL_XBAR_CNTL
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//DCIO_WRCMD_DELAY
+#define DCIO_WRCMD_DELAY__DAC_DELAY__SHIFT 0x4
+#define DCIO_WRCMD_DELAY__DPHY_DELAY__SHIFT 0x8
+#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY__SHIFT 0xc
+#define DCIO_WRCMD_DELAY__ZCAL_DELAY__SHIFT 0x10
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY__SHIFT 0x18
+#define DCIO_WRCMD_DELAY__DAC_DELAY_MASK 0x000000F0L
+#define DCIO_WRCMD_DELAY__DPHY_DELAY_MASK 0x00000F00L
+#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY_MASK 0x0000F000L
+#define DCIO_WRCMD_DELAY__ZCAL_DELAY_MASK 0x000F0000L
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY_MASK 0xFF000000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY__SHIFT 0x11
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY_MASK 0x000E0000L
+//LVTMA_PWRSEQ_CNTL
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN__SHIFT 0x0
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN__SHIFT 0x1
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN__SHIFT 0x8
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD__SHIFT 0x9
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL__SHIFT 0xa
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON__SHIFT 0x10
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD__SHIFT 0x11
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL__SHIFT 0x12
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT 0x18
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD__SHIFT 0x19
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL__SHIFT 0x1a
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN_MASK 0x00000001L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN_MASK 0x00000002L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_MASK 0x00000100L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD_MASK 0x00000200L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL_MASK 0x00000400L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_MASK 0x00010000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD_MASK 0x00020000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL_MASK 0x00040000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_MASK 0x01000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD_MASK 0x02000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL_MASK 0x04000000L
+//LVTMA_PWRSEQ_STATE
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON__SHIFT 0x1
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN__SHIFT 0x2
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON__SHIFT 0x3
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE__SHIFT 0x4
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE__SHIFT 0x8
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON_MASK 0x00000002L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON_MASK 0x00000008L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE_MASK 0x00000010L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE_MASK 0x00000F00L
+//LVTMA_PWRSEQ_REF_DIV
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV__SHIFT 0x0
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV__SHIFT 0x10
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+//LVTMA_PWRSEQ_DELAY1
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1__SHIFT 0x0
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2__SHIFT 0x8
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2__SHIFT 0x18
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1_MASK 0x000000FFL
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2_MASK 0x0000FF00L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1_MASK 0x00FF0000L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2_MASK 0xFF000000L
+//LVTMA_PWRSEQ_DELAY2
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3__SHIFT 0x8
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3_MASK 0x0000FF00L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3_MASK 0x00FF0000L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+//BL_PWM_CNTL
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+//BL_PWM_CNTL2
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN__SHIFT 0x1f
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_MASK 0x80000000L
+//BL_PWM_PERIOD_CNTL
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+//BL_PWM_GRP1_REG_LOCK
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL__SHIFT 0x11
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//DCIO_GSL_GENLK_PAD_CNTL
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x03000000L
+//DCIO_GSL_SWAPLOCK_PAD_CNTL
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x03000000L
+//DCIO_CLOCK_CNTL
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL__SHIFT 0x0
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L
+//DCIO_SOFT_RESET
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET__SHIFT 0x0
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x1
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET__SHIFT 0x2
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x3
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET__SHIFT 0x4
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0x5
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET__SHIFT 0x6
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0x7
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET__SHIFT 0x8
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0x9
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET__SHIFT 0xa
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0xb
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET__SHIFT 0xc
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET__SHIFT 0xd
+#define DCIO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x10
+#define DCIO_SOFT_RESET__DCRXPHY_SOFT_RESET__SHIFT 0x14
+#define DCIO_SOFT_RESET__DPHY_SOFT_RESET__SHIFT 0x18
+#define DCIO_SOFT_RESET__ZCAL_SOFT_RESET__SHIFT 0x1a
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET_MASK 0x00000001L
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x00000002L
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET_MASK 0x00000004L
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x00000008L
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET_MASK 0x00000010L
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x00000020L
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET_MASK 0x00000040L
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x00000080L
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET_MASK 0x00000100L
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x00000200L
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET_MASK 0x00000400L
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x00000800L
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET_MASK 0x00001000L
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET_MASK 0x00002000L
+#define DCIO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x00010000L
+#define DCIO_SOFT_RESET__DCRXPHY_SOFT_RESET_MASK 0x00100000L
+#define DCIO_SOFT_RESET__DPHY_SOFT_RESET_MASK 0x01000000L
+#define DCIO_SOFT_RESET__ZCAL_SOFT_RESET_MASK 0x04000000L
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+//DC_GPIO_GENERIC_MASK
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x5
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x6
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0xa
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0xc
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0xd
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0xe
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x12
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x14
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x15
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x16
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x18
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x1a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x00000001L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x0000000CL
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x00000010L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x000000C0L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x00000C00L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x00001000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x00002000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x0000C000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x00010000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x000C0000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x00100000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x00C00000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x01000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x0C000000L
+//DC_GPIO_GENERIC_A
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x0
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x8
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x10
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x14
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x15
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x16
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x17
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x00000001L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x00000100L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x00010000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x00100000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x00200000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x00400000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x00800000L
+//DC_GPIO_GENERIC_EN
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x0
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x8
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x10
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x14
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x15
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x16
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x17
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x00000001L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x00000100L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x00010000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x00100000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x00200000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x00400000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x00800000L
+//DC_GPIO_GENERIC_Y
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x0
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x8
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x10
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x14
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x15
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x16
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x17
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x00000001L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x00000100L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x00010000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x00100000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x00200000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x00400000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x00800000L
+//DC_GPIO_DDC1_MASK
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC1_A
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC1_EN
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC1_Y
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC2_MASK
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC2_A
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC2_EN
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC2_Y
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC3_MASK
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x10
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x14
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x00100000L
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC3_A
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC3_EN
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC3_Y
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC4_MASK
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x10
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x14
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x00100000L
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC4_A
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC4_EN
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC4_Y
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC5_MASK
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x10
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x14
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x00100000L
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC5_A
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC5_EN
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC5_Y
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDCVGA_MASK
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x10
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x14
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x18
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x00010000L
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x00100000L
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDCVGA_A
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x0
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x8
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x00000100L
+//DC_GPIO_DDCVGA_EN
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x0
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x00000100L
+//DC_GPIO_DDCVGA_Y
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x0
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x8
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x00000100L
+//DC_GPIO_GENLK_MASK
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x0
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x3
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0xb
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0xc
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x13
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x14
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x18
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x1b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x1c
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x00000008L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x00000030L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x00000800L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x00003000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x00010000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x00080000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x00300000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x01000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x08000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x30000000L
+//DC_GPIO_GENLK_A
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x0
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x8
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x10
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x18
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x00000001L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x00000100L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x00010000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x01000000L
+//DC_GPIO_GENLK_EN
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x0
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x8
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x10
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x18
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x00000001L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x00000100L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x00010000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x01000000L
+//DC_GPIO_GENLK_Y
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x0
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x10
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x18
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x00000001L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x00010000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x01000000L
+//DC_GPIO_HPD_MASK
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK__SHIFT 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS__SHIFT 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL__SHIFT 0x3
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK_MASK 0x00000002L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS_MASK 0x00000004L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL_MASK 0x00000008L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L
+//DC_GPIO_HPD_A
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+//DC_GPIO_HPD_EN
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI__SHIFT 0x3
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE__SHIFT 0x4
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0__SHIFT 0x7
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__HPD12_SPARE1__SHIFT 0xa
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x10
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI__SHIFT 0x11
+#define DC_GPIO_HPD_EN__HPD34_SPARE0__SHIFT 0x12
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x14
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI__SHIFT 0x15
+#define DC_GPIO_HPD_EN__HPD34_SPARE1__SHIFT 0x16
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x18
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI__SHIFT 0x19
+#define DC_GPIO_HPD_EN__HPD56_SPARE0__SHIFT 0x1a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI__SHIFT 0x1d
+#define DC_GPIO_HPD_EN__HPD56_SPARE1__SHIFT 0x1e
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI_MASK 0x00000008L
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE_MASK 0x00000010L
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x00000020L
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0_MASK 0x00000080L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L
+#define DC_GPIO_HPD_EN__HPD12_SPARE1_MASK 0x00000400L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x00010000L
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI_MASK 0x00020000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE0_MASK 0x00040000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x00100000L
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI_MASK 0x00200000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE1_MASK 0x00400000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x01000000L
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI_MASK 0x02000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE0_MASK 0x04000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI_MASK 0x20000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE1_MASK 0x40000000L
+//DC_GPIO_HPD_Y
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x10
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x18
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x1a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x1c
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x00010000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x04000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000L
+//DC_GPIO_PWRSEQ_MASK
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x4
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x6
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS__SHIFT 0x14
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV__SHIFT 0x16
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_MASK__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_PD_DIS__SHIFT 0x19
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_MASK__SHIFT 0x1c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV__SHIFT 0x1e
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x000000C0L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV_MASK 0x00C00000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_MASK_MASK 0x01000000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV_MASK 0x04000000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_MASK_MASK 0x10000000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV_MASK 0x40000000L
+//DC_GPIO_PWRSEQ_A
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_VSYNC_IN_A__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_HSYNC_IN_A__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_VSYNC_IN_A_MASK 0x01000000L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_HSYNC_IN_A_MASK 0x80000000L
+//DC_GPIO_PWRSEQ_EN
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VSYNC_IN_EN__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_HSYNC_IN_EN__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x00000002L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VSYNC_IN_EN_MASK 0x01000000L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_HSYNC_IN_EN_MASK 0x80000000L
+//DC_GPIO_PWRSEQ_Y
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_VSYNC_IN__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_HSYNC_IN__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_VSYNC_IN_MASK 0x01000000L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_HSYNC_IN_MASK 0x80000000L
+//DC_GPIO_PAD_STRENGTH_1
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN_MASK 0x00000F00L
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP_MASK 0x0000F000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L
+//DC_GPIO_PAD_STRENGTH_2
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL__SHIFT 0x1e
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH_MASK 0x00000700L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH_MASK 0x00007000L
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL_MASK 0xC0000000L
+//PHY_AUX_CNTL
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN__SHIFT 0x0
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE__SHIFT 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL__SHIFT 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE__SHIFT 0x3
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN__SHIFT 0x4
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN__SHIFT 0x5
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN__SHIFT 0x6
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN__SHIFT 0x7
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN__SHIFT 0x8
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL__SHIFT 0xe
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL__SHIFT 0x10
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL__SHIFT 0x12
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL__SHIFT 0x14
+#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN__SHIFT 0x17
+#define PHY_AUX_CNTL__AUX_CAL_SPARE__SHIFT 0x18
+#define PHY_AUX_CNTL__AUX_CAL_BIASENTST__SHIFT 0x1c
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN_MASK 0x00000001L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE_MASK 0x00000002L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL_MASK 0x00000004L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE_MASK 0x00000008L
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN_MASK 0x00000010L
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN_MASK 0x00000020L
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN_MASK 0x00000040L
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN_MASK 0x00000080L
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN_MASK 0x00000100L
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL_MASK 0x0000C000L
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL_MASK 0x00030000L
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL_MASK 0x000C0000L
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL_MASK 0x00300000L
+#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN_MASK 0x00800000L
+#define PHY_AUX_CNTL__AUX_CAL_SPARE_MASK 0x03000000L
+#define PHY_AUX_CNTL__AUX_CAL_BIASENTST_MASK 0x70000000L
+//DC_GPIO_TX12_EN
+#define DC_GPIO_TX12_EN__DC_GPIO_BLON_TX12_EN__SHIFT 0x0
+#define DC_GPIO_TX12_EN__DC_GPIO_DIGON_TX12_EN__SHIFT 0x1
+#define DC_GPIO_TX12_EN__DC_GPIO_ENA_BL_TX12_EN__SHIFT 0x2
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN__SHIFT 0x3
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN__SHIFT 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN__SHIFT 0x5
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN__SHIFT 0x6
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN__SHIFT 0x7
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN__SHIFT 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN__SHIFT 0x9
+#define DC_GPIO_TX12_EN__DC_GPIO_BLON_TX12_EN_MASK 0x00000001L
+#define DC_GPIO_TX12_EN__DC_GPIO_DIGON_TX12_EN_MASK 0x00000002L
+#define DC_GPIO_TX12_EN__DC_GPIO_ENA_BL_TX12_EN_MASK 0x00000004L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN_MASK 0x00000008L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN_MASK 0x00000010L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN_MASK 0x00000020L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN_MASK 0x00000040L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN_MASK 0x00000080L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN_MASK 0x00000100L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN_MASK 0x00000200L
+//DC_GPIO_AUX_CTRL_0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL_MASK 0x00003000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN_MASK 0x00400000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL_MASK 0x40000000L
+//DC_GPIO_AUX_CTRL_1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0x40000000L
+//DC_GPIO_AUX_CTRL_2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL_MASK 0x40000000L
+//DC_GPIO_RXEN
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN__SHIFT 0x0
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN__SHIFT 0x1
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN__SHIFT 0x2
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN__SHIFT 0x3
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN__SHIFT 0x4
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN__SHIFT 0x5
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN__SHIFT 0x6
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN__SHIFT 0x8
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN__SHIFT 0x9
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN__SHIFT 0xa
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN__SHIFT 0xb
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN__SHIFT 0xc
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN__SHIFT 0xd
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN__SHIFT 0xe
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN__SHIFT 0xf
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN__SHIFT 0x10
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN__SHIFT 0x11
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN__SHIFT 0x12
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN__SHIFT 0x13
+#define DC_GPIO_RXEN__DC_GPIO_BLON_RXEN__SHIFT 0x14
+#define DC_GPIO_RXEN__DC_GPIO_DIGON_RXEN__SHIFT 0x15
+#define DC_GPIO_RXEN__DC_GPIO_ENA_BL_RXEN__SHIFT 0x16
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN_MASK 0x00000001L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN_MASK 0x00000002L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN_MASK 0x00000004L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN_MASK 0x00000008L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN_MASK 0x00000010L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN_MASK 0x00000020L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN_MASK 0x00000040L
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN_MASK 0x00000100L
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN_MASK 0x00000200L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN_MASK 0x00000400L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN_MASK 0x00000800L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN_MASK 0x00001000L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN_MASK 0x00002000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN_MASK 0x00004000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN_MASK 0x00008000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN_MASK 0x00010000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN_MASK 0x00020000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN_MASK 0x00040000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN_MASK 0x00080000L
+#define DC_GPIO_RXEN__DC_GPIO_BLON_RXEN_MASK 0x00100000L
+#define DC_GPIO_RXEN__DC_GPIO_DIGON_RXEN_MASK 0x00200000L
+#define DC_GPIO_RXEN__DC_GPIO_ENA_BL_RXEN_MASK 0x00400000L
+//DC_GPIO_PULLUPEN
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT 0x0
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT 0x1
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT 0x2
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT 0x3
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT 0x4
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT 0x5
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT 0x6
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT 0x8
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT 0x9
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT 0xe
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN__SHIFT 0xf
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN__SHIFT 0x10
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN__SHIFT 0x11
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN__SHIFT 0x12
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN__SHIFT 0x13
+#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN__SHIFT 0x14
+#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN__SHIFT 0x15
+#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN__SHIFT 0x16
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK 0x00000001L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK 0x00000002L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK 0x00000004L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK 0x00000008L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK 0x00000010L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK 0x00000020L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK 0x00000040L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK 0x00000100L
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK 0x00000200L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK 0x00004000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN_MASK 0x00008000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN_MASK 0x00010000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN_MASK 0x00020000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN_MASK 0x00040000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN_MASK 0x00080000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN_MASK 0x00100000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN_MASK 0x00200000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN_MASK 0x00400000L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK 0x03000000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK 0x0C000000L
+//DC_GPIO_AUX_CTRL_4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK 0x00000F00L
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK 0x0000F000L
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK 0x000F0000L
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK 0x00F00000L
+//DC_GPIO_AUX_CTRL_5
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT 0xf
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT 0x17
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK 0x00008000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK 0x00400000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK 0x00800000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK 0x20000000L
+//AUXI2C_PAD_ALL_PWR_OK
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT 0x2
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT 0x3
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT 0x4
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT 0x5
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK 0x00000004L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK 0x00000008L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK 0x00000010L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK 0x00000020L
+
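Editor's note: the pad, HPD and AUX field definitions above follow the usual convention in these generated headers: every field FIELD of register REG gets a REG__FIELD__SHIFT and a REG__FIELD_MASK define. Below is a minimal, standalone C sketch of how such pairs are typically consumed; the helper macros mirror the driver's REG_GET_FIELD/REG_SET_FIELD token-pasting pattern but are re-declared locally (and the register value is a made-up constant), so this is an illustration, not part of the patch itself.

#include <stdint.h>
#include <stdio.h>

/* Generic field helpers in the style of the amdgpu REG_*_FIELD macros:
 * they paste the register and field names onto the __SHIFT/_MASK defines
 * generated in this header. Re-declared here so the sketch stands alone. */
#define FIELD_SHIFT(reg, field)  reg##__##field##__SHIFT
#define FIELD_MASK(reg, field)   reg##__##field##_MASK
#define GET_FIELD(val, reg, field) \
	(((val) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~FIELD_MASK(reg, field)) | \
	 (((uint32_t)(fv) << FIELD_SHIFT(reg, field)) & FIELD_MASK(reg, field)))

/* Field defines copied from the header above so this compiles on its own. */
#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK   0x00000001L
#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK   0x00000100L

int main(void)
{
	uint32_t reg = 0;  /* pretend register readback, not real hardware state */

	/* Read-modify-write of individual fields via the SHIFT/MASK pairs. */
	reg = SET_FIELD(reg, DC_GPIO_HPD_A, DC_GPIO_HPD1_A, 1);
	reg = SET_FIELD(reg, DC_GPIO_HPD_A, DC_GPIO_HPD2_A, 1);

	printf("reg = 0x%08x, HPD1_A = %u\n",
	       (unsigned int)reg,
	       (unsigned int)GET_FIELD(reg, DC_GPIO_HPD_A, DC_GPIO_HPD1_A));
	return 0;
}
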
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+//DSC_TOP0_DSC_TOP_CONTROL
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+//DSCCIF0_DSCCIF_CONFIG0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF0_DSCCIF_CONFIG1
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+//DSCC0_DSCC_CONFIG0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC0_DSCC_CONFIG1
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC0_DSCC_STATUS
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC0_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+//DSCC0_DSCC_PPS_CONFIG0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC0_DSCC_PPS_CONFIG1
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG2
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG3
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG4
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG5
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG6
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC0_DSCC_PPS_CONFIG7
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG8
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG9
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG11
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC0_DSCC_PPS_CONFIG12
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG13
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG14
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG15
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG16
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG17
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG18
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG19
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG20
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG21
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG22
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC0_DSCC_MEM_POWER_CONTROL
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_MAX_ABS_ERROR0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC0_DSCC_MAX_ABS_ERROR1
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON19_PERFCOUNTER_CNTL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFCOUNTER_CNTL2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFCOUNTER_STATE
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON19_PERFMON_CNTL
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON19_PERFMON_CNTL2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON19_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON19_PERFMON_CVALUE_LOW
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON19_PERFMON_HI
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFMON_LOW
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+//DSC_TOP1_DSC_TOP_CONTROL
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP1_DSC_DEBUG_CONTROL
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+//DSCCIF1_DSCCIF_CONFIG0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF1_DSCCIF_CONFIG1
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+//DSCC1_DSCC_CONFIG0
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC1_DSCC_CONFIG1
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC1_DSCC_STATUS
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC1_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+//DSCC1_DSCC_PPS_CONFIG0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC1_DSCC_PPS_CONFIG1
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG2
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG3
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG4
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG5
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG6
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC1_DSCC_PPS_CONFIG7
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG8
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG9
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG11
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC1_DSCC_PPS_CONFIG12
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG13
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG14
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG15
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG16
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG17
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG18
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG19
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG20
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG21
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG22
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC1_DSCC_MEM_POWER_CONTROL
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_MAX_ABS_ERROR0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC1_DSCC_MAX_ABS_ERROR1
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON20_PERFCOUNTER_CNTL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFCOUNTER_CNTL2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFCOUNTER_STATE
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON20_PERFMON_CNTL
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON20_PERFMON_CNTL2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON20_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON20_PERFMON_CVALUE_LOW
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON20_PERFMON_HI
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFMON_LOW
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+//DSC_TOP2_DSC_TOP_CONTROL
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP2_DSC_DEBUG_CONTROL
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+//DSCCIF2_DSCCIF_CONFIG0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF2_DSCCIF_CONFIG1
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+//DSCC2_DSCC_CONFIG0
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC2_DSCC_CONFIG1
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC2_DSCC_STATUS
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC2_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+//DSCC2_DSCC_PPS_CONFIG0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC2_DSCC_PPS_CONFIG1
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG2
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG3
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG4
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG5
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG6
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC2_DSCC_PPS_CONFIG7
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG8
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG9
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG11
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC2_DSCC_PPS_CONFIG12
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG13
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG14
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG15
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG16
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG17
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG18
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG19
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG20
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG21
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG22
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC2_DSCC_MEM_POWER_CONTROL
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_MAX_ABS_ERROR0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC2_DSCC_MAX_ABS_ERROR1
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON21_PERFCOUNTER_CNTL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFCOUNTER_CNTL2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFCOUNTER_STATE
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON21_PERFMON_CNTL
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON21_PERFMON_CNTL2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON21_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON21_PERFMON_CVALUE_LOW
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON21_PERFMON_HI
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFMON_LOW
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+//DSC_TOP3_DSC_TOP_CONTROL
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP3_DSC_DEBUG_CONTROL
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+//DSCCIF3_DSCCIF_CONFIG0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF3_DSCCIF_CONFIG1
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+//DSCC3_DSCC_CONFIG0
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC3_DSCC_CONFIG1
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC3_DSCC_STATUS
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC3_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+//DSCC3_DSCC_PPS_CONFIG0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC3_DSCC_PPS_CONFIG1
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG2
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG3
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG4
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG5
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG6
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC3_DSCC_PPS_CONFIG7
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG8
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG9
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG11
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC3_DSCC_PPS_CONFIG12
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG13
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG14
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG15
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG16
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG17
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG18
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG19
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG20
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG21
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG22
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC3_DSCC_MEM_POWER_CONTROL
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_MAX_ABS_ERROR0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC3_DSCC_MAX_ABS_ERROR1
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON22_PERFCOUNTER_CNTL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFCOUNTER_CNTL2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFCOUNTER_STATE
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON22_PERFMON_CNTL
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON22_PERFMON_CNTL2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON22_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON22_PERFMON_CVALUE_LOW
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON22_PERFMON_HI
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFMON_LOW
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc4_dispdec_dsc_top_dispdec
+//DSC_TOP4_DSC_TOP_CONTROL
+#define DSC_TOP4_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP4_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP4_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP4_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP4_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP4_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP4_DSC_DEBUG_CONTROL
+#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc4_dispdec_dsccif_dispdec
+//DSCCIF4_DSCCIF_CONFIG0
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF4_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF4_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF4_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF4_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF4_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF4_DSCCIF_CONFIG1
+#define DSCCIF4_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF4_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF4_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF4_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc4_dispdec_dscc_dispdec
+//DSCC4_DSCC_CONFIG0
+#define DSCC4_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC4_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC4_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC4_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC4_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC4_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC4_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC4_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC4_DSCC_CONFIG1
+#define DSCC4_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC4_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC4_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC4_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC4_DSCC_STATUS
+#define DSCC4_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC4_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC4_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC4_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+//DSCC4_DSCC_PPS_CONFIG0
+#define DSCC4_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC4_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC4_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC4_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC4_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC4_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC4_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC4_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC4_DSCC_PPS_CONFIG1
+#define DSCC4_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC4_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC4_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC4_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC4_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC4_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC4_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC4_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC4_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC4_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC4_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC4_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC4_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG2
+#define DSCC4_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC4_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG3
+#define DSCC4_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC4_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG4
+#define DSCC4_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC4_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG5
+#define DSCC4_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC4_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG6
+#define DSCC4_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC4_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC4_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC4_DSCC_PPS_CONFIG7
+#define DSCC4_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC4_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG8
+#define DSCC4_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC4_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG9
+#define DSCC4_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC4_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG10
+#define DSCC4_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC4_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC4_DSCC_PPS_CONFIG11
+#define DSCC4_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC4_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC4_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC4_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC4_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC4_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC4_DSCC_PPS_CONFIG12
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC4_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC4_DSCC_PPS_CONFIG13
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC4_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC4_DSCC_PPS_CONFIG14
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC4_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC4_DSCC_PPS_CONFIG15
+#define DSCC4_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC4_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC4_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC4_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG16
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG17
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG18
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG19
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG20
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG21
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC4_DSCC_PPS_CONFIG22
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC4_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC4_DSCC_MEM_POWER_CONTROL
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC4_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC4_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC4_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC4_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC4_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC4_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC4_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC4_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC4_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC4_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC4_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC4_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC4_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC4_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC4_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC4_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC4_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC4_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC4_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC4_DSCC_MAX_ABS_ERROR0
+#define DSCC4_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC4_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC4_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC4_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC4_DSCC_MAX_ABS_ERROR1
+#define DSCC4_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC4_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC4_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC4_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC4_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
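Editor's note, illustrative only and not part of the diff above: every register in these generated blocks is described by a `__SHIFT`/`_MASK` pair per field, and the conventional way such pairs are consumed is to mask-and-shift when packing or extracting a field value. Below is a minimal, self-contained C sketch of that pattern using the DSCC4_DSCC_PPS_CONFIG3 slice-width/height fields defined earlier in this hunk; the helper functions `set_field`/`get_field` and `build_pps_config3` are hypothetical names introduced here for illustration, not symbols from the driver.

/*
 * Illustrative sketch only. The two macros below mirror the definitions
 * added in the diff above (DSCC4_DSCC_PPS_CONFIG3) so the example is
 * self-contained; the helpers are hypothetical, not driver API.
 */
#include <stdint.h>

#define DSCC4_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT   0x0
#define DSCC4_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK     0x0000FFFFL
#define DSCC4_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT  0x10
#define DSCC4_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK    0xFFFF0000L

/* Insert a field value into a 32-bit register word. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask,
                                 uint32_t shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

/* Extract a field value from a 32-bit register word. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
        return (reg & mask) >> shift;
}

/* Example: compose a DSCC_PPS_CONFIG3 value from slice dimensions. */
static uint32_t build_pps_config3(uint32_t slice_width, uint32_t slice_height)
{
        uint32_t reg = 0;

        reg = set_field(reg, DSCC4_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK,
                        DSCC4_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT, slice_width);
        reg = set_field(reg, DSCC4_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK,
                        DSCC4_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT, slice_height);
        return reg;
}

The same pack/extract idiom applies unchanged to every other SHIFT/MASK pair in these blocks, including the multi-field PPS_CONFIG and PERFCOUNTER registers.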
+
+// addressBlock: dce_dc_dsc4_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON23_PERFCOUNTER_CNTL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFCOUNTER_CNTL2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFCOUNTER_STATE
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON23_PERFMON_CNTL
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON23_PERFMON_CNTL2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON23_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON23_PERFMON_CVALUE_LOW
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON23_PERFMON_HI
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFMON_LOW
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dsc_top_dispdec
+//DSC_TOP5_DSC_TOP_CONTROL
+#define DSC_TOP5_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP5_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP5_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP5_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP5_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP5_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP5_DSC_DEBUG_CONTROL
+#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dsccif_dispdec
+//DSCCIF5_DSCCIF_CONFIG0
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF5_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF5_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF5_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF5_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF5_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF5_DSCCIF_CONFIG1
+#define DSCCIF5_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF5_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF5_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF5_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc5_dispdec_dscc_dispdec
+//DSCC5_DSCC_CONFIG0
+#define DSCC5_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC5_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC5_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC5_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC5_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC5_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC5_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC5_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC5_DSCC_CONFIG1
+#define DSCC5_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC5_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC5_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC5_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC5_DSCC_STATUS
+#define DSCC5_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC5_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC5_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC5_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+//DSCC5_DSCC_PPS_CONFIG0
+#define DSCC5_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC5_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC5_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC5_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC5_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC5_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC5_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC5_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC5_DSCC_PPS_CONFIG1
+#define DSCC5_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC5_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC5_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC5_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC5_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC5_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC5_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC5_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC5_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC5_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC5_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC5_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC5_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG2
+#define DSCC5_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC5_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG3
+#define DSCC5_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC5_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG4
+#define DSCC5_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC5_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG5
+#define DSCC5_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC5_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG6
+#define DSCC5_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC5_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC5_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC5_DSCC_PPS_CONFIG7
+#define DSCC5_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC5_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG8
+#define DSCC5_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC5_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG9
+#define DSCC5_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC5_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG10
+#define DSCC5_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC5_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC5_DSCC_PPS_CONFIG11
+#define DSCC5_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC5_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC5_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC5_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC5_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC5_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC5_DSCC_PPS_CONFIG12
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC5_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC5_DSCC_PPS_CONFIG13
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC5_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC5_DSCC_PPS_CONFIG14
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC5_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC5_DSCC_PPS_CONFIG15
+#define DSCC5_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC5_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC5_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC5_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG16
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG17
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG18
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG19
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG20
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG21
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC5_DSCC_PPS_CONFIG22
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC5_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC5_DSCC_MEM_POWER_CONTROL
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC5_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC5_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC5_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC5_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC5_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC5_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC5_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC5_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC5_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC5_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC5_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC5_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC5_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC5_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC5_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC5_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC5_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC5_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC5_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC5_DSCC_MAX_ABS_ERROR0
+#define DSCC5_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC5_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC5_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC5_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC5_DSCC_MAX_ABS_ERROR1
+#define DSCC5_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC5_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC5_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC5_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC5_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
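Editorial note, not part of this patch: the generated __SHIFT/_MASK pairs above are normally consumed by masking and shifting a cached register value. A minimal sketch of that read/modify pattern, using the DSCC5_DSCC_CONFIG0 slice-count field purely as an example (the helpers and the surrounding register read/write are illustrative assumptions):

    #include <stdint.h>

    /* Isolate a field and right-justify it. */
    static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
    {
            return (reg & mask) >> shift;
    }

    /* Clear a field, then merge in a new value clipped to the field width. */
    static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
                                     uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    /* Write encoded value 1 into NUMBER_OF_SLICES_PER_LINE in a cached copy of
     * DSCC5_DSCC_CONFIG0; reading and writing the register itself is omitted. */
    uint32_t dscc5_set_slices_per_line(uint32_t cfg0)
    {
            return field_set(cfg0,
                             DSCC5_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK,
                             DSCC5_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT, 1);
    }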
+
+// addressBlock: dce_dc_dsc5_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON24_PERFCOUNTER_CNTL
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON24_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON24_PERFCOUNTER_CNTL2
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON24_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON24_PERFCOUNTER_STATE
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON24_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON24_PERFMON_CNTL
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON24_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON24_PERFMON_CNTL2
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON24_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON24_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON24_PERFMON_CVALUE_LOW
+#define DC_PERFMON24_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON24_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON24_PERFMON_HI
+#define DC_PERFMON24_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON24_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON24_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON24_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON24_PERFMON_LOW
+#define DC_PERFMON24_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON24_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
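Editorial note, not part of this patch: a hedged sketch of reading back the counted value from the DC_PERFMON24 block above, under the assumption that the 16-bit PERFMON_CVALUE_HI field extends PERFMON_CVALUE_LOW to one wider counter; read_reg() and the mmDC_PERFMON24_* register offsets are hypothetical stand-ins:

    #include <stdint.h>

    /* Hypothetical MMIO read helper and register offsets, for illustration only. */
    extern uint32_t read_reg(uint32_t reg);
    extern const uint32_t mmDC_PERFMON24_PERFMON_CVALUE_LOW,
                          mmDC_PERFMON24_PERFMON_CVALUE_INT_MISC;

    static uint64_t dc_perfmon24_read_cvalue(void)
    {
            uint32_t lo = read_reg(mmDC_PERFMON24_PERFMON_CVALUE_LOW);
            uint32_t misc = read_reg(mmDC_PERFMON24_PERFMON_CVALUE_INT_MISC);
            uint64_t hi = (misc & DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK)
                          >> DC_PERFMON24_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT;

            /* Assumption: the HI field holds bits [47:32] of the counted value. */
            return (hi << 32) | lo;
    }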
+
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+//DMCUB_REGION0_OFFSET
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION0_OFFSET_HIGH
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION1_OFFSET
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION1_OFFSET_HIGH
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION2_OFFSET
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION2_OFFSET_HIGH
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION4_OFFSET
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION4_OFFSET_HIGH
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION5_OFFSET
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION5_OFFSET_HIGH
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION6_OFFSET
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION6_OFFSET_HIGH
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION7_OFFSET
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION7_OFFSET_HIGH
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION0_TOP_ADDRESS
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE_MASK 0x80000000L
+//DMCUB_REGION1_TOP_ADDRESS
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE_MASK 0x80000000L
+//DMCUB_REGION2_TOP_ADDRESS
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE_MASK 0x80000000L
+//DMCUB_REGION4_TOP_ADDRESS
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE_MASK 0x80000000L
+//DMCUB_REGION5_TOP_ADDRESS
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE_MASK 0x80000000L
+//DMCUB_REGION6_TOP_ADDRESS
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE_MASK 0x80000000L
+//DMCUB_REGION7_TOP_ADDRESS
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW0_BASE_ADDRESS
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW1_BASE_ADDRESS
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW2_BASE_ADDRESS
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW3_BASE_ADDRESS
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW4_BASE_ADDRESS
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW5_BASE_ADDRESS
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW6_BASE_ADDRESS
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW7_BASE_ADDRESS
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW0_TOP_ADDRESS
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW1_TOP_ADDRESS
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW2_TOP_ADDRESS
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW3_TOP_ADDRESS
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW4_TOP_ADDRESS
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW5_TOP_ADDRESS
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW6_TOP_ADDRESS
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW7_TOP_ADDRESS
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW0_OFFSET
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW0_OFFSET_HIGH
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW1_OFFSET
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW1_OFFSET_HIGH
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW2_OFFSET
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW2_OFFSET_HIGH
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW3_OFFSET
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW3_OFFSET_HIGH
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW4_OFFSET
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW4_OFFSET_HIGH
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW5_OFFSET
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW5_OFFSET_HIGH
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW6_OFFSET
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW6_OFFSET_HIGH
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW7_OFFSET
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW7_OFFSET_HIGH
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH_MASK 0x0000FFFFL
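Editorial note, not part of this patch: a hedged sketch of how the region-3 code-window fields above might be combined when mapping window 0, assuming the OFFSET/OFFSET_HIGH pair carries the low/high halves of a 64-bit backing address (the OFFSET field covers bits 31:8 only, i.e. 256-byte alignment); write_reg() and the mmDMCUB_* register offsets are hypothetical accessors:

    #include <stdint.h>

    /* Hypothetical MMIO write helper and register offsets, for illustration only. */
    extern void write_reg(uint32_t reg, uint32_t val);
    extern const uint32_t mmDMCUB_REGION3_CW0_OFFSET,
                          mmDMCUB_REGION3_CW0_OFFSET_HIGH,
                          mmDMCUB_REGION3_CW0_BASE_ADDRESS,
                          mmDMCUB_REGION3_CW0_TOP_ADDRESS;

    static void dmcub_map_cw0(uint64_t backing_addr, uint32_t base, uint32_t top)
    {
            /* Assumed split: low half of the address in CW0_OFFSET, upper bits
             * in the 16-bit CW0_OFFSET_HIGH field. */
            write_reg(mmDMCUB_REGION3_CW0_OFFSET,
                      (uint32_t)backing_addr &
                      DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET_MASK);
            write_reg(mmDMCUB_REGION3_CW0_OFFSET_HIGH,
                      (uint32_t)(backing_addr >> 32) &
                      DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH_MASK);
            write_reg(mmDMCUB_REGION3_CW0_BASE_ADDRESS,
                      base & DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS_MASK);
            /* Top address shares its register with the per-window enable bit. */
            write_reg(mmDMCUB_REGION3_CW0_TOP_ADDRESS,
                      (top & DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS_MASK) |
                      DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE_MASK);
    }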
+//DMCUB_INTERRUPT_ENABLE
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN__SHIFT 0x0
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN__SHIFT 0x1
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN__SHIFT 0x2
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN__SHIFT 0x3
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN__SHIFT 0x4
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN__SHIFT 0x5
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN__SHIFT 0x6
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN__SHIFT 0x7
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN__SHIFT 0x8
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN__SHIFT 0x9
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN__SHIFT 0xa
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN__SHIFT 0xb
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN__SHIFT 0xc
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN__SHIFT 0xd
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN_MASK 0x00002000L
+//DMCUB_INTERRUPT_ACK
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK__SHIFT 0x0
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK__SHIFT 0x1
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK__SHIFT 0x2
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK__SHIFT 0x3
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK__SHIFT 0x4
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK__SHIFT 0x5
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK__SHIFT 0x6
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK__SHIFT 0x7
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK__SHIFT 0x8
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK__SHIFT 0x9
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK__SHIFT 0xa
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK__SHIFT 0xb
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK__SHIFT 0xc
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK__SHIFT 0xd
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK_MASK 0x00002000L
+//DMCUB_INTERRUPT_STATUS
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT__SHIFT 0x0
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT__SHIFT 0x1
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT__SHIFT 0x2
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT__SHIFT 0x3
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT__SHIFT 0x4
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT__SHIFT 0x5
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT__SHIFT 0x6
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT__SHIFT 0x7
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT__SHIFT 0x8
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT__SHIFT 0x9
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT__SHIFT 0xa
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT__SHIFT 0xb
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT__SHIFT 0xc
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT__SHIFT 0xd
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT__SHIFT 0xe
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT__SHIFT 0xf
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT_MASK 0x00000001L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT_MASK 0x00000002L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT_MASK 0x00000004L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT_MASK 0x00000008L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT_MASK 0x00000010L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT_MASK 0x00000020L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT_MASK 0x00000040L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT_MASK 0x00000080L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT_MASK 0x00000100L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT_MASK 0x00000200L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT_MASK 0x00000400L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT_MASK 0x00000800L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT_MASK 0x00001000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT_MASK 0x00002000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT_MASK 0x00004000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT_MASK 0x00008000L
+//DMCUB_INTERRUPT_TYPE
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE__SHIFT 0x0
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE__SHIFT 0x1
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE__SHIFT 0x2
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE__SHIFT 0x3
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE__SHIFT 0x4
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE__SHIFT 0x5
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE__SHIFT 0x6
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE__SHIFT 0x7
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE__SHIFT 0x8
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE__SHIFT 0x9
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE__SHIFT 0xa
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE__SHIFT 0xb
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE__SHIFT 0xc
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE__SHIFT 0xd
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE_MASK 0x00000001L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE_MASK 0x00000002L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE_MASK 0x00000004L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE_MASK 0x00000008L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE_MASK 0x00000010L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE_MASK 0x00000020L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE_MASK 0x00000040L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE_MASK 0x00000080L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE_MASK 0x00000100L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE_MASK 0x00000200L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE_MASK 0x00000400L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE_MASK 0x00000800L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE_MASK 0x00001000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE_MASK 0x00002000L
+//DMCUB_EXT_INTERRUPT_STATUS
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID__SHIFT 0x8
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT_MASK 0x000000FFL
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID_MASK 0x0000FF00L
+//DMCUB_EXT_INTERRUPT_CTXID
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID_MASK 0x0FFFFFFFL
+//DMCUB_EXT_INTERRUPT_ACK
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK_MASK 0x00000001L
+//DMCUB_INST_FETCH_FAULT_ADDR
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_DATA_WRITE_FAULT_ADDR
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_SEC_CNTL
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL__SHIFT 0x0
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID__SHIFT 0x8
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET__SHIFT 0x10
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE__SHIFT 0x11
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS__SHIFT 0x14
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS__SHIFT 0x15
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR__SHIFT 0x18
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR__SHIFT 0x19
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL_MASK 0x00000007L
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID_MASK 0x00003F00L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_MASK 0x00010000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE_MASK 0x00020000L
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS_MASK 0x00100000L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS_MASK 0x00200000L
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR_MASK 0x01000000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR_MASK 0x02000000L
+//DMCUB_MEM_CNTL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS__SHIFT 0x0
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT 0x4
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_SPACE__SHIFT 0x8
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_SPACE__SHIFT 0xc
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS_MASK 0x0000000FL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK 0x000000F0L
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_SPACE_MASK 0x00000700L
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_SPACE_MASK 0x00007000L
+//DMCUB_INBOX0_BASE_ADDRESS
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_SIZE
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE__SHIFT 0x0
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_WPTR
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR__SHIFT 0x0
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_RPTR
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR__SHIFT 0x0
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_BASE_ADDRESS
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_SIZE
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE__SHIFT 0x0
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_WPTR
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR__SHIFT 0x0
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_RPTR
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR__SHIFT 0x0
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_BASE_ADDRESS
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_SIZE
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_WPTR
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_RPTR
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_BASE_ADDRESS
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_SIZE
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_WPTR
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_RPTR
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_TRIGGER0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_TRIGGER1
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_WINDOW
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW__SHIFT 0x0
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW_MASK 0x00000007L
+//DMCUB_SCRATCH0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0__SHIFT 0x0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH1
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1__SHIFT 0x0
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH2
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2__SHIFT 0x0
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH3
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3__SHIFT 0x0
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH4
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4__SHIFT 0x0
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH5
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5__SHIFT 0x0
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH6
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6__SHIFT 0x0
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH7
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7__SHIFT 0x0
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH8
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8__SHIFT 0x0
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH9
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9__SHIFT 0x0
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH10
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10__SHIFT 0x0
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH11
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11__SHIFT 0x0
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH12
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12__SHIFT 0x0
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH13
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13__SHIFT 0x0
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH14
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14__SHIFT 0x0
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH15
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15__SHIFT 0x0
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15_MASK 0xFFFFFFFFL
+//DMCUB_CNTL
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY__SHIFT 0x0
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS__SHIFT 0x8
+#define DMCUB_CNTL__DMCUB_ENABLE__SHIFT 0x10
+#define DMCUB_CNTL__DMCUB_SOFT_RESET__SHIFT 0x11
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE__SHIFT 0x12
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN__SHIFT 0x13
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS__SHIFT 0x14
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY_MASK 0x000000FFL
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS_MASK 0x00000100L
+#define DMCUB_CNTL__DMCUB_ENABLE_MASK 0x00010000L
+#define DMCUB_CNTL__DMCUB_SOFT_RESET_MASK 0x00020000L
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE_MASK 0x00040000L
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN_MASK 0x00080000L
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS_MASK 0x00100000L
+//DMCUB_GPINT_DATAIN0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN1
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAOUT
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT__SHIFT 0x0
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT_MASK 0xFFFFFFFFL
+//DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_LS_WAKE_INT_ENABLE
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE__SHIFT 0x0
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE_MASK 0xFFFFFFFFL
+//DMCUB_MEM_PWR_CNTL
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE__SHIFT 0x1
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS__SHIFT 0x3
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE__SHIFT 0x4
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE_MASK 0x00000006L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS_MASK 0x00000008L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE_MASK 0x00000030L
+//DMCUB_TIMER_CURRENT
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT__SHIFT 0x0
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT_MASK 0xFFFFFFFFL
+//DMCUB_PROC_ID
+#define DMCUB_PROC_ID__DMCUB_PROC_ID__SHIFT 0x0
+#define DMCUB_PROC_ID__DMCUB_PROC_ID_MASK 0x0000FFFFL
+
+
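/*
 * Editor's sketch (not part of the patch): a minimal example of how a
 * __SHIFT/_MASK pair from the DMCUB block above is typically consumed.
 * The helper name below is hypothetical; amdgpu's generic
 * REG_GET_FIELD()/REG_SET_FIELD() macros expand to the same bit
 * arithmetic against defines of this form.
 */
static inline unsigned int dmcub_cntl_get_enable(unsigned int reg_val)
{
	/* Extract the DMCUB_ENABLE field (bit 16) from a DMCUB_CNTL read. */
	return (reg_val & DMCUB_CNTL__DMCUB_ENABLE_MASK) >>
	       DMCUB_CNTL__DMCUB_ENABLE__SHIFT;
}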
+// addressBlock: dce_dc_mmhubbub_mcif_wb2_dispdec
+//MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_DUALSIZE_REQ__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK__SHIFT 0x5
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN__SHIFT 0x7
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_P_VMID__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN__SHIFT 0x18
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_DUALSIZE_REQ_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN_MASK 0x00000010L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK_MASK 0x00000020L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN_MASK 0x00000080L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_P_VMID_MASK 0x000F0000L
+#define MCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN_MASK 0x01000000L
+//MCIF_WB2_MCIF_WB_BUFMGR_CUR_LINE_R
+#define MCIF_WB2_MCIF_WB_BUFMGR_CUR_LINE_R__MCIF_WB_BUFMGR_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUFMGR_CUR_LINE_R__MCIF_WB_BUFMGR_CUR_LINE_R_MASK 0x00001FFFL
+//MCIF_WB2_MCIF_WB_BUFMGR_STATUS
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_VCE_INT_STATUS__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS__SHIFT 0x2
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS__SHIFT 0x7
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L__SHIFT 0xc
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF__SHIFT 0x1c
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_VCE_INT_STATUS_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS_MASK 0x00000004L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF_MASK 0x00000070L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS_MASK 0x00000080L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L_MASK 0x01FFF000L
+#define MCIF_WB2_MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF_MASK 0x70000000L
+//MCIF_WB2_MCIF_WB_BUF_PITCH
+#define MCIF_WB2_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH__SHIFT 0x18
+#define MCIF_WB2_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH_MASK 0x0000FF00L
+#define MCIF_WB2_MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH_MASK 0xFF000000L
+//MCIF_WB2_MCIF_WB_BUF_1_STATUS
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW__SHIFT 0x3
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE__SHIFT 0x5
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF__SHIFT 0xc
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FIELD__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE_MASK 0x00000010L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE_MASK 0x000000E0L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FIELD_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB2_MCIF_WB_BUF_1_STATUS2
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_MASK 0x00010000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB2_MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB2_MCIF_WB_BUF_2_STATUS
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW__SHIFT 0x3
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE__SHIFT 0x5
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF__SHIFT 0xc
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FIELD__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE_MASK 0x00000010L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE_MASK 0x000000E0L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FIELD_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB2_MCIF_WB_BUF_2_STATUS2
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_MASK 0x00010000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB2_MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB2_MCIF_WB_BUF_3_STATUS
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW__SHIFT 0x3
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE__SHIFT 0x5
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF__SHIFT 0xc
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FIELD__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE_MASK 0x00000010L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE_MASK 0x000000E0L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FIELD_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB2_MCIF_WB_BUF_3_STATUS2
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_MASK 0x00010000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB2_MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB2_MCIF_WB_BUF_4_STATUS
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW__SHIFT 0x3
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE__SHIFT 0x5
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF__SHIFT 0xc
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FIELD__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_LONG_LINE_ERROR__SHIFT 0x1d
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SHORT_LINE_ERROR__SHIFT 0x1e
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FRAME_LENGTH_ERROR__SHIFT 0x1f
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE_MASK 0x00000010L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE_MASK 0x000000E0L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FIELD_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_LONG_LINE_ERROR_MASK 0x20000000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SHORT_LINE_ERROR_MASK 0x40000000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_FRAME_LENGTH_ERROR_MASK 0x80000000L
+//MCIF_WB2_MCIF_WB_BUF_4_STATUS2
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_CUR_LINE_R__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_CUR_LINE_R_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_MASK 0x00010000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB2_MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB2_MCIF_WB_ARBITRATION_CONTROL
+#define MCIF_WB2_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL__SHIFT 0x16
+#define MCIF_WB2_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE_MASK 0x00000003L
+#define MCIF_WB2_MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL_MASK 0xFFC00000L
+//MCIF_WB2_MCIF_WB_SCLK_CHANGE
+#define MCIF_WB2_MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_SCLK_CHANGE__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_SCLK_CHANGE__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x0000000EL
+//MCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX
+#define MCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MCIF_WB2_MCIF_WB_TEST_DEBUG_INDEX__MCIF_WB_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MCIF_WB2_MCIF_WB_TEST_DEBUG_DATA
+#define MCIF_WB2_MCIF_WB_TEST_DEBUG_DATA__MCIF_WB_TEST_DEBUG_DATA__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_TEST_DEBUG_DATA__MCIF_WB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_OFFSET__MCIF_WB_BUF_1_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_OFFSET__MCIF_WB_BUF_1_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_1_ADDR_C
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_1_ADDR_C_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_C_OFFSET__MCIF_WB_BUF_1_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_C_OFFSET__MCIF_WB_BUF_1_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_OFFSET__MCIF_WB_BUF_2_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_OFFSET__MCIF_WB_BUF_2_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_2_ADDR_C
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_2_ADDR_C_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_C_OFFSET__MCIF_WB_BUF_2_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_C_OFFSET__MCIF_WB_BUF_2_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_OFFSET__MCIF_WB_BUF_3_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_OFFSET__MCIF_WB_BUF_3_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_3_ADDR_C
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_3_ADDR_C_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_C_OFFSET__MCIF_WB_BUF_3_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_C_OFFSET__MCIF_WB_BUF_3_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_OFFSET__MCIF_WB_BUF_4_ADDR_Y_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_OFFSET__MCIF_WB_BUF_4_ADDR_Y_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUF_4_ADDR_C
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB2_MCIF_WB_BUF_4_ADDR_C_OFFSET
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_C_OFFSET__MCIF_WB_BUF_4_ADDR_C_OFFSET__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_C_OFFSET__MCIF_WB_BUF_4_ADDR_C_OFFSET_MASK 0x0003FFFFL
+//MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_EN__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_ACK__SHIFT 0x5
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_EN_MASK 0x00000010L
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_INT_ACK_MASK 0x00000020L
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_MASK 0x00000F00L
+#define MCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE_MASK 0x1FFF0000L
+//MCIF_WB2_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK_MASK 0x0007FFFFL
+//MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0x2
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x4
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000002L
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x00000004L
+#define MCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x00000070L
+//MCIF_WB2_MCIF_WB_WATERMARK
+#define MCIF_WB2_MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK 0x0000FFFFL
+//MCIF_WB2_MCIF_WB_CLOCK_GATER_CONTROL
+#define MCIF_WB2_MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE_MASK 0x00000001L
+//MCIF_WB2_MCIF_WB_WARM_UP_CNTL
+#define MCIF_WB2_MCIF_WB_WARM_UP_CNTL__MCIF_WB_PITCH_SIZE_WARMUP__SHIFT 0x8
+#define MCIF_WB2_MCIF_WB_WARM_UP_CNTL__MCIF_WB_PITCH_SIZE_WARMUP_MASK 0x0000FF00L
+//MCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL
+#define MCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL__DIS_REFRESH_UNDER_NBPREQ__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH__SHIFT 0x1
+#define MCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL__DIS_REFRESH_UNDER_NBPREQ_MASK 0x00000001L
+#define MCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH_MASK 0x00000002L
+//MCIF_WB2_MULTI_LEVEL_QOS_CTRL
+#define MCIF_WB2_MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT__SHIFT 0x0
+#define MCIF_WB2_MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT_MASK 0x003FFFFFL
+//MCIF_WB2_MCIF_WB_BUF_LUMA_SIZE
+#define MCIF_WB2_MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB2_MCIF_WB_BUF_CHROMA_SIZE
+#define MCIF_WB2_MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_1_ADDR_C_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_2_ADDR_C_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_3_ADDR_C_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_4_ADDR_C_HIGH
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB2_MCIF_WB_BUF_1_RESOLUTION
+#define MCIF_WB2_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB2_MCIF_WB_BUF_2_RESOLUTION
+#define MCIF_WB2_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB2_MCIF_WB_BUF_3_RESOLUTION
+#define MCIF_WB2_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB2_MCIF_WB_BUF_4_RESOLUTION
+#define MCIF_WB2_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB2_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB2_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB2_MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+//DCHVM_CTRL0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ__SHIFT 0x0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ_MASK 0x00000001L
+//DCHVM_CTRL1
+#define DCHVM_CTRL1__DUMMY1__SHIFT 0x0
+#define DCHVM_CTRL1__DUMMY1_MASK 0xFFFFFFFFL
+//DCHVM_CLK_CTRL
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS__SHIFT 0x1
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS__SHIFT 0x4
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS__SHIFT 0x5
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE__SHIFT 0x8
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE__SHIFT 0xa
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS_MASK 0x00000002L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS_MASK 0x00000010L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS_MASK 0x00000020L
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE_MASK 0x00000300L
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE_MASK 0x00000C00L
+//DCHVM_MEM_CTRL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS__SHIFT 0x0
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ__SHIFT 0x2
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS__SHIFT 0x4
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS_MASK 0x00000001L
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ_MASK 0x0000000CL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS_MASK 0x00000030L
+//DCHVM_RIOMMU_CTRL0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ__SHIFT 0x0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS__SHIFT 0x1
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ_MASK 0x00000001L
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS_MASK 0x00000002L
+//DCHVM_RIOMMU_STAT0
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE__SHIFT 0x0
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE__SHIFT 0x1
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE_MASK 0x00000001L
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE_MASK 0x00000002L
+
+
+// addressBlock: vga_vgaseqind
+//SEQ00
+#define SEQ00__SEQ_RST0B__SHIFT 0x0
+#define SEQ00__SEQ_RST1B__SHIFT 0x1
+#define SEQ00__SEQ_RST0B_MASK 0x01L
+#define SEQ00__SEQ_RST1B_MASK 0x02L
+//SEQ01
+#define SEQ01__SEQ_DOT8__SHIFT 0x0
+#define SEQ01__SEQ_SHIFT2__SHIFT 0x2
+#define SEQ01__SEQ_PCLKBY2__SHIFT 0x3
+#define SEQ01__SEQ_SHIFT4__SHIFT 0x4
+#define SEQ01__SEQ_MAXBW__SHIFT 0x5
+#define SEQ01__SEQ_DOT8_MASK 0x01L
+#define SEQ01__SEQ_SHIFT2_MASK 0x04L
+#define SEQ01__SEQ_PCLKBY2_MASK 0x08L
+#define SEQ01__SEQ_SHIFT4_MASK 0x10L
+#define SEQ01__SEQ_MAXBW_MASK 0x20L
+//SEQ02
+#define SEQ02__SEQ_MAP0_EN__SHIFT 0x0
+#define SEQ02__SEQ_MAP1_EN__SHIFT 0x1
+#define SEQ02__SEQ_MAP2_EN__SHIFT 0x2
+#define SEQ02__SEQ_MAP3_EN__SHIFT 0x3
+#define SEQ02__SEQ_MAP0_EN_MASK 0x01L
+#define SEQ02__SEQ_MAP1_EN_MASK 0x02L
+#define SEQ02__SEQ_MAP2_EN_MASK 0x04L
+#define SEQ02__SEQ_MAP3_EN_MASK 0x08L
+//SEQ03
+#define SEQ03__SEQ_FONT_B1__SHIFT 0x0
+#define SEQ03__SEQ_FONT_B2__SHIFT 0x1
+#define SEQ03__SEQ_FONT_A1__SHIFT 0x2
+#define SEQ03__SEQ_FONT_A2__SHIFT 0x3
+#define SEQ03__SEQ_FONT_B0__SHIFT 0x4
+#define SEQ03__SEQ_FONT_A0__SHIFT 0x5
+#define SEQ03__SEQ_FONT_B1_MASK 0x01L
+#define SEQ03__SEQ_FONT_B2_MASK 0x02L
+#define SEQ03__SEQ_FONT_A1_MASK 0x04L
+#define SEQ03__SEQ_FONT_A2_MASK 0x08L
+#define SEQ03__SEQ_FONT_B0_MASK 0x10L
+#define SEQ03__SEQ_FONT_A0_MASK 0x20L
+//SEQ04
+#define SEQ04__SEQ_256K__SHIFT 0x1
+#define SEQ04__SEQ_ODDEVEN__SHIFT 0x2
+#define SEQ04__SEQ_CHAIN__SHIFT 0x3
+#define SEQ04__SEQ_256K_MASK 0x02L
+#define SEQ04__SEQ_ODDEVEN_MASK 0x04L
+#define SEQ04__SEQ_CHAIN_MASK 0x08L
+
+
+// addressBlock: vga_vgacrtind
+//CRT00
+#define CRT00__H_TOTAL__SHIFT 0x0
+#define CRT00__H_TOTAL_MASK 0xFFL
+//CRT01
+#define CRT01__H_DISP_END__SHIFT 0x0
+#define CRT01__H_DISP_END_MASK 0xFFL
+//CRT02
+#define CRT02__H_BLANK_START__SHIFT 0x0
+#define CRT02__H_BLANK_START_MASK 0xFFL
+//CRT03
+#define CRT03__H_BLANK_END__SHIFT 0x0
+#define CRT03__H_DE_SKEW__SHIFT 0x5
+#define CRT03__CR10CR11_R_DIS_B__SHIFT 0x7
+#define CRT03__H_BLANK_END_MASK 0x1FL
+#define CRT03__H_DE_SKEW_MASK 0x60L
+#define CRT03__CR10CR11_R_DIS_B_MASK 0x80L
+//CRT04
+#define CRT04__H_SYNC_START__SHIFT 0x0
+#define CRT04__H_SYNC_START_MASK 0xFFL
+//CRT05
+#define CRT05__H_SYNC_END__SHIFT 0x0
+#define CRT05__H_SYNC_SKEW__SHIFT 0x5
+#define CRT05__H_BLANK_END_B5__SHIFT 0x7
+#define CRT05__H_SYNC_END_MASK 0x1FL
+#define CRT05__H_SYNC_SKEW_MASK 0x60L
+#define CRT05__H_BLANK_END_B5_MASK 0x80L
+//CRT06
+#define CRT06__V_TOTAL__SHIFT 0x0
+#define CRT06__V_TOTAL_MASK 0xFFL
+//CRT07
+#define CRT07__V_TOTAL_B8__SHIFT 0x0
+#define CRT07__V_DISP_END_B8__SHIFT 0x1
+#define CRT07__V_SYNC_START_B8__SHIFT 0x2
+#define CRT07__V_BLANK_START_B8__SHIFT 0x3
+#define CRT07__LINE_CMP_B8__SHIFT 0x4
+#define CRT07__V_TOTAL_B9__SHIFT 0x5
+#define CRT07__V_DISP_END_B9__SHIFT 0x6
+#define CRT07__V_SYNC_START_B9__SHIFT 0x7
+#define CRT07__V_TOTAL_B8_MASK 0x01L
+#define CRT07__V_DISP_END_B8_MASK 0x02L
+#define CRT07__V_SYNC_START_B8_MASK 0x04L
+#define CRT07__V_BLANK_START_B8_MASK 0x08L
+#define CRT07__LINE_CMP_B8_MASK 0x10L
+#define CRT07__V_TOTAL_B9_MASK 0x20L
+#define CRT07__V_DISP_END_B9_MASK 0x40L
+#define CRT07__V_SYNC_START_B9_MASK 0x80L
+//CRT08
+#define CRT08__ROW_SCAN_START__SHIFT 0x0
+#define CRT08__BYTE_PAN__SHIFT 0x5
+#define CRT08__ROW_SCAN_START_MASK 0x1FL
+#define CRT08__BYTE_PAN_MASK 0x60L
+//CRT09
+#define CRT09__MAX_ROW_SCAN__SHIFT 0x0
+#define CRT09__V_BLANK_START_B9__SHIFT 0x5
+#define CRT09__LINE_CMP_B9__SHIFT 0x6
+#define CRT09__DOUBLE_CHAR_HEIGHT__SHIFT 0x7
+#define CRT09__MAX_ROW_SCAN_MASK 0x1FL
+#define CRT09__V_BLANK_START_B9_MASK 0x20L
+#define CRT09__LINE_CMP_B9_MASK 0x40L
+#define CRT09__DOUBLE_CHAR_HEIGHT_MASK 0x80L
+//CRT0A
+#define CRT0A__CURSOR_START__SHIFT 0x0
+#define CRT0A__CURSOR_DISABLE__SHIFT 0x5
+#define CRT0A__CURSOR_START_MASK 0x1FL
+#define CRT0A__CURSOR_DISABLE_MASK 0x20L
+//CRT0B
+#define CRT0B__CURSOR_END__SHIFT 0x0
+#define CRT0B__CURSOR_SKEW__SHIFT 0x5
+#define CRT0B__CURSOR_END_MASK 0x1FL
+#define CRT0B__CURSOR_SKEW_MASK 0x60L
+//CRT0C
+#define CRT0C__DISP_START__SHIFT 0x0
+#define CRT0C__DISP_START_MASK 0xFFL
+//CRT0D
+#define CRT0D__DISP_START__SHIFT 0x0
+#define CRT0D__DISP_START_MASK 0xFFL
+//CRT0E
+#define CRT0E__CURSOR_LOC_HI__SHIFT 0x0
+#define CRT0E__CURSOR_LOC_HI_MASK 0xFFL
+//CRT0F
+#define CRT0F__CURSOR_LOC_LO__SHIFT 0x0
+#define CRT0F__CURSOR_LOC_LO_MASK 0xFFL
+//CRT10
+#define CRT10__V_SYNC_START__SHIFT 0x0
+#define CRT10__V_SYNC_START_MASK 0xFFL
+//CRT11
+#define CRT11__V_SYNC_END__SHIFT 0x0
+#define CRT11__V_INTR_CLR__SHIFT 0x4
+#define CRT11__V_INTR_EN__SHIFT 0x5
+#define CRT11__SEL5_REFRESH_CYC__SHIFT 0x6
+#define CRT11__C0T7_WR_ONLY__SHIFT 0x7
+#define CRT11__V_SYNC_END_MASK 0x0FL
+#define CRT11__V_INTR_CLR_MASK 0x10L
+#define CRT11__V_INTR_EN_MASK 0x20L
+#define CRT11__SEL5_REFRESH_CYC_MASK 0x40L
+#define CRT11__C0T7_WR_ONLY_MASK 0x80L
+//CRT12
+#define CRT12__V_DISP_END__SHIFT 0x0
+#define CRT12__V_DISP_END_MASK 0xFFL
+//CRT13
+#define CRT13__DISP_PITCH__SHIFT 0x0
+#define CRT13__DISP_PITCH_MASK 0xFFL
+//CRT14
+#define CRT14__UNDRLN_LOC__SHIFT 0x0
+#define CRT14__ADDR_CNT_BY4__SHIFT 0x5
+#define CRT14__DOUBLE_WORD__SHIFT 0x6
+#define CRT14__UNDRLN_LOC_MASK 0x1FL
+#define CRT14__ADDR_CNT_BY4_MASK 0x20L
+#define CRT14__DOUBLE_WORD_MASK 0x40L
+//CRT15
+#define CRT15__V_BLANK_START__SHIFT 0x0
+#define CRT15__V_BLANK_START_MASK 0xFFL
+//CRT16
+#define CRT16__V_BLANK_END__SHIFT 0x0
+#define CRT16__V_BLANK_END_MASK 0xFFL
+//CRT17
+#define CRT17__RA0_AS_A13B__SHIFT 0x0
+#define CRT17__RA1_AS_A14B__SHIFT 0x1
+#define CRT17__VCOUNT_BY2__SHIFT 0x2
+#define CRT17__ADDR_CNT_BY2__SHIFT 0x3
+#define CRT17__WRAP_A15TOA0__SHIFT 0x5
+#define CRT17__BYTE_MODE__SHIFT 0x6
+#define CRT17__CRTC_SYNC_EN__SHIFT 0x7
+#define CRT17__RA0_AS_A13B_MASK 0x01L
+#define CRT17__RA1_AS_A14B_MASK 0x02L
+#define CRT17__VCOUNT_BY2_MASK 0x04L
+#define CRT17__ADDR_CNT_BY2_MASK 0x08L
+#define CRT17__WRAP_A15TOA0_MASK 0x20L
+#define CRT17__BYTE_MODE_MASK 0x40L
+#define CRT17__CRTC_SYNC_EN_MASK 0x80L
+//CRT18
+#define CRT18__LINE_CMP__SHIFT 0x0
+#define CRT18__LINE_CMP_MASK 0xFFL
+//CRT1E
+#define CRT1E__GRPH_DEC_RD1__SHIFT 0x1
+#define CRT1E__GRPH_DEC_RD1_MASK 0x02L
+//CRT1F
+#define CRT1F__GRPH_DEC_RD0__SHIFT 0x0
+#define CRT1F__GRPH_DEC_RD0_MASK 0xFFL
+//CRT22
+#define CRT22__GRPH_LATCH_DATA__SHIFT 0x0
+#define CRT22__GRPH_LATCH_DATA_MASK 0xFFL
+
+
+// addressBlock: vga_vgagrphind
+//GRA00
+#define GRA00__GRPH_SET_RESET0__SHIFT 0x0
+#define GRA00__GRPH_SET_RESET1__SHIFT 0x1
+#define GRA00__GRPH_SET_RESET2__SHIFT 0x2
+#define GRA00__GRPH_SET_RESET3__SHIFT 0x3
+#define GRA00__GRPH_SET_RESET0_MASK 0x01L
+#define GRA00__GRPH_SET_RESET1_MASK 0x02L
+#define GRA00__GRPH_SET_RESET2_MASK 0x04L
+#define GRA00__GRPH_SET_RESET3_MASK 0x08L
+//GRA01
+#define GRA01__GRPH_SET_RESET_ENA0__SHIFT 0x0
+#define GRA01__GRPH_SET_RESET_ENA1__SHIFT 0x1
+#define GRA01__GRPH_SET_RESET_ENA2__SHIFT 0x2
+#define GRA01__GRPH_SET_RESET_ENA3__SHIFT 0x3
+#define GRA01__GRPH_SET_RESET_ENA0_MASK 0x01L
+#define GRA01__GRPH_SET_RESET_ENA1_MASK 0x02L
+#define GRA01__GRPH_SET_RESET_ENA2_MASK 0x04L
+#define GRA01__GRPH_SET_RESET_ENA3_MASK 0x08L
+//GRA02
+#define GRA02__GRPH_CCOMP__SHIFT 0x0
+#define GRA02__GRPH_CCOMP_MASK 0x0FL
+//GRA03
+#define GRA03__GRPH_ROTATE__SHIFT 0x0
+#define GRA03__GRPH_FN_SEL__SHIFT 0x3
+#define GRA03__GRPH_ROTATE_MASK 0x07L
+#define GRA03__GRPH_FN_SEL_MASK 0x18L
+//GRA04
+#define GRA04__GRPH_RMAP__SHIFT 0x0
+#define GRA04__GRPH_RMAP_MASK 0x03L
+//GRA05
+#define GRA05__GRPH_WRITE_MODE__SHIFT 0x0
+#define GRA05__GRPH_READ1__SHIFT 0x3
+#define GRA05__CGA_ODDEVEN__SHIFT 0x4
+#define GRA05__GRPH_OES__SHIFT 0x5
+#define GRA05__GRPH_PACK__SHIFT 0x6
+#define GRA05__GRPH_WRITE_MODE_MASK 0x03L
+#define GRA05__GRPH_READ1_MASK 0x08L
+#define GRA05__CGA_ODDEVEN_MASK 0x10L
+#define GRA05__GRPH_OES_MASK 0x20L
+#define GRA05__GRPH_PACK_MASK 0x40L
+//GRA06
+#define GRA06__GRPH_GRAPHICS__SHIFT 0x0
+#define GRA06__GRPH_ODDEVEN__SHIFT 0x1
+#define GRA06__GRPH_ADRSEL__SHIFT 0x2
+#define GRA06__GRPH_GRAPHICS_MASK 0x01L
+#define GRA06__GRPH_ODDEVEN_MASK 0x02L
+#define GRA06__GRPH_ADRSEL_MASK 0x0CL
+//GRA07
+#define GRA07__GRPH_XCARE0__SHIFT 0x0
+#define GRA07__GRPH_XCARE1__SHIFT 0x1
+#define GRA07__GRPH_XCARE2__SHIFT 0x2
+#define GRA07__GRPH_XCARE3__SHIFT 0x3
+#define GRA07__GRPH_XCARE0_MASK 0x01L
+#define GRA07__GRPH_XCARE1_MASK 0x02L
+#define GRA07__GRPH_XCARE2_MASK 0x04L
+#define GRA07__GRPH_XCARE3_MASK 0x08L
+//GRA08
+#define GRA08__GRPH_BMSK__SHIFT 0x0
+#define GRA08__GRPH_BMSK_MASK 0xFFL
+
+
+// addressBlock: vga_vgaattrind
+//ATTR00
+#define ATTR00__ATTR_PAL__SHIFT 0x0
+#define ATTR00__ATTR_PAL_MASK 0x3FL
+//ATTR01
+#define ATTR01__ATTR_PAL__SHIFT 0x0
+#define ATTR01__ATTR_PAL_MASK 0x3FL
+//ATTR02
+#define ATTR02__ATTR_PAL__SHIFT 0x0
+#define ATTR02__ATTR_PAL_MASK 0x3FL
+//ATTR03
+#define ATTR03__ATTR_PAL__SHIFT 0x0
+#define ATTR03__ATTR_PAL_MASK 0x3FL
+//ATTR04
+#define ATTR04__ATTR_PAL__SHIFT 0x0
+#define ATTR04__ATTR_PAL_MASK 0x3FL
+//ATTR05
+#define ATTR05__ATTR_PAL__SHIFT 0x0
+#define ATTR05__ATTR_PAL_MASK 0x3FL
+//ATTR06
+#define ATTR06__ATTR_PAL__SHIFT 0x0
+#define ATTR06__ATTR_PAL_MASK 0x3FL
+//ATTR07
+#define ATTR07__ATTR_PAL__SHIFT 0x0
+#define ATTR07__ATTR_PAL_MASK 0x3FL
+//ATTR08
+#define ATTR08__ATTR_PAL__SHIFT 0x0
+#define ATTR08__ATTR_PAL_MASK 0x3FL
+//ATTR09
+#define ATTR09__ATTR_PAL__SHIFT 0x0
+#define ATTR09__ATTR_PAL_MASK 0x3FL
+//ATTR0A
+#define ATTR0A__ATTR_PAL__SHIFT 0x0
+#define ATTR0A__ATTR_PAL_MASK 0x3FL
+//ATTR0B
+#define ATTR0B__ATTR_PAL__SHIFT 0x0
+#define ATTR0B__ATTR_PAL_MASK 0x3FL
+//ATTR0C
+#define ATTR0C__ATTR_PAL__SHIFT 0x0
+#define ATTR0C__ATTR_PAL_MASK 0x3FL
+//ATTR0D
+#define ATTR0D__ATTR_PAL__SHIFT 0x0
+#define ATTR0D__ATTR_PAL_MASK 0x3FL
+//ATTR0E
+#define ATTR0E__ATTR_PAL__SHIFT 0x0
+#define ATTR0E__ATTR_PAL_MASK 0x3FL
+//ATTR0F
+#define ATTR0F__ATTR_PAL__SHIFT 0x0
+#define ATTR0F__ATTR_PAL_MASK 0x3FL
+//ATTR10
+#define ATTR10__ATTR_GRPH_MODE__SHIFT 0x0
+#define ATTR10__ATTR_MONO_EN__SHIFT 0x1
+#define ATTR10__ATTR_LGRPH_EN__SHIFT 0x2
+#define ATTR10__ATTR_BLINK_EN__SHIFT 0x3
+#define ATTR10__ATTR_PANTOPONLY__SHIFT 0x5
+#define ATTR10__ATTR_PCLKBY2__SHIFT 0x6
+#define ATTR10__ATTR_CSEL_EN__SHIFT 0x7
+#define ATTR10__ATTR_GRPH_MODE_MASK 0x01L
+#define ATTR10__ATTR_MONO_EN_MASK 0x02L
+#define ATTR10__ATTR_LGRPH_EN_MASK 0x04L
+#define ATTR10__ATTR_BLINK_EN_MASK 0x08L
+#define ATTR10__ATTR_PANTOPONLY_MASK 0x20L
+#define ATTR10__ATTR_PCLKBY2_MASK 0x40L
+#define ATTR10__ATTR_CSEL_EN_MASK 0x80L
+//ATTR11
+#define ATTR11__ATTR_OVSC__SHIFT 0x0
+#define ATTR11__ATTR_OVSC_MASK 0xFFL
+//ATTR12
+#define ATTR12__ATTR_MAP_EN__SHIFT 0x0
+#define ATTR12__ATTR_VSMUX__SHIFT 0x4
+#define ATTR12__ATTR_MAP_EN_MASK 0x0FL
+#define ATTR12__ATTR_VSMUX_MASK 0x30L
+//ATTR13
+#define ATTR13__ATTR_PPAN__SHIFT 0x0
+#define ATTR13__ATTR_PPAN_MASK 0x0FL
+//ATTR14
+#define ATTR14__ATTR_CSEL1__SHIFT 0x0
+#define ATTR14__ATTR_CSEL2__SHIFT 0x2
+#define ATTR14__ATTR_CSEL1_MASK 0x03L
+#define ATTR14__ATTR_CSEL2_MASK 0x0CL
+
+
+// addressBlock: azendpoint_f2codecind
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x00008000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x0000007FL
+//AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x00000080L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0x0000FC00L
+//AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_PIN_CONTROL_HBR
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xFFFFFFFFL
+
+
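Every register in these Azalia address blocks follows the same convention: each field gets a <REG>__<FIELD>__SHIFT define and a matching <REG>__<FIELD>_MASK define. As a minimal illustrative sketch (not part of this header; the AZ_GET_FIELD/AZ_SET_FIELD helper names are hypothetical, though the amdgpu driver's own REG_GET_FIELD()/REG_SET_FIELD() macros follow the same pattern), a field can be read or updated like this:

/* Hypothetical helpers illustrating the __SHIFT/_MASK pairing used above;
 * token pasting builds e.g. AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK.
 */
#define AZ_GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define AZ_SET_FIELD(val, reg, field, fieldval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((fieldval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: check HBR capability, then set HBR_ENABLE in a pin control value. */
u32 hbr_capable = AZ_GET_FIELD(regval, AZALIA_F2_CODEC_PIN_CONTROL_HBR, HBR_CAPABLE);
if (hbr_capable)
	regval = AZ_SET_FIELD(regval, AZALIA_F2_CODEC_PIN_CONTROL_HBR, HBR_ENABLE, 1);
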
+// addressBlock: azendpoint_descriptorind
+//AUDIO_DESCRIPTOR0
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR1
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR2
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR3
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR4
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR5
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR6
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR7
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR8
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR9
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR10
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR11
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR12
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR13
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+
+
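The fourteen AUDIO_DESCRIPTOR0..AUDIO_DESCRIPTOR13 registers above share one layout: MAX_CHANNELS in bits 2:0, SUPPORTED_FREQUENCIES in bits 15:8, DESCRIPTOR_BYTE_2 in bits 23:16 and SUPPORTED_FREQUENCIES_STEREO in bits 31:24, which appears to mirror the bytes of a CEA-861 short audio descriptor from the sink's EDID/ELD plus a stereo-frequencies byte. A minimal packing sketch (the helper is hypothetical and not part of this header; sad[] is assumed to hold the raw 3-byte descriptor):

/* Hypothetical: pack one short audio descriptor into the AUDIO_DESCRIPTOR0
 * layout defined above. Only the low 3 bits of sad[0] (channel count) fit
 * the MAX_CHANNELS field; sad[1] and sad[2] map to the frequency and
 * byte-2 fields.
 */
static u32 az_pack_audio_descriptor(const u8 sad[3], u8 stereo_freqs)
{
	u32 val = 0;

	val |= ((u32)(sad[0] & 0x07) << AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) &
	       AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK;
	val |= ((u32)sad[1] << AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT) &
	       AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK;
	val |= ((u32)sad[2] << AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) &
	       AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK;
	val |= ((u32)stereo_freqs << AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT) &
	       AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK;

	return val;
}
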
+// addressBlock: azendpoint_sinkinfoind
+//AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0x0000FFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0x0000FFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PORTID0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PORTID1
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xFFFFFFFFL
+//SINK_DESCRIPTION0
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION1
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION2
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION3
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION4
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION5
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION6
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION7
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION8
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION9
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION10
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION11
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION12
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION13
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION14
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION15
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION16
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION17
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0x000000FFL
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+//AZALIA_INPUT_CRC0_CHANNEL0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL1
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL2
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL3
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL4
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL5
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL6
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL7
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+//AZALIA_INPUT_CRC1_CHANNEL0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL1
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL2
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL3
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL4
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL5
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL6
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL7
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azcrc0resultind
+//AZALIA_CRC0_CHANNEL0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL1
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL2
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL3
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL4
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL5
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL6
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL7
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azcrc1resultind
+//AZALIA_CRC1_CHANNEL0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL1
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL2
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL3
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL4
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL5
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL6
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL7
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azinputendpoint_f2codecind
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+
+
+// addressBlock: azroot_f2codecind
+//AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+
+
+// addressBlock: azf0stream0_streamind
+//AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream1_streamind
+//AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream2_streamind
+//AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream3_streamind
+//AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream4_streamind
+//AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream5_streamind
+//AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream6_streamind
+//AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream7_streamind
+//AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream8_streamind
+//AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream9_streamind
+//AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream10_streamind
+//AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream11_streamind
+//AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream12_streamind
+//AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream13_streamind
+//AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream14_streamind
+//AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream15_streamind
+//AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0endpoint0_endpointind
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
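As a usage sketch (assumed, not part of the patch itself): every register block in this header pairs a __SHIFT and a _MASK define per field, so a field is read with (reg & MASK) >> SHIFT and written back with the inverse read-modify-write. The small standalone C example below reuses two of the endpoint-0 UNSOLICITED_RESPONSE defines from above; the GET_FIELD/SET_FIELD helpers and the raw register value are hypothetical illustrations of that shift/mask convention, not the driver's own register accessors.

#include <stdint.h>
#include <stdio.h>

/* Defines copied from the header above (endpoint 0, UNSOLICITED_RESPONSE). */
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L

/* Hypothetical field helpers built only on the __SHIFT/_MASK convention. */
#define GET_FIELD(val, mask, shift)    (((val) & (mask)) >> (shift))
#define SET_FIELD(val, mask, shift, f) (((val) & ~(mask)) | (((uint32_t)(f) << (shift)) & (mask)))

int main(void)
{
	uint32_t reg = 0x00000085;	/* hypothetical raw register value */

	/* Extract the 6-bit TAG field (bits 5:0). */
	uint32_t tag = GET_FIELD(reg,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT);

	/* Set the ENABLE bit (bit 7) without disturbing the other fields. */
	reg = SET_FIELD(reg,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT,
		1);

	printf("TAG=%u, reg=0x%08x\n", tag, reg);
	return 0;
}

The same pattern applies unchanged to every other register in this file, since only the macro names and the bit positions differ between fields.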
+// addressBlock: azf0endpoint1_endpointind
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0endpoint2_endpointind
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0endpoint3_endpointind
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0endpoint4_endpointind
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
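
Editorial note on the convention used throughout these generated definitions: every register field is described by a pair of macros, <REG>__<FIELD>__SHIFT (the bit offset of the field) and <REG>__<FIELD>_MASK (the contiguous bit mask at that offset). Drivers combine the two to read or update individual fields of a 32-bit register value; amdgpu's REG_GET_FIELD()/REG_SET_FIELD() helpers follow this same pattern. A minimal illustrative sketch in C follows — the az_get_field()/az_set_field()/az_enable_hbr() helper names are hypothetical and not part of this header:

#include <stdint.h>

/* Extract a field from a 32-bit register value using its generated MASK/SHIFT pair. */
static inline uint32_t az_get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Replace a field in a 32-bit register value using its generated MASK/SHIFT pair. */
static inline uint32_t az_set_field(uint32_t reg_val, uint32_t mask, uint32_t shift,
				    uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Example: set the HBR_ENABLE bit in an endpoint 4 RESPONSE_HBR register value. */
static inline uint32_t az_enable_hbr(uint32_t response_hbr)
{
	return az_set_field(response_hbr,
			    AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK,
			    AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT,
			    1);
}
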
+// addressBlock: azf0endpoint5_endpointind
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0endpoint6_endpointind
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0endpoint7_endpointind
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
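/*
 * Illustrative sketch, not part of this patch: the paired __SHIFT/_MASK
 * defines above (and below) are intended to be used together to extract or
 * update one bitfield of a 32-bit register value. The helpers and the
 * enable_hbr_if_capable() example are hypothetical and assume the
 * AZF0INPUTENDPOINT1_* defines from this header; they are not amdgpu
 * driver API, and 'reg' stands in for a value read from the
 * AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR indirect register.
 */
#include <stdint.h>

static inline uint32_t az_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg & mask) >> shift;
}

static inline uint32_t az_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				    uint32_t val)
{
	/* Clear the field, then place the new value within the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

static uint32_t enable_hbr_if_capable(uint32_t reg)
{
	/* Set HBR_ENABLE only when the pin reports HBR_CAPABLE. */
	if (az_get_field(reg,
			 AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK,
			 AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT))
		reg = az_set_field(reg,
				   AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK,
				   AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT,
				   1);
	return reg;
}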
+// addressBlock: azf0inputendpoint2_inputendpointind
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint3_inputendpointind
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint4_inputendpointind
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint5_inputendpointind
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+#endif
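
The *__SHIFT / *_MASK pairs in the hunk above are meant to be consumed together: mask a field out of the 32-bit register value and shift it down, or do the reverse when composing a write. Below is a minimal stand-alone sketch of that pattern. The short MC_EN alias, the GET_FIELD/SET_FIELD helpers and the sample register value are illustrative inventions; the shift/mask constants are copied from the AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE definitions above. amdgpu's REG_GET_FIELD()/REG_SET_FIELD() helpers rely on the same REG__FIELD__SHIFT / REG__FIELD_MASK token-pasting convention.

/*
 * Stand-alone sketch (not part of the patch): using a *__SHIFT / *_MASK
 * pair from the hunk above.  Shift/mask values copied verbatim; the MC_EN
 * alias and the sample value 0x31 are made up for readability.
 */
#include <stdint.h>
#include <stdio.h>

#define MC_EN__MULTICHANNEL0_ENABLE__SHIFT      0x0
#define MC_EN__MULTICHANNEL0_ENABLE_MASK        0x00000001L
#define MC_EN__MULTICHANNEL0_CHANNEL_ID__SHIFT  0x4
#define MC_EN__MULTICHANNEL0_CHANNEL_ID_MASK    0x000000F0L
#define MC_EN__MULTICHANNEL1_MUTE__SHIFT        0x9
#define MC_EN__MULTICHANNEL1_MUTE_MASK          0x00000200L

/* Same token-pasting scheme amdgpu's REG_GET_FIELD/REG_SET_FIELD build on. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t v = 0x00000031;	/* pretend MMIO read of the register */

	printf("ch0: enable=%u id=%u\n",
	       (unsigned)GET_FIELD(v, MC_EN, MULTICHANNEL0_ENABLE),
	       (unsigned)GET_FIELD(v, MC_EN, MULTICHANNEL0_CHANNEL_ID));

	v = SET_FIELD(v, MC_EN, MULTICHANNEL1_MUTE, 1);	/* mute ch1 only */
	return 0;
}
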
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index 6efcaa93e17b..bb2c9c7a18df 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -27,6 +27,9 @@
#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl 0x00fe
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl_BASE_IDX 0
+
#define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0
@@ -38,6 +41,14 @@
#define smnPerfMonCtlHi2 0x01d464UL
#define smnPerfMonCtlLo3 0x01d470UL
#define smnPerfMonCtlHi3 0x01d474UL
+#define smnPerfMonCtlLo4 0x01d880UL
+#define smnPerfMonCtlHi4 0x01d884UL
+#define smnPerfMonCtlLo5 0x01d888UL
+#define smnPerfMonCtlHi5 0x01d88cUL
+#define smnPerfMonCtlLo6 0x01d890UL
+#define smnPerfMonCtlHi6 0x01d894UL
+#define smnPerfMonCtlLo7 0x01d898UL
+#define smnPerfMonCtlHi7 0x01d89cUL
#define smnPerfMonCtrLo0 0x01d448UL
#define smnPerfMonCtrHi0 0x01d44cUL
@@ -47,5 +58,20 @@
#define smnPerfMonCtrHi2 0x01d46cUL
#define smnPerfMonCtrLo3 0x01d478UL
#define smnPerfMonCtrHi3 0x01d47cUL
+#define smnPerfMonCtrLo4 0x01d790UL
+#define smnPerfMonCtrHi4 0x01d794UL
+#define smnPerfMonCtrLo5 0x01d798UL
+#define smnPerfMonCtrHi5 0x01d79cUL
+#define smnPerfMonCtrLo6 0x01d7a0UL
+#define smnPerfMonCtrHi6 0x01d7a4UL
+#define smnPerfMonCtrLo7 0x01d7a8UL
+#define smnPerfMonCtrHi7 0x01d7acUL
+
+#define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL
+#define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL
+#define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL
+
+#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL
+#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL
#endif
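
Each perfmon counter added above is split across a Lo/Hi SMN register pair. A stand-alone sketch of stitching one pair into a 64-bit value follows; read_smn() is a stubbed placeholder for whatever SMN read routine the driver actually uses, and the hi/lo/hi re-read only guards against the low word wrapping between the two accesses.

/*
 * Stand-alone sketch (not part of the patch): combining smnPerfMonCtrLo4/Hi4
 * into one 64-bit counter.  Addresses copied from the hunk above; the
 * access hook is a stub.
 */
#include <stdint.h>
#include <stdio.h>

#define smnPerfMonCtrLo4 0x01d790UL
#define smnPerfMonCtrHi4 0x01d794UL

/* Stub standing in for the driver's real SMN read path. */
static uint32_t read_smn(unsigned long addr)
{
	(void)addr;
	return 0;
}

static uint64_t read_perfmon_ctr4(void)
{
	uint32_t hi = read_smn(smnPerfMonCtrHi4);
	uint32_t lo, hi2;

	for (;;) {
		lo  = read_smn(smnPerfMonCtrLo4);
		hi2 = read_smn(smnPerfMonCtrHi4);
		if (hi == hi2)
			break;
		hi = hi2;	/* low word wrapped between reads; retry */
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("ctr4 = %llu\n", (unsigned long long)read_perfmon_ctr4());
	return 0;
}
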
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index 06fac509e987..7afa87c7ff54 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -33,6 +33,14 @@
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
+/* DF_CS_UMC_AON0_DfGlobalCtrl */
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K__SHIFT 0x14
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M__SHIFT 0x15
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G__SHIFT 0x16
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K_MASK 0x00100000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M_MASK 0x00200000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G_MASK 0x00400000L
+
/* DF_CS_AON0_DramBaseAddress0 */
#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
@@ -45,4 +53,12 @@
#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L
#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+//DF_CS_UMC_AON0_DramLimitAddress0
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT 0x0
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT 0xa
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT 0xc
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK 0x000003FFL
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
new file mode 100644
index 000000000000..36ae5b7fe88e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
@@ -0,0 +1,647 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_OFFSET_HEADER
+#define _dpcs_2_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL 0x2936
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL 0x2937
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2 0x2938
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0 0x294f
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1 0x2950
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2 0x2951
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3 0x2952
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 0x2953
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2954
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2955
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 0x2956
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+// base address: 0x0
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR 0x2934
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA 0x2935
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL 0x2a0e
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL 0x2a0f
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2 0x2a10
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0 0x2a27
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1 0x2a28
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2 0x2a29
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3 0x2a2a
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 0x2a2b
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2a2c
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2a2d
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 0x2a2e
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+// base address: 0x360
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR 0x2a0c
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA 0x2a0d
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+// base address: 0x6c0
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x2ad8
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_TX_CNTL 0x2ad9
+#define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x2ada
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL 0x2adb
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x2adc
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+// base address: 0x6c0
+#define mmRDPCSTX2_RDPCSTX_CNTL 0x2ae0
+#define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL 0x2ae1
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 0x2ae2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 0x2ae3
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR 0x2ae4
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA 0x2ae5
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL 0x2ae6
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL 0x2ae7
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 0x2ae8
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_SCRATCH 0x2ae9
+#define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2 0x2af2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3 0x2af3
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4 0x2af4
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5 0x2af5
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6 0x2af6
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7 0x2af7
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8 0x2af8
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9 0x2af9
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10 0x2afa
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11 0x2afb
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12 0x2afc
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13 0x2afd
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14 0x2afe
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0 0x2aff
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1 0x2b00
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2 0x2b01
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3 0x2b02
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 0x2b03
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2b04
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2b05
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 0x2b06
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+// base address: 0x6c0
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR 0x2ae4
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA 0x2ae5
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+// base address: 0xa20
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x2bb0
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_TX_CNTL 0x2bb1
+#define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x2bb2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL 0x2bb3
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x2bb4
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+// base address: 0xa20
+#define mmRDPCSTX3_RDPCSTX_CNTL 0x2bb8
+#define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL 0x2bb9
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL 0x2bba
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA 0x2bbb
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR 0x2bbc
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA 0x2bbd
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL 0x2bbe
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL 0x2bbf
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2 0x2bc0
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_SCRATCH 0x2bc1
+#define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2 0x2bca
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3 0x2bcb
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4 0x2bcc
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5 0x2bcd
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6 0x2bce
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7 0x2bcf
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8 0x2bd0
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9 0x2bd1
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10 0x2bd2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11 0x2bd3
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12 0x2bd4
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13 0x2bd5
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14 0x2bd6
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0 0x2bd7
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1 0x2bd8
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2 0x2bd9
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3 0x2bda
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 0x2bdb
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2bdc
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2bdd
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 0x2bde
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+// base address: 0xa20
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR 0x2bbc
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA 0x2bbd
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+// base address: 0x0
+#define mmDPCSRX_PHY_CNTL 0x2c76
+#define mmDPCSRX_PHY_CNTL_BASE_IDX 2
+#define mmDPCSRX_RX_CLOCK_CNTL 0x2c78
+#define mmDPCSRX_RX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSRX_RX_CNTL 0x2c7a
+#define mmDPCSRX_RX_CNTL_BASE_IDX 2
+#define mmDPCSRX_CBUS_CNTL 0x2c7b
+#define mmDPCSRX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSRX_REG_ERROR_STATUS 0x2c7c
+#define mmDPCSRX_REG_ERROR_STATUS_BASE_IDX 2
+#define mmDPCSRX_RX_ERROR_STATUS 0x2c7d
+#define mmDPCSRX_RX_ERROR_STATUS_BASE_IDX 2
+#define mmDPCSRX_INDEX_MODE_ADDR 0x2c80
+#define mmDPCSRX_INDEX_MODE_ADDR_BASE_IDX 2
+#define mmDPCSRX_INDEX_MODE_DATA 0x2c81
+#define mmDPCSRX_INDEX_MODE_DATA_BASE_IDX 2
+#define mmDPCSRX_DEBUG_CONFIG 0x2c82
+#define mmDPCSRX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+// base address: 0xd80
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x2c88
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_TX_CNTL 0x2c89
+#define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x2c8a
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL 0x2c8b
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x2c8c
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+// base address: 0xd80
+#define mmRDPCSTX4_RDPCSTX_CNTL 0x2c90
+#define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL 0x2c91
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 0x2c92
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 0x2c93
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR 0x2c94
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA 0x2c95
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL 0x2c96
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL 0x2c97
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 0x2c98
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_SCRATCH 0x2c99
+#define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2 0x2ca2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3 0x2ca3
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4 0x2ca4
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5 0x2ca5
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6 0x2ca6
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7 0x2ca7
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8 0x2ca8
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9 0x2ca9
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10 0x2caa
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11 0x2cab
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12 0x2cac
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13 0x2cad
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14 0x2cae
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0 0x2caf
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1 0x2cb0
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2 0x2cb1
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3 0x2cb2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 0x2cb3
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2cb4
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2cb5
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 0x2cb6
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+// base address: 0xd80
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR 0x2c94
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA 0x2c95
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+// base address: 0x10e0
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL 0x2d60
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_TX_CNTL 0x2d61
+#define mmDPCSTX5_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL 0x2d62
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL 0x2d63
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR 0x2d64
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+// base address: 0x10e0
+#define mmRDPCSTX5_RDPCSTX_CNTL 0x2d68
+#define mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL 0x2d69
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL 0x2d6a
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA 0x2d6b
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR 0x2d6c
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA 0x2d6d
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL 0x2d6e
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL 0x2d6f
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2 0x2d70
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_SCRATCH 0x2d71
+#define mmRDPCSTX5_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2 0x2d7a
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3 0x2d7b
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4 0x2d7c
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5 0x2d7d
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6 0x2d7e
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7 0x2d7f
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8 0x2d80
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9 0x2d81
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10 0x2d82
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11 0x2d83
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12 0x2d84
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13 0x2d85
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14 0x2d86
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0 0x2d87
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1 0x2d88
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2 0x2d89
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3 0x2d8a
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL 0x2d8b
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2d8c
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2d8d
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG 0x2d8e
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+// base address: 0x10e0
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR 0x2d6c
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA 0x2d6d
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA_BASE_IDX 2
+
+#endif
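
The new offset header pairs every mm* register with a _BASE_IDX segment index rather than a flat address. A stand-alone sketch of how such a pair is typically resolved follows; the base[] table contents are made up for illustration, while in amdgpu the per-IP segment bases live in adev->reg_offset[] and SOC15_REG_OFFSET() performs the same lookup.

/*
 * Stand-alone sketch (not part of the patch): resolving an mm* offset plus
 * its _BASE_IDX into a register index.  Offset/BASE_IDX copied from the
 * hunk above; the base[] values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL          0x2928
#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2

/* Hypothetical per-segment base table for one IP instance. */
static const uint32_t base[] = { 0x000000, 0x000800, 0x001200, 0x002000 };

#define REG_INDEX(reg) (base[reg##_BASE_IDX] + reg)

int main(void)
{
	/* Dword index; the byte address would be this value * 4. */
	printf("DPCSTX0 TX_CLOCK_CNTL index: 0x%x\n",
	       (unsigned)REG_INDEX(mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL));
	return 0;
}
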
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
new file mode 100644
index 000000000000..25e0569e05c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
@@ -0,0 +1,3912 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_SH_MASK_HEADER
+#define _dpcs_2_0_0_SH_MASK_HEADER
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX1_DPCSTX_DEBUG_CONFIG
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE1
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE2
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE3
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+//DPCSSYS_CR1_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR1_DPCSSYS_CR_DATA
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+//DPCSTX2_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX2_DPCSTX_TX_CNTL
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_CBUS_CNTL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX2_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX2_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX2_DPCSTX_DEBUG_CONFIG
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+//RDPCSTX2_RDPCSTX_CNTL
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX2_RDPCS_TX_CR_ADDR
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_CR_DATA
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX2_RDPCSTX_SCRATCH
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX2_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX2_RDPCSTX_PHY_CNTL2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX2_RDPCSTX_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX2_RDPCSTX_PHY_CNTL13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE1
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE2
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE3
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
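The address blocks above and below consist entirely of generated SHIFT/MASK pairs. As a minimal, self-contained sketch of how one such pair is typically used to update a field of a 32-bit register value (the register contents and the helper name are hypothetical illustrations, not part of this patch), consider:

#include <stdint.h>
#include <stdio.h>

/* One SHIFT/MASK pair copied from the definitions above. */
#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK   0x00080000L

/* Hypothetical helper: clear the field, then OR in the new value shifted into place. */
static uint32_t set_ref_clk_en(uint32_t reg, uint32_t val)
{
	reg &= ~(uint32_t)RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK;
	reg |= (val << RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT) &
	       RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK;
	return reg;
}

int main(void)
{
	uint32_t reg = 0;                 /* hypothetical register contents */
	reg = set_ref_clk_en(reg, 1);
	printf("0x%08x\n", reg);          /* prints 0x00080000 */
	return 0;
}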
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+//DPCSSYS_CR2_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR2_DPCSSYS_CR_DATA
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+//DPCSTX3_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX3_DPCSTX_TX_CNTL
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_CBUS_CNTL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX3_DPCSTX_DEBUG_CONFIG
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+//RDPCSTX3_RDPCSTX_CNTL
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX3_RDPCS_TX_CR_ADDR
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_CR_DATA
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX3_RDPCSTX_SCRATCH
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX3_RDPCSTX_PHY_CNTL2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX3_RDPCSTX_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX3_RDPCSTX_PHY_CNTL13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE1
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE2
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE3
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+//DPCSSYS_CR3_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR3_DPCSSYS_CR_DATA
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
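[Editor's note, not part of the kernel patch: the __SHIFT/_MASK pairs in this header follow the usual convention that a field is decoded as (reg & MASK) >> SHIFT and encoded by the inverse. A minimal sketch, assuming only the DPCSSYS_CR3 address/data macros defined just above are in scope; the field_set()/field_get() helpers and the standalone main() are illustrative stand-ins, not the driver's own accessors.]

/* Sketch only: shows the generic encode/decode pattern behind the
 * __SHIFT/_MASK pairs, using the CR3 indirect address macro as an example.
 * field_set()/field_get() are hypothetical helpers, not kernel APIs.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	/* Clear the field, then merge the shifted value, clipped to the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits and shift them down to bit 0. */
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t cr_addr = 0;	/* local stand-in for a register image, no MMIO access */

	cr_addr = field_set(cr_addr,
			    DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK,
			    DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT,
			    0x1234);
	printf("encoded 0x%08x, decoded 0x%x\n", cr_addr,
	       field_get(cr_addr,
			 DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK,
			 DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT));
	return 0;
}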
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+//DPCSRX_PHY_CNTL
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET__SHIFT 0x0
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET_MASK 0x00000001L
+//DPCSRX_RX_CLOCK_CNTL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS__SHIFT 0x0
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN__SHIFT 0x1
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL__SHIFT 0x2
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON__SHIFT 0x4
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS__SHIFT 0x10
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN__SHIFT 0x11
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON__SHIFT 0x12
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS__SHIFT 0x14
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN__SHIFT 0x15
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON__SHIFT 0x16
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS__SHIFT 0x18
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN__SHIFT 0x19
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON__SHIFT 0x1a
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS__SHIFT 0x1c
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN__SHIFT 0x1d
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON__SHIFT 0x1e
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS_MASK 0x00000001L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN_MASK 0x00000002L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL_MASK 0x0000000CL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON_MASK 0x00000010L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS_MASK 0x00010000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN_MASK 0x00020000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON_MASK 0x00040000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS_MASK 0x00100000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN_MASK 0x00200000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON_MASK 0x00400000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS_MASK 0x01000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN_MASK 0x02000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON_MASK 0x04000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS_MASK 0x10000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN_MASK 0x20000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON_MASK 0x40000000L
+//DPCSRX_RX_CNTL
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN__SHIFT 0x0
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN__SHIFT 0x1
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN__SHIFT 0x2
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN__SHIFT 0x3
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN__SHIFT 0x4
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START__SHIFT 0x5
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY__SHIFT 0x8
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET__SHIFT 0x1f
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK 0x00000001L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK 0x00000002L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN_MASK 0x00000004L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN_MASK 0x00000008L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK 0x00000010L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START_MASK 0x00000020L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY_MASK 0x00000F00L
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET_MASK 0x80000000L
+//DPCSRX_CBUS_CNTL
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x0000000FL
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0x0000FF00L
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSRX_REG_ERROR_STATUS
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+//DPCSRX_RX_ERROR_STATUS
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR__SHIFT 0x0
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR__SHIFT 0x1
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR__SHIFT 0x2
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR__SHIFT 0x3
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR__SHIFT 0x8
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK__SHIFT 0xc
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK 0x00000001L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR_MASK 0x00000002L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR_MASK 0x00000004L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR_MASK 0x00000008L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK 0x00000100L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK_MASK 0x00001000L
+//DPCSRX_INDEX_MODE_ADDR
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x0003FFFFL
+//DPCSRX_INDEX_MODE_DATA
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xFFFFFFFFL
+//DPCSRX_DEBUG_CONFIG
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL__SHIFT 0x6
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL_MASK 0x000000C0L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x00003800L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+//DPCSTX4_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX4_DPCSTX_TX_CNTL
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_CBUS_CNTL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX4_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX4_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX4_DPCSTX_DEBUG_CONFIG
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+//RDPCSTX4_RDPCSTX_CNTL
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX4_RDPCS_TX_CR_ADDR
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_CR_DATA
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX4_RDPCSTX_SCRATCH
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX4_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX4_RDPCSTX_PHY_CNTL2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX4_RDPCSTX_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX4_RDPCSTX_PHY_CNTL13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE1
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE2
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE3
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+//DPCSSYS_CR4_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR4_DPCSSYS_CR_DATA
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+//DPCSTX5_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX5_DPCSTX_TX_CNTL
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX5_DPCSTX_CBUS_CNTL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX5_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX5_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX5_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX5_DPCSTX_DEBUG_CONFIG
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+//RDPCSTX5_RDPCSTX_CNTL
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX5_RDPCS_TX_CR_ADDR
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_CR_DATA
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX5_RDPCSTX_SCRATCH
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX5_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX5_RDPCSTX_PHY_CNTL2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX5_RDPCSTX_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX5_RDPCSTX_PHY_CNTL13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE1
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE2
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE3
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+//DPCSSYS_CR5_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR5_DPCSSYS_CR_DATA
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+#endif
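Each register in the header above is described by per-field __SHIFT/_MASK pairs, the usual convention in these generated amdgpu headers: the mask selects the field's bits within the 32-bit register and the shift gives the field's least-significant bit position. Below is a minimal standalone sketch of how such a pair is typically consumed; the chosen field (the TX FIFO enable bit of RDPCSTX5_RDPCSTX_CNTL) and the bare main() harness are illustrative assumptions only — real driver code reads and writes the register through its own MMIO accessors rather than a local variable.

/*
 * Illustrative only: consuming a __SHIFT/_MASK pair.  The two macros are
 * copied verbatim from the RDPCSTX5_RDPCSTX_CNTL definitions above; reg_val
 * is a plain variable standing in for a value obtained from an MMIO read.
 */
#include <stdint.h>
#include <stdio.h>

#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK   0x00010000L

int main(void)
{
	uint32_t reg_val = 0;

	/* Set the TX FIFO enable field: clear the field, then OR in the new value. */
	reg_val &= ~(uint32_t)RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK;
	reg_val |= (1u << RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT) &
		   RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK;

	/* Extract the field again: mask off the bits, then shift down to bit 0. */
	uint32_t fifo_en =
		(reg_val & RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK) >>
		RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT;

	printf("RDPCS_TX_FIFO_EN = %u\n", (unsigned)fifo_en);
	return 0;
}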
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
new file mode 100644
index 000000000000..945bb6101a9d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
@@ -0,0 +1,565 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_1_0_OFFSET_HEADER
+#define _dpcs_2_1_0_OFFSET_HEADER
+
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL 0x2936
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2937
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SPARE 0x2938
+#define mmRDPCSTX0_RDPCSTX_SPARE_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CNTL2 0x2939
+#define mmRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0 0x294f
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1 0x2950
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2 0x2951
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3 0x2952
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 0x2953
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2954
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2955
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 0x2956
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL15 0x2958
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL15_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL16 0x2959
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL16_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL17 0x295a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL17_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG2 0x295b
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG2_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+// base address: 0x0
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR 0x2934
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA 0x2935
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL 0x2a0e
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a0f
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SPARE 0x2a10
+#define mmRDPCSTX1_RDPCSTX_SPARE_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CNTL2 0x2a11
+#define mmRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0 0x2a27
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1 0x2a28
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2 0x2a29
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3 0x2a2a
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 0x2a2b
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2a2c
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2a2d
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 0x2a2e
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL15 0x2a30
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL15_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL16 0x2a31
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL16_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL17 0x2a32
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL17_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG2 0x2a33
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG2_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+// base address: 0x360
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR 0x2a0c
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA 0x2a0d
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+// base address: 0x6c0
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x2ad8
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_TX_CNTL 0x2ad9
+#define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x2ada
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL 0x2adb
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x2adc
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+// base address: 0x6c0
+#define mmRDPCSTX2_RDPCSTX_CNTL 0x2ae0
+#define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL 0x2ae1
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 0x2ae2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 0x2ae3
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR 0x2ae4
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA 0x2ae5
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL 0x2ae6
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_SCRATCH 0x2ae7
+#define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_SPARE 0x2ae8
+#define mmRDPCSTX2_RDPCSTX_SPARE_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_CNTL2 0x2ae9
+#define mmRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2 0x2af2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3 0x2af3
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4 0x2af4
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5 0x2af5
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6 0x2af6
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7 0x2af7
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8 0x2af8
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9 0x2af9
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10 0x2afa
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11 0x2afb
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12 0x2afc
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13 0x2afd
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14 0x2afe
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0 0x2aff
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1 0x2b00
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2 0x2b01
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3 0x2b02
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 0x2b03
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2b04
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2b05
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 0x2b06
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL15 0x2b08
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL15_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL16 0x2b09
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL16_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL17 0x2b0a
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL17_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG2 0x2b0b
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG2_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+// base address: 0x6c0
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR 0x2ae4
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA 0x2ae5
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+// base address: 0xa20
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x2bb0
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_TX_CNTL 0x2bb1
+#define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x2bb2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL 0x2bb3
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x2bb4
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+// base address: 0xa20
+#define mmRDPCSTX3_RDPCSTX_CNTL 0x2bb8
+#define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL 0x2bb9
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL 0x2bba
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA 0x2bbb
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR 0x2bbc
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA 0x2bbd
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL 0x2bbe
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_SCRATCH 0x2bbf
+#define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_SPARE 0x2bc0
+#define mmRDPCSTX3_RDPCSTX_SPARE_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_CNTL2 0x2bc1
+#define mmRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2 0x2bca
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3 0x2bcb
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4 0x2bcc
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5 0x2bcd
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6 0x2bce
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7 0x2bcf
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8 0x2bd0
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9 0x2bd1
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10 0x2bd2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11 0x2bd3
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12 0x2bd4
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13 0x2bd5
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14 0x2bd6
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0 0x2bd7
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1 0x2bd8
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2 0x2bd9
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3 0x2bda
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 0x2bdb
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2bdc
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2bdd
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 0x2bde
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL15 0x2be0
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL15_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL16 0x2be1
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL16_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL17 0x2be2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL17_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG2 0x2be3
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG2_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+// base address: 0xa20
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR 0x2bbc
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA 0x2bbd
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+// base address: 0xd80
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x2c88
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_TX_CNTL 0x2c89
+#define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x2c8a
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL 0x2c8b
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x2c8c
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+// base address: 0xd80
+#define mmRDPCSTX4_RDPCSTX_CNTL 0x2c90
+#define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL 0x2c91
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 0x2c92
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 0x2c93
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR 0x2c94
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA 0x2c95
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL 0x2c96
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_SCRATCH 0x2c97
+#define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_SPARE 0x2c98
+#define mmRDPCSTX4_RDPCSTX_SPARE_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_CNTL2 0x2c99
+#define mmRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2 0x2ca2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3 0x2ca3
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4 0x2ca4
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5 0x2ca5
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6 0x2ca6
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7 0x2ca7
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8 0x2ca8
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9 0x2ca9
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10 0x2caa
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11 0x2cab
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12 0x2cac
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13 0x2cad
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14 0x2cae
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0 0x2caf
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1 0x2cb0
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2 0x2cb1
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3 0x2cb2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 0x2cb3
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2cb4
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2cb5
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 0x2cb6
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL15 0x2cb8
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL15_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL16 0x2cb9
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL16_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL17 0x2cba
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL17_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG2 0x2cbb
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG2_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+// base address: 0xd80
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR 0x2c94
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA 0x2c95
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX 2
+
+#endif
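
For orientation only (this note is not part of the generated headers): the offset macros above (mm*_ plus their *_BASE_IDX index into the ASIC's base-address table) are conventionally combined with the *__SHIFT/*_MASK pairs from the companion sh_mask header to read-modify-write individual register fields. The sketch below shows that convention in isolation; reg_read()/reg_write() are hypothetical stand-in accessors, the stub bodies exist only so the sketch compiles, the include paths assume the in-tree asic_reg layout, and the BASE_IDX indirection is omitted for brevity.

/*
 * Minimal sketch of the SHIFT/MASK field access convention used by these
 * generated headers.  reg_read()/reg_write() are hypothetical placeholders
 * for the driver's real MMIO accessors.
 */
#include <stdint.h>
#include "asic_reg/dpcs/dpcs_2_1_0_offset.h"
#include "asic_reg/dpcs/dpcs_2_1_0_sh_mask.h"

/* Stub accessors so the sketch is self-contained; a real driver maps these
 * onto its MMIO read/write helpers and applies the BASE_IDX base address. */
static uint32_t reg_read(uint32_t offset)  { (void)offset; return 0; }
static void reg_write(uint32_t offset, uint32_t value) { (void)offset; (void)value; }

/* Program the RDPCS_PHY_DPALT_DISABLE field of RDPCSTX0_RDPCSTX_PHY_CNTL6. */
static void rdpcstx0_set_dpalt_disable(uint32_t disable)
{
	uint32_t val = reg_read(mmRDPCSTX0_RDPCSTX_PHY_CNTL6);

	/* Clear the field, then place the new value at its bit position. */
	val &= ~RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK;
	val |= (disable << RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT) &
	       RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK;

	reg_write(mmRDPCSTX0_RDPCSTX_PHY_CNTL6, val);
}
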
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
new file mode 100644
index 000000000000..6e039f2208e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
@@ -0,0 +1,3430 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_1_0_SH_MASK_HEADER
+#define _dpcs_2_1_0_SH_MASK_HEADER
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x01F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON_MASK 0x00400000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_SPARE
+#define RDPCSTX0_RDPCSTX_SPARE__RDPCSTX_SPARE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SPARE__RDPCSTX_SPARE_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_CNTL2
+#define RDPCSTX0_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL_MASK 0x00000002L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS_MASK 0x1FC00000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL_MASK 0xE0000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS_MASK 0x3F800000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x1C000000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL_MASK 0xE0000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX0_RDPCSTX_PHY_CNTL15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG2
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3_MASK 0x00007000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX1_DPCSTX_DEBUG_CONFIG
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x01F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON_MASK 0x00400000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_SPARE
+#define RDPCSTX1_RDPCSTX_SPARE__RDPCSTX_SPARE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SPARE__RDPCSTX_SPARE_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_CNTL2
+#define RDPCSTX1_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL_MASK 0x00000002L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS_MASK 0x1FC00000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL_MASK 0xE0000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE1
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE2
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS_MASK 0x3F800000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE3
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x1C000000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL_MASK 0xE0000000L
+//RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX1_RDPCSTX_PHY_CNTL15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG2
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3_MASK 0x00007000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+//DPCSSYS_CR1_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR1_DPCSSYS_CR_DATA
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+//DPCSTX2_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX2_DPCSTX_TX_CNTL
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_CBUS_CNTL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX2_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX2_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX2_DPCSTX_DEBUG_CONFIG
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+//RDPCSTX2_RDPCSTX_CNTL
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x01F00000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON_MASK 0x00400000L
+//RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX2_RDPCS_TX_CR_ADDR
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_CR_DATA
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX2_RDPCSTX_SCRATCH
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX2_RDPCSTX_SPARE
+#define RDPCSTX2_RDPCSTX_SPARE__RDPCSTX_SPARE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_SPARE__RDPCSTX_SPARE_MASK 0xFFFFFFFFL
+//RDPCSTX2_RDPCSTX_CNTL2
+#define RDPCSTX2_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL_MASK 0x00000002L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX2_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX2_RDPCSTX_PHY_CNTL2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX2_RDPCSTX_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX2_RDPCSTX_PHY_CNTL13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS_MASK 0x1FC00000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL_MASK 0xE0000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE1
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE2
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS_MASK 0x3F800000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE3
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x1C000000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL_MASK 0xE0000000L
+//RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX2_RDPCSTX_PHY_CNTL15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX2_RDPCSTX_DEBUG_CONFIG2
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0_MASK 0x00000007L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2_MASK 0x00000700L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3_MASK 0x00007000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+//DPCSSYS_CR2_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR2_DPCSSYS_CR_DATA
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+//DPCSTX3_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX3_DPCSTX_TX_CNTL
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_CBUS_CNTL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX3_DPCSTX_DEBUG_CONFIG
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+//RDPCSTX3_RDPCSTX_CNTL
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x01F00000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON_MASK 0x00400000L
+//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX3_RDPCS_TX_CR_ADDR
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_CR_DATA
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX3_RDPCSTX_SCRATCH
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_SPARE
+#define RDPCSTX3_RDPCSTX_SPARE__RDPCSTX_SPARE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_SPARE__RDPCSTX_SPARE_MASK 0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_CNTL2
+#define RDPCSTX3_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL_MASK 0x00000002L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX3_RDPCSTX_PHY_CNTL2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX3_RDPCSTX_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX3_RDPCSTX_PHY_CNTL13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS_MASK 0x1FC00000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL_MASK 0xE0000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE1
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE2
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS_MASK 0x3F800000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE3
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x1C000000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL_MASK 0xE0000000L
+//RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX3_RDPCSTX_PHY_CNTL15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG2
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0_MASK 0x00000007L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2_MASK 0x00000700L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3_MASK 0x00007000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+//DPCSSYS_CR3_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR3_DPCSSYS_CR_DATA
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+//DPCSTX4_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX4_DPCSTX_TX_CNTL
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_CBUS_CNTL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX4_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX4_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX4_DPCSTX_DEBUG_CONFIG
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+//RDPCSTX4_RDPCSTX_CNTL
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x01F00000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_GATE_DIS_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_EN_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_OCLACLK_CLOCK_ON_MASK 0x00400000L
+//RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX4_RDPCS_TX_CR_ADDR
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_CR_DATA
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX4_RDPCSTX_SCRATCH
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX4_RDPCSTX_SPARE
+#define RDPCSTX4_RDPCSTX_SPARE__RDPCSTX_SPARE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_SPARE__RDPCSTX_SPARE_MASK 0xFFFFFFFFL
+//RDPCSTX4_RDPCSTX_CNTL2
+#define RDPCSTX4_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_EMPTY_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CNTL2__RDPCS_CR_CONVERT_FIFO_FULL_MASK 0x00000002L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX4_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX4_RDPCSTX_PHY_CNTL2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX4_RDPCSTX_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX4_RDPCSTX_PHY_CNTL13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_CP_INT_GS_MASK 0x1FC00000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_RX_VREF_CTRL_MASK 0xE0000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE1
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE2
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_MPLLB_CP_PROP_GS_MASK 0x3F800000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE3
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x1C000000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_SUP_RX_VCO_VREF_SEL_MASK 0xE0000000L
+//RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX4_RDPCSTX_PHY_CNTL15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SSTX_VREGDRV_BYP_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX0_VREGDRV_BYP_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX1_VREGDRV_BYP_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX2_VREGDRV_BYP_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_DP_TX3_VREGDRV_BYP_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL15__RDPCS_PHY_SUP_PRE_HP_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX0_OUT_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX1_OUT_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX2_OUT_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_DP_TX3_OUT_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL16__RDPCS_PHY_CMN_OUT_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX0_IN_GENERIC_BUS_MASK 0x0000001FL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX1_IN_GENERIC_BUS_MASK 0x000007C0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX2_IN_GENERIC_BUS_MASK 0x0001F000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_DP_TX3_IN_GENERIC_BUS_MASK 0x007C0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL17__RDPCS_PHY_CMN_IN_GENERIC_BUS_MASK 0x1F000000L
+//RDPCSTX4_RDPCSTX_DEBUG_CONFIG2
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC0_MASK 0x00000007L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC1_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC2_MASK 0x00000700L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_SRC3_MASK 0x00007000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG2__RDPCS_DBG_OCLA_VALID_REPLACE_MSB_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+//DPCSSYS_CR4_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR4_DPCSSYS_CR_DATA
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 1dbc7cefbc05..075867d4b1da 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -10107,6 +10107,8 @@
#define mmCGTT_IA_CLK_CTRL_BASE_IDX 1
#define mmCGTT_WD_CLK_CTRL 0x5086
#define mmCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GS_NGG_CLK_CTRL 0x5087
+#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
#define mmCGTT_PA_CLK_CTRL 0x5088
#define mmCGTT_PA_CLK_CTRL_BASE_IDX 1
#define mmCGTT_SC_CLK_CTRL0 0x5089
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index 6c2a421fe8b7..e7db6f9f9c86 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -37872,6 +37872,45 @@
#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE__SHIFT 0x1d
+#define CGTT_GS_NGG_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE_MASK 0x20000000L
+#define CGTT_GS_NGG_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
//CGTT_PA_CLK_CTRL
#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index f1d048e0ed2c..d984c916df80 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -1146,7 +1146,14 @@
#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
#define mmATC_L2_CGTT_CLK_CTRL 0x080c
#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810
+#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811
+#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2pfdec
// base address: 0xa100
@@ -1206,7 +1213,14 @@
#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
#define mmVM_L2_CGTT_CLK_CTRL 0x085e
#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmVM_L2_MEM_ECC_INDEX 0x0860
+#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_MEM_ECC_CNT 0x0862
+#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863
+#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2vcdec
// base address: 0xa200
@@ -1700,6 +1714,8 @@
#define mmTCP_BUFFER_ADDR_HASH_CNTL_BASE_IDX 0
#define mmTCP_EDC_CNT 0x0b17
#define mmTCP_EDC_CNT_BASE_IDX 0
+#define mmTCP_EDC_CNT_NEW 0x0b18
+#define mmTCP_EDC_CNT_NEW_BASE_IDX 0
#define mmTC_CFG_L1_LOAD_POLICY0 0x0b1a
#define mmTC_CFG_L1_LOAD_POLICY0_BASE_IDX 0
#define mmTC_CFG_L1_LOAD_POLICY1 0x0b1b
@@ -2208,6 +2224,14 @@
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4 0x0e25
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5 0x0e26
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6 0x0e27
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7 0x0e28
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
#define mmCOMPUTE_RESTART_X 0x0e1b
#define mmCOMPUTE_RESTART_X_BASE_IDX 0
#define mmCOMPUTE_RESTART_Y 0x0e1c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 2e1214be67a2..ea316d8dcb37 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -21,6 +21,105 @@
#ifndef _gc_9_0_SH_MASK_HEADER
#define _gc_9_0_SH_MASK_HEADER
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+
+// addressBlock: gc_cppdec2
+//CPF_EDC_TAG_CNT
+#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPF_EDC_ROQ_CNT
+#define CPF_EDC_ROQ_CNT__COUNT_ME1__SHIFT 0x0
+#define CPF_EDC_ROQ_CNT__COUNT_ME2__SHIFT 0x2
+#define CPF_EDC_ROQ_CNT__COUNT_ME1_MASK 0x00000003L
+#define CPF_EDC_ROQ_CNT__COUNT_ME2_MASK 0x0000000CL
+//CPG_EDC_TAG_CNT
+#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPG_EDC_DMA_CNT
+#define CPG_EDC_DMA_CNT__ROQ_COUNT__SHIFT 0x0
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x2
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x4
+#define CPG_EDC_DMA_CNT__ROQ_COUNT_MASK 0x00000003L
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x0000000CL
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x00000030L
+//CPC_EDC_SCRATCH_CNT
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPC_EDC_UCODE_CNT
+#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL
+//DC_EDC_STATE_CNT
+#define DC_EDC_STATE_CNT__COUNT_ME1__SHIFT 0x0
+#define DC_EDC_STATE_CNT__COUNT_ME1_MASK 0x00000003L
+//DC_EDC_CSINVOC_CNT
+#define DC_EDC_CSINVOC_CNT__COUNT_ME1__SHIFT 0x0
+#define DC_EDC_CSINVOC_CNT__COUNT_ME1_MASK 0x00000003L
+//DC_EDC_RESTORE_CNT
+#define DC_EDC_RESTORE_CNT__COUNT_ME1__SHIFT 0x0
+#define DC_EDC_RESTORE_CNT__COUNT_ME1_MASK 0x00000003L
// addressBlock: gc_grbmdec
//GRBM_CNTL
@@ -1961,7 +2060,8 @@
// addressBlock: gc_sqdec
//SQ_CONFIG
-#define SQ_CONFIG__UNUSED__SHIFT 0x0
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0
+#define SQ_CONFIG__UNUSED__SHIFT 0x1
#define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
#define SQ_CONFIG__DEBUG_EN__SHIFT 0x8
#define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9
@@ -1980,7 +2080,8 @@
#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
-#define SQ_CONFIG__UNUSED_MASK 0x0000007FL
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L
+#define SQ_CONFIG__UNUSED_MASK 0x0000007EL
#define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
#define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
#define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L
@@ -6562,7 +6663,6 @@
#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
// addressBlock: gc_utcl2_vml2pfdec
//VM_L2_CNTL
#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
@@ -6892,7 +6992,22 @@
#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
+//VM_L2_MEM_ECC_INDEX
+#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_WALKER_MEM_ECC_INDEX
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_MEM_ECC_CNT
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
+//VM_L2_WALKER_MEM_ECC_CNT
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
// addressBlock: gc_utcl2_vml2vcdec
//VM_CONTEXT0_CNTL
@@ -8626,10 +8741,16 @@
#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
+#define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT 0xb
+#define TCP_ADDR_CONFIG__ENABLE2MHASH__SHIFT 0xc
+#define TCP_ADDR_CONFIG__ENABLE1GHASH__SHIFT 0xd
#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+#define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK 0x00000800L
+#define TCP_ADDR_CONFIG__ENABLE2MHASH_MASK 0x00001000L
+#define TCP_ADDR_CONFIG__ENABLE1GHASH_MASK 0x00002000L
//TCP_CREDIT
#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
@@ -9033,11 +9154,15 @@
#define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x4
#define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x6
#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT__SHIFT 0x8
+#define TCC_EDC_CNT2__WRRET_TAG_WRITE_RETURN_SED_COUNT__SHIFT 0xa
+#define TCC_EDC_CNT2__ATOMIC_RETURN_BUFFER_SED_COUNT__SHIFT 0xc
#define TCC_EDC_CNT2__WRITE_RETURN_SED_COUNT_MASK 0x00000003L
#define TCC_EDC_CNT2__WRITE_CACHE_READ_SED_COUNT_MASK 0x0000000CL
#define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT_MASK 0x00000030L
#define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT_MASK 0x000000C0L
#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT_MASK 0x00000300L
+#define TCC_EDC_CNT2__WRRET_TAG_WRITE_RETURN_SED_COUNT_MASK 0x00000C00L
+#define TCC_EDC_CNT2__ATOMIC_RETURN_BUFFER_SED_COUNT_MASK 0x00003000L
//TCC_REDUNDANCY
#define TCC_REDUNDANCY__MC_SEL0__SHIFT 0x0
#define TCC_REDUNDANCY__MC_SEL1__SHIFT 0x1
@@ -29818,6 +29943,60 @@
#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SED_COUNT__SHIFT 0x4
+#define TA_EDC_CNT__TA_FL_LFIFO_SED_COUNT__SHIFT 0x6
+#define TA_EDC_CNT__TA_FX_LFIFO_SED_COUNT__SHIFT 0x8
+#define TA_EDC_CNT__TA_FS_CFIFO_SED_COUNT__SHIFT 0xa
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SED_COUNT_MASK 0x00000030L
+#define TA_EDC_CNT__TA_FL_LFIFO_SED_COUNT_MASK 0x000000C0L
+#define TA_EDC_CNT__TA_FX_LFIFO_SED_COUNT_MASK 0x00000300L
+#define TA_EDC_CNT__TA_FS_CFIFO_SED_COUNT_MASK 0x00000C00L
+
+//TCI_EDC_CNT
+#define TCI_EDC_CNT__WRITE_RAM_SED_COUNT__SHIFT 0x0
+#define TCI_EDC_CNT__WRITE_RAM_SED_COUNT_MASK 0x00000003L
+
+//TCP_EDC_CNT_NEW
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SED_COUNT__SHIFT 0x8
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xa
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xc
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0xe
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x10
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x12
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x14
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x16
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SED_COUNT_MASK 0x00000300L
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00000C00L
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x00003000L
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x0000C000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x00030000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x000C0000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00300000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x00C00000L
+//TD_EDC_CNT
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6
+#define TD_EDC_CNT__CS_FIFO_SED_COUNT__SHIFT 0x8
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L
+#define TD_EDC_CNT__CS_FIFO_SED_COUNT_MASK 0x00000300L
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
new file mode 100644
index 000000000000..f41556abfbbc
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_OFFSET_HEADER
+#define _gc_9_4_1_OFFSET_HEADER
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define mmGRBM_CNTL 0x0000
+#define mmGRBM_CNTL_BASE_IDX 0
+#define mmGRBM_SKEW_CNTL 0x0001
+#define mmGRBM_SKEW_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS2 0x0002
+#define mmGRBM_STATUS2_BASE_IDX 0
+#define mmGRBM_PWR_CNTL 0x0003
+#define mmGRBM_PWR_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS 0x0004
+#define mmGRBM_STATUS_BASE_IDX 0
+#define mmGRBM_STATUS_SE0 0x0005
+#define mmGRBM_STATUS_SE0_BASE_IDX 0
+#define mmGRBM_STATUS_SE1 0x0006
+#define mmGRBM_STATUS_SE1_BASE_IDX 0
+#define mmGRBM_SOFT_RESET 0x0008
+#define mmGRBM_SOFT_RESET_BASE_IDX 0
+#define mmGRBM_GFX_CLKEN_CNTL 0x000c
+#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define mmGRBM_WAIT_IDLE_CLOCKS 0x000d
+#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define mmGRBM_STATUS_SE2 0x000e
+#define mmGRBM_STATUS_SE2_BASE_IDX 0
+#define mmGRBM_STATUS_SE3 0x000f
+#define mmGRBM_STATUS_SE3_BASE_IDX 0
+#define mmGRBM_READ_ERROR 0x0016
+#define mmGRBM_READ_ERROR_BASE_IDX 0
+#define mmGRBM_READ_ERROR2 0x0017
+#define mmGRBM_READ_ERROR2_BASE_IDX 0
+#define mmGRBM_INT_CNTL 0x0018
+#define mmGRBM_INT_CNTL_BASE_IDX 0
+#define mmGRBM_TRAP_OP 0x0019
+#define mmGRBM_TRAP_OP_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR 0x001a
+#define mmGRBM_TRAP_ADDR_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR_MSK 0x001b
+#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define mmGRBM_TRAP_WD 0x001c
+#define mmGRBM_TRAP_WD_BASE_IDX 0
+#define mmGRBM_TRAP_WD_MSK 0x001d
+#define mmGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define mmGRBM_DSM_BYPASS 0x001e
+#define mmGRBM_DSM_BYPASS_BASE_IDX 0
+#define mmGRBM_WRITE_ERROR 0x001f
+#define mmGRBM_WRITE_ERROR_BASE_IDX 0
+#define mmGRBM_IOV_ERROR 0x0020
+#define mmGRBM_IOV_ERROR_BASE_IDX 0
+#define mmGRBM_CHIP_REVISION 0x0021
+#define mmGRBM_CHIP_REVISION_BASE_IDX 0
+#define mmGRBM_GFX_CNTL 0x0022
+#define mmGRBM_GFX_CNTL_BASE_IDX 0
+#define mmGRBM_RSMU_CFG 0x0023
+#define mmGRBM_RSMU_CFG_BASE_IDX 0
+#define mmGRBM_IH_CREDIT 0x0024
+#define mmGRBM_IH_CREDIT_BASE_IDX 0
+#define mmGRBM_PWR_CNTL2 0x0025
+#define mmGRBM_PWR_CNTL2_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026
+#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027
+#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define mmGRBM_RSMU_READ_ERROR 0x0028
+#define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define mmGRBM_CHICKEN_BITS 0x0029
+#define mmGRBM_CHICKEN_BITS_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE0 0x002a
+#define mmGRBM_FENCE_RANGE0_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE1 0x002b
+#define mmGRBM_FENCE_RANGE1_BASE_IDX 0
+#define mmGRBM_NOWHERE 0x003f
+#define mmGRBM_NOWHERE_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG0 0x0040
+#define mmGRBM_SCRATCH_REG0_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG1 0x0041
+#define mmGRBM_SCRATCH_REG1_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG2 0x0042
+#define mmGRBM_SCRATCH_REG2_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG3 0x0043
+#define mmGRBM_SCRATCH_REG3_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG4 0x0044
+#define mmGRBM_SCRATCH_REG4_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG5 0x0045
+#define mmGRBM_SCRATCH_REG5_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG6 0x0046
+#define mmGRBM_SCRATCH_REG6_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG7 0x0047
+#define mmGRBM_SCRATCH_REG7_BASE_IDX 0
+
+// addressBlock: gc_cppdec2
+// base address: 0xc600
+#define mmCPF_EDC_TAG_CNT 0x1189
+#define mmCPF_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPF_EDC_ROQ_CNT 0x118a
+#define mmCPF_EDC_ROQ_CNT_BASE_IDX 0
+#define mmCPG_EDC_TAG_CNT 0x118b
+#define mmCPG_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPG_EDC_DMA_CNT 0x118d
+#define mmCPG_EDC_DMA_CNT_BASE_IDX 0
+#define mmCPC_EDC_SCRATCH_CNT 0x118e
+#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0
+#define mmCPC_EDC_UCODE_CNT 0x118f
+#define mmCPC_EDC_UCODE_CNT_BASE_IDX 0
+#define mmDC_EDC_STATE_CNT 0x1191
+#define mmDC_EDC_STATE_CNT_BASE_IDX 0
+#define mmDC_EDC_CSINVOC_CNT 0x1192
+#define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0
+#define mmDC_EDC_RESTORE_CNT 0x1193
+#define mmDC_EDC_RESTORE_CNT_BASE_IDX 0
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define mmGDS_EDC_CNT 0x05c5
+#define mmGDS_EDC_CNT_BASE_IDX 0
+#define mmGDS_EDC_GRBM_CNT 0x05c6
+#define mmGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define mmGDS_EDC_OA_DED 0x05c7
+#define mmGDS_EDC_OA_DED_BASE_IDX 0
+#define mmGDS_EDC_OA_PHY_CNT 0x05cb
+#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define mmGDS_EDC_OA_PIPE_CNT 0x05cc
+#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define mmSPI_EDC_CNT 0x0445
+#define mmSPI_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define mmSQC_EDC_CNT2 0x032c
+#define mmSQC_EDC_CNT2_BASE_IDX 0
+#define mmSQC_EDC_CNT3 0x032d
+#define mmSQC_EDC_CNT3_BASE_IDX 0
+#define mmSQC_EDC_PARITY_CNT3 0x032e
+#define mmSQC_EDC_PARITY_CNT3_BASE_IDX 0
+#define mmSQC_EDC_CNT 0x03a2
+#define mmSQC_EDC_CNT_BASE_IDX 0
+#define mmSQ_EDC_SEC_CNT 0x03a3
+#define mmSQ_EDC_SEC_CNT_BASE_IDX 0
+#define mmSQ_EDC_DED_CNT 0x03a4
+#define mmSQ_EDC_DED_CNT_BASE_IDX 0
+#define mmSQ_EDC_INFO 0x03a5
+#define mmSQ_EDC_INFO_BASE_IDX 0
+#define mmSQ_EDC_CNT 0x03a6
+#define mmSQ_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tcdec
+// base address: 0xac00
+#define mmTCP_EDC_CNT 0x0b17
+#define mmTCP_EDC_CNT_BASE_IDX 0
+#define mmTCP_EDC_CNT_NEW 0x0b18
+#define mmTCP_EDC_CNT_NEW_BASE_IDX 0
+#define mmTCP_ATC_EDC_GATCL1_CNT 0x12b1
+#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0
+#define mmTCI_EDC_CNT 0x0b60
+#define mmTCI_EDC_CNT_BASE_IDX 0
+#define mmTCC_EDC_CNT 0x0b82
+#define mmTCC_EDC_CNT_BASE_IDX 0
+#define mmTCC_EDC_CNT2 0x0b83
+#define mmTCC_EDC_CNT2_BASE_IDX 0
+#define mmTCA_EDC_CNT 0x0bc5
+#define mmTCA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTD_EDC_CNT 0x052e
+#define mmTD_EDC_CNT_BASE_IDX 0
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_EDC_CNT 0x0706
+#define mmGCEA_EDC_CNT_BASE_IDX 0
+#define mmGCEA_EDC_CNT2 0x0707
+#define mmGCEA_EDC_CNT2_BASE_IDX 0
+#define mmGCEA_EDC_CNT3 0x071b
+#define mmGCEA_EDC_CNT3_BASE_IDX 0
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define mmSCRATCH_REG0 0x2040
+#define mmSCRATCH_REG0_BASE_IDX 1
+#define mmSCRATCH_REG1 0x2041
+#define mmSCRATCH_REG1_BASE_IDX 1
+#define mmSCRATCH_REG2 0x2042
+#define mmSCRATCH_REG2_BASE_IDX 1
+#define mmSCRATCH_REG3 0x2043
+#define mmSCRATCH_REG3_BASE_IDX 1
+#define mmSCRATCH_REG4 0x2044
+#define mmSCRATCH_REG4_BASE_IDX 1
+#define mmSCRATCH_REG5 0x2045
+#define mmSCRATCH_REG5_BASE_IDX 1
+#define mmSCRATCH_REG6 0x2046
+#define mmSCRATCH_REG6_BASE_IDX 1
+#define mmSCRATCH_REG7 0x2047
+#define mmSCRATCH_REG7_BASE_IDX 1
+#define mmGRBM_GFX_INDEX 0x2200
+#define mmGRBM_GFX_INDEX_BASE_IDX 1
+
+// addressBlock: gc_utcl2_atcl2dec
+// base address: 0xa000
+#define mmATC_L2_CACHE_4K_DSM_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_DSM_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_DSM_CNTL 0x0810
+#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_DSM_CNTL 0x0811
+#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0
+
+// addressBlock: gc_utcl2_vml2pfdec
+// base address: 0xa100
+#define mmVML2_MEM_ECC_INDEX 0x0860
+#define mmVML2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVML2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmUTCL2_MEM_ECC_INDEX 0x0862
+#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX 0
+
+#define mmVML2_MEM_ECC_CNTL 0x0863
+#define mmVML2_MEM_ECC_CNTL_BASE_IDX 0
+#define mmVML2_WALKER_MEM_ECC_CNTL 0x0864
+#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0
+#define mmUTCL2_MEM_ECC_CNTL 0x0865
+#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX 0
+
+// addressBlock: gc_rlcpdec
+// base address: 0x3b000
+#define mmRLC_EDC_CNT 0x4d40
+#define mmRLC_EDC_CNT_BASE_IDX 1
+#define mmRLC_EDC_CNT2 0x4d41
+#define mmRLC_EDC_CNT2_BASE_IDX 1
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
new file mode 100644
index 000000000000..f26246a600c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_SH_MASK_HEADER
+#define _gc_9_4_1_SH_MASK_HEADER
+
+// addressBlock: gc_cppdec2
+//CPF_EDC_TAG_CNT
+#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPF_EDC_ROQ_CNT
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT 0x4
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT 0x6
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK 0x00000030L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK 0x000000C0L
+//CPG_EDC_TAG_CNT
+#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPG_EDC_DMA_CNT
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT 0x0
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x4
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x6
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK 0x0000000CL
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x00000030L
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x000000C0L
+//CPC_EDC_SCRATCH_CNT
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPC_EDC_UCODE_CNT
+#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL
+//DC_EDC_STATE_CNT
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+//DC_EDC_CSINVOC_CNT
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT 0x4
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT 0x6
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK 0x00000030L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L
+//DC_EDC_RESTORE_CNT
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT 0x4
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT 0x6
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK 0x00000030L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L
+
+// addressBlock: gc_gdsdec
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
+
+// addressBlock: gc_shsdec
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT 0xc
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT 0xe
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK 0x00003000L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK 0x0000C000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L
+
+// addressBlock: gc_sqdec
+//SQC_EDC_CNT2
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L
+//SQC_EDC_CNT3
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L
+//SQC_EDC_PARITY_CNT3
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L
+//SQC_EDC_CNT
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L
+//SQ_EDC_SEC_CNT
+#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0
+#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8
+#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10
+#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL
+#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L
+#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L
+//SQ_EDC_DED_CNT
+#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0
+#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8
+#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10
+#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL
+#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L
+#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L
+//SQ_EDC_INFO
+#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0
+#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4
+#define SQ_EDC_INFO__SOURCE__SHIFT 0x6
+#define SQ_EDC_INFO__VM_ID__SHIFT 0x9
+#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL
+#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L
+#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L
+#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L
+//SQ_EDC_CNT
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0
+#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4
+#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6
+#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8
+#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc
+#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10
+#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14
+#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18
+#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L
+#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L
+#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L
+#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L
+#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L
+#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L
+#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L
+#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L
+#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L
+
+// addressBlock: gc_tpdec
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L
+
+// addressBlock: gc_tcdec
+//TCP_EDC_CNT
+#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0
+#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8
+#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10
+#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL
+#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L
+#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L
+//TCP_EDC_CNT_NEW
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT 0x8
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT 0xa
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xc
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xe
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0x10
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x12
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x14
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x16
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x18
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK 0x00000300L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK 0x00000C00L
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00003000L
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x0000C000L
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x00030000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x000C0000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x00300000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00C00000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x03000000L
+//TCP_ATC_EDC_GATCL1_CNT
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL
+//TCI_EDC_CNT
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT 0x0
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT 0x2
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK 0x00000003L
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK 0x0000000CL
+//TCA_EDC_CNT
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT 0x0
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT 0x2
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT 0x4
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT 0x6
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK 0x00000003L
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK 0x0000000CL
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK 0x00000030L
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK 0x000000C0L
+//TCC_EDC_CNT
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT 0x14
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT 0x16
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT 0x18
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT 0x1a
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK 0x00300000L
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK 0x00C00000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK 0x03000000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK 0x0C000000L
+//TCC_EDC_CNT2
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT 0x0
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT 0x2
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT 0x4
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT 0x6
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT 0x8
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT 0xa
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT 0xc
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT 0xe
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT 0x10
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT 0x12
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT 0x14
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT 0x16
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT 0x18
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT 0x1a
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT 0x1c
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT 0x1e
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK 0x00000003L
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK 0x0000000CL
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK 0x00000030L
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK 0x000000C0L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK 0x00000300L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK 0x00000C00L
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK 0x00003000L
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK 0x0000C000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK 0x00030000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK 0x000C0000L
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK 0x00300000L
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK 0x00C00000L
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK 0x03000000L
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK 0x0C000000L
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK 0x30000000L
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK 0xC0000000L
+
+// addressBlock: gc_tpdec
+//TD_EDC_CNT
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT 0x8
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT 0xa
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK 0x00000300L
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK 0x00000C00L
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L
+
+// addressBlock: gc_ea_gceadec2
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L
+
+// addressBlock: gc_gfxudec
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+
+// addressBlock: gc_utcl2_atcl2dec
+//ATC_L2_CNTL
+//ATC_L2_CACHE_4K_DSM_INDEX
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_2M_DSM_INDEX
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_4K_DSM_CNTL
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+//ATC_L2_CACHE_2M_DSM_CNTL
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+
+// addressBlock: gc_utcl2_vml2pfdec
+//VML2_MEM_ECC_INDEX
+#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_WALKER_MEM_ECC_INDEX
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//UTCL2_MEM_ECC_INDEX
+#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_MEM_ECC_CNTL
+#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+//VML2_WALKER_MEM_ECC_CNTL
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+//UTCL2_MEM_ECC_CNTL
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+
+// addressBlock: gc_rlcpdec
+//RLC_EDC_CNT
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT 0x0
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT 0x2
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT 0x6
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT 0x8
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT 0xa
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT 0xe
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT 0x10
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT 0x12
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT 0x16
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT 0x18
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT 0x1a
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT 0x1c
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT 0x1e
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK 0x00000003L
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK 0x0000000CL
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK 0x00000300L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK 0x00000C00L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK 0x00030000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK 0x000C0000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK 0x03000000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK 0x0C000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK 0x30000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK 0xC0000000L
+//RLC_EDC_CNT2
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT 0x0
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT 0x2
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT 0x6
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT 0x8
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT 0xa
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT 0xe
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT 0x10
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT 0x12
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT 0x16
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT 0x18
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT 0x1a
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT 0x1c
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT 0x1e
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK 0x00000003L
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK 0x0000000CL
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK 0x00000300L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK 0x00000C00L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK 0x00030000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK 0x000C0000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK 0x03000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK 0x0C000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK 0x30000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK 0xC0000000L
+
+#endif
\ No newline at end of file
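[Editor's note, not part of the patch: the EDC counter registers above pack each SEC/DED counter into a 2-bit field, and the __SHIFT/_MASK pairs are meant to be used together to pull a field out of the raw 32-bit register value. A minimal sketch, assuming only the macros defined above; the helper names are placeholders, not driver code:

/* Illustrative only: extract the DRAMRD_CMDMEM SEC/DED counts
 * from a raw GCEA_EDC_CNT register value.
 */
static inline unsigned int gcea_dramrd_cmdmem_sec(unsigned int reg_val)
{
	return (reg_val & GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK) >>
	       GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT;
}

static inline unsigned int gcea_dramrd_cmdmem_ded(unsigned int reg_val)
{
	return (reg_val & GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK) >>
	       GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT;
}

Every other field in these headers follows the same pattern.]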
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
index 352ffae7a7ca..2c3ce243861a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
@@ -1964,4 +1964,20 @@
#define mmATC_L2_PERFCOUNTER_RSLT_CNTL 0x084a
#define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+/* MMEA */
+#define mmMMEA0_EDC_CNT_VG20 0x0206
+#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0
+#define mmMMEA0_EDC_CNT2_VG20 0x0207
+#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0
+#define mmMMEA1_EDC_CNT_VG20 0x0346
+#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0
+#define mmMMEA1_EDC_CNT2_VG20 0x0347
+#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
index 34278ef2aa1b..198f5f93ed1a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
@@ -10124,4 +10124,126 @@
#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA0_EDC_CNT
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA0_EDC_CNT2
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+//MMEA1_EDC_CNT
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA1_EDC_CNT2
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
#endif
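[Editor's note, not part of the patch: because every field in these headers follows the REG__FIELD_MASK / REG__FIELD__SHIFT naming convention, field extraction is usually done with a token-pasting helper rather than hand-written shifts. The macro below is a local sketch of that idea (the amdgpu driver ships its own equivalent, REG_GET_FIELD); GET_FIELD itself is not defined by this patch:

/* Illustrative only: generic field extractor built on the
 * __SHIFT/_MASK naming convention used throughout these headers.
 */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* e.g. GET_FIELD(lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION)
 *      GET_FIELD(lfb_size, MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE)
 */]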
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h
new file mode 100644
index 000000000000..ec631c816d18
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h
@@ -0,0 +1,3933 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_1_DEFAULT_HEADER
+#define _mmhub_9_4_1_DEFAULT_HEADER
+
+
+// addressBlock: mmhub_dagb_dagbdec0
+#define mmDAGB0_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB0_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB0_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB0_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB0_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB0_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB0_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB0_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB0_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB0_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB0_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB0_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB0_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB0_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB0_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB0_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB0_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB0_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB0_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB0_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB0_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB0_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB0_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB0_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB0_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB0_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB0_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB0_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB0_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB0_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB0_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB0_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB0_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_dagb_dagbdec1
+#define mmDAGB1_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB1_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB1_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB1_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB1_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB1_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB1_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB1_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB1_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB1_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB1_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB1_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB1_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB1_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB1_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB1_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB1_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB1_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB1_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB1_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB1_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB1_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB1_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB1_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB1_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB1_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB1_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB1_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB1_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB1_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB1_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB1_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_dagb_dagbdec2
+#define mmDAGB2_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB2_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB2_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB2_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB2_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB2_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB2_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB2_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB2_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB2_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB2_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB2_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB2_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB2_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB2_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB2_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB2_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB2_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB2_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB2_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB2_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB2_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB2_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB2_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB2_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB2_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB2_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB2_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB2_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB2_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB2_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB2_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB2_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB2_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB2_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB2_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB2_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_dagb_dagbdec3
+#define mmDAGB3_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB3_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB3_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB3_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB3_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB3_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB3_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB3_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB3_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB3_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB3_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB3_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB3_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB3_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB3_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB3_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB3_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB3_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB3_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB3_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB3_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB3_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB3_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB3_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB3_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB3_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB3_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB3_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB3_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB3_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB3_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB3_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB3_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB3_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB3_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB3_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB3_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB3_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB3_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB3_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB3_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_dagb_dagbdec4
+#define mmDAGB4_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB4_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB4_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB4_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB4_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB4_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB4_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB4_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB4_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB4_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB4_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB4_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB4_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB4_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB4_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB4_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB4_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB4_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB4_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB4_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB4_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB4_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB4_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB4_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB4_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB4_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB4_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB4_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB4_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB4_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB4_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB4_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB4_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB4_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB4_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB4_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB4_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB4_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB4_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB4_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB4_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_ea_mmeadec0
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA0_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA0_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA0_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA0_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA0_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA0_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA0_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA0_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA0_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA0_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA0_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA0_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA0_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA0_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA0_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA0_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA0_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA0_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA0_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA0_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA0_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA0_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA0_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA0_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA0_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA0_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA0_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA0_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA0_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA0_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA0_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA0_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA0_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA0_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA0_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA0_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA0_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA0_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA0_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA0_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA0_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA0_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA0_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA0_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA0_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA0_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA0_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA0_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA0_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA0_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA0_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA0_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA0_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA0_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA0_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA0_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA0_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA0_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA0_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA0_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA0_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA0_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA0_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA0_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA0_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA0_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA0_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA0_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA0_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA0_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA0_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA0_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA0_MISC_DEFAULT 0x0c00a070
+#define mmMMEA0_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA0_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA0_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA0_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA0_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA0_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA0_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA0_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA0_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA0_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA0_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA0_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA0_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA0_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA0_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA0_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA0_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA0_MISC2_DEFAULT 0x00000000
+#define mmMMEA0_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA0_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_ea_mmeadec1
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA1_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA1_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA1_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA1_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA1_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA1_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA1_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA1_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA1_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA1_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA1_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA1_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA1_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA1_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA1_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA1_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA1_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA1_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA1_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA1_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA1_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA1_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA1_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA1_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA1_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA1_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA1_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA1_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA1_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA1_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA1_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA1_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA1_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA1_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA1_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA1_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA1_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA1_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA1_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA1_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA1_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA1_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA1_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA1_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA1_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA1_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA1_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA1_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA1_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA1_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA1_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA1_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA1_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA1_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA1_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA1_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA1_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA1_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA1_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA1_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA1_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA1_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA1_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA1_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA1_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA1_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA1_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA1_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA1_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA1_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA1_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA1_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA1_MISC_DEFAULT 0x0c00a070
+#define mmMMEA1_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA1_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA1_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA1_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA1_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA1_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA1_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA1_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA1_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA1_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA1_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA1_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA1_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA1_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA1_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA1_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA1_MISC2_DEFAULT 0x00000000
+#define mmMMEA1_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA1_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_ea_mmeadec2
+#define mmMMEA2_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA2_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA2_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA2_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA2_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA2_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA2_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA2_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA2_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA2_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA2_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA2_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA2_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA2_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA2_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA2_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA2_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA2_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA2_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA2_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA2_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA2_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA2_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA2_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA2_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA2_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA2_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA2_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA2_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA2_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA2_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA2_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA2_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA2_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA2_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA2_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA2_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA2_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA2_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA2_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA2_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA2_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA2_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA2_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA2_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA2_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA2_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA2_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA2_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA2_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA2_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA2_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA2_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA2_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA2_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA2_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA2_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA2_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA2_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA2_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA2_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA2_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA2_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA2_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA2_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA2_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA2_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA2_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA2_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA2_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA2_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA2_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA2_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA2_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA2_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA2_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA2_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA2_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA2_MISC_DEFAULT 0x0c00a070
+#define mmMMEA2_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA2_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA2_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA2_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA2_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA2_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA2_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA2_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA2_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA2_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA2_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA2_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA2_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA2_MISC2_DEFAULT 0x00000000
+#define mmMMEA2_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA2_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_ea_mmeadec3
+#define mmMMEA3_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA3_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA3_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA3_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA3_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA3_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA3_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA3_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA3_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA3_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA3_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA3_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA3_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA3_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA3_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA3_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA3_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA3_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA3_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA3_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA3_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA3_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA3_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA3_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA3_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA3_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA3_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA3_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA3_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA3_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA3_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA3_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA3_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA3_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA3_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA3_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA3_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA3_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA3_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA3_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA3_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA3_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA3_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA3_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA3_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA3_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA3_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA3_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA3_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA3_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA3_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA3_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA3_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA3_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA3_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA3_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA3_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA3_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA3_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA3_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA3_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA3_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA3_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA3_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA3_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA3_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA3_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA3_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA3_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA3_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA3_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA3_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA3_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA3_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA3_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA3_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA3_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA3_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA3_MISC_DEFAULT 0x0c00a070
+#define mmMMEA3_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA3_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA3_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA3_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA3_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA3_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA3_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA3_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA3_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA3_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA3_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA3_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA3_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA3_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA3_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA3_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA3_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA3_MISC2_DEFAULT 0x00000000
+#define mmMMEA3_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA3_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_ea_mmeadec4
+#define mmMMEA4_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA4_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA4_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA4_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA4_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA4_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA4_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA4_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA4_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA4_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA4_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA4_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA4_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA4_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA4_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA4_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA4_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA4_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA4_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA4_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA4_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA4_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA4_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA4_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA4_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA4_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA4_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA4_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA4_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA4_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA4_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA4_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA4_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA4_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA4_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA4_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA4_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA4_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA4_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA4_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA4_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA4_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA4_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA4_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA4_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA4_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA4_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA4_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA4_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA4_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA4_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA4_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA4_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA4_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA4_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA4_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA4_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA4_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA4_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA4_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA4_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA4_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA4_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA4_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA4_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA4_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA4_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA4_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA4_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA4_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA4_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA4_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA4_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA4_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA4_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA4_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA4_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA4_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA4_MISC_DEFAULT 0x0c00a070
+#define mmMMEA4_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA4_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA4_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA4_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA4_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA4_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA4_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA4_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA4_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA4_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA4_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA4_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA4_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA4_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA4_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA4_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA4_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA4_MISC2_DEFAULT 0x00000000
+#define mmMMEA4_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA4_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_pctldec0
+#define mmPCTL0_CTRL_DEFAULT 0x00011040
+#define mmPCTL0_MMHUB_DEEPSLEEP_IB_DEFAULT 0x00000000
+#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_DEFAULT 0x00000000
+#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB_DEFAULT 0x00000000
+#define mmPCTL0_PG_IGNORE_DEEPSLEEP_DEFAULT 0x00000000
+#define mmPCTL0_PG_IGNORE_DEEPSLEEP_IB_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_MISC_DEFAULT 0x00011000
+#define mmPCTL0_SLICE0_MISC_DEFAULT 0x00000800
+#define mmPCTL0_SLICE1_MISC_DEFAULT 0x00000800
+#define mmPCTL0_SLICE2_MISC_DEFAULT 0x00000800
+#define mmPCTL0_SLICE3_MISC_DEFAULT 0x00000800
+#define mmPCTL0_SLICE4_MISC_DEFAULT 0x00000800
+#define mmPCTL0_UTCL2_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_l1tlb_vml1dec
+#define mmVML1_0_MC_VM_MX_L1_TLB0_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB1_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB2_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB3_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB4_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB5_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB6_STATUS_DEFAULT 0x00000000
+#define mmVML1_0_MC_VM_MX_L1_TLB7_STATUS_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG_DEFAULT 0x00000000
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec
+#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_atcl2dec
+#define mmATCL2_0_ATC_L2_CNTL_DEFAULT 0x0001c0c9
+#define mmATCL2_0_ATC_L2_CNTL2_DEFAULT 0x00600100
+#define mmATCL2_0_ATC_L2_CACHE_DATA0_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CACHE_DATA1_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CACHE_DATA2_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CNTL3_DEFAULT 0x000001f8
+#define mmATCL2_0_ATC_L2_STATUS_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_STATUS2_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_STATUS3_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_MISC_CG_DEFAULT 0x00000200
+#define mmATCL2_0_ATC_L2_MEM_POWER_LS_DEFAULT 0x00000208
+#define mmATCL2_0_ATC_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
+#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_CNTL4_DEFAULT 0x00000000
+#define mmATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000005
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec
+#define mmVML2PF0_VM_L2_CNTL_DEFAULT 0x00080602
+#define mmVML2PF0_VM_L2_CNTL2_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CNTL3_DEFAULT 0x80100007
+#define mmVML2PF0_VM_L2_STATUS_DEFAULT 0x00000000
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_CNTL_DEFAULT 0x00000090
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL_DEFAULT 0x3ffffffc
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2_DEFAULT 0x000a0000
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3_DEFAULT 0xffffffff
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4_DEFAULT 0xffffffff
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CNTL4_DEFAULT 0x000000c1
+#define mmVML2PF0_VM_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CACHE_PARITY_CNTL_DEFAULT 0x00000000
+#define mmVML2PF0_VM_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
+
+
+// addressBlock: mmhub_utcl2_vml2vcdec
+#define mmVML2VC0_VM_CONTEXT0_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT1_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT2_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT3_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT4_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT5_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT6_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT7_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT8_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT9_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT10_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT11_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT12_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT13_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT14_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXT15_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC0_VM_CONTEXTS_DISABLE_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG0_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG1_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG2_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG3_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG4_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG5_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG6_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG7_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG8_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG9_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG10_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG11_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG12_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG13_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG14_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG15_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG16_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG17_SEM_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG0_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG1_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG2_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG3_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG4_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG5_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG6_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG7_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG8_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG9_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG10_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG11_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG12_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG13_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG14_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG15_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG16_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG17_REQ_DEFAULT 0x017c0000
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ACK_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+#define mmVMSHAREDPF0_MC_VM_NB_MMIOBASE_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_NB_MMIOLIMIT_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_NB_PCI_CTRL_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_NB_PCI_ARB_DEFAULT 0x00000008
+#define mmVMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_FB_OFFSET_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_STEERING_DEFAULT 0x00000001
+#define mmVMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_MEM_POWER_LS_DEFAULT 0x00000208
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_APT_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END_DEFAULT 0x000fffff
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_DEFAULT 0x00000000
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec
+#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_AGP_TOP_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_AGP_BOT_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_AGP_BASE_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_DEFAULT 0x00000000
+#define mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL_DEFAULT 0x00002501
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1_DEFAULT 0x00000100
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_UTCL2_CGTT_CLK_CTRL_DEFAULT 0x00000080
+#define mmVMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID_DEFAULT 0x00000000
+#define mmVMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec
+#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_utcl2_vml2pldec
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER3_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER4_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER5_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER6_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER7_CFG_DEFAULT 0x00000000
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_utcl2_vml2prdec
+#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_dagb_dagbdec5
+#define mmDAGB5_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB5_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB5_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB5_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB5_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB5_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB5_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB5_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB5_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB5_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB5_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB5_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB5_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB5_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB5_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB5_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB5_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB5_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB5_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB5_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB5_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB5_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB5_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB5_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB5_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB5_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB5_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB5_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB5_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB5_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB5_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB5_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB5_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB5_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB5_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB5_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB5_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB5_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB5_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB5_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB5_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB5_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB5_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_dagb_dagbdec6
+#define mmDAGB6_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB6_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB6_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB6_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB6_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB6_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB6_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB6_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB6_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB6_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB6_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB6_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB6_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB6_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB6_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB6_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB6_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB6_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB6_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB6_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB6_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB6_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB6_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB6_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB6_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB6_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB6_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB6_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB6_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB6_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB6_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB6_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB6_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB6_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB6_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB6_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB6_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB6_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB6_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB6_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB6_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB6_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB6_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_dagb_dagbdec7
+#define mmDAGB7_RDCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RDCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_RD_CNTL_DEFAULT 0x03527df8
+#define mmDAGB7_RD_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB7_RD_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB7_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB7_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB7_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB7_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB7_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB7_RD_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_RD_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB7_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB7_RDCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_RDCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_RDCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_RDCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_RDCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI0_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI1_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI2_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI3_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI4_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI5_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI6_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI7_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI8_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI9_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI10_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI11_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI12_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI13_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI14_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WRCLI15_DEFAULT 0xfe5fe0f9
+#define mmDAGB7_WR_CNTL_DEFAULT 0x03527df8
+#define mmDAGB7_WR_GMI_CNTL_DEFAULT 0x00003045
+#define mmDAGB7_WR_ADDR_DAGB_DEFAULT 0x00000039
+#define mmDAGB7_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
+#define mmDAGB7_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
+#define mmDAGB7_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB7_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB7_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
+#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
+#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
+#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
+#define mmDAGB7_WR_DATA_DAGB_DEFAULT 0x00000001
+#define mmDAGB7_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
+#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
+#define mmDAGB7_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
+#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
+#define mmDAGB7_WR_VC0_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC1_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC2_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC3_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC4_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC5_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC6_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_VC7_CNTL_DEFAULT 0xff2ff082
+#define mmDAGB7_WR_CNTL_MISC_DEFAULT 0x69a0e408
+#define mmDAGB7_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
+#define mmDAGB7_WR_DATA_CREDIT_DEFAULT 0x60606070
+#define mmDAGB7_WR_MISC_CREDIT_DEFAULT 0x0078dc88
+#define mmDAGB7_WRCLI_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_TLB_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_OARB_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_OSD_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
+#define mmDAGB7_DAGB_DLY_DEFAULT 0x00000000
+#define mmDAGB7_CNTL_MISC_DEFAULT 0xcf7c1ffa
+#define mmDAGB7_CNTL_MISC2_DEFAULT 0x003c0000
+#define mmDAGB7_FIFO_EMPTY_DEFAULT 0x00ffffff
+#define mmDAGB7_FIFO_FULL_DEFAULT 0x00000000
+#define mmDAGB7_WR_CREDITS_FULL_DEFAULT 0x1fffffff
+#define mmDAGB7_RD_CREDITS_FULL_DEFAULT 0x0003ffff
+#define mmDAGB7_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmDAGB7_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmDAGB7_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmDAGB7_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmDAGB7_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmDAGB7_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmDAGB7_RESERVE0_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE1_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE2_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE3_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE4_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE5_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE6_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE7_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE8_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE9_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE10_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE11_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE12_DEFAULT 0xffffffff
+#define mmDAGB7_RESERVE13_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_ea_mmeadec5
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA5_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA5_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA5_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA5_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA5_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA5_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA5_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA5_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA5_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA5_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA5_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA5_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA5_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA5_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA5_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA5_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA5_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA5_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA5_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA5_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA5_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA5_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA5_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA5_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA5_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA5_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA5_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA5_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA5_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA5_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA5_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA5_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA5_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA5_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA5_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA5_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA5_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA5_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA5_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA5_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA5_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA5_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA5_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA5_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA5_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA5_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA5_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA5_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA5_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA5_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA5_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA5_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA5_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA5_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA5_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA5_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA5_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA5_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA5_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA5_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA5_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA5_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA5_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA5_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA5_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA5_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA5_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA5_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA5_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA5_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA5_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA5_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA5_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA5_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA5_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA5_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA5_MISC_DEFAULT 0x0c00a070
+#define mmMMEA5_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA5_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA5_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA5_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA5_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA5_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA5_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA5_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA5_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA5_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA5_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA5_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA5_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA5_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA5_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA5_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA5_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA5_MISC2_DEFAULT 0x00000000
+#define mmMMEA5_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA5_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_ea_mmeadec6
+#define mmMMEA6_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA6_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA6_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA6_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA6_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA6_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA6_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA6_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA6_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA6_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA6_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA6_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA6_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA6_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA6_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA6_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA6_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA6_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA6_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA6_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA6_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA6_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA6_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA6_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA6_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA6_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA6_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA6_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA6_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA6_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA6_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA6_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA6_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA6_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA6_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA6_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA6_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA6_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA6_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA6_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA6_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA6_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA6_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA6_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA6_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA6_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA6_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA6_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA6_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA6_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA6_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA6_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA6_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA6_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA6_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA6_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA6_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA6_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA6_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA6_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA6_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA6_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA6_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA6_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA6_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA6_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA6_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA6_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA6_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA6_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA6_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA6_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA6_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA6_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA6_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA6_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA6_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA6_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA6_MISC_DEFAULT 0x0c00a070
+#define mmMMEA6_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA6_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA6_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA6_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA6_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA6_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA6_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA6_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA6_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA6_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA6_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA6_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA6_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA6_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA6_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA6_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA6_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA6_MISC2_DEFAULT 0x00000000
+#define mmMMEA6_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA6_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_ea_mmeadec7
+#define mmMMEA7_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA7_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA7_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
+#define mmMMEA7_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
+#define mmMMEA7_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA7_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
+#define mmMMEA7_DRAM_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA7_DRAM_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA7_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA7_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA7_DRAM_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA7_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA7_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA7_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA7_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA7_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA7_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA7_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA7_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA7_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA7_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA7_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000
+#define mmMMEA7_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000
+#define mmMMEA7_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA7_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff
+#define mmMMEA7_GMI_RD_LAZY_DEFAULT 0x78000924
+#define mmMMEA7_GMI_WR_LAZY_DEFAULT 0x78000924
+#define mmMMEA7_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA7_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444
+#define mmMMEA7_GMI_PAGE_BURST_DEFAULT 0x20002000
+#define mmMMEA7_GMI_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA7_GMI_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA7_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA7_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA7_GMI_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA7_GMI_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA7_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA7_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
+#define mmMMEA7_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA7_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA7_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf
+#define mmMMEA7_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA7_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA7_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
+#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408
+#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408
+#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543
+#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543
+#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008
+#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008
+#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321
+#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321
+#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543
+#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543
+#define mmMMEA7_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000
+#define mmMMEA7_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA7_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000
+#define mmMMEA7_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA7_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA7_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
+#define mmMMEA7_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
+#define mmMMEA7_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA7_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
+#define mmMMEA7_IO_GROUP_BURST_DEFAULT 0x1f031f03
+#define mmMMEA7_IO_RD_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA7_IO_WR_PRI_AGE_DEFAULT 0x00db6249
+#define mmMMEA7_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA7_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
+#define mmMMEA7_IO_RD_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA7_IO_WR_PRI_FIXED_DEFAULT 0x00000924
+#define mmMMEA7_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA7_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
+#define mmMMEA7_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA7_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
+#define mmMMEA7_SDP_ARB_DRAM_DEFAULT 0x00101e40
+#define mmMMEA7_SDP_ARB_GMI_DEFAULT 0x00101e40
+#define mmMMEA7_SDP_ARB_FINAL_DEFAULT 0x00007fff
+#define mmMMEA7_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA7_SDP_GMI_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA7_SDP_IO_PRIORITY_DEFAULT 0x00000000
+#define mmMMEA7_SDP_CREDITS_DEFAULT 0x000101bf
+#define mmMMEA7_SDP_TAG_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA7_SDP_TAG_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA7_SDP_VCC_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA7_SDP_VCC_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA7_SDP_VCD_RESERVE0_DEFAULT 0x00000000
+#define mmMMEA7_SDP_VCD_RESERVE1_DEFAULT 0x00000000
+#define mmMMEA7_SDP_REQ_CNTL_DEFAULT 0x0000001f
+#define mmMMEA7_MISC_DEFAULT 0x0c00a070
+#define mmMMEA7_LATENCY_SAMPLING_DEFAULT 0x00000000
+#define mmMMEA7_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmMMEA7_PERFCOUNTER_HI_DEFAULT 0x00000000
+#define mmMMEA7_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmMMEA7_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmMMEA7_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+#define mmMMEA7_EDC_CNT_DEFAULT 0x00000000
+#define mmMMEA7_EDC_CNT2_DEFAULT 0x00000000
+#define mmMMEA7_DSM_CNTL_DEFAULT 0x00000000
+#define mmMMEA7_DSM_CNTLA_DEFAULT 0x00000000
+#define mmMMEA7_DSM_CNTLB_DEFAULT 0x00000000
+#define mmMMEA7_DSM_CNTL2_DEFAULT 0x00000000
+#define mmMMEA7_DSM_CNTL2A_DEFAULT 0x00000000
+#define mmMMEA7_DSM_CNTL2B_DEFAULT 0x00000000
+#define mmMMEA7_CGTT_CLK_CTRL_DEFAULT 0x00000100
+#define mmMMEA7_EDC_MODE_DEFAULT 0x00000000
+#define mmMMEA7_ERR_STATUS_DEFAULT 0x00000300
+#define mmMMEA7_MISC2_DEFAULT 0x00000000
+#define mmMMEA7_ADDRDEC_SELECT_DEFAULT 0x00000000
+#define mmMMEA7_EDC_CNT3_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_pctldec1
+#define mmPCTL1_CTRL_DEFAULT 0x00011040
+#define mmPCTL1_MMHUB_DEEPSLEEP_IB_DEFAULT 0x00000000
+#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_DEFAULT 0x00000000
+#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB_DEFAULT 0x00000000
+#define mmPCTL1_PG_IGNORE_DEEPSLEEP_DEFAULT 0x00000000
+#define mmPCTL1_PG_IGNORE_DEEPSLEEP_IB_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_CFG_DAGB_BUSY_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_CFG_DS_ALLOW_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_CFG_DS_ALLOW_IB_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_MISC_DEFAULT 0x00011000
+#define mmPCTL1_SLICE0_MISC_DEFAULT 0x00000800
+#define mmPCTL1_SLICE1_MISC_DEFAULT 0x00000800
+#define mmPCTL1_SLICE2_MISC_DEFAULT 0x00000800
+#define mmPCTL1_SLICE3_MISC_DEFAULT 0x00000800
+#define mmPCTL1_SLICE4_MISC_DEFAULT 0x00000800
+#define mmPCTL1_UTCL2_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_RENG_EXECUTE_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_RENG_RAM_INDEX_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_RENG_RAM_DATA_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
+
+
+// addressBlock: mmhub_l1tlb_vml1dec:1
+#define mmVML1_1_MC_VM_MX_L1_TLB0_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB1_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB2_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB3_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB4_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB5_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB6_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TLB7_STATUS_DEFAULT 0x00000000
+#define mmVML1_1_MC_VM_MX_L1_TMZ_CNTL_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec:1
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG_DEFAULT 0x00000000
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec:1
+#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_atcl2dec:1
+#define mmATCL2_1_ATC_L2_CNTL_DEFAULT 0x0001c0c9
+#define mmATCL2_1_ATC_L2_CNTL2_DEFAULT 0x00600100
+#define mmATCL2_1_ATC_L2_CACHE_DATA0_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CACHE_DATA1_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CACHE_DATA2_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CNTL3_DEFAULT 0x000001f8
+#define mmATCL2_1_ATC_L2_STATUS_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_STATUS2_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_STATUS3_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_MISC_CG_DEFAULT 0x00000200
+#define mmATCL2_1_ATC_L2_MEM_POWER_LS_DEFAULT 0x00000208
+#define mmATCL2_1_ATC_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
+#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_CNTL4_DEFAULT 0x00000000
+#define mmATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000005
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec:1
+#define mmVML2PF1_VM_L2_CNTL_DEFAULT 0x00080602
+#define mmVML2PF1_VM_L2_CNTL2_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CNTL3_DEFAULT 0x80100007
+#define mmVML2PF1_VM_L2_STATUS_DEFAULT 0x00000000
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_CNTL_DEFAULT 0x00000090
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL_DEFAULT 0x3ffffffc
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL2_DEFAULT 0x000a0000
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3_DEFAULT 0xffffffff
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4_DEFAULT 0xffffffff
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_STATUS_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CNTL4_DEFAULT 0x000000c1
+#define mmVML2PF1_VM_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CACHE_PARITY_CNTL_DEFAULT 0x00000000
+#define mmVML2PF1_VM_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
+
+
+// addressBlock: mmhub_utcl2_vml2vcdec:1
+#define mmVML2VC1_VM_CONTEXT0_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT1_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT2_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT3_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT4_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT5_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT6_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT7_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT8_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT9_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT10_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT11_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT12_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT13_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT14_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXT15_CNTL_DEFAULT 0x007ffe80
+#define mmVML2VC1_VM_CONTEXTS_DISABLE_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG0_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG1_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG2_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG3_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG4_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG5_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG6_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG7_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG8_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG9_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG10_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG11_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG12_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG13_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG14_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG15_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG16_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG17_SEM_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG0_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG1_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG2_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG3_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG4_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG5_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG6_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG7_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG8_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG9_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG10_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG11_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG12_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG13_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG14_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG15_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG16_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG17_REQ_DEFAULT 0x017c0000
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ACK_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec:1
+#define mmVMSHAREDPF1_MC_VM_NB_MMIOBASE_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_NB_MMIOLIMIT_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_NB_PCI_CTRL_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_NB_PCI_ARB_DEFAULT 0x00000008
+#define mmVMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_FB_OFFSET_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_STEERING_DEFAULT 0x00000001
+#define mmVMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_MEM_POWER_LS_DEFAULT 0x00000208
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_APT_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END_DEFAULT 0x000fffff
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_SIZE_DEFAULT 0x00000000
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec:1
+#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_TOP_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_AGP_TOP_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_AGP_BOT_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_AGP_BASE_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_DEFAULT 0x00000000
+#define mmVMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL_DEFAULT 0x00002501
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec:1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1_DEFAULT 0x00000100
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_UTCL2_CGTT_CLK_CTRL_DEFAULT 0x00000080
+#define mmVMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID_DEFAULT 0x00000000
+#define mmVMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec:1
+#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec:1
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_utcl2_vml2pldec:1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER2_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER3_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER4_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER5_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER6_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER7_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_utcl2_vml2prdec:1
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+#endif
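
(Note: the generated headers above and below only carry per-block register offsets, `_BASE_IDX` segment indices and, in the `_default.h` variants, reset values; the driver still combines them with a per-IP base-address table at runtime, typically via the SOC15_REG_OFFSET() helper in soc15_common.h. The following is a minimal, self-contained sketch of that lookup pattern only — the segment-base table and values here are hypothetical placeholders, not the real adev->reg_offset[] contents.)

    /*
     * Illustrative sketch only: combine an offset/_BASE_IDX pair from a
     * generated header with a per-segment base to form a DWORD register
     * index.  The base table below uses placeholder values; the real
     * driver resolves this through adev->reg_offset[] (see
     * SOC15_REG_OFFSET() in soc15_common.h).
     */
    #include <stdint.h>
    #include <stdio.h>

    #define mmDAGB0_RD_CNTL           0x0010  /* from mmhub_9_4_1_offset.h */
    #define mmDAGB0_RD_CNTL_BASE_IDX  1

    /* Hypothetical segment bases indexed by _BASE_IDX (placeholder values). */
    static const uint32_t mmhub_seg_base[] = { 0x00000, 0x1a000 };

    static uint32_t reg_index(uint32_t base_idx, uint32_t offset)
    {
            /* Register index in DWORDs: segment base plus per-block offset. */
            return mmhub_seg_base[base_idx] + offset;
    }

    int main(void)
    {
            printf("DAGB0_RD_CNTL index: 0x%x\n",
                   reg_index(mmDAGB0_RD_CNTL_BASE_IDX, mmDAGB0_RD_CNTL));
            return 0;
    }
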
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
new file mode 100644
index 000000000000..d8632ccf3494
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -0,0 +1,7753 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_1_OFFSET_HEADER
+#define _mmhub_9_4_1_OFFSET_HEADER
+
+
+
+// addressBlock: mmhub_dagb_dagbdec0
+// base address: 0x68000
+#define mmDAGB0_RDCLI0 0x0000
+#define mmDAGB0_RDCLI0_BASE_IDX 1
+#define mmDAGB0_RDCLI1 0x0001
+#define mmDAGB0_RDCLI1_BASE_IDX 1
+#define mmDAGB0_RDCLI2 0x0002
+#define mmDAGB0_RDCLI2_BASE_IDX 1
+#define mmDAGB0_RDCLI3 0x0003
+#define mmDAGB0_RDCLI3_BASE_IDX 1
+#define mmDAGB0_RDCLI4 0x0004
+#define mmDAGB0_RDCLI4_BASE_IDX 1
+#define mmDAGB0_RDCLI5 0x0005
+#define mmDAGB0_RDCLI5_BASE_IDX 1
+#define mmDAGB0_RDCLI6 0x0006
+#define mmDAGB0_RDCLI6_BASE_IDX 1
+#define mmDAGB0_RDCLI7 0x0007
+#define mmDAGB0_RDCLI7_BASE_IDX 1
+#define mmDAGB0_RDCLI8 0x0008
+#define mmDAGB0_RDCLI8_BASE_IDX 1
+#define mmDAGB0_RDCLI9 0x0009
+#define mmDAGB0_RDCLI9_BASE_IDX 1
+#define mmDAGB0_RDCLI10 0x000a
+#define mmDAGB0_RDCLI10_BASE_IDX 1
+#define mmDAGB0_RDCLI11 0x000b
+#define mmDAGB0_RDCLI11_BASE_IDX 1
+#define mmDAGB0_RDCLI12 0x000c
+#define mmDAGB0_RDCLI12_BASE_IDX 1
+#define mmDAGB0_RDCLI13 0x000d
+#define mmDAGB0_RDCLI13_BASE_IDX 1
+#define mmDAGB0_RDCLI14 0x000e
+#define mmDAGB0_RDCLI14_BASE_IDX 1
+#define mmDAGB0_RDCLI15 0x000f
+#define mmDAGB0_RDCLI15_BASE_IDX 1
+#define mmDAGB0_RD_CNTL 0x0010
+#define mmDAGB0_RD_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_GMI_CNTL 0x0011
+#define mmDAGB0_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_ADDR_DAGB 0x0012
+#define mmDAGB0_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST 0x0013
+#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER 0x0014
+#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB0_RD_CGTT_CLK_CTRL 0x0015
+#define mmDAGB0_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL 0x0016
+#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL 0x0017
+#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0 0x0018
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0 0x0019
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1 0x001a
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1 0x001b
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB0_RD_VC0_CNTL 0x001c
+#define mmDAGB0_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC1_CNTL 0x001d
+#define mmDAGB0_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC2_CNTL 0x001e
+#define mmDAGB0_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC3_CNTL 0x001f
+#define mmDAGB0_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC4_CNTL 0x0020
+#define mmDAGB0_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC5_CNTL 0x0021
+#define mmDAGB0_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC6_CNTL 0x0022
+#define mmDAGB0_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_VC7_CNTL 0x0023
+#define mmDAGB0_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB0_RD_CNTL_MISC 0x0024
+#define mmDAGB0_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB0_RD_TLB_CREDIT 0x0025
+#define mmDAGB0_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB0_RDCLI_ASK_PENDING 0x0026
+#define mmDAGB0_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB0_RDCLI_GO_PENDING 0x0027
+#define mmDAGB0_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB0_RDCLI_GBLSEND_PENDING 0x0028
+#define mmDAGB0_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB0_RDCLI_TLB_PENDING 0x0029
+#define mmDAGB0_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB0_RDCLI_OARB_PENDING 0x002a
+#define mmDAGB0_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB0_RDCLI_OSD_PENDING 0x002b
+#define mmDAGB0_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI0 0x002c
+#define mmDAGB0_WRCLI0_BASE_IDX 1
+#define mmDAGB0_WRCLI1 0x002d
+#define mmDAGB0_WRCLI1_BASE_IDX 1
+#define mmDAGB0_WRCLI2 0x002e
+#define mmDAGB0_WRCLI2_BASE_IDX 1
+#define mmDAGB0_WRCLI3 0x002f
+#define mmDAGB0_WRCLI3_BASE_IDX 1
+#define mmDAGB0_WRCLI4 0x0030
+#define mmDAGB0_WRCLI4_BASE_IDX 1
+#define mmDAGB0_WRCLI5 0x0031
+#define mmDAGB0_WRCLI5_BASE_IDX 1
+#define mmDAGB0_WRCLI6 0x0032
+#define mmDAGB0_WRCLI6_BASE_IDX 1
+#define mmDAGB0_WRCLI7 0x0033
+#define mmDAGB0_WRCLI7_BASE_IDX 1
+#define mmDAGB0_WRCLI8 0x0034
+#define mmDAGB0_WRCLI8_BASE_IDX 1
+#define mmDAGB0_WRCLI9 0x0035
+#define mmDAGB0_WRCLI9_BASE_IDX 1
+#define mmDAGB0_WRCLI10 0x0036
+#define mmDAGB0_WRCLI10_BASE_IDX 1
+#define mmDAGB0_WRCLI11 0x0037
+#define mmDAGB0_WRCLI11_BASE_IDX 1
+#define mmDAGB0_WRCLI12 0x0038
+#define mmDAGB0_WRCLI12_BASE_IDX 1
+#define mmDAGB0_WRCLI13 0x0039
+#define mmDAGB0_WRCLI13_BASE_IDX 1
+#define mmDAGB0_WRCLI14 0x003a
+#define mmDAGB0_WRCLI14_BASE_IDX 1
+#define mmDAGB0_WRCLI15 0x003b
+#define mmDAGB0_WRCLI15_BASE_IDX 1
+#define mmDAGB0_WR_CNTL 0x003c
+#define mmDAGB0_WR_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_GMI_CNTL 0x003d
+#define mmDAGB0_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_ADDR_DAGB 0x003e
+#define mmDAGB0_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST 0x003f
+#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER 0x0040
+#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB0_WR_CGTT_CLK_CTRL 0x0041
+#define mmDAGB0_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL 0x0042
+#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL 0x0043
+#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0 0x0044
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0 0x0045
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1 0x0046
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1 0x0047
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB0_WR_DATA_DAGB 0x0048
+#define mmDAGB0_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0 0x0049
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0 0x004a
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1 0x004b
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1 0x004c
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB0_WR_VC0_CNTL 0x004d
+#define mmDAGB0_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC1_CNTL 0x004e
+#define mmDAGB0_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC2_CNTL 0x004f
+#define mmDAGB0_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC3_CNTL 0x0050
+#define mmDAGB0_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC4_CNTL 0x0051
+#define mmDAGB0_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC5_CNTL 0x0052
+#define mmDAGB0_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC6_CNTL 0x0053
+#define mmDAGB0_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_VC7_CNTL 0x0054
+#define mmDAGB0_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB0_WR_CNTL_MISC 0x0055
+#define mmDAGB0_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB0_WR_TLB_CREDIT 0x0056
+#define mmDAGB0_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB0_WR_DATA_CREDIT 0x0057
+#define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB0_WR_MISC_CREDIT 0x0058
+#define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB0_WRCLI_ASK_PENDING 0x005d
+#define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_GO_PENDING 0x005e
+#define mmDAGB0_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_GBLSEND_PENDING 0x005f
+#define mmDAGB0_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_TLB_PENDING 0x0060
+#define mmDAGB0_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_OARB_PENDING 0x0061
+#define mmDAGB0_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_OSD_PENDING 0x0062
+#define mmDAGB0_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_DBUS_ASK_PENDING 0x0063
+#define mmDAGB0_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB0_WRCLI_DBUS_GO_PENDING 0x0064
+#define mmDAGB0_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB0_DAGB_DLY 0x0065
+#define mmDAGB0_DAGB_DLY_BASE_IDX 1
+#define mmDAGB0_CNTL_MISC 0x0066
+#define mmDAGB0_CNTL_MISC_BASE_IDX 1
+#define mmDAGB0_CNTL_MISC2 0x0067
+#define mmDAGB0_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB0_FIFO_EMPTY 0x0068
+#define mmDAGB0_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB0_FIFO_FULL 0x0069
+#define mmDAGB0_FIFO_FULL_BASE_IDX 1
+#define mmDAGB0_WR_CREDITS_FULL 0x006a
+#define mmDAGB0_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB0_RD_CREDITS_FULL 0x006b
+#define mmDAGB0_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB0_PERFCOUNTER_LO 0x006c
+#define mmDAGB0_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB0_PERFCOUNTER_HI 0x006d
+#define mmDAGB0_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB0_PERFCOUNTER0_CFG 0x006e
+#define mmDAGB0_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB0_PERFCOUNTER1_CFG 0x006f
+#define mmDAGB0_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB0_PERFCOUNTER2_CFG 0x0070
+#define mmDAGB0_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB0_PERFCOUNTER_RSLT_CNTL 0x0071
+#define mmDAGB0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB0_RESERVE0 0x0072
+#define mmDAGB0_RESERVE0_BASE_IDX 1
+#define mmDAGB0_RESERVE1 0x0073
+#define mmDAGB0_RESERVE1_BASE_IDX 1
+#define mmDAGB0_RESERVE2 0x0074
+#define mmDAGB0_RESERVE2_BASE_IDX 1
+#define mmDAGB0_RESERVE3 0x0075
+#define mmDAGB0_RESERVE3_BASE_IDX 1
+#define mmDAGB0_RESERVE4 0x0076
+#define mmDAGB0_RESERVE4_BASE_IDX 1
+#define mmDAGB0_RESERVE5 0x0077
+#define mmDAGB0_RESERVE5_BASE_IDX 1
+#define mmDAGB0_RESERVE6 0x0078
+#define mmDAGB0_RESERVE6_BASE_IDX 1
+#define mmDAGB0_RESERVE7 0x0079
+#define mmDAGB0_RESERVE7_BASE_IDX 1
+#define mmDAGB0_RESERVE8 0x007a
+#define mmDAGB0_RESERVE8_BASE_IDX 1
+#define mmDAGB0_RESERVE9 0x007b
+#define mmDAGB0_RESERVE9_BASE_IDX 1
+#define mmDAGB0_RESERVE10 0x007c
+#define mmDAGB0_RESERVE10_BASE_IDX 1
+#define mmDAGB0_RESERVE11 0x007d
+#define mmDAGB0_RESERVE11_BASE_IDX 1
+#define mmDAGB0_RESERVE12 0x007e
+#define mmDAGB0_RESERVE12_BASE_IDX 1
+#define mmDAGB0_RESERVE13 0x007f
+#define mmDAGB0_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_dagb_dagbdec1
+// base address: 0x68200
+#define mmDAGB1_RDCLI0 0x0080
+#define mmDAGB1_RDCLI0_BASE_IDX 1
+#define mmDAGB1_RDCLI1 0x0081
+#define mmDAGB1_RDCLI1_BASE_IDX 1
+#define mmDAGB1_RDCLI2 0x0082
+#define mmDAGB1_RDCLI2_BASE_IDX 1
+#define mmDAGB1_RDCLI3 0x0083
+#define mmDAGB1_RDCLI3_BASE_IDX 1
+#define mmDAGB1_RDCLI4 0x0084
+#define mmDAGB1_RDCLI4_BASE_IDX 1
+#define mmDAGB1_RDCLI5 0x0085
+#define mmDAGB1_RDCLI5_BASE_IDX 1
+#define mmDAGB1_RDCLI6 0x0086
+#define mmDAGB1_RDCLI6_BASE_IDX 1
+#define mmDAGB1_RDCLI7 0x0087
+#define mmDAGB1_RDCLI7_BASE_IDX 1
+#define mmDAGB1_RDCLI8 0x0088
+#define mmDAGB1_RDCLI8_BASE_IDX 1
+#define mmDAGB1_RDCLI9 0x0089
+#define mmDAGB1_RDCLI9_BASE_IDX 1
+#define mmDAGB1_RDCLI10 0x008a
+#define mmDAGB1_RDCLI10_BASE_IDX 1
+#define mmDAGB1_RDCLI11 0x008b
+#define mmDAGB1_RDCLI11_BASE_IDX 1
+#define mmDAGB1_RDCLI12 0x008c
+#define mmDAGB1_RDCLI12_BASE_IDX 1
+#define mmDAGB1_RDCLI13 0x008d
+#define mmDAGB1_RDCLI13_BASE_IDX 1
+#define mmDAGB1_RDCLI14 0x008e
+#define mmDAGB1_RDCLI14_BASE_IDX 1
+#define mmDAGB1_RDCLI15 0x008f
+#define mmDAGB1_RDCLI15_BASE_IDX 1
+#define mmDAGB1_RD_CNTL 0x0090
+#define mmDAGB1_RD_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_GMI_CNTL 0x0091
+#define mmDAGB1_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_ADDR_DAGB 0x0092
+#define mmDAGB1_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST 0x0093
+#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER 0x0094
+#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB1_RD_CGTT_CLK_CTRL 0x0095
+#define mmDAGB1_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL 0x0096
+#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL 0x0097
+#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0 0x0098
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0 0x0099
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1 0x009a
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1 0x009b
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB1_RD_VC0_CNTL 0x009c
+#define mmDAGB1_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC1_CNTL 0x009d
+#define mmDAGB1_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC2_CNTL 0x009e
+#define mmDAGB1_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC3_CNTL 0x009f
+#define mmDAGB1_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC4_CNTL 0x00a0
+#define mmDAGB1_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC5_CNTL 0x00a1
+#define mmDAGB1_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC6_CNTL 0x00a2
+#define mmDAGB1_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_VC7_CNTL 0x00a3
+#define mmDAGB1_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB1_RD_CNTL_MISC 0x00a4
+#define mmDAGB1_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB1_RD_TLB_CREDIT 0x00a5
+#define mmDAGB1_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB1_RDCLI_ASK_PENDING 0x00a6
+#define mmDAGB1_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB1_RDCLI_GO_PENDING 0x00a7
+#define mmDAGB1_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB1_RDCLI_GBLSEND_PENDING 0x00a8
+#define mmDAGB1_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB1_RDCLI_TLB_PENDING 0x00a9
+#define mmDAGB1_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB1_RDCLI_OARB_PENDING 0x00aa
+#define mmDAGB1_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB1_RDCLI_OSD_PENDING 0x00ab
+#define mmDAGB1_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI0 0x00ac
+#define mmDAGB1_WRCLI0_BASE_IDX 1
+#define mmDAGB1_WRCLI1 0x00ad
+#define mmDAGB1_WRCLI1_BASE_IDX 1
+#define mmDAGB1_WRCLI2 0x00ae
+#define mmDAGB1_WRCLI2_BASE_IDX 1
+#define mmDAGB1_WRCLI3 0x00af
+#define mmDAGB1_WRCLI3_BASE_IDX 1
+#define mmDAGB1_WRCLI4 0x00b0
+#define mmDAGB1_WRCLI4_BASE_IDX 1
+#define mmDAGB1_WRCLI5 0x00b1
+#define mmDAGB1_WRCLI5_BASE_IDX 1
+#define mmDAGB1_WRCLI6 0x00b2
+#define mmDAGB1_WRCLI6_BASE_IDX 1
+#define mmDAGB1_WRCLI7 0x00b3
+#define mmDAGB1_WRCLI7_BASE_IDX 1
+#define mmDAGB1_WRCLI8 0x00b4
+#define mmDAGB1_WRCLI8_BASE_IDX 1
+#define mmDAGB1_WRCLI9 0x00b5
+#define mmDAGB1_WRCLI9_BASE_IDX 1
+#define mmDAGB1_WRCLI10 0x00b6
+#define mmDAGB1_WRCLI10_BASE_IDX 1
+#define mmDAGB1_WRCLI11 0x00b7
+#define mmDAGB1_WRCLI11_BASE_IDX 1
+#define mmDAGB1_WRCLI12 0x00b8
+#define mmDAGB1_WRCLI12_BASE_IDX 1
+#define mmDAGB1_WRCLI13 0x00b9
+#define mmDAGB1_WRCLI13_BASE_IDX 1
+#define mmDAGB1_WRCLI14 0x00ba
+#define mmDAGB1_WRCLI14_BASE_IDX 1
+#define mmDAGB1_WRCLI15 0x00bb
+#define mmDAGB1_WRCLI15_BASE_IDX 1
+#define mmDAGB1_WR_CNTL 0x00bc
+#define mmDAGB1_WR_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_GMI_CNTL 0x00bd
+#define mmDAGB1_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_ADDR_DAGB 0x00be
+#define mmDAGB1_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST 0x00bf
+#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER 0x00c0
+#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB1_WR_CGTT_CLK_CTRL 0x00c1
+#define mmDAGB1_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL 0x00c2
+#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL 0x00c3
+#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0 0x00c4
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0 0x00c5
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1 0x00c6
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1 0x00c7
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB1_WR_DATA_DAGB 0x00c8
+#define mmDAGB1_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0 0x00c9
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0 0x00ca
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1 0x00cb
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1 0x00cc
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB1_WR_VC0_CNTL 0x00cd
+#define mmDAGB1_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC1_CNTL 0x00ce
+#define mmDAGB1_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC2_CNTL 0x00cf
+#define mmDAGB1_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC3_CNTL 0x00d0
+#define mmDAGB1_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC4_CNTL 0x00d1
+#define mmDAGB1_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC5_CNTL 0x00d2
+#define mmDAGB1_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC6_CNTL 0x00d3
+#define mmDAGB1_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_VC7_CNTL 0x00d4
+#define mmDAGB1_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB1_WR_CNTL_MISC 0x00d5
+#define mmDAGB1_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB1_WR_TLB_CREDIT 0x00d6
+#define mmDAGB1_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB1_WR_DATA_CREDIT 0x00d7
+#define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB1_WR_MISC_CREDIT 0x00d8
+#define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB1_WRCLI_ASK_PENDING 0x00dd
+#define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_GO_PENDING 0x00de
+#define mmDAGB1_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_GBLSEND_PENDING 0x00df
+#define mmDAGB1_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_TLB_PENDING 0x00e0
+#define mmDAGB1_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_OARB_PENDING 0x00e1
+#define mmDAGB1_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_OSD_PENDING 0x00e2
+#define mmDAGB1_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_DBUS_ASK_PENDING 0x00e3
+#define mmDAGB1_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB1_WRCLI_DBUS_GO_PENDING 0x00e4
+#define mmDAGB1_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB1_DAGB_DLY 0x00e5
+#define mmDAGB1_DAGB_DLY_BASE_IDX 1
+#define mmDAGB1_CNTL_MISC 0x00e6
+#define mmDAGB1_CNTL_MISC_BASE_IDX 1
+#define mmDAGB1_CNTL_MISC2 0x00e7
+#define mmDAGB1_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB1_FIFO_EMPTY 0x00e8
+#define mmDAGB1_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB1_FIFO_FULL 0x00e9
+#define mmDAGB1_FIFO_FULL_BASE_IDX 1
+#define mmDAGB1_WR_CREDITS_FULL 0x00ea
+#define mmDAGB1_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB1_RD_CREDITS_FULL 0x00eb
+#define mmDAGB1_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB1_PERFCOUNTER_LO 0x00ec
+#define mmDAGB1_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB1_PERFCOUNTER_HI 0x00ed
+#define mmDAGB1_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB1_PERFCOUNTER0_CFG 0x00ee
+#define mmDAGB1_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB1_PERFCOUNTER1_CFG 0x00ef
+#define mmDAGB1_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB1_PERFCOUNTER2_CFG 0x00f0
+#define mmDAGB1_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB1_PERFCOUNTER_RSLT_CNTL 0x00f1
+#define mmDAGB1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB1_RESERVE0 0x00f2
+#define mmDAGB1_RESERVE0_BASE_IDX 1
+#define mmDAGB1_RESERVE1 0x00f3
+#define mmDAGB1_RESERVE1_BASE_IDX 1
+#define mmDAGB1_RESERVE2 0x00f4
+#define mmDAGB1_RESERVE2_BASE_IDX 1
+#define mmDAGB1_RESERVE3 0x00f5
+#define mmDAGB1_RESERVE3_BASE_IDX 1
+#define mmDAGB1_RESERVE4 0x00f6
+#define mmDAGB1_RESERVE4_BASE_IDX 1
+#define mmDAGB1_RESERVE5 0x00f7
+#define mmDAGB1_RESERVE5_BASE_IDX 1
+#define mmDAGB1_RESERVE6 0x00f8
+#define mmDAGB1_RESERVE6_BASE_IDX 1
+#define mmDAGB1_RESERVE7 0x00f9
+#define mmDAGB1_RESERVE7_BASE_IDX 1
+#define mmDAGB1_RESERVE8 0x00fa
+#define mmDAGB1_RESERVE8_BASE_IDX 1
+#define mmDAGB1_RESERVE9 0x00fb
+#define mmDAGB1_RESERVE9_BASE_IDX 1
+#define mmDAGB1_RESERVE10 0x00fc
+#define mmDAGB1_RESERVE10_BASE_IDX 1
+#define mmDAGB1_RESERVE11 0x00fd
+#define mmDAGB1_RESERVE11_BASE_IDX 1
+#define mmDAGB1_RESERVE12 0x00fe
+#define mmDAGB1_RESERVE12_BASE_IDX 1
+#define mmDAGB1_RESERVE13 0x00ff
+#define mmDAGB1_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_dagb_dagbdec2
+// base address: 0x68400
+#define mmDAGB2_RDCLI0 0x0100
+#define mmDAGB2_RDCLI0_BASE_IDX 1
+#define mmDAGB2_RDCLI1 0x0101
+#define mmDAGB2_RDCLI1_BASE_IDX 1
+#define mmDAGB2_RDCLI2 0x0102
+#define mmDAGB2_RDCLI2_BASE_IDX 1
+#define mmDAGB2_RDCLI3 0x0103
+#define mmDAGB2_RDCLI3_BASE_IDX 1
+#define mmDAGB2_RDCLI4 0x0104
+#define mmDAGB2_RDCLI4_BASE_IDX 1
+#define mmDAGB2_RDCLI5 0x0105
+#define mmDAGB2_RDCLI5_BASE_IDX 1
+#define mmDAGB2_RDCLI6 0x0106
+#define mmDAGB2_RDCLI6_BASE_IDX 1
+#define mmDAGB2_RDCLI7 0x0107
+#define mmDAGB2_RDCLI7_BASE_IDX 1
+#define mmDAGB2_RDCLI8 0x0108
+#define mmDAGB2_RDCLI8_BASE_IDX 1
+#define mmDAGB2_RDCLI9 0x0109
+#define mmDAGB2_RDCLI9_BASE_IDX 1
+#define mmDAGB2_RDCLI10 0x010a
+#define mmDAGB2_RDCLI10_BASE_IDX 1
+#define mmDAGB2_RDCLI11 0x010b
+#define mmDAGB2_RDCLI11_BASE_IDX 1
+#define mmDAGB2_RDCLI12 0x010c
+#define mmDAGB2_RDCLI12_BASE_IDX 1
+#define mmDAGB2_RDCLI13 0x010d
+#define mmDAGB2_RDCLI13_BASE_IDX 1
+#define mmDAGB2_RDCLI14 0x010e
+#define mmDAGB2_RDCLI14_BASE_IDX 1
+#define mmDAGB2_RDCLI15 0x010f
+#define mmDAGB2_RDCLI15_BASE_IDX 1
+#define mmDAGB2_RD_CNTL 0x0110
+#define mmDAGB2_RD_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_GMI_CNTL 0x0111
+#define mmDAGB2_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_ADDR_DAGB 0x0112
+#define mmDAGB2_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB2_RD_OUTPUT_DAGB_MAX_BURST 0x0113
+#define mmDAGB2_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER 0x0114
+#define mmDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB2_RD_CGTT_CLK_CTRL 0x0115
+#define mmDAGB2_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB2_L1TLB_RD_CGTT_CLK_CTRL 0x0116
+#define mmDAGB2_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB2_ATCVM_RD_CGTT_CLK_CTRL 0x0117
+#define mmDAGB2_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST0 0x0118
+#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER0 0x0119
+#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST1 0x011a
+#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER1 0x011b
+#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB2_RD_VC0_CNTL 0x011c
+#define mmDAGB2_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC1_CNTL 0x011d
+#define mmDAGB2_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC2_CNTL 0x011e
+#define mmDAGB2_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC3_CNTL 0x011f
+#define mmDAGB2_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC4_CNTL 0x0120
+#define mmDAGB2_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC5_CNTL 0x0121
+#define mmDAGB2_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC6_CNTL 0x0122
+#define mmDAGB2_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_VC7_CNTL 0x0123
+#define mmDAGB2_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB2_RD_CNTL_MISC 0x0124
+#define mmDAGB2_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB2_RD_TLB_CREDIT 0x0125
+#define mmDAGB2_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB2_RDCLI_ASK_PENDING 0x0126
+#define mmDAGB2_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB2_RDCLI_GO_PENDING 0x0127
+#define mmDAGB2_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB2_RDCLI_GBLSEND_PENDING 0x0128
+#define mmDAGB2_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB2_RDCLI_TLB_PENDING 0x0129
+#define mmDAGB2_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB2_RDCLI_OARB_PENDING 0x012a
+#define mmDAGB2_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB2_RDCLI_OSD_PENDING 0x012b
+#define mmDAGB2_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI0 0x012c
+#define mmDAGB2_WRCLI0_BASE_IDX 1
+#define mmDAGB2_WRCLI1 0x012d
+#define mmDAGB2_WRCLI1_BASE_IDX 1
+#define mmDAGB2_WRCLI2 0x012e
+#define mmDAGB2_WRCLI2_BASE_IDX 1
+#define mmDAGB2_WRCLI3 0x012f
+#define mmDAGB2_WRCLI3_BASE_IDX 1
+#define mmDAGB2_WRCLI4 0x0130
+#define mmDAGB2_WRCLI4_BASE_IDX 1
+#define mmDAGB2_WRCLI5 0x0131
+#define mmDAGB2_WRCLI5_BASE_IDX 1
+#define mmDAGB2_WRCLI6 0x0132
+#define mmDAGB2_WRCLI6_BASE_IDX 1
+#define mmDAGB2_WRCLI7 0x0133
+#define mmDAGB2_WRCLI7_BASE_IDX 1
+#define mmDAGB2_WRCLI8 0x0134
+#define mmDAGB2_WRCLI8_BASE_IDX 1
+#define mmDAGB2_WRCLI9 0x0135
+#define mmDAGB2_WRCLI9_BASE_IDX 1
+#define mmDAGB2_WRCLI10 0x0136
+#define mmDAGB2_WRCLI10_BASE_IDX 1
+#define mmDAGB2_WRCLI11 0x0137
+#define mmDAGB2_WRCLI11_BASE_IDX 1
+#define mmDAGB2_WRCLI12 0x0138
+#define mmDAGB2_WRCLI12_BASE_IDX 1
+#define mmDAGB2_WRCLI13 0x0139
+#define mmDAGB2_WRCLI13_BASE_IDX 1
+#define mmDAGB2_WRCLI14 0x013a
+#define mmDAGB2_WRCLI14_BASE_IDX 1
+#define mmDAGB2_WRCLI15 0x013b
+#define mmDAGB2_WRCLI15_BASE_IDX 1
+#define mmDAGB2_WR_CNTL 0x013c
+#define mmDAGB2_WR_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_GMI_CNTL 0x013d
+#define mmDAGB2_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_ADDR_DAGB 0x013e
+#define mmDAGB2_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB2_WR_OUTPUT_DAGB_MAX_BURST 0x013f
+#define mmDAGB2_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER 0x0140
+#define mmDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB2_WR_CGTT_CLK_CTRL 0x0141
+#define mmDAGB2_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB2_L1TLB_WR_CGTT_CLK_CTRL 0x0142
+#define mmDAGB2_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB2_ATCVM_WR_CGTT_CLK_CTRL 0x0143
+#define mmDAGB2_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST0 0x0144
+#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER0 0x0145
+#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST1 0x0146
+#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER1 0x0147
+#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB2_WR_DATA_DAGB 0x0148
+#define mmDAGB2_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB2_WR_DATA_DAGB_MAX_BURST0 0x0149
+#define mmDAGB2_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER0 0x014a
+#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB2_WR_DATA_DAGB_MAX_BURST1 0x014b
+#define mmDAGB2_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER1 0x014c
+#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB2_WR_VC0_CNTL 0x014d
+#define mmDAGB2_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC1_CNTL 0x014e
+#define mmDAGB2_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC2_CNTL 0x014f
+#define mmDAGB2_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC3_CNTL 0x0150
+#define mmDAGB2_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC4_CNTL 0x0151
+#define mmDAGB2_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC5_CNTL 0x0152
+#define mmDAGB2_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC6_CNTL 0x0153
+#define mmDAGB2_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_VC7_CNTL 0x0154
+#define mmDAGB2_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB2_WR_CNTL_MISC 0x0155
+#define mmDAGB2_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB2_WR_TLB_CREDIT 0x0156
+#define mmDAGB2_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB2_WR_DATA_CREDIT 0x0157
+#define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB2_WR_MISC_CREDIT 0x0158
+#define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB2_WRCLI_ASK_PENDING 0x015d
+#define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_GO_PENDING 0x015e
+#define mmDAGB2_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_GBLSEND_PENDING 0x015f
+#define mmDAGB2_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_TLB_PENDING 0x0160
+#define mmDAGB2_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_OARB_PENDING 0x0161
+#define mmDAGB2_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_OSD_PENDING 0x0162
+#define mmDAGB2_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_DBUS_ASK_PENDING 0x0163
+#define mmDAGB2_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB2_WRCLI_DBUS_GO_PENDING 0x0164
+#define mmDAGB2_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB2_DAGB_DLY 0x0165
+#define mmDAGB2_DAGB_DLY_BASE_IDX 1
+#define mmDAGB2_CNTL_MISC 0x0166
+#define mmDAGB2_CNTL_MISC_BASE_IDX 1
+#define mmDAGB2_CNTL_MISC2 0x0167
+#define mmDAGB2_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB2_FIFO_EMPTY 0x0168
+#define mmDAGB2_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB2_FIFO_FULL 0x0169
+#define mmDAGB2_FIFO_FULL_BASE_IDX 1
+#define mmDAGB2_WR_CREDITS_FULL 0x016a
+#define mmDAGB2_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB2_RD_CREDITS_FULL 0x016b
+#define mmDAGB2_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB2_PERFCOUNTER_LO 0x016c
+#define mmDAGB2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB2_PERFCOUNTER_HI 0x016d
+#define mmDAGB2_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB2_PERFCOUNTER0_CFG 0x016e
+#define mmDAGB2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB2_PERFCOUNTER1_CFG 0x016f
+#define mmDAGB2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB2_PERFCOUNTER2_CFG 0x0170
+#define mmDAGB2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB2_PERFCOUNTER_RSLT_CNTL 0x0171
+#define mmDAGB2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB2_RESERVE0 0x0172
+#define mmDAGB2_RESERVE0_BASE_IDX 1
+#define mmDAGB2_RESERVE1 0x0173
+#define mmDAGB2_RESERVE1_BASE_IDX 1
+#define mmDAGB2_RESERVE2 0x0174
+#define mmDAGB2_RESERVE2_BASE_IDX 1
+#define mmDAGB2_RESERVE3 0x0175
+#define mmDAGB2_RESERVE3_BASE_IDX 1
+#define mmDAGB2_RESERVE4 0x0176
+#define mmDAGB2_RESERVE4_BASE_IDX 1
+#define mmDAGB2_RESERVE5 0x0177
+#define mmDAGB2_RESERVE5_BASE_IDX 1
+#define mmDAGB2_RESERVE6 0x0178
+#define mmDAGB2_RESERVE6_BASE_IDX 1
+#define mmDAGB2_RESERVE7 0x0179
+#define mmDAGB2_RESERVE7_BASE_IDX 1
+#define mmDAGB2_RESERVE8 0x017a
+#define mmDAGB2_RESERVE8_BASE_IDX 1
+#define mmDAGB2_RESERVE9 0x017b
+#define mmDAGB2_RESERVE9_BASE_IDX 1
+#define mmDAGB2_RESERVE10 0x017c
+#define mmDAGB2_RESERVE10_BASE_IDX 1
+#define mmDAGB2_RESERVE11 0x017d
+#define mmDAGB2_RESERVE11_BASE_IDX 1
+#define mmDAGB2_RESERVE12 0x017e
+#define mmDAGB2_RESERVE12_BASE_IDX 1
+#define mmDAGB2_RESERVE13 0x017f
+#define mmDAGB2_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_dagb_dagbdec3
+// base address: 0x68600
+#define mmDAGB3_RDCLI0 0x0180
+#define mmDAGB3_RDCLI0_BASE_IDX 1
+#define mmDAGB3_RDCLI1 0x0181
+#define mmDAGB3_RDCLI1_BASE_IDX 1
+#define mmDAGB3_RDCLI2 0x0182
+#define mmDAGB3_RDCLI2_BASE_IDX 1
+#define mmDAGB3_RDCLI3 0x0183
+#define mmDAGB3_RDCLI3_BASE_IDX 1
+#define mmDAGB3_RDCLI4 0x0184
+#define mmDAGB3_RDCLI4_BASE_IDX 1
+#define mmDAGB3_RDCLI5 0x0185
+#define mmDAGB3_RDCLI5_BASE_IDX 1
+#define mmDAGB3_RDCLI6 0x0186
+#define mmDAGB3_RDCLI6_BASE_IDX 1
+#define mmDAGB3_RDCLI7 0x0187
+#define mmDAGB3_RDCLI7_BASE_IDX 1
+#define mmDAGB3_RDCLI8 0x0188
+#define mmDAGB3_RDCLI8_BASE_IDX 1
+#define mmDAGB3_RDCLI9 0x0189
+#define mmDAGB3_RDCLI9_BASE_IDX 1
+#define mmDAGB3_RDCLI10 0x018a
+#define mmDAGB3_RDCLI10_BASE_IDX 1
+#define mmDAGB3_RDCLI11 0x018b
+#define mmDAGB3_RDCLI11_BASE_IDX 1
+#define mmDAGB3_RDCLI12 0x018c
+#define mmDAGB3_RDCLI12_BASE_IDX 1
+#define mmDAGB3_RDCLI13 0x018d
+#define mmDAGB3_RDCLI13_BASE_IDX 1
+#define mmDAGB3_RDCLI14 0x018e
+#define mmDAGB3_RDCLI14_BASE_IDX 1
+#define mmDAGB3_RDCLI15 0x018f
+#define mmDAGB3_RDCLI15_BASE_IDX 1
+#define mmDAGB3_RD_CNTL 0x0190
+#define mmDAGB3_RD_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_GMI_CNTL 0x0191
+#define mmDAGB3_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_ADDR_DAGB 0x0192
+#define mmDAGB3_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB3_RD_OUTPUT_DAGB_MAX_BURST 0x0193
+#define mmDAGB3_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER 0x0194
+#define mmDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB3_RD_CGTT_CLK_CTRL 0x0195
+#define mmDAGB3_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB3_L1TLB_RD_CGTT_CLK_CTRL 0x0196
+#define mmDAGB3_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB3_ATCVM_RD_CGTT_CLK_CTRL 0x0197
+#define mmDAGB3_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST0 0x0198
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER0 0x0199
+#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST1 0x019a
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER1 0x019b
+#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB3_RD_VC0_CNTL 0x019c
+#define mmDAGB3_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC1_CNTL 0x019d
+#define mmDAGB3_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC2_CNTL 0x019e
+#define mmDAGB3_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC3_CNTL 0x019f
+#define mmDAGB3_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC4_CNTL 0x01a0
+#define mmDAGB3_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC5_CNTL 0x01a1
+#define mmDAGB3_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC6_CNTL 0x01a2
+#define mmDAGB3_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_VC7_CNTL 0x01a3
+#define mmDAGB3_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB3_RD_CNTL_MISC 0x01a4
+#define mmDAGB3_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB3_RD_TLB_CREDIT 0x01a5
+#define mmDAGB3_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB3_RDCLI_ASK_PENDING 0x01a6
+#define mmDAGB3_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB3_RDCLI_GO_PENDING 0x01a7
+#define mmDAGB3_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB3_RDCLI_GBLSEND_PENDING 0x01a8
+#define mmDAGB3_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB3_RDCLI_TLB_PENDING 0x01a9
+#define mmDAGB3_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB3_RDCLI_OARB_PENDING 0x01aa
+#define mmDAGB3_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB3_RDCLI_OSD_PENDING 0x01ab
+#define mmDAGB3_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI0 0x01ac
+#define mmDAGB3_WRCLI0_BASE_IDX 1
+#define mmDAGB3_WRCLI1 0x01ad
+#define mmDAGB3_WRCLI1_BASE_IDX 1
+#define mmDAGB3_WRCLI2 0x01ae
+#define mmDAGB3_WRCLI2_BASE_IDX 1
+#define mmDAGB3_WRCLI3 0x01af
+#define mmDAGB3_WRCLI3_BASE_IDX 1
+#define mmDAGB3_WRCLI4 0x01b0
+#define mmDAGB3_WRCLI4_BASE_IDX 1
+#define mmDAGB3_WRCLI5 0x01b1
+#define mmDAGB3_WRCLI5_BASE_IDX 1
+#define mmDAGB3_WRCLI6 0x01b2
+#define mmDAGB3_WRCLI6_BASE_IDX 1
+#define mmDAGB3_WRCLI7 0x01b3
+#define mmDAGB3_WRCLI7_BASE_IDX 1
+#define mmDAGB3_WRCLI8 0x01b4
+#define mmDAGB3_WRCLI8_BASE_IDX 1
+#define mmDAGB3_WRCLI9 0x01b5
+#define mmDAGB3_WRCLI9_BASE_IDX 1
+#define mmDAGB3_WRCLI10 0x01b6
+#define mmDAGB3_WRCLI10_BASE_IDX 1
+#define mmDAGB3_WRCLI11 0x01b7
+#define mmDAGB3_WRCLI11_BASE_IDX 1
+#define mmDAGB3_WRCLI12 0x01b8
+#define mmDAGB3_WRCLI12_BASE_IDX 1
+#define mmDAGB3_WRCLI13 0x01b9
+#define mmDAGB3_WRCLI13_BASE_IDX 1
+#define mmDAGB3_WRCLI14 0x01ba
+#define mmDAGB3_WRCLI14_BASE_IDX 1
+#define mmDAGB3_WRCLI15 0x01bb
+#define mmDAGB3_WRCLI15_BASE_IDX 1
+#define mmDAGB3_WR_CNTL 0x01bc
+#define mmDAGB3_WR_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_GMI_CNTL 0x01bd
+#define mmDAGB3_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_ADDR_DAGB 0x01be
+#define mmDAGB3_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB3_WR_OUTPUT_DAGB_MAX_BURST 0x01bf
+#define mmDAGB3_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER 0x01c0
+#define mmDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB3_WR_CGTT_CLK_CTRL 0x01c1
+#define mmDAGB3_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB3_L1TLB_WR_CGTT_CLK_CTRL 0x01c2
+#define mmDAGB3_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB3_ATCVM_WR_CGTT_CLK_CTRL 0x01c3
+#define mmDAGB3_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST0 0x01c4
+#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER0 0x01c5
+#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST1 0x01c6
+#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER1 0x01c7
+#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB3_WR_DATA_DAGB 0x01c8
+#define mmDAGB3_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB3_WR_DATA_DAGB_MAX_BURST0 0x01c9
+#define mmDAGB3_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER0 0x01ca
+#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB3_WR_DATA_DAGB_MAX_BURST1 0x01cb
+#define mmDAGB3_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER1 0x01cc
+#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB3_WR_VC0_CNTL 0x01cd
+#define mmDAGB3_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC1_CNTL 0x01ce
+#define mmDAGB3_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC2_CNTL 0x01cf
+#define mmDAGB3_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC3_CNTL 0x01d0
+#define mmDAGB3_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC4_CNTL 0x01d1
+#define mmDAGB3_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC5_CNTL 0x01d2
+#define mmDAGB3_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC6_CNTL 0x01d3
+#define mmDAGB3_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_VC7_CNTL 0x01d4
+#define mmDAGB3_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB3_WR_CNTL_MISC 0x01d5
+#define mmDAGB3_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB3_WR_TLB_CREDIT 0x01d6
+#define mmDAGB3_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB3_WR_DATA_CREDIT 0x01d7
+#define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB3_WR_MISC_CREDIT 0x01d8
+#define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB3_WRCLI_ASK_PENDING 0x01dd
+#define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_GO_PENDING 0x01de
+#define mmDAGB3_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_GBLSEND_PENDING 0x01df
+#define mmDAGB3_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_TLB_PENDING 0x01e0
+#define mmDAGB3_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_OARB_PENDING 0x01e1
+#define mmDAGB3_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_OSD_PENDING 0x01e2
+#define mmDAGB3_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_DBUS_ASK_PENDING 0x01e3
+#define mmDAGB3_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB3_WRCLI_DBUS_GO_PENDING 0x01e4
+#define mmDAGB3_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB3_DAGB_DLY 0x01e5
+#define mmDAGB3_DAGB_DLY_BASE_IDX 1
+#define mmDAGB3_CNTL_MISC 0x01e6
+#define mmDAGB3_CNTL_MISC_BASE_IDX 1
+#define mmDAGB3_CNTL_MISC2 0x01e7
+#define mmDAGB3_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB3_FIFO_EMPTY 0x01e8
+#define mmDAGB3_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB3_FIFO_FULL 0x01e9
+#define mmDAGB3_FIFO_FULL_BASE_IDX 1
+#define mmDAGB3_WR_CREDITS_FULL 0x01ea
+#define mmDAGB3_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB3_RD_CREDITS_FULL 0x01eb
+#define mmDAGB3_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB3_PERFCOUNTER_LO 0x01ec
+#define mmDAGB3_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB3_PERFCOUNTER_HI 0x01ed
+#define mmDAGB3_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB3_PERFCOUNTER0_CFG 0x01ee
+#define mmDAGB3_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB3_PERFCOUNTER1_CFG 0x01ef
+#define mmDAGB3_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB3_PERFCOUNTER2_CFG 0x01f0
+#define mmDAGB3_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB3_PERFCOUNTER_RSLT_CNTL 0x01f1
+#define mmDAGB3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB3_RESERVE0 0x01f2
+#define mmDAGB3_RESERVE0_BASE_IDX 1
+#define mmDAGB3_RESERVE1 0x01f3
+#define mmDAGB3_RESERVE1_BASE_IDX 1
+#define mmDAGB3_RESERVE2 0x01f4
+#define mmDAGB3_RESERVE2_BASE_IDX 1
+#define mmDAGB3_RESERVE3 0x01f5
+#define mmDAGB3_RESERVE3_BASE_IDX 1
+#define mmDAGB3_RESERVE4 0x01f6
+#define mmDAGB3_RESERVE4_BASE_IDX 1
+#define mmDAGB3_RESERVE5 0x01f7
+#define mmDAGB3_RESERVE5_BASE_IDX 1
+#define mmDAGB3_RESERVE6 0x01f8
+#define mmDAGB3_RESERVE6_BASE_IDX 1
+#define mmDAGB3_RESERVE7 0x01f9
+#define mmDAGB3_RESERVE7_BASE_IDX 1
+#define mmDAGB3_RESERVE8 0x01fa
+#define mmDAGB3_RESERVE8_BASE_IDX 1
+#define mmDAGB3_RESERVE9 0x01fb
+#define mmDAGB3_RESERVE9_BASE_IDX 1
+#define mmDAGB3_RESERVE10 0x01fc
+#define mmDAGB3_RESERVE10_BASE_IDX 1
+#define mmDAGB3_RESERVE11 0x01fd
+#define mmDAGB3_RESERVE11_BASE_IDX 1
+#define mmDAGB3_RESERVE12 0x01fe
+#define mmDAGB3_RESERVE12_BASE_IDX 1
+#define mmDAGB3_RESERVE13 0x01ff
+#define mmDAGB3_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_dagb_dagbdec4
+// base address: 0x68800
+#define mmDAGB4_RDCLI0 0x0200
+#define mmDAGB4_RDCLI0_BASE_IDX 1
+#define mmDAGB4_RDCLI1 0x0201
+#define mmDAGB4_RDCLI1_BASE_IDX 1
+#define mmDAGB4_RDCLI2 0x0202
+#define mmDAGB4_RDCLI2_BASE_IDX 1
+#define mmDAGB4_RDCLI3 0x0203
+#define mmDAGB4_RDCLI3_BASE_IDX 1
+#define mmDAGB4_RDCLI4 0x0204
+#define mmDAGB4_RDCLI4_BASE_IDX 1
+#define mmDAGB4_RDCLI5 0x0205
+#define mmDAGB4_RDCLI5_BASE_IDX 1
+#define mmDAGB4_RDCLI6 0x0206
+#define mmDAGB4_RDCLI6_BASE_IDX 1
+#define mmDAGB4_RDCLI7 0x0207
+#define mmDAGB4_RDCLI7_BASE_IDX 1
+#define mmDAGB4_RDCLI8 0x0208
+#define mmDAGB4_RDCLI8_BASE_IDX 1
+#define mmDAGB4_RDCLI9 0x0209
+#define mmDAGB4_RDCLI9_BASE_IDX 1
+#define mmDAGB4_RDCLI10 0x020a
+#define mmDAGB4_RDCLI10_BASE_IDX 1
+#define mmDAGB4_RDCLI11 0x020b
+#define mmDAGB4_RDCLI11_BASE_IDX 1
+#define mmDAGB4_RDCLI12 0x020c
+#define mmDAGB4_RDCLI12_BASE_IDX 1
+#define mmDAGB4_RDCLI13 0x020d
+#define mmDAGB4_RDCLI13_BASE_IDX 1
+#define mmDAGB4_RDCLI14 0x020e
+#define mmDAGB4_RDCLI14_BASE_IDX 1
+#define mmDAGB4_RDCLI15 0x020f
+#define mmDAGB4_RDCLI15_BASE_IDX 1
+#define mmDAGB4_RD_CNTL 0x0210
+#define mmDAGB4_RD_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_GMI_CNTL 0x0211
+#define mmDAGB4_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_ADDR_DAGB 0x0212
+#define mmDAGB4_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB4_RD_OUTPUT_DAGB_MAX_BURST 0x0213
+#define mmDAGB4_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER 0x0214
+#define mmDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB4_RD_CGTT_CLK_CTRL 0x0215
+#define mmDAGB4_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB4_L1TLB_RD_CGTT_CLK_CTRL 0x0216
+#define mmDAGB4_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB4_ATCVM_RD_CGTT_CLK_CTRL 0x0217
+#define mmDAGB4_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST0 0x0218
+#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER0 0x0219
+#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST1 0x021a
+#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER1 0x021b
+#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB4_RD_VC0_CNTL 0x021c
+#define mmDAGB4_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC1_CNTL 0x021d
+#define mmDAGB4_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC2_CNTL 0x021e
+#define mmDAGB4_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC3_CNTL 0x021f
+#define mmDAGB4_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC4_CNTL 0x0220
+#define mmDAGB4_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC5_CNTL 0x0221
+#define mmDAGB4_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC6_CNTL 0x0222
+#define mmDAGB4_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_VC7_CNTL 0x0223
+#define mmDAGB4_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB4_RD_CNTL_MISC 0x0224
+#define mmDAGB4_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB4_RD_TLB_CREDIT 0x0225
+#define mmDAGB4_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB4_RDCLI_ASK_PENDING 0x0226
+#define mmDAGB4_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB4_RDCLI_GO_PENDING 0x0227
+#define mmDAGB4_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB4_RDCLI_GBLSEND_PENDING 0x0228
+#define mmDAGB4_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB4_RDCLI_TLB_PENDING 0x0229
+#define mmDAGB4_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB4_RDCLI_OARB_PENDING 0x022a
+#define mmDAGB4_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB4_RDCLI_OSD_PENDING 0x022b
+#define mmDAGB4_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI0 0x022c
+#define mmDAGB4_WRCLI0_BASE_IDX 1
+#define mmDAGB4_WRCLI1 0x022d
+#define mmDAGB4_WRCLI1_BASE_IDX 1
+#define mmDAGB4_WRCLI2 0x022e
+#define mmDAGB4_WRCLI2_BASE_IDX 1
+#define mmDAGB4_WRCLI3 0x022f
+#define mmDAGB4_WRCLI3_BASE_IDX 1
+#define mmDAGB4_WRCLI4 0x0230
+#define mmDAGB4_WRCLI4_BASE_IDX 1
+#define mmDAGB4_WRCLI5 0x0231
+#define mmDAGB4_WRCLI5_BASE_IDX 1
+#define mmDAGB4_WRCLI6 0x0232
+#define mmDAGB4_WRCLI6_BASE_IDX 1
+#define mmDAGB4_WRCLI7 0x0233
+#define mmDAGB4_WRCLI7_BASE_IDX 1
+#define mmDAGB4_WRCLI8 0x0234
+#define mmDAGB4_WRCLI8_BASE_IDX 1
+#define mmDAGB4_WRCLI9 0x0235
+#define mmDAGB4_WRCLI9_BASE_IDX 1
+#define mmDAGB4_WRCLI10 0x0236
+#define mmDAGB4_WRCLI10_BASE_IDX 1
+#define mmDAGB4_WRCLI11 0x0237
+#define mmDAGB4_WRCLI11_BASE_IDX 1
+#define mmDAGB4_WRCLI12 0x0238
+#define mmDAGB4_WRCLI12_BASE_IDX 1
+#define mmDAGB4_WRCLI13 0x0239
+#define mmDAGB4_WRCLI13_BASE_IDX 1
+#define mmDAGB4_WRCLI14 0x023a
+#define mmDAGB4_WRCLI14_BASE_IDX 1
+#define mmDAGB4_WRCLI15 0x023b
+#define mmDAGB4_WRCLI15_BASE_IDX 1
+#define mmDAGB4_WR_CNTL 0x023c
+#define mmDAGB4_WR_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_GMI_CNTL 0x023d
+#define mmDAGB4_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_ADDR_DAGB 0x023e
+#define mmDAGB4_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB4_WR_OUTPUT_DAGB_MAX_BURST 0x023f
+#define mmDAGB4_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER 0x0240
+#define mmDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB4_WR_CGTT_CLK_CTRL 0x0241
+#define mmDAGB4_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB4_L1TLB_WR_CGTT_CLK_CTRL 0x0242
+#define mmDAGB4_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB4_ATCVM_WR_CGTT_CLK_CTRL 0x0243
+#define mmDAGB4_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST0 0x0244
+#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER0 0x0245
+#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST1 0x0246
+#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER1 0x0247
+#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB4_WR_DATA_DAGB 0x0248
+#define mmDAGB4_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB4_WR_DATA_DAGB_MAX_BURST0 0x0249
+#define mmDAGB4_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER0 0x024a
+#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB4_WR_DATA_DAGB_MAX_BURST1 0x024b
+#define mmDAGB4_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER1 0x024c
+#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB4_WR_VC0_CNTL 0x024d
+#define mmDAGB4_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC1_CNTL 0x024e
+#define mmDAGB4_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC2_CNTL 0x024f
+#define mmDAGB4_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC3_CNTL 0x0250
+#define mmDAGB4_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC4_CNTL 0x0251
+#define mmDAGB4_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC5_CNTL 0x0252
+#define mmDAGB4_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC6_CNTL 0x0253
+#define mmDAGB4_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_VC7_CNTL 0x0254
+#define mmDAGB4_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB4_WR_CNTL_MISC 0x0255
+#define mmDAGB4_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB4_WR_TLB_CREDIT 0x0256
+#define mmDAGB4_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB4_WR_DATA_CREDIT 0x0257
+#define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB4_WR_MISC_CREDIT 0x0258
+#define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB4_WRCLI_ASK_PENDING 0x025d
+#define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_GO_PENDING 0x025e
+#define mmDAGB4_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_GBLSEND_PENDING 0x025f
+#define mmDAGB4_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_TLB_PENDING 0x0260
+#define mmDAGB4_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_OARB_PENDING 0x0261
+#define mmDAGB4_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_OSD_PENDING 0x0262
+#define mmDAGB4_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_DBUS_ASK_PENDING 0x0263
+#define mmDAGB4_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB4_WRCLI_DBUS_GO_PENDING 0x0264
+#define mmDAGB4_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB4_DAGB_DLY 0x0265
+#define mmDAGB4_DAGB_DLY_BASE_IDX 1
+#define mmDAGB4_CNTL_MISC 0x0266
+#define mmDAGB4_CNTL_MISC_BASE_IDX 1
+#define mmDAGB4_CNTL_MISC2 0x0267
+#define mmDAGB4_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB4_FIFO_EMPTY 0x0268
+#define mmDAGB4_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB4_FIFO_FULL 0x0269
+#define mmDAGB4_FIFO_FULL_BASE_IDX 1
+#define mmDAGB4_WR_CREDITS_FULL 0x026a
+#define mmDAGB4_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB4_RD_CREDITS_FULL 0x026b
+#define mmDAGB4_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB4_PERFCOUNTER_LO 0x026c
+#define mmDAGB4_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB4_PERFCOUNTER_HI 0x026d
+#define mmDAGB4_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB4_PERFCOUNTER0_CFG 0x026e
+#define mmDAGB4_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB4_PERFCOUNTER1_CFG 0x026f
+#define mmDAGB4_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB4_PERFCOUNTER2_CFG 0x0270
+#define mmDAGB4_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB4_PERFCOUNTER_RSLT_CNTL 0x0271
+#define mmDAGB4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB4_RESERVE0 0x0272
+#define mmDAGB4_RESERVE0_BASE_IDX 1
+#define mmDAGB4_RESERVE1 0x0273
+#define mmDAGB4_RESERVE1_BASE_IDX 1
+#define mmDAGB4_RESERVE2 0x0274
+#define mmDAGB4_RESERVE2_BASE_IDX 1
+#define mmDAGB4_RESERVE3 0x0275
+#define mmDAGB4_RESERVE3_BASE_IDX 1
+#define mmDAGB4_RESERVE4 0x0276
+#define mmDAGB4_RESERVE4_BASE_IDX 1
+#define mmDAGB4_RESERVE5 0x0277
+#define mmDAGB4_RESERVE5_BASE_IDX 1
+#define mmDAGB4_RESERVE6 0x0278
+#define mmDAGB4_RESERVE6_BASE_IDX 1
+#define mmDAGB4_RESERVE7 0x0279
+#define mmDAGB4_RESERVE7_BASE_IDX 1
+#define mmDAGB4_RESERVE8 0x027a
+#define mmDAGB4_RESERVE8_BASE_IDX 1
+#define mmDAGB4_RESERVE9 0x027b
+#define mmDAGB4_RESERVE9_BASE_IDX 1
+#define mmDAGB4_RESERVE10 0x027c
+#define mmDAGB4_RESERVE10_BASE_IDX 1
+#define mmDAGB4_RESERVE11 0x027d
+#define mmDAGB4_RESERVE11_BASE_IDX 1
+#define mmDAGB4_RESERVE12 0x027e
+#define mmDAGB4_RESERVE12_BASE_IDX 1
+#define mmDAGB4_RESERVE13 0x027f
+#define mmDAGB4_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec0
+// base address: 0x68a00
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0 0x0280
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1 0x0281
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0 0x0282
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1 0x0283
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_GRP2VC_MAP 0x0284
+#define mmMMEA0_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_GRP2VC_MAP 0x0285
+#define mmMMEA0_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_LAZY 0x0286
+#define mmMMEA0_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_LAZY 0x0287
+#define mmMMEA0_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_CAM_CNTL 0x0288
+#define mmMMEA0_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_CAM_CNTL 0x0289
+#define mmMMEA0_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA0_DRAM_PAGE_BURST 0x028a
+#define mmMMEA0_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_AGE 0x028b
+#define mmMMEA0_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_AGE 0x028c
+#define mmMMEA0_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_QUEUING 0x028d
+#define mmMMEA0_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_QUEUING 0x028e
+#define mmMMEA0_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_FIXED 0x028f
+#define mmMMEA0_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_FIXED 0x0290
+#define mmMMEA0_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_URGENCY 0x0291
+#define mmMMEA0_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_URGENCY 0x0292
+#define mmMMEA0_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1 0x0293
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2 0x0294
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3 0x0295
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1 0x0296
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2 0x0297
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3 0x0298
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA0_GMI_RD_CLI2GRP_MAP0 0x0299
+#define mmMMEA0_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA0_GMI_RD_CLI2GRP_MAP1 0x029a
+#define mmMMEA0_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA0_GMI_WR_CLI2GRP_MAP0 0x029b
+#define mmMMEA0_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA0_GMI_WR_CLI2GRP_MAP1 0x029c
+#define mmMMEA0_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA0_GMI_RD_GRP2VC_MAP 0x029d
+#define mmMMEA0_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA0_GMI_WR_GRP2VC_MAP 0x029e
+#define mmMMEA0_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA0_GMI_RD_LAZY 0x029f
+#define mmMMEA0_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA0_GMI_WR_LAZY 0x02a0
+#define mmMMEA0_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA0_GMI_RD_CAM_CNTL 0x02a1
+#define mmMMEA0_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA0_GMI_WR_CAM_CNTL 0x02a2
+#define mmMMEA0_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA0_GMI_PAGE_BURST 0x02a3
+#define mmMMEA0_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_AGE 0x02a4
+#define mmMMEA0_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_AGE 0x02a5
+#define mmMMEA0_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_QUEUING 0x02a6
+#define mmMMEA0_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_QUEUING 0x02a7
+#define mmMMEA0_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_FIXED 0x02a8
+#define mmMMEA0_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_FIXED 0x02a9
+#define mmMMEA0_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_URGENCY 0x02aa
+#define mmMMEA0_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_URGENCY 0x02ab
+#define mmMMEA0_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_URGENCY_MASKING 0x02ac
+#define mmMMEA0_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_URGENCY_MASKING 0x02ad
+#define mmMMEA0_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI1 0x02ae
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI2 0x02af
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI3 0x02b0
+#define mmMMEA0_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI1 0x02b1
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI2 0x02b2
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI3 0x02b3
+#define mmMMEA0_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_BASE_ADDR0 0x02b4
+#define mmMMEA0_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR0 0x02b5
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_BASE_ADDR1 0x02b6
+#define mmMMEA0_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR1 0x02b7
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR1 0x02b8
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_BASE_ADDR2 0x02b9
+#define mmMMEA0_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR2 0x02ba
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_BASE_ADDR3 0x02bb
+#define mmMMEA0_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR3 0x02bc
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR3 0x02bd
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_BASE_ADDR4 0x02be
+#define mmMMEA0_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR4 0x02bf
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_BASE_ADDR5 0x02c0
+#define mmMMEA0_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR5 0x02c1
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR5 0x02c2
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL 0x02c3
+#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA0_ADDRNORMGMI_HOLE_CNTL 0x02c4
+#define mmMMEA0_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x02c5
+#define mmMMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG 0x02c6
+#define mmMMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA0_ADDRDEC_BANK_CFG 0x02c7
+#define mmMMEA0_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA0_ADDRDEC_MISC_CFG 0x02c8
+#define mmMMEA0_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0 0x02c9
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1 0x02ca
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2 0x02cb
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3 0x02cc
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4 0x02cd
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK5 0x02ce
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC 0x02cf
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2 0x02d0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0 0x02d1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1 0x02d2
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE 0x02d3
+#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK0 0x02d4
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK1 0x02d5
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK2 0x02d6
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK3 0x02d7
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK4 0x02d8
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK5 0x02d9
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC 0x02da
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC2 0x02db
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS0 0x02dc
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS1 0x02dd
+#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA0_ADDRDECGMI_HARVEST_ENABLE 0x02de
+#define mmMMEA0_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0 0x02df
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1 0x02e0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2 0x02e1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3 0x02e2
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0 0x02e3
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1 0x02e4
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2 0x02e5
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3 0x02e6
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01 0x02e7
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23 0x02e8
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01 0x02e9
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23 0x02ea
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01 0x02eb
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23 0x02ec
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01 0x02ed
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23 0x02ee
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS01 0x02ef
+#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS23 0x02f0
+#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01 0x02f1
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23 0x02f2
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01 0x02f3
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23 0x02f4
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS01 0x02f5
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS23 0x02f6
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01 0x02f7
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23 0x02f8
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0 0x02f9
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1 0x02fa
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2 0x02fb
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3 0x02fc
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0 0x02fd
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1 0x02fe
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2 0x02ff
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3 0x0300
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01 0x0301
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23 0x0302
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01 0x0303
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23 0x0304
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01 0x0305
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23 0x0306
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01 0x0307
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23 0x0308
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS01 0x0309
+#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS23 0x030a
+#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01 0x030b
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23 0x030c
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01 0x030d
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23 0x030e
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS01 0x030f
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS23 0x0310
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01 0x0311
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23 0x0312
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS0 0x0313
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS1 0x0314
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS2 0x0315
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS3 0x0316
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS0 0x0317
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS1 0x0318
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS2 0x0319
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS3 0x031a
+#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS01 0x031b
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS23 0x031c
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS01 0x031d
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS23 0x031e
+#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS01 0x031f
+#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS23 0x0320
+#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS01 0x0321
+#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS23 0x0322
+#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS01 0x0323
+#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS23 0x0324
+#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS01 0x0325
+#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS23 0x0326
+#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS01 0x0327
+#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS23 0x0328
+#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_RM_SEL_CS01 0x0329
+#define mmMMEA0_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_RM_SEL_CS23 0x032a
+#define mmMMEA0_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS01 0x032b
+#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS23 0x032c
+#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA0_ADDRNORMDRAM_GLOBAL_CNTL 0x032d
+#define mmMMEA0_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA0_ADDRNORMGMI_GLOBAL_CNTL 0x032e
+#define mmMMEA0_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA0_IO_RD_CLI2GRP_MAP0 0x0355
+#define mmMMEA0_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA0_IO_RD_CLI2GRP_MAP1 0x0356
+#define mmMMEA0_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA0_IO_WR_CLI2GRP_MAP0 0x0357
+#define mmMMEA0_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA0_IO_WR_CLI2GRP_MAP1 0x0358
+#define mmMMEA0_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA0_IO_RD_COMBINE_FLUSH 0x0359
+#define mmMMEA0_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA0_IO_WR_COMBINE_FLUSH 0x035a
+#define mmMMEA0_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA0_IO_GROUP_BURST 0x035b
+#define mmMMEA0_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_AGE 0x035c
+#define mmMMEA0_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_AGE 0x035d
+#define mmMMEA0_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_QUEUING 0x035e
+#define mmMMEA0_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_QUEUING 0x035f
+#define mmMMEA0_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_FIXED 0x0360
+#define mmMMEA0_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_FIXED 0x0361
+#define mmMMEA0_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_URGENCY 0x0362
+#define mmMMEA0_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_URGENCY 0x0363
+#define mmMMEA0_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_URGENCY_MASKING 0x0364
+#define mmMMEA0_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_URGENCY_MASKING 0x0365
+#define mmMMEA0_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI1 0x0366
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI2 0x0367
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI3 0x0368
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI1 0x0369
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI2 0x036a
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI3 0x036b
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA0_SDP_ARB_DRAM 0x036c
+#define mmMMEA0_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA0_SDP_ARB_GMI 0x036d
+#define mmMMEA0_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA0_SDP_ARB_FINAL 0x036e
+#define mmMMEA0_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA0_SDP_DRAM_PRIORITY 0x036f
+#define mmMMEA0_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA0_SDP_GMI_PRIORITY 0x0370
+#define mmMMEA0_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA0_SDP_IO_PRIORITY 0x0371
+#define mmMMEA0_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA0_SDP_CREDITS 0x0372
+#define mmMMEA0_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA0_SDP_TAG_RESERVE0 0x0373
+#define mmMMEA0_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA0_SDP_TAG_RESERVE1 0x0374
+#define mmMMEA0_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA0_SDP_VCC_RESERVE0 0x0375
+#define mmMMEA0_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA0_SDP_VCC_RESERVE1 0x0376
+#define mmMMEA0_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA0_SDP_VCD_RESERVE0 0x0377
+#define mmMMEA0_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA0_SDP_VCD_RESERVE1 0x0378
+#define mmMMEA0_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA0_SDP_REQ_CNTL 0x0379
+#define mmMMEA0_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA0_MISC 0x037a
+#define mmMMEA0_MISC_BASE_IDX 1
+#define mmMMEA0_LATENCY_SAMPLING 0x037b
+#define mmMMEA0_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA0_PERFCOUNTER_LO 0x037c
+#define mmMMEA0_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA0_PERFCOUNTER_HI 0x037d
+#define mmMMEA0_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA0_PERFCOUNTER0_CFG 0x037e
+#define mmMMEA0_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA0_PERFCOUNTER1_CFG 0x037f
+#define mmMMEA0_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA0_PERFCOUNTER_RSLT_CNTL 0x0380
+#define mmMMEA0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA0_EDC_CNT 0x0386
+#define mmMMEA0_EDC_CNT_BASE_IDX 1
+#define mmMMEA0_EDC_CNT2 0x0387
+#define mmMMEA0_EDC_CNT2_BASE_IDX 1
+#define mmMMEA0_DSM_CNTL 0x0388
+#define mmMMEA0_DSM_CNTL_BASE_IDX 1
+#define mmMMEA0_DSM_CNTLA 0x0389
+#define mmMMEA0_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA0_DSM_CNTLB 0x038a
+#define mmMMEA0_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA0_DSM_CNTL2 0x038b
+#define mmMMEA0_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA0_DSM_CNTL2A 0x038c
+#define mmMMEA0_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA0_DSM_CNTL2B 0x038d
+#define mmMMEA0_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA0_CGTT_CLK_CTRL 0x038f
+#define mmMMEA0_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA0_EDC_MODE 0x0390
+#define mmMMEA0_EDC_MODE_BASE_IDX 1
+#define mmMMEA0_ERR_STATUS 0x0391
+#define mmMMEA0_ERR_STATUS_BASE_IDX 1
+#define mmMMEA0_MISC2 0x0392
+#define mmMMEA0_MISC2_BASE_IDX 1
+#define mmMMEA0_ADDRDEC_SELECT 0x0393
+#define mmMMEA0_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA0_EDC_CNT3 0x0394
+#define mmMMEA0_EDC_CNT3_BASE_IDX 1
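+/*
+ * Illustrative editor's note, not part of the generated register list:
+ * these paired defines are normally consumed through the SOC15-style
+ * helpers, where the *_BASE_IDX value picks a segment base out of the
+ * per-IP reg_offset table and the plain define supplies the dword
+ * offset within that segment. A minimal sketch, assuming the usual
+ * amdgpu access pattern:
+ *
+ *   u32 ea0_edc_cnt = RREG32(adev->reg_offset[MMHUB_HWIP][0]
+ *                            [mmMMEA0_EDC_CNT_BASE_IDX]
+ *                            + mmMMEA0_EDC_CNT);
+ *
+ * which is what the RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT) convenience
+ * macro expands to.
+ */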
+
+
+// addressBlock: mmhub_ea_mmeadec1
+// base address: 0x68f00
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0 0x03c0
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1 0x03c1
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0 0x03c2
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1 0x03c3
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_GRP2VC_MAP 0x03c4
+#define mmMMEA1_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_GRP2VC_MAP 0x03c5
+#define mmMMEA1_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_LAZY 0x03c6
+#define mmMMEA1_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_LAZY 0x03c7
+#define mmMMEA1_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_CAM_CNTL 0x03c8
+#define mmMMEA1_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_CAM_CNTL 0x03c9
+#define mmMMEA1_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA1_DRAM_PAGE_BURST 0x03ca
+#define mmMMEA1_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_AGE 0x03cb
+#define mmMMEA1_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_AGE 0x03cc
+#define mmMMEA1_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_QUEUING 0x03cd
+#define mmMMEA1_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_QUEUING 0x03ce
+#define mmMMEA1_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_FIXED 0x03cf
+#define mmMMEA1_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_FIXED 0x03d0
+#define mmMMEA1_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_URGENCY 0x03d1
+#define mmMMEA1_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_URGENCY 0x03d2
+#define mmMMEA1_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1 0x03d3
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2 0x03d4
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3 0x03d5
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1 0x03d6
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2 0x03d7
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3 0x03d8
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA1_GMI_RD_CLI2GRP_MAP0 0x03d9
+#define mmMMEA1_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA1_GMI_RD_CLI2GRP_MAP1 0x03da
+#define mmMMEA1_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA1_GMI_WR_CLI2GRP_MAP0 0x03db
+#define mmMMEA1_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA1_GMI_WR_CLI2GRP_MAP1 0x03dc
+#define mmMMEA1_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA1_GMI_RD_GRP2VC_MAP 0x03dd
+#define mmMMEA1_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA1_GMI_WR_GRP2VC_MAP 0x03de
+#define mmMMEA1_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA1_GMI_RD_LAZY 0x03df
+#define mmMMEA1_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA1_GMI_WR_LAZY 0x03e0
+#define mmMMEA1_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA1_GMI_RD_CAM_CNTL 0x03e1
+#define mmMMEA1_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA1_GMI_WR_CAM_CNTL 0x03e2
+#define mmMMEA1_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA1_GMI_PAGE_BURST 0x03e3
+#define mmMMEA1_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_AGE 0x03e4
+#define mmMMEA1_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_AGE 0x03e5
+#define mmMMEA1_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_QUEUING 0x03e6
+#define mmMMEA1_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_QUEUING 0x03e7
+#define mmMMEA1_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_FIXED 0x03e8
+#define mmMMEA1_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_FIXED 0x03e9
+#define mmMMEA1_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_URGENCY 0x03ea
+#define mmMMEA1_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_URGENCY 0x03eb
+#define mmMMEA1_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_URGENCY_MASKING 0x03ec
+#define mmMMEA1_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_URGENCY_MASKING 0x03ed
+#define mmMMEA1_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI1 0x03ee
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI2 0x03ef
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI3 0x03f0
+#define mmMMEA1_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI1 0x03f1
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI2 0x03f2
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI3 0x03f3
+#define mmMMEA1_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_BASE_ADDR0 0x03f4
+#define mmMMEA1_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR0 0x03f5
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_BASE_ADDR1 0x03f6
+#define mmMMEA1_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR1 0x03f7
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR1 0x03f8
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_BASE_ADDR2 0x03f9
+#define mmMMEA1_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR2 0x03fa
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_BASE_ADDR3 0x03fb
+#define mmMMEA1_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR3 0x03fc
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR3 0x03fd
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_BASE_ADDR4 0x03fe
+#define mmMMEA1_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR4 0x03ff
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_BASE_ADDR5 0x0400
+#define mmMMEA1_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR5 0x0401
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR5 0x0402
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL 0x0403
+#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA1_ADDRNORMGMI_HOLE_CNTL 0x0404
+#define mmMMEA1_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x0405
+#define mmMMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG 0x0406
+#define mmMMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA1_ADDRDEC_BANK_CFG 0x0407
+#define mmMMEA1_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA1_ADDRDEC_MISC_CFG 0x0408
+#define mmMMEA1_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0 0x0409
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1 0x040a
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2 0x040b
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3 0x040c
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4 0x040d
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK5 0x040e
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC 0x040f
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2 0x0410
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0 0x0411
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1 0x0412
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE 0x0413
+#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK0 0x0414
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK1 0x0415
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK2 0x0416
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK3 0x0417
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK4 0x0418
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK5 0x0419
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC 0x041a
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC2 0x041b
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS0 0x041c
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS1 0x041d
+#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA1_ADDRDECGMI_HARVEST_ENABLE 0x041e
+#define mmMMEA1_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0 0x041f
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1 0x0420
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2 0x0421
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3 0x0422
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0 0x0423
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1 0x0424
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2 0x0425
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3 0x0426
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01 0x0427
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23 0x0428
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01 0x0429
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23 0x042a
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01 0x042b
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23 0x042c
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01 0x042d
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23 0x042e
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS01 0x042f
+#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS23 0x0430
+#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01 0x0431
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23 0x0432
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01 0x0433
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23 0x0434
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS01 0x0435
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS23 0x0436
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01 0x0437
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23 0x0438
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0 0x0439
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1 0x043a
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2 0x043b
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3 0x043c
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0 0x043d
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1 0x043e
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2 0x043f
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3 0x0440
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01 0x0441
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23 0x0442
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01 0x0443
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23 0x0444
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01 0x0445
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23 0x0446
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01 0x0447
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23 0x0448
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS01 0x0449
+#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS23 0x044a
+#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01 0x044b
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23 0x044c
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01 0x044d
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23 0x044e
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS01 0x044f
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS23 0x0450
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01 0x0451
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23 0x0452
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS0 0x0453
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS1 0x0454
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS2 0x0455
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS3 0x0456
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS0 0x0457
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS1 0x0458
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS2 0x0459
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS3 0x045a
+#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS01 0x045b
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS23 0x045c
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS01 0x045d
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS23 0x045e
+#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS01 0x045f
+#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS23 0x0460
+#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS01 0x0461
+#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS23 0x0462
+#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS01 0x0463
+#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS23 0x0464
+#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS01 0x0465
+#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS23 0x0466
+#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS01 0x0467
+#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS23 0x0468
+#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_RM_SEL_CS01 0x0469
+#define mmMMEA1_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_RM_SEL_CS23 0x046a
+#define mmMMEA1_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS01 0x046b
+#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS23 0x046c
+#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA1_ADDRNORMDRAM_GLOBAL_CNTL 0x046d
+#define mmMMEA1_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA1_ADDRNORMGMI_GLOBAL_CNTL 0x046e
+#define mmMMEA1_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA1_IO_RD_CLI2GRP_MAP0 0x0495
+#define mmMMEA1_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA1_IO_RD_CLI2GRP_MAP1 0x0496
+#define mmMMEA1_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA1_IO_WR_CLI2GRP_MAP0 0x0497
+#define mmMMEA1_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA1_IO_WR_CLI2GRP_MAP1 0x0498
+#define mmMMEA1_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA1_IO_RD_COMBINE_FLUSH 0x0499
+#define mmMMEA1_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA1_IO_WR_COMBINE_FLUSH 0x049a
+#define mmMMEA1_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA1_IO_GROUP_BURST 0x049b
+#define mmMMEA1_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_AGE 0x049c
+#define mmMMEA1_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_AGE 0x049d
+#define mmMMEA1_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_QUEUING 0x049e
+#define mmMMEA1_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_QUEUING 0x049f
+#define mmMMEA1_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_FIXED 0x04a0
+#define mmMMEA1_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_FIXED 0x04a1
+#define mmMMEA1_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_URGENCY 0x04a2
+#define mmMMEA1_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_URGENCY 0x04a3
+#define mmMMEA1_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_URGENCY_MASKING 0x04a4
+#define mmMMEA1_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_URGENCY_MASKING 0x04a5
+#define mmMMEA1_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI1 0x04a6
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI2 0x04a7
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI3 0x04a8
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI1 0x04a9
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI2 0x04aa
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI3 0x04ab
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA1_SDP_ARB_DRAM 0x04ac
+#define mmMMEA1_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA1_SDP_ARB_GMI 0x04ad
+#define mmMMEA1_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA1_SDP_ARB_FINAL 0x04ae
+#define mmMMEA1_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA1_SDP_DRAM_PRIORITY 0x04af
+#define mmMMEA1_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA1_SDP_GMI_PRIORITY 0x04b0
+#define mmMMEA1_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA1_SDP_IO_PRIORITY 0x04b1
+#define mmMMEA1_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA1_SDP_CREDITS 0x04b2
+#define mmMMEA1_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA1_SDP_TAG_RESERVE0 0x04b3
+#define mmMMEA1_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA1_SDP_TAG_RESERVE1 0x04b4
+#define mmMMEA1_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA1_SDP_VCC_RESERVE0 0x04b5
+#define mmMMEA1_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA1_SDP_VCC_RESERVE1 0x04b6
+#define mmMMEA1_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA1_SDP_VCD_RESERVE0 0x04b7
+#define mmMMEA1_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA1_SDP_VCD_RESERVE1 0x04b8
+#define mmMMEA1_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA1_SDP_REQ_CNTL 0x04b9
+#define mmMMEA1_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA1_MISC 0x04ba
+#define mmMMEA1_MISC_BASE_IDX 1
+#define mmMMEA1_LATENCY_SAMPLING 0x04bb
+#define mmMMEA1_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA1_PERFCOUNTER_LO 0x04bc
+#define mmMMEA1_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA1_PERFCOUNTER_HI 0x04bd
+#define mmMMEA1_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA1_PERFCOUNTER0_CFG 0x04be
+#define mmMMEA1_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA1_PERFCOUNTER1_CFG 0x04bf
+#define mmMMEA1_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA1_PERFCOUNTER_RSLT_CNTL 0x04c0
+#define mmMMEA1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA1_EDC_CNT 0x04c6
+#define mmMMEA1_EDC_CNT_BASE_IDX 1
+#define mmMMEA1_EDC_CNT2 0x04c7
+#define mmMMEA1_EDC_CNT2_BASE_IDX 1
+#define mmMMEA1_DSM_CNTL 0x04c8
+#define mmMMEA1_DSM_CNTL_BASE_IDX 1
+#define mmMMEA1_DSM_CNTLA 0x04c9
+#define mmMMEA1_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA1_DSM_CNTLB 0x04ca
+#define mmMMEA1_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA1_DSM_CNTL2 0x04cb
+#define mmMMEA1_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA1_DSM_CNTL2A 0x04cc
+#define mmMMEA1_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA1_DSM_CNTL2B 0x04cd
+#define mmMMEA1_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA1_CGTT_CLK_CTRL 0x04cf
+#define mmMMEA1_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA1_EDC_MODE 0x04d0
+#define mmMMEA1_EDC_MODE_BASE_IDX 1
+#define mmMMEA1_ERR_STATUS 0x04d1
+#define mmMMEA1_ERR_STATUS_BASE_IDX 1
+#define mmMMEA1_MISC2 0x04d2
+#define mmMMEA1_MISC2_BASE_IDX 1
+#define mmMMEA1_ADDRDEC_SELECT 0x04d3
+#define mmMMEA1_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA1_EDC_CNT3 0x04d4
+#define mmMMEA1_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec2
+// base address: 0x69400
+#define mmMMEA2_DRAM_RD_CLI2GRP_MAP0 0x0500
+#define mmMMEA2_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_CLI2GRP_MAP1 0x0501
+#define mmMMEA2_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_CLI2GRP_MAP0 0x0502
+#define mmMMEA2_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_CLI2GRP_MAP1 0x0503
+#define mmMMEA2_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_GRP2VC_MAP 0x0504
+#define mmMMEA2_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_GRP2VC_MAP 0x0505
+#define mmMMEA2_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_LAZY 0x0506
+#define mmMMEA2_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_LAZY 0x0507
+#define mmMMEA2_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_CAM_CNTL 0x0508
+#define mmMMEA2_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_CAM_CNTL 0x0509
+#define mmMMEA2_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA2_DRAM_PAGE_BURST 0x050a
+#define mmMMEA2_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_AGE 0x050b
+#define mmMMEA2_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_AGE 0x050c
+#define mmMMEA2_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_QUEUING 0x050d
+#define mmMMEA2_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_QUEUING 0x050e
+#define mmMMEA2_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_FIXED 0x050f
+#define mmMMEA2_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_FIXED 0x0510
+#define mmMMEA2_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_URGENCY 0x0511
+#define mmMMEA2_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_URGENCY 0x0512
+#define mmMMEA2_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI1 0x0513
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI2 0x0514
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI3 0x0515
+#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI1 0x0516
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI2 0x0517
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI3 0x0518
+#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA2_GMI_RD_CLI2GRP_MAP0 0x0519
+#define mmMMEA2_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA2_GMI_RD_CLI2GRP_MAP1 0x051a
+#define mmMMEA2_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA2_GMI_WR_CLI2GRP_MAP0 0x051b
+#define mmMMEA2_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA2_GMI_WR_CLI2GRP_MAP1 0x051c
+#define mmMMEA2_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA2_GMI_RD_GRP2VC_MAP 0x051d
+#define mmMMEA2_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA2_GMI_WR_GRP2VC_MAP 0x051e
+#define mmMMEA2_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA2_GMI_RD_LAZY 0x051f
+#define mmMMEA2_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA2_GMI_WR_LAZY 0x0520
+#define mmMMEA2_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA2_GMI_RD_CAM_CNTL 0x0521
+#define mmMMEA2_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA2_GMI_WR_CAM_CNTL 0x0522
+#define mmMMEA2_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA2_GMI_PAGE_BURST 0x0523
+#define mmMMEA2_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_AGE 0x0524
+#define mmMMEA2_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_AGE 0x0525
+#define mmMMEA2_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_QUEUING 0x0526
+#define mmMMEA2_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_QUEUING 0x0527
+#define mmMMEA2_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_FIXED 0x0528
+#define mmMMEA2_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_FIXED 0x0529
+#define mmMMEA2_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_URGENCY 0x052a
+#define mmMMEA2_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_URGENCY 0x052b
+#define mmMMEA2_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_URGENCY_MASKING 0x052c
+#define mmMMEA2_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_URGENCY_MASKING 0x052d
+#define mmMMEA2_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI1 0x052e
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI2 0x052f
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI3 0x0530
+#define mmMMEA2_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI1 0x0531
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI2 0x0532
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI3 0x0533
+#define mmMMEA2_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_BASE_ADDR0 0x0534
+#define mmMMEA2_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR0 0x0535
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_BASE_ADDR1 0x0536
+#define mmMMEA2_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR1 0x0537
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR1 0x0538
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_BASE_ADDR2 0x0539
+#define mmMMEA2_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR2 0x053a
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_BASE_ADDR3 0x053b
+#define mmMMEA2_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR3 0x053c
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR3 0x053d
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_BASE_ADDR4 0x053e
+#define mmMMEA2_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR4 0x053f
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_BASE_ADDR5 0x0540
+#define mmMMEA2_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR5 0x0541
+#define mmMMEA2_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR5 0x0542
+#define mmMMEA2_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA2_ADDRNORMDRAM_HOLE_CNTL 0x0543
+#define mmMMEA2_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA2_ADDRNORMGMI_HOLE_CNTL 0x0544
+#define mmMMEA2_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x0545
+#define mmMMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG 0x0546
+#define mmMMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA2_ADDRDEC_BANK_CFG 0x0547
+#define mmMMEA2_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA2_ADDRDEC_MISC_CFG 0x0548
+#define mmMMEA2_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK0 0x0549
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK1 0x054a
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK2 0x054b
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK3 0x054c
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK4 0x054d
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK5 0x054e
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC 0x054f
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC2 0x0550
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS0 0x0551
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS1 0x0552
+#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA2_ADDRDECDRAM_HARVEST_ENABLE 0x0553
+#define mmMMEA2_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK0 0x0554
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK1 0x0555
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK2 0x0556
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK3 0x0557
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK4 0x0558
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK5 0x0559
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC 0x055a
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC2 0x055b
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS0 0x055c
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS1 0x055d
+#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA2_ADDRDECGMI_HARVEST_ENABLE 0x055e
+#define mmMMEA2_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS0 0x055f
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS1 0x0560
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS2 0x0561
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS3 0x0562
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS0 0x0563
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS1 0x0564
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS2 0x0565
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS3 0x0566
+#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS01 0x0567
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS23 0x0568
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS01 0x0569
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS23 0x056a
+#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS01 0x056b
+#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS23 0x056c
+#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS01 0x056d
+#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS23 0x056e
+#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS01 0x056f
+#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS23 0x0570
+#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS01 0x0571
+#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS23 0x0572
+#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS01 0x0573
+#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS23 0x0574
+#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_RM_SEL_CS01 0x0575
+#define mmMMEA2_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_RM_SEL_CS23 0x0576
+#define mmMMEA2_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS01 0x0577
+#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS23 0x0578
+#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS0 0x0579
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS1 0x057a
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS2 0x057b
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS3 0x057c
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS0 0x057d
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS1 0x057e
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS2 0x057f
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS3 0x0580
+#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS01 0x0581
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS23 0x0582
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS01 0x0583
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS23 0x0584
+#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS01 0x0585
+#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS23 0x0586
+#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS01 0x0587
+#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS23 0x0588
+#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS01 0x0589
+#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS23 0x058a
+#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS01 0x058b
+#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS23 0x058c
+#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS01 0x058d
+#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS23 0x058e
+#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_RM_SEL_CS01 0x058f
+#define mmMMEA2_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_RM_SEL_CS23 0x0590
+#define mmMMEA2_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS01 0x0591
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS23 0x0592
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS0 0x0593
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS1 0x0594
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS2 0x0595
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS3 0x0596
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS0 0x0597
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS1 0x0598
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS2 0x0599
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS3 0x059a
+#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS01 0x059b
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS23 0x059c
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS01 0x059d
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS23 0x059e
+#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS01 0x059f
+#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS23 0x05a0
+#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS01 0x05a1
+#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS23 0x05a2
+#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS01 0x05a3
+#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS23 0x05a4
+#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS01 0x05a5
+#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS23 0x05a6
+#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS01 0x05a7
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS23 0x05a8
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_RM_SEL_CS01 0x05a9
+#define mmMMEA2_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_RM_SEL_CS23 0x05aa
+#define mmMMEA2_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS01 0x05ab
+#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS23 0x05ac
+#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA2_ADDRNORMDRAM_GLOBAL_CNTL 0x05ad
+#define mmMMEA2_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA2_ADDRNORMGMI_GLOBAL_CNTL 0x05ae
+#define mmMMEA2_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA2_IO_RD_CLI2GRP_MAP0 0x05d5
+#define mmMMEA2_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA2_IO_RD_CLI2GRP_MAP1 0x05d6
+#define mmMMEA2_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA2_IO_WR_CLI2GRP_MAP0 0x05d7
+#define mmMMEA2_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA2_IO_WR_CLI2GRP_MAP1 0x05d8
+#define mmMMEA2_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA2_IO_RD_COMBINE_FLUSH 0x05d9
+#define mmMMEA2_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA2_IO_WR_COMBINE_FLUSH 0x05da
+#define mmMMEA2_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA2_IO_GROUP_BURST 0x05db
+#define mmMMEA2_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_AGE 0x05dc
+#define mmMMEA2_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_AGE 0x05dd
+#define mmMMEA2_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_QUEUING 0x05de
+#define mmMMEA2_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_QUEUING 0x05df
+#define mmMMEA2_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_FIXED 0x05e0
+#define mmMMEA2_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_FIXED 0x05e1
+#define mmMMEA2_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_URGENCY 0x05e2
+#define mmMMEA2_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_URGENCY 0x05e3
+#define mmMMEA2_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_URGENCY_MASKING 0x05e4
+#define mmMMEA2_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_URGENCY_MASKING 0x05e5
+#define mmMMEA2_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI1 0x05e6
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI2 0x05e7
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI3 0x05e8
+#define mmMMEA2_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI1 0x05e9
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI2 0x05ea
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI3 0x05eb
+#define mmMMEA2_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA2_SDP_ARB_DRAM 0x05ec
+#define mmMMEA2_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA2_SDP_ARB_GMI 0x05ed
+#define mmMMEA2_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA2_SDP_ARB_FINAL 0x05ee
+#define mmMMEA2_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA2_SDP_DRAM_PRIORITY 0x05ef
+#define mmMMEA2_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA2_SDP_GMI_PRIORITY 0x05f0
+#define mmMMEA2_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA2_SDP_IO_PRIORITY 0x05f1
+#define mmMMEA2_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA2_SDP_CREDITS 0x05f2
+#define mmMMEA2_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA2_SDP_TAG_RESERVE0 0x05f3
+#define mmMMEA2_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA2_SDP_TAG_RESERVE1 0x05f4
+#define mmMMEA2_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA2_SDP_VCC_RESERVE0 0x05f5
+#define mmMMEA2_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA2_SDP_VCC_RESERVE1 0x05f6
+#define mmMMEA2_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA2_SDP_VCD_RESERVE0 0x05f7
+#define mmMMEA2_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA2_SDP_VCD_RESERVE1 0x05f8
+#define mmMMEA2_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA2_SDP_REQ_CNTL 0x05f9
+#define mmMMEA2_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA2_MISC 0x05fa
+#define mmMMEA2_MISC_BASE_IDX 1
+#define mmMMEA2_LATENCY_SAMPLING 0x05fb
+#define mmMMEA2_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA2_PERFCOUNTER_LO 0x05fc
+#define mmMMEA2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA2_PERFCOUNTER_HI 0x05fd
+#define mmMMEA2_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA2_PERFCOUNTER0_CFG 0x05fe
+#define mmMMEA2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA2_PERFCOUNTER1_CFG 0x05ff
+#define mmMMEA2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA2_PERFCOUNTER_RSLT_CNTL 0x0600
+#define mmMMEA2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA2_EDC_CNT 0x0606
+#define mmMMEA2_EDC_CNT_BASE_IDX 1
+#define mmMMEA2_EDC_CNT2 0x0607
+#define mmMMEA2_EDC_CNT2_BASE_IDX 1
+#define mmMMEA2_DSM_CNTL 0x0608
+#define mmMMEA2_DSM_CNTL_BASE_IDX 1
+#define mmMMEA2_DSM_CNTLA 0x0609
+#define mmMMEA2_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA2_DSM_CNTLB 0x060a
+#define mmMMEA2_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA2_DSM_CNTL2 0x060b
+#define mmMMEA2_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA2_DSM_CNTL2A 0x060c
+#define mmMMEA2_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA2_DSM_CNTL2B 0x060d
+#define mmMMEA2_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA2_CGTT_CLK_CTRL 0x060f
+#define mmMMEA2_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA2_EDC_MODE 0x0610
+#define mmMMEA2_EDC_MODE_BASE_IDX 1
+#define mmMMEA2_ERR_STATUS 0x0611
+#define mmMMEA2_ERR_STATUS_BASE_IDX 1
+#define mmMMEA2_MISC2 0x0612
+#define mmMMEA2_MISC2_BASE_IDX 1
+#define mmMMEA2_ADDRDEC_SELECT 0x0613
+#define mmMMEA2_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA2_EDC_CNT3 0x0614
+#define mmMMEA2_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec3
+// base address: 0x69900
+#define mmMMEA3_DRAM_RD_CLI2GRP_MAP0 0x0640
+#define mmMMEA3_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_CLI2GRP_MAP1 0x0641
+#define mmMMEA3_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_CLI2GRP_MAP0 0x0642
+#define mmMMEA3_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_CLI2GRP_MAP1 0x0643
+#define mmMMEA3_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_GRP2VC_MAP 0x0644
+#define mmMMEA3_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_GRP2VC_MAP 0x0645
+#define mmMMEA3_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_LAZY 0x0646
+#define mmMMEA3_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_LAZY 0x0647
+#define mmMMEA3_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_CAM_CNTL 0x0648
+#define mmMMEA3_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_CAM_CNTL 0x0649
+#define mmMMEA3_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA3_DRAM_PAGE_BURST 0x064a
+#define mmMMEA3_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_AGE 0x064b
+#define mmMMEA3_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_AGE 0x064c
+#define mmMMEA3_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_QUEUING 0x064d
+#define mmMMEA3_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_QUEUING 0x064e
+#define mmMMEA3_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_FIXED 0x064f
+#define mmMMEA3_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_FIXED 0x0650
+#define mmMMEA3_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_URGENCY 0x0651
+#define mmMMEA3_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_URGENCY 0x0652
+#define mmMMEA3_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI1 0x0653
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI2 0x0654
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI3 0x0655
+#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI1 0x0656
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI2 0x0657
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI3 0x0658
+#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA3_GMI_RD_CLI2GRP_MAP0 0x0659
+#define mmMMEA3_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA3_GMI_RD_CLI2GRP_MAP1 0x065a
+#define mmMMEA3_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA3_GMI_WR_CLI2GRP_MAP0 0x065b
+#define mmMMEA3_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA3_GMI_WR_CLI2GRP_MAP1 0x065c
+#define mmMMEA3_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA3_GMI_RD_GRP2VC_MAP 0x065d
+#define mmMMEA3_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA3_GMI_WR_GRP2VC_MAP 0x065e
+#define mmMMEA3_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA3_GMI_RD_LAZY 0x065f
+#define mmMMEA3_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA3_GMI_WR_LAZY 0x0660
+#define mmMMEA3_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA3_GMI_RD_CAM_CNTL 0x0661
+#define mmMMEA3_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA3_GMI_WR_CAM_CNTL 0x0662
+#define mmMMEA3_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA3_GMI_PAGE_BURST 0x0663
+#define mmMMEA3_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_AGE 0x0664
+#define mmMMEA3_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_AGE 0x0665
+#define mmMMEA3_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_QUEUING 0x0666
+#define mmMMEA3_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_QUEUING 0x0667
+#define mmMMEA3_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_FIXED 0x0668
+#define mmMMEA3_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_FIXED 0x0669
+#define mmMMEA3_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_URGENCY 0x066a
+#define mmMMEA3_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_URGENCY 0x066b
+#define mmMMEA3_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_URGENCY_MASKING 0x066c
+#define mmMMEA3_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_URGENCY_MASKING 0x066d
+#define mmMMEA3_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI1 0x066e
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI2 0x066f
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI3 0x0670
+#define mmMMEA3_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI1 0x0671
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI2 0x0672
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI3 0x0673
+#define mmMMEA3_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_BASE_ADDR0 0x0674
+#define mmMMEA3_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR0 0x0675
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_BASE_ADDR1 0x0676
+#define mmMMEA3_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR1 0x0677
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR1 0x0678
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_BASE_ADDR2 0x0679
+#define mmMMEA3_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR2 0x067a
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_BASE_ADDR3 0x067b
+#define mmMMEA3_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR3 0x067c
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR3 0x067d
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_BASE_ADDR4 0x067e
+#define mmMMEA3_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR4 0x067f
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_BASE_ADDR5 0x0680
+#define mmMMEA3_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR5 0x0681
+#define mmMMEA3_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR5 0x0682
+#define mmMMEA3_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA3_ADDRNORMDRAM_HOLE_CNTL 0x0683
+#define mmMMEA3_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA3_ADDRNORMGMI_HOLE_CNTL 0x0684
+#define mmMMEA3_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x0685
+#define mmMMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG 0x0686
+#define mmMMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA3_ADDRDEC_BANK_CFG 0x0687
+#define mmMMEA3_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA3_ADDRDEC_MISC_CFG 0x0688
+#define mmMMEA3_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK0 0x0689
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK1 0x068a
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK2 0x068b
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK3 0x068c
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK4 0x068d
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK5 0x068e
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC 0x068f
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC2 0x0690
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS0 0x0691
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS1 0x0692
+#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA3_ADDRDECDRAM_HARVEST_ENABLE 0x0693
+#define mmMMEA3_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK0 0x0694
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK1 0x0695
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK2 0x0696
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK3 0x0697
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK4 0x0698
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK5 0x0699
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC 0x069a
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC2 0x069b
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS0 0x069c
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS1 0x069d
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA3_ADDRDECGMI_HARVEST_ENABLE 0x069e
+#define mmMMEA3_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS0 0x069f
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS1 0x06a0
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS2 0x06a1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS3 0x06a2
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS0 0x06a3
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS1 0x06a4
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS2 0x06a5
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS3 0x06a6
+#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS01 0x06a7
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS23 0x06a8
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS01 0x06a9
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS23 0x06aa
+#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS01 0x06ab
+#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS23 0x06ac
+#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS01 0x06ad
+#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS23 0x06ae
+#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS01 0x06af
+#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS23 0x06b0
+#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS01 0x06b1
+#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS23 0x06b2
+#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS01 0x06b3
+#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS23 0x06b4
+#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_RM_SEL_CS01 0x06b5
+#define mmMMEA3_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_RM_SEL_CS23 0x06b6
+#define mmMMEA3_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS01 0x06b7
+#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS23 0x06b8
+#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS0 0x06b9
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS1 0x06ba
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS2 0x06bb
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS3 0x06bc
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS0 0x06bd
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS1 0x06be
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS2 0x06bf
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS3 0x06c0
+#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS01 0x06c1
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS23 0x06c2
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS01 0x06c3
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS23 0x06c4
+#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS01 0x06c5
+#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS23 0x06c6
+#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS01 0x06c7
+#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS23 0x06c8
+#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS01 0x06c9
+#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS23 0x06ca
+#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS01 0x06cb
+#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS23 0x06cc
+#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS01 0x06cd
+#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS23 0x06ce
+#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_RM_SEL_CS01 0x06cf
+#define mmMMEA3_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_RM_SEL_CS23 0x06d0
+#define mmMMEA3_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS01 0x06d1
+#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS23 0x06d2
+#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS0 0x06d3
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS1 0x06d4
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS2 0x06d5
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS3 0x06d6
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS0 0x06d7
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS1 0x06d8
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS2 0x06d9
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS3 0x06da
+#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS01 0x06db
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS23 0x06dc
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS01 0x06dd
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS23 0x06de
+#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS01 0x06df
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS23 0x06e0
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS01 0x06e1
+#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS23 0x06e2
+#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS01 0x06e3
+#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS23 0x06e4
+#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS01 0x06e5
+#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS23 0x06e6
+#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS01 0x06e7
+#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS23 0x06e8
+#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_RM_SEL_CS01 0x06e9
+#define mmMMEA3_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_RM_SEL_CS23 0x06ea
+#define mmMMEA3_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS01 0x06eb
+#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS23 0x06ec
+#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA3_ADDRNORMDRAM_GLOBAL_CNTL 0x06ed
+#define mmMMEA3_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA3_ADDRNORMGMI_GLOBAL_CNTL 0x06ee
+#define mmMMEA3_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA3_IO_RD_CLI2GRP_MAP0 0x0715
+#define mmMMEA3_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA3_IO_RD_CLI2GRP_MAP1 0x0716
+#define mmMMEA3_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA3_IO_WR_CLI2GRP_MAP0 0x0717
+#define mmMMEA3_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA3_IO_WR_CLI2GRP_MAP1 0x0718
+#define mmMMEA3_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA3_IO_RD_COMBINE_FLUSH 0x0719
+#define mmMMEA3_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA3_IO_WR_COMBINE_FLUSH 0x071a
+#define mmMMEA3_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA3_IO_GROUP_BURST 0x071b
+#define mmMMEA3_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_AGE 0x071c
+#define mmMMEA3_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_AGE 0x071d
+#define mmMMEA3_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_QUEUING 0x071e
+#define mmMMEA3_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_QUEUING 0x071f
+#define mmMMEA3_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_FIXED 0x0720
+#define mmMMEA3_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_FIXED 0x0721
+#define mmMMEA3_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_URGENCY 0x0722
+#define mmMMEA3_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_URGENCY 0x0723
+#define mmMMEA3_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_URGENCY_MASKING 0x0724
+#define mmMMEA3_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_URGENCY_MASKING 0x0725
+#define mmMMEA3_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI1 0x0726
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI2 0x0727
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI3 0x0728
+#define mmMMEA3_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI1 0x0729
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI2 0x072a
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI3 0x072b
+#define mmMMEA3_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA3_SDP_ARB_DRAM 0x072c
+#define mmMMEA3_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA3_SDP_ARB_GMI 0x072d
+#define mmMMEA3_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA3_SDP_ARB_FINAL 0x072e
+#define mmMMEA3_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA3_SDP_DRAM_PRIORITY 0x072f
+#define mmMMEA3_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA3_SDP_GMI_PRIORITY 0x0730
+#define mmMMEA3_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA3_SDP_IO_PRIORITY 0x0731
+#define mmMMEA3_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA3_SDP_CREDITS 0x0732
+#define mmMMEA3_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA3_SDP_TAG_RESERVE0 0x0733
+#define mmMMEA3_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA3_SDP_TAG_RESERVE1 0x0734
+#define mmMMEA3_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA3_SDP_VCC_RESERVE0 0x0735
+#define mmMMEA3_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA3_SDP_VCC_RESERVE1 0x0736
+#define mmMMEA3_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA3_SDP_VCD_RESERVE0 0x0737
+#define mmMMEA3_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA3_SDP_VCD_RESERVE1 0x0738
+#define mmMMEA3_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA3_SDP_REQ_CNTL 0x0739
+#define mmMMEA3_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA3_MISC 0x073a
+#define mmMMEA3_MISC_BASE_IDX 1
+#define mmMMEA3_LATENCY_SAMPLING 0x073b
+#define mmMMEA3_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA3_PERFCOUNTER_LO 0x073c
+#define mmMMEA3_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA3_PERFCOUNTER_HI 0x073d
+#define mmMMEA3_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA3_PERFCOUNTER0_CFG 0x073e
+#define mmMMEA3_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA3_PERFCOUNTER1_CFG 0x073f
+#define mmMMEA3_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA3_PERFCOUNTER_RSLT_CNTL 0x0740
+#define mmMMEA3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA3_EDC_CNT 0x0746
+#define mmMMEA3_EDC_CNT_BASE_IDX 1
+#define mmMMEA3_EDC_CNT2 0x0747
+#define mmMMEA3_EDC_CNT2_BASE_IDX 1
+#define mmMMEA3_DSM_CNTL 0x0748
+#define mmMMEA3_DSM_CNTL_BASE_IDX 1
+#define mmMMEA3_DSM_CNTLA 0x0749
+#define mmMMEA3_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA3_DSM_CNTLB 0x074a
+#define mmMMEA3_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA3_DSM_CNTL2 0x074b
+#define mmMMEA3_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA3_DSM_CNTL2A 0x074c
+#define mmMMEA3_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA3_DSM_CNTL2B 0x074d
+#define mmMMEA3_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA3_CGTT_CLK_CTRL 0x074f
+#define mmMMEA3_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA3_EDC_MODE 0x0750
+#define mmMMEA3_EDC_MODE_BASE_IDX 1
+#define mmMMEA3_ERR_STATUS 0x0751
+#define mmMMEA3_ERR_STATUS_BASE_IDX 1
+#define mmMMEA3_MISC2 0x0752
+#define mmMMEA3_MISC2_BASE_IDX 1
+#define mmMMEA3_ADDRDEC_SELECT 0x0753
+#define mmMMEA3_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA3_EDC_CNT3 0x0754
+#define mmMMEA3_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec4
+// base address: 0x69e00
+#define mmMMEA4_DRAM_RD_CLI2GRP_MAP0 0x0780
+#define mmMMEA4_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_CLI2GRP_MAP1 0x0781
+#define mmMMEA4_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_CLI2GRP_MAP0 0x0782
+#define mmMMEA4_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_CLI2GRP_MAP1 0x0783
+#define mmMMEA4_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_GRP2VC_MAP 0x0784
+#define mmMMEA4_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_GRP2VC_MAP 0x0785
+#define mmMMEA4_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_LAZY 0x0786
+#define mmMMEA4_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_LAZY 0x0787
+#define mmMMEA4_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_CAM_CNTL 0x0788
+#define mmMMEA4_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_CAM_CNTL 0x0789
+#define mmMMEA4_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA4_DRAM_PAGE_BURST 0x078a
+#define mmMMEA4_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_AGE 0x078b
+#define mmMMEA4_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_AGE 0x078c
+#define mmMMEA4_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_QUEUING 0x078d
+#define mmMMEA4_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_QUEUING 0x078e
+#define mmMMEA4_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_FIXED 0x078f
+#define mmMMEA4_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_FIXED 0x0790
+#define mmMMEA4_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_URGENCY 0x0791
+#define mmMMEA4_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_URGENCY 0x0792
+#define mmMMEA4_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI1 0x0793
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI2 0x0794
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI3 0x0795
+#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI1 0x0796
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI2 0x0797
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI3 0x0798
+#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA4_GMI_RD_CLI2GRP_MAP0 0x0799
+#define mmMMEA4_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA4_GMI_RD_CLI2GRP_MAP1 0x079a
+#define mmMMEA4_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA4_GMI_WR_CLI2GRP_MAP0 0x079b
+#define mmMMEA4_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA4_GMI_WR_CLI2GRP_MAP1 0x079c
+#define mmMMEA4_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA4_GMI_RD_GRP2VC_MAP 0x079d
+#define mmMMEA4_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA4_GMI_WR_GRP2VC_MAP 0x079e
+#define mmMMEA4_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA4_GMI_RD_LAZY 0x079f
+#define mmMMEA4_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA4_GMI_WR_LAZY 0x07a0
+#define mmMMEA4_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA4_GMI_RD_CAM_CNTL 0x07a1
+#define mmMMEA4_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA4_GMI_WR_CAM_CNTL 0x07a2
+#define mmMMEA4_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA4_GMI_PAGE_BURST 0x07a3
+#define mmMMEA4_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_AGE 0x07a4
+#define mmMMEA4_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_AGE 0x07a5
+#define mmMMEA4_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_QUEUING 0x07a6
+#define mmMMEA4_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_QUEUING 0x07a7
+#define mmMMEA4_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_FIXED 0x07a8
+#define mmMMEA4_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_FIXED 0x07a9
+#define mmMMEA4_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_URGENCY 0x07aa
+#define mmMMEA4_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_URGENCY 0x07ab
+#define mmMMEA4_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_URGENCY_MASKING 0x07ac
+#define mmMMEA4_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_URGENCY_MASKING 0x07ad
+#define mmMMEA4_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI1 0x07ae
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI2 0x07af
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI3 0x07b0
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI1 0x07b1
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI2 0x07b2
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI3 0x07b3
+#define mmMMEA4_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_BASE_ADDR0 0x07b4
+#define mmMMEA4_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR0 0x07b5
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_BASE_ADDR1 0x07b6
+#define mmMMEA4_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR1 0x07b7
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR1 0x07b8
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_BASE_ADDR2 0x07b9
+#define mmMMEA4_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR2 0x07ba
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_BASE_ADDR3 0x07bb
+#define mmMMEA4_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR3 0x07bc
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR3 0x07bd
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_BASE_ADDR4 0x07be
+#define mmMMEA4_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR4 0x07bf
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_BASE_ADDR5 0x07c0
+#define mmMMEA4_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR5 0x07c1
+#define mmMMEA4_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR5 0x07c2
+#define mmMMEA4_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA4_ADDRNORMDRAM_HOLE_CNTL 0x07c3
+#define mmMMEA4_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA4_ADDRNORMGMI_HOLE_CNTL 0x07c4
+#define mmMMEA4_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x07c5
+#define mmMMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG 0x07c6
+#define mmMMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA4_ADDRDEC_BANK_CFG 0x07c7
+#define mmMMEA4_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA4_ADDRDEC_MISC_CFG 0x07c8
+#define mmMMEA4_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK0 0x07c9
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK1 0x07ca
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK2 0x07cb
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK3 0x07cc
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK4 0x07cd
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK5 0x07ce
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC 0x07cf
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC2 0x07d0
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS0 0x07d1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS1 0x07d2
+#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA4_ADDRDECDRAM_HARVEST_ENABLE 0x07d3
+#define mmMMEA4_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK0 0x07d4
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK1 0x07d5
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK2 0x07d6
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK3 0x07d7
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK4 0x07d8
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK5 0x07d9
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC 0x07da
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC2 0x07db
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS0 0x07dc
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS1 0x07dd
+#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA4_ADDRDECGMI_HARVEST_ENABLE 0x07de
+#define mmMMEA4_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS0 0x07df
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS1 0x07e0
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS2 0x07e1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS3 0x07e2
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS0 0x07e3
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS1 0x07e4
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS2 0x07e5
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS3 0x07e6
+#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS01 0x07e7
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS23 0x07e8
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS01 0x07e9
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS23 0x07ea
+#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS01 0x07eb
+#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS23 0x07ec
+#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS01 0x07ed
+#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS23 0x07ee
+#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS01 0x07ef
+#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS23 0x07f0
+#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS01 0x07f1
+#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS23 0x07f2
+#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS01 0x07f3
+#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS23 0x07f4
+#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_RM_SEL_CS01 0x07f5
+#define mmMMEA4_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_RM_SEL_CS23 0x07f6
+#define mmMMEA4_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS01 0x07f7
+#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS23 0x07f8
+#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS0 0x07f9
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS1 0x07fa
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS2 0x07fb
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS3 0x07fc
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS0 0x07fd
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS1 0x07fe
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS2 0x07ff
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS3 0x0800
+#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS01 0x0801
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS23 0x0802
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS01 0x0803
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS23 0x0804
+#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS01 0x0805
+#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS23 0x0806
+#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS01 0x0807
+#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS23 0x0808
+#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS01 0x0809
+#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS23 0x080a
+#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS01 0x080b
+#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS23 0x080c
+#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS01 0x080d
+#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS23 0x080e
+#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_RM_SEL_CS01 0x080f
+#define mmMMEA4_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_RM_SEL_CS23 0x0810
+#define mmMMEA4_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS01 0x0811
+#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS23 0x0812
+#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS0 0x0813
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS1 0x0814
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS2 0x0815
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS3 0x0816
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS0 0x0817
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS1 0x0818
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS2 0x0819
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS3 0x081a
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS01 0x081b
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS23 0x081c
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS01 0x081d
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS23 0x081e
+#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS01 0x081f
+#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS23 0x0820
+#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS01 0x0821
+#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS23 0x0822
+#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS01 0x0823
+#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS23 0x0824
+#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS01 0x0825
+#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS23 0x0826
+#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS01 0x0827
+#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS23 0x0828
+#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_RM_SEL_CS01 0x0829
+#define mmMMEA4_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_RM_SEL_CS23 0x082a
+#define mmMMEA4_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS01 0x082b
+#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS23 0x082c
+#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA4_ADDRNORMDRAM_GLOBAL_CNTL 0x082d
+#define mmMMEA4_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA4_ADDRNORMGMI_GLOBAL_CNTL 0x082e
+#define mmMMEA4_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA4_IO_RD_CLI2GRP_MAP0 0x0855
+#define mmMMEA4_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA4_IO_RD_CLI2GRP_MAP1 0x0856
+#define mmMMEA4_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA4_IO_WR_CLI2GRP_MAP0 0x0857
+#define mmMMEA4_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA4_IO_WR_CLI2GRP_MAP1 0x0858
+#define mmMMEA4_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA4_IO_RD_COMBINE_FLUSH 0x0859
+#define mmMMEA4_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA4_IO_WR_COMBINE_FLUSH 0x085a
+#define mmMMEA4_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA4_IO_GROUP_BURST 0x085b
+#define mmMMEA4_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_AGE 0x085c
+#define mmMMEA4_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_AGE 0x085d
+#define mmMMEA4_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_QUEUING 0x085e
+#define mmMMEA4_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_QUEUING 0x085f
+#define mmMMEA4_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_FIXED 0x0860
+#define mmMMEA4_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_FIXED 0x0861
+#define mmMMEA4_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_URGENCY 0x0862
+#define mmMMEA4_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_URGENCY 0x0863
+#define mmMMEA4_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_URGENCY_MASKING 0x0864
+#define mmMMEA4_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_URGENCY_MASKING 0x0865
+#define mmMMEA4_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI1 0x0866
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI2 0x0867
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI3 0x0868
+#define mmMMEA4_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI1 0x0869
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI2 0x086a
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI3 0x086b
+#define mmMMEA4_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA4_SDP_ARB_DRAM 0x086c
+#define mmMMEA4_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA4_SDP_ARB_GMI 0x086d
+#define mmMMEA4_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA4_SDP_ARB_FINAL 0x086e
+#define mmMMEA4_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA4_SDP_DRAM_PRIORITY 0x086f
+#define mmMMEA4_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA4_SDP_GMI_PRIORITY 0x0870
+#define mmMMEA4_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA4_SDP_IO_PRIORITY 0x0871
+#define mmMMEA4_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA4_SDP_CREDITS 0x0872
+#define mmMMEA4_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA4_SDP_TAG_RESERVE0 0x0873
+#define mmMMEA4_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA4_SDP_TAG_RESERVE1 0x0874
+#define mmMMEA4_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA4_SDP_VCC_RESERVE0 0x0875
+#define mmMMEA4_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA4_SDP_VCC_RESERVE1 0x0876
+#define mmMMEA4_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA4_SDP_VCD_RESERVE0 0x0877
+#define mmMMEA4_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA4_SDP_VCD_RESERVE1 0x0878
+#define mmMMEA4_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA4_SDP_REQ_CNTL 0x0879
+#define mmMMEA4_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA4_MISC 0x087a
+#define mmMMEA4_MISC_BASE_IDX 1
+#define mmMMEA4_LATENCY_SAMPLING 0x087b
+#define mmMMEA4_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA4_PERFCOUNTER_LO 0x087c
+#define mmMMEA4_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA4_PERFCOUNTER_HI 0x087d
+#define mmMMEA4_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA4_PERFCOUNTER0_CFG 0x087e
+#define mmMMEA4_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA4_PERFCOUNTER1_CFG 0x087f
+#define mmMMEA4_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA4_PERFCOUNTER_RSLT_CNTL 0x0880
+#define mmMMEA4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA4_EDC_CNT 0x0886
+#define mmMMEA4_EDC_CNT_BASE_IDX 1
+#define mmMMEA4_EDC_CNT2 0x0887
+#define mmMMEA4_EDC_CNT2_BASE_IDX 1
+#define mmMMEA4_DSM_CNTL 0x0888
+#define mmMMEA4_DSM_CNTL_BASE_IDX 1
+#define mmMMEA4_DSM_CNTLA 0x0889
+#define mmMMEA4_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA4_DSM_CNTLB 0x088a
+#define mmMMEA4_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA4_DSM_CNTL2 0x088b
+#define mmMMEA4_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA4_DSM_CNTL2A 0x088c
+#define mmMMEA4_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA4_DSM_CNTL2B 0x088d
+#define mmMMEA4_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA4_CGTT_CLK_CTRL 0x088f
+#define mmMMEA4_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA4_EDC_MODE 0x0890
+#define mmMMEA4_EDC_MODE_BASE_IDX 1
+#define mmMMEA4_ERR_STATUS 0x0891
+#define mmMMEA4_ERR_STATUS_BASE_IDX 1
+#define mmMMEA4_MISC2 0x0892
+#define mmMMEA4_MISC2_BASE_IDX 1
+#define mmMMEA4_ADDRDEC_SELECT 0x0893
+#define mmMMEA4_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA4_EDC_CNT3 0x0894
+#define mmMMEA4_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_pctldec0
+// base address: 0x6a300
+#define mmPCTL0_CTRL 0x08c0
+#define mmPCTL0_CTRL_BASE_IDX 1
+#define mmPCTL0_MMHUB_DEEPSLEEP_IB 0x08c1
+#define mmPCTL0_MMHUB_DEEPSLEEP_IB_BASE_IDX 1
+#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE 0x08c2
+#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_BASE_IDX 1
+#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB 0x08c3
+#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB_BASE_IDX 1
+#define mmPCTL0_PG_IGNORE_DEEPSLEEP 0x08c4
+#define mmPCTL0_PG_IGNORE_DEEPSLEEP_BASE_IDX 1
+#define mmPCTL0_PG_IGNORE_DEEPSLEEP_IB 0x08c5
+#define mmPCTL0_PG_IGNORE_DEEPSLEEP_IB_BASE_IDX 1
+#define mmPCTL0_SLICE0_CFG_DAGB_BUSY 0x08c6
+#define mmPCTL0_SLICE0_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL0_SLICE0_CFG_DS_ALLOW 0x08c7
+#define mmPCTL0_SLICE0_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL0_SLICE0_CFG_DS_ALLOW_IB 0x08c8
+#define mmPCTL0_SLICE0_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL0_SLICE1_CFG_DAGB_BUSY 0x08c9
+#define mmPCTL0_SLICE1_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL0_SLICE1_CFG_DS_ALLOW 0x08ca
+#define mmPCTL0_SLICE1_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL0_SLICE1_CFG_DS_ALLOW_IB 0x08cb
+#define mmPCTL0_SLICE1_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL0_SLICE2_CFG_DAGB_BUSY 0x08cc
+#define mmPCTL0_SLICE2_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL0_SLICE2_CFG_DS_ALLOW 0x08cd
+#define mmPCTL0_SLICE2_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL0_SLICE2_CFG_DS_ALLOW_IB 0x08ce
+#define mmPCTL0_SLICE2_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL0_SLICE3_CFG_DAGB_BUSY 0x08cf
+#define mmPCTL0_SLICE3_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL0_SLICE3_CFG_DS_ALLOW 0x08d0
+#define mmPCTL0_SLICE3_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL0_SLICE3_CFG_DS_ALLOW_IB 0x08d1
+#define mmPCTL0_SLICE3_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL0_SLICE4_CFG_DAGB_BUSY 0x08d2
+#define mmPCTL0_SLICE4_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL0_SLICE4_CFG_DS_ALLOW 0x08d3
+#define mmPCTL0_SLICE4_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL0_SLICE4_CFG_DS_ALLOW_IB 0x08d4
+#define mmPCTL0_SLICE4_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL0_UTCL2_MISC 0x08d5
+#define mmPCTL0_UTCL2_MISC_BASE_IDX 1
+#define mmPCTL0_SLICE0_MISC 0x08d6
+#define mmPCTL0_SLICE0_MISC_BASE_IDX 1
+#define mmPCTL0_SLICE1_MISC 0x08d7
+#define mmPCTL0_SLICE1_MISC_BASE_IDX 1
+#define mmPCTL0_SLICE2_MISC 0x08d8
+#define mmPCTL0_SLICE2_MISC_BASE_IDX 1
+#define mmPCTL0_SLICE3_MISC 0x08d9
+#define mmPCTL0_SLICE3_MISC_BASE_IDX 1
+#define mmPCTL0_SLICE4_MISC 0x08da
+#define mmPCTL0_SLICE4_MISC_BASE_IDX 1
+#define mmPCTL0_UTCL2_RENG_EXECUTE 0x08db
+#define mmPCTL0_UTCL2_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL0_SLICE0_RENG_EXECUTE 0x08dc
+#define mmPCTL0_SLICE0_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL0_SLICE1_RENG_EXECUTE 0x08dd
+#define mmPCTL0_SLICE1_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL0_SLICE2_RENG_EXECUTE 0x08de
+#define mmPCTL0_SLICE2_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL0_SLICE3_RENG_EXECUTE 0x08df
+#define mmPCTL0_SLICE3_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL0_SLICE4_RENG_EXECUTE 0x08e0
+#define mmPCTL0_SLICE4_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL0_UTCL2_RENG_RAM_INDEX 0x08e1
+#define mmPCTL0_UTCL2_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL0_UTCL2_RENG_RAM_DATA 0x08e2
+#define mmPCTL0_UTCL2_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL0_SLICE0_RENG_RAM_INDEX 0x08e3
+#define mmPCTL0_SLICE0_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL0_SLICE0_RENG_RAM_DATA 0x08e4
+#define mmPCTL0_SLICE0_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL0_SLICE1_RENG_RAM_INDEX 0x08e5
+#define mmPCTL0_SLICE1_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL0_SLICE1_RENG_RAM_DATA 0x08e6
+#define mmPCTL0_SLICE1_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL0_SLICE2_RENG_RAM_INDEX 0x08e7
+#define mmPCTL0_SLICE2_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL0_SLICE2_RENG_RAM_DATA 0x08e8
+#define mmPCTL0_SLICE2_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL0_SLICE3_RENG_RAM_INDEX 0x08e9
+#define mmPCTL0_SLICE3_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL0_SLICE3_RENG_RAM_DATA 0x08ea
+#define mmPCTL0_SLICE3_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL0_SLICE4_RENG_RAM_INDEX 0x08eb
+#define mmPCTL0_SLICE4_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL0_SLICE4_RENG_RAM_DATA 0x08ec
+#define mmPCTL0_SLICE4_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0 0x08ed
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1 0x08ee
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2 0x08ef
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3 0x08f0
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4 0x08f1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x08f2
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x08f3
+#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0 0x08f4
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1 0x08f5
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2 0x08f6
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3 0x08f7
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4 0x08f8
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0 0x08f9
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1 0x08fa
+#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0 0x08fb
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1 0x08fc
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2 0x08fd
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3 0x08fe
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4 0x08ff
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0900
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0901
+#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0 0x0902
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1 0x0903
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2 0x0904
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3 0x0905
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4 0x0906
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0907
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0908
+#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0 0x0909
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1 0x090a
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2 0x090b
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3 0x090c
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4 0x090d
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0 0x090e
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1 0x090f
+#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0 0x0910
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1 0x0911
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2 0x0912
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3 0x0913
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4 0x0914
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0915
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0916
+#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
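+
+/*
+ * Editorial note (illustrative sketch, not part of the generated header):
+ * every register above is listed as a dword offset plus a BASE_IDX that
+ * selects an entry in a per-IP base-address table.  A minimal example of
+ * recovering a byte address from the two values, assuming a hypothetical
+ * table mmhub_base[] that holds byte base addresses and whose entry 1 is
+ * 0x68000 -- which is what the "base address: 0x6a300" comment for this
+ * block implies, since 0x68000 + 0x08c0 * 4 == 0x6a300:
+ */
+static inline unsigned int pctl0_ctrl_byte_addr(const unsigned int *mmhub_base)
+{
+	/* scale the dword offset to bytes and add the base picked by BASE_IDX */
+	return mmhub_base[mmPCTL0_CTRL_BASE_IDX] + mmPCTL0_CTRL * 4;
+}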
+
+
+// addressBlock: mmhub_l1tlb_vml1dec
+// base address: 0x6a500
+#define mmVML1_0_MC_VM_MX_L1_TLB0_STATUS 0x0948
+#define mmVML1_0_MC_VM_MX_L1_TLB0_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB1_STATUS 0x0949
+#define mmVML1_0_MC_VM_MX_L1_TLB1_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB2_STATUS 0x094a
+#define mmVML1_0_MC_VM_MX_L1_TLB2_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB3_STATUS 0x094b
+#define mmVML1_0_MC_VM_MX_L1_TLB3_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB4_STATUS 0x094c
+#define mmVML1_0_MC_VM_MX_L1_TLB4_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB5_STATUS 0x094d
+#define mmVML1_0_MC_VM_MX_L1_TLB5_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB6_STATUS 0x094e
+#define mmVML1_0_MC_VM_MX_L1_TLB6_STATUS_BASE_IDX 1
+#define mmVML1_0_MC_VM_MX_L1_TLB7_STATUS 0x094f
+#define mmVML1_0_MC_VM_MX_L1_TLB7_STATUS_BASE_IDX 1
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec
+// base address: 0x6a580
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG 0x0960
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG 0x0961
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG 0x0962
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG 0x0963
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG_BASE_IDX 1
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL 0x0964
+#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec
+// base address: 0x6a5c0
+#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO 0x0970
+#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO_BASE_IDX 1
+#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI 0x0971
+#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_atcl2dec
+// base address: 0x6a600
+#define mmATCL2_0_ATC_L2_CNTL 0x0980
+#define mmATCL2_0_ATC_L2_CNTL_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CNTL2 0x0981
+#define mmATCL2_0_ATC_L2_CNTL2_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_DATA0 0x0984
+#define mmATCL2_0_ATC_L2_CACHE_DATA0_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_DATA1 0x0985
+#define mmATCL2_0_ATC_L2_CACHE_DATA1_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_DATA2 0x0986
+#define mmATCL2_0_ATC_L2_CACHE_DATA2_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CNTL3 0x0987
+#define mmATCL2_0_ATC_L2_CNTL3_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_STATUS 0x0988
+#define mmATCL2_0_ATC_L2_STATUS_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_STATUS2 0x0989
+#define mmATCL2_0_ATC_L2_STATUS2_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_STATUS3 0x098a
+#define mmATCL2_0_ATC_L2_STATUS3_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_MISC_CG 0x098b
+#define mmATCL2_0_ATC_L2_MISC_CG_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_MEM_POWER_LS 0x098c
+#define mmATCL2_0_ATC_L2_MEM_POWER_LS_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CGTT_CLK_CTRL 0x098d
+#define mmATCL2_0_ATC_L2_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX 0x098e
+#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX 0x098f
+#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL 0x0990
+#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL 0x0991
+#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_CNTL4 0x0992
+#define mmATCL2_0_ATC_L2_CNTL4_BASE_IDX 1
+#define mmATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES 0x0993
+#define mmATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec
+// base address: 0x6a700
+#define mmVML2PF0_VM_L2_CNTL 0x09c0
+#define mmVML2PF0_VM_L2_CNTL_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CNTL2 0x09c1
+#define mmVML2PF0_VM_L2_CNTL2_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CNTL3 0x09c2
+#define mmVML2PF0_VM_L2_CNTL3_BASE_IDX 1
+#define mmVML2PF0_VM_L2_STATUS 0x09c3
+#define mmVML2PF0_VM_L2_STATUS_BASE_IDX 1
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_CNTL 0x09c4
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 1
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32 0x09c5
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32 0x09c6
+#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL 0x09c7
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2 0x09c8
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3 0x09c9
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4 0x09ca
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS 0x09cb
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32 0x09cc
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32 0x09cd
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x09ce
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x09cf
+#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x09d1
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x09d2
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x09d3
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x09d4
+#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x09d5
+#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x09d6
+#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CNTL4 0x09d7
+#define mmVML2PF0_VM_L2_CNTL4_BASE_IDX 1
+#define mmVML2PF0_VM_L2_MM_GROUP_RT_CLASSES 0x09d8
+#define mmVML2PF0_VM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1
+#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID 0x09d9
+#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 1
+#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2 0x09da
+#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CACHE_PARITY_CNTL 0x09db
+#define mmVML2PF0_VM_L2_CACHE_PARITY_CNTL_BASE_IDX 1
+#define mmVML2PF0_VM_L2_CGTT_CLK_CTRL 0x09de
+#define mmVML2PF0_VM_L2_CGTT_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2vcdec
+// base address: 0x6a800
+#define mmVML2VC0_VM_CONTEXT0_CNTL 0x0a00
+#define mmVML2VC0_VM_CONTEXT0_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_CNTL 0x0a01
+#define mmVML2VC0_VM_CONTEXT1_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_CNTL 0x0a02
+#define mmVML2VC0_VM_CONTEXT2_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_CNTL 0x0a03
+#define mmVML2VC0_VM_CONTEXT3_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_CNTL 0x0a04
+#define mmVML2VC0_VM_CONTEXT4_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_CNTL 0x0a05
+#define mmVML2VC0_VM_CONTEXT5_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_CNTL 0x0a06
+#define mmVML2VC0_VM_CONTEXT6_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_CNTL 0x0a07
+#define mmVML2VC0_VM_CONTEXT7_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_CNTL 0x0a08
+#define mmVML2VC0_VM_CONTEXT8_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_CNTL 0x0a09
+#define mmVML2VC0_VM_CONTEXT9_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_CNTL 0x0a0a
+#define mmVML2VC0_VM_CONTEXT10_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_CNTL 0x0a0b
+#define mmVML2VC0_VM_CONTEXT11_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_CNTL 0x0a0c
+#define mmVML2VC0_VM_CONTEXT12_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_CNTL 0x0a0d
+#define mmVML2VC0_VM_CONTEXT13_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_CNTL 0x0a0e
+#define mmVML2VC0_VM_CONTEXT14_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_CNTL 0x0a0f
+#define mmVML2VC0_VM_CONTEXT15_CNTL_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXTS_DISABLE 0x0a10
+#define mmVML2VC0_VM_CONTEXTS_DISABLE_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG0_SEM 0x0a11
+#define mmVML2VC0_VM_INVALIDATE_ENG0_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG1_SEM 0x0a12
+#define mmVML2VC0_VM_INVALIDATE_ENG1_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG2_SEM 0x0a13
+#define mmVML2VC0_VM_INVALIDATE_ENG2_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG3_SEM 0x0a14
+#define mmVML2VC0_VM_INVALIDATE_ENG3_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG4_SEM 0x0a15
+#define mmVML2VC0_VM_INVALIDATE_ENG4_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG5_SEM 0x0a16
+#define mmVML2VC0_VM_INVALIDATE_ENG5_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG6_SEM 0x0a17
+#define mmVML2VC0_VM_INVALIDATE_ENG6_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG7_SEM 0x0a18
+#define mmVML2VC0_VM_INVALIDATE_ENG7_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG8_SEM 0x0a19
+#define mmVML2VC0_VM_INVALIDATE_ENG8_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG9_SEM 0x0a1a
+#define mmVML2VC0_VM_INVALIDATE_ENG9_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG10_SEM 0x0a1b
+#define mmVML2VC0_VM_INVALIDATE_ENG10_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG11_SEM 0x0a1c
+#define mmVML2VC0_VM_INVALIDATE_ENG11_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG12_SEM 0x0a1d
+#define mmVML2VC0_VM_INVALIDATE_ENG12_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG13_SEM 0x0a1e
+#define mmVML2VC0_VM_INVALIDATE_ENG13_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG14_SEM 0x0a1f
+#define mmVML2VC0_VM_INVALIDATE_ENG14_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG15_SEM 0x0a20
+#define mmVML2VC0_VM_INVALIDATE_ENG15_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG16_SEM 0x0a21
+#define mmVML2VC0_VM_INVALIDATE_ENG16_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG17_SEM 0x0a22
+#define mmVML2VC0_VM_INVALIDATE_ENG17_SEM_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG0_REQ 0x0a23
+#define mmVML2VC0_VM_INVALIDATE_ENG0_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG1_REQ 0x0a24
+#define mmVML2VC0_VM_INVALIDATE_ENG1_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG2_REQ 0x0a25
+#define mmVML2VC0_VM_INVALIDATE_ENG2_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG3_REQ 0x0a26
+#define mmVML2VC0_VM_INVALIDATE_ENG3_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG4_REQ 0x0a27
+#define mmVML2VC0_VM_INVALIDATE_ENG4_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG5_REQ 0x0a28
+#define mmVML2VC0_VM_INVALIDATE_ENG5_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG6_REQ 0x0a29
+#define mmVML2VC0_VM_INVALIDATE_ENG6_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG7_REQ 0x0a2a
+#define mmVML2VC0_VM_INVALIDATE_ENG7_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG8_REQ 0x0a2b
+#define mmVML2VC0_VM_INVALIDATE_ENG8_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG9_REQ 0x0a2c
+#define mmVML2VC0_VM_INVALIDATE_ENG9_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG10_REQ 0x0a2d
+#define mmVML2VC0_VM_INVALIDATE_ENG10_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG11_REQ 0x0a2e
+#define mmVML2VC0_VM_INVALIDATE_ENG11_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG12_REQ 0x0a2f
+#define mmVML2VC0_VM_INVALIDATE_ENG12_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG13_REQ 0x0a30
+#define mmVML2VC0_VM_INVALIDATE_ENG13_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG14_REQ 0x0a31
+#define mmVML2VC0_VM_INVALIDATE_ENG14_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG15_REQ 0x0a32
+#define mmVML2VC0_VM_INVALIDATE_ENG15_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG16_REQ 0x0a33
+#define mmVML2VC0_VM_INVALIDATE_ENG16_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG17_REQ 0x0a34
+#define mmVML2VC0_VM_INVALIDATE_ENG17_REQ_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ACK 0x0a35
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ACK 0x0a36
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ACK 0x0a37
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ACK 0x0a38
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ACK 0x0a39
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ACK 0x0a3a
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ACK 0x0a3b
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ACK 0x0a3c
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ACK 0x0a3d
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ACK 0x0a3e
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ACK 0x0a3f
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ACK 0x0a40
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ACK 0x0a41
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ACK 0x0a42
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ACK 0x0a43
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ACK 0x0a44
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ACK 0x0a45
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ACK 0x0a46
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ACK_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x0a47
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x0a48
+#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x0a49
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x0a4a
+#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x0a4b
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x0a4c
+#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x0a4d
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x0a4e
+#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x0a4f
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x0a50
+#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x0a51
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x0a52
+#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x0a53
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x0a54
+#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x0a55
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x0a56
+#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x0a57
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x0a58
+#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x0a59
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x0a5a
+#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x0a5b
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x0a5c
+#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x0a5d
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x0a5e
+#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x0a5f
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x0a60
+#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x0a61
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x0a62
+#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x0a63
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x0a64
+#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x0a65
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x0a66
+#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0a67
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0a68
+#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x0a69
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x0a6a
+#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0a6b
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0a6c
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0a6d
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0a6e
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0a6f
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0a70
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0a71
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x0a72
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0a73
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0a74
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x0a75
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x0a76
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0a77
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0a78
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x0a79
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x0a7a
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0a7b
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0a7c
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x0a7d
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0a7e
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x0a7f
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x0a80
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x0a81
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x0a82
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x0a83
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x0a84
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x0a85
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x0a86
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x0a87
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x0a88
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x0a89
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x0a8a
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0a8b
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0a8c
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0a8d
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0a8e
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x0a8f
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x0a90
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0a91
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0a92
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0a93
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0a94
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0a95
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x0a96
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0a97
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0a98
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x0a99
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x0a9a
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0a9b
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0a9c
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x0a9d
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x0a9e
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x0a9f
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x0aa0
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x0aa1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x0aa2
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x0aa3
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x0aa4
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x0aa5
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x0aa6
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x0aa7
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x0aa8
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x0aa9
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x0aaa
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x0aab
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x0aac
+#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0aad
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0aae
+#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x0aaf
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x0ab0
+#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0ab1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0ab2
+#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x0ab3
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x0ab4
+#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0ab5
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0ab6
+#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0ab7
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0ab8
+#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0ab9
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x0aba
+#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0abb
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0abc
+#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x0abd
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x0abe
+#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x0abf
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x0ac0
+#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x0ac1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x0ac2
+#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x0ac3
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x0ac4
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x0ac5
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x0ac6
+#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x0ac7
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x0ac8
+#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x0ac9
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x0aca
+#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
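+
+/*
+ * Editorial note (illustrative sketch, not part of the generated header):
+ * the per-context page table base/start/end addresses above are exposed as
+ * _LO32/_HI32 register pairs.  A minimal example of splitting a 64-bit value
+ * across such a pair; write_reg32() is a hypothetical MMIO write callback,
+ * and the low-then-high ordering is only an assumption of this sketch.
+ */
+static void set_ctx0_page_table_base(void (*write_reg32)(unsigned int reg, unsigned int val),
+				     unsigned long long base)
+{
+	/* low 32 bits go to the _LO32 register, high 32 bits to _HI32 */
+	write_reg32(mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, (unsigned int)base);
+	write_reg32(mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, (unsigned int)(base >> 32));
+}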
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6ab90
+#define mmVMSHAREDPF0_MC_VM_NB_MMIOBASE 0x0ae4
+#define mmVMSHAREDPF0_MC_VM_NB_MMIOBASE_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_NB_MMIOLIMIT 0x0ae5
+#define mmVMSHAREDPF0_MC_VM_NB_MMIOLIMIT_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_NB_PCI_CTRL 0x0ae6
+#define mmVMSHAREDPF0_MC_VM_NB_PCI_CTRL_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_NB_PCI_ARB 0x0ae7
+#define mmVMSHAREDPF0_MC_VM_NB_PCI_ARB_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1 0x0ae8
+#define mmVMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2 0x0ae9
+#define mmVMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2 0x0aea
+#define mmVMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_FB_OFFSET 0x0aeb
+#define mmVMSHAREDPF0_MC_VM_FB_OFFSET_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x0aec
+#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x0aed
+#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_STEERING 0x0aee
+#define mmVMSHAREDPF0_MC_VM_STEERING_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ 0x0aef
+#define mmVMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_MEM_POWER_LS 0x0af0
+#define mmVMSHAREDPF0_MC_MEM_POWER_LS_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START 0x0af1
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END 0x0af2
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_APT_CNTL 0x0af3
+#define mmVMSHAREDPF0_MC_VM_APT_CNTL_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START 0x0af4
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END 0x0af5
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x0af6
+#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_CNTL 0x0af7
+#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_CNTL_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE 0x0af8
+#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL 0x0af9
+#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec
+// base address: 0x6ac00
+#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE 0x0b00
+#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP 0x0b01
+#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_AGP_TOP 0x0b02
+#define mmVMSHAREDVC0_MC_VM_AGP_TOP_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_AGP_BOT 0x0b03
+#define mmVMSHAREDVC0_MC_VM_AGP_BOT_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_AGP_BASE 0x0b04
+#define mmVMSHAREDVC0_MC_VM_AGP_BASE_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x0b05
+#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0b06
+#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 1
+#define mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL 0x0b07
+#define mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec
+// base address: 0x6ac80
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0 0x0b20
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1 0x0b21
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2 0x0b22
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3 0x0b23
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4 0x0b24
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5 0x0b25
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6 0x0b26
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7 0x0b27
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8 0x0b28
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9 0x0b29
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10 0x0b2a
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11 0x0b2b
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12 0x0b2c
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13 0x0b2d
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14 0x0b2e
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15 0x0b2f
+#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1 0x0b30
+#define mmVMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_0 0x0b31
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_1 0x0b32
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_2 0x0b33
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_3 0x0b34
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_0 0x0b35
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_1 0x0b36
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_2 0x0b37
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_3 0x0b38
+#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_0 0x0b39
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_1 0x0b3a
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_2 0x0b3b
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_3 0x0b3c
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_0 0x0b3d
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_1 0x0b3e
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_2 0x0b3f
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_3 0x0b40
+#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_0 0x0b41
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_1 0x0b42
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_2 0x0b43
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_3 0x0b44
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_0 0x0b45
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_1 0x0b46
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_2 0x0b47
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_3 0x0b48
+#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER 0x0b49
+#define mmVMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x0b4a
+#define mmVMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL 0x0b4b
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0 0x0b4c
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1 0x0b4d
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2 0x0b4e
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3 0x0b4f
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4 0x0b50
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5 0x0b51
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6 0x0b52
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7 0x0b53
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8 0x0b54
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9 0x0b55
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10 0x0b56
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11 0x0b57
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12 0x0b58
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13 0x0b59
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14 0x0b5a
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14_BASE_IDX 1
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15 0x0b5b
+#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15_BASE_IDX 1
+#define mmVMSHAREDHV0_UTCL2_CGTT_CLK_CTRL 0x0b5c
+#define mmVMSHAREDHV0_UTCL2_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID 0x0b5d
+#define mmVMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmVMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE 0x0b5e
+#define mmVMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec
+// base address: 0x6adc0
+#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO 0x0b70
+#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI 0x0b71
+#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec
+// base address: 0x6add0
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG 0x0b74
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG 0x0b75
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x0b76
+#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2pldec
+// base address: 0x6ae00
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER0_CFG 0x0b80
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER1_CFG 0x0b81
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER2_CFG 0x0b82
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER3_CFG 0x0b83
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER4_CFG 0x0b84
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER5_CFG 0x0b85
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER6_CFG 0x0b86
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER7_CFG 0x0b87
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x0b88
+#define mmVML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2prdec
+// base address: 0x6ae40
+#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO 0x0b90
+#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI 0x0b91
+#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
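+
+/*
+ * Editorial note (illustrative sketch, not part of the generated header):
+ * the 64-bit counter behind the _LO/_HI pair above can be assembled with the
+ * usual high-low-high read loop, which guards against the low word rolling
+ * over between the two reads.  read_reg32() is a hypothetical MMIO read
+ * helper; the real driver may instead latch the value via the RSLT_CNTL
+ * register before reading it back.
+ */
+static unsigned long long read_vml2_perfcounter(unsigned int (*read_reg32)(unsigned int reg))
+{
+	unsigned int lo, hi, hi2;
+
+	do {
+		hi  = read_reg32(mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI);
+		lo  = read_reg32(mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO);
+		hi2 = read_reg32(mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI);
+	} while (hi != hi2);	/* retry if the high word changed mid-read */
+
+	return ((unsigned long long)hi << 32) | lo;
+}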
+
+
+// addressBlock: mmhub_dagb_dagbdec5
+// base address: 0x74000
+#define mmDAGB5_RDCLI0 0x3000
+#define mmDAGB5_RDCLI0_BASE_IDX 1
+#define mmDAGB5_RDCLI1 0x3001
+#define mmDAGB5_RDCLI1_BASE_IDX 1
+#define mmDAGB5_RDCLI2 0x3002
+#define mmDAGB5_RDCLI2_BASE_IDX 1
+#define mmDAGB5_RDCLI3 0x3003
+#define mmDAGB5_RDCLI3_BASE_IDX 1
+#define mmDAGB5_RDCLI4 0x3004
+#define mmDAGB5_RDCLI4_BASE_IDX 1
+#define mmDAGB5_RDCLI5 0x3005
+#define mmDAGB5_RDCLI5_BASE_IDX 1
+#define mmDAGB5_RDCLI6 0x3006
+#define mmDAGB5_RDCLI6_BASE_IDX 1
+#define mmDAGB5_RDCLI7 0x3007
+#define mmDAGB5_RDCLI7_BASE_IDX 1
+#define mmDAGB5_RDCLI8 0x3008
+#define mmDAGB5_RDCLI8_BASE_IDX 1
+#define mmDAGB5_RDCLI9 0x3009
+#define mmDAGB5_RDCLI9_BASE_IDX 1
+#define mmDAGB5_RDCLI10 0x300a
+#define mmDAGB5_RDCLI10_BASE_IDX 1
+#define mmDAGB5_RDCLI11 0x300b
+#define mmDAGB5_RDCLI11_BASE_IDX 1
+#define mmDAGB5_RDCLI12 0x300c
+#define mmDAGB5_RDCLI12_BASE_IDX 1
+#define mmDAGB5_RDCLI13 0x300d
+#define mmDAGB5_RDCLI13_BASE_IDX 1
+#define mmDAGB5_RDCLI14 0x300e
+#define mmDAGB5_RDCLI14_BASE_IDX 1
+#define mmDAGB5_RDCLI15 0x300f
+#define mmDAGB5_RDCLI15_BASE_IDX 1
+#define mmDAGB5_RD_CNTL 0x3010
+#define mmDAGB5_RD_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_GMI_CNTL 0x3011
+#define mmDAGB5_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_ADDR_DAGB 0x3012
+#define mmDAGB5_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB5_RD_OUTPUT_DAGB_MAX_BURST 0x3013
+#define mmDAGB5_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB5_RD_OUTPUT_DAGB_LAZY_TIMER 0x3014
+#define mmDAGB5_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB5_RD_CGTT_CLK_CTRL 0x3015
+#define mmDAGB5_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB5_L1TLB_RD_CGTT_CLK_CTRL 0x3016
+#define mmDAGB5_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB5_ATCVM_RD_CGTT_CLK_CTRL 0x3017
+#define mmDAGB5_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST0 0x3018
+#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER0 0x3019
+#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST1 0x301a
+#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER1 0x301b
+#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB5_RD_VC0_CNTL 0x301c
+#define mmDAGB5_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC1_CNTL 0x301d
+#define mmDAGB5_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC2_CNTL 0x301e
+#define mmDAGB5_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC3_CNTL 0x301f
+#define mmDAGB5_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC4_CNTL 0x3020
+#define mmDAGB5_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC5_CNTL 0x3021
+#define mmDAGB5_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC6_CNTL 0x3022
+#define mmDAGB5_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_VC7_CNTL 0x3023
+#define mmDAGB5_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB5_RD_CNTL_MISC 0x3024
+#define mmDAGB5_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB5_RD_TLB_CREDIT 0x3025
+#define mmDAGB5_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB5_RDCLI_ASK_PENDING 0x3026
+#define mmDAGB5_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB5_RDCLI_GO_PENDING 0x3027
+#define mmDAGB5_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB5_RDCLI_GBLSEND_PENDING 0x3028
+#define mmDAGB5_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB5_RDCLI_TLB_PENDING 0x3029
+#define mmDAGB5_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB5_RDCLI_OARB_PENDING 0x302a
+#define mmDAGB5_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB5_RDCLI_OSD_PENDING 0x302b
+#define mmDAGB5_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI0 0x302c
+#define mmDAGB5_WRCLI0_BASE_IDX 1
+#define mmDAGB5_WRCLI1 0x302d
+#define mmDAGB5_WRCLI1_BASE_IDX 1
+#define mmDAGB5_WRCLI2 0x302e
+#define mmDAGB5_WRCLI2_BASE_IDX 1
+#define mmDAGB5_WRCLI3 0x302f
+#define mmDAGB5_WRCLI3_BASE_IDX 1
+#define mmDAGB5_WRCLI4 0x3030
+#define mmDAGB5_WRCLI4_BASE_IDX 1
+#define mmDAGB5_WRCLI5 0x3031
+#define mmDAGB5_WRCLI5_BASE_IDX 1
+#define mmDAGB5_WRCLI6 0x3032
+#define mmDAGB5_WRCLI6_BASE_IDX 1
+#define mmDAGB5_WRCLI7 0x3033
+#define mmDAGB5_WRCLI7_BASE_IDX 1
+#define mmDAGB5_WRCLI8 0x3034
+#define mmDAGB5_WRCLI8_BASE_IDX 1
+#define mmDAGB5_WRCLI9 0x3035
+#define mmDAGB5_WRCLI9_BASE_IDX 1
+#define mmDAGB5_WRCLI10 0x3036
+#define mmDAGB5_WRCLI10_BASE_IDX 1
+#define mmDAGB5_WRCLI11 0x3037
+#define mmDAGB5_WRCLI11_BASE_IDX 1
+#define mmDAGB5_WRCLI12 0x3038
+#define mmDAGB5_WRCLI12_BASE_IDX 1
+#define mmDAGB5_WRCLI13 0x3039
+#define mmDAGB5_WRCLI13_BASE_IDX 1
+#define mmDAGB5_WRCLI14 0x303a
+#define mmDAGB5_WRCLI14_BASE_IDX 1
+#define mmDAGB5_WRCLI15 0x303b
+#define mmDAGB5_WRCLI15_BASE_IDX 1
+#define mmDAGB5_WR_CNTL 0x303c
+#define mmDAGB5_WR_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_GMI_CNTL 0x303d
+#define mmDAGB5_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_ADDR_DAGB 0x303e
+#define mmDAGB5_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB5_WR_OUTPUT_DAGB_MAX_BURST 0x303f
+#define mmDAGB5_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB5_WR_OUTPUT_DAGB_LAZY_TIMER 0x3040
+#define mmDAGB5_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB5_WR_CGTT_CLK_CTRL 0x3041
+#define mmDAGB5_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB5_L1TLB_WR_CGTT_CLK_CTRL 0x3042
+#define mmDAGB5_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB5_ATCVM_WR_CGTT_CLK_CTRL 0x3043
+#define mmDAGB5_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST0 0x3044
+#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER0 0x3045
+#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST1 0x3046
+#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER1 0x3047
+#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB5_WR_DATA_DAGB 0x3048
+#define mmDAGB5_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB5_WR_DATA_DAGB_MAX_BURST0 0x3049
+#define mmDAGB5_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER0 0x304a
+#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB5_WR_DATA_DAGB_MAX_BURST1 0x304b
+#define mmDAGB5_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER1 0x304c
+#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB5_WR_VC0_CNTL 0x304d
+#define mmDAGB5_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC1_CNTL 0x304e
+#define mmDAGB5_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC2_CNTL 0x304f
+#define mmDAGB5_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC3_CNTL 0x3050
+#define mmDAGB5_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC4_CNTL 0x3051
+#define mmDAGB5_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC5_CNTL 0x3052
+#define mmDAGB5_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC6_CNTL 0x3053
+#define mmDAGB5_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_VC7_CNTL 0x3054
+#define mmDAGB5_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB5_WR_CNTL_MISC 0x3055
+#define mmDAGB5_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB5_WR_TLB_CREDIT 0x3056
+#define mmDAGB5_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB5_WR_DATA_CREDIT 0x3057
+#define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB5_WR_MISC_CREDIT 0x3058
+#define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB5_WRCLI_ASK_PENDING 0x305d
+#define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_GO_PENDING 0x305e
+#define mmDAGB5_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_GBLSEND_PENDING 0x305f
+#define mmDAGB5_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_TLB_PENDING 0x3060
+#define mmDAGB5_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_OARB_PENDING 0x3061
+#define mmDAGB5_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_OSD_PENDING 0x3062
+#define mmDAGB5_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_DBUS_ASK_PENDING 0x3063
+#define mmDAGB5_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB5_WRCLI_DBUS_GO_PENDING 0x3064
+#define mmDAGB5_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB5_DAGB_DLY 0x3065
+#define mmDAGB5_DAGB_DLY_BASE_IDX 1
+#define mmDAGB5_CNTL_MISC 0x3066
+#define mmDAGB5_CNTL_MISC_BASE_IDX 1
+#define mmDAGB5_CNTL_MISC2 0x3067
+#define mmDAGB5_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB5_FIFO_EMPTY 0x3068
+#define mmDAGB5_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB5_FIFO_FULL 0x3069
+#define mmDAGB5_FIFO_FULL_BASE_IDX 1
+#define mmDAGB5_WR_CREDITS_FULL 0x306a
+#define mmDAGB5_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB5_RD_CREDITS_FULL 0x306b
+#define mmDAGB5_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB5_PERFCOUNTER_LO 0x306c
+#define mmDAGB5_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB5_PERFCOUNTER_HI 0x306d
+#define mmDAGB5_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB5_PERFCOUNTER0_CFG 0x306e
+#define mmDAGB5_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB5_PERFCOUNTER1_CFG 0x306f
+#define mmDAGB5_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB5_PERFCOUNTER2_CFG 0x3070
+#define mmDAGB5_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB5_PERFCOUNTER_RSLT_CNTL 0x3071
+#define mmDAGB5_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB5_RESERVE0 0x3072
+#define mmDAGB5_RESERVE0_BASE_IDX 1
+#define mmDAGB5_RESERVE1 0x3073
+#define mmDAGB5_RESERVE1_BASE_IDX 1
+#define mmDAGB5_RESERVE2 0x3074
+#define mmDAGB5_RESERVE2_BASE_IDX 1
+#define mmDAGB5_RESERVE3 0x3075
+#define mmDAGB5_RESERVE3_BASE_IDX 1
+#define mmDAGB5_RESERVE4 0x3076
+#define mmDAGB5_RESERVE4_BASE_IDX 1
+#define mmDAGB5_RESERVE5 0x3077
+#define mmDAGB5_RESERVE5_BASE_IDX 1
+#define mmDAGB5_RESERVE6 0x3078
+#define mmDAGB5_RESERVE6_BASE_IDX 1
+#define mmDAGB5_RESERVE7 0x3079
+#define mmDAGB5_RESERVE7_BASE_IDX 1
+#define mmDAGB5_RESERVE8 0x307a
+#define mmDAGB5_RESERVE8_BASE_IDX 1
+#define mmDAGB5_RESERVE9 0x307b
+#define mmDAGB5_RESERVE9_BASE_IDX 1
+#define mmDAGB5_RESERVE10 0x307c
+#define mmDAGB5_RESERVE10_BASE_IDX 1
+#define mmDAGB5_RESERVE11 0x307d
+#define mmDAGB5_RESERVE11_BASE_IDX 1
+#define mmDAGB5_RESERVE12 0x307e
+#define mmDAGB5_RESERVE12_BASE_IDX 1
+#define mmDAGB5_RESERVE13 0x307f
+#define mmDAGB5_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_dagb_dagbdec6
+// base address: 0x74200
+#define mmDAGB6_RDCLI0 0x3080
+#define mmDAGB6_RDCLI0_BASE_IDX 1
+#define mmDAGB6_RDCLI1 0x3081
+#define mmDAGB6_RDCLI1_BASE_IDX 1
+#define mmDAGB6_RDCLI2 0x3082
+#define mmDAGB6_RDCLI2_BASE_IDX 1
+#define mmDAGB6_RDCLI3 0x3083
+#define mmDAGB6_RDCLI3_BASE_IDX 1
+#define mmDAGB6_RDCLI4 0x3084
+#define mmDAGB6_RDCLI4_BASE_IDX 1
+#define mmDAGB6_RDCLI5 0x3085
+#define mmDAGB6_RDCLI5_BASE_IDX 1
+#define mmDAGB6_RDCLI6 0x3086
+#define mmDAGB6_RDCLI6_BASE_IDX 1
+#define mmDAGB6_RDCLI7 0x3087
+#define mmDAGB6_RDCLI7_BASE_IDX 1
+#define mmDAGB6_RDCLI8 0x3088
+#define mmDAGB6_RDCLI8_BASE_IDX 1
+#define mmDAGB6_RDCLI9 0x3089
+#define mmDAGB6_RDCLI9_BASE_IDX 1
+#define mmDAGB6_RDCLI10 0x308a
+#define mmDAGB6_RDCLI10_BASE_IDX 1
+#define mmDAGB6_RDCLI11 0x308b
+#define mmDAGB6_RDCLI11_BASE_IDX 1
+#define mmDAGB6_RDCLI12 0x308c
+#define mmDAGB6_RDCLI12_BASE_IDX 1
+#define mmDAGB6_RDCLI13 0x308d
+#define mmDAGB6_RDCLI13_BASE_IDX 1
+#define mmDAGB6_RDCLI14 0x308e
+#define mmDAGB6_RDCLI14_BASE_IDX 1
+#define mmDAGB6_RDCLI15 0x308f
+#define mmDAGB6_RDCLI15_BASE_IDX 1
+#define mmDAGB6_RD_CNTL 0x3090
+#define mmDAGB6_RD_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_GMI_CNTL 0x3091
+#define mmDAGB6_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_ADDR_DAGB 0x3092
+#define mmDAGB6_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB6_RD_OUTPUT_DAGB_MAX_BURST 0x3093
+#define mmDAGB6_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB6_RD_OUTPUT_DAGB_LAZY_TIMER 0x3094
+#define mmDAGB6_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB6_RD_CGTT_CLK_CTRL 0x3095
+#define mmDAGB6_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB6_L1TLB_RD_CGTT_CLK_CTRL 0x3096
+#define mmDAGB6_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB6_ATCVM_RD_CGTT_CLK_CTRL 0x3097
+#define mmDAGB6_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST0 0x3098
+#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER0 0x3099
+#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST1 0x309a
+#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER1 0x309b
+#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB6_RD_VC0_CNTL 0x309c
+#define mmDAGB6_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC1_CNTL 0x309d
+#define mmDAGB6_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC2_CNTL 0x309e
+#define mmDAGB6_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC3_CNTL 0x309f
+#define mmDAGB6_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC4_CNTL 0x30a0
+#define mmDAGB6_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC5_CNTL 0x30a1
+#define mmDAGB6_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC6_CNTL 0x30a2
+#define mmDAGB6_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_VC7_CNTL 0x30a3
+#define mmDAGB6_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB6_RD_CNTL_MISC 0x30a4
+#define mmDAGB6_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB6_RD_TLB_CREDIT 0x30a5
+#define mmDAGB6_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB6_RDCLI_ASK_PENDING 0x30a6
+#define mmDAGB6_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB6_RDCLI_GO_PENDING 0x30a7
+#define mmDAGB6_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB6_RDCLI_GBLSEND_PENDING 0x30a8
+#define mmDAGB6_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB6_RDCLI_TLB_PENDING 0x30a9
+#define mmDAGB6_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB6_RDCLI_OARB_PENDING 0x30aa
+#define mmDAGB6_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB6_RDCLI_OSD_PENDING 0x30ab
+#define mmDAGB6_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI0 0x30ac
+#define mmDAGB6_WRCLI0_BASE_IDX 1
+#define mmDAGB6_WRCLI1 0x30ad
+#define mmDAGB6_WRCLI1_BASE_IDX 1
+#define mmDAGB6_WRCLI2 0x30ae
+#define mmDAGB6_WRCLI2_BASE_IDX 1
+#define mmDAGB6_WRCLI3 0x30af
+#define mmDAGB6_WRCLI3_BASE_IDX 1
+#define mmDAGB6_WRCLI4 0x30b0
+#define mmDAGB6_WRCLI4_BASE_IDX 1
+#define mmDAGB6_WRCLI5 0x30b1
+#define mmDAGB6_WRCLI5_BASE_IDX 1
+#define mmDAGB6_WRCLI6 0x30b2
+#define mmDAGB6_WRCLI6_BASE_IDX 1
+#define mmDAGB6_WRCLI7 0x30b3
+#define mmDAGB6_WRCLI7_BASE_IDX 1
+#define mmDAGB6_WRCLI8 0x30b4
+#define mmDAGB6_WRCLI8_BASE_IDX 1
+#define mmDAGB6_WRCLI9 0x30b5
+#define mmDAGB6_WRCLI9_BASE_IDX 1
+#define mmDAGB6_WRCLI10 0x30b6
+#define mmDAGB6_WRCLI10_BASE_IDX 1
+#define mmDAGB6_WRCLI11 0x30b7
+#define mmDAGB6_WRCLI11_BASE_IDX 1
+#define mmDAGB6_WRCLI12 0x30b8
+#define mmDAGB6_WRCLI12_BASE_IDX 1
+#define mmDAGB6_WRCLI13 0x30b9
+#define mmDAGB6_WRCLI13_BASE_IDX 1
+#define mmDAGB6_WRCLI14 0x30ba
+#define mmDAGB6_WRCLI14_BASE_IDX 1
+#define mmDAGB6_WRCLI15 0x30bb
+#define mmDAGB6_WRCLI15_BASE_IDX 1
+#define mmDAGB6_WR_CNTL 0x30bc
+#define mmDAGB6_WR_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_GMI_CNTL 0x30bd
+#define mmDAGB6_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_ADDR_DAGB 0x30be
+#define mmDAGB6_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB6_WR_OUTPUT_DAGB_MAX_BURST 0x30bf
+#define mmDAGB6_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB6_WR_OUTPUT_DAGB_LAZY_TIMER 0x30c0
+#define mmDAGB6_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB6_WR_CGTT_CLK_CTRL 0x30c1
+#define mmDAGB6_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB6_L1TLB_WR_CGTT_CLK_CTRL 0x30c2
+#define mmDAGB6_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB6_ATCVM_WR_CGTT_CLK_CTRL 0x30c3
+#define mmDAGB6_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST0 0x30c4
+#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER0 0x30c5
+#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST1 0x30c6
+#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER1 0x30c7
+#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB6_WR_DATA_DAGB 0x30c8
+#define mmDAGB6_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB6_WR_DATA_DAGB_MAX_BURST0 0x30c9
+#define mmDAGB6_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER0 0x30ca
+#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB6_WR_DATA_DAGB_MAX_BURST1 0x30cb
+#define mmDAGB6_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER1 0x30cc
+#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB6_WR_VC0_CNTL 0x30cd
+#define mmDAGB6_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC1_CNTL 0x30ce
+#define mmDAGB6_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC2_CNTL 0x30cf
+#define mmDAGB6_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC3_CNTL 0x30d0
+#define mmDAGB6_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC4_CNTL 0x30d1
+#define mmDAGB6_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC5_CNTL 0x30d2
+#define mmDAGB6_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC6_CNTL 0x30d3
+#define mmDAGB6_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_VC7_CNTL 0x30d4
+#define mmDAGB6_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB6_WR_CNTL_MISC 0x30d5
+#define mmDAGB6_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB6_WR_TLB_CREDIT 0x30d6
+#define mmDAGB6_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB6_WR_DATA_CREDIT 0x30d7
+#define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB6_WR_MISC_CREDIT 0x30d8
+#define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB6_WRCLI_ASK_PENDING 0x30dd
+#define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_GO_PENDING 0x30de
+#define mmDAGB6_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_GBLSEND_PENDING 0x30df
+#define mmDAGB6_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_TLB_PENDING 0x30e0
+#define mmDAGB6_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_OARB_PENDING 0x30e1
+#define mmDAGB6_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_OSD_PENDING 0x30e2
+#define mmDAGB6_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_DBUS_ASK_PENDING 0x30e3
+#define mmDAGB6_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB6_WRCLI_DBUS_GO_PENDING 0x30e4
+#define mmDAGB6_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB6_DAGB_DLY 0x30e5
+#define mmDAGB6_DAGB_DLY_BASE_IDX 1
+#define mmDAGB6_CNTL_MISC 0x30e6
+#define mmDAGB6_CNTL_MISC_BASE_IDX 1
+#define mmDAGB6_CNTL_MISC2 0x30e7
+#define mmDAGB6_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB6_FIFO_EMPTY 0x30e8
+#define mmDAGB6_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB6_FIFO_FULL 0x30e9
+#define mmDAGB6_FIFO_FULL_BASE_IDX 1
+#define mmDAGB6_WR_CREDITS_FULL 0x30ea
+#define mmDAGB6_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB6_RD_CREDITS_FULL 0x30eb
+#define mmDAGB6_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB6_PERFCOUNTER_LO 0x30ec
+#define mmDAGB6_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB6_PERFCOUNTER_HI 0x30ed
+#define mmDAGB6_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB6_PERFCOUNTER0_CFG 0x30ee
+#define mmDAGB6_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB6_PERFCOUNTER1_CFG 0x30ef
+#define mmDAGB6_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB6_PERFCOUNTER2_CFG 0x30f0
+#define mmDAGB6_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB6_PERFCOUNTER_RSLT_CNTL 0x30f1
+#define mmDAGB6_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB6_RESERVE0 0x30f2
+#define mmDAGB6_RESERVE0_BASE_IDX 1
+#define mmDAGB6_RESERVE1 0x30f3
+#define mmDAGB6_RESERVE1_BASE_IDX 1
+#define mmDAGB6_RESERVE2 0x30f4
+#define mmDAGB6_RESERVE2_BASE_IDX 1
+#define mmDAGB6_RESERVE3 0x30f5
+#define mmDAGB6_RESERVE3_BASE_IDX 1
+#define mmDAGB6_RESERVE4 0x30f6
+#define mmDAGB6_RESERVE4_BASE_IDX 1
+#define mmDAGB6_RESERVE5 0x30f7
+#define mmDAGB6_RESERVE5_BASE_IDX 1
+#define mmDAGB6_RESERVE6 0x30f8
+#define mmDAGB6_RESERVE6_BASE_IDX 1
+#define mmDAGB6_RESERVE7 0x30f9
+#define mmDAGB6_RESERVE7_BASE_IDX 1
+#define mmDAGB6_RESERVE8 0x30fa
+#define mmDAGB6_RESERVE8_BASE_IDX 1
+#define mmDAGB6_RESERVE9 0x30fb
+#define mmDAGB6_RESERVE9_BASE_IDX 1
+#define mmDAGB6_RESERVE10 0x30fc
+#define mmDAGB6_RESERVE10_BASE_IDX 1
+#define mmDAGB6_RESERVE11 0x30fd
+#define mmDAGB6_RESERVE11_BASE_IDX 1
+#define mmDAGB6_RESERVE12 0x30fe
+#define mmDAGB6_RESERVE12_BASE_IDX 1
+#define mmDAGB6_RESERVE13 0x30ff
+#define mmDAGB6_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_dagb_dagbdec7
+// base address: 0x74400
+#define mmDAGB7_RDCLI0 0x3100
+#define mmDAGB7_RDCLI0_BASE_IDX 1
+#define mmDAGB7_RDCLI1 0x3101
+#define mmDAGB7_RDCLI1_BASE_IDX 1
+#define mmDAGB7_RDCLI2 0x3102
+#define mmDAGB7_RDCLI2_BASE_IDX 1
+#define mmDAGB7_RDCLI3 0x3103
+#define mmDAGB7_RDCLI3_BASE_IDX 1
+#define mmDAGB7_RDCLI4 0x3104
+#define mmDAGB7_RDCLI4_BASE_IDX 1
+#define mmDAGB7_RDCLI5 0x3105
+#define mmDAGB7_RDCLI5_BASE_IDX 1
+#define mmDAGB7_RDCLI6 0x3106
+#define mmDAGB7_RDCLI6_BASE_IDX 1
+#define mmDAGB7_RDCLI7 0x3107
+#define mmDAGB7_RDCLI7_BASE_IDX 1
+#define mmDAGB7_RDCLI8 0x3108
+#define mmDAGB7_RDCLI8_BASE_IDX 1
+#define mmDAGB7_RDCLI9 0x3109
+#define mmDAGB7_RDCLI9_BASE_IDX 1
+#define mmDAGB7_RDCLI10 0x310a
+#define mmDAGB7_RDCLI10_BASE_IDX 1
+#define mmDAGB7_RDCLI11 0x310b
+#define mmDAGB7_RDCLI11_BASE_IDX 1
+#define mmDAGB7_RDCLI12 0x310c
+#define mmDAGB7_RDCLI12_BASE_IDX 1
+#define mmDAGB7_RDCLI13 0x310d
+#define mmDAGB7_RDCLI13_BASE_IDX 1
+#define mmDAGB7_RDCLI14 0x310e
+#define mmDAGB7_RDCLI14_BASE_IDX 1
+#define mmDAGB7_RDCLI15 0x310f
+#define mmDAGB7_RDCLI15_BASE_IDX 1
+#define mmDAGB7_RD_CNTL 0x3110
+#define mmDAGB7_RD_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_GMI_CNTL 0x3111
+#define mmDAGB7_RD_GMI_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_ADDR_DAGB 0x3112
+#define mmDAGB7_RD_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB7_RD_OUTPUT_DAGB_MAX_BURST 0x3113
+#define mmDAGB7_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB7_RD_OUTPUT_DAGB_LAZY_TIMER 0x3114
+#define mmDAGB7_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB7_RD_CGTT_CLK_CTRL 0x3115
+#define mmDAGB7_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB7_L1TLB_RD_CGTT_CLK_CTRL 0x3116
+#define mmDAGB7_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB7_ATCVM_RD_CGTT_CLK_CTRL 0x3117
+#define mmDAGB7_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST0 0x3118
+#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER0 0x3119
+#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST1 0x311a
+#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER1 0x311b
+#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB7_RD_VC0_CNTL 0x311c
+#define mmDAGB7_RD_VC0_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC1_CNTL 0x311d
+#define mmDAGB7_RD_VC1_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC2_CNTL 0x311e
+#define mmDAGB7_RD_VC2_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC3_CNTL 0x311f
+#define mmDAGB7_RD_VC3_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC4_CNTL 0x3120
+#define mmDAGB7_RD_VC4_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC5_CNTL 0x3121
+#define mmDAGB7_RD_VC5_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC6_CNTL 0x3122
+#define mmDAGB7_RD_VC6_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_VC7_CNTL 0x3123
+#define mmDAGB7_RD_VC7_CNTL_BASE_IDX 1
+#define mmDAGB7_RD_CNTL_MISC 0x3124
+#define mmDAGB7_RD_CNTL_MISC_BASE_IDX 1
+#define mmDAGB7_RD_TLB_CREDIT 0x3125
+#define mmDAGB7_RD_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB7_RDCLI_ASK_PENDING 0x3126
+#define mmDAGB7_RDCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB7_RDCLI_GO_PENDING 0x3127
+#define mmDAGB7_RDCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB7_RDCLI_GBLSEND_PENDING 0x3128
+#define mmDAGB7_RDCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB7_RDCLI_TLB_PENDING 0x3129
+#define mmDAGB7_RDCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB7_RDCLI_OARB_PENDING 0x312a
+#define mmDAGB7_RDCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB7_RDCLI_OSD_PENDING 0x312b
+#define mmDAGB7_RDCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI0 0x312c
+#define mmDAGB7_WRCLI0_BASE_IDX 1
+#define mmDAGB7_WRCLI1 0x312d
+#define mmDAGB7_WRCLI1_BASE_IDX 1
+#define mmDAGB7_WRCLI2 0x312e
+#define mmDAGB7_WRCLI2_BASE_IDX 1
+#define mmDAGB7_WRCLI3 0x312f
+#define mmDAGB7_WRCLI3_BASE_IDX 1
+#define mmDAGB7_WRCLI4 0x3130
+#define mmDAGB7_WRCLI4_BASE_IDX 1
+#define mmDAGB7_WRCLI5 0x3131
+#define mmDAGB7_WRCLI5_BASE_IDX 1
+#define mmDAGB7_WRCLI6 0x3132
+#define mmDAGB7_WRCLI6_BASE_IDX 1
+#define mmDAGB7_WRCLI7 0x3133
+#define mmDAGB7_WRCLI7_BASE_IDX 1
+#define mmDAGB7_WRCLI8 0x3134
+#define mmDAGB7_WRCLI8_BASE_IDX 1
+#define mmDAGB7_WRCLI9 0x3135
+#define mmDAGB7_WRCLI9_BASE_IDX 1
+#define mmDAGB7_WRCLI10 0x3136
+#define mmDAGB7_WRCLI10_BASE_IDX 1
+#define mmDAGB7_WRCLI11 0x3137
+#define mmDAGB7_WRCLI11_BASE_IDX 1
+#define mmDAGB7_WRCLI12 0x3138
+#define mmDAGB7_WRCLI12_BASE_IDX 1
+#define mmDAGB7_WRCLI13 0x3139
+#define mmDAGB7_WRCLI13_BASE_IDX 1
+#define mmDAGB7_WRCLI14 0x313a
+#define mmDAGB7_WRCLI14_BASE_IDX 1
+#define mmDAGB7_WRCLI15 0x313b
+#define mmDAGB7_WRCLI15_BASE_IDX 1
+#define mmDAGB7_WR_CNTL 0x313c
+#define mmDAGB7_WR_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_GMI_CNTL 0x313d
+#define mmDAGB7_WR_GMI_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_ADDR_DAGB 0x313e
+#define mmDAGB7_WR_ADDR_DAGB_BASE_IDX 1
+#define mmDAGB7_WR_OUTPUT_DAGB_MAX_BURST 0x313f
+#define mmDAGB7_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1
+#define mmDAGB7_WR_OUTPUT_DAGB_LAZY_TIMER 0x3140
+#define mmDAGB7_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1
+#define mmDAGB7_WR_CGTT_CLK_CTRL 0x3141
+#define mmDAGB7_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB7_L1TLB_WR_CGTT_CLK_CTRL 0x3142
+#define mmDAGB7_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB7_ATCVM_WR_CGTT_CLK_CTRL 0x3143
+#define mmDAGB7_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST0 0x3144
+#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER0 0x3145
+#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST1 0x3146
+#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER1 0x3147
+#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB7_WR_DATA_DAGB 0x3148
+#define mmDAGB7_WR_DATA_DAGB_BASE_IDX 1
+#define mmDAGB7_WR_DATA_DAGB_MAX_BURST0 0x3149
+#define mmDAGB7_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1
+#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER0 0x314a
+#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1
+#define mmDAGB7_WR_DATA_DAGB_MAX_BURST1 0x314b
+#define mmDAGB7_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1
+#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER1 0x314c
+#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1
+#define mmDAGB7_WR_VC0_CNTL 0x314d
+#define mmDAGB7_WR_VC0_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC1_CNTL 0x314e
+#define mmDAGB7_WR_VC1_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC2_CNTL 0x314f
+#define mmDAGB7_WR_VC2_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC3_CNTL 0x3150
+#define mmDAGB7_WR_VC3_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC4_CNTL 0x3151
+#define mmDAGB7_WR_VC4_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC5_CNTL 0x3152
+#define mmDAGB7_WR_VC5_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC6_CNTL 0x3153
+#define mmDAGB7_WR_VC6_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_VC7_CNTL 0x3154
+#define mmDAGB7_WR_VC7_CNTL_BASE_IDX 1
+#define mmDAGB7_WR_CNTL_MISC 0x3155
+#define mmDAGB7_WR_CNTL_MISC_BASE_IDX 1
+#define mmDAGB7_WR_TLB_CREDIT 0x3156
+#define mmDAGB7_WR_TLB_CREDIT_BASE_IDX 1
+#define mmDAGB7_WR_DATA_CREDIT 0x3157
+#define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1
+#define mmDAGB7_WR_MISC_CREDIT 0x3158
+#define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB7_WRCLI_ASK_PENDING 0x315d
+#define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_GO_PENDING 0x315e
+#define mmDAGB7_WRCLI_GO_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_GBLSEND_PENDING 0x315f
+#define mmDAGB7_WRCLI_GBLSEND_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_TLB_PENDING 0x3160
+#define mmDAGB7_WRCLI_TLB_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_OARB_PENDING 0x3161
+#define mmDAGB7_WRCLI_OARB_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_OSD_PENDING 0x3162
+#define mmDAGB7_WRCLI_OSD_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_DBUS_ASK_PENDING 0x3163
+#define mmDAGB7_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1
+#define mmDAGB7_WRCLI_DBUS_GO_PENDING 0x3164
+#define mmDAGB7_WRCLI_DBUS_GO_PENDING_BASE_IDX 1
+#define mmDAGB7_DAGB_DLY 0x3165
+#define mmDAGB7_DAGB_DLY_BASE_IDX 1
+#define mmDAGB7_CNTL_MISC 0x3166
+#define mmDAGB7_CNTL_MISC_BASE_IDX 1
+#define mmDAGB7_CNTL_MISC2 0x3167
+#define mmDAGB7_CNTL_MISC2_BASE_IDX 1
+#define mmDAGB7_FIFO_EMPTY 0x3168
+#define mmDAGB7_FIFO_EMPTY_BASE_IDX 1
+#define mmDAGB7_FIFO_FULL 0x3169
+#define mmDAGB7_FIFO_FULL_BASE_IDX 1
+#define mmDAGB7_WR_CREDITS_FULL 0x316a
+#define mmDAGB7_WR_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB7_RD_CREDITS_FULL 0x316b
+#define mmDAGB7_RD_CREDITS_FULL_BASE_IDX 1
+#define mmDAGB7_PERFCOUNTER_LO 0x316c
+#define mmDAGB7_PERFCOUNTER_LO_BASE_IDX 1
+#define mmDAGB7_PERFCOUNTER_HI 0x316d
+#define mmDAGB7_PERFCOUNTER_HI_BASE_IDX 1
+#define mmDAGB7_PERFCOUNTER0_CFG 0x316e
+#define mmDAGB7_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmDAGB7_PERFCOUNTER1_CFG 0x316f
+#define mmDAGB7_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmDAGB7_PERFCOUNTER2_CFG 0x3170
+#define mmDAGB7_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmDAGB7_PERFCOUNTER_RSLT_CNTL 0x3171
+#define mmDAGB7_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmDAGB7_RESERVE0 0x3172
+#define mmDAGB7_RESERVE0_BASE_IDX 1
+#define mmDAGB7_RESERVE1 0x3173
+#define mmDAGB7_RESERVE1_BASE_IDX 1
+#define mmDAGB7_RESERVE2 0x3174
+#define mmDAGB7_RESERVE2_BASE_IDX 1
+#define mmDAGB7_RESERVE3 0x3175
+#define mmDAGB7_RESERVE3_BASE_IDX 1
+#define mmDAGB7_RESERVE4 0x3176
+#define mmDAGB7_RESERVE4_BASE_IDX 1
+#define mmDAGB7_RESERVE5 0x3177
+#define mmDAGB7_RESERVE5_BASE_IDX 1
+#define mmDAGB7_RESERVE6 0x3178
+#define mmDAGB7_RESERVE6_BASE_IDX 1
+#define mmDAGB7_RESERVE7 0x3179
+#define mmDAGB7_RESERVE7_BASE_IDX 1
+#define mmDAGB7_RESERVE8 0x317a
+#define mmDAGB7_RESERVE8_BASE_IDX 1
+#define mmDAGB7_RESERVE9 0x317b
+#define mmDAGB7_RESERVE9_BASE_IDX 1
+#define mmDAGB7_RESERVE10 0x317c
+#define mmDAGB7_RESERVE10_BASE_IDX 1
+#define mmDAGB7_RESERVE11 0x317d
+#define mmDAGB7_RESERVE11_BASE_IDX 1
+#define mmDAGB7_RESERVE12 0x317e
+#define mmDAGB7_RESERVE12_BASE_IDX 1
+#define mmDAGB7_RESERVE13 0x317f
+#define mmDAGB7_RESERVE13_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec5
+// base address: 0x74a00
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP0 0x3280
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP1 0x3281
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_CLI2GRP_MAP0 0x3282
+#define mmMMEA5_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_CLI2GRP_MAP1 0x3283
+#define mmMMEA5_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_GRP2VC_MAP 0x3284
+#define mmMMEA5_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_GRP2VC_MAP 0x3285
+#define mmMMEA5_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_LAZY 0x3286
+#define mmMMEA5_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_LAZY 0x3287
+#define mmMMEA5_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_CAM_CNTL 0x3288
+#define mmMMEA5_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_CAM_CNTL 0x3289
+#define mmMMEA5_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA5_DRAM_PAGE_BURST 0x328a
+#define mmMMEA5_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_AGE 0x328b
+#define mmMMEA5_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_AGE 0x328c
+#define mmMMEA5_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_QUEUING 0x328d
+#define mmMMEA5_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_QUEUING 0x328e
+#define mmMMEA5_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_FIXED 0x328f
+#define mmMMEA5_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_FIXED 0x3290
+#define mmMMEA5_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_URGENCY 0x3291
+#define mmMMEA5_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_URGENCY 0x3292
+#define mmMMEA5_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI1 0x3293
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI2 0x3294
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI3 0x3295
+#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI1 0x3296
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI2 0x3297
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI3 0x3298
+#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA5_GMI_RD_CLI2GRP_MAP0 0x3299
+#define mmMMEA5_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA5_GMI_RD_CLI2GRP_MAP1 0x329a
+#define mmMMEA5_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA5_GMI_WR_CLI2GRP_MAP0 0x329b
+#define mmMMEA5_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA5_GMI_WR_CLI2GRP_MAP1 0x329c
+#define mmMMEA5_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA5_GMI_RD_GRP2VC_MAP 0x329d
+#define mmMMEA5_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA5_GMI_WR_GRP2VC_MAP 0x329e
+#define mmMMEA5_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA5_GMI_RD_LAZY 0x329f
+#define mmMMEA5_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA5_GMI_WR_LAZY 0x32a0
+#define mmMMEA5_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA5_GMI_RD_CAM_CNTL 0x32a1
+#define mmMMEA5_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA5_GMI_WR_CAM_CNTL 0x32a2
+#define mmMMEA5_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA5_GMI_PAGE_BURST 0x32a3
+#define mmMMEA5_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_AGE 0x32a4
+#define mmMMEA5_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_AGE 0x32a5
+#define mmMMEA5_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_QUEUING 0x32a6
+#define mmMMEA5_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_QUEUING 0x32a7
+#define mmMMEA5_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_FIXED 0x32a8
+#define mmMMEA5_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_FIXED 0x32a9
+#define mmMMEA5_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_URGENCY 0x32aa
+#define mmMMEA5_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_URGENCY 0x32ab
+#define mmMMEA5_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_URGENCY_MASKING 0x32ac
+#define mmMMEA5_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_URGENCY_MASKING 0x32ad
+#define mmMMEA5_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI1 0x32ae
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI2 0x32af
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI3 0x32b0
+#define mmMMEA5_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI1 0x32b1
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI2 0x32b2
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI3 0x32b3
+#define mmMMEA5_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_BASE_ADDR0 0x32b4
+#define mmMMEA5_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR0 0x32b5
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_BASE_ADDR1 0x32b6
+#define mmMMEA5_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR1 0x32b7
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR1 0x32b8
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_BASE_ADDR2 0x32b9
+#define mmMMEA5_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR2 0x32ba
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_BASE_ADDR3 0x32bb
+#define mmMMEA5_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR3 0x32bc
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR3 0x32bd
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_BASE_ADDR4 0x32be
+#define mmMMEA5_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR4 0x32bf
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_BASE_ADDR5 0x32c0
+#define mmMMEA5_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR5 0x32c1
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR5 0x32c2
+#define mmMMEA5_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA5_ADDRNORMDRAM_HOLE_CNTL 0x32c3
+#define mmMMEA5_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA5_ADDRNORMGMI_HOLE_CNTL 0x32c4
+#define mmMMEA5_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x32c5
+#define mmMMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG 0x32c6
+#define mmMMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA5_ADDRDEC_BANK_CFG 0x32c7
+#define mmMMEA5_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA5_ADDRDEC_MISC_CFG 0x32c8
+#define mmMMEA5_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK0 0x32c9
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK1 0x32ca
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK2 0x32cb
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK3 0x32cc
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK4 0x32cd
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK5 0x32ce
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC 0x32cf
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC2 0x32d0
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS0 0x32d1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS1 0x32d2
+#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA5_ADDRDECDRAM_HARVEST_ENABLE 0x32d3
+#define mmMMEA5_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK0 0x32d4
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK1 0x32d5
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK2 0x32d6
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK3 0x32d7
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK4 0x32d8
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK5 0x32d9
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC 0x32da
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC2 0x32db
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS0 0x32dc
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS1 0x32dd
+#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA5_ADDRDECGMI_HARVEST_ENABLE 0x32de
+#define mmMMEA5_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS0 0x32df
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS1 0x32e0
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS2 0x32e1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS3 0x32e2
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS0 0x32e3
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS1 0x32e4
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS2 0x32e5
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS3 0x32e6
+#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS01 0x32e7
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS23 0x32e8
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS01 0x32e9
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS23 0x32ea
+#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS01 0x32eb
+#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS23 0x32ec
+#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS01 0x32ed
+#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS23 0x32ee
+#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS01 0x32ef
+#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS23 0x32f0
+#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS01 0x32f1
+#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS23 0x32f2
+#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS01 0x32f3
+#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS23 0x32f4
+#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_RM_SEL_CS01 0x32f5
+#define mmMMEA5_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_RM_SEL_CS23 0x32f6
+#define mmMMEA5_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS01 0x32f7
+#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS23 0x32f8
+#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS0 0x32f9
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS1 0x32fa
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS2 0x32fb
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS3 0x32fc
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS0 0x32fd
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS1 0x32fe
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS2 0x32ff
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS3 0x3300
+#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS01 0x3301
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS23 0x3302
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS01 0x3303
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS23 0x3304
+#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS01 0x3305
+#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS23 0x3306
+#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS01 0x3307
+#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS23 0x3308
+#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS01 0x3309
+#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS23 0x330a
+#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS01 0x330b
+#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS23 0x330c
+#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS01 0x330d
+#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS23 0x330e
+#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_RM_SEL_CS01 0x330f
+#define mmMMEA5_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_RM_SEL_CS23 0x3310
+#define mmMMEA5_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS01 0x3311
+#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS23 0x3312
+#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS0 0x3313
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS1 0x3314
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS2 0x3315
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS3 0x3316
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS0 0x3317
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS1 0x3318
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS2 0x3319
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS3 0x331a
+#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS01 0x331b
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS23 0x331c
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS01 0x331d
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS23 0x331e
+#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS01 0x331f
+#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS23 0x3320
+#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS01 0x3321
+#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS23 0x3322
+#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS01 0x3323
+#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS23 0x3324
+#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS01 0x3325
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS23 0x3326
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS01 0x3327
+#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS23 0x3328
+#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_RM_SEL_CS01 0x3329
+#define mmMMEA5_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_RM_SEL_CS23 0x332a
+#define mmMMEA5_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS01 0x332b
+#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS23 0x332c
+#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA5_ADDRNORMDRAM_GLOBAL_CNTL 0x332d
+#define mmMMEA5_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA5_ADDRNORMGMI_GLOBAL_CNTL 0x332e
+#define mmMMEA5_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA5_IO_RD_CLI2GRP_MAP0 0x3355
+#define mmMMEA5_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA5_IO_RD_CLI2GRP_MAP1 0x3356
+#define mmMMEA5_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA5_IO_WR_CLI2GRP_MAP0 0x3357
+#define mmMMEA5_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA5_IO_WR_CLI2GRP_MAP1 0x3358
+#define mmMMEA5_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA5_IO_RD_COMBINE_FLUSH 0x3359
+#define mmMMEA5_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA5_IO_WR_COMBINE_FLUSH 0x335a
+#define mmMMEA5_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA5_IO_GROUP_BURST 0x335b
+#define mmMMEA5_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_AGE 0x335c
+#define mmMMEA5_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_AGE 0x335d
+#define mmMMEA5_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_QUEUING 0x335e
+#define mmMMEA5_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_QUEUING 0x335f
+#define mmMMEA5_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_FIXED 0x3360
+#define mmMMEA5_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_FIXED 0x3361
+#define mmMMEA5_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_URGENCY 0x3362
+#define mmMMEA5_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_URGENCY 0x3363
+#define mmMMEA5_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_URGENCY_MASKING 0x3364
+#define mmMMEA5_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_URGENCY_MASKING 0x3365
+#define mmMMEA5_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI1 0x3366
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI2 0x3367
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI3 0x3368
+#define mmMMEA5_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI1 0x3369
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI2 0x336a
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI3 0x336b
+#define mmMMEA5_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA5_SDP_ARB_DRAM 0x336c
+#define mmMMEA5_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA5_SDP_ARB_GMI 0x336d
+#define mmMMEA5_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA5_SDP_ARB_FINAL 0x336e
+#define mmMMEA5_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA5_SDP_DRAM_PRIORITY 0x336f
+#define mmMMEA5_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA5_SDP_GMI_PRIORITY 0x3370
+#define mmMMEA5_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA5_SDP_IO_PRIORITY 0x3371
+#define mmMMEA5_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA5_SDP_CREDITS 0x3372
+#define mmMMEA5_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA5_SDP_TAG_RESERVE0 0x3373
+#define mmMMEA5_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA5_SDP_TAG_RESERVE1 0x3374
+#define mmMMEA5_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA5_SDP_VCC_RESERVE0 0x3375
+#define mmMMEA5_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA5_SDP_VCC_RESERVE1 0x3376
+#define mmMMEA5_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA5_SDP_VCD_RESERVE0 0x3377
+#define mmMMEA5_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA5_SDP_VCD_RESERVE1 0x3378
+#define mmMMEA5_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA5_SDP_REQ_CNTL 0x3379
+#define mmMMEA5_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA5_MISC 0x337a
+#define mmMMEA5_MISC_BASE_IDX 1
+#define mmMMEA5_LATENCY_SAMPLING 0x337b
+#define mmMMEA5_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA5_PERFCOUNTER_LO 0x337c
+#define mmMMEA5_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA5_PERFCOUNTER_HI 0x337d
+#define mmMMEA5_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA5_PERFCOUNTER0_CFG 0x337e
+#define mmMMEA5_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA5_PERFCOUNTER1_CFG 0x337f
+#define mmMMEA5_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA5_PERFCOUNTER_RSLT_CNTL 0x3380
+#define mmMMEA5_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA5_EDC_CNT 0x3386
+#define mmMMEA5_EDC_CNT_BASE_IDX 1
+#define mmMMEA5_EDC_CNT2 0x3387
+#define mmMMEA5_EDC_CNT2_BASE_IDX 1
+#define mmMMEA5_DSM_CNTL 0x3388
+#define mmMMEA5_DSM_CNTL_BASE_IDX 1
+#define mmMMEA5_DSM_CNTLA 0x3389
+#define mmMMEA5_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA5_DSM_CNTLB 0x338a
+#define mmMMEA5_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA5_DSM_CNTL2 0x338b
+#define mmMMEA5_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA5_DSM_CNTL2A 0x338c
+#define mmMMEA5_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA5_DSM_CNTL2B 0x338d
+#define mmMMEA5_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA5_CGTT_CLK_CTRL 0x338f
+#define mmMMEA5_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA5_EDC_MODE 0x3390
+#define mmMMEA5_EDC_MODE_BASE_IDX 1
+#define mmMMEA5_ERR_STATUS 0x3391
+#define mmMMEA5_ERR_STATUS_BASE_IDX 1
+#define mmMMEA5_MISC2 0x3392
+#define mmMMEA5_MISC2_BASE_IDX 1
+#define mmMMEA5_ADDRDEC_SELECT 0x3393
+#define mmMMEA5_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA5_EDC_CNT3 0x3394
+#define mmMMEA5_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec6
+// base address: 0x74f00
+#define mmMMEA6_DRAM_RD_CLI2GRP_MAP0 0x33c0
+#define mmMMEA6_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_CLI2GRP_MAP1 0x33c1
+#define mmMMEA6_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_CLI2GRP_MAP0 0x33c2
+#define mmMMEA6_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_CLI2GRP_MAP1 0x33c3
+#define mmMMEA6_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_GRP2VC_MAP 0x33c4
+#define mmMMEA6_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_GRP2VC_MAP 0x33c5
+#define mmMMEA6_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_LAZY 0x33c6
+#define mmMMEA6_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_LAZY 0x33c7
+#define mmMMEA6_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_CAM_CNTL 0x33c8
+#define mmMMEA6_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_CAM_CNTL 0x33c9
+#define mmMMEA6_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA6_DRAM_PAGE_BURST 0x33ca
+#define mmMMEA6_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_AGE 0x33cb
+#define mmMMEA6_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_AGE 0x33cc
+#define mmMMEA6_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_QUEUING 0x33cd
+#define mmMMEA6_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_QUEUING 0x33ce
+#define mmMMEA6_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_FIXED 0x33cf
+#define mmMMEA6_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_FIXED 0x33d0
+#define mmMMEA6_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_URGENCY 0x33d1
+#define mmMMEA6_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_URGENCY 0x33d2
+#define mmMMEA6_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI1 0x33d3
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI2 0x33d4
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI3 0x33d5
+#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI1 0x33d6
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI2 0x33d7
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI3 0x33d8
+#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA6_GMI_RD_CLI2GRP_MAP0 0x33d9
+#define mmMMEA6_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA6_GMI_RD_CLI2GRP_MAP1 0x33da
+#define mmMMEA6_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA6_GMI_WR_CLI2GRP_MAP0 0x33db
+#define mmMMEA6_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA6_GMI_WR_CLI2GRP_MAP1 0x33dc
+#define mmMMEA6_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA6_GMI_RD_GRP2VC_MAP 0x33dd
+#define mmMMEA6_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA6_GMI_WR_GRP2VC_MAP 0x33de
+#define mmMMEA6_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA6_GMI_RD_LAZY 0x33df
+#define mmMMEA6_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA6_GMI_WR_LAZY 0x33e0
+#define mmMMEA6_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA6_GMI_RD_CAM_CNTL 0x33e1
+#define mmMMEA6_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA6_GMI_WR_CAM_CNTL 0x33e2
+#define mmMMEA6_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA6_GMI_PAGE_BURST 0x33e3
+#define mmMMEA6_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_AGE 0x33e4
+#define mmMMEA6_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_AGE 0x33e5
+#define mmMMEA6_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_QUEUING 0x33e6
+#define mmMMEA6_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_QUEUING 0x33e7
+#define mmMMEA6_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_FIXED 0x33e8
+#define mmMMEA6_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_FIXED 0x33e9
+#define mmMMEA6_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_URGENCY 0x33ea
+#define mmMMEA6_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_URGENCY 0x33eb
+#define mmMMEA6_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_URGENCY_MASKING 0x33ec
+#define mmMMEA6_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_URGENCY_MASKING 0x33ed
+#define mmMMEA6_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI1 0x33ee
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI2 0x33ef
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI3 0x33f0
+#define mmMMEA6_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI1 0x33f1
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI2 0x33f2
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI3 0x33f3
+#define mmMMEA6_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_BASE_ADDR0 0x33f4
+#define mmMMEA6_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR0 0x33f5
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_BASE_ADDR1 0x33f6
+#define mmMMEA6_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR1 0x33f7
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR1 0x33f8
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_BASE_ADDR2 0x33f9
+#define mmMMEA6_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR2 0x33fa
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_BASE_ADDR3 0x33fb
+#define mmMMEA6_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR3 0x33fc
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR3 0x33fd
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_BASE_ADDR4 0x33fe
+#define mmMMEA6_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR4 0x33ff
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_BASE_ADDR5 0x3400
+#define mmMMEA6_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR5 0x3401
+#define mmMMEA6_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR5 0x3402
+#define mmMMEA6_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA6_ADDRNORMDRAM_HOLE_CNTL 0x3403
+#define mmMMEA6_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA6_ADDRNORMGMI_HOLE_CNTL 0x3404
+#define mmMMEA6_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x3405
+#define mmMMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG 0x3406
+#define mmMMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA6_ADDRDEC_BANK_CFG 0x3407
+#define mmMMEA6_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA6_ADDRDEC_MISC_CFG 0x3408
+#define mmMMEA6_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK0 0x3409
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK1 0x340a
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK2 0x340b
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK3 0x340c
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK4 0x340d
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK5 0x340e
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC 0x340f
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC2 0x3410
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS0 0x3411
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS1 0x3412
+#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA6_ADDRDECDRAM_HARVEST_ENABLE 0x3413
+#define mmMMEA6_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK0 0x3414
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK1 0x3415
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK2 0x3416
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK3 0x3417
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK4 0x3418
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK5 0x3419
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC 0x341a
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC2 0x341b
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS0 0x341c
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS1 0x341d
+#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA6_ADDRDECGMI_HARVEST_ENABLE 0x341e
+#define mmMMEA6_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS0 0x341f
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS1 0x3420
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS2 0x3421
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS3 0x3422
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS0 0x3423
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS1 0x3424
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS2 0x3425
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS3 0x3426
+#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS01 0x3427
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS23 0x3428
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS01 0x3429
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS23 0x342a
+#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS01 0x342b
+#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS23 0x342c
+#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS01 0x342d
+#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS23 0x342e
+#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS01 0x342f
+#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS23 0x3430
+#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS01 0x3431
+#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS23 0x3432
+#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS01 0x3433
+#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS23 0x3434
+#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_RM_SEL_CS01 0x3435
+#define mmMMEA6_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_RM_SEL_CS23 0x3436
+#define mmMMEA6_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS01 0x3437
+#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS23 0x3438
+#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS0 0x3439
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS1 0x343a
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS2 0x343b
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS3 0x343c
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS0 0x343d
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS1 0x343e
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS2 0x343f
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS3 0x3440
+#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS01 0x3441
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS23 0x3442
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS01 0x3443
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS23 0x3444
+#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS01 0x3445
+#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS23 0x3446
+#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS01 0x3447
+#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS23 0x3448
+#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS01 0x3449
+#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS23 0x344a
+#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS01 0x344b
+#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS23 0x344c
+#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS01 0x344d
+#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS23 0x344e
+#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_RM_SEL_CS01 0x344f
+#define mmMMEA6_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_RM_SEL_CS23 0x3450
+#define mmMMEA6_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS01 0x3451
+#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS23 0x3452
+#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS0 0x3453
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS1 0x3454
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS2 0x3455
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS3 0x3456
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS0 0x3457
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS1 0x3458
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS2 0x3459
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS3 0x345a
+#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS01 0x345b
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS23 0x345c
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS01 0x345d
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS23 0x345e
+#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS01 0x345f
+#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS23 0x3460
+#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS01 0x3461
+#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS23 0x3462
+#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS01 0x3463
+#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS23 0x3464
+#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS01 0x3465
+#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS23 0x3466
+#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS01 0x3467
+#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS23 0x3468
+#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_RM_SEL_CS01 0x3469
+#define mmMMEA6_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_RM_SEL_CS23 0x346a
+#define mmMMEA6_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS01 0x346b
+#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS23 0x346c
+#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA6_ADDRNORMDRAM_GLOBAL_CNTL 0x346d
+#define mmMMEA6_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA6_ADDRNORMGMI_GLOBAL_CNTL 0x346e
+#define mmMMEA6_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA6_IO_RD_CLI2GRP_MAP0 0x3495
+#define mmMMEA6_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA6_IO_RD_CLI2GRP_MAP1 0x3496
+#define mmMMEA6_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA6_IO_WR_CLI2GRP_MAP0 0x3497
+#define mmMMEA6_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA6_IO_WR_CLI2GRP_MAP1 0x3498
+#define mmMMEA6_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA6_IO_RD_COMBINE_FLUSH 0x3499
+#define mmMMEA6_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA6_IO_WR_COMBINE_FLUSH 0x349a
+#define mmMMEA6_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA6_IO_GROUP_BURST 0x349b
+#define mmMMEA6_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_AGE 0x349c
+#define mmMMEA6_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_AGE 0x349d
+#define mmMMEA6_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_QUEUING 0x349e
+#define mmMMEA6_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_QUEUING 0x349f
+#define mmMMEA6_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_FIXED 0x34a0
+#define mmMMEA6_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_FIXED 0x34a1
+#define mmMMEA6_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_URGENCY 0x34a2
+#define mmMMEA6_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_URGENCY 0x34a3
+#define mmMMEA6_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_URGENCY_MASKING 0x34a4
+#define mmMMEA6_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_URGENCY_MASKING 0x34a5
+#define mmMMEA6_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI1 0x34a6
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI2 0x34a7
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI3 0x34a8
+#define mmMMEA6_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI1 0x34a9
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI2 0x34aa
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI3 0x34ab
+#define mmMMEA6_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA6_SDP_ARB_DRAM 0x34ac
+#define mmMMEA6_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA6_SDP_ARB_GMI 0x34ad
+#define mmMMEA6_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA6_SDP_ARB_FINAL 0x34ae
+#define mmMMEA6_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA6_SDP_DRAM_PRIORITY 0x34af
+#define mmMMEA6_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA6_SDP_GMI_PRIORITY 0x34b0
+#define mmMMEA6_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA6_SDP_IO_PRIORITY 0x34b1
+#define mmMMEA6_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA6_SDP_CREDITS 0x34b2
+#define mmMMEA6_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA6_SDP_TAG_RESERVE0 0x34b3
+#define mmMMEA6_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA6_SDP_TAG_RESERVE1 0x34b4
+#define mmMMEA6_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA6_SDP_VCC_RESERVE0 0x34b5
+#define mmMMEA6_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA6_SDP_VCC_RESERVE1 0x34b6
+#define mmMMEA6_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA6_SDP_VCD_RESERVE0 0x34b7
+#define mmMMEA6_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA6_SDP_VCD_RESERVE1 0x34b8
+#define mmMMEA6_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA6_SDP_REQ_CNTL 0x34b9
+#define mmMMEA6_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA6_MISC 0x34ba
+#define mmMMEA6_MISC_BASE_IDX 1
+#define mmMMEA6_LATENCY_SAMPLING 0x34bb
+#define mmMMEA6_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA6_PERFCOUNTER_LO 0x34bc
+#define mmMMEA6_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA6_PERFCOUNTER_HI 0x34bd
+#define mmMMEA6_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA6_PERFCOUNTER0_CFG 0x34be
+#define mmMMEA6_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA6_PERFCOUNTER1_CFG 0x34bf
+#define mmMMEA6_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA6_PERFCOUNTER_RSLT_CNTL 0x34c0
+#define mmMMEA6_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA6_EDC_CNT 0x34c6
+#define mmMMEA6_EDC_CNT_BASE_IDX 1
+#define mmMMEA6_EDC_CNT2 0x34c7
+#define mmMMEA6_EDC_CNT2_BASE_IDX 1
+#define mmMMEA6_DSM_CNTL 0x34c8
+#define mmMMEA6_DSM_CNTL_BASE_IDX 1
+#define mmMMEA6_DSM_CNTLA 0x34c9
+#define mmMMEA6_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA6_DSM_CNTLB 0x34ca
+#define mmMMEA6_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA6_DSM_CNTL2 0x34cb
+#define mmMMEA6_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA6_DSM_CNTL2A 0x34cc
+#define mmMMEA6_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA6_DSM_CNTL2B 0x34cd
+#define mmMMEA6_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA6_CGTT_CLK_CTRL 0x34cf
+#define mmMMEA6_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA6_EDC_MODE 0x34d0
+#define mmMMEA6_EDC_MODE_BASE_IDX 1
+#define mmMMEA6_ERR_STATUS 0x34d1
+#define mmMMEA6_ERR_STATUS_BASE_IDX 1
+#define mmMMEA6_MISC2 0x34d2
+#define mmMMEA6_MISC2_BASE_IDX 1
+#define mmMMEA6_ADDRDEC_SELECT 0x34d3
+#define mmMMEA6_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA6_EDC_CNT3 0x34d4
+#define mmMMEA6_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_ea_mmeadec7
+// base address: 0x75400
+#define mmMMEA7_DRAM_RD_CLI2GRP_MAP0 0x3500
+#define mmMMEA7_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_CLI2GRP_MAP1 0x3501
+#define mmMMEA7_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_CLI2GRP_MAP0 0x3502
+#define mmMMEA7_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_CLI2GRP_MAP1 0x3503
+#define mmMMEA7_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_GRP2VC_MAP 0x3504
+#define mmMMEA7_DRAM_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_GRP2VC_MAP 0x3505
+#define mmMMEA7_DRAM_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_LAZY 0x3506
+#define mmMMEA7_DRAM_RD_LAZY_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_LAZY 0x3507
+#define mmMMEA7_DRAM_WR_LAZY_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_CAM_CNTL 0x3508
+#define mmMMEA7_DRAM_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_CAM_CNTL 0x3509
+#define mmMMEA7_DRAM_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA7_DRAM_PAGE_BURST 0x350a
+#define mmMMEA7_DRAM_PAGE_BURST_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_AGE 0x350b
+#define mmMMEA7_DRAM_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_AGE 0x350c
+#define mmMMEA7_DRAM_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_QUEUING 0x350d
+#define mmMMEA7_DRAM_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_QUEUING 0x350e
+#define mmMMEA7_DRAM_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_FIXED 0x350f
+#define mmMMEA7_DRAM_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_FIXED 0x3510
+#define mmMMEA7_DRAM_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_URGENCY 0x3511
+#define mmMMEA7_DRAM_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_URGENCY 0x3512
+#define mmMMEA7_DRAM_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI1 0x3513
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI2 0x3514
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI3 0x3515
+#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI1 0x3516
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI2 0x3517
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI3 0x3518
+#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA7_GMI_RD_CLI2GRP_MAP0 0x3519
+#define mmMMEA7_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA7_GMI_RD_CLI2GRP_MAP1 0x351a
+#define mmMMEA7_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA7_GMI_WR_CLI2GRP_MAP0 0x351b
+#define mmMMEA7_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA7_GMI_WR_CLI2GRP_MAP1 0x351c
+#define mmMMEA7_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA7_GMI_RD_GRP2VC_MAP 0x351d
+#define mmMMEA7_GMI_RD_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA7_GMI_WR_GRP2VC_MAP 0x351e
+#define mmMMEA7_GMI_WR_GRP2VC_MAP_BASE_IDX 1
+#define mmMMEA7_GMI_RD_LAZY 0x351f
+#define mmMMEA7_GMI_RD_LAZY_BASE_IDX 1
+#define mmMMEA7_GMI_WR_LAZY 0x3520
+#define mmMMEA7_GMI_WR_LAZY_BASE_IDX 1
+#define mmMMEA7_GMI_RD_CAM_CNTL 0x3521
+#define mmMMEA7_GMI_RD_CAM_CNTL_BASE_IDX 1
+#define mmMMEA7_GMI_WR_CAM_CNTL 0x3522
+#define mmMMEA7_GMI_WR_CAM_CNTL_BASE_IDX 1
+#define mmMMEA7_GMI_PAGE_BURST 0x3523
+#define mmMMEA7_GMI_PAGE_BURST_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_AGE 0x3524
+#define mmMMEA7_GMI_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_AGE 0x3525
+#define mmMMEA7_GMI_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_QUEUING 0x3526
+#define mmMMEA7_GMI_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_QUEUING 0x3527
+#define mmMMEA7_GMI_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_FIXED 0x3528
+#define mmMMEA7_GMI_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_FIXED 0x3529
+#define mmMMEA7_GMI_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_URGENCY 0x352a
+#define mmMMEA7_GMI_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_URGENCY 0x352b
+#define mmMMEA7_GMI_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_URGENCY_MASKING 0x352c
+#define mmMMEA7_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_URGENCY_MASKING 0x352d
+#define mmMMEA7_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI1 0x352e
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI2 0x352f
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI3 0x3530
+#define mmMMEA7_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI1 0x3531
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI2 0x3532
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI3 0x3533
+#define mmMMEA7_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_BASE_ADDR0 0x3534
+#define mmMMEA7_ADDRNORM_BASE_ADDR0_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR0 0x3535
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_BASE_ADDR1 0x3536
+#define mmMMEA7_ADDRNORM_BASE_ADDR1_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR1 0x3537
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR1 0x3538
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_BASE_ADDR2 0x3539
+#define mmMMEA7_ADDRNORM_BASE_ADDR2_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR2 0x353a
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_BASE_ADDR3 0x353b
+#define mmMMEA7_ADDRNORM_BASE_ADDR3_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR3 0x353c
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR3 0x353d
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_BASE_ADDR4 0x353e
+#define mmMMEA7_ADDRNORM_BASE_ADDR4_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR4 0x353f
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_BASE_ADDR5 0x3540
+#define mmMMEA7_ADDRNORM_BASE_ADDR5_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR5 0x3541
+#define mmMMEA7_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR5 0x3542
+#define mmMMEA7_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1
+#define mmMMEA7_ADDRNORMDRAM_HOLE_CNTL 0x3543
+#define mmMMEA7_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA7_ADDRNORMGMI_HOLE_CNTL 0x3544
+#define mmMMEA7_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1
+#define mmMMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x3545
+#define mmMMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG 0x3546
+#define mmMMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1
+#define mmMMEA7_ADDRDEC_BANK_CFG 0x3547
+#define mmMMEA7_ADDRDEC_BANK_CFG_BASE_IDX 1
+#define mmMMEA7_ADDRDEC_MISC_CFG 0x3548
+#define mmMMEA7_ADDRDEC_MISC_CFG_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK0 0x3549
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK1 0x354a
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK2 0x354b
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK3 0x354c
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK4 0x354d
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK5 0x354e
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC 0x354f
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC2 0x3550
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS0 0x3551
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS1 0x3552
+#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA7_ADDRDECDRAM_HARVEST_ENABLE 0x3553
+#define mmMMEA7_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK0 0x3554
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK1 0x3555
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK2 0x3556
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK3 0x3557
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK4 0x3558
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK5 0x3559
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC 0x355a
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC2 0x355b
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS0 0x355c
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS1 0x355d
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1
+#define mmMMEA7_ADDRDECGMI_HARVEST_ENABLE 0x355e
+#define mmMMEA7_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS0 0x355f
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS1 0x3560
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS2 0x3561
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS3 0x3562
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS0 0x3563
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS1 0x3564
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS2 0x3565
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS3 0x3566
+#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS01 0x3567
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS23 0x3568
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS01 0x3569
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS23 0x356a
+#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS01 0x356b
+#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS23 0x356c
+#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS01 0x356d
+#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS23 0x356e
+#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS01 0x356f
+#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS23 0x3570
+#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS01 0x3571
+#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS23 0x3572
+#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS01 0x3573
+#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS23 0x3574
+#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_RM_SEL_CS01 0x3575
+#define mmMMEA7_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_RM_SEL_CS23 0x3576
+#define mmMMEA7_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS01 0x3577
+#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS23 0x3578
+#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS0 0x3579
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS1 0x357a
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS2 0x357b
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS3 0x357c
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS0 0x357d
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS1 0x357e
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS2 0x357f
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS3 0x3580
+#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS01 0x3581
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS23 0x3582
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS01 0x3583
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS23 0x3584
+#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS01 0x3585
+#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS23 0x3586
+#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS01 0x3587
+#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS23 0x3588
+#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS01 0x3589
+#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS23 0x358a
+#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS01 0x358b
+#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS23 0x358c
+#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS01 0x358d
+#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS23 0x358e
+#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_RM_SEL_CS01 0x358f
+#define mmMMEA7_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_RM_SEL_CS23 0x3590
+#define mmMMEA7_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS01 0x3591
+#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS23 0x3592
+#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS0 0x3593
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS1 0x3594
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS2 0x3595
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS3 0x3596
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS0 0x3597
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS1 0x3598
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS2 0x3599
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS3 0x359a
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS01 0x359b
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS23 0x359c
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS01 0x359d
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS23 0x359e
+#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS01 0x359f
+#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS23 0x35a0
+#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS01 0x35a1
+#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS23 0x35a2
+#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS01 0x35a3
+#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS23 0x35a4
+#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS01 0x35a5
+#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS23 0x35a6
+#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS01 0x35a7
+#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS23 0x35a8
+#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_RM_SEL_CS01 0x35a9
+#define mmMMEA7_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_RM_SEL_CS23 0x35aa
+#define mmMMEA7_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS01 0x35ab
+#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1
+#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS23 0x35ac
+#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1
+#define mmMMEA7_ADDRNORMDRAM_GLOBAL_CNTL 0x35ad
+#define mmMMEA7_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA7_ADDRNORMGMI_GLOBAL_CNTL 0x35ae
+#define mmMMEA7_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1
+#define mmMMEA7_IO_RD_CLI2GRP_MAP0 0x35d5
+#define mmMMEA7_IO_RD_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA7_IO_RD_CLI2GRP_MAP1 0x35d6
+#define mmMMEA7_IO_RD_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA7_IO_WR_CLI2GRP_MAP0 0x35d7
+#define mmMMEA7_IO_WR_CLI2GRP_MAP0_BASE_IDX 1
+#define mmMMEA7_IO_WR_CLI2GRP_MAP1 0x35d8
+#define mmMMEA7_IO_WR_CLI2GRP_MAP1_BASE_IDX 1
+#define mmMMEA7_IO_RD_COMBINE_FLUSH 0x35d9
+#define mmMMEA7_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA7_IO_WR_COMBINE_FLUSH 0x35da
+#define mmMMEA7_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define mmMMEA7_IO_GROUP_BURST 0x35db
+#define mmMMEA7_IO_GROUP_BURST_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_AGE 0x35dc
+#define mmMMEA7_IO_RD_PRI_AGE_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_AGE 0x35dd
+#define mmMMEA7_IO_WR_PRI_AGE_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_QUEUING 0x35de
+#define mmMMEA7_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_QUEUING 0x35df
+#define mmMMEA7_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_FIXED 0x35e0
+#define mmMMEA7_IO_RD_PRI_FIXED_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_FIXED 0x35e1
+#define mmMMEA7_IO_WR_PRI_FIXED_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_URGENCY 0x35e2
+#define mmMMEA7_IO_RD_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_URGENCY 0x35e3
+#define mmMMEA7_IO_WR_PRI_URGENCY_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_URGENCY_MASKING 0x35e4
+#define mmMMEA7_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_URGENCY_MASKING 0x35e5
+#define mmMMEA7_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI1 0x35e6
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI2 0x35e7
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI3 0x35e8
+#define mmMMEA7_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI1 0x35e9
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI2 0x35ea
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI3 0x35eb
+#define mmMMEA7_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define mmMMEA7_SDP_ARB_DRAM 0x35ec
+#define mmMMEA7_SDP_ARB_DRAM_BASE_IDX 1
+#define mmMMEA7_SDP_ARB_GMI 0x35ed
+#define mmMMEA7_SDP_ARB_GMI_BASE_IDX 1
+#define mmMMEA7_SDP_ARB_FINAL 0x35ee
+#define mmMMEA7_SDP_ARB_FINAL_BASE_IDX 1
+#define mmMMEA7_SDP_DRAM_PRIORITY 0x35ef
+#define mmMMEA7_SDP_DRAM_PRIORITY_BASE_IDX 1
+#define mmMMEA7_SDP_GMI_PRIORITY 0x35f0
+#define mmMMEA7_SDP_GMI_PRIORITY_BASE_IDX 1
+#define mmMMEA7_SDP_IO_PRIORITY 0x35f1
+#define mmMMEA7_SDP_IO_PRIORITY_BASE_IDX 1
+#define mmMMEA7_SDP_CREDITS 0x35f2
+#define mmMMEA7_SDP_CREDITS_BASE_IDX 1
+#define mmMMEA7_SDP_TAG_RESERVE0 0x35f3
+#define mmMMEA7_SDP_TAG_RESERVE0_BASE_IDX 1
+#define mmMMEA7_SDP_TAG_RESERVE1 0x35f4
+#define mmMMEA7_SDP_TAG_RESERVE1_BASE_IDX 1
+#define mmMMEA7_SDP_VCC_RESERVE0 0x35f5
+#define mmMMEA7_SDP_VCC_RESERVE0_BASE_IDX 1
+#define mmMMEA7_SDP_VCC_RESERVE1 0x35f6
+#define mmMMEA7_SDP_VCC_RESERVE1_BASE_IDX 1
+#define mmMMEA7_SDP_VCD_RESERVE0 0x35f7
+#define mmMMEA7_SDP_VCD_RESERVE0_BASE_IDX 1
+#define mmMMEA7_SDP_VCD_RESERVE1 0x35f8
+#define mmMMEA7_SDP_VCD_RESERVE1_BASE_IDX 1
+#define mmMMEA7_SDP_REQ_CNTL 0x35f9
+#define mmMMEA7_SDP_REQ_CNTL_BASE_IDX 1
+#define mmMMEA7_MISC 0x35fa
+#define mmMMEA7_MISC_BASE_IDX 1
+#define mmMMEA7_LATENCY_SAMPLING 0x35fb
+#define mmMMEA7_LATENCY_SAMPLING_BASE_IDX 1
+#define mmMMEA7_PERFCOUNTER_LO 0x35fc
+#define mmMMEA7_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMMEA7_PERFCOUNTER_HI 0x35fd
+#define mmMMEA7_PERFCOUNTER_HI_BASE_IDX 1
+#define mmMMEA7_PERFCOUNTER0_CFG 0x35fe
+#define mmMMEA7_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMMEA7_PERFCOUNTER1_CFG 0x35ff
+#define mmMMEA7_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMMEA7_PERFCOUNTER_RSLT_CNTL 0x3600
+#define mmMMEA7_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define mmMMEA7_EDC_CNT 0x3606
+#define mmMMEA7_EDC_CNT_BASE_IDX 1
+#define mmMMEA7_EDC_CNT2 0x3607
+#define mmMMEA7_EDC_CNT2_BASE_IDX 1
+#define mmMMEA7_DSM_CNTL 0x3608
+#define mmMMEA7_DSM_CNTL_BASE_IDX 1
+#define mmMMEA7_DSM_CNTLA 0x3609
+#define mmMMEA7_DSM_CNTLA_BASE_IDX 1
+#define mmMMEA7_DSM_CNTLB 0x360a
+#define mmMMEA7_DSM_CNTLB_BASE_IDX 1
+#define mmMMEA7_DSM_CNTL2 0x360b
+#define mmMMEA7_DSM_CNTL2_BASE_IDX 1
+#define mmMMEA7_DSM_CNTL2A 0x360c
+#define mmMMEA7_DSM_CNTL2A_BASE_IDX 1
+#define mmMMEA7_DSM_CNTL2B 0x360d
+#define mmMMEA7_DSM_CNTL2B_BASE_IDX 1
+#define mmMMEA7_CGTT_CLK_CTRL 0x360f
+#define mmMMEA7_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMMEA7_EDC_MODE 0x3610
+#define mmMMEA7_EDC_MODE_BASE_IDX 1
+#define mmMMEA7_ERR_STATUS 0x3611
+#define mmMMEA7_ERR_STATUS_BASE_IDX 1
+#define mmMMEA7_MISC2 0x3612
+#define mmMMEA7_MISC2_BASE_IDX 1
+#define mmMMEA7_ADDRDEC_SELECT 0x3613
+#define mmMMEA7_ADDRDEC_SELECT_BASE_IDX 1
+#define mmMMEA7_EDC_CNT3 0x3614
+#define mmMMEA7_EDC_CNT3_BASE_IDX 1
+
+
+// addressBlock: mmhub_pctldec1
+// base address: 0x76300
+#define mmPCTL1_CTRL 0x38c0
+#define mmPCTL1_CTRL_BASE_IDX 1
+#define mmPCTL1_MMHUB_DEEPSLEEP_IB 0x38c1
+#define mmPCTL1_MMHUB_DEEPSLEEP_IB_BASE_IDX 1
+#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE 0x38c2
+#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_BASE_IDX 1
+#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB 0x38c3
+#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB_BASE_IDX 1
+#define mmPCTL1_PG_IGNORE_DEEPSLEEP 0x38c4
+#define mmPCTL1_PG_IGNORE_DEEPSLEEP_BASE_IDX 1
+#define mmPCTL1_PG_IGNORE_DEEPSLEEP_IB 0x38c5
+#define mmPCTL1_PG_IGNORE_DEEPSLEEP_IB_BASE_IDX 1
+#define mmPCTL1_SLICE0_CFG_DAGB_BUSY 0x38c6
+#define mmPCTL1_SLICE0_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL1_SLICE0_CFG_DS_ALLOW 0x38c7
+#define mmPCTL1_SLICE0_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL1_SLICE0_CFG_DS_ALLOW_IB 0x38c8
+#define mmPCTL1_SLICE0_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL1_SLICE1_CFG_DAGB_BUSY 0x38c9
+#define mmPCTL1_SLICE1_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL1_SLICE1_CFG_DS_ALLOW 0x38ca
+#define mmPCTL1_SLICE1_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL1_SLICE1_CFG_DS_ALLOW_IB 0x38cb
+#define mmPCTL1_SLICE1_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL1_SLICE2_CFG_DAGB_BUSY 0x38cc
+#define mmPCTL1_SLICE2_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL1_SLICE2_CFG_DS_ALLOW 0x38cd
+#define mmPCTL1_SLICE2_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL1_SLICE2_CFG_DS_ALLOW_IB 0x38ce
+#define mmPCTL1_SLICE2_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL1_SLICE3_CFG_DAGB_BUSY 0x38cf
+#define mmPCTL1_SLICE3_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL1_SLICE3_CFG_DS_ALLOW 0x38d0
+#define mmPCTL1_SLICE3_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL1_SLICE3_CFG_DS_ALLOW_IB 0x38d1
+#define mmPCTL1_SLICE3_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL1_SLICE4_CFG_DAGB_BUSY 0x38d2
+#define mmPCTL1_SLICE4_CFG_DAGB_BUSY_BASE_IDX 1
+#define mmPCTL1_SLICE4_CFG_DS_ALLOW 0x38d3
+#define mmPCTL1_SLICE4_CFG_DS_ALLOW_BASE_IDX 1
+#define mmPCTL1_SLICE4_CFG_DS_ALLOW_IB 0x38d4
+#define mmPCTL1_SLICE4_CFG_DS_ALLOW_IB_BASE_IDX 1
+#define mmPCTL1_UTCL2_MISC 0x38d5
+#define mmPCTL1_UTCL2_MISC_BASE_IDX 1
+#define mmPCTL1_SLICE0_MISC 0x38d6
+#define mmPCTL1_SLICE0_MISC_BASE_IDX 1
+#define mmPCTL1_SLICE1_MISC 0x38d7
+#define mmPCTL1_SLICE1_MISC_BASE_IDX 1
+#define mmPCTL1_SLICE2_MISC 0x38d8
+#define mmPCTL1_SLICE2_MISC_BASE_IDX 1
+#define mmPCTL1_SLICE3_MISC 0x38d9
+#define mmPCTL1_SLICE3_MISC_BASE_IDX 1
+#define mmPCTL1_SLICE4_MISC 0x38da
+#define mmPCTL1_SLICE4_MISC_BASE_IDX 1
+#define mmPCTL1_UTCL2_RENG_EXECUTE 0x38db
+#define mmPCTL1_UTCL2_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL1_SLICE0_RENG_EXECUTE 0x38dc
+#define mmPCTL1_SLICE0_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL1_SLICE1_RENG_EXECUTE 0x38dd
+#define mmPCTL1_SLICE1_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL1_SLICE2_RENG_EXECUTE 0x38de
+#define mmPCTL1_SLICE2_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL1_SLICE3_RENG_EXECUTE 0x38df
+#define mmPCTL1_SLICE3_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL1_SLICE4_RENG_EXECUTE 0x38e0
+#define mmPCTL1_SLICE4_RENG_EXECUTE_BASE_IDX 1
+#define mmPCTL1_UTCL2_RENG_RAM_INDEX 0x38e1
+#define mmPCTL1_UTCL2_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL1_UTCL2_RENG_RAM_DATA 0x38e2
+#define mmPCTL1_UTCL2_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL1_SLICE0_RENG_RAM_INDEX 0x38e3
+#define mmPCTL1_SLICE0_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL1_SLICE0_RENG_RAM_DATA 0x38e4
+#define mmPCTL1_SLICE0_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL1_SLICE1_RENG_RAM_INDEX 0x38e5
+#define mmPCTL1_SLICE1_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL1_SLICE1_RENG_RAM_DATA 0x38e6
+#define mmPCTL1_SLICE1_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL1_SLICE2_RENG_RAM_INDEX 0x38e7
+#define mmPCTL1_SLICE2_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL1_SLICE2_RENG_RAM_DATA 0x38e8
+#define mmPCTL1_SLICE2_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL1_SLICE3_RENG_RAM_INDEX 0x38e9
+#define mmPCTL1_SLICE3_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL1_SLICE3_RENG_RAM_DATA 0x38ea
+#define mmPCTL1_SLICE3_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL1_SLICE4_RENG_RAM_INDEX 0x38eb
+#define mmPCTL1_SLICE4_RENG_RAM_INDEX_BASE_IDX 1
+#define mmPCTL1_SLICE4_RENG_RAM_DATA 0x38ec
+#define mmPCTL1_SLICE4_RENG_RAM_DATA_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0 0x38ed
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1 0x38ee
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2 0x38ef
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3 0x38f0
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4 0x38f1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x38f2
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x38f3
+#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0 0x38f4
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1 0x38f5
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2 0x38f6
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3 0x38f7
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4 0x38f8
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0 0x38f9
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1 0x38fa
+#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0 0x38fb
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1 0x38fc
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2 0x38fd
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3 0x38fe
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4 0x38ff
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0 0x3900
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1 0x3901
+#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0 0x3902
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1 0x3903
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2 0x3904
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3 0x3905
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4 0x3906
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x3907
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x3908
+#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0 0x3909
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1 0x390a
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2 0x390b
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3 0x390c
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4 0x390d
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0 0x390e
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1 0x390f
+#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0 0x3910
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1 0x3911
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2 0x3912
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3 0x3913
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4 0x3914
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0 0x3915
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1 0x3916
+#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1
+
+
+// addressBlock: mmhub_l1tlb_vml1dec:1
+// base address: 0x76500
+#define mmVML1_1_MC_VM_MX_L1_TLB0_STATUS 0x3948
+#define mmVML1_1_MC_VM_MX_L1_TLB0_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB1_STATUS 0x3949
+#define mmVML1_1_MC_VM_MX_L1_TLB1_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB2_STATUS 0x394a
+#define mmVML1_1_MC_VM_MX_L1_TLB2_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB3_STATUS 0x394b
+#define mmVML1_1_MC_VM_MX_L1_TLB3_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB4_STATUS 0x394c
+#define mmVML1_1_MC_VM_MX_L1_TLB4_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB5_STATUS 0x394d
+#define mmVML1_1_MC_VM_MX_L1_TLB5_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB6_STATUS 0x394e
+#define mmVML1_1_MC_VM_MX_L1_TLB6_STATUS_BASE_IDX 1
+#define mmVML1_1_MC_VM_MX_L1_TLB7_STATUS 0x394f
+#define mmVML1_1_MC_VM_MX_L1_TLB7_STATUS_BASE_IDX 1
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec:1
+// base address: 0x76580
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG 0x3960
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG 0x3961
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG 0x3962
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG 0x3963
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG_BASE_IDX 1
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL 0x3964
+#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec:1
+// base address: 0x765c0
+#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO 0x3970
+#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO_BASE_IDX 1
+#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI 0x3971
+#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_atcl2dec:1
+// base address: 0x76600
+#define mmATCL2_1_ATC_L2_CNTL 0x3980
+#define mmATCL2_1_ATC_L2_CNTL_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CNTL2 0x3981
+#define mmATCL2_1_ATC_L2_CNTL2_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_DATA0 0x3984
+#define mmATCL2_1_ATC_L2_CACHE_DATA0_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_DATA1 0x3985
+#define mmATCL2_1_ATC_L2_CACHE_DATA1_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_DATA2 0x3986
+#define mmATCL2_1_ATC_L2_CACHE_DATA2_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CNTL3 0x3987
+#define mmATCL2_1_ATC_L2_CNTL3_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_STATUS 0x3988
+#define mmATCL2_1_ATC_L2_STATUS_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_STATUS2 0x3989
+#define mmATCL2_1_ATC_L2_STATUS2_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_STATUS3 0x398a
+#define mmATCL2_1_ATC_L2_STATUS3_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_MISC_CG 0x398b
+#define mmATCL2_1_ATC_L2_MISC_CG_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_MEM_POWER_LS 0x398c
+#define mmATCL2_1_ATC_L2_MEM_POWER_LS_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CGTT_CLK_CTRL 0x398d
+#define mmATCL2_1_ATC_L2_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX 0x398e
+#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX 0x398f
+#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL 0x3990
+#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL 0x3991
+#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_CNTL4 0x3992
+#define mmATCL2_1_ATC_L2_CNTL4_BASE_IDX 1
+#define mmATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES 0x3993
+#define mmATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec:1
+// base address: 0x76700
+#define mmVML2PF1_VM_L2_CNTL 0x39c0
+#define mmVML2PF1_VM_L2_CNTL_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CNTL2 0x39c1
+#define mmVML2PF1_VM_L2_CNTL2_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CNTL3 0x39c2
+#define mmVML2PF1_VM_L2_CNTL3_BASE_IDX 1
+#define mmVML2PF1_VM_L2_STATUS 0x39c3
+#define mmVML2PF1_VM_L2_STATUS_BASE_IDX 1
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_CNTL 0x39c4
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 1
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32 0x39c5
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32 0x39c6
+#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL 0x39c7
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL2 0x39c8
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3 0x39c9
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4 0x39ca
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_STATUS 0x39cb
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32 0x39cc
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32 0x39cd
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x39ce
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x39cf
+#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x39d1
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x39d2
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x39d3
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x39d4
+#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x39d5
+#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x39d6
+#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CNTL4 0x39d7
+#define mmVML2PF1_VM_L2_CNTL4_BASE_IDX 1
+#define mmVML2PF1_VM_L2_MM_GROUP_RT_CLASSES 0x39d8
+#define mmVML2PF1_VM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1
+#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID 0x39d9
+#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 1
+#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2 0x39da
+#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CACHE_PARITY_CNTL 0x39db
+#define mmVML2PF1_VM_L2_CACHE_PARITY_CNTL_BASE_IDX 1
+#define mmVML2PF1_VM_L2_CGTT_CLK_CTRL 0x39de
+#define mmVML2PF1_VM_L2_CGTT_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2vcdec:1
+// base address: 0x76800
+#define mmVML2VC1_VM_CONTEXT0_CNTL 0x3a00
+#define mmVML2VC1_VM_CONTEXT0_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_CNTL 0x3a01
+#define mmVML2VC1_VM_CONTEXT1_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_CNTL 0x3a02
+#define mmVML2VC1_VM_CONTEXT2_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_CNTL 0x3a03
+#define mmVML2VC1_VM_CONTEXT3_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_CNTL 0x3a04
+#define mmVML2VC1_VM_CONTEXT4_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_CNTL 0x3a05
+#define mmVML2VC1_VM_CONTEXT5_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_CNTL 0x3a06
+#define mmVML2VC1_VM_CONTEXT6_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_CNTL 0x3a07
+#define mmVML2VC1_VM_CONTEXT7_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_CNTL 0x3a08
+#define mmVML2VC1_VM_CONTEXT8_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_CNTL 0x3a09
+#define mmVML2VC1_VM_CONTEXT9_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_CNTL 0x3a0a
+#define mmVML2VC1_VM_CONTEXT10_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_CNTL 0x3a0b
+#define mmVML2VC1_VM_CONTEXT11_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_CNTL 0x3a0c
+#define mmVML2VC1_VM_CONTEXT12_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_CNTL 0x3a0d
+#define mmVML2VC1_VM_CONTEXT13_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_CNTL 0x3a0e
+#define mmVML2VC1_VM_CONTEXT14_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_CNTL 0x3a0f
+#define mmVML2VC1_VM_CONTEXT15_CNTL_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXTS_DISABLE 0x3a10
+#define mmVML2VC1_VM_CONTEXTS_DISABLE_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG0_SEM 0x3a11
+#define mmVML2VC1_VM_INVALIDATE_ENG0_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG1_SEM 0x3a12
+#define mmVML2VC1_VM_INVALIDATE_ENG1_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG2_SEM 0x3a13
+#define mmVML2VC1_VM_INVALIDATE_ENG2_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG3_SEM 0x3a14
+#define mmVML2VC1_VM_INVALIDATE_ENG3_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG4_SEM 0x3a15
+#define mmVML2VC1_VM_INVALIDATE_ENG4_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG5_SEM 0x3a16
+#define mmVML2VC1_VM_INVALIDATE_ENG5_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG6_SEM 0x3a17
+#define mmVML2VC1_VM_INVALIDATE_ENG6_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG7_SEM 0x3a18
+#define mmVML2VC1_VM_INVALIDATE_ENG7_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG8_SEM 0x3a19
+#define mmVML2VC1_VM_INVALIDATE_ENG8_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG9_SEM 0x3a1a
+#define mmVML2VC1_VM_INVALIDATE_ENG9_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG10_SEM 0x3a1b
+#define mmVML2VC1_VM_INVALIDATE_ENG10_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG11_SEM 0x3a1c
+#define mmVML2VC1_VM_INVALIDATE_ENG11_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG12_SEM 0x3a1d
+#define mmVML2VC1_VM_INVALIDATE_ENG12_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG13_SEM 0x3a1e
+#define mmVML2VC1_VM_INVALIDATE_ENG13_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG14_SEM 0x3a1f
+#define mmVML2VC1_VM_INVALIDATE_ENG14_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG15_SEM 0x3a20
+#define mmVML2VC1_VM_INVALIDATE_ENG15_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG16_SEM 0x3a21
+#define mmVML2VC1_VM_INVALIDATE_ENG16_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG17_SEM 0x3a22
+#define mmVML2VC1_VM_INVALIDATE_ENG17_SEM_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG0_REQ 0x3a23
+#define mmVML2VC1_VM_INVALIDATE_ENG0_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG1_REQ 0x3a24
+#define mmVML2VC1_VM_INVALIDATE_ENG1_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG2_REQ 0x3a25
+#define mmVML2VC1_VM_INVALIDATE_ENG2_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG3_REQ 0x3a26
+#define mmVML2VC1_VM_INVALIDATE_ENG3_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG4_REQ 0x3a27
+#define mmVML2VC1_VM_INVALIDATE_ENG4_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG5_REQ 0x3a28
+#define mmVML2VC1_VM_INVALIDATE_ENG5_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG6_REQ 0x3a29
+#define mmVML2VC1_VM_INVALIDATE_ENG6_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG7_REQ 0x3a2a
+#define mmVML2VC1_VM_INVALIDATE_ENG7_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG8_REQ 0x3a2b
+#define mmVML2VC1_VM_INVALIDATE_ENG8_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG9_REQ 0x3a2c
+#define mmVML2VC1_VM_INVALIDATE_ENG9_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG10_REQ 0x3a2d
+#define mmVML2VC1_VM_INVALIDATE_ENG10_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG11_REQ 0x3a2e
+#define mmVML2VC1_VM_INVALIDATE_ENG11_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG12_REQ 0x3a2f
+#define mmVML2VC1_VM_INVALIDATE_ENG12_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG13_REQ 0x3a30
+#define mmVML2VC1_VM_INVALIDATE_ENG13_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG14_REQ 0x3a31
+#define mmVML2VC1_VM_INVALIDATE_ENG14_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG15_REQ 0x3a32
+#define mmVML2VC1_VM_INVALIDATE_ENG15_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG16_REQ 0x3a33
+#define mmVML2VC1_VM_INVALIDATE_ENG16_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG17_REQ 0x3a34
+#define mmVML2VC1_VM_INVALIDATE_ENG17_REQ_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ACK 0x3a35
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ACK 0x3a36
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ACK 0x3a37
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ACK 0x3a38
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ACK 0x3a39
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ACK 0x3a3a
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ACK 0x3a3b
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ACK 0x3a3c
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ACK 0x3a3d
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ACK 0x3a3e
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ACK 0x3a3f
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ACK 0x3a40
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ACK 0x3a41
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ACK 0x3a42
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ACK 0x3a43
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ACK 0x3a44
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ACK 0x3a45
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ACK 0x3a46
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ACK_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x3a47
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x3a48
+#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x3a49
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x3a4a
+#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x3a4b
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x3a4c
+#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x3a4d
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x3a4e
+#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x3a4f
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x3a50
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x3a51
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x3a52
+#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x3a53
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x3a54
+#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x3a55
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x3a56
+#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x3a57
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x3a58
+#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x3a59
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x3a5a
+#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x3a5b
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x3a5c
+#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x3a5d
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x3a5e
+#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x3a5f
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x3a60
+#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x3a61
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x3a62
+#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x3a63
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x3a64
+#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x3a65
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x3a66
+#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x3a67
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x3a68
+#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x3a69
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x3a6a
+#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x3a6b
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x3a6c
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x3a6d
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x3a6e
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x3a6f
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x3a70
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x3a71
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x3a72
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x3a73
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x3a74
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x3a75
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x3a76
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x3a77
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x3a78
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x3a79
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x3a7a
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x3a7b
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x3a7c
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x3a7d
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x3a7e
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x3a7f
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x3a80
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x3a81
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x3a82
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x3a83
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x3a84
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x3a85
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x3a86
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x3a87
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x3a88
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x3a89
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x3a8a
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x3a8b
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x3a8c
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x3a8d
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x3a8e
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x3a8f
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x3a90
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x3a91
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x3a92
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x3a93
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x3a94
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x3a95
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x3a96
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x3a97
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x3a98
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x3a99
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x3a9a
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x3a9b
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x3a9c
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x3a9d
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x3a9e
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x3a9f
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x3aa0
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x3aa1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x3aa2
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x3aa3
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x3aa4
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x3aa5
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x3aa6
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x3aa7
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x3aa8
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x3aa9
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x3aaa
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x3aab
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x3aac
+#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x3aad
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x3aae
+#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x3aaf
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x3ab0
+#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x3ab1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x3ab2
+#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x3ab3
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x3ab4
+#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x3ab5
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x3ab6
+#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x3ab7
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x3ab8
+#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x3ab9
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x3aba
+#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x3abb
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x3abc
+#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x3abd
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x3abe
+#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x3abf
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x3ac0
+#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x3ac1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x3ac2
+#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x3ac3
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x3ac4
+#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x3ac5
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x3ac6
+#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x3ac7
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x3ac8
+#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x3ac9
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x3aca
+#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec:1
+// base address: 0x76b90
+#define mmVMSHAREDPF1_MC_VM_NB_MMIOBASE 0x3ae4
+#define mmVMSHAREDPF1_MC_VM_NB_MMIOBASE_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_NB_MMIOLIMIT 0x3ae5
+#define mmVMSHAREDPF1_MC_VM_NB_MMIOLIMIT_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_NB_PCI_CTRL 0x3ae6
+#define mmVMSHAREDPF1_MC_VM_NB_PCI_CTRL_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_NB_PCI_ARB 0x3ae7
+#define mmVMSHAREDPF1_MC_VM_NB_PCI_ARB_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1 0x3ae8
+#define mmVMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2 0x3ae9
+#define mmVMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2 0x3aea
+#define mmVMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_FB_OFFSET 0x3aeb
+#define mmVMSHAREDPF1_MC_VM_FB_OFFSET_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x3aec
+#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x3aed
+#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_STEERING 0x3aee
+#define mmVMSHAREDPF1_MC_VM_STEERING_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ 0x3aef
+#define mmVMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_MEM_POWER_LS 0x3af0
+#define mmVMSHAREDPF1_MC_MEM_POWER_LS_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START 0x3af1
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END 0x3af2
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_APT_CNTL 0x3af3
+#define mmVMSHAREDPF1_MC_VM_APT_CNTL_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START 0x3af4
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END 0x3af5
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x3af6
+#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_CNTL 0x3af7
+#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_CNTL_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_SIZE 0x3af8
+#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL 0x3af9
+#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec:1
+// base address: 0x76c00
+#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE 0x3b00
+#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_TOP 0x3b01
+#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_TOP_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_AGP_TOP 0x3b02
+#define mmVMSHAREDVC1_MC_VM_AGP_TOP_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_AGP_BOT 0x3b03
+#define mmVMSHAREDVC1_MC_VM_AGP_BOT_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_AGP_BASE 0x3b04
+#define mmVMSHAREDVC1_MC_VM_AGP_BASE_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x3b05
+#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x3b06
+#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 1
+#define mmVMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL 0x3b07
+#define mmVMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec:1
+// base address: 0x76c80
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0 0x3b20
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1 0x3b21
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2 0x3b22
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3 0x3b23
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4 0x3b24
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5 0x3b25
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6 0x3b26
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7 0x3b27
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8 0x3b28
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9 0x3b29
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10 0x3b2a
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11 0x3b2b
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12 0x3b2c
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13 0x3b2d
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14 0x3b2e
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15 0x3b2f
+#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1 0x3b30
+#define mmVMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_0 0x3b31
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_1 0x3b32
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_2 0x3b33
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_3 0x3b34
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_0 0x3b35
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_1 0x3b36
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_2 0x3b37
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_3 0x3b38
+#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_0 0x3b39
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_1 0x3b3a
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_2 0x3b3b
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_3 0x3b3c
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_0 0x3b3d
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_1 0x3b3e
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_2 0x3b3f
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_3 0x3b40
+#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_0 0x3b41
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_1 0x3b42
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_2 0x3b43
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_3 0x3b44
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_0 0x3b45
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_1 0x3b46
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_2 0x3b47
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_3 0x3b48
+#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER 0x3b49
+#define mmVMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x3b4a
+#define mmVMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL 0x3b4b
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0 0x3b4c
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1 0x3b4d
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2 0x3b4e
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3 0x3b4f
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4 0x3b50
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5 0x3b51
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6 0x3b52
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7 0x3b53
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8 0x3b54
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9 0x3b55
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10 0x3b56
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11 0x3b57
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12 0x3b58
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13 0x3b59
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14 0x3b5a
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14_BASE_IDX 1
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15 0x3b5b
+#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15_BASE_IDX 1
+#define mmVMSHAREDHV1_UTCL2_CGTT_CLK_CTRL 0x3b5c
+#define mmVMSHAREDHV1_UTCL2_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID 0x3b5d
+#define mmVMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmVMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE 0x3b5e
+#define mmVMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec:1
+// base address: 0x76dc0
+#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO 0x3b70
+#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI 0x3b71
+#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec:1
+// base address: 0x76dd0
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG 0x3b74
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG 0x3b75
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x3b76
+#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2pldec:1
+// base address: 0x76e00
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER0_CFG 0x3b80
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER1_CFG 0x3b81
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER2_CFG 0x3b82
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER3_CFG 0x3b83
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER4_CFG 0x3b84
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER5_CFG 0x3b85
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER6_CFG 0x3b86
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER7_CFG 0x3b87
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3b88
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2prdec:1
+// base address: 0x76e40
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_LO 0x3b90
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_HI 0x3b91
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+
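+/*
+ * Usage sketch (illustrative only, under assumptions): the register offsets in
+ * this header are dword offsets relative to a per-IP base-address segment, and
+ * each *_BASE_IDX value selects which segment of that IP's base-address table
+ * to add.  A minimal sketch of how one of these defines might be resolved,
+ * assuming a driver-provided lookup table of the reg_offset[hwip][inst][seg]
+ * form (in the amdgpu driver this pattern is typically wrapped by helpers
+ * along the lines of SOC15_REG_OFFSET()/RREG32_SOC15()):
+ *
+ *   // hypothetical, already-initialized table: reg_offset[hwip][inst][seg]
+ *   uint32_t reg = reg_offset[MMHUB_HWIP][0]
+ *                            [mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE_BASE_IDX]
+ *                  + mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE;
+ *   uint32_t val = RREG32(reg);   // dword-indexed MMIO read
+ *
+ * The "// base address" comments above give the absolute byte address, i.e.
+ * 4 * (segment base in dwords + register offset); e.g. 0x76c00 = 4 * (0x1a000
+ * + 0x3b00), where 0x1a000 is the segment base implied by that comment.
+ */
+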
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
new file mode 100644
index 000000000000..111a71b434e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -0,0 +1,45012 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_1_SH_MASK_HEADER
+#define _mmhub_9_4_1_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_dagb_dagbdec0
+//DAGB0_RDCLI0
+#define DAGB0_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI0__MAX_OSD_MASK 0xFC000000L
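+/*
+ * Usage sketch (illustrative only): each bit-field in the registers below is
+ * described by a __SHIFT/_MASK pair.  A field is read by masking and shifting
+ * down, and written by clearing the mask and or-ing in the shifted value; in
+ * the amdgpu driver this is typically done through helpers such as
+ * REG_GET_FIELD()/REG_SET_FIELD(), which expand to the same pattern.  Here
+ * "reg" is assumed to be a dword register offset resolved as in the matching
+ * offset header:
+ *
+ *   uint32_t v = RREG32(reg);                          // current value
+ *   uint32_t urg_high = (v & DAGB0_RDCLI0__URG_HIGH_MASK) >>
+ *                       DAGB0_RDCLI0__URG_HIGH__SHIFT; // extract URG_HIGH
+ *   v &= ~DAGB0_RDCLI0__MAX_BW_ENABLE_MASK;            // clear MAX_BW_ENABLE
+ *   v |= 1 << DAGB0_RDCLI0__MAX_BW_ENABLE__SHIFT;      // set it to 1
+ *   WREG32(reg, v);
+ */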
+//DAGB0_RDCLI1
+#define DAGB0_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI2
+#define DAGB0_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI3
+#define DAGB0_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI4
+#define DAGB0_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI5
+#define DAGB0_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI6
+#define DAGB0_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI7
+#define DAGB0_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI8
+#define DAGB0_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI9
+#define DAGB0_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI10
+#define DAGB0_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI11
+#define DAGB0_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI12
+#define DAGB0_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI13
+#define DAGB0_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI14
+#define DAGB0_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI15
+#define DAGB0_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RD_CNTL
+#define DAGB0_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB0_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB0_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB0_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB0_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB0_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB0_RD_GMI_CNTL
+#define DAGB0_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB0_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB0_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB0_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB0_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB0_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB0_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB0_RD_ADDR_DAGB
+#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB0_RD_CGTT_CLK_CTRL
+#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_RD_VC0_CNTL
+#define DAGB0_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC1_CNTL
+#define DAGB0_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC2_CNTL
+#define DAGB0_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC3_CNTL
+#define DAGB0_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC4_CNTL
+#define DAGB0_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC5_CNTL
+#define DAGB0_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC6_CNTL
+#define DAGB0_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC7_CNTL
+#define DAGB0_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_CNTL_MISC
+#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB0_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB0_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB0_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB0_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB0_RD_TLB_CREDIT
+#define DAGB0_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB0_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB0_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB0_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB0_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB0_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB0_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB0_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB0_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB0_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB0_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB0_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB0_RDCLI_ASK_PENDING
+#define DAGB0_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_GO_PENDING
+#define DAGB0_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_GBLSEND_PENDING
+#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_TLB_PENDING
+#define DAGB0_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_OARB_PENDING
+#define DAGB0_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_OSD_PENDING
+#define DAGB0_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
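The SHIFT/MASK pairs above follow the usual convention for field access in these generated headers: mask the field out of the raw 32-bit register word, then shift it down (and the reverse to program it). A minimal sketch, assuming a raw register value is already in hand (the reg_value parameter and the update_max_bw helper are illustrative only; real driver code would go through its own register read/write helpers), using the DAGB0_RD_VC0_CNTL MAX_BW field defined above:

#include <stdint.h>

/* Illustrative helper: return reg_value with the MAX_BW field replaced,
 * leaving all other bits of DAGB0_RD_VC0_CNTL untouched. */
static uint32_t update_max_bw(uint32_t reg_value, uint32_t max_bw)
{
	/* Extract the current MAX_BW field: mask first, then shift down. */
	uint32_t cur = (reg_value & DAGB0_RD_VC0_CNTL__MAX_BW_MASK) >>
		       DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT;
	(void)cur; /* not used further in this sketch */

	/* Clear the field, then insert the new value, clamped to the field width. */
	reg_value &= ~DAGB0_RD_VC0_CNTL__MAX_BW_MASK;
	reg_value |= (max_bw << DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT) &
		     DAGB0_RD_VC0_CNTL__MAX_BW_MASK;

	return reg_value;
}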
+//DAGB0_WRCLI0
+#define DAGB0_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI1
+#define DAGB0_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI2
+#define DAGB0_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI3
+#define DAGB0_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI4
+#define DAGB0_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI5
+#define DAGB0_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI6
+#define DAGB0_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI7
+#define DAGB0_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI8
+#define DAGB0_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI9
+#define DAGB0_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI10
+#define DAGB0_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI11
+#define DAGB0_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI12
+#define DAGB0_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI13
+#define DAGB0_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI14
+#define DAGB0_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI15
+#define DAGB0_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WR_CNTL
+#define DAGB0_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB0_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB0_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB0_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB0_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB0_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB0_WR_GMI_CNTL
+#define DAGB0_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB0_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB0_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB0_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB0_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB0_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB0_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB0_WR_ADDR_DAGB
+#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB0_WR_CGTT_CLK_CTRL
+#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB
+#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_WR_DATA_DAGB_MAX_BURST0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_MAX_BURST1
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_VC0_CNTL
+#define DAGB0_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC1_CNTL
+#define DAGB0_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC2_CNTL
+#define DAGB0_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC3_CNTL
+#define DAGB0_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC4_CNTL
+#define DAGB0_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC5_CNTL
+#define DAGB0_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC6_CNTL
+#define DAGB0_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC7_CNTL
+#define DAGB0_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_CNTL_MISC
+#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB0_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB0_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB0_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB0_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB0_WR_TLB_CREDIT
+#define DAGB0_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB0_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB0_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB0_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB0_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB0_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB0_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB0_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB0_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB0_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB0_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB0_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB0_WR_DATA_CREDIT
+#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB0_WR_MISC_CREDIT
+#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB0_WRCLI_ASK_PENDING
+#define DAGB0_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GO_PENDING
+#define DAGB0_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GBLSEND_PENDING
+#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_TLB_PENDING
+#define DAGB0_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_OARB_PENDING
+#define DAGB0_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_OSD_PENDING
+#define DAGB0_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_DBUS_ASK_PENDING
+#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_DBUS_GO_PENDING
+#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_DAGB_DLY
+#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB0_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB0_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB0_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB0_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB0_CNTL_MISC
+#define DAGB0_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB0_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB0_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB0_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB0_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB0_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB0_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB0_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB0_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB0_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB0_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB0_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB0_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB0_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB0_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB0_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB0_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB0_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB0_CNTL_MISC2
+#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB0_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB0_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB0_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB0_FIFO_EMPTY
+#define DAGB0_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB0_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB0_FIFO_FULL
+#define DAGB0_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB0_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB0_WR_CREDITS_FULL
+#define DAGB0_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB0_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB0_RD_CREDITS_FULL
+#define DAGB0_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB0_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB0_PERFCOUNTER_LO
+#define DAGB0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB0_PERFCOUNTER_HI
+#define DAGB0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB0_PERFCOUNTER0_CFG
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER1_CFG
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER2_CFG
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER_RSLT_CNTL
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB0_RESERVE0
+#define DAGB0_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE1
+#define DAGB0_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE2
+#define DAGB0_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE3
+#define DAGB0_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE4
+#define DAGB0_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE5
+#define DAGB0_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE6
+#define DAGB0_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE7
+#define DAGB0_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE8
+#define DAGB0_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE9
+#define DAGB0_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE10
+#define DAGB0_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE11
+#define DAGB0_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE12
+#define DAGB0_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE13
+#define DAGB0_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_dagb_dagbdec1
+//DAGB1_RDCLI0
+#define DAGB1_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI1
+#define DAGB1_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI2
+#define DAGB1_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI3
+#define DAGB1_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI4
+#define DAGB1_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI5
+#define DAGB1_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI6
+#define DAGB1_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI7
+#define DAGB1_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI8
+#define DAGB1_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI9
+#define DAGB1_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI10
+#define DAGB1_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI11
+#define DAGB1_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI12
+#define DAGB1_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI13
+#define DAGB1_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI14
+#define DAGB1_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI15
+#define DAGB1_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RD_CNTL
+#define DAGB1_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB1_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB1_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB1_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB1_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB1_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB1_RD_GMI_CNTL
+#define DAGB1_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB1_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB1_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB1_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB1_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB1_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB1_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB1_RD_ADDR_DAGB
+#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB1_RD_CGTT_CLK_CTRL
+#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_RD_VC0_CNTL
+#define DAGB1_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC1_CNTL
+#define DAGB1_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC2_CNTL
+#define DAGB1_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC3_CNTL
+#define DAGB1_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC4_CNTL
+#define DAGB1_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC5_CNTL
+#define DAGB1_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC6_CNTL
+#define DAGB1_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC7_CNTL
+#define DAGB1_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_CNTL_MISC
+#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB1_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB1_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB1_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB1_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB1_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB1_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB1_RD_TLB_CREDIT
+#define DAGB1_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB1_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB1_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB1_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB1_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB1_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB1_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB1_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB1_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB1_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB1_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB1_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB1_RDCLI_ASK_PENDING
+#define DAGB1_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_GO_PENDING
+#define DAGB1_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_GBLSEND_PENDING
+#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_TLB_PENDING
+#define DAGB1_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_OARB_PENDING
+#define DAGB1_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_OSD_PENDING
+#define DAGB1_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI0
+#define DAGB1_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI1
+#define DAGB1_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI2
+#define DAGB1_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI3
+#define DAGB1_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI4
+#define DAGB1_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI5
+#define DAGB1_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI6
+#define DAGB1_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI7
+#define DAGB1_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI8
+#define DAGB1_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI9
+#define DAGB1_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI10
+#define DAGB1_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI11
+#define DAGB1_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI12
+#define DAGB1_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI13
+#define DAGB1_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI14
+#define DAGB1_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI15
+#define DAGB1_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WR_CNTL
+#define DAGB1_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB1_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB1_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB1_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB1_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB1_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB1_WR_GMI_CNTL
+#define DAGB1_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB1_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB1_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB1_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB1_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB1_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB1_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB1_WR_ADDR_DAGB
+#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB1_WR_CGTT_CLK_CTRL
+#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB
+#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_WR_DATA_DAGB_MAX_BURST0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_MAX_BURST1
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_VC0_CNTL
+#define DAGB1_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC1_CNTL
+#define DAGB1_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC2_CNTL
+#define DAGB1_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC3_CNTL
+#define DAGB1_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC4_CNTL
+#define DAGB1_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC5_CNTL
+#define DAGB1_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC6_CNTL
+#define DAGB1_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC7_CNTL
+#define DAGB1_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_CNTL_MISC
+#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB1_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB1_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB1_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB1_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB1_WR_TLB_CREDIT
+#define DAGB1_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB1_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB1_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB1_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB1_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB1_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB1_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB1_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB1_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB1_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB1_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB1_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB1_WR_DATA_CREDIT
+#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB1_WR_MISC_CREDIT
+#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB1_WRCLI_ASK_PENDING
+#define DAGB1_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GO_PENDING
+#define DAGB1_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GBLSEND_PENDING
+#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_TLB_PENDING
+#define DAGB1_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_OARB_PENDING
+#define DAGB1_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_OSD_PENDING
+#define DAGB1_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_DBUS_ASK_PENDING
+#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_DBUS_GO_PENDING
+#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_DAGB_DLY
+#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB1_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB1_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB1_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB1_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB1_CNTL_MISC
+#define DAGB1_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB1_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB1_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB1_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB1_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB1_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB1_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB1_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB1_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB1_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB1_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB1_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB1_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB1_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB1_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB1_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB1_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB1_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB1_CNTL_MISC2
+#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB1_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB1_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB1_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB1_FIFO_EMPTY
+#define DAGB1_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB1_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB1_FIFO_FULL
+#define DAGB1_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB1_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB1_WR_CREDITS_FULL
+#define DAGB1_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB1_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB1_RD_CREDITS_FULL
+#define DAGB1_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB1_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB1_PERFCOUNTER_LO
+#define DAGB1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB1_PERFCOUNTER_HI
+#define DAGB1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB1_PERFCOUNTER0_CFG
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER1_CFG
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER2_CFG
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER_RSLT_CNTL
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB1_RESERVE0
+#define DAGB1_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE1
+#define DAGB1_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE2
+#define DAGB1_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE3
+#define DAGB1_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE4
+#define DAGB1_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE5
+#define DAGB1_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE6
+#define DAGB1_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE7
+#define DAGB1_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE8
+#define DAGB1_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE9
+#define DAGB1_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE10
+#define DAGB1_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE11
+#define DAGB1_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE12
+#define DAGB1_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE13
+#define DAGB1_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
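+// Usage sketch (illustrative only; 'reg_val' and 'sclk' are hypothetical locals):
+// every field above pairs a <REG>__<FIELD>__SHIFT with a <REG>__<FIELD>_MASK, so a
+// field is read or updated with plain shift-and-mask arithmetic, e.g.
+//   sclk    = (reg_val & DAGB1_WR_CNTL__SCLK_FREQ_MASK) >> DAGB1_WR_CNTL__SCLK_FREQ__SHIFT;
+//   reg_val = (reg_val & ~DAGB1_WR_CNTL__SCLK_FREQ_MASK) |
+//             ((sclk << DAGB1_WR_CNTL__SCLK_FREQ__SHIFT) & DAGB1_WR_CNTL__SCLK_FREQ_MASK);
+// The driver's REG_GET_FIELD()/REG_SET_FIELD() helpers expand to this same pattern
+// by token-pasting the <REG>__<FIELD> naming convention used throughout this header.
+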
+// addressBlock: mmhub_dagb_dagbdec2
+//DAGB2_RDCLI0
+#define DAGB2_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI1
+#define DAGB2_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI2
+#define DAGB2_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI3
+#define DAGB2_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI4
+#define DAGB2_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI5
+#define DAGB2_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI6
+#define DAGB2_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI7
+#define DAGB2_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI8
+#define DAGB2_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI9
+#define DAGB2_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI10
+#define DAGB2_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI11
+#define DAGB2_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI12
+#define DAGB2_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI13
+#define DAGB2_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI14
+#define DAGB2_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RDCLI15
+#define DAGB2_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB2_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB2_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB2_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB2_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB2_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB2_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB2_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB2_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB2_RD_CNTL
+#define DAGB2_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB2_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB2_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB2_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB2_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB2_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB2_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB2_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB2_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB2_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB2_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB2_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB2_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB2_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB2_RD_GMI_CNTL
+#define DAGB2_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB2_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB2_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB2_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB2_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB2_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB2_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB2_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB2_RD_ADDR_DAGB
+#define DAGB2_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB2_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB2_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB2_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB2_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB2_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB2_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB2_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB2_RD_CGTT_CLK_CTRL
+#define DAGB2_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB2_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB2_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB2_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB2_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB2_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB2_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB2_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB2_RD_VC0_CNTL
+#define DAGB2_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC1_CNTL
+#define DAGB2_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC2_CNTL
+#define DAGB2_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC3_CNTL
+#define DAGB2_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC4_CNTL
+#define DAGB2_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC5_CNTL
+#define DAGB2_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC6_CNTL
+#define DAGB2_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_VC7_CNTL
+#define DAGB2_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_RD_CNTL_MISC
+#define DAGB2_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB2_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB2_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB2_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB2_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB2_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB2_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB2_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB2_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB2_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB2_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB2_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB2_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB2_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB2_RD_TLB_CREDIT
+#define DAGB2_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB2_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB2_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB2_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB2_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB2_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB2_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB2_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB2_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB2_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB2_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB2_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB2_RDCLI_ASK_PENDING
+#define DAGB2_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_GO_PENDING
+#define DAGB2_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_GBLSEND_PENDING
+#define DAGB2_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_TLB_PENDING
+#define DAGB2_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_OARB_PENDING
+#define DAGB2_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_RDCLI_OSD_PENDING
+#define DAGB2_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI0
+#define DAGB2_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI1
+#define DAGB2_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI2
+#define DAGB2_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI3
+#define DAGB2_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI4
+#define DAGB2_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI5
+#define DAGB2_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI6
+#define DAGB2_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI7
+#define DAGB2_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI8
+#define DAGB2_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI9
+#define DAGB2_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI10
+#define DAGB2_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI11
+#define DAGB2_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI12
+#define DAGB2_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI13
+#define DAGB2_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI14
+#define DAGB2_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WRCLI15
+#define DAGB2_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB2_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB2_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB2_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB2_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB2_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB2_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB2_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB2_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB2_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB2_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB2_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB2_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB2_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB2_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB2_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB2_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB2_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB2_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB2_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB2_WR_CNTL
+#define DAGB2_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB2_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB2_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB2_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB2_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB2_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB2_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB2_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB2_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB2_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB2_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB2_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB2_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB2_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB2_WR_GMI_CNTL
+#define DAGB2_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB2_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB2_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB2_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB2_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB2_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB2_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB2_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB2_WR_ADDR_DAGB
+#define DAGB2_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB2_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB2_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB2_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB2_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB2_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB2_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB2_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB2_WR_CGTT_CLK_CTRL
+#define DAGB2_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB2_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB2_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB2_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB2_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB
+#define DAGB2_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB2_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB2_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB2_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB2_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB2_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB2_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB2_WR_DATA_DAGB_MAX_BURST0
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB_MAX_BURST1
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB2_WR_VC0_CNTL
+#define DAGB2_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC1_CNTL
+#define DAGB2_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC2_CNTL
+#define DAGB2_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC3_CNTL
+#define DAGB2_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC4_CNTL
+#define DAGB2_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC5_CNTL
+#define DAGB2_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC6_CNTL
+#define DAGB2_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_VC7_CNTL
+#define DAGB2_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB2_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB2_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB2_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB2_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB2_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB2_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB2_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB2_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB2_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB2_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB2_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB2_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB2_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB2_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB2_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB2_WR_CNTL_MISC
+#define DAGB2_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB2_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB2_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB2_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB2_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB2_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB2_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB2_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB2_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB2_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB2_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB2_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB2_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB2_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB2_WR_TLB_CREDIT
+#define DAGB2_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB2_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB2_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB2_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB2_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB2_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB2_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB2_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB2_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB2_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB2_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB2_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB2_WR_DATA_CREDIT
+#define DAGB2_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB2_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB2_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB2_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB2_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB2_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB2_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB2_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB2_WR_MISC_CREDIT
+#define DAGB2_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB2_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB2_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB2_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB2_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB2_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB2_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB2_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB2_WRCLI_ASK_PENDING
+#define DAGB2_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GO_PENDING
+#define DAGB2_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GBLSEND_PENDING
+#define DAGB2_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_TLB_PENDING
+#define DAGB2_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_OARB_PENDING
+#define DAGB2_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_OSD_PENDING
+#define DAGB2_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_DBUS_ASK_PENDING
+#define DAGB2_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_DBUS_GO_PENDING
+#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_DAGB_DLY
+#define DAGB2_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB2_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB2_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB2_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB2_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB2_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB2_CNTL_MISC
+#define DAGB2_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB2_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB2_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB2_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB2_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB2_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB2_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB2_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB2_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB2_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB2_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB2_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB2_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB2_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB2_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB2_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB2_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB2_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB2_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB2_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB2_CNTL_MISC2
+#define DAGB2_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB2_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB2_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB2_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB2_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB2_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB2_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB2_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB2_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB2_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB2_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB2_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB2_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB2_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB2_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB2_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB2_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB2_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB2_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB2_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB2_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB2_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB2_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB2_FIFO_EMPTY
+#define DAGB2_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB2_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB2_FIFO_FULL
+#define DAGB2_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB2_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB2_WR_CREDITS_FULL
+#define DAGB2_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB2_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB2_RD_CREDITS_FULL
+#define DAGB2_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB2_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB2_PERFCOUNTER_LO
+#define DAGB2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB2_PERFCOUNTER_HI
+#define DAGB2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB2_PERFCOUNTER0_CFG
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB2_PERFCOUNTER1_CFG
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB2_PERFCOUNTER2_CFG
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB2_PERFCOUNTER_RSLT_CNTL
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB2_RESERVE0
+#define DAGB2_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE1
+#define DAGB2_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE2
+#define DAGB2_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE3
+#define DAGB2_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE4
+#define DAGB2_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE5
+#define DAGB2_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE6
+#define DAGB2_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE7
+#define DAGB2_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE8
+#define DAGB2_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE9
+#define DAGB2_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE10
+#define DAGB2_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE11
+#define DAGB2_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE12
+#define DAGB2_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB2_RESERVE13
+#define DAGB2_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB2_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_dagb_dagbdec3
+//DAGB3_RDCLI0
+#define DAGB3_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI1
+#define DAGB3_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI2
+#define DAGB3_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI3
+#define DAGB3_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI4
+#define DAGB3_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI5
+#define DAGB3_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI6
+#define DAGB3_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI7
+#define DAGB3_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI8
+#define DAGB3_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI9
+#define DAGB3_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI10
+#define DAGB3_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI11
+#define DAGB3_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI12
+#define DAGB3_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI13
+#define DAGB3_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI14
+#define DAGB3_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RDCLI15
+#define DAGB3_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB3_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB3_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB3_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB3_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB3_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB3_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB3_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB3_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB3_RD_CNTL
+#define DAGB3_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB3_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB3_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB3_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB3_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB3_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB3_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB3_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB3_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB3_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB3_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB3_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB3_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB3_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB3_RD_GMI_CNTL
+#define DAGB3_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB3_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB3_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB3_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB3_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB3_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB3_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB3_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB3_RD_ADDR_DAGB
+#define DAGB3_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB3_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB3_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB3_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB3_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB3_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB3_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB3_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB3_RD_CGTT_CLK_CTRL
+#define DAGB3_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB3_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB3_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB3_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB3_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB3_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB3_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB3_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB3_RD_VC0_CNTL
+#define DAGB3_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC1_CNTL
+#define DAGB3_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC2_CNTL
+#define DAGB3_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC3_CNTL
+#define DAGB3_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC4_CNTL
+#define DAGB3_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC5_CNTL
+#define DAGB3_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC6_CNTL
+#define DAGB3_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_VC7_CNTL
+#define DAGB3_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_RD_CNTL_MISC
+#define DAGB3_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB3_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB3_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB3_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB3_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB3_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB3_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB3_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB3_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB3_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB3_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB3_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB3_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB3_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB3_RD_TLB_CREDIT
+#define DAGB3_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB3_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB3_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB3_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB3_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB3_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB3_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB3_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB3_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB3_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB3_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB3_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB3_RDCLI_ASK_PENDING
+#define DAGB3_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_GO_PENDING
+#define DAGB3_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_GBLSEND_PENDING
+#define DAGB3_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_TLB_PENDING
+#define DAGB3_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_OARB_PENDING
+#define DAGB3_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_RDCLI_OSD_PENDING
+#define DAGB3_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI0
+#define DAGB3_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI1
+#define DAGB3_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI2
+#define DAGB3_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI3
+#define DAGB3_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI4
+#define DAGB3_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI5
+#define DAGB3_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI6
+#define DAGB3_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI7
+#define DAGB3_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI8
+#define DAGB3_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI9
+#define DAGB3_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI10
+#define DAGB3_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI11
+#define DAGB3_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI12
+#define DAGB3_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI13
+#define DAGB3_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI14
+#define DAGB3_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WRCLI15
+#define DAGB3_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB3_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB3_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB3_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB3_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB3_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB3_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB3_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB3_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB3_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB3_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB3_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB3_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB3_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB3_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB3_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB3_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB3_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB3_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB3_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB3_WR_CNTL
+#define DAGB3_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB3_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB3_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB3_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB3_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB3_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB3_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB3_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB3_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB3_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB3_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB3_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB3_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB3_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB3_WR_GMI_CNTL
+#define DAGB3_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB3_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB3_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB3_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB3_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB3_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB3_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB3_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB3_WR_ADDR_DAGB
+#define DAGB3_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB3_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB3_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB3_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB3_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB3_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB3_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB3_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB3_WR_CGTT_CLK_CTRL
+#define DAGB3_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB3_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB3_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB3_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB3_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB
+#define DAGB3_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB3_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB3_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB3_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB3_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB3_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB3_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB3_WR_DATA_DAGB_MAX_BURST0
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB_MAX_BURST1
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB3_WR_VC0_CNTL
+#define DAGB3_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC1_CNTL
+#define DAGB3_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC2_CNTL
+#define DAGB3_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC3_CNTL
+#define DAGB3_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC4_CNTL
+#define DAGB3_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC5_CNTL
+#define DAGB3_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC6_CNTL
+#define DAGB3_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_VC7_CNTL
+#define DAGB3_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB3_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB3_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB3_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB3_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB3_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB3_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB3_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB3_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB3_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB3_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB3_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB3_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB3_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB3_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB3_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB3_WR_CNTL_MISC
+#define DAGB3_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB3_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB3_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB3_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB3_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB3_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB3_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB3_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB3_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB3_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB3_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB3_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB3_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB3_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB3_WR_TLB_CREDIT
+#define DAGB3_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB3_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB3_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB3_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB3_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB3_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB3_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB3_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB3_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB3_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB3_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB3_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB3_WR_DATA_CREDIT
+#define DAGB3_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB3_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB3_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB3_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB3_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB3_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB3_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB3_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB3_WR_MISC_CREDIT
+#define DAGB3_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB3_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB3_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB3_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB3_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB3_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB3_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB3_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB3_WRCLI_ASK_PENDING
+#define DAGB3_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GO_PENDING
+#define DAGB3_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GBLSEND_PENDING
+#define DAGB3_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_TLB_PENDING
+#define DAGB3_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_OARB_PENDING
+#define DAGB3_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_OSD_PENDING
+#define DAGB3_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_DBUS_ASK_PENDING
+#define DAGB3_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_DBUS_GO_PENDING
+#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_DAGB_DLY
+#define DAGB3_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB3_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB3_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB3_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB3_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB3_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB3_CNTL_MISC
+#define DAGB3_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB3_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB3_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB3_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB3_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB3_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB3_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB3_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB3_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB3_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB3_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB3_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB3_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB3_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB3_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB3_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB3_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB3_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB3_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB3_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB3_CNTL_MISC2
+#define DAGB3_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB3_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB3_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB3_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB3_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB3_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB3_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB3_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB3_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB3_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB3_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB3_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB3_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB3_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB3_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB3_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB3_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB3_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB3_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB3_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB3_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB3_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB3_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB3_FIFO_EMPTY
+#define DAGB3_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB3_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB3_FIFO_FULL
+#define DAGB3_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB3_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB3_WR_CREDITS_FULL
+#define DAGB3_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB3_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB3_RD_CREDITS_FULL
+#define DAGB3_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB3_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB3_PERFCOUNTER_LO
+#define DAGB3_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB3_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB3_PERFCOUNTER_HI
+#define DAGB3_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB3_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB3_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB3_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB3_PERFCOUNTER0_CFG
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB3_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB3_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB3_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB3_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB3_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB3_PERFCOUNTER1_CFG
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB3_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB3_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB3_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB3_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB3_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB3_PERFCOUNTER2_CFG
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB3_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB3_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB3_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB3_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB3_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB3_PERFCOUNTER_RSLT_CNTL
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB3_RESERVE0
+#define DAGB3_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE1
+#define DAGB3_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE2
+#define DAGB3_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE3
+#define DAGB3_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE4
+#define DAGB3_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE5
+#define DAGB3_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE6
+#define DAGB3_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE7
+#define DAGB3_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE8
+#define DAGB3_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE9
+#define DAGB3_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE10
+#define DAGB3_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE11
+#define DAGB3_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE12
+#define DAGB3_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB3_RESERVE13
+#define DAGB3_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB3_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_dagb_dagbdec4
+//DAGB4_RDCLI0
+#define DAGB4_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI1
+#define DAGB4_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI2
+#define DAGB4_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI3
+#define DAGB4_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI4
+#define DAGB4_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI5
+#define DAGB4_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI6
+#define DAGB4_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI7
+#define DAGB4_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI8
+#define DAGB4_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI9
+#define DAGB4_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI10
+#define DAGB4_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI11
+#define DAGB4_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI12
+#define DAGB4_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI13
+#define DAGB4_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI14
+#define DAGB4_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RDCLI15
+#define DAGB4_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB4_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB4_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB4_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB4_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB4_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB4_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB4_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB4_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB4_RD_CNTL
+#define DAGB4_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB4_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB4_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB4_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB4_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB4_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB4_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB4_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB4_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB4_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB4_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB4_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB4_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB4_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB4_RD_GMI_CNTL
+#define DAGB4_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB4_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB4_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB4_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB4_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB4_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB4_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB4_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB4_RD_ADDR_DAGB
+#define DAGB4_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB4_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB4_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB4_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB4_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB4_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB4_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB4_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB4_RD_CGTT_CLK_CTRL
+#define DAGB4_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB4_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB4_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB4_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB4_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB4_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB4_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB4_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB4_RD_VC0_CNTL
+#define DAGB4_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC1_CNTL
+#define DAGB4_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC2_CNTL
+#define DAGB4_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC3_CNTL
+#define DAGB4_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC4_CNTL
+#define DAGB4_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC5_CNTL
+#define DAGB4_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC6_CNTL
+#define DAGB4_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_VC7_CNTL
+#define DAGB4_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_RD_CNTL_MISC
+#define DAGB4_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB4_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB4_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB4_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB4_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB4_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB4_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB4_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB4_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB4_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB4_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB4_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB4_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB4_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB4_RD_TLB_CREDIT
+#define DAGB4_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB4_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB4_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB4_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB4_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB4_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB4_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB4_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB4_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB4_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB4_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB4_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB4_RDCLI_ASK_PENDING
+#define DAGB4_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_GO_PENDING
+#define DAGB4_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_GBLSEND_PENDING
+#define DAGB4_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_TLB_PENDING
+#define DAGB4_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_OARB_PENDING
+#define DAGB4_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_RDCLI_OSD_PENDING
+#define DAGB4_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI0
+#define DAGB4_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI1
+#define DAGB4_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI2
+#define DAGB4_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI3
+#define DAGB4_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI4
+#define DAGB4_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI5
+#define DAGB4_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI6
+#define DAGB4_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI7
+#define DAGB4_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI8
+#define DAGB4_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI9
+#define DAGB4_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI10
+#define DAGB4_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI11
+#define DAGB4_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI12
+#define DAGB4_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI13
+#define DAGB4_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI14
+#define DAGB4_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WRCLI15
+#define DAGB4_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB4_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB4_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB4_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB4_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB4_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB4_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB4_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB4_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB4_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB4_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB4_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB4_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB4_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB4_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB4_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB4_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB4_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB4_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB4_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB4_WR_CNTL
+#define DAGB4_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB4_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB4_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB4_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB4_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB4_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB4_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB4_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB4_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB4_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB4_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB4_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB4_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB4_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB4_WR_GMI_CNTL
+#define DAGB4_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB4_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB4_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB4_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB4_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB4_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB4_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB4_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB4_WR_ADDR_DAGB
+#define DAGB4_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB4_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB4_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB4_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB4_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB4_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB4_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB4_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB4_WR_CGTT_CLK_CTRL
+#define DAGB4_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB4_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB4_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB4_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB4_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB
+#define DAGB4_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB4_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB4_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB4_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB4_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB4_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB4_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB4_WR_DATA_DAGB_MAX_BURST0
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB_MAX_BURST1
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB4_WR_VC0_CNTL
+#define DAGB4_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC1_CNTL
+#define DAGB4_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC2_CNTL
+#define DAGB4_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC3_CNTL
+#define DAGB4_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC4_CNTL
+#define DAGB4_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC5_CNTL
+#define DAGB4_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC6_CNTL
+#define DAGB4_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_VC7_CNTL
+#define DAGB4_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB4_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB4_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB4_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB4_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB4_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB4_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB4_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB4_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB4_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB4_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB4_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB4_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB4_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB4_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB4_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB4_WR_CNTL_MISC
+#define DAGB4_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB4_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB4_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB4_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB4_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB4_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB4_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB4_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB4_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB4_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB4_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB4_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB4_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB4_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB4_WR_TLB_CREDIT
+#define DAGB4_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB4_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB4_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB4_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB4_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB4_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB4_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB4_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB4_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB4_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB4_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB4_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB4_WR_DATA_CREDIT
+#define DAGB4_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB4_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB4_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB4_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB4_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB4_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB4_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB4_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB4_WR_MISC_CREDIT
+#define DAGB4_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB4_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB4_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB4_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB4_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB4_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB4_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB4_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB4_WRCLI_ASK_PENDING
+#define DAGB4_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GO_PENDING
+#define DAGB4_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GBLSEND_PENDING
+#define DAGB4_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_TLB_PENDING
+#define DAGB4_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_OARB_PENDING
+#define DAGB4_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_OSD_PENDING
+#define DAGB4_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_DBUS_ASK_PENDING
+#define DAGB4_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_DBUS_GO_PENDING
+#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_DAGB_DLY
+#define DAGB4_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB4_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB4_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB4_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB4_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB4_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB4_CNTL_MISC
+#define DAGB4_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB4_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB4_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB4_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB4_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB4_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB4_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB4_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB4_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB4_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB4_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB4_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB4_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB4_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB4_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB4_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB4_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB4_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB4_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB4_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB4_CNTL_MISC2
+#define DAGB4_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB4_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB4_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB4_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB4_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB4_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB4_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB4_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB4_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB4_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB4_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB4_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB4_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB4_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB4_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB4_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB4_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB4_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB4_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB4_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB4_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB4_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB4_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB4_FIFO_EMPTY
+#define DAGB4_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB4_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB4_FIFO_FULL
+#define DAGB4_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB4_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB4_WR_CREDITS_FULL
+#define DAGB4_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB4_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB4_RD_CREDITS_FULL
+#define DAGB4_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB4_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB4_PERFCOUNTER_LO
+#define DAGB4_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB4_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB4_PERFCOUNTER_HI
+#define DAGB4_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB4_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB4_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB4_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB4_PERFCOUNTER0_CFG
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB4_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB4_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB4_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB4_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB4_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB4_PERFCOUNTER1_CFG
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB4_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB4_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB4_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB4_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB4_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB4_PERFCOUNTER2_CFG
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB4_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB4_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB4_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB4_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB4_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB4_PERFCOUNTER_RSLT_CNTL
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB4_RESERVE0
+#define DAGB4_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE1
+#define DAGB4_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE2
+#define DAGB4_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE3
+#define DAGB4_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE4
+#define DAGB4_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE5
+#define DAGB4_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE6
+#define DAGB4_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE7
+#define DAGB4_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE8
+#define DAGB4_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE9
+#define DAGB4_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE10
+#define DAGB4_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE11
+#define DAGB4_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE12
+#define DAGB4_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB4_RESERVE13
+#define DAGB4_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB4_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_ea_mmeadec0
+//MMEA0_DRAM_RD_CLI2GRP_MAP0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_RD_CLI2GRP_MAP1
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_WR_CLI2GRP_MAP0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_WR_CLI2GRP_MAP1
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_RD_GRP2VC_MAP
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_DRAM_WR_GRP2VC_MAP
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_DRAM_RD_LAZY
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_DRAM_WR_LAZY
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_DRAM_RD_CAM_CNTL
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA0_DRAM_WR_CAM_CNTL
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA0_DRAM_PAGE_BURST
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_AGE
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_DRAM_WR_PRI_AGE
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_DRAM_RD_PRI_QUEUING
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_WR_PRI_QUEUING
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_RD_PRI_FIXED
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_WR_PRI_FIXED
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_RD_PRI_URGENCY
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_DRAM_WR_PRI_URGENCY
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_RD_CLI2GRP_MAP0
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_RD_CLI2GRP_MAP1
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_WR_CLI2GRP_MAP0
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_WR_CLI2GRP_MAP1
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_GMI_RD_GRP2VC_MAP
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_GMI_WR_GRP2VC_MAP
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_GMI_RD_LAZY
+#define MMEA0_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_GMI_WR_LAZY
+#define MMEA0_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA0_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA0_GMI_RD_CAM_CNTL
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA0_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA0_GMI_WR_CAM_CNTL
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA0_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA0_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA0_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA0_GMI_PAGE_BURST
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_GMI_RD_PRI_AGE
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_GMI_WR_PRI_AGE
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_GMI_RD_PRI_QUEUING
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_WR_PRI_QUEUING
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_RD_PRI_FIXED
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_WR_PRI_FIXED
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_GMI_RD_PRI_URGENCY
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_GMI_WR_PRI_URGENCY
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_GMI_RD_PRI_QUANT_PRI1
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_RD_PRI_QUANT_PRI2
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_RD_PRI_QUANT_PRI3
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_WR_PRI_QUANT_PRI1
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_WR_PRI_QUANT_PRI2
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_GMI_WR_PRI_QUANT_PRI3
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_ADDRNORM_BASE_ADDR0
+#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR0
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_BASE_ADDR1
+#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR1
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_OFFSET_ADDR1
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA0_ADDRNORM_BASE_ADDR2
+#define MMEA0_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA0_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA0_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR2
+#define MMEA0_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_BASE_ADDR3
+#define MMEA0_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA0_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA0_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR3
+#define MMEA0_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_OFFSET_ADDR3
+#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA0_ADDRNORM_BASE_ADDR4
+#define MMEA0_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA0_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA0_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR4
+#define MMEA0_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_BASE_ADDR5
+#define MMEA0_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA0_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA0_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR5
+#define MMEA0_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_OFFSET_ADDR5
+#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA0_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA0_ADDRNORMGMI_HOLE_CNTL
+#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA0_ADDRDEC_BANK_CFG
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA0_ADDRDEC_MISC_CFG
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA0_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA0_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA0_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA0_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA0_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA0_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA0_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA0_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA0_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA0_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_RM_SEL_CS01
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC0_RM_SEL_CS23
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA0_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA0_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA0_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA0_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA0_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA0_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_RM_SEL_CS01
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_RM_SEL_CS23
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA0_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA0_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA0_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA0_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA0_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA0_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA0_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA0_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC2_RM_SEL_CS01
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC2_RM_SEL_CS23
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA0_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA0_IO_RD_CLI2GRP_MAP0
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_IO_RD_CLI2GRP_MAP1
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_IO_WR_CLI2GRP_MAP0
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_IO_WR_CLI2GRP_MAP1
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_IO_RD_COMBINE_FLUSH
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA0_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA0_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA0_IO_WR_COMBINE_FLUSH
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA0_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA0_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA0_IO_GROUP_BURST
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_AGE
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_IO_WR_PRI_AGE
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_IO_RD_PRI_QUEUING
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_WR_PRI_QUEUING
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_RD_PRI_FIXED
+#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_WR_PRI_FIXED
+#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_RD_PRI_URGENCY
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_IO_WR_PRI_URGENCY
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_IO_RD_PRI_URGENCY_MASKING
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_IO_WR_PRI_URGENCY_MASKING
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI1
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI2
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI3
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI1
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI2
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI3
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_SDP_ARB_DRAM
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA0_SDP_ARB_GMI
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA0_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA0_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA0_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA0_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA0_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA0_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA0_SDP_ARB_FINAL
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA0_SDP_DRAM_PRIORITY
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_GMI_PRIORITY
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_IO_PRIORITY
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_CREDITS
+#define MMEA0_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA0_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA0_SDP_TAG_RESERVE0
+#define MMEA0_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA0_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA0_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA0_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA0_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA0_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA0_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA0_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA0_SDP_TAG_RESERVE1
+#define MMEA0_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA0_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA0_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA0_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA0_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA0_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA0_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA0_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA0_SDP_VCC_RESERVE0
+#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA0_SDP_VCC_RESERVE1
+#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA0_SDP_VCD_RESERVE0
+#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA0_SDP_VCD_RESERVE1
+#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA0_SDP_REQ_CNTL
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA0_MISC
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA0_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA0_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA0_LATENCY_SAMPLING
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA0_PERFCOUNTER_LO
+#define MMEA0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA0_PERFCOUNTER_HI
+#define MMEA0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA0_PERFCOUNTER0_CFG
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA0_PERFCOUNTER1_CFG
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA0_PERFCOUNTER_RSLT_CNTL
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA0_EDC_CNT
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA0_EDC_CNT2
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA0_DSM_CNTL
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA0_DSM_CNTLA
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA0_DSM_CNTL2
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA0_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA0_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA0_DSM_CNTL2A
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA0_CGTT_CLK_CTRL
+#define MMEA0_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA0_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA0_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA0_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA0_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA0_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA0_EDC_MODE
+#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA0_ERR_STATUS
+#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA0_MISC2
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA0_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA0_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA0_ADDRDEC_SELECT
+#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA0_EDC_CNT3
+#define MMEA0_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
+// addressBlock: mmhub_ea_mmeadec1
+//MMEA1_DRAM_RD_CLI2GRP_MAP0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_RD_CLI2GRP_MAP1
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_WR_CLI2GRP_MAP0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_WR_CLI2GRP_MAP1
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_RD_GRP2VC_MAP
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_DRAM_WR_GRP2VC_MAP
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_DRAM_RD_LAZY
+#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_DRAM_WR_LAZY
+#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_DRAM_RD_CAM_CNTL
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA1_DRAM_WR_CAM_CNTL
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA1_DRAM_PAGE_BURST
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_AGE
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_DRAM_WR_PRI_AGE
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_DRAM_RD_PRI_QUEUING
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_WR_PRI_QUEUING
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_RD_PRI_FIXED
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_WR_PRI_FIXED
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_RD_PRI_URGENCY
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_DRAM_WR_PRI_URGENCY
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_RD_CLI2GRP_MAP0
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_RD_CLI2GRP_MAP1
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_WR_CLI2GRP_MAP0
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_WR_CLI2GRP_MAP1
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_GMI_RD_GRP2VC_MAP
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_GMI_WR_GRP2VC_MAP
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_GMI_RD_LAZY
+#define MMEA1_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_GMI_WR_LAZY
+#define MMEA1_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA1_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA1_GMI_RD_CAM_CNTL
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA1_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA1_GMI_WR_CAM_CNTL
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA1_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA1_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA1_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA1_GMI_PAGE_BURST
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_GMI_RD_PRI_AGE
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_GMI_WR_PRI_AGE
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_GMI_RD_PRI_QUEUING
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_WR_PRI_QUEUING
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_RD_PRI_FIXED
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_WR_PRI_FIXED
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_GMI_RD_PRI_URGENCY
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_GMI_WR_PRI_URGENCY
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_GMI_RD_PRI_QUANT_PRI1
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_RD_PRI_QUANT_PRI2
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_RD_PRI_QUANT_PRI3
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_WR_PRI_QUANT_PRI1
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_WR_PRI_QUANT_PRI2
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_GMI_WR_PRI_QUANT_PRI3
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_ADDRNORM_BASE_ADDR0
+#define MMEA1_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA1_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA1_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR0
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_BASE_ADDR1
+#define MMEA1_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA1_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA1_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR1
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_OFFSET_ADDR1
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA1_ADDRNORM_BASE_ADDR2
+#define MMEA1_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA1_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA1_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR2
+#define MMEA1_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_BASE_ADDR3
+#define MMEA1_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA1_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA1_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR3
+#define MMEA1_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_OFFSET_ADDR3
+#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA1_ADDRNORM_BASE_ADDR4
+#define MMEA1_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA1_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA1_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR4
+#define MMEA1_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_BASE_ADDR5
+#define MMEA1_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA1_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA1_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR5
+#define MMEA1_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_OFFSET_ADDR5
+#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA1_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA1_ADDRNORMGMI_HOLE_CNTL
+#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA1_ADDRDEC_BANK_CFG
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA1_ADDRDEC_MISC_CFG
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA1_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA1_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA1_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA1_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA1_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA1_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA1_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA1_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA1_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA1_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_RM_SEL_CS01
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC0_RM_SEL_CS23
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA1_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA1_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA1_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA1_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA1_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA1_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_RM_SEL_CS01
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_RM_SEL_CS23
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA1_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA1_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA1_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA1_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA1_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA1_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA1_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA1_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC2_RM_SEL_CS01
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC2_RM_SEL_CS23
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA1_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA1_IO_RD_CLI2GRP_MAP0
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_IO_RD_CLI2GRP_MAP1
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_IO_WR_CLI2GRP_MAP0
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_IO_WR_CLI2GRP_MAP1
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_IO_RD_COMBINE_FLUSH
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA1_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA1_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA1_IO_WR_COMBINE_FLUSH
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA1_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA1_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA1_IO_GROUP_BURST
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_AGE
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_IO_WR_PRI_AGE
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_IO_RD_PRI_QUEUING
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_WR_PRI_QUEUING
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_RD_PRI_FIXED
+#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_WR_PRI_FIXED
+#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_RD_PRI_URGENCY
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_IO_WR_PRI_URGENCY
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_IO_RD_PRI_URGENCY_MASKING
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_IO_WR_PRI_URGENCY_MASKING
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI1
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI2
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI3
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI1
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI2
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI3
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_SDP_ARB_DRAM
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA1_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA1_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA1_SDP_ARB_GMI
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA1_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA1_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA1_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA1_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA1_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA1_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA1_SDP_ARB_FINAL
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA1_SDP_DRAM_PRIORITY
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_GMI_PRIORITY
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_IO_PRIORITY
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_CREDITS
+#define MMEA1_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA1_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA1_SDP_TAG_RESERVE0
+#define MMEA1_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA1_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA1_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA1_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA1_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA1_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA1_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA1_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA1_SDP_TAG_RESERVE1
+#define MMEA1_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA1_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA1_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA1_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA1_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA1_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA1_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA1_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA1_SDP_VCC_RESERVE0
+#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA1_SDP_VCC_RESERVE1
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA1_SDP_VCD_RESERVE0
+#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA1_SDP_VCD_RESERVE1
+#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA1_SDP_REQ_CNTL
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA1_MISC
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA1_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA1_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA1_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA1_LATENCY_SAMPLING
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA1_PERFCOUNTER_LO
+#define MMEA1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA1_PERFCOUNTER_HI
+#define MMEA1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA1_PERFCOUNTER0_CFG
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA1_PERFCOUNTER1_CFG
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA1_PERFCOUNTER_RSLT_CNTL
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA1_EDC_CNT
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA1_EDC_CNT2
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA1_DSM_CNTL
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA1_DSM_CNTLA
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA1_DSM_CNTL2
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA1_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA1_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA1_DSM_CNTL2A
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA1_CGTT_CLK_CTRL
+#define MMEA1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA1_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA1_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA1_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA1_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA1_EDC_MODE
+#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA1_ERR_STATUS
+#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA1_MISC2
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA1_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA1_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA1_ADDRDEC_SELECT
+#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA1_EDC_CNT3
+#define MMEA1_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
+// addressBlock: mmhub_ea_mmeadec2
+//MMEA2_DRAM_RD_CLI2GRP_MAP0
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_RD_CLI2GRP_MAP1
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_WR_CLI2GRP_MAP0
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_WR_CLI2GRP_MAP1
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_DRAM_RD_GRP2VC_MAP
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_DRAM_WR_GRP2VC_MAP
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_DRAM_RD_LAZY
+#define MMEA2_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_DRAM_WR_LAZY
+#define MMEA2_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_DRAM_RD_CAM_CNTL
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA2_DRAM_WR_CAM_CNTL
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA2_DRAM_PAGE_BURST
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA2_DRAM_RD_PRI_AGE
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_DRAM_WR_PRI_AGE
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_DRAM_RD_PRI_QUEUING
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_WR_PRI_QUEUING
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_RD_PRI_FIXED
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_WR_PRI_FIXED
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_DRAM_RD_PRI_URGENCY
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_DRAM_WR_PRI_URGENCY
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_RD_CLI2GRP_MAP0
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_RD_CLI2GRP_MAP1
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_WR_CLI2GRP_MAP0
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_WR_CLI2GRP_MAP1
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_GMI_RD_GRP2VC_MAP
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_GMI_WR_GRP2VC_MAP
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA2_GMI_RD_LAZY
+#define MMEA2_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_GMI_WR_LAZY
+#define MMEA2_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA2_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA2_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA2_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA2_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA2_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA2_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA2_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA2_GMI_RD_CAM_CNTL
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA2_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA2_GMI_WR_CAM_CNTL
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA2_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA2_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA2_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA2_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA2_GMI_PAGE_BURST
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA2_GMI_RD_PRI_AGE
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_GMI_WR_PRI_AGE
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_GMI_RD_PRI_QUEUING
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_WR_PRI_QUEUING
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_RD_PRI_FIXED
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_WR_PRI_FIXED
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_GMI_RD_PRI_URGENCY
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_GMI_WR_PRI_URGENCY
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_GMI_RD_PRI_QUANT_PRI1
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_RD_PRI_QUANT_PRI2
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_RD_PRI_QUANT_PRI3
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_WR_PRI_QUANT_PRI1
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_WR_PRI_QUANT_PRI2
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_GMI_WR_PRI_QUANT_PRI3
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_ADDRNORM_BASE_ADDR0
+#define MMEA2_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA2_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA2_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA2_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA2_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_LIMIT_ADDR0
+#define MMEA2_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA2_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA2_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_BASE_ADDR1
+#define MMEA2_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA2_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA2_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA2_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA2_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_LIMIT_ADDR1
+#define MMEA2_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA2_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA2_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_OFFSET_ADDR1
+#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA2_ADDRNORM_BASE_ADDR2
+#define MMEA2_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA2_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA2_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA2_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA2_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_LIMIT_ADDR2
+#define MMEA2_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA2_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA2_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_BASE_ADDR3
+#define MMEA2_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA2_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA2_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA2_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA2_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_LIMIT_ADDR3
+#define MMEA2_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA2_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA2_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_OFFSET_ADDR3
+#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA2_ADDRNORM_BASE_ADDR4
+#define MMEA2_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA2_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA2_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA2_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA2_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_LIMIT_ADDR4
+#define MMEA2_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA2_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA2_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_BASE_ADDR5
+#define MMEA2_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA2_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA2_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA2_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA2_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_LIMIT_ADDR5
+#define MMEA2_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA2_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA2_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA2_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA2_ADDRNORM_OFFSET_ADDR5
+#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA2_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA2_ADDRNORMGMI_HOLE_CNTL
+#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA2_ADDRDEC_BANK_CFG
+#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA2_ADDRDEC_MISC_CFG
+#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA2_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA2_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA2_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA2_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA2_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA2_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA2_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA2_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA2_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA2_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA2_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA2_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA2_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA2_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA2_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA2_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA2_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA2_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA2_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA2_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA2_ADDRDEC0_RM_SEL_CS01
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC0_RM_SEL_CS23
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA2_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA2_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA2_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA2_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA2_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA2_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA2_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA2_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA2_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA2_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA2_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA2_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA2_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA2_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA2_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA2_ADDRDEC1_RM_SEL_CS01
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC1_RM_SEL_CS23
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA2_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA2_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA2_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA2_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA2_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA2_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA2_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA2_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA2_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA2_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA2_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA2_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA2_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA2_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA2_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA2_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA2_ADDRDEC2_RM_SEL_CS01
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC2_RM_SEL_CS23
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA2_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA2_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA2_IO_RD_CLI2GRP_MAP0
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_IO_RD_CLI2GRP_MAP1
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_IO_WR_CLI2GRP_MAP0
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA2_IO_WR_CLI2GRP_MAP1
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA2_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA2_IO_RD_COMBINE_FLUSH
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA2_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA2_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA2_IO_WR_COMBINE_FLUSH
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA2_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA2_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA2_IO_GROUP_BURST
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA2_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA2_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA2_IO_RD_PRI_AGE
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_IO_WR_PRI_AGE
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA2_IO_RD_PRI_QUEUING
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_WR_PRI_QUEUING
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_RD_PRI_FIXED
+#define MMEA2_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_WR_PRI_FIXED
+#define MMEA2_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA2_IO_RD_PRI_URGENCY
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_IO_WR_PRI_URGENCY
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA2_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA2_IO_RD_PRI_URGENCY_MASKING
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_IO_WR_PRI_URGENCY_MASKING
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA2_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA2_IO_RD_PRI_QUANT_PRI1
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_RD_PRI_QUANT_PRI2
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_RD_PRI_QUANT_PRI3
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_WR_PRI_QUANT_PRI1
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_WR_PRI_QUANT_PRI2
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_IO_WR_PRI_QUANT_PRI3
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA2_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA2_SDP_ARB_DRAM
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA2_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA2_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA2_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA2_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA2_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA2_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA2_SDP_ARB_GMI
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA2_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA2_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA2_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA2_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA2_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA2_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA2_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA2_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA2_SDP_ARB_FINAL
+#define MMEA2_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA2_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA2_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA2_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA2_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA2_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA2_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA2_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA2_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA2_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA2_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA2_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA2_SDP_DRAM_PRIORITY
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA2_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA2_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA2_SDP_GMI_PRIORITY
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA2_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA2_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA2_SDP_IO_PRIORITY
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA2_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA2_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA2_SDP_CREDITS
+#define MMEA2_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA2_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA2_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA2_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA2_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA2_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA2_SDP_TAG_RESERVE0
+#define MMEA2_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA2_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA2_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA2_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA2_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA2_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA2_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA2_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA2_SDP_TAG_RESERVE1
+#define MMEA2_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA2_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA2_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA2_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA2_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA2_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA2_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA2_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA2_SDP_VCC_RESERVE0
+#define MMEA2_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA2_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA2_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA2_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA2_SDP_VCC_RESERVE1
+#define MMEA2_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA2_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA2_SDP_VCD_RESERVE0
+#define MMEA2_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA2_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA2_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA2_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA2_SDP_VCD_RESERVE1
+#define MMEA2_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA2_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA2_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA2_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA2_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA2_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA2_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA2_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA2_SDP_REQ_CNTL
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA2_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA2_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA2_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA2_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA2_MISC
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA2_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA2_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA2_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA2_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA2_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA2_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA2_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA2_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA2_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA2_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA2_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA2_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA2_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA2_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA2_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA2_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA2_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA2_LATENCY_SAMPLING
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA2_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA2_PERFCOUNTER_LO
+#define MMEA2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA2_PERFCOUNTER_HI
+#define MMEA2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA2_PERFCOUNTER0_CFG
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA2_PERFCOUNTER1_CFG
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA2_PERFCOUNTER_RSLT_CNTL
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA2_EDC_CNT
+#define MMEA2_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA2_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA2_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA2_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA2_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA2_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA2_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA2_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA2_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA2_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA2_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA2_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA2_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA2_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA2_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA2_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA2_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA2_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA2_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA2_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA2_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA2_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA2_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA2_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA2_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA2_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA2_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA2_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA2_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA2_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA2_EDC_CNT2
+#define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA2_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA2_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA2_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA2_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA2_DSM_CNTL
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA2_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA2_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA2_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA2_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA2_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA2_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA2_DSM_CNTLA
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA2_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA2_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA2_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA2_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA2_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA2_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA2_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA2_DSM_CNTL2
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA2_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA2_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA2_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA2_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA2_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA2_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA2_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA2_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA2_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA2_DSM_CNTL2A
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA2_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA2_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA2_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA2_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA2_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA2_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA2_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA2_CGTT_CLK_CTRL
+#define MMEA2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA2_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA2_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA2_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA2_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA2_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA2_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA2_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA2_EDC_MODE
+#define MMEA2_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA2_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA2_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA2_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA2_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA2_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA2_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA2_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA2_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA2_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA2_ERR_STATUS
+#define MMEA2_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA2_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA2_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA2_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA2_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA2_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA2_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA2_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA2_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA2_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA2_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA2_MISC2
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA2_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA2_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA2_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA2_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA2_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA2_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA2_ADDRDEC_SELECT
+#define MMEA2_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA2_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA2_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA2_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA2_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA2_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA2_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA2_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA2_EDC_CNT3
+#define MMEA2_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA2_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA2_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA2_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA2_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA2_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA2_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA2_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA2_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA2_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA2_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA2_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA2_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA2_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
+// addressBlock: mmhub_ea_mmeadec3
+//MMEA3_DRAM_RD_CLI2GRP_MAP0
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_RD_CLI2GRP_MAP1
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_WR_CLI2GRP_MAP0
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_WR_CLI2GRP_MAP1
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_DRAM_RD_GRP2VC_MAP
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_DRAM_WR_GRP2VC_MAP
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_DRAM_RD_LAZY
+#define MMEA3_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_DRAM_WR_LAZY
+#define MMEA3_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_DRAM_RD_CAM_CNTL
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA3_DRAM_WR_CAM_CNTL
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA3_DRAM_PAGE_BURST
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA3_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA3_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA3_DRAM_RD_PRI_AGE
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_DRAM_WR_PRI_AGE
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_DRAM_RD_PRI_QUEUING
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_WR_PRI_QUEUING
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_RD_PRI_FIXED
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_WR_PRI_FIXED
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_DRAM_RD_PRI_URGENCY
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_DRAM_WR_PRI_URGENCY
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_RD_CLI2GRP_MAP0
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_RD_CLI2GRP_MAP1
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_WR_CLI2GRP_MAP0
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_WR_CLI2GRP_MAP1
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_GMI_RD_GRP2VC_MAP
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_GMI_WR_GRP2VC_MAP
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA3_GMI_RD_LAZY
+#define MMEA3_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_GMI_WR_LAZY
+#define MMEA3_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA3_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA3_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA3_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA3_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA3_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA3_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA3_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA3_GMI_RD_CAM_CNTL
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA3_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA3_GMI_WR_CAM_CNTL
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA3_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA3_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA3_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA3_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA3_GMI_PAGE_BURST
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA3_GMI_RD_PRI_AGE
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_GMI_WR_PRI_AGE
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_GMI_RD_PRI_QUEUING
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_WR_PRI_QUEUING
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_RD_PRI_FIXED
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_WR_PRI_FIXED
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_GMI_RD_PRI_URGENCY
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_GMI_WR_PRI_URGENCY
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_GMI_RD_PRI_QUANT_PRI1
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_RD_PRI_QUANT_PRI2
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_RD_PRI_QUANT_PRI3
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_WR_PRI_QUANT_PRI1
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_WR_PRI_QUANT_PRI2
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_GMI_WR_PRI_QUANT_PRI3
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_ADDRNORM_BASE_ADDR0
+#define MMEA3_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA3_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA3_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA3_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA3_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_LIMIT_ADDR0
+#define MMEA3_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA3_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA3_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_BASE_ADDR1
+#define MMEA3_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA3_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA3_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA3_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA3_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_LIMIT_ADDR1
+#define MMEA3_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA3_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA3_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_OFFSET_ADDR1
+#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA3_ADDRNORM_BASE_ADDR2
+#define MMEA3_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA3_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA3_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA3_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA3_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_LIMIT_ADDR2
+#define MMEA3_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA3_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA3_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_BASE_ADDR3
+#define MMEA3_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA3_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA3_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA3_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA3_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_LIMIT_ADDR3
+#define MMEA3_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA3_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA3_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_OFFSET_ADDR3
+#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA3_ADDRNORM_BASE_ADDR4
+#define MMEA3_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA3_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA3_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA3_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA3_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_LIMIT_ADDR4
+#define MMEA3_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA3_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA3_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_BASE_ADDR5
+#define MMEA3_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA3_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA3_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA3_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA3_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_LIMIT_ADDR5
+#define MMEA3_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA3_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA3_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA3_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA3_ADDRNORM_OFFSET_ADDR5
+#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA3_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA3_ADDRNORMGMI_HOLE_CNTL
+#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA3_ADDRDEC_BANK_CFG
+#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA3_ADDRDEC_MISC_CFG
+#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA3_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA3_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA3_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA3_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA3_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA3_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA3_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA3_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA3_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA3_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA3_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA3_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA3_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA3_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA3_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA3_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA3_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA3_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA3_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA3_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA3_ADDRDEC0_RM_SEL_CS01
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC0_RM_SEL_CS23
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA3_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA3_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA3_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA3_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA3_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA3_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA3_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA3_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA3_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA3_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA3_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA3_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA3_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA3_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA3_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA3_ADDRDEC1_RM_SEL_CS01
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC1_RM_SEL_CS23
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA3_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA3_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA3_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA3_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA3_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA3_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA3_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA3_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA3_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA3_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA3_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA3_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA3_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA3_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA3_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA3_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA3_ADDRDEC2_RM_SEL_CS01
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC2_RM_SEL_CS23
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA3_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA3_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA3_IO_RD_CLI2GRP_MAP0
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_IO_RD_CLI2GRP_MAP1
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_IO_WR_CLI2GRP_MAP0
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA3_IO_WR_CLI2GRP_MAP1
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA3_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA3_IO_RD_COMBINE_FLUSH
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA3_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA3_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA3_IO_WR_COMBINE_FLUSH
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA3_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA3_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA3_IO_GROUP_BURST
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA3_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA3_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA3_IO_RD_PRI_AGE
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_IO_WR_PRI_AGE
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA3_IO_RD_PRI_QUEUING
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_WR_PRI_QUEUING
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_RD_PRI_FIXED
+#define MMEA3_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_WR_PRI_FIXED
+#define MMEA3_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA3_IO_RD_PRI_URGENCY
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_IO_WR_PRI_URGENCY
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA3_IO_RD_PRI_URGENCY_MASKING
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_IO_WR_PRI_URGENCY_MASKING
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA3_IO_RD_PRI_QUANT_PRI1
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_RD_PRI_QUANT_PRI2
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_RD_PRI_QUANT_PRI3
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_WR_PRI_QUANT_PRI1
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_WR_PRI_QUANT_PRI2
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_IO_WR_PRI_QUANT_PRI3
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA3_SDP_ARB_DRAM
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA3_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA3_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA3_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA3_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA3_SDP_ARB_GMI
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA3_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA3_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA3_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA3_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA3_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA3_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA3_SDP_ARB_FINAL
+#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA3_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA3_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA3_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA3_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA3_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA3_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA3_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA3_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA3_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA3_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA3_SDP_DRAM_PRIORITY
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA3_SDP_GMI_PRIORITY
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA3_SDP_IO_PRIORITY
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA3_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA3_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA3_SDP_CREDITS
+#define MMEA3_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA3_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA3_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA3_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA3_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA3_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA3_SDP_TAG_RESERVE0
+#define MMEA3_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA3_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA3_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA3_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA3_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA3_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA3_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA3_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA3_SDP_TAG_RESERVE1
+#define MMEA3_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA3_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA3_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA3_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA3_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA3_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA3_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA3_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA3_SDP_VCC_RESERVE0
+#define MMEA3_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA3_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA3_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA3_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA3_SDP_VCC_RESERVE1
+#define MMEA3_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA3_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA3_SDP_VCD_RESERVE0
+#define MMEA3_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA3_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA3_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA3_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA3_SDP_VCD_RESERVE1
+#define MMEA3_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA3_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA3_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA3_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA3_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA3_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA3_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA3_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA3_SDP_REQ_CNTL
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA3_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA3_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA3_MISC
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA3_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA3_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA3_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA3_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA3_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA3_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA3_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA3_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA3_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA3_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA3_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA3_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA3_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA3_LATENCY_SAMPLING
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA3_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA3_PERFCOUNTER_LO
+#define MMEA3_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA3_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA3_PERFCOUNTER_HI
+#define MMEA3_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA3_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA3_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA3_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA3_PERFCOUNTER0_CFG
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA3_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA3_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA3_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA3_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA3_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA3_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA3_PERFCOUNTER1_CFG
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA3_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA3_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA3_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA3_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA3_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA3_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA3_PERFCOUNTER_RSLT_CNTL
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA3_EDC_CNT
+#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA3_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA3_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA3_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA3_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA3_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA3_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA3_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA3_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA3_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA3_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA3_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA3_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA3_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA3_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA3_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA3_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA3_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA3_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA3_EDC_CNT2
+#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA3_DSM_CNTL
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA3_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA3_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA3_DSM_CNTLA
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA3_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA3_DSM_CNTL2
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA3_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA3_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA3_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA3_DSM_CNTL2A
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA3_CGTT_CLK_CTRL
+#define MMEA3_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA3_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA3_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA3_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA3_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA3_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA3_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA3_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA3_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA3_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA3_EDC_MODE
+#define MMEA3_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA3_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA3_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA3_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA3_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA3_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA3_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA3_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA3_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA3_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA3_ERR_STATUS
+#define MMEA3_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA3_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA3_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA3_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA3_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA3_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA3_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA3_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA3_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA3_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA3_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA3_MISC2
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA3_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA3_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA3_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA3_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA3_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA3_ADDRDEC_SELECT
+#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA3_EDC_CNT3
+#define MMEA3_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA3_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA3_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA3_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA3_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA3_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA3_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA3_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA3_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA3_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA3_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA3_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA3_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA3_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
+// addressBlock: mmhub_ea_mmeadec4
+//MMEA4_DRAM_RD_CLI2GRP_MAP0
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_RD_CLI2GRP_MAP1
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_WR_CLI2GRP_MAP0
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_WR_CLI2GRP_MAP1
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_DRAM_RD_GRP2VC_MAP
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_DRAM_WR_GRP2VC_MAP
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_DRAM_RD_LAZY
+#define MMEA4_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_DRAM_WR_LAZY
+#define MMEA4_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_DRAM_RD_CAM_CNTL
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA4_DRAM_WR_CAM_CNTL
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA4_DRAM_PAGE_BURST
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA4_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA4_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA4_DRAM_RD_PRI_AGE
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_DRAM_WR_PRI_AGE
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_DRAM_RD_PRI_QUEUING
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_WR_PRI_QUEUING
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_RD_PRI_FIXED
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_WR_PRI_FIXED
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_DRAM_RD_PRI_URGENCY
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_DRAM_WR_PRI_URGENCY
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_RD_CLI2GRP_MAP0
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_RD_CLI2GRP_MAP1
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_WR_CLI2GRP_MAP0
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_WR_CLI2GRP_MAP1
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_GMI_RD_GRP2VC_MAP
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_GMI_WR_GRP2VC_MAP
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA4_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA4_GMI_RD_LAZY
+#define MMEA4_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_GMI_WR_LAZY
+#define MMEA4_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA4_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA4_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA4_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA4_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA4_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA4_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA4_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA4_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA4_GMI_RD_CAM_CNTL
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA4_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA4_GMI_WR_CAM_CNTL
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA4_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA4_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA4_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA4_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA4_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA4_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA4_GMI_PAGE_BURST
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA4_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA4_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA4_GMI_RD_PRI_AGE
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_GMI_WR_PRI_AGE
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_GMI_RD_PRI_QUEUING
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_WR_PRI_QUEUING
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_RD_PRI_FIXED
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_WR_PRI_FIXED
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_GMI_RD_PRI_URGENCY
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_GMI_WR_PRI_URGENCY
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_GMI_RD_PRI_QUANT_PRI1
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_RD_PRI_QUANT_PRI2
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_RD_PRI_QUANT_PRI3
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_WR_PRI_QUANT_PRI1
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_WR_PRI_QUANT_PRI2
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_GMI_WR_PRI_QUANT_PRI3
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_ADDRNORM_BASE_ADDR0
+#define MMEA4_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA4_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA4_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA4_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA4_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA4_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_LIMIT_ADDR0
+#define MMEA4_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA4_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA4_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_BASE_ADDR1
+#define MMEA4_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA4_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA4_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA4_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA4_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA4_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_LIMIT_ADDR1
+#define MMEA4_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA4_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA4_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_OFFSET_ADDR1
+#define MMEA4_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA4_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA4_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA4_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA4_ADDRNORM_BASE_ADDR2
+#define MMEA4_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA4_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA4_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA4_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA4_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA4_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_LIMIT_ADDR2
+#define MMEA4_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA4_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA4_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_BASE_ADDR3
+#define MMEA4_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA4_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA4_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA4_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA4_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA4_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_LIMIT_ADDR3
+#define MMEA4_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA4_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA4_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_OFFSET_ADDR3
+#define MMEA4_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA4_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA4_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA4_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA4_ADDRNORM_BASE_ADDR4
+#define MMEA4_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA4_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA4_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA4_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA4_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA4_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_LIMIT_ADDR4
+#define MMEA4_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA4_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA4_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_BASE_ADDR5
+#define MMEA4_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA4_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA4_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA4_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA4_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA4_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_LIMIT_ADDR5
+#define MMEA4_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA4_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA4_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA4_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA4_ADDRNORM_OFFSET_ADDR5
+#define MMEA4_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA4_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA4_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA4_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA4_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA4_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA4_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA4_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA4_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA4_ADDRNORMGMI_HOLE_CNTL
+#define MMEA4_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA4_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA4_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA4_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA4_ADDRDEC_BANK_CFG
+#define MMEA4_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA4_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA4_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA4_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA4_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA4_ADDRDEC_MISC_CFG
+#define MMEA4_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA4_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA4_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA4_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA4_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA4_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA4_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA4_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA4_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA4_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA4_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA4_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA4_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA4_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA4_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA4_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA4_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA4_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA4_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA4_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA4_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA4_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA4_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA4_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA4_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA4_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA4_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA4_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA4_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA4_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA4_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA4_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA4_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA4_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA4_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA4_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA4_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA4_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA4_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA4_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA4_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA4_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA4_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA4_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA4_ADDRDEC0_RM_SEL_CS01
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC0_RM_SEL_CS23
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA4_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA4_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA4_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA4_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA4_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA4_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA4_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA4_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA4_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA4_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA4_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA4_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA4_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA4_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA4_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA4_ADDRDEC1_RM_SEL_CS01
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC1_RM_SEL_CS23
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA4_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA4_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA4_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA4_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA4_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA4_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA4_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA4_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA4_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA4_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA4_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA4_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA4_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA4_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA4_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA4_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA4_ADDRDEC2_RM_SEL_CS01
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC2_RM_SEL_CS23
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA4_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA4_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA4_IO_RD_CLI2GRP_MAP0
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_IO_RD_CLI2GRP_MAP1
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_IO_WR_CLI2GRP_MAP0
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA4_IO_WR_CLI2GRP_MAP1
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA4_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA4_IO_RD_COMBINE_FLUSH
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA4_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA4_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA4_IO_WR_COMBINE_FLUSH
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA4_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA4_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA4_IO_GROUP_BURST
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA4_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA4_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA4_IO_RD_PRI_AGE
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_IO_WR_PRI_AGE
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA4_IO_RD_PRI_QUEUING
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_WR_PRI_QUEUING
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_RD_PRI_FIXED
+#define MMEA4_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_WR_PRI_FIXED
+#define MMEA4_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA4_IO_RD_PRI_URGENCY
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_IO_WR_PRI_URGENCY
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA4_IO_RD_PRI_URGENCY_MASKING
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_IO_WR_PRI_URGENCY_MASKING
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA4_IO_RD_PRI_QUANT_PRI1
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_RD_PRI_QUANT_PRI2
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_RD_PRI_QUANT_PRI3
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_WR_PRI_QUANT_PRI1
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_WR_PRI_QUANT_PRI2
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_IO_WR_PRI_QUANT_PRI3
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA4_SDP_ARB_DRAM
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA4_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA4_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA4_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA4_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA4_SDP_ARB_GMI
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA4_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA4_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA4_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA4_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA4_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA4_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA4_SDP_ARB_FINAL
+#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA4_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA4_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA4_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA4_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA4_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA4_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA4_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA4_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA4_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA4_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA4_SDP_DRAM_PRIORITY
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA4_SDP_GMI_PRIORITY
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA4_SDP_IO_PRIORITY
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA4_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA4_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA4_SDP_CREDITS
+#define MMEA4_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA4_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA4_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA4_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA4_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA4_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA4_SDP_TAG_RESERVE0
+#define MMEA4_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA4_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA4_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA4_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA4_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA4_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA4_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA4_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA4_SDP_TAG_RESERVE1
+#define MMEA4_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA4_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA4_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA4_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA4_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA4_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA4_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA4_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA4_SDP_VCC_RESERVE0
+#define MMEA4_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA4_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA4_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA4_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA4_SDP_VCC_RESERVE1
+#define MMEA4_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA4_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA4_SDP_VCD_RESERVE0
+#define MMEA4_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA4_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA4_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA4_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA4_SDP_VCD_RESERVE1
+#define MMEA4_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA4_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA4_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA4_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA4_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA4_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA4_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA4_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA4_SDP_REQ_CNTL
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA4_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA4_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA4_MISC
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA4_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA4_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA4_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA4_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA4_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA4_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA4_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA4_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA4_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA4_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA4_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA4_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA4_LATENCY_SAMPLING
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA4_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA4_PERFCOUNTER_LO
+#define MMEA4_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA4_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA4_PERFCOUNTER_HI
+#define MMEA4_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA4_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA4_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA4_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA4_PERFCOUNTER0_CFG
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA4_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA4_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA4_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA4_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA4_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA4_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA4_PERFCOUNTER1_CFG
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA4_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA4_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA4_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA4_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA4_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA4_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA4_PERFCOUNTER_RSLT_CNTL
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA4_EDC_CNT
+#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA4_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA4_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA4_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA4_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA4_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA4_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA4_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA4_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA4_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA4_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA4_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA4_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA4_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA4_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA4_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA4_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA4_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA4_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA4_EDC_CNT2
+#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA4_DSM_CNTL
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA4_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA4_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA4_DSM_CNTLA
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA4_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA4_DSM_CNTL2
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA4_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA4_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA4_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA4_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA4_DSM_CNTL2A
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA4_CGTT_CLK_CTRL
+#define MMEA4_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA4_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA4_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA4_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA4_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA4_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA4_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA4_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA4_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA4_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA4_EDC_MODE
+#define MMEA4_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA4_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA4_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA4_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA4_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA4_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA4_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA4_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA4_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA4_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA4_ERR_STATUS
+#define MMEA4_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA4_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA4_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA4_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA4_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA4_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA4_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA4_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA4_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA4_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA4_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA4_MISC2
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA4_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA4_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA4_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA4_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA4_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA4_ADDRDEC_SELECT
+#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA4_EDC_CNT3
+#define MMEA4_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA4_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA4_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA4_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA4_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA4_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA4_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA4_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA4_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA4_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA4_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA4_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA4_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA4_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
+// addressBlock: mmhub_pctldec0
+//PCTL0_CTRL
+#define PCTL0_CTRL__PG_ENABLE__SHIFT 0x0
+#define PCTL0_CTRL__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x1
+#define PCTL0_CTRL__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x4
+#define PCTL0_CTRL__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_CTRL__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x10
+#define PCTL0_CTRL__OVR_EA0_SDP_PARTACK__SHIFT 0x11
+#define PCTL0_CTRL__OVR_EA1_SDP_PARTACK__SHIFT 0x12
+#define PCTL0_CTRL__OVR_EA2_SDP_PARTACK__SHIFT 0x13
+#define PCTL0_CTRL__OVR_EA3_SDP_PARTACK__SHIFT 0x14
+#define PCTL0_CTRL__OVR_EA4_SDP_PARTACK__SHIFT 0x15
+#define PCTL0_CTRL__OVR_EA0_SDP_FULLACK__SHIFT 0x16
+#define PCTL0_CTRL__OVR_EA1_SDP_FULLACK__SHIFT 0x17
+#define PCTL0_CTRL__OVR_EA2_SDP_FULLACK__SHIFT 0x18
+#define PCTL0_CTRL__OVR_EA3_SDP_FULLACK__SHIFT 0x19
+#define PCTL0_CTRL__OVR_EA4_SDP_FULLACK__SHIFT 0x1a
+#define PCTL0_CTRL__PGFSM_CMD_STATUS__SHIFT 0x1b
+#define PCTL0_CTRL__PG_ENABLE_MASK 0x00000001L
+#define PCTL0_CTRL__ALLOW_DEEP_SLEEP_MODE_MASK 0x0000000EL
+#define PCTL0_CTRL__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x000007F0L
+#define PCTL0_CTRL__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x0000F800L
+#define PCTL0_CTRL__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00010000L
+#define PCTL0_CTRL__OVR_EA0_SDP_PARTACK_MASK 0x00020000L
+#define PCTL0_CTRL__OVR_EA1_SDP_PARTACK_MASK 0x00040000L
+#define PCTL0_CTRL__OVR_EA2_SDP_PARTACK_MASK 0x00080000L
+#define PCTL0_CTRL__OVR_EA3_SDP_PARTACK_MASK 0x00100000L
+#define PCTL0_CTRL__OVR_EA4_SDP_PARTACK_MASK 0x00200000L
+#define PCTL0_CTRL__OVR_EA0_SDP_FULLACK_MASK 0x00400000L
+#define PCTL0_CTRL__OVR_EA1_SDP_FULLACK_MASK 0x00800000L
+#define PCTL0_CTRL__OVR_EA2_SDP_FULLACK_MASK 0x01000000L
+#define PCTL0_CTRL__OVR_EA3_SDP_FULLACK_MASK 0x02000000L
+#define PCTL0_CTRL__OVR_EA4_SDP_FULLACK_MASK 0x04000000L
+#define PCTL0_CTRL__PGFSM_CMD_STATUS_MASK 0x18000000L
+//PCTL0_MMHUB_DEEPSLEEP_IB
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS0__SHIFT 0x0
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS1__SHIFT 0x1
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS2__SHIFT 0x2
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS3__SHIFT 0x3
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS4__SHIFT 0x4
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS5__SHIFT 0x5
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS6__SHIFT 0x6
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS7__SHIFT 0x7
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS8__SHIFT 0x8
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS9__SHIFT 0x9
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS10__SHIFT 0xa
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS11__SHIFT 0xb
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS12__SHIFT 0xc
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS13__SHIFT 0xd
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS14__SHIFT 0xe
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS15__SHIFT 0xf
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS16__SHIFT 0x10
+#define PCTL0_MMHUB_DEEPSLEEP_IB__SETCLEAR__SHIFT 0x1f
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS0_MASK 0x00000001L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS1_MASK 0x00000002L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS2_MASK 0x00000004L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS3_MASK 0x00000008L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS4_MASK 0x00000010L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS5_MASK 0x00000020L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS6_MASK 0x00000040L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS7_MASK 0x00000080L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS8_MASK 0x00000100L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS9_MASK 0x00000200L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS10_MASK 0x00000400L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS11_MASK 0x00000800L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS12_MASK 0x00001000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS13_MASK 0x00002000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS14_MASK 0x00004000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS15_MASK 0x00008000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__DS16_MASK 0x00010000L
+#define PCTL0_MMHUB_DEEPSLEEP_IB__SETCLEAR_MASK 0x80000000L
+//PCTL0_MMHUB_DEEPSLEEP_OVERRIDE
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB__SHIFT 0x11
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB_MASK 0x00020000L
+//PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0__SHIFT 0x0
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1__SHIFT 0x1
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2__SHIFT 0x2
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3__SHIFT 0x3
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4__SHIFT 0x4
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5__SHIFT 0x5
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6__SHIFT 0x6
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7__SHIFT 0x7
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8__SHIFT 0x8
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9__SHIFT 0x9
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10__SHIFT 0xa
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11__SHIFT 0xb
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12__SHIFT 0xc
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13__SHIFT 0xd
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14__SHIFT 0xe
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15__SHIFT 0xf
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16__SHIFT 0x10
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0_MASK 0x00000001L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1_MASK 0x00000002L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2_MASK 0x00000004L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3_MASK 0x00000008L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4_MASK 0x00000010L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5_MASK 0x00000020L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6_MASK 0x00000040L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7_MASK 0x00000080L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8_MASK 0x00000100L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9_MASK 0x00000200L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10_MASK 0x00000400L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11_MASK 0x00000800L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12_MASK 0x00001000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13_MASK 0x00002000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14_MASK 0x00004000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15_MASK 0x00008000L
+#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16_MASK 0x00010000L
+//PCTL0_PG_IGNORE_DEEPSLEEP
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x0
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x1
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x2
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x3
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x4
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x5
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x6
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x7
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x8
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0x9
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xa
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xb
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xc
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xd
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xe
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0xf
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x10
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS_ATHUB__SHIFT 0x11
+#define PCTL0_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x12
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000001L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000002L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000004L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000008L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000010L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000020L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000040L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000080L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000100L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000200L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000400L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00000800L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00001000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00002000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00004000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00008000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00010000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__DS_ATHUB_MASK 0x00020000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00040000L
+//PCTL0_PG_IGNORE_DEEPSLEEP_IB
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS0__SHIFT 0x0
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS1__SHIFT 0x1
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS2__SHIFT 0x2
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS3__SHIFT 0x3
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS4__SHIFT 0x4
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS5__SHIFT 0x5
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS6__SHIFT 0x6
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS7__SHIFT 0x7
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS8__SHIFT 0x8
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS9__SHIFT 0x9
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS10__SHIFT 0xa
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS11__SHIFT 0xb
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS12__SHIFT 0xc
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS13__SHIFT 0xd
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS14__SHIFT 0xe
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS15__SHIFT 0xf
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS16__SHIFT 0x10
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__ALLIPS__SHIFT 0x11
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS0_MASK 0x00000001L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS1_MASK 0x00000002L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS2_MASK 0x00000004L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS3_MASK 0x00000008L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS4_MASK 0x00000010L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS5_MASK 0x00000020L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS6_MASK 0x00000040L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS7_MASK 0x00000080L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS8_MASK 0x00000100L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS9_MASK 0x00000200L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS10_MASK 0x00000400L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS11_MASK 0x00000800L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS12_MASK 0x00001000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS13_MASK 0x00002000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS14_MASK 0x00004000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS15_MASK 0x00008000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS16_MASK 0x00010000L
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__ALLIPS_MASK 0x00020000L
+//PCTL0_SLICE0_CFG_DAGB_BUSY
+#define PCTL0_SLICE0_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE0_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE0_CFG_DS_ALLOW
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE0_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE1_CFG_DAGB_BUSY
+#define PCTL0_SLICE1_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE1_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE1_CFG_DS_ALLOW
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE1_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE2_CFG_DAGB_BUSY
+#define PCTL0_SLICE2_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE2_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE2_CFG_DS_ALLOW
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE2_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE3_CFG_DAGB_BUSY
+#define PCTL0_SLICE3_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE3_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE3_CFG_DS_ALLOW
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE3_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_SLICE4_CFG_DAGB_BUSY
+#define PCTL0_SLICE4_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL0_SLICE4_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL0_SLICE4_CFG_DS_ALLOW
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL0_SLICE4_CFG_DS_ALLOW_IB
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL0_UTCL2_MISC
+#define PCTL0_UTCL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb
+#define PCTL0_UTCL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc
+#define PCTL0_UTCL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf
+#define PCTL0_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10
+#define PCTL0_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_UTCL2_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_UTCL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L
+#define PCTL0_UTCL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L
+#define PCTL0_UTCL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L
+#define PCTL0_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L
+#define PCTL0_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_UTCL2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE0_MISC
+#define PCTL0_SLICE0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE0_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE0_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE0_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE0_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE1_MISC
+#define PCTL0_SLICE1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE1_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE1_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE2_MISC
+#define PCTL0_SLICE2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE2_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE3_MISC
+#define PCTL0_SLICE3_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE3_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE3_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE3_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE3_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE3_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE3_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE3_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE3_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE3_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_SLICE4_MISC
+#define PCTL0_SLICE4_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL0_SLICE4_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL0_SLICE4_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL0_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL0_SLICE4_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL0_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL0_SLICE4_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL0_SLICE4_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL0_SLICE4_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL0_SLICE4_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL0_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL0_SLICE4_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL0_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL0_SLICE4_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL0_UTCL2_RENG_EXECUTE
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FFCL
+#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x00FFE000L
+//PCTL0_SLICE0_RENG_EXECUTE
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL0_SLICE1_RENG_EXECUTE
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL0_SLICE2_RENG_EXECUTE
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL0_SLICE3_RENG_EXECUTE
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL0_SLICE4_RENG_EXECUTE
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL0_UTCL2_RENG_RAM_INDEX
+#define PCTL0_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000007FFL
+//PCTL0_UTCL2_RENG_RAM_DATA
+#define PCTL0_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_SLICE0_RENG_RAM_INDEX
+#define PCTL0_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL0_SLICE0_RENG_RAM_DATA
+#define PCTL0_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_SLICE1_RENG_RAM_INDEX
+#define PCTL0_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL0_SLICE1_RENG_RAM_DATA
+#define PCTL0_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_SLICE2_RENG_RAM_INDEX
+#define PCTL0_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL0_SLICE2_RENG_RAM_DATA
+#define PCTL0_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_SLICE3_RENG_RAM_INDEX
+#define PCTL0_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL0_SLICE3_RENG_RAM_DATA
+#define PCTL0_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_SLICE4_RENG_RAM_INDEX
+#define PCTL0_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL0_SLICE4_RENG_RAM_DATA
+#define PCTL0_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_l1tlb_vml1dec
+//VML1_0_MC_VM_MX_L1_TLB0_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB1_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB2_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB3_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB4_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB5_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB6_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_0_MC_VM_MX_L1_TLB7_STATUS
+#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0
+#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L
+#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec
+//VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec
+//VML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO
+#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI
+#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_atcl2dec
+//ATCL2_0_ATC_L2_CNTL
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define ATCL2_0_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define ATCL2_0_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define ATCL2_0_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define ATCL2_0_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+//ATCL2_0_ATC_L2_CNTL2
+#define ATCL2_0_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define ATCL2_0_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
+#define ATCL2_0_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE__SHIFT 0x15
+#define ATCL2_0_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE__SHIFT 0x1b
+#define ATCL2_0_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define ATCL2_0_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
+#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
+#define ATCL2_0_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE_MASK 0x07E00000L
+#define ATCL2_0_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE_MASK 0x08000000L
+//ATCL2_0_ATC_L2_CACHE_DATA0
+#define ATCL2_0_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define ATCL2_0_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
+#define ATCL2_0_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
+#define ATCL2_0_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
+//ATCL2_0_ATC_L2_CACHE_DATA1
+#define ATCL2_0_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//ATCL2_0_ATC_L2_CACHE_DATA2
+#define ATCL2_0_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATCL2_0_ATC_L2_CNTL3
+#define ATCL2_0_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
+#define ATCL2_0_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9
+#define ATCL2_0_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
+#define ATCL2_0_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
+#define ATCL2_0_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L
+//ATCL2_0_ATC_L2_STATUS
+#define ATCL2_0_ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define ATCL2_0_ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
+#define ATCL2_0_ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define ATCL2_0_ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL
+//ATCL2_0_ATC_L2_STATUS2
+#define ATCL2_0_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define ATCL2_0_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define ATCL2_0_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define ATCL2_0_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//ATCL2_0_ATC_L2_STATUS3
+#define ATCL2_0_ATC_L2_STATUS3__BUSY__SHIFT 0x0
+#define ATCL2_0_ATC_L2_STATUS3__PARITY_ERROR_INFO__SHIFT 0x1
+#define ATCL2_0_ATC_L2_STATUS3__BUSY_MASK 0x00000001L
+#define ATCL2_0_ATC_L2_STATUS3__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL
+//ATCL2_0_ATC_L2_MISC_CG
+#define ATCL2_0_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define ATCL2_0_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define ATCL2_0_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//ATCL2_0_ATC_L2_MEM_POWER_LS
+#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATCL2_0_ATC_L2_CGTT_CLK_CTRL
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//ATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATCL2_0_ATC_L2_CNTL4
+#define ATCL2_0_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x0
+#define ATCL2_0_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0xa
+#define ATCL2_0_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000003FFL
+#define ATCL2_0_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000FFC00L
+//ATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES
+#define ATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0
+#define ATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL
+
+
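+/*
+ * Illustrative sketch (not part of the generated header): each register
+ * field above is described by a _MASK/__SHIFT pair, which are meant to be
+ * used together for read-modify-write field updates.  The helper below
+ * shows the pattern; the read_reg()/write_reg() accessors and the
+ * mmATCL2_0_ATC_L2_CGTT_CLK_CTRL offset macro (normally provided by the
+ * companion *_offset.h header) are assumed placeholders here.
+ */
+#include <stdint.h>
+
+static inline uint32_t reg_set_field(uint32_t reg_val, uint32_t mask,
+				     uint32_t shift, uint32_t field_val)
+{
+	/* Clear the field, then insert the new value at its bit position. */
+	return (reg_val & ~mask) | ((field_val << shift) & mask);
+}
+
+/*
+ * Example flow (assumed accessors), overriding MGLS in the ATC L2 clock
+ * gating control register:
+ *
+ *	uint32_t v = read_reg(mmATCL2_0_ATC_L2_CGTT_CLK_CTRL);
+ *	v = reg_set_field(v, ATCL2_0_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK,
+ *			  ATCL2_0_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT, 1);
+ *	write_reg(mmATCL2_0_ATC_L2_CGTT_CLK_CTRL, v);
+ */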
+// addressBlock: mmhub_utcl2_vml2pfdec
+//VML2PF0_VM_L2_CNTL
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define VML2PF0_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define VML2PF0_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define VML2PF0_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define VML2PF0_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define VML2PF0_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define VML2PF0_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define VML2PF0_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VML2PF0_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define VML2PF0_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VML2PF0_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VML2PF0_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VML2PF0_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VML2PF0_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define VML2PF0_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//VML2PF0_VM_L2_CNTL2
+#define VML2PF0_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define VML2PF0_VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define VML2PF0_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define VML2PF0_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define VML2PF0_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define VML2PF0_VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define VML2PF0_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define VML2PF0_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VML2PF0_VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VML2PF0_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VML2PF0_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VML2PF0_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define VML2PF0_VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define VML2PF0_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//VML2PF0_VM_L2_CNTL3
+#define VML2PF0_VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define VML2PF0_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define VML2PF0_VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VML2PF0_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//VML2PF0_VM_L2_STATUS
+#define VML2PF0_VM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define VML2PF0_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define VML2PF0_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define VML2PF0_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define VML2PF0_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define VML2PF0_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define VML2PF0_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define VML2PF0_VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VML2PF0_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define VML2PF0_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define VML2PF0_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define VML2PF0_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define VML2PF0_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define VML2PF0_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VML2PF0_VM_L2_PROTECTION_FAULT_CNTL
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3
+#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4
+#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_PROTECTION_FAULT_STATUS
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+//VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32
+#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32
+#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//VML2PF0_VM_L2_CNTL4
+#define VML2PF0_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define VML2PF0_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define VML2PF0_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define VML2PF0_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define VML2PF0_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define VML2PF0_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define VML2PF0_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define VML2PF0_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+//VML2PF0_VM_L2_MM_GROUP_RT_CLASSES
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VML2PF0_VM_L2_CACHE_PARITY_CNTL
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//VML2PF0_VM_L2_CGTT_CLK_CTRL
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+
+
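+/*
+ * Illustrative sketch (not part of the generated header): decoding a raw
+ * VML2PF0_VM_L2_PROTECTION_FAULT_STATUS value with the _MASK/__SHIFT pairs
+ * defined above.  The status argument and the printf() reporting are
+ * placeholders for whatever a real fault handler does with these fields.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static inline uint32_t reg_get_field(uint32_t reg_val, uint32_t mask,
+				     uint32_t shift)
+{
+	/* Isolate the field bits, then shift them down to bit 0. */
+	return (reg_val & mask) >> shift;
+}
+
+static void decode_vml2pf_fault_status(uint32_t status)
+{
+	uint32_t more = reg_get_field(status,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT);
+	uint32_t walker = reg_get_field(status,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT);
+	uint32_t cid = reg_get_field(status,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__CID_MASK,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT);
+	uint32_t vmid = reg_get_field(status,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT);
+	uint32_t rw = reg_get_field(status,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__RW_MASK,
+			VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT);
+
+	printf("VM L2 protection fault: more=%u walker_error=%u cid=%u vmid=%u %s\n",
+	       more, walker, cid, vmid, rw ? "write" : "read");
+}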
+// addressBlock: mmhub_utcl2_vml2vcdec
+//VML2VC0_VM_CONTEXT0_CNTL
+#define VML2VC0_VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT1_CNTL
+#define VML2VC0_VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT2_CNTL
+#define VML2VC0_VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT3_CNTL
+#define VML2VC0_VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT4_CNTL
+#define VML2VC0_VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT5_CNTL
+#define VML2VC0_VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT6_CNTL
+#define VML2VC0_VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT7_CNTL
+#define VML2VC0_VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT8_CNTL
+#define VML2VC0_VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT9_CNTL
+#define VML2VC0_VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT10_CNTL
+#define VML2VC0_VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT11_CNTL
+#define VML2VC0_VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT12_CNTL
+#define VML2VC0_VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT13_CNTL
+#define VML2VC0_VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT14_CNTL
+#define VML2VC0_VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXT15_CNTL
+#define VML2VC0_VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC0_VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC0_VM_CONTEXTS_DISABLE
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//VML2VC0_VM_INVALIDATE_ENG0_SEM
+#define VML2VC0_VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG1_SEM
+#define VML2VC0_VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG2_SEM
+#define VML2VC0_VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG3_SEM
+#define VML2VC0_VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG4_SEM
+#define VML2VC0_VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG5_SEM
+#define VML2VC0_VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG6_SEM
+#define VML2VC0_VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG7_SEM
+#define VML2VC0_VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG8_SEM
+#define VML2VC0_VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG9_SEM
+#define VML2VC0_VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG10_SEM
+#define VML2VC0_VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG11_SEM
+#define VML2VC0_VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG12_SEM
+#define VML2VC0_VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG13_SEM
+#define VML2VC0_VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG14_SEM
+#define VML2VC0_VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG15_SEM
+#define VML2VC0_VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG16_SEM
+#define VML2VC0_VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG17_SEM
+#define VML2VC0_VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC0_VM_INVALIDATE_ENG0_REQ
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG1_REQ
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG2_REQ
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG3_REQ
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG4_REQ
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG5_REQ
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG6_REQ
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG7_REQ
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG8_REQ
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG9_REQ
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG10_REQ
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG11_REQ
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG12_REQ
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG13_REQ
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG14_REQ
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG15_REQ
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG16_REQ
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG17_REQ
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC0_VM_INVALIDATE_ENG0_ACK
+#define VML2VC0_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG1_ACK
+#define VML2VC0_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG2_ACK
+#define VML2VC0_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG3_ACK
+#define VML2VC0_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG4_ACK
+#define VML2VC0_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG5_ACK
+#define VML2VC0_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG6_ACK
+#define VML2VC0_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG7_ACK
+#define VML2VC0_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG8_ACK
+#define VML2VC0_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG9_ACK
+#define VML2VC0_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG10_ACK
+#define VML2VC0_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG11_ACK
+#define VML2VC0_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG12_ACK
+#define VML2VC0_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG13_ACK
+#define VML2VC0_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG14_ACK
+#define VML2VC0_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG15_ACK
+#define VML2VC0_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG16_ACK
+#define VML2VC0_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG17_ACK
+#define VML2VC0_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC0_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC0_VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//VMSHAREDPF0_MC_VM_NB_MMIOBASE
+#define VMSHAREDPF0_MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//VMSHAREDPF0_MC_VM_NB_MMIOLIMIT
+#define VMSHAREDPF0_MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//VMSHAREDPF0_MC_VM_NB_PCI_CTRL
+#define VMSHAREDPF0_MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define VMSHAREDPF0_MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//VMSHAREDPF0_MC_VM_NB_PCI_ARB
+#define VMSHAREDPF0_MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define VMSHAREDPF0_MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//VMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1
+#define VMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define VMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2
+#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//VMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2
+#define VMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//VMSHAREDPF0_MC_VM_FB_OFFSET
+#define VMSHAREDPF0_MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//VMSHAREDPF0_MC_VM_STEERING
+#define VMSHAREDPF0_MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ
+#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//VMSHAREDPF0_MC_MEM_POWER_LS
+#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF0_MC_VM_APT_CNTL
+#define VMSHAREDPF0_MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define VMSHAREDPF0_MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define VMSHAREDPF0_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+//VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START
+#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END
+#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL
+#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x0000000FL
+#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x000000F0L
+//VMSHAREDPF0_MC_VM_XGMI_LFB_SIZE
+#define VMSHAREDPF0_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0001FFFFL
+//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL
+#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
+#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec
+//VMSHAREDVC0_MC_VM_FB_LOCATION_BASE
+#define VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//VMSHAREDVC0_MC_VM_FB_LOCATION_TOP
+#define VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//VMSHAREDVC0_MC_VM_AGP_TOP
+#define VMSHAREDVC0_MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//VMSHAREDVC0_MC_VM_AGP_BOT
+#define VMSHAREDVC0_MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//VMSHAREDVC0_MC_VM_AGP_BASE
+#define VMSHAREDVC0_MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
+#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1
+#define VMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define VMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//VMSHAREDHV0_MC_VM_MARC_BASE_LO_0
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_BASE_LO_1
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_BASE_LO_2
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_BASE_LO_3
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_BASE_HI_0
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_BASE_HI_1
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_BASE_HI_2
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_BASE_HI_3
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_1
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_2
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_3
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_LEN_LO_0
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_LEN_LO_1
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_LEN_LO_2
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_LEN_LO_3
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//VMSHAREDHV0_MC_VM_MARC_LEN_HI_0
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_LEN_HI_1
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_LEN_HI_2
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//VMSHAREDHV0_MC_VM_MARC_LEN_HI_3
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//VMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER
+#define VMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define VMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//VMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define VMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define VMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID
+#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec
+//ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO
+#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI
+#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec
+//ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_utcl2_vml2pldec
+//VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_utcl2_vml2prdec
+//VML2PR0_MC_VM_L2_PERFCOUNTER_LO
+#define VML2PR0_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define VML2PR0_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//VML2PR0_MC_VM_L2_PERFCOUNTER_HI
+#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_dagb_dagbdec5
+//DAGB5_RDCLI0
+#define DAGB5_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI1
+#define DAGB5_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI2
+#define DAGB5_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI3
+#define DAGB5_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI4
+#define DAGB5_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI5
+#define DAGB5_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI6
+#define DAGB5_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI7
+#define DAGB5_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI8
+#define DAGB5_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI9
+#define DAGB5_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI10
+#define DAGB5_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI11
+#define DAGB5_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI12
+#define DAGB5_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI13
+#define DAGB5_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI14
+#define DAGB5_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RDCLI15
+#define DAGB5_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB5_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB5_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB5_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB5_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB5_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB5_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB5_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB5_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB5_RD_CNTL
+#define DAGB5_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB5_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB5_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB5_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB5_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB5_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB5_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB5_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB5_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB5_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB5_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB5_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB5_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB5_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB5_RD_GMI_CNTL
+#define DAGB5_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB5_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB5_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB5_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB5_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB5_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB5_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB5_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB5_RD_ADDR_DAGB
+#define DAGB5_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB5_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB5_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB5_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB5_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB5_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB5_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB5_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB5_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB5_RD_CGTT_CLK_CTRL
+#define DAGB5_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB5_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB5_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB5_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB5_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB5_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB5_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB5_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB5_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB5_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB5_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB5_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB5_RD_VC0_CNTL
+#define DAGB5_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC1_CNTL
+#define DAGB5_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC2_CNTL
+#define DAGB5_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC3_CNTL
+#define DAGB5_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC4_CNTL
+#define DAGB5_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC5_CNTL
+#define DAGB5_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC6_CNTL
+#define DAGB5_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_VC7_CNTL
+#define DAGB5_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_RD_CNTL_MISC
+#define DAGB5_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB5_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB5_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB5_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB5_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB5_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB5_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB5_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB5_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB5_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB5_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB5_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB5_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB5_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB5_RD_TLB_CREDIT
+#define DAGB5_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB5_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB5_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB5_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB5_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB5_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB5_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB5_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB5_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB5_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB5_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB5_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB5_RDCLI_ASK_PENDING
+#define DAGB5_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_RDCLI_GO_PENDING
+#define DAGB5_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_RDCLI_GBLSEND_PENDING
+#define DAGB5_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_RDCLI_TLB_PENDING
+#define DAGB5_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_RDCLI_OARB_PENDING
+#define DAGB5_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_RDCLI_OSD_PENDING
+#define DAGB5_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI0
+#define DAGB5_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI1
+#define DAGB5_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI2
+#define DAGB5_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI3
+#define DAGB5_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI4
+#define DAGB5_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI5
+#define DAGB5_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI6
+#define DAGB5_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI7
+#define DAGB5_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI8
+#define DAGB5_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI9
+#define DAGB5_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI10
+#define DAGB5_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI11
+#define DAGB5_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI12
+#define DAGB5_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI13
+#define DAGB5_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI14
+#define DAGB5_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WRCLI15
+#define DAGB5_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB5_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB5_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB5_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB5_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB5_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB5_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB5_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB5_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB5_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB5_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB5_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB5_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB5_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB5_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB5_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB5_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB5_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB5_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB5_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB5_WR_CNTL
+#define DAGB5_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB5_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB5_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB5_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB5_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB5_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB5_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB5_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB5_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB5_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB5_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB5_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB5_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB5_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB5_WR_GMI_CNTL
+#define DAGB5_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB5_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB5_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB5_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB5_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB5_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB5_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB5_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB5_WR_ADDR_DAGB
+#define DAGB5_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB5_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB5_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB5_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB5_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB5_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB5_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB5_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB5_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB5_WR_CGTT_CLK_CTRL
+#define DAGB5_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB5_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB5_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB5_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB5_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB5_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB5_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB5_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB5_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB5_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB5_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB5_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB5_WR_DATA_DAGB
+#define DAGB5_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB5_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB5_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB5_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB5_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB5_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB5_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB5_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB5_WR_DATA_DAGB_MAX_BURST0
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB5_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB5_WR_DATA_DAGB_MAX_BURST1
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB5_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB5_WR_VC0_CNTL
+#define DAGB5_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC1_CNTL
+#define DAGB5_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC2_CNTL
+#define DAGB5_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC3_CNTL
+#define DAGB5_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC4_CNTL
+#define DAGB5_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC5_CNTL
+#define DAGB5_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC6_CNTL
+#define DAGB5_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_VC7_CNTL
+#define DAGB5_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB5_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB5_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB5_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB5_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB5_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB5_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB5_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB5_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB5_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB5_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB5_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB5_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB5_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB5_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB5_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB5_WR_CNTL_MISC
+#define DAGB5_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB5_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB5_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB5_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB5_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB5_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB5_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB5_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB5_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB5_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB5_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB5_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB5_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB5_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB5_WR_TLB_CREDIT
+#define DAGB5_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB5_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB5_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB5_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB5_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB5_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB5_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB5_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB5_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB5_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB5_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB5_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB5_WR_DATA_CREDIT
+#define DAGB5_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB5_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB5_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB5_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB5_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB5_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB5_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB5_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB5_WR_MISC_CREDIT
+#define DAGB5_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB5_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB5_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB5_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB5_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB5_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB5_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB5_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB5_WRCLI_ASK_PENDING
+#define DAGB5_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GO_PENDING
+#define DAGB5_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GBLSEND_PENDING
+#define DAGB5_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_TLB_PENDING
+#define DAGB5_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_OARB_PENDING
+#define DAGB5_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_OSD_PENDING
+#define DAGB5_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_DBUS_ASK_PENDING
+#define DAGB5_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_DBUS_GO_PENDING
+#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_DAGB_DLY
+#define DAGB5_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB5_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB5_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB5_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB5_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB5_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB5_CNTL_MISC
+#define DAGB5_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB5_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB5_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB5_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB5_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB5_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB5_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB5_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB5_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB5_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB5_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB5_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB5_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB5_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB5_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB5_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB5_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB5_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB5_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB5_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB5_CNTL_MISC2
+#define DAGB5_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB5_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB5_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB5_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB5_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB5_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB5_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB5_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB5_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB5_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB5_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB5_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB5_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB5_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB5_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB5_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB5_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB5_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB5_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB5_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB5_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB5_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB5_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB5_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB5_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB5_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB5_FIFO_EMPTY
+#define DAGB5_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB5_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB5_FIFO_FULL
+#define DAGB5_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB5_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB5_WR_CREDITS_FULL
+#define DAGB5_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB5_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB5_RD_CREDITS_FULL
+#define DAGB5_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB5_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB5_PERFCOUNTER_LO
+#define DAGB5_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB5_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB5_PERFCOUNTER_HI
+#define DAGB5_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB5_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB5_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB5_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB5_PERFCOUNTER0_CFG
+#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB5_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB5_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB5_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB5_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB5_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB5_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB5_PERFCOUNTER1_CFG
+#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB5_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB5_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB5_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB5_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB5_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB5_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB5_PERFCOUNTER2_CFG
+#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB5_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB5_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB5_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB5_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB5_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB5_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB5_PERFCOUNTER_RSLT_CNTL
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB5_RESERVE0
+#define DAGB5_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE1
+#define DAGB5_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE2
+#define DAGB5_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE3
+#define DAGB5_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE4
+#define DAGB5_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE5
+#define DAGB5_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE6
+#define DAGB5_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE7
+#define DAGB5_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE8
+#define DAGB5_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE9
+#define DAGB5_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE10
+#define DAGB5_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE11
+#define DAGB5_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE12
+#define DAGB5_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB5_RESERVE13
+#define DAGB5_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB5_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_dagb_dagbdec6
+//DAGB6_RDCLI0
+#define DAGB6_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI1
+#define DAGB6_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI2
+#define DAGB6_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI3
+#define DAGB6_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI4
+#define DAGB6_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI5
+#define DAGB6_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI6
+#define DAGB6_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI7
+#define DAGB6_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI8
+#define DAGB6_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI9
+#define DAGB6_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI10
+#define DAGB6_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI11
+#define DAGB6_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI12
+#define DAGB6_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI13
+#define DAGB6_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI14
+#define DAGB6_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RDCLI15
+#define DAGB6_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB6_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB6_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB6_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB6_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB6_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB6_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB6_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB6_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB6_RD_CNTL
+#define DAGB6_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB6_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB6_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB6_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB6_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB6_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB6_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB6_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB6_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB6_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB6_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB6_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB6_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB6_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB6_RD_GMI_CNTL
+#define DAGB6_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB6_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB6_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB6_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB6_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB6_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB6_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB6_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB6_RD_ADDR_DAGB
+#define DAGB6_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB6_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB6_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB6_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB6_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB6_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB6_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB6_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB6_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB6_RD_CGTT_CLK_CTRL
+#define DAGB6_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB6_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB6_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB6_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB6_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB6_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB6_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB6_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB6_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB6_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB6_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB6_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB6_RD_VC0_CNTL
+#define DAGB6_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC1_CNTL
+#define DAGB6_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC2_CNTL
+#define DAGB6_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC3_CNTL
+#define DAGB6_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC4_CNTL
+#define DAGB6_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC5_CNTL
+#define DAGB6_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC6_CNTL
+#define DAGB6_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_VC7_CNTL
+#define DAGB6_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_RD_CNTL_MISC
+#define DAGB6_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB6_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB6_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB6_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB6_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB6_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB6_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB6_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB6_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB6_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB6_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB6_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB6_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB6_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB6_RD_TLB_CREDIT
+#define DAGB6_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB6_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB6_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB6_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB6_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB6_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB6_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB6_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB6_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB6_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB6_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB6_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB6_RDCLI_ASK_PENDING
+#define DAGB6_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_RDCLI_GO_PENDING
+#define DAGB6_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_RDCLI_GBLSEND_PENDING
+#define DAGB6_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_RDCLI_TLB_PENDING
+#define DAGB6_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_RDCLI_OARB_PENDING
+#define DAGB6_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_RDCLI_OSD_PENDING
+#define DAGB6_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI0
+#define DAGB6_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI1
+#define DAGB6_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI2
+#define DAGB6_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI3
+#define DAGB6_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI4
+#define DAGB6_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI5
+#define DAGB6_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI6
+#define DAGB6_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI7
+#define DAGB6_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI8
+#define DAGB6_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI9
+#define DAGB6_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI10
+#define DAGB6_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI11
+#define DAGB6_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI12
+#define DAGB6_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI13
+#define DAGB6_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI14
+#define DAGB6_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WRCLI15
+#define DAGB6_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB6_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB6_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB6_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB6_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB6_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB6_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB6_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB6_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB6_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB6_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB6_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB6_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB6_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB6_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB6_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB6_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB6_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB6_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB6_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB6_WR_CNTL
+#define DAGB6_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB6_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB6_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB6_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB6_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB6_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB6_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB6_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB6_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB6_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB6_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB6_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB6_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB6_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB6_WR_GMI_CNTL
+#define DAGB6_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB6_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB6_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB6_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB6_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB6_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB6_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB6_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB6_WR_ADDR_DAGB
+#define DAGB6_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB6_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB6_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB6_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB6_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB6_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB6_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB6_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB6_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB6_WR_CGTT_CLK_CTRL
+#define DAGB6_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB6_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB6_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB6_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB6_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB6_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB6_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB6_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB6_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB6_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB6_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB6_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB6_WR_DATA_DAGB
+#define DAGB6_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB6_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB6_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB6_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB6_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB6_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB6_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB6_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB6_WR_DATA_DAGB_MAX_BURST0
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB6_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB6_WR_DATA_DAGB_MAX_BURST1
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB6_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB6_WR_VC0_CNTL
+#define DAGB6_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC1_CNTL
+#define DAGB6_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC2_CNTL
+#define DAGB6_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC3_CNTL
+#define DAGB6_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC4_CNTL
+#define DAGB6_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC5_CNTL
+#define DAGB6_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC6_CNTL
+#define DAGB6_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_VC7_CNTL
+#define DAGB6_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB6_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB6_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB6_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB6_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB6_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB6_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB6_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB6_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB6_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB6_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB6_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB6_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB6_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB6_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB6_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB6_WR_CNTL_MISC
+#define DAGB6_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB6_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB6_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB6_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB6_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB6_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB6_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB6_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB6_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB6_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB6_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB6_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB6_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB6_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB6_WR_TLB_CREDIT
+#define DAGB6_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB6_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB6_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB6_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB6_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB6_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB6_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB6_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB6_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB6_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB6_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB6_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB6_WR_DATA_CREDIT
+#define DAGB6_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB6_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB6_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB6_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB6_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB6_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB6_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB6_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB6_WR_MISC_CREDIT
+#define DAGB6_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB6_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB6_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB6_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB6_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB6_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB6_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB6_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB6_WRCLI_ASK_PENDING
+#define DAGB6_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GO_PENDING
+#define DAGB6_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GBLSEND_PENDING
+#define DAGB6_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_TLB_PENDING
+#define DAGB6_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_OARB_PENDING
+#define DAGB6_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_OSD_PENDING
+#define DAGB6_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_DBUS_ASK_PENDING
+#define DAGB6_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_DBUS_GO_PENDING
+#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_DAGB_DLY
+#define DAGB6_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB6_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB6_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB6_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB6_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB6_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB6_CNTL_MISC
+#define DAGB6_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB6_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB6_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB6_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB6_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB6_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB6_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB6_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB6_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB6_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB6_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB6_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB6_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB6_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB6_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB6_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB6_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB6_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB6_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB6_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB6_CNTL_MISC2
+#define DAGB6_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB6_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB6_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB6_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB6_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB6_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB6_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB6_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB6_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB6_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB6_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB6_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB6_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB6_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB6_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB6_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB6_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB6_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB6_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB6_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB6_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB6_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB6_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB6_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB6_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB6_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB6_FIFO_EMPTY
+#define DAGB6_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB6_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB6_FIFO_FULL
+#define DAGB6_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB6_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB6_WR_CREDITS_FULL
+#define DAGB6_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB6_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB6_RD_CREDITS_FULL
+#define DAGB6_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB6_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB6_PERFCOUNTER_LO
+#define DAGB6_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB6_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB6_PERFCOUNTER_HI
+#define DAGB6_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB6_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB6_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB6_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB6_PERFCOUNTER0_CFG
+#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB6_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB6_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB6_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB6_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB6_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB6_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB6_PERFCOUNTER1_CFG
+#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB6_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB6_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB6_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB6_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB6_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB6_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB6_PERFCOUNTER2_CFG
+#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB6_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB6_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB6_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB6_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB6_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB6_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB6_PERFCOUNTER_RSLT_CNTL
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB6_RESERVE0
+#define DAGB6_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE1
+#define DAGB6_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE2
+#define DAGB6_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE3
+#define DAGB6_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE4
+#define DAGB6_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE5
+#define DAGB6_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE6
+#define DAGB6_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE7
+#define DAGB6_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE8
+#define DAGB6_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE9
+#define DAGB6_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE10
+#define DAGB6_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE11
+#define DAGB6_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE12
+#define DAGB6_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB6_RESERVE13
+#define DAGB6_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB6_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_dagb_dagbdec7
+//DAGB7_RDCLI0
+#define DAGB7_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI1
+#define DAGB7_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI2
+#define DAGB7_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI3
+#define DAGB7_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI4
+#define DAGB7_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI5
+#define DAGB7_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI6
+#define DAGB7_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI7
+#define DAGB7_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI8
+#define DAGB7_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI9
+#define DAGB7_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI10
+#define DAGB7_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI11
+#define DAGB7_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI12
+#define DAGB7_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI13
+#define DAGB7_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI14
+#define DAGB7_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RDCLI15
+#define DAGB7_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB7_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB7_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB7_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB7_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB7_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB7_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB7_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB7_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB7_RD_CNTL
+#define DAGB7_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB7_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB7_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB7_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB7_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB7_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB7_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB7_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB7_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB7_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB7_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB7_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB7_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB7_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB7_RD_GMI_CNTL
+#define DAGB7_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB7_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB7_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB7_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB7_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB7_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB7_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB7_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB7_RD_ADDR_DAGB
+#define DAGB7_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB7_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB7_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB7_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB7_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB7_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB7_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB7_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB7_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB7_RD_CGTT_CLK_CTRL
+#define DAGB7_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB7_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB7_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB7_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB7_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB7_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB7_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB7_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB7_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB7_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB7_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB7_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB7_RD_VC0_CNTL
+#define DAGB7_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC1_CNTL
+#define DAGB7_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC2_CNTL
+#define DAGB7_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC3_CNTL
+#define DAGB7_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC4_CNTL
+#define DAGB7_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC5_CNTL
+#define DAGB7_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC6_CNTL
+#define DAGB7_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_VC7_CNTL
+#define DAGB7_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_RD_CNTL_MISC
+#define DAGB7_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB7_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB7_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB7_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB7_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB7_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB7_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB7_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB7_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB7_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB7_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB7_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB7_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB7_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB7_RD_TLB_CREDIT
+#define DAGB7_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB7_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB7_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB7_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB7_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB7_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB7_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB7_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB7_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB7_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB7_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB7_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB7_RDCLI_ASK_PENDING
+#define DAGB7_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_RDCLI_GO_PENDING
+#define DAGB7_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_RDCLI_GBLSEND_PENDING
+#define DAGB7_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_RDCLI_TLB_PENDING
+#define DAGB7_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_RDCLI_OARB_PENDING
+#define DAGB7_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_RDCLI_OSD_PENDING
+#define DAGB7_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI0
+#define DAGB7_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI1
+#define DAGB7_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI2
+#define DAGB7_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI3
+#define DAGB7_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI4
+#define DAGB7_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI5
+#define DAGB7_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI6
+#define DAGB7_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI7
+#define DAGB7_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI8
+#define DAGB7_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI9
+#define DAGB7_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI10
+#define DAGB7_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI11
+#define DAGB7_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI12
+#define DAGB7_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI13
+#define DAGB7_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI14
+#define DAGB7_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WRCLI15
+#define DAGB7_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB7_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB7_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB7_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB7_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB7_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB7_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB7_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB7_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB7_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB7_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB7_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB7_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB7_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB7_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB7_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB7_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB7_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB7_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB7_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB7_WR_CNTL
+#define DAGB7_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB7_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB7_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB7_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB7_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB7_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB7_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB7_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB7_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB7_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB7_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB7_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB7_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB7_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB7_WR_GMI_CNTL
+#define DAGB7_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB7_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB7_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB7_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB7_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB7_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB7_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB7_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB7_WR_ADDR_DAGB
+#define DAGB7_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB7_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB7_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB7_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB7_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB7_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB7_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB7_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB7_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB7_WR_CGTT_CLK_CTRL
+#define DAGB7_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB7_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB7_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB7_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB7_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB7_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB7_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB7_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB7_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB7_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB7_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB7_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB7_WR_DATA_DAGB
+#define DAGB7_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB7_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB7_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB7_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB7_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB7_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB7_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB7_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB7_WR_DATA_DAGB_MAX_BURST0
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB7_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB7_WR_DATA_DAGB_MAX_BURST1
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB7_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB7_WR_VC0_CNTL
+#define DAGB7_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC1_CNTL
+#define DAGB7_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC2_CNTL
+#define DAGB7_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC3_CNTL
+#define DAGB7_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC4_CNTL
+#define DAGB7_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC5_CNTL
+#define DAGB7_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC6_CNTL
+#define DAGB7_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_VC7_CNTL
+#define DAGB7_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB7_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB7_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB7_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB7_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB7_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB7_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB7_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB7_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB7_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB7_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB7_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB7_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB7_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB7_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB7_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB7_WR_CNTL_MISC
+#define DAGB7_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB7_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB7_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB7_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB7_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB7_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB7_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a
+#define DAGB7_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB7_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB7_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB7_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB7_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB7_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+#define DAGB7_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L
+//DAGB7_WR_TLB_CREDIT
+#define DAGB7_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB7_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB7_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB7_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB7_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB7_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB7_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB7_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB7_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB7_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB7_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB7_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB7_WR_DATA_CREDIT
+#define DAGB7_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB7_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB7_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB7_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB7_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB7_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB7_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB7_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB7_WR_MISC_CREDIT
+#define DAGB7_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB7_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB7_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB7_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB7_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB7_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB7_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB7_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB7_WRCLI_ASK_PENDING
+#define DAGB7_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GO_PENDING
+#define DAGB7_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GBLSEND_PENDING
+#define DAGB7_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_TLB_PENDING
+#define DAGB7_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_OARB_PENDING
+#define DAGB7_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_OSD_PENDING
+#define DAGB7_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_DBUS_ASK_PENDING
+#define DAGB7_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_DBUS_GO_PENDING
+#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_DAGB_DLY
+#define DAGB7_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB7_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB7_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB7_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB7_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB7_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB7_CNTL_MISC
+#define DAGB7_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB7_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB7_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB7_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB7_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB7_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB7_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB7_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB7_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB7_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB7_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB7_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB7_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB7_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB7_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB7_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB7_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB7_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB7_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB7_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB7_CNTL_MISC2
+#define DAGB7_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB7_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB7_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB7_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB7_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB7_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB7_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB7_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB7_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB7_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB7_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB7_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb
+#define DAGB7_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11
+#define DAGB7_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB7_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB7_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB7_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB7_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB7_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB7_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB7_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB7_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB7_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB7_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+#define DAGB7_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L
+#define DAGB7_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L
+//DAGB7_FIFO_EMPTY
+#define DAGB7_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB7_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB7_FIFO_FULL
+#define DAGB7_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB7_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB7_WR_CREDITS_FULL
+#define DAGB7_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB7_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL
+//DAGB7_RD_CREDITS_FULL
+#define DAGB7_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB7_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB7_PERFCOUNTER_LO
+#define DAGB7_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB7_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB7_PERFCOUNTER_HI
+#define DAGB7_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB7_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB7_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB7_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB7_PERFCOUNTER0_CFG
+#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB7_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB7_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB7_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB7_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB7_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB7_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB7_PERFCOUNTER1_CFG
+#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB7_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB7_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB7_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB7_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB7_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB7_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB7_PERFCOUNTER2_CFG
+#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB7_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB7_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB7_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB7_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB7_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB7_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB7_PERFCOUNTER_RSLT_CNTL
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB7_RESERVE0
+#define DAGB7_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE1
+#define DAGB7_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE2
+#define DAGB7_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE3
+#define DAGB7_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE4
+#define DAGB7_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE5
+#define DAGB7_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE6
+#define DAGB7_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE7
+#define DAGB7_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE8
+#define DAGB7_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE9
+#define DAGB7_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE10
+#define DAGB7_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE11
+#define DAGB7_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE12
+#define DAGB7_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB7_RESERVE13
+#define DAGB7_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB7_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
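+/*
+ * Usage sketch: the field names in this block follow the
+ * <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK convention, so they can be
+ * consumed through the driver's REG_GET_FIELD()/REG_SET_FIELD()
+ * token-pasting helpers.  For example, given a value 'val' read from
+ * DAGB7_WR_CNTL:
+ *
+ *   sclk = REG_GET_FIELD(val, DAGB7_WR_CNTL, SCLK_FREQ);
+ *
+ * expands to
+ *
+ *   (val & DAGB7_WR_CNTL__SCLK_FREQ_MASK) >> DAGB7_WR_CNTL__SCLK_FREQ__SHIFT
+ */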
+
+
+// addressBlock: mmhub_ea_mmeadec5
+//MMEA5_DRAM_RD_CLI2GRP_MAP0
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA5_DRAM_RD_CLI2GRP_MAP1
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA5_DRAM_WR_CLI2GRP_MAP0
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA5_DRAM_WR_CLI2GRP_MAP1
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA5_DRAM_RD_GRP2VC_MAP
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA5_DRAM_WR_GRP2VC_MAP
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA5_DRAM_RD_LAZY
+#define MMEA5_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA5_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA5_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA5_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA5_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA5_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA5_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA5_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA5_DRAM_WR_LAZY
+#define MMEA5_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA5_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA5_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA5_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA5_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA5_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA5_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA5_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA5_DRAM_RD_CAM_CNTL
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA5_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA5_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA5_DRAM_WR_CAM_CNTL
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA5_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA5_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA5_DRAM_PAGE_BURST
+#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA5_DRAM_RD_PRI_AGE
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA5_DRAM_WR_PRI_AGE
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA5_DRAM_RD_PRI_QUEUING
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_DRAM_WR_PRI_QUEUING
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_DRAM_RD_PRI_FIXED
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_DRAM_WR_PRI_FIXED
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_DRAM_RD_PRI_URGENCY
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA5_DRAM_WR_PRI_URGENCY
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA5_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_GMI_RD_CLI2GRP_MAP0
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA5_GMI_RD_CLI2GRP_MAP1
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA5_GMI_WR_CLI2GRP_MAP0
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA5_GMI_WR_CLI2GRP_MAP1
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA5_GMI_RD_GRP2VC_MAP
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA5_GMI_WR_GRP2VC_MAP
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA5_GMI_RD_LAZY
+#define MMEA5_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA5_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA5_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA5_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA5_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA5_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA5_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA5_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA5_GMI_WR_LAZY
+#define MMEA5_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA5_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA5_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA5_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA5_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA5_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA5_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA5_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA5_GMI_RD_CAM_CNTL
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA5_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA5_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA5_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA5_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA5_GMI_WR_CAM_CNTL
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA5_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA5_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA5_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA5_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA5_GMI_PAGE_BURST
+#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA5_GMI_RD_PRI_AGE
+#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA5_GMI_WR_PRI_AGE
+#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA5_GMI_RD_PRI_QUEUING
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_GMI_WR_PRI_QUEUING
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_GMI_RD_PRI_FIXED
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_GMI_WR_PRI_FIXED
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_GMI_RD_PRI_URGENCY
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA5_GMI_WR_PRI_URGENCY
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA5_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA5_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA5_GMI_RD_PRI_QUANT_PRI1
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_GMI_RD_PRI_QUANT_PRI2
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_GMI_RD_PRI_QUANT_PRI3
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_GMI_WR_PRI_QUANT_PRI1
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_GMI_WR_PRI_QUANT_PRI2
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_GMI_WR_PRI_QUANT_PRI3
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_ADDRNORM_BASE_ADDR0
+#define MMEA5_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA5_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA5_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA5_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA5_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_LIMIT_ADDR0
+#define MMEA5_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA5_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA5_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_BASE_ADDR1
+#define MMEA5_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA5_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA5_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA5_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA5_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_LIMIT_ADDR1
+#define MMEA5_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA5_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA5_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_OFFSET_ADDR1
+#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA5_ADDRNORM_BASE_ADDR2
+#define MMEA5_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA5_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA5_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA5_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA5_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_LIMIT_ADDR2
+#define MMEA5_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA5_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA5_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_BASE_ADDR3
+#define MMEA5_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA5_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA5_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA5_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA5_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_LIMIT_ADDR3
+#define MMEA5_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA5_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA5_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_OFFSET_ADDR3
+#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA5_ADDRNORM_BASE_ADDR4
+#define MMEA5_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA5_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA5_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA5_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA5_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_LIMIT_ADDR4
+#define MMEA5_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA5_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA5_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_BASE_ADDR5
+#define MMEA5_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA5_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA5_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA5_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA5_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_LIMIT_ADDR5
+#define MMEA5_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA5_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA5_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA5_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA5_ADDRNORM_OFFSET_ADDR5
+#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA5_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA5_ADDRNORMGMI_HOLE_CNTL
+#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA5_ADDRDEC_BANK_CFG
+#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA5_ADDRDEC_MISC_CFG
+#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA5_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA5_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA5_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA5_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA5_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA5_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA5_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA5_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA5_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA5_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA5_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA5_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA5_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA5_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA5_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA5_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA5_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA5_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA5_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA5_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA5_ADDRDEC0_RM_SEL_CS01
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC0_RM_SEL_CS23
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA5_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA5_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA5_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA5_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA5_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA5_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA5_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA5_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA5_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA5_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA5_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA5_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA5_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA5_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA5_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA5_ADDRDEC1_RM_SEL_CS01
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC1_RM_SEL_CS23
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA5_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA5_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA5_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA5_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA5_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA5_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA5_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA5_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA5_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA5_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA5_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA5_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA5_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA5_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA5_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA5_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA5_ADDRDEC2_RM_SEL_CS01
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC2_RM_SEL_CS23
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA5_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA5_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA5_IO_RD_CLI2GRP_MAP0
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA5_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA5_IO_RD_CLI2GRP_MAP1
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA5_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA5_IO_WR_CLI2GRP_MAP0
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA5_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA5_IO_WR_CLI2GRP_MAP1
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA5_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA5_IO_RD_COMBINE_FLUSH
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA5_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA5_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA5_IO_WR_COMBINE_FLUSH
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA5_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA5_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA5_IO_GROUP_BURST
+#define MMEA5_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA5_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA5_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA5_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA5_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA5_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA5_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA5_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA5_IO_RD_PRI_AGE
+#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA5_IO_WR_PRI_AGE
+#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA5_IO_RD_PRI_QUEUING
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_IO_WR_PRI_QUEUING
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_IO_RD_PRI_FIXED
+#define MMEA5_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA5_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA5_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA5_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_IO_WR_PRI_FIXED
+#define MMEA5_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA5_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA5_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA5_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA5_IO_RD_PRI_URGENCY
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA5_IO_WR_PRI_URGENCY
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA5_IO_RD_PRI_URGENCY_MASKING
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA5_IO_WR_PRI_URGENCY_MASKING
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA5_IO_RD_PRI_QUANT_PRI1
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_IO_RD_PRI_QUANT_PRI2
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_IO_RD_PRI_QUANT_PRI3
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_IO_WR_PRI_QUANT_PRI1
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_IO_WR_PRI_QUANT_PRI2
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_IO_WR_PRI_QUANT_PRI3
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA5_SDP_ARB_DRAM
+#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA5_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA5_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA5_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA5_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA5_SDP_ARB_GMI
+#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA5_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA5_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA5_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA5_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA5_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA5_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA5_SDP_ARB_FINAL
+#define MMEA5_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA5_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA5_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA5_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA5_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA5_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA5_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA5_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA5_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA5_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA5_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA5_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA5_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA5_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA5_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA5_SDP_DRAM_PRIORITY
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA5_SDP_GMI_PRIORITY
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA5_SDP_IO_PRIORITY
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA5_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA5_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA5_SDP_CREDITS
+#define MMEA5_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA5_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA5_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA5_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA5_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA5_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA5_SDP_TAG_RESERVE0
+#define MMEA5_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA5_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA5_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA5_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA5_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA5_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA5_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA5_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA5_SDP_TAG_RESERVE1
+#define MMEA5_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA5_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA5_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA5_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA5_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA5_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA5_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA5_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA5_SDP_VCC_RESERVE0
+#define MMEA5_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA5_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA5_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA5_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA5_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA5_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA5_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA5_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA5_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA5_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA5_SDP_VCC_RESERVE1
+#define MMEA5_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA5_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA5_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA5_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA5_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA5_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA5_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA5_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA5_SDP_VCD_RESERVE0
+#define MMEA5_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA5_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA5_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA5_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA5_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA5_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA5_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA5_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA5_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA5_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA5_SDP_VCD_RESERVE1
+#define MMEA5_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA5_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA5_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA5_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA5_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA5_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA5_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA5_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA5_SDP_REQ_CNTL
+#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA5_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA5_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA5_MISC
+#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA5_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA5_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA5_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA5_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA5_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA5_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA5_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA5_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA5_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA5_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA5_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA5_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA5_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA5_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA5_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA5_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA5_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA5_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA5_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA5_LATENCY_SAMPLING
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA5_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA5_PERFCOUNTER_LO
+#define MMEA5_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA5_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA5_PERFCOUNTER_HI
+#define MMEA5_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA5_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA5_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA5_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA5_PERFCOUNTER0_CFG
+#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA5_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA5_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA5_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA5_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA5_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA5_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA5_PERFCOUNTER1_CFG
+#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA5_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA5_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA5_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA5_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA5_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA5_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA5_PERFCOUNTER_RSLT_CNTL
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA5_EDC_CNT
+#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA5_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA5_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA5_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA5_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA5_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA5_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA5_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA5_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA5_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA5_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA5_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA5_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA5_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA5_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA5_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA5_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA5_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA5_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA5_EDC_CNT2
+#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA5_DSM_CNTL
+#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA5_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA5_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA5_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA5_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA5_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA5_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA5_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA5_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA5_DSM_CNTLA
+#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA5_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA5_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA5_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA5_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA5_DSM_CNTL2
+#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA5_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA5_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA5_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA5_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA5_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA5_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA5_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA5_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA5_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA5_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA5_DSM_CNTL2A
+#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA5_CGTT_CLK_CTRL
+#define MMEA5_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA5_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA5_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA5_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA5_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA5_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA5_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA5_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA5_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA5_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA5_EDC_MODE
+#define MMEA5_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA5_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA5_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA5_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA5_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA5_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA5_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA5_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA5_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA5_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA5_ERR_STATUS
+#define MMEA5_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA5_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA5_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA5_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA5_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA5_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA5_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA5_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA5_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA5_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA5_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA5_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA5_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA5_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA5_MISC2
+#define MMEA5_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA5_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA5_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA5_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA5_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA5_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA5_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA5_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA5_ADDRDEC_SELECT
+#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA5_EDC_CNT3
+#define MMEA5_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA5_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA5_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA5_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA5_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA5_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA5_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA5_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA5_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA5_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA5_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA5_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA5_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA5_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
+// addressBlock: mmhub_ea_mmeadec6
+//MMEA6_DRAM_RD_CLI2GRP_MAP0
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA6_DRAM_RD_CLI2GRP_MAP1
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA6_DRAM_WR_CLI2GRP_MAP0
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA6_DRAM_WR_CLI2GRP_MAP1
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA6_DRAM_RD_GRP2VC_MAP
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA6_DRAM_WR_GRP2VC_MAP
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA6_DRAM_RD_LAZY
+#define MMEA6_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA6_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA6_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA6_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA6_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA6_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA6_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA6_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA6_DRAM_WR_LAZY
+#define MMEA6_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA6_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA6_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA6_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA6_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA6_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA6_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA6_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA6_DRAM_RD_CAM_CNTL
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA6_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA6_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA6_DRAM_WR_CAM_CNTL
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA6_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA6_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA6_DRAM_PAGE_BURST
+#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA6_DRAM_RD_PRI_AGE
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA6_DRAM_WR_PRI_AGE
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA6_DRAM_RD_PRI_QUEUING
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_DRAM_WR_PRI_QUEUING
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_DRAM_RD_PRI_FIXED
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_DRAM_WR_PRI_FIXED
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_DRAM_RD_PRI_URGENCY
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA6_DRAM_WR_PRI_URGENCY
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA6_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_GMI_RD_CLI2GRP_MAP0
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA6_GMI_RD_CLI2GRP_MAP1
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA6_GMI_WR_CLI2GRP_MAP0
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA6_GMI_WR_CLI2GRP_MAP1
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA6_GMI_RD_GRP2VC_MAP
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA6_GMI_WR_GRP2VC_MAP
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA6_GMI_RD_LAZY
+#define MMEA6_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA6_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA6_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA6_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA6_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA6_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA6_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA6_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA6_GMI_WR_LAZY
+#define MMEA6_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA6_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA6_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA6_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA6_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA6_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA6_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA6_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA6_GMI_RD_CAM_CNTL
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA6_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA6_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA6_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA6_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA6_GMI_WR_CAM_CNTL
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA6_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA6_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA6_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA6_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA6_GMI_PAGE_BURST
+#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA6_GMI_RD_PRI_AGE
+#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA6_GMI_WR_PRI_AGE
+#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA6_GMI_RD_PRI_QUEUING
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_GMI_WR_PRI_QUEUING
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_GMI_RD_PRI_FIXED
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_GMI_WR_PRI_FIXED
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_GMI_RD_PRI_URGENCY
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA6_GMI_WR_PRI_URGENCY
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA6_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA6_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA6_GMI_RD_PRI_QUANT_PRI1
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_GMI_RD_PRI_QUANT_PRI2
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_GMI_RD_PRI_QUANT_PRI3
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_GMI_WR_PRI_QUANT_PRI1
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_GMI_WR_PRI_QUANT_PRI2
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_GMI_WR_PRI_QUANT_PRI3
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_ADDRNORM_BASE_ADDR0
+#define MMEA6_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA6_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA6_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA6_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA6_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_LIMIT_ADDR0
+#define MMEA6_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA6_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA6_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_BASE_ADDR1
+#define MMEA6_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA6_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA6_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA6_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA6_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_LIMIT_ADDR1
+#define MMEA6_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA6_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA6_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_OFFSET_ADDR1
+#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA6_ADDRNORM_BASE_ADDR2
+#define MMEA6_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA6_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA6_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA6_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA6_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_LIMIT_ADDR2
+#define MMEA6_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA6_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA6_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_BASE_ADDR3
+#define MMEA6_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA6_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA6_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA6_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA6_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_LIMIT_ADDR3
+#define MMEA6_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA6_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA6_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_OFFSET_ADDR3
+#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA6_ADDRNORM_BASE_ADDR4
+#define MMEA6_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA6_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA6_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA6_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA6_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_LIMIT_ADDR4
+#define MMEA6_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA6_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA6_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_BASE_ADDR5
+#define MMEA6_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA6_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA6_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA6_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA6_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_LIMIT_ADDR5
+#define MMEA6_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA6_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA6_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA6_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA6_ADDRNORM_OFFSET_ADDR5
+#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA6_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA6_ADDRNORMGMI_HOLE_CNTL
+#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA6_ADDRDEC_BANK_CFG
+#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA6_ADDRDEC_MISC_CFG
+#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA6_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA6_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA6_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA6_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA6_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA6_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA6_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA6_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA6_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA6_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA6_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA6_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA6_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA6_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA6_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA6_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA6_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA6_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA6_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA6_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA6_ADDRDEC0_RM_SEL_CS01
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC0_RM_SEL_CS23
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA6_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA6_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA6_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA6_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA6_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA6_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA6_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA6_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA6_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA6_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA6_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA6_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA6_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA6_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA6_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA6_ADDRDEC1_RM_SEL_CS01
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC1_RM_SEL_CS23
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA6_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA6_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA6_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA6_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA6_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA6_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA6_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA6_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA6_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA6_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA6_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA6_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA6_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA6_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA6_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA6_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA6_ADDRDEC2_RM_SEL_CS01
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC2_RM_SEL_CS23
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA6_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA6_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA6_IO_RD_CLI2GRP_MAP0
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA6_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA6_IO_RD_CLI2GRP_MAP1
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA6_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA6_IO_WR_CLI2GRP_MAP0
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA6_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA6_IO_WR_CLI2GRP_MAP1
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA6_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA6_IO_RD_COMBINE_FLUSH
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA6_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA6_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA6_IO_WR_COMBINE_FLUSH
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA6_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA6_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA6_IO_GROUP_BURST
+#define MMEA6_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA6_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA6_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA6_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA6_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA6_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA6_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA6_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA6_IO_RD_PRI_AGE
+#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA6_IO_WR_PRI_AGE
+#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA6_IO_RD_PRI_QUEUING
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_IO_WR_PRI_QUEUING
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_IO_RD_PRI_FIXED
+#define MMEA6_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA6_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA6_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA6_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_IO_WR_PRI_FIXED
+#define MMEA6_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA6_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA6_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA6_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA6_IO_RD_PRI_URGENCY
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA6_IO_WR_PRI_URGENCY
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA6_IO_RD_PRI_URGENCY_MASKING
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA6_IO_WR_PRI_URGENCY_MASKING
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA6_IO_RD_PRI_QUANT_PRI1
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_IO_RD_PRI_QUANT_PRI2
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_IO_RD_PRI_QUANT_PRI3
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_IO_WR_PRI_QUANT_PRI1
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_IO_WR_PRI_QUANT_PRI2
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_IO_WR_PRI_QUANT_PRI3
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA6_SDP_ARB_DRAM
+#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA6_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA6_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA6_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA6_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA6_SDP_ARB_GMI
+#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA6_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA6_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA6_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA6_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA6_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA6_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA6_SDP_ARB_FINAL
+#define MMEA6_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA6_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA6_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA6_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA6_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA6_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA6_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA6_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA6_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA6_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA6_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA6_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA6_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA6_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA6_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA6_SDP_DRAM_PRIORITY
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA6_SDP_GMI_PRIORITY
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA6_SDP_IO_PRIORITY
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA6_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA6_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA6_SDP_CREDITS
+#define MMEA6_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA6_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA6_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA6_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA6_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA6_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA6_SDP_TAG_RESERVE0
+#define MMEA6_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA6_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA6_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA6_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA6_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA6_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA6_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA6_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA6_SDP_TAG_RESERVE1
+#define MMEA6_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA6_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA6_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA6_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA6_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA6_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA6_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA6_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA6_SDP_VCC_RESERVE0
+#define MMEA6_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA6_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA6_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA6_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA6_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA6_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA6_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA6_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA6_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA6_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA6_SDP_VCC_RESERVE1
+#define MMEA6_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA6_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA6_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA6_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA6_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA6_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA6_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA6_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA6_SDP_VCD_RESERVE0
+#define MMEA6_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA6_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA6_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA6_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA6_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA6_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA6_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA6_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA6_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA6_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA6_SDP_VCD_RESERVE1
+#define MMEA6_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA6_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA6_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA6_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA6_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA6_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA6_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA6_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA6_SDP_REQ_CNTL
+#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA6_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA6_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA6_MISC
+#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA6_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA6_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA6_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA6_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA6_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA6_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA6_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA6_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA6_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA6_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA6_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA6_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA6_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA6_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA6_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA6_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA6_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA6_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA6_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA6_LATENCY_SAMPLING
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA6_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA6_PERFCOUNTER_LO
+#define MMEA6_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA6_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA6_PERFCOUNTER_HI
+#define MMEA6_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA6_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA6_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA6_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA6_PERFCOUNTER0_CFG
+#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA6_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA6_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA6_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA6_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA6_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA6_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA6_PERFCOUNTER1_CFG
+#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA6_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA6_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA6_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA6_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA6_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA6_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA6_PERFCOUNTER_RSLT_CNTL
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA6_EDC_CNT
+#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA6_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA6_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA6_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA6_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA6_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA6_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA6_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA6_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA6_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA6_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA6_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA6_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA6_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA6_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA6_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA6_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA6_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA6_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA6_EDC_CNT2
+#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA6_DSM_CNTL
+#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA6_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA6_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA6_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA6_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA6_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA6_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA6_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA6_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA6_DSM_CNTLA
+#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA6_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA6_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA6_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA6_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA6_DSM_CNTL2
+#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA6_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA6_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA6_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA6_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA6_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA6_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA6_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA6_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA6_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA6_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA6_DSM_CNTL2A
+#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA6_CGTT_CLK_CTRL
+#define MMEA6_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA6_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA6_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA6_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA6_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA6_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA6_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA6_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA6_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA6_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA6_EDC_MODE
+#define MMEA6_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA6_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA6_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA6_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA6_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA6_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA6_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA6_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA6_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA6_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA6_ERR_STATUS
+#define MMEA6_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA6_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA6_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA6_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA6_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA6_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA6_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA6_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA6_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA6_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA6_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA6_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA6_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA6_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA6_MISC2
+#define MMEA6_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA6_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA6_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA6_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA6_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA6_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA6_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA6_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA6_ADDRDEC_SELECT
+#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA6_EDC_CNT3
+#define MMEA6_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA6_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA6_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA6_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA6_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA6_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA6_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA6_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA6_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA6_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA6_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA6_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA6_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA6_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
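+/*
+ * Illustrative note (editorial, not part of the generated register spec):
+ * every field in these blocks is described by a paired __SHIFT/_MASK define.
+ * A minimal sketch of how such pairs are typically consumed in amdgpu code,
+ * assuming the REG_GET_FIELD()/REG_SET_FIELD() helpers and RREG32()/WREG32()
+ * accessors from amdgpu.h; reg_offset below is a hypothetical register
+ * offset used only for illustration:
+ *
+ *   u32 v = RREG32(reg_offset);                      // read current value
+ *   u32 thresh = REG_GET_FIELD(v, MMEA6_MISC, LINKMGR_IDLE_THRESHOLD);
+ *   v = REG_SET_FIELD(v, MMEA6_MISC, LINKMGR_IDLE_THRESHOLD, thresh);
+ *   WREG32(reg_offset, v);                           // write it back
+ *
+ * REG_GET_FIELD(value, reg, field) masks with reg##__##field##_MASK and
+ * shifts right by reg##__##field##__SHIFT, which is why each field carries
+ * both defines.
+ */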
+// addressBlock: mmhub_ea_mmeadec7
+//MMEA7_DRAM_RD_CLI2GRP_MAP0
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA7_DRAM_RD_CLI2GRP_MAP1
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA7_DRAM_WR_CLI2GRP_MAP0
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA7_DRAM_WR_CLI2GRP_MAP1
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA7_DRAM_RD_GRP2VC_MAP
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA7_DRAM_WR_GRP2VC_MAP
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA7_DRAM_RD_LAZY
+#define MMEA7_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA7_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA7_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA7_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA7_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA7_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA7_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA7_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA7_DRAM_WR_LAZY
+#define MMEA7_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA7_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA7_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA7_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA7_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA7_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA7_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA7_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA7_DRAM_RD_CAM_CNTL
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA7_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA7_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA7_DRAM_WR_CAM_CNTL
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA7_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA7_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//MMEA7_DRAM_PAGE_BURST
+#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA7_DRAM_RD_PRI_AGE
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA7_DRAM_WR_PRI_AGE
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA7_DRAM_RD_PRI_QUEUING
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_DRAM_WR_PRI_QUEUING
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_DRAM_RD_PRI_FIXED
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_DRAM_WR_PRI_FIXED
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_DRAM_RD_PRI_URGENCY
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA7_DRAM_WR_PRI_URGENCY
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA7_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_GMI_RD_CLI2GRP_MAP0
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA7_GMI_RD_CLI2GRP_MAP1
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA7_GMI_WR_CLI2GRP_MAP0
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA7_GMI_WR_CLI2GRP_MAP1
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA7_GMI_RD_GRP2VC_MAP
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA7_GMI_WR_GRP2VC_MAP
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA7_GMI_RD_LAZY
+#define MMEA7_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA7_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA7_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA7_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA7_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA7_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA7_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA7_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA7_GMI_WR_LAZY
+#define MMEA7_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA7_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA7_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA7_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define MMEA7_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA7_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA7_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA7_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//MMEA7_GMI_RD_CAM_CNTL
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA7_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA7_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA7_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA7_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA7_GMI_WR_CAM_CNTL
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA7_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define MMEA7_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define MMEA7_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+#define MMEA7_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L
+//MMEA7_GMI_PAGE_BURST
+#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA7_GMI_RD_PRI_AGE
+#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA7_GMI_WR_PRI_AGE
+#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA7_GMI_RD_PRI_QUEUING
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_GMI_WR_PRI_QUEUING
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_GMI_RD_PRI_FIXED
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_GMI_WR_PRI_FIXED
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_GMI_RD_PRI_URGENCY
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA7_GMI_WR_PRI_URGENCY
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA7_GMI_RD_PRI_URGENCY_MASKING
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA7_GMI_WR_PRI_URGENCY_MASKING
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA7_GMI_RD_PRI_QUANT_PRI1
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_GMI_RD_PRI_QUANT_PRI2
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_GMI_RD_PRI_QUANT_PRI3
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_GMI_WR_PRI_QUANT_PRI1
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_GMI_WR_PRI_QUANT_PRI2
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_GMI_WR_PRI_QUANT_PRI3
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_ADDRNORM_BASE_ADDR0
+#define MMEA7_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA7_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA7_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA7_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA7_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_LIMIT_ADDR0
+#define MMEA7_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA7_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA7_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_BASE_ADDR1
+#define MMEA7_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA7_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA7_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA7_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA7_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_LIMIT_ADDR1
+#define MMEA7_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA7_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA7_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_OFFSET_ADDR1
+#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA7_ADDRNORM_BASE_ADDR2
+#define MMEA7_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA7_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA7_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA7_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA7_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_LIMIT_ADDR2
+#define MMEA7_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA7_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA7_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_BASE_ADDR3
+#define MMEA7_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA7_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA7_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA7_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA7_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_LIMIT_ADDR3
+#define MMEA7_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA7_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA7_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_OFFSET_ADDR3
+#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA7_ADDRNORM_BASE_ADDR4
+#define MMEA7_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA7_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA7_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA7_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA7_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_LIMIT_ADDR4
+#define MMEA7_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA7_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA7_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_BASE_ADDR5
+#define MMEA7_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA7_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9
+#define MMEA7_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA7_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L
+#define MMEA7_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_LIMIT_ADDR5
+#define MMEA7_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA7_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc
+#define MMEA7_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA7_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA7_ADDRNORM_OFFSET_ADDR5
+#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA7_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA7_ADDRNORMGMI_HOLE_CNTL
+#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG
+#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
+#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
+#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
+#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
+//MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG
+#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0
+#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6
+#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL
+#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L
+//MMEA7_ADDRDEC_BANK_CFG
+#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13
+#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL
+#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L
+#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L
+//MMEA7_ADDRDEC_MISC_CFG
+#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA7_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA7_ADDRDECGMI_ADDR_HASH_BANK0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_BANK1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_BANK2
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_BANK3
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_BANK4
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_BANK5
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_PC
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA7_ADDRDECGMI_ADDR_HASH_PC2
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL
+//MMEA7_ADDRDECGMI_ADDR_HASH_CS0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDECGMI_ADDR_HASH_CS1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDECGMI_HARVEST_ENABLE
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L
+#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L
+//MMEA7_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA7_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA7_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA7_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA7_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA7_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA7_ADDRDEC0_ADDR_SEL2_CS01
+#define MMEA7_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA7_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA7_ADDRDEC0_ADDR_SEL2_CS23
+#define MMEA7_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA7_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA7_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA7_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA7_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA7_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA7_ADDRDEC0_RM_SEL_CS01
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC0_RM_SEL_CS23
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA7_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA7_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA7_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA7_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA7_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA7_ADDRDEC1_ADDR_SEL2_CS01
+#define MMEA7_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA7_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA7_ADDRDEC1_ADDR_SEL2_CS23
+#define MMEA7_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA7_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA7_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA7_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA7_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA7_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA7_ADDRDEC1_RM_SEL_CS01
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC1_RM_SEL_CS23
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC2_BASE_ADDR_CS0
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_CS1
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_CS2
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_CS3
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_SECCS0
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_SECCS1
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_SECCS2
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_BASE_ADDR_SECCS3
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_ADDR_MASK_CS01
+#define MMEA7_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_ADDR_MASK_CS23
+#define MMEA7_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_ADDR_MASK_SECCS01
+#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_ADDR_MASK_SECCS23
+#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA7_ADDRDEC2_ADDR_CFG_CS01
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA7_ADDRDEC2_ADDR_CFG_CS23
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA7_ADDRDEC2_ADDR_SEL_CS01
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA7_ADDRDEC2_ADDR_SEL_CS23
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA7_ADDRDEC2_ADDR_SEL2_CS01
+#define MMEA7_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0
+#define MMEA7_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL
+//MMEA7_ADDRDEC2_ADDR_SEL2_CS23
+#define MMEA7_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0
+#define MMEA7_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL
+//MMEA7_ADDRDEC2_COL_SEL_LO_CS01
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA7_ADDRDEC2_COL_SEL_LO_CS23
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA7_ADDRDEC2_COL_SEL_HI_CS01
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA7_ADDRDEC2_COL_SEL_HI_CS23
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA7_ADDRDEC2_RM_SEL_CS01
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC2_RM_SEL_CS23
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC2_RM_SEL_SECCS01
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRDEC2_RM_SEL_SECCS23
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA7_ADDRNORMDRAM_GLOBAL_CNTL
+#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA7_ADDRNORMGMI_GLOBAL_CNTL
+#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14
+#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15
+#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16
+#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L
+#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L
+#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L
+//MMEA7_IO_RD_CLI2GRP_MAP0
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA7_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA7_IO_RD_CLI2GRP_MAP1
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA7_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA7_IO_WR_CLI2GRP_MAP0
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA7_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA7_IO_WR_CLI2GRP_MAP1
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA7_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA7_IO_RD_COMBINE_FLUSH
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA7_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA7_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA7_IO_WR_COMBINE_FLUSH
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA7_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define MMEA7_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L
+//MMEA7_IO_GROUP_BURST
+#define MMEA7_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA7_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA7_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA7_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA7_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA7_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA7_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA7_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA7_IO_RD_PRI_AGE
+#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA7_IO_WR_PRI_AGE
+#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA7_IO_RD_PRI_QUEUING
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_IO_WR_PRI_QUEUING
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_IO_RD_PRI_FIXED
+#define MMEA7_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA7_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA7_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA7_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_IO_WR_PRI_FIXED
+#define MMEA7_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA7_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA7_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA7_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA7_IO_RD_PRI_URGENCY
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA7_IO_WR_PRI_URGENCY
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA7_IO_RD_PRI_URGENCY_MASKING
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA7_IO_WR_PRI_URGENCY_MASKING
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//MMEA7_IO_RD_PRI_QUANT_PRI1
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_IO_RD_PRI_QUANT_PRI2
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_IO_RD_PRI_QUANT_PRI3
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_IO_WR_PRI_QUANT_PRI1
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_IO_WR_PRI_QUANT_PRI2
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_IO_WR_PRI_QUANT_PRI3
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA7_SDP_ARB_DRAM
+#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA7_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA7_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA7_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA7_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+//MMEA7_SDP_ARB_GMI
+#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA7_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA7_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define MMEA7_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA7_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L
+#define MMEA7_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define MMEA7_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//MMEA7_SDP_ARB_FINAL
+#define MMEA7_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA7_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA7_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA7_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA7_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA7_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA7_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define MMEA7_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA7_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA7_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA7_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA7_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA7_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA7_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define MMEA7_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+//MMEA7_SDP_DRAM_PRIORITY
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA7_SDP_GMI_PRIORITY
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA7_SDP_IO_PRIORITY
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA7_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA7_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA7_SDP_CREDITS
+#define MMEA7_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA7_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA7_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA7_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA7_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA7_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA7_SDP_TAG_RESERVE0
+#define MMEA7_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA7_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA7_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA7_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA7_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA7_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA7_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA7_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA7_SDP_TAG_RESERVE1
+#define MMEA7_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA7_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA7_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA7_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA7_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA7_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA7_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA7_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA7_SDP_VCC_RESERVE0
+#define MMEA7_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA7_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA7_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA7_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA7_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA7_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA7_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA7_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA7_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA7_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA7_SDP_VCC_RESERVE1
+#define MMEA7_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA7_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA7_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA7_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA7_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA7_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA7_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA7_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA7_SDP_VCD_RESERVE0
+#define MMEA7_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA7_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA7_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA7_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA7_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA7_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA7_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA7_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA7_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA7_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA7_SDP_VCD_RESERVE1
+#define MMEA7_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA7_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA7_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA7_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA7_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA7_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA7_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA7_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA7_SDP_REQ_CNTL
+#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define MMEA7_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define MMEA7_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+//MMEA7_MISC
+#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA7_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA7_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define MMEA7_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define MMEA7_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define MMEA7_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define MMEA7_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define MMEA7_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define MMEA7_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define MMEA7_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA7_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA7_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define MMEA7_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define MMEA7_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define MMEA7_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define MMEA7_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define MMEA7_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define MMEA7_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define MMEA7_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define MMEA7_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//MMEA7_LATENCY_SAMPLING
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA7_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA7_PERFCOUNTER_LO
+#define MMEA7_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA7_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA7_PERFCOUNTER_HI
+#define MMEA7_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA7_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA7_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA7_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA7_PERFCOUNTER0_CFG
+#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA7_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA7_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA7_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA7_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA7_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA7_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA7_PERFCOUNTER1_CFG
+#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA7_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA7_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA7_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA7_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA7_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA7_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA7_PERFCOUNTER_RSLT_CNTL
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
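As a hedged illustration only (not part of the generated header or the patch): the perf-counter result-control fields above follow the same __SHIFT/_MASK pairing used throughout this file, and a control word can be composed purely from those pairs. The helper name and the chosen field values below are arbitrary examples, not driver API or recommended settings.

/* Illustrative sketch: build a MMEA7_PERFCOUNTER_RSLT_CNTL value from its
 * fields using the shift/mask pairs defined above. Field values are
 * arbitrary examples. */
static inline unsigned int mmea7_rslt_cntl_example(void)
{
	unsigned int v = 0;

	/* select result slot 0 */
	v |= (0u << MMEA7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT) &
	     MMEA7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK;
	/* enable all counters */
	v |= (1u << MMEA7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT) &
	     MMEA7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK;

	return v;
}

In practice the amdgpu driver usually expresses this through its generic REG_SET_FIELD/REG_GET_FIELD-style helpers, which token-paste the register and field names onto exactly these definitions; the open-coded form above is shown only to make the shift/mask convention explicit.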
+//MMEA7_EDC_CNT
+#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA7_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA7_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA7_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA7_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA7_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA7_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA7_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA7_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA7_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA7_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA7_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA7_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA7_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA7_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA7_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA7_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA7_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA7_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA7_EDC_CNT2
+#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//MMEA7_DSM_CNTL
+#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA7_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA7_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA7_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA7_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA7_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA7_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA7_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA7_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA7_DSM_CNTLA
+#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA7_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA7_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA7_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA7_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA7_DSM_CNTL2
+#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA7_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA7_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA7_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA7_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA7_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA7_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA7_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA7_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA7_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA7_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA7_DSM_CNTL2A
+#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA7_CGTT_CLK_CTRL
+#define MMEA7_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA7_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA7_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA7_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define MMEA7_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA7_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA7_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA7_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA7_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define MMEA7_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA7_EDC_MODE
+#define MMEA7_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA7_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA7_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA7_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA7_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA7_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA7_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA7_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA7_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA7_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA7_ERR_STATUS
+#define MMEA7_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA7_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA7_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA7_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA7_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA7_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA7_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA7_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA7_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA7_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA7_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA7_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA7_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA7_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA7_MISC2
+#define MMEA7_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA7_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA7_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA7_MISC2__RRET_SWAP_MODE__SHIFT 0xd
+#define MMEA7_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA7_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA7_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define MMEA7_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
+//MMEA7_ADDRDEC_SELECT
+#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
+#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
+#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
+#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
+#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
+#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
+#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
+#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
+//MMEA7_EDC_CNT3
+#define MMEA7_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define MMEA7_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define MMEA7_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define MMEA7_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA7_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define MMEA7_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define MMEA7_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define MMEA7_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define MMEA7_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA7_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define MMEA7_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA7_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define MMEA7_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA7_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+
+
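Before the next address block, a second hedged sketch of how these MMEA7 definitions are consumed on the read side: the snippet extracts the TAG_LIMIT field from a raw MMEA7_SDP_CREDITS register value. It is illustrative only; regval stands for a 32-bit value obtained by whatever register-read path the caller already uses, and the helper name is hypothetical.

/* Illustrative sketch: decode the TAG_LIMIT field of MMEA7_SDP_CREDITS from
 * a raw 32-bit register value, using the mask/shift pair defined above. */
static inline unsigned int mmea7_sdp_credits_tag_limit(unsigned int regval)
{
	return (regval & MMEA7_SDP_CREDITS__TAG_LIMIT_MASK) >>
	       MMEA7_SDP_CREDITS__TAG_LIMIT__SHIFT;
}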
+// addressBlock: mmhub_pctldec1
+//PCTL1_CTRL
+#define PCTL1_CTRL__PG_ENABLE__SHIFT 0x0
+#define PCTL1_CTRL__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x1
+#define PCTL1_CTRL__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x4
+#define PCTL1_CTRL__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_CTRL__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x10
+#define PCTL1_CTRL__OVR_EA0_SDP_PARTACK__SHIFT 0x11
+#define PCTL1_CTRL__OVR_EA1_SDP_PARTACK__SHIFT 0x12
+#define PCTL1_CTRL__OVR_EA2_SDP_PARTACK__SHIFT 0x13
+#define PCTL1_CTRL__OVR_EA3_SDP_PARTACK__SHIFT 0x14
+#define PCTL1_CTRL__OVR_EA4_SDP_PARTACK__SHIFT 0x15
+#define PCTL1_CTRL__OVR_EA0_SDP_FULLACK__SHIFT 0x16
+#define PCTL1_CTRL__OVR_EA1_SDP_FULLACK__SHIFT 0x17
+#define PCTL1_CTRL__OVR_EA2_SDP_FULLACK__SHIFT 0x18
+#define PCTL1_CTRL__OVR_EA3_SDP_FULLACK__SHIFT 0x19
+#define PCTL1_CTRL__OVR_EA4_SDP_FULLACK__SHIFT 0x1a
+#define PCTL1_CTRL__PGFSM_CMD_STATUS__SHIFT 0x1b
+#define PCTL1_CTRL__PG_ENABLE_MASK 0x00000001L
+#define PCTL1_CTRL__ALLOW_DEEP_SLEEP_MODE_MASK 0x0000000EL
+#define PCTL1_CTRL__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x000007F0L
+#define PCTL1_CTRL__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x0000F800L
+#define PCTL1_CTRL__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00010000L
+#define PCTL1_CTRL__OVR_EA0_SDP_PARTACK_MASK 0x00020000L
+#define PCTL1_CTRL__OVR_EA1_SDP_PARTACK_MASK 0x00040000L
+#define PCTL1_CTRL__OVR_EA2_SDP_PARTACK_MASK 0x00080000L
+#define PCTL1_CTRL__OVR_EA3_SDP_PARTACK_MASK 0x00100000L
+#define PCTL1_CTRL__OVR_EA4_SDP_PARTACK_MASK 0x00200000L
+#define PCTL1_CTRL__OVR_EA0_SDP_FULLACK_MASK 0x00400000L
+#define PCTL1_CTRL__OVR_EA1_SDP_FULLACK_MASK 0x00800000L
+#define PCTL1_CTRL__OVR_EA2_SDP_FULLACK_MASK 0x01000000L
+#define PCTL1_CTRL__OVR_EA3_SDP_FULLACK_MASK 0x02000000L
+#define PCTL1_CTRL__OVR_EA4_SDP_FULLACK_MASK 0x04000000L
+#define PCTL1_CTRL__PGFSM_CMD_STATUS_MASK 0x18000000L
+//PCTL1_MMHUB_DEEPSLEEP_IB
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS0__SHIFT 0x0
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS1__SHIFT 0x1
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS2__SHIFT 0x2
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS3__SHIFT 0x3
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS4__SHIFT 0x4
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS5__SHIFT 0x5
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS6__SHIFT 0x6
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS7__SHIFT 0x7
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS8__SHIFT 0x8
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS9__SHIFT 0x9
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS10__SHIFT 0xa
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS11__SHIFT 0xb
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS12__SHIFT 0xc
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS13__SHIFT 0xd
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS14__SHIFT 0xe
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS15__SHIFT 0xf
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS16__SHIFT 0x10
+#define PCTL1_MMHUB_DEEPSLEEP_IB__SETCLEAR__SHIFT 0x1f
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS0_MASK 0x00000001L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS1_MASK 0x00000002L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS2_MASK 0x00000004L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS3_MASK 0x00000008L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS4_MASK 0x00000010L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS5_MASK 0x00000020L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS6_MASK 0x00000040L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS7_MASK 0x00000080L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS8_MASK 0x00000100L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS9_MASK 0x00000200L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS10_MASK 0x00000400L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS11_MASK 0x00000800L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS12_MASK 0x00001000L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS13_MASK 0x00002000L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS14_MASK 0x00004000L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS15_MASK 0x00008000L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__DS16_MASK 0x00010000L
+#define PCTL1_MMHUB_DEEPSLEEP_IB__SETCLEAR_MASK 0x80000000L
+//PCTL1_MMHUB_DEEPSLEEP_OVERRIDE
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB__SHIFT 0x11
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB_MASK 0x00020000L
+//PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0__SHIFT 0x0
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1__SHIFT 0x1
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2__SHIFT 0x2
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3__SHIFT 0x3
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4__SHIFT 0x4
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5__SHIFT 0x5
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6__SHIFT 0x6
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7__SHIFT 0x7
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8__SHIFT 0x8
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9__SHIFT 0x9
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10__SHIFT 0xa
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11__SHIFT 0xb
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12__SHIFT 0xc
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13__SHIFT 0xd
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14__SHIFT 0xe
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15__SHIFT 0xf
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16__SHIFT 0x10
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0_MASK 0x00000001L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1_MASK 0x00000002L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2_MASK 0x00000004L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3_MASK 0x00000008L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4_MASK 0x00000010L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5_MASK 0x00000020L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6_MASK 0x00000040L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7_MASK 0x00000080L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8_MASK 0x00000100L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9_MASK 0x00000200L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10_MASK 0x00000400L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11_MASK 0x00000800L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12_MASK 0x00001000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13_MASK 0x00002000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14_MASK 0x00004000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15_MASK 0x00008000L
+#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16_MASK 0x00010000L
+//PCTL1_PG_IGNORE_DEEPSLEEP
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x0
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x1
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x2
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x3
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x4
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x5
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x6
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x7
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x8
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0x9
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xa
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xb
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xc
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xd
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xe
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0xf
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x10
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS_ATHUB__SHIFT 0x11
+#define PCTL1_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x12
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000001L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000002L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000004L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000008L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000010L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000020L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000040L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000080L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000100L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000200L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000400L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00000800L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00001000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00002000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00004000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00008000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00010000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__DS_ATHUB_MASK 0x00020000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00040000L
+//PCTL1_PG_IGNORE_DEEPSLEEP_IB
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS0__SHIFT 0x0
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS1__SHIFT 0x1
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS2__SHIFT 0x2
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS3__SHIFT 0x3
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS4__SHIFT 0x4
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS5__SHIFT 0x5
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS6__SHIFT 0x6
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS7__SHIFT 0x7
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS8__SHIFT 0x8
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS9__SHIFT 0x9
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS10__SHIFT 0xa
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS11__SHIFT 0xb
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS12__SHIFT 0xc
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS13__SHIFT 0xd
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS14__SHIFT 0xe
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS15__SHIFT 0xf
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS16__SHIFT 0x10
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__ALLIPS__SHIFT 0x11
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS0_MASK 0x00000001L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS1_MASK 0x00000002L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS2_MASK 0x00000004L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS3_MASK 0x00000008L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS4_MASK 0x00000010L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS5_MASK 0x00000020L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS6_MASK 0x00000040L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS7_MASK 0x00000080L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS8_MASK 0x00000100L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS9_MASK 0x00000200L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS10_MASK 0x00000400L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS11_MASK 0x00000800L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS12_MASK 0x00001000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS13_MASK 0x00002000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS14_MASK 0x00004000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS15_MASK 0x00008000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS16_MASK 0x00010000L
+#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__ALLIPS_MASK 0x00020000L
+//PCTL1_SLICE0_CFG_DAGB_BUSY
+#define PCTL1_SLICE0_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL1_SLICE0_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL1_SLICE0_CFG_DS_ALLOW
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL1_SLICE0_CFG_DS_ALLOW_IB
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL1_SLICE1_CFG_DAGB_BUSY
+#define PCTL1_SLICE1_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL1_SLICE1_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL1_SLICE1_CFG_DS_ALLOW
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL1_SLICE1_CFG_DS_ALLOW_IB
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL1_SLICE2_CFG_DAGB_BUSY
+#define PCTL1_SLICE2_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL1_SLICE2_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL1_SLICE2_CFG_DS_ALLOW
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL1_SLICE2_CFG_DS_ALLOW_IB
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL1_SLICE3_CFG_DAGB_BUSY
+#define PCTL1_SLICE3_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL1_SLICE3_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL1_SLICE3_CFG_DS_ALLOW
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL1_SLICE3_CFG_DS_ALLOW_IB
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL1_SLICE4_CFG_DAGB_BUSY
+#define PCTL1_SLICE4_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0
+#define PCTL1_SLICE4_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL
+//PCTL1_SLICE4_CFG_DS_ALLOW
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS0__SHIFT 0x0
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS1__SHIFT 0x1
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS2__SHIFT 0x2
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS3__SHIFT 0x3
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS4__SHIFT 0x4
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS5__SHIFT 0x5
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS6__SHIFT 0x6
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS7__SHIFT 0x7
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS8__SHIFT 0x8
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS9__SHIFT 0x9
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS10__SHIFT 0xa
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS11__SHIFT 0xb
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS12__SHIFT 0xc
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS13__SHIFT 0xd
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS14__SHIFT 0xe
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS15__SHIFT 0xf
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS16__SHIFT 0x10
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS0_MASK 0x00000001L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS1_MASK 0x00000002L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS2_MASK 0x00000004L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS3_MASK 0x00000008L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS4_MASK 0x00000010L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS5_MASK 0x00000020L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS6_MASK 0x00000040L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS7_MASK 0x00000080L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS8_MASK 0x00000100L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS9_MASK 0x00000200L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS10_MASK 0x00000400L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS11_MASK 0x00000800L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS12_MASK 0x00001000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS13_MASK 0x00002000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS14_MASK 0x00004000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS15_MASK 0x00008000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW__DS16_MASK 0x00010000L
+//PCTL1_SLICE4_CFG_DS_ALLOW_IB
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L
+#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L
+//PCTL1_UTCL2_MISC
+#define PCTL1_UTCL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb
+#define PCTL1_UTCL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc
+#define PCTL1_UTCL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf
+#define PCTL1_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10
+#define PCTL1_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL1_UTCL2_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL1_UTCL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L
+#define PCTL1_UTCL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L
+#define PCTL1_UTCL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L
+#define PCTL1_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L
+#define PCTL1_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL1_UTCL2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL1_SLICE0_MISC
+#define PCTL1_SLICE0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL1_SLICE0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_SLICE0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL1_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL1_SLICE0_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL1_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL1_SLICE0_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL1_SLICE0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL1_SLICE0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL1_SLICE0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL1_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL1_SLICE0_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL1_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL1_SLICE0_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL1_SLICE1_MISC
+#define PCTL1_SLICE1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL1_SLICE1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_SLICE1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL1_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL1_SLICE1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL1_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL1_SLICE1_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL1_SLICE1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL1_SLICE1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL1_SLICE1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL1_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL1_SLICE1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL1_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL1_SLICE1_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL1_SLICE2_MISC
+#define PCTL1_SLICE2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL1_SLICE2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_SLICE2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL1_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL1_SLICE2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL1_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL1_SLICE2_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL1_SLICE2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL1_SLICE2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL1_SLICE2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL1_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL1_SLICE2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL1_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL1_SLICE2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL1_SLICE3_MISC
+#define PCTL1_SLICE3_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL1_SLICE3_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_SLICE3_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL1_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL1_SLICE3_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL1_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL1_SLICE3_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL1_SLICE3_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL1_SLICE3_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL1_SLICE3_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL1_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL1_SLICE3_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL1_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL1_SLICE3_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL1_SLICE4_MISC
+#define PCTL1_SLICE4_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL1_SLICE4_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_SLICE4_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL1_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL1_SLICE4_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL1_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11
+#define PCTL1_SLICE4_MISC__RD_TIMER_ENABLE__SHIFT 0x12
+#define PCTL1_SLICE4_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL1_SLICE4_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL1_SLICE4_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL1_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL1_SLICE4_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+#define PCTL1_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L
+#define PCTL1_SLICE4_MISC__RD_TIMER_ENABLE_MASK 0x00040000L
+//PCTL1_UTCL2_RENG_EXECUTE
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FFCL
+#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x00FFE000L
+//PCTL1_SLICE0_RENG_EXECUTE
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL1_SLICE1_RENG_EXECUTE
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL1_SLICE2_RENG_EXECUTE
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL1_SLICE3_RENG_EXECUTE
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL1_SLICE4_RENG_EXECUTE
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
+#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
+//PCTL1_UTCL2_RENG_RAM_INDEX
+#define PCTL1_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000007FFL
+//PCTL1_UTCL2_RENG_RAM_DATA
+#define PCTL1_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_SLICE0_RENG_RAM_INDEX
+#define PCTL1_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL1_SLICE0_RENG_RAM_DATA
+#define PCTL1_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_SLICE1_RENG_RAM_INDEX
+#define PCTL1_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL1_SLICE1_RENG_RAM_DATA
+#define PCTL1_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_SLICE2_RENG_RAM_INDEX
+#define PCTL1_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL1_SLICE2_RENG_RAM_DATA
+#define PCTL1_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_SLICE3_RENG_RAM_INDEX
+#define PCTL1_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL1_SLICE3_RENG_RAM_DATA
+#define PCTL1_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_SLICE4_RENG_RAM_INDEX
+#define PCTL1_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL1_SLICE4_RENG_RAM_DATA
+#define PCTL1_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+
+
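These generated __SHIFT/_MASK pairs follow the usual convention of one shift and one mask per bit field. A minimal sketch of how a driver might compose a field value with them, assuming that convention and the kernel's u32 type from linux/types.h; the set_field() helper and the end_ptr variable below are hypothetical illustrations, not part of this patch:

	/* Hypothetical helper: insert 'val' into the field described by mask/shift. */
	static inline u32 set_field(u32 reg, u32 mask, u32 shift, u32 val)
	{
		reg &= ~mask;                 /* clear the field bits */
		reg |= (val << shift) & mask; /* place the new value, clipped to the field */
		return reg;
	}

	/* e.g. program RENG_EXECUTE_END_PTR in PCTL1_UTCL2_RENG_EXECUTE */
	u32 v = set_field(0,
			  PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK,
			  PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT,
			  end_ptr);
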
+// addressBlock: mmhub_l1tlb_vml1dec:1
+//VML1_1_MC_VM_MX_L1_TLB0_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB1_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB2_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB3_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB4_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB5_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB6_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//VML1_1_MC_VM_MX_L1_TLB7_STATUS
+#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0
+#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L
+#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec:1
+//VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec:1
+//VML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO
+#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI
+#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_atcl2dec:1
+//ATCL2_1_ATC_L2_CNTL
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define ATCL2_1_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define ATCL2_1_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define ATCL2_1_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define ATCL2_1_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+//ATCL2_1_ATC_L2_CNTL2
+#define ATCL2_1_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define ATCL2_1_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
+#define ATCL2_1_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE__SHIFT 0x15
+#define ATCL2_1_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE__SHIFT 0x1b
+#define ATCL2_1_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define ATCL2_1_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
+#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
+#define ATCL2_1_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE_MASK 0x07E00000L
+#define ATCL2_1_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE_MASK 0x08000000L
+//ATCL2_1_ATC_L2_CACHE_DATA0
+#define ATCL2_1_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define ATCL2_1_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
+#define ATCL2_1_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
+#define ATCL2_1_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
+//ATCL2_1_ATC_L2_CACHE_DATA1
+#define ATCL2_1_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//ATCL2_1_ATC_L2_CACHE_DATA2
+#define ATCL2_1_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATCL2_1_ATC_L2_CNTL3
+#define ATCL2_1_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
+#define ATCL2_1_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9
+#define ATCL2_1_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
+#define ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
+#define ATCL2_1_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L
+//ATCL2_1_ATC_L2_STATUS
+#define ATCL2_1_ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define ATCL2_1_ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
+#define ATCL2_1_ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define ATCL2_1_ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL
+//ATCL2_1_ATC_L2_STATUS2
+#define ATCL2_1_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define ATCL2_1_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define ATCL2_1_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define ATCL2_1_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//ATCL2_1_ATC_L2_STATUS3
+#define ATCL2_1_ATC_L2_STATUS3__BUSY__SHIFT 0x0
+#define ATCL2_1_ATC_L2_STATUS3__PARITY_ERROR_INFO__SHIFT 0x1
+#define ATCL2_1_ATC_L2_STATUS3__BUSY_MASK 0x00000001L
+#define ATCL2_1_ATC_L2_STATUS3__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL
+//ATCL2_1_ATC_L2_MISC_CG
+#define ATCL2_1_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define ATCL2_1_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define ATCL2_1_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define ATCL2_1_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define ATCL2_1_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATCL2_1_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//ATCL2_1_ATC_L2_MEM_POWER_LS
+#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATCL2_1_ATC_L2_CGTT_CLK_CTRL
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//ATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE__SHIFT 0x11
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE_MASK 0x00020000L
+//ATCL2_1_ATC_L2_CNTL4
+#define ATCL2_1_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x0
+#define ATCL2_1_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0xa
+#define ATCL2_1_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000003FFL
+#define ATCL2_1_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000FFC00L
+//ATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES
+#define ATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0
+#define ATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec:1
+//VML2PF1_VM_L2_CNTL
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define VML2PF1_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define VML2PF1_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define VML2PF1_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define VML2PF1_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define VML2PF1_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define VML2PF1_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define VML2PF1_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define VML2PF1_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VML2PF1_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define VML2PF1_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VML2PF1_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VML2PF1_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VML2PF1_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VML2PF1_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VML2PF1_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define VML2PF1_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//VML2PF1_VM_L2_CNTL2
+#define VML2PF1_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define VML2PF1_VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define VML2PF1_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define VML2PF1_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define VML2PF1_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define VML2PF1_VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define VML2PF1_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define VML2PF1_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VML2PF1_VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VML2PF1_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VML2PF1_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VML2PF1_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define VML2PF1_VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define VML2PF1_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//VML2PF1_VM_L2_CNTL3
+#define VML2PF1_VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define VML2PF1_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define VML2PF1_VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VML2PF1_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//VML2PF1_VM_L2_STATUS
+#define VML2PF1_VM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define VML2PF1_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define VML2PF1_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define VML2PF1_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define VML2PF1_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define VML2PF1_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define VML2PF1_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define VML2PF1_VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VML2PF1_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define VML2PF1_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define VML2PF1_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define VML2PF1_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define VML2PF1_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define VML2PF1_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VML2PF1_VM_L2_PROTECTION_FAULT_CNTL
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3
+#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4
+#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_PROTECTION_FAULT_STATUS
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+//VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32
+#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32
+#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//VML2PF1_VM_L2_CNTL4
+#define VML2PF1_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define VML2PF1_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define VML2PF1_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define VML2PF1_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define VML2PF1_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define VML2PF1_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define VML2PF1_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define VML2PF1_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+//VML2PF1_VM_L2_MM_GROUP_RT_CLASSES
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VML2PF1_VM_L2_CACHE_PARITY_CNTL
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//VML2PF1_VM_L2_CGTT_CLK_CTRL
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+
+
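+/*
+ * Editorial illustration (not part of the generated header): the complementary
+ * write pattern clears a field with its _MASK and inserts the new value with
+ * its __SHIFT.  Hypothetical helper, illustrative name only, not an amdgpu API.
+ */
+static inline unsigned int
+example_vml2pf1_set_bigk_fragment_size(unsigned int l2_cntl3, unsigned int frag)
+{
+	/* clear the old field, then or in the shifted, mask-limited new value */
+	l2_cntl3 &= ~VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK;
+	l2_cntl3 |= (frag << VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT) &
+		    VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK;
+	return l2_cntl3;
+}
+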
+// addressBlock: mmhub_utcl2_vml2vcdec:1
+//VML2VC1_VM_CONTEXT0_CNTL
+#define VML2VC1_VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT1_CNTL
+#define VML2VC1_VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT2_CNTL
+#define VML2VC1_VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT3_CNTL
+#define VML2VC1_VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT4_CNTL
+#define VML2VC1_VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT5_CNTL
+#define VML2VC1_VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT6_CNTL
+#define VML2VC1_VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT7_CNTL
+#define VML2VC1_VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT8_CNTL
+#define VML2VC1_VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT9_CNTL
+#define VML2VC1_VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT10_CNTL
+#define VML2VC1_VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT11_CNTL
+#define VML2VC1_VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT12_CNTL
+#define VML2VC1_VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT13_CNTL
+#define VML2VC1_VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT14_CNTL
+#define VML2VC1_VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXT15_CNTL
+#define VML2VC1_VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VML2VC1_VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VML2VC1_VM_CONTEXTS_DISABLE
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//VML2VC1_VM_INVALIDATE_ENG0_SEM
+#define VML2VC1_VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG1_SEM
+#define VML2VC1_VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG2_SEM
+#define VML2VC1_VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG3_SEM
+#define VML2VC1_VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG4_SEM
+#define VML2VC1_VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG5_SEM
+#define VML2VC1_VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG6_SEM
+#define VML2VC1_VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG7_SEM
+#define VML2VC1_VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG8_SEM
+#define VML2VC1_VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG9_SEM
+#define VML2VC1_VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG10_SEM
+#define VML2VC1_VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG11_SEM
+#define VML2VC1_VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG12_SEM
+#define VML2VC1_VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG13_SEM
+#define VML2VC1_VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG14_SEM
+#define VML2VC1_VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG15_SEM
+#define VML2VC1_VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG16_SEM
+#define VML2VC1_VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG17_SEM
+#define VML2VC1_VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//VML2VC1_VM_INVALIDATE_ENG0_REQ
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG1_REQ
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG2_REQ
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG3_REQ
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG4_REQ
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG5_REQ
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG6_REQ
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG7_REQ
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG8_REQ
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG9_REQ
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG10_REQ
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG11_REQ
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG12_REQ
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG13_REQ
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG14_REQ
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG15_REQ
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG16_REQ
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG17_REQ
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VML2VC1_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VML2VC1_VM_INVALIDATE_ENG0_ACK
+#define VML2VC1_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG1_ACK
+#define VML2VC1_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG2_ACK
+#define VML2VC1_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG3_ACK
+#define VML2VC1_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG4_ACK
+#define VML2VC1_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG5_ACK
+#define VML2VC1_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG6_ACK
+#define VML2VC1_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG7_ACK
+#define VML2VC1_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG8_ACK
+#define VML2VC1_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG9_ACK
+#define VML2VC1_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG10_ACK
+#define VML2VC1_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG11_ACK
+#define VML2VC1_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG12_ACK
+#define VML2VC1_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG13_ACK
+#define VML2VC1_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG14_ACK
+#define VML2VC1_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG15_ACK
+#define VML2VC1_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG16_ACK
+#define VML2VC1_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG17_ACK
+#define VML2VC1_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define VML2VC1_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VML2VC1_VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
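Editor's note: the generated header above pairs every register field with a `__SHIFT` and a `_MASK` define. Below is a minimal, standalone sketch of how such pairs are typically consumed; it follows the same shape as the amdgpu REG_GET_FIELD()/REG_SET_FIELD() helpers, but GET_FIELD/SET_FIELD and the raw register value here are illustrative only, not code from this patch.

#include <stdint.h>
#include <stdio.h>

/* Copied from the generated header above so this sketch builds standalone. */
#define VML2VC1_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define VML2VC1_VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
#define VML2VC1_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define VML2VC1_VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L

/* Illustrative helpers; same pasting pattern the driver's field macros use. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t ack = 0x0001000f;	/* hypothetical readback, not real data */

	printf("per-VMID ack bits = 0x%04x\n",
	       (unsigned int)GET_FIELD(ack, VML2VC1_VM_INVALIDATE_ENG17_ACK,
				       PER_VMID_INVALIDATE_ACK));
	printf("semaphore = %u\n",
	       (unsigned int)GET_FIELD(ack, VML2VC1_VM_INVALIDATE_ENG17_ACK,
				       SEMAPHORE));
	return 0;
}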
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec:1
+//VMSHAREDPF1_MC_VM_NB_MMIOBASE
+#define VMSHAREDPF1_MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//VMSHAREDPF1_MC_VM_NB_MMIOLIMIT
+#define VMSHAREDPF1_MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//VMSHAREDPF1_MC_VM_NB_PCI_CTRL
+#define VMSHAREDPF1_MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define VMSHAREDPF1_MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//VMSHAREDPF1_MC_VM_NB_PCI_ARB
+#define VMSHAREDPF1_MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define VMSHAREDPF1_MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//VMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1
+#define VMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define VMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2
+#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//VMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2
+#define VMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//VMSHAREDPF1_MC_VM_FB_OFFSET
+#define VMSHAREDPF1_MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//VMSHAREDPF1_MC_VM_STEERING
+#define VMSHAREDPF1_MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ
+#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//VMSHAREDPF1_MC_MEM_POWER_LS
+#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF1_MC_VM_APT_CNTL
+#define VMSHAREDPF1_MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define VMSHAREDPF1_MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define VMSHAREDPF1_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+//VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START
+#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END
+#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL
+#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x0000000FL
+#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x000000F0L
+//VMSHAREDPF1_MC_VM_XGMI_LFB_SIZE
+#define VMSHAREDPF1_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0001FFFFL
+//VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL
+#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
+#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec:1
+//VMSHAREDVC1_MC_VM_FB_LOCATION_BASE
+#define VMSHAREDVC1_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//VMSHAREDVC1_MC_VM_FB_LOCATION_TOP
+#define VMSHAREDVC1_MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//VMSHAREDVC1_MC_VM_AGP_TOP
+#define VMSHAREDVC1_MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//VMSHAREDVC1_MC_VM_AGP_BOT
+#define VMSHAREDVC1_MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//VMSHAREDVC1_MC_VM_AGP_BASE
+#define VMSHAREDVC1_MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
+#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec:1
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1
+#define VMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define VMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//VMSHAREDHV1_MC_VM_MARC_BASE_LO_0
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_BASE_LO_1
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_BASE_LO_2
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_BASE_LO_3
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_BASE_HI_0
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_BASE_HI_1
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_BASE_HI_2
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_BASE_HI_3
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_1
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_2
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_3
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_LEN_LO_0
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_LEN_LO_1
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_LEN_LO_2
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_LEN_LO_3
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//VMSHAREDHV1_MC_VM_MARC_LEN_HI_0
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_LEN_HI_1
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_LEN_HI_2
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//VMSHAREDHV1_MC_VM_MARC_LEN_HI_3
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//VMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER
+#define VMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define VMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//VMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define VMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define VMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID
+#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L
+#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L
+
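Editor's note: the MC_VM_FB_SIZE_OFFSET_VFn registers in the block above carry two 16-bit fields (VF_FB_SIZE in the low half, VF_FB_OFFSET in the high half), as the masks show. The sketch below only demonstrates packing those fields with the generated defines; pack_fb_size_offset and the field values are made up for illustration, and the units the hardware assigns to these fields are not asserted here.

#include <stdint.h>
#include <stdio.h>

/* Copied from the generated header above so this sketch builds standalone. */
#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L

/* Pack the two 16-bit fields into one 32-bit register value. */
static uint32_t pack_fb_size_offset(uint32_t size, uint32_t offset)
{
	uint32_t val = 0;

	val |= (size << VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT) &
	       VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK;
	val |= (offset << VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT) &
	       VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK;
	return val;
}

int main(void)
{
	/* 0x40 and 0x80 are made-up field values for demonstration only. */
	printf("FB_SIZE_OFFSET_VF0 = 0x%08x\n",
	       (unsigned int)pack_fb_size_offset(0x40, 0x80));
	return 0;
}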
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec:1
+//ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO
+#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI
+#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec:1
+//ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_utcl2_vml2pldec:1
+//VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_utcl2_vml2prdec:1
+//VML2PR1_MC_VM_L2_PERFCOUNTER_LO
+#define VML2PR1_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define VML2PR1_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//VML2PR1_MC_VM_L2_PERFCOUNTER_HI
+#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+#endif
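
Illustrative sketch (not part of the patch): the VML2PR1_MC_VM_L2_PERFCOUNTER_LO/HI fields defined just above split one performance counter across a full 32-bit LO register and a 16-bit COUNTER_HI field. A consumer would typically reassemble the two reads into a single 48-bit sample; the helper below only shows that bit arithmetic and deliberately leaves the actual register reads (RREG32 and friends) out of scope.

#include <stdint.h>

/* Field definitions repeated verbatim from the header above. */
#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK   0x0000FFFFL

/*
 * Combine the 32-bit LO register with the 16-bit COUNTER_HI field of the
 * HI register into one 48-bit counter value.  'lo' and 'hi' stand in for
 * raw values already read from the two registers.
 */
static inline uint64_t vml2_perfcounter_48(uint32_t lo, uint32_t hi)
{
	uint64_t high = (hi & VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK)
			>> VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT;

	return (high << 32) | lo;
}
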
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_offset.h
new file mode 100644
index 000000000000..1fe51fcb648e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_offset.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_12_0_0_OFFSET_HEADER
+#define _mp_12_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+// base address: 0x0
+#define mmMP0_SMN_C2PMSG_32 0x0060
+#define mmMP0_SMN_C2PMSG_32_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_33 0x0061
+#define mmMP0_SMN_C2PMSG_33_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_34 0x0062
+#define mmMP0_SMN_C2PMSG_34_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_35 0x0063
+#define mmMP0_SMN_C2PMSG_35_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_36 0x0064
+#define mmMP0_SMN_C2PMSG_36_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_37 0x0065
+#define mmMP0_SMN_C2PMSG_37_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_38 0x0066
+#define mmMP0_SMN_C2PMSG_38_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_39 0x0067
+#define mmMP0_SMN_C2PMSG_39_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_40 0x0068
+#define mmMP0_SMN_C2PMSG_40_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_41 0x0069
+#define mmMP0_SMN_C2PMSG_41_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_42 0x006a
+#define mmMP0_SMN_C2PMSG_42_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_43 0x006b
+#define mmMP0_SMN_C2PMSG_43_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_44 0x006c
+#define mmMP0_SMN_C2PMSG_44_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_45 0x006d
+#define mmMP0_SMN_C2PMSG_45_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_46 0x006e
+#define mmMP0_SMN_C2PMSG_46_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_47 0x006f
+#define mmMP0_SMN_C2PMSG_47_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_48 0x0070
+#define mmMP0_SMN_C2PMSG_48_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_49 0x0071
+#define mmMP0_SMN_C2PMSG_49_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_50 0x0072
+#define mmMP0_SMN_C2PMSG_50_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_51 0x0073
+#define mmMP0_SMN_C2PMSG_51_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_52 0x0074
+#define mmMP0_SMN_C2PMSG_52_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_53 0x0075
+#define mmMP0_SMN_C2PMSG_53_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_54 0x0076
+#define mmMP0_SMN_C2PMSG_54_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_55 0x0077
+#define mmMP0_SMN_C2PMSG_55_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_56 0x0078
+#define mmMP0_SMN_C2PMSG_56_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_57 0x0079
+#define mmMP0_SMN_C2PMSG_57_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_58 0x007a
+#define mmMP0_SMN_C2PMSG_58_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_59 0x007b
+#define mmMP0_SMN_C2PMSG_59_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_60 0x007c
+#define mmMP0_SMN_C2PMSG_60_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_61 0x007d
+#define mmMP0_SMN_C2PMSG_61_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_62 0x007e
+#define mmMP0_SMN_C2PMSG_62_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_63 0x007f
+#define mmMP0_SMN_C2PMSG_63_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_64 0x0080
+#define mmMP0_SMN_C2PMSG_64_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_65 0x0081
+#define mmMP0_SMN_C2PMSG_65_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_66 0x0082
+#define mmMP0_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_67 0x0083
+#define mmMP0_SMN_C2PMSG_67_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_68 0x0084
+#define mmMP0_SMN_C2PMSG_68_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_69 0x0085
+#define mmMP0_SMN_C2PMSG_69_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_70 0x0086
+#define mmMP0_SMN_C2PMSG_70_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_71 0x0087
+#define mmMP0_SMN_C2PMSG_71_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_72 0x0088
+#define mmMP0_SMN_C2PMSG_72_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_73 0x0089
+#define mmMP0_SMN_C2PMSG_73_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_74 0x008a
+#define mmMP0_SMN_C2PMSG_74_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_75 0x008b
+#define mmMP0_SMN_C2PMSG_75_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_76 0x008c
+#define mmMP0_SMN_C2PMSG_76_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_77 0x008d
+#define mmMP0_SMN_C2PMSG_77_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_78 0x008e
+#define mmMP0_SMN_C2PMSG_78_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_79 0x008f
+#define mmMP0_SMN_C2PMSG_79_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_80 0x0090
+#define mmMP0_SMN_C2PMSG_80_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_81 0x0091
+#define mmMP0_SMN_C2PMSG_81_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_82 0x0092
+#define mmMP0_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_83 0x0093
+#define mmMP0_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_84 0x0094
+#define mmMP0_SMN_C2PMSG_84_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_85 0x0095
+#define mmMP0_SMN_C2PMSG_85_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_86 0x0096
+#define mmMP0_SMN_C2PMSG_86_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_87 0x0097
+#define mmMP0_SMN_C2PMSG_87_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_88 0x0098
+#define mmMP0_SMN_C2PMSG_88_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_89 0x0099
+#define mmMP0_SMN_C2PMSG_89_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_90 0x009a
+#define mmMP0_SMN_C2PMSG_90_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_91 0x009b
+#define mmMP0_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_92 0x009c
+#define mmMP0_SMN_C2PMSG_92_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_93 0x009d
+#define mmMP0_SMN_C2PMSG_93_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_94 0x009e
+#define mmMP0_SMN_C2PMSG_94_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_95 0x009f
+#define mmMP0_SMN_C2PMSG_95_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_96 0x00a0
+#define mmMP0_SMN_C2PMSG_96_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_97 0x00a1
+#define mmMP0_SMN_C2PMSG_97_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_98 0x00a2
+#define mmMP0_SMN_C2PMSG_98_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_99 0x00a3
+#define mmMP0_SMN_C2PMSG_99_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_100 0x00a4
+#define mmMP0_SMN_C2PMSG_100_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_101 0x00a5
+#define mmMP0_SMN_C2PMSG_101_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_102 0x00a6
+#define mmMP0_SMN_C2PMSG_102_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_103 0x00a7
+#define mmMP0_SMN_C2PMSG_103_BASE_IDX 0
+#define mmMP0_SMN_IH_CREDIT 0x00c1
+#define mmMP0_SMN_IH_CREDIT_BASE_IDX 0
+#define mmMP0_SMN_IH_SW_INT 0x00c2
+#define mmMP0_SMN_IH_SW_INT_BASE_IDX 0
+#define mmMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define mmMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+// base address: 0x0
+#define mmMP1_SMN_C2PMSG_32 0x0260
+#define mmMP1_SMN_C2PMSG_32_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_33 0x0261
+#define mmMP1_SMN_C2PMSG_33_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_34 0x0262
+#define mmMP1_SMN_C2PMSG_34_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_35 0x0263
+#define mmMP1_SMN_C2PMSG_35_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_36 0x0264
+#define mmMP1_SMN_C2PMSG_36_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_37 0x0265
+#define mmMP1_SMN_C2PMSG_37_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_38 0x0266
+#define mmMP1_SMN_C2PMSG_38_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_39 0x0267
+#define mmMP1_SMN_C2PMSG_39_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_40 0x0268
+#define mmMP1_SMN_C2PMSG_40_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_41 0x0269
+#define mmMP1_SMN_C2PMSG_41_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_42 0x026a
+#define mmMP1_SMN_C2PMSG_42_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_43 0x026b
+#define mmMP1_SMN_C2PMSG_43_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_44 0x026c
+#define mmMP1_SMN_C2PMSG_44_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_45 0x026d
+#define mmMP1_SMN_C2PMSG_45_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_46 0x026e
+#define mmMP1_SMN_C2PMSG_46_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_47 0x026f
+#define mmMP1_SMN_C2PMSG_47_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_48 0x0270
+#define mmMP1_SMN_C2PMSG_48_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_49 0x0271
+#define mmMP1_SMN_C2PMSG_49_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_50 0x0272
+#define mmMP1_SMN_C2PMSG_50_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_51 0x0273
+#define mmMP1_SMN_C2PMSG_51_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_52 0x0274
+#define mmMP1_SMN_C2PMSG_52_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_53 0x0275
+#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_54 0x0276
+#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_55 0x0277
+#define mmMP1_SMN_C2PMSG_55_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_56 0x0278
+#define mmMP1_SMN_C2PMSG_56_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_57 0x0279
+#define mmMP1_SMN_C2PMSG_57_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_58 0x027a
+#define mmMP1_SMN_C2PMSG_58_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_59 0x027b
+#define mmMP1_SMN_C2PMSG_59_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_60 0x027c
+#define mmMP1_SMN_C2PMSG_60_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_61 0x027d
+#define mmMP1_SMN_C2PMSG_61_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_62 0x027e
+#define mmMP1_SMN_C2PMSG_62_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_63 0x027f
+#define mmMP1_SMN_C2PMSG_63_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_64 0x0280
+#define mmMP1_SMN_C2PMSG_64_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_65 0x0281
+#define mmMP1_SMN_C2PMSG_65_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_67 0x0283
+#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_68 0x0284
+#define mmMP1_SMN_C2PMSG_68_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_69 0x0285
+#define mmMP1_SMN_C2PMSG_69_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_70 0x0286
+#define mmMP1_SMN_C2PMSG_70_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_71 0x0287
+#define mmMP1_SMN_C2PMSG_71_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_72 0x0288
+#define mmMP1_SMN_C2PMSG_72_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_73 0x0289
+#define mmMP1_SMN_C2PMSG_73_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_74 0x028a
+#define mmMP1_SMN_C2PMSG_74_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_75 0x028b
+#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_76 0x028c
+#define mmMP1_SMN_C2PMSG_76_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_77 0x028d
+#define mmMP1_SMN_C2PMSG_77_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_78 0x028e
+#define mmMP1_SMN_C2PMSG_78_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_79 0x028f
+#define mmMP1_SMN_C2PMSG_79_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_80 0x0290
+#define mmMP1_SMN_C2PMSG_80_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_81 0x0291
+#define mmMP1_SMN_C2PMSG_81_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_83 0x0293
+#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_84 0x0294
+#define mmMP1_SMN_C2PMSG_84_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_85 0x0295
+#define mmMP1_SMN_C2PMSG_85_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_86 0x0296
+#define mmMP1_SMN_C2PMSG_86_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_87 0x0297
+#define mmMP1_SMN_C2PMSG_87_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_88 0x0298
+#define mmMP1_SMN_C2PMSG_88_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_89 0x0299
+#define mmMP1_SMN_C2PMSG_89_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_91 0x029b
+#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_92 0x029c
+#define mmMP1_SMN_C2PMSG_92_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_93 0x029d
+#define mmMP1_SMN_C2PMSG_93_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_94 0x029e
+#define mmMP1_SMN_C2PMSG_94_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_95 0x029f
+#define mmMP1_SMN_C2PMSG_95_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_96 0x02a0
+#define mmMP1_SMN_C2PMSG_96_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_97 0x02a1
+#define mmMP1_SMN_C2PMSG_97_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_98 0x02a2
+#define mmMP1_SMN_C2PMSG_98_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_99 0x02a3
+#define mmMP1_SMN_C2PMSG_99_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_100 0x02a4
+#define mmMP1_SMN_C2PMSG_100_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_101 0x02a5
+#define mmMP1_SMN_C2PMSG_101_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_102 0x02a6
+#define mmMP1_SMN_C2PMSG_102_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_103 0x02a7
+#define mmMP1_SMN_C2PMSG_103_BASE_IDX 0
+#define mmMP1_SMN_IH_CREDIT 0x02c1
+#define mmMP1_SMN_IH_CREDIT_BASE_IDX 0
+#define mmMP1_SMN_IH_SW_INT 0x02c2
+#define mmMP1_SMN_IH_SW_INT_BASE_IDX 0
+#define mmMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define mmMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+#define mmMP1_SMN_FPS_CNT 0x02c4
+#define mmMP1_SMN_FPS_CNT_BASE_IDX 0
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_sh_mask.h
new file mode 100644
index 000000000000..c78151e624b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_sh_mask.h
@@ -0,0 +1,866 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_12_0_0_SH_MASK_HEADER
+#define _mp_12_0_0_SH_MASK_HEADER
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+//MP0_SMN_C2PMSG_32
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_33
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_34
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_35
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_36
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_37
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_38
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_39
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_40
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_41
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_42
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_43
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_44
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_45
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_46
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_47
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_48
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_49
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_50
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_51
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_52
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_53
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_54
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_55
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_56
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_57
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_58
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_59
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_60
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_61
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_62
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_63
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_64
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_65
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_66
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_67
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_68
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_69
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_70
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_71
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_72
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_73
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_74
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_75
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_76
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_77
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_78
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_79
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_80
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_81
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_82
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_83
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_84
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_85
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_86
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_87
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_88
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_89
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_90
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_91
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_92
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_93
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_94
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_95
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_96
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_97
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_98
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_99
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_100
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_101
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_102
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_103
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_IH_CREDIT
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_SMN_IH_SW_INT
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mp_SmuMp0Pub_CruDec
+//MP0_IH_CREDIT
+#define MP0_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_IH_SW_INT
+#define MP0_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_IH_SW_INT__VALID_MASK 0x00000100L
+//MP0_IH_SW_INT_CTRL
+#define MP0_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: mp_SmuMp1Pub_CruDec
+//MP1_FIRMWARE_FLAGS
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+//MP1_C2PMSG_0
+#define MP1_C2PMSG_0__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_1
+#define MP1_C2PMSG_1__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_2
+#define MP1_C2PMSG_2__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_3
+#define MP1_C2PMSG_3__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_4
+#define MP1_C2PMSG_4__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_5
+#define MP1_C2PMSG_5__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_6
+#define MP1_C2PMSG_6__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_7
+#define MP1_C2PMSG_7__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_8
+#define MP1_C2PMSG_8__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_9
+#define MP1_C2PMSG_9__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_10
+#define MP1_C2PMSG_10__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_11
+#define MP1_C2PMSG_11__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_12
+#define MP1_C2PMSG_12__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_13
+#define MP1_C2PMSG_13__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_14
+#define MP1_C2PMSG_14__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_15
+#define MP1_C2PMSG_15__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_16
+#define MP1_C2PMSG_16__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_17
+#define MP1_C2PMSG_17__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_18
+#define MP1_C2PMSG_18__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_19
+#define MP1_C2PMSG_19__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_20
+#define MP1_C2PMSG_20__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_21
+#define MP1_C2PMSG_21__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_22
+#define MP1_C2PMSG_22__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_23
+#define MP1_C2PMSG_23__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_24
+#define MP1_C2PMSG_24__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_25
+#define MP1_C2PMSG_25__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_26
+#define MP1_C2PMSG_26__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_27
+#define MP1_C2PMSG_27__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_28
+#define MP1_C2PMSG_28__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_29
+#define MP1_C2PMSG_29__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_30
+#define MP1_C2PMSG_30__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_31
+#define MP1_C2PMSG_31__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_0
+#define MP1_P2CMSG_0__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_1
+#define MP1_P2CMSG_1__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_2
+#define MP1_P2CMSG_2__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_3
+#define MP1_P2CMSG_3__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_INTEN
+#define MP1_P2CMSG_INTEN__INTEN__SHIFT 0x0
+#define MP1_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
+//MP1_P2CMSG_INTSTS
+#define MP1_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
+#define MP1_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
+#define MP1_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
+#define MP1_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
+#define MP1_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
+#define MP1_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
+#define MP1_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
+#define MP1_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
+//MP1_C2PMSG_32
+#define MP1_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_33
+#define MP1_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_34
+#define MP1_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_35
+#define MP1_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_36
+#define MP1_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_37
+#define MP1_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_38
+#define MP1_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_39
+#define MP1_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_40
+#define MP1_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_41
+#define MP1_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_42
+#define MP1_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_43
+#define MP1_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_44
+#define MP1_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_45
+#define MP1_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_46
+#define MP1_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_47
+#define MP1_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_48
+#define MP1_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_49
+#define MP1_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_50
+#define MP1_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_51
+#define MP1_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_52
+#define MP1_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_53
+#define MP1_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_54
+#define MP1_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_55
+#define MP1_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_56
+#define MP1_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_57
+#define MP1_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_58
+#define MP1_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_59
+#define MP1_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_60
+#define MP1_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_61
+#define MP1_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_62
+#define MP1_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_63
+#define MP1_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_64
+#define MP1_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_65
+#define MP1_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_66
+#define MP1_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_67
+#define MP1_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_68
+#define MP1_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_69
+#define MP1_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_70
+#define MP1_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_71
+#define MP1_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_72
+#define MP1_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_73
+#define MP1_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_74
+#define MP1_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_75
+#define MP1_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_76
+#define MP1_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_77
+#define MP1_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_78
+#define MP1_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_79
+#define MP1_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_80
+#define MP1_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_81
+#define MP1_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_82
+#define MP1_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_83
+#define MP1_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_84
+#define MP1_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_85
+#define MP1_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_86
+#define MP1_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_87
+#define MP1_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_88
+#define MP1_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_89
+#define MP1_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_90
+#define MP1_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_91
+#define MP1_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_92
+#define MP1_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_93
+#define MP1_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_94
+#define MP1_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_95
+#define MP1_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_96
+#define MP1_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_97
+#define MP1_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_98
+#define MP1_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_99
+#define MP1_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_100
+#define MP1_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_101
+#define MP1_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_102
+#define MP1_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_103
+#define MP1_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_IH_CREDIT
+#define MP1_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_IH_SW_INT
+#define MP1_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_IH_SW_INT_CTRL
+#define MP1_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_FPS_CNT
+#define MP1_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+
+
+#endif
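
Illustrative sketch (not part of the patch): every register in the sh_mask header above comes as a __SHIFT/_MASK pair, and driver code builds register values by clearing the field and OR-ing in the shifted, masked new value. The set_field() helper below is a hypothetical stand-in for the REG_SET_FIELD()-style macros the amdgpu driver normally uses; the MP1_SMN_IH_CREDIT fields are taken from the definitions above.

#include <stdint.h>

/* Field definitions repeated verbatim from the header above. */
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT    0x10
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK   0x00000003L
#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK      0x00FF0000L

/* Clear the field, then OR in the shifted value, truncated to the field width. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t v = 0;

	v = set_field(v, MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK,
		      MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT, 2);
	v = set_field(v, MP1_SMN_IH_CREDIT__CLIENT_ID_MASK,
		      MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT, 0x12);
	/* v now holds 0x00120002: client id 0x12, credit value 2. */
	return 0;
}
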
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h
index 88602479a1aa..ee8c15e4543d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h
@@ -74709,6 +74709,36 @@
//PCIE_PERF_COUNT1_TXCLK2
#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x0
#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK3
+#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK3__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER0_UPPER__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER1_UPPER__SHIFT 0x18
+#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK3__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER0_UPPER_MASK 0x00FF0000L
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER1_UPPER_MASK 0xFF000000L
+//PCIE_PERF_COUNT0_TXCLK3
+#define PCIE_PERF_COUNT0_TXCLK3__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK3__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK3
+#define PCIE_PERF_COUNT1_TXCLK3__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK3__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK4
+#define PCIE_PERF_CNTL_TXCLK4__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK4__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER0_UPPER__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER1_UPPER__SHIFT 0x18
+#define PCIE_PERF_CNTL_TXCLK4__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK4__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER0_UPPER_MASK 0x00FF0000L
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER1_UPPER_MASK 0xFF000000L
+//PCIE_PERF_COUNT0_TXCLK4
+#define PCIE_PERF_COUNT0_TXCLK4__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK4__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK4
+#define PCIE_PERF_COUNT1_TXCLK4__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK4__COUNTER1_MASK 0xFFFFFFFFL
//PCIE_PRBS_CLR
#define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x0
#define PCIE_PRBS_CLR__PRBS_POLARITY_EN__SHIFT 0x18
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
index caf5ffdc130a..6702575bc6e3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
@@ -50,6 +50,12 @@
#define smnPCIE_PERF_CNTL_TXCLK2 0x11180254
#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258
#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c
+#define smnPCIE_PERF_CNTL_TXCLK3 0x1118021c
+#define smnPCIE_PERF_COUNT0_TXCLK3 0x11180220
+#define smnPCIE_PERF_COUNT1_TXCLK3 0x11180224
+#define smnPCIE_PERF_CNTL_TXCLK4 0x11180228
+#define smnPCIE_PERF_COUNT0_TXCLK4 0x1118022c
+#define smnPCIE_PERF_COUNT1_TXCLK4 0x11180230
#define smnPCIE_RX_NUM_NAK 0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
index 4bcacf529852..991128bb9476 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -22,6 +22,9 @@
#ifndef _nbio_7_4_0_SMN_HEADER
#define _nbio_7_4_0_SMN_HEADER
+// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk
+// base address: 0x10100000
+#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define smnCPM_CONTROL 0x11180460
@@ -53,4 +56,13 @@
#define smnPCIE_RX_NUM_NAK 0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+// addressBlock: nbio_iohub_nb_misc_misc_cfgdec
+// base address: 0x13a10000
+#define smnIOHC_INTERRUPT_EOI 0x13a10120
+
+// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec
+// base address: 0x13a20000
+#define smnRAS_GLOBAL_STATUS_LO 0x13a20020
+#define smnRAS_GLOBAL_STATUS_HI 0x13a20024
+
#endif // _nbio_7_4_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index 994e796a28d7..ce5830ebe095 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2793,8 +2793,8 @@
#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2
#define mmBIF_FB_EN 0x00ff
#define mmBIF_FB_EN_BASE_IDX 2
-#define mmBIF_BUSY_DELAY_CNTR 0x0100
-#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2
+#define mmBIF_INTR_CNTL 0x0100
+#define mmBIF_INTR_CNTL_BASE_IDX 2
#define mmBIF_MST_TRANS_PENDING_VF 0x0109
#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2
#define mmBIF_SLV_TRANS_PENDING_VF 0x010a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
index d467b939c971..07f04b2b5bdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -20420,9 +20420,9 @@
#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
-//BIF_BUSY_DELAY_CNTR
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL
+//BIF_INTR_CNTL
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
//BIF_MST_TRANS_PENDING_VF
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
@@ -48436,4 +48436,47 @@
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+//IOHC_INTERRUPT_EOI
+#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0
+#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1
+#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2
+#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L
+#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L
+#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L
+
+//RAS_GLOBAL_STATUS_LO
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L
+//RAS_GLOBAL_STATUS_HI
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
index 1ee3a2329ee4..096d878eb1de 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
@@ -588,11 +588,15 @@
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
@@ -1109,7 +1113,11 @@
#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
//IH_CHICKEN
#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
//IH_MMHUB_CNTL
#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h
index 0a6b072d191e..46466ae77f19 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,18 +18,10 @@
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _mmhub_9_4_0_SH_MASK_HEADER
-#define _mmhub_9_4_0_SH_MASK_HEADER
+#ifndef _rsmu_0_0_2_OFFSET_HEADER
+#define _rsmu_0_0_2_OFFSET_HEADER
-
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-//MC_VM_XGMI_LFB_CNTL
-#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
-#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
-//MC_VM_XGMI_LFB_SIZE
-#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
-#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+#define mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU 0x0d91
+#define mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU_BASE_IDX 0
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h
new file mode 100644
index 000000000000..ea0acb598254
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _rsmu_0_0_2_SH_MASK_HEADER
+#define _rsmu_0_0_2_SH_MASK_HEADER
+
+//RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU
+#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_WREN__SHIFT 0x0
+#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_INSTANCE__SHIFT 0x10
+#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_MODE_EN__SHIFT 0x1f
+#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_WREN_MASK 0x0000FFFFL
+#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_INSTANCE_MASK 0x000F0000L
+#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_MODE_EN_MASK 0x80000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h
new file mode 100644
index 000000000000..ff5df90071e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h
@@ -0,0 +1,1051 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma0_4_2_2_OFFSET_HEADER
+#define _sdma0_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma0_sdma0dec
+// base address: 0x4980
+#define mmSDMA0_UCODE_ADDR 0x0000
+#define mmSDMA0_UCODE_ADDR_BASE_IDX 0
+#define mmSDMA0_UCODE_DATA 0x0001
+#define mmSDMA0_UCODE_DATA_BASE_IDX 0
+#define mmSDMA0_VM_CNTL 0x0004
+#define mmSDMA0_VM_CNTL_BASE_IDX 0
+#define mmSDMA0_VM_CTX_LO 0x0005
+#define mmSDMA0_VM_CTX_LO_BASE_IDX 0
+#define mmSDMA0_VM_CTX_HI 0x0006
+#define mmSDMA0_VM_CTX_HI_BASE_IDX 0
+#define mmSDMA0_ACTIVE_FCN_ID 0x0007
+#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSDMA0_VM_CTX_CNTL 0x0008
+#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0
+#define mmSDMA0_VIRT_RESET_REQ 0x0009
+#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSDMA0_VF_ENABLE 0x000a
+#define mmSDMA0_VF_ENABLE_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE0 0x000f
+#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE1 0x0010
+#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE2 0x0011
+#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE3 0x0012
+#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0
+#define mmSDMA0_MMHUB_CNTL 0x0013
+#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0
+#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define mmSDMA0_POWER_CNTL 0x001a
+#define mmSDMA0_POWER_CNTL_BASE_IDX 0
+#define mmSDMA0_CLK_CTRL 0x001b
+#define mmSDMA0_CLK_CTRL_BASE_IDX 0
+#define mmSDMA0_CNTL 0x001c
+#define mmSDMA0_CNTL_BASE_IDX 0
+#define mmSDMA0_CHICKEN_BITS 0x001d
+#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0
+#define mmSDMA0_GB_ADDR_CONFIG 0x001e
+#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
+#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define mmSDMA0_RB_RPTR_FETCH 0x0022
+#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0
+#define mmSDMA0_IB_OFFSET_FETCH 0x0023
+#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
+#define mmSDMA0_PROGRAM 0x0024
+#define mmSDMA0_PROGRAM_BASE_IDX 0
+#define mmSDMA0_STATUS_REG 0x0025
+#define mmSDMA0_STATUS_REG_BASE_IDX 0
+#define mmSDMA0_STATUS1_REG 0x0026
+#define mmSDMA0_STATUS1_REG_BASE_IDX 0
+#define mmSDMA0_RD_BURST_CNTL 0x0027
+#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0
+#define mmSDMA0_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
+#define mmSDMA0_UCODE_CHECKSUM 0x0029
+#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0
+#define mmSDMA0_F32_CNTL 0x002a
+#define mmSDMA0_F32_CNTL_BASE_IDX 0
+#define mmSDMA0_FREEZE 0x002b
+#define mmSDMA0_FREEZE_BASE_IDX 0
+#define mmSDMA0_PHASE0_QUANTUM 0x002c
+#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0
+#define mmSDMA0_PHASE1_QUANTUM 0x002d
+#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0
+#define mmSDMA_POWER_GATING 0x002e
+#define mmSDMA_POWER_GATING_BASE_IDX 0
+#define mmSDMA_PGFSM_CONFIG 0x002f
+#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define mmSDMA_PGFSM_WRITE 0x0030
+#define mmSDMA_PGFSM_WRITE_BASE_IDX 0
+#define mmSDMA_PGFSM_READ 0x0031
+#define mmSDMA_PGFSM_READ_BASE_IDX 0
+#define mmSDMA0_EDC_CONFIG 0x0032
+#define mmSDMA0_EDC_CONFIG_BASE_IDX 0
+#define mmSDMA0_BA_THRESHOLD 0x0033
+#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0
+#define mmSDMA0_ID 0x0034
+#define mmSDMA0_ID_BASE_IDX 0
+#define mmSDMA0_VERSION 0x0035
+#define mmSDMA0_VERSION_BASE_IDX 0
+#define mmSDMA0_EDC_COUNTER 0x0036
+#define mmSDMA0_EDC_COUNTER_BASE_IDX 0
+#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define mmSDMA0_STATUS2_REG 0x0038
+#define mmSDMA0_STATUS2_REG_BASE_IDX 0
+#define mmSDMA0_ATOMIC_CNTL 0x0039
+#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0
+#define mmSDMA0_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
+#define mmSDMA0_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
+#define mmSDMA0_UTCL1_CNTL 0x003c
+#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0
+#define mmSDMA0_UTCL1_WATERMK 0x003d
+#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0
+#define mmSDMA0_UTCL1_RD_STATUS 0x003e
+#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
+#define mmSDMA0_UTCL1_WR_STATUS 0x003f
+#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
+#define mmSDMA0_UTCL1_INV0 0x0040
+#define mmSDMA0_UTCL1_INV0_BASE_IDX 0
+#define mmSDMA0_UTCL1_INV1 0x0041
+#define mmSDMA0_UTCL1_INV1_BASE_IDX 0
+#define mmSDMA0_UTCL1_INV2 0x0042
+#define mmSDMA0_UTCL1_INV2_BASE_IDX 0
+#define mmSDMA0_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
+#define mmSDMA0_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
+#define mmSDMA0_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
+#define mmSDMA0_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
+#define mmSDMA0_UTCL1_TIMEOUT 0x0047
+#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
+#define mmSDMA0_UTCL1_PAGE 0x0048
+#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0
+#define mmSDMA0_POWER_CNTL_IDLE 0x0049
+#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0
+#define mmSDMA0_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
+#define mmSDMA0_CHICKEN_BITS_2 0x004b
+#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0
+#define mmSDMA0_STATUS3_REG 0x004c
+#define mmSDMA0_STATUS3_REG_BASE_IDX 0
+#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PHASE2_QUANTUM 0x004f
+#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0
+#define mmSDMA0_ERROR_LOG 0x0050
+#define mmSDMA0_ERROR_LOG_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG0 0x0051
+#define mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG1 0x0052
+#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG2 0x0053
+#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG3 0x0054
+#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
+#define mmSDMA0_F32_COUNTER 0x0055
+#define mmSDMA0_F32_COUNTER_BASE_IDX 0
+#define mmSDMA0_UNBREAKABLE 0x0056
+#define mmSDMA0_UNBREAKABLE_BASE_IDX 0
+#define mmSDMA0_PERFMON_CNTL 0x0057
+#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0
+#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
+#define mmSDMA0_CRD_CNTL 0x005b
+#define mmSDMA0_CRD_CNTL_BASE_IDX 0
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSDMA0_ULV_CNTL 0x005e
+#define mmSDMA0_ULV_CNTL_BASE_IDX 0
+#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define mmSDMA0_GFX_RB_CNTL 0x0080
+#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_RB_BASE 0x0081
+#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0
+#define mmSDMA0_GFX_RB_BASE_HI 0x0082
+#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR 0x0083
+#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR 0x0085
+#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_GFX_IB_CNTL 0x008a
+#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_IB_RPTR 0x008b
+#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_GFX_IB_OFFSET 0x008c
+#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_GFX_IB_BASE_LO 0x008d
+#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_GFX_IB_BASE_HI 0x008e
+#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_GFX_IB_SIZE 0x008f
+#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_GFX_SKIP_CNTL 0x0090
+#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_GFX_DOORBELL 0x0092
+#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0
+#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_STATUS 0x00a8
+#define mmSDMA0_GFX_STATUS_BASE_IDX 0
+#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_GFX_WATERMARK 0x00aa
+#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0
+#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_GFX_PREEMPT 0x00b0
+#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0
+#define mmSDMA0_GFX_DUMMY_REG 0x00b1
+#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_CNTL 0x00d8
+#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_BASE 0x00d9
+#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR 0x00db
+#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR 0x00dd
+#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_CNTL 0x00e2
+#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_RPTR 0x00e3
+#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_SIZE 0x00e7
+#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_PAGE_DOORBELL 0x00ea
+#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0
+#define mmSDMA0_PAGE_STATUS 0x0100
+#define mmSDMA0_PAGE_STATUS_BASE_IDX 0
+#define mmSDMA0_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_PAGE_WATERMARK 0x0102
+#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0
+#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_PAGE_PREEMPT 0x0108
+#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0
+#define mmSDMA0_PAGE_DUMMY_REG 0x0109
+#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_CNTL 0x0130
+#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_BASE 0x0131
+#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR 0x0133
+#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR 0x0135
+#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_CNTL 0x013a
+#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_RPTR 0x013b
+#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_OFFSET 0x013c
+#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_SIZE 0x013f
+#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC0_DOORBELL 0x0142
+#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC0_STATUS 0x0158
+#define mmSDMA0_RLC0_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC0_WATERMARK 0x015a
+#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC0_PREEMPT 0x0160
+#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC0_DUMMY_REG 0x0161
+#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_CNTL 0x0188
+#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_BASE 0x0189
+#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR 0x018b
+#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR 0x018d
+#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_CNTL 0x0192
+#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_RPTR 0x0193
+#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_OFFSET 0x0194
+#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_SIZE 0x0197
+#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC1_DOORBELL 0x019a
+#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC1_STATUS 0x01b0
+#define mmSDMA0_RLC1_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC1_WATERMARK 0x01b2
+#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC1_PREEMPT 0x01b8
+#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_CNTL 0x01e0
+#define mmSDMA0_RLC2_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_BASE 0x01e1
+#define mmSDMA0_RLC2_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA0_RLC2_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_RPTR 0x01e3
+#define mmSDMA0_RLC2_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA0_RLC2_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_WPTR 0x01e5
+#define mmSDMA0_RLC2_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA0_RLC2_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_CNTL 0x01ea
+#define mmSDMA0_RLC2_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_RPTR 0x01eb
+#define mmSDMA0_RLC2_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA0_RLC2_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA0_RLC2_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA0_RLC2_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_SIZE 0x01ef
+#define mmSDMA0_RLC2_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA0_RLC2_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA0_RLC2_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC2_DOORBELL 0x01f2
+#define mmSDMA0_RLC2_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC2_STATUS 0x0208
+#define mmSDMA0_RLC2_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA0_RLC2_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC2_WATERMARK 0x020a
+#define mmSDMA0_RLC2_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA0_RLC2_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA0_RLC2_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA0_RLC2_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA0_RLC2_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC2_PREEMPT 0x0210
+#define mmSDMA0_RLC2_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC2_DUMMY_REG 0x0211
+#define mmSDMA0_RLC2_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA0_RLC2_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA0_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA0_RLC2_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA0_RLC2_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA0_RLC2_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA0_RLC2_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA0_RLC2_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA0_RLC2_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA0_RLC2_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA0_RLC2_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA0_RLC2_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA0_RLC2_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_CNTL 0x0238
+#define mmSDMA0_RLC3_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_BASE 0x0239
+#define mmSDMA0_RLC3_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA0_RLC3_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_RPTR 0x023b
+#define mmSDMA0_RLC3_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA0_RLC3_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_WPTR 0x023d
+#define mmSDMA0_RLC3_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA0_RLC3_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_CNTL 0x0242
+#define mmSDMA0_RLC3_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_RPTR 0x0243
+#define mmSDMA0_RLC3_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_OFFSET 0x0244
+#define mmSDMA0_RLC3_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA0_RLC3_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA0_RLC3_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_SIZE 0x0247
+#define mmSDMA0_RLC3_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA0_RLC3_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA0_RLC3_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC3_DOORBELL 0x024a
+#define mmSDMA0_RLC3_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC3_STATUS 0x0260
+#define mmSDMA0_RLC3_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA0_RLC3_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC3_WATERMARK 0x0262
+#define mmSDMA0_RLC3_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA0_RLC3_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA0_RLC3_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA0_RLC3_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA0_RLC3_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC3_PREEMPT 0x0268
+#define mmSDMA0_RLC3_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC3_DUMMY_REG 0x0269
+#define mmSDMA0_RLC3_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA0_RLC3_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA0_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA0_RLC3_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA0_RLC3_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA0_RLC3_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA0_RLC3_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA0_RLC3_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA0_RLC3_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA0_RLC3_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA0_RLC3_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA0_RLC3_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA0_RLC3_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_CNTL 0x0290
+#define mmSDMA0_RLC4_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_BASE 0x0291
+#define mmSDMA0_RLC4_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA0_RLC4_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_RPTR 0x0293
+#define mmSDMA0_RLC4_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA0_RLC4_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_WPTR 0x0295
+#define mmSDMA0_RLC4_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA0_RLC4_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_CNTL 0x029a
+#define mmSDMA0_RLC4_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_RPTR 0x029b
+#define mmSDMA0_RLC4_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_OFFSET 0x029c
+#define mmSDMA0_RLC4_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA0_RLC4_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA0_RLC4_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_SIZE 0x029f
+#define mmSDMA0_RLC4_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA0_RLC4_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA0_RLC4_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC4_DOORBELL 0x02a2
+#define mmSDMA0_RLC4_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC4_STATUS 0x02b8
+#define mmSDMA0_RLC4_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA0_RLC4_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC4_WATERMARK 0x02ba
+#define mmSDMA0_RLC4_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA0_RLC4_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA0_RLC4_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA0_RLC4_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA0_RLC4_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC4_PREEMPT 0x02c0
+#define mmSDMA0_RLC4_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA0_RLC4_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA0_RLC4_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA0_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA0_RLC4_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA0_RLC4_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA0_RLC4_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA0_RLC4_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA0_RLC4_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA0_RLC4_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA0_RLC4_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA0_RLC4_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA0_RLC4_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA0_RLC4_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_CNTL 0x02e8
+#define mmSDMA0_RLC5_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_BASE 0x02e9
+#define mmSDMA0_RLC5_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA0_RLC5_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_RPTR 0x02eb
+#define mmSDMA0_RLC5_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA0_RLC5_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_WPTR 0x02ed
+#define mmSDMA0_RLC5_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA0_RLC5_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_CNTL 0x02f2
+#define mmSDMA0_RLC5_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_RPTR 0x02f3
+#define mmSDMA0_RLC5_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA0_RLC5_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA0_RLC5_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA0_RLC5_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_SIZE 0x02f7
+#define mmSDMA0_RLC5_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA0_RLC5_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA0_RLC5_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC5_DOORBELL 0x02fa
+#define mmSDMA0_RLC5_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC5_STATUS 0x0310
+#define mmSDMA0_RLC5_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA0_RLC5_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC5_WATERMARK 0x0312
+#define mmSDMA0_RLC5_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA0_RLC5_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA0_RLC5_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA0_RLC5_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA0_RLC5_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC5_PREEMPT 0x0318
+#define mmSDMA0_RLC5_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC5_DUMMY_REG 0x0319
+#define mmSDMA0_RLC5_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA0_RLC5_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA0_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA0_RLC5_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA0_RLC5_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA0_RLC5_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA0_RLC5_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA0_RLC5_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA0_RLC5_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA0_RLC5_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA0_RLC5_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA0_RLC5_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA0_RLC5_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_CNTL 0x0340
+#define mmSDMA0_RLC6_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_BASE 0x0341
+#define mmSDMA0_RLC6_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA0_RLC6_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_RPTR 0x0343
+#define mmSDMA0_RLC6_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA0_RLC6_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_WPTR 0x0345
+#define mmSDMA0_RLC6_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA0_RLC6_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_CNTL 0x034a
+#define mmSDMA0_RLC6_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_RPTR 0x034b
+#define mmSDMA0_RLC6_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_OFFSET 0x034c
+#define mmSDMA0_RLC6_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA0_RLC6_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA0_RLC6_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_SIZE 0x034f
+#define mmSDMA0_RLC6_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA0_RLC6_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA0_RLC6_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC6_DOORBELL 0x0352
+#define mmSDMA0_RLC6_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC6_STATUS 0x0368
+#define mmSDMA0_RLC6_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA0_RLC6_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC6_WATERMARK 0x036a
+#define mmSDMA0_RLC6_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA0_RLC6_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA0_RLC6_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA0_RLC6_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA0_RLC6_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC6_PREEMPT 0x0370
+#define mmSDMA0_RLC6_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC6_DUMMY_REG 0x0371
+#define mmSDMA0_RLC6_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA0_RLC6_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA0_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA0_RLC6_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA0_RLC6_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA0_RLC6_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA0_RLC6_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA0_RLC6_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA0_RLC6_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA0_RLC6_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA0_RLC6_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA0_RLC6_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA0_RLC6_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_CNTL 0x0398
+#define mmSDMA0_RLC7_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_BASE 0x0399
+#define mmSDMA0_RLC7_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA0_RLC7_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_RPTR 0x039b
+#define mmSDMA0_RLC7_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA0_RLC7_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_WPTR 0x039d
+#define mmSDMA0_RLC7_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA0_RLC7_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_CNTL 0x03a2
+#define mmSDMA0_RLC7_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_RPTR 0x03a3
+#define mmSDMA0_RLC7_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA0_RLC7_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA0_RLC7_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA0_RLC7_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_SIZE 0x03a7
+#define mmSDMA0_RLC7_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA0_RLC7_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA0_RLC7_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC7_DOORBELL 0x03aa
+#define mmSDMA0_RLC7_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC7_STATUS 0x03c0
+#define mmSDMA0_RLC7_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA0_RLC7_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC7_WATERMARK 0x03c2
+#define mmSDMA0_RLC7_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA0_RLC7_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA0_RLC7_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA0_RLC7_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA0_RLC7_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC7_PREEMPT 0x03c8
+#define mmSDMA0_RLC7_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA0_RLC7_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA0_RLC7_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA0_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA0_RLC7_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA0_RLC7_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA0_RLC7_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA0_RLC7_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA0_RLC7_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA0_RLC7_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA0_RLC7_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA0_RLC7_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA0_RLC7_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA0_RLC7_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..9feb67b09b63
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h
@@ -0,0 +1,3002 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma0_4_2_2_SH_MASK_HEADER
+#define _sdma0_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma0_sdma0dec
+//SDMA0_UCODE_ADDR
+#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA0_UCODE_DATA
+#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_VM_CNTL
+#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA0_VM_CTX_LO
+#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_VM_CTX_HI
+#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_ACTIVE_FCN_ID
+#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA0_VM_CTX_CNTL
+#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA0_VIRT_RESET_REQ
+#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA0_VF_ENABLE
+#define SDMA0_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA0_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA0_CONTEXT_REG_TYPE0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA0_CONTEXT_REG_TYPE1
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA0_CONTEXT_REG_TYPE2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA0_CONTEXT_REG_TYPE3
+#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA0_PUB_REG_TYPE0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA0_MMHUB_CNTL
+#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA0_CONTEXT_GROUP_BOUNDARY
+#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA0_POWER_CNTL
+#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
+#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
+#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+//SDMA0_CLK_CTRL
+#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA0_CNTL
+#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA0_CHICKEN_BITS
+#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA0_GB_ADDR_CONFIG
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA0_GB_ADDR_CONFIG_READ
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA0_RB_RPTR_FETCH_HI
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA0_RB_RPTR_FETCH
+#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA0_IB_OFFSET_FETCH
+#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PROGRAM
+#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA0_STATUS_REG
+#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA0_STATUS1_REG
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA0_RD_BURST_CNTL
+#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA0_HBM_PAGE_CONFIG
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA0_UCODE_CHECKSUM
+#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_F32_CNTL
+#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA0_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA0_FREEZE
+#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA0_PHASE0_QUANTUM
+#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA0_PHASE1_QUANTUM
+#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_POWER_GATING
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3
+#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L
+#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//SDMA_PGFSM_CONFIG
+#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//SDMA_PGFSM_WRITE
+#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PGFSM_READ
+#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//SDMA0_EDC_CONFIG
+#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA0_BA_THRESHOLD
+#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA0_ID
+#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA0_VERSION
+#define SDMA0_VERSION__MINVER__SHIFT 0x0
+#define SDMA0_VERSION__MAJVER__SHIFT 0x8
+#define SDMA0_VERSION__REV__SHIFT 0x10
+#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA0_VERSION__REV_MASK 0x003F0000L
+//SDMA0_EDC_COUNTER
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA0_EDC_COUNTER_CLEAR
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA0_STATUS2_REG
+#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA0_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA0_ATOMIC_CNTL
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA0_ATOMIC_PREOP_LO
+#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_ATOMIC_PREOP_HI
+#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_CNTL
+#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA0_UTCL1_WATERMK
+#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA0_UTCL1_RD_STATUS
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA0_UTCL1_WR_STATUS
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA0_UTCL1_INV0
+#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA0_UTCL1_INV1
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_INV2
+#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK1
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA0_UTCL1_WR_XNACK0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_WR_XNACK1
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA0_UTCL1_TIMEOUT
+#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA0_UTCL1_PAGE
+#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA0_POWER_CNTL_IDLE
+#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA0_RELAX_ORDERING_LUT
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS_2
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA0_STATUS3_REG
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA0_PHYSICAL_ADDR_LO
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA0_PHYSICAL_ADDR_HI
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA0_PHASE2_QUANTUM
+#define SDMA0_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA0_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA0_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA0_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA0_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA0_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA0_ERROR_LOG
+#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA0_PUB_DUMMY_REG0
+#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG1
+#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG2
+#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG3
+#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_F32_COUNTER
+#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_UNBREAKABLE
+#define SDMA0_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA0_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA0_PERFMON_CNTL
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA0_PERFCOUNTER0_RESULT
+#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_RESULT
+#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA0_CRD_CNTL
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA0_GPU_IOV_VIOLATION_LOG
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA0_ULV_CNTL
+#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA0_EA_DBIT_ADDR_DATA
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_EA_DBIT_ADDR_INDEX
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA0_GPU_IOV_VIOLATION_LOG2
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA0_GFX_RB_CNTL
+#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_GFX_RB_BASE
+#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_BASE_HI
+#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_GFX_RB_RPTR
+#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_RPTR_HI
+#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR
+#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_HI
+#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_POLL_CNTL
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_GFX_RB_RPTR_ADDR_HI
+#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_RPTR_ADDR_LO
+#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_GFX_IB_CNTL
+#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_GFX_IB_RPTR
+#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_GFX_IB_OFFSET
+#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_GFX_IB_BASE_LO
+#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_GFX_IB_BASE_HI
+#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_IB_SIZE
+#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_GFX_SKIP_CNTL
+#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_GFX_CONTEXT_STATUS
+#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_GFX_DOORBELL
+#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_GFX_CONTEXT_CNTL
+#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA0_GFX_STATUS
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_GFX_DOORBELL_LOG
+#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_GFX_WATERMARK
+#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_GFX_DOORBELL_OFFSET
+#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_GFX_CSA_ADDR_LO
+#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_GFX_CSA_ADDR_HI
+#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_IB_SUB_REMAIN
+#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_GFX_PREEMPT
+#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_GFX_DUMMY_REG
+#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_GFX_RB_AQL_CNTL
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_GFX_MINOR_PTR_UPDATE
+#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_GFX_MIDCMD_DATA0
+#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA1
+#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA2
+#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA3
+#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA4
+#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA5
+#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA6
+#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA7
+#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA8
+#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_CNTL
+#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_PAGE_RB_CNTL
+#define SDMA0_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_PAGE_RB_BASE
+#define SDMA0_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_BASE_HI
+#define SDMA0_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_PAGE_RB_RPTR
+#define SDMA0_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_RPTR_HI
+#define SDMA0_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR
+#define SDMA0_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_HI
+#define SDMA0_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_PAGE_RB_RPTR_ADDR_HI
+#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_RPTR_ADDR_LO
+#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_IB_CNTL
+#define SDMA0_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_PAGE_IB_RPTR
+#define SDMA0_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PAGE_IB_OFFSET
+#define SDMA0_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PAGE_IB_BASE_LO
+#define SDMA0_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_PAGE_IB_BASE_HI
+#define SDMA0_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_IB_SIZE
+#define SDMA0_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_PAGE_SKIP_CNTL
+#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_PAGE_CONTEXT_STATUS
+#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_PAGE_DOORBELL
+#define SDMA0_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_PAGE_STATUS
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_PAGE_DOORBELL_LOG
+#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_WATERMARK
+#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_PAGE_DOORBELL_OFFSET
+#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_PAGE_CSA_ADDR_LO
+#define SDMA0_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_CSA_ADDR_HI
+#define SDMA0_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_IB_SUB_REMAIN
+#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_PAGE_PREEMPT
+#define SDMA0_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_PAGE_DUMMY_REG
+#define SDMA0_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_RB_AQL_CNTL
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_PAGE_MINOR_PTR_UPDATE
+#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_PAGE_MIDCMD_DATA0
+#define SDMA0_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA1
+#define SDMA0_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA2
+#define SDMA0_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA3
+#define SDMA0_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA4
+#define SDMA0_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA5
+#define SDMA0_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA6
+#define SDMA0_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA7
+#define SDMA0_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA8
+#define SDMA0_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_CNTL
+#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC0_RB_CNTL
+#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC0_RB_BASE
+#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_BASE_HI
+#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC0_RB_RPTR
+#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_RPTR_HI
+#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR
+#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_HI
+#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC0_RB_RPTR_ADDR_HI
+#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_RPTR_ADDR_LO
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_IB_CNTL
+#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC0_IB_RPTR
+#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC0_IB_OFFSET
+#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC0_IB_BASE_LO
+#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC0_IB_BASE_HI
+#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_IB_SIZE
+#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC0_SKIP_CNTL
+#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC0_CONTEXT_STATUS
+#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC0_DOORBELL
+#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC0_STATUS
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC0_DOORBELL_LOG
+#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_WATERMARK
+#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC0_DOORBELL_OFFSET
+#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC0_CSA_ADDR_LO
+#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_CSA_ADDR_HI
+#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_IB_SUB_REMAIN
+#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC0_PREEMPT
+#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC0_DUMMY_REG
+#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_RB_AQL_CNTL
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC0_MINOR_PTR_UPDATE
+#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC0_MIDCMD_DATA0
+#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA1
+#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA2
+#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA3
+#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA4
+#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA5
+#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA6
+#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA7
+#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA8
+#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_CNTL
+#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC1_RB_CNTL
+#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC1_RB_BASE
+#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_BASE_HI
+#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC1_RB_RPTR
+#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_RPTR_HI
+#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR
+#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_HI
+#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC1_RB_RPTR_ADDR_HI
+#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_RPTR_ADDR_LO
+#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_IB_CNTL
+#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC1_IB_RPTR
+#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC1_IB_OFFSET
+#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC1_IB_BASE_LO
+#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC1_IB_BASE_HI
+#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_IB_SIZE
+#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC1_SKIP_CNTL
+#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC1_CONTEXT_STATUS
+#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC1_DOORBELL
+#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC1_STATUS
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC1_DOORBELL_LOG
+#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_WATERMARK
+#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC1_DOORBELL_OFFSET
+#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC1_CSA_ADDR_LO
+#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_CSA_ADDR_HI
+#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_IB_SUB_REMAIN
+#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC1_PREEMPT
+#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC1_DUMMY_REG
+#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_RB_AQL_CNTL
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC1_MINOR_PTR_UPDATE
+#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC1_MIDCMD_DATA0
+#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA1
+#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA2
+#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA3
+#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA4
+#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA5
+#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA6
+#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA7
+#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA8
+#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_CNTL
+#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC2_RB_CNTL
+#define SDMA0_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC2_RB_BASE
+#define SDMA0_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_BASE_HI
+#define SDMA0_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC2_RB_RPTR
+#define SDMA0_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_RPTR_HI
+#define SDMA0_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_WPTR
+#define SDMA0_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_WPTR_HI
+#define SDMA0_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC2_RB_RPTR_ADDR_HI
+#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_RPTR_ADDR_LO
+#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC2_IB_CNTL
+#define SDMA0_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC2_IB_RPTR
+#define SDMA0_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC2_IB_OFFSET
+#define SDMA0_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC2_IB_BASE_LO
+#define SDMA0_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC2_IB_BASE_HI
+#define SDMA0_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_IB_SIZE
+#define SDMA0_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC2_SKIP_CNTL
+#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC2_CONTEXT_STATUS
+#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC2_DOORBELL
+#define SDMA0_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC2_STATUS
+#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC2_DOORBELL_LOG
+#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC2_WATERMARK
+#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC2_DOORBELL_OFFSET
+#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC2_CSA_ADDR_LO
+#define SDMA0_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC2_CSA_ADDR_HI
+#define SDMA0_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_IB_SUB_REMAIN
+#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC2_PREEMPT
+#define SDMA0_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC2_DUMMY_REG
+#define SDMA0_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC2_RB_AQL_CNTL
+#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC2_MINOR_PTR_UPDATE
+#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC2_MIDCMD_DATA0
+#define SDMA0_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA1
+#define SDMA0_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA2
+#define SDMA0_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA3
+#define SDMA0_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA4
+#define SDMA0_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA5
+#define SDMA0_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA6
+#define SDMA0_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA7
+#define SDMA0_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_DATA8
+#define SDMA0_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC2_MIDCMD_CNTL
+#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC3_RB_CNTL
+#define SDMA0_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC3_RB_BASE
+#define SDMA0_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_BASE_HI
+#define SDMA0_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC3_RB_RPTR
+#define SDMA0_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_RPTR_HI
+#define SDMA0_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_WPTR
+#define SDMA0_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_WPTR_HI
+#define SDMA0_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC3_RB_RPTR_ADDR_HI
+#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_RPTR_ADDR_LO
+#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC3_IB_CNTL
+#define SDMA0_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC3_IB_RPTR
+#define SDMA0_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC3_IB_OFFSET
+#define SDMA0_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC3_IB_BASE_LO
+#define SDMA0_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC3_IB_BASE_HI
+#define SDMA0_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_IB_SIZE
+#define SDMA0_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC3_SKIP_CNTL
+#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC3_CONTEXT_STATUS
+#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC3_DOORBELL
+#define SDMA0_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC3_STATUS
+#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC3_DOORBELL_LOG
+#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC3_WATERMARK
+#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC3_DOORBELL_OFFSET
+#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC3_CSA_ADDR_LO
+#define SDMA0_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC3_CSA_ADDR_HI
+#define SDMA0_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_IB_SUB_REMAIN
+#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC3_PREEMPT
+#define SDMA0_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC3_DUMMY_REG
+#define SDMA0_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC3_RB_AQL_CNTL
+#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC3_MINOR_PTR_UPDATE
+#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC3_MIDCMD_DATA0
+#define SDMA0_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA1
+#define SDMA0_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA2
+#define SDMA0_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA3
+#define SDMA0_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA4
+#define SDMA0_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA5
+#define SDMA0_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA6
+#define SDMA0_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA7
+#define SDMA0_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_DATA8
+#define SDMA0_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC3_MIDCMD_CNTL
+#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC4_RB_CNTL
+#define SDMA0_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC4_RB_BASE
+#define SDMA0_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_BASE_HI
+#define SDMA0_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC4_RB_RPTR
+#define SDMA0_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_RPTR_HI
+#define SDMA0_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_WPTR
+#define SDMA0_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_WPTR_HI
+#define SDMA0_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC4_RB_RPTR_ADDR_HI
+#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_RPTR_ADDR_LO
+#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC4_IB_CNTL
+#define SDMA0_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC4_IB_RPTR
+#define SDMA0_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC4_IB_OFFSET
+#define SDMA0_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC4_IB_BASE_LO
+#define SDMA0_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC4_IB_BASE_HI
+#define SDMA0_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_IB_SIZE
+#define SDMA0_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC4_SKIP_CNTL
+#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC4_CONTEXT_STATUS
+#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC4_DOORBELL
+#define SDMA0_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC4_STATUS
+#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC4_DOORBELL_LOG
+#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC4_WATERMARK
+#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC4_DOORBELL_OFFSET
+#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC4_CSA_ADDR_LO
+#define SDMA0_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC4_CSA_ADDR_HI
+#define SDMA0_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_IB_SUB_REMAIN
+#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC4_PREEMPT
+#define SDMA0_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC4_DUMMY_REG
+#define SDMA0_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC4_RB_AQL_CNTL
+#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC4_MINOR_PTR_UPDATE
+#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC4_MIDCMD_DATA0
+#define SDMA0_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA1
+#define SDMA0_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA2
+#define SDMA0_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA3
+#define SDMA0_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA4
+#define SDMA0_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA5
+#define SDMA0_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA6
+#define SDMA0_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA7
+#define SDMA0_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_DATA8
+#define SDMA0_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC4_MIDCMD_CNTL
+#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC5_RB_CNTL
+#define SDMA0_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC5_RB_BASE
+#define SDMA0_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_BASE_HI
+#define SDMA0_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC5_RB_RPTR
+#define SDMA0_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_RPTR_HI
+#define SDMA0_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_WPTR
+#define SDMA0_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_WPTR_HI
+#define SDMA0_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC5_RB_RPTR_ADDR_HI
+#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_RPTR_ADDR_LO
+#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC5_IB_CNTL
+#define SDMA0_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC5_IB_RPTR
+#define SDMA0_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC5_IB_OFFSET
+#define SDMA0_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC5_IB_BASE_LO
+#define SDMA0_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC5_IB_BASE_HI
+#define SDMA0_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_IB_SIZE
+#define SDMA0_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC5_SKIP_CNTL
+#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC5_CONTEXT_STATUS
+#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC5_DOORBELL
+#define SDMA0_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC5_STATUS
+#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC5_DOORBELL_LOG
+#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC5_WATERMARK
+#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC5_DOORBELL_OFFSET
+#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC5_CSA_ADDR_LO
+#define SDMA0_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC5_CSA_ADDR_HI
+#define SDMA0_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_IB_SUB_REMAIN
+#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC5_PREEMPT
+#define SDMA0_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC5_DUMMY_REG
+#define SDMA0_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC5_RB_AQL_CNTL
+#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC5_MINOR_PTR_UPDATE
+#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC5_MIDCMD_DATA0
+#define SDMA0_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA1
+#define SDMA0_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA2
+#define SDMA0_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA3
+#define SDMA0_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA4
+#define SDMA0_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA5
+#define SDMA0_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA6
+#define SDMA0_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA7
+#define SDMA0_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_DATA8
+#define SDMA0_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC5_MIDCMD_CNTL
+#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC6_RB_CNTL
+#define SDMA0_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC6_RB_BASE
+#define SDMA0_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_BASE_HI
+#define SDMA0_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC6_RB_RPTR
+#define SDMA0_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_RPTR_HI
+#define SDMA0_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_WPTR
+#define SDMA0_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_WPTR_HI
+#define SDMA0_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC6_RB_RPTR_ADDR_HI
+#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_RPTR_ADDR_LO
+#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC6_IB_CNTL
+#define SDMA0_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC6_IB_RPTR
+#define SDMA0_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC6_IB_OFFSET
+#define SDMA0_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC6_IB_BASE_LO
+#define SDMA0_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC6_IB_BASE_HI
+#define SDMA0_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_IB_SIZE
+#define SDMA0_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC6_SKIP_CNTL
+#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC6_CONTEXT_STATUS
+#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC6_DOORBELL
+#define SDMA0_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC6_STATUS
+#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC6_DOORBELL_LOG
+#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC6_WATERMARK
+#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC6_DOORBELL_OFFSET
+#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC6_CSA_ADDR_LO
+#define SDMA0_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC6_CSA_ADDR_HI
+#define SDMA0_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_IB_SUB_REMAIN
+#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC6_PREEMPT
+#define SDMA0_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC6_DUMMY_REG
+#define SDMA0_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC6_RB_AQL_CNTL
+#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC6_MINOR_PTR_UPDATE
+#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC6_MIDCMD_DATA0
+#define SDMA0_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA1
+#define SDMA0_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA2
+#define SDMA0_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA3
+#define SDMA0_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA4
+#define SDMA0_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA5
+#define SDMA0_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA6
+#define SDMA0_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA7
+#define SDMA0_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_DATA8
+#define SDMA0_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC6_MIDCMD_CNTL
+#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC7_RB_CNTL
+#define SDMA0_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC7_RB_BASE
+#define SDMA0_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_BASE_HI
+#define SDMA0_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC7_RB_RPTR
+#define SDMA0_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_RPTR_HI
+#define SDMA0_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_WPTR
+#define SDMA0_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_WPTR_HI
+#define SDMA0_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC7_RB_RPTR_ADDR_HI
+#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_RPTR_ADDR_LO
+#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC7_IB_CNTL
+#define SDMA0_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC7_IB_RPTR
+#define SDMA0_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC7_IB_OFFSET
+#define SDMA0_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC7_IB_BASE_LO
+#define SDMA0_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC7_IB_BASE_HI
+#define SDMA0_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_IB_SIZE
+#define SDMA0_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC7_SKIP_CNTL
+#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_RLC7_CONTEXT_STATUS
+#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC7_DOORBELL
+#define SDMA0_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC7_STATUS
+#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC7_DOORBELL_LOG
+#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC7_WATERMARK
+#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC7_DOORBELL_OFFSET
+#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC7_CSA_ADDR_LO
+#define SDMA0_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC7_CSA_ADDR_HI
+#define SDMA0_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_IB_SUB_REMAIN
+#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC7_PREEMPT
+#define SDMA0_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC7_DUMMY_REG
+#define SDMA0_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC7_RB_AQL_CNTL
+#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC7_MINOR_PTR_UPDATE
+#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC7_MIDCMD_DATA0
+#define SDMA0_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA1
+#define SDMA0_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA2
+#define SDMA0_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA3
+#define SDMA0_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA4
+#define SDMA0_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA5
+#define SDMA0_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA6
+#define SDMA0_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA7
+#define SDMA0_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_DATA8
+#define SDMA0_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_CNTL
+#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h
new file mode 100644
index 000000000000..681233a55a1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma1_4_2_2_OFFSET_HEADER
+#define _sdma1_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma1_sdma1dec
+// base address: 0x6180
+#define mmSDMA1_UCODE_ADDR 0x0000
+#define mmSDMA1_UCODE_ADDR_BASE_IDX 0
+#define mmSDMA1_UCODE_DATA 0x0001
+#define mmSDMA1_UCODE_DATA_BASE_IDX 0
+#define mmSDMA1_VM_CNTL 0x0004
+#define mmSDMA1_VM_CNTL_BASE_IDX 0
+#define mmSDMA1_VM_CTX_LO 0x0005
+#define mmSDMA1_VM_CTX_LO_BASE_IDX 0
+#define mmSDMA1_VM_CTX_HI 0x0006
+#define mmSDMA1_VM_CTX_HI_BASE_IDX 0
+#define mmSDMA1_ACTIVE_FCN_ID 0x0007
+#define mmSDMA1_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSDMA1_VM_CTX_CNTL 0x0008
+#define mmSDMA1_VM_CTX_CNTL_BASE_IDX 0
+#define mmSDMA1_VIRT_RESET_REQ 0x0009
+#define mmSDMA1_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSDMA1_VF_ENABLE 0x000a
+#define mmSDMA1_VF_ENABLE_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA1_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE0 0x000f
+#define mmSDMA1_PUB_REG_TYPE0_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE1 0x0010
+#define mmSDMA1_PUB_REG_TYPE1_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE2 0x0011
+#define mmSDMA1_PUB_REG_TYPE2_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE3 0x0012
+#define mmSDMA1_PUB_REG_TYPE3_BASE_IDX 0
+#define mmSDMA1_MMHUB_CNTL 0x0013
+#define mmSDMA1_MMHUB_CNTL_BASE_IDX 0
+#define mmSDMA1_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define mmSDMA1_POWER_CNTL 0x001a
+#define mmSDMA1_POWER_CNTL_BASE_IDX 0
+#define mmSDMA1_CLK_CTRL 0x001b
+#define mmSDMA1_CLK_CTRL_BASE_IDX 0
+#define mmSDMA1_CNTL 0x001c
+#define mmSDMA1_CNTL_BASE_IDX 0
+#define mmSDMA1_CHICKEN_BITS 0x001d
+#define mmSDMA1_CHICKEN_BITS_BASE_IDX 0
+#define mmSDMA1_GB_ADDR_CONFIG 0x001e
+#define mmSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
+#define mmSDMA1_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define mmSDMA1_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define mmSDMA1_RB_RPTR_FETCH 0x0022
+#define mmSDMA1_RB_RPTR_FETCH_BASE_IDX 0
+#define mmSDMA1_IB_OFFSET_FETCH 0x0023
+#define mmSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
+#define mmSDMA1_PROGRAM 0x0024
+#define mmSDMA1_PROGRAM_BASE_IDX 0
+#define mmSDMA1_STATUS_REG 0x0025
+#define mmSDMA1_STATUS_REG_BASE_IDX 0
+#define mmSDMA1_STATUS1_REG 0x0026
+#define mmSDMA1_STATUS1_REG_BASE_IDX 0
+#define mmSDMA1_RD_BURST_CNTL 0x0027
+#define mmSDMA1_RD_BURST_CNTL_BASE_IDX 0
+#define mmSDMA1_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
+#define mmSDMA1_UCODE_CHECKSUM 0x0029
+#define mmSDMA1_UCODE_CHECKSUM_BASE_IDX 0
+#define mmSDMA1_F32_CNTL 0x002a
+#define mmSDMA1_F32_CNTL_BASE_IDX 0
+#define mmSDMA1_FREEZE 0x002b
+#define mmSDMA1_FREEZE_BASE_IDX 0
+#define mmSDMA1_PHASE0_QUANTUM 0x002c
+#define mmSDMA1_PHASE0_QUANTUM_BASE_IDX 0
+#define mmSDMA1_PHASE1_QUANTUM 0x002d
+#define mmSDMA1_PHASE1_QUANTUM_BASE_IDX 0
+#define mmSDMA1_EDC_CONFIG 0x0032
+#define mmSDMA1_EDC_CONFIG_BASE_IDX 0
+#define mmSDMA1_BA_THRESHOLD 0x0033
+#define mmSDMA1_BA_THRESHOLD_BASE_IDX 0
+#define mmSDMA1_ID 0x0034
+#define mmSDMA1_ID_BASE_IDX 0
+#define mmSDMA1_VERSION 0x0035
+#define mmSDMA1_VERSION_BASE_IDX 0
+#define mmSDMA1_EDC_COUNTER 0x0036
+#define mmSDMA1_EDC_COUNTER_BASE_IDX 0
+#define mmSDMA1_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define mmSDMA1_STATUS2_REG 0x0038
+#define mmSDMA1_STATUS2_REG_BASE_IDX 0
+#define mmSDMA1_ATOMIC_CNTL 0x0039
+#define mmSDMA1_ATOMIC_CNTL_BASE_IDX 0
+#define mmSDMA1_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
+#define mmSDMA1_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
+#define mmSDMA1_UTCL1_CNTL 0x003c
+#define mmSDMA1_UTCL1_CNTL_BASE_IDX 0
+#define mmSDMA1_UTCL1_WATERMK 0x003d
+#define mmSDMA1_UTCL1_WATERMK_BASE_IDX 0
+#define mmSDMA1_UTCL1_RD_STATUS 0x003e
+#define mmSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
+#define mmSDMA1_UTCL1_WR_STATUS 0x003f
+#define mmSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
+#define mmSDMA1_UTCL1_INV0 0x0040
+#define mmSDMA1_UTCL1_INV0_BASE_IDX 0
+#define mmSDMA1_UTCL1_INV1 0x0041
+#define mmSDMA1_UTCL1_INV1_BASE_IDX 0
+#define mmSDMA1_UTCL1_INV2 0x0042
+#define mmSDMA1_UTCL1_INV2_BASE_IDX 0
+#define mmSDMA1_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
+#define mmSDMA1_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
+#define mmSDMA1_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
+#define mmSDMA1_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
+#define mmSDMA1_UTCL1_TIMEOUT 0x0047
+#define mmSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
+#define mmSDMA1_UTCL1_PAGE 0x0048
+#define mmSDMA1_UTCL1_PAGE_BASE_IDX 0
+#define mmSDMA1_POWER_CNTL_IDLE 0x0049
+#define mmSDMA1_POWER_CNTL_IDLE_BASE_IDX 0
+#define mmSDMA1_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
+#define mmSDMA1_CHICKEN_BITS_2 0x004b
+#define mmSDMA1_CHICKEN_BITS_2_BASE_IDX 0
+#define mmSDMA1_STATUS3_REG 0x004c
+#define mmSDMA1_STATUS3_REG_BASE_IDX 0
+#define mmSDMA1_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PHASE2_QUANTUM 0x004f
+#define mmSDMA1_PHASE2_QUANTUM_BASE_IDX 0
+#define mmSDMA1_ERROR_LOG 0x0050
+#define mmSDMA1_ERROR_LOG_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG0 0x0051
+#define mmSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG1 0x0052
+#define mmSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG2 0x0053
+#define mmSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG3 0x0054
+#define mmSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
+#define mmSDMA1_F32_COUNTER 0x0055
+#define mmSDMA1_F32_COUNTER_BASE_IDX 0
+#define mmSDMA1_UNBREAKABLE 0x0056
+#define mmSDMA1_UNBREAKABLE_BASE_IDX 0
+#define mmSDMA1_PERFMON_CNTL 0x0057
+#define mmSDMA1_PERFMON_CNTL_BASE_IDX 0
+#define mmSDMA1_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA1_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSDMA1_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA1_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
+#define mmSDMA1_CRD_CNTL 0x005b
+#define mmSDMA1_CRD_CNTL_BASE_IDX 0
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSDMA1_ULV_CNTL 0x005e
+#define mmSDMA1_ULV_CNTL_BASE_IDX 0
+#define mmSDMA1_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define mmSDMA1_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define mmSDMA1_GFX_RB_CNTL 0x0080
+#define mmSDMA1_GFX_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_RB_BASE 0x0081
+#define mmSDMA1_GFX_RB_BASE_BASE_IDX 0
+#define mmSDMA1_GFX_RB_BASE_HI 0x0082
+#define mmSDMA1_GFX_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR 0x0083
+#define mmSDMA1_GFX_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA1_GFX_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR 0x0085
+#define mmSDMA1_GFX_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA1_GFX_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_GFX_IB_CNTL 0x008a
+#define mmSDMA1_GFX_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_IB_RPTR 0x008b
+#define mmSDMA1_GFX_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_GFX_IB_OFFSET 0x008c
+#define mmSDMA1_GFX_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_GFX_IB_BASE_LO 0x008d
+#define mmSDMA1_GFX_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_GFX_IB_BASE_HI 0x008e
+#define mmSDMA1_GFX_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_GFX_IB_SIZE 0x008f
+#define mmSDMA1_GFX_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_GFX_SKIP_CNTL 0x0090
+#define mmSDMA1_GFX_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA1_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_GFX_DOORBELL 0x0092
+#define mmSDMA1_GFX_DOORBELL_BASE_IDX 0
+#define mmSDMA1_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA1_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_STATUS 0x00a8
+#define mmSDMA1_GFX_STATUS_BASE_IDX 0
+#define mmSDMA1_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA1_GFX_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_GFX_WATERMARK 0x00aa
+#define mmSDMA1_GFX_WATERMARK_BASE_IDX 0
+#define mmSDMA1_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA1_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA1_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA1_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA1_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_GFX_PREEMPT 0x00b0
+#define mmSDMA1_GFX_PREEMPT_BASE_IDX 0
+#define mmSDMA1_GFX_DUMMY_REG 0x00b1
+#define mmSDMA1_GFX_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA1_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA1_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA1_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA1_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA1_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA1_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA1_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA1_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA1_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA1_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA1_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA1_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_CNTL 0x00d8
+#define mmSDMA1_PAGE_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_BASE 0x00d9
+#define mmSDMA1_PAGE_RB_BASE_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA1_PAGE_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR 0x00db
+#define mmSDMA1_PAGE_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA1_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR 0x00dd
+#define mmSDMA1_PAGE_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA1_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_CNTL 0x00e2
+#define mmSDMA1_PAGE_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_RPTR 0x00e3
+#define mmSDMA1_PAGE_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA1_PAGE_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA1_PAGE_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA1_PAGE_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_SIZE 0x00e7
+#define mmSDMA1_PAGE_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA1_PAGE_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA1_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_PAGE_DOORBELL 0x00ea
+#define mmSDMA1_PAGE_DOORBELL_BASE_IDX 0
+#define mmSDMA1_PAGE_STATUS 0x0100
+#define mmSDMA1_PAGE_STATUS_BASE_IDX 0
+#define mmSDMA1_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA1_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_PAGE_WATERMARK 0x0102
+#define mmSDMA1_PAGE_WATERMARK_BASE_IDX 0
+#define mmSDMA1_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA1_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA1_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA1_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA1_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_PAGE_PREEMPT 0x0108
+#define mmSDMA1_PAGE_PREEMPT_BASE_IDX 0
+#define mmSDMA1_PAGE_DUMMY_REG 0x0109
+#define mmSDMA1_PAGE_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA1_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA1_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA1_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA1_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA1_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA1_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA1_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA1_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA1_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA1_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA1_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_CNTL 0x0130
+#define mmSDMA1_RLC0_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_BASE 0x0131
+#define mmSDMA1_RLC0_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA1_RLC0_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR 0x0133
+#define mmSDMA1_RLC0_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA1_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR 0x0135
+#define mmSDMA1_RLC0_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA1_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_CNTL 0x013a
+#define mmSDMA1_RLC0_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_RPTR 0x013b
+#define mmSDMA1_RLC0_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_OFFSET 0x013c
+#define mmSDMA1_RLC0_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA1_RLC0_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA1_RLC0_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_SIZE 0x013f
+#define mmSDMA1_RLC0_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA1_RLC0_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA1_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC0_DOORBELL 0x0142
+#define mmSDMA1_RLC0_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC0_STATUS 0x0158
+#define mmSDMA1_RLC0_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA1_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC0_WATERMARK 0x015a
+#define mmSDMA1_RLC0_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA1_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA1_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA1_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA1_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC0_PREEMPT 0x0160
+#define mmSDMA1_RLC0_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC0_DUMMY_REG 0x0161
+#define mmSDMA1_RLC0_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA1_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA1_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA1_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA1_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA1_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA1_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA1_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA1_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA1_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA1_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA1_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_CNTL 0x0188
+#define mmSDMA1_RLC1_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_BASE 0x0189
+#define mmSDMA1_RLC1_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA1_RLC1_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR 0x018b
+#define mmSDMA1_RLC1_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA1_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR 0x018d
+#define mmSDMA1_RLC1_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA1_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_CNTL 0x0192
+#define mmSDMA1_RLC1_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_RPTR 0x0193
+#define mmSDMA1_RLC1_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_OFFSET 0x0194
+#define mmSDMA1_RLC1_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA1_RLC1_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA1_RLC1_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_SIZE 0x0197
+#define mmSDMA1_RLC1_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA1_RLC1_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA1_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC1_DOORBELL 0x019a
+#define mmSDMA1_RLC1_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC1_STATUS 0x01b0
+#define mmSDMA1_RLC1_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA1_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC1_WATERMARK 0x01b2
+#define mmSDMA1_RLC1_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA1_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA1_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA1_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA1_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC1_PREEMPT 0x01b8
+#define mmSDMA1_RLC1_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA1_RLC1_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA1_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA1_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA1_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA1_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA1_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA1_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA1_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA1_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA1_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA1_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA1_RLC1_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_CNTL 0x01e0
+#define mmSDMA1_RLC2_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_BASE 0x01e1
+#define mmSDMA1_RLC2_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA1_RLC2_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_RPTR 0x01e3
+#define mmSDMA1_RLC2_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA1_RLC2_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_WPTR 0x01e5
+#define mmSDMA1_RLC2_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA1_RLC2_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_CNTL 0x01ea
+#define mmSDMA1_RLC2_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_RPTR 0x01eb
+#define mmSDMA1_RLC2_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA1_RLC2_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA1_RLC2_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA1_RLC2_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_SIZE 0x01ef
+#define mmSDMA1_RLC2_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA1_RLC2_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA1_RLC2_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC2_DOORBELL 0x01f2
+#define mmSDMA1_RLC2_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC2_STATUS 0x0208
+#define mmSDMA1_RLC2_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA1_RLC2_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC2_WATERMARK 0x020a
+#define mmSDMA1_RLC2_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA1_RLC2_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA1_RLC2_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA1_RLC2_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA1_RLC2_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC2_PREEMPT 0x0210
+#define mmSDMA1_RLC2_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC2_DUMMY_REG 0x0211
+#define mmSDMA1_RLC2_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA1_RLC2_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA1_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA1_RLC2_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA1_RLC2_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA1_RLC2_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA1_RLC2_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA1_RLC2_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA1_RLC2_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA1_RLC2_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA1_RLC2_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA1_RLC2_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA1_RLC2_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_CNTL 0x0238
+#define mmSDMA1_RLC3_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_BASE 0x0239
+#define mmSDMA1_RLC3_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA1_RLC3_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_RPTR 0x023b
+#define mmSDMA1_RLC3_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA1_RLC3_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_WPTR 0x023d
+#define mmSDMA1_RLC3_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA1_RLC3_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_CNTL 0x0242
+#define mmSDMA1_RLC3_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_RPTR 0x0243
+#define mmSDMA1_RLC3_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_OFFSET 0x0244
+#define mmSDMA1_RLC3_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA1_RLC3_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA1_RLC3_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_SIZE 0x0247
+#define mmSDMA1_RLC3_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA1_RLC3_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA1_RLC3_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC3_DOORBELL 0x024a
+#define mmSDMA1_RLC3_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC3_STATUS 0x0260
+#define mmSDMA1_RLC3_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA1_RLC3_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC3_WATERMARK 0x0262
+#define mmSDMA1_RLC3_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA1_RLC3_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA1_RLC3_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA1_RLC3_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA1_RLC3_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC3_PREEMPT 0x0268
+#define mmSDMA1_RLC3_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC3_DUMMY_REG 0x0269
+#define mmSDMA1_RLC3_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA1_RLC3_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA1_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA1_RLC3_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA1_RLC3_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA1_RLC3_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA1_RLC3_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA1_RLC3_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA1_RLC3_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA1_RLC3_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA1_RLC3_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA1_RLC3_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA1_RLC3_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_CNTL 0x0290
+#define mmSDMA1_RLC4_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_BASE 0x0291
+#define mmSDMA1_RLC4_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA1_RLC4_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_RPTR 0x0293
+#define mmSDMA1_RLC4_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA1_RLC4_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_WPTR 0x0295
+#define mmSDMA1_RLC4_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA1_RLC4_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_CNTL 0x029a
+#define mmSDMA1_RLC4_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_RPTR 0x029b
+#define mmSDMA1_RLC4_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_OFFSET 0x029c
+#define mmSDMA1_RLC4_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA1_RLC4_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA1_RLC4_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_SIZE 0x029f
+#define mmSDMA1_RLC4_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA1_RLC4_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA1_RLC4_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC4_DOORBELL 0x02a2
+#define mmSDMA1_RLC4_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC4_STATUS 0x02b8
+#define mmSDMA1_RLC4_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA1_RLC4_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC4_WATERMARK 0x02ba
+#define mmSDMA1_RLC4_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA1_RLC4_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA1_RLC4_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA1_RLC4_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA1_RLC4_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC4_PREEMPT 0x02c0
+#define mmSDMA1_RLC4_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA1_RLC4_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA1_RLC4_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA1_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA1_RLC4_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA1_RLC4_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA1_RLC4_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA1_RLC4_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA1_RLC4_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA1_RLC4_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA1_RLC4_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA1_RLC4_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA1_RLC4_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA1_RLC4_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_CNTL 0x02e8
+#define mmSDMA1_RLC5_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_BASE 0x02e9
+#define mmSDMA1_RLC5_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA1_RLC5_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_RPTR 0x02eb
+#define mmSDMA1_RLC5_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA1_RLC5_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_WPTR 0x02ed
+#define mmSDMA1_RLC5_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA1_RLC5_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_CNTL 0x02f2
+#define mmSDMA1_RLC5_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_RPTR 0x02f3
+#define mmSDMA1_RLC5_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA1_RLC5_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA1_RLC5_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA1_RLC5_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_SIZE 0x02f7
+#define mmSDMA1_RLC5_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA1_RLC5_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA1_RLC5_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC5_DOORBELL 0x02fa
+#define mmSDMA1_RLC5_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC5_STATUS 0x0310
+#define mmSDMA1_RLC5_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA1_RLC5_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC5_WATERMARK 0x0312
+#define mmSDMA1_RLC5_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA1_RLC5_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA1_RLC5_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA1_RLC5_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA1_RLC5_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC5_PREEMPT 0x0318
+#define mmSDMA1_RLC5_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC5_DUMMY_REG 0x0319
+#define mmSDMA1_RLC5_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA1_RLC5_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA1_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA1_RLC5_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA1_RLC5_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA1_RLC5_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA1_RLC5_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA1_RLC5_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA1_RLC5_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA1_RLC5_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA1_RLC5_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA1_RLC5_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA1_RLC5_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_CNTL 0x0340
+#define mmSDMA1_RLC6_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_BASE 0x0341
+#define mmSDMA1_RLC6_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA1_RLC6_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_RPTR 0x0343
+#define mmSDMA1_RLC6_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA1_RLC6_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_WPTR 0x0345
+#define mmSDMA1_RLC6_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA1_RLC6_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_CNTL 0x034a
+#define mmSDMA1_RLC6_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_RPTR 0x034b
+#define mmSDMA1_RLC6_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_OFFSET 0x034c
+#define mmSDMA1_RLC6_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA1_RLC6_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA1_RLC6_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_SIZE 0x034f
+#define mmSDMA1_RLC6_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA1_RLC6_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA1_RLC6_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC6_DOORBELL 0x0352
+#define mmSDMA1_RLC6_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC6_STATUS 0x0368
+#define mmSDMA1_RLC6_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA1_RLC6_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC6_WATERMARK 0x036a
+#define mmSDMA1_RLC6_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA1_RLC6_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA1_RLC6_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA1_RLC6_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA1_RLC6_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC6_PREEMPT 0x0370
+#define mmSDMA1_RLC6_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC6_DUMMY_REG 0x0371
+#define mmSDMA1_RLC6_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA1_RLC6_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA1_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA1_RLC6_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA1_RLC6_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA1_RLC6_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA1_RLC6_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA1_RLC6_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA1_RLC6_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA1_RLC6_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA1_RLC6_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA1_RLC6_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA1_RLC6_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_CNTL 0x0398
+#define mmSDMA1_RLC7_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_BASE 0x0399
+#define mmSDMA1_RLC7_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA1_RLC7_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_RPTR 0x039b
+#define mmSDMA1_RLC7_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA1_RLC7_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_WPTR 0x039d
+#define mmSDMA1_RLC7_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA1_RLC7_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_CNTL 0x03a2
+#define mmSDMA1_RLC7_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_RPTR 0x03a3
+#define mmSDMA1_RLC7_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA1_RLC7_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA1_RLC7_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA1_RLC7_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_SIZE 0x03a7
+#define mmSDMA1_RLC7_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA1_RLC7_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA1_RLC7_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC7_DOORBELL 0x03aa
+#define mmSDMA1_RLC7_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC7_STATUS 0x03c0
+#define mmSDMA1_RLC7_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA1_RLC7_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC7_WATERMARK 0x03c2
+#define mmSDMA1_RLC7_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA1_RLC7_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA1_RLC7_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA1_RLC7_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA1_RLC7_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC7_PREEMPT 0x03c8
+#define mmSDMA1_RLC7_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA1_RLC7_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA1_RLC7_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA1_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA1_RLC7_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA1_RLC7_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA1_RLC7_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA1_RLC7_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA1_RLC7_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA1_RLC7_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA1_RLC7_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA1_RLC7_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA1_RLC7_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA1_RLC7_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..ac2468e6bc46
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma1_4_2_2_SH_MASK_HEADER
+#define _sdma1_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma1_sdma1dec
+//SDMA1_UCODE_ADDR
+#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA1_UCODE_DATA
+#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_VM_CNTL
+#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA1_VM_CTX_LO
+#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_VM_CTX_HI
+#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_ACTIVE_FCN_ID
+#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA1_VM_CTX_CNTL
+#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA1_VIRT_RESET_REQ
+#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA1_VF_ENABLE
+#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA1_CONTEXT_REG_TYPE0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA1_CONTEXT_REG_TYPE1
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA1_CONTEXT_REG_TYPE2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA1_CONTEXT_REG_TYPE3
+#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA1_PUB_REG_TYPE0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA1_MMHUB_CNTL
+#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA1_CONTEXT_GROUP_BOUNDARY
+#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA1_POWER_CNTL
+#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA1_CLK_CTRL
+#define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA1_CNTL
+#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA1_CHICKEN_BITS
+#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA1_GB_ADDR_CONFIG
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA1_GB_ADDR_CONFIG_READ
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA1_RB_RPTR_FETCH_HI
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA1_RB_RPTR_FETCH
+#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA1_IB_OFFSET_FETCH
+#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PROGRAM
+#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA1_STATUS_REG
+#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA1_STATUS1_REG
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA1_RD_BURST_CNTL
+#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA1_HBM_PAGE_CONFIG
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA1_UCODE_CHECKSUM
+#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_F32_CNTL
+#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA1_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA1_FREEZE
+#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA1_PHASE0_QUANTUM
+#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA1_PHASE1_QUANTUM
+#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA1_EDC_CONFIG
+#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA1_BA_THRESHOLD
+#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA1_ID
+#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA1_VERSION
+#define SDMA1_VERSION__MINVER__SHIFT 0x0
+#define SDMA1_VERSION__MAJVER__SHIFT 0x8
+#define SDMA1_VERSION__REV__SHIFT 0x10
+#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA1_VERSION__REV_MASK 0x003F0000L
+//SDMA1_EDC_COUNTER
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA1_EDC_COUNTER_CLEAR
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA1_STATUS2_REG
+#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA1_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA1_ATOMIC_CNTL
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA1_ATOMIC_PREOP_LO
+#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_ATOMIC_PREOP_HI
+#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_CNTL
+#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA1_UTCL1_WATERMK
+#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA1_UTCL1_RD_STATUS
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA1_UTCL1_WR_STATUS
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA1_UTCL1_INV0
+#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA1_UTCL1_INV1
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_INV2
+#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK1
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA1_UTCL1_WR_XNACK0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_WR_XNACK1
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA1_UTCL1_TIMEOUT
+#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA1_UTCL1_PAGE
+#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA1_POWER_CNTL_IDLE
+#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA1_RELAX_ORDERING_LUT
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS_2
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA1_STATUS3_REG
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA1_PHYSICAL_ADDR_LO
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA1_PHYSICAL_ADDR_HI
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA1_PHASE2_QUANTUM
+#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA1_ERROR_LOG
+#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA1_PUB_DUMMY_REG0
+#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG1
+#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG2
+#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG3
+#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_F32_COUNTER
+#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_UNBREAKABLE
+#define SDMA1_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA1_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA1_PERFMON_CNTL
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA1_PERFCOUNTER0_RESULT
+#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_RESULT
+#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA1_CRD_CNTL
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA1_GPU_IOV_VIOLATION_LOG
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA1_ULV_CNTL
+#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA1_EA_DBIT_ADDR_DATA
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_EA_DBIT_ADDR_INDEX
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA1_GPU_IOV_VIOLATION_LOG2
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA1_GFX_RB_CNTL
+#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_GFX_RB_BASE
+#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_BASE_HI
+#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_GFX_RB_RPTR
+#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_RPTR_HI
+#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR
+#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_HI
+#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_POLL_CNTL
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_GFX_RB_RPTR_ADDR_HI
+#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_RPTR_ADDR_LO
+#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_GFX_IB_CNTL
+#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_GFX_IB_RPTR
+#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_GFX_IB_OFFSET
+#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_GFX_IB_BASE_LO
+#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_GFX_IB_BASE_HI
+#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_IB_SIZE
+#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_GFX_SKIP_CNTL
+#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_GFX_CONTEXT_STATUS
+#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_GFX_DOORBELL
+#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_GFX_CONTEXT_CNTL
+#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA1_GFX_STATUS
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_GFX_DOORBELL_LOG
+#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_GFX_WATERMARK
+#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_GFX_DOORBELL_OFFSET
+#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_GFX_CSA_ADDR_LO
+#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_GFX_CSA_ADDR_HI
+#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_IB_SUB_REMAIN
+#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_GFX_PREEMPT
+#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_GFX_DUMMY_REG
+#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_GFX_RB_AQL_CNTL
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_GFX_MINOR_PTR_UPDATE
+#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_GFX_MIDCMD_DATA0
+#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA1
+#define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA2
+#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA3
+#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA4
+#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA5
+#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA6
+#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA7
+#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA8
+#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_CNTL
+#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_PAGE_RB_CNTL
+#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_PAGE_RB_BASE
+#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_BASE_HI
+#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_PAGE_RB_RPTR
+#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_RPTR_HI
+#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR
+#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_HI
+#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_PAGE_RB_RPTR_ADDR_HI
+#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_RPTR_ADDR_LO
+#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_IB_CNTL
+#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_PAGE_IB_RPTR
+#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PAGE_IB_OFFSET
+#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PAGE_IB_BASE_LO
+#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_PAGE_IB_BASE_HI
+#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_IB_SIZE
+#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_PAGE_SKIP_CNTL
+#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_PAGE_CONTEXT_STATUS
+#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_PAGE_DOORBELL
+#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_PAGE_STATUS
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_PAGE_DOORBELL_LOG
+#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_WATERMARK
+#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_PAGE_DOORBELL_OFFSET
+#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_PAGE_CSA_ADDR_LO
+#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_CSA_ADDR_HI
+#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_IB_SUB_REMAIN
+#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_PAGE_PREEMPT
+#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_PAGE_DUMMY_REG
+#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_RB_AQL_CNTL
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_PAGE_MINOR_PTR_UPDATE
+#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_PAGE_MIDCMD_DATA0
+#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA1
+#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA2
+#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA3
+#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA4
+#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA5
+#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA6
+#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA7
+#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA8
+#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_CNTL
+#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC0_RB_CNTL
+#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC0_RB_BASE
+#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_BASE_HI
+#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC0_RB_RPTR
+#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_RPTR_HI
+#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR
+#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_HI
+#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC0_RB_RPTR_ADDR_HI
+#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_RPTR_ADDR_LO
+#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_IB_CNTL
+#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC0_IB_RPTR
+#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC0_IB_OFFSET
+#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC0_IB_BASE_LO
+#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC0_IB_BASE_HI
+#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_IB_SIZE
+#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC0_SKIP_CNTL
+#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC0_CONTEXT_STATUS
+#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC0_DOORBELL
+#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC0_STATUS
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC0_DOORBELL_LOG
+#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_WATERMARK
+#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC0_DOORBELL_OFFSET
+#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC0_CSA_ADDR_LO
+#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_CSA_ADDR_HI
+#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_IB_SUB_REMAIN
+#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC0_PREEMPT
+#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC0_DUMMY_REG
+#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_RB_AQL_CNTL
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC0_MINOR_PTR_UPDATE
+#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC0_MIDCMD_DATA0
+#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA1
+#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA2
+#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA3
+#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA4
+#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA5
+#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA6
+#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA7
+#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA8
+#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_CNTL
+#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC1_RB_CNTL
+#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC1_RB_BASE
+#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_BASE_HI
+#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC1_RB_RPTR
+#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_RPTR_HI
+#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR
+#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_HI
+#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC1_RB_RPTR_ADDR_HI
+#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_RPTR_ADDR_LO
+#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_IB_CNTL
+#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC1_IB_RPTR
+#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC1_IB_OFFSET
+#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC1_IB_BASE_LO
+#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC1_IB_BASE_HI
+#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_IB_SIZE
+#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC1_SKIP_CNTL
+#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC1_CONTEXT_STATUS
+#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC1_DOORBELL
+#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC1_STATUS
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC1_DOORBELL_LOG
+#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_WATERMARK
+#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC1_DOORBELL_OFFSET
+#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC1_CSA_ADDR_LO
+#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_CSA_ADDR_HI
+#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_IB_SUB_REMAIN
+#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC1_PREEMPT
+#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC1_DUMMY_REG
+#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_RB_AQL_CNTL
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC1_MINOR_PTR_UPDATE
+#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC1_MIDCMD_DATA0
+#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA1
+#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA2
+#define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA3
+#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA4
+#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA5
+#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA6
+#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA7
+#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA8
+#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_CNTL
+#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC2_RB_CNTL
+#define SDMA1_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC2_RB_BASE
+#define SDMA1_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_BASE_HI
+#define SDMA1_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC2_RB_RPTR
+#define SDMA1_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_RPTR_HI
+#define SDMA1_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_WPTR
+#define SDMA1_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_WPTR_HI
+#define SDMA1_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC2_RB_RPTR_ADDR_HI
+#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_RPTR_ADDR_LO
+#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC2_IB_CNTL
+#define SDMA1_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC2_IB_RPTR
+#define SDMA1_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC2_IB_OFFSET
+#define SDMA1_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC2_IB_BASE_LO
+#define SDMA1_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC2_IB_BASE_HI
+#define SDMA1_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_IB_SIZE
+#define SDMA1_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC2_SKIP_CNTL
+#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC2_CONTEXT_STATUS
+#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC2_DOORBELL
+#define SDMA1_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC2_STATUS
+#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC2_DOORBELL_LOG
+#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC2_WATERMARK
+#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC2_DOORBELL_OFFSET
+#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC2_CSA_ADDR_LO
+#define SDMA1_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC2_CSA_ADDR_HI
+#define SDMA1_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_IB_SUB_REMAIN
+#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC2_PREEMPT
+#define SDMA1_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC2_DUMMY_REG
+#define SDMA1_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC2_RB_AQL_CNTL
+#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC2_MINOR_PTR_UPDATE
+#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC2_MIDCMD_DATA0
+#define SDMA1_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA1
+#define SDMA1_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA2
+#define SDMA1_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA3
+#define SDMA1_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA4
+#define SDMA1_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA5
+#define SDMA1_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA6
+#define SDMA1_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA7
+#define SDMA1_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_DATA8
+#define SDMA1_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC2_MIDCMD_CNTL
+#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC3_RB_CNTL
+#define SDMA1_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC3_RB_BASE
+#define SDMA1_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_BASE_HI
+#define SDMA1_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC3_RB_RPTR
+#define SDMA1_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_RPTR_HI
+#define SDMA1_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_WPTR
+#define SDMA1_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_WPTR_HI
+#define SDMA1_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC3_RB_RPTR_ADDR_HI
+#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_RPTR_ADDR_LO
+#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC3_IB_CNTL
+#define SDMA1_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC3_IB_RPTR
+#define SDMA1_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC3_IB_OFFSET
+#define SDMA1_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC3_IB_BASE_LO
+#define SDMA1_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC3_IB_BASE_HI
+#define SDMA1_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_IB_SIZE
+#define SDMA1_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC3_SKIP_CNTL
+#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC3_CONTEXT_STATUS
+#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC3_DOORBELL
+#define SDMA1_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC3_STATUS
+#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC3_DOORBELL_LOG
+#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC3_WATERMARK
+#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC3_DOORBELL_OFFSET
+#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC3_CSA_ADDR_LO
+#define SDMA1_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC3_CSA_ADDR_HI
+#define SDMA1_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_IB_SUB_REMAIN
+#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC3_PREEMPT
+#define SDMA1_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC3_DUMMY_REG
+#define SDMA1_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC3_RB_AQL_CNTL
+#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC3_MINOR_PTR_UPDATE
+#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC3_MIDCMD_DATA0
+#define SDMA1_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA1
+#define SDMA1_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA2
+#define SDMA1_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA3
+#define SDMA1_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA4
+#define SDMA1_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA5
+#define SDMA1_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA6
+#define SDMA1_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA7
+#define SDMA1_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_DATA8
+#define SDMA1_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC3_MIDCMD_CNTL
+#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC4_RB_CNTL
+#define SDMA1_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC4_RB_BASE
+#define SDMA1_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_BASE_HI
+#define SDMA1_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC4_RB_RPTR
+#define SDMA1_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_RPTR_HI
+#define SDMA1_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_WPTR
+#define SDMA1_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_WPTR_HI
+#define SDMA1_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC4_RB_RPTR_ADDR_HI
+#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_RPTR_ADDR_LO
+#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC4_IB_CNTL
+#define SDMA1_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC4_IB_RPTR
+#define SDMA1_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC4_IB_OFFSET
+#define SDMA1_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC4_IB_BASE_LO
+#define SDMA1_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC4_IB_BASE_HI
+#define SDMA1_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_IB_SIZE
+#define SDMA1_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC4_SKIP_CNTL
+#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC4_CONTEXT_STATUS
+#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC4_DOORBELL
+#define SDMA1_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC4_STATUS
+#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC4_DOORBELL_LOG
+#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC4_WATERMARK
+#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC4_DOORBELL_OFFSET
+#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC4_CSA_ADDR_LO
+#define SDMA1_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC4_CSA_ADDR_HI
+#define SDMA1_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_IB_SUB_REMAIN
+#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC4_PREEMPT
+#define SDMA1_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC4_DUMMY_REG
+#define SDMA1_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC4_RB_AQL_CNTL
+#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC4_MINOR_PTR_UPDATE
+#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC4_MIDCMD_DATA0
+#define SDMA1_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA1
+#define SDMA1_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA2
+#define SDMA1_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA3
+#define SDMA1_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA4
+#define SDMA1_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA5
+#define SDMA1_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA6
+#define SDMA1_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA7
+#define SDMA1_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_DATA8
+#define SDMA1_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC4_MIDCMD_CNTL
+#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC5_RB_CNTL
+#define SDMA1_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC5_RB_BASE
+#define SDMA1_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_BASE_HI
+#define SDMA1_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC5_RB_RPTR
+#define SDMA1_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_RPTR_HI
+#define SDMA1_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_WPTR
+#define SDMA1_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_WPTR_HI
+#define SDMA1_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC5_RB_RPTR_ADDR_HI
+#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_RPTR_ADDR_LO
+#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC5_IB_CNTL
+#define SDMA1_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC5_IB_RPTR
+#define SDMA1_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC5_IB_OFFSET
+#define SDMA1_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC5_IB_BASE_LO
+#define SDMA1_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC5_IB_BASE_HI
+#define SDMA1_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_IB_SIZE
+#define SDMA1_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC5_SKIP_CNTL
+#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC5_CONTEXT_STATUS
+#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC5_DOORBELL
+#define SDMA1_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC5_STATUS
+#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC5_DOORBELL_LOG
+#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC5_WATERMARK
+#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC5_DOORBELL_OFFSET
+#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC5_CSA_ADDR_LO
+#define SDMA1_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC5_CSA_ADDR_HI
+#define SDMA1_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_IB_SUB_REMAIN
+#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC5_PREEMPT
+#define SDMA1_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC5_DUMMY_REG
+#define SDMA1_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC5_RB_AQL_CNTL
+#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC5_MINOR_PTR_UPDATE
+#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC5_MIDCMD_DATA0
+#define SDMA1_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA1
+#define SDMA1_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA2
+#define SDMA1_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA3
+#define SDMA1_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA4
+#define SDMA1_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA5
+#define SDMA1_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA6
+#define SDMA1_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA7
+#define SDMA1_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_DATA8
+#define SDMA1_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC5_MIDCMD_CNTL
+#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC6_RB_CNTL
+#define SDMA1_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC6_RB_BASE
+#define SDMA1_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_BASE_HI
+#define SDMA1_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC6_RB_RPTR
+#define SDMA1_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_RPTR_HI
+#define SDMA1_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_WPTR
+#define SDMA1_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_WPTR_HI
+#define SDMA1_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC6_RB_RPTR_ADDR_HI
+#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_RPTR_ADDR_LO
+#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC6_IB_CNTL
+#define SDMA1_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC6_IB_RPTR
+#define SDMA1_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC6_IB_OFFSET
+#define SDMA1_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC6_IB_BASE_LO
+#define SDMA1_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC6_IB_BASE_HI
+#define SDMA1_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_IB_SIZE
+#define SDMA1_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC6_SKIP_CNTL
+#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC6_CONTEXT_STATUS
+#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC6_DOORBELL
+#define SDMA1_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC6_STATUS
+#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC6_DOORBELL_LOG
+#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC6_WATERMARK
+#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC6_DOORBELL_OFFSET
+#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC6_CSA_ADDR_LO
+#define SDMA1_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC6_CSA_ADDR_HI
+#define SDMA1_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_IB_SUB_REMAIN
+#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC6_PREEMPT
+#define SDMA1_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC6_DUMMY_REG
+#define SDMA1_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC6_RB_AQL_CNTL
+#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC6_MINOR_PTR_UPDATE
+#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC6_MIDCMD_DATA0
+#define SDMA1_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA1
+#define SDMA1_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA2
+#define SDMA1_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA3
+#define SDMA1_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA4
+#define SDMA1_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA5
+#define SDMA1_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA6
+#define SDMA1_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA7
+#define SDMA1_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_DATA8
+#define SDMA1_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC6_MIDCMD_CNTL
+#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC7_RB_CNTL
+#define SDMA1_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC7_RB_BASE
+#define SDMA1_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_BASE_HI
+#define SDMA1_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC7_RB_RPTR
+#define SDMA1_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_RPTR_HI
+#define SDMA1_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_WPTR
+#define SDMA1_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_WPTR_HI
+#define SDMA1_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC7_RB_RPTR_ADDR_HI
+#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_RPTR_ADDR_LO
+#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC7_IB_CNTL
+#define SDMA1_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC7_IB_RPTR
+#define SDMA1_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC7_IB_OFFSET
+#define SDMA1_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC7_IB_BASE_LO
+#define SDMA1_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC7_IB_BASE_HI
+#define SDMA1_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_IB_SIZE
+#define SDMA1_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC7_SKIP_CNTL
+#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_RLC7_CONTEXT_STATUS
+#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC7_DOORBELL
+#define SDMA1_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC7_STATUS
+#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC7_DOORBELL_LOG
+#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC7_WATERMARK
+#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC7_DOORBELL_OFFSET
+#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC7_CSA_ADDR_LO
+#define SDMA1_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC7_CSA_ADDR_HI
+#define SDMA1_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_IB_SUB_REMAIN
+#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC7_PREEMPT
+#define SDMA1_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC7_DUMMY_REG
+#define SDMA1_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC7_RB_AQL_CNTL
+#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC7_MINOR_PTR_UPDATE
+#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC7_MIDCMD_DATA0
+#define SDMA1_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA1
+#define SDMA1_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA2
+#define SDMA1_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA3
+#define SDMA1_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA4
+#define SDMA1_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA5
+#define SDMA1_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA6
+#define SDMA1_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA7
+#define SDMA1_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_DATA8
+#define SDMA1_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC7_MIDCMD_CNTL
+#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h
new file mode 100644
index 000000000000..6aa0813915c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma2_4_2_2_OFFSET_HEADER
+#define _sdma2_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma2_sdma2dec
+// base address: 0x78000
+#define mmSDMA2_UCODE_ADDR 0x0000
+#define mmSDMA2_UCODE_ADDR_BASE_IDX 1
+#define mmSDMA2_UCODE_DATA 0x0001
+#define mmSDMA2_UCODE_DATA_BASE_IDX 1
+#define mmSDMA2_VM_CNTL 0x0004
+#define mmSDMA2_VM_CNTL_BASE_IDX 1
+#define mmSDMA2_VM_CTX_LO 0x0005
+#define mmSDMA2_VM_CTX_LO_BASE_IDX 1
+#define mmSDMA2_VM_CTX_HI 0x0006
+#define mmSDMA2_VM_CTX_HI_BASE_IDX 1
+#define mmSDMA2_ACTIVE_FCN_ID 0x0007
+#define mmSDMA2_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmSDMA2_VM_CTX_CNTL 0x0008
+#define mmSDMA2_VM_CTX_CNTL_BASE_IDX 1
+#define mmSDMA2_VIRT_RESET_REQ 0x0009
+#define mmSDMA2_VIRT_RESET_REQ_BASE_IDX 1
+#define mmSDMA2_VF_ENABLE 0x000a
+#define mmSDMA2_VF_ENABLE_BASE_IDX 1
+#define mmSDMA2_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA2_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define mmSDMA2_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA2_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define mmSDMA2_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA2_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define mmSDMA2_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA2_CONTEXT_REG_TYPE3_BASE_IDX 1
+#define mmSDMA2_PUB_REG_TYPE0 0x000f
+#define mmSDMA2_PUB_REG_TYPE0_BASE_IDX 1
+#define mmSDMA2_PUB_REG_TYPE1 0x0010
+#define mmSDMA2_PUB_REG_TYPE1_BASE_IDX 1
+#define mmSDMA2_PUB_REG_TYPE2 0x0011
+#define mmSDMA2_PUB_REG_TYPE2_BASE_IDX 1
+#define mmSDMA2_PUB_REG_TYPE3 0x0012
+#define mmSDMA2_PUB_REG_TYPE3_BASE_IDX 1
+#define mmSDMA2_MMHUB_CNTL 0x0013
+#define mmSDMA2_MMHUB_CNTL_BASE_IDX 1
+#define mmSDMA2_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA2_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1
+#define mmSDMA2_POWER_CNTL 0x001a
+#define mmSDMA2_POWER_CNTL_BASE_IDX 1
+#define mmSDMA2_CLK_CTRL 0x001b
+#define mmSDMA2_CLK_CTRL_BASE_IDX 1
+#define mmSDMA2_CNTL 0x001c
+#define mmSDMA2_CNTL_BASE_IDX 1
+#define mmSDMA2_CHICKEN_BITS 0x001d
+#define mmSDMA2_CHICKEN_BITS_BASE_IDX 1
+#define mmSDMA2_GB_ADDR_CONFIG 0x001e
+#define mmSDMA2_GB_ADDR_CONFIG_BASE_IDX 1
+#define mmSDMA2_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA2_GB_ADDR_CONFIG_READ_BASE_IDX 1
+#define mmSDMA2_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA2_RB_RPTR_FETCH_HI_BASE_IDX 1
+#define mmSDMA2_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA2_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1
+#define mmSDMA2_RB_RPTR_FETCH 0x0022
+#define mmSDMA2_RB_RPTR_FETCH_BASE_IDX 1
+#define mmSDMA2_IB_OFFSET_FETCH 0x0023
+#define mmSDMA2_IB_OFFSET_FETCH_BASE_IDX 1
+#define mmSDMA2_PROGRAM 0x0024
+#define mmSDMA2_PROGRAM_BASE_IDX 1
+#define mmSDMA2_STATUS_REG 0x0025
+#define mmSDMA2_STATUS_REG_BASE_IDX 1
+#define mmSDMA2_STATUS1_REG 0x0026
+#define mmSDMA2_STATUS1_REG_BASE_IDX 1
+#define mmSDMA2_RD_BURST_CNTL 0x0027
+#define mmSDMA2_RD_BURST_CNTL_BASE_IDX 1
+#define mmSDMA2_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA2_HBM_PAGE_CONFIG_BASE_IDX 1
+#define mmSDMA2_UCODE_CHECKSUM 0x0029
+#define mmSDMA2_UCODE_CHECKSUM_BASE_IDX 1
+#define mmSDMA2_F32_CNTL 0x002a
+#define mmSDMA2_F32_CNTL_BASE_IDX 1
+#define mmSDMA2_FREEZE 0x002b
+#define mmSDMA2_FREEZE_BASE_IDX 1
+#define mmSDMA2_PHASE0_QUANTUM 0x002c
+#define mmSDMA2_PHASE0_QUANTUM_BASE_IDX 1
+#define mmSDMA2_PHASE1_QUANTUM 0x002d
+#define mmSDMA2_PHASE1_QUANTUM_BASE_IDX 1
+#define mmSDMA2_EDC_CONFIG 0x0032
+#define mmSDMA2_EDC_CONFIG_BASE_IDX 1
+#define mmSDMA2_BA_THRESHOLD 0x0033
+#define mmSDMA2_BA_THRESHOLD_BASE_IDX 1
+#define mmSDMA2_ID 0x0034
+#define mmSDMA2_ID_BASE_IDX 1
+#define mmSDMA2_VERSION 0x0035
+#define mmSDMA2_VERSION_BASE_IDX 1
+#define mmSDMA2_EDC_COUNTER 0x0036
+#define mmSDMA2_EDC_COUNTER_BASE_IDX 1
+#define mmSDMA2_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA2_EDC_COUNTER_CLEAR_BASE_IDX 1
+#define mmSDMA2_STATUS2_REG 0x0038
+#define mmSDMA2_STATUS2_REG_BASE_IDX 1
+#define mmSDMA2_ATOMIC_CNTL 0x0039
+#define mmSDMA2_ATOMIC_CNTL_BASE_IDX 1
+#define mmSDMA2_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA2_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmSDMA2_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA2_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmSDMA2_UTCL1_CNTL 0x003c
+#define mmSDMA2_UTCL1_CNTL_BASE_IDX 1
+#define mmSDMA2_UTCL1_WATERMK 0x003d
+#define mmSDMA2_UTCL1_WATERMK_BASE_IDX 1
+#define mmSDMA2_UTCL1_RD_STATUS 0x003e
+#define mmSDMA2_UTCL1_RD_STATUS_BASE_IDX 1
+#define mmSDMA2_UTCL1_WR_STATUS 0x003f
+#define mmSDMA2_UTCL1_WR_STATUS_BASE_IDX 1
+#define mmSDMA2_UTCL1_INV0 0x0040
+#define mmSDMA2_UTCL1_INV0_BASE_IDX 1
+#define mmSDMA2_UTCL1_INV1 0x0041
+#define mmSDMA2_UTCL1_INV1_BASE_IDX 1
+#define mmSDMA2_UTCL1_INV2 0x0042
+#define mmSDMA2_UTCL1_INV2_BASE_IDX 1
+#define mmSDMA2_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA2_UTCL1_RD_XNACK0_BASE_IDX 1
+#define mmSDMA2_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA2_UTCL1_RD_XNACK1_BASE_IDX 1
+#define mmSDMA2_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA2_UTCL1_WR_XNACK0_BASE_IDX 1
+#define mmSDMA2_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA2_UTCL1_WR_XNACK1_BASE_IDX 1
+#define mmSDMA2_UTCL1_TIMEOUT 0x0047
+#define mmSDMA2_UTCL1_TIMEOUT_BASE_IDX 1
+#define mmSDMA2_UTCL1_PAGE 0x0048
+#define mmSDMA2_UTCL1_PAGE_BASE_IDX 1
+#define mmSDMA2_POWER_CNTL_IDLE 0x0049
+#define mmSDMA2_POWER_CNTL_IDLE_BASE_IDX 1
+#define mmSDMA2_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA2_RELAX_ORDERING_LUT_BASE_IDX 1
+#define mmSDMA2_CHICKEN_BITS_2 0x004b
+#define mmSDMA2_CHICKEN_BITS_2_BASE_IDX 1
+#define mmSDMA2_STATUS3_REG 0x004c
+#define mmSDMA2_STATUS3_REG_BASE_IDX 1
+#define mmSDMA2_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA2_PHYSICAL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA2_PHYSICAL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_PHASE2_QUANTUM 0x004f
+#define mmSDMA2_PHASE2_QUANTUM_BASE_IDX 1
+#define mmSDMA2_ERROR_LOG 0x0050
+#define mmSDMA2_ERROR_LOG_BASE_IDX 1
+#define mmSDMA2_PUB_DUMMY_REG0 0x0051
+#define mmSDMA2_PUB_DUMMY_REG0_BASE_IDX 1
+#define mmSDMA2_PUB_DUMMY_REG1 0x0052
+#define mmSDMA2_PUB_DUMMY_REG1_BASE_IDX 1
+#define mmSDMA2_PUB_DUMMY_REG2 0x0053
+#define mmSDMA2_PUB_DUMMY_REG2_BASE_IDX 1
+#define mmSDMA2_PUB_DUMMY_REG3 0x0054
+#define mmSDMA2_PUB_DUMMY_REG3_BASE_IDX 1
+#define mmSDMA2_F32_COUNTER 0x0055
+#define mmSDMA2_F32_COUNTER_BASE_IDX 1
+#define mmSDMA2_UNBREAKABLE 0x0056
+#define mmSDMA2_UNBREAKABLE_BASE_IDX 1
+#define mmSDMA2_PERFMON_CNTL 0x0057
+#define mmSDMA2_PERFMON_CNTL_BASE_IDX 1
+#define mmSDMA2_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA2_PERFCOUNTER0_RESULT_BASE_IDX 1
+#define mmSDMA2_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA2_PERFCOUNTER1_RESULT_BASE_IDX 1
+#define mmSDMA2_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA2_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1
+#define mmSDMA2_CRD_CNTL 0x005b
+#define mmSDMA2_CRD_CNTL_BASE_IDX 1
+#define mmSDMA2_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA2_GPU_IOV_VIOLATION_LOG_BASE_IDX 1
+#define mmSDMA2_ULV_CNTL 0x005e
+#define mmSDMA2_ULV_CNTL_BASE_IDX 1
+#define mmSDMA2_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA2_EA_DBIT_ADDR_DATA_BASE_IDX 1
+#define mmSDMA2_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA2_EA_DBIT_ADDR_INDEX_BASE_IDX 1
+#define mmSDMA2_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA2_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1
+#define mmSDMA2_GFX_RB_CNTL 0x0080
+#define mmSDMA2_GFX_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_GFX_RB_BASE 0x0081
+#define mmSDMA2_GFX_RB_BASE_BASE_IDX 1
+#define mmSDMA2_GFX_RB_BASE_HI 0x0082
+#define mmSDMA2_GFX_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_GFX_RB_RPTR 0x0083
+#define mmSDMA2_GFX_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA2_GFX_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_GFX_RB_WPTR 0x0085
+#define mmSDMA2_GFX_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA2_GFX_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA2_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA2_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA2_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_GFX_IB_CNTL 0x008a
+#define mmSDMA2_GFX_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_GFX_IB_RPTR 0x008b
+#define mmSDMA2_GFX_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_GFX_IB_OFFSET 0x008c
+#define mmSDMA2_GFX_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_GFX_IB_BASE_LO 0x008d
+#define mmSDMA2_GFX_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_GFX_IB_BASE_HI 0x008e
+#define mmSDMA2_GFX_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_GFX_IB_SIZE 0x008f
+#define mmSDMA2_GFX_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_GFX_SKIP_CNTL 0x0090
+#define mmSDMA2_GFX_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA2_GFX_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_GFX_DOORBELL 0x0092
+#define mmSDMA2_GFX_DOORBELL_BASE_IDX 1
+#define mmSDMA2_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA2_GFX_CONTEXT_CNTL_BASE_IDX 1
+#define mmSDMA2_GFX_STATUS 0x00a8
+#define mmSDMA2_GFX_STATUS_BASE_IDX 1
+#define mmSDMA2_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA2_GFX_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_GFX_WATERMARK 0x00aa
+#define mmSDMA2_GFX_WATERMARK_BASE_IDX 1
+#define mmSDMA2_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA2_GFX_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA2_GFX_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA2_GFX_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA2_GFX_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_GFX_PREEMPT 0x00b0
+#define mmSDMA2_GFX_PREEMPT_BASE_IDX 1
+#define mmSDMA2_GFX_DUMMY_REG 0x00b1
+#define mmSDMA2_GFX_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA2_GFX_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA2_GFX_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA2_GFX_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA2_GFX_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA2_GFX_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA2_GFX_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA2_GFX_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA2_GFX_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA2_GFX_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA2_GFX_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA2_GFX_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA2_GFX_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_CNTL 0x00d8
+#define mmSDMA2_PAGE_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_BASE 0x00d9
+#define mmSDMA2_PAGE_RB_BASE_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA2_PAGE_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_RPTR 0x00db
+#define mmSDMA2_PAGE_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA2_PAGE_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_WPTR 0x00dd
+#define mmSDMA2_PAGE_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA2_PAGE_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA2_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA2_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA2_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_CNTL 0x00e2
+#define mmSDMA2_PAGE_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_RPTR 0x00e3
+#define mmSDMA2_PAGE_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA2_PAGE_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA2_PAGE_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA2_PAGE_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_SIZE 0x00e7
+#define mmSDMA2_PAGE_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA2_PAGE_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA2_PAGE_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_PAGE_DOORBELL 0x00ea
+#define mmSDMA2_PAGE_DOORBELL_BASE_IDX 1
+#define mmSDMA2_PAGE_STATUS 0x0100
+#define mmSDMA2_PAGE_STATUS_BASE_IDX 1
+#define mmSDMA2_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA2_PAGE_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_PAGE_WATERMARK 0x0102
+#define mmSDMA2_PAGE_WATERMARK_BASE_IDX 1
+#define mmSDMA2_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA2_PAGE_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA2_PAGE_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA2_PAGE_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA2_PAGE_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_PAGE_PREEMPT 0x0108
+#define mmSDMA2_PAGE_PREEMPT_BASE_IDX 1
+#define mmSDMA2_PAGE_DUMMY_REG 0x0109
+#define mmSDMA2_PAGE_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA2_PAGE_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA2_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA2_PAGE_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA2_PAGE_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA2_PAGE_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA2_PAGE_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA2_PAGE_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA2_PAGE_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA2_PAGE_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA2_PAGE_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA2_PAGE_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA2_PAGE_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_CNTL 0x0130
+#define mmSDMA2_RLC0_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_BASE 0x0131
+#define mmSDMA2_RLC0_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA2_RLC0_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_RPTR 0x0133
+#define mmSDMA2_RLC0_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA2_RLC0_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_WPTR 0x0135
+#define mmSDMA2_RLC0_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA2_RLC0_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA2_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA2_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA2_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_CNTL 0x013a
+#define mmSDMA2_RLC0_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_RPTR 0x013b
+#define mmSDMA2_RLC0_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_OFFSET 0x013c
+#define mmSDMA2_RLC0_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA2_RLC0_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA2_RLC0_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_SIZE 0x013f
+#define mmSDMA2_RLC0_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA2_RLC0_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA2_RLC0_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC0_DOORBELL 0x0142
+#define mmSDMA2_RLC0_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC0_STATUS 0x0158
+#define mmSDMA2_RLC0_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA2_RLC0_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC0_WATERMARK 0x015a
+#define mmSDMA2_RLC0_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA2_RLC0_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA2_RLC0_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA2_RLC0_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA2_RLC0_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC0_PREEMPT 0x0160
+#define mmSDMA2_RLC0_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC0_DUMMY_REG 0x0161
+#define mmSDMA2_RLC0_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA2_RLC0_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA2_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA2_RLC0_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA2_RLC0_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA2_RLC0_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA2_RLC0_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA2_RLC0_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA2_RLC0_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA2_RLC0_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA2_RLC0_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA2_RLC0_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA2_RLC0_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_CNTL 0x0188
+#define mmSDMA2_RLC1_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_BASE 0x0189
+#define mmSDMA2_RLC1_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA2_RLC1_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_RPTR 0x018b
+#define mmSDMA2_RLC1_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA2_RLC1_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_WPTR 0x018d
+#define mmSDMA2_RLC1_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA2_RLC1_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA2_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA2_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA2_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_CNTL 0x0192
+#define mmSDMA2_RLC1_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_RPTR 0x0193
+#define mmSDMA2_RLC1_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_OFFSET 0x0194
+#define mmSDMA2_RLC1_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA2_RLC1_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA2_RLC1_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_SIZE 0x0197
+#define mmSDMA2_RLC1_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA2_RLC1_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA2_RLC1_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC1_DOORBELL 0x019a
+#define mmSDMA2_RLC1_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC1_STATUS 0x01b0
+#define mmSDMA2_RLC1_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA2_RLC1_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC1_WATERMARK 0x01b2
+#define mmSDMA2_RLC1_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA2_RLC1_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA2_RLC1_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA2_RLC1_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA2_RLC1_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC1_PREEMPT 0x01b8
+#define mmSDMA2_RLC1_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA2_RLC1_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA2_RLC1_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA2_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA2_RLC1_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA2_RLC1_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA2_RLC1_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA2_RLC1_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA2_RLC1_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA2_RLC1_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA2_RLC1_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA2_RLC1_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA2_RLC1_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA2_RLC1_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_CNTL 0x01e0
+#define mmSDMA2_RLC2_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_BASE 0x01e1
+#define mmSDMA2_RLC2_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA2_RLC2_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_RPTR 0x01e3
+#define mmSDMA2_RLC2_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA2_RLC2_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_WPTR 0x01e5
+#define mmSDMA2_RLC2_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA2_RLC2_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA2_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA2_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA2_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_CNTL 0x01ea
+#define mmSDMA2_RLC2_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_RPTR 0x01eb
+#define mmSDMA2_RLC2_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA2_RLC2_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA2_RLC2_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA2_RLC2_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_SIZE 0x01ef
+#define mmSDMA2_RLC2_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA2_RLC2_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA2_RLC2_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC2_DOORBELL 0x01f2
+#define mmSDMA2_RLC2_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC2_STATUS 0x0208
+#define mmSDMA2_RLC2_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA2_RLC2_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC2_WATERMARK 0x020a
+#define mmSDMA2_RLC2_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA2_RLC2_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA2_RLC2_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA2_RLC2_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA2_RLC2_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC2_PREEMPT 0x0210
+#define mmSDMA2_RLC2_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC2_DUMMY_REG 0x0211
+#define mmSDMA2_RLC2_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA2_RLC2_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA2_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA2_RLC2_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA2_RLC2_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA2_RLC2_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA2_RLC2_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA2_RLC2_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA2_RLC2_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA2_RLC2_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA2_RLC2_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA2_RLC2_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA2_RLC2_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_CNTL 0x0238
+#define mmSDMA2_RLC3_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_BASE 0x0239
+#define mmSDMA2_RLC3_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA2_RLC3_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_RPTR 0x023b
+#define mmSDMA2_RLC3_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA2_RLC3_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_WPTR 0x023d
+#define mmSDMA2_RLC3_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA2_RLC3_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA2_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA2_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA2_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_CNTL 0x0242
+#define mmSDMA2_RLC3_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_RPTR 0x0243
+#define mmSDMA2_RLC3_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_OFFSET 0x0244
+#define mmSDMA2_RLC3_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA2_RLC3_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA2_RLC3_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_SIZE 0x0247
+#define mmSDMA2_RLC3_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA2_RLC3_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA2_RLC3_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC3_DOORBELL 0x024a
+#define mmSDMA2_RLC3_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC3_STATUS 0x0260
+#define mmSDMA2_RLC3_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA2_RLC3_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC3_WATERMARK 0x0262
+#define mmSDMA2_RLC3_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA2_RLC3_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA2_RLC3_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA2_RLC3_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA2_RLC3_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC3_PREEMPT 0x0268
+#define mmSDMA2_RLC3_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC3_DUMMY_REG 0x0269
+#define mmSDMA2_RLC3_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA2_RLC3_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA2_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA2_RLC3_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA2_RLC3_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA2_RLC3_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA2_RLC3_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA2_RLC3_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA2_RLC3_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA2_RLC3_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA2_RLC3_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA2_RLC3_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA2_RLC3_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_CNTL 0x0290
+#define mmSDMA2_RLC4_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_BASE 0x0291
+#define mmSDMA2_RLC4_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA2_RLC4_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_RPTR 0x0293
+#define mmSDMA2_RLC4_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA2_RLC4_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_WPTR 0x0295
+#define mmSDMA2_RLC4_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA2_RLC4_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA2_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA2_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA2_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_CNTL 0x029a
+#define mmSDMA2_RLC4_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_RPTR 0x029b
+#define mmSDMA2_RLC4_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_OFFSET 0x029c
+#define mmSDMA2_RLC4_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA2_RLC4_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA2_RLC4_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_SIZE 0x029f
+#define mmSDMA2_RLC4_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA2_RLC4_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA2_RLC4_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC4_DOORBELL 0x02a2
+#define mmSDMA2_RLC4_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC4_STATUS 0x02b8
+#define mmSDMA2_RLC4_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA2_RLC4_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC4_WATERMARK 0x02ba
+#define mmSDMA2_RLC4_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA2_RLC4_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA2_RLC4_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA2_RLC4_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA2_RLC4_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC4_PREEMPT 0x02c0
+#define mmSDMA2_RLC4_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA2_RLC4_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA2_RLC4_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA2_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA2_RLC4_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA2_RLC4_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA2_RLC4_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA2_RLC4_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA2_RLC4_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA2_RLC4_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA2_RLC4_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA2_RLC4_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA2_RLC4_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA2_RLC4_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_CNTL 0x02e8
+#define mmSDMA2_RLC5_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_BASE 0x02e9
+#define mmSDMA2_RLC5_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA2_RLC5_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_RPTR 0x02eb
+#define mmSDMA2_RLC5_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA2_RLC5_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_WPTR 0x02ed
+#define mmSDMA2_RLC5_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA2_RLC5_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA2_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA2_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA2_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_CNTL 0x02f2
+#define mmSDMA2_RLC5_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_RPTR 0x02f3
+#define mmSDMA2_RLC5_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA2_RLC5_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA2_RLC5_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA2_RLC5_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_SIZE 0x02f7
+#define mmSDMA2_RLC5_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA2_RLC5_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA2_RLC5_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC5_DOORBELL 0x02fa
+#define mmSDMA2_RLC5_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC5_STATUS 0x0310
+#define mmSDMA2_RLC5_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA2_RLC5_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC5_WATERMARK 0x0312
+#define mmSDMA2_RLC5_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA2_RLC5_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA2_RLC5_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA2_RLC5_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA2_RLC5_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC5_PREEMPT 0x0318
+#define mmSDMA2_RLC5_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC5_DUMMY_REG 0x0319
+#define mmSDMA2_RLC5_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA2_RLC5_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA2_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA2_RLC5_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA2_RLC5_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA2_RLC5_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA2_RLC5_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA2_RLC5_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA2_RLC5_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA2_RLC5_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA2_RLC5_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA2_RLC5_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA2_RLC5_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_CNTL 0x0340
+#define mmSDMA2_RLC6_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_BASE 0x0341
+#define mmSDMA2_RLC6_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA2_RLC6_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_RPTR 0x0343
+#define mmSDMA2_RLC6_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA2_RLC6_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_WPTR 0x0345
+#define mmSDMA2_RLC6_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA2_RLC6_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA2_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA2_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA2_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_CNTL 0x034a
+#define mmSDMA2_RLC6_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_RPTR 0x034b
+#define mmSDMA2_RLC6_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_OFFSET 0x034c
+#define mmSDMA2_RLC6_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA2_RLC6_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA2_RLC6_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_SIZE 0x034f
+#define mmSDMA2_RLC6_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA2_RLC6_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA2_RLC6_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC6_DOORBELL 0x0352
+#define mmSDMA2_RLC6_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC6_STATUS 0x0368
+#define mmSDMA2_RLC6_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA2_RLC6_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC6_WATERMARK 0x036a
+#define mmSDMA2_RLC6_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA2_RLC6_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA2_RLC6_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA2_RLC6_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA2_RLC6_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC6_PREEMPT 0x0370
+#define mmSDMA2_RLC6_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC6_DUMMY_REG 0x0371
+#define mmSDMA2_RLC6_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA2_RLC6_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA2_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA2_RLC6_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA2_RLC6_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA2_RLC6_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA2_RLC6_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA2_RLC6_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA2_RLC6_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA2_RLC6_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA2_RLC6_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA2_RLC6_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA2_RLC6_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_CNTL 0x0398
+#define mmSDMA2_RLC7_RB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_BASE 0x0399
+#define mmSDMA2_RLC7_RB_BASE_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA2_RLC7_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_RPTR 0x039b
+#define mmSDMA2_RLC7_RB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA2_RLC7_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_WPTR 0x039d
+#define mmSDMA2_RLC7_RB_WPTR_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA2_RLC7_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA2_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA2_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA2_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_CNTL 0x03a2
+#define mmSDMA2_RLC7_IB_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_RPTR 0x03a3
+#define mmSDMA2_RLC7_IB_RPTR_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA2_RLC7_IB_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA2_RLC7_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA2_RLC7_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_SIZE 0x03a7
+#define mmSDMA2_RLC7_IB_SIZE_BASE_IDX 1
+#define mmSDMA2_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA2_RLC7_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA2_RLC7_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC7_DOORBELL 0x03aa
+#define mmSDMA2_RLC7_DOORBELL_BASE_IDX 1
+#define mmSDMA2_RLC7_STATUS 0x03c0
+#define mmSDMA2_RLC7_STATUS_BASE_IDX 1
+#define mmSDMA2_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA2_RLC7_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA2_RLC7_WATERMARK 0x03c2
+#define mmSDMA2_RLC7_WATERMARK_BASE_IDX 1
+#define mmSDMA2_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA2_RLC7_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA2_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA2_RLC7_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA2_RLC7_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA2_RLC7_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA2_RLC7_PREEMPT 0x03c8
+#define mmSDMA2_RLC7_PREEMPT_BASE_IDX 1
+#define mmSDMA2_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA2_RLC7_DUMMY_REG_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA2_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA2_RLC7_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA2_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA2_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA2_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA2_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA2_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA2_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA2_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA2_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA2_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA2_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA2_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA2_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA2_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..be10d5d3347e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma2_4_2_2_SH_MASK_HEADER
+#define _sdma2_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma2_sdma2dec
+//SDMA2_UCODE_ADDR
+#define SDMA2_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA2_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA2_UCODE_DATA
+#define SDMA2_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA2_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_VM_CNTL
+#define SDMA2_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA2_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA2_VM_CTX_LO
+#define SDMA2_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA2_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_VM_CTX_HI
+#define SDMA2_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA2_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_ACTIVE_FCN_ID
+#define SDMA2_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA2_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA2_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA2_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA2_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA2_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA2_VM_CTX_CNTL
+#define SDMA2_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA2_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA2_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA2_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA2_VIRT_RESET_REQ
+#define SDMA2_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA2_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA2_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA2_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA2_VF_ENABLE
+#define SDMA2_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA2_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA2_CONTEXT_REG_TYPE0
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE__SHIFT 0x1
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_DOORBELL__SHIFT 0x12
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA2_CONTEXT_REG_TYPE1
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_STATUS__SHIFT 0x8
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_WATERMARK__SHIFT 0xa
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA2_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_PREEMPT__SHIFT 0x10
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA2_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_STATUS_MASK 0x00000100L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA2_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA2_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA2_CONTEXT_REG_TYPE2
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA2_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA2_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA2_CONTEXT_REG_TYPE3
+#define SDMA2_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA2_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA2_PUB_REG_TYPE0
+#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_ADDR__SHIFT 0x0
+#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_DATA__SHIFT 0x1
+#define SDMA2_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CNTL__SHIFT 0x4
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_LO__SHIFT 0x5
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_HI__SHIFT 0x6
+#define SDMA2_PUB_REG_TYPE0__SDMA2_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA2_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA2_PUB_REG_TYPE0__SDMA2_MMHUB_CNTL__SHIFT 0x13
+#define SDMA2_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA2_PUB_REG_TYPE0__SDMA2_POWER_CNTL__SHIFT 0x1a
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CLK_CTRL__SHIFT 0x1b
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CNTL__SHIFT 0x1c
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_ADDR_MASK 0x00000001L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_DATA_MASK 0x00000002L
+#define SDMA2_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CNTL_MASK 0x00000010L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_LO_MASK 0x00000020L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_HI_MASK 0x00000040L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA2_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA2_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_POWER_CNTL_MASK 0x04000000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CLK_CTRL_MASK 0x08000000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CNTL_MASK 0x10000000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA2_PUB_REG_TYPE1
+#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA2_PUB_REG_TYPE1__SDMA2_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA2_PUB_REG_TYPE1__SDMA2_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA2_PUB_REG_TYPE1__SDMA2_PROGRAM__SHIFT 0x4
+#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS_REG__SHIFT 0x5
+#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS1_REG__SHIFT 0x6
+#define SDMA2_PUB_REG_TYPE1__SDMA2_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA2_PUB_REG_TYPE1__SDMA2_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA2_PUB_REG_TYPE1__SDMA2_F32_CNTL__SHIFT 0xa
+#define SDMA2_PUB_REG_TYPE1__SDMA2_FREEZE__SHIFT 0xb
+#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA2_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_CONFIG__SHIFT 0x12
+#define SDMA2_PUB_REG_TYPE1__SDMA2_BA_THRESHOLD__SHIFT 0x13
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ID__SHIFT 0x14
+#define SDMA2_PUB_REG_TYPE1__SDMA2_VERSION__SHIFT 0x15
+#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER__SHIFT 0x16
+#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS2_REG__SHIFT 0x18
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_PROGRAM_MASK 0x00000010L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS_REG_MASK 0x00000020L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS1_REG_MASK 0x00000040L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_F32_CNTL_MASK 0x00000400L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_FREEZE_MASK 0x00000800L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA2_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_CONFIG_MASK 0x00040000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ID_MASK 0x00100000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_VERSION_MASK 0x00200000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER_MASK 0x00400000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS2_REG_MASK 0x01000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA2_PUB_REG_TYPE2
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV0__SHIFT 0x0
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV1__SHIFT 0x1
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV2__SHIFT 0x2
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_PAGE__SHIFT 0x8
+#define SDMA2_PUB_REG_TYPE2__SDMA2_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA2_PUB_REG_TYPE2__SDMA2_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA2_PUB_REG_TYPE2__SDMA2_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA2_PUB_REG_TYPE2__SDMA2_STATUS3_REG__SHIFT 0xc
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA2_PUB_REG_TYPE2__SDMA2_ERROR_LOG__SHIFT 0x10
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA2_PUB_REG_TYPE2__SDMA2_F32_COUNTER__SHIFT 0x15
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UNBREAKABLE__SHIFT 0x16
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFMON_CNTL__SHIFT 0x17
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA2_PUB_REG_TYPE2__SDMA2_CRD_CNTL__SHIFT 0x1b
+#define SDMA2_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA2_PUB_REG_TYPE2__SDMA2_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA2_PUB_REG_TYPE2__SDMA2_ULV_CNTL__SHIFT 0x1e
+#define SDMA2_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV0_MASK 0x00000001L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV1_MASK 0x00000002L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV2_MASK 0x00000004L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_STATUS3_REG_MASK 0x00001000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_ERROR_LOG_MASK 0x00010000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_F32_COUNTER_MASK 0x00200000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_UNBREAKABLE_MASK 0x00400000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_CRD_CNTL_MASK 0x08000000L
+#define SDMA2_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA2_PUB_REG_TYPE2__SDMA2_ULV_CNTL_MASK 0x40000000L
+#define SDMA2_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA2_PUB_REG_TYPE3
+#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA2_PUB_REG_TYPE3__SDMA2_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA2_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA2_PUB_REG_TYPE3__SDMA2_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA2_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA2_MMHUB_CNTL
+#define SDMA2_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA2_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA2_CONTEXT_GROUP_BOUNDARY
+#define SDMA2_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA2_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA2_POWER_CNTL
+#define SDMA2_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA2_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA2_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA2_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA2_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA2_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA2_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA2_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA2_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA2_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA2_CLK_CTRL
+#define SDMA2_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA2_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA2_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA2_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA2_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA2_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA2_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA2_CNTL
+#define SDMA2_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA2_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA2_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA2_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA2_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA2_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA2_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA2_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA2_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA2_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA2_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA2_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA2_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA2_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA2_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA2_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA2_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA2_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA2_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA2_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA2_CHICKEN_BITS
+#define SDMA2_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA2_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA2_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA2_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA2_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA2_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA2_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA2_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA2_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA2_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA2_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA2_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA2_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA2_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA2_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA2_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA2_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA2_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA2_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA2_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA2_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA2_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA2_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA2_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA2_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA2_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA2_GB_ADDR_CONFIG
+#define SDMA2_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA2_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA2_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA2_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA2_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA2_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA2_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA2_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA2_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA2_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA2_GB_ADDR_CONFIG_READ
+#define SDMA2_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA2_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA2_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA2_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA2_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA2_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA2_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA2_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA2_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA2_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA2_RB_RPTR_FETCH_HI
+#define SDMA2_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA2_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA2_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA2_RB_RPTR_FETCH
+#define SDMA2_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA2_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA2_IB_OFFSET_FETCH
+#define SDMA2_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA2_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA2_PROGRAM
+#define SDMA2_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA2_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA2_STATUS_REG
+#define SDMA2_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA2_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA2_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA2_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA2_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA2_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA2_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA2_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA2_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA2_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA2_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA2_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA2_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA2_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA2_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA2_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA2_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA2_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA2_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA2_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA2_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA2_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA2_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA2_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA2_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA2_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA2_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA2_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA2_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA2_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA2_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA2_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA2_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA2_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA2_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA2_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA2_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA2_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA2_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA2_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA2_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA2_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA2_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA2_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA2_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA2_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA2_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA2_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA2_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA2_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA2_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA2_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA2_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA2_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA2_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA2_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA2_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA2_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA2_STATUS1_REG
+#define SDMA2_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA2_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA2_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA2_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA2_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA2_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA2_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA2_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA2_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA2_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA2_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA2_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA2_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA2_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA2_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA2_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA2_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA2_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA2_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA2_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA2_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA2_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA2_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA2_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA2_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA2_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA2_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA2_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA2_RD_BURST_CNTL
+#define SDMA2_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA2_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA2_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA2_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA2_HBM_PAGE_CONFIG
+#define SDMA2_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA2_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA2_UCODE_CHECKSUM
+#define SDMA2_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA2_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA2_F32_CNTL
+#define SDMA2_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA2_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA2_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA2_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA2_FREEZE
+#define SDMA2_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA2_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA2_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA2_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA2_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA2_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA2_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA2_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA2_PHASE0_QUANTUM
+#define SDMA2_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA2_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA2_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA2_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA2_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA2_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA2_PHASE1_QUANTUM
+#define SDMA2_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA2_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA2_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA2_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA2_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA2_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA2_EDC_CONFIG
+#define SDMA2_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA2_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA2_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA2_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA2_BA_THRESHOLD
+#define SDMA2_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA2_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA2_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA2_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA2_ID
+#define SDMA2_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA2_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA2_VERSION
+#define SDMA2_VERSION__MINVER__SHIFT 0x0
+#define SDMA2_VERSION__MAJVER__SHIFT 0x8
+#define SDMA2_VERSION__REV__SHIFT 0x10
+#define SDMA2_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA2_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA2_VERSION__REV_MASK 0x003F0000L
+//SDMA2_EDC_COUNTER
+#define SDMA2_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA2_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA2_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA2_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA2_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA2_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA2_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA2_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA2_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA2_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA2_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA2_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA2_EDC_COUNTER_CLEAR
+#define SDMA2_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA2_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA2_STATUS2_REG
+#define SDMA2_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA2_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA2_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA2_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA2_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA2_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA2_ATOMIC_CNTL
+#define SDMA2_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA2_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA2_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA2_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA2_ATOMIC_PREOP_LO
+#define SDMA2_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA2_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA2_ATOMIC_PREOP_HI
+#define SDMA2_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA2_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA2_UTCL1_CNTL
+#define SDMA2_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA2_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA2_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA2_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA2_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA2_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA2_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA2_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA2_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA2_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA2_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA2_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA2_UTCL1_WATERMK
+#define SDMA2_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA2_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA2_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA2_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA2_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA2_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA2_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA2_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA2_UTCL1_RD_STATUS
+#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA2_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA2_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA2_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA2_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA2_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA2_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA2_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA2_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA2_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA2_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA2_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA2_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA2_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA2_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA2_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA2_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA2_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA2_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA2_UTCL1_WR_STATUS
+#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA2_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA2_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA2_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA2_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA2_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA2_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA2_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA2_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA2_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA2_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA2_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA2_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA2_UTCL1_INV0
+#define SDMA2_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA2_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA2_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA2_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA2_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA2_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA2_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA2_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA2_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA2_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA2_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA2_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA2_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA2_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA2_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA2_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA2_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA2_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA2_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA2_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA2_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA2_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA2_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA2_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA2_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA2_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA2_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA2_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA2_UTCL1_INV1
+#define SDMA2_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA2_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA2_UTCL1_INV2
+#define SDMA2_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA2_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA2_UTCL1_RD_XNACK0
+#define SDMA2_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA2_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA2_UTCL1_RD_XNACK1
+#define SDMA2_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA2_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA2_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA2_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA2_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA2_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA2_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA2_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA2_UTCL1_WR_XNACK0
+#define SDMA2_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA2_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA2_UTCL1_WR_XNACK1
+#define SDMA2_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA2_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA2_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA2_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA2_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA2_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA2_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA2_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA2_UTCL1_TIMEOUT
+#define SDMA2_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA2_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA2_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA2_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA2_UTCL1_PAGE
+#define SDMA2_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA2_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA2_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA2_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA2_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA2_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA2_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA2_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA2_POWER_CNTL_IDLE
+#define SDMA2_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA2_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA2_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA2_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA2_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA2_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA2_RELAX_ORDERING_LUT
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA2_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA2_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA2_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA2_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA2_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA2_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA2_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA2_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA2_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA2_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA2_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA2_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA2_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA2_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA2_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA2_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA2_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA2_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA2_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA2_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA2_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA2_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA2_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA2_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA2_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA2_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA2_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA2_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA2_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA2_CHICKEN_BITS_2
+#define SDMA2_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA2_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA2_STATUS3_REG
+#define SDMA2_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA2_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA2_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA2_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA2_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA2_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA2_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA2_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA2_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA2_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA2_PHYSICAL_ADDR_LO
+#define SDMA2_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA2_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA2_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA2_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA2_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA2_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA2_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA2_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA2_PHYSICAL_ADDR_HI
+#define SDMA2_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA2_PHASE2_QUANTUM
+#define SDMA2_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA2_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA2_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA2_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA2_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA2_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA2_ERROR_LOG
+#define SDMA2_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA2_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA2_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA2_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA2_PUB_DUMMY_REG0
+#define SDMA2_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA2_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_PUB_DUMMY_REG1
+#define SDMA2_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA2_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_PUB_DUMMY_REG2
+#define SDMA2_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA2_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_PUB_DUMMY_REG3
+#define SDMA2_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA2_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_F32_COUNTER
+#define SDMA2_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA2_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_UNBREAKABLE
+#define SDMA2_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA2_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA2_PERFMON_CNTL
+#define SDMA2_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA2_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA2_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA2_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA2_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA2_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA2_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA2_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA2_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA2_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA2_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA2_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA2_PERFCOUNTER0_RESULT
+#define SDMA2_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA2_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA2_PERFCOUNTER1_RESULT
+#define SDMA2_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA2_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA2_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA2_CRD_CNTL
+#define SDMA2_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA2_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA2_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA2_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA2_GPU_IOV_VIOLATION_LOG
+#define SDMA2_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA2_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA2_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA2_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA2_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA2_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA2_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA2_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA2_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA2_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA2_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA2_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA2_ULV_CNTL
+#define SDMA2_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA2_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA2_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA2_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA2_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA2_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA2_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA2_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA2_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA2_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA2_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA2_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA2_EA_DBIT_ADDR_DATA
+#define SDMA2_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA2_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA2_EA_DBIT_ADDR_INDEX
+#define SDMA2_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA2_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA2_GPU_IOV_VIOLATION_LOG2
+#define SDMA2_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA2_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA2_GFX_RB_CNTL
+#define SDMA2_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_GFX_RB_BASE
+#define SDMA2_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_BASE_HI
+#define SDMA2_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_GFX_RB_RPTR
+#define SDMA2_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_RPTR_HI
+#define SDMA2_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_WPTR
+#define SDMA2_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_WPTR_HI
+#define SDMA2_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_WPTR_POLL_CNTL
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_GFX_RB_RPTR_ADDR_HI
+#define SDMA2_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_RPTR_ADDR_LO
+#define SDMA2_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_GFX_IB_CNTL
+#define SDMA2_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_GFX_IB_RPTR
+#define SDMA2_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_GFX_IB_OFFSET
+#define SDMA2_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_GFX_IB_BASE_LO
+#define SDMA2_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_GFX_IB_BASE_HI
+#define SDMA2_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_GFX_IB_SIZE
+#define SDMA2_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_GFX_SKIP_CNTL
+#define SDMA2_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_GFX_CONTEXT_STATUS
+#define SDMA2_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_GFX_DOORBELL
+#define SDMA2_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_GFX_CONTEXT_CNTL
+#define SDMA2_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA2_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA2_GFX_STATUS
+#define SDMA2_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_GFX_DOORBELL_LOG
+#define SDMA2_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_GFX_WATERMARK
+#define SDMA2_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_GFX_DOORBELL_OFFSET
+#define SDMA2_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_GFX_CSA_ADDR_LO
+#define SDMA2_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_GFX_CSA_ADDR_HI
+#define SDMA2_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_GFX_IB_SUB_REMAIN
+#define SDMA2_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_GFX_PREEMPT
+#define SDMA2_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_GFX_DUMMY_REG
+#define SDMA2_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_GFX_RB_AQL_CNTL
+#define SDMA2_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_GFX_MINOR_PTR_UPDATE
+#define SDMA2_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_GFX_MIDCMD_DATA0
+#define SDMA2_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA1
+#define SDMA2_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA2
+#define SDMA2_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA3
+#define SDMA2_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA4
+#define SDMA2_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA5
+#define SDMA2_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA6
+#define SDMA2_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA7
+#define SDMA2_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_DATA8
+#define SDMA2_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_GFX_MIDCMD_CNTL
+#define SDMA2_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_PAGE_RB_CNTL
+#define SDMA2_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_PAGE_RB_BASE
+#define SDMA2_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_BASE_HI
+#define SDMA2_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_PAGE_RB_RPTR
+#define SDMA2_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_RPTR_HI
+#define SDMA2_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_WPTR
+#define SDMA2_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_WPTR_HI
+#define SDMA2_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_PAGE_RB_RPTR_ADDR_HI
+#define SDMA2_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_RPTR_ADDR_LO
+#define SDMA2_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_PAGE_IB_CNTL
+#define SDMA2_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_PAGE_IB_RPTR
+#define SDMA2_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_PAGE_IB_OFFSET
+#define SDMA2_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_PAGE_IB_BASE_LO
+#define SDMA2_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_PAGE_IB_BASE_HI
+#define SDMA2_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_IB_SIZE
+#define SDMA2_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_PAGE_SKIP_CNTL
+#define SDMA2_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_PAGE_CONTEXT_STATUS
+#define SDMA2_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_PAGE_DOORBELL
+#define SDMA2_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_PAGE_STATUS
+#define SDMA2_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_PAGE_DOORBELL_LOG
+#define SDMA2_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_PAGE_WATERMARK
+#define SDMA2_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_PAGE_DOORBELL_OFFSET
+#define SDMA2_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_PAGE_CSA_ADDR_LO
+#define SDMA2_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_PAGE_CSA_ADDR_HI
+#define SDMA2_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_IB_SUB_REMAIN
+#define SDMA2_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_PAGE_PREEMPT
+#define SDMA2_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_PAGE_DUMMY_REG
+#define SDMA2_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_PAGE_RB_AQL_CNTL
+#define SDMA2_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_PAGE_MINOR_PTR_UPDATE
+#define SDMA2_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_PAGE_MIDCMD_DATA0
+#define SDMA2_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA1
+#define SDMA2_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA2
+#define SDMA2_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA3
+#define SDMA2_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA4
+#define SDMA2_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA5
+#define SDMA2_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA6
+#define SDMA2_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA7
+#define SDMA2_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_DATA8
+#define SDMA2_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_PAGE_MIDCMD_CNTL
+#define SDMA2_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC0_RB_CNTL
+#define SDMA2_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC0_RB_BASE
+#define SDMA2_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_BASE_HI
+#define SDMA2_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC0_RB_RPTR
+#define SDMA2_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_RPTR_HI
+#define SDMA2_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_WPTR
+#define SDMA2_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_WPTR_HI
+#define SDMA2_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC0_RB_RPTR_ADDR_HI
+#define SDMA2_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_RPTR_ADDR_LO
+#define SDMA2_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC0_IB_CNTL
+#define SDMA2_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC0_IB_RPTR
+#define SDMA2_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC0_IB_OFFSET
+#define SDMA2_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC0_IB_BASE_LO
+#define SDMA2_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC0_IB_BASE_HI
+#define SDMA2_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_IB_SIZE
+#define SDMA2_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC0_SKIP_CNTL
+#define SDMA2_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC0_CONTEXT_STATUS
+#define SDMA2_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC0_DOORBELL
+#define SDMA2_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC0_STATUS
+#define SDMA2_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC0_DOORBELL_LOG
+#define SDMA2_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC0_WATERMARK
+#define SDMA2_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC0_DOORBELL_OFFSET
+#define SDMA2_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC0_CSA_ADDR_LO
+#define SDMA2_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC0_CSA_ADDR_HI
+#define SDMA2_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_IB_SUB_REMAIN
+#define SDMA2_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC0_PREEMPT
+#define SDMA2_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC0_DUMMY_REG
+#define SDMA2_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC0_RB_AQL_CNTL
+#define SDMA2_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC0_MINOR_PTR_UPDATE
+#define SDMA2_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC0_MIDCMD_DATA0
+#define SDMA2_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA1
+#define SDMA2_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA2
+#define SDMA2_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA3
+#define SDMA2_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA4
+#define SDMA2_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA5
+#define SDMA2_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA6
+#define SDMA2_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA7
+#define SDMA2_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_DATA8
+#define SDMA2_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC0_MIDCMD_CNTL
+#define SDMA2_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC1_RB_CNTL
+#define SDMA2_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC1_RB_BASE
+#define SDMA2_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_BASE_HI
+#define SDMA2_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC1_RB_RPTR
+#define SDMA2_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_RPTR_HI
+#define SDMA2_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_WPTR
+#define SDMA2_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_WPTR_HI
+#define SDMA2_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC1_RB_RPTR_ADDR_HI
+#define SDMA2_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_RPTR_ADDR_LO
+#define SDMA2_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC1_IB_CNTL
+#define SDMA2_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC1_IB_RPTR
+#define SDMA2_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC1_IB_OFFSET
+#define SDMA2_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC1_IB_BASE_LO
+#define SDMA2_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC1_IB_BASE_HI
+#define SDMA2_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_IB_SIZE
+#define SDMA2_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC1_SKIP_CNTL
+#define SDMA2_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC1_CONTEXT_STATUS
+#define SDMA2_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC1_DOORBELL
+#define SDMA2_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC1_STATUS
+#define SDMA2_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC1_DOORBELL_LOG
+#define SDMA2_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC1_WATERMARK
+#define SDMA2_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC1_DOORBELL_OFFSET
+#define SDMA2_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC1_CSA_ADDR_LO
+#define SDMA2_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC1_CSA_ADDR_HI
+#define SDMA2_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_IB_SUB_REMAIN
+#define SDMA2_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC1_PREEMPT
+#define SDMA2_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC1_DUMMY_REG
+#define SDMA2_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC1_RB_AQL_CNTL
+#define SDMA2_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC1_MINOR_PTR_UPDATE
+#define SDMA2_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC1_MIDCMD_DATA0
+#define SDMA2_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA1
+#define SDMA2_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA2
+#define SDMA2_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA3
+#define SDMA2_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA4
+#define SDMA2_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA5
+#define SDMA2_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA6
+#define SDMA2_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA7
+#define SDMA2_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_DATA8
+#define SDMA2_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC1_MIDCMD_CNTL
+#define SDMA2_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC2_RB_CNTL
+#define SDMA2_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC2_RB_BASE
+#define SDMA2_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_BASE_HI
+#define SDMA2_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC2_RB_RPTR
+#define SDMA2_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_RPTR_HI
+#define SDMA2_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_WPTR
+#define SDMA2_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_WPTR_HI
+#define SDMA2_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC2_RB_RPTR_ADDR_HI
+#define SDMA2_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_RPTR_ADDR_LO
+#define SDMA2_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC2_IB_CNTL
+#define SDMA2_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC2_IB_RPTR
+#define SDMA2_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC2_IB_OFFSET
+#define SDMA2_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC2_IB_BASE_LO
+#define SDMA2_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC2_IB_BASE_HI
+#define SDMA2_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_IB_SIZE
+#define SDMA2_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC2_SKIP_CNTL
+#define SDMA2_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC2_CONTEXT_STATUS
+#define SDMA2_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC2_DOORBELL
+#define SDMA2_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC2_STATUS
+#define SDMA2_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC2_DOORBELL_LOG
+#define SDMA2_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC2_WATERMARK
+#define SDMA2_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC2_DOORBELL_OFFSET
+#define SDMA2_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC2_CSA_ADDR_LO
+#define SDMA2_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC2_CSA_ADDR_HI
+#define SDMA2_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_IB_SUB_REMAIN
+#define SDMA2_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC2_PREEMPT
+#define SDMA2_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC2_DUMMY_REG
+#define SDMA2_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC2_RB_AQL_CNTL
+#define SDMA2_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC2_MINOR_PTR_UPDATE
+#define SDMA2_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC2_MIDCMD_DATA0
+#define SDMA2_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA1
+#define SDMA2_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA2
+#define SDMA2_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA3
+#define SDMA2_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA4
+#define SDMA2_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA5
+#define SDMA2_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA6
+#define SDMA2_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA7
+#define SDMA2_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_DATA8
+#define SDMA2_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC2_MIDCMD_CNTL
+#define SDMA2_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC3_RB_CNTL
+#define SDMA2_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC3_RB_BASE
+#define SDMA2_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_BASE_HI
+#define SDMA2_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC3_RB_RPTR
+#define SDMA2_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_RPTR_HI
+#define SDMA2_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_WPTR
+#define SDMA2_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_WPTR_HI
+#define SDMA2_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC3_RB_RPTR_ADDR_HI
+#define SDMA2_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_RPTR_ADDR_LO
+#define SDMA2_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC3_IB_CNTL
+#define SDMA2_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC3_IB_RPTR
+#define SDMA2_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC3_IB_OFFSET
+#define SDMA2_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC3_IB_BASE_LO
+#define SDMA2_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC3_IB_BASE_HI
+#define SDMA2_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_IB_SIZE
+#define SDMA2_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC3_SKIP_CNTL
+#define SDMA2_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC3_CONTEXT_STATUS
+#define SDMA2_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC3_DOORBELL
+#define SDMA2_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC3_STATUS
+#define SDMA2_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC3_DOORBELL_LOG
+#define SDMA2_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC3_WATERMARK
+#define SDMA2_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC3_DOORBELL_OFFSET
+#define SDMA2_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC3_CSA_ADDR_LO
+#define SDMA2_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC3_CSA_ADDR_HI
+#define SDMA2_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_IB_SUB_REMAIN
+#define SDMA2_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC3_PREEMPT
+#define SDMA2_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC3_DUMMY_REG
+#define SDMA2_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC3_RB_AQL_CNTL
+#define SDMA2_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC3_MINOR_PTR_UPDATE
+#define SDMA2_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC3_MIDCMD_DATA0
+#define SDMA2_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA1
+#define SDMA2_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA2
+#define SDMA2_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA3
+#define SDMA2_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA4
+#define SDMA2_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA5
+#define SDMA2_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA6
+#define SDMA2_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA7
+#define SDMA2_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_DATA8
+#define SDMA2_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC3_MIDCMD_CNTL
+#define SDMA2_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC4_RB_CNTL
+#define SDMA2_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC4_RB_BASE
+#define SDMA2_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_BASE_HI
+#define SDMA2_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC4_RB_RPTR
+#define SDMA2_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_RPTR_HI
+#define SDMA2_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_WPTR
+#define SDMA2_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_WPTR_HI
+#define SDMA2_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC4_RB_RPTR_ADDR_HI
+#define SDMA2_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_RPTR_ADDR_LO
+#define SDMA2_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC4_IB_CNTL
+#define SDMA2_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC4_IB_RPTR
+#define SDMA2_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC4_IB_OFFSET
+#define SDMA2_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC4_IB_BASE_LO
+#define SDMA2_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC4_IB_BASE_HI
+#define SDMA2_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_IB_SIZE
+#define SDMA2_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC4_SKIP_CNTL
+#define SDMA2_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC4_CONTEXT_STATUS
+#define SDMA2_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC4_DOORBELL
+#define SDMA2_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC4_STATUS
+#define SDMA2_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC4_DOORBELL_LOG
+#define SDMA2_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC4_WATERMARK
+#define SDMA2_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC4_DOORBELL_OFFSET
+#define SDMA2_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC4_CSA_ADDR_LO
+#define SDMA2_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC4_CSA_ADDR_HI
+#define SDMA2_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_IB_SUB_REMAIN
+#define SDMA2_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC4_PREEMPT
+#define SDMA2_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC4_DUMMY_REG
+#define SDMA2_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC4_RB_AQL_CNTL
+#define SDMA2_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC4_MINOR_PTR_UPDATE
+#define SDMA2_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC4_MIDCMD_DATA0
+#define SDMA2_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA1
+#define SDMA2_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA2
+#define SDMA2_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA3
+#define SDMA2_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA4
+#define SDMA2_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA5
+#define SDMA2_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA6
+#define SDMA2_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA7
+#define SDMA2_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_DATA8
+#define SDMA2_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC4_MIDCMD_CNTL
+#define SDMA2_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC5_RB_CNTL
+#define SDMA2_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC5_RB_BASE
+#define SDMA2_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_BASE_HI
+#define SDMA2_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC5_RB_RPTR
+#define SDMA2_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_RPTR_HI
+#define SDMA2_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_WPTR
+#define SDMA2_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_WPTR_HI
+#define SDMA2_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC5_RB_RPTR_ADDR_HI
+#define SDMA2_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_RPTR_ADDR_LO
+#define SDMA2_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC5_IB_CNTL
+#define SDMA2_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC5_IB_RPTR
+#define SDMA2_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC5_IB_OFFSET
+#define SDMA2_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC5_IB_BASE_LO
+#define SDMA2_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC5_IB_BASE_HI
+#define SDMA2_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_IB_SIZE
+#define SDMA2_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC5_SKIP_CNTL
+#define SDMA2_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC5_CONTEXT_STATUS
+#define SDMA2_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC5_DOORBELL
+#define SDMA2_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC5_STATUS
+#define SDMA2_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC5_DOORBELL_LOG
+#define SDMA2_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC5_WATERMARK
+#define SDMA2_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC5_DOORBELL_OFFSET
+#define SDMA2_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC5_CSA_ADDR_LO
+#define SDMA2_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC5_CSA_ADDR_HI
+#define SDMA2_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_IB_SUB_REMAIN
+#define SDMA2_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC5_PREEMPT
+#define SDMA2_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC5_DUMMY_REG
+#define SDMA2_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC5_RB_AQL_CNTL
+#define SDMA2_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC5_MINOR_PTR_UPDATE
+#define SDMA2_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC5_MIDCMD_DATA0
+#define SDMA2_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA1
+#define SDMA2_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA2
+#define SDMA2_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA3
+#define SDMA2_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA4
+#define SDMA2_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA5
+#define SDMA2_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA6
+#define SDMA2_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA7
+#define SDMA2_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_DATA8
+#define SDMA2_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC5_MIDCMD_CNTL
+#define SDMA2_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC6_RB_CNTL
+#define SDMA2_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC6_RB_BASE
+#define SDMA2_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_BASE_HI
+#define SDMA2_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC6_RB_RPTR
+#define SDMA2_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_RPTR_HI
+#define SDMA2_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_WPTR
+#define SDMA2_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_WPTR_HI
+#define SDMA2_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC6_RB_RPTR_ADDR_HI
+#define SDMA2_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_RPTR_ADDR_LO
+#define SDMA2_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC6_IB_CNTL
+#define SDMA2_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC6_IB_RPTR
+#define SDMA2_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC6_IB_OFFSET
+#define SDMA2_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC6_IB_BASE_LO
+#define SDMA2_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC6_IB_BASE_HI
+#define SDMA2_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_IB_SIZE
+#define SDMA2_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC6_SKIP_CNTL
+#define SDMA2_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC6_CONTEXT_STATUS
+#define SDMA2_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC6_DOORBELL
+#define SDMA2_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC6_STATUS
+#define SDMA2_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC6_DOORBELL_LOG
+#define SDMA2_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC6_WATERMARK
+#define SDMA2_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC6_DOORBELL_OFFSET
+#define SDMA2_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC6_CSA_ADDR_LO
+#define SDMA2_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC6_CSA_ADDR_HI
+#define SDMA2_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_IB_SUB_REMAIN
+#define SDMA2_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC6_PREEMPT
+#define SDMA2_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC6_DUMMY_REG
+#define SDMA2_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC6_RB_AQL_CNTL
+#define SDMA2_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC6_MINOR_PTR_UPDATE
+#define SDMA2_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC6_MIDCMD_DATA0
+#define SDMA2_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA1
+#define SDMA2_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA2
+#define SDMA2_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA3
+#define SDMA2_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA4
+#define SDMA2_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA5
+#define SDMA2_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA6
+#define SDMA2_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA7
+#define SDMA2_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_DATA8
+#define SDMA2_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC6_MIDCMD_CNTL
+#define SDMA2_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA2_RLC7_RB_CNTL
+#define SDMA2_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA2_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA2_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA2_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA2_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA2_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA2_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA2_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA2_RLC7_RB_BASE
+#define SDMA2_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA2_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_BASE_HI
+#define SDMA2_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA2_RLC7_RB_RPTR
+#define SDMA2_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_RPTR_HI
+#define SDMA2_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_WPTR
+#define SDMA2_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA2_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_WPTR_HI
+#define SDMA2_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA2_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA2_RLC7_RB_RPTR_ADDR_HI
+#define SDMA2_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_RPTR_ADDR_LO
+#define SDMA2_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA2_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA2_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC7_IB_CNTL
+#define SDMA2_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA2_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA2_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA2_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA2_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA2_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA2_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA2_RLC7_IB_RPTR
+#define SDMA2_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA2_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC7_IB_OFFSET
+#define SDMA2_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA2_RLC7_IB_BASE_LO
+#define SDMA2_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA2_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA2_RLC7_IB_BASE_HI
+#define SDMA2_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_IB_SIZE
+#define SDMA2_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA2_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC7_SKIP_CNTL
+#define SDMA2_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA2_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA2_RLC7_CONTEXT_STATUS
+#define SDMA2_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA2_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA2_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA2_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA2_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA2_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA2_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA2_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA2_RLC7_DOORBELL
+#define SDMA2_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA2_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA2_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA2_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA2_RLC7_STATUS
+#define SDMA2_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA2_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA2_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA2_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA2_RLC7_DOORBELL_LOG
+#define SDMA2_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA2_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA2_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA2_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA2_RLC7_WATERMARK
+#define SDMA2_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA2_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA2_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA2_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA2_RLC7_DOORBELL_OFFSET
+#define SDMA2_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA2_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA2_RLC7_CSA_ADDR_LO
+#define SDMA2_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC7_CSA_ADDR_HI
+#define SDMA2_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_IB_SUB_REMAIN
+#define SDMA2_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA2_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA2_RLC7_PREEMPT
+#define SDMA2_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA2_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA2_RLC7_DUMMY_REG
+#define SDMA2_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA2_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA2_RLC7_RB_AQL_CNTL
+#define SDMA2_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA2_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA2_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA2_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA2_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA2_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA2_RLC7_MINOR_PTR_UPDATE
+#define SDMA2_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA2_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA2_RLC7_MIDCMD_DATA0
+#define SDMA2_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA1
+#define SDMA2_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA2
+#define SDMA2_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA3
+#define SDMA2_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA4
+#define SDMA2_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA5
+#define SDMA2_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA6
+#define SDMA2_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA7
+#define SDMA2_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_DATA8
+#define SDMA2_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA2_RLC7_MIDCMD_CNTL
+#define SDMA2_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA2_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA2_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA2_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA2_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA2_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA2_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA2_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
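
For reference (not part of the patch itself): the per-field __SHIFT/_MASK pairs defined in these sh_mask headers are meant to be used together when packing or unpacking a register value. The short C sketch below illustrates that convention using the SDMA2_RLC7_RB_CNTL field macros defined above; the macro values are copied verbatim from this hunk so the example is self-contained, and set_field() is a hypothetical local helper, not a driver API.

/* Illustrative sketch only -- not part of this patch.
 * Demonstrates combining a field's __SHIFT and _MASK macros to build a
 * register value. Macro values are copied from the definitions above;
 * set_field() is a hypothetical helper for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define SDMA2_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA2_RLC7_RB_CNTL__RB_ENABLE_MASK   0x00000001L
#define SDMA2_RLC7_RB_CNTL__RB_SIZE__SHIFT   0x1
#define SDMA2_RLC7_RB_CNTL__RB_SIZE_MASK     0x0000003EL
#define SDMA2_RLC7_RB_CNTL__RB_VMID__SHIFT   0x18
#define SDMA2_RLC7_RB_CNTL__RB_VMID_MASK     0x0F000000L

/* Replace the bits selected by 'mask' with 'val' shifted into position. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t rb_cntl = 0;

	rb_cntl = set_field(rb_cntl, SDMA2_RLC7_RB_CNTL__RB_ENABLE_MASK,
			    SDMA2_RLC7_RB_CNTL__RB_ENABLE__SHIFT, 1);
	rb_cntl = set_field(rb_cntl, SDMA2_RLC7_RB_CNTL__RB_SIZE_MASK,
			    SDMA2_RLC7_RB_CNTL__RB_SIZE__SHIFT, 6);  /* example field value */
	rb_cntl = set_field(rb_cntl, SDMA2_RLC7_RB_CNTL__RB_VMID_MASK,
			    SDMA2_RLC7_RB_CNTL__RB_VMID__SHIFT, 0);

	printf("RB_CNTL = 0x%08x\n", rb_cntl);
	return 0;
}
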
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h
new file mode 100644
index 000000000000..09e8302715cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma3_4_2_2_OFFSET_HEADER
+#define _sdma3_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma3_sdma3dec
+// base address: 0x79000
+#define mmSDMA3_UCODE_ADDR 0x0000
+#define mmSDMA3_UCODE_ADDR_BASE_IDX 1
+#define mmSDMA3_UCODE_DATA 0x0001
+#define mmSDMA3_UCODE_DATA_BASE_IDX 1
+#define mmSDMA3_VM_CNTL 0x0004
+#define mmSDMA3_VM_CNTL_BASE_IDX 1
+#define mmSDMA3_VM_CTX_LO 0x0005
+#define mmSDMA3_VM_CTX_LO_BASE_IDX 1
+#define mmSDMA3_VM_CTX_HI 0x0006
+#define mmSDMA3_VM_CTX_HI_BASE_IDX 1
+#define mmSDMA3_ACTIVE_FCN_ID 0x0007
+#define mmSDMA3_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmSDMA3_VM_CTX_CNTL 0x0008
+#define mmSDMA3_VM_CTX_CNTL_BASE_IDX 1
+#define mmSDMA3_VIRT_RESET_REQ 0x0009
+#define mmSDMA3_VIRT_RESET_REQ_BASE_IDX 1
+#define mmSDMA3_VF_ENABLE 0x000a
+#define mmSDMA3_VF_ENABLE_BASE_IDX 1
+#define mmSDMA3_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA3_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define mmSDMA3_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA3_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define mmSDMA3_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA3_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define mmSDMA3_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA3_CONTEXT_REG_TYPE3_BASE_IDX 1
+#define mmSDMA3_PUB_REG_TYPE0 0x000f
+#define mmSDMA3_PUB_REG_TYPE0_BASE_IDX 1
+#define mmSDMA3_PUB_REG_TYPE1 0x0010
+#define mmSDMA3_PUB_REG_TYPE1_BASE_IDX 1
+#define mmSDMA3_PUB_REG_TYPE2 0x0011
+#define mmSDMA3_PUB_REG_TYPE2_BASE_IDX 1
+#define mmSDMA3_PUB_REG_TYPE3 0x0012
+#define mmSDMA3_PUB_REG_TYPE3_BASE_IDX 1
+#define mmSDMA3_MMHUB_CNTL 0x0013
+#define mmSDMA3_MMHUB_CNTL_BASE_IDX 1
+#define mmSDMA3_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA3_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1
+#define mmSDMA3_POWER_CNTL 0x001a
+#define mmSDMA3_POWER_CNTL_BASE_IDX 1
+#define mmSDMA3_CLK_CTRL 0x001b
+#define mmSDMA3_CLK_CTRL_BASE_IDX 1
+#define mmSDMA3_CNTL 0x001c
+#define mmSDMA3_CNTL_BASE_IDX 1
+#define mmSDMA3_CHICKEN_BITS 0x001d
+#define mmSDMA3_CHICKEN_BITS_BASE_IDX 1
+#define mmSDMA3_GB_ADDR_CONFIG 0x001e
+#define mmSDMA3_GB_ADDR_CONFIG_BASE_IDX 1
+#define mmSDMA3_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA3_GB_ADDR_CONFIG_READ_BASE_IDX 1
+#define mmSDMA3_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA3_RB_RPTR_FETCH_HI_BASE_IDX 1
+#define mmSDMA3_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA3_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1
+#define mmSDMA3_RB_RPTR_FETCH 0x0022
+#define mmSDMA3_RB_RPTR_FETCH_BASE_IDX 1
+#define mmSDMA3_IB_OFFSET_FETCH 0x0023
+#define mmSDMA3_IB_OFFSET_FETCH_BASE_IDX 1
+#define mmSDMA3_PROGRAM 0x0024
+#define mmSDMA3_PROGRAM_BASE_IDX 1
+#define mmSDMA3_STATUS_REG 0x0025
+#define mmSDMA3_STATUS_REG_BASE_IDX 1
+#define mmSDMA3_STATUS1_REG 0x0026
+#define mmSDMA3_STATUS1_REG_BASE_IDX 1
+#define mmSDMA3_RD_BURST_CNTL 0x0027
+#define mmSDMA3_RD_BURST_CNTL_BASE_IDX 1
+#define mmSDMA3_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA3_HBM_PAGE_CONFIG_BASE_IDX 1
+#define mmSDMA3_UCODE_CHECKSUM 0x0029
+#define mmSDMA3_UCODE_CHECKSUM_BASE_IDX 1
+#define mmSDMA3_F32_CNTL 0x002a
+#define mmSDMA3_F32_CNTL_BASE_IDX 1
+#define mmSDMA3_FREEZE 0x002b
+#define mmSDMA3_FREEZE_BASE_IDX 1
+#define mmSDMA3_PHASE0_QUANTUM 0x002c
+#define mmSDMA3_PHASE0_QUANTUM_BASE_IDX 1
+#define mmSDMA3_PHASE1_QUANTUM 0x002d
+#define mmSDMA3_PHASE1_QUANTUM_BASE_IDX 1
+#define mmSDMA3_EDC_CONFIG 0x0032
+#define mmSDMA3_EDC_CONFIG_BASE_IDX 1
+#define mmSDMA3_BA_THRESHOLD 0x0033
+#define mmSDMA3_BA_THRESHOLD_BASE_IDX 1
+#define mmSDMA3_ID 0x0034
+#define mmSDMA3_ID_BASE_IDX 1
+#define mmSDMA3_VERSION 0x0035
+#define mmSDMA3_VERSION_BASE_IDX 1
+#define mmSDMA3_EDC_COUNTER 0x0036
+#define mmSDMA3_EDC_COUNTER_BASE_IDX 1
+#define mmSDMA3_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA3_EDC_COUNTER_CLEAR_BASE_IDX 1
+#define mmSDMA3_STATUS2_REG 0x0038
+#define mmSDMA3_STATUS2_REG_BASE_IDX 1
+#define mmSDMA3_ATOMIC_CNTL 0x0039
+#define mmSDMA3_ATOMIC_CNTL_BASE_IDX 1
+#define mmSDMA3_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA3_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmSDMA3_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA3_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmSDMA3_UTCL1_CNTL 0x003c
+#define mmSDMA3_UTCL1_CNTL_BASE_IDX 1
+#define mmSDMA3_UTCL1_WATERMK 0x003d
+#define mmSDMA3_UTCL1_WATERMK_BASE_IDX 1
+#define mmSDMA3_UTCL1_RD_STATUS 0x003e
+#define mmSDMA3_UTCL1_RD_STATUS_BASE_IDX 1
+#define mmSDMA3_UTCL1_WR_STATUS 0x003f
+#define mmSDMA3_UTCL1_WR_STATUS_BASE_IDX 1
+#define mmSDMA3_UTCL1_INV0 0x0040
+#define mmSDMA3_UTCL1_INV0_BASE_IDX 1
+#define mmSDMA3_UTCL1_INV1 0x0041
+#define mmSDMA3_UTCL1_INV1_BASE_IDX 1
+#define mmSDMA3_UTCL1_INV2 0x0042
+#define mmSDMA3_UTCL1_INV2_BASE_IDX 1
+#define mmSDMA3_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA3_UTCL1_RD_XNACK0_BASE_IDX 1
+#define mmSDMA3_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA3_UTCL1_RD_XNACK1_BASE_IDX 1
+#define mmSDMA3_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA3_UTCL1_WR_XNACK0_BASE_IDX 1
+#define mmSDMA3_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA3_UTCL1_WR_XNACK1_BASE_IDX 1
+#define mmSDMA3_UTCL1_TIMEOUT 0x0047
+#define mmSDMA3_UTCL1_TIMEOUT_BASE_IDX 1
+#define mmSDMA3_UTCL1_PAGE 0x0048
+#define mmSDMA3_UTCL1_PAGE_BASE_IDX 1
+#define mmSDMA3_POWER_CNTL_IDLE 0x0049
+#define mmSDMA3_POWER_CNTL_IDLE_BASE_IDX 1
+#define mmSDMA3_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA3_RELAX_ORDERING_LUT_BASE_IDX 1
+#define mmSDMA3_CHICKEN_BITS_2 0x004b
+#define mmSDMA3_CHICKEN_BITS_2_BASE_IDX 1
+#define mmSDMA3_STATUS3_REG 0x004c
+#define mmSDMA3_STATUS3_REG_BASE_IDX 1
+#define mmSDMA3_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA3_PHYSICAL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA3_PHYSICAL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_PHASE2_QUANTUM 0x004f
+#define mmSDMA3_PHASE2_QUANTUM_BASE_IDX 1
+#define mmSDMA3_ERROR_LOG 0x0050
+#define mmSDMA3_ERROR_LOG_BASE_IDX 1
+#define mmSDMA3_PUB_DUMMY_REG0 0x0051
+#define mmSDMA3_PUB_DUMMY_REG0_BASE_IDX 1
+#define mmSDMA3_PUB_DUMMY_REG1 0x0052
+#define mmSDMA3_PUB_DUMMY_REG1_BASE_IDX 1
+#define mmSDMA3_PUB_DUMMY_REG2 0x0053
+#define mmSDMA3_PUB_DUMMY_REG2_BASE_IDX 1
+#define mmSDMA3_PUB_DUMMY_REG3 0x0054
+#define mmSDMA3_PUB_DUMMY_REG3_BASE_IDX 1
+#define mmSDMA3_F32_COUNTER 0x0055
+#define mmSDMA3_F32_COUNTER_BASE_IDX 1
+#define mmSDMA3_UNBREAKABLE 0x0056
+#define mmSDMA3_UNBREAKABLE_BASE_IDX 1
+#define mmSDMA3_PERFMON_CNTL 0x0057
+#define mmSDMA3_PERFMON_CNTL_BASE_IDX 1
+#define mmSDMA3_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA3_PERFCOUNTER0_RESULT_BASE_IDX 1
+#define mmSDMA3_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA3_PERFCOUNTER1_RESULT_BASE_IDX 1
+#define mmSDMA3_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA3_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1
+#define mmSDMA3_CRD_CNTL 0x005b
+#define mmSDMA3_CRD_CNTL_BASE_IDX 1
+#define mmSDMA3_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA3_GPU_IOV_VIOLATION_LOG_BASE_IDX 1
+#define mmSDMA3_ULV_CNTL 0x005e
+#define mmSDMA3_ULV_CNTL_BASE_IDX 1
+#define mmSDMA3_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA3_EA_DBIT_ADDR_DATA_BASE_IDX 1
+#define mmSDMA3_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA3_EA_DBIT_ADDR_INDEX_BASE_IDX 1
+#define mmSDMA3_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA3_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1
+#define mmSDMA3_GFX_RB_CNTL 0x0080
+#define mmSDMA3_GFX_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_GFX_RB_BASE 0x0081
+#define mmSDMA3_GFX_RB_BASE_BASE_IDX 1
+#define mmSDMA3_GFX_RB_BASE_HI 0x0082
+#define mmSDMA3_GFX_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_GFX_RB_RPTR 0x0083
+#define mmSDMA3_GFX_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA3_GFX_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_GFX_RB_WPTR 0x0085
+#define mmSDMA3_GFX_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA3_GFX_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA3_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA3_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA3_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_GFX_IB_CNTL 0x008a
+#define mmSDMA3_GFX_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_GFX_IB_RPTR 0x008b
+#define mmSDMA3_GFX_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_GFX_IB_OFFSET 0x008c
+#define mmSDMA3_GFX_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_GFX_IB_BASE_LO 0x008d
+#define mmSDMA3_GFX_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_GFX_IB_BASE_HI 0x008e
+#define mmSDMA3_GFX_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_GFX_IB_SIZE 0x008f
+#define mmSDMA3_GFX_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_GFX_SKIP_CNTL 0x0090
+#define mmSDMA3_GFX_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA3_GFX_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_GFX_DOORBELL 0x0092
+#define mmSDMA3_GFX_DOORBELL_BASE_IDX 1
+#define mmSDMA3_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA3_GFX_CONTEXT_CNTL_BASE_IDX 1
+#define mmSDMA3_GFX_STATUS 0x00a8
+#define mmSDMA3_GFX_STATUS_BASE_IDX 1
+#define mmSDMA3_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA3_GFX_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_GFX_WATERMARK 0x00aa
+#define mmSDMA3_GFX_WATERMARK_BASE_IDX 1
+#define mmSDMA3_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA3_GFX_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA3_GFX_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA3_GFX_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA3_GFX_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_GFX_PREEMPT 0x00b0
+#define mmSDMA3_GFX_PREEMPT_BASE_IDX 1
+#define mmSDMA3_GFX_DUMMY_REG 0x00b1
+#define mmSDMA3_GFX_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA3_GFX_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA3_GFX_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA3_GFX_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA3_GFX_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA3_GFX_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA3_GFX_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA3_GFX_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA3_GFX_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA3_GFX_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA3_GFX_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA3_GFX_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA3_GFX_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_CNTL 0x00d8
+#define mmSDMA3_PAGE_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_BASE 0x00d9
+#define mmSDMA3_PAGE_RB_BASE_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA3_PAGE_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_RPTR 0x00db
+#define mmSDMA3_PAGE_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA3_PAGE_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_WPTR 0x00dd
+#define mmSDMA3_PAGE_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA3_PAGE_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA3_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA3_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA3_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_CNTL 0x00e2
+#define mmSDMA3_PAGE_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_RPTR 0x00e3
+#define mmSDMA3_PAGE_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA3_PAGE_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA3_PAGE_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA3_PAGE_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_SIZE 0x00e7
+#define mmSDMA3_PAGE_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA3_PAGE_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA3_PAGE_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_PAGE_DOORBELL 0x00ea
+#define mmSDMA3_PAGE_DOORBELL_BASE_IDX 1
+#define mmSDMA3_PAGE_STATUS 0x0100
+#define mmSDMA3_PAGE_STATUS_BASE_IDX 1
+#define mmSDMA3_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA3_PAGE_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_PAGE_WATERMARK 0x0102
+#define mmSDMA3_PAGE_WATERMARK_BASE_IDX 1
+#define mmSDMA3_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA3_PAGE_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA3_PAGE_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA3_PAGE_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA3_PAGE_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_PAGE_PREEMPT 0x0108
+#define mmSDMA3_PAGE_PREEMPT_BASE_IDX 1
+#define mmSDMA3_PAGE_DUMMY_REG 0x0109
+#define mmSDMA3_PAGE_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA3_PAGE_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA3_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA3_PAGE_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA3_PAGE_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA3_PAGE_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA3_PAGE_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA3_PAGE_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA3_PAGE_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA3_PAGE_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA3_PAGE_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA3_PAGE_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA3_PAGE_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_CNTL 0x0130
+#define mmSDMA3_RLC0_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_BASE 0x0131
+#define mmSDMA3_RLC0_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA3_RLC0_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_RPTR 0x0133
+#define mmSDMA3_RLC0_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA3_RLC0_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_WPTR 0x0135
+#define mmSDMA3_RLC0_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA3_RLC0_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA3_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA3_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA3_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_CNTL 0x013a
+#define mmSDMA3_RLC0_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_RPTR 0x013b
+#define mmSDMA3_RLC0_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_OFFSET 0x013c
+#define mmSDMA3_RLC0_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA3_RLC0_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA3_RLC0_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_SIZE 0x013f
+#define mmSDMA3_RLC0_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA3_RLC0_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA3_RLC0_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC0_DOORBELL 0x0142
+#define mmSDMA3_RLC0_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC0_STATUS 0x0158
+#define mmSDMA3_RLC0_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA3_RLC0_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC0_WATERMARK 0x015a
+#define mmSDMA3_RLC0_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA3_RLC0_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA3_RLC0_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA3_RLC0_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA3_RLC0_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC0_PREEMPT 0x0160
+#define mmSDMA3_RLC0_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC0_DUMMY_REG 0x0161
+#define mmSDMA3_RLC0_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA3_RLC0_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA3_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA3_RLC0_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA3_RLC0_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA3_RLC0_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA3_RLC0_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA3_RLC0_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA3_RLC0_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA3_RLC0_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA3_RLC0_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA3_RLC0_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA3_RLC0_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_CNTL 0x0188
+#define mmSDMA3_RLC1_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_BASE 0x0189
+#define mmSDMA3_RLC1_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA3_RLC1_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_RPTR 0x018b
+#define mmSDMA3_RLC1_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA3_RLC1_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_WPTR 0x018d
+#define mmSDMA3_RLC1_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA3_RLC1_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA3_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA3_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA3_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_CNTL 0x0192
+#define mmSDMA3_RLC1_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_RPTR 0x0193
+#define mmSDMA3_RLC1_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_OFFSET 0x0194
+#define mmSDMA3_RLC1_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA3_RLC1_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA3_RLC1_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_SIZE 0x0197
+#define mmSDMA3_RLC1_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA3_RLC1_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA3_RLC1_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC1_DOORBELL 0x019a
+#define mmSDMA3_RLC1_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC1_STATUS 0x01b0
+#define mmSDMA3_RLC1_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA3_RLC1_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC1_WATERMARK 0x01b2
+#define mmSDMA3_RLC1_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA3_RLC1_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA3_RLC1_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA3_RLC1_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA3_RLC1_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC1_PREEMPT 0x01b8
+#define mmSDMA3_RLC1_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA3_RLC1_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA3_RLC1_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA3_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA3_RLC1_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA3_RLC1_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA3_RLC1_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA3_RLC1_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA3_RLC1_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA3_RLC1_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA3_RLC1_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA3_RLC1_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA3_RLC1_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA3_RLC1_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_CNTL 0x01e0
+#define mmSDMA3_RLC2_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_BASE 0x01e1
+#define mmSDMA3_RLC2_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA3_RLC2_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_RPTR 0x01e3
+#define mmSDMA3_RLC2_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA3_RLC2_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_WPTR 0x01e5
+#define mmSDMA3_RLC2_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA3_RLC2_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA3_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA3_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA3_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_CNTL 0x01ea
+#define mmSDMA3_RLC2_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_RPTR 0x01eb
+#define mmSDMA3_RLC2_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA3_RLC2_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA3_RLC2_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA3_RLC2_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_SIZE 0x01ef
+#define mmSDMA3_RLC2_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA3_RLC2_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA3_RLC2_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC2_DOORBELL 0x01f2
+#define mmSDMA3_RLC2_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC2_STATUS 0x0208
+#define mmSDMA3_RLC2_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA3_RLC2_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC2_WATERMARK 0x020a
+#define mmSDMA3_RLC2_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA3_RLC2_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA3_RLC2_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA3_RLC2_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA3_RLC2_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC2_PREEMPT 0x0210
+#define mmSDMA3_RLC2_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC2_DUMMY_REG 0x0211
+#define mmSDMA3_RLC2_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA3_RLC2_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA3_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA3_RLC2_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA3_RLC2_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA3_RLC2_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA3_RLC2_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA3_RLC2_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA3_RLC2_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA3_RLC2_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA3_RLC2_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA3_RLC2_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA3_RLC2_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_CNTL 0x0238
+#define mmSDMA3_RLC3_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_BASE 0x0239
+#define mmSDMA3_RLC3_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA3_RLC3_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_RPTR 0x023b
+#define mmSDMA3_RLC3_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA3_RLC3_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_WPTR 0x023d
+#define mmSDMA3_RLC3_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA3_RLC3_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA3_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA3_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA3_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_CNTL 0x0242
+#define mmSDMA3_RLC3_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_RPTR 0x0243
+#define mmSDMA3_RLC3_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_OFFSET 0x0244
+#define mmSDMA3_RLC3_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA3_RLC3_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA3_RLC3_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_SIZE 0x0247
+#define mmSDMA3_RLC3_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA3_RLC3_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA3_RLC3_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC3_DOORBELL 0x024a
+#define mmSDMA3_RLC3_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC3_STATUS 0x0260
+#define mmSDMA3_RLC3_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA3_RLC3_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC3_WATERMARK 0x0262
+#define mmSDMA3_RLC3_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA3_RLC3_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA3_RLC3_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA3_RLC3_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA3_RLC3_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC3_PREEMPT 0x0268
+#define mmSDMA3_RLC3_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC3_DUMMY_REG 0x0269
+#define mmSDMA3_RLC3_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA3_RLC3_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA3_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA3_RLC3_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA3_RLC3_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA3_RLC3_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA3_RLC3_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA3_RLC3_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA3_RLC3_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA3_RLC3_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA3_RLC3_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA3_RLC3_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA3_RLC3_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_CNTL 0x0290
+#define mmSDMA3_RLC4_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_BASE 0x0291
+#define mmSDMA3_RLC4_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA3_RLC4_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_RPTR 0x0293
+#define mmSDMA3_RLC4_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA3_RLC4_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_WPTR 0x0295
+#define mmSDMA3_RLC4_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA3_RLC4_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA3_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA3_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA3_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_CNTL 0x029a
+#define mmSDMA3_RLC4_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_RPTR 0x029b
+#define mmSDMA3_RLC4_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_OFFSET 0x029c
+#define mmSDMA3_RLC4_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA3_RLC4_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA3_RLC4_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_SIZE 0x029f
+#define mmSDMA3_RLC4_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA3_RLC4_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA3_RLC4_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC4_DOORBELL 0x02a2
+#define mmSDMA3_RLC4_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC4_STATUS 0x02b8
+#define mmSDMA3_RLC4_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA3_RLC4_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC4_WATERMARK 0x02ba
+#define mmSDMA3_RLC4_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA3_RLC4_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA3_RLC4_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA3_RLC4_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA3_RLC4_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC4_PREEMPT 0x02c0
+#define mmSDMA3_RLC4_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA3_RLC4_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA3_RLC4_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA3_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA3_RLC4_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA3_RLC4_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA3_RLC4_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA3_RLC4_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA3_RLC4_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA3_RLC4_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA3_RLC4_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA3_RLC4_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA3_RLC4_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA3_RLC4_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_CNTL 0x02e8
+#define mmSDMA3_RLC5_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_BASE 0x02e9
+#define mmSDMA3_RLC5_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA3_RLC5_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_RPTR 0x02eb
+#define mmSDMA3_RLC5_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA3_RLC5_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_WPTR 0x02ed
+#define mmSDMA3_RLC5_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA3_RLC5_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA3_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA3_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA3_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_CNTL 0x02f2
+#define mmSDMA3_RLC5_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_RPTR 0x02f3
+#define mmSDMA3_RLC5_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA3_RLC5_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA3_RLC5_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA3_RLC5_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_SIZE 0x02f7
+#define mmSDMA3_RLC5_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA3_RLC5_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA3_RLC5_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC5_DOORBELL 0x02fa
+#define mmSDMA3_RLC5_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC5_STATUS 0x0310
+#define mmSDMA3_RLC5_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA3_RLC5_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC5_WATERMARK 0x0312
+#define mmSDMA3_RLC5_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA3_RLC5_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA3_RLC5_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA3_RLC5_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA3_RLC5_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC5_PREEMPT 0x0318
+#define mmSDMA3_RLC5_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC5_DUMMY_REG 0x0319
+#define mmSDMA3_RLC5_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA3_RLC5_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA3_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA3_RLC5_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA3_RLC5_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA3_RLC5_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA3_RLC5_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA3_RLC5_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA3_RLC5_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA3_RLC5_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA3_RLC5_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA3_RLC5_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA3_RLC5_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_CNTL 0x0340
+#define mmSDMA3_RLC6_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_BASE 0x0341
+#define mmSDMA3_RLC6_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA3_RLC6_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_RPTR 0x0343
+#define mmSDMA3_RLC6_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA3_RLC6_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_WPTR 0x0345
+#define mmSDMA3_RLC6_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA3_RLC6_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA3_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA3_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA3_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_CNTL 0x034a
+#define mmSDMA3_RLC6_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_RPTR 0x034b
+#define mmSDMA3_RLC6_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_OFFSET 0x034c
+#define mmSDMA3_RLC6_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA3_RLC6_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA3_RLC6_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_SIZE 0x034f
+#define mmSDMA3_RLC6_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA3_RLC6_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA3_RLC6_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC6_DOORBELL 0x0352
+#define mmSDMA3_RLC6_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC6_STATUS 0x0368
+#define mmSDMA3_RLC6_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA3_RLC6_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC6_WATERMARK 0x036a
+#define mmSDMA3_RLC6_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA3_RLC6_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA3_RLC6_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA3_RLC6_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA3_RLC6_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC6_PREEMPT 0x0370
+#define mmSDMA3_RLC6_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC6_DUMMY_REG 0x0371
+#define mmSDMA3_RLC6_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA3_RLC6_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA3_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA3_RLC6_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA3_RLC6_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA3_RLC6_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA3_RLC6_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA3_RLC6_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA3_RLC6_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA3_RLC6_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA3_RLC6_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA3_RLC6_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA3_RLC6_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_CNTL 0x0398
+#define mmSDMA3_RLC7_RB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_BASE 0x0399
+#define mmSDMA3_RLC7_RB_BASE_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA3_RLC7_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_RPTR 0x039b
+#define mmSDMA3_RLC7_RB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA3_RLC7_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_WPTR 0x039d
+#define mmSDMA3_RLC7_RB_WPTR_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA3_RLC7_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA3_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA3_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA3_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_CNTL 0x03a2
+#define mmSDMA3_RLC7_IB_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_RPTR 0x03a3
+#define mmSDMA3_RLC7_IB_RPTR_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA3_RLC7_IB_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA3_RLC7_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA3_RLC7_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_SIZE 0x03a7
+#define mmSDMA3_RLC7_IB_SIZE_BASE_IDX 1
+#define mmSDMA3_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA3_RLC7_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA3_RLC7_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC7_DOORBELL 0x03aa
+#define mmSDMA3_RLC7_DOORBELL_BASE_IDX 1
+#define mmSDMA3_RLC7_STATUS 0x03c0
+#define mmSDMA3_RLC7_STATUS_BASE_IDX 1
+#define mmSDMA3_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA3_RLC7_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA3_RLC7_WATERMARK 0x03c2
+#define mmSDMA3_RLC7_WATERMARK_BASE_IDX 1
+#define mmSDMA3_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA3_RLC7_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA3_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA3_RLC7_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA3_RLC7_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA3_RLC7_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA3_RLC7_PREEMPT 0x03c8
+#define mmSDMA3_RLC7_PREEMPT_BASE_IDX 1
+#define mmSDMA3_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA3_RLC7_DUMMY_REG_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA3_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA3_RLC7_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA3_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA3_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA3_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA3_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA3_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA3_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA3_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA3_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA3_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA3_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA3_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA3_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA3_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
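
(Reading aid, not part of the patch: the headers added here follow the usual amdgpu register-header convention, where each `mm<REG>` value is a dword offset within an IP segment, the paired `*_BASE_IDX` selects which per-instance segment base that offset is added to, and the `*__SHIFT`/`*_MASK` pairs in the companion sh_mask header describe individual bit fields. The sketch below illustrates that convention only; the segment base table and the MMIO read stub are hypothetical placeholders, not amdgpu APIs, and the two macro values are simply mirrored from the headers in this patch.)

/* Minimal, self-contained sketch of how offset/BASE_IDX and SHIFT/MASK
 * macros of this style are conventionally combined. Hypothetical helpers.
 */
#include <stdint.h>

/* Values mirrored from the SDMA3 headers added by this patch. */
#define mmSDMA3_GFX_RB_CNTL              0x0080
#define mmSDMA3_GFX_RB_CNTL_BASE_IDX     1
#define SDMA3_CLK_CTRL__ON_DELAY__SHIFT  0x0
#define SDMA3_CLK_CTRL__ON_DELAY_MASK    0x0000000FL

/* Hypothetical per-segment dword bases for this SDMA3 instance; in the
 * driver these come from the SoC/IP discovery tables, not from here. */
static const uint32_t sdma3_seg_base[] = { 0x00001260, 0x0004d800 };

/* Stub standing in for an MMIO dword read at an absolute dword offset. */
static uint32_t mmio_read32(uint32_t dword_offset)
{
	(void)dword_offset;
	return 0;
}

/* Absolute dword offset = segment base selected by BASE_IDX + register offset. */
static uint32_t sdma3_reg_addr(uint32_t offset, uint32_t base_idx)
{
	return sdma3_seg_base[base_idx] + offset;
}

static uint32_t sdma3_read_gfx_rb_cntl(void)
{
	return mmio_read32(sdma3_reg_addr(mmSDMA3_GFX_RB_CNTL,
					  mmSDMA3_GFX_RB_CNTL_BASE_IDX));
}

/* Field extraction: mask the register value, then shift the field down. */
static uint32_t sdma3_clk_ctrl_on_delay(uint32_t clk_ctrl_val)
{
	return (clk_ctrl_val & SDMA3_CLK_CTRL__ON_DELAY_MASK) >>
	       SDMA3_CLK_CTRL__ON_DELAY__SHIFT;
}

(The same mask-then-shift pattern applies to every field in the sh_mask header that follows.)
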
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..6f2d5ad00488
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma3_4_2_2_SH_MASK_HEADER
+#define _sdma3_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma3_sdma3dec
+//SDMA3_UCODE_ADDR
+#define SDMA3_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA3_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA3_UCODE_DATA
+#define SDMA3_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA3_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_VM_CNTL
+#define SDMA3_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA3_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA3_VM_CTX_LO
+#define SDMA3_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA3_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_VM_CTX_HI
+#define SDMA3_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA3_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_ACTIVE_FCN_ID
+#define SDMA3_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA3_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA3_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA3_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA3_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA3_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA3_VM_CTX_CNTL
+#define SDMA3_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA3_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA3_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA3_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA3_VIRT_RESET_REQ
+#define SDMA3_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA3_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA3_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA3_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA3_VF_ENABLE
+#define SDMA3_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA3_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA3_CONTEXT_REG_TYPE0
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE__SHIFT 0x1
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_DOORBELL__SHIFT 0x12
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA3_CONTEXT_REG_TYPE1
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_STATUS__SHIFT 0x8
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_WATERMARK__SHIFT 0xa
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA3_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_PREEMPT__SHIFT 0x10
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA3_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_STATUS_MASK 0x00000100L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA3_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA3_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA3_CONTEXT_REG_TYPE2
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA3_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA3_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA3_CONTEXT_REG_TYPE3
+#define SDMA3_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA3_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA3_PUB_REG_TYPE0
+#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_ADDR__SHIFT 0x0
+#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_DATA__SHIFT 0x1
+#define SDMA3_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CNTL__SHIFT 0x4
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_LO__SHIFT 0x5
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_HI__SHIFT 0x6
+#define SDMA3_PUB_REG_TYPE0__SDMA3_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA3_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA3_PUB_REG_TYPE0__SDMA3_MMHUB_CNTL__SHIFT 0x13
+#define SDMA3_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA3_PUB_REG_TYPE0__SDMA3_POWER_CNTL__SHIFT 0x1a
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CLK_CTRL__SHIFT 0x1b
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CNTL__SHIFT 0x1c
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_ADDR_MASK 0x00000001L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_DATA_MASK 0x00000002L
+#define SDMA3_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CNTL_MASK 0x00000010L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_LO_MASK 0x00000020L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_HI_MASK 0x00000040L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA3_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA3_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_POWER_CNTL_MASK 0x04000000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CLK_CTRL_MASK 0x08000000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CNTL_MASK 0x10000000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA3_PUB_REG_TYPE1
+#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA3_PUB_REG_TYPE1__SDMA3_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA3_PUB_REG_TYPE1__SDMA3_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA3_PUB_REG_TYPE1__SDMA3_PROGRAM__SHIFT 0x4
+#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS_REG__SHIFT 0x5
+#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS1_REG__SHIFT 0x6
+#define SDMA3_PUB_REG_TYPE1__SDMA3_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA3_PUB_REG_TYPE1__SDMA3_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA3_PUB_REG_TYPE1__SDMA3_F32_CNTL__SHIFT 0xa
+#define SDMA3_PUB_REG_TYPE1__SDMA3_FREEZE__SHIFT 0xb
+#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA3_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_CONFIG__SHIFT 0x12
+#define SDMA3_PUB_REG_TYPE1__SDMA3_BA_THRESHOLD__SHIFT 0x13
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ID__SHIFT 0x14
+#define SDMA3_PUB_REG_TYPE1__SDMA3_VERSION__SHIFT 0x15
+#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER__SHIFT 0x16
+#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS2_REG__SHIFT 0x18
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_PROGRAM_MASK 0x00000010L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS_REG_MASK 0x00000020L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS1_REG_MASK 0x00000040L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_F32_CNTL_MASK 0x00000400L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_FREEZE_MASK 0x00000800L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA3_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_CONFIG_MASK 0x00040000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ID_MASK 0x00100000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_VERSION_MASK 0x00200000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER_MASK 0x00400000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS2_REG_MASK 0x01000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA3_PUB_REG_TYPE2
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV0__SHIFT 0x0
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV1__SHIFT 0x1
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV2__SHIFT 0x2
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_PAGE__SHIFT 0x8
+#define SDMA3_PUB_REG_TYPE2__SDMA3_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA3_PUB_REG_TYPE2__SDMA3_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA3_PUB_REG_TYPE2__SDMA3_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA3_PUB_REG_TYPE2__SDMA3_STATUS3_REG__SHIFT 0xc
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA3_PUB_REG_TYPE2__SDMA3_ERROR_LOG__SHIFT 0x10
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA3_PUB_REG_TYPE2__SDMA3_F32_COUNTER__SHIFT 0x15
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UNBREAKABLE__SHIFT 0x16
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFMON_CNTL__SHIFT 0x17
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA3_PUB_REG_TYPE2__SDMA3_CRD_CNTL__SHIFT 0x1b
+#define SDMA3_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA3_PUB_REG_TYPE2__SDMA3_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA3_PUB_REG_TYPE2__SDMA3_ULV_CNTL__SHIFT 0x1e
+#define SDMA3_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV0_MASK 0x00000001L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV1_MASK 0x00000002L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV2_MASK 0x00000004L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_STATUS3_REG_MASK 0x00001000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_ERROR_LOG_MASK 0x00010000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_F32_COUNTER_MASK 0x00200000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_UNBREAKABLE_MASK 0x00400000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_CRD_CNTL_MASK 0x08000000L
+#define SDMA3_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA3_PUB_REG_TYPE2__SDMA3_ULV_CNTL_MASK 0x40000000L
+#define SDMA3_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA3_PUB_REG_TYPE3
+#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA3_PUB_REG_TYPE3__SDMA3_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA3_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA3_PUB_REG_TYPE3__SDMA3_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA3_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA3_MMHUB_CNTL
+#define SDMA3_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA3_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA3_CONTEXT_GROUP_BOUNDARY
+#define SDMA3_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA3_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA3_POWER_CNTL
+#define SDMA3_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA3_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA3_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA3_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA3_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA3_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA3_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA3_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA3_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA3_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA3_CLK_CTRL
+#define SDMA3_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA3_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA3_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA3_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA3_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA3_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA3_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA3_CNTL
+#define SDMA3_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA3_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA3_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA3_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA3_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA3_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA3_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA3_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA3_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA3_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA3_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA3_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA3_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA3_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA3_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA3_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA3_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA3_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA3_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA3_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA3_CHICKEN_BITS
+#define SDMA3_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA3_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA3_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA3_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA3_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA3_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA3_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA3_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA3_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA3_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA3_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA3_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA3_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA3_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA3_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA3_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA3_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA3_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA3_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA3_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA3_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA3_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA3_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA3_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA3_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA3_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA3_GB_ADDR_CONFIG
+#define SDMA3_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA3_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA3_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA3_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA3_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA3_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA3_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA3_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA3_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA3_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA3_GB_ADDR_CONFIG_READ
+#define SDMA3_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA3_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA3_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA3_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA3_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA3_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA3_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA3_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA3_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA3_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA3_RB_RPTR_FETCH_HI
+#define SDMA3_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA3_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA3_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA3_RB_RPTR_FETCH
+#define SDMA3_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA3_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA3_IB_OFFSET_FETCH
+#define SDMA3_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA3_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA3_PROGRAM
+#define SDMA3_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA3_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA3_STATUS_REG
+#define SDMA3_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA3_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA3_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA3_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA3_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA3_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA3_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA3_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA3_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA3_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA3_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA3_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA3_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA3_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA3_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA3_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA3_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA3_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA3_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA3_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA3_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA3_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA3_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA3_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA3_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA3_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA3_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA3_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA3_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA3_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA3_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA3_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA3_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA3_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA3_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA3_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA3_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA3_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA3_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA3_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA3_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA3_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA3_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA3_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA3_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA3_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA3_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA3_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA3_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA3_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA3_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA3_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA3_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA3_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA3_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA3_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA3_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA3_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA3_STATUS1_REG
+#define SDMA3_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA3_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA3_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA3_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA3_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA3_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA3_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA3_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA3_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA3_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA3_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA3_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA3_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA3_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA3_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA3_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA3_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA3_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA3_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA3_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA3_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA3_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA3_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA3_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA3_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA3_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA3_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA3_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA3_RD_BURST_CNTL
+#define SDMA3_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA3_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA3_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA3_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA3_HBM_PAGE_CONFIG
+#define SDMA3_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA3_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA3_UCODE_CHECKSUM
+#define SDMA3_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA3_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA3_F32_CNTL
+#define SDMA3_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA3_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA3_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA3_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA3_FREEZE
+#define SDMA3_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA3_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA3_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA3_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA3_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA3_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA3_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA3_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA3_PHASE0_QUANTUM
+#define SDMA3_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA3_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA3_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA3_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA3_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA3_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA3_PHASE1_QUANTUM
+#define SDMA3_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA3_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA3_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA3_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA3_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA3_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA3_EDC_CONFIG
+#define SDMA3_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA3_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA3_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA3_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA3_BA_THRESHOLD
+#define SDMA3_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA3_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA3_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA3_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA3_ID
+#define SDMA3_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA3_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA3_VERSION
+#define SDMA3_VERSION__MINVER__SHIFT 0x0
+#define SDMA3_VERSION__MAJVER__SHIFT 0x8
+#define SDMA3_VERSION__REV__SHIFT 0x10
+#define SDMA3_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA3_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA3_VERSION__REV_MASK 0x003F0000L
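Every register in this header follows the same `<REG>__<FIELD>__SHIFT` / `<REG>__<FIELD>_MASK` pairing: the mask isolates the field inside the 32-bit register word and the shift right-aligns it. As a standalone, purely illustrative sketch (the `decode_sdma3_version` helper and the sample value are not part of the header or the driver), decoding a raw SDMA3_VERSION value looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the SDMA3_VERSION definitions above so this builds on its own. */
    #define SDMA3_VERSION__MINVER__SHIFT 0x0
    #define SDMA3_VERSION__MAJVER__SHIFT 0x8
    #define SDMA3_VERSION__REV__SHIFT    0x10
    #define SDMA3_VERSION__MINVER_MASK   0x0000007FL
    #define SDMA3_VERSION__MAJVER_MASK   0x00007F00L
    #define SDMA3_VERSION__REV_MASK      0x003F0000L

    /* Illustrative helper: mask out each field, then shift it down to bit 0. */
    static void decode_sdma3_version(uint32_t reg)
    {
            unsigned int minver = (reg & SDMA3_VERSION__MINVER_MASK) >> SDMA3_VERSION__MINVER__SHIFT;
            unsigned int majver = (reg & SDMA3_VERSION__MAJVER_MASK) >> SDMA3_VERSION__MAJVER__SHIFT;
            unsigned int rev    = (reg & SDMA3_VERSION__REV_MASK)    >> SDMA3_VERSION__REV__SHIFT;

            printf("SDMA3 version %u.%u rev %u\n", majver, minver, rev);
    }

    int main(void)
    {
            decode_sdma3_version(0x00020500);    /* made-up sample register value */
            return 0;
    }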
+//SDMA3_EDC_COUNTER
+#define SDMA3_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA3_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA3_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA3_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA3_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA3_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA3_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA3_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA3_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA3_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA3_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA3_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA3_EDC_COUNTER_CLEAR
+#define SDMA3_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA3_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA3_STATUS2_REG
+#define SDMA3_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA3_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA3_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA3_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA3_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA3_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA3_ATOMIC_CNTL
+#define SDMA3_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA3_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA3_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA3_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA3_ATOMIC_PREOP_LO
+#define SDMA3_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA3_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA3_ATOMIC_PREOP_HI
+#define SDMA3_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA3_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA3_UTCL1_CNTL
+#define SDMA3_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA3_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA3_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA3_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA3_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA3_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA3_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA3_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA3_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA3_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA3_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA3_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA3_UTCL1_WATERMK
+#define SDMA3_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA3_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA3_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA3_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA3_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA3_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA3_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA3_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA3_UTCL1_RD_STATUS
+#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA3_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA3_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA3_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA3_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA3_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA3_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA3_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA3_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA3_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA3_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA3_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA3_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA3_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA3_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA3_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA3_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA3_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA3_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA3_UTCL1_WR_STATUS
+#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA3_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA3_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA3_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA3_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA3_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA3_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA3_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA3_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA3_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA3_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA3_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA3_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA3_UTCL1_INV0
+#define SDMA3_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA3_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA3_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA3_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA3_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA3_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA3_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA3_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA3_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA3_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA3_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA3_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA3_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA3_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA3_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA3_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA3_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA3_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA3_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA3_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA3_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA3_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA3_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA3_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA3_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA3_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA3_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA3_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA3_UTCL1_INV1
+#define SDMA3_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA3_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA3_UTCL1_INV2
+#define SDMA3_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA3_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA3_UTCL1_RD_XNACK0
+#define SDMA3_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA3_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA3_UTCL1_RD_XNACK1
+#define SDMA3_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA3_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA3_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA3_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA3_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA3_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA3_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA3_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA3_UTCL1_WR_XNACK0
+#define SDMA3_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA3_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA3_UTCL1_WR_XNACK1
+#define SDMA3_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA3_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA3_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA3_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA3_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA3_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA3_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA3_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA3_UTCL1_TIMEOUT
+#define SDMA3_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA3_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA3_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA3_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA3_UTCL1_PAGE
+#define SDMA3_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA3_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA3_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA3_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA3_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA3_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA3_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA3_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA3_POWER_CNTL_IDLE
+#define SDMA3_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA3_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA3_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA3_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA3_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA3_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA3_RELAX_ORDERING_LUT
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA3_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA3_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA3_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA3_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA3_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA3_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA3_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA3_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA3_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA3_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA3_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA3_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA3_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA3_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA3_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA3_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA3_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA3_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA3_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA3_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA3_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA3_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA3_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA3_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA3_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA3_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA3_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA3_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA3_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA3_CHICKEN_BITS_2
+#define SDMA3_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA3_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA3_STATUS3_REG
+#define SDMA3_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA3_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA3_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA3_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA3_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA3_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA3_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA3_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA3_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA3_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA3_PHYSICAL_ADDR_LO
+#define SDMA3_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA3_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA3_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA3_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA3_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA3_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA3_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA3_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA3_PHYSICAL_ADDR_HI
+#define SDMA3_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA3_PHASE2_QUANTUM
+#define SDMA3_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA3_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA3_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA3_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA3_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA3_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA3_ERROR_LOG
+#define SDMA3_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA3_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA3_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA3_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA3_PUB_DUMMY_REG0
+#define SDMA3_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA3_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_PUB_DUMMY_REG1
+#define SDMA3_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA3_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_PUB_DUMMY_REG2
+#define SDMA3_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA3_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_PUB_DUMMY_REG3
+#define SDMA3_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA3_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_F32_COUNTER
+#define SDMA3_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA3_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_UNBREAKABLE
+#define SDMA3_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA3_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA3_PERFMON_CNTL
+#define SDMA3_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA3_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA3_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA3_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA3_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA3_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA3_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA3_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA3_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA3_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA3_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA3_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA3_PERFCOUNTER0_RESULT
+#define SDMA3_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA3_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA3_PERFCOUNTER1_RESULT
+#define SDMA3_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA3_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA3_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA3_CRD_CNTL
+#define SDMA3_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA3_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA3_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA3_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA3_GPU_IOV_VIOLATION_LOG
+#define SDMA3_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA3_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA3_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA3_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA3_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA3_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA3_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA3_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA3_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA3_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA3_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA3_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA3_ULV_CNTL
+#define SDMA3_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA3_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA3_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA3_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA3_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA3_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA3_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA3_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA3_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA3_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA3_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA3_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA3_EA_DBIT_ADDR_DATA
+#define SDMA3_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA3_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA3_EA_DBIT_ADDR_INDEX
+#define SDMA3_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA3_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA3_GPU_IOV_VIOLATION_LOG2
+#define SDMA3_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA3_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA3_GFX_RB_CNTL
+#define SDMA3_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
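The SDMA3_GFX_RB_CNTL shift/mask pairs above are what generic field accessors key off: amdgpu's REG_GET_FIELD()/REG_SET_FIELD() macros paste the register and field names onto this same `__SHIFT`/`_MASK` suffix convention. A hedged, self-contained sketch of that pattern follows; the local REG_* macros here only mirror the kernel ones so the example compiles by itself, and the field values written are illustrative, not recommended programming for the hardware:

    #include <stdint.h>

    /* Copied from the SDMA3_GFX_RB_CNTL definitions above. */
    #define SDMA3_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
    #define SDMA3_GFX_RB_CNTL__RB_SIZE__SHIFT   0x1
    #define SDMA3_GFX_RB_CNTL__RB_ENABLE_MASK   0x00000001L
    #define SDMA3_GFX_RB_CNTL__RB_SIZE_MASK     0x0000003EL

    /* Token-pasting accessors in the style of amdgpu's REG_GET_FIELD/REG_SET_FIELD. */
    #define REG_FIELD_SHIFT(reg, field)    reg##__##field##__SHIFT
    #define REG_FIELD_MASK(reg, field)     reg##__##field##_MASK
    #define REG_GET_FIELD(val, reg, field) \
            (((val) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
    #define REG_SET_FIELD(val, reg, field, fv) \
            (((val) & ~REG_FIELD_MASK(reg, field)) | \
             (((fv) << REG_FIELD_SHIFT(reg, field)) & REG_FIELD_MASK(reg, field)))

    int main(void)
    {
            uint32_t rb_cntl = 0;

            /* Write a ring-buffer size code and the enable bit (illustrative values). */
            rb_cntl = REG_SET_FIELD(rb_cntl, SDMA3_GFX_RB_CNTL, RB_SIZE, 6);
            rb_cntl = REG_SET_FIELD(rb_cntl, SDMA3_GFX_RB_CNTL, RB_ENABLE, 1);

            /* Read the enable bit back out through the same naming convention. */
            return REG_GET_FIELD(rb_cntl, SDMA3_GFX_RB_CNTL, RB_ENABLE) == 1 ? 0 : 1;
    }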
+//SDMA3_GFX_RB_BASE
+#define SDMA3_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_BASE_HI
+#define SDMA3_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_GFX_RB_RPTR
+#define SDMA3_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_RPTR_HI
+#define SDMA3_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_WPTR
+#define SDMA3_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_WPTR_HI
+#define SDMA3_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_WPTR_POLL_CNTL
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_GFX_RB_RPTR_ADDR_HI
+#define SDMA3_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_RPTR_ADDR_LO
+#define SDMA3_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_GFX_IB_CNTL
+#define SDMA3_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_GFX_IB_RPTR
+#define SDMA3_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_GFX_IB_OFFSET
+#define SDMA3_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_GFX_IB_BASE_LO
+#define SDMA3_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_GFX_IB_BASE_HI
+#define SDMA3_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_GFX_IB_SIZE
+#define SDMA3_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_GFX_SKIP_CNTL
+#define SDMA3_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_GFX_CONTEXT_STATUS
+#define SDMA3_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_GFX_DOORBELL
+#define SDMA3_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_GFX_CONTEXT_CNTL
+#define SDMA3_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA3_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA3_GFX_STATUS
+#define SDMA3_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_GFX_DOORBELL_LOG
+#define SDMA3_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_GFX_WATERMARK
+#define SDMA3_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_GFX_DOORBELL_OFFSET
+#define SDMA3_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_GFX_CSA_ADDR_LO
+#define SDMA3_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_GFX_CSA_ADDR_HI
+#define SDMA3_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_GFX_IB_SUB_REMAIN
+#define SDMA3_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_GFX_PREEMPT
+#define SDMA3_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_GFX_DUMMY_REG
+#define SDMA3_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_GFX_RB_AQL_CNTL
+#define SDMA3_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_GFX_MINOR_PTR_UPDATE
+#define SDMA3_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_GFX_MIDCMD_DATA0
+#define SDMA3_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA1
+#define SDMA3_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA2
+#define SDMA3_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA3
+#define SDMA3_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA4
+#define SDMA3_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA5
+#define SDMA3_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA6
+#define SDMA3_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA7
+#define SDMA3_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_DATA8
+#define SDMA3_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_GFX_MIDCMD_CNTL
+#define SDMA3_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_PAGE_RB_CNTL
+#define SDMA3_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_PAGE_RB_BASE
+#define SDMA3_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_BASE_HI
+#define SDMA3_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_PAGE_RB_RPTR
+#define SDMA3_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_RPTR_HI
+#define SDMA3_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_WPTR
+#define SDMA3_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_WPTR_HI
+#define SDMA3_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_PAGE_RB_RPTR_ADDR_HI
+#define SDMA3_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_RPTR_ADDR_LO
+#define SDMA3_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_PAGE_IB_CNTL
+#define SDMA3_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_PAGE_IB_RPTR
+#define SDMA3_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_PAGE_IB_OFFSET
+#define SDMA3_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_PAGE_IB_BASE_LO
+#define SDMA3_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_PAGE_IB_BASE_HI
+#define SDMA3_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_IB_SIZE
+#define SDMA3_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_PAGE_SKIP_CNTL
+#define SDMA3_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_PAGE_CONTEXT_STATUS
+#define SDMA3_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_PAGE_DOORBELL
+#define SDMA3_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_PAGE_STATUS
+#define SDMA3_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_PAGE_DOORBELL_LOG
+#define SDMA3_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_PAGE_WATERMARK
+#define SDMA3_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_PAGE_DOORBELL_OFFSET
+#define SDMA3_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_PAGE_CSA_ADDR_LO
+#define SDMA3_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_PAGE_CSA_ADDR_HI
+#define SDMA3_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_IB_SUB_REMAIN
+#define SDMA3_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_PAGE_PREEMPT
+#define SDMA3_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_PAGE_DUMMY_REG
+#define SDMA3_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_PAGE_RB_AQL_CNTL
+#define SDMA3_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_PAGE_MINOR_PTR_UPDATE
+#define SDMA3_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_PAGE_MIDCMD_DATA0
+#define SDMA3_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA1
+#define SDMA3_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA2
+#define SDMA3_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA3
+#define SDMA3_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA4
+#define SDMA3_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA5
+#define SDMA3_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA6
+#define SDMA3_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA7
+#define SDMA3_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_DATA8
+#define SDMA3_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_PAGE_MIDCMD_CNTL
+#define SDMA3_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC0_RB_CNTL
+#define SDMA3_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC0_RB_BASE
+#define SDMA3_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_BASE_HI
+#define SDMA3_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC0_RB_RPTR
+#define SDMA3_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_RPTR_HI
+#define SDMA3_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_WPTR
+#define SDMA3_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_WPTR_HI
+#define SDMA3_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC0_RB_RPTR_ADDR_HI
+#define SDMA3_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_RPTR_ADDR_LO
+#define SDMA3_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC0_IB_CNTL
+#define SDMA3_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC0_IB_RPTR
+#define SDMA3_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC0_IB_OFFSET
+#define SDMA3_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC0_IB_BASE_LO
+#define SDMA3_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC0_IB_BASE_HI
+#define SDMA3_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_IB_SIZE
+#define SDMA3_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC0_SKIP_CNTL
+#define SDMA3_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC0_CONTEXT_STATUS
+#define SDMA3_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC0_DOORBELL
+#define SDMA3_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC0_STATUS
+#define SDMA3_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC0_DOORBELL_LOG
+#define SDMA3_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC0_WATERMARK
+#define SDMA3_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC0_DOORBELL_OFFSET
+#define SDMA3_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC0_CSA_ADDR_LO
+#define SDMA3_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC0_CSA_ADDR_HI
+#define SDMA3_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_IB_SUB_REMAIN
+#define SDMA3_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC0_PREEMPT
+#define SDMA3_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC0_DUMMY_REG
+#define SDMA3_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC0_RB_AQL_CNTL
+#define SDMA3_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC0_MINOR_PTR_UPDATE
+#define SDMA3_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC0_MIDCMD_DATA0
+#define SDMA3_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA1
+#define SDMA3_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA2
+#define SDMA3_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA3
+#define SDMA3_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA4
+#define SDMA3_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA5
+#define SDMA3_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA6
+#define SDMA3_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA7
+#define SDMA3_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_DATA8
+#define SDMA3_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC0_MIDCMD_CNTL
+#define SDMA3_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
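
(Editorial note, not part of the patch: each per-queue RLCn block above repeats the same field layout. As an illustration only, here is a minimal C sketch of how a driver-side helper might compose an RB_CNTL value from the SHIFT/MASK pairs defined in this header; the helper name and parameters are hypothetical, and only the macros shown above are assumed.)

/* Illustrative only -- not generated content and not part of this patch.
 * Builds an SDMA3 RLC0 RB_CNTL register value from the SHIFT/MASK pairs
 * defined above: enable the ring, set its log2 size, and assign a VMID. */
#include <stdint.h>

static inline uint32_t sdma3_rlc0_rb_cntl_value(uint32_t rb_size_log2,
						uint32_t vmid)
{
	uint32_t v = 0;

	/* RB_ENABLE is a single bit at bit 0. */
	v |= (1u << SDMA3_RLC0_RB_CNTL__RB_ENABLE__SHIFT) &
	     SDMA3_RLC0_RB_CNTL__RB_ENABLE_MASK;
	/* RB_SIZE occupies bits 1..5; masking clips out-of-range values. */
	v |= (rb_size_log2 << SDMA3_RLC0_RB_CNTL__RB_SIZE__SHIFT) &
	     SDMA3_RLC0_RB_CNTL__RB_SIZE_MASK;
	/* RB_VMID occupies bits 24..27. */
	v |= (vmid << SDMA3_RLC0_RB_CNTL__RB_VMID__SHIFT) &
	     SDMA3_RLC0_RB_CNTL__RB_VMID_MASK;

	return v;
}

(The same shift-then-mask pattern applies to every field pair in this file; the generated header only supplies the constants.)
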
+//SDMA3_RLC1_RB_CNTL
+#define SDMA3_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC1_RB_BASE
+#define SDMA3_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_BASE_HI
+#define SDMA3_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC1_RB_RPTR
+#define SDMA3_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_RPTR_HI
+#define SDMA3_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_WPTR
+#define SDMA3_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_WPTR_HI
+#define SDMA3_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC1_RB_RPTR_ADDR_HI
+#define SDMA3_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_RPTR_ADDR_LO
+#define SDMA3_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC1_IB_CNTL
+#define SDMA3_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC1_IB_RPTR
+#define SDMA3_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC1_IB_OFFSET
+#define SDMA3_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC1_IB_BASE_LO
+#define SDMA3_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC1_IB_BASE_HI
+#define SDMA3_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_IB_SIZE
+#define SDMA3_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC1_SKIP_CNTL
+#define SDMA3_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC1_CONTEXT_STATUS
+#define SDMA3_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC1_DOORBELL
+#define SDMA3_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC1_STATUS
+#define SDMA3_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC1_DOORBELL_LOG
+#define SDMA3_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC1_WATERMARK
+#define SDMA3_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC1_DOORBELL_OFFSET
+#define SDMA3_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC1_CSA_ADDR_LO
+#define SDMA3_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC1_CSA_ADDR_HI
+#define SDMA3_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_IB_SUB_REMAIN
+#define SDMA3_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC1_PREEMPT
+#define SDMA3_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC1_DUMMY_REG
+#define SDMA3_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC1_RB_AQL_CNTL
+#define SDMA3_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC1_MINOR_PTR_UPDATE
+#define SDMA3_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC1_MIDCMD_DATA0
+#define SDMA3_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA1
+#define SDMA3_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA2
+#define SDMA3_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA3
+#define SDMA3_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA4
+#define SDMA3_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA5
+#define SDMA3_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA6
+#define SDMA3_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA7
+#define SDMA3_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_DATA8
+#define SDMA3_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC1_MIDCMD_CNTL
+#define SDMA3_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC2_RB_CNTL
+#define SDMA3_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC2_RB_BASE
+#define SDMA3_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_BASE_HI
+#define SDMA3_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC2_RB_RPTR
+#define SDMA3_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_RPTR_HI
+#define SDMA3_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_WPTR
+#define SDMA3_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_WPTR_HI
+#define SDMA3_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC2_RB_RPTR_ADDR_HI
+#define SDMA3_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_RPTR_ADDR_LO
+#define SDMA3_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC2_IB_CNTL
+#define SDMA3_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC2_IB_RPTR
+#define SDMA3_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC2_IB_OFFSET
+#define SDMA3_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC2_IB_BASE_LO
+#define SDMA3_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC2_IB_BASE_HI
+#define SDMA3_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_IB_SIZE
+#define SDMA3_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC2_SKIP_CNTL
+#define SDMA3_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC2_CONTEXT_STATUS
+#define SDMA3_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC2_DOORBELL
+#define SDMA3_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC2_STATUS
+#define SDMA3_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC2_DOORBELL_LOG
+#define SDMA3_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC2_WATERMARK
+#define SDMA3_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC2_DOORBELL_OFFSET
+#define SDMA3_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC2_CSA_ADDR_LO
+#define SDMA3_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC2_CSA_ADDR_HI
+#define SDMA3_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_IB_SUB_REMAIN
+#define SDMA3_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC2_PREEMPT
+#define SDMA3_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC2_DUMMY_REG
+#define SDMA3_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC2_RB_AQL_CNTL
+#define SDMA3_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC2_MINOR_PTR_UPDATE
+#define SDMA3_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC2_MIDCMD_DATA0
+#define SDMA3_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA1
+#define SDMA3_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA2
+#define SDMA3_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA3
+#define SDMA3_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA4
+#define SDMA3_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA5
+#define SDMA3_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA6
+#define SDMA3_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA7
+#define SDMA3_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_DATA8
+#define SDMA3_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC2_MIDCMD_CNTL
+#define SDMA3_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC3_RB_CNTL
+#define SDMA3_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC3_RB_BASE
+#define SDMA3_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_BASE_HI
+#define SDMA3_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC3_RB_RPTR
+#define SDMA3_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_RPTR_HI
+#define SDMA3_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_WPTR
+#define SDMA3_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_WPTR_HI
+#define SDMA3_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC3_RB_RPTR_ADDR_HI
+#define SDMA3_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_RPTR_ADDR_LO
+#define SDMA3_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC3_IB_CNTL
+#define SDMA3_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC3_IB_RPTR
+#define SDMA3_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC3_IB_OFFSET
+#define SDMA3_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC3_IB_BASE_LO
+#define SDMA3_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC3_IB_BASE_HI
+#define SDMA3_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_IB_SIZE
+#define SDMA3_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC3_SKIP_CNTL
+#define SDMA3_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC3_CONTEXT_STATUS
+#define SDMA3_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC3_DOORBELL
+#define SDMA3_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC3_STATUS
+#define SDMA3_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC3_DOORBELL_LOG
+#define SDMA3_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC3_WATERMARK
+#define SDMA3_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC3_DOORBELL_OFFSET
+#define SDMA3_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC3_CSA_ADDR_LO
+#define SDMA3_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC3_CSA_ADDR_HI
+#define SDMA3_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_IB_SUB_REMAIN
+#define SDMA3_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC3_PREEMPT
+#define SDMA3_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC3_DUMMY_REG
+#define SDMA3_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC3_RB_AQL_CNTL
+#define SDMA3_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC3_MINOR_PTR_UPDATE
+#define SDMA3_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC3_MIDCMD_DATA0
+#define SDMA3_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA1
+#define SDMA3_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA2
+#define SDMA3_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA3
+#define SDMA3_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA4
+#define SDMA3_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA5
+#define SDMA3_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA6
+#define SDMA3_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA7
+#define SDMA3_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_DATA8
+#define SDMA3_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC3_MIDCMD_CNTL
+#define SDMA3_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC4_RB_CNTL
+#define SDMA3_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC4_RB_BASE
+#define SDMA3_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_BASE_HI
+#define SDMA3_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC4_RB_RPTR
+#define SDMA3_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_RPTR_HI
+#define SDMA3_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_WPTR
+#define SDMA3_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_WPTR_HI
+#define SDMA3_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC4_RB_RPTR_ADDR_HI
+#define SDMA3_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_RPTR_ADDR_LO
+#define SDMA3_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC4_IB_CNTL
+#define SDMA3_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC4_IB_RPTR
+#define SDMA3_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC4_IB_OFFSET
+#define SDMA3_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC4_IB_BASE_LO
+#define SDMA3_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC4_IB_BASE_HI
+#define SDMA3_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_IB_SIZE
+#define SDMA3_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC4_SKIP_CNTL
+#define SDMA3_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC4_CONTEXT_STATUS
+#define SDMA3_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC4_DOORBELL
+#define SDMA3_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC4_STATUS
+#define SDMA3_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC4_DOORBELL_LOG
+#define SDMA3_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC4_WATERMARK
+#define SDMA3_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC4_DOORBELL_OFFSET
+#define SDMA3_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC4_CSA_ADDR_LO
+#define SDMA3_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC4_CSA_ADDR_HI
+#define SDMA3_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_IB_SUB_REMAIN
+#define SDMA3_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC4_PREEMPT
+#define SDMA3_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC4_DUMMY_REG
+#define SDMA3_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC4_RB_AQL_CNTL
+#define SDMA3_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC4_MINOR_PTR_UPDATE
+#define SDMA3_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC4_MIDCMD_DATA0
+#define SDMA3_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA1
+#define SDMA3_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA2
+#define SDMA3_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA3
+#define SDMA3_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA4
+#define SDMA3_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA5
+#define SDMA3_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA6
+#define SDMA3_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA7
+#define SDMA3_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_DATA8
+#define SDMA3_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC4_MIDCMD_CNTL
+#define SDMA3_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC5_RB_CNTL
+#define SDMA3_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC5_RB_BASE
+#define SDMA3_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_BASE_HI
+#define SDMA3_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC5_RB_RPTR
+#define SDMA3_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_RPTR_HI
+#define SDMA3_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_WPTR
+#define SDMA3_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_WPTR_HI
+#define SDMA3_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC5_RB_RPTR_ADDR_HI
+#define SDMA3_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_RPTR_ADDR_LO
+#define SDMA3_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC5_IB_CNTL
+#define SDMA3_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC5_IB_RPTR
+#define SDMA3_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC5_IB_OFFSET
+#define SDMA3_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC5_IB_BASE_LO
+#define SDMA3_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC5_IB_BASE_HI
+#define SDMA3_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_IB_SIZE
+#define SDMA3_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC5_SKIP_CNTL
+#define SDMA3_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC5_CONTEXT_STATUS
+#define SDMA3_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC5_DOORBELL
+#define SDMA3_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC5_STATUS
+#define SDMA3_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC5_DOORBELL_LOG
+#define SDMA3_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC5_WATERMARK
+#define SDMA3_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC5_DOORBELL_OFFSET
+#define SDMA3_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC5_CSA_ADDR_LO
+#define SDMA3_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC5_CSA_ADDR_HI
+#define SDMA3_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_IB_SUB_REMAIN
+#define SDMA3_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC5_PREEMPT
+#define SDMA3_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC5_DUMMY_REG
+#define SDMA3_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC5_RB_AQL_CNTL
+#define SDMA3_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC5_MINOR_PTR_UPDATE
+#define SDMA3_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC5_MIDCMD_DATA0
+#define SDMA3_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA1
+#define SDMA3_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA2
+#define SDMA3_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA3
+#define SDMA3_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA4
+#define SDMA3_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA5
+#define SDMA3_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA6
+#define SDMA3_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA7
+#define SDMA3_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_DATA8
+#define SDMA3_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC5_MIDCMD_CNTL
+#define SDMA3_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC6_RB_CNTL
+#define SDMA3_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC6_RB_BASE
+#define SDMA3_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_BASE_HI
+#define SDMA3_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC6_RB_RPTR
+#define SDMA3_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_RPTR_HI
+#define SDMA3_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_WPTR
+#define SDMA3_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_WPTR_HI
+#define SDMA3_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC6_RB_RPTR_ADDR_HI
+#define SDMA3_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_RPTR_ADDR_LO
+#define SDMA3_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC6_IB_CNTL
+#define SDMA3_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC6_IB_RPTR
+#define SDMA3_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC6_IB_OFFSET
+#define SDMA3_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC6_IB_BASE_LO
+#define SDMA3_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC6_IB_BASE_HI
+#define SDMA3_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_IB_SIZE
+#define SDMA3_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC6_SKIP_CNTL
+#define SDMA3_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC6_CONTEXT_STATUS
+#define SDMA3_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC6_DOORBELL
+#define SDMA3_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC6_STATUS
+#define SDMA3_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC6_DOORBELL_LOG
+#define SDMA3_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC6_WATERMARK
+#define SDMA3_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC6_DOORBELL_OFFSET
+#define SDMA3_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC6_CSA_ADDR_LO
+#define SDMA3_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC6_CSA_ADDR_HI
+#define SDMA3_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_IB_SUB_REMAIN
+#define SDMA3_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC6_PREEMPT
+#define SDMA3_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC6_DUMMY_REG
+#define SDMA3_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC6_RB_AQL_CNTL
+#define SDMA3_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC6_MINOR_PTR_UPDATE
+#define SDMA3_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC6_MIDCMD_DATA0
+#define SDMA3_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA1
+#define SDMA3_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA2
+#define SDMA3_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA3
+#define SDMA3_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA4
+#define SDMA3_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA5
+#define SDMA3_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA6
+#define SDMA3_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA7
+#define SDMA3_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_DATA8
+#define SDMA3_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC6_MIDCMD_CNTL
+#define SDMA3_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA3_RLC7_RB_CNTL
+#define SDMA3_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA3_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA3_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA3_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA3_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA3_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA3_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA3_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA3_RLC7_RB_BASE
+#define SDMA3_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA3_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_BASE_HI
+#define SDMA3_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA3_RLC7_RB_RPTR
+#define SDMA3_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_RPTR_HI
+#define SDMA3_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_WPTR
+#define SDMA3_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA3_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_WPTR_HI
+#define SDMA3_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA3_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA3_RLC7_RB_RPTR_ADDR_HI
+#define SDMA3_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_RPTR_ADDR_LO
+#define SDMA3_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA3_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA3_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC7_IB_CNTL
+#define SDMA3_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA3_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA3_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA3_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA3_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA3_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA3_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA3_RLC7_IB_RPTR
+#define SDMA3_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA3_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC7_IB_OFFSET
+#define SDMA3_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA3_RLC7_IB_BASE_LO
+#define SDMA3_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA3_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA3_RLC7_IB_BASE_HI
+#define SDMA3_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_IB_SIZE
+#define SDMA3_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA3_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC7_SKIP_CNTL
+#define SDMA3_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA3_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA3_RLC7_CONTEXT_STATUS
+#define SDMA3_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA3_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA3_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA3_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA3_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA3_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA3_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA3_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA3_RLC7_DOORBELL
+#define SDMA3_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA3_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA3_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA3_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA3_RLC7_STATUS
+#define SDMA3_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA3_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA3_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA3_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA3_RLC7_DOORBELL_LOG
+#define SDMA3_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA3_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA3_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA3_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA3_RLC7_WATERMARK
+#define SDMA3_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA3_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA3_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA3_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA3_RLC7_DOORBELL_OFFSET
+#define SDMA3_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA3_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA3_RLC7_CSA_ADDR_LO
+#define SDMA3_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC7_CSA_ADDR_HI
+#define SDMA3_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_IB_SUB_REMAIN
+#define SDMA3_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA3_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA3_RLC7_PREEMPT
+#define SDMA3_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA3_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA3_RLC7_DUMMY_REG
+#define SDMA3_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA3_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA3_RLC7_RB_AQL_CNTL
+#define SDMA3_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA3_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA3_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA3_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA3_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA3_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA3_RLC7_MINOR_PTR_UPDATE
+#define SDMA3_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA3_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA3_RLC7_MIDCMD_DATA0
+#define SDMA3_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA1
+#define SDMA3_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA2
+#define SDMA3_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA3
+#define SDMA3_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA4
+#define SDMA3_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA5
+#define SDMA3_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA6
+#define SDMA3_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA7
+#define SDMA3_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_DATA8
+#define SDMA3_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA3_RLC7_MIDCMD_CNTL
+#define SDMA3_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA3_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA3_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA3_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA3_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA3_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA3_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA3_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h
new file mode 100644
index 000000000000..755ffa5781de
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma4_4_2_2_OFFSET_HEADER
+#define _sdma4_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma4_sdma4dec
+// base address: 0x7a000
+#define mmSDMA4_UCODE_ADDR 0x0000
+#define mmSDMA4_UCODE_ADDR_BASE_IDX 1
+#define mmSDMA4_UCODE_DATA 0x0001
+#define mmSDMA4_UCODE_DATA_BASE_IDX 1
+#define mmSDMA4_VM_CNTL 0x0004
+#define mmSDMA4_VM_CNTL_BASE_IDX 1
+#define mmSDMA4_VM_CTX_LO 0x0005
+#define mmSDMA4_VM_CTX_LO_BASE_IDX 1
+#define mmSDMA4_VM_CTX_HI 0x0006
+#define mmSDMA4_VM_CTX_HI_BASE_IDX 1
+#define mmSDMA4_ACTIVE_FCN_ID 0x0007
+#define mmSDMA4_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmSDMA4_VM_CTX_CNTL 0x0008
+#define mmSDMA4_VM_CTX_CNTL_BASE_IDX 1
+#define mmSDMA4_VIRT_RESET_REQ 0x0009
+#define mmSDMA4_VIRT_RESET_REQ_BASE_IDX 1
+#define mmSDMA4_VF_ENABLE 0x000a
+#define mmSDMA4_VF_ENABLE_BASE_IDX 1
+#define mmSDMA4_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA4_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define mmSDMA4_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA4_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define mmSDMA4_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA4_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define mmSDMA4_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA4_CONTEXT_REG_TYPE3_BASE_IDX 1
+#define mmSDMA4_PUB_REG_TYPE0 0x000f
+#define mmSDMA4_PUB_REG_TYPE0_BASE_IDX 1
+#define mmSDMA4_PUB_REG_TYPE1 0x0010
+#define mmSDMA4_PUB_REG_TYPE1_BASE_IDX 1
+#define mmSDMA4_PUB_REG_TYPE2 0x0011
+#define mmSDMA4_PUB_REG_TYPE2_BASE_IDX 1
+#define mmSDMA4_PUB_REG_TYPE3 0x0012
+#define mmSDMA4_PUB_REG_TYPE3_BASE_IDX 1
+#define mmSDMA4_MMHUB_CNTL 0x0013
+#define mmSDMA4_MMHUB_CNTL_BASE_IDX 1
+#define mmSDMA4_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA4_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1
+#define mmSDMA4_POWER_CNTL 0x001a
+#define mmSDMA4_POWER_CNTL_BASE_IDX 1
+#define mmSDMA4_CLK_CTRL 0x001b
+#define mmSDMA4_CLK_CTRL_BASE_IDX 1
+#define mmSDMA4_CNTL 0x001c
+#define mmSDMA4_CNTL_BASE_IDX 1
+#define mmSDMA4_CHICKEN_BITS 0x001d
+#define mmSDMA4_CHICKEN_BITS_BASE_IDX 1
+#define mmSDMA4_GB_ADDR_CONFIG 0x001e
+#define mmSDMA4_GB_ADDR_CONFIG_BASE_IDX 1
+#define mmSDMA4_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA4_GB_ADDR_CONFIG_READ_BASE_IDX 1
+#define mmSDMA4_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA4_RB_RPTR_FETCH_HI_BASE_IDX 1
+#define mmSDMA4_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA4_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1
+#define mmSDMA4_RB_RPTR_FETCH 0x0022
+#define mmSDMA4_RB_RPTR_FETCH_BASE_IDX 1
+#define mmSDMA4_IB_OFFSET_FETCH 0x0023
+#define mmSDMA4_IB_OFFSET_FETCH_BASE_IDX 1
+#define mmSDMA4_PROGRAM 0x0024
+#define mmSDMA4_PROGRAM_BASE_IDX 1
+#define mmSDMA4_STATUS_REG 0x0025
+#define mmSDMA4_STATUS_REG_BASE_IDX 1
+#define mmSDMA4_STATUS1_REG 0x0026
+#define mmSDMA4_STATUS1_REG_BASE_IDX 1
+#define mmSDMA4_RD_BURST_CNTL 0x0027
+#define mmSDMA4_RD_BURST_CNTL_BASE_IDX 1
+#define mmSDMA4_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA4_HBM_PAGE_CONFIG_BASE_IDX 1
+#define mmSDMA4_UCODE_CHECKSUM 0x0029
+#define mmSDMA4_UCODE_CHECKSUM_BASE_IDX 1
+#define mmSDMA4_F32_CNTL 0x002a
+#define mmSDMA4_F32_CNTL_BASE_IDX 1
+#define mmSDMA4_FREEZE 0x002b
+#define mmSDMA4_FREEZE_BASE_IDX 1
+#define mmSDMA4_PHASE0_QUANTUM 0x002c
+#define mmSDMA4_PHASE0_QUANTUM_BASE_IDX 1
+#define mmSDMA4_PHASE1_QUANTUM 0x002d
+#define mmSDMA4_PHASE1_QUANTUM_BASE_IDX 1
+#define mmSDMA4_EDC_CONFIG 0x0032
+#define mmSDMA4_EDC_CONFIG_BASE_IDX 1
+#define mmSDMA4_BA_THRESHOLD 0x0033
+#define mmSDMA4_BA_THRESHOLD_BASE_IDX 1
+#define mmSDMA4_ID 0x0034
+#define mmSDMA4_ID_BASE_IDX 1
+#define mmSDMA4_VERSION 0x0035
+#define mmSDMA4_VERSION_BASE_IDX 1
+#define mmSDMA4_EDC_COUNTER 0x0036
+#define mmSDMA4_EDC_COUNTER_BASE_IDX 1
+#define mmSDMA4_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA4_EDC_COUNTER_CLEAR_BASE_IDX 1
+#define mmSDMA4_STATUS2_REG 0x0038
+#define mmSDMA4_STATUS2_REG_BASE_IDX 1
+#define mmSDMA4_ATOMIC_CNTL 0x0039
+#define mmSDMA4_ATOMIC_CNTL_BASE_IDX 1
+#define mmSDMA4_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA4_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmSDMA4_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA4_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmSDMA4_UTCL1_CNTL 0x003c
+#define mmSDMA4_UTCL1_CNTL_BASE_IDX 1
+#define mmSDMA4_UTCL1_WATERMK 0x003d
+#define mmSDMA4_UTCL1_WATERMK_BASE_IDX 1
+#define mmSDMA4_UTCL1_RD_STATUS 0x003e
+#define mmSDMA4_UTCL1_RD_STATUS_BASE_IDX 1
+#define mmSDMA4_UTCL1_WR_STATUS 0x003f
+#define mmSDMA4_UTCL1_WR_STATUS_BASE_IDX 1
+#define mmSDMA4_UTCL1_INV0 0x0040
+#define mmSDMA4_UTCL1_INV0_BASE_IDX 1
+#define mmSDMA4_UTCL1_INV1 0x0041
+#define mmSDMA4_UTCL1_INV1_BASE_IDX 1
+#define mmSDMA4_UTCL1_INV2 0x0042
+#define mmSDMA4_UTCL1_INV2_BASE_IDX 1
+#define mmSDMA4_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA4_UTCL1_RD_XNACK0_BASE_IDX 1
+#define mmSDMA4_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA4_UTCL1_RD_XNACK1_BASE_IDX 1
+#define mmSDMA4_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA4_UTCL1_WR_XNACK0_BASE_IDX 1
+#define mmSDMA4_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA4_UTCL1_WR_XNACK1_BASE_IDX 1
+#define mmSDMA4_UTCL1_TIMEOUT 0x0047
+#define mmSDMA4_UTCL1_TIMEOUT_BASE_IDX 1
+#define mmSDMA4_UTCL1_PAGE 0x0048
+#define mmSDMA4_UTCL1_PAGE_BASE_IDX 1
+#define mmSDMA4_POWER_CNTL_IDLE 0x0049
+#define mmSDMA4_POWER_CNTL_IDLE_BASE_IDX 1
+#define mmSDMA4_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA4_RELAX_ORDERING_LUT_BASE_IDX 1
+#define mmSDMA4_CHICKEN_BITS_2 0x004b
+#define mmSDMA4_CHICKEN_BITS_2_BASE_IDX 1
+#define mmSDMA4_STATUS3_REG 0x004c
+#define mmSDMA4_STATUS3_REG_BASE_IDX 1
+#define mmSDMA4_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA4_PHYSICAL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA4_PHYSICAL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_PHASE2_QUANTUM 0x004f
+#define mmSDMA4_PHASE2_QUANTUM_BASE_IDX 1
+#define mmSDMA4_ERROR_LOG 0x0050
+#define mmSDMA4_ERROR_LOG_BASE_IDX 1
+#define mmSDMA4_PUB_DUMMY_REG0 0x0051
+#define mmSDMA4_PUB_DUMMY_REG0_BASE_IDX 1
+#define mmSDMA4_PUB_DUMMY_REG1 0x0052
+#define mmSDMA4_PUB_DUMMY_REG1_BASE_IDX 1
+#define mmSDMA4_PUB_DUMMY_REG2 0x0053
+#define mmSDMA4_PUB_DUMMY_REG2_BASE_IDX 1
+#define mmSDMA4_PUB_DUMMY_REG3 0x0054
+#define mmSDMA4_PUB_DUMMY_REG3_BASE_IDX 1
+#define mmSDMA4_F32_COUNTER 0x0055
+#define mmSDMA4_F32_COUNTER_BASE_IDX 1
+#define mmSDMA4_UNBREAKABLE 0x0056
+#define mmSDMA4_UNBREAKABLE_BASE_IDX 1
+#define mmSDMA4_PERFMON_CNTL 0x0057
+#define mmSDMA4_PERFMON_CNTL_BASE_IDX 1
+#define mmSDMA4_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA4_PERFCOUNTER0_RESULT_BASE_IDX 1
+#define mmSDMA4_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA4_PERFCOUNTER1_RESULT_BASE_IDX 1
+#define mmSDMA4_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA4_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1
+#define mmSDMA4_CRD_CNTL 0x005b
+#define mmSDMA4_CRD_CNTL_BASE_IDX 1
+#define mmSDMA4_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA4_GPU_IOV_VIOLATION_LOG_BASE_IDX 1
+#define mmSDMA4_ULV_CNTL 0x005e
+#define mmSDMA4_ULV_CNTL_BASE_IDX 1
+#define mmSDMA4_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA4_EA_DBIT_ADDR_DATA_BASE_IDX 1
+#define mmSDMA4_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA4_EA_DBIT_ADDR_INDEX_BASE_IDX 1
+#define mmSDMA4_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA4_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1
+#define mmSDMA4_GFX_RB_CNTL 0x0080
+#define mmSDMA4_GFX_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_GFX_RB_BASE 0x0081
+#define mmSDMA4_GFX_RB_BASE_BASE_IDX 1
+#define mmSDMA4_GFX_RB_BASE_HI 0x0082
+#define mmSDMA4_GFX_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_GFX_RB_RPTR 0x0083
+#define mmSDMA4_GFX_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA4_GFX_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_GFX_RB_WPTR 0x0085
+#define mmSDMA4_GFX_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA4_GFX_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA4_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA4_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA4_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_GFX_IB_CNTL 0x008a
+#define mmSDMA4_GFX_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_GFX_IB_RPTR 0x008b
+#define mmSDMA4_GFX_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_GFX_IB_OFFSET 0x008c
+#define mmSDMA4_GFX_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_GFX_IB_BASE_LO 0x008d
+#define mmSDMA4_GFX_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_GFX_IB_BASE_HI 0x008e
+#define mmSDMA4_GFX_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_GFX_IB_SIZE 0x008f
+#define mmSDMA4_GFX_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_GFX_SKIP_CNTL 0x0090
+#define mmSDMA4_GFX_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA4_GFX_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_GFX_DOORBELL 0x0092
+#define mmSDMA4_GFX_DOORBELL_BASE_IDX 1
+#define mmSDMA4_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA4_GFX_CONTEXT_CNTL_BASE_IDX 1
+#define mmSDMA4_GFX_STATUS 0x00a8
+#define mmSDMA4_GFX_STATUS_BASE_IDX 1
+#define mmSDMA4_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA4_GFX_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_GFX_WATERMARK 0x00aa
+#define mmSDMA4_GFX_WATERMARK_BASE_IDX 1
+#define mmSDMA4_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA4_GFX_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA4_GFX_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA4_GFX_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA4_GFX_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_GFX_PREEMPT 0x00b0
+#define mmSDMA4_GFX_PREEMPT_BASE_IDX 1
+#define mmSDMA4_GFX_DUMMY_REG 0x00b1
+#define mmSDMA4_GFX_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA4_GFX_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA4_GFX_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA4_GFX_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA4_GFX_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA4_GFX_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA4_GFX_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA4_GFX_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA4_GFX_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA4_GFX_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA4_GFX_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA4_GFX_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA4_GFX_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_CNTL 0x00d8
+#define mmSDMA4_PAGE_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_BASE 0x00d9
+#define mmSDMA4_PAGE_RB_BASE_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA4_PAGE_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_RPTR 0x00db
+#define mmSDMA4_PAGE_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA4_PAGE_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_WPTR 0x00dd
+#define mmSDMA4_PAGE_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA4_PAGE_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA4_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA4_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA4_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_CNTL 0x00e2
+#define mmSDMA4_PAGE_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_RPTR 0x00e3
+#define mmSDMA4_PAGE_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA4_PAGE_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA4_PAGE_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA4_PAGE_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_SIZE 0x00e7
+#define mmSDMA4_PAGE_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA4_PAGE_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA4_PAGE_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_PAGE_DOORBELL 0x00ea
+#define mmSDMA4_PAGE_DOORBELL_BASE_IDX 1
+#define mmSDMA4_PAGE_STATUS 0x0100
+#define mmSDMA4_PAGE_STATUS_BASE_IDX 1
+#define mmSDMA4_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA4_PAGE_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_PAGE_WATERMARK 0x0102
+#define mmSDMA4_PAGE_WATERMARK_BASE_IDX 1
+#define mmSDMA4_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA4_PAGE_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA4_PAGE_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA4_PAGE_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA4_PAGE_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_PAGE_PREEMPT 0x0108
+#define mmSDMA4_PAGE_PREEMPT_BASE_IDX 1
+#define mmSDMA4_PAGE_DUMMY_REG 0x0109
+#define mmSDMA4_PAGE_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA4_PAGE_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA4_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA4_PAGE_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA4_PAGE_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA4_PAGE_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA4_PAGE_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA4_PAGE_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA4_PAGE_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA4_PAGE_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA4_PAGE_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA4_PAGE_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA4_PAGE_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_CNTL 0x0130
+#define mmSDMA4_RLC0_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_BASE 0x0131
+#define mmSDMA4_RLC0_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA4_RLC0_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_RPTR 0x0133
+#define mmSDMA4_RLC0_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA4_RLC0_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_WPTR 0x0135
+#define mmSDMA4_RLC0_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA4_RLC0_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA4_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA4_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA4_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_CNTL 0x013a
+#define mmSDMA4_RLC0_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_RPTR 0x013b
+#define mmSDMA4_RLC0_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_OFFSET 0x013c
+#define mmSDMA4_RLC0_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA4_RLC0_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA4_RLC0_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_SIZE 0x013f
+#define mmSDMA4_RLC0_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA4_RLC0_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA4_RLC0_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC0_DOORBELL 0x0142
+#define mmSDMA4_RLC0_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC0_STATUS 0x0158
+#define mmSDMA4_RLC0_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA4_RLC0_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC0_WATERMARK 0x015a
+#define mmSDMA4_RLC0_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA4_RLC0_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA4_RLC0_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA4_RLC0_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA4_RLC0_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC0_PREEMPT 0x0160
+#define mmSDMA4_RLC0_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC0_DUMMY_REG 0x0161
+#define mmSDMA4_RLC0_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA4_RLC0_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA4_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA4_RLC0_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA4_RLC0_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA4_RLC0_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA4_RLC0_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA4_RLC0_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA4_RLC0_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA4_RLC0_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA4_RLC0_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA4_RLC0_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA4_RLC0_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_CNTL 0x0188
+#define mmSDMA4_RLC1_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_BASE 0x0189
+#define mmSDMA4_RLC1_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA4_RLC1_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_RPTR 0x018b
+#define mmSDMA4_RLC1_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA4_RLC1_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_WPTR 0x018d
+#define mmSDMA4_RLC1_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA4_RLC1_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA4_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA4_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA4_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_CNTL 0x0192
+#define mmSDMA4_RLC1_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_RPTR 0x0193
+#define mmSDMA4_RLC1_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_OFFSET 0x0194
+#define mmSDMA4_RLC1_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA4_RLC1_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA4_RLC1_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_SIZE 0x0197
+#define mmSDMA4_RLC1_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA4_RLC1_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA4_RLC1_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC1_DOORBELL 0x019a
+#define mmSDMA4_RLC1_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC1_STATUS 0x01b0
+#define mmSDMA4_RLC1_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA4_RLC1_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC1_WATERMARK 0x01b2
+#define mmSDMA4_RLC1_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA4_RLC1_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA4_RLC1_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA4_RLC1_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA4_RLC1_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC1_PREEMPT 0x01b8
+#define mmSDMA4_RLC1_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA4_RLC1_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA4_RLC1_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA4_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA4_RLC1_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA4_RLC1_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA4_RLC1_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA4_RLC1_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA4_RLC1_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA4_RLC1_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA4_RLC1_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA4_RLC1_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA4_RLC1_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA4_RLC1_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_CNTL 0x01e0
+#define mmSDMA4_RLC2_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_BASE 0x01e1
+#define mmSDMA4_RLC2_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA4_RLC2_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_RPTR 0x01e3
+#define mmSDMA4_RLC2_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA4_RLC2_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_WPTR 0x01e5
+#define mmSDMA4_RLC2_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA4_RLC2_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA4_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA4_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA4_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_CNTL 0x01ea
+#define mmSDMA4_RLC2_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_RPTR 0x01eb
+#define mmSDMA4_RLC2_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA4_RLC2_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA4_RLC2_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA4_RLC2_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_SIZE 0x01ef
+#define mmSDMA4_RLC2_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA4_RLC2_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA4_RLC2_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC2_DOORBELL 0x01f2
+#define mmSDMA4_RLC2_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC2_STATUS 0x0208
+#define mmSDMA4_RLC2_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA4_RLC2_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC2_WATERMARK 0x020a
+#define mmSDMA4_RLC2_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA4_RLC2_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA4_RLC2_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA4_RLC2_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA4_RLC2_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC2_PREEMPT 0x0210
+#define mmSDMA4_RLC2_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC2_DUMMY_REG 0x0211
+#define mmSDMA4_RLC2_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA4_RLC2_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA4_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA4_RLC2_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA4_RLC2_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA4_RLC2_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA4_RLC2_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA4_RLC2_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA4_RLC2_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA4_RLC2_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA4_RLC2_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA4_RLC2_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA4_RLC2_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_CNTL 0x0238
+#define mmSDMA4_RLC3_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_BASE 0x0239
+#define mmSDMA4_RLC3_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA4_RLC3_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_RPTR 0x023b
+#define mmSDMA4_RLC3_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA4_RLC3_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_WPTR 0x023d
+#define mmSDMA4_RLC3_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA4_RLC3_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA4_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA4_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA4_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_CNTL 0x0242
+#define mmSDMA4_RLC3_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_RPTR 0x0243
+#define mmSDMA4_RLC3_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_OFFSET 0x0244
+#define mmSDMA4_RLC3_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA4_RLC3_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA4_RLC3_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_SIZE 0x0247
+#define mmSDMA4_RLC3_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA4_RLC3_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA4_RLC3_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC3_DOORBELL 0x024a
+#define mmSDMA4_RLC3_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC3_STATUS 0x0260
+#define mmSDMA4_RLC3_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA4_RLC3_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC3_WATERMARK 0x0262
+#define mmSDMA4_RLC3_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA4_RLC3_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA4_RLC3_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA4_RLC3_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA4_RLC3_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC3_PREEMPT 0x0268
+#define mmSDMA4_RLC3_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC3_DUMMY_REG 0x0269
+#define mmSDMA4_RLC3_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA4_RLC3_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA4_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA4_RLC3_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA4_RLC3_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA4_RLC3_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA4_RLC3_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA4_RLC3_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA4_RLC3_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA4_RLC3_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA4_RLC3_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA4_RLC3_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA4_RLC3_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_CNTL 0x0290
+#define mmSDMA4_RLC4_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_BASE 0x0291
+#define mmSDMA4_RLC4_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA4_RLC4_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_RPTR 0x0293
+#define mmSDMA4_RLC4_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA4_RLC4_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_WPTR 0x0295
+#define mmSDMA4_RLC4_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA4_RLC4_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA4_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA4_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA4_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_CNTL 0x029a
+#define mmSDMA4_RLC4_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_RPTR 0x029b
+#define mmSDMA4_RLC4_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_OFFSET 0x029c
+#define mmSDMA4_RLC4_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA4_RLC4_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA4_RLC4_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_SIZE 0x029f
+#define mmSDMA4_RLC4_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA4_RLC4_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA4_RLC4_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC4_DOORBELL 0x02a2
+#define mmSDMA4_RLC4_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC4_STATUS 0x02b8
+#define mmSDMA4_RLC4_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA4_RLC4_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC4_WATERMARK 0x02ba
+#define mmSDMA4_RLC4_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA4_RLC4_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA4_RLC4_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA4_RLC4_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA4_RLC4_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC4_PREEMPT 0x02c0
+#define mmSDMA4_RLC4_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA4_RLC4_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA4_RLC4_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA4_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA4_RLC4_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA4_RLC4_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA4_RLC4_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA4_RLC4_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA4_RLC4_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA4_RLC4_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA4_RLC4_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA4_RLC4_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA4_RLC4_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA4_RLC4_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_CNTL 0x02e8
+#define mmSDMA4_RLC5_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_BASE 0x02e9
+#define mmSDMA4_RLC5_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA4_RLC5_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_RPTR 0x02eb
+#define mmSDMA4_RLC5_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA4_RLC5_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_WPTR 0x02ed
+#define mmSDMA4_RLC5_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA4_RLC5_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA4_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA4_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA4_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_CNTL 0x02f2
+#define mmSDMA4_RLC5_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_RPTR 0x02f3
+#define mmSDMA4_RLC5_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA4_RLC5_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA4_RLC5_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA4_RLC5_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_SIZE 0x02f7
+#define mmSDMA4_RLC5_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA4_RLC5_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA4_RLC5_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC5_DOORBELL 0x02fa
+#define mmSDMA4_RLC5_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC5_STATUS 0x0310
+#define mmSDMA4_RLC5_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA4_RLC5_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC5_WATERMARK 0x0312
+#define mmSDMA4_RLC5_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA4_RLC5_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA4_RLC5_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA4_RLC5_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA4_RLC5_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC5_PREEMPT 0x0318
+#define mmSDMA4_RLC5_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC5_DUMMY_REG 0x0319
+#define mmSDMA4_RLC5_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA4_RLC5_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA4_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA4_RLC5_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA4_RLC5_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA4_RLC5_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA4_RLC5_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA4_RLC5_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA4_RLC5_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA4_RLC5_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA4_RLC5_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA4_RLC5_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA4_RLC5_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_CNTL 0x0340
+#define mmSDMA4_RLC6_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_BASE 0x0341
+#define mmSDMA4_RLC6_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA4_RLC6_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_RPTR 0x0343
+#define mmSDMA4_RLC6_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA4_RLC6_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_WPTR 0x0345
+#define mmSDMA4_RLC6_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA4_RLC6_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA4_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA4_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA4_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_CNTL 0x034a
+#define mmSDMA4_RLC6_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_RPTR 0x034b
+#define mmSDMA4_RLC6_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_OFFSET 0x034c
+#define mmSDMA4_RLC6_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA4_RLC6_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA4_RLC6_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_SIZE 0x034f
+#define mmSDMA4_RLC6_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA4_RLC6_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA4_RLC6_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC6_DOORBELL 0x0352
+#define mmSDMA4_RLC6_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC6_STATUS 0x0368
+#define mmSDMA4_RLC6_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA4_RLC6_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC6_WATERMARK 0x036a
+#define mmSDMA4_RLC6_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA4_RLC6_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA4_RLC6_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA4_RLC6_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA4_RLC6_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC6_PREEMPT 0x0370
+#define mmSDMA4_RLC6_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC6_DUMMY_REG 0x0371
+#define mmSDMA4_RLC6_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA4_RLC6_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA4_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA4_RLC6_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA4_RLC6_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA4_RLC6_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA4_RLC6_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA4_RLC6_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA4_RLC6_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA4_RLC6_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA4_RLC6_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA4_RLC6_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA4_RLC6_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_CNTL 0x0398
+#define mmSDMA4_RLC7_RB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_BASE 0x0399
+#define mmSDMA4_RLC7_RB_BASE_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA4_RLC7_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_RPTR 0x039b
+#define mmSDMA4_RLC7_RB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA4_RLC7_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_WPTR 0x039d
+#define mmSDMA4_RLC7_RB_WPTR_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA4_RLC7_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA4_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA4_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA4_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_CNTL 0x03a2
+#define mmSDMA4_RLC7_IB_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_RPTR 0x03a3
+#define mmSDMA4_RLC7_IB_RPTR_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA4_RLC7_IB_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA4_RLC7_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA4_RLC7_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_SIZE 0x03a7
+#define mmSDMA4_RLC7_IB_SIZE_BASE_IDX 1
+#define mmSDMA4_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA4_RLC7_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA4_RLC7_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC7_DOORBELL 0x03aa
+#define mmSDMA4_RLC7_DOORBELL_BASE_IDX 1
+#define mmSDMA4_RLC7_STATUS 0x03c0
+#define mmSDMA4_RLC7_STATUS_BASE_IDX 1
+#define mmSDMA4_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA4_RLC7_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA4_RLC7_WATERMARK 0x03c2
+#define mmSDMA4_RLC7_WATERMARK_BASE_IDX 1
+#define mmSDMA4_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA4_RLC7_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA4_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA4_RLC7_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA4_RLC7_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA4_RLC7_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA4_RLC7_PREEMPT 0x03c8
+#define mmSDMA4_RLC7_PREEMPT_BASE_IDX 1
+#define mmSDMA4_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA4_RLC7_DUMMY_REG_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA4_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA4_RLC7_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA4_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA4_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA4_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA4_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA4_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA4_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA4_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA4_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA4_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA4_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA4_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA4_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
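
For readers skimming these generated headers: each register gets a dword offset plus a BASE_IDX into the IP's base-address table, and each field gets a __SHIFT/_MASK pair (the companion sh_mask header added below). The following is a minimal, self-contained C sketch of how such pairs are typically consumed. Only the five #define values are copied from the headers in this patch; the FIELD_GET/FIELD_SET helpers, the literal base-address table, and the printed "offset" are hypothetical stand-ins for illustration, not the amdgpu driver's actual accessors.

/* build & run:  cc -o field_demo field_demo.c && ./field_demo */
#include <stdint.h>
#include <stdio.h>

/* Values copied from the SDMA4 headers added by this patch. */
#define mmSDMA4_RLC7_RB_CNTL                  0x0398  /* dword offset */
#define mmSDMA4_RLC7_RB_CNTL_BASE_IDX         1       /* index into the IP base-address table */

#define SDMA4_CLK_CTRL__ON_DELAY__SHIFT       0x0
#define SDMA4_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define SDMA4_CLK_CTRL__ON_DELAY_MASK         0x0000000FL
#define SDMA4_CLK_CTRL__OFF_HYSTERESIS_MASK   0x00000FF0L

/* Generic field helpers built on the __SHIFT/_MASK naming convention
 * (hypothetical; shown only to illustrate how the pairs compose). */
#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, x) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(x) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	/* Hypothetical per-instance base-address table; a real driver fills
	 * this from its IP base-offset tables, not from literals. */
	const uint32_t sdma4_base[2] = { 0x00001000, 0x00002000 };

	/* Register offset (dword units) = base[BASE_IDX] + dword offset. */
	uint32_t rb_cntl_reg = sdma4_base[mmSDMA4_RLC7_RB_CNTL_BASE_IDX] +
			       mmSDMA4_RLC7_RB_CNTL;

	/* Read-modify-write of two fields in a local CLK_CTRL value. */
	uint32_t clk_ctrl = 0;
	clk_ctrl = FIELD_SET(clk_ctrl, SDMA4_CLK_CTRL, ON_DELAY, 0x8);
	clk_ctrl = FIELD_SET(clk_ctrl, SDMA4_CLK_CTRL, OFF_HYSTERESIS, 0x20);

	printf("RLC7_RB_CNTL dword offset: 0x%08x\n", rb_cntl_reg);
	printf("CLK_CTRL = 0x%08x (ON_DELAY=%u, OFF_HYSTERESIS=0x%x)\n",
	       clk_ctrl,
	       (unsigned)FIELD_GET(clk_ctrl, SDMA4_CLK_CTRL, ON_DELAY),
	       (unsigned)FIELD_GET(clk_ctrl, SDMA4_CLK_CTRL, OFF_HYSTERESIS));
	return 0;
}
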
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..2cc510913214
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma4_4_2_2_SH_MASK_HEADER
+#define _sdma4_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma4_sdma4dec
+//SDMA4_UCODE_ADDR
+#define SDMA4_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA4_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA4_UCODE_DATA
+#define SDMA4_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA4_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_VM_CNTL
+#define SDMA4_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA4_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA4_VM_CTX_LO
+#define SDMA4_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA4_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_VM_CTX_HI
+#define SDMA4_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA4_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_ACTIVE_FCN_ID
+#define SDMA4_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA4_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA4_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA4_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA4_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA4_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA4_VM_CTX_CNTL
+#define SDMA4_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA4_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA4_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA4_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA4_VIRT_RESET_REQ
+#define SDMA4_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA4_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA4_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA4_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA4_VF_ENABLE
+#define SDMA4_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA4_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA4_CONTEXT_REG_TYPE0
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE__SHIFT 0x1
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_DOORBELL__SHIFT 0x12
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA4_CONTEXT_REG_TYPE1
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_STATUS__SHIFT 0x8
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_WATERMARK__SHIFT 0xa
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA4_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_PREEMPT__SHIFT 0x10
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA4_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_STATUS_MASK 0x00000100L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA4_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA4_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA4_CONTEXT_REG_TYPE2
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA4_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA4_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA4_CONTEXT_REG_TYPE3
+#define SDMA4_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA4_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA4_PUB_REG_TYPE0
+#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_ADDR__SHIFT 0x0
+#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_DATA__SHIFT 0x1
+#define SDMA4_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CNTL__SHIFT 0x4
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_LO__SHIFT 0x5
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_HI__SHIFT 0x6
+#define SDMA4_PUB_REG_TYPE0__SDMA4_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA4_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA4_PUB_REG_TYPE0__SDMA4_MMHUB_CNTL__SHIFT 0x13
+#define SDMA4_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA4_PUB_REG_TYPE0__SDMA4_POWER_CNTL__SHIFT 0x1a
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CLK_CTRL__SHIFT 0x1b
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CNTL__SHIFT 0x1c
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_ADDR_MASK 0x00000001L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_DATA_MASK 0x00000002L
+#define SDMA4_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CNTL_MASK 0x00000010L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_LO_MASK 0x00000020L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_HI_MASK 0x00000040L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA4_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA4_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_POWER_CNTL_MASK 0x04000000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CLK_CTRL_MASK 0x08000000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CNTL_MASK 0x10000000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA4_PUB_REG_TYPE1
+#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA4_PUB_REG_TYPE1__SDMA4_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA4_PUB_REG_TYPE1__SDMA4_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA4_PUB_REG_TYPE1__SDMA4_PROGRAM__SHIFT 0x4
+#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS_REG__SHIFT 0x5
+#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS1_REG__SHIFT 0x6
+#define SDMA4_PUB_REG_TYPE1__SDMA4_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA4_PUB_REG_TYPE1__SDMA4_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA4_PUB_REG_TYPE1__SDMA4_F32_CNTL__SHIFT 0xa
+#define SDMA4_PUB_REG_TYPE1__SDMA4_FREEZE__SHIFT 0xb
+#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA4_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_CONFIG__SHIFT 0x12
+#define SDMA4_PUB_REG_TYPE1__SDMA4_BA_THRESHOLD__SHIFT 0x13
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ID__SHIFT 0x14
+#define SDMA4_PUB_REG_TYPE1__SDMA4_VERSION__SHIFT 0x15
+#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER__SHIFT 0x16
+#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS2_REG__SHIFT 0x18
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_PROGRAM_MASK 0x00000010L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS_REG_MASK 0x00000020L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS1_REG_MASK 0x00000040L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_F32_CNTL_MASK 0x00000400L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_FREEZE_MASK 0x00000800L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA4_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_CONFIG_MASK 0x00040000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ID_MASK 0x00100000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_VERSION_MASK 0x00200000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER_MASK 0x00400000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS2_REG_MASK 0x01000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA4_PUB_REG_TYPE2
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV0__SHIFT 0x0
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV1__SHIFT 0x1
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV2__SHIFT 0x2
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_PAGE__SHIFT 0x8
+#define SDMA4_PUB_REG_TYPE2__SDMA4_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA4_PUB_REG_TYPE2__SDMA4_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA4_PUB_REG_TYPE2__SDMA4_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA4_PUB_REG_TYPE2__SDMA4_STATUS3_REG__SHIFT 0xc
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA4_PUB_REG_TYPE2__SDMA4_ERROR_LOG__SHIFT 0x10
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA4_PUB_REG_TYPE2__SDMA4_F32_COUNTER__SHIFT 0x15
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UNBREAKABLE__SHIFT 0x16
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFMON_CNTL__SHIFT 0x17
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA4_PUB_REG_TYPE2__SDMA4_CRD_CNTL__SHIFT 0x1b
+#define SDMA4_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA4_PUB_REG_TYPE2__SDMA4_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA4_PUB_REG_TYPE2__SDMA4_ULV_CNTL__SHIFT 0x1e
+#define SDMA4_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV0_MASK 0x00000001L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV1_MASK 0x00000002L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV2_MASK 0x00000004L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_STATUS3_REG_MASK 0x00001000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_ERROR_LOG_MASK 0x00010000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_F32_COUNTER_MASK 0x00200000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_UNBREAKABLE_MASK 0x00400000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_CRD_CNTL_MASK 0x08000000L
+#define SDMA4_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA4_PUB_REG_TYPE2__SDMA4_ULV_CNTL_MASK 0x40000000L
+#define SDMA4_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA4_PUB_REG_TYPE3
+#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA4_PUB_REG_TYPE3__SDMA4_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA4_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA4_PUB_REG_TYPE3__SDMA4_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA4_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA4_MMHUB_CNTL
+#define SDMA4_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA4_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA4_CONTEXT_GROUP_BOUNDARY
+#define SDMA4_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA4_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA4_POWER_CNTL
+#define SDMA4_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA4_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA4_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA4_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA4_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA4_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA4_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA4_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA4_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA4_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA4_CLK_CTRL
+#define SDMA4_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA4_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA4_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA4_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA4_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA4_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA4_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA4_CNTL
+#define SDMA4_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA4_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA4_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA4_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA4_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA4_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA4_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA4_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA4_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA4_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA4_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA4_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA4_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA4_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA4_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA4_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA4_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA4_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA4_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA4_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA4_CHICKEN_BITS
+#define SDMA4_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA4_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA4_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA4_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA4_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA4_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA4_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA4_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA4_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA4_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA4_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA4_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA4_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA4_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA4_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA4_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA4_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA4_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA4_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA4_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA4_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA4_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA4_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA4_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA4_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA4_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA4_GB_ADDR_CONFIG
+#define SDMA4_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA4_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA4_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA4_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA4_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA4_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA4_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA4_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA4_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA4_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA4_GB_ADDR_CONFIG_READ
+#define SDMA4_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA4_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA4_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA4_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA4_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA4_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA4_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA4_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA4_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA4_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA4_RB_RPTR_FETCH_HI
+#define SDMA4_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA4_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA4_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA4_RB_RPTR_FETCH
+#define SDMA4_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA4_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA4_IB_OFFSET_FETCH
+#define SDMA4_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA4_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA4_PROGRAM
+#define SDMA4_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA4_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA4_STATUS_REG
+#define SDMA4_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA4_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA4_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA4_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA4_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA4_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA4_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA4_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA4_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA4_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA4_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA4_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA4_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA4_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA4_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA4_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA4_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA4_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA4_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA4_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA4_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA4_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA4_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA4_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA4_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA4_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA4_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA4_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA4_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA4_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA4_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA4_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA4_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA4_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA4_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA4_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA4_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA4_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA4_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA4_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA4_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA4_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA4_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA4_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA4_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA4_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA4_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA4_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA4_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA4_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA4_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA4_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA4_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA4_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA4_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA4_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA4_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA4_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA4_STATUS1_REG
+#define SDMA4_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA4_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA4_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA4_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA4_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA4_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA4_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA4_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA4_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA4_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA4_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA4_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA4_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA4_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA4_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA4_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA4_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA4_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA4_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA4_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA4_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA4_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA4_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA4_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA4_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA4_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA4_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA4_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA4_RD_BURST_CNTL
+#define SDMA4_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA4_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA4_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA4_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA4_HBM_PAGE_CONFIG
+#define SDMA4_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA4_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA4_UCODE_CHECKSUM
+#define SDMA4_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA4_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA4_F32_CNTL
+#define SDMA4_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA4_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA4_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA4_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA4_FREEZE
+#define SDMA4_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA4_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA4_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA4_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA4_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA4_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA4_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA4_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA4_PHASE0_QUANTUM
+#define SDMA4_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA4_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA4_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA4_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA4_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA4_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA4_PHASE1_QUANTUM
+#define SDMA4_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA4_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA4_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA4_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA4_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA4_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA4_EDC_CONFIG
+#define SDMA4_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA4_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA4_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA4_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA4_BA_THRESHOLD
+#define SDMA4_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA4_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA4_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA4_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA4_ID
+#define SDMA4_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA4_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA4_VERSION
+#define SDMA4_VERSION__MINVER__SHIFT 0x0
+#define SDMA4_VERSION__MAJVER__SHIFT 0x8
+#define SDMA4_VERSION__REV__SHIFT 0x10
+#define SDMA4_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA4_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA4_VERSION__REV_MASK 0x003F0000L
+//SDMA4_EDC_COUNTER
+#define SDMA4_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA4_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA4_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA4_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA4_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA4_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA4_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA4_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA4_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA4_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA4_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA4_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA4_EDC_COUNTER_CLEAR
+#define SDMA4_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA4_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA4_STATUS2_REG
+#define SDMA4_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA4_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA4_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA4_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA4_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA4_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA4_ATOMIC_CNTL
+#define SDMA4_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA4_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA4_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA4_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA4_ATOMIC_PREOP_LO
+#define SDMA4_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA4_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA4_ATOMIC_PREOP_HI
+#define SDMA4_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA4_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA4_UTCL1_CNTL
+#define SDMA4_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA4_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA4_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA4_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA4_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA4_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA4_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA4_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA4_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA4_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA4_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA4_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA4_UTCL1_WATERMK
+#define SDMA4_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA4_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA4_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA4_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA4_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA4_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA4_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA4_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA4_UTCL1_RD_STATUS
+#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA4_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA4_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA4_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA4_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA4_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA4_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA4_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA4_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA4_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA4_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA4_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA4_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA4_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA4_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA4_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA4_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA4_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA4_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA4_UTCL1_WR_STATUS
+#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA4_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA4_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA4_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA4_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA4_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA4_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA4_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA4_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA4_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA4_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA4_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA4_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA4_UTCL1_INV0
+#define SDMA4_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA4_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA4_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA4_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA4_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA4_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA4_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA4_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA4_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA4_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA4_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA4_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA4_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA4_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA4_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA4_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA4_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA4_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA4_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA4_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA4_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA4_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA4_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA4_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA4_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA4_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA4_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA4_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA4_UTCL1_INV1
+#define SDMA4_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA4_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA4_UTCL1_INV2
+#define SDMA4_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA4_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA4_UTCL1_RD_XNACK0
+#define SDMA4_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA4_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA4_UTCL1_RD_XNACK1
+#define SDMA4_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA4_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA4_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA4_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA4_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA4_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA4_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA4_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA4_UTCL1_WR_XNACK0
+#define SDMA4_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA4_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA4_UTCL1_WR_XNACK1
+#define SDMA4_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA4_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA4_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA4_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA4_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA4_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA4_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA4_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA4_UTCL1_TIMEOUT
+#define SDMA4_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA4_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA4_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA4_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA4_UTCL1_PAGE
+#define SDMA4_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA4_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA4_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA4_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA4_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA4_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA4_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA4_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA4_POWER_CNTL_IDLE
+#define SDMA4_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA4_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA4_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA4_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA4_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA4_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA4_RELAX_ORDERING_LUT
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA4_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA4_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA4_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA4_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA4_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA4_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA4_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA4_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA4_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA4_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA4_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA4_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA4_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA4_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA4_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA4_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA4_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA4_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA4_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA4_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA4_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA4_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA4_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA4_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA4_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA4_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA4_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA4_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA4_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA4_CHICKEN_BITS_2
+#define SDMA4_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA4_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA4_STATUS3_REG
+#define SDMA4_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA4_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA4_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA4_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA4_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA4_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA4_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA4_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA4_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA4_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA4_PHYSICAL_ADDR_LO
+#define SDMA4_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA4_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA4_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA4_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA4_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA4_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA4_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA4_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA4_PHYSICAL_ADDR_HI
+#define SDMA4_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA4_PHASE2_QUANTUM
+#define SDMA4_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA4_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA4_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA4_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA4_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA4_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA4_ERROR_LOG
+#define SDMA4_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA4_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA4_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA4_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA4_PUB_DUMMY_REG0
+#define SDMA4_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA4_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_PUB_DUMMY_REG1
+#define SDMA4_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA4_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_PUB_DUMMY_REG2
+#define SDMA4_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA4_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_PUB_DUMMY_REG3
+#define SDMA4_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA4_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_F32_COUNTER
+#define SDMA4_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA4_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_UNBREAKABLE
+#define SDMA4_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA4_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA4_PERFMON_CNTL
+#define SDMA4_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA4_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA4_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA4_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA4_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA4_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA4_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA4_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA4_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA4_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA4_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA4_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA4_PERFCOUNTER0_RESULT
+#define SDMA4_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA4_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA4_PERFCOUNTER1_RESULT
+#define SDMA4_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA4_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA4_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA4_CRD_CNTL
+#define SDMA4_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA4_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA4_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA4_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA4_GPU_IOV_VIOLATION_LOG
+#define SDMA4_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA4_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA4_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA4_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA4_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA4_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA4_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA4_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA4_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA4_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA4_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA4_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA4_ULV_CNTL
+#define SDMA4_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA4_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA4_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA4_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA4_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA4_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA4_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA4_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA4_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA4_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA4_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA4_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA4_EA_DBIT_ADDR_DATA
+#define SDMA4_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA4_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA4_EA_DBIT_ADDR_INDEX
+#define SDMA4_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA4_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA4_GPU_IOV_VIOLATION_LOG2
+#define SDMA4_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA4_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA4_GFX_RB_CNTL
+#define SDMA4_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_GFX_RB_BASE
+#define SDMA4_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_BASE_HI
+#define SDMA4_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_GFX_RB_RPTR
+#define SDMA4_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_RPTR_HI
+#define SDMA4_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_WPTR
+#define SDMA4_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_WPTR_HI
+#define SDMA4_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_WPTR_POLL_CNTL
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_GFX_RB_RPTR_ADDR_HI
+#define SDMA4_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_RPTR_ADDR_LO
+#define SDMA4_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_GFX_IB_CNTL
+#define SDMA4_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_GFX_IB_RPTR
+#define SDMA4_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_GFX_IB_OFFSET
+#define SDMA4_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_GFX_IB_BASE_LO
+#define SDMA4_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_GFX_IB_BASE_HI
+#define SDMA4_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_GFX_IB_SIZE
+#define SDMA4_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_GFX_SKIP_CNTL
+#define SDMA4_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_GFX_CONTEXT_STATUS
+#define SDMA4_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_GFX_DOORBELL
+#define SDMA4_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_GFX_CONTEXT_CNTL
+#define SDMA4_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA4_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA4_GFX_STATUS
+#define SDMA4_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_GFX_DOORBELL_LOG
+#define SDMA4_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_GFX_WATERMARK
+#define SDMA4_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_GFX_DOORBELL_OFFSET
+#define SDMA4_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_GFX_CSA_ADDR_LO
+#define SDMA4_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_GFX_CSA_ADDR_HI
+#define SDMA4_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_GFX_IB_SUB_REMAIN
+#define SDMA4_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_GFX_PREEMPT
+#define SDMA4_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_GFX_DUMMY_REG
+#define SDMA4_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_GFX_RB_AQL_CNTL
+#define SDMA4_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_GFX_MINOR_PTR_UPDATE
+#define SDMA4_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_GFX_MIDCMD_DATA0
+#define SDMA4_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA1
+#define SDMA4_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA2
+#define SDMA4_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA3
+#define SDMA4_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA4
+#define SDMA4_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA5
+#define SDMA4_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA6
+#define SDMA4_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA7
+#define SDMA4_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_DATA8
+#define SDMA4_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_GFX_MIDCMD_CNTL
+#define SDMA4_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_PAGE_RB_CNTL
+#define SDMA4_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_PAGE_RB_BASE
+#define SDMA4_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_BASE_HI
+#define SDMA4_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_PAGE_RB_RPTR
+#define SDMA4_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_RPTR_HI
+#define SDMA4_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_WPTR
+#define SDMA4_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_WPTR_HI
+#define SDMA4_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_PAGE_RB_RPTR_ADDR_HI
+#define SDMA4_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_RPTR_ADDR_LO
+#define SDMA4_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_PAGE_IB_CNTL
+#define SDMA4_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_PAGE_IB_RPTR
+#define SDMA4_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_PAGE_IB_OFFSET
+#define SDMA4_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_PAGE_IB_BASE_LO
+#define SDMA4_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_PAGE_IB_BASE_HI
+#define SDMA4_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_IB_SIZE
+#define SDMA4_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_PAGE_SKIP_CNTL
+#define SDMA4_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_PAGE_CONTEXT_STATUS
+#define SDMA4_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_PAGE_DOORBELL
+#define SDMA4_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_PAGE_STATUS
+#define SDMA4_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_PAGE_DOORBELL_LOG
+#define SDMA4_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_PAGE_WATERMARK
+#define SDMA4_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_PAGE_DOORBELL_OFFSET
+#define SDMA4_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_PAGE_CSA_ADDR_LO
+#define SDMA4_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_PAGE_CSA_ADDR_HI
+#define SDMA4_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_IB_SUB_REMAIN
+#define SDMA4_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_PAGE_PREEMPT
+#define SDMA4_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_PAGE_DUMMY_REG
+#define SDMA4_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_PAGE_RB_AQL_CNTL
+#define SDMA4_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_PAGE_MINOR_PTR_UPDATE
+#define SDMA4_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_PAGE_MIDCMD_DATA0
+#define SDMA4_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA1
+#define SDMA4_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA2
+#define SDMA4_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA3
+#define SDMA4_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA4
+#define SDMA4_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA5
+#define SDMA4_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA6
+#define SDMA4_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA7
+#define SDMA4_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_DATA8
+#define SDMA4_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_PAGE_MIDCMD_CNTL
+#define SDMA4_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC0_RB_CNTL
+#define SDMA4_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC0_RB_BASE
+#define SDMA4_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_BASE_HI
+#define SDMA4_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC0_RB_RPTR
+#define SDMA4_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_RPTR_HI
+#define SDMA4_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_WPTR
+#define SDMA4_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_WPTR_HI
+#define SDMA4_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC0_RB_RPTR_ADDR_HI
+#define SDMA4_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_RPTR_ADDR_LO
+#define SDMA4_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC0_IB_CNTL
+#define SDMA4_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC0_IB_RPTR
+#define SDMA4_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC0_IB_OFFSET
+#define SDMA4_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC0_IB_BASE_LO
+#define SDMA4_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC0_IB_BASE_HI
+#define SDMA4_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_IB_SIZE
+#define SDMA4_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC0_SKIP_CNTL
+#define SDMA4_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC0_CONTEXT_STATUS
+#define SDMA4_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC0_DOORBELL
+#define SDMA4_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC0_STATUS
+#define SDMA4_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC0_DOORBELL_LOG
+#define SDMA4_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC0_WATERMARK
+#define SDMA4_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC0_DOORBELL_OFFSET
+#define SDMA4_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC0_CSA_ADDR_LO
+#define SDMA4_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC0_CSA_ADDR_HI
+#define SDMA4_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_IB_SUB_REMAIN
+#define SDMA4_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC0_PREEMPT
+#define SDMA4_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC0_DUMMY_REG
+#define SDMA4_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC0_RB_AQL_CNTL
+#define SDMA4_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC0_MINOR_PTR_UPDATE
+#define SDMA4_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC0_MIDCMD_DATA0
+#define SDMA4_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA1
+#define SDMA4_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA2
+#define SDMA4_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA3
+#define SDMA4_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA4
+#define SDMA4_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA5
+#define SDMA4_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA6
+#define SDMA4_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA7
+#define SDMA4_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_DATA8
+#define SDMA4_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC0_MIDCMD_CNTL
+#define SDMA4_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC1_RB_CNTL
+#define SDMA4_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC1_RB_BASE
+#define SDMA4_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_BASE_HI
+#define SDMA4_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC1_RB_RPTR
+#define SDMA4_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_RPTR_HI
+#define SDMA4_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_WPTR
+#define SDMA4_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_WPTR_HI
+#define SDMA4_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC1_RB_RPTR_ADDR_HI
+#define SDMA4_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_RPTR_ADDR_LO
+#define SDMA4_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC1_IB_CNTL
+#define SDMA4_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC1_IB_RPTR
+#define SDMA4_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC1_IB_OFFSET
+#define SDMA4_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC1_IB_BASE_LO
+#define SDMA4_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC1_IB_BASE_HI
+#define SDMA4_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_IB_SIZE
+#define SDMA4_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC1_SKIP_CNTL
+#define SDMA4_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC1_CONTEXT_STATUS
+#define SDMA4_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC1_DOORBELL
+#define SDMA4_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC1_STATUS
+#define SDMA4_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC1_DOORBELL_LOG
+#define SDMA4_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC1_WATERMARK
+#define SDMA4_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC1_DOORBELL_OFFSET
+#define SDMA4_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC1_CSA_ADDR_LO
+#define SDMA4_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC1_CSA_ADDR_HI
+#define SDMA4_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_IB_SUB_REMAIN
+#define SDMA4_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC1_PREEMPT
+#define SDMA4_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC1_DUMMY_REG
+#define SDMA4_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC1_RB_AQL_CNTL
+#define SDMA4_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC1_MINOR_PTR_UPDATE
+#define SDMA4_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC1_MIDCMD_DATA0
+#define SDMA4_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA1
+#define SDMA4_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA2
+#define SDMA4_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA3
+#define SDMA4_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA4
+#define SDMA4_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA5
+#define SDMA4_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA6
+#define SDMA4_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA7
+#define SDMA4_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_DATA8
+#define SDMA4_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC1_MIDCMD_CNTL
+#define SDMA4_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC2_RB_CNTL
+#define SDMA4_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC2_RB_BASE
+#define SDMA4_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_BASE_HI
+#define SDMA4_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC2_RB_RPTR
+#define SDMA4_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_RPTR_HI
+#define SDMA4_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_WPTR
+#define SDMA4_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_WPTR_HI
+#define SDMA4_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC2_RB_RPTR_ADDR_HI
+#define SDMA4_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_RPTR_ADDR_LO
+#define SDMA4_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC2_IB_CNTL
+#define SDMA4_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC2_IB_RPTR
+#define SDMA4_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC2_IB_OFFSET
+#define SDMA4_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC2_IB_BASE_LO
+#define SDMA4_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC2_IB_BASE_HI
+#define SDMA4_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_IB_SIZE
+#define SDMA4_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC2_SKIP_CNTL
+#define SDMA4_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC2_CONTEXT_STATUS
+#define SDMA4_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC2_DOORBELL
+#define SDMA4_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC2_STATUS
+#define SDMA4_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC2_DOORBELL_LOG
+#define SDMA4_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC2_WATERMARK
+#define SDMA4_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC2_DOORBELL_OFFSET
+#define SDMA4_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC2_CSA_ADDR_LO
+#define SDMA4_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC2_CSA_ADDR_HI
+#define SDMA4_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_IB_SUB_REMAIN
+#define SDMA4_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC2_PREEMPT
+#define SDMA4_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC2_DUMMY_REG
+#define SDMA4_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC2_RB_AQL_CNTL
+#define SDMA4_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC2_MINOR_PTR_UPDATE
+#define SDMA4_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC2_MIDCMD_DATA0
+#define SDMA4_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA1
+#define SDMA4_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA2
+#define SDMA4_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA3
+#define SDMA4_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA4
+#define SDMA4_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA5
+#define SDMA4_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA6
+#define SDMA4_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA7
+#define SDMA4_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_DATA8
+#define SDMA4_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC2_MIDCMD_CNTL
+#define SDMA4_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC3_RB_CNTL
+#define SDMA4_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC3_RB_BASE
+#define SDMA4_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_BASE_HI
+#define SDMA4_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC3_RB_RPTR
+#define SDMA4_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_RPTR_HI
+#define SDMA4_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_WPTR
+#define SDMA4_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_WPTR_HI
+#define SDMA4_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC3_RB_RPTR_ADDR_HI
+#define SDMA4_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_RPTR_ADDR_LO
+#define SDMA4_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC3_IB_CNTL
+#define SDMA4_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC3_IB_RPTR
+#define SDMA4_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC3_IB_OFFSET
+#define SDMA4_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC3_IB_BASE_LO
+#define SDMA4_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC3_IB_BASE_HI
+#define SDMA4_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_IB_SIZE
+#define SDMA4_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC3_SKIP_CNTL
+#define SDMA4_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC3_CONTEXT_STATUS
+#define SDMA4_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC3_DOORBELL
+#define SDMA4_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC3_STATUS
+#define SDMA4_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC3_DOORBELL_LOG
+#define SDMA4_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC3_WATERMARK
+#define SDMA4_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC3_DOORBELL_OFFSET
+#define SDMA4_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC3_CSA_ADDR_LO
+#define SDMA4_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC3_CSA_ADDR_HI
+#define SDMA4_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_IB_SUB_REMAIN
+#define SDMA4_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC3_PREEMPT
+#define SDMA4_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC3_DUMMY_REG
+#define SDMA4_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC3_RB_AQL_CNTL
+#define SDMA4_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC3_MINOR_PTR_UPDATE
+#define SDMA4_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC3_MIDCMD_DATA0
+#define SDMA4_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA1
+#define SDMA4_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA2
+#define SDMA4_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA3
+#define SDMA4_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA4
+#define SDMA4_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA5
+#define SDMA4_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA6
+#define SDMA4_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA7
+#define SDMA4_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_DATA8
+#define SDMA4_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC3_MIDCMD_CNTL
+#define SDMA4_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC4_RB_CNTL
+#define SDMA4_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC4_RB_BASE
+#define SDMA4_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_BASE_HI
+#define SDMA4_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC4_RB_RPTR
+#define SDMA4_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_RPTR_HI
+#define SDMA4_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_WPTR
+#define SDMA4_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_WPTR_HI
+#define SDMA4_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC4_RB_RPTR_ADDR_HI
+#define SDMA4_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_RPTR_ADDR_LO
+#define SDMA4_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC4_IB_CNTL
+#define SDMA4_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC4_IB_RPTR
+#define SDMA4_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC4_IB_OFFSET
+#define SDMA4_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC4_IB_BASE_LO
+#define SDMA4_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC4_IB_BASE_HI
+#define SDMA4_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_IB_SIZE
+#define SDMA4_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC4_SKIP_CNTL
+#define SDMA4_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC4_CONTEXT_STATUS
+#define SDMA4_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC4_DOORBELL
+#define SDMA4_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC4_STATUS
+#define SDMA4_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC4_DOORBELL_LOG
+#define SDMA4_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC4_WATERMARK
+#define SDMA4_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC4_DOORBELL_OFFSET
+#define SDMA4_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC4_CSA_ADDR_LO
+#define SDMA4_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC4_CSA_ADDR_HI
+#define SDMA4_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_IB_SUB_REMAIN
+#define SDMA4_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC4_PREEMPT
+#define SDMA4_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC4_DUMMY_REG
+#define SDMA4_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC4_RB_AQL_CNTL
+#define SDMA4_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC4_MINOR_PTR_UPDATE
+#define SDMA4_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC4_MIDCMD_DATA0
+#define SDMA4_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA1
+#define SDMA4_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA2
+#define SDMA4_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA3
+#define SDMA4_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA4
+#define SDMA4_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA5
+#define SDMA4_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA6
+#define SDMA4_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA7
+#define SDMA4_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_DATA8
+#define SDMA4_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC4_MIDCMD_CNTL
+#define SDMA4_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC5_RB_CNTL
+#define SDMA4_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC5_RB_BASE
+#define SDMA4_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_BASE_HI
+#define SDMA4_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC5_RB_RPTR
+#define SDMA4_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_RPTR_HI
+#define SDMA4_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_WPTR
+#define SDMA4_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_WPTR_HI
+#define SDMA4_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC5_RB_RPTR_ADDR_HI
+#define SDMA4_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_RPTR_ADDR_LO
+#define SDMA4_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC5_IB_CNTL
+#define SDMA4_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC5_IB_RPTR
+#define SDMA4_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC5_IB_OFFSET
+#define SDMA4_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC5_IB_BASE_LO
+#define SDMA4_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC5_IB_BASE_HI
+#define SDMA4_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_IB_SIZE
+#define SDMA4_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC5_SKIP_CNTL
+#define SDMA4_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC5_CONTEXT_STATUS
+#define SDMA4_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC5_DOORBELL
+#define SDMA4_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC5_STATUS
+#define SDMA4_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC5_DOORBELL_LOG
+#define SDMA4_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC5_WATERMARK
+#define SDMA4_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC5_DOORBELL_OFFSET
+#define SDMA4_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC5_CSA_ADDR_LO
+#define SDMA4_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC5_CSA_ADDR_HI
+#define SDMA4_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_IB_SUB_REMAIN
+#define SDMA4_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC5_PREEMPT
+#define SDMA4_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC5_DUMMY_REG
+#define SDMA4_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC5_RB_AQL_CNTL
+#define SDMA4_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC5_MINOR_PTR_UPDATE
+#define SDMA4_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC5_MIDCMD_DATA0
+#define SDMA4_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA1
+#define SDMA4_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA2
+#define SDMA4_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA3
+#define SDMA4_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA4
+#define SDMA4_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA5
+#define SDMA4_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA6
+#define SDMA4_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA7
+#define SDMA4_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_DATA8
+#define SDMA4_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC5_MIDCMD_CNTL
+#define SDMA4_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC6_RB_CNTL
+#define SDMA4_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC6_RB_BASE
+#define SDMA4_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_BASE_HI
+#define SDMA4_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC6_RB_RPTR
+#define SDMA4_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_RPTR_HI
+#define SDMA4_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_WPTR
+#define SDMA4_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_WPTR_HI
+#define SDMA4_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC6_RB_RPTR_ADDR_HI
+#define SDMA4_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_RPTR_ADDR_LO
+#define SDMA4_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC6_IB_CNTL
+#define SDMA4_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC6_IB_RPTR
+#define SDMA4_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC6_IB_OFFSET
+#define SDMA4_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC6_IB_BASE_LO
+#define SDMA4_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC6_IB_BASE_HI
+#define SDMA4_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_IB_SIZE
+#define SDMA4_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC6_SKIP_CNTL
+#define SDMA4_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC6_CONTEXT_STATUS
+#define SDMA4_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC6_DOORBELL
+#define SDMA4_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC6_STATUS
+#define SDMA4_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC6_DOORBELL_LOG
+#define SDMA4_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC6_WATERMARK
+#define SDMA4_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC6_DOORBELL_OFFSET
+#define SDMA4_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC6_CSA_ADDR_LO
+#define SDMA4_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC6_CSA_ADDR_HI
+#define SDMA4_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_IB_SUB_REMAIN
+#define SDMA4_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC6_PREEMPT
+#define SDMA4_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC6_DUMMY_REG
+#define SDMA4_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC6_RB_AQL_CNTL
+#define SDMA4_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC6_MINOR_PTR_UPDATE
+#define SDMA4_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC6_MIDCMD_DATA0
+#define SDMA4_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA1
+#define SDMA4_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA2
+#define SDMA4_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA3
+#define SDMA4_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA4
+#define SDMA4_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA5
+#define SDMA4_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA6
+#define SDMA4_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA7
+#define SDMA4_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_DATA8
+#define SDMA4_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC6_MIDCMD_CNTL
+#define SDMA4_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA4_RLC7_RB_CNTL
+#define SDMA4_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA4_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA4_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA4_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA4_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA4_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA4_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA4_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA4_RLC7_RB_BASE
+#define SDMA4_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA4_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_BASE_HI
+#define SDMA4_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA4_RLC7_RB_RPTR
+#define SDMA4_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_RPTR_HI
+#define SDMA4_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_WPTR
+#define SDMA4_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA4_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_WPTR_HI
+#define SDMA4_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA4_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA4_RLC7_RB_RPTR_ADDR_HI
+#define SDMA4_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_RPTR_ADDR_LO
+#define SDMA4_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA4_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA4_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC7_IB_CNTL
+#define SDMA4_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA4_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA4_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA4_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA4_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA4_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA4_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA4_RLC7_IB_RPTR
+#define SDMA4_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA4_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC7_IB_OFFSET
+#define SDMA4_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA4_RLC7_IB_BASE_LO
+#define SDMA4_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA4_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA4_RLC7_IB_BASE_HI
+#define SDMA4_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_IB_SIZE
+#define SDMA4_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA4_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC7_SKIP_CNTL
+#define SDMA4_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA4_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA4_RLC7_CONTEXT_STATUS
+#define SDMA4_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA4_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA4_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA4_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA4_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA4_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA4_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA4_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA4_RLC7_DOORBELL
+#define SDMA4_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA4_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA4_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA4_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA4_RLC7_STATUS
+#define SDMA4_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA4_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA4_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA4_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA4_RLC7_DOORBELL_LOG
+#define SDMA4_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA4_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA4_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA4_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA4_RLC7_WATERMARK
+#define SDMA4_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA4_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA4_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA4_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA4_RLC7_DOORBELL_OFFSET
+#define SDMA4_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA4_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA4_RLC7_CSA_ADDR_LO
+#define SDMA4_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC7_CSA_ADDR_HI
+#define SDMA4_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_IB_SUB_REMAIN
+#define SDMA4_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA4_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA4_RLC7_PREEMPT
+#define SDMA4_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA4_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA4_RLC7_DUMMY_REG
+#define SDMA4_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA4_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA4_RLC7_RB_AQL_CNTL
+#define SDMA4_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA4_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA4_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA4_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA4_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA4_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA4_RLC7_MINOR_PTR_UPDATE
+#define SDMA4_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA4_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA4_RLC7_MIDCMD_DATA0
+#define SDMA4_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA1
+#define SDMA4_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA2
+#define SDMA4_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA3
+#define SDMA4_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA4
+#define SDMA4_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA5
+#define SDMA4_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA6
+#define SDMA4_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA7
+#define SDMA4_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA8
+#define SDMA4_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_CNTL
+#define SDMA4_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h
new file mode 100644
index 000000000000..ecb51b9f90b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma5_4_2_2_OFFSET_HEADER
+#define _sdma5_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma5_sdma5dec
+// base address: 0x7b000
+#define mmSDMA5_UCODE_ADDR 0x0000
+#define mmSDMA5_UCODE_ADDR_BASE_IDX 1
+#define mmSDMA5_UCODE_DATA 0x0001
+#define mmSDMA5_UCODE_DATA_BASE_IDX 1
+#define mmSDMA5_VM_CNTL 0x0004
+#define mmSDMA5_VM_CNTL_BASE_IDX 1
+#define mmSDMA5_VM_CTX_LO 0x0005
+#define mmSDMA5_VM_CTX_LO_BASE_IDX 1
+#define mmSDMA5_VM_CTX_HI 0x0006
+#define mmSDMA5_VM_CTX_HI_BASE_IDX 1
+#define mmSDMA5_ACTIVE_FCN_ID 0x0007
+#define mmSDMA5_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmSDMA5_VM_CTX_CNTL 0x0008
+#define mmSDMA5_VM_CTX_CNTL_BASE_IDX 1
+#define mmSDMA5_VIRT_RESET_REQ 0x0009
+#define mmSDMA5_VIRT_RESET_REQ_BASE_IDX 1
+#define mmSDMA5_VF_ENABLE 0x000a
+#define mmSDMA5_VF_ENABLE_BASE_IDX 1
+#define mmSDMA5_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA5_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define mmSDMA5_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA5_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define mmSDMA5_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA5_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define mmSDMA5_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA5_CONTEXT_REG_TYPE3_BASE_IDX 1
+#define mmSDMA5_PUB_REG_TYPE0 0x000f
+#define mmSDMA5_PUB_REG_TYPE0_BASE_IDX 1
+#define mmSDMA5_PUB_REG_TYPE1 0x0010
+#define mmSDMA5_PUB_REG_TYPE1_BASE_IDX 1
+#define mmSDMA5_PUB_REG_TYPE2 0x0011
+#define mmSDMA5_PUB_REG_TYPE2_BASE_IDX 1
+#define mmSDMA5_PUB_REG_TYPE3 0x0012
+#define mmSDMA5_PUB_REG_TYPE3_BASE_IDX 1
+#define mmSDMA5_MMHUB_CNTL 0x0013
+#define mmSDMA5_MMHUB_CNTL_BASE_IDX 1
+#define mmSDMA5_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA5_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1
+#define mmSDMA5_POWER_CNTL 0x001a
+#define mmSDMA5_POWER_CNTL_BASE_IDX 1
+#define mmSDMA5_CLK_CTRL 0x001b
+#define mmSDMA5_CLK_CTRL_BASE_IDX 1
+#define mmSDMA5_CNTL 0x001c
+#define mmSDMA5_CNTL_BASE_IDX 1
+#define mmSDMA5_CHICKEN_BITS 0x001d
+#define mmSDMA5_CHICKEN_BITS_BASE_IDX 1
+#define mmSDMA5_GB_ADDR_CONFIG 0x001e
+#define mmSDMA5_GB_ADDR_CONFIG_BASE_IDX 1
+#define mmSDMA5_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA5_GB_ADDR_CONFIG_READ_BASE_IDX 1
+#define mmSDMA5_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA5_RB_RPTR_FETCH_HI_BASE_IDX 1
+#define mmSDMA5_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA5_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1
+#define mmSDMA5_RB_RPTR_FETCH 0x0022
+#define mmSDMA5_RB_RPTR_FETCH_BASE_IDX 1
+#define mmSDMA5_IB_OFFSET_FETCH 0x0023
+#define mmSDMA5_IB_OFFSET_FETCH_BASE_IDX 1
+#define mmSDMA5_PROGRAM 0x0024
+#define mmSDMA5_PROGRAM_BASE_IDX 1
+#define mmSDMA5_STATUS_REG 0x0025
+#define mmSDMA5_STATUS_REG_BASE_IDX 1
+#define mmSDMA5_STATUS1_REG 0x0026
+#define mmSDMA5_STATUS1_REG_BASE_IDX 1
+#define mmSDMA5_RD_BURST_CNTL 0x0027
+#define mmSDMA5_RD_BURST_CNTL_BASE_IDX 1
+#define mmSDMA5_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA5_HBM_PAGE_CONFIG_BASE_IDX 1
+#define mmSDMA5_UCODE_CHECKSUM 0x0029
+#define mmSDMA5_UCODE_CHECKSUM_BASE_IDX 1
+#define mmSDMA5_F32_CNTL 0x002a
+#define mmSDMA5_F32_CNTL_BASE_IDX 1
+#define mmSDMA5_FREEZE 0x002b
+#define mmSDMA5_FREEZE_BASE_IDX 1
+#define mmSDMA5_PHASE0_QUANTUM 0x002c
+#define mmSDMA5_PHASE0_QUANTUM_BASE_IDX 1
+#define mmSDMA5_PHASE1_QUANTUM 0x002d
+#define mmSDMA5_PHASE1_QUANTUM_BASE_IDX 1
+#define mmSDMA5_EDC_CONFIG 0x0032
+#define mmSDMA5_EDC_CONFIG_BASE_IDX 1
+#define mmSDMA5_BA_THRESHOLD 0x0033
+#define mmSDMA5_BA_THRESHOLD_BASE_IDX 1
+#define mmSDMA5_ID 0x0034
+#define mmSDMA5_ID_BASE_IDX 1
+#define mmSDMA5_VERSION 0x0035
+#define mmSDMA5_VERSION_BASE_IDX 1
+#define mmSDMA5_EDC_COUNTER 0x0036
+#define mmSDMA5_EDC_COUNTER_BASE_IDX 1
+#define mmSDMA5_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA5_EDC_COUNTER_CLEAR_BASE_IDX 1
+#define mmSDMA5_STATUS2_REG 0x0038
+#define mmSDMA5_STATUS2_REG_BASE_IDX 1
+#define mmSDMA5_ATOMIC_CNTL 0x0039
+#define mmSDMA5_ATOMIC_CNTL_BASE_IDX 1
+#define mmSDMA5_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA5_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmSDMA5_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA5_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmSDMA5_UTCL1_CNTL 0x003c
+#define mmSDMA5_UTCL1_CNTL_BASE_IDX 1
+#define mmSDMA5_UTCL1_WATERMK 0x003d
+#define mmSDMA5_UTCL1_WATERMK_BASE_IDX 1
+#define mmSDMA5_UTCL1_RD_STATUS 0x003e
+#define mmSDMA5_UTCL1_RD_STATUS_BASE_IDX 1
+#define mmSDMA5_UTCL1_WR_STATUS 0x003f
+#define mmSDMA5_UTCL1_WR_STATUS_BASE_IDX 1
+#define mmSDMA5_UTCL1_INV0 0x0040
+#define mmSDMA5_UTCL1_INV0_BASE_IDX 1
+#define mmSDMA5_UTCL1_INV1 0x0041
+#define mmSDMA5_UTCL1_INV1_BASE_IDX 1
+#define mmSDMA5_UTCL1_INV2 0x0042
+#define mmSDMA5_UTCL1_INV2_BASE_IDX 1
+#define mmSDMA5_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA5_UTCL1_RD_XNACK0_BASE_IDX 1
+#define mmSDMA5_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA5_UTCL1_RD_XNACK1_BASE_IDX 1
+#define mmSDMA5_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA5_UTCL1_WR_XNACK0_BASE_IDX 1
+#define mmSDMA5_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA5_UTCL1_WR_XNACK1_BASE_IDX 1
+#define mmSDMA5_UTCL1_TIMEOUT 0x0047
+#define mmSDMA5_UTCL1_TIMEOUT_BASE_IDX 1
+#define mmSDMA5_UTCL1_PAGE 0x0048
+#define mmSDMA5_UTCL1_PAGE_BASE_IDX 1
+#define mmSDMA5_POWER_CNTL_IDLE 0x0049
+#define mmSDMA5_POWER_CNTL_IDLE_BASE_IDX 1
+#define mmSDMA5_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA5_RELAX_ORDERING_LUT_BASE_IDX 1
+#define mmSDMA5_CHICKEN_BITS_2 0x004b
+#define mmSDMA5_CHICKEN_BITS_2_BASE_IDX 1
+#define mmSDMA5_STATUS3_REG 0x004c
+#define mmSDMA5_STATUS3_REG_BASE_IDX 1
+#define mmSDMA5_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA5_PHYSICAL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA5_PHYSICAL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_PHASE2_QUANTUM 0x004f
+#define mmSDMA5_PHASE2_QUANTUM_BASE_IDX 1
+#define mmSDMA5_ERROR_LOG 0x0050
+#define mmSDMA5_ERROR_LOG_BASE_IDX 1
+#define mmSDMA5_PUB_DUMMY_REG0 0x0051
+#define mmSDMA5_PUB_DUMMY_REG0_BASE_IDX 1
+#define mmSDMA5_PUB_DUMMY_REG1 0x0052
+#define mmSDMA5_PUB_DUMMY_REG1_BASE_IDX 1
+#define mmSDMA5_PUB_DUMMY_REG2 0x0053
+#define mmSDMA5_PUB_DUMMY_REG2_BASE_IDX 1
+#define mmSDMA5_PUB_DUMMY_REG3 0x0054
+#define mmSDMA5_PUB_DUMMY_REG3_BASE_IDX 1
+#define mmSDMA5_F32_COUNTER 0x0055
+#define mmSDMA5_F32_COUNTER_BASE_IDX 1
+#define mmSDMA5_UNBREAKABLE 0x0056
+#define mmSDMA5_UNBREAKABLE_BASE_IDX 1
+#define mmSDMA5_PERFMON_CNTL 0x0057
+#define mmSDMA5_PERFMON_CNTL_BASE_IDX 1
+#define mmSDMA5_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA5_PERFCOUNTER0_RESULT_BASE_IDX 1
+#define mmSDMA5_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA5_PERFCOUNTER1_RESULT_BASE_IDX 1
+#define mmSDMA5_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA5_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1
+#define mmSDMA5_CRD_CNTL 0x005b
+#define mmSDMA5_CRD_CNTL_BASE_IDX 1
+#define mmSDMA5_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA5_GPU_IOV_VIOLATION_LOG_BASE_IDX 1
+#define mmSDMA5_ULV_CNTL 0x005e
+#define mmSDMA5_ULV_CNTL_BASE_IDX 1
+#define mmSDMA5_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA5_EA_DBIT_ADDR_DATA_BASE_IDX 1
+#define mmSDMA5_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA5_EA_DBIT_ADDR_INDEX_BASE_IDX 1
+#define mmSDMA5_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA5_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1
+#define mmSDMA5_GFX_RB_CNTL 0x0080
+#define mmSDMA5_GFX_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_GFX_RB_BASE 0x0081
+#define mmSDMA5_GFX_RB_BASE_BASE_IDX 1
+#define mmSDMA5_GFX_RB_BASE_HI 0x0082
+#define mmSDMA5_GFX_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_GFX_RB_RPTR 0x0083
+#define mmSDMA5_GFX_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA5_GFX_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_GFX_RB_WPTR 0x0085
+#define mmSDMA5_GFX_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA5_GFX_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA5_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA5_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA5_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_GFX_IB_CNTL 0x008a
+#define mmSDMA5_GFX_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_GFX_IB_RPTR 0x008b
+#define mmSDMA5_GFX_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_GFX_IB_OFFSET 0x008c
+#define mmSDMA5_GFX_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_GFX_IB_BASE_LO 0x008d
+#define mmSDMA5_GFX_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_GFX_IB_BASE_HI 0x008e
+#define mmSDMA5_GFX_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_GFX_IB_SIZE 0x008f
+#define mmSDMA5_GFX_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_GFX_SKIP_CNTL 0x0090
+#define mmSDMA5_GFX_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA5_GFX_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_GFX_DOORBELL 0x0092
+#define mmSDMA5_GFX_DOORBELL_BASE_IDX 1
+#define mmSDMA5_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA5_GFX_CONTEXT_CNTL_BASE_IDX 1
+#define mmSDMA5_GFX_STATUS 0x00a8
+#define mmSDMA5_GFX_STATUS_BASE_IDX 1
+#define mmSDMA5_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA5_GFX_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_GFX_WATERMARK 0x00aa
+#define mmSDMA5_GFX_WATERMARK_BASE_IDX 1
+#define mmSDMA5_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA5_GFX_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA5_GFX_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA5_GFX_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA5_GFX_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_GFX_PREEMPT 0x00b0
+#define mmSDMA5_GFX_PREEMPT_BASE_IDX 1
+#define mmSDMA5_GFX_DUMMY_REG 0x00b1
+#define mmSDMA5_GFX_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA5_GFX_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA5_GFX_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA5_GFX_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA5_GFX_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA5_GFX_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA5_GFX_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA5_GFX_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA5_GFX_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA5_GFX_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA5_GFX_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA5_GFX_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA5_GFX_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_CNTL 0x00d8
+#define mmSDMA5_PAGE_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_BASE 0x00d9
+#define mmSDMA5_PAGE_RB_BASE_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA5_PAGE_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_RPTR 0x00db
+#define mmSDMA5_PAGE_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA5_PAGE_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_WPTR 0x00dd
+#define mmSDMA5_PAGE_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA5_PAGE_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA5_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA5_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA5_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_CNTL 0x00e2
+#define mmSDMA5_PAGE_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_RPTR 0x00e3
+#define mmSDMA5_PAGE_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA5_PAGE_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA5_PAGE_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA5_PAGE_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_SIZE 0x00e7
+#define mmSDMA5_PAGE_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA5_PAGE_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA5_PAGE_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_PAGE_DOORBELL 0x00ea
+#define mmSDMA5_PAGE_DOORBELL_BASE_IDX 1
+#define mmSDMA5_PAGE_STATUS 0x0100
+#define mmSDMA5_PAGE_STATUS_BASE_IDX 1
+#define mmSDMA5_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA5_PAGE_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_PAGE_WATERMARK 0x0102
+#define mmSDMA5_PAGE_WATERMARK_BASE_IDX 1
+#define mmSDMA5_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA5_PAGE_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA5_PAGE_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA5_PAGE_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA5_PAGE_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_PAGE_PREEMPT 0x0108
+#define mmSDMA5_PAGE_PREEMPT_BASE_IDX 1
+#define mmSDMA5_PAGE_DUMMY_REG 0x0109
+#define mmSDMA5_PAGE_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA5_PAGE_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA5_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA5_PAGE_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA5_PAGE_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA5_PAGE_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA5_PAGE_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA5_PAGE_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA5_PAGE_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA5_PAGE_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA5_PAGE_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA5_PAGE_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA5_PAGE_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_CNTL 0x0130
+#define mmSDMA5_RLC0_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_BASE 0x0131
+#define mmSDMA5_RLC0_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA5_RLC0_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_RPTR 0x0133
+#define mmSDMA5_RLC0_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA5_RLC0_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_WPTR 0x0135
+#define mmSDMA5_RLC0_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA5_RLC0_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA5_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA5_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA5_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_CNTL 0x013a
+#define mmSDMA5_RLC0_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_RPTR 0x013b
+#define mmSDMA5_RLC0_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_OFFSET 0x013c
+#define mmSDMA5_RLC0_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA5_RLC0_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA5_RLC0_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_SIZE 0x013f
+#define mmSDMA5_RLC0_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA5_RLC0_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA5_RLC0_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC0_DOORBELL 0x0142
+#define mmSDMA5_RLC0_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC0_STATUS 0x0158
+#define mmSDMA5_RLC0_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA5_RLC0_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC0_WATERMARK 0x015a
+#define mmSDMA5_RLC0_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA5_RLC0_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA5_RLC0_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA5_RLC0_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA5_RLC0_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC0_PREEMPT 0x0160
+#define mmSDMA5_RLC0_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC0_DUMMY_REG 0x0161
+#define mmSDMA5_RLC0_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA5_RLC0_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA5_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA5_RLC0_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA5_RLC0_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA5_RLC0_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA5_RLC0_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA5_RLC0_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA5_RLC0_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA5_RLC0_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA5_RLC0_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA5_RLC0_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA5_RLC0_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_CNTL 0x0188
+#define mmSDMA5_RLC1_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_BASE 0x0189
+#define mmSDMA5_RLC1_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA5_RLC1_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_RPTR 0x018b
+#define mmSDMA5_RLC1_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA5_RLC1_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_WPTR 0x018d
+#define mmSDMA5_RLC1_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA5_RLC1_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA5_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA5_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA5_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_CNTL 0x0192
+#define mmSDMA5_RLC1_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_RPTR 0x0193
+#define mmSDMA5_RLC1_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_OFFSET 0x0194
+#define mmSDMA5_RLC1_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA5_RLC1_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA5_RLC1_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_SIZE 0x0197
+#define mmSDMA5_RLC1_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA5_RLC1_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA5_RLC1_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC1_DOORBELL 0x019a
+#define mmSDMA5_RLC1_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC1_STATUS 0x01b0
+#define mmSDMA5_RLC1_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA5_RLC1_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC1_WATERMARK 0x01b2
+#define mmSDMA5_RLC1_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA5_RLC1_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA5_RLC1_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA5_RLC1_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA5_RLC1_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC1_PREEMPT 0x01b8
+#define mmSDMA5_RLC1_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA5_RLC1_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA5_RLC1_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA5_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA5_RLC1_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA5_RLC1_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA5_RLC1_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA5_RLC1_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA5_RLC1_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA5_RLC1_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA5_RLC1_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA5_RLC1_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA5_RLC1_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA5_RLC1_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_CNTL 0x01e0
+#define mmSDMA5_RLC2_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_BASE 0x01e1
+#define mmSDMA5_RLC2_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA5_RLC2_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_RPTR 0x01e3
+#define mmSDMA5_RLC2_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA5_RLC2_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_WPTR 0x01e5
+#define mmSDMA5_RLC2_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA5_RLC2_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA5_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA5_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA5_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_CNTL 0x01ea
+#define mmSDMA5_RLC2_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_RPTR 0x01eb
+#define mmSDMA5_RLC2_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA5_RLC2_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA5_RLC2_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA5_RLC2_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_SIZE 0x01ef
+#define mmSDMA5_RLC2_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA5_RLC2_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA5_RLC2_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC2_DOORBELL 0x01f2
+#define mmSDMA5_RLC2_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC2_STATUS 0x0208
+#define mmSDMA5_RLC2_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA5_RLC2_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC2_WATERMARK 0x020a
+#define mmSDMA5_RLC2_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA5_RLC2_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA5_RLC2_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA5_RLC2_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA5_RLC2_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC2_PREEMPT 0x0210
+#define mmSDMA5_RLC2_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC2_DUMMY_REG 0x0211
+#define mmSDMA5_RLC2_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA5_RLC2_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA5_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA5_RLC2_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA5_RLC2_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA5_RLC2_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA5_RLC2_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA5_RLC2_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA5_RLC2_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA5_RLC2_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA5_RLC2_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA5_RLC2_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA5_RLC2_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_CNTL 0x0238
+#define mmSDMA5_RLC3_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_BASE 0x0239
+#define mmSDMA5_RLC3_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA5_RLC3_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_RPTR 0x023b
+#define mmSDMA5_RLC3_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA5_RLC3_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_WPTR 0x023d
+#define mmSDMA5_RLC3_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA5_RLC3_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA5_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA5_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA5_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_CNTL 0x0242
+#define mmSDMA5_RLC3_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_RPTR 0x0243
+#define mmSDMA5_RLC3_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_OFFSET 0x0244
+#define mmSDMA5_RLC3_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA5_RLC3_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA5_RLC3_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_SIZE 0x0247
+#define mmSDMA5_RLC3_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA5_RLC3_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA5_RLC3_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC3_DOORBELL 0x024a
+#define mmSDMA5_RLC3_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC3_STATUS 0x0260
+#define mmSDMA5_RLC3_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA5_RLC3_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC3_WATERMARK 0x0262
+#define mmSDMA5_RLC3_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA5_RLC3_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA5_RLC3_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA5_RLC3_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA5_RLC3_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC3_PREEMPT 0x0268
+#define mmSDMA5_RLC3_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC3_DUMMY_REG 0x0269
+#define mmSDMA5_RLC3_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA5_RLC3_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA5_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA5_RLC3_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA5_RLC3_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA5_RLC3_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA5_RLC3_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA5_RLC3_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA5_RLC3_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA5_RLC3_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA5_RLC3_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA5_RLC3_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA5_RLC3_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_CNTL 0x0290
+#define mmSDMA5_RLC4_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_BASE 0x0291
+#define mmSDMA5_RLC4_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA5_RLC4_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_RPTR 0x0293
+#define mmSDMA5_RLC4_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA5_RLC4_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_WPTR 0x0295
+#define mmSDMA5_RLC4_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA5_RLC4_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA5_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA5_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA5_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_CNTL 0x029a
+#define mmSDMA5_RLC4_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_RPTR 0x029b
+#define mmSDMA5_RLC4_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_OFFSET 0x029c
+#define mmSDMA5_RLC4_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA5_RLC4_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA5_RLC4_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_SIZE 0x029f
+#define mmSDMA5_RLC4_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA5_RLC4_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA5_RLC4_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC4_DOORBELL 0x02a2
+#define mmSDMA5_RLC4_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC4_STATUS 0x02b8
+#define mmSDMA5_RLC4_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA5_RLC4_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC4_WATERMARK 0x02ba
+#define mmSDMA5_RLC4_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA5_RLC4_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA5_RLC4_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA5_RLC4_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA5_RLC4_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC4_PREEMPT 0x02c0
+#define mmSDMA5_RLC4_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA5_RLC4_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA5_RLC4_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA5_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA5_RLC4_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA5_RLC4_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA5_RLC4_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA5_RLC4_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA5_RLC4_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA5_RLC4_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA5_RLC4_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA5_RLC4_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA5_RLC4_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA5_RLC4_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_CNTL 0x02e8
+#define mmSDMA5_RLC5_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_BASE 0x02e9
+#define mmSDMA5_RLC5_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA5_RLC5_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_RPTR 0x02eb
+#define mmSDMA5_RLC5_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA5_RLC5_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_WPTR 0x02ed
+#define mmSDMA5_RLC5_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA5_RLC5_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA5_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA5_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA5_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_CNTL 0x02f2
+#define mmSDMA5_RLC5_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_RPTR 0x02f3
+#define mmSDMA5_RLC5_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA5_RLC5_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA5_RLC5_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA5_RLC5_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_SIZE 0x02f7
+#define mmSDMA5_RLC5_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA5_RLC5_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA5_RLC5_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC5_DOORBELL 0x02fa
+#define mmSDMA5_RLC5_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC5_STATUS 0x0310
+#define mmSDMA5_RLC5_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA5_RLC5_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC5_WATERMARK 0x0312
+#define mmSDMA5_RLC5_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA5_RLC5_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA5_RLC5_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA5_RLC5_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA5_RLC5_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC5_PREEMPT 0x0318
+#define mmSDMA5_RLC5_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC5_DUMMY_REG 0x0319
+#define mmSDMA5_RLC5_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA5_RLC5_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA5_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA5_RLC5_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA5_RLC5_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA5_RLC5_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA5_RLC5_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA5_RLC5_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA5_RLC5_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA5_RLC5_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA5_RLC5_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA5_RLC5_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA5_RLC5_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_CNTL 0x0340
+#define mmSDMA5_RLC6_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_BASE 0x0341
+#define mmSDMA5_RLC6_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA5_RLC6_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_RPTR 0x0343
+#define mmSDMA5_RLC6_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA5_RLC6_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_WPTR 0x0345
+#define mmSDMA5_RLC6_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA5_RLC6_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA5_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA5_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA5_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_CNTL 0x034a
+#define mmSDMA5_RLC6_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_RPTR 0x034b
+#define mmSDMA5_RLC6_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_OFFSET 0x034c
+#define mmSDMA5_RLC6_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA5_RLC6_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA5_RLC6_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_SIZE 0x034f
+#define mmSDMA5_RLC6_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA5_RLC6_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA5_RLC6_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC6_DOORBELL 0x0352
+#define mmSDMA5_RLC6_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC6_STATUS 0x0368
+#define mmSDMA5_RLC6_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA5_RLC6_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC6_WATERMARK 0x036a
+#define mmSDMA5_RLC6_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA5_RLC6_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA5_RLC6_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA5_RLC6_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA5_RLC6_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC6_PREEMPT 0x0370
+#define mmSDMA5_RLC6_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC6_DUMMY_REG 0x0371
+#define mmSDMA5_RLC6_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA5_RLC6_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA5_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA5_RLC6_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA5_RLC6_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA5_RLC6_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA5_RLC6_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA5_RLC6_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA5_RLC6_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA5_RLC6_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA5_RLC6_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA5_RLC6_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA5_RLC6_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_CNTL 0x0398
+#define mmSDMA5_RLC7_RB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_BASE 0x0399
+#define mmSDMA5_RLC7_RB_BASE_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA5_RLC7_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_RPTR 0x039b
+#define mmSDMA5_RLC7_RB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA5_RLC7_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_WPTR 0x039d
+#define mmSDMA5_RLC7_RB_WPTR_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA5_RLC7_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA5_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA5_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA5_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_CNTL 0x03a2
+#define mmSDMA5_RLC7_IB_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_RPTR 0x03a3
+#define mmSDMA5_RLC7_IB_RPTR_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA5_RLC7_IB_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA5_RLC7_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA5_RLC7_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_SIZE 0x03a7
+#define mmSDMA5_RLC7_IB_SIZE_BASE_IDX 1
+#define mmSDMA5_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA5_RLC7_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA5_RLC7_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC7_DOORBELL 0x03aa
+#define mmSDMA5_RLC7_DOORBELL_BASE_IDX 1
+#define mmSDMA5_RLC7_STATUS 0x03c0
+#define mmSDMA5_RLC7_STATUS_BASE_IDX 1
+#define mmSDMA5_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA5_RLC7_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA5_RLC7_WATERMARK 0x03c2
+#define mmSDMA5_RLC7_WATERMARK_BASE_IDX 1
+#define mmSDMA5_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA5_RLC7_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA5_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA5_RLC7_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA5_RLC7_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA5_RLC7_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA5_RLC7_PREEMPT 0x03c8
+#define mmSDMA5_RLC7_PREEMPT_BASE_IDX 1
+#define mmSDMA5_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA5_RLC7_DUMMY_REG_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA5_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA5_RLC7_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA5_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA5_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA5_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA5_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA5_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA5_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA5_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA5_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA5_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA5_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA5_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA5_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA5_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
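The offset/BASE_IDX pairs in the register header above and the __SHIFT/_MASK pairs in the companion sdma5_4_2_2_sh_mask.h header added below are consumed together: the dword address of a register is the per-IP segment base selected by its *_BASE_IDX plus its offset, and a field is decoded by masking and shifting the register value. A minimal sketch of that convention follows; it is illustrative only, and the segment-base table and its values are hypothetical placeholders rather than the driver's actual reg_offset tables or RREG32 accessors.

#include <stdint.h>

/* Hypothetical per-instance segment bases, indexed by *_BASE_IDX (placeholder values). */
static const uint32_t sdma5_segment_base[2] = { 0x0 /* seg 0 */, 0x0 /* seg 1 */ };

/* Dword offset of a register, e.g.
 * sdma5_reg(mmSDMA5_GFX_RB_CNTL, mmSDMA5_GFX_RB_CNTL_BASE_IDX). */
static inline uint32_t sdma5_reg(uint32_t offset, uint32_t base_idx)
{
	return sdma5_segment_base[base_idx] + offset;
}

/* Decode one field from a register value using its _MASK/__SHIFT pair, e.g.
 * reg_field(val, SDMA5_UCODE_ADDR__VALUE_MASK, SDMA5_UCODE_ADDR__VALUE__SHIFT). */
static inline uint32_t reg_field(uint32_t val, uint32_t mask, uint32_t shift)
{
	return (val & mask) >> shift;
}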
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..e99856b92386
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma5_4_2_2_SH_MASK_HEADER
+#define _sdma5_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma5_sdma5dec
+//SDMA5_UCODE_ADDR
+#define SDMA5_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA5_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA5_UCODE_DATA
+#define SDMA5_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA5_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_VM_CNTL
+#define SDMA5_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA5_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA5_VM_CTX_LO
+#define SDMA5_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA5_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_VM_CTX_HI
+#define SDMA5_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA5_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_ACTIVE_FCN_ID
+#define SDMA5_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA5_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA5_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA5_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA5_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA5_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA5_VM_CTX_CNTL
+#define SDMA5_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA5_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA5_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA5_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA5_VIRT_RESET_REQ
+#define SDMA5_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA5_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA5_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA5_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA5_VF_ENABLE
+#define SDMA5_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA5_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA5_CONTEXT_REG_TYPE0
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE__SHIFT 0x1
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_DOORBELL__SHIFT 0x12
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA5_CONTEXT_REG_TYPE1
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_STATUS__SHIFT 0x8
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_WATERMARK__SHIFT 0xa
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA5_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_PREEMPT__SHIFT 0x10
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA5_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_STATUS_MASK 0x00000100L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA5_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA5_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA5_CONTEXT_REG_TYPE2
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA5_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA5_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA5_CONTEXT_REG_TYPE3
+#define SDMA5_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA5_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA5_PUB_REG_TYPE0
+#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_ADDR__SHIFT 0x0
+#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_DATA__SHIFT 0x1
+#define SDMA5_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CNTL__SHIFT 0x4
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_LO__SHIFT 0x5
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_HI__SHIFT 0x6
+#define SDMA5_PUB_REG_TYPE0__SDMA5_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA5_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA5_PUB_REG_TYPE0__SDMA5_MMHUB_CNTL__SHIFT 0x13
+#define SDMA5_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA5_PUB_REG_TYPE0__SDMA5_POWER_CNTL__SHIFT 0x1a
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CLK_CTRL__SHIFT 0x1b
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CNTL__SHIFT 0x1c
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_ADDR_MASK 0x00000001L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_DATA_MASK 0x00000002L
+#define SDMA5_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CNTL_MASK 0x00000010L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_LO_MASK 0x00000020L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_HI_MASK 0x00000040L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA5_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA5_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_POWER_CNTL_MASK 0x04000000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CLK_CTRL_MASK 0x08000000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CNTL_MASK 0x10000000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA5_PUB_REG_TYPE1
+#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA5_PUB_REG_TYPE1__SDMA5_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA5_PUB_REG_TYPE1__SDMA5_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA5_PUB_REG_TYPE1__SDMA5_PROGRAM__SHIFT 0x4
+#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS_REG__SHIFT 0x5
+#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS1_REG__SHIFT 0x6
+#define SDMA5_PUB_REG_TYPE1__SDMA5_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA5_PUB_REG_TYPE1__SDMA5_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA5_PUB_REG_TYPE1__SDMA5_F32_CNTL__SHIFT 0xa
+#define SDMA5_PUB_REG_TYPE1__SDMA5_FREEZE__SHIFT 0xb
+#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA5_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_CONFIG__SHIFT 0x12
+#define SDMA5_PUB_REG_TYPE1__SDMA5_BA_THRESHOLD__SHIFT 0x13
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ID__SHIFT 0x14
+#define SDMA5_PUB_REG_TYPE1__SDMA5_VERSION__SHIFT 0x15
+#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER__SHIFT 0x16
+#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS2_REG__SHIFT 0x18
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_PROGRAM_MASK 0x00000010L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS_REG_MASK 0x00000020L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS1_REG_MASK 0x00000040L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_F32_CNTL_MASK 0x00000400L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_FREEZE_MASK 0x00000800L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA5_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_CONFIG_MASK 0x00040000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ID_MASK 0x00100000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_VERSION_MASK 0x00200000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER_MASK 0x00400000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS2_REG_MASK 0x01000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA5_PUB_REG_TYPE2
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV0__SHIFT 0x0
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV1__SHIFT 0x1
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV2__SHIFT 0x2
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_PAGE__SHIFT 0x8
+#define SDMA5_PUB_REG_TYPE2__SDMA5_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA5_PUB_REG_TYPE2__SDMA5_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA5_PUB_REG_TYPE2__SDMA5_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA5_PUB_REG_TYPE2__SDMA5_STATUS3_REG__SHIFT 0xc
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA5_PUB_REG_TYPE2__SDMA5_ERROR_LOG__SHIFT 0x10
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA5_PUB_REG_TYPE2__SDMA5_F32_COUNTER__SHIFT 0x15
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UNBREAKABLE__SHIFT 0x16
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFMON_CNTL__SHIFT 0x17
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA5_PUB_REG_TYPE2__SDMA5_CRD_CNTL__SHIFT 0x1b
+#define SDMA5_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA5_PUB_REG_TYPE2__SDMA5_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA5_PUB_REG_TYPE2__SDMA5_ULV_CNTL__SHIFT 0x1e
+#define SDMA5_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV0_MASK 0x00000001L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV1_MASK 0x00000002L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV2_MASK 0x00000004L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_STATUS3_REG_MASK 0x00001000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_ERROR_LOG_MASK 0x00010000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_F32_COUNTER_MASK 0x00200000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_UNBREAKABLE_MASK 0x00400000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_CRD_CNTL_MASK 0x08000000L
+#define SDMA5_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA5_PUB_REG_TYPE2__SDMA5_ULV_CNTL_MASK 0x40000000L
+#define SDMA5_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA5_PUB_REG_TYPE3
+#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA5_PUB_REG_TYPE3__SDMA5_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA5_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA5_PUB_REG_TYPE3__SDMA5_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA5_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA5_MMHUB_CNTL
+#define SDMA5_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA5_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA5_CONTEXT_GROUP_BOUNDARY
+#define SDMA5_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA5_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA5_POWER_CNTL
+#define SDMA5_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA5_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA5_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA5_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA5_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA5_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA5_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA5_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA5_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA5_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA5_CLK_CTRL
+#define SDMA5_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA5_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA5_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA5_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA5_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA5_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA5_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA5_CNTL
+#define SDMA5_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA5_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA5_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA5_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA5_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA5_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA5_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA5_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA5_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA5_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA5_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA5_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA5_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA5_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA5_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA5_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA5_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA5_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA5_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA5_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA5_CHICKEN_BITS
+#define SDMA5_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA5_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA5_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA5_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA5_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA5_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA5_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA5_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA5_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA5_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA5_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA5_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA5_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA5_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA5_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA5_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA5_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA5_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA5_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA5_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA5_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA5_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA5_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA5_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA5_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA5_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA5_GB_ADDR_CONFIG
+#define SDMA5_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA5_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA5_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA5_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA5_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA5_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA5_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA5_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA5_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA5_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA5_GB_ADDR_CONFIG_READ
+#define SDMA5_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA5_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA5_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA5_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA5_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA5_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA5_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA5_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA5_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA5_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA5_RB_RPTR_FETCH_HI
+#define SDMA5_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA5_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA5_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA5_RB_RPTR_FETCH
+#define SDMA5_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA5_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA5_IB_OFFSET_FETCH
+#define SDMA5_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA5_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA5_PROGRAM
+#define SDMA5_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA5_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA5_STATUS_REG
+#define SDMA5_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA5_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA5_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA5_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA5_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA5_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA5_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA5_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA5_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA5_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA5_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA5_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA5_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA5_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA5_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA5_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA5_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA5_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA5_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA5_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA5_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA5_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA5_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA5_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA5_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA5_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA5_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA5_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA5_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA5_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA5_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA5_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA5_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA5_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA5_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA5_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA5_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA5_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA5_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA5_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA5_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA5_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA5_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA5_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA5_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA5_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA5_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA5_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA5_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA5_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA5_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA5_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA5_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA5_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA5_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA5_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA5_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA5_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA5_STATUS1_REG
+#define SDMA5_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA5_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA5_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA5_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA5_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA5_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA5_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA5_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA5_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA5_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA5_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA5_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA5_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA5_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA5_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA5_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA5_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA5_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA5_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA5_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA5_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA5_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA5_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA5_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA5_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA5_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA5_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA5_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA5_RD_BURST_CNTL
+#define SDMA5_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA5_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA5_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA5_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA5_HBM_PAGE_CONFIG
+#define SDMA5_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA5_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA5_UCODE_CHECKSUM
+#define SDMA5_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA5_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA5_F32_CNTL
+#define SDMA5_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA5_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA5_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA5_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA5_FREEZE
+#define SDMA5_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA5_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA5_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA5_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA5_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA5_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA5_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA5_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA5_PHASE0_QUANTUM
+#define SDMA5_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA5_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA5_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA5_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA5_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA5_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA5_PHASE1_QUANTUM
+#define SDMA5_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA5_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA5_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA5_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA5_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA5_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA5_EDC_CONFIG
+#define SDMA5_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA5_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA5_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA5_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA5_BA_THRESHOLD
+#define SDMA5_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA5_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA5_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA5_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA5_ID
+#define SDMA5_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA5_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA5_VERSION
+#define SDMA5_VERSION__MINVER__SHIFT 0x0
+#define SDMA5_VERSION__MAJVER__SHIFT 0x8
+#define SDMA5_VERSION__REV__SHIFT 0x10
+#define SDMA5_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA5_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA5_VERSION__REV_MASK 0x003F0000L
+//SDMA5_EDC_COUNTER
+#define SDMA5_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA5_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA5_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA5_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA5_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA5_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA5_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA5_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA5_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA5_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA5_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA5_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA5_EDC_COUNTER_CLEAR
+#define SDMA5_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA5_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA5_STATUS2_REG
+#define SDMA5_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA5_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA5_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA5_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA5_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA5_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA5_ATOMIC_CNTL
+#define SDMA5_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA5_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA5_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA5_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA5_ATOMIC_PREOP_LO
+#define SDMA5_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA5_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA5_ATOMIC_PREOP_HI
+#define SDMA5_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA5_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA5_UTCL1_CNTL
+#define SDMA5_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA5_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA5_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA5_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA5_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA5_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA5_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA5_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA5_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA5_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA5_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA5_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA5_UTCL1_WATERMK
+#define SDMA5_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA5_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA5_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA5_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA5_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA5_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA5_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA5_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA5_UTCL1_RD_STATUS
+#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA5_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA5_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA5_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA5_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA5_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA5_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA5_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA5_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA5_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA5_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA5_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA5_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA5_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA5_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA5_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA5_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA5_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA5_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA5_UTCL1_WR_STATUS
+#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA5_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA5_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA5_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA5_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA5_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA5_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA5_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA5_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA5_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA5_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA5_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA5_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA5_UTCL1_INV0
+#define SDMA5_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA5_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA5_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA5_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA5_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA5_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA5_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA5_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA5_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA5_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA5_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA5_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA5_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA5_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA5_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA5_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA5_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA5_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA5_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA5_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA5_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA5_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA5_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA5_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA5_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA5_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA5_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA5_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA5_UTCL1_INV1
+#define SDMA5_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA5_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA5_UTCL1_INV2
+#define SDMA5_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA5_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA5_UTCL1_RD_XNACK0
+#define SDMA5_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA5_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA5_UTCL1_RD_XNACK1
+#define SDMA5_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA5_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA5_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA5_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA5_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA5_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA5_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA5_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA5_UTCL1_WR_XNACK0
+#define SDMA5_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA5_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA5_UTCL1_WR_XNACK1
+#define SDMA5_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA5_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA5_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA5_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA5_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA5_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA5_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA5_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA5_UTCL1_TIMEOUT
+#define SDMA5_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA5_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA5_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA5_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA5_UTCL1_PAGE
+#define SDMA5_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA5_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA5_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA5_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA5_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA5_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA5_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA5_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA5_POWER_CNTL_IDLE
+#define SDMA5_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA5_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA5_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA5_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA5_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA5_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA5_RELAX_ORDERING_LUT
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA5_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA5_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA5_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA5_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA5_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA5_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA5_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA5_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA5_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA5_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA5_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA5_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA5_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA5_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA5_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA5_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA5_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA5_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA5_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA5_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA5_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA5_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA5_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA5_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA5_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA5_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA5_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA5_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA5_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA5_CHICKEN_BITS_2
+#define SDMA5_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA5_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA5_STATUS3_REG
+#define SDMA5_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA5_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA5_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA5_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA5_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA5_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA5_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA5_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA5_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA5_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA5_PHYSICAL_ADDR_LO
+#define SDMA5_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA5_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA5_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA5_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA5_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA5_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA5_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA5_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA5_PHYSICAL_ADDR_HI
+#define SDMA5_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA5_PHASE2_QUANTUM
+#define SDMA5_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA5_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA5_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA5_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA5_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA5_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA5_ERROR_LOG
+#define SDMA5_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA5_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA5_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA5_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA5_PUB_DUMMY_REG0
+#define SDMA5_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA5_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_PUB_DUMMY_REG1
+#define SDMA5_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA5_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_PUB_DUMMY_REG2
+#define SDMA5_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA5_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_PUB_DUMMY_REG3
+#define SDMA5_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA5_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_F32_COUNTER
+#define SDMA5_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA5_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_UNBREAKABLE
+#define SDMA5_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA5_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA5_PERFMON_CNTL
+#define SDMA5_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA5_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA5_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA5_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA5_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA5_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA5_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA5_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA5_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA5_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA5_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA5_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA5_PERFCOUNTER0_RESULT
+#define SDMA5_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA5_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA5_PERFCOUNTER1_RESULT
+#define SDMA5_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA5_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA5_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA5_CRD_CNTL
+#define SDMA5_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA5_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA5_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA5_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA5_GPU_IOV_VIOLATION_LOG
+#define SDMA5_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA5_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA5_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA5_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA5_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA5_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA5_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA5_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA5_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA5_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA5_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA5_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA5_ULV_CNTL
+#define SDMA5_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA5_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA5_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA5_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA5_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA5_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA5_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA5_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA5_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA5_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA5_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA5_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA5_EA_DBIT_ADDR_DATA
+#define SDMA5_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA5_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA5_EA_DBIT_ADDR_INDEX
+#define SDMA5_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA5_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA5_GPU_IOV_VIOLATION_LOG2
+#define SDMA5_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA5_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA5_GFX_RB_CNTL
+#define SDMA5_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_GFX_RB_BASE
+#define SDMA5_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_BASE_HI
+#define SDMA5_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_GFX_RB_RPTR
+#define SDMA5_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_RPTR_HI
+#define SDMA5_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_WPTR
+#define SDMA5_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_WPTR_HI
+#define SDMA5_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_WPTR_POLL_CNTL
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_GFX_RB_RPTR_ADDR_HI
+#define SDMA5_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_RPTR_ADDR_LO
+#define SDMA5_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_GFX_IB_CNTL
+#define SDMA5_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_GFX_IB_RPTR
+#define SDMA5_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_GFX_IB_OFFSET
+#define SDMA5_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_GFX_IB_BASE_LO
+#define SDMA5_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_GFX_IB_BASE_HI
+#define SDMA5_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_GFX_IB_SIZE
+#define SDMA5_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_GFX_SKIP_CNTL
+#define SDMA5_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_GFX_CONTEXT_STATUS
+#define SDMA5_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_GFX_DOORBELL
+#define SDMA5_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_GFX_CONTEXT_CNTL
+#define SDMA5_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA5_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA5_GFX_STATUS
+#define SDMA5_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_GFX_DOORBELL_LOG
+#define SDMA5_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_GFX_WATERMARK
+#define SDMA5_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_GFX_DOORBELL_OFFSET
+#define SDMA5_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_GFX_CSA_ADDR_LO
+#define SDMA5_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_GFX_CSA_ADDR_HI
+#define SDMA5_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_GFX_IB_SUB_REMAIN
+#define SDMA5_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_GFX_PREEMPT
+#define SDMA5_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_GFX_DUMMY_REG
+#define SDMA5_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_GFX_RB_AQL_CNTL
+#define SDMA5_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_GFX_MINOR_PTR_UPDATE
+#define SDMA5_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_GFX_MIDCMD_DATA0
+#define SDMA5_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA1
+#define SDMA5_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA2
+#define SDMA5_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA3
+#define SDMA5_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA4
+#define SDMA5_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA5
+#define SDMA5_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA6
+#define SDMA5_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA7
+#define SDMA5_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_DATA8
+#define SDMA5_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_GFX_MIDCMD_CNTL
+#define SDMA5_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_PAGE_RB_CNTL
+#define SDMA5_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_PAGE_RB_BASE
+#define SDMA5_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_BASE_HI
+#define SDMA5_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_PAGE_RB_RPTR
+#define SDMA5_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_RPTR_HI
+#define SDMA5_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_WPTR
+#define SDMA5_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_WPTR_HI
+#define SDMA5_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_PAGE_RB_RPTR_ADDR_HI
+#define SDMA5_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_RPTR_ADDR_LO
+#define SDMA5_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_PAGE_IB_CNTL
+#define SDMA5_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_PAGE_IB_RPTR
+#define SDMA5_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_PAGE_IB_OFFSET
+#define SDMA5_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_PAGE_IB_BASE_LO
+#define SDMA5_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_PAGE_IB_BASE_HI
+#define SDMA5_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_IB_SIZE
+#define SDMA5_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_PAGE_SKIP_CNTL
+#define SDMA5_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_PAGE_CONTEXT_STATUS
+#define SDMA5_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_PAGE_DOORBELL
+#define SDMA5_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_PAGE_STATUS
+#define SDMA5_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_PAGE_DOORBELL_LOG
+#define SDMA5_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_PAGE_WATERMARK
+#define SDMA5_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_PAGE_DOORBELL_OFFSET
+#define SDMA5_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_PAGE_CSA_ADDR_LO
+#define SDMA5_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_PAGE_CSA_ADDR_HI
+#define SDMA5_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_IB_SUB_REMAIN
+#define SDMA5_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_PAGE_PREEMPT
+#define SDMA5_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_PAGE_DUMMY_REG
+#define SDMA5_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_PAGE_RB_AQL_CNTL
+#define SDMA5_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_PAGE_MINOR_PTR_UPDATE
+#define SDMA5_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_PAGE_MIDCMD_DATA0
+#define SDMA5_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA1
+#define SDMA5_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA2
+#define SDMA5_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA3
+#define SDMA5_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA4
+#define SDMA5_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA5
+#define SDMA5_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA6
+#define SDMA5_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA7
+#define SDMA5_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_DATA8
+#define SDMA5_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_PAGE_MIDCMD_CNTL
+#define SDMA5_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC0_RB_CNTL
+#define SDMA5_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC0_RB_BASE
+#define SDMA5_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_BASE_HI
+#define SDMA5_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC0_RB_RPTR
+#define SDMA5_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_RPTR_HI
+#define SDMA5_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_WPTR
+#define SDMA5_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_WPTR_HI
+#define SDMA5_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC0_RB_RPTR_ADDR_HI
+#define SDMA5_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_RPTR_ADDR_LO
+#define SDMA5_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC0_IB_CNTL
+#define SDMA5_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC0_IB_RPTR
+#define SDMA5_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC0_IB_OFFSET
+#define SDMA5_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC0_IB_BASE_LO
+#define SDMA5_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC0_IB_BASE_HI
+#define SDMA5_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_IB_SIZE
+#define SDMA5_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC0_SKIP_CNTL
+#define SDMA5_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC0_CONTEXT_STATUS
+#define SDMA5_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC0_DOORBELL
+#define SDMA5_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC0_STATUS
+#define SDMA5_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC0_DOORBELL_LOG
+#define SDMA5_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC0_WATERMARK
+#define SDMA5_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC0_DOORBELL_OFFSET
+#define SDMA5_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC0_CSA_ADDR_LO
+#define SDMA5_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC0_CSA_ADDR_HI
+#define SDMA5_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_IB_SUB_REMAIN
+#define SDMA5_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC0_PREEMPT
+#define SDMA5_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC0_DUMMY_REG
+#define SDMA5_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC0_RB_AQL_CNTL
+#define SDMA5_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC0_MINOR_PTR_UPDATE
+#define SDMA5_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC0_MIDCMD_DATA0
+#define SDMA5_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA1
+#define SDMA5_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA2
+#define SDMA5_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA3
+#define SDMA5_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA4
+#define SDMA5_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA5
+#define SDMA5_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA6
+#define SDMA5_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA7
+#define SDMA5_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_DATA8
+#define SDMA5_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC0_MIDCMD_CNTL
+#define SDMA5_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC1_RB_CNTL
+#define SDMA5_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC1_RB_BASE
+#define SDMA5_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_BASE_HI
+#define SDMA5_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC1_RB_RPTR
+#define SDMA5_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_RPTR_HI
+#define SDMA5_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_WPTR
+#define SDMA5_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_WPTR_HI
+#define SDMA5_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC1_RB_RPTR_ADDR_HI
+#define SDMA5_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_RPTR_ADDR_LO
+#define SDMA5_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC1_IB_CNTL
+#define SDMA5_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC1_IB_RPTR
+#define SDMA5_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC1_IB_OFFSET
+#define SDMA5_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC1_IB_BASE_LO
+#define SDMA5_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC1_IB_BASE_HI
+#define SDMA5_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_IB_SIZE
+#define SDMA5_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC1_SKIP_CNTL
+#define SDMA5_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC1_CONTEXT_STATUS
+#define SDMA5_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC1_DOORBELL
+#define SDMA5_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC1_STATUS
+#define SDMA5_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC1_DOORBELL_LOG
+#define SDMA5_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC1_WATERMARK
+#define SDMA5_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC1_DOORBELL_OFFSET
+#define SDMA5_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC1_CSA_ADDR_LO
+#define SDMA5_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC1_CSA_ADDR_HI
+#define SDMA5_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_IB_SUB_REMAIN
+#define SDMA5_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC1_PREEMPT
+#define SDMA5_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC1_DUMMY_REG
+#define SDMA5_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC1_RB_AQL_CNTL
+#define SDMA5_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC1_MINOR_PTR_UPDATE
+#define SDMA5_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC1_MIDCMD_DATA0
+#define SDMA5_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA1
+#define SDMA5_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA2
+#define SDMA5_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA3
+#define SDMA5_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA4
+#define SDMA5_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA5
+#define SDMA5_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA6
+#define SDMA5_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA7
+#define SDMA5_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_DATA8
+#define SDMA5_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC1_MIDCMD_CNTL
+#define SDMA5_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC2_RB_CNTL
+#define SDMA5_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC2_RB_BASE
+#define SDMA5_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_BASE_HI
+#define SDMA5_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC2_RB_RPTR
+#define SDMA5_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_RPTR_HI
+#define SDMA5_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_WPTR
+#define SDMA5_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_WPTR_HI
+#define SDMA5_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC2_RB_RPTR_ADDR_HI
+#define SDMA5_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_RPTR_ADDR_LO
+#define SDMA5_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC2_IB_CNTL
+#define SDMA5_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC2_IB_RPTR
+#define SDMA5_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC2_IB_OFFSET
+#define SDMA5_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC2_IB_BASE_LO
+#define SDMA5_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC2_IB_BASE_HI
+#define SDMA5_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_IB_SIZE
+#define SDMA5_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC2_SKIP_CNTL
+#define SDMA5_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC2_CONTEXT_STATUS
+#define SDMA5_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC2_DOORBELL
+#define SDMA5_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC2_STATUS
+#define SDMA5_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC2_DOORBELL_LOG
+#define SDMA5_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC2_WATERMARK
+#define SDMA5_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC2_DOORBELL_OFFSET
+#define SDMA5_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC2_CSA_ADDR_LO
+#define SDMA5_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC2_CSA_ADDR_HI
+#define SDMA5_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_IB_SUB_REMAIN
+#define SDMA5_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC2_PREEMPT
+#define SDMA5_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC2_DUMMY_REG
+#define SDMA5_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC2_RB_AQL_CNTL
+#define SDMA5_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC2_MINOR_PTR_UPDATE
+#define SDMA5_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC2_MIDCMD_DATA0
+#define SDMA5_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA1
+#define SDMA5_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA2
+#define SDMA5_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA3
+#define SDMA5_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA4
+#define SDMA5_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA5
+#define SDMA5_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA6
+#define SDMA5_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA7
+#define SDMA5_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_DATA8
+#define SDMA5_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC2_MIDCMD_CNTL
+#define SDMA5_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC3_RB_CNTL
+#define SDMA5_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC3_RB_BASE
+#define SDMA5_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_BASE_HI
+#define SDMA5_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC3_RB_RPTR
+#define SDMA5_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_RPTR_HI
+#define SDMA5_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_WPTR
+#define SDMA5_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_WPTR_HI
+#define SDMA5_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC3_RB_RPTR_ADDR_HI
+#define SDMA5_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_RPTR_ADDR_LO
+#define SDMA5_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC3_IB_CNTL
+#define SDMA5_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC3_IB_RPTR
+#define SDMA5_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC3_IB_OFFSET
+#define SDMA5_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC3_IB_BASE_LO
+#define SDMA5_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC3_IB_BASE_HI
+#define SDMA5_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_IB_SIZE
+#define SDMA5_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC3_SKIP_CNTL
+#define SDMA5_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC3_CONTEXT_STATUS
+#define SDMA5_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC3_DOORBELL
+#define SDMA5_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC3_STATUS
+#define SDMA5_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC3_DOORBELL_LOG
+#define SDMA5_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC3_WATERMARK
+#define SDMA5_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC3_DOORBELL_OFFSET
+#define SDMA5_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC3_CSA_ADDR_LO
+#define SDMA5_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC3_CSA_ADDR_HI
+#define SDMA5_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_IB_SUB_REMAIN
+#define SDMA5_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC3_PREEMPT
+#define SDMA5_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC3_DUMMY_REG
+#define SDMA5_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC3_RB_AQL_CNTL
+#define SDMA5_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC3_MINOR_PTR_UPDATE
+#define SDMA5_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC3_MIDCMD_DATA0
+#define SDMA5_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA1
+#define SDMA5_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA2
+#define SDMA5_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA3
+#define SDMA5_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA4
+#define SDMA5_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA5
+#define SDMA5_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA6
+#define SDMA5_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA7
+#define SDMA5_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_DATA8
+#define SDMA5_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC3_MIDCMD_CNTL
+#define SDMA5_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC4_RB_CNTL
+#define SDMA5_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC4_RB_BASE
+#define SDMA5_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_BASE_HI
+#define SDMA5_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC4_RB_RPTR
+#define SDMA5_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_RPTR_HI
+#define SDMA5_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_WPTR
+#define SDMA5_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_WPTR_HI
+#define SDMA5_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC4_RB_RPTR_ADDR_HI
+#define SDMA5_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_RPTR_ADDR_LO
+#define SDMA5_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC4_IB_CNTL
+#define SDMA5_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC4_IB_RPTR
+#define SDMA5_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC4_IB_OFFSET
+#define SDMA5_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC4_IB_BASE_LO
+#define SDMA5_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC4_IB_BASE_HI
+#define SDMA5_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_IB_SIZE
+#define SDMA5_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC4_SKIP_CNTL
+#define SDMA5_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC4_CONTEXT_STATUS
+#define SDMA5_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC4_DOORBELL
+#define SDMA5_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC4_STATUS
+#define SDMA5_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC4_DOORBELL_LOG
+#define SDMA5_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC4_WATERMARK
+#define SDMA5_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC4_DOORBELL_OFFSET
+#define SDMA5_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC4_CSA_ADDR_LO
+#define SDMA5_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC4_CSA_ADDR_HI
+#define SDMA5_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_IB_SUB_REMAIN
+#define SDMA5_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC4_PREEMPT
+#define SDMA5_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC4_DUMMY_REG
+#define SDMA5_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC4_RB_AQL_CNTL
+#define SDMA5_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC4_MINOR_PTR_UPDATE
+#define SDMA5_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC4_MIDCMD_DATA0
+#define SDMA5_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA1
+#define SDMA5_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA2
+#define SDMA5_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA3
+#define SDMA5_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA4
+#define SDMA5_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA5
+#define SDMA5_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA6
+#define SDMA5_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA7
+#define SDMA5_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_DATA8
+#define SDMA5_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC4_MIDCMD_CNTL
+#define SDMA5_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC5_RB_CNTL
+#define SDMA5_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC5_RB_BASE
+#define SDMA5_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_BASE_HI
+#define SDMA5_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC5_RB_RPTR
+#define SDMA5_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_RPTR_HI
+#define SDMA5_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_WPTR
+#define SDMA5_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_WPTR_HI
+#define SDMA5_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC5_RB_RPTR_ADDR_HI
+#define SDMA5_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_RPTR_ADDR_LO
+#define SDMA5_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC5_IB_CNTL
+#define SDMA5_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC5_IB_RPTR
+#define SDMA5_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC5_IB_OFFSET
+#define SDMA5_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC5_IB_BASE_LO
+#define SDMA5_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC5_IB_BASE_HI
+#define SDMA5_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_IB_SIZE
+#define SDMA5_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC5_SKIP_CNTL
+#define SDMA5_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC5_CONTEXT_STATUS
+#define SDMA5_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC5_DOORBELL
+#define SDMA5_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC5_STATUS
+#define SDMA5_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC5_DOORBELL_LOG
+#define SDMA5_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC5_WATERMARK
+#define SDMA5_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC5_DOORBELL_OFFSET
+#define SDMA5_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC5_CSA_ADDR_LO
+#define SDMA5_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC5_CSA_ADDR_HI
+#define SDMA5_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_IB_SUB_REMAIN
+#define SDMA5_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC5_PREEMPT
+#define SDMA5_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC5_DUMMY_REG
+#define SDMA5_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC5_RB_AQL_CNTL
+#define SDMA5_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC5_MINOR_PTR_UPDATE
+#define SDMA5_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC5_MIDCMD_DATA0
+#define SDMA5_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA1
+#define SDMA5_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA2
+#define SDMA5_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA3
+#define SDMA5_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA4
+#define SDMA5_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA5
+#define SDMA5_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA6
+#define SDMA5_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA7
+#define SDMA5_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_DATA8
+#define SDMA5_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC5_MIDCMD_CNTL
+#define SDMA5_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC6_RB_CNTL
+#define SDMA5_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC6_RB_BASE
+#define SDMA5_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_BASE_HI
+#define SDMA5_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC6_RB_RPTR
+#define SDMA5_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_RPTR_HI
+#define SDMA5_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_WPTR
+#define SDMA5_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_WPTR_HI
+#define SDMA5_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC6_RB_RPTR_ADDR_HI
+#define SDMA5_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_RPTR_ADDR_LO
+#define SDMA5_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC6_IB_CNTL
+#define SDMA5_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC6_IB_RPTR
+#define SDMA5_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC6_IB_OFFSET
+#define SDMA5_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC6_IB_BASE_LO
+#define SDMA5_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC6_IB_BASE_HI
+#define SDMA5_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_IB_SIZE
+#define SDMA5_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC6_SKIP_CNTL
+#define SDMA5_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC6_CONTEXT_STATUS
+#define SDMA5_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC6_DOORBELL
+#define SDMA5_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC6_STATUS
+#define SDMA5_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC6_DOORBELL_LOG
+#define SDMA5_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC6_WATERMARK
+#define SDMA5_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC6_DOORBELL_OFFSET
+#define SDMA5_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC6_CSA_ADDR_LO
+#define SDMA5_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC6_CSA_ADDR_HI
+#define SDMA5_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_IB_SUB_REMAIN
+#define SDMA5_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC6_PREEMPT
+#define SDMA5_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC6_DUMMY_REG
+#define SDMA5_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC6_RB_AQL_CNTL
+#define SDMA5_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC6_MINOR_PTR_UPDATE
+#define SDMA5_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC6_MIDCMD_DATA0
+#define SDMA5_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA1
+#define SDMA5_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA2
+#define SDMA5_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA3
+#define SDMA5_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA4
+#define SDMA5_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA5
+#define SDMA5_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA6
+#define SDMA5_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA7
+#define SDMA5_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_DATA8
+#define SDMA5_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC6_MIDCMD_CNTL
+#define SDMA5_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA5_RLC7_RB_CNTL
+#define SDMA5_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA5_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA5_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA5_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA5_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA5_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA5_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA5_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA5_RLC7_RB_BASE
+#define SDMA5_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA5_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_BASE_HI
+#define SDMA5_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA5_RLC7_RB_RPTR
+#define SDMA5_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_RPTR_HI
+#define SDMA5_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_WPTR
+#define SDMA5_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA5_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_WPTR_HI
+#define SDMA5_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA5_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA5_RLC7_RB_RPTR_ADDR_HI
+#define SDMA5_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_RPTR_ADDR_LO
+#define SDMA5_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA5_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA5_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC7_IB_CNTL
+#define SDMA5_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA5_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA5_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA5_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA5_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA5_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA5_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA5_RLC7_IB_RPTR
+#define SDMA5_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA5_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC7_IB_OFFSET
+#define SDMA5_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA5_RLC7_IB_BASE_LO
+#define SDMA5_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA5_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA5_RLC7_IB_BASE_HI
+#define SDMA5_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_IB_SIZE
+#define SDMA5_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA5_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC7_SKIP_CNTL
+#define SDMA5_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA5_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA5_RLC7_CONTEXT_STATUS
+#define SDMA5_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA5_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA5_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA5_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA5_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA5_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA5_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA5_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA5_RLC7_DOORBELL
+#define SDMA5_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA5_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA5_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA5_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA5_RLC7_STATUS
+#define SDMA5_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA5_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA5_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA5_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA5_RLC7_DOORBELL_LOG
+#define SDMA5_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA5_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA5_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA5_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA5_RLC7_WATERMARK
+#define SDMA5_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA5_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA5_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA5_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA5_RLC7_DOORBELL_OFFSET
+#define SDMA5_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA5_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA5_RLC7_CSA_ADDR_LO
+#define SDMA5_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC7_CSA_ADDR_HI
+#define SDMA5_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_IB_SUB_REMAIN
+#define SDMA5_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA5_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA5_RLC7_PREEMPT
+#define SDMA5_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA5_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA5_RLC7_DUMMY_REG
+#define SDMA5_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA5_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA5_RLC7_RB_AQL_CNTL
+#define SDMA5_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA5_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA5_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA5_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA5_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA5_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA5_RLC7_MINOR_PTR_UPDATE
+#define SDMA5_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA5_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA5_RLC7_MIDCMD_DATA0
+#define SDMA5_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA1
+#define SDMA5_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA2
+#define SDMA5_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA3
+#define SDMA5_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA4
+#define SDMA5_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA5
+#define SDMA5_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA6
+#define SDMA5_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA7
+#define SDMA5_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_DATA8
+#define SDMA5_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA5_RLC7_MIDCMD_CNTL
+#define SDMA5_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA5_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA5_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA5_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA5_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA5_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA5_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA5_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h
new file mode 100644
index 000000000000..ae12db26362e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma6_4_2_2_OFFSET_HEADER
+#define _sdma6_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma6_sdma6dec
+// base address: 0x7c000
+#define mmSDMA6_UCODE_ADDR 0x0000
+#define mmSDMA6_UCODE_ADDR_BASE_IDX 1
+#define mmSDMA6_UCODE_DATA 0x0001
+#define mmSDMA6_UCODE_DATA_BASE_IDX 1
+#define mmSDMA6_VM_CNTL 0x0004
+#define mmSDMA6_VM_CNTL_BASE_IDX 1
+#define mmSDMA6_VM_CTX_LO 0x0005
+#define mmSDMA6_VM_CTX_LO_BASE_IDX 1
+#define mmSDMA6_VM_CTX_HI 0x0006
+#define mmSDMA6_VM_CTX_HI_BASE_IDX 1
+#define mmSDMA6_ACTIVE_FCN_ID 0x0007
+#define mmSDMA6_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmSDMA6_VM_CTX_CNTL 0x0008
+#define mmSDMA6_VM_CTX_CNTL_BASE_IDX 1
+#define mmSDMA6_VIRT_RESET_REQ 0x0009
+#define mmSDMA6_VIRT_RESET_REQ_BASE_IDX 1
+#define mmSDMA6_VF_ENABLE 0x000a
+#define mmSDMA6_VF_ENABLE_BASE_IDX 1
+#define mmSDMA6_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA6_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define mmSDMA6_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA6_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define mmSDMA6_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA6_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define mmSDMA6_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA6_CONTEXT_REG_TYPE3_BASE_IDX 1
+#define mmSDMA6_PUB_REG_TYPE0 0x000f
+#define mmSDMA6_PUB_REG_TYPE0_BASE_IDX 1
+#define mmSDMA6_PUB_REG_TYPE1 0x0010
+#define mmSDMA6_PUB_REG_TYPE1_BASE_IDX 1
+#define mmSDMA6_PUB_REG_TYPE2 0x0011
+#define mmSDMA6_PUB_REG_TYPE2_BASE_IDX 1
+#define mmSDMA6_PUB_REG_TYPE3 0x0012
+#define mmSDMA6_PUB_REG_TYPE3_BASE_IDX 1
+#define mmSDMA6_MMHUB_CNTL 0x0013
+#define mmSDMA6_MMHUB_CNTL_BASE_IDX 1
+#define mmSDMA6_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA6_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1
+#define mmSDMA6_POWER_CNTL 0x001a
+#define mmSDMA6_POWER_CNTL_BASE_IDX 1
+#define mmSDMA6_CLK_CTRL 0x001b
+#define mmSDMA6_CLK_CTRL_BASE_IDX 1
+#define mmSDMA6_CNTL 0x001c
+#define mmSDMA6_CNTL_BASE_IDX 1
+#define mmSDMA6_CHICKEN_BITS 0x001d
+#define mmSDMA6_CHICKEN_BITS_BASE_IDX 1
+#define mmSDMA6_GB_ADDR_CONFIG 0x001e
+#define mmSDMA6_GB_ADDR_CONFIG_BASE_IDX 1
+#define mmSDMA6_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA6_GB_ADDR_CONFIG_READ_BASE_IDX 1
+#define mmSDMA6_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA6_RB_RPTR_FETCH_HI_BASE_IDX 1
+#define mmSDMA6_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA6_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1
+#define mmSDMA6_RB_RPTR_FETCH 0x0022
+#define mmSDMA6_RB_RPTR_FETCH_BASE_IDX 1
+#define mmSDMA6_IB_OFFSET_FETCH 0x0023
+#define mmSDMA6_IB_OFFSET_FETCH_BASE_IDX 1
+#define mmSDMA6_PROGRAM 0x0024
+#define mmSDMA6_PROGRAM_BASE_IDX 1
+#define mmSDMA6_STATUS_REG 0x0025
+#define mmSDMA6_STATUS_REG_BASE_IDX 1
+#define mmSDMA6_STATUS1_REG 0x0026
+#define mmSDMA6_STATUS1_REG_BASE_IDX 1
+#define mmSDMA6_RD_BURST_CNTL 0x0027
+#define mmSDMA6_RD_BURST_CNTL_BASE_IDX 1
+#define mmSDMA6_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA6_HBM_PAGE_CONFIG_BASE_IDX 1
+#define mmSDMA6_UCODE_CHECKSUM 0x0029
+#define mmSDMA6_UCODE_CHECKSUM_BASE_IDX 1
+#define mmSDMA6_F32_CNTL 0x002a
+#define mmSDMA6_F32_CNTL_BASE_IDX 1
+#define mmSDMA6_FREEZE 0x002b
+#define mmSDMA6_FREEZE_BASE_IDX 1
+#define mmSDMA6_PHASE0_QUANTUM 0x002c
+#define mmSDMA6_PHASE0_QUANTUM_BASE_IDX 1
+#define mmSDMA6_PHASE1_QUANTUM 0x002d
+#define mmSDMA6_PHASE1_QUANTUM_BASE_IDX 1
+#define mmSDMA6_EDC_CONFIG 0x0032
+#define mmSDMA6_EDC_CONFIG_BASE_IDX 1
+#define mmSDMA6_BA_THRESHOLD 0x0033
+#define mmSDMA6_BA_THRESHOLD_BASE_IDX 1
+#define mmSDMA6_ID 0x0034
+#define mmSDMA6_ID_BASE_IDX 1
+#define mmSDMA6_VERSION 0x0035
+#define mmSDMA6_VERSION_BASE_IDX 1
+#define mmSDMA6_EDC_COUNTER 0x0036
+#define mmSDMA6_EDC_COUNTER_BASE_IDX 1
+#define mmSDMA6_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA6_EDC_COUNTER_CLEAR_BASE_IDX 1
+#define mmSDMA6_STATUS2_REG 0x0038
+#define mmSDMA6_STATUS2_REG_BASE_IDX 1
+#define mmSDMA6_ATOMIC_CNTL 0x0039
+#define mmSDMA6_ATOMIC_CNTL_BASE_IDX 1
+#define mmSDMA6_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA6_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmSDMA6_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA6_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmSDMA6_UTCL1_CNTL 0x003c
+#define mmSDMA6_UTCL1_CNTL_BASE_IDX 1
+#define mmSDMA6_UTCL1_WATERMK 0x003d
+#define mmSDMA6_UTCL1_WATERMK_BASE_IDX 1
+#define mmSDMA6_UTCL1_RD_STATUS 0x003e
+#define mmSDMA6_UTCL1_RD_STATUS_BASE_IDX 1
+#define mmSDMA6_UTCL1_WR_STATUS 0x003f
+#define mmSDMA6_UTCL1_WR_STATUS_BASE_IDX 1
+#define mmSDMA6_UTCL1_INV0 0x0040
+#define mmSDMA6_UTCL1_INV0_BASE_IDX 1
+#define mmSDMA6_UTCL1_INV1 0x0041
+#define mmSDMA6_UTCL1_INV1_BASE_IDX 1
+#define mmSDMA6_UTCL1_INV2 0x0042
+#define mmSDMA6_UTCL1_INV2_BASE_IDX 1
+#define mmSDMA6_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA6_UTCL1_RD_XNACK0_BASE_IDX 1
+#define mmSDMA6_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA6_UTCL1_RD_XNACK1_BASE_IDX 1
+#define mmSDMA6_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA6_UTCL1_WR_XNACK0_BASE_IDX 1
+#define mmSDMA6_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA6_UTCL1_WR_XNACK1_BASE_IDX 1
+#define mmSDMA6_UTCL1_TIMEOUT 0x0047
+#define mmSDMA6_UTCL1_TIMEOUT_BASE_IDX 1
+#define mmSDMA6_UTCL1_PAGE 0x0048
+#define mmSDMA6_UTCL1_PAGE_BASE_IDX 1
+#define mmSDMA6_POWER_CNTL_IDLE 0x0049
+#define mmSDMA6_POWER_CNTL_IDLE_BASE_IDX 1
+#define mmSDMA6_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA6_RELAX_ORDERING_LUT_BASE_IDX 1
+#define mmSDMA6_CHICKEN_BITS_2 0x004b
+#define mmSDMA6_CHICKEN_BITS_2_BASE_IDX 1
+#define mmSDMA6_STATUS3_REG 0x004c
+#define mmSDMA6_STATUS3_REG_BASE_IDX 1
+#define mmSDMA6_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA6_PHYSICAL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA6_PHYSICAL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_PHASE2_QUANTUM 0x004f
+#define mmSDMA6_PHASE2_QUANTUM_BASE_IDX 1
+#define mmSDMA6_ERROR_LOG 0x0050
+#define mmSDMA6_ERROR_LOG_BASE_IDX 1
+#define mmSDMA6_PUB_DUMMY_REG0 0x0051
+#define mmSDMA6_PUB_DUMMY_REG0_BASE_IDX 1
+#define mmSDMA6_PUB_DUMMY_REG1 0x0052
+#define mmSDMA6_PUB_DUMMY_REG1_BASE_IDX 1
+#define mmSDMA6_PUB_DUMMY_REG2 0x0053
+#define mmSDMA6_PUB_DUMMY_REG2_BASE_IDX 1
+#define mmSDMA6_PUB_DUMMY_REG3 0x0054
+#define mmSDMA6_PUB_DUMMY_REG3_BASE_IDX 1
+#define mmSDMA6_F32_COUNTER 0x0055
+#define mmSDMA6_F32_COUNTER_BASE_IDX 1
+#define mmSDMA6_UNBREAKABLE 0x0056
+#define mmSDMA6_UNBREAKABLE_BASE_IDX 1
+#define mmSDMA6_PERFMON_CNTL 0x0057
+#define mmSDMA6_PERFMON_CNTL_BASE_IDX 1
+#define mmSDMA6_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA6_PERFCOUNTER0_RESULT_BASE_IDX 1
+#define mmSDMA6_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA6_PERFCOUNTER1_RESULT_BASE_IDX 1
+#define mmSDMA6_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA6_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1
+#define mmSDMA6_CRD_CNTL 0x005b
+#define mmSDMA6_CRD_CNTL_BASE_IDX 1
+#define mmSDMA6_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA6_GPU_IOV_VIOLATION_LOG_BASE_IDX 1
+#define mmSDMA6_ULV_CNTL 0x005e
+#define mmSDMA6_ULV_CNTL_BASE_IDX 1
+#define mmSDMA6_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA6_EA_DBIT_ADDR_DATA_BASE_IDX 1
+#define mmSDMA6_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA6_EA_DBIT_ADDR_INDEX_BASE_IDX 1
+#define mmSDMA6_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA6_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1
+#define mmSDMA6_GFX_RB_CNTL 0x0080
+#define mmSDMA6_GFX_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_GFX_RB_BASE 0x0081
+#define mmSDMA6_GFX_RB_BASE_BASE_IDX 1
+#define mmSDMA6_GFX_RB_BASE_HI 0x0082
+#define mmSDMA6_GFX_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_GFX_RB_RPTR 0x0083
+#define mmSDMA6_GFX_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA6_GFX_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_GFX_RB_WPTR 0x0085
+#define mmSDMA6_GFX_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA6_GFX_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA6_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA6_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA6_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_GFX_IB_CNTL 0x008a
+#define mmSDMA6_GFX_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_GFX_IB_RPTR 0x008b
+#define mmSDMA6_GFX_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_GFX_IB_OFFSET 0x008c
+#define mmSDMA6_GFX_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_GFX_IB_BASE_LO 0x008d
+#define mmSDMA6_GFX_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_GFX_IB_BASE_HI 0x008e
+#define mmSDMA6_GFX_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_GFX_IB_SIZE 0x008f
+#define mmSDMA6_GFX_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_GFX_SKIP_CNTL 0x0090
+#define mmSDMA6_GFX_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA6_GFX_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_GFX_DOORBELL 0x0092
+#define mmSDMA6_GFX_DOORBELL_BASE_IDX 1
+#define mmSDMA6_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA6_GFX_CONTEXT_CNTL_BASE_IDX 1
+#define mmSDMA6_GFX_STATUS 0x00a8
+#define mmSDMA6_GFX_STATUS_BASE_IDX 1
+#define mmSDMA6_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA6_GFX_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_GFX_WATERMARK 0x00aa
+#define mmSDMA6_GFX_WATERMARK_BASE_IDX 1
+#define mmSDMA6_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA6_GFX_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA6_GFX_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA6_GFX_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA6_GFX_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_GFX_PREEMPT 0x00b0
+#define mmSDMA6_GFX_PREEMPT_BASE_IDX 1
+#define mmSDMA6_GFX_DUMMY_REG 0x00b1
+#define mmSDMA6_GFX_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA6_GFX_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA6_GFX_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA6_GFX_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA6_GFX_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA6_GFX_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA6_GFX_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA6_GFX_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA6_GFX_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA6_GFX_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA6_GFX_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA6_GFX_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA6_GFX_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_CNTL 0x00d8
+#define mmSDMA6_PAGE_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_BASE 0x00d9
+#define mmSDMA6_PAGE_RB_BASE_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA6_PAGE_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_RPTR 0x00db
+#define mmSDMA6_PAGE_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA6_PAGE_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_WPTR 0x00dd
+#define mmSDMA6_PAGE_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA6_PAGE_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA6_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA6_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA6_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_CNTL 0x00e2
+#define mmSDMA6_PAGE_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_RPTR 0x00e3
+#define mmSDMA6_PAGE_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA6_PAGE_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA6_PAGE_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA6_PAGE_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_SIZE 0x00e7
+#define mmSDMA6_PAGE_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA6_PAGE_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA6_PAGE_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_PAGE_DOORBELL 0x00ea
+#define mmSDMA6_PAGE_DOORBELL_BASE_IDX 1
+#define mmSDMA6_PAGE_STATUS 0x0100
+#define mmSDMA6_PAGE_STATUS_BASE_IDX 1
+#define mmSDMA6_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA6_PAGE_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_PAGE_WATERMARK 0x0102
+#define mmSDMA6_PAGE_WATERMARK_BASE_IDX 1
+#define mmSDMA6_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA6_PAGE_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA6_PAGE_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA6_PAGE_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA6_PAGE_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_PAGE_PREEMPT 0x0108
+#define mmSDMA6_PAGE_PREEMPT_BASE_IDX 1
+#define mmSDMA6_PAGE_DUMMY_REG 0x0109
+#define mmSDMA6_PAGE_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA6_PAGE_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA6_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA6_PAGE_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA6_PAGE_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA6_PAGE_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA6_PAGE_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA6_PAGE_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA6_PAGE_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA6_PAGE_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA6_PAGE_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA6_PAGE_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA6_PAGE_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_CNTL 0x0130
+#define mmSDMA6_RLC0_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_BASE 0x0131
+#define mmSDMA6_RLC0_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA6_RLC0_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_RPTR 0x0133
+#define mmSDMA6_RLC0_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA6_RLC0_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_WPTR 0x0135
+#define mmSDMA6_RLC0_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA6_RLC0_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA6_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA6_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA6_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_CNTL 0x013a
+#define mmSDMA6_RLC0_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_RPTR 0x013b
+#define mmSDMA6_RLC0_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_OFFSET 0x013c
+#define mmSDMA6_RLC0_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA6_RLC0_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA6_RLC0_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_SIZE 0x013f
+#define mmSDMA6_RLC0_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA6_RLC0_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA6_RLC0_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC0_DOORBELL 0x0142
+#define mmSDMA6_RLC0_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC0_STATUS 0x0158
+#define mmSDMA6_RLC0_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA6_RLC0_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC0_WATERMARK 0x015a
+#define mmSDMA6_RLC0_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA6_RLC0_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA6_RLC0_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA6_RLC0_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA6_RLC0_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC0_PREEMPT 0x0160
+#define mmSDMA6_RLC0_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC0_DUMMY_REG 0x0161
+#define mmSDMA6_RLC0_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA6_RLC0_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA6_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA6_RLC0_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA6_RLC0_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA6_RLC0_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA6_RLC0_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA6_RLC0_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA6_RLC0_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA6_RLC0_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA6_RLC0_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA6_RLC0_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA6_RLC0_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_CNTL 0x0188
+#define mmSDMA6_RLC1_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_BASE 0x0189
+#define mmSDMA6_RLC1_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA6_RLC1_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_RPTR 0x018b
+#define mmSDMA6_RLC1_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA6_RLC1_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_WPTR 0x018d
+#define mmSDMA6_RLC1_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA6_RLC1_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA6_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA6_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA6_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_CNTL 0x0192
+#define mmSDMA6_RLC1_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_RPTR 0x0193
+#define mmSDMA6_RLC1_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_OFFSET 0x0194
+#define mmSDMA6_RLC1_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA6_RLC1_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA6_RLC1_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_SIZE 0x0197
+#define mmSDMA6_RLC1_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA6_RLC1_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA6_RLC1_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC1_DOORBELL 0x019a
+#define mmSDMA6_RLC1_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC1_STATUS 0x01b0
+#define mmSDMA6_RLC1_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA6_RLC1_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC1_WATERMARK 0x01b2
+#define mmSDMA6_RLC1_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA6_RLC1_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA6_RLC1_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA6_RLC1_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA6_RLC1_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC1_PREEMPT 0x01b8
+#define mmSDMA6_RLC1_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA6_RLC1_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA6_RLC1_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA6_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA6_RLC1_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA6_RLC1_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA6_RLC1_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA6_RLC1_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA6_RLC1_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA6_RLC1_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA6_RLC1_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA6_RLC1_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA6_RLC1_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA6_RLC1_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_CNTL 0x01e0
+#define mmSDMA6_RLC2_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_BASE 0x01e1
+#define mmSDMA6_RLC2_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA6_RLC2_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_RPTR 0x01e3
+#define mmSDMA6_RLC2_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA6_RLC2_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_WPTR 0x01e5
+#define mmSDMA6_RLC2_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA6_RLC2_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA6_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA6_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA6_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_CNTL 0x01ea
+#define mmSDMA6_RLC2_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_RPTR 0x01eb
+#define mmSDMA6_RLC2_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA6_RLC2_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA6_RLC2_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA6_RLC2_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_SIZE 0x01ef
+#define mmSDMA6_RLC2_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA6_RLC2_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA6_RLC2_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC2_DOORBELL 0x01f2
+#define mmSDMA6_RLC2_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC2_STATUS 0x0208
+#define mmSDMA6_RLC2_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA6_RLC2_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC2_WATERMARK 0x020a
+#define mmSDMA6_RLC2_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA6_RLC2_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA6_RLC2_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA6_RLC2_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA6_RLC2_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC2_PREEMPT 0x0210
+#define mmSDMA6_RLC2_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC2_DUMMY_REG 0x0211
+#define mmSDMA6_RLC2_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA6_RLC2_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA6_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA6_RLC2_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA6_RLC2_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA6_RLC2_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA6_RLC2_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA6_RLC2_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA6_RLC2_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA6_RLC2_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA6_RLC2_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA6_RLC2_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA6_RLC2_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_CNTL 0x0238
+#define mmSDMA6_RLC3_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_BASE 0x0239
+#define mmSDMA6_RLC3_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA6_RLC3_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_RPTR 0x023b
+#define mmSDMA6_RLC3_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA6_RLC3_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_WPTR 0x023d
+#define mmSDMA6_RLC3_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA6_RLC3_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA6_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA6_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA6_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_CNTL 0x0242
+#define mmSDMA6_RLC3_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_RPTR 0x0243
+#define mmSDMA6_RLC3_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_OFFSET 0x0244
+#define mmSDMA6_RLC3_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA6_RLC3_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA6_RLC3_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_SIZE 0x0247
+#define mmSDMA6_RLC3_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA6_RLC3_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA6_RLC3_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC3_DOORBELL 0x024a
+#define mmSDMA6_RLC3_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC3_STATUS 0x0260
+#define mmSDMA6_RLC3_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA6_RLC3_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC3_WATERMARK 0x0262
+#define mmSDMA6_RLC3_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA6_RLC3_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA6_RLC3_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA6_RLC3_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA6_RLC3_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC3_PREEMPT 0x0268
+#define mmSDMA6_RLC3_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC3_DUMMY_REG 0x0269
+#define mmSDMA6_RLC3_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA6_RLC3_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA6_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA6_RLC3_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA6_RLC3_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA6_RLC3_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA6_RLC3_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA6_RLC3_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA6_RLC3_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA6_RLC3_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA6_RLC3_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA6_RLC3_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA6_RLC3_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_CNTL 0x0290
+#define mmSDMA6_RLC4_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_BASE 0x0291
+#define mmSDMA6_RLC4_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA6_RLC4_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_RPTR 0x0293
+#define mmSDMA6_RLC4_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA6_RLC4_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_WPTR 0x0295
+#define mmSDMA6_RLC4_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA6_RLC4_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA6_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA6_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA6_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_CNTL 0x029a
+#define mmSDMA6_RLC4_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_RPTR 0x029b
+#define mmSDMA6_RLC4_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_OFFSET 0x029c
+#define mmSDMA6_RLC4_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA6_RLC4_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA6_RLC4_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_SIZE 0x029f
+#define mmSDMA6_RLC4_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA6_RLC4_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA6_RLC4_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC4_DOORBELL 0x02a2
+#define mmSDMA6_RLC4_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC4_STATUS 0x02b8
+#define mmSDMA6_RLC4_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA6_RLC4_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC4_WATERMARK 0x02ba
+#define mmSDMA6_RLC4_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA6_RLC4_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA6_RLC4_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA6_RLC4_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA6_RLC4_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC4_PREEMPT 0x02c0
+#define mmSDMA6_RLC4_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA6_RLC4_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA6_RLC4_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA6_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA6_RLC4_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA6_RLC4_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA6_RLC4_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA6_RLC4_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA6_RLC4_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA6_RLC4_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA6_RLC4_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA6_RLC4_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA6_RLC4_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA6_RLC4_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_CNTL 0x02e8
+#define mmSDMA6_RLC5_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_BASE 0x02e9
+#define mmSDMA6_RLC5_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA6_RLC5_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_RPTR 0x02eb
+#define mmSDMA6_RLC5_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA6_RLC5_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_WPTR 0x02ed
+#define mmSDMA6_RLC5_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA6_RLC5_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA6_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA6_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA6_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_CNTL 0x02f2
+#define mmSDMA6_RLC5_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_RPTR 0x02f3
+#define mmSDMA6_RLC5_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA6_RLC5_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA6_RLC5_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA6_RLC5_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_SIZE 0x02f7
+#define mmSDMA6_RLC5_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA6_RLC5_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA6_RLC5_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC5_DOORBELL 0x02fa
+#define mmSDMA6_RLC5_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC5_STATUS 0x0310
+#define mmSDMA6_RLC5_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA6_RLC5_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC5_WATERMARK 0x0312
+#define mmSDMA6_RLC5_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA6_RLC5_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA6_RLC5_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA6_RLC5_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA6_RLC5_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC5_PREEMPT 0x0318
+#define mmSDMA6_RLC5_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC5_DUMMY_REG 0x0319
+#define mmSDMA6_RLC5_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA6_RLC5_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA6_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA6_RLC5_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA6_RLC5_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA6_RLC5_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA6_RLC5_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA6_RLC5_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA6_RLC5_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA6_RLC5_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA6_RLC5_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA6_RLC5_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA6_RLC5_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_CNTL 0x0340
+#define mmSDMA6_RLC6_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_BASE 0x0341
+#define mmSDMA6_RLC6_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA6_RLC6_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_RPTR 0x0343
+#define mmSDMA6_RLC6_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA6_RLC6_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_WPTR 0x0345
+#define mmSDMA6_RLC6_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA6_RLC6_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA6_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA6_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA6_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_CNTL 0x034a
+#define mmSDMA6_RLC6_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_RPTR 0x034b
+#define mmSDMA6_RLC6_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_OFFSET 0x034c
+#define mmSDMA6_RLC6_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA6_RLC6_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA6_RLC6_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_SIZE 0x034f
+#define mmSDMA6_RLC6_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA6_RLC6_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA6_RLC6_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC6_DOORBELL 0x0352
+#define mmSDMA6_RLC6_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC6_STATUS 0x0368
+#define mmSDMA6_RLC6_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA6_RLC6_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC6_WATERMARK 0x036a
+#define mmSDMA6_RLC6_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA6_RLC6_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA6_RLC6_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA6_RLC6_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA6_RLC6_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC6_PREEMPT 0x0370
+#define mmSDMA6_RLC6_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC6_DUMMY_REG 0x0371
+#define mmSDMA6_RLC6_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA6_RLC6_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA6_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA6_RLC6_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA6_RLC6_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA6_RLC6_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA6_RLC6_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA6_RLC6_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA6_RLC6_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA6_RLC6_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA6_RLC6_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA6_RLC6_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA6_RLC6_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_CNTL 0x0398
+#define mmSDMA6_RLC7_RB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_BASE 0x0399
+#define mmSDMA6_RLC7_RB_BASE_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA6_RLC7_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_RPTR 0x039b
+#define mmSDMA6_RLC7_RB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA6_RLC7_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_WPTR 0x039d
+#define mmSDMA6_RLC7_RB_WPTR_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA6_RLC7_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA6_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA6_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA6_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_CNTL 0x03a2
+#define mmSDMA6_RLC7_IB_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_RPTR 0x03a3
+#define mmSDMA6_RLC7_IB_RPTR_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA6_RLC7_IB_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA6_RLC7_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA6_RLC7_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_SIZE 0x03a7
+#define mmSDMA6_RLC7_IB_SIZE_BASE_IDX 1
+#define mmSDMA6_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA6_RLC7_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA6_RLC7_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC7_DOORBELL 0x03aa
+#define mmSDMA6_RLC7_DOORBELL_BASE_IDX 1
+#define mmSDMA6_RLC7_STATUS 0x03c0
+#define mmSDMA6_RLC7_STATUS_BASE_IDX 1
+#define mmSDMA6_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA6_RLC7_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA6_RLC7_WATERMARK 0x03c2
+#define mmSDMA6_RLC7_WATERMARK_BASE_IDX 1
+#define mmSDMA6_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA6_RLC7_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA6_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA6_RLC7_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA6_RLC7_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA6_RLC7_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA6_RLC7_PREEMPT 0x03c8
+#define mmSDMA6_RLC7_PREEMPT_BASE_IDX 1
+#define mmSDMA6_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA6_RLC7_DUMMY_REG_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA6_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA6_RLC7_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA6_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA6_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA6_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA6_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA6_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA6_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA6_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA6_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA6_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA6_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA6_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA6_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA6_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..55569f5d8eae
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma6_4_2_2_SH_MASK_HEADER
+#define _sdma6_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma6_sdma6dec
+//SDMA6_UCODE_ADDR
+#define SDMA6_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA6_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA6_UCODE_DATA
+#define SDMA6_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA6_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_VM_CNTL
+#define SDMA6_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA6_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA6_VM_CTX_LO
+#define SDMA6_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA6_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_VM_CTX_HI
+#define SDMA6_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA6_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_ACTIVE_FCN_ID
+#define SDMA6_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA6_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA6_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA6_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA6_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA6_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA6_VM_CTX_CNTL
+#define SDMA6_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA6_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA6_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA6_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA6_VIRT_RESET_REQ
+#define SDMA6_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA6_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA6_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA6_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA6_VF_ENABLE
+#define SDMA6_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA6_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA6_CONTEXT_REG_TYPE0
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE__SHIFT 0x1
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_DOORBELL__SHIFT 0x12
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA6_CONTEXT_REG_TYPE1
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_STATUS__SHIFT 0x8
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_WATERMARK__SHIFT 0xa
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA6_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_PREEMPT__SHIFT 0x10
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA6_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_STATUS_MASK 0x00000100L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA6_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA6_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA6_CONTEXT_REG_TYPE2
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA6_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA6_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA6_CONTEXT_REG_TYPE3
+#define SDMA6_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA6_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA6_PUB_REG_TYPE0
+#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_ADDR__SHIFT 0x0
+#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_DATA__SHIFT 0x1
+#define SDMA6_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CNTL__SHIFT 0x4
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_LO__SHIFT 0x5
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_HI__SHIFT 0x6
+#define SDMA6_PUB_REG_TYPE0__SDMA6_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA6_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA6_PUB_REG_TYPE0__SDMA6_MMHUB_CNTL__SHIFT 0x13
+#define SDMA6_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA6_PUB_REG_TYPE0__SDMA6_POWER_CNTL__SHIFT 0x1a
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CLK_CTRL__SHIFT 0x1b
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CNTL__SHIFT 0x1c
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_ADDR_MASK 0x00000001L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_DATA_MASK 0x00000002L
+#define SDMA6_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CNTL_MASK 0x00000010L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_LO_MASK 0x00000020L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_HI_MASK 0x00000040L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA6_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA6_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_POWER_CNTL_MASK 0x04000000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CLK_CTRL_MASK 0x08000000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CNTL_MASK 0x10000000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA6_PUB_REG_TYPE1
+#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA6_PUB_REG_TYPE1__SDMA6_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA6_PUB_REG_TYPE1__SDMA6_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA6_PUB_REG_TYPE1__SDMA6_PROGRAM__SHIFT 0x4
+#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS_REG__SHIFT 0x5
+#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS1_REG__SHIFT 0x6
+#define SDMA6_PUB_REG_TYPE1__SDMA6_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA6_PUB_REG_TYPE1__SDMA6_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA6_PUB_REG_TYPE1__SDMA6_F32_CNTL__SHIFT 0xa
+#define SDMA6_PUB_REG_TYPE1__SDMA6_FREEZE__SHIFT 0xb
+#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA6_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_CONFIG__SHIFT 0x12
+#define SDMA6_PUB_REG_TYPE1__SDMA6_BA_THRESHOLD__SHIFT 0x13
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ID__SHIFT 0x14
+#define SDMA6_PUB_REG_TYPE1__SDMA6_VERSION__SHIFT 0x15
+#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER__SHIFT 0x16
+#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS2_REG__SHIFT 0x18
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_PROGRAM_MASK 0x00000010L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS_REG_MASK 0x00000020L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS1_REG_MASK 0x00000040L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_F32_CNTL_MASK 0x00000400L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_FREEZE_MASK 0x00000800L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA6_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_CONFIG_MASK 0x00040000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ID_MASK 0x00100000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_VERSION_MASK 0x00200000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER_MASK 0x00400000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS2_REG_MASK 0x01000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA6_PUB_REG_TYPE2
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV0__SHIFT 0x0
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV1__SHIFT 0x1
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV2__SHIFT 0x2
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_PAGE__SHIFT 0x8
+#define SDMA6_PUB_REG_TYPE2__SDMA6_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA6_PUB_REG_TYPE2__SDMA6_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA6_PUB_REG_TYPE2__SDMA6_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA6_PUB_REG_TYPE2__SDMA6_STATUS3_REG__SHIFT 0xc
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA6_PUB_REG_TYPE2__SDMA6_ERROR_LOG__SHIFT 0x10
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA6_PUB_REG_TYPE2__SDMA6_F32_COUNTER__SHIFT 0x15
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UNBREAKABLE__SHIFT 0x16
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFMON_CNTL__SHIFT 0x17
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA6_PUB_REG_TYPE2__SDMA6_CRD_CNTL__SHIFT 0x1b
+#define SDMA6_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA6_PUB_REG_TYPE2__SDMA6_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA6_PUB_REG_TYPE2__SDMA6_ULV_CNTL__SHIFT 0x1e
+#define SDMA6_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV0_MASK 0x00000001L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV1_MASK 0x00000002L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV2_MASK 0x00000004L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_STATUS3_REG_MASK 0x00001000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_ERROR_LOG_MASK 0x00010000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_F32_COUNTER_MASK 0x00200000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_UNBREAKABLE_MASK 0x00400000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_CRD_CNTL_MASK 0x08000000L
+#define SDMA6_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA6_PUB_REG_TYPE2__SDMA6_ULV_CNTL_MASK 0x40000000L
+#define SDMA6_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA6_PUB_REG_TYPE3
+#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA6_PUB_REG_TYPE3__SDMA6_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA6_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA6_PUB_REG_TYPE3__SDMA6_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA6_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA6_MMHUB_CNTL
+#define SDMA6_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA6_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA6_CONTEXT_GROUP_BOUNDARY
+#define SDMA6_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA6_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA6_POWER_CNTL
+#define SDMA6_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA6_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA6_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA6_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA6_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA6_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA6_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA6_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA6_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA6_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA6_CLK_CTRL
+#define SDMA6_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA6_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA6_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA6_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA6_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA6_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA6_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA6_CNTL
+#define SDMA6_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA6_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA6_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA6_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA6_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA6_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA6_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA6_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA6_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA6_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA6_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA6_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA6_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA6_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA6_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA6_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA6_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA6_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA6_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA6_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA6_CHICKEN_BITS
+#define SDMA6_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA6_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA6_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA6_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA6_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA6_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA6_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA6_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA6_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA6_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA6_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA6_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA6_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA6_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA6_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA6_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA6_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA6_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA6_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA6_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA6_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA6_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA6_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA6_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA6_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA6_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA6_GB_ADDR_CONFIG
+#define SDMA6_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA6_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA6_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA6_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA6_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA6_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA6_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA6_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA6_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA6_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA6_GB_ADDR_CONFIG_READ
+#define SDMA6_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA6_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA6_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA6_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA6_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA6_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA6_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA6_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA6_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA6_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA6_RB_RPTR_FETCH_HI
+#define SDMA6_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA6_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA6_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA6_RB_RPTR_FETCH
+#define SDMA6_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA6_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA6_IB_OFFSET_FETCH
+#define SDMA6_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA6_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA6_PROGRAM
+#define SDMA6_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA6_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA6_STATUS_REG
+#define SDMA6_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA6_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA6_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA6_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA6_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA6_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA6_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA6_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA6_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA6_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA6_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA6_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA6_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA6_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA6_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA6_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA6_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA6_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA6_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA6_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA6_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA6_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA6_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA6_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA6_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA6_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA6_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA6_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA6_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA6_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA6_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA6_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA6_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA6_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA6_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA6_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA6_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA6_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA6_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA6_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA6_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA6_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA6_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA6_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA6_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA6_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA6_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA6_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA6_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA6_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA6_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA6_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA6_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA6_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA6_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA6_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA6_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA6_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA6_STATUS1_REG
+#define SDMA6_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA6_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA6_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA6_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA6_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA6_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA6_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA6_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA6_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA6_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA6_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA6_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA6_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA6_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA6_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA6_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA6_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA6_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA6_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA6_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA6_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA6_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA6_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA6_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA6_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA6_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA6_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA6_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA6_RD_BURST_CNTL
+#define SDMA6_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA6_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA6_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA6_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA6_HBM_PAGE_CONFIG
+#define SDMA6_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA6_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA6_UCODE_CHECKSUM
+#define SDMA6_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA6_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA6_F32_CNTL
+#define SDMA6_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA6_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA6_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA6_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA6_FREEZE
+#define SDMA6_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA6_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA6_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA6_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA6_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA6_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA6_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA6_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA6_PHASE0_QUANTUM
+#define SDMA6_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA6_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA6_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA6_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA6_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA6_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA6_PHASE1_QUANTUM
+#define SDMA6_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA6_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA6_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA6_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA6_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA6_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA6_EDC_CONFIG
+#define SDMA6_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA6_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA6_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA6_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA6_BA_THRESHOLD
+#define SDMA6_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA6_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA6_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA6_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA6_ID
+#define SDMA6_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA6_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA6_VERSION
+#define SDMA6_VERSION__MINVER__SHIFT 0x0
+#define SDMA6_VERSION__MAJVER__SHIFT 0x8
+#define SDMA6_VERSION__REV__SHIFT 0x10
+#define SDMA6_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA6_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA6_VERSION__REV_MASK 0x003F0000L
+//SDMA6_EDC_COUNTER
+#define SDMA6_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA6_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA6_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA6_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA6_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA6_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA6_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA6_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA6_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA6_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA6_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA6_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA6_EDC_COUNTER_CLEAR
+#define SDMA6_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA6_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA6_STATUS2_REG
+#define SDMA6_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA6_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA6_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA6_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA6_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA6_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA6_ATOMIC_CNTL
+#define SDMA6_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA6_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA6_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA6_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA6_ATOMIC_PREOP_LO
+#define SDMA6_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA6_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA6_ATOMIC_PREOP_HI
+#define SDMA6_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA6_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA6_UTCL1_CNTL
+#define SDMA6_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA6_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA6_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA6_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA6_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA6_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA6_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA6_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA6_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA6_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA6_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA6_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA6_UTCL1_WATERMK
+#define SDMA6_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA6_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA6_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA6_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA6_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA6_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA6_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA6_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA6_UTCL1_RD_STATUS
+#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA6_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA6_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA6_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA6_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA6_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA6_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA6_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA6_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA6_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA6_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA6_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA6_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA6_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA6_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA6_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA6_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA6_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA6_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA6_UTCL1_WR_STATUS
+#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA6_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA6_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA6_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA6_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA6_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA6_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA6_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA6_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA6_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA6_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA6_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA6_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA6_UTCL1_INV0
+#define SDMA6_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA6_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA6_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA6_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA6_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA6_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA6_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA6_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA6_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA6_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA6_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA6_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA6_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA6_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA6_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA6_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA6_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA6_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA6_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA6_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA6_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA6_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA6_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA6_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA6_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA6_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA6_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA6_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA6_UTCL1_INV1
+#define SDMA6_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA6_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA6_UTCL1_INV2
+#define SDMA6_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA6_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA6_UTCL1_RD_XNACK0
+#define SDMA6_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA6_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA6_UTCL1_RD_XNACK1
+#define SDMA6_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA6_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA6_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA6_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA6_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA6_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA6_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA6_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA6_UTCL1_WR_XNACK0
+#define SDMA6_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA6_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA6_UTCL1_WR_XNACK1
+#define SDMA6_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA6_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA6_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA6_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA6_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA6_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA6_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA6_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA6_UTCL1_TIMEOUT
+#define SDMA6_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA6_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA6_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA6_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA6_UTCL1_PAGE
+#define SDMA6_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA6_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA6_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA6_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA6_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA6_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA6_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA6_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA6_POWER_CNTL_IDLE
+#define SDMA6_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA6_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA6_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA6_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA6_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA6_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA6_RELAX_ORDERING_LUT
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA6_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA6_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA6_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA6_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA6_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA6_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA6_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA6_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA6_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA6_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA6_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA6_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA6_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA6_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA6_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA6_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA6_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA6_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA6_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA6_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA6_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA6_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA6_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA6_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA6_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA6_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA6_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA6_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA6_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA6_CHICKEN_BITS_2
+#define SDMA6_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA6_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA6_STATUS3_REG
+#define SDMA6_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA6_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA6_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA6_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA6_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA6_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA6_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA6_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA6_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA6_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA6_PHYSICAL_ADDR_LO
+#define SDMA6_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA6_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA6_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA6_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA6_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA6_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA6_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA6_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA6_PHYSICAL_ADDR_HI
+#define SDMA6_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA6_PHASE2_QUANTUM
+#define SDMA6_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA6_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA6_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA6_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA6_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA6_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA6_ERROR_LOG
+#define SDMA6_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA6_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA6_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA6_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA6_PUB_DUMMY_REG0
+#define SDMA6_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA6_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_PUB_DUMMY_REG1
+#define SDMA6_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA6_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_PUB_DUMMY_REG2
+#define SDMA6_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA6_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_PUB_DUMMY_REG3
+#define SDMA6_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA6_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_F32_COUNTER
+#define SDMA6_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA6_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_UNBREAKABLE
+#define SDMA6_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA6_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA6_PERFMON_CNTL
+#define SDMA6_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA6_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA6_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA6_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA6_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA6_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA6_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA6_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA6_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA6_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA6_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA6_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA6_PERFCOUNTER0_RESULT
+#define SDMA6_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA6_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA6_PERFCOUNTER1_RESULT
+#define SDMA6_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA6_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA6_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA6_CRD_CNTL
+#define SDMA6_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA6_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA6_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA6_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA6_GPU_IOV_VIOLATION_LOG
+#define SDMA6_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA6_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA6_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA6_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA6_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA6_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA6_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA6_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA6_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA6_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA6_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA6_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA6_ULV_CNTL
+#define SDMA6_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA6_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA6_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA6_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA6_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA6_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA6_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA6_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA6_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA6_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA6_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA6_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA6_EA_DBIT_ADDR_DATA
+#define SDMA6_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA6_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA6_EA_DBIT_ADDR_INDEX
+#define SDMA6_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA6_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA6_GPU_IOV_VIOLATION_LOG2
+#define SDMA6_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA6_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA6_GFX_RB_CNTL
+#define SDMA6_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_GFX_RB_BASE
+#define SDMA6_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_BASE_HI
+#define SDMA6_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_GFX_RB_RPTR
+#define SDMA6_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_RPTR_HI
+#define SDMA6_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_WPTR
+#define SDMA6_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_WPTR_HI
+#define SDMA6_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_WPTR_POLL_CNTL
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_GFX_RB_RPTR_ADDR_HI
+#define SDMA6_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_RPTR_ADDR_LO
+#define SDMA6_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_GFX_IB_CNTL
+#define SDMA6_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_GFX_IB_RPTR
+#define SDMA6_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_GFX_IB_OFFSET
+#define SDMA6_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_GFX_IB_BASE_LO
+#define SDMA6_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_GFX_IB_BASE_HI
+#define SDMA6_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_GFX_IB_SIZE
+#define SDMA6_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_GFX_SKIP_CNTL
+#define SDMA6_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_GFX_CONTEXT_STATUS
+#define SDMA6_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_GFX_DOORBELL
+#define SDMA6_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_GFX_CONTEXT_CNTL
+#define SDMA6_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA6_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA6_GFX_STATUS
+#define SDMA6_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_GFX_DOORBELL_LOG
+#define SDMA6_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_GFX_WATERMARK
+#define SDMA6_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_GFX_DOORBELL_OFFSET
+#define SDMA6_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_GFX_CSA_ADDR_LO
+#define SDMA6_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_GFX_CSA_ADDR_HI
+#define SDMA6_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_GFX_IB_SUB_REMAIN
+#define SDMA6_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_GFX_PREEMPT
+#define SDMA6_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_GFX_DUMMY_REG
+#define SDMA6_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_GFX_RB_AQL_CNTL
+#define SDMA6_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_GFX_MINOR_PTR_UPDATE
+#define SDMA6_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_GFX_MIDCMD_DATA0
+#define SDMA6_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA1
+#define SDMA6_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA2
+#define SDMA6_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA3
+#define SDMA6_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA4
+#define SDMA6_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA5
+#define SDMA6_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA6
+#define SDMA6_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA7
+#define SDMA6_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_DATA8
+#define SDMA6_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_GFX_MIDCMD_CNTL
+#define SDMA6_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_PAGE_RB_CNTL
+#define SDMA6_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_PAGE_RB_BASE
+#define SDMA6_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_BASE_HI
+#define SDMA6_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_PAGE_RB_RPTR
+#define SDMA6_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_RPTR_HI
+#define SDMA6_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_WPTR
+#define SDMA6_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_WPTR_HI
+#define SDMA6_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_PAGE_RB_RPTR_ADDR_HI
+#define SDMA6_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_RPTR_ADDR_LO
+#define SDMA6_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_PAGE_IB_CNTL
+#define SDMA6_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_PAGE_IB_RPTR
+#define SDMA6_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_PAGE_IB_OFFSET
+#define SDMA6_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_PAGE_IB_BASE_LO
+#define SDMA6_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_PAGE_IB_BASE_HI
+#define SDMA6_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_IB_SIZE
+#define SDMA6_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_PAGE_SKIP_CNTL
+#define SDMA6_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_PAGE_CONTEXT_STATUS
+#define SDMA6_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_PAGE_DOORBELL
+#define SDMA6_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_PAGE_STATUS
+#define SDMA6_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_PAGE_DOORBELL_LOG
+#define SDMA6_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_PAGE_WATERMARK
+#define SDMA6_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_PAGE_DOORBELL_OFFSET
+#define SDMA6_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_PAGE_CSA_ADDR_LO
+#define SDMA6_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_PAGE_CSA_ADDR_HI
+#define SDMA6_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_IB_SUB_REMAIN
+#define SDMA6_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_PAGE_PREEMPT
+#define SDMA6_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_PAGE_DUMMY_REG
+#define SDMA6_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_PAGE_RB_AQL_CNTL
+#define SDMA6_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_PAGE_MINOR_PTR_UPDATE
+#define SDMA6_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_PAGE_MIDCMD_DATA0
+#define SDMA6_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA1
+#define SDMA6_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA2
+#define SDMA6_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA3
+#define SDMA6_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA4
+#define SDMA6_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA5
+#define SDMA6_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA6
+#define SDMA6_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA7
+#define SDMA6_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_DATA8
+#define SDMA6_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_PAGE_MIDCMD_CNTL
+#define SDMA6_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC0_RB_CNTL
+#define SDMA6_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC0_RB_BASE
+#define SDMA6_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_BASE_HI
+#define SDMA6_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC0_RB_RPTR
+#define SDMA6_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_RPTR_HI
+#define SDMA6_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_WPTR
+#define SDMA6_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_WPTR_HI
+#define SDMA6_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC0_RB_RPTR_ADDR_HI
+#define SDMA6_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_RPTR_ADDR_LO
+#define SDMA6_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC0_IB_CNTL
+#define SDMA6_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC0_IB_RPTR
+#define SDMA6_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC0_IB_OFFSET
+#define SDMA6_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC0_IB_BASE_LO
+#define SDMA6_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC0_IB_BASE_HI
+#define SDMA6_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_IB_SIZE
+#define SDMA6_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC0_SKIP_CNTL
+#define SDMA6_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC0_CONTEXT_STATUS
+#define SDMA6_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC0_DOORBELL
+#define SDMA6_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC0_STATUS
+#define SDMA6_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC0_DOORBELL_LOG
+#define SDMA6_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC0_WATERMARK
+#define SDMA6_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC0_DOORBELL_OFFSET
+#define SDMA6_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC0_CSA_ADDR_LO
+#define SDMA6_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC0_CSA_ADDR_HI
+#define SDMA6_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_IB_SUB_REMAIN
+#define SDMA6_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC0_PREEMPT
+#define SDMA6_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC0_DUMMY_REG
+#define SDMA6_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC0_RB_AQL_CNTL
+#define SDMA6_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC0_MINOR_PTR_UPDATE
+#define SDMA6_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC0_MIDCMD_DATA0
+#define SDMA6_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA1
+#define SDMA6_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA2
+#define SDMA6_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA3
+#define SDMA6_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA4
+#define SDMA6_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA5
+#define SDMA6_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA6
+#define SDMA6_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA7
+#define SDMA6_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_DATA8
+#define SDMA6_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC0_MIDCMD_CNTL
+#define SDMA6_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC1_RB_CNTL
+#define SDMA6_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC1_RB_BASE
+#define SDMA6_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_BASE_HI
+#define SDMA6_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC1_RB_RPTR
+#define SDMA6_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_RPTR_HI
+#define SDMA6_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_WPTR
+#define SDMA6_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_WPTR_HI
+#define SDMA6_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC1_RB_RPTR_ADDR_HI
+#define SDMA6_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_RPTR_ADDR_LO
+#define SDMA6_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC1_IB_CNTL
+#define SDMA6_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC1_IB_RPTR
+#define SDMA6_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC1_IB_OFFSET
+#define SDMA6_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC1_IB_BASE_LO
+#define SDMA6_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC1_IB_BASE_HI
+#define SDMA6_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_IB_SIZE
+#define SDMA6_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC1_SKIP_CNTL
+#define SDMA6_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC1_CONTEXT_STATUS
+#define SDMA6_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC1_DOORBELL
+#define SDMA6_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC1_STATUS
+#define SDMA6_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC1_DOORBELL_LOG
+#define SDMA6_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC1_WATERMARK
+#define SDMA6_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC1_DOORBELL_OFFSET
+#define SDMA6_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC1_CSA_ADDR_LO
+#define SDMA6_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC1_CSA_ADDR_HI
+#define SDMA6_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_IB_SUB_REMAIN
+#define SDMA6_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC1_PREEMPT
+#define SDMA6_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC1_DUMMY_REG
+#define SDMA6_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC1_RB_AQL_CNTL
+#define SDMA6_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC1_MINOR_PTR_UPDATE
+#define SDMA6_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC1_MIDCMD_DATA0
+#define SDMA6_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA1
+#define SDMA6_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA2
+#define SDMA6_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA3
+#define SDMA6_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA4
+#define SDMA6_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA5
+#define SDMA6_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA6
+#define SDMA6_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA7
+#define SDMA6_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_DATA8
+#define SDMA6_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC1_MIDCMD_CNTL
+#define SDMA6_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC2_RB_CNTL
+#define SDMA6_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC2_RB_BASE
+#define SDMA6_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_BASE_HI
+#define SDMA6_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC2_RB_RPTR
+#define SDMA6_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_RPTR_HI
+#define SDMA6_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_WPTR
+#define SDMA6_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_WPTR_HI
+#define SDMA6_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC2_RB_RPTR_ADDR_HI
+#define SDMA6_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_RPTR_ADDR_LO
+#define SDMA6_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC2_IB_CNTL
+#define SDMA6_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC2_IB_RPTR
+#define SDMA6_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC2_IB_OFFSET
+#define SDMA6_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC2_IB_BASE_LO
+#define SDMA6_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC2_IB_BASE_HI
+#define SDMA6_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_IB_SIZE
+#define SDMA6_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC2_SKIP_CNTL
+#define SDMA6_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC2_CONTEXT_STATUS
+#define SDMA6_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC2_DOORBELL
+#define SDMA6_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC2_STATUS
+#define SDMA6_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC2_DOORBELL_LOG
+#define SDMA6_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC2_WATERMARK
+#define SDMA6_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC2_DOORBELL_OFFSET
+#define SDMA6_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC2_CSA_ADDR_LO
+#define SDMA6_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC2_CSA_ADDR_HI
+#define SDMA6_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_IB_SUB_REMAIN
+#define SDMA6_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC2_PREEMPT
+#define SDMA6_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC2_DUMMY_REG
+#define SDMA6_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC2_RB_AQL_CNTL
+#define SDMA6_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC2_MINOR_PTR_UPDATE
+#define SDMA6_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC2_MIDCMD_DATA0
+#define SDMA6_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA1
+#define SDMA6_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA2
+#define SDMA6_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA3
+#define SDMA6_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA4
+#define SDMA6_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA5
+#define SDMA6_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA6
+#define SDMA6_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA7
+#define SDMA6_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_DATA8
+#define SDMA6_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC2_MIDCMD_CNTL
+#define SDMA6_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC3_RB_CNTL
+#define SDMA6_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC3_RB_BASE
+#define SDMA6_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_BASE_HI
+#define SDMA6_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC3_RB_RPTR
+#define SDMA6_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_RPTR_HI
+#define SDMA6_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_WPTR
+#define SDMA6_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_WPTR_HI
+#define SDMA6_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC3_RB_RPTR_ADDR_HI
+#define SDMA6_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_RPTR_ADDR_LO
+#define SDMA6_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC3_IB_CNTL
+#define SDMA6_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC3_IB_RPTR
+#define SDMA6_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC3_IB_OFFSET
+#define SDMA6_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC3_IB_BASE_LO
+#define SDMA6_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC3_IB_BASE_HI
+#define SDMA6_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_IB_SIZE
+#define SDMA6_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC3_SKIP_CNTL
+#define SDMA6_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC3_CONTEXT_STATUS
+#define SDMA6_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC3_DOORBELL
+#define SDMA6_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC3_STATUS
+#define SDMA6_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC3_DOORBELL_LOG
+#define SDMA6_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC3_WATERMARK
+#define SDMA6_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC3_DOORBELL_OFFSET
+#define SDMA6_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC3_CSA_ADDR_LO
+#define SDMA6_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC3_CSA_ADDR_HI
+#define SDMA6_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_IB_SUB_REMAIN
+#define SDMA6_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC3_PREEMPT
+#define SDMA6_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC3_DUMMY_REG
+#define SDMA6_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC3_RB_AQL_CNTL
+#define SDMA6_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC3_MINOR_PTR_UPDATE
+#define SDMA6_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC3_MIDCMD_DATA0
+#define SDMA6_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA1
+#define SDMA6_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA2
+#define SDMA6_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA3
+#define SDMA6_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA4
+#define SDMA6_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA5
+#define SDMA6_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA6
+#define SDMA6_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA7
+#define SDMA6_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_DATA8
+#define SDMA6_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC3_MIDCMD_CNTL
+#define SDMA6_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC4_RB_CNTL
+#define SDMA6_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC4_RB_BASE
+#define SDMA6_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_BASE_HI
+#define SDMA6_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC4_RB_RPTR
+#define SDMA6_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_RPTR_HI
+#define SDMA6_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_WPTR
+#define SDMA6_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_WPTR_HI
+#define SDMA6_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC4_RB_RPTR_ADDR_HI
+#define SDMA6_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_RPTR_ADDR_LO
+#define SDMA6_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC4_IB_CNTL
+#define SDMA6_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC4_IB_RPTR
+#define SDMA6_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC4_IB_OFFSET
+#define SDMA6_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC4_IB_BASE_LO
+#define SDMA6_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC4_IB_BASE_HI
+#define SDMA6_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_IB_SIZE
+#define SDMA6_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC4_SKIP_CNTL
+#define SDMA6_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC4_CONTEXT_STATUS
+#define SDMA6_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC4_DOORBELL
+#define SDMA6_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC4_STATUS
+#define SDMA6_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC4_DOORBELL_LOG
+#define SDMA6_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC4_WATERMARK
+#define SDMA6_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC4_DOORBELL_OFFSET
+#define SDMA6_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC4_CSA_ADDR_LO
+#define SDMA6_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC4_CSA_ADDR_HI
+#define SDMA6_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_IB_SUB_REMAIN
+#define SDMA6_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC4_PREEMPT
+#define SDMA6_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC4_DUMMY_REG
+#define SDMA6_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC4_RB_AQL_CNTL
+#define SDMA6_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC4_MINOR_PTR_UPDATE
+#define SDMA6_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC4_MIDCMD_DATA0
+#define SDMA6_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA1
+#define SDMA6_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA2
+#define SDMA6_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA3
+#define SDMA6_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA4
+#define SDMA6_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA5
+#define SDMA6_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA6
+#define SDMA6_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA7
+#define SDMA6_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_DATA8
+#define SDMA6_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC4_MIDCMD_CNTL
+#define SDMA6_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC5_RB_CNTL
+#define SDMA6_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC5_RB_BASE
+#define SDMA6_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_BASE_HI
+#define SDMA6_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC5_RB_RPTR
+#define SDMA6_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_RPTR_HI
+#define SDMA6_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_WPTR
+#define SDMA6_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_WPTR_HI
+#define SDMA6_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC5_RB_RPTR_ADDR_HI
+#define SDMA6_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_RPTR_ADDR_LO
+#define SDMA6_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC5_IB_CNTL
+#define SDMA6_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC5_IB_RPTR
+#define SDMA6_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC5_IB_OFFSET
+#define SDMA6_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC5_IB_BASE_LO
+#define SDMA6_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC5_IB_BASE_HI
+#define SDMA6_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_IB_SIZE
+#define SDMA6_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC5_SKIP_CNTL
+#define SDMA6_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC5_CONTEXT_STATUS
+#define SDMA6_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC5_DOORBELL
+#define SDMA6_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC5_STATUS
+#define SDMA6_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC5_DOORBELL_LOG
+#define SDMA6_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC5_WATERMARK
+#define SDMA6_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC5_DOORBELL_OFFSET
+#define SDMA6_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC5_CSA_ADDR_LO
+#define SDMA6_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC5_CSA_ADDR_HI
+#define SDMA6_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_IB_SUB_REMAIN
+#define SDMA6_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC5_PREEMPT
+#define SDMA6_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC5_DUMMY_REG
+#define SDMA6_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC5_RB_AQL_CNTL
+#define SDMA6_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC5_MINOR_PTR_UPDATE
+#define SDMA6_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC5_MIDCMD_DATA0
+#define SDMA6_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA1
+#define SDMA6_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA2
+#define SDMA6_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA3
+#define SDMA6_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA4
+#define SDMA6_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA5
+#define SDMA6_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA6
+#define SDMA6_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA7
+#define SDMA6_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_DATA8
+#define SDMA6_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC5_MIDCMD_CNTL
+#define SDMA6_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC6_RB_CNTL
+#define SDMA6_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC6_RB_BASE
+#define SDMA6_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_BASE_HI
+#define SDMA6_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC6_RB_RPTR
+#define SDMA6_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_RPTR_HI
+#define SDMA6_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_WPTR
+#define SDMA6_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_WPTR_HI
+#define SDMA6_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC6_RB_RPTR_ADDR_HI
+#define SDMA6_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_RPTR_ADDR_LO
+#define SDMA6_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC6_IB_CNTL
+#define SDMA6_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC6_IB_RPTR
+#define SDMA6_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC6_IB_OFFSET
+#define SDMA6_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC6_IB_BASE_LO
+#define SDMA6_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC6_IB_BASE_HI
+#define SDMA6_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_IB_SIZE
+#define SDMA6_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC6_SKIP_CNTL
+#define SDMA6_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC6_CONTEXT_STATUS
+#define SDMA6_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC6_DOORBELL
+#define SDMA6_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC6_STATUS
+#define SDMA6_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC6_DOORBELL_LOG
+#define SDMA6_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC6_WATERMARK
+#define SDMA6_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC6_DOORBELL_OFFSET
+#define SDMA6_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC6_CSA_ADDR_LO
+#define SDMA6_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC6_CSA_ADDR_HI
+#define SDMA6_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_IB_SUB_REMAIN
+#define SDMA6_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC6_PREEMPT
+#define SDMA6_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC6_DUMMY_REG
+#define SDMA6_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC6_RB_AQL_CNTL
+#define SDMA6_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC6_MINOR_PTR_UPDATE
+#define SDMA6_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC6_MIDCMD_DATA0
+#define SDMA6_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA1
+#define SDMA6_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA2
+#define SDMA6_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA3
+#define SDMA6_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA4
+#define SDMA6_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA5
+#define SDMA6_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA6
+#define SDMA6_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA7
+#define SDMA6_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_DATA8
+#define SDMA6_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC6_MIDCMD_CNTL
+#define SDMA6_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA6_RLC7_RB_CNTL
+#define SDMA6_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA6_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA6_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA6_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA6_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA6_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA6_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA6_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA6_RLC7_RB_BASE
+#define SDMA6_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA6_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_BASE_HI
+#define SDMA6_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA6_RLC7_RB_RPTR
+#define SDMA6_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_RPTR_HI
+#define SDMA6_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_WPTR
+#define SDMA6_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA6_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_WPTR_HI
+#define SDMA6_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA6_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA6_RLC7_RB_RPTR_ADDR_HI
+#define SDMA6_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_RPTR_ADDR_LO
+#define SDMA6_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA6_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA6_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC7_IB_CNTL
+#define SDMA6_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA6_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA6_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA6_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA6_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA6_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA6_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA6_RLC7_IB_RPTR
+#define SDMA6_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA6_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC7_IB_OFFSET
+#define SDMA6_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA6_RLC7_IB_BASE_LO
+#define SDMA6_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA6_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA6_RLC7_IB_BASE_HI
+#define SDMA6_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_IB_SIZE
+#define SDMA6_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA6_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC7_SKIP_CNTL
+#define SDMA6_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA6_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA6_RLC7_CONTEXT_STATUS
+#define SDMA6_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA6_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA6_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA6_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA6_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA6_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA6_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA6_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA6_RLC7_DOORBELL
+#define SDMA6_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA6_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA6_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA6_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA6_RLC7_STATUS
+#define SDMA6_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA6_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA6_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA6_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA6_RLC7_DOORBELL_LOG
+#define SDMA6_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA6_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA6_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA6_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA6_RLC7_WATERMARK
+#define SDMA6_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA6_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA6_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA6_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA6_RLC7_DOORBELL_OFFSET
+#define SDMA6_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA6_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA6_RLC7_CSA_ADDR_LO
+#define SDMA6_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC7_CSA_ADDR_HI
+#define SDMA6_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_IB_SUB_REMAIN
+#define SDMA6_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA6_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA6_RLC7_PREEMPT
+#define SDMA6_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA6_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA6_RLC7_DUMMY_REG
+#define SDMA6_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA6_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA6_RLC7_RB_AQL_CNTL
+#define SDMA6_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA6_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA6_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA6_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA6_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA6_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA6_RLC7_MINOR_PTR_UPDATE
+#define SDMA6_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA6_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA6_RLC7_MIDCMD_DATA0
+#define SDMA6_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA1
+#define SDMA6_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA2
+#define SDMA6_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA3
+#define SDMA6_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA4
+#define SDMA6_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA5
+#define SDMA6_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA6
+#define SDMA6_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA7
+#define SDMA6_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_DATA8
+#define SDMA6_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA6_RLC7_MIDCMD_CNTL
+#define SDMA6_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA6_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA6_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA6_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA6_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA6_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA6_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA6_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h
new file mode 100644
index 000000000000..10f387202af6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma7_4_2_2_OFFSET_HEADER
+#define _sdma7_4_2_2_OFFSET_HEADER
+
+
+
+// addressBlock: sdma7_sdma7dec
+// base address: 0x7d000
+#define mmSDMA7_UCODE_ADDR 0x0000
+#define mmSDMA7_UCODE_ADDR_BASE_IDX 1
+#define mmSDMA7_UCODE_DATA 0x0001
+#define mmSDMA7_UCODE_DATA_BASE_IDX 1
+#define mmSDMA7_VM_CNTL 0x0004
+#define mmSDMA7_VM_CNTL_BASE_IDX 1
+#define mmSDMA7_VM_CTX_LO 0x0005
+#define mmSDMA7_VM_CTX_LO_BASE_IDX 1
+#define mmSDMA7_VM_CTX_HI 0x0006
+#define mmSDMA7_VM_CTX_HI_BASE_IDX 1
+#define mmSDMA7_ACTIVE_FCN_ID 0x0007
+#define mmSDMA7_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmSDMA7_VM_CTX_CNTL 0x0008
+#define mmSDMA7_VM_CTX_CNTL_BASE_IDX 1
+#define mmSDMA7_VIRT_RESET_REQ 0x0009
+#define mmSDMA7_VIRT_RESET_REQ_BASE_IDX 1
+#define mmSDMA7_VF_ENABLE 0x000a
+#define mmSDMA7_VF_ENABLE_BASE_IDX 1
+#define mmSDMA7_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA7_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define mmSDMA7_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA7_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define mmSDMA7_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA7_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define mmSDMA7_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA7_CONTEXT_REG_TYPE3_BASE_IDX 1
+#define mmSDMA7_PUB_REG_TYPE0 0x000f
+#define mmSDMA7_PUB_REG_TYPE0_BASE_IDX 1
+#define mmSDMA7_PUB_REG_TYPE1 0x0010
+#define mmSDMA7_PUB_REG_TYPE1_BASE_IDX 1
+#define mmSDMA7_PUB_REG_TYPE2 0x0011
+#define mmSDMA7_PUB_REG_TYPE2_BASE_IDX 1
+#define mmSDMA7_PUB_REG_TYPE3 0x0012
+#define mmSDMA7_PUB_REG_TYPE3_BASE_IDX 1
+#define mmSDMA7_MMHUB_CNTL 0x0013
+#define mmSDMA7_MMHUB_CNTL_BASE_IDX 1
+#define mmSDMA7_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA7_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1
+#define mmSDMA7_POWER_CNTL 0x001a
+#define mmSDMA7_POWER_CNTL_BASE_IDX 1
+#define mmSDMA7_CLK_CTRL 0x001b
+#define mmSDMA7_CLK_CTRL_BASE_IDX 1
+#define mmSDMA7_CNTL 0x001c
+#define mmSDMA7_CNTL_BASE_IDX 1
+#define mmSDMA7_CHICKEN_BITS 0x001d
+#define mmSDMA7_CHICKEN_BITS_BASE_IDX 1
+#define mmSDMA7_GB_ADDR_CONFIG 0x001e
+#define mmSDMA7_GB_ADDR_CONFIG_BASE_IDX 1
+#define mmSDMA7_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA7_GB_ADDR_CONFIG_READ_BASE_IDX 1
+#define mmSDMA7_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA7_RB_RPTR_FETCH_HI_BASE_IDX 1
+#define mmSDMA7_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA7_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1
+#define mmSDMA7_RB_RPTR_FETCH 0x0022
+#define mmSDMA7_RB_RPTR_FETCH_BASE_IDX 1
+#define mmSDMA7_IB_OFFSET_FETCH 0x0023
+#define mmSDMA7_IB_OFFSET_FETCH_BASE_IDX 1
+#define mmSDMA7_PROGRAM 0x0024
+#define mmSDMA7_PROGRAM_BASE_IDX 1
+#define mmSDMA7_STATUS_REG 0x0025
+#define mmSDMA7_STATUS_REG_BASE_IDX 1
+#define mmSDMA7_STATUS1_REG 0x0026
+#define mmSDMA7_STATUS1_REG_BASE_IDX 1
+#define mmSDMA7_RD_BURST_CNTL 0x0027
+#define mmSDMA7_RD_BURST_CNTL_BASE_IDX 1
+#define mmSDMA7_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA7_HBM_PAGE_CONFIG_BASE_IDX 1
+#define mmSDMA7_UCODE_CHECKSUM 0x0029
+#define mmSDMA7_UCODE_CHECKSUM_BASE_IDX 1
+#define mmSDMA7_F32_CNTL 0x002a
+#define mmSDMA7_F32_CNTL_BASE_IDX 1
+#define mmSDMA7_FREEZE 0x002b
+#define mmSDMA7_FREEZE_BASE_IDX 1
+#define mmSDMA7_PHASE0_QUANTUM 0x002c
+#define mmSDMA7_PHASE0_QUANTUM_BASE_IDX 1
+#define mmSDMA7_PHASE1_QUANTUM 0x002d
+#define mmSDMA7_PHASE1_QUANTUM_BASE_IDX 1
+#define mmSDMA7_EDC_CONFIG 0x0032
+#define mmSDMA7_EDC_CONFIG_BASE_IDX 1
+#define mmSDMA7_BA_THRESHOLD 0x0033
+#define mmSDMA7_BA_THRESHOLD_BASE_IDX 1
+#define mmSDMA7_ID 0x0034
+#define mmSDMA7_ID_BASE_IDX 1
+#define mmSDMA7_VERSION 0x0035
+#define mmSDMA7_VERSION_BASE_IDX 1
+#define mmSDMA7_EDC_COUNTER 0x0036
+#define mmSDMA7_EDC_COUNTER_BASE_IDX 1
+#define mmSDMA7_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA7_EDC_COUNTER_CLEAR_BASE_IDX 1
+#define mmSDMA7_STATUS2_REG 0x0038
+#define mmSDMA7_STATUS2_REG_BASE_IDX 1
+#define mmSDMA7_ATOMIC_CNTL 0x0039
+#define mmSDMA7_ATOMIC_CNTL_BASE_IDX 1
+#define mmSDMA7_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA7_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmSDMA7_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA7_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmSDMA7_UTCL1_CNTL 0x003c
+#define mmSDMA7_UTCL1_CNTL_BASE_IDX 1
+#define mmSDMA7_UTCL1_WATERMK 0x003d
+#define mmSDMA7_UTCL1_WATERMK_BASE_IDX 1
+#define mmSDMA7_UTCL1_RD_STATUS 0x003e
+#define mmSDMA7_UTCL1_RD_STATUS_BASE_IDX 1
+#define mmSDMA7_UTCL1_WR_STATUS 0x003f
+#define mmSDMA7_UTCL1_WR_STATUS_BASE_IDX 1
+#define mmSDMA7_UTCL1_INV0 0x0040
+#define mmSDMA7_UTCL1_INV0_BASE_IDX 1
+#define mmSDMA7_UTCL1_INV1 0x0041
+#define mmSDMA7_UTCL1_INV1_BASE_IDX 1
+#define mmSDMA7_UTCL1_INV2 0x0042
+#define mmSDMA7_UTCL1_INV2_BASE_IDX 1
+#define mmSDMA7_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA7_UTCL1_RD_XNACK0_BASE_IDX 1
+#define mmSDMA7_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA7_UTCL1_RD_XNACK1_BASE_IDX 1
+#define mmSDMA7_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA7_UTCL1_WR_XNACK0_BASE_IDX 1
+#define mmSDMA7_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA7_UTCL1_WR_XNACK1_BASE_IDX 1
+#define mmSDMA7_UTCL1_TIMEOUT 0x0047
+#define mmSDMA7_UTCL1_TIMEOUT_BASE_IDX 1
+#define mmSDMA7_UTCL1_PAGE 0x0048
+#define mmSDMA7_UTCL1_PAGE_BASE_IDX 1
+#define mmSDMA7_POWER_CNTL_IDLE 0x0049
+#define mmSDMA7_POWER_CNTL_IDLE_BASE_IDX 1
+#define mmSDMA7_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA7_RELAX_ORDERING_LUT_BASE_IDX 1
+#define mmSDMA7_CHICKEN_BITS_2 0x004b
+#define mmSDMA7_CHICKEN_BITS_2_BASE_IDX 1
+#define mmSDMA7_STATUS3_REG 0x004c
+#define mmSDMA7_STATUS3_REG_BASE_IDX 1
+#define mmSDMA7_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA7_PHYSICAL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA7_PHYSICAL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_PHASE2_QUANTUM 0x004f
+#define mmSDMA7_PHASE2_QUANTUM_BASE_IDX 1
+#define mmSDMA7_ERROR_LOG 0x0050
+#define mmSDMA7_ERROR_LOG_BASE_IDX 1
+#define mmSDMA7_PUB_DUMMY_REG0 0x0051
+#define mmSDMA7_PUB_DUMMY_REG0_BASE_IDX 1
+#define mmSDMA7_PUB_DUMMY_REG1 0x0052
+#define mmSDMA7_PUB_DUMMY_REG1_BASE_IDX 1
+#define mmSDMA7_PUB_DUMMY_REG2 0x0053
+#define mmSDMA7_PUB_DUMMY_REG2_BASE_IDX 1
+#define mmSDMA7_PUB_DUMMY_REG3 0x0054
+#define mmSDMA7_PUB_DUMMY_REG3_BASE_IDX 1
+#define mmSDMA7_F32_COUNTER 0x0055
+#define mmSDMA7_F32_COUNTER_BASE_IDX 1
+#define mmSDMA7_UNBREAKABLE 0x0056
+#define mmSDMA7_UNBREAKABLE_BASE_IDX 1
+#define mmSDMA7_PERFMON_CNTL 0x0057
+#define mmSDMA7_PERFMON_CNTL_BASE_IDX 1
+#define mmSDMA7_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA7_PERFCOUNTER0_RESULT_BASE_IDX 1
+#define mmSDMA7_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA7_PERFCOUNTER1_RESULT_BASE_IDX 1
+#define mmSDMA7_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA7_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1
+#define mmSDMA7_CRD_CNTL 0x005b
+#define mmSDMA7_CRD_CNTL_BASE_IDX 1
+#define mmSDMA7_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA7_GPU_IOV_VIOLATION_LOG_BASE_IDX 1
+#define mmSDMA7_ULV_CNTL 0x005e
+#define mmSDMA7_ULV_CNTL_BASE_IDX 1
+#define mmSDMA7_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA7_EA_DBIT_ADDR_DATA_BASE_IDX 1
+#define mmSDMA7_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA7_EA_DBIT_ADDR_INDEX_BASE_IDX 1
+#define mmSDMA7_GPU_IOV_VIOLATION_LOG2 0x0062
+#define mmSDMA7_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1
+#define mmSDMA7_GFX_RB_CNTL 0x0080
+#define mmSDMA7_GFX_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_GFX_RB_BASE 0x0081
+#define mmSDMA7_GFX_RB_BASE_BASE_IDX 1
+#define mmSDMA7_GFX_RB_BASE_HI 0x0082
+#define mmSDMA7_GFX_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_GFX_RB_RPTR 0x0083
+#define mmSDMA7_GFX_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA7_GFX_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_GFX_RB_WPTR 0x0085
+#define mmSDMA7_GFX_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA7_GFX_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA7_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA7_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA7_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_GFX_IB_CNTL 0x008a
+#define mmSDMA7_GFX_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_GFX_IB_RPTR 0x008b
+#define mmSDMA7_GFX_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_GFX_IB_OFFSET 0x008c
+#define mmSDMA7_GFX_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_GFX_IB_BASE_LO 0x008d
+#define mmSDMA7_GFX_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_GFX_IB_BASE_HI 0x008e
+#define mmSDMA7_GFX_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_GFX_IB_SIZE 0x008f
+#define mmSDMA7_GFX_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_GFX_SKIP_CNTL 0x0090
+#define mmSDMA7_GFX_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA7_GFX_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_GFX_DOORBELL 0x0092
+#define mmSDMA7_GFX_DOORBELL_BASE_IDX 1
+#define mmSDMA7_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA7_GFX_CONTEXT_CNTL_BASE_IDX 1
+#define mmSDMA7_GFX_STATUS 0x00a8
+#define mmSDMA7_GFX_STATUS_BASE_IDX 1
+#define mmSDMA7_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA7_GFX_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_GFX_WATERMARK 0x00aa
+#define mmSDMA7_GFX_WATERMARK_BASE_IDX 1
+#define mmSDMA7_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA7_GFX_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA7_GFX_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA7_GFX_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA7_GFX_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_GFX_PREEMPT 0x00b0
+#define mmSDMA7_GFX_PREEMPT_BASE_IDX 1
+#define mmSDMA7_GFX_DUMMY_REG 0x00b1
+#define mmSDMA7_GFX_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA7_GFX_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA7_GFX_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA7_GFX_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA7_GFX_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA7_GFX_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA7_GFX_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA7_GFX_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA7_GFX_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA7_GFX_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA7_GFX_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA7_GFX_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA7_GFX_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_CNTL 0x00d8
+#define mmSDMA7_PAGE_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_BASE 0x00d9
+#define mmSDMA7_PAGE_RB_BASE_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_BASE_HI 0x00da
+#define mmSDMA7_PAGE_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_RPTR 0x00db
+#define mmSDMA7_PAGE_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_RPTR_HI 0x00dc
+#define mmSDMA7_PAGE_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_WPTR 0x00dd
+#define mmSDMA7_PAGE_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_WPTR_HI 0x00de
+#define mmSDMA7_PAGE_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define mmSDMA7_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define mmSDMA7_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define mmSDMA7_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_CNTL 0x00e2
+#define mmSDMA7_PAGE_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_RPTR 0x00e3
+#define mmSDMA7_PAGE_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_OFFSET 0x00e4
+#define mmSDMA7_PAGE_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_BASE_LO 0x00e5
+#define mmSDMA7_PAGE_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_BASE_HI 0x00e6
+#define mmSDMA7_PAGE_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_SIZE 0x00e7
+#define mmSDMA7_PAGE_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_PAGE_SKIP_CNTL 0x00e8
+#define mmSDMA7_PAGE_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_PAGE_CONTEXT_STATUS 0x00e9
+#define mmSDMA7_PAGE_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_PAGE_DOORBELL 0x00ea
+#define mmSDMA7_PAGE_DOORBELL_BASE_IDX 1
+#define mmSDMA7_PAGE_STATUS 0x0100
+#define mmSDMA7_PAGE_STATUS_BASE_IDX 1
+#define mmSDMA7_PAGE_DOORBELL_LOG 0x0101
+#define mmSDMA7_PAGE_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_PAGE_WATERMARK 0x0102
+#define mmSDMA7_PAGE_WATERMARK_BASE_IDX 1
+#define mmSDMA7_PAGE_DOORBELL_OFFSET 0x0103
+#define mmSDMA7_PAGE_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_PAGE_CSA_ADDR_LO 0x0104
+#define mmSDMA7_PAGE_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_PAGE_CSA_ADDR_HI 0x0105
+#define mmSDMA7_PAGE_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_IB_SUB_REMAIN 0x0107
+#define mmSDMA7_PAGE_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_PAGE_PREEMPT 0x0108
+#define mmSDMA7_PAGE_PREEMPT_BASE_IDX 1
+#define mmSDMA7_PAGE_DUMMY_REG 0x0109
+#define mmSDMA7_PAGE_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_PAGE_RB_AQL_CNTL 0x010c
+#define mmSDMA7_PAGE_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_PAGE_MINOR_PTR_UPDATE 0x010d
+#define mmSDMA7_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA0 0x0118
+#define mmSDMA7_PAGE_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA1 0x0119
+#define mmSDMA7_PAGE_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA2 0x011a
+#define mmSDMA7_PAGE_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA3 0x011b
+#define mmSDMA7_PAGE_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA4 0x011c
+#define mmSDMA7_PAGE_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA5 0x011d
+#define mmSDMA7_PAGE_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA6 0x011e
+#define mmSDMA7_PAGE_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA7 0x011f
+#define mmSDMA7_PAGE_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_DATA8 0x0120
+#define mmSDMA7_PAGE_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_PAGE_MIDCMD_CNTL 0x0121
+#define mmSDMA7_PAGE_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_CNTL 0x0130
+#define mmSDMA7_RLC0_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_BASE 0x0131
+#define mmSDMA7_RLC0_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_BASE_HI 0x0132
+#define mmSDMA7_RLC0_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_RPTR 0x0133
+#define mmSDMA7_RLC0_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_RPTR_HI 0x0134
+#define mmSDMA7_RLC0_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_WPTR 0x0135
+#define mmSDMA7_RLC0_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_WPTR_HI 0x0136
+#define mmSDMA7_RLC0_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define mmSDMA7_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define mmSDMA7_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define mmSDMA7_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_CNTL 0x013a
+#define mmSDMA7_RLC0_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_RPTR 0x013b
+#define mmSDMA7_RLC0_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_OFFSET 0x013c
+#define mmSDMA7_RLC0_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_BASE_LO 0x013d
+#define mmSDMA7_RLC0_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_BASE_HI 0x013e
+#define mmSDMA7_RLC0_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_SIZE 0x013f
+#define mmSDMA7_RLC0_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC0_SKIP_CNTL 0x0140
+#define mmSDMA7_RLC0_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC0_CONTEXT_STATUS 0x0141
+#define mmSDMA7_RLC0_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC0_DOORBELL 0x0142
+#define mmSDMA7_RLC0_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC0_STATUS 0x0158
+#define mmSDMA7_RLC0_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC0_DOORBELL_LOG 0x0159
+#define mmSDMA7_RLC0_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC0_WATERMARK 0x015a
+#define mmSDMA7_RLC0_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC0_DOORBELL_OFFSET 0x015b
+#define mmSDMA7_RLC0_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC0_CSA_ADDR_LO 0x015c
+#define mmSDMA7_RLC0_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC0_CSA_ADDR_HI 0x015d
+#define mmSDMA7_RLC0_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_IB_SUB_REMAIN 0x015f
+#define mmSDMA7_RLC0_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC0_PREEMPT 0x0160
+#define mmSDMA7_RLC0_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC0_DUMMY_REG 0x0161
+#define mmSDMA7_RLC0_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC0_RB_AQL_CNTL 0x0164
+#define mmSDMA7_RLC0_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC0_MINOR_PTR_UPDATE 0x0165
+#define mmSDMA7_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA0 0x0170
+#define mmSDMA7_RLC0_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA1 0x0171
+#define mmSDMA7_RLC0_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA2 0x0172
+#define mmSDMA7_RLC0_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA3 0x0173
+#define mmSDMA7_RLC0_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA4 0x0174
+#define mmSDMA7_RLC0_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA5 0x0175
+#define mmSDMA7_RLC0_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA6 0x0176
+#define mmSDMA7_RLC0_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA7 0x0177
+#define mmSDMA7_RLC0_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_DATA8 0x0178
+#define mmSDMA7_RLC0_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC0_MIDCMD_CNTL 0x0179
+#define mmSDMA7_RLC0_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_CNTL 0x0188
+#define mmSDMA7_RLC1_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_BASE 0x0189
+#define mmSDMA7_RLC1_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_BASE_HI 0x018a
+#define mmSDMA7_RLC1_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_RPTR 0x018b
+#define mmSDMA7_RLC1_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_RPTR_HI 0x018c
+#define mmSDMA7_RLC1_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_WPTR 0x018d
+#define mmSDMA7_RLC1_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_WPTR_HI 0x018e
+#define mmSDMA7_RLC1_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define mmSDMA7_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define mmSDMA7_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define mmSDMA7_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_CNTL 0x0192
+#define mmSDMA7_RLC1_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_RPTR 0x0193
+#define mmSDMA7_RLC1_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_OFFSET 0x0194
+#define mmSDMA7_RLC1_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_BASE_LO 0x0195
+#define mmSDMA7_RLC1_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_BASE_HI 0x0196
+#define mmSDMA7_RLC1_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_SIZE 0x0197
+#define mmSDMA7_RLC1_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC1_SKIP_CNTL 0x0198
+#define mmSDMA7_RLC1_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC1_CONTEXT_STATUS 0x0199
+#define mmSDMA7_RLC1_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC1_DOORBELL 0x019a
+#define mmSDMA7_RLC1_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC1_STATUS 0x01b0
+#define mmSDMA7_RLC1_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC1_DOORBELL_LOG 0x01b1
+#define mmSDMA7_RLC1_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC1_WATERMARK 0x01b2
+#define mmSDMA7_RLC1_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC1_DOORBELL_OFFSET 0x01b3
+#define mmSDMA7_RLC1_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC1_CSA_ADDR_LO 0x01b4
+#define mmSDMA7_RLC1_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC1_CSA_ADDR_HI 0x01b5
+#define mmSDMA7_RLC1_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_IB_SUB_REMAIN 0x01b7
+#define mmSDMA7_RLC1_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC1_PREEMPT 0x01b8
+#define mmSDMA7_RLC1_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC1_DUMMY_REG 0x01b9
+#define mmSDMA7_RLC1_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC1_RB_AQL_CNTL 0x01bc
+#define mmSDMA7_RLC1_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define mmSDMA7_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA0 0x01c8
+#define mmSDMA7_RLC1_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA1 0x01c9
+#define mmSDMA7_RLC1_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA2 0x01ca
+#define mmSDMA7_RLC1_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA3 0x01cb
+#define mmSDMA7_RLC1_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA4 0x01cc
+#define mmSDMA7_RLC1_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA5 0x01cd
+#define mmSDMA7_RLC1_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA6 0x01ce
+#define mmSDMA7_RLC1_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA7 0x01cf
+#define mmSDMA7_RLC1_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_DATA8 0x01d0
+#define mmSDMA7_RLC1_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC1_MIDCMD_CNTL 0x01d1
+#define mmSDMA7_RLC1_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_CNTL 0x01e0
+#define mmSDMA7_RLC2_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_BASE 0x01e1
+#define mmSDMA7_RLC2_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_BASE_HI 0x01e2
+#define mmSDMA7_RLC2_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_RPTR 0x01e3
+#define mmSDMA7_RLC2_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_RPTR_HI 0x01e4
+#define mmSDMA7_RLC2_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_WPTR 0x01e5
+#define mmSDMA7_RLC2_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_WPTR_HI 0x01e6
+#define mmSDMA7_RLC2_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define mmSDMA7_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define mmSDMA7_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define mmSDMA7_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_CNTL 0x01ea
+#define mmSDMA7_RLC2_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_RPTR 0x01eb
+#define mmSDMA7_RLC2_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_OFFSET 0x01ec
+#define mmSDMA7_RLC2_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_BASE_LO 0x01ed
+#define mmSDMA7_RLC2_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_BASE_HI 0x01ee
+#define mmSDMA7_RLC2_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_SIZE 0x01ef
+#define mmSDMA7_RLC2_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC2_SKIP_CNTL 0x01f0
+#define mmSDMA7_RLC2_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC2_CONTEXT_STATUS 0x01f1
+#define mmSDMA7_RLC2_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC2_DOORBELL 0x01f2
+#define mmSDMA7_RLC2_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC2_STATUS 0x0208
+#define mmSDMA7_RLC2_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC2_DOORBELL_LOG 0x0209
+#define mmSDMA7_RLC2_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC2_WATERMARK 0x020a
+#define mmSDMA7_RLC2_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC2_DOORBELL_OFFSET 0x020b
+#define mmSDMA7_RLC2_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC2_CSA_ADDR_LO 0x020c
+#define mmSDMA7_RLC2_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC2_CSA_ADDR_HI 0x020d
+#define mmSDMA7_RLC2_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_IB_SUB_REMAIN 0x020f
+#define mmSDMA7_RLC2_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC2_PREEMPT 0x0210
+#define mmSDMA7_RLC2_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC2_DUMMY_REG 0x0211
+#define mmSDMA7_RLC2_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC2_RB_AQL_CNTL 0x0214
+#define mmSDMA7_RLC2_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC2_MINOR_PTR_UPDATE 0x0215
+#define mmSDMA7_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA0 0x0220
+#define mmSDMA7_RLC2_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA1 0x0221
+#define mmSDMA7_RLC2_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA2 0x0222
+#define mmSDMA7_RLC2_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA3 0x0223
+#define mmSDMA7_RLC2_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA4 0x0224
+#define mmSDMA7_RLC2_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA5 0x0225
+#define mmSDMA7_RLC2_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA6 0x0226
+#define mmSDMA7_RLC2_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA7 0x0227
+#define mmSDMA7_RLC2_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_DATA8 0x0228
+#define mmSDMA7_RLC2_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC2_MIDCMD_CNTL 0x0229
+#define mmSDMA7_RLC2_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_CNTL 0x0238
+#define mmSDMA7_RLC3_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_BASE 0x0239
+#define mmSDMA7_RLC3_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_BASE_HI 0x023a
+#define mmSDMA7_RLC3_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_RPTR 0x023b
+#define mmSDMA7_RLC3_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_RPTR_HI 0x023c
+#define mmSDMA7_RLC3_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_WPTR 0x023d
+#define mmSDMA7_RLC3_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_WPTR_HI 0x023e
+#define mmSDMA7_RLC3_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define mmSDMA7_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define mmSDMA7_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define mmSDMA7_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_CNTL 0x0242
+#define mmSDMA7_RLC3_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_RPTR 0x0243
+#define mmSDMA7_RLC3_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_OFFSET 0x0244
+#define mmSDMA7_RLC3_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_BASE_LO 0x0245
+#define mmSDMA7_RLC3_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_BASE_HI 0x0246
+#define mmSDMA7_RLC3_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_SIZE 0x0247
+#define mmSDMA7_RLC3_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC3_SKIP_CNTL 0x0248
+#define mmSDMA7_RLC3_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC3_CONTEXT_STATUS 0x0249
+#define mmSDMA7_RLC3_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC3_DOORBELL 0x024a
+#define mmSDMA7_RLC3_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC3_STATUS 0x0260
+#define mmSDMA7_RLC3_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC3_DOORBELL_LOG 0x0261
+#define mmSDMA7_RLC3_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC3_WATERMARK 0x0262
+#define mmSDMA7_RLC3_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC3_DOORBELL_OFFSET 0x0263
+#define mmSDMA7_RLC3_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC3_CSA_ADDR_LO 0x0264
+#define mmSDMA7_RLC3_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC3_CSA_ADDR_HI 0x0265
+#define mmSDMA7_RLC3_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_IB_SUB_REMAIN 0x0267
+#define mmSDMA7_RLC3_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC3_PREEMPT 0x0268
+#define mmSDMA7_RLC3_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC3_DUMMY_REG 0x0269
+#define mmSDMA7_RLC3_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC3_RB_AQL_CNTL 0x026c
+#define mmSDMA7_RLC3_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC3_MINOR_PTR_UPDATE 0x026d
+#define mmSDMA7_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA0 0x0278
+#define mmSDMA7_RLC3_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA1 0x0279
+#define mmSDMA7_RLC3_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA2 0x027a
+#define mmSDMA7_RLC3_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA3 0x027b
+#define mmSDMA7_RLC3_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA4 0x027c
+#define mmSDMA7_RLC3_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA5 0x027d
+#define mmSDMA7_RLC3_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA6 0x027e
+#define mmSDMA7_RLC3_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA7 0x027f
+#define mmSDMA7_RLC3_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_DATA8 0x0280
+#define mmSDMA7_RLC3_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC3_MIDCMD_CNTL 0x0281
+#define mmSDMA7_RLC3_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_CNTL 0x0290
+#define mmSDMA7_RLC4_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_BASE 0x0291
+#define mmSDMA7_RLC4_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_BASE_HI 0x0292
+#define mmSDMA7_RLC4_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_RPTR 0x0293
+#define mmSDMA7_RLC4_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_RPTR_HI 0x0294
+#define mmSDMA7_RLC4_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_WPTR 0x0295
+#define mmSDMA7_RLC4_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_WPTR_HI 0x0296
+#define mmSDMA7_RLC4_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define mmSDMA7_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define mmSDMA7_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define mmSDMA7_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_CNTL 0x029a
+#define mmSDMA7_RLC4_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_RPTR 0x029b
+#define mmSDMA7_RLC4_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_OFFSET 0x029c
+#define mmSDMA7_RLC4_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_BASE_LO 0x029d
+#define mmSDMA7_RLC4_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_BASE_HI 0x029e
+#define mmSDMA7_RLC4_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_SIZE 0x029f
+#define mmSDMA7_RLC4_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC4_SKIP_CNTL 0x02a0
+#define mmSDMA7_RLC4_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC4_CONTEXT_STATUS 0x02a1
+#define mmSDMA7_RLC4_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC4_DOORBELL 0x02a2
+#define mmSDMA7_RLC4_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC4_STATUS 0x02b8
+#define mmSDMA7_RLC4_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC4_DOORBELL_LOG 0x02b9
+#define mmSDMA7_RLC4_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC4_WATERMARK 0x02ba
+#define mmSDMA7_RLC4_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC4_DOORBELL_OFFSET 0x02bb
+#define mmSDMA7_RLC4_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC4_CSA_ADDR_LO 0x02bc
+#define mmSDMA7_RLC4_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC4_CSA_ADDR_HI 0x02bd
+#define mmSDMA7_RLC4_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_IB_SUB_REMAIN 0x02bf
+#define mmSDMA7_RLC4_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC4_PREEMPT 0x02c0
+#define mmSDMA7_RLC4_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC4_DUMMY_REG 0x02c1
+#define mmSDMA7_RLC4_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC4_RB_AQL_CNTL 0x02c4
+#define mmSDMA7_RLC4_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define mmSDMA7_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA0 0x02d0
+#define mmSDMA7_RLC4_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA1 0x02d1
+#define mmSDMA7_RLC4_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA2 0x02d2
+#define mmSDMA7_RLC4_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA3 0x02d3
+#define mmSDMA7_RLC4_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA4 0x02d4
+#define mmSDMA7_RLC4_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA5 0x02d5
+#define mmSDMA7_RLC4_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA6 0x02d6
+#define mmSDMA7_RLC4_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA7 0x02d7
+#define mmSDMA7_RLC4_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_DATA8 0x02d8
+#define mmSDMA7_RLC4_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC4_MIDCMD_CNTL 0x02d9
+#define mmSDMA7_RLC4_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_CNTL 0x02e8
+#define mmSDMA7_RLC5_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_BASE 0x02e9
+#define mmSDMA7_RLC5_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_BASE_HI 0x02ea
+#define mmSDMA7_RLC5_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_RPTR 0x02eb
+#define mmSDMA7_RLC5_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_RPTR_HI 0x02ec
+#define mmSDMA7_RLC5_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_WPTR 0x02ed
+#define mmSDMA7_RLC5_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_WPTR_HI 0x02ee
+#define mmSDMA7_RLC5_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define mmSDMA7_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define mmSDMA7_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define mmSDMA7_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_CNTL 0x02f2
+#define mmSDMA7_RLC5_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_RPTR 0x02f3
+#define mmSDMA7_RLC5_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_OFFSET 0x02f4
+#define mmSDMA7_RLC5_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_BASE_LO 0x02f5
+#define mmSDMA7_RLC5_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_BASE_HI 0x02f6
+#define mmSDMA7_RLC5_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_SIZE 0x02f7
+#define mmSDMA7_RLC5_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC5_SKIP_CNTL 0x02f8
+#define mmSDMA7_RLC5_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC5_CONTEXT_STATUS 0x02f9
+#define mmSDMA7_RLC5_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC5_DOORBELL 0x02fa
+#define mmSDMA7_RLC5_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC5_STATUS 0x0310
+#define mmSDMA7_RLC5_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC5_DOORBELL_LOG 0x0311
+#define mmSDMA7_RLC5_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC5_WATERMARK 0x0312
+#define mmSDMA7_RLC5_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC5_DOORBELL_OFFSET 0x0313
+#define mmSDMA7_RLC5_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC5_CSA_ADDR_LO 0x0314
+#define mmSDMA7_RLC5_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC5_CSA_ADDR_HI 0x0315
+#define mmSDMA7_RLC5_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_IB_SUB_REMAIN 0x0317
+#define mmSDMA7_RLC5_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC5_PREEMPT 0x0318
+#define mmSDMA7_RLC5_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC5_DUMMY_REG 0x0319
+#define mmSDMA7_RLC5_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC5_RB_AQL_CNTL 0x031c
+#define mmSDMA7_RLC5_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC5_MINOR_PTR_UPDATE 0x031d
+#define mmSDMA7_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA0 0x0328
+#define mmSDMA7_RLC5_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA1 0x0329
+#define mmSDMA7_RLC5_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA2 0x032a
+#define mmSDMA7_RLC5_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA3 0x032b
+#define mmSDMA7_RLC5_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA4 0x032c
+#define mmSDMA7_RLC5_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA5 0x032d
+#define mmSDMA7_RLC5_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA6 0x032e
+#define mmSDMA7_RLC5_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA7 0x032f
+#define mmSDMA7_RLC5_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_DATA8 0x0330
+#define mmSDMA7_RLC5_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC5_MIDCMD_CNTL 0x0331
+#define mmSDMA7_RLC5_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_CNTL 0x0340
+#define mmSDMA7_RLC6_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_BASE 0x0341
+#define mmSDMA7_RLC6_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_BASE_HI 0x0342
+#define mmSDMA7_RLC6_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_RPTR 0x0343
+#define mmSDMA7_RLC6_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_RPTR_HI 0x0344
+#define mmSDMA7_RLC6_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_WPTR 0x0345
+#define mmSDMA7_RLC6_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_WPTR_HI 0x0346
+#define mmSDMA7_RLC6_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define mmSDMA7_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define mmSDMA7_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define mmSDMA7_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_CNTL 0x034a
+#define mmSDMA7_RLC6_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_RPTR 0x034b
+#define mmSDMA7_RLC6_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_OFFSET 0x034c
+#define mmSDMA7_RLC6_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_BASE_LO 0x034d
+#define mmSDMA7_RLC6_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_BASE_HI 0x034e
+#define mmSDMA7_RLC6_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_SIZE 0x034f
+#define mmSDMA7_RLC6_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC6_SKIP_CNTL 0x0350
+#define mmSDMA7_RLC6_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC6_CONTEXT_STATUS 0x0351
+#define mmSDMA7_RLC6_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC6_DOORBELL 0x0352
+#define mmSDMA7_RLC6_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC6_STATUS 0x0368
+#define mmSDMA7_RLC6_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC6_DOORBELL_LOG 0x0369
+#define mmSDMA7_RLC6_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC6_WATERMARK 0x036a
+#define mmSDMA7_RLC6_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC6_DOORBELL_OFFSET 0x036b
+#define mmSDMA7_RLC6_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC6_CSA_ADDR_LO 0x036c
+#define mmSDMA7_RLC6_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC6_CSA_ADDR_HI 0x036d
+#define mmSDMA7_RLC6_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_IB_SUB_REMAIN 0x036f
+#define mmSDMA7_RLC6_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC6_PREEMPT 0x0370
+#define mmSDMA7_RLC6_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC6_DUMMY_REG 0x0371
+#define mmSDMA7_RLC6_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC6_RB_AQL_CNTL 0x0374
+#define mmSDMA7_RLC6_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC6_MINOR_PTR_UPDATE 0x0375
+#define mmSDMA7_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA0 0x0380
+#define mmSDMA7_RLC6_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA1 0x0381
+#define mmSDMA7_RLC6_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA2 0x0382
+#define mmSDMA7_RLC6_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA3 0x0383
+#define mmSDMA7_RLC6_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA4 0x0384
+#define mmSDMA7_RLC6_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA5 0x0385
+#define mmSDMA7_RLC6_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA6 0x0386
+#define mmSDMA7_RLC6_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA7 0x0387
+#define mmSDMA7_RLC6_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_DATA8 0x0388
+#define mmSDMA7_RLC6_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC6_MIDCMD_CNTL 0x0389
+#define mmSDMA7_RLC6_MIDCMD_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_CNTL 0x0398
+#define mmSDMA7_RLC7_RB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_BASE 0x0399
+#define mmSDMA7_RLC7_RB_BASE_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_BASE_HI 0x039a
+#define mmSDMA7_RLC7_RB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_RPTR 0x039b
+#define mmSDMA7_RLC7_RB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_RPTR_HI 0x039c
+#define mmSDMA7_RLC7_RB_RPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_WPTR 0x039d
+#define mmSDMA7_RLC7_RB_WPTR_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_WPTR_HI 0x039e
+#define mmSDMA7_RLC7_RB_WPTR_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define mmSDMA7_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define mmSDMA7_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define mmSDMA7_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_CNTL 0x03a2
+#define mmSDMA7_RLC7_IB_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_RPTR 0x03a3
+#define mmSDMA7_RLC7_IB_RPTR_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_OFFSET 0x03a4
+#define mmSDMA7_RLC7_IB_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_BASE_LO 0x03a5
+#define mmSDMA7_RLC7_IB_BASE_LO_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_BASE_HI 0x03a6
+#define mmSDMA7_RLC7_IB_BASE_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_SIZE 0x03a7
+#define mmSDMA7_RLC7_IB_SIZE_BASE_IDX 1
+#define mmSDMA7_RLC7_SKIP_CNTL 0x03a8
+#define mmSDMA7_RLC7_SKIP_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC7_CONTEXT_STATUS 0x03a9
+#define mmSDMA7_RLC7_CONTEXT_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC7_DOORBELL 0x03aa
+#define mmSDMA7_RLC7_DOORBELL_BASE_IDX 1
+#define mmSDMA7_RLC7_STATUS 0x03c0
+#define mmSDMA7_RLC7_STATUS_BASE_IDX 1
+#define mmSDMA7_RLC7_DOORBELL_LOG 0x03c1
+#define mmSDMA7_RLC7_DOORBELL_LOG_BASE_IDX 1
+#define mmSDMA7_RLC7_WATERMARK 0x03c2
+#define mmSDMA7_RLC7_WATERMARK_BASE_IDX 1
+#define mmSDMA7_RLC7_DOORBELL_OFFSET 0x03c3
+#define mmSDMA7_RLC7_DOORBELL_OFFSET_BASE_IDX 1
+#define mmSDMA7_RLC7_CSA_ADDR_LO 0x03c4
+#define mmSDMA7_RLC7_CSA_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC7_CSA_ADDR_HI 0x03c5
+#define mmSDMA7_RLC7_CSA_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_IB_SUB_REMAIN 0x03c7
+#define mmSDMA7_RLC7_IB_SUB_REMAIN_BASE_IDX 1
+#define mmSDMA7_RLC7_PREEMPT 0x03c8
+#define mmSDMA7_RLC7_PREEMPT_BASE_IDX 1
+#define mmSDMA7_RLC7_DUMMY_REG 0x03c9
+#define mmSDMA7_RLC7_DUMMY_REG_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1
+#define mmSDMA7_RLC7_RB_AQL_CNTL 0x03cc
+#define mmSDMA7_RLC7_RB_AQL_CNTL_BASE_IDX 1
+#define mmSDMA7_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define mmSDMA7_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA0 0x03d8
+#define mmSDMA7_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA7_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA7_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA7_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA7_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA7_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA7_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA7_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA7_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA7_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA7_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
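
Illustrative sketch only (not part of the patch): the offset header above pairs each register offset with a BASE_IDX that selects a segment in the driver's per-IP base-address table, while the companion sh_mask header introduced below supplies matching __SHIFT/_MASK pairs for field access. The snippet assumes hypothetical stand-ins reg_base[] and sdma7_rreg32() for the real base-address table and MMIO read helper; only the #defines from these headers are taken from the patch itself.

/*
 * Sketch, not driver code: combine an offset/BASE_IDX pair into an absolute
 * dword offset, then extract a field with a _MASK/__SHIFT pair.
 */
#include <stdint.h>

extern const uint32_t reg_base[];              /* hypothetical per-IP segment bases */
uint32_t sdma7_rreg32(uint32_t dword_offset);  /* hypothetical 32-bit MMIO read */

static uint32_t sdma7_gfx_rb_cntl_read(void)
{
	/* Absolute dword offset = segment base chosen by BASE_IDX + register offset. */
	uint32_t reg = reg_base[mmSDMA7_GFX_RB_CNTL_BASE_IDX] + mmSDMA7_GFX_RB_CNTL;

	return sdma7_rreg32(reg);
}

static uint32_t sdma7_ucode_addr_value(uint32_t val)
{
	/* Field extraction using the matching pair from the sh_mask header below. */
	return (val & SDMA7_UCODE_ADDR__VALUE_MASK) >> SDMA7_UCODE_ADDR__VALUE__SHIFT;
}
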
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..4b56d8c67d91
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma7_4_2_2_SH_MASK_HEADER
+#define _sdma7_4_2_2_SH_MASK_HEADER
+
+
+// addressBlock: sdma7_sdma7dec
+//SDMA7_UCODE_ADDR
+#define SDMA7_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA7_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA7_UCODE_DATA
+#define SDMA7_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA7_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_VM_CNTL
+#define SDMA7_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA7_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA7_VM_CTX_LO
+#define SDMA7_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA7_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_VM_CTX_HI
+#define SDMA7_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA7_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_ACTIVE_FCN_ID
+#define SDMA7_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA7_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA7_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA7_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA7_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA7_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA7_VM_CTX_CNTL
+#define SDMA7_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA7_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA7_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA7_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA7_VIRT_RESET_REQ
+#define SDMA7_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA7_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA7_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA7_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA7_VF_ENABLE
+#define SDMA7_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA7_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA7_CONTEXT_REG_TYPE0
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE__SHIFT 0x1
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_DOORBELL__SHIFT 0x12
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA7_CONTEXT_REG_TYPE1
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_STATUS__SHIFT 0x8
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_WATERMARK__SHIFT 0xa
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA7_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_PREEMPT__SHIFT 0x10
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA7_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_STATUS_MASK 0x00000100L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA7_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA7_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA7_CONTEXT_REG_TYPE2
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA7_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA7_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA7_CONTEXT_REG_TYPE3
+#define SDMA7_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA7_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA7_PUB_REG_TYPE0
+#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_ADDR__SHIFT 0x0
+#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_DATA__SHIFT 0x1
+#define SDMA7_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CNTL__SHIFT 0x4
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_LO__SHIFT 0x5
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_HI__SHIFT 0x6
+#define SDMA7_PUB_REG_TYPE0__SDMA7_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA7_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA7_PUB_REG_TYPE0__SDMA7_MMHUB_CNTL__SHIFT 0x13
+#define SDMA7_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA7_PUB_REG_TYPE0__SDMA7_POWER_CNTL__SHIFT 0x1a
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CLK_CTRL__SHIFT 0x1b
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CNTL__SHIFT 0x1c
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_ADDR_MASK 0x00000001L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_DATA_MASK 0x00000002L
+#define SDMA7_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CNTL_MASK 0x00000010L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_LO_MASK 0x00000020L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_HI_MASK 0x00000040L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA7_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA7_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_POWER_CNTL_MASK 0x04000000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CLK_CTRL_MASK 0x08000000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CNTL_MASK 0x10000000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA7_PUB_REG_TYPE1
+#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA7_PUB_REG_TYPE1__SDMA7_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA7_PUB_REG_TYPE1__SDMA7_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA7_PUB_REG_TYPE1__SDMA7_PROGRAM__SHIFT 0x4
+#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS_REG__SHIFT 0x5
+#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS1_REG__SHIFT 0x6
+#define SDMA7_PUB_REG_TYPE1__SDMA7_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA7_PUB_REG_TYPE1__SDMA7_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA7_PUB_REG_TYPE1__SDMA7_F32_CNTL__SHIFT 0xa
+#define SDMA7_PUB_REG_TYPE1__SDMA7_FREEZE__SHIFT 0xb
+#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA7_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_CONFIG__SHIFT 0x12
+#define SDMA7_PUB_REG_TYPE1__SDMA7_BA_THRESHOLD__SHIFT 0x13
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ID__SHIFT 0x14
+#define SDMA7_PUB_REG_TYPE1__SDMA7_VERSION__SHIFT 0x15
+#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER__SHIFT 0x16
+#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS2_REG__SHIFT 0x18
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_PROGRAM_MASK 0x00000010L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS_REG_MASK 0x00000020L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS1_REG_MASK 0x00000040L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_F32_CNTL_MASK 0x00000400L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_FREEZE_MASK 0x00000800L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA7_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_CONFIG_MASK 0x00040000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ID_MASK 0x00100000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_VERSION_MASK 0x00200000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER_MASK 0x00400000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS2_REG_MASK 0x01000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA7_PUB_REG_TYPE2
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV0__SHIFT 0x0
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV1__SHIFT 0x1
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV2__SHIFT 0x2
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_PAGE__SHIFT 0x8
+#define SDMA7_PUB_REG_TYPE2__SDMA7_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA7_PUB_REG_TYPE2__SDMA7_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA7_PUB_REG_TYPE2__SDMA7_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA7_PUB_REG_TYPE2__SDMA7_STATUS3_REG__SHIFT 0xc
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA7_PUB_REG_TYPE2__SDMA7_ERROR_LOG__SHIFT 0x10
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA7_PUB_REG_TYPE2__SDMA7_F32_COUNTER__SHIFT 0x15
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UNBREAKABLE__SHIFT 0x16
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFMON_CNTL__SHIFT 0x17
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA7_PUB_REG_TYPE2__SDMA7_CRD_CNTL__SHIFT 0x1b
+#define SDMA7_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
+#define SDMA7_PUB_REG_TYPE2__SDMA7_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA7_PUB_REG_TYPE2__SDMA7_ULV_CNTL__SHIFT 0x1e
+#define SDMA7_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV0_MASK 0x00000001L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV1_MASK 0x00000002L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV2_MASK 0x00000004L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_STATUS3_REG_MASK 0x00001000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_ERROR_LOG_MASK 0x00010000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_F32_COUNTER_MASK 0x00200000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_UNBREAKABLE_MASK 0x00400000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_CRD_CNTL_MASK 0x08000000L
+#define SDMA7_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA7_PUB_REG_TYPE2__SDMA7_ULV_CNTL_MASK 0x40000000L
+#define SDMA7_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA7_PUB_REG_TYPE3
+#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA7_PUB_REG_TYPE3__SDMA7_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA7_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
+#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA7_PUB_REG_TYPE3__SDMA7_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA7_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
+//SDMA7_MMHUB_CNTL
+#define SDMA7_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA7_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA7_CONTEXT_GROUP_BOUNDARY
+#define SDMA7_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA7_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA7_POWER_CNTL
+#define SDMA7_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA7_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA7_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA7_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA7_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA7_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA7_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA7_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA7_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA7_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA7_CLK_CTRL
+#define SDMA7_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA7_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA7_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA7_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA7_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA7_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA7_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA7_CNTL
+#define SDMA7_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA7_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA7_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA7_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA7_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA7_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA7_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA7_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA7_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA7_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA7_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA7_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA7_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA7_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA7_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA7_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA7_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA7_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA7_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA7_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA7_CHICKEN_BITS
+#define SDMA7_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA7_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA7_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA7_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA7_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA7_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA7_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA7_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA7_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA7_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA7_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA7_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA7_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA7_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA7_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA7_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA7_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA7_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA7_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA7_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA7_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA7_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA7_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA7_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA7_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA7_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA7_GB_ADDR_CONFIG
+#define SDMA7_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA7_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA7_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA7_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA7_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA7_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA7_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA7_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA7_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA7_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA7_GB_ADDR_CONFIG_READ
+#define SDMA7_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA7_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA7_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA7_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA7_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA7_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA7_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA7_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA7_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA7_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA7_RB_RPTR_FETCH_HI
+#define SDMA7_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA7_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA7_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA7_RB_RPTR_FETCH
+#define SDMA7_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA7_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA7_IB_OFFSET_FETCH
+#define SDMA7_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA7_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA7_PROGRAM
+#define SDMA7_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA7_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA7_STATUS_REG
+#define SDMA7_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA7_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA7_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA7_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA7_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA7_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA7_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA7_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA7_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA7_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA7_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA7_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA7_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA7_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA7_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA7_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA7_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA7_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA7_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA7_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA7_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA7_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA7_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA7_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA7_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA7_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA7_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA7_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA7_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA7_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA7_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA7_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA7_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA7_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA7_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA7_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA7_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA7_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA7_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA7_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA7_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA7_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA7_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA7_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA7_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA7_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA7_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA7_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA7_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA7_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA7_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA7_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA7_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA7_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA7_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA7_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA7_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA7_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA7_STATUS1_REG
+#define SDMA7_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA7_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA7_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA7_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA7_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA7_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA7_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA7_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA7_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA7_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA7_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA7_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA7_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA7_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA7_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA7_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA7_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA7_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA7_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA7_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA7_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA7_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA7_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA7_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA7_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA7_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA7_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA7_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA7_RD_BURST_CNTL
+#define SDMA7_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA7_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA7_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA7_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA7_HBM_PAGE_CONFIG
+#define SDMA7_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA7_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA7_UCODE_CHECKSUM
+#define SDMA7_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA7_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA7_F32_CNTL
+#define SDMA7_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA7_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA7_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA7_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA7_FREEZE
+#define SDMA7_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA7_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA7_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA7_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA7_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA7_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA7_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA7_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA7_PHASE0_QUANTUM
+#define SDMA7_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA7_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA7_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA7_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA7_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA7_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA7_PHASE1_QUANTUM
+#define SDMA7_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA7_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA7_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA7_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA7_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA7_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA7_EDC_CONFIG
+#define SDMA7_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA7_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA7_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA7_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA7_BA_THRESHOLD
+#define SDMA7_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA7_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA7_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA7_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA7_ID
+#define SDMA7_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA7_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA7_VERSION
+#define SDMA7_VERSION__MINVER__SHIFT 0x0
+#define SDMA7_VERSION__MAJVER__SHIFT 0x8
+#define SDMA7_VERSION__REV__SHIFT 0x10
+#define SDMA7_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA7_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA7_VERSION__REV_MASK 0x003F0000L
+//SDMA7_EDC_COUNTER
+#define SDMA7_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA7_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA7_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA7_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
+#define SDMA7_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
+#define SDMA7_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
+#define SDMA7_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
+#define SDMA7_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA7_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA7_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
+#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
+#define SDMA7_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
+#define SDMA7_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
+//SDMA7_EDC_COUNTER_CLEAR
+#define SDMA7_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA7_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA7_STATUS2_REG
+#define SDMA7_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA7_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA7_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA7_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA7_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA7_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA7_ATOMIC_CNTL
+#define SDMA7_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA7_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA7_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA7_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA7_ATOMIC_PREOP_LO
+#define SDMA7_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA7_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA7_ATOMIC_PREOP_HI
+#define SDMA7_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA7_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA7_UTCL1_CNTL
+#define SDMA7_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA7_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA7_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA7_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA7_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA7_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA7_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA7_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA7_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA7_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA7_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA7_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA7_UTCL1_WATERMK
+#define SDMA7_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA7_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
+#define SDMA7_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
+#define SDMA7_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
+#define SDMA7_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
+#define SDMA7_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
+#define SDMA7_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
+#define SDMA7_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
+//SDMA7_UTCL1_RD_STATUS
+#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA7_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA7_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA7_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA7_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA7_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA7_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA7_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA7_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA7_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA7_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA7_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA7_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA7_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA7_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA7_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA7_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA7_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA7_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA7_UTCL1_WR_STATUS
+#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA7_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA7_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA7_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA7_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA7_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA7_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA7_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA7_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA7_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA7_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA7_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA7_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA7_UTCL1_INV0
+#define SDMA7_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA7_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA7_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA7_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA7_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA7_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA7_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA7_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA7_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA7_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA7_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA7_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA7_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA7_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA7_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA7_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA7_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA7_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA7_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA7_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA7_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA7_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA7_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA7_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA7_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA7_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA7_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA7_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA7_UTCL1_INV1
+#define SDMA7_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA7_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA7_UTCL1_INV2
+#define SDMA7_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA7_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA7_UTCL1_RD_XNACK0
+#define SDMA7_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA7_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA7_UTCL1_RD_XNACK1
+#define SDMA7_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA7_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA7_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA7_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA7_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA7_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA7_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA7_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA7_UTCL1_WR_XNACK0
+#define SDMA7_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA7_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA7_UTCL1_WR_XNACK1
+#define SDMA7_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA7_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA7_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA7_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA7_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA7_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA7_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA7_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA7_UTCL1_TIMEOUT
+#define SDMA7_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA7_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA7_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA7_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA7_UTCL1_PAGE
+#define SDMA7_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA7_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA7_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA7_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA7_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA7_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA7_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA7_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA7_POWER_CNTL_IDLE
+#define SDMA7_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA7_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA7_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA7_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA7_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA7_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA7_RELAX_ORDERING_LUT
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA7_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA7_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA7_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA7_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA7_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA7_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA7_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA7_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA7_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA7_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA7_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA7_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA7_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA7_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA7_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA7_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA7_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA7_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA7_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA7_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA7_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA7_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA7_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA7_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA7_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA7_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA7_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA7_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA7_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA7_CHICKEN_BITS_2
+#define SDMA7_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA7_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA7_STATUS3_REG
+#define SDMA7_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA7_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA7_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA7_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA7_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA7_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA7_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA7_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA7_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA7_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA7_PHYSICAL_ADDR_LO
+#define SDMA7_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA7_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA7_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA7_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA7_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA7_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA7_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA7_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA7_PHYSICAL_ADDR_HI
+#define SDMA7_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA7_PHASE2_QUANTUM
+#define SDMA7_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA7_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA7_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA7_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA7_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA7_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA7_ERROR_LOG
+#define SDMA7_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA7_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA7_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA7_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA7_PUB_DUMMY_REG0
+#define SDMA7_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA7_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_PUB_DUMMY_REG1
+#define SDMA7_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA7_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_PUB_DUMMY_REG2
+#define SDMA7_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA7_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_PUB_DUMMY_REG3
+#define SDMA7_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA7_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_F32_COUNTER
+#define SDMA7_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA7_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_UNBREAKABLE
+#define SDMA7_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA7_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA7_PERFMON_CNTL
+#define SDMA7_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA7_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA7_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA7_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA7_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA7_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA7_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA7_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA7_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA7_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA7_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA7_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA7_PERFCOUNTER0_RESULT
+#define SDMA7_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA7_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA7_PERFCOUNTER1_RESULT
+#define SDMA7_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA7_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA7_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA7_CRD_CNTL
+#define SDMA7_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA7_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA7_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA7_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA7_GPU_IOV_VIOLATION_LOG
+#define SDMA7_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA7_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA7_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA7_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA7_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA7_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA7_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA7_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA7_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA7_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA7_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA7_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA7_ULV_CNTL
+#define SDMA7_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA7_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA7_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA7_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA7_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA7_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA7_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA7_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA7_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA7_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA7_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA7_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA7_EA_DBIT_ADDR_DATA
+#define SDMA7_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA7_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA7_EA_DBIT_ADDR_INDEX
+#define SDMA7_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA7_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA7_GPU_IOV_VIOLATION_LOG2
+#define SDMA7_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA7_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
+//SDMA7_GFX_RB_CNTL
+#define SDMA7_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_GFX_RB_BASE
+#define SDMA7_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_BASE_HI
+#define SDMA7_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_GFX_RB_RPTR
+#define SDMA7_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_RPTR_HI
+#define SDMA7_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_WPTR
+#define SDMA7_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_WPTR_HI
+#define SDMA7_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_WPTR_POLL_CNTL
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_GFX_RB_RPTR_ADDR_HI
+#define SDMA7_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_RPTR_ADDR_LO
+#define SDMA7_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_GFX_IB_CNTL
+#define SDMA7_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_GFX_IB_RPTR
+#define SDMA7_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_GFX_IB_OFFSET
+#define SDMA7_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_GFX_IB_BASE_LO
+#define SDMA7_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_GFX_IB_BASE_HI
+#define SDMA7_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_GFX_IB_SIZE
+#define SDMA7_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_GFX_SKIP_CNTL
+#define SDMA7_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_GFX_CONTEXT_STATUS
+#define SDMA7_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_GFX_DOORBELL
+#define SDMA7_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_GFX_CONTEXT_CNTL
+#define SDMA7_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA7_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA7_GFX_STATUS
+#define SDMA7_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_GFX_DOORBELL_LOG
+#define SDMA7_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_GFX_WATERMARK
+#define SDMA7_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_GFX_DOORBELL_OFFSET
+#define SDMA7_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_GFX_CSA_ADDR_LO
+#define SDMA7_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_GFX_CSA_ADDR_HI
+#define SDMA7_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_GFX_IB_SUB_REMAIN
+#define SDMA7_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_GFX_PREEMPT
+#define SDMA7_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_GFX_DUMMY_REG
+#define SDMA7_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_GFX_RB_AQL_CNTL
+#define SDMA7_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_GFX_MINOR_PTR_UPDATE
+#define SDMA7_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_GFX_MIDCMD_DATA0
+#define SDMA7_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA1
+#define SDMA7_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA2
+#define SDMA7_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA3
+#define SDMA7_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA4
+#define SDMA7_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA5
+#define SDMA7_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA6
+#define SDMA7_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA7
+#define SDMA7_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_DATA8
+#define SDMA7_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_GFX_MIDCMD_CNTL
+#define SDMA7_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_PAGE_RB_CNTL
+#define SDMA7_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_PAGE_RB_BASE
+#define SDMA7_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_BASE_HI
+#define SDMA7_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_PAGE_RB_RPTR
+#define SDMA7_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_RPTR_HI
+#define SDMA7_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_WPTR
+#define SDMA7_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_WPTR_HI
+#define SDMA7_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_PAGE_RB_RPTR_ADDR_HI
+#define SDMA7_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_RPTR_ADDR_LO
+#define SDMA7_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_PAGE_IB_CNTL
+#define SDMA7_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_PAGE_IB_RPTR
+#define SDMA7_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_PAGE_IB_OFFSET
+#define SDMA7_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_PAGE_IB_BASE_LO
+#define SDMA7_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_PAGE_IB_BASE_HI
+#define SDMA7_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_IB_SIZE
+#define SDMA7_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_PAGE_SKIP_CNTL
+#define SDMA7_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_PAGE_CONTEXT_STATUS
+#define SDMA7_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_PAGE_DOORBELL
+#define SDMA7_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_PAGE_STATUS
+#define SDMA7_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_PAGE_DOORBELL_LOG
+#define SDMA7_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_PAGE_WATERMARK
+#define SDMA7_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_PAGE_DOORBELL_OFFSET
+#define SDMA7_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_PAGE_CSA_ADDR_LO
+#define SDMA7_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_PAGE_CSA_ADDR_HI
+#define SDMA7_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_IB_SUB_REMAIN
+#define SDMA7_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_PAGE_PREEMPT
+#define SDMA7_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_PAGE_DUMMY_REG
+#define SDMA7_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_PAGE_RB_AQL_CNTL
+#define SDMA7_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_PAGE_MINOR_PTR_UPDATE
+#define SDMA7_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_PAGE_MIDCMD_DATA0
+#define SDMA7_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA1
+#define SDMA7_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA2
+#define SDMA7_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA3
+#define SDMA7_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA4
+#define SDMA7_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA5
+#define SDMA7_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA6
+#define SDMA7_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA7
+#define SDMA7_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_DATA8
+#define SDMA7_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_PAGE_MIDCMD_CNTL
+#define SDMA7_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC0_RB_CNTL
+#define SDMA7_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC0_RB_BASE
+#define SDMA7_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_BASE_HI
+#define SDMA7_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC0_RB_RPTR
+#define SDMA7_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_RPTR_HI
+#define SDMA7_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_WPTR
+#define SDMA7_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_WPTR_HI
+#define SDMA7_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC0_RB_RPTR_ADDR_HI
+#define SDMA7_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_RPTR_ADDR_LO
+#define SDMA7_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC0_IB_CNTL
+#define SDMA7_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC0_IB_RPTR
+#define SDMA7_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC0_IB_OFFSET
+#define SDMA7_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC0_IB_BASE_LO
+#define SDMA7_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC0_IB_BASE_HI
+#define SDMA7_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_IB_SIZE
+#define SDMA7_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC0_SKIP_CNTL
+#define SDMA7_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC0_CONTEXT_STATUS
+#define SDMA7_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC0_DOORBELL
+#define SDMA7_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC0_STATUS
+#define SDMA7_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC0_DOORBELL_LOG
+#define SDMA7_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC0_WATERMARK
+#define SDMA7_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC0_DOORBELL_OFFSET
+#define SDMA7_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC0_CSA_ADDR_LO
+#define SDMA7_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC0_CSA_ADDR_HI
+#define SDMA7_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_IB_SUB_REMAIN
+#define SDMA7_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC0_PREEMPT
+#define SDMA7_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC0_DUMMY_REG
+#define SDMA7_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC0_RB_AQL_CNTL
+#define SDMA7_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC0_MINOR_PTR_UPDATE
+#define SDMA7_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC0_MIDCMD_DATA0
+#define SDMA7_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA1
+#define SDMA7_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA2
+#define SDMA7_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA3
+#define SDMA7_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA4
+#define SDMA7_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA5
+#define SDMA7_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA6
+#define SDMA7_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA7
+#define SDMA7_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_DATA8
+#define SDMA7_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC0_MIDCMD_CNTL
+#define SDMA7_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC1_RB_CNTL
+#define SDMA7_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC1_RB_BASE
+#define SDMA7_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_BASE_HI
+#define SDMA7_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC1_RB_RPTR
+#define SDMA7_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_RPTR_HI
+#define SDMA7_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_WPTR
+#define SDMA7_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_WPTR_HI
+#define SDMA7_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC1_RB_RPTR_ADDR_HI
+#define SDMA7_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_RPTR_ADDR_LO
+#define SDMA7_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC1_IB_CNTL
+#define SDMA7_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC1_IB_RPTR
+#define SDMA7_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC1_IB_OFFSET
+#define SDMA7_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC1_IB_BASE_LO
+#define SDMA7_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC1_IB_BASE_HI
+#define SDMA7_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_IB_SIZE
+#define SDMA7_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC1_SKIP_CNTL
+#define SDMA7_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC1_CONTEXT_STATUS
+#define SDMA7_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC1_DOORBELL
+#define SDMA7_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC1_STATUS
+#define SDMA7_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC1_DOORBELL_LOG
+#define SDMA7_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC1_WATERMARK
+#define SDMA7_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC1_DOORBELL_OFFSET
+#define SDMA7_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC1_CSA_ADDR_LO
+#define SDMA7_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC1_CSA_ADDR_HI
+#define SDMA7_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_IB_SUB_REMAIN
+#define SDMA7_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC1_PREEMPT
+#define SDMA7_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC1_DUMMY_REG
+#define SDMA7_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC1_RB_AQL_CNTL
+#define SDMA7_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC1_MINOR_PTR_UPDATE
+#define SDMA7_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC1_MIDCMD_DATA0
+#define SDMA7_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA1
+#define SDMA7_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA2
+#define SDMA7_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA3
+#define SDMA7_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA4
+#define SDMA7_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA5
+#define SDMA7_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA6
+#define SDMA7_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA7
+#define SDMA7_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_DATA8
+#define SDMA7_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC1_MIDCMD_CNTL
+#define SDMA7_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC2_RB_CNTL
+#define SDMA7_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC2_RB_BASE
+#define SDMA7_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_BASE_HI
+#define SDMA7_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC2_RB_RPTR
+#define SDMA7_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_RPTR_HI
+#define SDMA7_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_WPTR
+#define SDMA7_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_WPTR_HI
+#define SDMA7_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC2_RB_RPTR_ADDR_HI
+#define SDMA7_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_RPTR_ADDR_LO
+#define SDMA7_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC2_IB_CNTL
+#define SDMA7_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC2_IB_RPTR
+#define SDMA7_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC2_IB_OFFSET
+#define SDMA7_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC2_IB_BASE_LO
+#define SDMA7_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC2_IB_BASE_HI
+#define SDMA7_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_IB_SIZE
+#define SDMA7_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC2_SKIP_CNTL
+#define SDMA7_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC2_CONTEXT_STATUS
+#define SDMA7_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC2_DOORBELL
+#define SDMA7_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC2_STATUS
+#define SDMA7_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC2_DOORBELL_LOG
+#define SDMA7_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC2_WATERMARK
+#define SDMA7_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC2_DOORBELL_OFFSET
+#define SDMA7_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC2_CSA_ADDR_LO
+#define SDMA7_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC2_CSA_ADDR_HI
+#define SDMA7_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_IB_SUB_REMAIN
+#define SDMA7_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC2_PREEMPT
+#define SDMA7_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC2_DUMMY_REG
+#define SDMA7_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC2_RB_AQL_CNTL
+#define SDMA7_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC2_MINOR_PTR_UPDATE
+#define SDMA7_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC2_MIDCMD_DATA0
+#define SDMA7_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA1
+#define SDMA7_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA2
+#define SDMA7_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA3
+#define SDMA7_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA4
+#define SDMA7_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA5
+#define SDMA7_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA6
+#define SDMA7_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA7
+#define SDMA7_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_DATA8
+#define SDMA7_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC2_MIDCMD_CNTL
+#define SDMA7_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC3_RB_CNTL
+#define SDMA7_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC3_RB_BASE
+#define SDMA7_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_BASE_HI
+#define SDMA7_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC3_RB_RPTR
+#define SDMA7_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_RPTR_HI
+#define SDMA7_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_WPTR
+#define SDMA7_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_WPTR_HI
+#define SDMA7_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC3_RB_RPTR_ADDR_HI
+#define SDMA7_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_RPTR_ADDR_LO
+#define SDMA7_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC3_IB_CNTL
+#define SDMA7_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC3_IB_RPTR
+#define SDMA7_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC3_IB_OFFSET
+#define SDMA7_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC3_IB_BASE_LO
+#define SDMA7_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC3_IB_BASE_HI
+#define SDMA7_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_IB_SIZE
+#define SDMA7_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC3_SKIP_CNTL
+#define SDMA7_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC3_CONTEXT_STATUS
+#define SDMA7_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC3_DOORBELL
+#define SDMA7_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC3_STATUS
+#define SDMA7_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC3_DOORBELL_LOG
+#define SDMA7_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC3_WATERMARK
+#define SDMA7_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC3_DOORBELL_OFFSET
+#define SDMA7_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC3_CSA_ADDR_LO
+#define SDMA7_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC3_CSA_ADDR_HI
+#define SDMA7_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_IB_SUB_REMAIN
+#define SDMA7_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC3_PREEMPT
+#define SDMA7_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC3_DUMMY_REG
+#define SDMA7_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC3_RB_AQL_CNTL
+#define SDMA7_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC3_MINOR_PTR_UPDATE
+#define SDMA7_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC3_MIDCMD_DATA0
+#define SDMA7_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA1
+#define SDMA7_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA2
+#define SDMA7_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA3
+#define SDMA7_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA4
+#define SDMA7_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA5
+#define SDMA7_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA6
+#define SDMA7_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA7
+#define SDMA7_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_DATA8
+#define SDMA7_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC3_MIDCMD_CNTL
+#define SDMA7_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC4_RB_CNTL
+#define SDMA7_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC4_RB_BASE
+#define SDMA7_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_BASE_HI
+#define SDMA7_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC4_RB_RPTR
+#define SDMA7_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_RPTR_HI
+#define SDMA7_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_WPTR
+#define SDMA7_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_WPTR_HI
+#define SDMA7_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC4_RB_RPTR_ADDR_HI
+#define SDMA7_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_RPTR_ADDR_LO
+#define SDMA7_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC4_IB_CNTL
+#define SDMA7_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC4_IB_RPTR
+#define SDMA7_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC4_IB_OFFSET
+#define SDMA7_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC4_IB_BASE_LO
+#define SDMA7_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC4_IB_BASE_HI
+#define SDMA7_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_IB_SIZE
+#define SDMA7_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC4_SKIP_CNTL
+#define SDMA7_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC4_CONTEXT_STATUS
+#define SDMA7_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC4_DOORBELL
+#define SDMA7_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC4_STATUS
+#define SDMA7_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC4_DOORBELL_LOG
+#define SDMA7_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC4_WATERMARK
+#define SDMA7_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC4_DOORBELL_OFFSET
+#define SDMA7_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC4_CSA_ADDR_LO
+#define SDMA7_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC4_CSA_ADDR_HI
+#define SDMA7_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_IB_SUB_REMAIN
+#define SDMA7_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC4_PREEMPT
+#define SDMA7_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC4_DUMMY_REG
+#define SDMA7_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC4_RB_AQL_CNTL
+#define SDMA7_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC4_MINOR_PTR_UPDATE
+#define SDMA7_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC4_MIDCMD_DATA0
+#define SDMA7_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA1
+#define SDMA7_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA2
+#define SDMA7_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA3
+#define SDMA7_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA4
+#define SDMA7_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA5
+#define SDMA7_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA6
+#define SDMA7_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA7
+#define SDMA7_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_DATA8
+#define SDMA7_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC4_MIDCMD_CNTL
+#define SDMA7_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC5_RB_CNTL
+#define SDMA7_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC5_RB_BASE
+#define SDMA7_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_BASE_HI
+#define SDMA7_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC5_RB_RPTR
+#define SDMA7_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_RPTR_HI
+#define SDMA7_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_WPTR
+#define SDMA7_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_WPTR_HI
+#define SDMA7_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC5_RB_RPTR_ADDR_HI
+#define SDMA7_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_RPTR_ADDR_LO
+#define SDMA7_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC5_IB_CNTL
+#define SDMA7_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC5_IB_RPTR
+#define SDMA7_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC5_IB_OFFSET
+#define SDMA7_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC5_IB_BASE_LO
+#define SDMA7_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC5_IB_BASE_HI
+#define SDMA7_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_IB_SIZE
+#define SDMA7_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC5_SKIP_CNTL
+#define SDMA7_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC5_CONTEXT_STATUS
+#define SDMA7_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC5_DOORBELL
+#define SDMA7_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC5_STATUS
+#define SDMA7_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC5_DOORBELL_LOG
+#define SDMA7_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC5_WATERMARK
+#define SDMA7_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC5_DOORBELL_OFFSET
+#define SDMA7_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC5_CSA_ADDR_LO
+#define SDMA7_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC5_CSA_ADDR_HI
+#define SDMA7_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_IB_SUB_REMAIN
+#define SDMA7_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC5_PREEMPT
+#define SDMA7_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC5_DUMMY_REG
+#define SDMA7_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC5_RB_AQL_CNTL
+#define SDMA7_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC5_MINOR_PTR_UPDATE
+#define SDMA7_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC5_MIDCMD_DATA0
+#define SDMA7_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA1
+#define SDMA7_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA2
+#define SDMA7_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA3
+#define SDMA7_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA4
+#define SDMA7_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA5
+#define SDMA7_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA6
+#define SDMA7_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA7
+#define SDMA7_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_DATA8
+#define SDMA7_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC5_MIDCMD_CNTL
+#define SDMA7_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC6_RB_CNTL
+#define SDMA7_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC6_RB_BASE
+#define SDMA7_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_BASE_HI
+#define SDMA7_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC6_RB_RPTR
+#define SDMA7_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_RPTR_HI
+#define SDMA7_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_WPTR
+#define SDMA7_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_WPTR_HI
+#define SDMA7_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC6_RB_RPTR_ADDR_HI
+#define SDMA7_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_RPTR_ADDR_LO
+#define SDMA7_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC6_IB_CNTL
+#define SDMA7_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC6_IB_RPTR
+#define SDMA7_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC6_IB_OFFSET
+#define SDMA7_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC6_IB_BASE_LO
+#define SDMA7_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC6_IB_BASE_HI
+#define SDMA7_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_IB_SIZE
+#define SDMA7_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC6_SKIP_CNTL
+#define SDMA7_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC6_CONTEXT_STATUS
+#define SDMA7_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC6_DOORBELL
+#define SDMA7_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC6_STATUS
+#define SDMA7_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC6_DOORBELL_LOG
+#define SDMA7_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC6_WATERMARK
+#define SDMA7_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC6_DOORBELL_OFFSET
+#define SDMA7_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC6_CSA_ADDR_LO
+#define SDMA7_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC6_CSA_ADDR_HI
+#define SDMA7_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_IB_SUB_REMAIN
+#define SDMA7_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC6_PREEMPT
+#define SDMA7_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC6_DUMMY_REG
+#define SDMA7_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC6_RB_AQL_CNTL
+#define SDMA7_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC6_MINOR_PTR_UPDATE
+#define SDMA7_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC6_MIDCMD_DATA0
+#define SDMA7_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA1
+#define SDMA7_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA2
+#define SDMA7_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA3
+#define SDMA7_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA4
+#define SDMA7_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA5
+#define SDMA7_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA6
+#define SDMA7_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA7
+#define SDMA7_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_DATA8
+#define SDMA7_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC6_MIDCMD_CNTL
+#define SDMA7_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA7_RLC7_RB_CNTL
+#define SDMA7_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA7_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA7_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA7_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA7_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA7_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA7_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA7_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA7_RLC7_RB_BASE
+#define SDMA7_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA7_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_BASE_HI
+#define SDMA7_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA7_RLC7_RB_RPTR
+#define SDMA7_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_RPTR_HI
+#define SDMA7_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_WPTR
+#define SDMA7_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA7_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_WPTR_HI
+#define SDMA7_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA7_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA7_RLC7_RB_RPTR_ADDR_HI
+#define SDMA7_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_RPTR_ADDR_LO
+#define SDMA7_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA7_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA7_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC7_IB_CNTL
+#define SDMA7_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA7_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA7_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA7_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA7_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA7_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA7_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA7_RLC7_IB_RPTR
+#define SDMA7_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA7_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC7_IB_OFFSET
+#define SDMA7_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA7_RLC7_IB_BASE_LO
+#define SDMA7_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA7_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA7_RLC7_IB_BASE_HI
+#define SDMA7_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_IB_SIZE
+#define SDMA7_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA7_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC7_SKIP_CNTL
+#define SDMA7_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA7_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA7_RLC7_CONTEXT_STATUS
+#define SDMA7_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA7_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA7_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA7_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA7_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA7_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA7_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA7_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA7_RLC7_DOORBELL
+#define SDMA7_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA7_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA7_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA7_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA7_RLC7_STATUS
+#define SDMA7_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA7_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA7_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA7_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA7_RLC7_DOORBELL_LOG
+#define SDMA7_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA7_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA7_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA7_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA7_RLC7_WATERMARK
+#define SDMA7_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA7_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA7_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA7_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA7_RLC7_DOORBELL_OFFSET
+#define SDMA7_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA7_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA7_RLC7_CSA_ADDR_LO
+#define SDMA7_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC7_CSA_ADDR_HI
+#define SDMA7_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_IB_SUB_REMAIN
+#define SDMA7_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA7_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA7_RLC7_PREEMPT
+#define SDMA7_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA7_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA7_RLC7_DUMMY_REG
+#define SDMA7_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA7_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA7_RLC7_RB_AQL_CNTL
+#define SDMA7_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA7_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA7_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA7_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA7_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA7_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA7_RLC7_MINOR_PTR_UPDATE
+#define SDMA7_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA7_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA7_RLC7_MIDCMD_DATA0
+#define SDMA7_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA1
+#define SDMA7_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA2
+#define SDMA7_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA3
+#define SDMA7_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA4
+#define SDMA7_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA5
+#define SDMA7_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA6
+#define SDMA7_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA7
+#define SDMA7_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_DATA8
+#define SDMA7_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA7_RLC7_MIDCMD_CNTL
+#define SDMA7_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA7_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA7_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA7_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA7_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA7_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA7_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA7_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
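
As an illustrative sketch only (not part of the patch itself): every register in the generated sh_mask headers above gets a <FIELD>__SHIFT / <FIELD>_MASK pair, and the two are combined to extract a field from a raw 32-bit register value. The status value below is a made-up constant; in the driver it would come from a register read helper.

#include <stdio.h>

#define SDMA7_RLC7_CONTEXT_STATUS__IDLE__SHIFT      0x2
#define SDMA7_RLC7_CONTEXT_STATUS__IDLE_MASK        0x00000004L
#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED_MASK   0x00000200L

/* extract one field from a raw register value using its mask/shift pair */
static unsigned int get_field(unsigned long val, unsigned long mask,
			      unsigned int shift)
{
	return (unsigned int)((val & mask) >> shift);
}

int main(void)
{
	unsigned long status = 0x00000204UL;	/* hypothetical register value */

	printf("idle=%u preempted=%u\n",
	       get_field(status, SDMA7_RLC7_CONTEXT_STATUS__IDLE_MASK,
			 SDMA7_RLC7_CONTEXT_STATUS__IDLE__SHIFT),
	       get_field(status, SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED_MASK,
			 SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT));
	return 0;
}
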
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index dbc2e723f659..71169daa701a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xc050015c
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 6af9f0217b34..61a9a84e0c3a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index bd3685166779..351446754c72 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xc050015c
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
index 627906674fe8..4bfd5f8ba66c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index f35aba72e640..21da61c398f5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -52,6 +52,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xc050015c
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 481ee6560aa9..f64fe0fbcb32 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -220,6 +220,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
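
An illustrative sketch only, not part of the patch: the CG_SPLL_STATUS / SPLL_CHG_STATUS definitions added in the SMU 7.x hunks above are the kind of pair a power-management path would poll until the SPLL reports a clock change as complete. The read helper here is a stand-in; the real driver would typically read the indexed ixCG_SPLL_STATUS register through an SMC register accessor.

#include <stdio.h>

#define ixCG_SPLL_STATUS                       0xc050015c
#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK   0x2
#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1

/* stand-in for an indexed SMC register read; returns a canned value here */
static unsigned int read_smc_ind(unsigned int reg)
{
	(void)reg;
	return 0x2;	/* pretend the change-status bit is already set */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 1000; i++) {
		unsigned int status = read_smc_ind(ixCG_SPLL_STATUS);

		if ((status & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK) >>
		    CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT)
			break;
	}
	printf("SPLL change status observed after %u poll(s)\n", i + 1);
	return 0;
}
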
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
index 5df70484bc7d..687d6843c258 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
@@ -29,6 +29,190 @@
#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 0
#define mmSMUIO_MCM_CONFIG 0x0024
#define mmSMUIO_MCM_CONFIG_BASE_IDX 0
+#define mmCKSVII2C_IC_CON 0x0040
+#define mmCKSVII2C_IC_CON_BASE_IDX 0
+#define mmCKSVII2C_IC_TAR 0x0041
+#define mmCKSVII2C_IC_TAR_BASE_IDX 0
+#define mmCKSVII2C_IC_SAR 0x0042
+#define mmCKSVII2C_IC_SAR_BASE_IDX 0
+#define mmCKSVII2C_IC_HS_MADDR 0x0043
+#define mmCKSVII2C_IC_HS_MADDR_BASE_IDX 0
+#define mmCKSVII2C_IC_DATA_CMD 0x0044
+#define mmCKSVII2C_IC_DATA_CMD_BASE_IDX 0
+#define mmCKSVII2C_IC_SS_SCL_HCNT 0x0045
+#define mmCKSVII2C_IC_SS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C_IC_SS_SCL_LCNT 0x0046
+#define mmCKSVII2C_IC_SS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C_IC_FS_SCL_HCNT 0x0047
+#define mmCKSVII2C_IC_FS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C_IC_FS_SCL_LCNT 0x0048
+#define mmCKSVII2C_IC_FS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C_IC_HS_SCL_HCNT 0x0049
+#define mmCKSVII2C_IC_HS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C_IC_HS_SCL_LCNT 0x004a
+#define mmCKSVII2C_IC_HS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C_IC_INTR_STAT 0x004b
+#define mmCKSVII2C_IC_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C_IC_INTR_MASK 0x004c
+#define mmCKSVII2C_IC_INTR_MASK_BASE_IDX 0
+#define mmCKSVII2C_IC_RAW_INTR_STAT 0x004d
+#define mmCKSVII2C_IC_RAW_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C_IC_RX_TL 0x004e
+#define mmCKSVII2C_IC_RX_TL_BASE_IDX 0
+#define mmCKSVII2C_IC_TX_TL 0x004f
+#define mmCKSVII2C_IC_TX_TL_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_INTR 0x0050
+#define mmCKSVII2C_IC_CLR_INTR_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_RX_UNDER 0x0051
+#define mmCKSVII2C_IC_CLR_RX_UNDER_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_RX_OVER 0x0052
+#define mmCKSVII2C_IC_CLR_RX_OVER_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_TX_OVER 0x0053
+#define mmCKSVII2C_IC_CLR_TX_OVER_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_RD_REQ 0x0054
+#define mmCKSVII2C_IC_CLR_RD_REQ_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_TX_ABRT 0x0055
+#define mmCKSVII2C_IC_CLR_TX_ABRT_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_RX_DONE 0x0056
+#define mmCKSVII2C_IC_CLR_RX_DONE_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_ACTIVITY 0x0057
+#define mmCKSVII2C_IC_CLR_ACTIVITY_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_STOP_DET 0x0058
+#define mmCKSVII2C_IC_CLR_STOP_DET_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_START_DET 0x0059
+#define mmCKSVII2C_IC_CLR_START_DET_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_GEN_CALL 0x005a
+#define mmCKSVII2C_IC_CLR_GEN_CALL_BASE_IDX 0
+#define mmCKSVII2C_IC_ENABLE 0x005b
+#define mmCKSVII2C_IC_ENABLE_BASE_IDX 0
+#define mmCKSVII2C_IC_STATUS 0x005c
+#define mmCKSVII2C_IC_STATUS_BASE_IDX 0
+#define mmCKSVII2C_IC_TXFLR 0x005d
+#define mmCKSVII2C_IC_TXFLR_BASE_IDX 0
+#define mmCKSVII2C_IC_RXFLR 0x005e
+#define mmCKSVII2C_IC_RXFLR_BASE_IDX 0
+#define mmCKSVII2C_IC_SDA_HOLD 0x005f
+#define mmCKSVII2C_IC_SDA_HOLD_BASE_IDX 0
+#define mmCKSVII2C_IC_TX_ABRT_SOURCE 0x0060
+#define mmCKSVII2C_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define mmCKSVII2C_IC_SLV_DATA_NACK_ONLY 0x0061
+#define mmCKSVII2C_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define mmCKSVII2C_IC_DMA_CR 0x0062
+#define mmCKSVII2C_IC_DMA_CR_BASE_IDX 0
+#define mmCKSVII2C_IC_DMA_TDLR 0x0063
+#define mmCKSVII2C_IC_DMA_TDLR_BASE_IDX 0
+#define mmCKSVII2C_IC_DMA_RDLR 0x0064
+#define mmCKSVII2C_IC_DMA_RDLR_BASE_IDX 0
+#define mmCKSVII2C_IC_SDA_SETUP 0x0065
+#define mmCKSVII2C_IC_SDA_SETUP_BASE_IDX 0
+#define mmCKSVII2C_IC_ACK_GENERAL_CALL 0x0066
+#define mmCKSVII2C_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define mmCKSVII2C_IC_ENABLE_STATUS 0x0067
+#define mmCKSVII2C_IC_ENABLE_STATUS_BASE_IDX 0
+#define mmCKSVII2C_IC_FS_SPKLEN 0x0068
+#define mmCKSVII2C_IC_FS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C_IC_HS_SPKLEN 0x0069
+#define mmCKSVII2C_IC_HS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C_IC_CLR_RESTART_DET 0x006a
+#define mmCKSVII2C_IC_CLR_RESTART_DET_BASE_IDX 0
+#define mmCKSVII2C_IC_COMP_PARAM_1 0x006b
+#define mmCKSVII2C_IC_COMP_PARAM_1_BASE_IDX 0
+#define mmCKSVII2C_IC_COMP_VERSION 0x006c
+#define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
+#define mmCKSVII2C_IC_COMP_TYPE 0x006d
+#define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CON 0x0080
+#define mmCKSVII2C1_IC_CON_BASE_IDX 0
+#define mmCKSVII2C1_IC_TAR 0x0081
+#define mmCKSVII2C1_IC_TAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SAR 0x0082
+#define mmCKSVII2C1_IC_SAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_MADDR 0x0083
+#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DATA_CMD 0x0084
+#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_STAT 0x008b
+#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_MASK 0x008c
+#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_RX_TL 0x008e
+#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_TL 0x008f
+#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_INTR 0x0090
+#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_START_DET 0x0099
+#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE 0x009b
+#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define mmCKSVII2C1_IC_STATUS 0x009c
+#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_TXFLR 0x009d
+#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_RXFLR 0x009e
+#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_HOLD 0x009f
+#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_CR 0x00a2
+#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab
+#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac
+#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_TYPE 0x00ad
+#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
#define mmSMUIO_MP_RESET_INTR 0x00c1
#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
#define mmSMUIO_SOC_HALT 0x00c2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
index 237961558e89..6905a9618127 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
@@ -37,6 +37,413 @@
#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L
+//CKSVII2C_IC_CON
+#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT 0x5
+#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C_IC_CON__TX_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C_IC_CON__IC_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C_IC_CON__IC_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C_IC_CON__TX_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C_IC_TAR
+#define CKSVII2C_IC_TAR__IC_TAR__SHIFT 0x0
+#define CKSVII2C_IC_TAR__GC_OR_START__SHIFT 0xa
+#define CKSVII2C_IC_TAR__SPECIAL__SHIFT 0xb
+#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C_IC_TAR__IC_TAR_MASK 0x000003FFL
+#define CKSVII2C_IC_TAR__GC_OR_START_MASK 0x00000400L
+#define CKSVII2C_IC_TAR__SPECIAL_MASK 0x00000800L
+#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C_IC_SAR
+#define CKSVII2C_IC_SAR__IC_SAR__SHIFT 0x0
+#define CKSVII2C_IC_SAR__IC_SAR_MASK 0x000003FFL
+//CKSVII2C_IC_HS_MADDR
+#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR__SHIFT 0x0
+#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR_MASK 0x00000007L
+//CKSVII2C_IC_DATA_CMD
+#define CKSVII2C_IC_DATA_CMD__DAT__SHIFT 0x0
+#define CKSVII2C_IC_DATA_CMD__CMD__SHIFT 0x8
+#define CKSVII2C_IC_DATA_CMD__STOP__SHIFT 0x9
+#define CKSVII2C_IC_DATA_CMD__RESTART__SHIFT 0xa
+#define CKSVII2C_IC_DATA_CMD__DAT_MASK 0x000000FFL
+#define CKSVII2C_IC_DATA_CMD__CMD_MASK 0x00000100L
+#define CKSVII2C_IC_DATA_CMD__STOP_MASK 0x00000200L
+#define CKSVII2C_IC_DATA_CMD__RESTART_MASK 0x00000400L
+//CKSVII2C_IC_SS_SCL_HCNT
+#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_SS_SCL_LCNT
+#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_FS_SCL_HCNT
+#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_FS_SCL_LCNT
+#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_HS_SCL_HCNT
+#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_HS_SCL_LCNT
+#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_INTR_STAT
+#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_INTR_STAT__R_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_INTR_STAT__R_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_INTR_STAT__R_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_INTR_STAT__R_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_INTR_STAT__R_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_INTR_STAT__R_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_INTR_STAT__R_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_INTR_STAT__R_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_INTR_STAT__R_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_INTR_STAT__R_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_INTR_STAT__R_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_INTR_STAT__R_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_INTR_STAT__R_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_INTR_STAT__R_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_INTR_MASK
+#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_INTR_MASK__M_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_INTR_MASK__M_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_INTR_MASK__M_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_INTR_MASK__M_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_INTR_MASK__M_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_INTR_MASK__M_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_INTR_MASK__M_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_INTR_MASK__M_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_INTR_MASK__M_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_INTR_MASK__M_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_INTR_MASK__M_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_INTR_MASK__M_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_INTR_MASK__M_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_INTR_MASK__M_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_RAW_INTR_STAT
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_RAW_INTR_STAT__R_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_RAW_INTR_STAT__R_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_RAW_INTR_STAT__R_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_RAW_INTR_STAT__R_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_RAW_INTR_STAT__R_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_RAW_INTR_STAT__R_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_RAW_INTR_STAT__R_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_RAW_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_RAW_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_RX_TL
+//CKSVII2C_IC_TX_TL
+//CKSVII2C_IC_CLR_INTR
+//CKSVII2C_IC_CLR_RX_UNDER
+//CKSVII2C_IC_CLR_RX_OVER
+//CKSVII2C_IC_CLR_TX_OVER
+//CKSVII2C_IC_CLR_RD_REQ
+//CKSVII2C_IC_CLR_TX_ABRT
+//CKSVII2C_IC_CLR_RX_DONE
+//CKSVII2C_IC_CLR_ACTIVITY
+#define CKSVII2C_IC_CLR_ACTIVITY__CLR_ACTIVITY__SHIFT 0x0
+#define CKSVII2C_IC_CLR_ACTIVITY__CLR_ACTIVITY_MASK 0x00000001L
+//CKSVII2C_IC_CLR_STOP_DET
+//CKSVII2C_IC_CLR_START_DET
+//CKSVII2C_IC_CLR_GEN_CALL
+//CKSVII2C_IC_ENABLE
+#define CKSVII2C_IC_ENABLE__ENABLE__SHIFT 0x0
+#define CKSVII2C_IC_ENABLE__ABORT__SHIFT 0x1
+#define CKSVII2C_IC_ENABLE__ENABLE_MASK 0x00000001L
+#define CKSVII2C_IC_ENABLE__ABORT_MASK 0x00000002L
+//CKSVII2C_IC_STATUS
+#define CKSVII2C_IC_STATUS__ACTIVITY__SHIFT 0x0
+#define CKSVII2C_IC_STATUS__TFNF__SHIFT 0x1
+#define CKSVII2C_IC_STATUS__TFE__SHIFT 0x2
+#define CKSVII2C_IC_STATUS__RFNE__SHIFT 0x3
+#define CKSVII2C_IC_STATUS__RFF__SHIFT 0x4
+#define CKSVII2C_IC_STATUS__MST_ACTIVITY__SHIFT 0x5
+#define CKSVII2C_IC_STATUS__SLV_ACTIVITY__SHIFT 0x6
+#define CKSVII2C_IC_STATUS__ACTIVITY_MASK 0x00000001L
+#define CKSVII2C_IC_STATUS__TFNF_MASK 0x00000002L
+#define CKSVII2C_IC_STATUS__TFE_MASK 0x00000004L
+#define CKSVII2C_IC_STATUS__RFNE_MASK 0x00000008L
+#define CKSVII2C_IC_STATUS__RFF_MASK 0x00000010L
+#define CKSVII2C_IC_STATUS__MST_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C_IC_STATUS__SLV_ACTIVITY_MASK 0x00000040L
+//CKSVII2C_IC_TXFLR
+//CKSVII2C_IC_RXFLR
+//CKSVII2C_IC_SDA_HOLD
+#define CKSVII2C_IC_SDA_HOLD__IC_SDA_HOLD__SHIFT 0x0
+#define CKSVII2C_IC_SDA_HOLD__IC_SDA_HOLD_MASK 0x00FFFFFFL
+//CKSVII2C_IC_TX_ABRT_SOURCE
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_7B_ADDR_NOACK__SHIFT 0x0
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_10ADDR1_NOACK__SHIFT 0x1
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_10ADDR2_NOACK__SHIFT 0x2
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_TXDATA_NOACK__SHIFT 0x3
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_7B_ADDR_NOACK_MASK 0x00000001L
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_10ADDR1_NOACK_MASK 0x00000002L
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_10ADDR2_NOACK_MASK 0x00000004L
+#define CKSVII2C_IC_TX_ABRT_SOURCE__ABRT_TXDATA_NOACK_MASK 0x00000008L
+//CKSVII2C_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C_IC_DMA_CR
+//CKSVII2C_IC_DMA_TDLR
+//CKSVII2C_IC_DMA_RDLR
+//CKSVII2C_IC_SDA_SETUP
+#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP__SHIFT 0x0
+#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP_MASK 0x000000FFL
+//CKSVII2C_IC_ACK_GENERAL_CALL
+#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C_IC_ENABLE_STATUS
+#define CKSVII2C_IC_ENABLE_STATUS__IC_EN__SHIFT 0x0
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_ABORTED__SHIFT 0x1
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2
+#define CKSVII2C_IC_ENABLE_STATUS__IC_EN_MASK 0x00000001L
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_ABORTED_MASK 0x00000002L
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L
+//CKSVII2C_IC_FS_SPKLEN
+#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN__SHIFT 0x0
+#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN_MASK 0x000000FFL
+//CKSVII2C_IC_HS_SPKLEN
+#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN__SHIFT 0x0
+#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN_MASK 0x000000FFL
+//CKSVII2C_IC_CLR_RESTART_DET
+//CKSVII2C_IC_COMP_PARAM_1
+#define CKSVII2C_IC_COMP_PARAM_1__COMP_PARAM_1__SHIFT 0x0
+#define CKSVII2C_IC_COMP_PARAM_1__COMP_PARAM_1_MASK 0xFFFFFFFFL
+//CKSVII2C_IC_COMP_VERSION
+#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION__SHIFT 0x0
+#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION_MASK 0xFFFFFFFFL
+//CKSVII2C_IC_COMP_TYPE
+#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
+#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+//CKSVII2C1_IC_TX_TL
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+//CKSVII2C1_IC_RXFLR
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
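
An illustrative sketch only, not part of the patch: the CKSVII2C_IC_CON field macros above appear to follow a DesignWare-style I2C register layout, so a controller configuration word can be assembled by shifting field values into place. The speed encoding used below (2 = fast mode) is an assumption, and writing the value to the hardware is driver-specific and not shown.

#include <stdio.h>

#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT    0x0
#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1
#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT     0x5
#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT  0x6

int main(void)
{
	unsigned int con = 0;

	con |= 1u << CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT;	/* master mode */
	con |= 2u << CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT;	/* 2 = fast mode (assumed encoding) */
	con |= 1u << CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT;	/* allow repeated START */
	con |= 1u << CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT;	/* slave interface off */

	printf("CKSVII2C_IC_CON = 0x%08x\n", con);
	return 0;
}
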
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
new file mode 100644
index 000000000000..0d6b594be775
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_1_OFFSET_HEADER
+#define _umc_6_1_1_OFFSET_HEADER
+
+#define mmUMCCH0_0_EccErrCntSel 0x0360
+#define mmUMCCH0_0_EccErrCntSel_BASE_IDX 0
+#define mmUMCCH0_0_EccErrCnt 0x0361
+#define mmUMCCH0_0_EccErrCnt_BASE_IDX 0
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 0
+
+#endif
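
An illustrative sketch only, not part of the patch: in the SOC15-style addressing used by amdgpu, each mm<REG> macro in the new UMC offset header is a dword offset within an IP block segment, and the matching <REG>_BASE_IDX selects which segment base (from a per-ASIC table) it is added to. The segment base below is invented for the sketch; real values come from the ASIC's address tables.

#include <stdio.h>

#define mmUMCCH0_0_EccErrCnt          0x0361
#define mmUMCCH0_0_EccErrCnt_BASE_IDX 0

/* hypothetical segment bases for one UMC instance (not real hardware values) */
static const unsigned int umc_seg_base[] = { 0x00014000 };

int main(void)
{
	unsigned int reg = umc_seg_base[mmUMCCH0_0_EccErrCnt_BASE_IDX] +
			   mmUMCCH0_0_EccErrCnt;

	printf("UMCCH0_0_EccErrCnt register index = 0x%08x\n", reg);
	return 0;
}
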
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h
new file mode 100644
index 000000000000..45c888280af9
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_1_SH_MASK_HEADER
+#define _umc_6_1_1_SH_MASK_HEADER
+
+//UMCCH0_0_EccErrCntSel
+#define UMCCH0_0_EccErrCntSel__EccErrCntCsSel__SHIFT 0x0
+#define UMCCH0_0_EccErrCntSel__EccErrInt__SHIFT 0xc
+#define UMCCH0_0_EccErrCntSel__EccErrCntEn__SHIFT 0xf
+#define UMCCH0_0_EccErrCntSel__EccErrCntCsSel_MASK 0x0000000FL
+#define UMCCH0_0_EccErrCntSel__EccErrInt_MASK 0x00003000L
+#define UMCCH0_0_EccErrCntSel__EccErrCntEn_MASK 0x00008000L
+//UMCCH0_0_EccErrCnt
+#define UMCCH0_0_EccErrCnt__EccErrCnt__SHIFT 0x0
+#define UMCCH0_0_EccErrCnt__EccErrCnt_MASK 0x0000FFFFL
+//MCA_UMC_UMC0_MCUMC_STATUST0
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV0__SHIFT 0x16
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV1__SHIFT 0x26
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV2__SHIFT 0x29
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred__SHIFT 0x2c
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC__SHIFT 0x2d
+#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC__SHIFT 0x2e
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV3__SHIFT 0x2f
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent__SHIFT 0x34
+#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV__SHIFT 0x35
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV4__SHIFT 0x36
+#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC__SHIFT 0x37
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC__SHIFT 0x39
+#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV__SHIFT 0x3a
+#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV__SHIFT 0x3b
+#define MCA_UMC_UMC0_MCUMC_STATUST0__En__SHIFT 0x3c
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UC__SHIFT 0x3d
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Val__SHIFT 0x3f
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode_MASK 0x000000000000FFFFL
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt_MASK 0x00000000003F0000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV0_MASK 0x00000000FFC00000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId_MASK 0x0000003F00000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV1_MASK 0x000000C000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub_MASK 0x0000010000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV2_MASK 0x0000060000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison_MASK 0x0000080000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred_MASK 0x0000100000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC_MASK 0x0000200000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC_MASK 0x0000400000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV3_MASK 0x000F800000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent_MASK 0x0010000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV_MASK 0x0020000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV4_MASK 0x0040000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC_MASK 0x0080000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal_MASK 0x0100000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC_MASK 0x0200000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV_MASK 0x0400000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV_MASK 0x0800000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__En_MASK 0x1000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UC_MASK 0x2000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow_MASK 0x4000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Val_MASK 0x8000000000000000L
+//MCA_UMC_UMC0_MCUMC_ADDRT0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB_MASK 0x3F00000000000000L
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xC000000000000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
new file mode 100644
index 000000000000..ce005c674a18
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_OFFSET_HEADER
+#define _umc_6_1_2_OFFSET_HEADER
+
+#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360
+#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1
+#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361
+#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
new file mode 100644
index 000000000000..a5a8c993ec3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_SH_MASK_HEADER
+#define _umc_6_1_2_SH_MASK_HEADER
+
+//UMCCH0_0_EccErrCntSel_ARCT
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel__SHIFT 0x0
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt__SHIFT 0xc
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn__SHIFT 0xf
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel_MASK 0x0000000FL
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt_MASK 0x00003000L
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn_MASK 0x00008000L
+//UMCCH0_0_EccErrCnt_ARCT
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt__SHIFT 0x0
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt_MASK 0x0000FFFFL
+//MCA_UMC_UMC0_MCUMC_STATUST0_ARCT
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt__SHIFT 0x10
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0__SHIFT 0x16
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId__SHIFT 0x20
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1__SHIFT 0x26
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub__SHIFT 0x28
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2__SHIFT 0x29
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison__SHIFT 0x2b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred__SHIFT 0x2c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC__SHIFT 0x2d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC__SHIFT 0x2e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3__SHIFT 0x2f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent__SHIFT 0x34
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV__SHIFT 0x35
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4__SHIFT 0x36
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC__SHIFT 0x37
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC__SHIFT 0x39
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV__SHIFT 0x3a
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV__SHIFT 0x3b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En__SHIFT 0x3c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC__SHIFT 0x3d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val__SHIFT 0x3f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode_MASK 0x000000000000FFFFL
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt_MASK 0x00000000003F0000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0_MASK 0x00000000FFC00000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId_MASK 0x0000003F00000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1_MASK 0x000000C000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub_MASK 0x0000010000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2_MASK 0x0000060000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison_MASK 0x0000080000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred_MASK 0x0000100000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC_MASK 0x0000200000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC_MASK 0x0000400000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3_MASK 0x000F800000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent_MASK 0x0010000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV_MASK 0x0020000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4_MASK 0x0040000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC_MASK 0x0080000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal_MASK 0x0100000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC_MASK 0x0200000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV_MASK 0x0400000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV_MASK 0x0800000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En_MASK 0x1000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC_MASK 0x2000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow_MASK 0x4000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK 0x8000000000000000L
+//MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB_MASK 0x3F00000000000000L
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved_MASK 0xC000000000000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
new file mode 100644
index 000000000000..90350f46a0c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
@@ -0,0 +1,991 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _vcn_2_5_OFFSET_HEADER
+#define _vcn_2_5_OFFSET_HEADER
+
+// addressBlock: uvd0_mmsch_dec
+// base address: 0x1e000
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
+
+
+// addressBlock: uvd0_jpegnpdec
+// base address: 0x1e200
+#define mmUVD_JPEG_CNTL 0x0080
+#define mmUVD_JPEG_CNTL_BASE_IDX 0
+#define mmUVD_JPEG_RB_BASE 0x0081
+#define mmUVD_JPEG_RB_BASE_BASE_IDX 0
+#define mmUVD_JPEG_RB_WPTR 0x0082
+#define mmUVD_JPEG_RB_WPTR_BASE_IDX 0
+#define mmUVD_JPEG_RB_RPTR 0x0083
+#define mmUVD_JPEG_RB_RPTR_BASE_IDX 0
+#define mmUVD_JPEG_RB_SIZE 0x0084
+#define mmUVD_JPEG_RB_SIZE_BASE_IDX 0
+#define mmUVD_JPEG_DEC_SCRATCH0 0x0089
+#define mmUVD_JPEG_DEC_SCRATCH0_BASE_IDX 0
+#define mmUVD_JPEG_INT_EN 0x008a
+#define mmUVD_JPEG_INT_EN_BASE_IDX 0
+#define mmUVD_JPEG_INT_STAT 0x008b
+#define mmUVD_JPEG_INT_STAT_BASE_IDX 0
+#define mmUVD_JPEG_PITCH 0x009f
+#define mmUVD_JPEG_PITCH_BASE_IDX 0
+#define mmUVD_JPEG_UV_PITCH 0x00a0
+#define mmUVD_JPEG_UV_PITCH_BASE_IDX 0
+#define mmJPEG_DEC_Y_GFX8_TILING_SURFACE 0x00a1
+#define mmJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 0
+#define mmJPEG_DEC_UV_GFX8_TILING_SURFACE 0x00a2
+#define mmJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 0
+#define mmJPEG_DEC_GFX8_ADDR_CONFIG 0x00a3
+#define mmJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 0
+#define mmJPEG_DEC_Y_GFX10_TILING_SURFACE 0x00a4
+#define mmJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 0
+#define mmJPEG_DEC_UV_GFX10_TILING_SURFACE 0x00a5
+#define mmJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 0
+#define mmJPEG_DEC_GFX10_ADDR_CONFIG 0x00a6
+#define mmJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 0
+#define mmJPEG_DEC_ADDR_MODE 0x00a7
+#define mmJPEG_DEC_ADDR_MODE_BASE_IDX 0
+#define mmUVD_JPEG_GPCOM_CMD 0x00a9
+#define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 0
+#define mmUVD_JPEG_GPCOM_DATA0 0x00aa
+#define mmUVD_JPEG_GPCOM_DATA0_BASE_IDX 0
+#define mmUVD_JPEG_GPCOM_DATA1 0x00ab
+#define mmUVD_JPEG_GPCOM_DATA1_BASE_IDX 0
+#define mmUVD_JPEG_SCRATCH1 0x00ae
+#define mmUVD_JPEG_SCRATCH1_BASE_IDX 0
+#define mmUVD_JPEG_DEC_SOFT_RST 0x00af
+#define mmUVD_JPEG_DEC_SOFT_RST_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jpeg_enc_dec
+// base address: 0x1e300
+#define mmUVD_JPEG_ENC_INT_EN 0x00c1
+#define mmUVD_JPEG_ENC_INT_EN_BASE_IDX 0
+#define mmUVD_JPEG_ENC_INT_STATUS 0x00c2
+#define mmUVD_JPEG_ENC_INT_STATUS_BASE_IDX 0
+#define mmUVD_JPEG_ENC_ENGINE_CNTL 0x00c5
+#define mmUVD_JPEG_ENC_ENGINE_CNTL_BASE_IDX 0
+#define mmUVD_JPEG_ENC_SCRATCH1 0x00ce
+#define mmUVD_JPEG_ENC_SCRATCH1_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jpeg_enc_sclk_dec
+// base address: 0x1e380
+#define mmUVD_JPEG_ENC_STATUS 0x00e5
+#define mmUVD_JPEG_ENC_STATUS_BASE_IDX 0
+#define mmUVD_JPEG_ENC_PITCH 0x00e6
+#define mmUVD_JPEG_ENC_PITCH_BASE_IDX 0
+#define mmUVD_JPEG_ENC_LUMA_BASE 0x00e7
+#define mmUVD_JPEG_ENC_LUMA_BASE_BASE_IDX 0
+#define mmUVD_JPEG_ENC_CHROMAU_BASE 0x00e8
+#define mmUVD_JPEG_ENC_CHROMAU_BASE_BASE_IDX 0
+#define mmUVD_JPEG_ENC_CHROMAV_BASE 0x00e9
+#define mmUVD_JPEG_ENC_CHROMAV_BASE_BASE_IDX 0
+#define mmJPEG_ENC_Y_GFX10_TILING_SURFACE 0x00ea
+#define mmJPEG_ENC_Y_GFX10_TILING_SURFACE_BASE_IDX 0
+#define mmJPEG_ENC_UV_GFX10_TILING_SURFACE 0x00eb
+#define mmJPEG_ENC_UV_GFX10_TILING_SURFACE_BASE_IDX 0
+#define mmJPEG_ENC_GFX10_ADDR_CONFIG 0x00ec
+#define mmJPEG_ENC_GFX10_ADDR_CONFIG_BASE_IDX 0
+#define mmJPEG_ENC_ADDR_MODE 0x00ed
+#define mmJPEG_ENC_ADDR_MODE_BASE_IDX 0
+#define mmUVD_JPEG_ENC_GPCOM_CMD 0x00ee
+#define mmUVD_JPEG_ENC_GPCOM_CMD_BASE_IDX 0
+#define mmUVD_JPEG_ENC_GPCOM_DATA0 0x00ef
+#define mmUVD_JPEG_ENC_GPCOM_DATA0_BASE_IDX 0
+#define mmUVD_JPEG_ENC_GPCOM_DATA1 0x00f0
+#define mmUVD_JPEG_ENC_GPCOM_DATA1_BASE_IDX 0
+#define mmUVD_JPEG_ENC_CGC_CNTL 0x00f5
+#define mmUVD_JPEG_ENC_CGC_CNTL_BASE_IDX 0
+#define mmUVD_JPEG_ENC_SCRATCH0 0x00f6
+#define mmUVD_JPEG_ENC_SCRATCH0_BASE_IDX 0
+#define mmUVD_JPEG_ENC_SOFT_RST 0x00f7
+#define mmUVD_JPEG_ENC_SOFT_RST_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jrbc_dec
+// base address: 0x1e400
+#define mmUVD_JRBC_RB_WPTR 0x0100
+#define mmUVD_JRBC_RB_WPTR_BASE_IDX 0
+#define mmUVD_JRBC_RB_CNTL 0x0101
+#define mmUVD_JRBC_RB_CNTL_BASE_IDX 0
+#define mmUVD_JRBC_IB_SIZE 0x0102
+#define mmUVD_JRBC_IB_SIZE_BASE_IDX 0
+#define mmUVD_JRBC_URGENT_CNTL 0x0103
+#define mmUVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define mmUVD_JRBC_RB_REF_DATA 0x0104
+#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0105
+#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define mmUVD_JRBC_SOFT_RESET 0x0108
+#define mmUVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define mmUVD_JRBC_STATUS 0x0109
+#define mmUVD_JRBC_STATUS_BASE_IDX 0
+#define mmUVD_JRBC_RB_RPTR 0x010a
+#define mmUVD_JRBC_RB_RPTR_BASE_IDX 0
+#define mmUVD_JRBC_RB_BUF_STATUS 0x010b
+#define mmUVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define mmUVD_JRBC_IB_BUF_STATUS 0x010c
+#define mmUVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define mmUVD_JRBC_IB_SIZE_UPDATE 0x010d
+#define mmUVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define mmUVD_JRBC_IB_COND_RD_TIMER 0x010e
+#define mmUVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define mmUVD_JRBC_IB_REF_DATA 0x010f
+#define mmUVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define mmUVD_JPEG_PREEMPT_CMD 0x0110
+#define mmUVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define mmUVD_JPEG_PREEMPT_FENCE_DATA0 0x0111
+#define mmUVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define mmUVD_JPEG_PREEMPT_FENCE_DATA1 0x0112
+#define mmUVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define mmUVD_JRBC_RB_SIZE 0x0113
+#define mmUVD_JRBC_RB_SIZE_BASE_IDX 0
+#define mmUVD_JRBC_SCRATCH0 0x0114
+#define mmUVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jrbc_enc_dec
+// base address: 0x1e480
+#define mmUVD_JRBC_ENC_RB_WPTR 0x0120
+#define mmUVD_JRBC_ENC_RB_WPTR_BASE_IDX 0
+#define mmUVD_JRBC_ENC_RB_CNTL 0x0121
+#define mmUVD_JRBC_ENC_RB_CNTL_BASE_IDX 0
+#define mmUVD_JRBC_ENC_IB_SIZE 0x0122
+#define mmUVD_JRBC_ENC_IB_SIZE_BASE_IDX 0
+#define mmUVD_JRBC_ENC_URGENT_CNTL 0x0123
+#define mmUVD_JRBC_ENC_URGENT_CNTL_BASE_IDX 0
+#define mmUVD_JRBC_ENC_RB_REF_DATA 0x0124
+#define mmUVD_JRBC_ENC_RB_REF_DATA_BASE_IDX 0
+#define mmUVD_JRBC_ENC_RB_COND_RD_TIMER 0x0125
+#define mmUVD_JRBC_ENC_RB_COND_RD_TIMER_BASE_IDX 0
+#define mmUVD_JRBC_ENC_SOFT_RESET 0x0128
+#define mmUVD_JRBC_ENC_SOFT_RESET_BASE_IDX 0
+#define mmUVD_JRBC_ENC_STATUS 0x0129
+#define mmUVD_JRBC_ENC_STATUS_BASE_IDX 0
+#define mmUVD_JRBC_ENC_RB_RPTR 0x012a
+#define mmUVD_JRBC_ENC_RB_RPTR_BASE_IDX 0
+#define mmUVD_JRBC_ENC_RB_BUF_STATUS 0x012b
+#define mmUVD_JRBC_ENC_RB_BUF_STATUS_BASE_IDX 0
+#define mmUVD_JRBC_ENC_IB_BUF_STATUS 0x012c
+#define mmUVD_JRBC_ENC_IB_BUF_STATUS_BASE_IDX 0
+#define mmUVD_JRBC_ENC_IB_SIZE_UPDATE 0x012d
+#define mmUVD_JRBC_ENC_IB_SIZE_UPDATE_BASE_IDX 0
+#define mmUVD_JRBC_ENC_IB_COND_RD_TIMER 0x012e
+#define mmUVD_JRBC_ENC_IB_COND_RD_TIMER_BASE_IDX 0
+#define mmUVD_JRBC_ENC_IB_REF_DATA 0x012f
+#define mmUVD_JRBC_ENC_IB_REF_DATA_BASE_IDX 0
+#define mmUVD_JPEG_ENC_PREEMPT_CMD 0x0130
+#define mmUVD_JPEG_ENC_PREEMPT_CMD_BASE_IDX 0
+#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA0 0x0131
+#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA1 0x0132
+#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define mmUVD_JRBC_ENC_RB_SIZE 0x0133
+#define mmUVD_JRBC_ENC_RB_SIZE_BASE_IDX 0
+#define mmUVD_JRBC_ENC_SCRATCH0 0x0134
+#define mmUVD_JRBC_ENC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jmi_dec
+// base address: 0x1e500
+#define mmUVD_JMI_CTRL 0x0145
+#define mmUVD_JMI_CTRL_BASE_IDX 0
+#define mmUVD_LMI_JRBC_CTRL 0x0146
+#define mmUVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define mmUVD_LMI_JPEG_CTRL 0x0147
+#define mmUVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define mmUVD_JMI_EJRBC_CTRL 0x0148
+#define mmUVD_JMI_EJRBC_CTRL_BASE_IDX 0
+#define mmUVD_LMI_EJPEG_CTRL 0x0149
+#define mmUVD_LMI_EJPEG_CTRL_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_VMID 0x014f
+#define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_VMID 0x0150
+#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define mmUVD_LMI_JPEG_VMID 0x0151
+#define mmUVD_LMI_JPEG_VMID_BASE_IDX 0
+#define mmUVD_JMI_ENC_JRBC_IB_VMID 0x0152
+#define mmUVD_JMI_ENC_JRBC_IB_VMID_BASE_IDX 0
+#define mmUVD_JMI_ENC_JRBC_RB_VMID 0x0153
+#define mmUVD_JMI_ENC_JRBC_RB_VMID_BASE_IDX 0
+#define mmUVD_JMI_ENC_JPEG_VMID 0x0154
+#define mmUVD_JMI_ENC_JPEG_VMID_BASE_IDX 0
+#define mmUVD_JMI_PERFMON_CTRL 0x015c
+#define mmUVD_JMI_PERFMON_CTRL_BASE_IDX 0
+#define mmUVD_JMI_PERFMON_COUNT_LO 0x015d
+#define mmUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 0
+#define mmUVD_JMI_PERFMON_COUNT_HI 0x015e
+#define mmUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 0
+#define mmUVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0160
+#define mmUVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0161
+#define mmUVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0162
+#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0163
+#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0164
+#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0165
+#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0166
+#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x0167
+#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0168
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0169
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x016a
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x016b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x016c
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x016d
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x016e
+#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x016f
+#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW 0x0170
+#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH 0x0171
+#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x017a
+#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x017b
+#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_LOW 0x017c
+#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_HIGH 0x017d
+#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_LOW 0x017e
+#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_HIGH 0x017f
+#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW 0x0180
+#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x0181
+#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0182
+#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0183
+#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0184
+#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0185
+#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW 0x0186
+#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH 0x0187
+#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JPEG_PREEMPT_VMID 0x0188
+#define mmUVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define mmUVD_LMI_ENC_JPEG_PREEMPT_VMID 0x0189
+#define mmUVD_LMI_ENC_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define mmUVD_LMI_JPEG2_VMID 0x018a
+#define mmUVD_LMI_JPEG2_VMID_BASE_IDX 0
+#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_LOW 0x018b
+#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_HIGH 0x018c
+#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW 0x018d
+#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH 0x018e
+#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_LMI_JPEG_CTRL2 0x018f
+#define mmUVD_LMI_JPEG_CTRL2_BASE_IDX 0
+#define mmUVD_JMI_DEC_SWAP_CNTL 0x0190
+#define mmUVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define mmUVD_JMI_ENC_SWAP_CNTL 0x0191
+#define mmUVD_JMI_ENC_SWAP_CNTL_BASE_IDX 0
+#define mmUVD_JMI_CNTL 0x0192
+#define mmUVD_JMI_CNTL_BASE_IDX 0
+#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_LOW 0x019a
+#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH 0x019b
+#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define mmUVD_JMI_DEC_SWAP_CNTL2 0x019c
+#define mmUVD_JMI_DEC_SWAP_CNTL2_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jpeg_common_dec
+// base address: 0x1e700
+#define mmJPEG_SOFT_RESET_STATUS 0x01c0
+#define mmJPEG_SOFT_RESET_STATUS_BASE_IDX 0
+#define mmJPEG_SYS_INT_EN 0x01c1
+#define mmJPEG_SYS_INT_EN_BASE_IDX 0
+#define mmJPEG_SYS_INT_STATUS 0x01c2
+#define mmJPEG_SYS_INT_STATUS_BASE_IDX 0
+#define mmJPEG_SYS_INT_ACK 0x01c3
+#define mmJPEG_SYS_INT_ACK_BASE_IDX 0
+#define mmJPEG_MASTINT_EN 0x01c8
+#define mmJPEG_MASTINT_EN_BASE_IDX 0
+#define mmJPEG_IH_CTRL 0x01c9
+#define mmJPEG_IH_CTRL_BASE_IDX 0
+#define mmJRBBM_ARB_CTRL 0x01cb
+#define mmJRBBM_ARB_CTRL_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_jpeg_common_sclk_dec
+// base address: 0x1e780
+#define mmJPEG_CGC_GATE 0x01e0
+#define mmJPEG_CGC_GATE_BASE_IDX 0
+#define mmJPEG_CGC_CTRL 0x01e1
+#define mmJPEG_CGC_CTRL_BASE_IDX 0
+#define mmJPEG_CGC_STATUS 0x01e2
+#define mmJPEG_CGC_STATUS_BASE_IDX 0
+#define mmJPEG_COMN_CGC_MEM_CTRL 0x01e3
+#define mmJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 0
+#define mmJPEG_DEC_CGC_MEM_CTRL 0x01e4
+#define mmJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 0
+#define mmJPEG2_DEC_CGC_MEM_CTRL 0x01e5
+#define mmJPEG2_DEC_CGC_MEM_CTRL_BASE_IDX 0
+#define mmJPEG_ENC_CGC_MEM_CTRL 0x01e6
+#define mmJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 0
+#define mmJPEG_SOFT_RESET2 0x01e7
+#define mmJPEG_SOFT_RESET2_BASE_IDX 0
+#define mmJPEG_PERF_BANK_CONF 0x01e8
+#define mmJPEG_PERF_BANK_CONF_BASE_IDX 0
+#define mmJPEG_PERF_BANK_EVENT_SEL 0x01e9
+#define mmJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 0
+#define mmJPEG_PERF_BANK_COUNT0 0x01ea
+#define mmJPEG_PERF_BANK_COUNT0_BASE_IDX 0
+#define mmJPEG_PERF_BANK_COUNT1 0x01eb
+#define mmJPEG_PERF_BANK_COUNT1_BASE_IDX 0
+#define mmJPEG_PERF_BANK_COUNT2 0x01ec
+#define mmJPEG_PERF_BANK_COUNT2_BASE_IDX 0
+#define mmJPEG_PERF_BANK_COUNT3 0x01ed
+#define mmJPEG_PERF_BANK_COUNT3_BASE_IDX 0
+
+
+// addressBlock: uvd0_uvd_pg_dec
+// base address: 0x1f800
+#define mmUVD_PGFSM_CONFIG 0x0000
+#define mmUVD_PGFSM_CONFIG_BASE_IDX 1
+#define mmUVD_PGFSM_STATUS 0x0001
+#define mmUVD_PGFSM_STATUS_BASE_IDX 1
+#define mmUVD_POWER_STATUS 0x0004
+#define mmUVD_POWER_STATUS_BASE_IDX 1
+#define mmUVD_PG_IND_INDEX 0x0005
+#define mmUVD_PG_IND_INDEX_BASE_IDX 1
+#define mmUVD_PG_IND_DATA 0x0006
+#define mmUVD_PG_IND_DATA_BASE_IDX 1
+#define mmCC_UVD_HARVESTING 0x0007
+#define mmCC_UVD_HARVESTING_BASE_IDX 1
+#define mmUVD_JPEG_POWER_STATUS 0x000a
+#define mmUVD_JPEG_POWER_STATUS_BASE_IDX 1
+#define mmUVD_DPG_LMA_CTL 0x0011
+#define mmUVD_DPG_LMA_CTL_BASE_IDX 1
+#define mmUVD_DPG_LMA_DATA 0x0012
+#define mmUVD_DPG_LMA_DATA_BASE_IDX 1
+#define mmUVD_DPG_LMA_MASK 0x0013
+#define mmUVD_DPG_LMA_MASK_BASE_IDX 1
+#define mmUVD_DPG_PAUSE 0x0014
+#define mmUVD_DPG_PAUSE_BASE_IDX 1
+#define mmUVD_SCRATCH1 0x0015
+#define mmUVD_SCRATCH1_BASE_IDX 1
+#define mmUVD_SCRATCH2 0x0016
+#define mmUVD_SCRATCH2_BASE_IDX 1
+#define mmUVD_SCRATCH3 0x0017
+#define mmUVD_SCRATCH3_BASE_IDX 1
+#define mmUVD_SCRATCH4 0x0018
+#define mmUVD_SCRATCH4_BASE_IDX 1
+#define mmUVD_SCRATCH5 0x0019
+#define mmUVD_SCRATCH5_BASE_IDX 1
+#define mmUVD_SCRATCH6 0x001a
+#define mmUVD_SCRATCH6_BASE_IDX 1
+#define mmUVD_SCRATCH7 0x001b
+#define mmUVD_SCRATCH7_BASE_IDX 1
+#define mmUVD_SCRATCH8 0x001c
+#define mmUVD_SCRATCH8_BASE_IDX 1
+#define mmUVD_SCRATCH9 0x001d
+#define mmUVD_SCRATCH9_BASE_IDX 1
+#define mmUVD_SCRATCH10 0x001e
+#define mmUVD_SCRATCH10_BASE_IDX 1
+#define mmUVD_SCRATCH11 0x001f
+#define mmUVD_SCRATCH11_BASE_IDX 1
+#define mmUVD_SCRATCH12 0x0020
+#define mmUVD_SCRATCH12_BASE_IDX 1
+#define mmUVD_SCRATCH13 0x0021
+#define mmUVD_SCRATCH13_BASE_IDX 1
+#define mmUVD_SCRATCH14 0x0022
+#define mmUVD_SCRATCH14_BASE_IDX 1
+#define mmUVD_FREE_COUNTER_REG 0x0024
+#define mmUVD_FREE_COUNTER_REG_BASE_IDX 1
+#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0025
+#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0026
+#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_DPG_VCPU_CACHE_OFFSET0 0x0027
+#define mmUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define mmUVD_DPG_LMI_VCPU_CACHE_VMID 0x0028
+#define mmUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define mmUVD_PF_STATUS 0x0039
+#define mmUVD_PF_STATUS_BASE_IDX 1
+#define mmUVD_DPG_CLK_EN_VCPU_REPORT 0x003c
+#define mmUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1
+#define mmUVD_GFX8_ADDR_CONFIG 0x0049
+#define mmUVD_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_GFX10_ADDR_CONFIG 0x004a
+#define mmUVD_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_GPCNT2_CNTL 0x004b
+#define mmUVD_GPCNT2_CNTL_BASE_IDX 1
+#define mmUVD_GPCNT2_TARGET_LOWER 0x004c
+#define mmUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1
+#define mmUVD_GPCNT2_STATUS_LOWER 0x004d
+#define mmUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1
+#define mmUVD_GPCNT2_TARGET_UPPER 0x004e
+#define mmUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1
+#define mmUVD_GPCNT2_STATUS_UPPER 0x004f
+#define mmUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1
+#define mmUVD_GPCNT3_CNTL 0x0050
+#define mmUVD_GPCNT3_CNTL_BASE_IDX 1
+#define mmUVD_GPCNT3_TARGET_LOWER 0x0051
+#define mmUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1
+#define mmUVD_GPCNT3_STATUS_LOWER 0x0052
+#define mmUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1
+#define mmUVD_GPCNT3_TARGET_UPPER 0x0053
+#define mmUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1
+#define mmUVD_GPCNT3_STATUS_UPPER 0x0054
+#define mmUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1
+
+
+// addressBlock: uvd0_uvddec
+// base address: 0x1fa00
+#define mmUVD_STATUS 0x0080
+#define mmUVD_STATUS_BASE_IDX 1
+#define mmUVD_ENC_PIPE_BUSY 0x0081
+#define mmUVD_ENC_PIPE_BUSY_BASE_IDX 1
+#define mmUVD_SOFT_RESET 0x0084
+#define mmUVD_SOFT_RESET_BASE_IDX 1
+#define mmUVD_SOFT_RESET2 0x0085
+#define mmUVD_SOFT_RESET2_BASE_IDX 1
+#define mmUVD_MMSCH_SOFT_RESET 0x0086
+#define mmUVD_MMSCH_SOFT_RESET_BASE_IDX 1
+#define mmUVD_CGC_GATE 0x0088
+#define mmUVD_CGC_GATE_BASE_IDX 1
+#define mmUVD_CGC_STATUS 0x0089
+#define mmUVD_CGC_STATUS_BASE_IDX 1
+#define mmUVD_CGC_CTRL 0x008a
+#define mmUVD_CGC_CTRL_BASE_IDX 1
+#define mmUVD_CGC_UDEC_STATUS 0x008b
+#define mmUVD_CGC_UDEC_STATUS_BASE_IDX 1
+#define mmUVD_SUVD_CGC_GATE 0x008c
+#define mmUVD_SUVD_CGC_GATE_BASE_IDX 1
+#define mmUVD_SUVD_CGC_STATUS 0x008d
+#define mmUVD_SUVD_CGC_STATUS_BASE_IDX 1
+#define mmUVD_SUVD_CGC_CTRL 0x008e
+#define mmUVD_SUVD_CGC_CTRL_BASE_IDX 1
+#define mmUVD_GPCOM_VCPU_CMD 0x008f
+#define mmUVD_GPCOM_VCPU_CMD_BASE_IDX 1
+#define mmUVD_GPCOM_VCPU_DATA0 0x0090
+#define mmUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
+#define mmUVD_GPCOM_VCPU_DATA1 0x0091
+#define mmUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define mmUVD_GPCOM_SYS_CMD 0x0092
+#define mmUVD_GPCOM_SYS_CMD_BASE_IDX 1
+#define mmUVD_GPCOM_SYS_DATA0 0x0093
+#define mmUVD_GPCOM_SYS_DATA0_BASE_IDX 1
+#define mmUVD_GPCOM_SYS_DATA1 0x0094
+#define mmUVD_GPCOM_SYS_DATA1_BASE_IDX 1
+#define mmUVD_VCPU_INT_EN 0x0095
+#define mmUVD_VCPU_INT_EN_BASE_IDX 1
+#define mmUVD_VCPU_INT_ACK 0x0097
+#define mmUVD_VCPU_INT_ACK_BASE_IDX 1
+#define mmUVD_VCPU_INT_ROUTE 0x0098
+#define mmUVD_VCPU_INT_ROUTE_BASE_IDX 1
+#define mmUVD_ENC_VCPU_INT_EN 0x009e
+#define mmUVD_ENC_VCPU_INT_EN_BASE_IDX 1
+#define mmUVD_ENC_VCPU_INT_ACK 0x00a0
+#define mmUVD_ENC_VCPU_INT_ACK_BASE_IDX 1
+#define mmUVD_MASTINT_EN 0x00a1
+#define mmUVD_MASTINT_EN_BASE_IDX 1
+#define mmUVD_SYS_INT_EN 0x00a2
+#define mmUVD_SYS_INT_EN_BASE_IDX 1
+#define mmUVD_SYS_INT_STATUS 0x00a3
+#define mmUVD_SYS_INT_STATUS_BASE_IDX 1
+#define mmUVD_SYS_INT_ACK 0x00a4
+#define mmUVD_SYS_INT_ACK_BASE_IDX 1
+#define mmUVD_JOB_DONE 0x00a5
+#define mmUVD_JOB_DONE_BASE_IDX 1
+#define mmUVD_CBUF_ID 0x00a6
+#define mmUVD_CBUF_ID_BASE_IDX 1
+#define mmUVD_CONTEXT_ID 0x00a7
+#define mmUVD_CONTEXT_ID_BASE_IDX 1
+#define mmUVD_CONTEXT_ID2 0x00a8
+#define mmUVD_CONTEXT_ID2_BASE_IDX 1
+#define mmUVD_NO_OP 0x00a9
+#define mmUVD_NO_OP_BASE_IDX 1
+#define mmUVD_RB_BASE_LO 0x00aa
+#define mmUVD_RB_BASE_LO_BASE_IDX 1
+#define mmUVD_RB_BASE_HI 0x00ab
+#define mmUVD_RB_BASE_HI_BASE_IDX 1
+#define mmUVD_RB_SIZE 0x00ac
+#define mmUVD_RB_SIZE_BASE_IDX 1
+#define mmUVD_RB_RPTR 0x00ad
+#define mmUVD_RB_RPTR_BASE_IDX 1
+#define mmUVD_RB_WPTR 0x00ae
+#define mmUVD_RB_WPTR_BASE_IDX 1
+#define mmUVD_RB_BASE_LO2 0x00af
+#define mmUVD_RB_BASE_LO2_BASE_IDX 1
+#define mmUVD_RB_BASE_HI2 0x00b0
+#define mmUVD_RB_BASE_HI2_BASE_IDX 1
+#define mmUVD_RB_SIZE2 0x00b1
+#define mmUVD_RB_SIZE2_BASE_IDX 1
+#define mmUVD_RB_RPTR2 0x00b2
+#define mmUVD_RB_RPTR2_BASE_IDX 1
+#define mmUVD_RB_WPTR2 0x00b3
+#define mmUVD_RB_WPTR2_BASE_IDX 1
+#define mmUVD_RB_BASE_LO3 0x00b4
+#define mmUVD_RB_BASE_LO3_BASE_IDX 1
+#define mmUVD_RB_BASE_HI3 0x00b5
+#define mmUVD_RB_BASE_HI3_BASE_IDX 1
+#define mmUVD_RB_SIZE3 0x00b6
+#define mmUVD_RB_SIZE3_BASE_IDX 1
+#define mmUVD_RB_RPTR3 0x00b7
+#define mmUVD_RB_RPTR3_BASE_IDX 1
+#define mmUVD_RB_WPTR3 0x00b8
+#define mmUVD_RB_WPTR3_BASE_IDX 1
+#define mmUVD_RB_BASE_LO4 0x00b9
+#define mmUVD_RB_BASE_LO4_BASE_IDX 1
+#define mmUVD_RB_BASE_HI4 0x00ba
+#define mmUVD_RB_BASE_HI4_BASE_IDX 1
+#define mmUVD_RB_SIZE4 0x00bb
+#define mmUVD_RB_SIZE4_BASE_IDX 1
+#define mmUVD_RB_RPTR4 0x00bc
+#define mmUVD_RB_RPTR4_BASE_IDX 1
+#define mmUVD_RB_WPTR4 0x00bd
+#define mmUVD_RB_WPTR4_BASE_IDX 1
+#define mmUVD_OUT_RB_BASE_LO 0x00be
+#define mmUVD_OUT_RB_BASE_LO_BASE_IDX 1
+#define mmUVD_OUT_RB_BASE_HI 0x00bf
+#define mmUVD_OUT_RB_BASE_HI_BASE_IDX 1
+#define mmUVD_OUT_RB_SIZE 0x00c0
+#define mmUVD_OUT_RB_SIZE_BASE_IDX 1
+#define mmUVD_OUT_RB_RPTR 0x00c1
+#define mmUVD_OUT_RB_RPTR_BASE_IDX 1
+#define mmUVD_OUT_RB_WPTR 0x00c2
+#define mmUVD_OUT_RB_WPTR_BASE_IDX 1
+#define mmUVD_RB_ARB_CTRL 0x00c6
+#define mmUVD_RB_ARB_CTRL_BASE_IDX 1
+#define mmUVD_CTX_INDEX 0x00c7
+#define mmUVD_CTX_INDEX_BASE_IDX 1
+#define mmUVD_CTX_DATA 0x00c8
+#define mmUVD_CTX_DATA_BASE_IDX 1
+#define mmUVD_CXW_WR 0x00c9
+#define mmUVD_CXW_WR_BASE_IDX 1
+#define mmUVD_CXW_WR_INT_ID 0x00ca
+#define mmUVD_CXW_WR_INT_ID_BASE_IDX 1
+#define mmUVD_CXW_WR_INT_CTX_ID 0x00cb
+#define mmUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1
+#define mmUVD_CXW_INT_ID 0x00cc
+#define mmUVD_CXW_INT_ID_BASE_IDX 1
+#define mmUVD_TOP_CTRL 0x00cf
+#define mmUVD_TOP_CTRL_BASE_IDX 1
+#define mmUVD_YBASE 0x00d0
+#define mmUVD_YBASE_BASE_IDX 1
+#define mmUVD_UVBASE 0x00d1
+#define mmUVD_UVBASE_BASE_IDX 1
+#define mmUVD_PITCH 0x00d2
+#define mmUVD_PITCH_BASE_IDX 1
+#define mmUVD_WIDTH 0x00d3
+#define mmUVD_WIDTH_BASE_IDX 1
+#define mmUVD_HEIGHT 0x00d4
+#define mmUVD_HEIGHT_BASE_IDX 1
+#define mmUVD_PICCOUNT 0x00d5
+#define mmUVD_PICCOUNT_BASE_IDX 1
+#define mmUVD_SCRATCH_NP 0x00db
+#define mmUVD_SCRATCH_NP_BASE_IDX 1
+#define mmUVD_VERSION 0x00dd
+#define mmUVD_VERSION_BASE_IDX 1
+#define mmUVD_GP_SCRATCH0 0x00de
+#define mmUVD_GP_SCRATCH0_BASE_IDX 1
+#define mmUVD_GP_SCRATCH1 0x00df
+#define mmUVD_GP_SCRATCH1_BASE_IDX 1
+#define mmUVD_GP_SCRATCH2 0x00e0
+#define mmUVD_GP_SCRATCH2_BASE_IDX 1
+#define mmUVD_GP_SCRATCH3 0x00e1
+#define mmUVD_GP_SCRATCH3_BASE_IDX 1
+#define mmUVD_GP_SCRATCH4 0x00e2
+#define mmUVD_GP_SCRATCH4_BASE_IDX 1
+#define mmUVD_GP_SCRATCH5 0x00e3
+#define mmUVD_GP_SCRATCH5_BASE_IDX 1
+#define mmUVD_GP_SCRATCH6 0x00e4
+#define mmUVD_GP_SCRATCH6_BASE_IDX 1
+#define mmUVD_GP_SCRATCH7 0x00e5
+#define mmUVD_GP_SCRATCH7_BASE_IDX 1
+#define mmUVD_GP_SCRATCH8 0x00e6
+#define mmUVD_GP_SCRATCH8_BASE_IDX 1
+#define mmUVD_GP_SCRATCH9 0x00e7
+#define mmUVD_GP_SCRATCH9_BASE_IDX 1
+#define mmUVD_GP_SCRATCH10 0x00e8
+#define mmUVD_GP_SCRATCH10_BASE_IDX 1
+#define mmUVD_GP_SCRATCH11 0x00e9
+#define mmUVD_GP_SCRATCH11_BASE_IDX 1
+#define mmUVD_GP_SCRATCH12 0x00ea
+#define mmUVD_GP_SCRATCH12_BASE_IDX 1
+#define mmUVD_GP_SCRATCH13 0x00eb
+#define mmUVD_GP_SCRATCH13_BASE_IDX 1
+#define mmUVD_GP_SCRATCH14 0x00ec
+#define mmUVD_GP_SCRATCH14_BASE_IDX 1
+#define mmUVD_GP_SCRATCH15 0x00ed
+#define mmUVD_GP_SCRATCH15_BASE_IDX 1
+#define mmUVD_GP_SCRATCH16 0x00ee
+#define mmUVD_GP_SCRATCH16_BASE_IDX 1
+#define mmUVD_GP_SCRATCH17 0x00ef
+#define mmUVD_GP_SCRATCH17_BASE_IDX 1
+#define mmUVD_GP_SCRATCH18 0x00f0
+#define mmUVD_GP_SCRATCH18_BASE_IDX 1
+#define mmUVD_GP_SCRATCH19 0x00f1
+#define mmUVD_GP_SCRATCH19_BASE_IDX 1
+#define mmUVD_GP_SCRATCH20 0x00f2
+#define mmUVD_GP_SCRATCH20_BASE_IDX 1
+#define mmUVD_GP_SCRATCH21 0x00f3
+#define mmUVD_GP_SCRATCH21_BASE_IDX 1
+#define mmUVD_GP_SCRATCH22 0x00f4
+#define mmUVD_GP_SCRATCH22_BASE_IDX 1
+#define mmUVD_GP_SCRATCH23 0x00f5
+#define mmUVD_GP_SCRATCH23_BASE_IDX 1
+
+
+// addressBlock: uvd0_ecpudec
+// base address: 0x1fd00
+#define mmUVD_VCPU_CACHE_OFFSET0 0x0140
+#define mmUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE0 0x0141
+#define mmUVD_VCPU_CACHE_SIZE0_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET1 0x0142
+#define mmUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE1 0x0143
+#define mmUVD_VCPU_CACHE_SIZE1_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET2 0x0144
+#define mmUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE2 0x0145
+#define mmUVD_VCPU_CACHE_SIZE2_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET3 0x0146
+#define mmUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE3 0x0147
+#define mmUVD_VCPU_CACHE_SIZE3_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET4 0x0148
+#define mmUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE4 0x0149
+#define mmUVD_VCPU_CACHE_SIZE4_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET5 0x014a
+#define mmUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE5 0x014b
+#define mmUVD_VCPU_CACHE_SIZE5_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET6 0x014c
+#define mmUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE6 0x014d
+#define mmUVD_VCPU_CACHE_SIZE6_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET7 0x014e
+#define mmUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE7 0x014f
+#define mmUVD_VCPU_CACHE_SIZE7_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_OFFSET8 0x0150
+#define mmUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1
+#define mmUVD_VCPU_CACHE_SIZE8 0x0151
+#define mmUVD_VCPU_CACHE_SIZE8_BASE_IDX 1
+#define mmUVD_VCPU_NONCACHE_OFFSET0 0x0152
+#define mmUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1
+#define mmUVD_VCPU_NONCACHE_SIZE0 0x0153
+#define mmUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1
+#define mmUVD_VCPU_NONCACHE_OFFSET1 0x0154
+#define mmUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1
+#define mmUVD_VCPU_NONCACHE_SIZE1 0x0155
+#define mmUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1
+#define mmUVD_VCPU_CNTL 0x0156
+#define mmUVD_VCPU_CNTL_BASE_IDX 1
+#define mmUVD_VCPU_PRID 0x0157
+#define mmUVD_VCPU_PRID_BASE_IDX 1
+#define mmUVD_VCPU_TRCE 0x0158
+#define mmUVD_VCPU_TRCE_BASE_IDX 1
+#define mmUVD_VCPU_TRCE_RD 0x0159
+#define mmUVD_VCPU_TRCE_RD_BASE_IDX 1
+
+
+// addressBlock: uvd0_uvd_mpcdec
+// base address: 0x20310
+#define mmUVD_MP_SWAP_CNTL 0x02c4
+#define mmUVD_MP_SWAP_CNTL_BASE_IDX 1
+#define mmUVD_MP_SWAP_CNTL2 0x02c5
+#define mmUVD_MP_SWAP_CNTL2_BASE_IDX 1
+#define mmUVD_MPC_LUMA_SRCH 0x02c6
+#define mmUVD_MPC_LUMA_SRCH_BASE_IDX 1
+#define mmUVD_MPC_LUMA_HIT 0x02c7
+#define mmUVD_MPC_LUMA_HIT_BASE_IDX 1
+#define mmUVD_MPC_LUMA_HITPEND 0x02c8
+#define mmUVD_MPC_LUMA_HITPEND_BASE_IDX 1
+#define mmUVD_MPC_CHROMA_SRCH 0x02c9
+#define mmUVD_MPC_CHROMA_SRCH_BASE_IDX 1
+#define mmUVD_MPC_CHROMA_HIT 0x02ca
+#define mmUVD_MPC_CHROMA_HIT_BASE_IDX 1
+#define mmUVD_MPC_CHROMA_HITPEND 0x02cb
+#define mmUVD_MPC_CHROMA_HITPEND_BASE_IDX 1
+#define mmUVD_MPC_CNTL 0x02cc
+#define mmUVD_MPC_CNTL_BASE_IDX 1
+#define mmUVD_MPC_PITCH 0x02cd
+#define mmUVD_MPC_PITCH_BASE_IDX 1
+#define mmUVD_MPC_SET_MUXA0 0x02ce
+#define mmUVD_MPC_SET_MUXA0_BASE_IDX 1
+#define mmUVD_MPC_SET_MUXA1 0x02cf
+#define mmUVD_MPC_SET_MUXA1_BASE_IDX 1
+#define mmUVD_MPC_SET_MUXB0 0x02d0
+#define mmUVD_MPC_SET_MUXB0_BASE_IDX 1
+#define mmUVD_MPC_SET_MUXB1 0x02d1
+#define mmUVD_MPC_SET_MUXB1_BASE_IDX 1
+#define mmUVD_MPC_SET_MUX 0x02d2
+#define mmUVD_MPC_SET_MUX_BASE_IDX 1
+#define mmUVD_MPC_SET_ALU 0x02d3
+#define mmUVD_MPC_SET_ALU_BASE_IDX 1
+#define mmUVD_MPC_PERF0 0x02d4
+#define mmUVD_MPC_PERF0_BASE_IDX 1
+#define mmUVD_MPC_PERF1 0x02d5
+#define mmUVD_MPC_PERF1_BASE_IDX 1
+
+
+// addressBlock: uvd0_uvd_rbcdec
+// base address: 0x20370
+#define mmUVD_RBC_IB_SIZE 0x02dc
+#define mmUVD_RBC_IB_SIZE_BASE_IDX 1
+#define mmUVD_RBC_IB_SIZE_UPDATE 0x02dd
+#define mmUVD_RBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define mmUVD_RBC_RB_CNTL 0x02de
+#define mmUVD_RBC_RB_CNTL_BASE_IDX 1
+#define mmUVD_RBC_RB_RPTR_ADDR 0x02df
+#define mmUVD_RBC_RB_RPTR_ADDR_BASE_IDX 1
+#define mmUVD_RBC_RB_RPTR 0x02e0
+#define mmUVD_RBC_RB_RPTR_BASE_IDX 1
+#define mmUVD_RBC_RB_WPTR 0x02e1
+#define mmUVD_RBC_RB_WPTR_BASE_IDX 1
+#define mmUVD_RBC_VCPU_ACCESS 0x02e2
+#define mmUVD_RBC_VCPU_ACCESS_BASE_IDX 1
+#define mmUVD_RBC_READ_REQ_URGENT_CNTL 0x02e5
+#define mmUVD_RBC_READ_REQ_URGENT_CNTL_BASE_IDX 1
+#define mmUVD_RBC_RB_WPTR_CNTL 0x02e6
+#define mmUVD_RBC_RB_WPTR_CNTL_BASE_IDX 1
+#define mmUVD_RBC_WPTR_STATUS 0x02e7
+#define mmUVD_RBC_WPTR_STATUS_BASE_IDX 1
+#define mmUVD_RBC_WPTR_POLL_CNTL 0x02e8
+#define mmUVD_RBC_WPTR_POLL_CNTL_BASE_IDX 1
+#define mmUVD_RBC_WPTR_POLL_ADDR 0x02e9
+#define mmUVD_RBC_WPTR_POLL_ADDR_BASE_IDX 1
+#define mmUVD_SEMA_CMD 0x02ea
+#define mmUVD_SEMA_CMD_BASE_IDX 1
+#define mmUVD_SEMA_ADDR_LOW 0x02eb
+#define mmUVD_SEMA_ADDR_LOW_BASE_IDX 1
+#define mmUVD_SEMA_ADDR_HIGH 0x02ec
+#define mmUVD_SEMA_ADDR_HIGH_BASE_IDX 1
+#define mmUVD_ENGINE_CNTL 0x02ed
+#define mmUVD_ENGINE_CNTL_BASE_IDX 1
+#define mmUVD_SEMA_TIMEOUT_STATUS 0x02ee
+#define mmUVD_SEMA_TIMEOUT_STATUS_BASE_IDX 1
+#define mmUVD_SEMA_CNTL 0x02ef
+#define mmUVD_SEMA_CNTL_BASE_IDX 1
+#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x02f0
+#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
+#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x02f1
+#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_BASE_IDX 1
+#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x02f2
+#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
+#define mmUVD_JOB_START 0x02f3
+#define mmUVD_JOB_START_BASE_IDX 1
+#define mmUVD_RBC_BUF_STATUS 0x02f4
+#define mmUVD_RBC_BUF_STATUS_BASE_IDX 1
+
+
+// addressBlock: uvd0_uvdgendec
+// base address: 0x20470
+#define mmUVD_LCM_CGC_CNTRL 0x033f
+#define mmUVD_LCM_CGC_CNTRL_BASE_IDX 1
+#define mmUVD_MIF_CURR_UV_ADDR_CONFIG 0x03a0
+#define mmUVD_MIF_CURR_UV_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_REF_UV_ADDR_CONFIG 0x03a1
+#define mmUVD_MIF_REF_UV_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG 0x03a2
+#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_CURR_ADDR_CONFIG 0x03ae
+#define mmUVD_MIF_CURR_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_REF_ADDR_CONFIG 0x03af
+#define mmUVD_MIF_REF_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x03e1
+#define mmUVD_MIF_RECON1_ADDR_CONFIG_BASE_IDX 1
+
+
+// addressBlock: uvd0_lmi_adpdec
+// base address: 0x20870
+#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x0432
+#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x0433
+#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x0434
+#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x0435
+#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x0438
+#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x0439
+#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x043a
+#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x043b
+#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x043c
+#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x043d
+#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x0468
+#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x0469
+#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x046a
+#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x046b
+#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x046c
+#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x046d
+#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x046e
+#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x046f
+#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0470
+#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0471
+#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x0472
+#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x0473
+#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x0474
+#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x0475
+#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x0476
+#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x0477
+#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_SPH_64BIT_BAR_HIGH 0x047c
+#define mmUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x047d
+#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x047e
+#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x047f
+#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0480
+#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0481
+#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0482
+#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0483
+#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0484
+#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0485
+#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0486
+#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x0487
+#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x0488
+#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x0489
+#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x048a
+#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x048b
+#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x048c
+#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_NC_VMID 0x048d
+#define mmUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1
+#define mmUVD_LMI_MMSCH_CTRL 0x048e
+#define mmUVD_LMI_MMSCH_CTRL_BASE_IDX 1
+#define mmUVD_LMI_ARB_CTRL2 0x049a
+#define mmUVD_LMI_ARB_CTRL2_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x049f
+#define mmUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1
+#define mmUVD_LMI_VCPU_NC_VMIDS_MULTI 0x04a0
+#define mmUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1
+#define mmUVD_LMI_LAT_CTRL 0x04a1
+#define mmUVD_LMI_LAT_CTRL_BASE_IDX 1
+#define mmUVD_LMI_LAT_CNTR 0x04a2
+#define mmUVD_LMI_LAT_CNTR_BASE_IDX 1
+#define mmUVD_LMI_AVG_LAT_CNTR 0x04a3
+#define mmUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1
+#define mmUVD_LMI_SPH 0x04a4
+#define mmUVD_LMI_SPH_BASE_IDX 1
+#define mmUVD_LMI_VCPU_CACHE_VMID 0x04a5
+#define mmUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define mmUVD_LMI_CTRL2 0x04a6
+#define mmUVD_LMI_CTRL2_BASE_IDX 1
+#define mmUVD_LMI_URGENT_CTRL 0x04a7
+#define mmUVD_LMI_URGENT_CTRL_BASE_IDX 1
+#define mmUVD_LMI_CTRL 0x04a8
+#define mmUVD_LMI_CTRL_BASE_IDX 1
+#define mmUVD_LMI_STATUS 0x04a9
+#define mmUVD_LMI_STATUS_BASE_IDX 1
+#define mmUVD_LMI_PERFMON_CTRL 0x04ac
+#define mmUVD_LMI_PERFMON_CTRL_BASE_IDX 1
+#define mmUVD_LMI_PERFMON_COUNT_LO 0x04ad
+#define mmUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define mmUVD_LMI_PERFMON_COUNT_HI 0x04ae
+#define mmUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define mmUVD_LMI_RBC_RB_VMID 0x04b0
+#define mmUVD_LMI_RBC_RB_VMID_BASE_IDX 1
+#define mmUVD_LMI_RBC_IB_VMID 0x04b1
+#define mmUVD_LMI_RBC_IB_VMID_BASE_IDX 1
+#define mmUVD_LMI_MC_CREDITS 0x04b2
+#define mmUVD_LMI_MC_CREDITS_BASE_IDX 1
+
+
+// addressBlock: uvd0_uvdnpdec
+// base address: 0x20bd0
+#define mmMDM_DMA_CMD 0x06f4
+#define mmMDM_DMA_CMD_BASE_IDX 1
+#define mmMDM_DMA_STATUS 0x06f5
+#define mmMDM_DMA_STATUS_BASE_IDX 1
+#define mmMDM_DMA_CTL 0x06f6
+#define mmMDM_DMA_CTL_BASE_IDX 1
+#define mmMDM_ENC_PIPE_BUSY 0x06f7
+#define mmMDM_ENC_PIPE_BUSY_BASE_IDX 1
+#define mmMDM_WIG_PIPE_BUSY 0x06f9
+#define mmMDM_WIG_PIPE_BUSY_BASE_IDX 1
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h
new file mode 100644
index 000000000000..c41c59c30006
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h
@@ -0,0 +1,3609 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _vcn_2_5_SH_MASK_HEADER
+#define _vcn_2_5_SH_MASK_HEADER
+
+// addressBlock: uvd0_mmsch_dec
+//MMSCH_UCODE_ADDR
+#define MMSCH_UCODE_ADDR__UCODE_ADDR__SHIFT 0x2
+#define MMSCH_UCODE_ADDR__UCODE_LOCK__SHIFT 0x1f
+#define MMSCH_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFCL
+#define MMSCH_UCODE_ADDR__UCODE_LOCK_MASK 0x80000000L
+//MMSCH_UCODE_DATA
+#define MMSCH_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define MMSCH_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//MMSCH_SRAM_ADDR
+#define MMSCH_SRAM_ADDR__SRAM_ADDR__SHIFT 0x2
+#define MMSCH_SRAM_ADDR__SRAM_LOCK__SHIFT 0x1f
+#define MMSCH_SRAM_ADDR__SRAM_ADDR_MASK 0x00001FFCL
+#define MMSCH_SRAM_ADDR__SRAM_LOCK_MASK 0x80000000L
+//MMSCH_SRAM_DATA
+#define MMSCH_SRAM_DATA__SRAM_DATA__SHIFT 0x0
+#define MMSCH_SRAM_DATA__SRAM_DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_SRAM_OFFSET
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET__SHIFT 0x2
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF__SHIFT 0x10
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET_MASK 0x00001FFCL
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF_MASK 0x00FF0000L
+//MMSCH_DB_SRAM_OFFSET
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET__SHIFT 0x2
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG__SHIFT 0x10
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG__SHIFT 0x18
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET_MASK 0x00001FFCL
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG_MASK 0x00FF0000L
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG_MASK 0xFF000000L
+//MMSCH_CTX_SRAM_OFFSET
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET__SHIFT 0x2
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE__SHIFT 0x10
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET_MASK 0x00001FFCL
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE_MASK 0xFFFF0000L
+//MMSCH_CTL
+#define MMSCH_CTL__P_RUNSTALL__SHIFT 0x0
+#define MMSCH_CTL__P_RESET__SHIFT 0x1
+#define MMSCH_CTL__VFID_FIFO_EN__SHIFT 0x4
+#define MMSCH_CTL__P_LOCK__SHIFT 0x1f
+#define MMSCH_CTL__P_RUNSTALL_MASK 0x00000001L
+#define MMSCH_CTL__P_RESET_MASK 0x00000002L
+#define MMSCH_CTL__VFID_FIFO_EN_MASK 0x00000010L
+#define MMSCH_CTL__P_LOCK_MASK 0x80000000L
+//MMSCH_INTR
+#define MMSCH_INTR__INTR__SHIFT 0x0
+#define MMSCH_INTR__INTR_MASK 0x00001FFFL
+//MMSCH_INTR_ACK
+#define MMSCH_INTR_ACK__INTR__SHIFT 0x0
+#define MMSCH_INTR_ACK__INTR_MASK 0x00001FFFL
+//MMSCH_INTR_STATUS
+#define MMSCH_INTR_STATUS__INTR__SHIFT 0x0
+#define MMSCH_INTR_STATUS__INTR_MASK 0x00001FFFL
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_GPCOM_ADDR_LO
+#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_GPCOM_ADDR_HI
+#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_GPCOM_SIZE
+#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE__SHIFT 0x0
+#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_0
+#define MMSCH_VF_MAILBOX_0__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_0_RESP
+#define MMSCH_VF_MAILBOX_0_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_0_RESP__RESP_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_1
+#define MMSCH_VF_MAILBOX_1__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_1_RESP
+#define MMSCH_VF_MAILBOX_1_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_1_RESP__RESP_MASK 0xFFFFFFFFL
+//MMSCH_CNTL
+#define MMSCH_CNTL__CLK_EN__SHIFT 0x0
+#define MMSCH_CNTL__ED_ENABLE__SHIFT 0x1
+#define MMSCH_CNTL__MMSCH_IRQ_ERR__SHIFT 0x5
+#define MMSCH_CNTL__MMSCH_NACK_INTR_EN__SHIFT 0x9
+#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN__SHIFT 0xa
+#define MMSCH_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define MMSCH_CNTL__TIMEOUT_DIS__SHIFT 0x1c
+#define MMSCH_CNTL__CLK_EN_MASK 0x00000001L
+#define MMSCH_CNTL__ED_ENABLE_MASK 0x00000002L
+#define MMSCH_CNTL__MMSCH_IRQ_ERR_MASK 0x000001E0L
+#define MMSCH_CNTL__MMSCH_NACK_INTR_EN_MASK 0x00000200L
+#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN_MASK 0x00000400L
+#define MMSCH_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define MMSCH_CNTL__TIMEOUT_DIS_MASK 0x10000000L
+//MMSCH_NONCACHE_OFFSET0
+#define MMSCH_NONCACHE_OFFSET0__OFFSET__SHIFT 0x0
+#define MMSCH_NONCACHE_OFFSET0__OFFSET_MASK 0x0FFFFFFFL
+//MMSCH_NONCACHE_SIZE0
+#define MMSCH_NONCACHE_SIZE0__SIZE__SHIFT 0x0
+#define MMSCH_NONCACHE_SIZE0__SIZE_MASK 0x00FFFFFFL
+//MMSCH_NONCACHE_OFFSET1
+#define MMSCH_NONCACHE_OFFSET1__OFFSET__SHIFT 0x0
+#define MMSCH_NONCACHE_OFFSET1__OFFSET_MASK 0x0FFFFFFFL
+//MMSCH_NONCACHE_SIZE1
+#define MMSCH_NONCACHE_SIZE1__SIZE__SHIFT 0x0
+#define MMSCH_NONCACHE_SIZE1__SIZE_MASK 0x00FFFFFFL
+//MMSCH_PROC_STATE1
+#define MMSCH_PROC_STATE1__PC__SHIFT 0x0
+#define MMSCH_PROC_STATE1__PC_MASK 0xFFFFFFFFL
+//MMSCH_LAST_MC_ADDR
+#define MMSCH_LAST_MC_ADDR__MC_ADDR__SHIFT 0x0
+#define MMSCH_LAST_MC_ADDR__RW__SHIFT 0x1f
+#define MMSCH_LAST_MC_ADDR__MC_ADDR_MASK 0x0FFFFFFFL
+#define MMSCH_LAST_MC_ADDR__RW_MASK 0x80000000L
+//MMSCH_LAST_MEM_ACCESS_HI
+#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD__SHIFT 0x0
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR__SHIFT 0x8
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR__SHIFT 0xc
+#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD_MASK 0x00000007L
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR_MASK 0x00000700L
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR_MASK 0x00007000L
+//MMSCH_LAST_MEM_ACCESS_LO
+#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR__SHIFT 0x0
+#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR_MASK 0xFFFFFFFFL
+//MMSCH_IOV_ACTIVE_FCN_ID
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000001FL
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L
+//MMSCH_SCRATCH_0
+#define MMSCH_SCRATCH_0__SCRATCH_0__SHIFT 0x0
+#define MMSCH_SCRATCH_0__SCRATCH_0_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_1
+#define MMSCH_SCRATCH_1__SCRATCH_1__SHIFT 0x0
+#define MMSCH_SCRATCH_1__SCRATCH_1_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_0
+#define MMSCH_GPUIOV_SCH_BLOCK_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_0__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_0
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_0
+#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_0
+#define MMSCH_GPUIOV_DW6_0__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_0
+#define MMSCH_GPUIOV_DW7_0__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_0
+#define MMSCH_GPUIOV_DW8_0__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_1
+#define MMSCH_GPUIOV_SCH_BLOCK_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_1__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_1
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_1
+#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_1
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_1
+#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_1
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_1
+#define MMSCH_GPUIOV_DW6_1__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_1
+#define MMSCH_GPUIOV_DW7_1__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_1
+#define MMSCH_GPUIOV_DW8_1__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_CNTXT
+#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE__SHIFT 0x0
+#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION__SHIFT 0x7
+#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET__SHIFT 0xa
+#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE_MASK 0x0000007FL
+#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION_MASK 0x00000080L
+#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//MMSCH_SCRATCH_2
+#define MMSCH_SCRATCH_2__SCRATCH_2__SHIFT 0x0
+#define MMSCH_SCRATCH_2__SCRATCH_2_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_3
+#define MMSCH_SCRATCH_3__SCRATCH_3__SHIFT 0x0
+#define MMSCH_SCRATCH_3__SCRATCH_3_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_4
+#define MMSCH_SCRATCH_4__SCRATCH_4__SHIFT 0x0
+#define MMSCH_SCRATCH_4__SCRATCH_4_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_5
+#define MMSCH_SCRATCH_5__SCRATCH_5__SHIFT 0x0
+#define MMSCH_SCRATCH_5__SCRATCH_5_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_6
+#define MMSCH_SCRATCH_6__SCRATCH_6__SHIFT 0x0
+#define MMSCH_SCRATCH_6__SCRATCH_6_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_7
+#define MMSCH_SCRATCH_7__SCRATCH_7__SHIFT 0x0
+#define MMSCH_SCRATCH_7__SCRATCH_7_MASK 0xFFFFFFFFL
+//MMSCH_VFID_FIFO_HEAD_0
+#define MMSCH_VFID_FIFO_HEAD_0__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_0__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_0
+#define MMSCH_VFID_FIFO_TAIL_0__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_0__TAIL_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_HEAD_1
+#define MMSCH_VFID_FIFO_HEAD_1__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_1__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_1
+#define MMSCH_VFID_FIFO_TAIL_1__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_1__TAIL_MASK 0x0000003FL
+//MMSCH_NACK_STATUS
+#define MMSCH_NACK_STATUS__WR_NACK_STATUS__SHIFT 0x0
+#define MMSCH_NACK_STATUS__RD_NACK_STATUS__SHIFT 0x2
+#define MMSCH_NACK_STATUS__WR_NACK_STATUS_MASK 0x00000003L
+#define MMSCH_NACK_STATUS__RD_NACK_STATUS_MASK 0x0000000CL
+//MMSCH_VF_MAILBOX0_DATA
+#define MMSCH_VF_MAILBOX0_DATA__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX0_DATA__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX1_DATA
+#define MMSCH_VF_MAILBOX1_DATA__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX1_DATA__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_IP_0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_SCH_BLOCK_IP_1
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_1
+#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_CNTXT_IP
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE__SHIFT 0x0
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION__SHIFT 0x7
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE_MASK 0x0000007FL
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION_MASK 0x00000080L
+//MMSCH_GPUIOV_SCH_BLOCK_2
+#define MMSCH_GPUIOV_SCH_BLOCK_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_2__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_2
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_2
+#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_2
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_2
+#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_2
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_2
+#define MMSCH_GPUIOV_DW6_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_2
+#define MMSCH_GPUIOV_DW7_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_2
+#define MMSCH_GPUIOV_DW8_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_IP_2
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_2
+#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS_MASK 0x00000F00L
+//MMSCH_VFID_FIFO_HEAD_2
+#define MMSCH_VFID_FIFO_HEAD_2__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_2__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_2
+#define MMSCH_VFID_FIFO_TAIL_2__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_2__TAIL_MASK 0x0000003FL
+//MMSCH_VM_BUSY_STATUS_0
+#define MMSCH_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_VM_BUSY_STATUS_1
+#define MMSCH_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_VM_BUSY_STATUS_2
+#define MMSCH_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL
+
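+/*
+ * Usage sketch (illustrative only; the lines in this comment are not part of
+ * the generated register header): each field above is described by a
+ * <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair, and a field value is
+ * normally read or packed by combining the two, e.g.
+ *
+ *	lock = (reg & MMSCH_UCODE_ADDR__UCODE_LOCK_MASK) >>
+ *	       MMSCH_UCODE_ADDR__UCODE_LOCK__SHIFT;
+ *
+ *	reg  = (reg & ~MMSCH_UCODE_ADDR__UCODE_ADDR_MASK) |
+ *	       ((addr << MMSCH_UCODE_ADDR__UCODE_ADDR__SHIFT) &
+ *		MMSCH_UCODE_ADDR__UCODE_ADDR_MASK);
+ */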
+
+// addressBlock: uvd0_jpegnpdec
+//UVD_JPEG_CNTL
+#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1
+#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2
+#define UVD_JPEG_CNTL__HUFF_SPEED_EN__SHIFT 0x3
+#define UVD_JPEG_CNTL__HUFF_SPEED_STATUS__SHIFT 0x4
+#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8
+#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L
+#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L
+#define UVD_JPEG_CNTL__HUFF_SPEED_EN_MASK 0x00000008L
+#define UVD_JPEG_CNTL__HUFF_SPEED_STATUS_MASK 0x00000010L
+#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L
+//UVD_JPEG_RB_BASE
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0
+#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL
+#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L
+//UVD_JPEG_RB_WPTR
+#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_RPTR
+#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_SIZE
+#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L
+//UVD_JPEG_DEC_SCRATCH0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_INT_EN
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7
+#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8
+#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9
+#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd
+#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L
+#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L
+#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L
+#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L
+#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L
+//UVD_JPEG_INT_STAT
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9
+#define UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_UV_GFX8_TILING_SURFACE
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_GFX8_ADDR_CONFIG
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//JPEG_DEC_Y_GFX10_TILING_SURFACE
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_UV_GFX10_TILING_SURFACE
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_GFX10_ADDR_CONFIG
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_DEC_ADDR_MODE
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_OUTPUT_XY
+//UVD_JPEG_GPCOM_CMD
+#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_GPCOM_DATA0
+#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_GPCOM_DATA1
+#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_SCRATCH1
+#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+//UVD_JPEG_DEC_SOFT_RST
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
+
+// addressBlock: uvd0_uvd_jpeg_enc_dec
+//UVD_JPEG_ENC_INT_EN
+#define UVD_JPEG_ENC_INT_EN__HUFF_JOB_DONE_INT_EN__SHIFT 0x0
+#define UVD_JPEG_ENC_INT_EN__SCLR_JOB_DONE_INT_EN__SHIFT 0x1
+#define UVD_JPEG_ENC_INT_EN__HUFF_ERROR_INT_EN__SHIFT 0x2
+#define UVD_JPEG_ENC_INT_EN__SCLR_ERROR_INT_EN__SHIFT 0x3
+#define UVD_JPEG_ENC_INT_EN__QTBL_ERROR_INT_EN__SHIFT 0x4
+#define UVD_JPEG_ENC_INT_EN__PIC_SIZE_ERROR_INT_EN__SHIFT 0x5
+#define UVD_JPEG_ENC_INT_EN__FENCE_VAL_INT_EN__SHIFT 0x6
+#define UVD_JPEG_ENC_INT_EN__HUFF_JOB_DONE_INT_EN_MASK 0x00000001L
+#define UVD_JPEG_ENC_INT_EN__SCLR_JOB_DONE_INT_EN_MASK 0x00000002L
+#define UVD_JPEG_ENC_INT_EN__HUFF_ERROR_INT_EN_MASK 0x00000004L
+#define UVD_JPEG_ENC_INT_EN__SCLR_ERROR_INT_EN_MASK 0x00000008L
+#define UVD_JPEG_ENC_INT_EN__QTBL_ERROR_INT_EN_MASK 0x00000010L
+#define UVD_JPEG_ENC_INT_EN__PIC_SIZE_ERROR_INT_EN_MASK 0x00000020L
+#define UVD_JPEG_ENC_INT_EN__FENCE_VAL_INT_EN_MASK 0x00000040L
+//UVD_JPEG_ENC_INT_STATUS
+#define UVD_JPEG_ENC_INT_STATUS__HUFF_JOB_DONE_STATUS__SHIFT 0x0
+#define UVD_JPEG_ENC_INT_STATUS__SCLR_JOB_DONE_STATUS__SHIFT 0x1
+#define UVD_JPEG_ENC_INT_STATUS__HUFF_ERROR_STATUS__SHIFT 0x2
+#define UVD_JPEG_ENC_INT_STATUS__SCLR_ERROR_STATUS__SHIFT 0x3
+#define UVD_JPEG_ENC_INT_STATUS__QTBL_ERROR_STATUS__SHIFT 0x4
+#define UVD_JPEG_ENC_INT_STATUS__PIC_SIZE_ERROR_STATUS__SHIFT 0x5
+#define UVD_JPEG_ENC_INT_STATUS__FENCE_VAL_STATUS__SHIFT 0x6
+#define UVD_JPEG_ENC_INT_STATUS__HUFF_JOB_DONE_STATUS_MASK 0x00000001L
+#define UVD_JPEG_ENC_INT_STATUS__SCLR_JOB_DONE_STATUS_MASK 0x00000002L
+#define UVD_JPEG_ENC_INT_STATUS__HUFF_ERROR_STATUS_MASK 0x00000004L
+#define UVD_JPEG_ENC_INT_STATUS__SCLR_ERROR_STATUS_MASK 0x00000008L
+#define UVD_JPEG_ENC_INT_STATUS__QTBL_ERROR_STATUS_MASK 0x00000010L
+#define UVD_JPEG_ENC_INT_STATUS__PIC_SIZE_ERROR_STATUS_MASK 0x00000020L
+#define UVD_JPEG_ENC_INT_STATUS__FENCE_VAL_STATUS_MASK 0x00000040L
+//UVD_JPEG_ENC_ENGINE_CNTL
+#define UVD_JPEG_ENC_ENGINE_CNTL__HUFF_WR_COMB_DIS__SHIFT 0x0
+#define UVD_JPEG_ENC_ENGINE_CNTL__DISTINCT_CHROMA_QUANT_TABLES__SHIFT 0x1
+#define UVD_JPEG_ENC_ENGINE_CNTL__SCALAR_EN__SHIFT 0x2
+#define UVD_JPEG_ENC_ENGINE_CNTL__ENCODE_EN__SHIFT 0x3
+#define UVD_JPEG_ENC_ENGINE_CNTL__CMP_NEEDED__SHIFT 0x4
+#define UVD_JPEG_ENC_ENGINE_CNTL__ECS_RESTRICT_32B_EN__SHIFT 0x9
+#define UVD_JPEG_ENC_ENGINE_CNTL__HUFF_WR_COMB_DIS_MASK 0x00000001L
+#define UVD_JPEG_ENC_ENGINE_CNTL__DISTINCT_CHROMA_QUANT_TABLES_MASK 0x00000002L
+#define UVD_JPEG_ENC_ENGINE_CNTL__SCALAR_EN_MASK 0x00000004L
+#define UVD_JPEG_ENC_ENGINE_CNTL__ENCODE_EN_MASK 0x00000008L
+#define UVD_JPEG_ENC_ENGINE_CNTL__CMP_NEEDED_MASK 0x00000010L
+#define UVD_JPEG_ENC_ENGINE_CNTL__ECS_RESTRICT_32B_EN_MASK 0x00000200L
+//UVD_JPEG_ENC_SCRATCH1
+#define UVD_JPEG_ENC_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_ENC_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd0_uvd_jpeg_enc_sclk_dec
+//UVD_JPEG_ENC_STATUS
+#define UVD_JPEG_ENC_STATUS__PEL_FETCH_IDLE__SHIFT 0x0
+#define UVD_JPEG_ENC_STATUS__HUFF_CORE_IDLE__SHIFT 0x1
+#define UVD_JPEG_ENC_STATUS__FDCT_IDLE__SHIFT 0x2
+#define UVD_JPEG_ENC_STATUS__SCALAR_IDLE__SHIFT 0x3
+#define UVD_JPEG_ENC_STATUS__PEL_FETCH_IDLE_MASK 0x00000001L
+#define UVD_JPEG_ENC_STATUS__HUFF_CORE_IDLE_MASK 0x00000002L
+#define UVD_JPEG_ENC_STATUS__FDCT_IDLE_MASK 0x00000004L
+#define UVD_JPEG_ENC_STATUS__SCALAR_IDLE_MASK 0x00000008L
+//UVD_JPEG_ENC_PITCH
+#define UVD_JPEG_ENC_PITCH__PITCH_Y__SHIFT 0x0
+#define UVD_JPEG_ENC_PITCH__PITCH_UV__SHIFT 0x10
+#define UVD_JPEG_ENC_PITCH__PITCH_Y_MASK 0x00000FFFL
+#define UVD_JPEG_ENC_PITCH__PITCH_UV_MASK 0x0FFF0000L
+//UVD_JPEG_ENC_LUMA_BASE
+#define UVD_JPEG_ENC_LUMA_BASE__LUMA_BASE__SHIFT 0x0
+#define UVD_JPEG_ENC_LUMA_BASE__LUMA_BASE_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_CHROMAU_BASE
+#define UVD_JPEG_ENC_CHROMAU_BASE__CHROMAU_BASE__SHIFT 0x0
+#define UVD_JPEG_ENC_CHROMAU_BASE__CHROMAU_BASE_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_CHROMAV_BASE
+#define UVD_JPEG_ENC_CHROMAV_BASE__CHROMAV_BASE__SHIFT 0x0
+#define UVD_JPEG_ENC_CHROMAV_BASE__CHROMAV_BASE_MASK 0xFFFFFFFFL
+//JPEG_ENC_Y_GFX10_TILING_SURFACE
+#define JPEG_ENC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_ENC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_ENC_UV_GFX10_TILING_SURFACE
+#define JPEG_ENC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_ENC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_ENC_GFX10_ADDR_CONFIG
+#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_ENC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_ENC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_ENC_ADDR_MODE
+#define JPEG_ENC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_ENC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_ENC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_ENC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_ENC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_ENC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_ENC_GPCOM_CMD
+#define UVD_JPEG_ENC_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_ENC_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_ENC_GPCOM_DATA0
+#define UVD_JPEG_ENC_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_ENC_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_GPCOM_DATA1
+#define UVD_JPEG_ENC_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_ENC_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_CGC_CNTL
+#define UVD_JPEG_ENC_CGC_CNTL__CGC_EN__SHIFT 0x0
+#define UVD_JPEG_ENC_CGC_CNTL__CGC_EN_MASK 0x00000001L
+//UVD_JPEG_ENC_SCRATCH0
+#define UVD_JPEG_ENC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_ENC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_SOFT_RST
+#define UVD_JPEG_ENC_SOFT_RST__SOFT_RST__SHIFT 0x0
+#define UVD_JPEG_ENC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_ENC_SOFT_RST__SOFT_RST_MASK 0x00000001L
+#define UVD_JPEG_ENC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
+
+// addressBlock: uvd0_uvd_jrbc_dec
+//UVD_JRBC_RB_WPTR
+#define UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_CNTL
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC_IB_SIZE
+#define UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_SOFT_RESET
+#define UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC_STATUS
+#define UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC_RB_RPTR
+#define UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_CMD
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_SIZE
+#define UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC_SCRATCH0
+#define UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd0_uvd_jrbc_enc_dec
+//UVD_JRBC_ENC_RB_WPTR
+#define UVD_JRBC_ENC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC_ENC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC_ENC_RB_CNTL
+#define UVD_JRBC_ENC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC_ENC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC_ENC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC_ENC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC_ENC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC_ENC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC_ENC_IB_SIZE
+#define UVD_JRBC_ENC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_ENC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_ENC_URGENT_CNTL
+#define UVD_JRBC_ENC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC_ENC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC_ENC_RB_REF_DATA
+#define UVD_JRBC_ENC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_ENC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC_ENC_RB_COND_RD_TIMER
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_ENC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_ENC_SOFT_RESET
+#define UVD_JRBC_ENC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC_ENC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC_ENC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC_ENC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC_ENC_STATUS
+#define UVD_JRBC_ENC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC_ENC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC_ENC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC_ENC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC_ENC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC_ENC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC_ENC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC_ENC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC_ENC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC_ENC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC_ENC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC_ENC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC_ENC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC_ENC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC_ENC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC_ENC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC_ENC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC_ENC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC_ENC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC_ENC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC_ENC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC_ENC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC_ENC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC_ENC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC_ENC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC_ENC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC_ENC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC_ENC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC_ENC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC_ENC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC_ENC_RB_RPTR
+#define UVD_JRBC_ENC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC_ENC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC_ENC_RB_BUF_STATUS
+#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_ENC_IB_BUF_STATUS
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_ENC_IB_SIZE_UPDATE
+#define UVD_JRBC_ENC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_ENC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_ENC_IB_COND_RD_TIMER
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_ENC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_ENC_IB_REF_DATA
+#define UVD_JRBC_ENC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_ENC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_PREEMPT_CMD
+#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JPEG_ENC_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JPEG_ENC_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JPEG_ENC_PREEMPT_FENCE_DATA0
+#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_ENC_PREEMPT_FENCE_DATA1
+#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC_ENC_RB_SIZE
+#define UVD_JRBC_ENC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC_ENC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC_ENC_SCRATCH0
+#define UVD_JRBC_ENC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC_ENC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd0_uvd_jmi_dec
+//UVD_JMI_CTRL
+#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0
+#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10
+#define UVD_JMI_CTRL__CRC_RESET__SHIFT 0x18
+#define UVD_JMI_CTRL__CRC_SEL__SHIFT 0x19
+#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L
+#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L
+#define UVD_JMI_CTRL__CRC_RESET_MASK 0x01000000L
+#define UVD_JMI_CTRL__CRC_SEL_MASK 0x1E000000L
+//UVD_LMI_JRBC_CTRL
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_LMI_JPEG_CTRL
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI_EJRBC_CTRL
+#define UVD_JMI_EJRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI_EJRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI_EJRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI_EJRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI_EJRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI_EJRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI_EJRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI_EJRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI_EJRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI_EJRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI_EJRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI_EJRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_LMI_EJPEG_CTRL
+#define UVD_LMI_EJPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_EJPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_EJPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_EJPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_EJPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_EJPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_EJPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_EJPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_EJPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_EJPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_EJPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_EJPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_LMI_JRBC_IB_VMID
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JRBC_RB_VMID
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_VMID
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI_ENC_JRBC_IB_VMID
+#define UVD_JMI_ENC_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI_ENC_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI_ENC_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI_ENC_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI_ENC_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI_ENC_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI_ENC_JRBC_RB_VMID
+#define UVD_JMI_ENC_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI_ENC_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI_ENC_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI_ENC_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI_ENC_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI_ENC_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI_ENC_JPEG_VMID
+#define UVD_JMI_ENC_JPEG_VMID__PEL_RD_VMID__SHIFT 0x0
+#define UVD_JMI_ENC_JPEG_VMID__BS_WR_VMID__SHIFT 0x5
+#define UVD_JMI_ENC_JPEG_VMID__SCALAR_RD_VMID__SHIFT 0xa
+#define UVD_JMI_ENC_JPEG_VMID__SCALAR_WR_VMID__SHIFT 0xf
+#define UVD_JMI_ENC_JPEG_VMID__HUFF_FENCE_VMID__SHIFT 0x13
+#define UVD_JMI_ENC_JPEG_VMID__ATOMIC_USER1_WR_VMID__SHIFT 0x17
+#define UVD_JMI_ENC_JPEG_VMID__PEL_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI_ENC_JPEG_VMID__BS_WR_VMID_MASK 0x000001E0L
+#define UVD_JMI_ENC_JPEG_VMID__SCALAR_RD_VMID_MASK 0x00003C00L
+#define UVD_JMI_ENC_JPEG_VMID__SCALAR_WR_VMID_MASK 0x00078000L
+#define UVD_JMI_ENC_JPEG_VMID__HUFF_FENCE_VMID_MASK 0x00780000L
+#define UVD_JMI_ENC_JPEG_VMID__ATOMIC_USER1_WR_VMID_MASK 0x07800000L
+//UVD_JMI_PERFMON_CTRL
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00000F00L
+//UVD_JMI_PERFMON_COUNT_LO
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_JMI_PERFMON_COUNT_HI
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_EJRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_EJRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_EJRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_EJRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW
+#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH
+#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW
+#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH
+#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_LMI_ENC_JPEG_PREEMPT_VMID
+#define UVD_LMI_ENC_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_LMI_ENC_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_LMI_JPEG2_VMID
+#define UVD_LMI_JPEG2_VMID__JPEG2_RD_VMID__SHIFT 0x0
+#define UVD_LMI_JPEG2_VMID__JPEG2_WR_VMID__SHIFT 0x4
+#define UVD_LMI_JPEG2_VMID__JPEG2_RD_VMID_MASK 0x0000000FL
+#define UVD_LMI_JPEG2_VMID__JPEG2_WR_VMID_MASK 0x000000F0L
+//UVD_LMI_JPEG2_READ_64BIT_BAR_LOW
+#define UVD_LMI_JPEG2_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG2_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG2_READ_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG2_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG2_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_CTRL2
+#define UVD_LMI_JPEG_CTRL2__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JPEG_CTRL2__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JPEG_CTRL2__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JPEG_CTRL2__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JPEG_CTRL2__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JPEG_CTRL2__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JPEG_CTRL2__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JPEG_CTRL2__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JPEG_CTRL2__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JPEG_CTRL2__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JPEG_CTRL2__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JPEG_CTRL2__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI_ENC_SWAP_CNTL
+#define UVD_JMI_ENC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI_ENC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI_ENC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI_ENC_SWAP_CNTL__PEL_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI_ENC_SWAP_CNTL__BS_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_RD_MC_SWAP__SHIFT 0x12
+#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_WR_MC_SWAP__SHIFT 0x14
+#define UVD_JMI_ENC_SWAP_CNTL__HUFF_FENCE_MC_SWAP__SHIFT 0x16
+#define UVD_JMI_ENC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI_ENC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI_ENC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI_ENC_SWAP_CNTL__PEL_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI_ENC_SWAP_CNTL__BS_WR_MC_SWAP_MASK 0x00030000L
+#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_RD_MC_SWAP_MASK 0x000C0000L
+#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_WR_MC_SWAP_MASK 0x00300000L
+#define UVD_JMI_ENC_SWAP_CNTL__HUFF_FENCE_MC_SWAP_MASK 0x00C00000L
+//UVD_JMI_CNTL
+#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8
+#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L
+//UVD_JMI_HUFF_FENCE_64BIT_BAR_LOW
+#define UVD_JMI_HUFF_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI_HUFF_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI_DEC_SWAP_CNTL2
+#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_RD_MC_SWAP__SHIFT 0x0
+#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_WR_MC_SWAP__SHIFT 0x2
+#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_RD_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_WR_MC_SWAP_MASK 0x0000000CL
+
+
+// addressBlock: uvd0_uvd_jpeg_common_dec
+//JPEG_SOFT_RESET_STATUS
+#define JPEG_SOFT_RESET_STATUS__JPEG_DEC_RESET_STATUS__SHIFT 0x0
+#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS__SHIFT 0x1
+#define JPEG_SOFT_RESET_STATUS__DJRBC_RESET_STATUS__SHIFT 0x2
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x3
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x4
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x5
+#define JPEG_SOFT_RESET_STATUS__JPEG_DEC_RESET_STATUS_MASK 0x00000001L
+#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS_MASK 0x00000002L
+#define JPEG_SOFT_RESET_STATUS__DJRBC_RESET_STATUS_MASK 0x00000004L
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00000008L
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00000010L
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x00000020L
+//JPEG_SYS_INT_EN
+#define JPEG_SYS_INT_EN__DJPEG_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_EN__DJRBC__SHIFT 0x1
+#define JPEG_SYS_INT_EN__DJPEG_PF_RPT__SHIFT 0x2
+#define JPEG_SYS_INT_EN__EJPEG_PF_RPT__SHIFT 0x3
+#define JPEG_SYS_INT_EN__EJPEG_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_EN__EJRBC__SHIFT 0x5
+#define JPEG_SYS_INT_EN__DJPEG_CORE2__SHIFT 0x6
+#define JPEG_SYS_INT_EN__DJPEG_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_EN__DJRBC_MASK 0x00000002L
+#define JPEG_SYS_INT_EN__DJPEG_PF_RPT_MASK 0x00000004L
+#define JPEG_SYS_INT_EN__EJPEG_PF_RPT_MASK 0x00000008L
+#define JPEG_SYS_INT_EN__EJPEG_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_EN__EJRBC_MASK 0x00000020L
+#define JPEG_SYS_INT_EN__DJPEG_CORE2_MASK 0x00000040L
+//JPEG_SYS_INT_STATUS
+#define JPEG_SYS_INT_STATUS__DJPEG_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS__DJRBC__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS__DJPEG_PF_RPT__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS__EJPEG_PF_RPT__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS__EJPEG_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_STATUS__EJRBC__SHIFT 0x5
+#define JPEG_SYS_INT_STATUS__DJPEG_CORE2__SHIFT 0x6
+#define JPEG_SYS_INT_STATUS__DJPEG_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS__DJRBC_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS__DJPEG_PF_RPT_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS__EJPEG_PF_RPT_MASK 0x00000008L
+#define JPEG_SYS_INT_STATUS__EJPEG_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_STATUS__EJRBC_MASK 0x00000020L
+#define JPEG_SYS_INT_STATUS__DJPEG_CORE2_MASK 0x00000040L
+//JPEG_SYS_INT_ACK
+#define JPEG_SYS_INT_ACK__DJPEG_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_ACK__DJRBC__SHIFT 0x1
+#define JPEG_SYS_INT_ACK__DJPEG_PF_RPT__SHIFT 0x2
+#define JPEG_SYS_INT_ACK__EJPEG_PF_RPT__SHIFT 0x3
+#define JPEG_SYS_INT_ACK__EJPEG_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_ACK__EJRBC__SHIFT 0x5
+#define JPEG_SYS_INT_ACK__DJPEG_CORE2__SHIFT 0x6
+#define JPEG_SYS_INT_ACK__DJPEG_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK__DJRBC_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK__DJPEG_PF_RPT_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK__EJPEG_PF_RPT_MASK 0x00000008L
+#define JPEG_SYS_INT_ACK__EJPEG_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_ACK__EJRBC_MASK 0x00000020L
+#define JPEG_SYS_INT_ACK__DJPEG_CORE2_MASK 0x00000040L
+//JPEG_MASTINT_EN
+#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//JPEG_IH_CTRL
+#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3
+#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define JPEG_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//JRBBM_ARB_CTRL
+#define JRBBM_ARB_CTRL__DJRBC_DROP__SHIFT 0x0
+#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x1
+#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x2
+#define JRBBM_ARB_CTRL__DJRBC_DROP_MASK 0x00000001L
+#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000002L
+#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000004L
+
+
+// addressBlock: uvd0_uvd_jpeg_common_sclk_dec
+//JPEG_CGC_GATE
+#define JPEG_CGC_GATE__JPEG_DEC__SHIFT 0x0
+#define JPEG_CGC_GATE__JPEG2_DEC__SHIFT 0x1
+#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x2
+#define JPEG_CGC_GATE__JMCIF__SHIFT 0x3
+#define JPEG_CGC_GATE__JRBBM__SHIFT 0x4
+#define JPEG_CGC_GATE__JPEG_DEC_MASK 0x00000001L
+#define JPEG_CGC_GATE__JPEG2_DEC_MASK 0x00000002L
+#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000004L
+#define JPEG_CGC_GATE__JMCIF_MASK 0x00000008L
+#define JPEG_CGC_GATE__JRBBM_MASK 0x00000010L
+//JPEG_CGC_CTRL
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5
+#define JPEG_CGC_CTRL__DYN_OCLK_RAMP_EN__SHIFT 0xa
+#define JPEG_CGC_CTRL__DYN_RCLK_RAMP_EN__SHIFT 0xb
+#define JPEG_CGC_CTRL__GATER_DIV_ID__SHIFT 0xc
+#define JPEG_CGC_CTRL__JPEG_DEC_MODE__SHIFT 0x10
+#define JPEG_CGC_CTRL__JPEG2_DEC_MODE__SHIFT 0x11
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x12
+#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x13
+#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x14
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000003E0L
+#define JPEG_CGC_CTRL__DYN_OCLK_RAMP_EN_MASK 0x00000400L
+#define JPEG_CGC_CTRL__DYN_RCLK_RAMP_EN_MASK 0x00000800L
+#define JPEG_CGC_CTRL__GATER_DIV_ID_MASK 0x00007000L
+#define JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK 0x00010000L
+#define JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK 0x00020000L
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x00040000L
+#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x00080000L
+#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x00100000L
+//JPEG_CGC_STATUS
+#define JPEG_CGC_STATUS__JPEG_DEC_VCLK_ACTIVE__SHIFT 0x0
+#define JPEG_CGC_STATUS__JPEG_DEC_SCLK_ACTIVE__SHIFT 0x1
+#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE__SHIFT 0x2
+#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE__SHIFT 0x3
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x4
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x5
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x6
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x7
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x8
+#define JPEG_CGC_STATUS__JPEG_DEC_VCLK_ACTIVE_MASK 0x00000001L
+#define JPEG_CGC_STATUS__JPEG_DEC_SCLK_ACTIVE_MASK 0x00000002L
+#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE_MASK 0x00000004L
+#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE_MASK 0x00000008L
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00000010L
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00000020L
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00000040L
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00000080L
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00000100L
+//JPEG_COMN_CGC_MEM_CTRL
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2
+#define JPEG_COMN_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10
+#define JPEG_COMN_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L
+#define JPEG_COMN_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L
+#define JPEG_COMN_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L
+//JPEG_DEC_CGC_MEM_CTRL
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_LS_EN__SHIFT 0x0
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_DS_EN__SHIFT 0x1
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_SD_EN__SHIFT 0x2
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_LS_EN_MASK 0x00000001L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_DS_EN_MASK 0x00000002L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_SD_EN_MASK 0x00000004L
+//JPEG2_DEC_CGC_MEM_CTRL
+#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN__SHIFT 0x0
+#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN__SHIFT 0x1
+#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN__SHIFT 0x2
+#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN_MASK 0x00000001L
+#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN_MASK 0x00000002L
+#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN_MASK 0x00000004L
+//JPEG_ENC_CGC_MEM_CTRL
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L
+//JPEG_SOFT_RESET2
+#define JPEG_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0
+#define JPEG_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L
+//JPEG_PERF_BANK_CONF
+#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0
+#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8
+#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10
+#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL
+#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L
+#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L
+//JPEG_PERF_BANK_EVENT_SEL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L
+//JPEG_PERF_BANK_COUNT0
+#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT1
+#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT2
+#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT3
+#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd0_uvd_pg_dec
+//UVD_PGFSM_CONFIG
+#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 0x0
+#define UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT 0x2
+#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 0x4
+#define UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT 0x6
+#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 0x8
+#define UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT 0xa
+#define UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT 0xc
+#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 0xe
+#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 0x10
+#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 0x12
+#define UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT 0x14
+#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT 0x16
+#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG_MASK 0x00000003L
+#define UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG_MASK 0x0000000CL
+#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG_MASK 0x00000030L
+#define UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG_MASK 0x000000C0L
+#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG_MASK 0x00000300L
+#define UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG_MASK 0x00000C00L
+#define UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG_MASK 0x00003000L
+#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG_MASK 0x0000C000L
+#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG_MASK 0x00030000L
+#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG_MASK 0x000C0000L
+#define UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG_MASK 0x00300000L
+#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG_MASK 0x00C00000L
+//UVD_PGFSM_STATUS
+#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT 0x0
+#define UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT 0x2
+#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT 0x4
+#define UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT 0x6
+#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT 0x8
+#define UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT 0xa
+#define UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT 0xc
+#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT 0xe
+#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT 0x10
+#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT 0x12
+#define UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT 0x14
+#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT 0x16
+#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS_MASK 0x00000003L
+#define UVD_PGFSM_STATUS__UVDU_PWR_STATUS_MASK 0x0000000CL
+#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS_MASK 0x00000030L
+#define UVD_PGFSM_STATUS__UVDC_PWR_STATUS_MASK 0x000000C0L
+#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS_MASK 0x00000300L
+#define UVD_PGFSM_STATUS__UVDIL_PWR_STATUS_MASK 0x00000C00L
+#define UVD_PGFSM_STATUS__UVDIR_PWR_STATUS_MASK 0x00003000L
+#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS_MASK 0x0000C000L
+#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS_MASK 0x00030000L
+#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS_MASK 0x000C0000L
+#define UVD_PGFSM_STATUS__UVDW_PWR_STATUS_MASK 0x00300000L
+#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK 0x00C00000L
+//UVD_POWER_STATUS
+#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0
+#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2
+#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4
+#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f
+#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000003L
+#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L
+#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L
+#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L
+//UVD_PG_IND_INDEX
+#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL
+//UVD_PG_IND_DATA
+#define UVD_PG_IND_DATA__DATA__SHIFT 0x0
+#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//CC_UVD_HARVESTING
+#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0
+#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L
+#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
+//UVD_JPEG_POWER_STATUS
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L
+//UVD_DPG_LMA_CTL
+#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0
+#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2
+#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10
+#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L
+#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L
+//UVD_DPG_LMA_DATA
+#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0
+#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL
+//UVD_DPG_LMA_MASK
+#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0
+#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL
+//UVD_DPG_PAUSE
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L
+//UVD_SCRATCH1
+#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0
+#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH2
+#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0
+#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH3
+#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0
+#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH4
+#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0
+#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH5
+#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0
+#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH6
+#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0
+#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH7
+#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0
+#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH8
+#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0
+#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH9
+#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0
+#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH10
+#define UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0
+#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH11
+#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0
+#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH12
+#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0
+#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH13
+#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0
+#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH14
+#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0
+#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL
+//UVD_FREE_COUNTER_REG
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_DPG_VCPU_CACHE_OFFSET0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_VMID
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_PF_STATUS
+#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0
+#define UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7
+#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8
+#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12
+#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L
+#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L
+#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L
+#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L
+//UVD_DPG_CLK_EN_VCPU_REPORT
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL
+//UVD_GFX8_ADDR_CONFIG
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//UVD_GFX10_ADDR_CONFIG
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//UVD_GPCNT2_CNTL
+#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT2_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L
+//UVD_GPCNT2_TARGET_LOWER
+#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_STATUS_LOWER
+#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_TARGET_UPPER
+#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT2_STATUS_UPPER
+#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_GPCNT3_CNTL
+#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT3_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3
+#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa
+#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L
+#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L
+#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L
+//UVD_GPCNT3_TARGET_LOWER
+#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_STATUS_LOWER
+#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_TARGET_UPPER
+#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT3_STATUS_UPPER
+#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+
+
+// addressBlock: uvd0_uvddec
+//UVD_STATUS
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x0
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1
+#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10
+#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL
+#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L
+#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L
+//UVD_ENC_PIPE_BUSY
+#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e
+#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L
+//UVD_SOFT_RESET
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8
+#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L
+//UVD_SOFT_RESET2
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_MMSCH_SOFT_RESET
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L
+//UVD_CGC_GATE
+#define UVD_CGC_GATE__SYS__SHIFT 0x0
+#define UVD_CGC_GATE__UDEC__SHIFT 0x1
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x2
+#define UVD_CGC_GATE__REGS__SHIFT 0x3
+#define UVD_CGC_GATE__RBC__SHIFT 0x4
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6
+#define UVD_CGC_GATE__IDCT__SHIFT 0x7
+#define UVD_CGC_GATE__MPRD__SHIFT 0x8
+#define UVD_CGC_GATE__MPC__SHIFT 0x9
+#define UVD_CGC_GATE__LBSI__SHIFT 0xa
+#define UVD_CGC_GATE__LRBBM__SHIFT 0xb
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10
+#define UVD_CGC_GATE__WCB__SHIFT 0x11
+#define UVD_CGC_GATE__VCPU__SHIFT 0x12
+#define UVD_CGC_GATE__MMSCH__SHIFT 0x14
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L
+//UVD_CGC_STATUS
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a
+#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b
+#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L
+//UVD_CGC_CTRL
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d
+#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL
+#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L
+#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
+#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
+#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
+#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
+#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
+#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
+#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
+#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
+#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
+#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
+#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
+#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
+#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
+#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
+#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L
+//UVD_CGC_UDEC_STATUS
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+//UVD_SUVD_CGC_GATE
+#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+//UVD_SUVD_CGC_STATUS
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe
+#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10
+#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L
+#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L
+#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L
+//UVD_SUVD_CGC_CTRL
+#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+//UVD_GPCOM_VCPU_CMD
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
+//UVD_GPCOM_VCPU_DATA0
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_VCPU_DATA1
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_CMD
+#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L
+//UVD_GPCOM_SYS_DATA0
+#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_DATA1
+#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_VCPU_INT_EN
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa
+#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10
+#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11
+#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a
+#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L
+#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L
+#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L
+#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_VCPU_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L
+#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L
+//UVD_VCPU_INT_ACK
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa
+#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10
+#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L
+#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L
+#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L
+//UVD_VCPU_INT_ROUTE
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_EN
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_ACK
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L
+//UVD_MASTINT_EN
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//UVD_SYS_INT_EN
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L
+//UVD_SYS_INT_STATUS
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10
+#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L
+#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L
+//UVD_SYS_INT_ACK
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L
+//UVD_JOB_DONE
+#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0
+#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L
+//UVD_CBUF_ID
+#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0
+#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID2
+#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
+#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL
+//UVD_NO_OP
+#define UVD_NO_OP__NO_OP__SHIFT 0x0
+#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL
+//UVD_RB_BASE_LO
+#define UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI
+#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE
+#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_RPTR
+#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR
+#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO2
+#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI2
+#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE2
+#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_RPTR2
+#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR2
+#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO3
+#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI3
+#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE3
+#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_RPTR3
+#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR3
+#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO4
+#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI4
+#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE4
+#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_RPTR4
+#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR4
+#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_BASE_LO
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_OUT_RB_BASE_HI
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_OUT_RB_SIZE
+#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_OUT_RB_RPTR
+#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_WPTR
+#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_ARB_CTRL
+#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1
+#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2
+#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3
+#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4
+#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8
+#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L
+#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L
+#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L
+#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L
+#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 0x00000020L
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L
+//UVD_CTX_INDEX
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x0
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL
+//UVD_CTX_DATA
+#define UVD_CTX_DATA__DATA__SHIFT 0x0
+#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_CXW_WR
+#define UVD_CXW_WR__DAT__SHIFT 0x0
+#define UVD_CXW_WR__STAT__SHIFT 0x1f
+#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL
+#define UVD_CXW_WR__STAT_MASK 0x80000000L
+//UVD_CXW_WR_INT_ID
+#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL
+//UVD_CXW_WR_INT_CTX_ID
+#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL
+//UVD_CXW_INT_ID
+#define UVD_CXW_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL
+//UVD_TOP_CTRL
+#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0
+#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4
+#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL
+#define UVD_TOP_CTRL__STD_VERSION_MASK 0x000000F0L
+//UVD_YBASE
+#define UVD_YBASE__DUM__SHIFT 0x0
+#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_UVBASE
+#define UVD_UVBASE__DUM__SHIFT 0x0
+#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_PITCH
+#define UVD_PITCH__DUM__SHIFT 0x0
+#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL
+//UVD_WIDTH
+#define UVD_WIDTH__DUM__SHIFT 0x0
+#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL
+//UVD_HEIGHT
+#define UVD_HEIGHT__DUM__SHIFT 0x0
+#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL
+//UVD_PICCOUNT
+#define UVD_PICCOUNT__DUM__SHIFT 0x0
+#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL
+//UVD_SCRATCH_NP
+#define UVD_SCRATCH_NP__DATA__SHIFT 0x0
+#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL
+//UVD_VERSION
+#define UVD_VERSION__MINOR_VERSION__SHIFT 0x0
+#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10
+#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FFFFL
+#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L
+//UVD_GP_SCRATCH0
+#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH1
+#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH2
+#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH3
+#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH4
+#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH5
+#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH6
+#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH7
+#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH8
+#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH9
+#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH10
+#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH11
+#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH12
+#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH13
+#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH14
+#define UVD_GP_SCRATCH14__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH15
+#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH16
+#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH17
+#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH18
+#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH19
+#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH20
+#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH21
+#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH22
+#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH23
+#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+
+
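Illustrative sketch, not part of the patch itself: each register above is described by a SHIFT/MASK pair per field, and a field is read by masking the register value and shifting it down. A minimal standalone C example using the UVD_VERSION fields defined above; the two macros are mirrored here only so the snippet compiles on its own, and the sample register value is made up for the demonstration.

#include <stdint.h>
#include <stdio.h>

/* Mirrored from the UVD_VERSION definitions in this header. */
#define UVD_VERSION__MINOR_VERSION__SHIFT 0x0
#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10
#define UVD_VERSION__MINOR_VERSION_MASK   0x0000FFFFL
#define UVD_VERSION__MAJOR_VERSION_MASK   0x0FFF0000L

/* Decode a raw UVD_VERSION register value into its major/minor fields:
 * mask out the field bits, then shift them down to bit 0. */
static void uvd_decode_version(uint32_t reg, uint32_t *major, uint32_t *minor)
{
	*major = (reg & UVD_VERSION__MAJOR_VERSION_MASK) >> UVD_VERSION__MAJOR_VERSION__SHIFT;
	*minor = (reg & UVD_VERSION__MINOR_VERSION_MASK) >> UVD_VERSION__MINOR_VERSION__SHIFT;
}

int main(void)
{
	uint32_t major, minor;

	/* 0x00070002 is an invented sample value; it decodes as 7.2. */
	uvd_decode_version(0x00070002, &major, &minor);
	printf("UVD version %u.%u\n", major, minor);
	return 0;
}

The same mask-then-shift pattern applies to every *_MASK/__SHIFT pair in this file.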
+// addressBlock: uvd0_ecpudec
+//UVD_VCPU_CACHE_OFFSET0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET1
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE1
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET2
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE2
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET3
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE3
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET4
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE4
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET5
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE5
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET6
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE6
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET7
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE7
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET8
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE8
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET1
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE1
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CNTL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L
+//UVD_VCPU_PRID
+#define UVD_VCPU_PRID__PRID__SHIFT 0x0
+#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL
+//UVD_VCPU_TRCE
+#define UVD_VCPU_TRCE__PC__SHIFT 0x0
+#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL
+//UVD_VCPU_TRCE_RD
+#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0
+#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd0_uvd_mpcdec
+//UVD_MP_SWAP_CNTL
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x0
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x2
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x4
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x6
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x8
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0xa
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0xc
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0xe
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x10
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x12
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x14
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x16
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x18
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x1a
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x1c
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x1e
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000CL
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000C0L
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000C00L
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000C000L
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000C0000L
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00C00000L
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0C000000L
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xC0000000L
+//UVD_MPC_LUMA_SRCH
+#define UVD_MPC_LUMA_SRCH__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_SRCH__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_LUMA_HIT
+#define UVD_MPC_LUMA_HIT__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_HIT__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_LUMA_HITPEND
+#define UVD_MPC_LUMA_HITPEND__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_HITPEND__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_SRCH
+#define UVD_MPC_CHROMA_SRCH__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_SRCH__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_HIT
+#define UVD_MPC_CHROMA_HIT__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_HIT__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_HITPEND
+#define UVD_MPC_CHROMA_HITPEND__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_HITPEND__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CNTL
+#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3
+#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x6
+#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x10
+#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x12
+#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP__SHIFT 0x13
+#define UVD_MPC_CNTL__TEST_MODE_EN__SHIFT 0x14
+#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
+#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L
+#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L
+#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L
+#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP_MASK 0x00080000L
+#define UVD_MPC_CNTL__TEST_MODE_EN_MASK 0x00100000L
+//UVD_MPC_PITCH
+#define UVD_MPC_PITCH__LUMA_PITCH__SHIFT 0x0
+#define UVD_MPC_PITCH__LUMA_PITCH_MASK 0x000007FFL
+//UVD_MPC_SET_MUXA0
+#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0
+#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6
+#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0xc
+#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x12
+#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x18
+#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003F000L
+#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00FC0000L
+#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3F000000L
+//UVD_MPC_SET_MUXA1
+#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x0
+#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x6
+#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0xc
+#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003F000L
+//UVD_MPC_SET_MUXB0
+#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x0
+#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x6
+#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0xc
+#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x12
+#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x18
+#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003F000L
+#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00FC0000L
+#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3F000000L
+//UVD_MPC_SET_MUXB1
+#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x0
+#define UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x6
+#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0xc
+#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003F000L
+//UVD_MPC_SET_MUX
+#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x0
+#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x3
+#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x6
+#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L
+#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L
+#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001C0L
+//UVD_MPC_SET_ALU
+#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x0
+#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x4
+#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L
+#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000FF0L
+//UVD_MPC_PERF0
+#define UVD_MPC_PERF0__MAX_LAT__SHIFT 0x0
+#define UVD_MPC_PERF0__MAX_LAT_MASK 0x000003FFL
+//UVD_MPC_PERF1
+#define UVD_MPC_PERF1__AVE_LAT__SHIFT 0x0
+#define UVD_MPC_PERF1__AVE_LAT_MASK 0x000003FFL
+
+
+// addressBlock: uvd0_uvd_rbcdec
+//UVD_RBC_IB_SIZE
+#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_RBC_IB_SIZE_UPDATE
+#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_RBC_RB_CNTL
+#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x10
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x14
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x18
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1c
+#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001FL
+#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001F00L
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L
+//UVD_RBC_RB_RPTR_ADDR
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x0
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFFL
+//UVD_RBC_RB_RPTR
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_WPTR
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RBC_VCPU_ACCESS
+#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC__SHIFT 0x0
+#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC_MASK 0x00000001L
+//UVD_RBC_READ_REQ_URGENT_CNTL
+#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_RBC_RB_WPTR_CNTL
+#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x0
+#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER_MASK 0x00007FFFL
+//UVD_RBC_WPTR_STATUS
+#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE__SHIFT 0x4
+#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE_MASK 0x007FFFF0L
+//UVD_RBC_WPTR_POLL_CNTL
+#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ__SHIFT 0x0
+#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ_MASK 0x0000FFFFL
+#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//UVD_RBC_WPTR_POLL_ADDR
+#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR__SHIFT 0x2
+#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR_MASK 0xFFFFFFFCL
+//UVD_SEMA_CMD
+#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x0
+#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x4
+#define UVD_SEMA_CMD__MODE__SHIFT 0x6
+#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x7
+#define UVD_SEMA_CMD__VMID__SHIFT 0x8
+#define UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000FL
+#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L
+#define UVD_SEMA_CMD__MODE_MASK 0x00000040L
+#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L
+#define UVD_SEMA_CMD__VMID_MASK 0x00000F00L
+//UVD_SEMA_ADDR_LOW
+#define UVD_SEMA_ADDR_LOW__ADDR_26_3__SHIFT 0x0
+#define UVD_SEMA_ADDR_LOW__ADDR_26_3_MASK 0x00FFFFFFL
+//UVD_SEMA_ADDR_HIGH
+#define UVD_SEMA_ADDR_HIGH__ADDR_47_27__SHIFT 0x0
+#define UVD_SEMA_ADDR_HIGH__ADDR_47_27_MASK 0x001FFFFFL
+//UVD_ENGINE_CNTL
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1
+#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE__SHIFT 0x2
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L
+#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE_MASK 0x00000004L
+//UVD_SEMA_TIMEOUT_STATUS
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x0
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x1
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x2
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x3
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L
+//UVD_SEMA_CNTL
+#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x0
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x1
+#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L
+//UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x0
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x1
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001FFFFEL
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+//UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x0
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x1
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001FFFFEL
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+//UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x1
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001FFFFEL
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+//UVD_JOB_START
+#define UVD_JOB_START__JOB_START__SHIFT 0x0
+#define UVD_JOB_START__JOB_START_MASK 0x00000001L
+//UVD_RBC_BUF_STATUS
+#define UVD_RBC_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_RBC_BUF_STATUS__IB_BUF_VALID__SHIFT 0x8
+#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x13
+#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x16
+#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x19
+#define UVD_RBC_BUF_STATUS__RB_BUF_VALID_MASK 0x000000FFL
+#define UVD_RBC_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FF00L
+#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x00070000L
+#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x00380000L
+#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x01C00000L
+#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x0E000000L
+
+
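Illustrative sketch, not part of the patch: writing a field is the inverse of reading one, i.e. clear the field with its mask and OR in the shifted value. The amdgpu driver has its own REG_SET_FIELD-style helpers built on names like these; PACK_FIELD below is a local stand-in, the UVD_RBC_RB_CNTL field macros are mirrored so the snippet compiles standalone, and the field semantics used in the example (RB_BUFSZ as a log2 ring size, the chosen enable bits) are assumptions for illustration, not taken from this header.

#include <stdint.h>
#include <stdio.h>

/* Mirrored from the UVD_RBC_RB_CNTL definitions in this header. */
#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT      0x0
#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK        0x0000001FL
#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT      0x8
#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK        0x00001F00L
#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT   0x10
#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK     0x00010000L
#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1c
#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK   0x10000000L

/* Local helper: clear the field, then OR in the shifted value,
 * truncated to the field width by the mask. */
#define PACK_FIELD(reg_val, field, v) \
	(((reg_val) & ~(uint32_t)field##_MASK) | \
	 (((uint32_t)(v) << field##__SHIFT) & (uint32_t)field##_MASK))

/* Compose a ring-buffer control word from individual fields. */
static uint32_t uvd_rbc_rb_cntl_pack(uint32_t log2_bufsz, uint32_t log2_blksz)
{
	uint32_t cntl = 0;

	cntl = PACK_FIELD(cntl, UVD_RBC_RB_CNTL__RB_BUFSZ, log2_bufsz);
	cntl = PACK_FIELD(cntl, UVD_RBC_RB_CNTL__RB_BLKSZ, log2_blksz);
	cntl = PACK_FIELD(cntl, UVD_RBC_RB_CNTL__RB_NO_FETCH, 1);
	cntl = PACK_FIELD(cntl, UVD_RBC_RB_CNTL__RB_RPTR_WR_EN, 1);
	return cntl;
}

int main(void)
{
	printf("UVD_RBC_RB_CNTL = 0x%08x\n", uvd_rbc_rb_cntl_pack(4, 1));
	return 0;
}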
+// addressBlock: uvd0_uvdgendec
+//UVD_LCM_CGC_CNTRL
+#define UVD_LCM_CGC_CNTRL__FORCE_OFF__SHIFT 0x12
+#define UVD_LCM_CGC_CNTRL__FORCE_ON__SHIFT 0x13
+#define UVD_LCM_CGC_CNTRL__OFF_DELAY__SHIFT 0x14
+#define UVD_LCM_CGC_CNTRL__ON_DELAY__SHIFT 0x1c
+#define UVD_LCM_CGC_CNTRL__FORCE_OFF_MASK 0x00040000L
+#define UVD_LCM_CGC_CNTRL__FORCE_ON_MASK 0x00080000L
+#define UVD_LCM_CGC_CNTRL__OFF_DELAY_MASK 0x0FF00000L
+#define UVD_LCM_CGC_CNTRL__ON_DELAY_MASK 0xF0000000L
+
+
+// addressBlock: uvd0_lmi_adpdec
+//UVD_LMI_RBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_LOW
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_HIGH
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC_VMID
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L
+//UVD_LMI_MMSCH_CTRL
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L
+//UVD_LMI_ARB_CTRL2
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L
+//UVD_LMI_VCPU_CACHE_VMIDS_MULTI
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L
+//UVD_LMI_VCPU_NC_VMIDS_MULTI
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L
+//UVD_LMI_LAT_CTRL
+#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_LMI_LAT_CNTR
+#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_LMI_AVG_LAT_CNTR
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_LMI_SPH
+#define UVD_LMI_SPH__ADDR__SHIFT 0x0
+#define UVD_LMI_SPH__STS__SHIFT 0x1c
+#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e
+#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f
+#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL
+#define UVD_LMI_SPH__STS_MASK 0x30000000L
+#define UVD_LMI_SPH__STS_VALID_MASK 0x40000000L
+#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L
+//UVD_LMI_VCPU_CACHE_VMID
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_LMI_CTRL2
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3
+#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19
+#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a
+#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L
+#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L
+#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L
+//UVD_LMI_URGENT_CTRL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L
+//UVD_LMI_CTRL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b
+#define UVD_LMI_CTRL__RFU__SHIFT 0x1e
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L
+#define UVD_LMI_CTRL__RFU_MASK 0xC0000000L
+//UVD_LMI_STATUS
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15
+#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 0x16
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L
+//UVD_LMI_PERFMON_CTRL
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_LMI_PERFMON_COUNT_LO
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_LMI_PERFMON_COUNT_HI
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_LMI_RBC_RB_VMID
+#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL
+//UVD_LMI_RBC_IB_VMID
+#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL
+//UVD_LMI_MC_CREDITS
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L
+
+
+// addressBlock: uvd0_uvdnpdec
+//MDM_DMA_CMD
+#define MDM_DMA_CMD__MDM_DMA_CMD__SHIFT 0x0
+#define MDM_DMA_CMD__MDM_DMA_CMD_MASK 0xFFFFFFFFL
+//MDM_DMA_STATUS
+#define MDM_DMA_STATUS__SDB_DMA_WR_BUSY__SHIFT 0x0
+#define MDM_DMA_STATUS__SCM_DMA_WR_BUSY__SHIFT 0x1
+#define MDM_DMA_STATUS__SCM_DMA_RD_BUSY__SHIFT 0x2
+#define MDM_DMA_STATUS__RB_DMA_WR_BUSY__SHIFT 0x3
+#define MDM_DMA_STATUS__RB_DMA_RD_BUSY__SHIFT 0x4
+#define MDM_DMA_STATUS__SDB_DMA_RD_BUSY__SHIFT 0x5
+#define MDM_DMA_STATUS__SCLR_DMA_WR_BUSY__SHIFT 0x6
+#define MDM_DMA_STATUS__SDB_DMA_WR_BUSY_MASK 0x00000001L
+#define MDM_DMA_STATUS__SCM_DMA_WR_BUSY_MASK 0x00000002L
+#define MDM_DMA_STATUS__SCM_DMA_RD_BUSY_MASK 0x00000004L
+#define MDM_DMA_STATUS__RB_DMA_WR_BUSY_MASK 0x00000008L
+#define MDM_DMA_STATUS__RB_DMA_RD_BUSY_MASK 0x00000010L
+#define MDM_DMA_STATUS__SDB_DMA_RD_BUSY_MASK 0x00000020L
+#define MDM_DMA_STATUS__SCLR_DMA_WR_BUSY_MASK 0x00000040L
+//MDM_DMA_CTL
+#define MDM_DMA_CTL__MDM_BYPASS__SHIFT 0x0
+#define MDM_DMA_CTL__FOUR_CMD__SHIFT 0x1
+#define MDM_DMA_CTL__ENCODE_MODE__SHIFT 0x2
+#define MDM_DMA_CTL__VP9_DEC_MODE__SHIFT 0x3
+#define MDM_DMA_CTL__SW_DRST__SHIFT 0x1f
+#define MDM_DMA_CTL__MDM_BYPASS_MASK 0x00000001L
+#define MDM_DMA_CTL__FOUR_CMD_MASK 0x00000002L
+#define MDM_DMA_CTL__ENCODE_MODE_MASK 0x00000004L
+#define MDM_DMA_CTL__VP9_DEC_MODE_MASK 0x00000008L
+#define MDM_DMA_CTL__SW_DRST_MASK 0x80000000L
+//MDM_ENC_PIPE_BUSY
+#define MDM_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0
+#define MDM_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1
+#define MDM_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2
+#define MDM_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3
+#define MDM_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4
+#define MDM_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5
+#define MDM_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6
+#define MDM_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7
+#define MDM_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8
+#define MDM_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9
+#define MDM_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa
+#define MDM_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb
+#define MDM_ENC_PIPE_BUSY__MDM_EFC_BUSY__SHIFT 0xc
+#define MDM_ENC_PIPE_BUSY__MDM_EFC_PROGRAM_BUSY__SHIFT 0xd
+#define MDM_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10
+#define MDM_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11
+#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12
+#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13
+#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14
+#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15
+#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16
+#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c
+#define MDM_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L
+#define MDM_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L
+#define MDM_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L
+#define MDM_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L
+#define MDM_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L
+#define MDM_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L
+#define MDM_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L
+#define MDM_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L
+#define MDM_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L
+#define MDM_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L
+#define MDM_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L
+#define MDM_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L
+#define MDM_ENC_PIPE_BUSY__MDM_EFC_BUSY_MASK 0x00001000L
+#define MDM_ENC_PIPE_BUSY__MDM_EFC_PROGRAM_BUSY_MASK 0x00002000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L
+#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L
+#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L
+#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L
+#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L
+#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L
+//MDM_WIG_PIPE_BUSY
+#define MDM_WIG_PIPE_BUSY__WIG_TBE_BUSY__SHIFT 0x0
+#define MDM_WIG_PIPE_BUSY__WIG_ENT_BUSY__SHIFT 0x1
+#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_BUSY__SHIFT 0x2
+#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_FIFO_FULL__SHIFT 0x3
+#define MDM_WIG_PIPE_BUSY__LCM_BUSY__SHIFT 0x4
+#define MDM_WIG_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x5
+#define MDM_WIG_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x6
+#define MDM_WIG_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x7
+#define MDM_WIG_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0x8
+#define MDM_WIG_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0x9
+#define MDM_WIG_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0xa
+#define MDM_WIG_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0xb
+#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0xc
+#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0xd
+#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0xe
+#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0xf
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x10
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x11
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x12
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x13
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x14
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x15
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x16
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD5_BUSY__SHIFT 0x17
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x18
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x19
+#define MDM_WIG_PIPE_BUSY__LCM_BSP0_NOT_EMPTY__SHIFT 0x1a
+#define MDM_WIG_PIPE_BUSY__LCM_BSP1_NOT_EMPTY__SHIFT 0x1b
+#define MDM_WIG_PIPE_BUSY__LCM_BSP2_NOT_EMPTY__SHIFT 0x1c
+#define MDM_WIG_PIPE_BUSY__LCM_BSP3_NOT_EMPTY__SHIFT 0x1d
+#define MDM_WIG_PIPE_BUSY__WIG_TBE_BUSY_MASK 0x00000001L
+#define MDM_WIG_PIPE_BUSY__WIG_ENT_BUSY_MASK 0x00000002L
+#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_BUSY_MASK 0x00000004L
+#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_FIFO_FULL_MASK 0x00000008L
+#define MDM_WIG_PIPE_BUSY__LCM_BUSY_MASK 0x00000010L
+#define MDM_WIG_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000020L
+#define MDM_WIG_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000040L
+#define MDM_WIG_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000080L
+#define MDM_WIG_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000100L
+#define MDM_WIG_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000200L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00000400L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00000800L
+#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00001000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00002000L
+#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00004000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00008000L
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00010000L
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00020000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x00040000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x00080000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x00100000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x00200000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x00400000L
+#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD5_BUSY_MASK 0x00800000L
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x01000000L
+#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x02000000L
+#define MDM_WIG_PIPE_BUSY__LCM_BSP0_NOT_EMPTY_MASK 0x04000000L
+#define MDM_WIG_PIPE_BUSY__LCM_BSP1_NOT_EMPTY_MASK 0x08000000L
+#define MDM_WIG_PIPE_BUSY__LCM_BSP2_NOT_EMPTY_MASK 0x10000000L
+#define MDM_WIG_PIPE_BUSY__LCM_BSP3_NOT_EMPTY_MASK 0x20000000L
+
+
+// addressBlock: lmi_adp_indirect
+//UVD_LMI_CRC0
+#define UVD_LMI_CRC0__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC1
+#define UVD_LMI_CRC1__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC2
+#define UVD_LMI_CRC2__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC3
+#define UVD_LMI_CRC3__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL
+
+
+#endif
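
Each register in these generated headers carries a *__SHIFT/*__MASK pair per field: a field is read by masking the register value and shifting it down, and updated by the reverse. A minimal sketch using the UVD_LMI_CTRL CRC_SEL field defined above, with rreg32()/wreg32() as hypothetical stand-ins for the driver's MMIO accessors (not part of this patch):

#include <stdint.h>

/* hypothetical MMIO accessors standing in for the driver's register helpers */
extern uint32_t rreg32(uint32_t offset);
extern void wreg32(uint32_t offset, uint32_t value);

/* read the CRC_SEL field out of UVD_LMI_CTRL */
static uint32_t uvd_lmi_get_crc_sel(uint32_t reg_uvd_lmi_ctrl)
{
	uint32_t val = rreg32(reg_uvd_lmi_ctrl);

	return (val & UVD_LMI_CTRL__CRC_SEL_MASK) >> UVD_LMI_CTRL__CRC_SEL__SHIFT;
}

/* program a new CRC_SEL value without disturbing the other fields */
static void uvd_lmi_set_crc_sel(uint32_t reg_uvd_lmi_ctrl, uint32_t sel)
{
	uint32_t val = rreg32(reg_uvd_lmi_ctrl);

	val &= ~UVD_LMI_CTRL__CRC_SEL_MASK;
	val |= (sel << UVD_LMI_CTRL__CRC_SEL__SHIFT) & UVD_LMI_CTRL__CRC_SEL_MASK;
	wreg32(reg_uvd_lmi_ctrl, val);
}
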
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 24cfe84d7322..70146518174c 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1
/* Total 32bit cap indication */
enum atombios_firmware_capability
{
- ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
- ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
- ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
- ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
- ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
- ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
+ ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
+ ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
};
enum atom_cooling_solution_id{
@@ -1789,6 +1790,92 @@ struct atom_smc_dpm_info_v4_5
};
+struct atom_smc_dpm_info_v4_6
+{
+ struct atom_common_table_header table_header;
+ // section: board parameters
+ uint32_t i2c_padding[3]; // old i2c controls were moved to a new area
+
+ uint16_t maxvoltagestepgfx; // in mv(q2) max voltage step that smu will request. multiple steps are taken if voltage change exceeds this value.
+ uint16_t maxvoltagestepsoc; // in mv(q2) max voltage step that smu will request. multiple steps are taken if voltage change exceeds this value.
+
+ uint8_t vddgfxvrmapping; // use vr_mapping* bitfields
+ uint8_t vddsocvrmapping; // use vr_mapping* bitfields
+ uint8_t vddmemvrmapping; // use vr_mapping* bitfields
+ uint8_t boardvrmapping; // use vr_mapping* bitfields
+
+ uint8_t gfxulvphasesheddingmask; // set this to 1 to set psi0/1 to 1 in ulv mode
+ uint8_t externalsensorpresent; // external rdi connected to tmon (aka temp in)
+ uint8_t padding8_v[2];
+
+ // telemetry settings
+ uint16_t gfxmaxcurrent; // in amps
+ uint8_t gfxoffset; // in amps
+ uint8_t padding_telemetrygfx;
+
+ uint16_t socmaxcurrent; // in amps
+ uint8_t socoffset; // in amps
+ uint8_t padding_telemetrysoc;
+
+ uint16_t memmaxcurrent; // in amps
+ uint8_t memoffset; // in amps
+ uint8_t padding_telemetrymem;
+
+ uint16_t boardmaxcurrent; // in amps
+ uint8_t boardoffset; // in amps
+ uint8_t padding_telemetryboardinput;
+
+ // gpio settings
+ uint8_t vr0hotgpio; // gpio pin configured for vr0 hot event
+ uint8_t vr0hotpolarity; // gpio polarity for vr0 hot event
+ uint8_t vr1hotgpio; // gpio pin configured for vr1 hot event
+ uint8_t vr1hotpolarity; // gpio polarity for vr1 hot event
+
+ // gfxclk pll spread spectrum
+ uint8_t pllgfxclkspreadenabled; // on or off
+ uint8_t pllgfxclkspreadpercent; // q4.4
+ uint16_t pllgfxclkspreadfreq; // khz
+
+ // uclk spread spectrum
+ uint8_t uclkspreadenabled; // on or off
+ uint8_t uclkspreadpercent; // q4.4
+ uint16_t uclkspreadfreq; // khz
+
+ // fclk spread spectrum
+ uint8_t fclkspreadenabled; // on or off
+ uint8_t fclkspreadpercent; // q4.4
+ uint16_t fclkspreadfreq; // khz
+
+
+ // gfxclk fll spread spectrum
+ uint8_t fllgfxclkspreadenabled; // on or off
+ uint8_t fllgfxclkspreadpercent; // q4.4
+ uint16_t fllgfxclkspreadfreq; // khz
+
+ // i2c controller structure
+ struct smudpm_i2c_controller_config_v2 i2ccontrollers[8];
+
+ // memory section
+ uint32_t memorychannelenabled; // for dram use only, max 32 channels enabled bit mask.
+
+ uint8_t drambitwidth; // for dram use only. see dram bit width type defines
+ uint8_t paddingmem[3];
+
+ // total board power
+ uint16_t totalboardpower; // only needed for the tcp estimated case, where tcp = tgp + total board power
+ uint16_t boardpadding;
+
+ // section: xgmi training
+ uint8_t xgmilinkspeed[4];
+ uint8_t xgmilinkwidth[4];
+
+ uint16_t xgmifclkfreq[4];
+ uint16_t xgmisocvoltage[4];
+
+ // reserved
+ uint32_t boardreserved[10];
+};
+
/*
***************************************************************************
Data Table asic_profiling_info structure
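
The new capability flag is consumed like the existing ones: the driver tests it against the 32-bit capability word carried in atom_firmware_info. A minimal sketch, assuming the caps value has already been read out of the firmware info table:

#include <stdint.h>
#include <stdbool.h>

/* caps is the 32-bit firmware capability word from atom_firmware_info */
static bool two_stage_bist_training_enabled(uint32_t caps)
{
	return (caps & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) != 0;
}
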
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 5dcb776548d8..7ec4331e67f2 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -25,7 +25,6 @@
#define _DISCOVERY_H_
#define PSP_HEADER_SIZE 256
-#define BINARY_MAX_SIZE (64 << 10)
#define BINARY_SIGNATURE 0x28211407
#define DISCOVERY_TABLE_SIGNATURE 0x53445049
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
new file mode 100644
index 000000000000..79af4258f259
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_NBIF_7_4_H__
+#define __IRQSRCS_NBIF_7_4_H__
+
+#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated
+#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off
+#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller
+#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT
+#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT
+#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives unsupported atomic opcode from MC
+#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW 0xCF // BIF receives atomic request from MC while AtomicOp Requester is not enabled in PCIE config space
+
+#endif // __IRQSRCS_NBIF_7_4_H__
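
These source ids identify NBIF 7.4 interrupt events; a handler typically dispatches on the id it pulls from the interrupt vector entry. A minimal sketch with a hypothetical dispatch routine (the per-event handling here is only a placeholder, not part of this patch):

#include <stdint.h>

/* hypothetical dispatch on an NBIF 7.4 source id */
static void nbif_7_4_dispatch(uint32_t src_id)
{
	switch (src_id) {
	case NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT:
		/* ras_intr_valid raised by the RAS controller */
		break;
	case NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT:
		/* SDP ErrEvent forwarded from ATHUB */
		break;
	default:
		break;
	}
}
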
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 98b9533e672b..a607b1034962 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -256,6 +256,10 @@ struct kfd2kgd_calls {
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
+ int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off);
+
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
@@ -291,21 +295,22 @@ struct kfd2kgd_calls {
uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_valid)(
- struct kgd_dev *kgd,
- uint8_t vmid);
- uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
+ bool (*get_atc_vmid_pasid_mapping_info)(
struct kgd_dev *kgd,
- uint8_t vmid);
+ uint8_t vmid,
+ uint16_t *p_pasid);
+ /* No longer needed from GFXv9 onward. The scratch base address is
+ * passed to the shader by the CP. It's the user mode driver's
+ * responsibility.
+ */
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
- int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
- int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
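
The consolidated get_atc_vmid_pasid_mapping_info callback returns the mapping's validity and hands the PASID back through a pointer, so the two old queries collapse into a single call. A minimal sketch of a caller, assuming a kfd2kgd function-table pointer named funcs (the wrapper itself is hypothetical):

/* look up which PASID a VMID is mapped to, if the mapping is valid */
static int query_vmid_pasid(const struct kfd2kgd_calls *funcs,
			    struct kgd_dev *kgd, uint8_t vmid,
			    uint16_t *pasid)
{
	if (!funcs->get_atc_vmid_pasid_mapping_info(kgd, vmid, pasid))
		return -1;	/* no valid mapping for this VMID */

	return 0;
}
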
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 5b1ebb7f995a..a7f92d0b3a90 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -142,6 +142,7 @@ enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_VR = 0x4,
PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
+ PP_SMC_POWER_PROFILE_COUNT,
};
enum {
@@ -171,6 +172,18 @@ enum PP_HWMON_TEMP {
PP_TEMP_MAX
};
+enum pp_mp1_state {
+ PP_MP1_STATE_NONE,
+ PP_MP1_STATE_SHUTDOWN,
+ PP_MP1_STATE_UNLOAD,
+ PP_MP1_STATE_RESET,
+};
+
+enum pp_df_cstate {
+ DF_CSTATE_DISALLOW = 0,
+ DF_CSTATE_ALLOW,
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -207,6 +220,9 @@ enum PP_HWMON_TEMP {
((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \
(support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT)
+#define XGMI_MODE_PSTATE_D3 0
+#define XGMI_MODE_PSTATE_D0 1
+
struct seq_file;
enum amd_pp_clock_type;
struct amd_pp_simple_clock_info;
@@ -266,6 +282,8 @@ struct amd_pm_funcs {
int (*get_power_profile_mode)(void *handle, char *buf);
int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
+ int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state);
+ int (*smu_i2c_bus_access)(void *handle, bool acquire);
/* export to DC */
u32 (*get_sclk)(void *handle, bool low);
u32 (*get_mclk)(void *handle, bool low);
@@ -301,6 +319,9 @@ struct amd_pm_funcs {
int (*set_asic_baco_state)(void *handle, int state);
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
+ int (*asic_reset_mode_2)(void *handle);
+ int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
};
#endif
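
The new set_mp1_state hook takes one of the pp_mp1_state values added above. A minimal sketch of how a caller might use it, assuming an amd_pm_funcs table and treating the hook as optional (the wrapper name is hypothetical):

/* ask the power code to put MP1 into the unload state, if supported */
static int pm_notify_unload(const struct amd_pm_funcs *pp_funcs, void *handle)
{
	if (!pp_funcs || !pp_funcs->set_mp1_state)
		return 0;	/* hook is optional */

	return pp_funcs->set_mp1_state(handle, PP_MP1_STATE_UNLOAD);
}
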
diff --git a/drivers/gpu/drm/amd/include/navi12_ip_offset.h b/drivers/gpu/drm/amd/include/navi12_ip_offset.h
new file mode 100644
index 000000000000..6c2cc6296c06
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/navi12_ip_offset.h
@@ -0,0 +1,1119 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _navi12_ip_offset_HEADER
+#define _navi12_ip_offset_HEADER
+
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 5
+
+
+struct IP_BASE_INSTANCE
+{
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE
+{
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0x02408C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x02401800, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0 } },
+ { { 0x00017E00, 0x0240BC00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x0240B800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DIO_BASE ={ { { { 0x02404000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DMU_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DPCS_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0x02401400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE GC_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDA_BASE ={ { { { 0x004C0000, 0x02404800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x0240A400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0x02408800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x00E80000, 0x00EC0000, 0x00F00000, 0x02400400 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE PCIE0_BASE ={ { { { 0x02411800, 0x04440000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0x00440000, 0x02401000, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0x02400C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0x02425800, 0, 0, 0 } },
+ { { 0x00054000, 0x02425C00, 0, 0, 0 } },
+ { { 0x00094000, 0x02426000, 0, 0, 0 } },
+ { { 0x000D4000, 0x02426400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE USB0_BASE ={ { { { 0x0242A800, 0x05B00000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x02403000, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+
+
+#define ATHUB_BASE__INST0_SEG0 0x00000C00
+#define ATHUB_BASE__INST0_SEG1 0x02408C00
+#define ATHUB_BASE__INST0_SEG2 0
+#define ATHUB_BASE__INST0_SEG3 0
+#define ATHUB_BASE__INST0_SEG4 0
+
+#define ATHUB_BASE__INST1_SEG0 0
+#define ATHUB_BASE__INST1_SEG1 0
+#define ATHUB_BASE__INST1_SEG2 0
+#define ATHUB_BASE__INST1_SEG3 0
+#define ATHUB_BASE__INST1_SEG4 0
+
+#define ATHUB_BASE__INST2_SEG0 0
+#define ATHUB_BASE__INST2_SEG1 0
+#define ATHUB_BASE__INST2_SEG2 0
+#define ATHUB_BASE__INST2_SEG3 0
+#define ATHUB_BASE__INST2_SEG4 0
+
+#define ATHUB_BASE__INST3_SEG0 0
+#define ATHUB_BASE__INST3_SEG1 0
+#define ATHUB_BASE__INST3_SEG2 0
+#define ATHUB_BASE__INST3_SEG3 0
+#define ATHUB_BASE__INST3_SEG4 0
+
+#define ATHUB_BASE__INST4_SEG0 0
+#define ATHUB_BASE__INST4_SEG1 0
+#define ATHUB_BASE__INST4_SEG2 0
+#define ATHUB_BASE__INST4_SEG3 0
+#define ATHUB_BASE__INST4_SEG4 0
+
+#define ATHUB_BASE__INST5_SEG0 0
+#define ATHUB_BASE__INST5_SEG1 0
+#define ATHUB_BASE__INST5_SEG2 0
+#define ATHUB_BASE__INST5_SEG3 0
+#define ATHUB_BASE__INST5_SEG4 0
+
+#define ATHUB_BASE__INST6_SEG0 0
+#define ATHUB_BASE__INST6_SEG1 0
+#define ATHUB_BASE__INST6_SEG2 0
+#define ATHUB_BASE__INST6_SEG3 0
+#define ATHUB_BASE__INST6_SEG4 0
+
+#define CLK_BASE__INST0_SEG0 0x00016C00
+#define CLK_BASE__INST0_SEG1 0x02401800
+#define CLK_BASE__INST0_SEG2 0
+#define CLK_BASE__INST0_SEG3 0
+#define CLK_BASE__INST0_SEG4 0
+
+#define CLK_BASE__INST1_SEG0 0x00016E00
+#define CLK_BASE__INST1_SEG1 0x02401C00
+#define CLK_BASE__INST1_SEG2 0
+#define CLK_BASE__INST1_SEG3 0
+#define CLK_BASE__INST1_SEG4 0
+
+#define CLK_BASE__INST2_SEG0 0x00017000
+#define CLK_BASE__INST2_SEG1 0x02402000
+#define CLK_BASE__INST2_SEG2 0
+#define CLK_BASE__INST2_SEG3 0
+#define CLK_BASE__INST2_SEG4 0
+
+#define CLK_BASE__INST3_SEG0 0x00017200
+#define CLK_BASE__INST3_SEG1 0x02402400
+#define CLK_BASE__INST3_SEG2 0
+#define CLK_BASE__INST3_SEG3 0
+#define CLK_BASE__INST3_SEG4 0
+
+#define CLK_BASE__INST4_SEG0 0x0001B000
+#define CLK_BASE__INST4_SEG1 0x0242D800
+#define CLK_BASE__INST4_SEG2 0
+#define CLK_BASE__INST4_SEG3 0
+#define CLK_BASE__INST4_SEG4 0
+
+#define CLK_BASE__INST5_SEG0 0x00017E00
+#define CLK_BASE__INST5_SEG1 0x0240BC00
+#define CLK_BASE__INST5_SEG2 0
+#define CLK_BASE__INST5_SEG3 0
+#define CLK_BASE__INST5_SEG4 0
+
+#define CLK_BASE__INST6_SEG0 0
+#define CLK_BASE__INST6_SEG1 0
+#define CLK_BASE__INST6_SEG2 0
+#define CLK_BASE__INST6_SEG3 0
+#define CLK_BASE__INST6_SEG4 0
+
+#define DF_BASE__INST0_SEG0 0x00007000
+#define DF_BASE__INST0_SEG1 0x0240B800
+#define DF_BASE__INST0_SEG2 0
+#define DF_BASE__INST0_SEG3 0
+#define DF_BASE__INST0_SEG4 0
+
+#define DF_BASE__INST1_SEG0 0
+#define DF_BASE__INST1_SEG1 0
+#define DF_BASE__INST1_SEG2 0
+#define DF_BASE__INST1_SEG3 0
+#define DF_BASE__INST1_SEG4 0
+
+#define DF_BASE__INST2_SEG0 0
+#define DF_BASE__INST2_SEG1 0
+#define DF_BASE__INST2_SEG2 0
+#define DF_BASE__INST2_SEG3 0
+#define DF_BASE__INST2_SEG4 0
+
+#define DF_BASE__INST3_SEG0 0
+#define DF_BASE__INST3_SEG1 0
+#define DF_BASE__INST3_SEG2 0
+#define DF_BASE__INST3_SEG3 0
+#define DF_BASE__INST3_SEG4 0
+
+#define DF_BASE__INST4_SEG0 0
+#define DF_BASE__INST4_SEG1 0
+#define DF_BASE__INST4_SEG2 0
+#define DF_BASE__INST4_SEG3 0
+#define DF_BASE__INST4_SEG4 0
+
+#define DF_BASE__INST5_SEG0 0
+#define DF_BASE__INST5_SEG1 0
+#define DF_BASE__INST5_SEG2 0
+#define DF_BASE__INST5_SEG3 0
+#define DF_BASE__INST5_SEG4 0
+
+#define DF_BASE__INST6_SEG0 0
+#define DF_BASE__INST6_SEG1 0
+#define DF_BASE__INST6_SEG2 0
+#define DF_BASE__INST6_SEG3 0
+#define DF_BASE__INST6_SEG4 0
+
+#define DIO_BASE__INST0_SEG0 0x02404000
+#define DIO_BASE__INST0_SEG1 0
+#define DIO_BASE__INST0_SEG2 0
+#define DIO_BASE__INST0_SEG3 0
+#define DIO_BASE__INST0_SEG4 0
+
+#define DIO_BASE__INST1_SEG0 0
+#define DIO_BASE__INST1_SEG1 0
+#define DIO_BASE__INST1_SEG2 0
+#define DIO_BASE__INST1_SEG3 0
+#define DIO_BASE__INST1_SEG4 0
+
+#define DIO_BASE__INST2_SEG0 0
+#define DIO_BASE__INST2_SEG1 0
+#define DIO_BASE__INST2_SEG2 0
+#define DIO_BASE__INST2_SEG3 0
+#define DIO_BASE__INST2_SEG4 0
+
+#define DIO_BASE__INST3_SEG0 0
+#define DIO_BASE__INST3_SEG1 0
+#define DIO_BASE__INST3_SEG2 0
+#define DIO_BASE__INST3_SEG3 0
+#define DIO_BASE__INST3_SEG4 0
+
+#define DIO_BASE__INST4_SEG0 0
+#define DIO_BASE__INST4_SEG1 0
+#define DIO_BASE__INST4_SEG2 0
+#define DIO_BASE__INST4_SEG3 0
+#define DIO_BASE__INST4_SEG4 0
+
+#define DIO_BASE__INST5_SEG0 0
+#define DIO_BASE__INST5_SEG1 0
+#define DIO_BASE__INST5_SEG2 0
+#define DIO_BASE__INST5_SEG3 0
+#define DIO_BASE__INST5_SEG4 0
+
+#define DIO_BASE__INST6_SEG0 0
+#define DIO_BASE__INST6_SEG1 0
+#define DIO_BASE__INST6_SEG2 0
+#define DIO_BASE__INST6_SEG3 0
+#define DIO_BASE__INST6_SEG4 0
+
+#define DMU_BASE__INST0_SEG0 0x00000012
+#define DMU_BASE__INST0_SEG1 0x000000C0
+#define DMU_BASE__INST0_SEG2 0x000034C0
+#define DMU_BASE__INST0_SEG3 0x00009000
+#define DMU_BASE__INST0_SEG4 0x02403C00
+
+#define DMU_BASE__INST1_SEG0 0
+#define DMU_BASE__INST1_SEG1 0
+#define DMU_BASE__INST1_SEG2 0
+#define DMU_BASE__INST1_SEG3 0
+#define DMU_BASE__INST1_SEG4 0
+
+#define DMU_BASE__INST2_SEG0 0
+#define DMU_BASE__INST2_SEG1 0
+#define DMU_BASE__INST2_SEG2 0
+#define DMU_BASE__INST2_SEG3 0
+#define DMU_BASE__INST2_SEG4 0
+
+#define DMU_BASE__INST3_SEG0 0
+#define DMU_BASE__INST3_SEG1 0
+#define DMU_BASE__INST3_SEG2 0
+#define DMU_BASE__INST3_SEG3 0
+#define DMU_BASE__INST3_SEG4 0
+
+#define DMU_BASE__INST4_SEG0 0
+#define DMU_BASE__INST4_SEG1 0
+#define DMU_BASE__INST4_SEG2 0
+#define DMU_BASE__INST4_SEG3 0
+#define DMU_BASE__INST4_SEG4 0
+
+#define DMU_BASE__INST5_SEG0 0
+#define DMU_BASE__INST5_SEG1 0
+#define DMU_BASE__INST5_SEG2 0
+#define DMU_BASE__INST5_SEG3 0
+#define DMU_BASE__INST5_SEG4 0
+
+#define DMU_BASE__INST6_SEG0 0
+#define DMU_BASE__INST6_SEG1 0
+#define DMU_BASE__INST6_SEG2 0
+#define DMU_BASE__INST6_SEG3 0
+#define DMU_BASE__INST6_SEG4 0
+
+#define DPCS_BASE__INST0_SEG0 0x00000012
+#define DPCS_BASE__INST0_SEG1 0x000000C0
+#define DPCS_BASE__INST0_SEG2 0x000034C0
+#define DPCS_BASE__INST0_SEG3 0x00009000
+#define DPCS_BASE__INST0_SEG4 0x02403C00
+
+#define DPCS_BASE__INST1_SEG0 0
+#define DPCS_BASE__INST1_SEG1 0
+#define DPCS_BASE__INST1_SEG2 0
+#define DPCS_BASE__INST1_SEG3 0
+#define DPCS_BASE__INST1_SEG4 0
+
+#define DPCS_BASE__INST2_SEG0 0
+#define DPCS_BASE__INST2_SEG1 0
+#define DPCS_BASE__INST2_SEG2 0
+#define DPCS_BASE__INST2_SEG3 0
+#define DPCS_BASE__INST2_SEG4 0
+
+#define DPCS_BASE__INST3_SEG0 0
+#define DPCS_BASE__INST3_SEG1 0
+#define DPCS_BASE__INST3_SEG2 0
+#define DPCS_BASE__INST3_SEG3 0
+#define DPCS_BASE__INST3_SEG4 0
+
+#define DPCS_BASE__INST4_SEG0 0
+#define DPCS_BASE__INST4_SEG1 0
+#define DPCS_BASE__INST4_SEG2 0
+#define DPCS_BASE__INST4_SEG3 0
+#define DPCS_BASE__INST4_SEG4 0
+
+#define DPCS_BASE__INST5_SEG0 0
+#define DPCS_BASE__INST5_SEG1 0
+#define DPCS_BASE__INST5_SEG2 0
+#define DPCS_BASE__INST5_SEG3 0
+#define DPCS_BASE__INST5_SEG4 0
+
+#define DPCS_BASE__INST6_SEG0 0
+#define DPCS_BASE__INST6_SEG1 0
+#define DPCS_BASE__INST6_SEG2 0
+#define DPCS_BASE__INST6_SEG3 0
+#define DPCS_BASE__INST6_SEG4 0
+
+#define FUSE_BASE__INST0_SEG0 0x00017400
+#define FUSE_BASE__INST0_SEG1 0x02401400
+#define FUSE_BASE__INST0_SEG2 0
+#define FUSE_BASE__INST0_SEG3 0
+#define FUSE_BASE__INST0_SEG4 0
+
+#define FUSE_BASE__INST1_SEG0 0
+#define FUSE_BASE__INST1_SEG1 0
+#define FUSE_BASE__INST1_SEG2 0
+#define FUSE_BASE__INST1_SEG3 0
+#define FUSE_BASE__INST1_SEG4 0
+
+#define FUSE_BASE__INST2_SEG0 0
+#define FUSE_BASE__INST2_SEG1 0
+#define FUSE_BASE__INST2_SEG2 0
+#define FUSE_BASE__INST2_SEG3 0
+#define FUSE_BASE__INST2_SEG4 0
+
+#define FUSE_BASE__INST3_SEG0 0
+#define FUSE_BASE__INST3_SEG1 0
+#define FUSE_BASE__INST3_SEG2 0
+#define FUSE_BASE__INST3_SEG3 0
+#define FUSE_BASE__INST3_SEG4 0
+
+#define FUSE_BASE__INST4_SEG0 0
+#define FUSE_BASE__INST4_SEG1 0
+#define FUSE_BASE__INST4_SEG2 0
+#define FUSE_BASE__INST4_SEG3 0
+#define FUSE_BASE__INST4_SEG4 0
+
+#define FUSE_BASE__INST5_SEG0 0
+#define FUSE_BASE__INST5_SEG1 0
+#define FUSE_BASE__INST5_SEG2 0
+#define FUSE_BASE__INST5_SEG3 0
+#define FUSE_BASE__INST5_SEG4 0
+
+#define FUSE_BASE__INST6_SEG0 0
+#define FUSE_BASE__INST6_SEG1 0
+#define FUSE_BASE__INST6_SEG2 0
+#define FUSE_BASE__INST6_SEG3 0
+#define FUSE_BASE__INST6_SEG4 0
+
+#define GC_BASE__INST0_SEG0 0x00001260
+#define GC_BASE__INST0_SEG1 0x0000A000
+#define GC_BASE__INST0_SEG2 0x02402C00
+#define GC_BASE__INST0_SEG3 0
+#define GC_BASE__INST0_SEG4 0
+
+#define GC_BASE__INST1_SEG0 0
+#define GC_BASE__INST1_SEG1 0
+#define GC_BASE__INST1_SEG2 0
+#define GC_BASE__INST1_SEG3 0
+#define GC_BASE__INST1_SEG4 0
+
+#define GC_BASE__INST2_SEG0 0
+#define GC_BASE__INST2_SEG1 0
+#define GC_BASE__INST2_SEG2 0
+#define GC_BASE__INST2_SEG3 0
+#define GC_BASE__INST2_SEG4 0
+
+#define GC_BASE__INST3_SEG0 0
+#define GC_BASE__INST3_SEG1 0
+#define GC_BASE__INST3_SEG2 0
+#define GC_BASE__INST3_SEG3 0
+#define GC_BASE__INST3_SEG4 0
+
+#define GC_BASE__INST4_SEG0 0
+#define GC_BASE__INST4_SEG1 0
+#define GC_BASE__INST4_SEG2 0
+#define GC_BASE__INST4_SEG3 0
+#define GC_BASE__INST4_SEG4 0
+
+#define GC_BASE__INST5_SEG0 0
+#define GC_BASE__INST5_SEG1 0
+#define GC_BASE__INST5_SEG2 0
+#define GC_BASE__INST5_SEG3 0
+#define GC_BASE__INST5_SEG4 0
+
+#define GC_BASE__INST6_SEG0 0
+#define GC_BASE__INST6_SEG1 0
+#define GC_BASE__INST6_SEG2 0
+#define GC_BASE__INST6_SEG3 0
+#define GC_BASE__INST6_SEG4 0
+
+#define HDA_BASE__INST0_SEG0 0x004C0000
+#define HDA_BASE__INST0_SEG1 0x02404800
+#define HDA_BASE__INST0_SEG2 0
+#define HDA_BASE__INST0_SEG3 0
+#define HDA_BASE__INST0_SEG4 0
+
+#define HDA_BASE__INST1_SEG0 0
+#define HDA_BASE__INST1_SEG1 0
+#define HDA_BASE__INST1_SEG2 0
+#define HDA_BASE__INST1_SEG3 0
+#define HDA_BASE__INST1_SEG4 0
+
+#define HDA_BASE__INST2_SEG0 0
+#define HDA_BASE__INST2_SEG1 0
+#define HDA_BASE__INST2_SEG2 0
+#define HDA_BASE__INST2_SEG3 0
+#define HDA_BASE__INST2_SEG4 0
+
+#define HDA_BASE__INST3_SEG0 0
+#define HDA_BASE__INST3_SEG1 0
+#define HDA_BASE__INST3_SEG2 0
+#define HDA_BASE__INST3_SEG3 0
+#define HDA_BASE__INST3_SEG4 0
+
+#define HDA_BASE__INST4_SEG0 0
+#define HDA_BASE__INST4_SEG1 0
+#define HDA_BASE__INST4_SEG2 0
+#define HDA_BASE__INST4_SEG3 0
+#define HDA_BASE__INST4_SEG4 0
+
+#define HDA_BASE__INST5_SEG0 0
+#define HDA_BASE__INST5_SEG1 0
+#define HDA_BASE__INST5_SEG2 0
+#define HDA_BASE__INST5_SEG3 0
+#define HDA_BASE__INST5_SEG4 0
+
+#define HDA_BASE__INST6_SEG0 0
+#define HDA_BASE__INST6_SEG1 0
+#define HDA_BASE__INST6_SEG2 0
+#define HDA_BASE__INST6_SEG3 0
+#define HDA_BASE__INST6_SEG4 0
+
+#define HDP_BASE__INST0_SEG0 0x00000F20
+#define HDP_BASE__INST0_SEG1 0x0240A400
+#define HDP_BASE__INST0_SEG2 0
+#define HDP_BASE__INST0_SEG3 0
+#define HDP_BASE__INST0_SEG4 0
+
+#define HDP_BASE__INST1_SEG0 0
+#define HDP_BASE__INST1_SEG1 0
+#define HDP_BASE__INST1_SEG2 0
+#define HDP_BASE__INST1_SEG3 0
+#define HDP_BASE__INST1_SEG4 0
+
+#define HDP_BASE__INST2_SEG0 0
+#define HDP_BASE__INST2_SEG1 0
+#define HDP_BASE__INST2_SEG2 0
+#define HDP_BASE__INST2_SEG3 0
+#define HDP_BASE__INST2_SEG4 0
+
+#define HDP_BASE__INST3_SEG0 0
+#define HDP_BASE__INST3_SEG1 0
+#define HDP_BASE__INST3_SEG2 0
+#define HDP_BASE__INST3_SEG3 0
+#define HDP_BASE__INST3_SEG4 0
+
+#define HDP_BASE__INST4_SEG0 0
+#define HDP_BASE__INST4_SEG1 0
+#define HDP_BASE__INST4_SEG2 0
+#define HDP_BASE__INST4_SEG3 0
+#define HDP_BASE__INST4_SEG4 0
+
+#define HDP_BASE__INST5_SEG0 0
+#define HDP_BASE__INST5_SEG1 0
+#define HDP_BASE__INST5_SEG2 0
+#define HDP_BASE__INST5_SEG3 0
+#define HDP_BASE__INST5_SEG4 0
+
+#define HDP_BASE__INST6_SEG0 0
+#define HDP_BASE__INST6_SEG1 0
+#define HDP_BASE__INST6_SEG2 0
+#define HDP_BASE__INST6_SEG3 0
+#define HDP_BASE__INST6_SEG4 0
+
+#define MMHUB_BASE__INST0_SEG0 0x0001A000
+#define MMHUB_BASE__INST0_SEG1 0x02408800
+#define MMHUB_BASE__INST0_SEG2 0
+#define MMHUB_BASE__INST0_SEG3 0
+#define MMHUB_BASE__INST0_SEG4 0
+
+#define MMHUB_BASE__INST1_SEG0 0
+#define MMHUB_BASE__INST1_SEG1 0
+#define MMHUB_BASE__INST1_SEG2 0
+#define MMHUB_BASE__INST1_SEG3 0
+#define MMHUB_BASE__INST1_SEG4 0
+
+#define MMHUB_BASE__INST2_SEG0 0
+#define MMHUB_BASE__INST2_SEG1 0
+#define MMHUB_BASE__INST2_SEG2 0
+#define MMHUB_BASE__INST2_SEG3 0
+#define MMHUB_BASE__INST2_SEG4 0
+
+#define MMHUB_BASE__INST3_SEG0 0
+#define MMHUB_BASE__INST3_SEG1 0
+#define MMHUB_BASE__INST3_SEG2 0
+#define MMHUB_BASE__INST3_SEG3 0
+#define MMHUB_BASE__INST3_SEG4 0
+
+#define MMHUB_BASE__INST4_SEG0 0
+#define MMHUB_BASE__INST4_SEG1 0
+#define MMHUB_BASE__INST4_SEG2 0
+#define MMHUB_BASE__INST4_SEG3 0
+#define MMHUB_BASE__INST4_SEG4 0
+
+#define MMHUB_BASE__INST5_SEG0 0
+#define MMHUB_BASE__INST5_SEG1 0
+#define MMHUB_BASE__INST5_SEG2 0
+#define MMHUB_BASE__INST5_SEG3 0
+#define MMHUB_BASE__INST5_SEG4 0
+
+#define MMHUB_BASE__INST6_SEG0 0
+#define MMHUB_BASE__INST6_SEG1 0
+#define MMHUB_BASE__INST6_SEG2 0
+#define MMHUB_BASE__INST6_SEG3 0
+#define MMHUB_BASE__INST6_SEG4 0
+
+#define MP0_BASE__INST0_SEG0 0x00016000
+#define MP0_BASE__INST0_SEG1 0x00DC0000
+#define MP0_BASE__INST0_SEG2 0x00E00000
+#define MP0_BASE__INST0_SEG3 0x00E40000
+#define MP0_BASE__INST0_SEG4 0x0243FC00
+
+#define MP0_BASE__INST1_SEG0 0
+#define MP0_BASE__INST1_SEG1 0
+#define MP0_BASE__INST1_SEG2 0
+#define MP0_BASE__INST1_SEG3 0
+#define MP0_BASE__INST1_SEG4 0
+
+#define MP0_BASE__INST2_SEG0 0
+#define MP0_BASE__INST2_SEG1 0
+#define MP0_BASE__INST2_SEG2 0
+#define MP0_BASE__INST2_SEG3 0
+#define MP0_BASE__INST2_SEG4 0
+
+#define MP0_BASE__INST3_SEG0 0
+#define MP0_BASE__INST3_SEG1 0
+#define MP0_BASE__INST3_SEG2 0
+#define MP0_BASE__INST3_SEG3 0
+#define MP0_BASE__INST3_SEG4 0
+
+#define MP0_BASE__INST4_SEG0 0
+#define MP0_BASE__INST4_SEG1 0
+#define MP0_BASE__INST4_SEG2 0
+#define MP0_BASE__INST4_SEG3 0
+#define MP0_BASE__INST4_SEG4 0
+
+#define MP0_BASE__INST5_SEG0 0
+#define MP0_BASE__INST5_SEG1 0
+#define MP0_BASE__INST5_SEG2 0
+#define MP0_BASE__INST5_SEG3 0
+#define MP0_BASE__INST5_SEG4 0
+
+#define MP0_BASE__INST6_SEG0 0
+#define MP0_BASE__INST6_SEG1 0
+#define MP0_BASE__INST6_SEG2 0
+#define MP0_BASE__INST6_SEG3 0
+#define MP0_BASE__INST6_SEG4 0
+
+#define MP1_BASE__INST0_SEG0 0x00016200
+#define MP1_BASE__INST0_SEG1 0x00E80000
+#define MP1_BASE__INST0_SEG2 0x00EC0000
+#define MP1_BASE__INST0_SEG3 0x00F00000
+#define MP1_BASE__INST0_SEG4 0x02400400
+
+#define MP1_BASE__INST1_SEG0 0
+#define MP1_BASE__INST1_SEG1 0
+#define MP1_BASE__INST1_SEG2 0
+#define MP1_BASE__INST1_SEG3 0
+#define MP1_BASE__INST1_SEG4 0
+
+#define MP1_BASE__INST2_SEG0 0
+#define MP1_BASE__INST2_SEG1 0
+#define MP1_BASE__INST2_SEG2 0
+#define MP1_BASE__INST2_SEG3 0
+#define MP1_BASE__INST2_SEG4 0
+
+#define MP1_BASE__INST3_SEG0 0
+#define MP1_BASE__INST3_SEG1 0
+#define MP1_BASE__INST3_SEG2 0
+#define MP1_BASE__INST3_SEG3 0
+#define MP1_BASE__INST3_SEG4 0
+
+#define MP1_BASE__INST4_SEG0 0
+#define MP1_BASE__INST4_SEG1 0
+#define MP1_BASE__INST4_SEG2 0
+#define MP1_BASE__INST4_SEG3 0
+#define MP1_BASE__INST4_SEG4 0
+
+#define MP1_BASE__INST5_SEG0 0
+#define MP1_BASE__INST5_SEG1 0
+#define MP1_BASE__INST5_SEG2 0
+#define MP1_BASE__INST5_SEG3 0
+#define MP1_BASE__INST5_SEG4 0
+
+#define MP1_BASE__INST6_SEG0 0
+#define MP1_BASE__INST6_SEG1 0
+#define MP1_BASE__INST6_SEG2 0
+#define MP1_BASE__INST6_SEG3 0
+#define MP1_BASE__INST6_SEG4 0
+
+#define NBIF0_BASE__INST0_SEG0 0x00000000
+#define NBIF0_BASE__INST0_SEG1 0x00000014
+#define NBIF0_BASE__INST0_SEG2 0x00000D20
+#define NBIF0_BASE__INST0_SEG3 0x00010400
+#define NBIF0_BASE__INST0_SEG4 0x0241B000
+
+#define NBIF0_BASE__INST1_SEG0 0
+#define NBIF0_BASE__INST1_SEG1 0
+#define NBIF0_BASE__INST1_SEG2 0
+#define NBIF0_BASE__INST1_SEG3 0
+#define NBIF0_BASE__INST1_SEG4 0
+
+#define NBIF0_BASE__INST2_SEG0 0
+#define NBIF0_BASE__INST2_SEG1 0
+#define NBIF0_BASE__INST2_SEG2 0
+#define NBIF0_BASE__INST2_SEG3 0
+#define NBIF0_BASE__INST2_SEG4 0
+
+#define NBIF0_BASE__INST3_SEG0 0
+#define NBIF0_BASE__INST3_SEG1 0
+#define NBIF0_BASE__INST3_SEG2 0
+#define NBIF0_BASE__INST3_SEG3 0
+#define NBIF0_BASE__INST3_SEG4 0
+
+#define NBIF0_BASE__INST4_SEG0 0
+#define NBIF0_BASE__INST4_SEG1 0
+#define NBIF0_BASE__INST4_SEG2 0
+#define NBIF0_BASE__INST4_SEG3 0
+#define NBIF0_BASE__INST4_SEG4 0
+
+#define NBIF0_BASE__INST5_SEG0 0
+#define NBIF0_BASE__INST5_SEG1 0
+#define NBIF0_BASE__INST5_SEG2 0
+#define NBIF0_BASE__INST5_SEG3 0
+#define NBIF0_BASE__INST5_SEG4 0
+
+#define NBIF0_BASE__INST6_SEG0 0
+#define NBIF0_BASE__INST6_SEG1 0
+#define NBIF0_BASE__INST6_SEG2 0
+#define NBIF0_BASE__INST6_SEG3 0
+#define NBIF0_BASE__INST6_SEG4 0
+
+#define OSSSYS_BASE__INST0_SEG0 0x000010A0
+#define OSSSYS_BASE__INST0_SEG1 0x0240A000
+#define OSSSYS_BASE__INST0_SEG2 0
+#define OSSSYS_BASE__INST0_SEG3 0
+#define OSSSYS_BASE__INST0_SEG4 0
+
+#define OSSSYS_BASE__INST1_SEG0 0
+#define OSSSYS_BASE__INST1_SEG1 0
+#define OSSSYS_BASE__INST1_SEG2 0
+#define OSSSYS_BASE__INST1_SEG3 0
+#define OSSSYS_BASE__INST1_SEG4 0
+
+#define OSSSYS_BASE__INST2_SEG0 0
+#define OSSSYS_BASE__INST2_SEG1 0
+#define OSSSYS_BASE__INST2_SEG2 0
+#define OSSSYS_BASE__INST2_SEG3 0
+#define OSSSYS_BASE__INST2_SEG4 0
+
+#define OSSSYS_BASE__INST3_SEG0 0
+#define OSSSYS_BASE__INST3_SEG1 0
+#define OSSSYS_BASE__INST3_SEG2 0
+#define OSSSYS_BASE__INST3_SEG3 0
+#define OSSSYS_BASE__INST3_SEG4 0
+
+#define OSSSYS_BASE__INST4_SEG0 0
+#define OSSSYS_BASE__INST4_SEG1 0
+#define OSSSYS_BASE__INST4_SEG2 0
+#define OSSSYS_BASE__INST4_SEG3 0
+#define OSSSYS_BASE__INST4_SEG4 0
+
+#define OSSSYS_BASE__INST5_SEG0 0
+#define OSSSYS_BASE__INST5_SEG1 0
+#define OSSSYS_BASE__INST5_SEG2 0
+#define OSSSYS_BASE__INST5_SEG3 0
+#define OSSSYS_BASE__INST5_SEG4 0
+
+#define OSSSYS_BASE__INST6_SEG0 0
+#define OSSSYS_BASE__INST6_SEG1 0
+#define OSSSYS_BASE__INST6_SEG2 0
+#define OSSSYS_BASE__INST6_SEG3 0
+#define OSSSYS_BASE__INST6_SEG4 0
+
+#define PCIE0_BASE__INST0_SEG0 0x02411800
+#define PCIE0_BASE__INST0_SEG1 0x04440000
+#define PCIE0_BASE__INST0_SEG2 0
+#define PCIE0_BASE__INST0_SEG3 0
+#define PCIE0_BASE__INST0_SEG4 0
+
+#define PCIE0_BASE__INST1_SEG0 0
+#define PCIE0_BASE__INST1_SEG1 0
+#define PCIE0_BASE__INST1_SEG2 0
+#define PCIE0_BASE__INST1_SEG3 0
+#define PCIE0_BASE__INST1_SEG4 0
+
+#define PCIE0_BASE__INST2_SEG0 0
+#define PCIE0_BASE__INST2_SEG1 0
+#define PCIE0_BASE__INST2_SEG2 0
+#define PCIE0_BASE__INST2_SEG3 0
+#define PCIE0_BASE__INST2_SEG4 0
+
+#define PCIE0_BASE__INST3_SEG0 0
+#define PCIE0_BASE__INST3_SEG1 0
+#define PCIE0_BASE__INST3_SEG2 0
+#define PCIE0_BASE__INST3_SEG3 0
+#define PCIE0_BASE__INST3_SEG4 0
+
+#define PCIE0_BASE__INST4_SEG0 0
+#define PCIE0_BASE__INST4_SEG1 0
+#define PCIE0_BASE__INST4_SEG2 0
+#define PCIE0_BASE__INST4_SEG3 0
+#define PCIE0_BASE__INST4_SEG4 0
+
+#define PCIE0_BASE__INST5_SEG0 0
+#define PCIE0_BASE__INST5_SEG1 0
+#define PCIE0_BASE__INST5_SEG2 0
+#define PCIE0_BASE__INST5_SEG3 0
+#define PCIE0_BASE__INST5_SEG4 0
+
+#define PCIE0_BASE__INST6_SEG0 0
+#define PCIE0_BASE__INST6_SEG1 0
+#define PCIE0_BASE__INST6_SEG2 0
+#define PCIE0_BASE__INST6_SEG3 0
+#define PCIE0_BASE__INST6_SEG4 0
+
+#define SDMA_BASE__INST0_SEG0 0x00001260
+#define SDMA_BASE__INST0_SEG1 0x0000A000
+#define SDMA_BASE__INST0_SEG2 0x02402C00
+#define SDMA_BASE__INST0_SEG3 0
+#define SDMA_BASE__INST0_SEG4 0
+
+#define SDMA_BASE__INST1_SEG0 0x00001260
+#define SDMA_BASE__INST1_SEG1 0x0000A000
+#define SDMA_BASE__INST1_SEG2 0x02402C00
+#define SDMA_BASE__INST1_SEG3 0
+#define SDMA_BASE__INST1_SEG4 0
+
+#define SDMA_BASE__INST2_SEG0 0
+#define SDMA_BASE__INST2_SEG1 0
+#define SDMA_BASE__INST2_SEG2 0
+#define SDMA_BASE__INST2_SEG3 0
+#define SDMA_BASE__INST2_SEG4 0
+
+#define SDMA_BASE__INST3_SEG0 0
+#define SDMA_BASE__INST3_SEG1 0
+#define SDMA_BASE__INST3_SEG2 0
+#define SDMA_BASE__INST3_SEG3 0
+#define SDMA_BASE__INST3_SEG4 0
+
+#define SDMA_BASE__INST4_SEG0 0
+#define SDMA_BASE__INST4_SEG1 0
+#define SDMA_BASE__INST4_SEG2 0
+#define SDMA_BASE__INST4_SEG3 0
+#define SDMA_BASE__INST4_SEG4 0
+
+#define SDMA_BASE__INST5_SEG0 0
+#define SDMA_BASE__INST5_SEG1 0
+#define SDMA_BASE__INST5_SEG2 0
+#define SDMA_BASE__INST5_SEG3 0
+#define SDMA_BASE__INST5_SEG4 0
+
+#define SDMA_BASE__INST6_SEG0 0
+#define SDMA_BASE__INST6_SEG1 0
+#define SDMA_BASE__INST6_SEG2 0
+#define SDMA_BASE__INST6_SEG3 0
+#define SDMA_BASE__INST6_SEG4 0
+
+#define SMUIO_BASE__INST0_SEG0 0x00016800
+#define SMUIO_BASE__INST0_SEG1 0x00016A00
+#define SMUIO_BASE__INST0_SEG2 0x00440000
+#define SMUIO_BASE__INST0_SEG3 0x02401000
+#define SMUIO_BASE__INST0_SEG4 0
+
+#define SMUIO_BASE__INST1_SEG0 0
+#define SMUIO_BASE__INST1_SEG1 0
+#define SMUIO_BASE__INST1_SEG2 0
+#define SMUIO_BASE__INST1_SEG3 0
+#define SMUIO_BASE__INST1_SEG4 0
+
+#define SMUIO_BASE__INST2_SEG0 0
+#define SMUIO_BASE__INST2_SEG1 0
+#define SMUIO_BASE__INST2_SEG2 0
+#define SMUIO_BASE__INST2_SEG3 0
+#define SMUIO_BASE__INST2_SEG4 0
+
+#define SMUIO_BASE__INST3_SEG0 0
+#define SMUIO_BASE__INST3_SEG1 0
+#define SMUIO_BASE__INST3_SEG2 0
+#define SMUIO_BASE__INST3_SEG3 0
+#define SMUIO_BASE__INST3_SEG4 0
+
+#define SMUIO_BASE__INST4_SEG0 0
+#define SMUIO_BASE__INST4_SEG1 0
+#define SMUIO_BASE__INST4_SEG2 0
+#define SMUIO_BASE__INST4_SEG3 0
+#define SMUIO_BASE__INST4_SEG4 0
+
+#define SMUIO_BASE__INST5_SEG0 0
+#define SMUIO_BASE__INST5_SEG1 0
+#define SMUIO_BASE__INST5_SEG2 0
+#define SMUIO_BASE__INST5_SEG3 0
+#define SMUIO_BASE__INST5_SEG4 0
+
+#define SMUIO_BASE__INST6_SEG0 0
+#define SMUIO_BASE__INST6_SEG1 0
+#define SMUIO_BASE__INST6_SEG2 0
+#define SMUIO_BASE__INST6_SEG3 0
+#define SMUIO_BASE__INST6_SEG4 0
+
+#define THM_BASE__INST0_SEG0 0x00016600
+#define THM_BASE__INST0_SEG1 0x02400C00
+#define THM_BASE__INST0_SEG2 0
+#define THM_BASE__INST0_SEG3 0
+#define THM_BASE__INST0_SEG4 0
+
+#define THM_BASE__INST1_SEG0 0
+#define THM_BASE__INST1_SEG1 0
+#define THM_BASE__INST1_SEG2 0
+#define THM_BASE__INST1_SEG3 0
+#define THM_BASE__INST1_SEG4 0
+
+#define THM_BASE__INST2_SEG0 0
+#define THM_BASE__INST2_SEG1 0
+#define THM_BASE__INST2_SEG2 0
+#define THM_BASE__INST2_SEG3 0
+#define THM_BASE__INST2_SEG4 0
+
+#define THM_BASE__INST3_SEG0 0
+#define THM_BASE__INST3_SEG1 0
+#define THM_BASE__INST3_SEG2 0
+#define THM_BASE__INST3_SEG3 0
+#define THM_BASE__INST3_SEG4 0
+
+#define THM_BASE__INST4_SEG0 0
+#define THM_BASE__INST4_SEG1 0
+#define THM_BASE__INST4_SEG2 0
+#define THM_BASE__INST4_SEG3 0
+#define THM_BASE__INST4_SEG4 0
+
+#define THM_BASE__INST5_SEG0 0
+#define THM_BASE__INST5_SEG1 0
+#define THM_BASE__INST5_SEG2 0
+#define THM_BASE__INST5_SEG3 0
+#define THM_BASE__INST5_SEG4 0
+
+#define THM_BASE__INST6_SEG0 0
+#define THM_BASE__INST6_SEG1 0
+#define THM_BASE__INST6_SEG2 0
+#define THM_BASE__INST6_SEG3 0
+#define THM_BASE__INST6_SEG4 0
+
+#define UMC_BASE__INST0_SEG0 0x00014000
+#define UMC_BASE__INST0_SEG1 0x02425800
+#define UMC_BASE__INST0_SEG2 0
+#define UMC_BASE__INST0_SEG3 0
+#define UMC_BASE__INST0_SEG4 0
+
+#define UMC_BASE__INST1_SEG0 0x00054000
+#define UMC_BASE__INST1_SEG1 0x02425C00
+#define UMC_BASE__INST1_SEG2 0
+#define UMC_BASE__INST1_SEG3 0
+#define UMC_BASE__INST1_SEG4 0
+
+#define UMC_BASE__INST2_SEG0 0x00094000
+#define UMC_BASE__INST2_SEG1 0x02426000
+#define UMC_BASE__INST2_SEG2 0
+#define UMC_BASE__INST2_SEG3 0
+#define UMC_BASE__INST2_SEG4 0
+
+#define UMC_BASE__INST3_SEG0 0x000D4000
+#define UMC_BASE__INST3_SEG1 0x02426400
+#define UMC_BASE__INST3_SEG2 0
+#define UMC_BASE__INST3_SEG3 0
+#define UMC_BASE__INST3_SEG4 0
+
+#define UMC_BASE__INST4_SEG0 0
+#define UMC_BASE__INST4_SEG1 0
+#define UMC_BASE__INST4_SEG2 0
+#define UMC_BASE__INST4_SEG3 0
+#define UMC_BASE__INST4_SEG4 0
+
+#define UMC_BASE__INST5_SEG0 0
+#define UMC_BASE__INST5_SEG1 0
+#define UMC_BASE__INST5_SEG2 0
+#define UMC_BASE__INST5_SEG3 0
+#define UMC_BASE__INST5_SEG4 0
+
+#define UMC_BASE__INST6_SEG0 0
+#define UMC_BASE__INST6_SEG1 0
+#define UMC_BASE__INST6_SEG2 0
+#define UMC_BASE__INST6_SEG3 0
+#define UMC_BASE__INST6_SEG4 0
+
+#define USB0_BASE__INST0_SEG0 0x0242A800
+#define USB0_BASE__INST0_SEG1 0x05B00000
+#define USB0_BASE__INST0_SEG2 0
+#define USB0_BASE__INST0_SEG3 0
+#define USB0_BASE__INST0_SEG4 0
+
+#define USB0_BASE__INST1_SEG0 0
+#define USB0_BASE__INST1_SEG1 0
+#define USB0_BASE__INST1_SEG2 0
+#define USB0_BASE__INST1_SEG3 0
+#define USB0_BASE__INST1_SEG4 0
+
+#define USB0_BASE__INST2_SEG0 0
+#define USB0_BASE__INST2_SEG1 0
+#define USB0_BASE__INST2_SEG2 0
+#define USB0_BASE__INST2_SEG3 0
+#define USB0_BASE__INST2_SEG4 0
+
+#define USB0_BASE__INST3_SEG0 0
+#define USB0_BASE__INST3_SEG1 0
+#define USB0_BASE__INST3_SEG2 0
+#define USB0_BASE__INST3_SEG3 0
+#define USB0_BASE__INST3_SEG4 0
+
+#define USB0_BASE__INST4_SEG0 0
+#define USB0_BASE__INST4_SEG1 0
+#define USB0_BASE__INST4_SEG2 0
+#define USB0_BASE__INST4_SEG3 0
+#define USB0_BASE__INST4_SEG4 0
+
+#define USB0_BASE__INST5_SEG0 0
+#define USB0_BASE__INST5_SEG1 0
+#define USB0_BASE__INST5_SEG2 0
+#define USB0_BASE__INST5_SEG3 0
+#define USB0_BASE__INST5_SEG4 0
+
+#define USB0_BASE__INST6_SEG0 0
+#define USB0_BASE__INST6_SEG1 0
+#define USB0_BASE__INST6_SEG2 0
+#define USB0_BASE__INST6_SEG3 0
+#define USB0_BASE__INST6_SEG4 0
+
+#define UVD0_BASE__INST0_SEG0 0x00007800
+#define UVD0_BASE__INST0_SEG1 0x00007E00
+#define UVD0_BASE__INST0_SEG2 0x02403000
+#define UVD0_BASE__INST0_SEG3 0
+#define UVD0_BASE__INST0_SEG4 0
+
+#define UVD0_BASE__INST1_SEG0 0
+#define UVD0_BASE__INST1_SEG1 0
+#define UVD0_BASE__INST1_SEG2 0
+#define UVD0_BASE__INST1_SEG3 0
+#define UVD0_BASE__INST1_SEG4 0
+
+#define UVD0_BASE__INST2_SEG0 0
+#define UVD0_BASE__INST2_SEG1 0
+#define UVD0_BASE__INST2_SEG2 0
+#define UVD0_BASE__INST2_SEG3 0
+#define UVD0_BASE__INST2_SEG4 0
+
+#define UVD0_BASE__INST3_SEG0 0
+#define UVD0_BASE__INST3_SEG1 0
+#define UVD0_BASE__INST3_SEG2 0
+#define UVD0_BASE__INST3_SEG3 0
+#define UVD0_BASE__INST3_SEG4 0
+
+#define UVD0_BASE__INST4_SEG0 0
+#define UVD0_BASE__INST4_SEG1 0
+#define UVD0_BASE__INST4_SEG2 0
+#define UVD0_BASE__INST4_SEG3 0
+#define UVD0_BASE__INST4_SEG4 0
+
+#define UVD0_BASE__INST5_SEG0 0
+#define UVD0_BASE__INST5_SEG1 0
+#define UVD0_BASE__INST5_SEG2 0
+#define UVD0_BASE__INST5_SEG3 0
+#define UVD0_BASE__INST5_SEG4 0
+
+#define UVD0_BASE__INST6_SEG0 0
+#define UVD0_BASE__INST6_SEG1 0
+#define UVD0_BASE__INST6_SEG2 0
+#define UVD0_BASE__INST6_SEG3 0
+#define UVD0_BASE__INST6_SEG4 0
+
+#endif
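
Each IP_BASE entry above lists up to MAX_SEGMENT aperture bases per instance; a register's absolute dword offset is the base of the segment it lives in plus the register's offset within that segment, both of which come from the generated offset headers. A hypothetical helper, reusing the struct IP_BASE type defined in this header:

#include <stdint.h>

/* hypothetical helper: absolute dword offset of a register, given its
 * per-segment offset and the segment (base index) it lives in */
static inline uint32_t ip_reg_offset(const struct IP_BASE *ip,
				     unsigned int inst, unsigned int seg,
				     uint32_t reg)
{
	return ip->instance[inst].segment[seg] + reg;
}

/* e.g. an MMHUB register in instance 0, segment 0 resolves against
 * MMHUB_BASE__INST0_SEG0 (0x0001A000 above) */
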
diff --git a/drivers/gpu/drm/amd/include/navi14_ip_offset.h b/drivers/gpu/drm/amd/include/navi14_ip_offset.h
new file mode 100644
index 000000000000..ecdd9eabe0dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/navi14_ip_offset.h
@@ -0,0 +1,1119 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _navi14_ip_offset_HEADER
+#define _navi14_ip_offset_HEADER
+
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 5
+
+
+struct IP_BASE_INSTANCE
+{
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE
+{
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0x02408C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x02401800, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0 } },
+ { { 0x00017E00, 0x0240BC00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x0240B800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DIO_BASE ={ { { { 0x02404000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DMU_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DPCS_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0x02401400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE GC_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDA_BASE ={ { { { 0x004C0000, 0x02404800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x0240A400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0x02408800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE PCIE0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0x00440000, 0x02401000, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0x02400C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0x02425800, 0, 0, 0 } },
+ { { 0x00054000, 0x02425C00, 0, 0, 0 } },
+ { { 0x00094000, 0x02426000, 0, 0, 0 } },
+ { { 0x000D4000, 0x02426400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE USB0_BASE ={ { { { 0x0242A800, 0x05B00000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x02403000, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+
+
+#define ATHUB_BASE__INST0_SEG0 0x00000C00
+#define ATHUB_BASE__INST0_SEG1 0x02408C00
+#define ATHUB_BASE__INST0_SEG2 0
+#define ATHUB_BASE__INST0_SEG3 0
+#define ATHUB_BASE__INST0_SEG4 0
+
+#define ATHUB_BASE__INST1_SEG0 0
+#define ATHUB_BASE__INST1_SEG1 0
+#define ATHUB_BASE__INST1_SEG2 0
+#define ATHUB_BASE__INST1_SEG3 0
+#define ATHUB_BASE__INST1_SEG4 0
+
+#define ATHUB_BASE__INST2_SEG0 0
+#define ATHUB_BASE__INST2_SEG1 0
+#define ATHUB_BASE__INST2_SEG2 0
+#define ATHUB_BASE__INST2_SEG3 0
+#define ATHUB_BASE__INST2_SEG4 0
+
+#define ATHUB_BASE__INST3_SEG0 0
+#define ATHUB_BASE__INST3_SEG1 0
+#define ATHUB_BASE__INST3_SEG2 0
+#define ATHUB_BASE__INST3_SEG3 0
+#define ATHUB_BASE__INST3_SEG4 0
+
+#define ATHUB_BASE__INST4_SEG0 0
+#define ATHUB_BASE__INST4_SEG1 0
+#define ATHUB_BASE__INST4_SEG2 0
+#define ATHUB_BASE__INST4_SEG3 0
+#define ATHUB_BASE__INST4_SEG4 0
+
+#define ATHUB_BASE__INST5_SEG0 0
+#define ATHUB_BASE__INST5_SEG1 0
+#define ATHUB_BASE__INST5_SEG2 0
+#define ATHUB_BASE__INST5_SEG3 0
+#define ATHUB_BASE__INST5_SEG4 0
+
+#define ATHUB_BASE__INST6_SEG0 0
+#define ATHUB_BASE__INST6_SEG1 0
+#define ATHUB_BASE__INST6_SEG2 0
+#define ATHUB_BASE__INST6_SEG3 0
+#define ATHUB_BASE__INST6_SEG4 0
+
+#define CLK_BASE__INST0_SEG0 0x00016C00
+#define CLK_BASE__INST0_SEG1 0x02401800
+#define CLK_BASE__INST0_SEG2 0
+#define CLK_BASE__INST0_SEG3 0
+#define CLK_BASE__INST0_SEG4 0
+
+#define CLK_BASE__INST1_SEG0 0x00016E00
+#define CLK_BASE__INST1_SEG1 0x02401C00
+#define CLK_BASE__INST1_SEG2 0
+#define CLK_BASE__INST1_SEG3 0
+#define CLK_BASE__INST1_SEG4 0
+
+#define CLK_BASE__INST2_SEG0 0x00017000
+#define CLK_BASE__INST2_SEG1 0x02402000
+#define CLK_BASE__INST2_SEG2 0
+#define CLK_BASE__INST2_SEG3 0
+#define CLK_BASE__INST2_SEG4 0
+
+#define CLK_BASE__INST3_SEG0 0x00017200
+#define CLK_BASE__INST3_SEG1 0x02402400
+#define CLK_BASE__INST3_SEG2 0
+#define CLK_BASE__INST3_SEG3 0
+#define CLK_BASE__INST3_SEG4 0
+
+#define CLK_BASE__INST4_SEG0 0x0001B000
+#define CLK_BASE__INST4_SEG1 0x0242D800
+#define CLK_BASE__INST4_SEG2 0
+#define CLK_BASE__INST4_SEG3 0
+#define CLK_BASE__INST4_SEG4 0
+
+#define CLK_BASE__INST5_SEG0 0x00017E00
+#define CLK_BASE__INST5_SEG1 0x0240BC00
+#define CLK_BASE__INST5_SEG2 0
+#define CLK_BASE__INST5_SEG3 0
+#define CLK_BASE__INST5_SEG4 0
+
+#define CLK_BASE__INST6_SEG0 0
+#define CLK_BASE__INST6_SEG1 0
+#define CLK_BASE__INST6_SEG2 0
+#define CLK_BASE__INST6_SEG3 0
+#define CLK_BASE__INST6_SEG4 0
+
+#define DF_BASE__INST0_SEG0 0x00007000
+#define DF_BASE__INST0_SEG1 0x0240B800
+#define DF_BASE__INST0_SEG2 0
+#define DF_BASE__INST0_SEG3 0
+#define DF_BASE__INST0_SEG4 0
+
+#define DF_BASE__INST1_SEG0 0
+#define DF_BASE__INST1_SEG1 0
+#define DF_BASE__INST1_SEG2 0
+#define DF_BASE__INST1_SEG3 0
+#define DF_BASE__INST1_SEG4 0
+
+#define DF_BASE__INST2_SEG0 0
+#define DF_BASE__INST2_SEG1 0
+#define DF_BASE__INST2_SEG2 0
+#define DF_BASE__INST2_SEG3 0
+#define DF_BASE__INST2_SEG4 0
+
+#define DF_BASE__INST3_SEG0 0
+#define DF_BASE__INST3_SEG1 0
+#define DF_BASE__INST3_SEG2 0
+#define DF_BASE__INST3_SEG3 0
+#define DF_BASE__INST3_SEG4 0
+
+#define DF_BASE__INST4_SEG0 0
+#define DF_BASE__INST4_SEG1 0
+#define DF_BASE__INST4_SEG2 0
+#define DF_BASE__INST4_SEG3 0
+#define DF_BASE__INST4_SEG4 0
+
+#define DF_BASE__INST5_SEG0 0
+#define DF_BASE__INST5_SEG1 0
+#define DF_BASE__INST5_SEG2 0
+#define DF_BASE__INST5_SEG3 0
+#define DF_BASE__INST5_SEG4 0
+
+#define DF_BASE__INST6_SEG0 0
+#define DF_BASE__INST6_SEG1 0
+#define DF_BASE__INST6_SEG2 0
+#define DF_BASE__INST6_SEG3 0
+#define DF_BASE__INST6_SEG4 0
+
+#define DIO_BASE__INST0_SEG0 0x02404000
+#define DIO_BASE__INST0_SEG1 0
+#define DIO_BASE__INST0_SEG2 0
+#define DIO_BASE__INST0_SEG3 0
+#define DIO_BASE__INST0_SEG4 0
+
+#define DIO_BASE__INST1_SEG0 0
+#define DIO_BASE__INST1_SEG1 0
+#define DIO_BASE__INST1_SEG2 0
+#define DIO_BASE__INST1_SEG3 0
+#define DIO_BASE__INST1_SEG4 0
+
+#define DIO_BASE__INST2_SEG0 0
+#define DIO_BASE__INST2_SEG1 0
+#define DIO_BASE__INST2_SEG2 0
+#define DIO_BASE__INST2_SEG3 0
+#define DIO_BASE__INST2_SEG4 0
+
+#define DIO_BASE__INST3_SEG0 0
+#define DIO_BASE__INST3_SEG1 0
+#define DIO_BASE__INST3_SEG2 0
+#define DIO_BASE__INST3_SEG3 0
+#define DIO_BASE__INST3_SEG4 0
+
+#define DIO_BASE__INST4_SEG0 0
+#define DIO_BASE__INST4_SEG1 0
+#define DIO_BASE__INST4_SEG2 0
+#define DIO_BASE__INST4_SEG3 0
+#define DIO_BASE__INST4_SEG4 0
+
+#define DIO_BASE__INST5_SEG0 0
+#define DIO_BASE__INST5_SEG1 0
+#define DIO_BASE__INST5_SEG2 0
+#define DIO_BASE__INST5_SEG3 0
+#define DIO_BASE__INST5_SEG4 0
+
+#define DIO_BASE__INST6_SEG0 0
+#define DIO_BASE__INST6_SEG1 0
+#define DIO_BASE__INST6_SEG2 0
+#define DIO_BASE__INST6_SEG3 0
+#define DIO_BASE__INST6_SEG4 0
+
+#define DMU_BASE__INST0_SEG0 0x00000012
+#define DMU_BASE__INST0_SEG1 0x000000C0
+#define DMU_BASE__INST0_SEG2 0x000034C0
+#define DMU_BASE__INST0_SEG3 0x00009000
+#define DMU_BASE__INST0_SEG4 0x02403C00
+
+#define DMU_BASE__INST1_SEG0 0
+#define DMU_BASE__INST1_SEG1 0
+#define DMU_BASE__INST1_SEG2 0
+#define DMU_BASE__INST1_SEG3 0
+#define DMU_BASE__INST1_SEG4 0
+
+#define DMU_BASE__INST2_SEG0 0
+#define DMU_BASE__INST2_SEG1 0
+#define DMU_BASE__INST2_SEG2 0
+#define DMU_BASE__INST2_SEG3 0
+#define DMU_BASE__INST2_SEG4 0
+
+#define DMU_BASE__INST3_SEG0 0
+#define DMU_BASE__INST3_SEG1 0
+#define DMU_BASE__INST3_SEG2 0
+#define DMU_BASE__INST3_SEG3 0
+#define DMU_BASE__INST3_SEG4 0
+
+#define DMU_BASE__INST4_SEG0 0
+#define DMU_BASE__INST4_SEG1 0
+#define DMU_BASE__INST4_SEG2 0
+#define DMU_BASE__INST4_SEG3 0
+#define DMU_BASE__INST4_SEG4 0
+
+#define DMU_BASE__INST5_SEG0 0
+#define DMU_BASE__INST5_SEG1 0
+#define DMU_BASE__INST5_SEG2 0
+#define DMU_BASE__INST5_SEG3 0
+#define DMU_BASE__INST5_SEG4 0
+
+#define DMU_BASE__INST6_SEG0 0
+#define DMU_BASE__INST6_SEG1 0
+#define DMU_BASE__INST6_SEG2 0
+#define DMU_BASE__INST6_SEG3 0
+#define DMU_BASE__INST6_SEG4 0
+
+#define DPCS_BASE__INST0_SEG0 0x00000012
+#define DPCS_BASE__INST0_SEG1 0x000000C0
+#define DPCS_BASE__INST0_SEG2 0x000034C0
+#define DPCS_BASE__INST0_SEG3 0x00009000
+#define DPCS_BASE__INST0_SEG4 0x02403C00
+
+#define DPCS_BASE__INST1_SEG0 0
+#define DPCS_BASE__INST1_SEG1 0
+#define DPCS_BASE__INST1_SEG2 0
+#define DPCS_BASE__INST1_SEG3 0
+#define DPCS_BASE__INST1_SEG4 0
+
+#define DPCS_BASE__INST2_SEG0 0
+#define DPCS_BASE__INST2_SEG1 0
+#define DPCS_BASE__INST2_SEG2 0
+#define DPCS_BASE__INST2_SEG3 0
+#define DPCS_BASE__INST2_SEG4 0
+
+#define DPCS_BASE__INST3_SEG0 0
+#define DPCS_BASE__INST3_SEG1 0
+#define DPCS_BASE__INST3_SEG2 0
+#define DPCS_BASE__INST3_SEG3 0
+#define DPCS_BASE__INST3_SEG4 0
+
+#define DPCS_BASE__INST4_SEG0 0
+#define DPCS_BASE__INST4_SEG1 0
+#define DPCS_BASE__INST4_SEG2 0
+#define DPCS_BASE__INST4_SEG3 0
+#define DPCS_BASE__INST4_SEG4 0
+
+#define DPCS_BASE__INST5_SEG0 0
+#define DPCS_BASE__INST5_SEG1 0
+#define DPCS_BASE__INST5_SEG2 0
+#define DPCS_BASE__INST5_SEG3 0
+#define DPCS_BASE__INST5_SEG4 0
+
+#define DPCS_BASE__INST6_SEG0 0
+#define DPCS_BASE__INST6_SEG1 0
+#define DPCS_BASE__INST6_SEG2 0
+#define DPCS_BASE__INST6_SEG3 0
+#define DPCS_BASE__INST6_SEG4 0
+
+#define FUSE_BASE__INST0_SEG0 0x00017400
+#define FUSE_BASE__INST0_SEG1 0x02401400
+#define FUSE_BASE__INST0_SEG2 0
+#define FUSE_BASE__INST0_SEG3 0
+#define FUSE_BASE__INST0_SEG4 0
+
+#define FUSE_BASE__INST1_SEG0 0
+#define FUSE_BASE__INST1_SEG1 0
+#define FUSE_BASE__INST1_SEG2 0
+#define FUSE_BASE__INST1_SEG3 0
+#define FUSE_BASE__INST1_SEG4 0
+
+#define FUSE_BASE__INST2_SEG0 0
+#define FUSE_BASE__INST2_SEG1 0
+#define FUSE_BASE__INST2_SEG2 0
+#define FUSE_BASE__INST2_SEG3 0
+#define FUSE_BASE__INST2_SEG4 0
+
+#define FUSE_BASE__INST3_SEG0 0
+#define FUSE_BASE__INST3_SEG1 0
+#define FUSE_BASE__INST3_SEG2 0
+#define FUSE_BASE__INST3_SEG3 0
+#define FUSE_BASE__INST3_SEG4 0
+
+#define FUSE_BASE__INST4_SEG0 0
+#define FUSE_BASE__INST4_SEG1 0
+#define FUSE_BASE__INST4_SEG2 0
+#define FUSE_BASE__INST4_SEG3 0
+#define FUSE_BASE__INST4_SEG4 0
+
+#define FUSE_BASE__INST5_SEG0 0
+#define FUSE_BASE__INST5_SEG1 0
+#define FUSE_BASE__INST5_SEG2 0
+#define FUSE_BASE__INST5_SEG3 0
+#define FUSE_BASE__INST5_SEG4 0
+
+#define FUSE_BASE__INST6_SEG0 0
+#define FUSE_BASE__INST6_SEG1 0
+#define FUSE_BASE__INST6_SEG2 0
+#define FUSE_BASE__INST6_SEG3 0
+#define FUSE_BASE__INST6_SEG4 0
+
+#define GC_BASE__INST0_SEG0 0x00001260
+#define GC_BASE__INST0_SEG1 0x0000A000
+#define GC_BASE__INST0_SEG2 0x02402C00
+#define GC_BASE__INST0_SEG3 0
+#define GC_BASE__INST0_SEG4 0
+
+#define GC_BASE__INST1_SEG0 0
+#define GC_BASE__INST1_SEG1 0
+#define GC_BASE__INST1_SEG2 0
+#define GC_BASE__INST1_SEG3 0
+#define GC_BASE__INST1_SEG4 0
+
+#define GC_BASE__INST2_SEG0 0
+#define GC_BASE__INST2_SEG1 0
+#define GC_BASE__INST2_SEG2 0
+#define GC_BASE__INST2_SEG3 0
+#define GC_BASE__INST2_SEG4 0
+
+#define GC_BASE__INST3_SEG0 0
+#define GC_BASE__INST3_SEG1 0
+#define GC_BASE__INST3_SEG2 0
+#define GC_BASE__INST3_SEG3 0
+#define GC_BASE__INST3_SEG4 0
+
+#define GC_BASE__INST4_SEG0 0
+#define GC_BASE__INST4_SEG1 0
+#define GC_BASE__INST4_SEG2 0
+#define GC_BASE__INST4_SEG3 0
+#define GC_BASE__INST4_SEG4 0
+
+#define GC_BASE__INST5_SEG0 0
+#define GC_BASE__INST5_SEG1 0
+#define GC_BASE__INST5_SEG2 0
+#define GC_BASE__INST5_SEG3 0
+#define GC_BASE__INST5_SEG4 0
+
+#define GC_BASE__INST6_SEG0 0
+#define GC_BASE__INST6_SEG1 0
+#define GC_BASE__INST6_SEG2 0
+#define GC_BASE__INST6_SEG3 0
+#define GC_BASE__INST6_SEG4 0
+
+#define HDA_BASE__INST0_SEG0 0x004C0000
+#define HDA_BASE__INST0_SEG1 0x02404800
+#define HDA_BASE__INST0_SEG2 0
+#define HDA_BASE__INST0_SEG3 0
+#define HDA_BASE__INST0_SEG4 0
+
+#define HDA_BASE__INST1_SEG0 0
+#define HDA_BASE__INST1_SEG1 0
+#define HDA_BASE__INST1_SEG2 0
+#define HDA_BASE__INST1_SEG3 0
+#define HDA_BASE__INST1_SEG4 0
+
+#define HDA_BASE__INST2_SEG0 0
+#define HDA_BASE__INST2_SEG1 0
+#define HDA_BASE__INST2_SEG2 0
+#define HDA_BASE__INST2_SEG3 0
+#define HDA_BASE__INST2_SEG4 0
+
+#define HDA_BASE__INST3_SEG0 0
+#define HDA_BASE__INST3_SEG1 0
+#define HDA_BASE__INST3_SEG2 0
+#define HDA_BASE__INST3_SEG3 0
+#define HDA_BASE__INST3_SEG4 0
+
+#define HDA_BASE__INST4_SEG0 0
+#define HDA_BASE__INST4_SEG1 0
+#define HDA_BASE__INST4_SEG2 0
+#define HDA_BASE__INST4_SEG3 0
+#define HDA_BASE__INST4_SEG4 0
+
+#define HDA_BASE__INST5_SEG0 0
+#define HDA_BASE__INST5_SEG1 0
+#define HDA_BASE__INST5_SEG2 0
+#define HDA_BASE__INST5_SEG3 0
+#define HDA_BASE__INST5_SEG4 0
+
+#define HDA_BASE__INST6_SEG0 0
+#define HDA_BASE__INST6_SEG1 0
+#define HDA_BASE__INST6_SEG2 0
+#define HDA_BASE__INST6_SEG3 0
+#define HDA_BASE__INST6_SEG4 0
+
+#define HDP_BASE__INST0_SEG0 0x00000F20
+#define HDP_BASE__INST0_SEG1 0x0240A400
+#define HDP_BASE__INST0_SEG2 0
+#define HDP_BASE__INST0_SEG3 0
+#define HDP_BASE__INST0_SEG4 0
+
+#define HDP_BASE__INST1_SEG0 0
+#define HDP_BASE__INST1_SEG1 0
+#define HDP_BASE__INST1_SEG2 0
+#define HDP_BASE__INST1_SEG3 0
+#define HDP_BASE__INST1_SEG4 0
+
+#define HDP_BASE__INST2_SEG0 0
+#define HDP_BASE__INST2_SEG1 0
+#define HDP_BASE__INST2_SEG2 0
+#define HDP_BASE__INST2_SEG3 0
+#define HDP_BASE__INST2_SEG4 0
+
+#define HDP_BASE__INST3_SEG0 0
+#define HDP_BASE__INST3_SEG1 0
+#define HDP_BASE__INST3_SEG2 0
+#define HDP_BASE__INST3_SEG3 0
+#define HDP_BASE__INST3_SEG4 0
+
+#define HDP_BASE__INST4_SEG0 0
+#define HDP_BASE__INST4_SEG1 0
+#define HDP_BASE__INST4_SEG2 0
+#define HDP_BASE__INST4_SEG3 0
+#define HDP_BASE__INST4_SEG4 0
+
+#define HDP_BASE__INST5_SEG0 0
+#define HDP_BASE__INST5_SEG1 0
+#define HDP_BASE__INST5_SEG2 0
+#define HDP_BASE__INST5_SEG3 0
+#define HDP_BASE__INST5_SEG4 0
+
+#define HDP_BASE__INST6_SEG0 0
+#define HDP_BASE__INST6_SEG1 0
+#define HDP_BASE__INST6_SEG2 0
+#define HDP_BASE__INST6_SEG3 0
+#define HDP_BASE__INST6_SEG4 0
+
+#define MMHUB_BASE__INST0_SEG0 0x0001A000
+#define MMHUB_BASE__INST0_SEG1 0x02408800
+#define MMHUB_BASE__INST0_SEG2 0
+#define MMHUB_BASE__INST0_SEG3 0
+#define MMHUB_BASE__INST0_SEG4 0
+
+#define MMHUB_BASE__INST1_SEG0 0
+#define MMHUB_BASE__INST1_SEG1 0
+#define MMHUB_BASE__INST1_SEG2 0
+#define MMHUB_BASE__INST1_SEG3 0
+#define MMHUB_BASE__INST1_SEG4 0
+
+#define MMHUB_BASE__INST2_SEG0 0
+#define MMHUB_BASE__INST2_SEG1 0
+#define MMHUB_BASE__INST2_SEG2 0
+#define MMHUB_BASE__INST2_SEG3 0
+#define MMHUB_BASE__INST2_SEG4 0
+
+#define MMHUB_BASE__INST3_SEG0 0
+#define MMHUB_BASE__INST3_SEG1 0
+#define MMHUB_BASE__INST3_SEG2 0
+#define MMHUB_BASE__INST3_SEG3 0
+#define MMHUB_BASE__INST3_SEG4 0
+
+#define MMHUB_BASE__INST4_SEG0 0
+#define MMHUB_BASE__INST4_SEG1 0
+#define MMHUB_BASE__INST4_SEG2 0
+#define MMHUB_BASE__INST4_SEG3 0
+#define MMHUB_BASE__INST4_SEG4 0
+
+#define MMHUB_BASE__INST5_SEG0 0
+#define MMHUB_BASE__INST5_SEG1 0
+#define MMHUB_BASE__INST5_SEG2 0
+#define MMHUB_BASE__INST5_SEG3 0
+#define MMHUB_BASE__INST5_SEG4 0
+
+#define MMHUB_BASE__INST6_SEG0 0
+#define MMHUB_BASE__INST6_SEG1 0
+#define MMHUB_BASE__INST6_SEG2 0
+#define MMHUB_BASE__INST6_SEG3 0
+#define MMHUB_BASE__INST6_SEG4 0
+
+#define MP0_BASE__INST0_SEG0 0x00016000
+#define MP0_BASE__INST0_SEG1 0x00DC0000
+#define MP0_BASE__INST0_SEG2 0x00E00000
+#define MP0_BASE__INST0_SEG3 0x00E40000
+#define MP0_BASE__INST0_SEG4 0x0243FC00
+
+#define MP0_BASE__INST1_SEG0 0
+#define MP0_BASE__INST1_SEG1 0
+#define MP0_BASE__INST1_SEG2 0
+#define MP0_BASE__INST1_SEG3 0
+#define MP0_BASE__INST1_SEG4 0
+
+#define MP0_BASE__INST2_SEG0 0
+#define MP0_BASE__INST2_SEG1 0
+#define MP0_BASE__INST2_SEG2 0
+#define MP0_BASE__INST2_SEG3 0
+#define MP0_BASE__INST2_SEG4 0
+
+#define MP0_BASE__INST3_SEG0 0
+#define MP0_BASE__INST3_SEG1 0
+#define MP0_BASE__INST3_SEG2 0
+#define MP0_BASE__INST3_SEG3 0
+#define MP0_BASE__INST3_SEG4 0
+
+#define MP0_BASE__INST4_SEG0 0
+#define MP0_BASE__INST4_SEG1 0
+#define MP0_BASE__INST4_SEG2 0
+#define MP0_BASE__INST4_SEG3 0
+#define MP0_BASE__INST4_SEG4 0
+
+#define MP0_BASE__INST5_SEG0 0
+#define MP0_BASE__INST5_SEG1 0
+#define MP0_BASE__INST5_SEG2 0
+#define MP0_BASE__INST5_SEG3 0
+#define MP0_BASE__INST5_SEG4 0
+
+#define MP0_BASE__INST6_SEG0 0
+#define MP0_BASE__INST6_SEG1 0
+#define MP0_BASE__INST6_SEG2 0
+#define MP0_BASE__INST6_SEG3 0
+#define MP0_BASE__INST6_SEG4 0
+
+#define MP1_BASE__INST0_SEG0 0x00016000
+#define MP1_BASE__INST0_SEG1 0x00DC0000
+#define MP1_BASE__INST0_SEG2 0x00E00000
+#define MP1_BASE__INST0_SEG3 0x00E40000
+#define MP1_BASE__INST0_SEG4 0x0243FC00
+
+#define MP1_BASE__INST1_SEG0 0
+#define MP1_BASE__INST1_SEG1 0
+#define MP1_BASE__INST1_SEG2 0
+#define MP1_BASE__INST1_SEG3 0
+#define MP1_BASE__INST1_SEG4 0
+
+#define MP1_BASE__INST2_SEG0 0
+#define MP1_BASE__INST2_SEG1 0
+#define MP1_BASE__INST2_SEG2 0
+#define MP1_BASE__INST2_SEG3 0
+#define MP1_BASE__INST2_SEG4 0
+
+#define MP1_BASE__INST3_SEG0 0
+#define MP1_BASE__INST3_SEG1 0
+#define MP1_BASE__INST3_SEG2 0
+#define MP1_BASE__INST3_SEG3 0
+#define MP1_BASE__INST3_SEG4 0
+
+#define MP1_BASE__INST4_SEG0 0
+#define MP1_BASE__INST4_SEG1 0
+#define MP1_BASE__INST4_SEG2 0
+#define MP1_BASE__INST4_SEG3 0
+#define MP1_BASE__INST4_SEG4 0
+
+#define MP1_BASE__INST5_SEG0 0
+#define MP1_BASE__INST5_SEG1 0
+#define MP1_BASE__INST5_SEG2 0
+#define MP1_BASE__INST5_SEG3 0
+#define MP1_BASE__INST5_SEG4 0
+
+#define MP1_BASE__INST6_SEG0 0
+#define MP1_BASE__INST6_SEG1 0
+#define MP1_BASE__INST6_SEG2 0
+#define MP1_BASE__INST6_SEG3 0
+#define MP1_BASE__INST6_SEG4 0
+
+#define NBIF0_BASE__INST0_SEG0 0x00000000
+#define NBIF0_BASE__INST0_SEG1 0x00000014
+#define NBIF0_BASE__INST0_SEG2 0x00000D20
+#define NBIF0_BASE__INST0_SEG3 0x00010400
+#define NBIF0_BASE__INST0_SEG4 0x0241B000
+
+#define NBIF0_BASE__INST1_SEG0 0
+#define NBIF0_BASE__INST1_SEG1 0
+#define NBIF0_BASE__INST1_SEG2 0
+#define NBIF0_BASE__INST1_SEG3 0
+#define NBIF0_BASE__INST1_SEG4 0
+
+#define NBIF0_BASE__INST2_SEG0 0
+#define NBIF0_BASE__INST2_SEG1 0
+#define NBIF0_BASE__INST2_SEG2 0
+#define NBIF0_BASE__INST2_SEG3 0
+#define NBIF0_BASE__INST2_SEG4 0
+
+#define NBIF0_BASE__INST3_SEG0 0
+#define NBIF0_BASE__INST3_SEG1 0
+#define NBIF0_BASE__INST3_SEG2 0
+#define NBIF0_BASE__INST3_SEG3 0
+#define NBIF0_BASE__INST3_SEG4 0
+
+#define NBIF0_BASE__INST4_SEG0 0
+#define NBIF0_BASE__INST4_SEG1 0
+#define NBIF0_BASE__INST4_SEG2 0
+#define NBIF0_BASE__INST4_SEG3 0
+#define NBIF0_BASE__INST4_SEG4 0
+
+#define NBIF0_BASE__INST5_SEG0 0
+#define NBIF0_BASE__INST5_SEG1 0
+#define NBIF0_BASE__INST5_SEG2 0
+#define NBIF0_BASE__INST5_SEG3 0
+#define NBIF0_BASE__INST5_SEG4 0
+
+#define NBIF0_BASE__INST6_SEG0 0
+#define NBIF0_BASE__INST6_SEG1 0
+#define NBIF0_BASE__INST6_SEG2 0
+#define NBIF0_BASE__INST6_SEG3 0
+#define NBIF0_BASE__INST6_SEG4 0
+
+#define OSSSYS_BASE__INST0_SEG0 0x000010A0
+#define OSSSYS_BASE__INST0_SEG1 0x0240A000
+#define OSSSYS_BASE__INST0_SEG2 0
+#define OSSSYS_BASE__INST0_SEG3 0
+#define OSSSYS_BASE__INST0_SEG4 0
+
+#define OSSSYS_BASE__INST1_SEG0 0
+#define OSSSYS_BASE__INST1_SEG1 0
+#define OSSSYS_BASE__INST1_SEG2 0
+#define OSSSYS_BASE__INST1_SEG3 0
+#define OSSSYS_BASE__INST1_SEG4 0
+
+#define OSSSYS_BASE__INST2_SEG0 0
+#define OSSSYS_BASE__INST2_SEG1 0
+#define OSSSYS_BASE__INST2_SEG2 0
+#define OSSSYS_BASE__INST2_SEG3 0
+#define OSSSYS_BASE__INST2_SEG4 0
+
+#define OSSSYS_BASE__INST3_SEG0 0
+#define OSSSYS_BASE__INST3_SEG1 0
+#define OSSSYS_BASE__INST3_SEG2 0
+#define OSSSYS_BASE__INST3_SEG3 0
+#define OSSSYS_BASE__INST3_SEG4 0
+
+#define OSSSYS_BASE__INST4_SEG0 0
+#define OSSSYS_BASE__INST4_SEG1 0
+#define OSSSYS_BASE__INST4_SEG2 0
+#define OSSSYS_BASE__INST4_SEG3 0
+#define OSSSYS_BASE__INST4_SEG4 0
+
+#define OSSSYS_BASE__INST5_SEG0 0
+#define OSSSYS_BASE__INST5_SEG1 0
+#define OSSSYS_BASE__INST5_SEG2 0
+#define OSSSYS_BASE__INST5_SEG3 0
+#define OSSSYS_BASE__INST5_SEG4 0
+
+#define OSSSYS_BASE__INST6_SEG0 0
+#define OSSSYS_BASE__INST6_SEG1 0
+#define OSSSYS_BASE__INST6_SEG2 0
+#define OSSSYS_BASE__INST6_SEG3 0
+#define OSSSYS_BASE__INST6_SEG4 0
+
+#define PCIE0_BASE__INST0_SEG0 0x00000000
+#define PCIE0_BASE__INST0_SEG1 0x00000014
+#define PCIE0_BASE__INST0_SEG2 0x00000D20
+#define PCIE0_BASE__INST0_SEG3 0x00010400
+#define PCIE0_BASE__INST0_SEG4 0x0241B000
+
+#define PCIE0_BASE__INST1_SEG0 0
+#define PCIE0_BASE__INST1_SEG1 0
+#define PCIE0_BASE__INST1_SEG2 0
+#define PCIE0_BASE__INST1_SEG3 0
+#define PCIE0_BASE__INST1_SEG4 0
+
+#define PCIE0_BASE__INST2_SEG0 0
+#define PCIE0_BASE__INST2_SEG1 0
+#define PCIE0_BASE__INST2_SEG2 0
+#define PCIE0_BASE__INST2_SEG3 0
+#define PCIE0_BASE__INST2_SEG4 0
+
+#define PCIE0_BASE__INST3_SEG0 0
+#define PCIE0_BASE__INST3_SEG1 0
+#define PCIE0_BASE__INST3_SEG2 0
+#define PCIE0_BASE__INST3_SEG3 0
+#define PCIE0_BASE__INST3_SEG4 0
+
+#define PCIE0_BASE__INST4_SEG0 0
+#define PCIE0_BASE__INST4_SEG1 0
+#define PCIE0_BASE__INST4_SEG2 0
+#define PCIE0_BASE__INST4_SEG3 0
+#define PCIE0_BASE__INST4_SEG4 0
+
+#define PCIE0_BASE__INST5_SEG0 0
+#define PCIE0_BASE__INST5_SEG1 0
+#define PCIE0_BASE__INST5_SEG2 0
+#define PCIE0_BASE__INST5_SEG3 0
+#define PCIE0_BASE__INST5_SEG4 0
+
+#define PCIE0_BASE__INST6_SEG0 0
+#define PCIE0_BASE__INST6_SEG1 0
+#define PCIE0_BASE__INST6_SEG2 0
+#define PCIE0_BASE__INST6_SEG3 0
+#define PCIE0_BASE__INST6_SEG4 0
+
+#define SDMA_BASE__INST0_SEG0 0x00001260
+#define SDMA_BASE__INST0_SEG1 0x0000A000
+#define SDMA_BASE__INST0_SEG2 0x02402C00
+#define SDMA_BASE__INST0_SEG3 0
+#define SDMA_BASE__INST0_SEG4 0
+
+#define SDMA_BASE__INST1_SEG0 0x00001260
+#define SDMA_BASE__INST1_SEG1 0x0000A000
+#define SDMA_BASE__INST1_SEG2 0x02402C00
+#define SDMA_BASE__INST1_SEG3 0
+#define SDMA_BASE__INST1_SEG4 0
+
+#define SDMA_BASE__INST2_SEG0 0
+#define SDMA_BASE__INST2_SEG1 0
+#define SDMA_BASE__INST2_SEG2 0
+#define SDMA_BASE__INST2_SEG3 0
+#define SDMA_BASE__INST2_SEG4 0
+
+#define SDMA_BASE__INST3_SEG0 0
+#define SDMA_BASE__INST3_SEG1 0
+#define SDMA_BASE__INST3_SEG2 0
+#define SDMA_BASE__INST3_SEG3 0
+#define SDMA_BASE__INST3_SEG4 0
+
+#define SDMA_BASE__INST4_SEG0 0
+#define SDMA_BASE__INST4_SEG1 0
+#define SDMA_BASE__INST4_SEG2 0
+#define SDMA_BASE__INST4_SEG3 0
+#define SDMA_BASE__INST4_SEG4 0
+
+#define SDMA_BASE__INST5_SEG0 0
+#define SDMA_BASE__INST5_SEG1 0
+#define SDMA_BASE__INST5_SEG2 0
+#define SDMA_BASE__INST5_SEG3 0
+#define SDMA_BASE__INST5_SEG4 0
+
+#define SDMA_BASE__INST6_SEG0 0
+#define SDMA_BASE__INST6_SEG1 0
+#define SDMA_BASE__INST6_SEG2 0
+#define SDMA_BASE__INST6_SEG3 0
+#define SDMA_BASE__INST6_SEG4 0
+
+#define SMUIO_BASE__INST0_SEG0 0x00016800
+#define SMUIO_BASE__INST0_SEG1 0x00016A00
+#define SMUIO_BASE__INST0_SEG2 0x00440000
+#define SMUIO_BASE__INST0_SEG3 0x02401000
+#define SMUIO_BASE__INST0_SEG4 0
+
+#define SMUIO_BASE__INST1_SEG0 0
+#define SMUIO_BASE__INST1_SEG1 0
+#define SMUIO_BASE__INST1_SEG2 0
+#define SMUIO_BASE__INST1_SEG3 0
+#define SMUIO_BASE__INST1_SEG4 0
+
+#define SMUIO_BASE__INST2_SEG0 0
+#define SMUIO_BASE__INST2_SEG1 0
+#define SMUIO_BASE__INST2_SEG2 0
+#define SMUIO_BASE__INST2_SEG3 0
+#define SMUIO_BASE__INST2_SEG4 0
+
+#define SMUIO_BASE__INST3_SEG0 0
+#define SMUIO_BASE__INST3_SEG1 0
+#define SMUIO_BASE__INST3_SEG2 0
+#define SMUIO_BASE__INST3_SEG3 0
+#define SMUIO_BASE__INST3_SEG4 0
+
+#define SMUIO_BASE__INST4_SEG0 0
+#define SMUIO_BASE__INST4_SEG1 0
+#define SMUIO_BASE__INST4_SEG2 0
+#define SMUIO_BASE__INST4_SEG3 0
+#define SMUIO_BASE__INST4_SEG4 0
+
+#define SMUIO_BASE__INST5_SEG0 0
+#define SMUIO_BASE__INST5_SEG1 0
+#define SMUIO_BASE__INST5_SEG2 0
+#define SMUIO_BASE__INST5_SEG3 0
+#define SMUIO_BASE__INST5_SEG4 0
+
+#define SMUIO_BASE__INST6_SEG0 0
+#define SMUIO_BASE__INST6_SEG1 0
+#define SMUIO_BASE__INST6_SEG2 0
+#define SMUIO_BASE__INST6_SEG3 0
+#define SMUIO_BASE__INST6_SEG4 0
+
+#define THM_BASE__INST0_SEG0 0x00016600
+#define THM_BASE__INST0_SEG1 0x02400C00
+#define THM_BASE__INST0_SEG2 0
+#define THM_BASE__INST0_SEG3 0
+#define THM_BASE__INST0_SEG4 0
+
+#define THM_BASE__INST1_SEG0 0
+#define THM_BASE__INST1_SEG1 0
+#define THM_BASE__INST1_SEG2 0
+#define THM_BASE__INST1_SEG3 0
+#define THM_BASE__INST1_SEG4 0
+
+#define THM_BASE__INST2_SEG0 0
+#define THM_BASE__INST2_SEG1 0
+#define THM_BASE__INST2_SEG2 0
+#define THM_BASE__INST2_SEG3 0
+#define THM_BASE__INST2_SEG4 0
+
+#define THM_BASE__INST3_SEG0 0
+#define THM_BASE__INST3_SEG1 0
+#define THM_BASE__INST3_SEG2 0
+#define THM_BASE__INST3_SEG3 0
+#define THM_BASE__INST3_SEG4 0
+
+#define THM_BASE__INST4_SEG0 0
+#define THM_BASE__INST4_SEG1 0
+#define THM_BASE__INST4_SEG2 0
+#define THM_BASE__INST4_SEG3 0
+#define THM_BASE__INST4_SEG4 0
+
+#define THM_BASE__INST5_SEG0 0
+#define THM_BASE__INST5_SEG1 0
+#define THM_BASE__INST5_SEG2 0
+#define THM_BASE__INST5_SEG3 0
+#define THM_BASE__INST5_SEG4 0
+
+#define THM_BASE__INST6_SEG0 0
+#define THM_BASE__INST6_SEG1 0
+#define THM_BASE__INST6_SEG2 0
+#define THM_BASE__INST6_SEG3 0
+#define THM_BASE__INST6_SEG4 0
+
+#define UMC_BASE__INST0_SEG0 0x00014000
+#define UMC_BASE__INST0_SEG1 0x02425800
+#define UMC_BASE__INST0_SEG2 0
+#define UMC_BASE__INST0_SEG3 0
+#define UMC_BASE__INST0_SEG4 0
+
+#define UMC_BASE__INST1_SEG0 0x00054000
+#define UMC_BASE__INST1_SEG1 0x02425C00
+#define UMC_BASE__INST1_SEG2 0
+#define UMC_BASE__INST1_SEG3 0
+#define UMC_BASE__INST1_SEG4 0
+
+#define UMC_BASE__INST2_SEG0 0x00094000
+#define UMC_BASE__INST2_SEG1 0x02426000
+#define UMC_BASE__INST2_SEG2 0
+#define UMC_BASE__INST2_SEG3 0
+#define UMC_BASE__INST2_SEG4 0
+
+#define UMC_BASE__INST3_SEG0 0x000D4000
+#define UMC_BASE__INST3_SEG1 0x02426400
+#define UMC_BASE__INST3_SEG2 0
+#define UMC_BASE__INST3_SEG3 0
+#define UMC_BASE__INST3_SEG4 0
+
+#define UMC_BASE__INST4_SEG0 0
+#define UMC_BASE__INST4_SEG1 0
+#define UMC_BASE__INST4_SEG2 0
+#define UMC_BASE__INST4_SEG3 0
+#define UMC_BASE__INST4_SEG4 0
+
+#define UMC_BASE__INST5_SEG0 0
+#define UMC_BASE__INST5_SEG1 0
+#define UMC_BASE__INST5_SEG2 0
+#define UMC_BASE__INST5_SEG3 0
+#define UMC_BASE__INST5_SEG4 0
+
+#define UMC_BASE__INST6_SEG0 0
+#define UMC_BASE__INST6_SEG1 0
+#define UMC_BASE__INST6_SEG2 0
+#define UMC_BASE__INST6_SEG3 0
+#define UMC_BASE__INST6_SEG4 0
+
+#define USB0_BASE__INST0_SEG0 0x0242A800
+#define USB0_BASE__INST0_SEG1 0x05B00000
+#define USB0_BASE__INST0_SEG2 0
+#define USB0_BASE__INST0_SEG3 0
+#define USB0_BASE__INST0_SEG4 0
+
+#define USB0_BASE__INST1_SEG0 0
+#define USB0_BASE__INST1_SEG1 0
+#define USB0_BASE__INST1_SEG2 0
+#define USB0_BASE__INST1_SEG3 0
+#define USB0_BASE__INST1_SEG4 0
+
+#define USB0_BASE__INST2_SEG0 0
+#define USB0_BASE__INST2_SEG1 0
+#define USB0_BASE__INST2_SEG2 0
+#define USB0_BASE__INST2_SEG3 0
+#define USB0_BASE__INST2_SEG4 0
+
+#define USB0_BASE__INST3_SEG0 0
+#define USB0_BASE__INST3_SEG1 0
+#define USB0_BASE__INST3_SEG2 0
+#define USB0_BASE__INST3_SEG3 0
+#define USB0_BASE__INST3_SEG4 0
+
+#define USB0_BASE__INST4_SEG0 0
+#define USB0_BASE__INST4_SEG1 0
+#define USB0_BASE__INST4_SEG2 0
+#define USB0_BASE__INST4_SEG3 0
+#define USB0_BASE__INST4_SEG4 0
+
+#define USB0_BASE__INST5_SEG0 0
+#define USB0_BASE__INST5_SEG1 0
+#define USB0_BASE__INST5_SEG2 0
+#define USB0_BASE__INST5_SEG3 0
+#define USB0_BASE__INST5_SEG4 0
+
+#define USB0_BASE__INST6_SEG0 0
+#define USB0_BASE__INST6_SEG1 0
+#define USB0_BASE__INST6_SEG2 0
+#define USB0_BASE__INST6_SEG3 0
+#define USB0_BASE__INST6_SEG4 0
+
+#define UVD0_BASE__INST0_SEG0 0x00007800
+#define UVD0_BASE__INST0_SEG1 0x00007E00
+#define UVD0_BASE__INST0_SEG2 0x02403000
+#define UVD0_BASE__INST0_SEG3 0
+#define UVD0_BASE__INST0_SEG4 0
+
+#define UVD0_BASE__INST1_SEG0 0
+#define UVD0_BASE__INST1_SEG1 0
+#define UVD0_BASE__INST1_SEG2 0
+#define UVD0_BASE__INST1_SEG3 0
+#define UVD0_BASE__INST1_SEG4 0
+
+#define UVD0_BASE__INST2_SEG0 0
+#define UVD0_BASE__INST2_SEG1 0
+#define UVD0_BASE__INST2_SEG2 0
+#define UVD0_BASE__INST2_SEG3 0
+#define UVD0_BASE__INST2_SEG4 0
+
+#define UVD0_BASE__INST3_SEG0 0
+#define UVD0_BASE__INST3_SEG1 0
+#define UVD0_BASE__INST3_SEG2 0
+#define UVD0_BASE__INST3_SEG3 0
+#define UVD0_BASE__INST3_SEG4 0
+
+#define UVD0_BASE__INST4_SEG0 0
+#define UVD0_BASE__INST4_SEG1 0
+#define UVD0_BASE__INST4_SEG2 0
+#define UVD0_BASE__INST4_SEG3 0
+#define UVD0_BASE__INST4_SEG4 0
+
+#define UVD0_BASE__INST5_SEG0 0
+#define UVD0_BASE__INST5_SEG1 0
+#define UVD0_BASE__INST5_SEG2 0
+#define UVD0_BASE__INST5_SEG3 0
+#define UVD0_BASE__INST5_SEG4 0
+
+#define UVD0_BASE__INST6_SEG0 0
+#define UVD0_BASE__INST6_SEG1 0
+#define UVD0_BASE__INST6_SEG2 0
+#define UVD0_BASE__INST6_SEG3 0
+#define UVD0_BASE__INST6_SEG4 0
+
+#endif
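
The generated table above encodes the same data twice: a struct IP_BASE entry per IP block, giving up to MAX_SEGMENT MMIO segment bases for each of up to MAX_INSTANCE instances, plus flat *_BASE__INSTn_SEGm defines that mirror the struct initializers. A register address is resolved by adding the register's per-segment offset to the matching segment base. The standalone C sketch below only illustrates that lookup under those assumptions; ip_reg_offset and the 0x10 register offset are hypothetical, and the GC_BASE initializer is copied from the Navi14 table above.

#include <stdio.h>

#define MAX_INSTANCE 7
#define MAX_SEGMENT  5

struct IP_BASE_INSTANCE {
        unsigned int segment[MAX_SEGMENT];
};

struct IP_BASE {
        struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};

/* GC_BASE values taken from the navi14_ip_offset.h table above. */
static const struct IP_BASE GC_BASE = { { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } },
                                          { { 0, 0, 0, 0, 0 } },
                                          { { 0, 0, 0, 0, 0 } },
                                          { { 0, 0, 0, 0, 0 } },
                                          { { 0, 0, 0, 0, 0 } },
                                          { { 0, 0, 0, 0, 0 } },
                                          { { 0, 0, 0, 0, 0 } } } };

/*
 * Absolute register offset = segment base of the chosen instance/segment
 * plus the register's offset within that segment.
 */
static unsigned int ip_reg_offset(const struct IP_BASE *ip, unsigned int inst,
                                  unsigned int seg, unsigned int reg)
{
        return ip->instance[inst].segment[seg] + reg;
}

int main(void)
{
        /* Hypothetical GC register in instance 0, segment 0, at offset 0x10. */
        printf("0x%08X\n", ip_reg_offset(&GC_BASE, 0, 0, 0x10));
        return 0;
}
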
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
new file mode 100644
index 000000000000..07633e22e99a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -0,0 +1,1398 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _renoir_ip_offset_HEADER
+#define _renoir_ip_offset_HEADER
+
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 5
+
+
+struct IP_BASE_INSTANCE
+{
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE
+{
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE ACP_BASE ={ { { { 0x02403800, 0x00480000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0x02408C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017E00, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DBGU_IO0_BASE ={ { { { 0x000001E0, 0x0240B400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x0240B800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DIO_BASE ={ { { { 0x02404000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DMU_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DPCS_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0x02401400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0x02402C00, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDA_BASE ={ { { { 0x02404800, 0x004C0000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x0240A400, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE IOHC0_BASE ={ { { { 0x00010000, 0x02406000, 0x04EC0000, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE ISP_BASE ={ { { { 0x00018000, 0x0240B000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE L2IMU0_BASE ={ { { { 0x00007DC0, 0x02407000, 0x00900000, 0x04FC0000, 0x055C0000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0x02408800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x0243FC00, 0x00DC0000, 0x00E00000, 0x00E40000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x02400400, 0x00E80000, 0x00EC0000, 0x00F00000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE PCIE0_BASE ={ { { { 0x02411800, 0x04440000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0x0240A800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0x02401000, 0x00440000, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0x02400C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0x02425800, 0, 0, 0 } },
+ { { 0x00054000, 0x02425C00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE USB0_BASE ={ { { { 0x0242A800, 0x05B00000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x02403000, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+
+
+#define ACP_BASE__INST0_SEG0 0x02403800
+#define ACP_BASE__INST0_SEG1 0x00480000
+#define ACP_BASE__INST0_SEG2 0
+#define ACP_BASE__INST0_SEG3 0
+#define ACP_BASE__INST0_SEG4 0
+
+#define ACP_BASE__INST1_SEG0 0
+#define ACP_BASE__INST1_SEG1 0
+#define ACP_BASE__INST1_SEG2 0
+#define ACP_BASE__INST1_SEG3 0
+#define ACP_BASE__INST1_SEG4 0
+
+#define ACP_BASE__INST2_SEG0 0
+#define ACP_BASE__INST2_SEG1 0
+#define ACP_BASE__INST2_SEG2 0
+#define ACP_BASE__INST2_SEG3 0
+#define ACP_BASE__INST2_SEG4 0
+
+#define ACP_BASE__INST3_SEG0 0
+#define ACP_BASE__INST3_SEG1 0
+#define ACP_BASE__INST3_SEG2 0
+#define ACP_BASE__INST3_SEG3 0
+#define ACP_BASE__INST3_SEG4 0
+
+#define ACP_BASE__INST4_SEG0 0
+#define ACP_BASE__INST4_SEG1 0
+#define ACP_BASE__INST4_SEG2 0
+#define ACP_BASE__INST4_SEG3 0
+#define ACP_BASE__INST4_SEG4 0
+
+#define ACP_BASE__INST5_SEG0 0
+#define ACP_BASE__INST5_SEG1 0
+#define ACP_BASE__INST5_SEG2 0
+#define ACP_BASE__INST5_SEG3 0
+#define ACP_BASE__INST5_SEG4 0
+
+#define ACP_BASE__INST6_SEG0 0
+#define ACP_BASE__INST6_SEG1 0
+#define ACP_BASE__INST6_SEG2 0
+#define ACP_BASE__INST6_SEG3 0
+#define ACP_BASE__INST6_SEG4 0
+
+#define ATHUB_BASE__INST0_SEG0 0x00000C20
+#define ATHUB_BASE__INST0_SEG1 0x02408C00
+#define ATHUB_BASE__INST0_SEG2 0
+#define ATHUB_BASE__INST0_SEG3 0
+#define ATHUB_BASE__INST0_SEG4 0
+
+#define ATHUB_BASE__INST1_SEG0 0
+#define ATHUB_BASE__INST1_SEG1 0
+#define ATHUB_BASE__INST1_SEG2 0
+#define ATHUB_BASE__INST1_SEG3 0
+#define ATHUB_BASE__INST1_SEG4 0
+
+#define ATHUB_BASE__INST2_SEG0 0
+#define ATHUB_BASE__INST2_SEG1 0
+#define ATHUB_BASE__INST2_SEG2 0
+#define ATHUB_BASE__INST2_SEG3 0
+#define ATHUB_BASE__INST2_SEG4 0
+
+#define ATHUB_BASE__INST3_SEG0 0
+#define ATHUB_BASE__INST3_SEG1 0
+#define ATHUB_BASE__INST3_SEG2 0
+#define ATHUB_BASE__INST3_SEG3 0
+#define ATHUB_BASE__INST3_SEG4 0
+
+#define ATHUB_BASE__INST4_SEG0 0
+#define ATHUB_BASE__INST4_SEG1 0
+#define ATHUB_BASE__INST4_SEG2 0
+#define ATHUB_BASE__INST4_SEG3 0
+#define ATHUB_BASE__INST4_SEG4 0
+
+#define ATHUB_BASE__INST5_SEG0 0
+#define ATHUB_BASE__INST5_SEG1 0
+#define ATHUB_BASE__INST5_SEG2 0
+#define ATHUB_BASE__INST5_SEG3 0
+#define ATHUB_BASE__INST5_SEG4 0
+
+#define ATHUB_BASE__INST6_SEG0 0
+#define ATHUB_BASE__INST6_SEG1 0
+#define ATHUB_BASE__INST6_SEG2 0
+#define ATHUB_BASE__INST6_SEG3 0
+#define ATHUB_BASE__INST6_SEG4 0
+
+#define CLK_BASE__INST0_SEG0 0x00016C00
+#define CLK_BASE__INST0_SEG1 0x00016E00
+#define CLK_BASE__INST0_SEG2 0x00017000
+#define CLK_BASE__INST0_SEG3 0x00017E00
+#define CLK_BASE__INST0_SEG4 0
+
+#define CLK_BASE__INST1_SEG0 0
+#define CLK_BASE__INST1_SEG1 0
+#define CLK_BASE__INST1_SEG2 0
+#define CLK_BASE__INST1_SEG3 0
+#define CLK_BASE__INST1_SEG4 0
+
+#define CLK_BASE__INST2_SEG0 0
+#define CLK_BASE__INST2_SEG1 0
+#define CLK_BASE__INST2_SEG2 0
+#define CLK_BASE__INST2_SEG3 0
+#define CLK_BASE__INST2_SEG4 0
+
+#define CLK_BASE__INST3_SEG0 0
+#define CLK_BASE__INST3_SEG1 0
+#define CLK_BASE__INST3_SEG2 0
+#define CLK_BASE__INST3_SEG3 0
+#define CLK_BASE__INST3_SEG4 0
+
+#define CLK_BASE__INST4_SEG0 0
+#define CLK_BASE__INST4_SEG1 0
+#define CLK_BASE__INST4_SEG2 0
+#define CLK_BASE__INST4_SEG3 0
+#define CLK_BASE__INST4_SEG4 0
+
+#define CLK_BASE__INST5_SEG0 0
+#define CLK_BASE__INST5_SEG1 0
+#define CLK_BASE__INST5_SEG2 0
+#define CLK_BASE__INST5_SEG3 0
+#define CLK_BASE__INST5_SEG4 0
+
+#define CLK_BASE__INST6_SEG0 0
+#define CLK_BASE__INST6_SEG1 0
+#define CLK_BASE__INST6_SEG2 0
+#define CLK_BASE__INST6_SEG3 0
+#define CLK_BASE__INST6_SEG4 0
+
+#define DBGU_IO0_BASE__INST0_SEG0 0x000001E0
+#define DBGU_IO0_BASE__INST0_SEG1 0x0240B400
+#define DBGU_IO0_BASE__INST0_SEG2 0
+#define DBGU_IO0_BASE__INST0_SEG3 0
+#define DBGU_IO0_BASE__INST0_SEG4 0
+
+#define DBGU_IO0_BASE__INST1_SEG0 0
+#define DBGU_IO0_BASE__INST1_SEG1 0
+#define DBGU_IO0_BASE__INST1_SEG2 0
+#define DBGU_IO0_BASE__INST1_SEG3 0
+#define DBGU_IO0_BASE__INST1_SEG4 0
+
+#define DBGU_IO0_BASE__INST2_SEG0 0
+#define DBGU_IO0_BASE__INST2_SEG1 0
+#define DBGU_IO0_BASE__INST2_SEG2 0
+#define DBGU_IO0_BASE__INST2_SEG3 0
+#define DBGU_IO0_BASE__INST2_SEG4 0
+
+#define DBGU_IO0_BASE__INST3_SEG0 0
+#define DBGU_IO0_BASE__INST3_SEG1 0
+#define DBGU_IO0_BASE__INST3_SEG2 0
+#define DBGU_IO0_BASE__INST3_SEG3 0
+#define DBGU_IO0_BASE__INST3_SEG4 0
+
+#define DBGU_IO0_BASE__INST4_SEG0 0
+#define DBGU_IO0_BASE__INST4_SEG1 0
+#define DBGU_IO0_BASE__INST4_SEG2 0
+#define DBGU_IO0_BASE__INST4_SEG3 0
+#define DBGU_IO0_BASE__INST4_SEG4 0
+
+#define DBGU_IO0_BASE__INST5_SEG0 0
+#define DBGU_IO0_BASE__INST5_SEG1 0
+#define DBGU_IO0_BASE__INST5_SEG2 0
+#define DBGU_IO0_BASE__INST5_SEG3 0
+#define DBGU_IO0_BASE__INST5_SEG4 0
+
+#define DBGU_IO0_BASE__INST6_SEG0 0
+#define DBGU_IO0_BASE__INST6_SEG1 0
+#define DBGU_IO0_BASE__INST6_SEG2 0
+#define DBGU_IO0_BASE__INST6_SEG3 0
+#define DBGU_IO0_BASE__INST6_SEG4 0
+
+#define DF_BASE__INST0_SEG0 0x00007000
+#define DF_BASE__INST0_SEG1 0x0240B800
+#define DF_BASE__INST0_SEG2 0
+#define DF_BASE__INST0_SEG3 0
+#define DF_BASE__INST0_SEG4 0
+
+#define DF_BASE__INST1_SEG0 0
+#define DF_BASE__INST1_SEG1 0
+#define DF_BASE__INST1_SEG2 0
+#define DF_BASE__INST1_SEG3 0
+#define DF_BASE__INST1_SEG4 0
+
+#define DF_BASE__INST2_SEG0 0
+#define DF_BASE__INST2_SEG1 0
+#define DF_BASE__INST2_SEG2 0
+#define DF_BASE__INST2_SEG3 0
+#define DF_BASE__INST2_SEG4 0
+
+#define DF_BASE__INST3_SEG0 0
+#define DF_BASE__INST3_SEG1 0
+#define DF_BASE__INST3_SEG2 0
+#define DF_BASE__INST3_SEG3 0
+#define DF_BASE__INST3_SEG4 0
+
+#define DF_BASE__INST4_SEG0 0
+#define DF_BASE__INST4_SEG1 0
+#define DF_BASE__INST4_SEG2 0
+#define DF_BASE__INST4_SEG3 0
+#define DF_BASE__INST4_SEG4 0
+
+#define DF_BASE__INST5_SEG0 0
+#define DF_BASE__INST5_SEG1 0
+#define DF_BASE__INST5_SEG2 0
+#define DF_BASE__INST5_SEG3 0
+#define DF_BASE__INST5_SEG4 0
+
+#define DF_BASE__INST6_SEG0 0
+#define DF_BASE__INST6_SEG1 0
+#define DF_BASE__INST6_SEG2 0
+#define DF_BASE__INST6_SEG3 0
+#define DF_BASE__INST6_SEG4 0
+
+#define DIO_BASE__INST0_SEG0 0x02404000
+#define DIO_BASE__INST0_SEG1 0
+#define DIO_BASE__INST0_SEG2 0
+#define DIO_BASE__INST0_SEG3 0
+#define DIO_BASE__INST0_SEG4 0
+
+#define DIO_BASE__INST1_SEG0 0
+#define DIO_BASE__INST1_SEG1 0
+#define DIO_BASE__INST1_SEG2 0
+#define DIO_BASE__INST1_SEG3 0
+#define DIO_BASE__INST1_SEG4 0
+
+#define DIO_BASE__INST2_SEG0 0
+#define DIO_BASE__INST2_SEG1 0
+#define DIO_BASE__INST2_SEG2 0
+#define DIO_BASE__INST2_SEG3 0
+#define DIO_BASE__INST2_SEG4 0
+
+#define DIO_BASE__INST3_SEG0 0
+#define DIO_BASE__INST3_SEG1 0
+#define DIO_BASE__INST3_SEG2 0
+#define DIO_BASE__INST3_SEG3 0
+#define DIO_BASE__INST3_SEG4 0
+
+#define DIO_BASE__INST4_SEG0 0
+#define DIO_BASE__INST4_SEG1 0
+#define DIO_BASE__INST4_SEG2 0
+#define DIO_BASE__INST4_SEG3 0
+#define DIO_BASE__INST4_SEG4 0
+
+#define DIO_BASE__INST5_SEG0 0
+#define DIO_BASE__INST5_SEG1 0
+#define DIO_BASE__INST5_SEG2 0
+#define DIO_BASE__INST5_SEG3 0
+#define DIO_BASE__INST5_SEG4 0
+
+#define DIO_BASE__INST6_SEG0 0
+#define DIO_BASE__INST6_SEG1 0
+#define DIO_BASE__INST6_SEG2 0
+#define DIO_BASE__INST6_SEG3 0
+#define DIO_BASE__INST6_SEG4 0
+
+#define DMU_BASE__INST0_SEG0 0x00000012
+#define DMU_BASE__INST0_SEG1 0x000000C0
+#define DMU_BASE__INST0_SEG2 0x000034C0
+#define DMU_BASE__INST0_SEG3 0x00009000
+#define DMU_BASE__INST0_SEG4 0x02403C00
+
+#define DMU_BASE__INST1_SEG0 0
+#define DMU_BASE__INST1_SEG1 0
+#define DMU_BASE__INST1_SEG2 0
+#define DMU_BASE__INST1_SEG3 0
+#define DMU_BASE__INST1_SEG4 0
+
+#define DMU_BASE__INST2_SEG0 0
+#define DMU_BASE__INST2_SEG1 0
+#define DMU_BASE__INST2_SEG2 0
+#define DMU_BASE__INST2_SEG3 0
+#define DMU_BASE__INST2_SEG4 0
+
+#define DMU_BASE__INST3_SEG0 0
+#define DMU_BASE__INST3_SEG1 0
+#define DMU_BASE__INST3_SEG2 0
+#define DMU_BASE__INST3_SEG3 0
+#define DMU_BASE__INST3_SEG4 0
+
+#define DMU_BASE__INST4_SEG0 0
+#define DMU_BASE__INST4_SEG1 0
+#define DMU_BASE__INST4_SEG2 0
+#define DMU_BASE__INST4_SEG3 0
+#define DMU_BASE__INST4_SEG4 0
+
+#define DMU_BASE__INST5_SEG0 0
+#define DMU_BASE__INST5_SEG1 0
+#define DMU_BASE__INST5_SEG2 0
+#define DMU_BASE__INST5_SEG3 0
+#define DMU_BASE__INST5_SEG4 0
+
+#define DMU_BASE__INST6_SEG0 0
+#define DMU_BASE__INST6_SEG1 0
+#define DMU_BASE__INST6_SEG2 0
+#define DMU_BASE__INST6_SEG3 0
+#define DMU_BASE__INST6_SEG4 0
+
+#define DPCS_BASE__INST0_SEG0 0x00000012
+#define DPCS_BASE__INST0_SEG1 0x000000C0
+#define DPCS_BASE__INST0_SEG2 0x000034C0
+#define DPCS_BASE__INST0_SEG3 0x00009000
+#define DPCS_BASE__INST0_SEG4 0x02403C00
+
+#define DPCS_BASE__INST1_SEG0 0
+#define DPCS_BASE__INST1_SEG1 0
+#define DPCS_BASE__INST1_SEG2 0
+#define DPCS_BASE__INST1_SEG3 0
+#define DPCS_BASE__INST1_SEG4 0
+
+#define DPCS_BASE__INST2_SEG0 0
+#define DPCS_BASE__INST2_SEG1 0
+#define DPCS_BASE__INST2_SEG2 0
+#define DPCS_BASE__INST2_SEG3 0
+#define DPCS_BASE__INST2_SEG4 0
+
+#define DPCS_BASE__INST3_SEG0 0
+#define DPCS_BASE__INST3_SEG1 0
+#define DPCS_BASE__INST3_SEG2 0
+#define DPCS_BASE__INST3_SEG3 0
+#define DPCS_BASE__INST3_SEG4 0
+
+#define DPCS_BASE__INST4_SEG0 0
+#define DPCS_BASE__INST4_SEG1 0
+#define DPCS_BASE__INST4_SEG2 0
+#define DPCS_BASE__INST4_SEG3 0
+#define DPCS_BASE__INST4_SEG4 0
+
+#define DPCS_BASE__INST5_SEG0 0
+#define DPCS_BASE__INST5_SEG1 0
+#define DPCS_BASE__INST5_SEG2 0
+#define DPCS_BASE__INST5_SEG3 0
+#define DPCS_BASE__INST5_SEG4 0
+
+#define DPCS_BASE__INST6_SEG0 0
+#define DPCS_BASE__INST6_SEG1 0
+#define DPCS_BASE__INST6_SEG2 0
+#define DPCS_BASE__INST6_SEG3 0
+#define DPCS_BASE__INST6_SEG4 0
+
+#define FUSE_BASE__INST0_SEG0 0x00017400
+#define FUSE_BASE__INST0_SEG1 0x02401400
+#define FUSE_BASE__INST0_SEG2 0
+#define FUSE_BASE__INST0_SEG3 0
+#define FUSE_BASE__INST0_SEG4 0
+
+#define FUSE_BASE__INST1_SEG0 0
+#define FUSE_BASE__INST1_SEG1 0
+#define FUSE_BASE__INST1_SEG2 0
+#define FUSE_BASE__INST1_SEG3 0
+#define FUSE_BASE__INST1_SEG4 0
+
+#define FUSE_BASE__INST2_SEG0 0
+#define FUSE_BASE__INST2_SEG1 0
+#define FUSE_BASE__INST2_SEG2 0
+#define FUSE_BASE__INST2_SEG3 0
+#define FUSE_BASE__INST2_SEG4 0
+
+#define FUSE_BASE__INST3_SEG0 0
+#define FUSE_BASE__INST3_SEG1 0
+#define FUSE_BASE__INST3_SEG2 0
+#define FUSE_BASE__INST3_SEG3 0
+#define FUSE_BASE__INST3_SEG4 0
+
+#define FUSE_BASE__INST4_SEG0 0
+#define FUSE_BASE__INST4_SEG1 0
+#define FUSE_BASE__INST4_SEG2 0
+#define FUSE_BASE__INST4_SEG3 0
+#define FUSE_BASE__INST4_SEG4 0
+
+#define FUSE_BASE__INST5_SEG0 0
+#define FUSE_BASE__INST5_SEG1 0
+#define FUSE_BASE__INST5_SEG2 0
+#define FUSE_BASE__INST5_SEG3 0
+#define FUSE_BASE__INST5_SEG4 0
+
+#define FUSE_BASE__INST6_SEG0 0
+#define FUSE_BASE__INST6_SEG1 0
+#define FUSE_BASE__INST6_SEG2 0
+#define FUSE_BASE__INST6_SEG3 0
+#define FUSE_BASE__INST6_SEG4 0
+
+#define GC_BASE__INST0_SEG0 0x00002000
+#define GC_BASE__INST0_SEG1 0x0000A000
+#define GC_BASE__INST0_SEG2 0x02402C00
+#define GC_BASE__INST0_SEG3 0
+#define GC_BASE__INST0_SEG4 0
+
+#define GC_BASE__INST1_SEG0 0
+#define GC_BASE__INST1_SEG1 0
+#define GC_BASE__INST1_SEG2 0
+#define GC_BASE__INST1_SEG3 0
+#define GC_BASE__INST1_SEG4 0
+
+#define GC_BASE__INST2_SEG0 0
+#define GC_BASE__INST2_SEG1 0
+#define GC_BASE__INST2_SEG2 0
+#define GC_BASE__INST2_SEG3 0
+#define GC_BASE__INST2_SEG4 0
+
+#define GC_BASE__INST3_SEG0 0
+#define GC_BASE__INST3_SEG1 0
+#define GC_BASE__INST3_SEG2 0
+#define GC_BASE__INST3_SEG3 0
+#define GC_BASE__INST3_SEG4 0
+
+#define GC_BASE__INST4_SEG0 0
+#define GC_BASE__INST4_SEG1 0
+#define GC_BASE__INST4_SEG2 0
+#define GC_BASE__INST4_SEG3 0
+#define GC_BASE__INST4_SEG4 0
+
+#define GC_BASE__INST5_SEG0 0
+#define GC_BASE__INST5_SEG1 0
+#define GC_BASE__INST5_SEG2 0
+#define GC_BASE__INST5_SEG3 0
+#define GC_BASE__INST5_SEG4 0
+
+#define GC_BASE__INST6_SEG0 0
+#define GC_BASE__INST6_SEG1 0
+#define GC_BASE__INST6_SEG2 0
+#define GC_BASE__INST6_SEG3 0
+#define GC_BASE__INST6_SEG4 0
+
+#define HDA_BASE__INST0_SEG0 0x02404800
+#define HDA_BASE__INST0_SEG1 0x004C0000
+#define HDA_BASE__INST0_SEG2 0
+#define HDA_BASE__INST0_SEG3 0
+#define HDA_BASE__INST0_SEG4 0
+
+#define HDA_BASE__INST1_SEG0 0
+#define HDA_BASE__INST1_SEG1 0
+#define HDA_BASE__INST1_SEG2 0
+#define HDA_BASE__INST1_SEG3 0
+#define HDA_BASE__INST1_SEG4 0
+
+#define HDA_BASE__INST2_SEG0 0
+#define HDA_BASE__INST2_SEG1 0
+#define HDA_BASE__INST2_SEG2 0
+#define HDA_BASE__INST2_SEG3 0
+#define HDA_BASE__INST2_SEG4 0
+
+#define HDA_BASE__INST3_SEG0 0
+#define HDA_BASE__INST3_SEG1 0
+#define HDA_BASE__INST3_SEG2 0
+#define HDA_BASE__INST3_SEG3 0
+#define HDA_BASE__INST3_SEG4 0
+
+#define HDA_BASE__INST4_SEG0 0
+#define HDA_BASE__INST4_SEG1 0
+#define HDA_BASE__INST4_SEG2 0
+#define HDA_BASE__INST4_SEG3 0
+#define HDA_BASE__INST4_SEG4 0
+
+#define HDA_BASE__INST5_SEG0 0
+#define HDA_BASE__INST5_SEG1 0
+#define HDA_BASE__INST5_SEG2 0
+#define HDA_BASE__INST5_SEG3 0
+#define HDA_BASE__INST5_SEG4 0
+
+#define HDA_BASE__INST6_SEG0 0
+#define HDA_BASE__INST6_SEG1 0
+#define HDA_BASE__INST6_SEG2 0
+#define HDA_BASE__INST6_SEG3 0
+#define HDA_BASE__INST6_SEG4 0
+
+#define HDP_BASE__INST0_SEG0 0x00000F20
+#define HDP_BASE__INST0_SEG1 0x0240A400
+#define HDP_BASE__INST0_SEG2 0
+#define HDP_BASE__INST0_SEG3 0
+#define HDP_BASE__INST0_SEG4 0
+
+#define HDP_BASE__INST1_SEG0 0
+#define HDP_BASE__INST1_SEG1 0
+#define HDP_BASE__INST1_SEG2 0
+#define HDP_BASE__INST1_SEG3 0
+#define HDP_BASE__INST1_SEG4 0
+
+#define HDP_BASE__INST2_SEG0 0
+#define HDP_BASE__INST2_SEG1 0
+#define HDP_BASE__INST2_SEG2 0
+#define HDP_BASE__INST2_SEG3 0
+#define HDP_BASE__INST2_SEG4 0
+
+#define HDP_BASE__INST3_SEG0 0
+#define HDP_BASE__INST3_SEG1 0
+#define HDP_BASE__INST3_SEG2 0
+#define HDP_BASE__INST3_SEG3 0
+#define HDP_BASE__INST3_SEG4 0
+
+#define HDP_BASE__INST4_SEG0 0
+#define HDP_BASE__INST4_SEG1 0
+#define HDP_BASE__INST4_SEG2 0
+#define HDP_BASE__INST4_SEG3 0
+#define HDP_BASE__INST4_SEG4 0
+
+#define HDP_BASE__INST5_SEG0 0
+#define HDP_BASE__INST5_SEG1 0
+#define HDP_BASE__INST5_SEG2 0
+#define HDP_BASE__INST5_SEG3 0
+#define HDP_BASE__INST5_SEG4 0
+
+#define HDP_BASE__INST6_SEG0 0
+#define HDP_BASE__INST6_SEG1 0
+#define HDP_BASE__INST6_SEG2 0
+#define HDP_BASE__INST6_SEG3 0
+#define HDP_BASE__INST6_SEG4 0
+
+#define IOHC0_BASE__INST0_SEG0 0x00010000
+#define IOHC0_BASE__INST0_SEG1 0x02406000
+#define IOHC0_BASE__INST0_SEG2 0x04EC0000
+#define IOHC0_BASE__INST0_SEG3 0
+#define IOHC0_BASE__INST0_SEG4 0
+
+#define IOHC0_BASE__INST1_SEG0 0
+#define IOHC0_BASE__INST1_SEG1 0
+#define IOHC0_BASE__INST1_SEG2 0
+#define IOHC0_BASE__INST1_SEG3 0
+#define IOHC0_BASE__INST1_SEG4 0
+
+#define IOHC0_BASE__INST2_SEG0 0
+#define IOHC0_BASE__INST2_SEG1 0
+#define IOHC0_BASE__INST2_SEG2 0
+#define IOHC0_BASE__INST2_SEG3 0
+#define IOHC0_BASE__INST2_SEG4 0
+
+#define IOHC0_BASE__INST3_SEG0 0
+#define IOHC0_BASE__INST3_SEG1 0
+#define IOHC0_BASE__INST3_SEG2 0
+#define IOHC0_BASE__INST3_SEG3 0
+#define IOHC0_BASE__INST3_SEG4 0
+
+#define IOHC0_BASE__INST4_SEG0 0
+#define IOHC0_BASE__INST4_SEG1 0
+#define IOHC0_BASE__INST4_SEG2 0
+#define IOHC0_BASE__INST4_SEG3 0
+#define IOHC0_BASE__INST4_SEG4 0
+
+#define IOHC0_BASE__INST5_SEG0 0
+#define IOHC0_BASE__INST5_SEG1 0
+#define IOHC0_BASE__INST5_SEG2 0
+#define IOHC0_BASE__INST5_SEG3 0
+#define IOHC0_BASE__INST5_SEG4 0
+
+#define IOHC0_BASE__INST6_SEG0 0
+#define IOHC0_BASE__INST6_SEG1 0
+#define IOHC0_BASE__INST6_SEG2 0
+#define IOHC0_BASE__INST6_SEG3 0
+#define IOHC0_BASE__INST6_SEG4 0
+
+#define ISP_BASE__INST0_SEG0 0x00018000
+#define ISP_BASE__INST0_SEG1 0x0240B000
+#define ISP_BASE__INST0_SEG2 0
+#define ISP_BASE__INST0_SEG3 0
+#define ISP_BASE__INST0_SEG4 0
+
+#define ISP_BASE__INST1_SEG0 0
+#define ISP_BASE__INST1_SEG1 0
+#define ISP_BASE__INST1_SEG2 0
+#define ISP_BASE__INST1_SEG3 0
+#define ISP_BASE__INST1_SEG4 0
+
+#define ISP_BASE__INST2_SEG0 0
+#define ISP_BASE__INST2_SEG1 0
+#define ISP_BASE__INST2_SEG2 0
+#define ISP_BASE__INST2_SEG3 0
+#define ISP_BASE__INST2_SEG4 0
+
+#define ISP_BASE__INST3_SEG0 0
+#define ISP_BASE__INST3_SEG1 0
+#define ISP_BASE__INST3_SEG2 0
+#define ISP_BASE__INST3_SEG3 0
+#define ISP_BASE__INST3_SEG4 0
+
+#define ISP_BASE__INST4_SEG0 0
+#define ISP_BASE__INST4_SEG1 0
+#define ISP_BASE__INST4_SEG2 0
+#define ISP_BASE__INST4_SEG3 0
+#define ISP_BASE__INST4_SEG4 0
+
+#define ISP_BASE__INST5_SEG0 0
+#define ISP_BASE__INST5_SEG1 0
+#define ISP_BASE__INST5_SEG2 0
+#define ISP_BASE__INST5_SEG3 0
+#define ISP_BASE__INST5_SEG4 0
+
+#define ISP_BASE__INST6_SEG0 0
+#define ISP_BASE__INST6_SEG1 0
+#define ISP_BASE__INST6_SEG2 0
+#define ISP_BASE__INST6_SEG3 0
+#define ISP_BASE__INST6_SEG4 0
+
+#define L2IMU0_BASE__INST0_SEG0 0x00007DC0
+#define L2IMU0_BASE__INST0_SEG1 0x02407000
+#define L2IMU0_BASE__INST0_SEG2 0x00900000
+#define L2IMU0_BASE__INST0_SEG3 0x04FC0000
+#define L2IMU0_BASE__INST0_SEG4 0x055C0000
+
+#define L2IMU0_BASE__INST1_SEG0 0
+#define L2IMU0_BASE__INST1_SEG1 0
+#define L2IMU0_BASE__INST1_SEG2 0
+#define L2IMU0_BASE__INST1_SEG3 0
+#define L2IMU0_BASE__INST1_SEG4 0
+
+#define L2IMU0_BASE__INST2_SEG0 0
+#define L2IMU0_BASE__INST2_SEG1 0
+#define L2IMU0_BASE__INST2_SEG2 0
+#define L2IMU0_BASE__INST2_SEG3 0
+#define L2IMU0_BASE__INST2_SEG4 0
+
+#define L2IMU0_BASE__INST3_SEG0 0
+#define L2IMU0_BASE__INST3_SEG1 0
+#define L2IMU0_BASE__INST3_SEG2 0
+#define L2IMU0_BASE__INST3_SEG3 0
+#define L2IMU0_BASE__INST3_SEG4 0
+
+#define L2IMU0_BASE__INST4_SEG0 0
+#define L2IMU0_BASE__INST4_SEG1 0
+#define L2IMU0_BASE__INST4_SEG2 0
+#define L2IMU0_BASE__INST4_SEG3 0
+#define L2IMU0_BASE__INST4_SEG4 0
+
+#define L2IMU0_BASE__INST5_SEG0 0
+#define L2IMU0_BASE__INST5_SEG1 0
+#define L2IMU0_BASE__INST5_SEG2 0
+#define L2IMU0_BASE__INST5_SEG3 0
+#define L2IMU0_BASE__INST5_SEG4 0
+
+#define L2IMU0_BASE__INST6_SEG0 0
+#define L2IMU0_BASE__INST6_SEG1 0
+#define L2IMU0_BASE__INST6_SEG2 0
+#define L2IMU0_BASE__INST6_SEG3 0
+#define L2IMU0_BASE__INST6_SEG4 0
+
+#define MMHUB_BASE__INST0_SEG0 0x0001A000
+#define MMHUB_BASE__INST0_SEG1 0x02408800
+#define MMHUB_BASE__INST0_SEG2 0
+#define MMHUB_BASE__INST0_SEG3 0
+#define MMHUB_BASE__INST0_SEG4 0
+
+#define MMHUB_BASE__INST1_SEG0 0
+#define MMHUB_BASE__INST1_SEG1 0
+#define MMHUB_BASE__INST1_SEG2 0
+#define MMHUB_BASE__INST1_SEG3 0
+#define MMHUB_BASE__INST1_SEG4 0
+
+#define MMHUB_BASE__INST2_SEG0 0
+#define MMHUB_BASE__INST2_SEG1 0
+#define MMHUB_BASE__INST2_SEG2 0
+#define MMHUB_BASE__INST2_SEG3 0
+#define MMHUB_BASE__INST2_SEG4 0
+
+#define MMHUB_BASE__INST3_SEG0 0
+#define MMHUB_BASE__INST3_SEG1 0
+#define MMHUB_BASE__INST3_SEG2 0
+#define MMHUB_BASE__INST3_SEG3 0
+#define MMHUB_BASE__INST3_SEG4 0
+
+#define MMHUB_BASE__INST4_SEG0 0
+#define MMHUB_BASE__INST4_SEG1 0
+#define MMHUB_BASE__INST4_SEG2 0
+#define MMHUB_BASE__INST4_SEG3 0
+#define MMHUB_BASE__INST4_SEG4 0
+
+#define MMHUB_BASE__INST5_SEG0 0
+#define MMHUB_BASE__INST5_SEG1 0
+#define MMHUB_BASE__INST5_SEG2 0
+#define MMHUB_BASE__INST5_SEG3 0
+#define MMHUB_BASE__INST5_SEG4 0
+
+#define MMHUB_BASE__INST6_SEG0 0
+#define MMHUB_BASE__INST6_SEG1 0
+#define MMHUB_BASE__INST6_SEG2 0
+#define MMHUB_BASE__INST6_SEG3 0
+#define MMHUB_BASE__INST6_SEG4 0
+
+#define MP0_BASE__INST0_SEG0 0x00016000
+#define MP0_BASE__INST0_SEG1 0x0243FC00
+#define MP0_BASE__INST0_SEG2 0x00DC0000
+#define MP0_BASE__INST0_SEG3 0x00E00000
+#define MP0_BASE__INST0_SEG4 0x00E40000
+
+#define MP0_BASE__INST1_SEG0 0
+#define MP0_BASE__INST1_SEG1 0
+#define MP0_BASE__INST1_SEG2 0
+#define MP0_BASE__INST1_SEG3 0
+#define MP0_BASE__INST1_SEG4 0
+
+#define MP0_BASE__INST2_SEG0 0
+#define MP0_BASE__INST2_SEG1 0
+#define MP0_BASE__INST2_SEG2 0
+#define MP0_BASE__INST2_SEG3 0
+#define MP0_BASE__INST2_SEG4 0
+
+#define MP0_BASE__INST3_SEG0 0
+#define MP0_BASE__INST3_SEG1 0
+#define MP0_BASE__INST3_SEG2 0
+#define MP0_BASE__INST3_SEG3 0
+#define MP0_BASE__INST3_SEG4 0
+
+#define MP0_BASE__INST4_SEG0 0
+#define MP0_BASE__INST4_SEG1 0
+#define MP0_BASE__INST4_SEG2 0
+#define MP0_BASE__INST4_SEG3 0
+#define MP0_BASE__INST4_SEG4 0
+
+#define MP0_BASE__INST5_SEG0 0
+#define MP0_BASE__INST5_SEG1 0
+#define MP0_BASE__INST5_SEG2 0
+#define MP0_BASE__INST5_SEG3 0
+#define MP0_BASE__INST5_SEG4 0
+
+#define MP0_BASE__INST6_SEG0 0
+#define MP0_BASE__INST6_SEG1 0
+#define MP0_BASE__INST6_SEG2 0
+#define MP0_BASE__INST6_SEG3 0
+#define MP0_BASE__INST6_SEG4 0
+
+#define MP1_BASE__INST0_SEG0 0x00016200
+#define MP1_BASE__INST0_SEG1 0x02400400
+#define MP1_BASE__INST0_SEG2 0x00E80000
+#define MP1_BASE__INST0_SEG3 0x00EC0000
+#define MP1_BASE__INST0_SEG4 0x00F00000
+
+#define MP1_BASE__INST1_SEG0 0
+#define MP1_BASE__INST1_SEG1 0
+#define MP1_BASE__INST1_SEG2 0
+#define MP1_BASE__INST1_SEG3 0
+#define MP1_BASE__INST1_SEG4 0
+
+#define MP1_BASE__INST2_SEG0 0
+#define MP1_BASE__INST2_SEG1 0
+#define MP1_BASE__INST2_SEG2 0
+#define MP1_BASE__INST2_SEG3 0
+#define MP1_BASE__INST2_SEG4 0
+
+#define MP1_BASE__INST3_SEG0 0
+#define MP1_BASE__INST3_SEG1 0
+#define MP1_BASE__INST3_SEG2 0
+#define MP1_BASE__INST3_SEG3 0
+#define MP1_BASE__INST3_SEG4 0
+
+#define MP1_BASE__INST4_SEG0 0
+#define MP1_BASE__INST4_SEG1 0
+#define MP1_BASE__INST4_SEG2 0
+#define MP1_BASE__INST4_SEG3 0
+#define MP1_BASE__INST4_SEG4 0
+
+#define MP1_BASE__INST5_SEG0 0
+#define MP1_BASE__INST5_SEG1 0
+#define MP1_BASE__INST5_SEG2 0
+#define MP1_BASE__INST5_SEG3 0
+#define MP1_BASE__INST5_SEG4 0
+
+#define MP1_BASE__INST6_SEG0 0
+#define MP1_BASE__INST6_SEG1 0
+#define MP1_BASE__INST6_SEG2 0
+#define MP1_BASE__INST6_SEG3 0
+#define MP1_BASE__INST6_SEG4 0
+
+#define NBIF0_BASE__INST0_SEG0 0x00000000
+#define NBIF0_BASE__INST0_SEG1 0x00000014
+#define NBIF0_BASE__INST0_SEG2 0x00000D20
+#define NBIF0_BASE__INST0_SEG3 0x00010400
+#define NBIF0_BASE__INST0_SEG4 0x0241B000
+
+#define NBIF0_BASE__INST1_SEG0 0
+#define NBIF0_BASE__INST1_SEG1 0
+#define NBIF0_BASE__INST1_SEG2 0
+#define NBIF0_BASE__INST1_SEG3 0
+#define NBIF0_BASE__INST1_SEG4 0
+
+#define NBIF0_BASE__INST2_SEG0 0
+#define NBIF0_BASE__INST2_SEG1 0
+#define NBIF0_BASE__INST2_SEG2 0
+#define NBIF0_BASE__INST2_SEG3 0
+#define NBIF0_BASE__INST2_SEG4 0
+
+#define NBIF0_BASE__INST3_SEG0 0
+#define NBIF0_BASE__INST3_SEG1 0
+#define NBIF0_BASE__INST3_SEG2 0
+#define NBIF0_BASE__INST3_SEG3 0
+#define NBIF0_BASE__INST3_SEG4 0
+
+#define NBIF0_BASE__INST4_SEG0 0
+#define NBIF0_BASE__INST4_SEG1 0
+#define NBIF0_BASE__INST4_SEG2 0
+#define NBIF0_BASE__INST4_SEG3 0
+#define NBIF0_BASE__INST4_SEG4 0
+
+#define NBIF0_BASE__INST5_SEG0 0
+#define NBIF0_BASE__INST5_SEG1 0
+#define NBIF0_BASE__INST5_SEG2 0
+#define NBIF0_BASE__INST5_SEG3 0
+#define NBIF0_BASE__INST5_SEG4 0
+
+#define NBIF0_BASE__INST6_SEG0 0
+#define NBIF0_BASE__INST6_SEG1 0
+#define NBIF0_BASE__INST6_SEG2 0
+#define NBIF0_BASE__INST6_SEG3 0
+#define NBIF0_BASE__INST6_SEG4 0
+
+#define OSSSYS_BASE__INST0_SEG0 0x000010A0
+#define OSSSYS_BASE__INST0_SEG1 0x0240A000
+#define OSSSYS_BASE__INST0_SEG2 0
+#define OSSSYS_BASE__INST0_SEG3 0
+#define OSSSYS_BASE__INST0_SEG4 0
+
+#define OSSSYS_BASE__INST1_SEG0 0
+#define OSSSYS_BASE__INST1_SEG1 0
+#define OSSSYS_BASE__INST1_SEG2 0
+#define OSSSYS_BASE__INST1_SEG3 0
+#define OSSSYS_BASE__INST1_SEG4 0
+
+#define OSSSYS_BASE__INST2_SEG0 0
+#define OSSSYS_BASE__INST2_SEG1 0
+#define OSSSYS_BASE__INST2_SEG2 0
+#define OSSSYS_BASE__INST2_SEG3 0
+#define OSSSYS_BASE__INST2_SEG4 0
+
+#define OSSSYS_BASE__INST3_SEG0 0
+#define OSSSYS_BASE__INST3_SEG1 0
+#define OSSSYS_BASE__INST3_SEG2 0
+#define OSSSYS_BASE__INST3_SEG3 0
+#define OSSSYS_BASE__INST3_SEG4 0
+
+#define OSSSYS_BASE__INST4_SEG0 0
+#define OSSSYS_BASE__INST4_SEG1 0
+#define OSSSYS_BASE__INST4_SEG2 0
+#define OSSSYS_BASE__INST4_SEG3 0
+#define OSSSYS_BASE__INST4_SEG4 0
+
+#define OSSSYS_BASE__INST5_SEG0 0
+#define OSSSYS_BASE__INST5_SEG1 0
+#define OSSSYS_BASE__INST5_SEG2 0
+#define OSSSYS_BASE__INST5_SEG3 0
+#define OSSSYS_BASE__INST5_SEG4 0
+
+#define OSSSYS_BASE__INST6_SEG0 0
+#define OSSSYS_BASE__INST6_SEG1 0
+#define OSSSYS_BASE__INST6_SEG2 0
+#define OSSSYS_BASE__INST6_SEG3 0
+#define OSSSYS_BASE__INST6_SEG4 0
+
+#define PCIE0_BASE__INST0_SEG0 0x02411800
+#define PCIE0_BASE__INST0_SEG1 0x04440000
+#define PCIE0_BASE__INST0_SEG2 0
+#define PCIE0_BASE__INST0_SEG3 0
+#define PCIE0_BASE__INST0_SEG4 0
+
+#define PCIE0_BASE__INST1_SEG0 0
+#define PCIE0_BASE__INST1_SEG1 0
+#define PCIE0_BASE__INST1_SEG2 0
+#define PCIE0_BASE__INST1_SEG3 0
+#define PCIE0_BASE__INST1_SEG4 0
+
+#define PCIE0_BASE__INST2_SEG0 0
+#define PCIE0_BASE__INST2_SEG1 0
+#define PCIE0_BASE__INST2_SEG2 0
+#define PCIE0_BASE__INST2_SEG3 0
+#define PCIE0_BASE__INST2_SEG4 0
+
+#define PCIE0_BASE__INST3_SEG0 0
+#define PCIE0_BASE__INST3_SEG1 0
+#define PCIE0_BASE__INST3_SEG2 0
+#define PCIE0_BASE__INST3_SEG3 0
+#define PCIE0_BASE__INST3_SEG4 0
+
+#define PCIE0_BASE__INST4_SEG0 0
+#define PCIE0_BASE__INST4_SEG1 0
+#define PCIE0_BASE__INST4_SEG2 0
+#define PCIE0_BASE__INST4_SEG3 0
+#define PCIE0_BASE__INST4_SEG4 0
+
+#define PCIE0_BASE__INST5_SEG0 0
+#define PCIE0_BASE__INST5_SEG1 0
+#define PCIE0_BASE__INST5_SEG2 0
+#define PCIE0_BASE__INST5_SEG3 0
+#define PCIE0_BASE__INST5_SEG4 0
+
+#define PCIE0_BASE__INST6_SEG0 0
+#define PCIE0_BASE__INST6_SEG1 0
+#define PCIE0_BASE__INST6_SEG2 0
+#define PCIE0_BASE__INST6_SEG3 0
+#define PCIE0_BASE__INST6_SEG4 0
+
+#define SDMA0_BASE__INST0_SEG0 0x00001260
+#define SDMA0_BASE__INST0_SEG1 0x0240A800
+#define SDMA0_BASE__INST0_SEG2 0
+#define SDMA0_BASE__INST0_SEG3 0
+#define SDMA0_BASE__INST0_SEG4 0
+
+#define SDMA0_BASE__INST1_SEG0 0
+#define SDMA0_BASE__INST1_SEG1 0
+#define SDMA0_BASE__INST1_SEG2 0
+#define SDMA0_BASE__INST1_SEG3 0
+#define SDMA0_BASE__INST1_SEG4 0
+
+#define SDMA0_BASE__INST2_SEG0 0
+#define SDMA0_BASE__INST2_SEG1 0
+#define SDMA0_BASE__INST2_SEG2 0
+#define SDMA0_BASE__INST2_SEG3 0
+#define SDMA0_BASE__INST2_SEG4 0
+
+#define SDMA0_BASE__INST3_SEG0 0
+#define SDMA0_BASE__INST3_SEG1 0
+#define SDMA0_BASE__INST3_SEG2 0
+#define SDMA0_BASE__INST3_SEG3 0
+#define SDMA0_BASE__INST3_SEG4 0
+
+#define SDMA0_BASE__INST4_SEG0 0
+#define SDMA0_BASE__INST4_SEG1 0
+#define SDMA0_BASE__INST4_SEG2 0
+#define SDMA0_BASE__INST4_SEG3 0
+#define SDMA0_BASE__INST4_SEG4 0
+
+#define SDMA0_BASE__INST5_SEG0 0
+#define SDMA0_BASE__INST5_SEG1 0
+#define SDMA0_BASE__INST5_SEG2 0
+#define SDMA0_BASE__INST5_SEG3 0
+#define SDMA0_BASE__INST5_SEG4 0
+
+#define SDMA0_BASE__INST6_SEG0 0
+#define SDMA0_BASE__INST6_SEG1 0
+#define SDMA0_BASE__INST6_SEG2 0
+#define SDMA0_BASE__INST6_SEG3 0
+#define SDMA0_BASE__INST6_SEG4 0
+
+#define SMUIO_BASE__INST0_SEG0 0x00016800
+#define SMUIO_BASE__INST0_SEG1 0x00016A00
+#define SMUIO_BASE__INST0_SEG2 0x02401000
+#define SMUIO_BASE__INST0_SEG3 0x00440000
+#define SMUIO_BASE__INST0_SEG4 0
+
+#define SMUIO_BASE__INST1_SEG0 0
+#define SMUIO_BASE__INST1_SEG1 0
+#define SMUIO_BASE__INST1_SEG2 0
+#define SMUIO_BASE__INST1_SEG3 0
+#define SMUIO_BASE__INST1_SEG4 0
+
+#define SMUIO_BASE__INST2_SEG0 0
+#define SMUIO_BASE__INST2_SEG1 0
+#define SMUIO_BASE__INST2_SEG2 0
+#define SMUIO_BASE__INST2_SEG3 0
+#define SMUIO_BASE__INST2_SEG4 0
+
+#define SMUIO_BASE__INST3_SEG0 0
+#define SMUIO_BASE__INST3_SEG1 0
+#define SMUIO_BASE__INST3_SEG2 0
+#define SMUIO_BASE__INST3_SEG3 0
+#define SMUIO_BASE__INST3_SEG4 0
+
+#define SMUIO_BASE__INST4_SEG0 0
+#define SMUIO_BASE__INST4_SEG1 0
+#define SMUIO_BASE__INST4_SEG2 0
+#define SMUIO_BASE__INST4_SEG3 0
+#define SMUIO_BASE__INST4_SEG4 0
+
+#define SMUIO_BASE__INST5_SEG0 0
+#define SMUIO_BASE__INST5_SEG1 0
+#define SMUIO_BASE__INST5_SEG2 0
+#define SMUIO_BASE__INST5_SEG3 0
+#define SMUIO_BASE__INST5_SEG4 0
+
+#define SMUIO_BASE__INST6_SEG0 0
+#define SMUIO_BASE__INST6_SEG1 0
+#define SMUIO_BASE__INST6_SEG2 0
+#define SMUIO_BASE__INST6_SEG3 0
+#define SMUIO_BASE__INST6_SEG4 0
+
+#define THM_BASE__INST0_SEG0 0x00016600
+#define THM_BASE__INST0_SEG1 0x02400C00
+#define THM_BASE__INST0_SEG2 0
+#define THM_BASE__INST0_SEG3 0
+#define THM_BASE__INST0_SEG4 0
+
+#define THM_BASE__INST1_SEG0 0
+#define THM_BASE__INST1_SEG1 0
+#define THM_BASE__INST1_SEG2 0
+#define THM_BASE__INST1_SEG3 0
+#define THM_BASE__INST1_SEG4 0
+
+#define THM_BASE__INST2_SEG0 0
+#define THM_BASE__INST2_SEG1 0
+#define THM_BASE__INST2_SEG2 0
+#define THM_BASE__INST2_SEG3 0
+#define THM_BASE__INST2_SEG4 0
+
+#define THM_BASE__INST3_SEG0 0
+#define THM_BASE__INST3_SEG1 0
+#define THM_BASE__INST3_SEG2 0
+#define THM_BASE__INST3_SEG3 0
+#define THM_BASE__INST3_SEG4 0
+
+#define THM_BASE__INST4_SEG0 0
+#define THM_BASE__INST4_SEG1 0
+#define THM_BASE__INST4_SEG2 0
+#define THM_BASE__INST4_SEG3 0
+#define THM_BASE__INST4_SEG4 0
+
+#define THM_BASE__INST5_SEG0 0
+#define THM_BASE__INST5_SEG1 0
+#define THM_BASE__INST5_SEG2 0
+#define THM_BASE__INST5_SEG3 0
+#define THM_BASE__INST5_SEG4 0
+
+#define THM_BASE__INST6_SEG0 0
+#define THM_BASE__INST6_SEG1 0
+#define THM_BASE__INST6_SEG2 0
+#define THM_BASE__INST6_SEG3 0
+#define THM_BASE__INST6_SEG4 0
+
+#define UMC_BASE__INST0_SEG0 0x00014000
+#define UMC_BASE__INST0_SEG1 0x02425800
+#define UMC_BASE__INST0_SEG2 0
+#define UMC_BASE__INST0_SEG3 0
+#define UMC_BASE__INST0_SEG4 0
+
+#define UMC_BASE__INST1_SEG0 0x00054000
+#define UMC_BASE__INST1_SEG1 0x02425C00
+#define UMC_BASE__INST1_SEG2 0
+#define UMC_BASE__INST1_SEG3 0
+#define UMC_BASE__INST1_SEG4 0
+
+#define UMC_BASE__INST2_SEG0 0
+#define UMC_BASE__INST2_SEG1 0
+#define UMC_BASE__INST2_SEG2 0
+#define UMC_BASE__INST2_SEG3 0
+#define UMC_BASE__INST2_SEG4 0
+
+#define UMC_BASE__INST3_SEG0 0
+#define UMC_BASE__INST3_SEG1 0
+#define UMC_BASE__INST3_SEG2 0
+#define UMC_BASE__INST3_SEG3 0
+#define UMC_BASE__INST3_SEG4 0
+
+#define UMC_BASE__INST4_SEG0 0
+#define UMC_BASE__INST4_SEG1 0
+#define UMC_BASE__INST4_SEG2 0
+#define UMC_BASE__INST4_SEG3 0
+#define UMC_BASE__INST4_SEG4 0
+
+#define UMC_BASE__INST5_SEG0 0
+#define UMC_BASE__INST5_SEG1 0
+#define UMC_BASE__INST5_SEG2 0
+#define UMC_BASE__INST5_SEG3 0
+#define UMC_BASE__INST5_SEG4 0
+
+#define UMC_BASE__INST6_SEG0 0
+#define UMC_BASE__INST6_SEG1 0
+#define UMC_BASE__INST6_SEG2 0
+#define UMC_BASE__INST6_SEG3 0
+#define UMC_BASE__INST6_SEG4 0
+
+#define USB0_BASE__INST0_SEG0 0x0242A800
+#define USB0_BASE__INST0_SEG1 0x05B00000
+#define USB0_BASE__INST0_SEG2 0
+#define USB0_BASE__INST0_SEG3 0
+#define USB0_BASE__INST0_SEG4 0
+
+#define USB0_BASE__INST1_SEG0 0
+#define USB0_BASE__INST1_SEG1 0
+#define USB0_BASE__INST1_SEG2 0
+#define USB0_BASE__INST1_SEG3 0
+#define USB0_BASE__INST1_SEG4 0
+
+#define USB0_BASE__INST2_SEG0 0
+#define USB0_BASE__INST2_SEG1 0
+#define USB0_BASE__INST2_SEG2 0
+#define USB0_BASE__INST2_SEG3 0
+#define USB0_BASE__INST2_SEG4 0
+
+#define USB0_BASE__INST3_SEG0 0
+#define USB0_BASE__INST3_SEG1 0
+#define USB0_BASE__INST3_SEG2 0
+#define USB0_BASE__INST3_SEG3 0
+#define USB0_BASE__INST3_SEG4 0
+
+#define USB0_BASE__INST4_SEG0 0
+#define USB0_BASE__INST4_SEG1 0
+#define USB0_BASE__INST4_SEG2 0
+#define USB0_BASE__INST4_SEG3 0
+#define USB0_BASE__INST4_SEG4 0
+
+#define USB0_BASE__INST5_SEG0 0
+#define USB0_BASE__INST5_SEG1 0
+#define USB0_BASE__INST5_SEG2 0
+#define USB0_BASE__INST5_SEG3 0
+#define USB0_BASE__INST5_SEG4 0
+
+#define USB0_BASE__INST6_SEG0 0
+#define USB0_BASE__INST6_SEG1 0
+#define USB0_BASE__INST6_SEG2 0
+#define USB0_BASE__INST6_SEG3 0
+#define USB0_BASE__INST6_SEG4 0
+
+#define UVD0_BASE__INST0_SEG0 0x00007800
+#define UVD0_BASE__INST0_SEG1 0x00007E00
+#define UVD0_BASE__INST0_SEG2 0x02403000
+#define UVD0_BASE__INST0_SEG3 0
+#define UVD0_BASE__INST0_SEG4 0
+
+#define UVD0_BASE__INST1_SEG0 0
+#define UVD0_BASE__INST1_SEG1 0
+#define UVD0_BASE__INST1_SEG2 0
+#define UVD0_BASE__INST1_SEG3 0
+#define UVD0_BASE__INST1_SEG4 0
+
+#define UVD0_BASE__INST2_SEG0 0
+#define UVD0_BASE__INST2_SEG1 0
+#define UVD0_BASE__INST2_SEG2 0
+#define UVD0_BASE__INST2_SEG3 0
+#define UVD0_BASE__INST2_SEG4 0
+
+#define UVD0_BASE__INST3_SEG0 0
+#define UVD0_BASE__INST3_SEG1 0
+#define UVD0_BASE__INST3_SEG2 0
+#define UVD0_BASE__INST3_SEG3 0
+#define UVD0_BASE__INST3_SEG4 0
+
+#define UVD0_BASE__INST4_SEG0 0
+#define UVD0_BASE__INST4_SEG1 0
+#define UVD0_BASE__INST4_SEG2 0
+#define UVD0_BASE__INST4_SEG3 0
+#define UVD0_BASE__INST4_SEG4 0
+
+#define UVD0_BASE__INST5_SEG0 0
+#define UVD0_BASE__INST5_SEG1 0
+#define UVD0_BASE__INST5_SEG2 0
+#define UVD0_BASE__INST5_SEG3 0
+#define UVD0_BASE__INST5_SEG4 0
+
+#define UVD0_BASE__INST6_SEG0 0
+#define UVD0_BASE__INST6_SEG1 0
+#define UVD0_BASE__INST6_SEG2 0
+#define UVD0_BASE__INST6_SEG3 0
+#define UVD0_BASE__INST6_SEG4 0
+
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define DCN_BASE__INST0_SEG2 0x000034C0
+#define DCN_BASE__INST0_SEG3 0
+#define DCN_BASE__INST0_SEG4 0
+
+#define DCN_BASE__INST1_SEG0 0
+#define DCN_BASE__INST1_SEG1 0
+#define DCN_BASE__INST1_SEG2 0
+#define DCN_BASE__INST1_SEG3 0
+#define DCN_BASE__INST1_SEG4 0
+
+#define DCN_BASE__INST2_SEG0 0
+#define DCN_BASE__INST2_SEG1 0
+#define DCN_BASE__INST2_SEG2 0
+#define DCN_BASE__INST2_SEG3 0
+#define DCN_BASE__INST2_SEG4 0
+
+#define DCN_BASE__INST3_SEG0 0
+#define DCN_BASE__INST3_SEG1 0
+#define DCN_BASE__INST3_SEG2 0
+#define DCN_BASE__INST3_SEG3 0
+#define DCN_BASE__INST3_SEG4 0
+
+#define DCN_BASE__INST4_SEG0 0
+#define DCN_BASE__INST4_SEG1 0
+#define DCN_BASE__INST4_SEG2 0
+#define DCN_BASE__INST4_SEG3 0
+#define DCN_BASE__INST4_SEG4 0
+#endif
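
The *_BASE__INSTn_SEGm block above records, for every IP block on this ASIC, the MMIO base of each address-space segment of each instance; an all-zero entry simply means that instance or segment does not exist. As a rough sketch of how such a table is typically consumed (hypothetical struct and helper names, not the driver's actual SOC15 plumbing), the absolute offset of a register is the base of the segment it lives in plus the register's offset inside that segment:

#include <stdint.h>

#define MAX_INSTANCE 7
#define MAX_SEGMENT  5

/* Hypothetical mirror of the generated table above. */
struct ip_base_instance {
	uint32_t segment[MAX_SEGMENT];
};

struct ip_base {
	struct ip_base_instance instance[MAX_INSTANCE];
};

/* Only instance 0 of MMHUB has non-zero segments on this ASIC. */
static const struct ip_base MMHUB_BASE = {
	.instance[0] = { .segment = { 0x0001A000, 0x02408800, 0, 0, 0 } },
};

/* Absolute register offset = segment base + offset of the register within the segment. */
static inline uint32_t soc_reg_offset(const struct ip_base *base,
				      unsigned int inst, unsigned int seg,
				      uint32_t reg)
{
	return base->instance[inst].segment[seg] + reg;
}

For example, soc_reg_offset(&MMHUB_BASE, 0, 0, 0x10) addresses a register 0x10 units into MMHUB instance 0, segment 0.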
diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
index 12e196c15bbe..1794ad1fc4fc 100644
--- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -42,7 +42,6 @@ enum soc15_ih_clientid {
SOC15_IH_CLIENTID_SE1SH = 0x0b,
SOC15_IH_CLIENTID_SE2SH = 0x0c,
SOC15_IH_CLIENTID_SE3SH = 0x0d,
- SOC15_IH_CLIENTID_SYSHUB = 0x0e,
SOC15_IH_CLIENTID_UVD1 = 0x0e,
SOC15_IH_CLIENTID_THM = 0x0f,
SOC15_IH_CLIENTID_UVD = 0x10,
@@ -63,7 +62,15 @@ enum soc15_ih_clientid {
SOC15_IH_CLIENTID_MAX,
- SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD
+ SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD,
+ SOC15_IH_CLIENTID_VCN1 = SOC15_IH_CLIENTID_UVD1,
+ SOC15_IH_CLIENTID_SDMA2 = SOC15_IH_CLIENTID_ACP,
+ SOC15_IH_CLIENTID_SDMA3 = SOC15_IH_CLIENTID_DCE,
+ SOC15_IH_CLIENTID_SDMA4 = SOC15_IH_CLIENTID_ISP,
+ SOC15_IH_CLIENTID_SDMA5 = SOC15_IH_CLIENTID_VCE0,
+ SOC15_IH_CLIENTID_SDMA6 = SOC15_IH_CLIENTID_XDMA,
+ SOC15_IH_CLIENTID_SDMA7 = SOC15_IH_CLIENTID_VCE1,
+ SOC15_IH_CLIENTID_VMC1 = SOC15_IH_CLIENTID_PCIE0,
};
#endif
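
The hunk above removes SOC15_IH_CLIENTID_SYSHUB, which collided with SOC15_IH_CLIENTID_UVD1 at 0x0e, and adds aliases so newer IPs (VCN/VCN1, SDMA2-7, VMC1) reuse existing interrupt client ids rather than introducing new ones. If one wanted to document that aliasing at compile time, a check along these lines would do (illustration only, assuming this header is included; it is not part of the patch):

/* The new names are pure aliases; assert the intent without changing the enum. */
_Static_assert(SOC15_IH_CLIENTID_VCN == SOC15_IH_CLIENTID_UVD,
	       "VCN reuses the UVD interrupt client id");
_Static_assert(SOC15_IH_CLIENTID_VCN1 == SOC15_IH_CLIENTID_UVD1,
	       "VCN1 reuses the UVD1 interrupt client id");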
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index 8b383dbe1cda..a0c672889fe4 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -196,10 +196,10 @@ struct v9_mqd {
uint32_t compute_wave_restore_addr_lo;
uint32_t compute_wave_restore_addr_hi;
uint32_t compute_wave_restore_control;
- uint32_t reserved_39;
- uint32_t reserved_40;
- uint32_t reserved_41;
- uint32_t reserved_42;
+ uint32_t compute_static_thread_mgmt_se4;
+ uint32_t compute_static_thread_mgmt_se5;
+ uint32_t compute_static_thread_mgmt_se6;
+ uint32_t compute_static_thread_mgmt_se7;
uint32_t reserved_43;
uint32_t reserved_44;
uint32_t reserved_45;
diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h
index c14ba65a2415..adf1b754666e 100644
--- a/drivers/gpu/drm/amd/include/vega10_enum.h
+++ b/drivers/gpu/drm/amd/include/vega10_enum.h
@@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001,
typedef enum MTYPE {
MTYPE_NC = 0x00000000,
MTYPE_WC = 0x00000001,
+MTYPE_RW = 0x00000001,
MTYPE_CC = 0x00000002,
MTYPE_UC = 0x00000003,
} MTYPE;
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 727c5cff231c..390345f2d601 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o navi10_ppt.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o vega20_ppt.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index bea1587d352d..c195575366a3 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -48,7 +48,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
- hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family;
@@ -924,6 +923,22 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
+static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en)
+ return 0;
+
+ if (hwmgr->hwmgr_func->set_mp1_state)
+ return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
+
+ return 0;
+}
+
static int pp_dpm_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, bool en)
{
@@ -956,6 +971,14 @@ static int pp_dpm_switch_power_profile(void *handle,
workload = hwmgr->workload_setting[index];
}
+ if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+ hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
+ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+ }
+
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
mutex_unlock(&hwmgr->smu_lock);
@@ -1408,6 +1431,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
+ *cap = false;
if (!hwmgr)
return -EINVAL;
@@ -1495,6 +1519,80 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
return ret;
}
+static int pp_asic_reset_mode_2(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->asic_reset == NULL) {
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
+static int pp_smu_i2c_bus_access(void *handle, bool acquire)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
+static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1525,6 +1623,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
+ .set_mp1_state = pp_dpm_set_mp1_state,
.set_power_limit = pp_set_power_limit,
.get_power_limit = pp_get_power_limit,
/* export to DC */
@@ -1550,4 +1649,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_asic_baco_state = pp_set_asic_baco_state,
.get_ppfeature_status = pp_get_ppfeature_status,
.set_ppfeature_status = pp_set_ppfeature_status,
+ .asic_reset_mode_2 = pp_asic_reset_mode_2,
+ .smu_i2c_bus_access = pp_smu_i2c_bus_access,
+ .set_df_cstate = pp_set_df_cstate,
+ .set_xgmi_pstate = pp_set_xgmi_pstate,
};
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 8a3eadeebdcb..99ad4ddbe12f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -21,14 +21,171 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
+#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "vega20_ppt.h"
+#include "arcturus_ppt.h"
+#include "navi10_ppt.h"
+#include "renoir_ppt.h"
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type) #type
+static const char* __smu_message_names[] = {
+ SMU_MESSAGE_TYPES
+};
+
+const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
+{
+ if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+ return "unknown smu message";
+ return __smu_message_names[type];
+}
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(fea) #fea
+static const char* __smu_feature_names[] = {
+ SMU_FEATURE_MASKS
+};
+
+const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
+{
+ if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+ return "unknown smu feature";
+ return __smu_feature_names[feature];
+}
+
+size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
+{
+ size_t size = 0;
+ int ret = 0, i = 0;
+ uint32_t feature_mask[2] = { 0 };
+ int32_t feature_index = 0;
+ uint32_t count = 0;
+ uint32_t sort_feature[SMU_FEATURE_COUNT];
+ uint64_t hw_feature_count = 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ goto failed;
+
+ size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+ feature_mask[1], feature_mask[0]);
+
+ for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+ feature_index = smu_feature_get_index(smu, i);
+ if (feature_index < 0)
+ continue;
+ sort_feature[feature_index] = i;
+ hw_feature_count++;
+ }
+
+ for (i = 0; i < hw_feature_count; i++) {
+ size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+ count++,
+ smu_get_feature_name(smu, sort_feature[i]),
+ i,
+ !!smu_feature_is_enabled(smu, sort_feature[i]) ?
+ "enabled" : "disabled");
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+
+ return size;
+}
+
+static int smu_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ feature_low = (feature_mask >> 0 ) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
+{
+ int ret = 0;
+ uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_2_enabled = 0;
+ uint64_t feature_2_disabled = 0;
+ uint64_t feature_enables = 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ goto out;
+
+ feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
+
+ feature_2_enabled = ~feature_enables & new_mask;
+ feature_2_disabled = feature_enables & ~new_mask;
+
+ if (feature_2_enabled) {
+ ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
+ if (ret)
+ goto out;
+ }
+ if (feature_2_disabled) {
+ ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
+ if (ret)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
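
The two delta masks computed above select exactly the features whose state must change: bits set in new_mask but clear in the currently enabled mask get enabled, and bits enabled now but clear in new_mask get disabled. A standalone arithmetic check of that identity (illustration only, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t enabled  = 0x0000000000000F0FULL; /* features currently enabled  */
	uint64_t new_mask = 0x0000000000000FF0ULL; /* state requested by the user */

	uint64_t to_enable  = ~enabled &  new_mask; /* 0x0F0: requested but off    */
	uint64_t to_disable =  enabled & ~new_mask; /* 0x00F: on but not requested */

	/* Applying both deltas yields exactly the requested mask. */
	assert(((enabled | to_enable) & ~to_disable) == new_mask);
	return 0;
}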
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
@@ -63,8 +220,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max)
{
- int ret = 0, clk_id = 0;
- uint32_t param;
+ int ret = 0;
if (min <= 0 && max <= 0)
return -EINVAL;
@@ -72,27 +228,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
- if (ret)
- return ret;
- }
-
-
+ ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
return ret;
}
@@ -133,15 +269,17 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max)
+ uint32_t *min, uint32_t *max, bool lock_needed)
{
- int ret = 0, clk_id = 0;
- uint32_t param = 0;
uint32_t clock_limit;
+ int ret = 0;
if (!min && !max)
return -EINVAL;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
@@ -165,39 +303,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
-
- return 0;
- }
-
- mutex_lock(&smu->mutex);
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0) {
- ret = -EINVAL;
- goto failed;
- }
-
- param = (clk_id & 0xffff) << 16;
-
- if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, max);
- if (ret)
- goto failed;
+ } else {
+ /*
+	 * Todo: have each ASIC (ASIC_ppt funcs) control the callbacks exposed to
+	 * the core driver, and add helpers for what is common (SMU_v11_x | SMU_v12_x funcs).
+ */
+ ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
- if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, min);
- if (ret)
- goto failed;
- }
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
-failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -241,6 +357,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min_value, uint32_t *max_value)
+{
+ int ret = 0;
+ uint32_t level_count = 0;
+
+ if (!min_value && !max_value)
+ return -EINVAL;
+
+ if (min_value) {
+		/* by default, use the level 0 clock value as the min value */
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
+ if (ret)
+ return ret;
+ }
+
+ if (max_value) {
+ ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
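
smu_get_dpm_level_range() derives the range from the DPM table itself: level 0 supplies the minimum clock and level (count - 1) the maximum. A hedged usage fragment (assumes a struct smu_context *smu in scope; clock units are whatever the SMU reports):

	uint32_t min_clk = 0, max_clk = 0;
	int ret;

	/* Level 0 gives the minimum, the last populated level the maximum. */
	ret = smu_get_dpm_level_range(smu, SMU_SCLK, &min_clk, &max_clk);
	if (!ret)
		pr_info("SCLK DPM range: %u .. %u\n", min_clk, max_clk);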
+
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
enum smu_feature_mask feature_id = 0;
@@ -262,14 +407,26 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
}
if(!smu_feature_is_enabled(smu, feature_id)) {
- pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id);
return false;
}
return true;
}
-
+/**
+ * smu_dpm_set_power_gate - power gate/ungate the specific IP block
+ *
+ * @smu: smu_context pointer
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ *
+ * This API uses no smu->mutex lock protection due to:
+ * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce),
+ *    in which case the caller guarantees race-condition-free access.
+ * 2. Or it is called on a user request to change power_dpm_force_performance_level.
+ *    In that case, smu->mutex protection is already enforced in the parent
+ *    API smu_force_performance_level of the call path.
+ */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -277,14 +434,20 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
- ret = smu_dpm_set_uvd_enable(smu, gate);
+ ret = smu_dpm_set_uvd_enable(smu, !gate);
break;
case AMD_IP_BLOCK_TYPE_VCE:
- ret = smu_dpm_set_vce_enable(smu, gate);
+ ret = smu_dpm_set_vce_enable(smu, !gate);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
break;
+ case AMD_IP_BLOCK_TYPE_SDMA:
+ ret = smu_powergate_sdma(smu, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ ret = smu_dpm_set_jpeg_enable(smu, !gate);
+ break;
default:
break;
}
@@ -292,12 +455,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
-{
- /* not support power state */
- return POWER_STATE_TYPE_DEFAULT;
-}
-
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -319,6 +476,9 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
+	if (!data || !size)
+ return -EINVAL;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
*((uint32_t *)data) = smu->pstate_sclk;
@@ -359,26 +519,26 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
- int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table *table = &smu_table->driver_table;
int table_id = smu_table_get_index(smu, table_index);
+ uint32_t table_size;
+ int ret = 0;
- if (!table_data || table_id >= smu_table->table_count)
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
- table = &smu_table->tables[table_index];
+ table_size = smu_table->tables[table_index].size;
- if (drv2smu)
- memcpy(table->cpu_addr, table_data, table->size);
+ if (drv2smu) {
+ memcpy(table->cpu_addr, table_data, table_size);
+ /*
+		 * Flush hdp cache: to ensure the content seen by
+		 * the GPU is consistent with the CPU.
+ */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ }
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(table->mc_address));
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(table->mc_address));
- if (ret)
- return ret;
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
@@ -386,8 +546,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
if (ret)
return ret;
- if (!drv2smu)
- memcpy(table_data, table->cpu_addr, table->size);
+ if (!drv2smu) {
+ amdgpu_asic_flush_hdp(adev, NULL);
+ memcpy(table_data, table->cpu_addr, table_size);
+ }
return ret;
}
@@ -396,25 +558,46 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
- else if (adev->asic_type >= CHIP_NAVI10)
- return true;
- else
+ else if (adev->asic_type >= CHIP_ARCTURUS) {
+		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return false;
+ else
+ return true;
+ } else
+ return false;
+}
+
+bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
+{
+ if (!is_support_sw_smu(adev))
return false;
+
+ if (adev->asic_type == CHIP_VEGA20)
+ return true;
+
+ return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t powerplay_table_size;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- return smu_table->power_play_table_size;
+ powerplay_table_size = smu_table->power_play_table_size;
+
+ mutex_unlock(&smu->mutex);
+
+ return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
@@ -441,13 +624,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
- mutex_unlock(&smu->mutex);
+
+ /*
+	 * A special hw_fini action (for Navi1x, DPM disablement will be
+	 * skipped) may be needed for custom pptable uploading.
+ */
+ smu->uploading_custom_pp_table = true;
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
- return ret;
+ smu->uploading_custom_pp_table = false;
failed:
mutex_unlock(&smu->mutex);
@@ -480,13 +668,19 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
+
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
+ if (smu->is_apu)
+ return 1;
+
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return 0;
WARN_ON(feature_id > feature->feature_num);
@@ -501,36 +695,28 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
- int ret = 0;
+ int feature_id;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
- mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_id, enable);
- if (ret)
- goto failed;
-
- if (enable)
- test_and_set_bit(feature_id, feature->enabled);
- else
- test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return smu_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return 0;
WARN_ON(feature_id > feature->feature_num);
@@ -546,10 +732,12 @@ int smu_feature_set_supported(struct smu_context *smu,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
@@ -567,12 +755,27 @@ static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ vega20_set_ppt_funcs(smu);
+ break;
case CHIP_NAVI10:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v11_0_set_smu_funcs(smu);
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ navi10_set_ppt_funcs(smu);
+ break;
+ case CHIP_ARCTURUS:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+		smu->od_enabled = false;
+ break;
+ case CHIP_RENOIR:
+ renoir_set_ppt_funcs(smu);
break;
default:
return -EINVAL;
@@ -588,6 +791,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
+ smu->is_apu = false;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -600,11 +804,11 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
+
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT);
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_COMPLETE_INIT,
+ false);
return 0;
}
@@ -694,6 +898,9 @@ static int smu_sw_init(void *handle)
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
+ mutex_init(&smu->sensor_lock);
+ mutex_init(&smu->metrics_lock);
+
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -768,37 +975,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
- int32_t ret = 0;
-
- if (table_count <= 0)
- return -EINVAL;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ uint32_t max_table_size = 0;
+ int ret, i;
- for (i = 0 ; i < table_count; i++) {
- if (tables[i].size == 0)
- continue;
+ /* VRAM allocation for tool table */
+ if (tables[SMU_TABLE_PMSTATUSLOG].size) {
ret = amdgpu_bo_create_kernel(adev,
- tables[i].size,
- tables[i].align,
- tables[i].domain,
- &tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- if (ret)
- goto failed;
+ tables[SMU_TABLE_PMSTATUSLOG].size,
+ tables[SMU_TABLE_PMSTATUSLOG].align,
+ tables[SMU_TABLE_PMSTATUSLOG].domain,
+ &tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for tool table failed!\n");
+ return ret;
+ }
}
- return 0;
-failed:
- for (; i > 0; i--) {
+ /* VRAM allocation for driver table */
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
+ if (i == SMU_TABLE_PMSTATUSLOG)
+ continue;
+
+ if (max_table_size < tables[i].size)
+ max_table_size = tables[i].size;
+ }
+
+ driver_table->size = max_table_size;
+ driver_table->align = PAGE_SIZE;
+ driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ driver_table->size,
+ driver_table->align,
+ driver_table->domain,
+ &driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for driver table failed!\n");
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
}
+
return ret;
}
@@ -806,62 +1032,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
+ struct smu_table *driver_table = &(smu_table->driver_table);
- if (table_count == 0 || tables == NULL)
+ if (!tables)
return 0;
- for (i = 0 ; i < table_count; i++) {
- if (tables[i].size == 0)
- continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- }
-
- return 0;
-}
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
-static int smu_override_pcie_parameters(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
- int ret;
+ amdgpu_bo_free_kernel(&driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
- return ret;
+ return 0;
}
static int smu_smc_table_hw_init(struct smu_context *smu,
@@ -875,9 +1060,11 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return 0;
}
- ret = smu_init_display_count(smu, 0);
- if (ret)
- return ret;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_init_display_count(smu, 0);
+ if (ret)
+ return ret;
+ }
if (initialize) {
/* get boot_values from vbios to set revision, gfxclk, and etc. */
@@ -926,42 +1113,62 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
- /*
- * Copy pptable bo in the vram to smc with SMU MSGs such as
- * SetDriverDramAddr and TransferTableDram2Smu.
- */
- ret = smu_write_pptable(smu);
- if (ret)
- return ret;
-
- /* issue RunAfllBtc msg */
- ret = smu_run_afll_btc(smu);
- if (ret)
- return ret;
+ /* smu_dump_pptable(smu); */
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
- ret = smu_feature_set_allowed_mask(smu);
- if (ret)
- return ret;
+ /*
+ * Copy pptable bo in the vram to smc with SMU MSGs such as
+ * SetDriverDramAddr and TransferTableDram2Smu.
+ */
+ ret = smu_write_pptable(smu);
+ if (ret)
+ return ret;
- ret = smu_system_features_control(smu, true);
- if (ret)
- return ret;
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
+ if (ret)
+ return ret;
+ ret = smu_feature_set_allowed_mask(smu);
+ if (ret)
+ return ret;
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
+ ret = smu_system_features_control(smu, true);
+ if (ret)
+ return ret;
- ret = smu_notify_display_change(smu);
- if (ret)
- return ret;
+ if (adev->asic_type == CHIP_NAVI10) {
+ if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
+ adev->pdev->revision == 0xc3 ||
+ adev->pdev->revision == 0xca ||
+ adev->pdev->revision == 0xcb)) ||
+ (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
+ adev->pdev->revision == 0xf4 ||
+ adev->pdev->revision == 0xf5 ||
+ adev->pdev->revision == 0xf6))) {
+ ret = smu_disable_umc_cdr_12gbps_workaround(smu);
+ if (ret) {
+ pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
+ return ret;
+ }
+ }
+ }
+ }
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_notify_display_change(smu);
+ if (ret)
+ return ret;
- /*
- * Set min deep sleep dce fclk with bootup value from vbios via
- * SetMinDeepSleepDcefclk MSG.
- */
- ret = smu_set_min_dcef_deep_sleep(smu);
- if (ret)
- return ret;
+ /*
+ * Set min deep sleep dce fclk with bootup value from vbios via
+ * SetMinDeepSleepDcefclk MSG.
+ */
+ ret = smu_set_min_dcef_deep_sleep(smu);
+ if (ret)
+ return ret;
+ }
/*
* Set initialized values (get from vbios) to dpm tables context such as
@@ -969,7 +1176,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
* type of clks.
*/
if (initialize) {
- ret = smu_populate_smc_pptable(smu);
+ ret = smu_populate_smc_tables(smu);
if (ret)
return ret;
@@ -978,6 +1185,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -987,7 +1200,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}
@@ -995,8 +1208,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
/*
* Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
*/
- ret = smu_set_tool_table_location(smu);
-
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_set_tool_table_location(smu);
+ }
if (!smu_is_dpm_running(smu))
pr_info("dpm has been disabled\n");
@@ -1052,10 +1266,9 @@ static int smu_free_memory_pool(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
- int ret = 0;
if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
- return ret;
+ return 0;
amdgpu_bo_free_kernel(&memory_pool->bo,
&memory_pool->mc_address,
@@ -1063,6 +1276,30 @@ static int smu_free_memory_pool(struct smu_context *smu)
memset(memory_pool, 0, sizeof(struct smu_table));
+ return 0;
+}
+
+static int smu_start_smc_engine(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ if (adev->asic_type < CHIP_NAVI10) {
+ if (smu->ppt_funcs->load_microcode) {
+ ret = smu->ppt_funcs->load_microcode(smu);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ if (smu->ppt_funcs->check_fw_status) {
+ ret = smu->ppt_funcs->check_fw_status(smu);
+ if (ret)
+ pr_err("SMC is not ready\n");
+ }
+
return ret;
}
@@ -1072,14 +1309,25 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- ret = smu_check_fw_status(smu);
- if (ret) {
- pr_err("SMC firmware status is not correct\n");
- return ret;
- }
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ return ret;
+ }
+
+ if (smu->is_apu) {
+ smu_powergate_sdma(&adev->smu, false);
+ smu_powergate_vcn(&adev->smu, false);
+ smu_powergate_jpeg(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, true);
}
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (!smu->pm_enabled)
+ return 0;
+
ret = smu_feature_init_dpm(smu);
if (ret)
goto failed;
@@ -1117,6 +1365,11 @@ failed:
return ret;
}
+static int smu_stop_dpms(struct smu_context *smu)
+{
+ return smu_system_features_control(smu, false);
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1124,6 +1377,47 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (smu->is_apu) {
+ smu_powergate_sdma(&adev->smu, true);
+ smu_powergate_vcn(&adev->smu, true);
+ smu_powergate_jpeg(&adev->smu, true);
+ }
+
+ if (!smu->pm_enabled)
+ return 0;
+
+	if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_stop_thermal_control(smu);
+ if (ret) {
+			pr_warn("Failed to stop thermal control!\n");
+ return ret;
+ }
+
+ /*
+ * For custom pptable uploading, skip the DPM features
+ * disable process on Navi1x ASICs.
+		 *   - The gfx related features are under RLC control on
+		 *     those ASICs. RLC reinitialization would be needed to
+		 *     re-enable them, which takes considerably more effort.
+ *
+ * - SMU firmware can handle the DPM reenablement
+ * properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+				pr_warn("Failed to stop DPMs!\n");
+ return ret;
+ }
+ }
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -1165,13 +1459,19 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+ bool baco_feature_is_enabled = false;
+
+ if (!smu->pm_enabled)
+ return 0;
+
+	if (!smu->is_apu)
+ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
return ret;
- if (adev->in_gpu_reset && baco_feature_is_enabled) {
+ if (baco_feature_is_enabled) {
ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
if (ret) {
pr_warn("set BACO feature enabled failed, return %d\n", ret);
@@ -1184,6 +1484,8 @@ static int smu_suspend(void *handle)
if (adev->asic_type >= CHIP_NAVI10 &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1194,9 +1496,19 @@ static int smu_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (!smu->pm_enabled)
+ return 0;
+
pr_info("SMU is resuming...\n");
- mutex_lock(&smu->mutex);
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ goto failed;
+ }
ret = smu_smc_table_hw_init(smu, false);
if (ret)
@@ -1206,13 +1518,16 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, true);
+
+ smu->disable_uclk_switch = 0;
pr_info("SMU is resumed successfully!\n");
return 0;
+
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1230,8 +1545,9 @@ int smu_display_configuration_change(struct smu_context *smu,
mutex_lock(&smu->mutex);
- smu_set_deep_sleep_dcefclk(smu,
- display_config->min_dcef_deep_sleep_set_clk / 100);
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -1350,7 +1666,8 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
+
+ if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1383,43 +1700,6 @@ static int smu_enable_umd_pstate(void *handle,
return 0;
}
-static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
-{
- int ret = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
- break;
- case AMD_DPM_FORCED_LEVEL_MANUAL:
- case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
- default:
- break;
- }
- return ret;
-}
-
int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings)
@@ -1431,6 +1711,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (!smu->pm_enabled)
return -EINVAL;
+
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1439,8 +1720,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (!smu->pm_enabled)
- return -EINVAL;
ret = smu_apply_clocks_adjust_rules(smu);
if (ret) {
pr_err("Failed to apply clocks adjust rules!");
@@ -1448,7 +1727,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (!skip_display_settings) {
- ret = smu_notify_smc_dispaly_config(smu);
+ ret = smu_notify_smc_display_config(smu);
if (ret) {
pr_err("Failed to notify smc display config!");
return ret;
@@ -1458,10 +1737,12 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
- ret = smu_default_set_performance_level(smu, level);
+ pr_err("Failed to set performance level!");
+ return ret;
}
- if (!ret)
- smu_dpm_ctx->dpm_level = level;
+
+ /* update the saved copy */
+ smu_dpm_ctx->dpm_level = level;
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1470,7 +1751,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
workload = smu->workload_setting[index];
if (smu->power_profile_mode != workload)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
}
return ret;
@@ -1478,18 +1759,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id)
+ enum amd_pp_task task_id,
+ bool lock_needed)
{
int ret = 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_set_cpu_power_state(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1500,15 +1785,55 @@ int smu_handle_task(struct smu_context *smu,
break;
}
+out:
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
+int smu_switch_power_profile(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ long workload;
+ uint32_t index;
+
+ if (!smu->pm_enabled)
+ return -EINVAL;
+
+	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
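+	/*
+	 * Update the request mask below, then let fls() pick the
+	 * highest-priority workload still requested; slot 0 (bootup
+	 * default) is the fallback.
+	 */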
+ if (!en) {
+ smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+ } else {
+ smu->workload_mask |= (1 << smu->workload_prority[type]);
+ index = fls(smu->workload_mask);
+ index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+ }
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ smu_set_power_profile_mode(smu, &workload, 0, false);
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
@@ -1520,27 +1845,24 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
- int ret = 0;
- int i;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- for (i = 0; i < smu->adev->num_ip_blocks; i++) {
- if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
- break;
- }
+ mutex_lock(&smu->mutex);
+ ret = smu_enable_umd_pstate(smu, &level);
+ if (ret) {
+ mutex_unlock(&smu->mutex);
+ return ret;
+ }
- smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE);
- if (ret)
- return ret;
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
- mutex_lock(&smu->mutex);
- smu_dpm_ctx->dpm_level = level;
mutex_unlock(&smu->mutex);
return ret;
@@ -1557,6 +1879,142 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_debug("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+	/*
+	 * The SMC may not be fully ready yet. That is
+	 * expected when the IP is masked, so just
+	 * return without error.
+	 */
+ if (!smu->pm_enabled)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ /* some asics may not support those messages */
+ if (smu_msg_get_index(smu, msg) < 0) {
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ ret = smu_send_smc_msg(smu, msg);
+ if (ret)
+ pr_err("[PrepareMp1] Failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ int ret = 0;
+
+	/*
+	 * The SMC may not be fully ready yet. That is
+	 * expected when the IP is masked, so just
+	 * return without error.
+	 */
+ if (!smu->pm_enabled)
+ return 0;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ if (ret)
+ pr_err("[SetDfCstate] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+ void *watermarks_table = smu->smu_table.watermarks_table;
+
+ if (!watermarks_table)
+ return -EINVAL;
+
+ return smu_update_table(smu,
+ SMU_TABLE_WATERMARKS,
+ 0,
+ watermarks_table,
+ true);
+}
+
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ void *table = smu->smu_table.watermarks_table;
+
+ if (!table)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
@@ -1584,3 +2042,587 @@ const struct amdgpu_ip_block_version smu_v11_0_ip_block =
.rev = 0,
.funcs = &smu_ip_funcs,
};
+
+const struct amdgpu_ip_block_version smu_v12_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 12,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
+
+int smu_load_microcode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->load_microcode)
+ ret = smu->ppt_funcs->load_microcode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_check_fw_status(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->check_fw_status)
+ ret = smu->ppt_funcs->check_fw_status(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_gfx_cgpg)
+ ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_rpm)
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_limit)
+ ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_limit)
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->print_clk_levels)
+ ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_od_percentage)
+ ret = smu->ppt_funcs->get_od_percentage(smu, type);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_od_percentage)
+ ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->od_edit_dpm_table)
+ ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->read_sensor)
+ ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_profile_mode)
+ ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_control_mode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_control_mode)
+ ret = smu->ppt_funcs->get_fan_control_mode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_control_mode(struct smu_context *smu, int value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_control_mode)
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_percent)
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_rpm)
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs->set_active_display_count)
+ ret = smu->ppt_funcs->set_active_display_count(smu, count);
+
+ return ret;
+}
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type)
+ ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_high_clocks)
+ ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_latency)
+ ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_voltage)
+ ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_clock_voltage_request)
+ ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_disable_memory_clock_switch)
+ ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_notify_smu_enable_pwe(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->notify_smu_enable_pwe)
+ ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_xgmi_pstate)
+ ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_azalia_d3_pme(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_azalia_d3_pme)
+ ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+bool smu_baco_is_support(struct smu_context *smu)
+{
+ bool ret = false;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
+ ret = smu->ppt_funcs->baco_is_support(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
+{
+	if (!smu->ppt_funcs->baco_get_state)
+		return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+ *state = smu->ppt_funcs->baco_get_state(smu);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_baco_enter(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_enter)
+ ret = smu->ppt_funcs->baco_enter(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_exit(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_exit)
+ ret = smu->ppt_funcs->baco_exit(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->mode2_reset)
+ ret = smu->ppt_funcs->mode2_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+ ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_uclk_dpm_states)
+ ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_current_power_state)
+ pm_state = smu->ppt_funcs->get_current_power_state(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return pm_state;
+}
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_dpm_clock_table)
+ ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+ uint32_t ret = 0;
+
+ if (smu->ppt_funcs->get_pptable_power_limit)
+ ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+ return ret;
+}
+
+int smu_send_smc_msg(struct smu_context *smu,
+ enum smu_message_type msg)
+{
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu, msg, 0);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
new file mode 100644
index 000000000000..14ba6aa876e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -0,0 +1,2315 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_internal.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if_arcturus.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "arcturus_ppt.h"
+#include "smu_v11_0_pptable.h"
+#include "arcturus_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+
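+/* Map the RAS EEPROM i2c accessor back to its owning amdgpu_device via container_of(). */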
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+
+#define CTF_OFFSET_EDGE 5
+#define CTF_OFFSET_HOTSPOT 5
+#define CTF_OFFSET_HBM 5
+
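+/* Mapping helpers: mark the entry as valid (the leading 1) and record the ASIC-specific index. */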
+#define MSG_MAP(msg, index) \
+ [SMU_MSG_##msg] = {1, (index)}
+#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
+ [smu_feature] = {1, (arcturus_feature)}
+
+#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF
+#define SMU_FEATURES_LOW_SHIFT 0
+#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+#define SMU_FEATURES_HIGH_SHIFT 32
+
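+/* Aggregate mask of the DPM features handled by the SMC firmware. */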
+#define SMC_DPM_FEATURE ( \
+ FEATURE_DPM_PREFETCHER_MASK | \
+ FEATURE_DPM_GFXCLK_MASK | \
+ FEATURE_DPM_UCLK_MASK | \
+ FEATURE_DPM_SOCCLK_MASK | \
+ FEATURE_DPM_MP0CLK_MASK | \
+ FEATURE_DPM_FCLK_MASK | \
+ FEATURE_DPM_XGMI_MASK)
+
+/* possible frequency drift (1 MHz) */
+#define EPSILON 1
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
+ MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow),
+ MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures),
+ MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow),
+ MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh),
+ MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow),
+ MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
+ MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable),
+ MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable),
+ MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh),
+ MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow),
+ MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq),
+ MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask),
+ MSG_MAP(SetDfSwitchType, PPSMC_MSG_SetDfSwitchType),
+ MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm),
+ MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit),
+ MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0),
+ MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0),
+ MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1),
+ MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload),
+ MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset),
+ MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
+ MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
+ MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData),
+ MSG_MAP(WaflTest, PPSMC_MSG_WaflTest),
+ MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
+ MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, PPCLK_GFXCLK),
+ CLK_MAP(SCLK, PPCLK_GFXCLK),
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(DCLK, PPCLK_DCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
+ FEA_MAP(DPM_PREFETCHER),
+ FEA_MAP(DPM_GFXCLK),
+ FEA_MAP(DPM_UCLK),
+ FEA_MAP(DPM_SOCCLK),
+ FEA_MAP(DPM_FCLK),
+ FEA_MAP(DPM_MP0CLK),
+ ARCTURUS_FEA_MAP(SMU_FEATURE_XGMI_BIT, FEATURE_DPM_XGMI_BIT),
+ FEA_MAP(DS_GFXCLK),
+ FEA_MAP(DS_SOCCLK),
+ FEA_MAP(DS_LCLK),
+ FEA_MAP(DS_FCLK),
+ FEA_MAP(DS_UCLK),
+ FEA_MAP(GFX_ULV),
+ ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT),
+ FEA_MAP(RSMU_SMN_CG),
+ FEA_MAP(WAFL_CG),
+ FEA_MAP(PPT),
+ FEA_MAP(TDC),
+ FEA_MAP(APCC_PLUS),
+ FEA_MAP(VR0HOT),
+ FEA_MAP(VR1HOT),
+ FEA_MAP(FW_CTF),
+ FEA_MAP(FAN_CONTROL),
+ FEA_MAP(THERMAL),
+ FEA_MAP(OUT_OF_BAND_MONITOR),
+ FEA_MAP(TEMP_DEPENDENT_VMIN),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PPTABLE),
+ TAB_MAP(AVFS),
+ TAB_MAP(AVFS_PSM_DEBUG),
+ TAB_MAP(AVFS_FUSE_OVERRIDE),
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(DRIVER_SMU_CONFIG),
+ TAB_MAP(OVERDRIVE),
+ TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ACTIVITY_MONITOR_COEFF),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+ PWR_MAP(AC),
+ PWR_MAP(DC),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+};
+
+static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_message_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_clk_map[index];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU clk: %d\n", index);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_FEATURE_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_feature_mask_map[index];
+ if (!(mapping.valid_mapping)) {
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_table_map[index];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU table: %d\n", index);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_POWER_SOURCE_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_pwr_src_map[index];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU power source: %d\n", index);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
+ return -EINVAL;
+
+ mapping = arcturus_workload_map[profile];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ return 0;
+}
+
+static int arcturus_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context)
+ return -EINVAL;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct arcturus_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ if (smu_dpm->golden_dpm_context)
+ return -EINVAL;
+
+ smu_dpm->golden_dpm_context = kzalloc(sizeof(struct arcturus_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->golden_dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct arcturus_dpm_table);
+
+ smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_current_power_state)
+ return -ENOMEM;
+
+ smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_request_power_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+arcturus_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ /* pptable will handle the features to enable */
+ memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+
+ return 0;
+}
+
+static int
+arcturus_set_single_dpm_table(struct smu_context *smu,
+ struct arcturus_single_dpm_table *single_dpm_table,
+ PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels = 0, clk;
+
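+	/* Level index 0xFF asks the SMU for the number of DPM levels of this clock. */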
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ if (ret) {
+ pr_err("[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &num_of_levels);
+ if (!num_of_levels) {
+ pr_err("[%s] number of clk levels is invalid!\n", __func__);
+ return -EINVAL;
+ }
+
+ single_dpm_table->count = num_of_levels;
+ for (i = 0; i < num_of_levels; i++) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | i));
+ if (ret) {
+ pr_err("[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, &clk);
+ if (!clk) {
+ pr_err("[%s] clk value is invalid!\n", __func__);
+ return -EINVAL;
+ }
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+ }
+ return 0;
+}
+
+static void arcturus_init_single_dpm_state(struct arcturus_dpm_state *dpm_state)
+{
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
+}
+
+static int arcturus_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct arcturus_dpm_table *dpm_table = NULL;
+ struct arcturus_single_dpm_table *single_dpm_table;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ /* socclk */
+ single_dpm_table = &(dpm_table->soc_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[%s] failed to get socclk dpm levels!\n", __func__);
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* gfxclk */
+ single_dpm_table = &(dpm_table->gfx_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get gfxclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* memclk */
+ single_dpm_table = &(dpm_table->mem_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get memclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* fclk */
+ single_dpm_table = &(dpm_table->fclk_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_FCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get fclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ memcpy(smu_dpm->golden_dpm_context, dpm_table,
+ sizeof(struct arcturus_dpm_table));
+
+ return 0;
+}
+
+static int arcturus_check_powerplay_table(struct smu_context *smu)
+{
+ return 0;
+}
+
+static int arcturus_store_powerplay_table(struct smu_context *smu)
+{
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret = 0;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
+ sizeof(PPTable_t));
+
+ table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+
+ mutex_lock(&smu_baco->mutex);
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
+ powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
+ smu_baco->platform_support = true;
+ mutex_unlock(&smu_baco->mutex);
+
+ return ret;
+}
+
+static int arcturus_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_v4_6 *smc_dpm_table;
+ int index, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+ smc_dpm_table->table_header.format_revision,
+ smc_dpm_table->table_header.content_revision);
+
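+	/* Overlay the board-specific parameters (MaxVoltageStepGfx onward) onto the driver pptable. */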
+ if ((smc_dpm_table->table_header.format_revision == 4) &&
+ (smc_dpm_table->table_header.content_revision == 6))
+ memcpy(&smc_pptable->MaxVoltageStepGfx,
+ &smc_dpm_table->maxvoltagestepgfx,
+ sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));
+
+ return 0;
+}
+
+static int arcturus_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ if (ret) {
+ pr_err("RunAfllBtc failed!\n");
+ return ret;
+ }
+
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+}
+
+static int arcturus_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct arcturus_dpm_table *dpm_table = NULL;
+ struct arcturus_single_dpm_table *gfx_table = NULL;
+ struct arcturus_single_dpm_table *mem_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+ gfx_table = &(dpm_table->gfx_table);
+ mem_table = &(dpm_table->mem_table);
+
+ smu->pstate_sclk = gfx_table->dpm_levels[0].value;
+ smu->pstate_mclk = mem_table->dpm_levels[0].value;
+
+ if (gfx_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL) {
+ smu->pstate_sclk = gfx_table->dpm_levels[ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL].value;
+ smu->pstate_mclk = mem_table->dpm_levels[ARCTURUS_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ smu->pstate_sclk = smu->pstate_sclk * 100;
+ smu->pstate_mclk = smu->pstate_mclk * 100;
+
+ return 0;
+}
+
+static int arcturus_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct arcturus_single_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int arcturus_freqs_in_same_level(int32_t frequency1,
+ int32_t frequency2)
+{
+ return (abs(frequency1 - frequency2) <= EPSILON);
+}
+
+static int arcturus_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf)
+{
+	int i, size = 0;
+	uint32_t now;
+ int ret = 0;
+ struct pp_clock_levels_with_latency clocks;
+ struct arcturus_single_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct arcturus_dpm_table *dpm_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_SCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
+		/*
+		 * When DPM is disabled there is only one clock level,
+		 * and it is safe to assume that is always the current clock.
+		 */
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : ""));
+ break;
+
+ case SMU_MCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_UCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : ""));
+ break;
+
+ case SMU_SOCCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_SOCCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : ""));
+ break;
+
+ case SMU_FCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_FCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->fclk_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get fclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : ""));
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask)
+{
+ struct arcturus_single_dpm_table *single_dpm_table;
+ struct arcturus_dpm_table *dpm_table =
+ smu->smu_dpm.dpm_context;
+ uint32_t freq;
+ int ret = 0;
+
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
+ single_dpm_table = &(dpm_table->gfx_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
+ single_dpm_table = &(dpm_table->mem_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
+ single_dpm_table = &(dpm_table->soc_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int arcturus_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, uint32_t mask)
+{
+ struct arcturus_dpm_table *dpm_table;
+ struct arcturus_single_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
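+	/* The mask selects DPM levels; its lowest/highest set bits bound the soft range. */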
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ switch (type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ /*
+ * Should not arrive here since Arcturus does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int arcturus_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ if (!range)
+ return -EINVAL;
+
+ range->max = pptable->TedgeLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_crit_max = pptable->ThotspotLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = pptable->TmemLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_HBM) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int arcturus_get_metrics_table(struct smu_context *smu,
+ SmuMetrics_t *metrics_table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
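+	/* Serve the cached copy unless it is older than HZ / 1000 jiffies (~1ms). */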
+ mutex_lock(&smu->metrics_lock);
+ if (!smu_table->metrics_time ||
+ time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)smu_table->metrics_table, false);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+ smu_table->metrics_time = jiffies;
+ }
+
+ memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int arcturus_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *value = metrics.AverageGfxActivity;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ *value = metrics.AverageUclkActivity;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arcturus_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ *value = metrics.AverageSocketPower << 8;
+
+ return 0;
+}
+
+static int arcturus_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ *value = metrics.TemperatureHotspot *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ *value = metrics.TemperatureEdge *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ *value = metrics.TemperatureHBM *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arcturus_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint32_t *)data = pptable->FanMaximumRpm;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = arcturus_get_current_activity_percent(smu,
+ sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = arcturus_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = arcturus_thermal_get_temperature(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
+ }
+ mutex_unlock(&smu->sensor_lock);
+
+ return ret;
+}
+
+static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ *speed = metrics.CurrFanSpeed;
+
+ return ret;
+}
+
+static int arcturus_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t percent, current_rpm;
+ int ret = 0;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = arcturus_get_fan_speed_rpm(smu, &current_rpm);
+ if (ret)
+ return ret;
+
+ percent = current_rpm * 100 / pptable->FanMaximumRpm;
+ *speed = percent > 100 ? 100 : percent;
+
+ return ret;
+}
+
+static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ static SmuMetrics_t metrics;
+ int ret = 0, clk_id = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (clk_id) {
+ case PPCLK_GFXCLK:
+ /*
+ * CurrClock[clk_id] can provide accurate
+ * output only when the dpm feature is enabled.
+ * We can use Average_* for dpm disabled case.
+ * But this is available for gfxclk/uclk/socclk.
+ */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
+ *value = metrics.CurrClock[PPCLK_GFXCLK];
+ else
+ *value = metrics.AverageGfxclkFrequency;
+ break;
+ case PPCLK_UCLK:
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
+ *value = metrics.CurrClock[PPCLK_UCLK];
+ else
+ *value = metrics.AverageUclkFrequency;
+ break;
+ case PPCLK_SOCCLK:
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
+ *value = metrics.CurrClock[PPCLK_SOCCLK];
+ else
+ *value = metrics.AverageSocclkFrequency;
+ break;
+ default:
+ *value = metrics.CurrClock[clk_id];
+ break;
+ }
+
+ return ret;
+}
+
+static uint32_t arcturus_find_lowest_dpm_level(struct arcturus_single_dpm_table *table)
+{
+ uint32_t i;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static uint32_t arcturus_find_highest_dpm_level(struct arcturus_single_dpm_table *table)
+{
+ int i = 0;
+
+ if (table->count <= 0) {
+ pr_err("[%s] DPM Table has no entry!", __func__);
+ return 0;
+ }
+ if (table->count > MAX_DPM_NUMBER) {
+ pr_err("[%s] DPM Table has too many entries!", __func__);
+ return MAX_DPM_NUMBER - 1;
+ }
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ struct arcturus_dpm_table *dpm_table =
+ (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
+ uint32_t soft_level;
+ int ret = 0;
+
+ /* gfxclk */
+ if (highest)
+ soft_level = arcturus_find_highest_dpm_level(&(dpm_table->gfx_table));
+ else
+ soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->gfx_table));
+
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+		pr_err("Failed to upload dpm max level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ if (hive)
+ /*
+ * Force XGMI Pstate to highest or lowest
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0);
+
+ return ret;
+}
+
+static int arcturus_unforce_dpm_levels(struct smu_context *smu)
+{
+ struct arcturus_dpm_table *dpm_table =
+ (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
+ /* gfxclk */
+ soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->gfx_table));
+ soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->gfx_table));
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_levels[soft_min_level].value;
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload DPM Bootup Levels!");
+ return ret;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload DPM Max Levels!");
+ return ret;
+ }
+
+ if (hive)
+ /*
+ * Reset XGMI Pstate back to default
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, 0);
+
+ return ret;
+}
+
+static int
+arcturus_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ struct arcturus_dpm_table *dpm_table =
+ (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct arcturus_single_dpm_table *gfx_dpm_table;
+ struct arcturus_single_dpm_table *mem_dpm_table;
+ struct arcturus_single_dpm_table *soc_dpm_table;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ gfx_dpm_table = &dpm_table->gfx_table;
+ mem_dpm_table = &dpm_table->mem_table;
+ soc_dpm_table = &dpm_table->soc_table;
+
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
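+	/* Default to the UMD pstate levels when the DPM tables have enough entries. */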
+ if (gfx_dpm_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL;
+ *mclk_mask = ARCTURUS_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
+ }
+
+ return 0;
+}
+
+static int arcturus_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t asic_default_power_limit = 0;
+ int ret = 0;
+ int power_src;
+
+ if (!smu->power_limit) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ power_src << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, &asic_default_power_limit);
+ } else {
+ /* the last hope to figure out the ppt limit */
+ if (!pptable) {
+ pr_err("Cannot get PPT limit due to pptable missing!");
+ return -EINVAL;
+ }
+ asic_default_power_limit =
+ pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+ }
+
+ smu->power_limit = asic_default_power_limit;
+ }
+
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
+ else
+ *limit = smu->power_limit;
+
+ return 0;
+}
+
+static int arcturus_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ struct amdgpu_device *adev = smu->adev;
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "UseRlcBusy",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+ int result = 0;
+ uint32_t smu_version;
+
+ if (!buf)
+ return -EINVAL;
+
+ result = smu_get_smc_version(smu, NULL, &smu_version);
+ if (result)
+ return result;
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev))
+ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9], title[10]);
+ else
+ size += sprintf(buf + size, "%16s\n",
+ title[0]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on arcturus.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+ result = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type,
+ (void *)(&activity_monitor),
+ false);
+ if (result) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+ }
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor.Gfx_FPS,
+ activity_monitor.Gfx_UseRlcBusy,
+ activity_monitor.Gfx_MinActiveFreqType,
+ activity_monitor.Gfx_MinActiveFreq,
+ activity_monitor.Gfx_BoosterFreqType,
+ activity_monitor.Gfx_BoosterFreq,
+ activity_monitor.Gfx_PD_Data_limit_c,
+ activity_monitor.Gfx_PD_Data_error_coeff,
+ activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "UCLK",
+ activity_monitor.Mem_FPS,
+ activity_monitor.Mem_UseRlcBusy,
+ activity_monitor.Mem_MinActiveFreqType,
+ activity_monitor.Mem_MinActiveFreq,
+ activity_monitor.Mem_BoosterFreqType,
+ activity_monitor.Mem_BoosterFreq,
+ activity_monitor.Mem_PD_Data_limit_c,
+ activity_monitor.Mem_PD_Data_error_coeff,
+ activity_monitor.Mem_PD_Data_error_rate_coeff);
+ }
+ }
+
+ return size;
+}
+
+static int arcturus_set_power_profile_mode(struct smu_context *smu,
+ long *input,
+ uint32_t size)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ int workload_type = 0;
+ uint32_t profile_mode = input[size];
+ int ret = 0;
+ uint32_t smu_version;
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
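+ /*
+ * For the CUSTOM profile on newer SMU firmware, read back the
+ * activity monitor table, patch the selected clock domain and
+ * write the table back before switching workloads.
+ */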
+ if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
+ (smu_version >= 0x360d00)) {
+ ret = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ false);
+ if (ret) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[1];
+ activity_monitor.Gfx_UseRlcBusy = input[2];
+ activity_monitor.Gfx_MinActiveFreqType = input[3];
+ activity_monitor.Gfx_MinActiveFreq = input[4];
+ activity_monitor.Gfx_BoosterFreqType = input[5];
+ activity_monitor.Gfx_BoosterFreq = input[6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 1: /* Uclk */
+ activity_monitor.Mem_FPS = input[1];
+ activity_monitor.Mem_UseRlcBusy = input[2];
+ activity_monitor.Mem_MinActiveFreqType = input[3];
+ activity_monitor.Mem_MinActiveFreq = input[4];
+ activity_monitor.Mem_BoosterFreqType = input[5];
+ activity_monitor.Mem_BoosterFreq = input[6];
+ activity_monitor.Mem_PD_Data_limit_c = input[7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ break;
+ }
+
+ ret = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ true);
+ if (ret) {
+ pr_err("[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on arcturus.
+ */
+ workload_type = smu_workload_get_type(smu, profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on arcturus\n", profile_mode);
+ return -EINVAL;
+ }
+
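+ /* Notify the SMU of the active workload via a one-hot mask */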
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static void arcturus_dump_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int i;
+
+ pr_info("Dumped PPTable:\n");
+
+ pr_info("Version = 0x%08x\n", pptable->Version);
+
+ pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
+ pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
+
+ for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
+ pr_info("SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]);
+ pr_info("SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]);
+ }
+
+ pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
+ pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
+ pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
+ pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
+
+ pr_info("TedgeLimit = %d\n", pptable->TedgeLimit);
+ pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit);
+ pr_info("TmemLimit = %d\n", pptable->TmemLimit);
+ pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
+ pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
+ pr_info("Tvr_socLimit = %d\n", pptable->Tvr_socLimit);
+ pr_info("FitLimit = %d\n", pptable->FitLimit);
+
+ pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
+ pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
+
+ pr_info("ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask);
+
+ pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
+ pr_info("UlvPadding = 0x%08x\n", pptable->UlvPadding);
+
+ pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
+ pr_info("Padding234[0] = 0x%02x\n", pptable->Padding234[0]);
+ pr_info("Padding234[1] = 0x%02x\n", pptable->Padding234[1]);
+ pr_info("Padding234[2] = 0x%02x\n", pptable->Padding234[2]);
+
+ pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
+ pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
+ pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
+ pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
+
+ pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
+ pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
+
+ pr_info("[PPCLK_GFXCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
+
+ pr_info("[PPCLK_VCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_VCLK].padding,
+ pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_VCLK].Padding16);
+
+ pr_info("[PPCLK_DCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_DCLK].padding,
+ pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_DCLK].Padding16);
+
+ pr_info("[PPCLK_SOCCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
+
+ pr_info("[PPCLK_UCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_UCLK].padding,
+ pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
+
+ pr_info("[PPCLK_FCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_FCLK].padding,
+ pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
+
+ pr_info("FreqTableGfx\n");
+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
+
+ pr_info("FreqTableVclk\n");
+ for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
+
+ pr_info("FreqTableDclk\n");
+ for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
+
+ pr_info("FreqTableSocclk\n");
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
+
+ pr_info("FreqTableUclk\n");
+ for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
+
+ pr_info("FreqTableFclk\n");
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
+
+ pr_info("Mp0clkFreq\n");
+ for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
+
+ pr_info("Mp0DpmVoltage\n");
+ for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
+
+ pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
+ pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
+ pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]);
+ pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]);
+ pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]);
+ pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]);
+ pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
+ pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource);
+ pr_info("Padding456 = 0x%x\n", pptable->Padding456);
+
+ pr_info("EnableTdpm = %d\n", pptable->EnableTdpm);
+ pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
+ pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
+ pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
+
+ pr_info("FanStopTemp = %d\n", pptable->FanStopTemp);
+ pr_info("FanStartTemp = %d\n", pptable->FanStartTemp);
+
+ pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
+ pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
+ pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
+ pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
+ pr_info("FanGainVrMem = %d\n", pptable->FanGainVrMem);
+ pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
+
+ pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
+ pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
+ pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
+ pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
+ pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
+ pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
+ pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
+ pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
+ pr_info("FanTempInputSelect = %d\n", pptable->FanTempInputSelect);
+
+ pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
+ pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
+ pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
+ pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
+
+ pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
+ pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
+ pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
+ pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
+
+ pr_info("dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->dBtcGbGfxPll.a,
+ pptable->dBtcGbGfxPll.b,
+ pptable->dBtcGbGfxPll.c);
+ pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->dBtcGbGfxAfll.a,
+ pptable->dBtcGbGfxAfll.b,
+ pptable->dBtcGbGfxAfll.c);
+ pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->dBtcGbSoc.a,
+ pptable->dBtcGbSoc.b,
+ pptable->dBtcGbSoc.c);
+
+ pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
+ pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
+ pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
+ pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
+ pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
+ pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
+
+ pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
+ pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
+
+ pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
+ pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
+
+ pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
+ pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
+ pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
+
+ pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
+ pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
+
+ pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
+
+ pr_info("XgmiDpmPstates\n");
+ for (i = 0; i < NUM_XGMI_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]);
+ pr_info("XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
+ pr_info("XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
+
+ pr_info("VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin);
+ pr_info("VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin);
+ pr_info("VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp);
+ pr_info("VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp);
+ pr_info("VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp);
+ pr_info("VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp);
+ pr_info("VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis);
+ pr_info("VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis);
+
+ pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides);
+ pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation0.a,
+ pptable->ReservedEquation0.b,
+ pptable->ReservedEquation0.c);
+ pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation1.a,
+ pptable->ReservedEquation1.b,
+ pptable->ReservedEquation1.c);
+ pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation2.a,
+ pptable->ReservedEquation2.b,
+ pptable->ReservedEquation2.c);
+ pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation3.a,
+ pptable->ReservedEquation3.b,
+ pptable->ReservedEquation3.c);
+
+ pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
+ pr_info("PaddingUlv = %d\n", pptable->PaddingUlv);
+
+ pr_info("TotalPowerConfig = %d\n", pptable->TotalPowerConfig);
+ pr_info("TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1);
+ pr_info("TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2);
+
+ pr_info("PccThresholdLow = %d\n", pptable->PccThresholdLow);
+ pr_info("PccThresholdHigh = %d\n", pptable->PccThresholdHigh);
+
+ pr_info("Board Parameters:\n");
+ pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
+ pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
+
+ pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
+ pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
+ pr_info("VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping);
+ pr_info("BoardVrMapping = 0x%x\n", pptable->BoardVrMapping);
+
+ pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
+ pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
+
+ pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
+ pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset);
+ pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
+
+ pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
+ pr_info("SocOffset = 0x%x\n", pptable->SocOffset);
+ pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
+
+ pr_info("MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent);
+ pr_info("MemOffset = 0x%x\n", pptable->MemOffset);
+ pr_info("Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem);
+
+ pr_info("BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent);
+ pr_info("BoardOffset = 0x%x\n", pptable->BoardOffset);
+ pr_info("Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput);
+
+ pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio);
+ pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
+ pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio);
+ pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
+
+ pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
+ pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
+ pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
+
+ pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
+ pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
+ pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
+
+ pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
+ pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
+ pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
+
+ pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
+ pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
+ pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
+
+ for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
+ pr_info("I2cControllers[%d]:\n", i);
+ pr_info(" .Enabled = %d\n",
+ pptable->I2cControllers[i].Enabled);
+ pr_info(" .SlaveAddress = 0x%x\n",
+ pptable->I2cControllers[i].SlaveAddress);
+ pr_info(" .ControllerPort = %d\n",
+ pptable->I2cControllers[i].ControllerPort);
+ pr_info(" .ControllerName = %d\n",
+ pptable->I2cControllers[i].ControllerName);
+ pr_info(" .ThermalThrottler = %d\n",
+ pptable->I2cControllers[i].ThermalThrotter);
+ pr_info(" .I2cProtocol = %d\n",
+ pptable->I2cControllers[i].I2cProtocol);
+ pr_info(" .Speed = %d\n",
+ pptable->I2cControllers[i].Speed);
+ }
+
+ pr_info("MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled);
+ pr_info("DramBitWidth = %d\n", pptable->DramBitWidth);
+
+ pr_info("TotalBoardPower = %d\n", pptable->TotalBoardPower);
+
+ pr_info("XgmiLinkSpeed\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
+ pr_info("XgmiLinkWidth\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
+ pr_info("XgmiFclkFreq\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
+ pr_info("XgmiSocVoltage\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
+}
+
+static bool arcturus_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t feature_mask[2];
+ unsigned long feature_enabled;
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return false;
+
+ feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32));
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (ret) {
+ pr_err("[EnableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (ret) {
+ pr_err("[DisableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
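+ /* Build one software I2C command per byte of the transaction */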
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+ /* The first two bytes always write the 2-byte EEPROM address */
+ if (i < 2)
+ cmd->Cmd = 1;
+ else
+ cmd->Cmd = write;
+
+ /* Add RESTART for read after address filled */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP in the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless of read or write to simplify the code */
+ cmd->RegisterAddr = data[i];
+ }
+}
+
+static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i;
+ int ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+
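+ /*
+ * The request goes to the SMU through the I2C command table;
+ * the SMU writes the reply back into the same driver table.
+ */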
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].Data;
+
+ pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else {
+ pr_err("arcturus_i2c_eeprom_read_data - error occurred: %x", ret);
+ }
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ int ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+ * According to EEPROM spec there is a MAX of 10 ms required for
+ * EEPROM to flush internal RX buffer after STOP was issued at the
+ * end of write transaction. During this time the EEPROM will not be
+ * responsive to any more commands - so wait a bit more.
+ */
+ msleep(10);
+
+ } else {
+ pr_err("arcturus_i2c_write - error occurred: %x", ret);
+ }
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes per
+ * request, so the data has to be split into chunks and each chunk
+ * sent separately.
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
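+ /* Send all full-size chunks first; the remainder is handled below */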
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+ /* Insert the EEPROM destination address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
+
+static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
+ .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
+ .functionality = arcturus_i2c_eeprom_i2c_func,
+};
+
+static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_context *smu = &adev->smu;
+ int res;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_eeprom_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->pm_enabled)
+ return;
+
+ i2c_del_adapter(control);
+}
+
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
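+/* Arcturus-specific callbacks combined with the generic smu_v11_0 helpers */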
+static const struct pptable_funcs arcturus_ppt_funcs = {
+ /* translate smu index into arcturus specific index */
+ .get_smu_msg_index = arcturus_get_smu_msg_index,
+ .get_smu_clk_index = arcturus_get_smu_clk_index,
+ .get_smu_feature_index = arcturus_get_smu_feature_index,
+ .get_smu_table_index = arcturus_get_smu_table_index,
+ .get_smu_power_index = arcturus_get_pwr_src_index,
+ .get_workload_type = arcturus_get_workload_type,
+ /* internal structure allocations */
+ .tables_init = arcturus_tables_init,
+ .alloc_dpm_context = arcturus_allocate_dpm_context,
+ /* pptable related */
+ .check_powerplay_table = arcturus_check_powerplay_table,
+ .store_powerplay_table = arcturus_store_powerplay_table,
+ .append_powerplay_table = arcturus_append_powerplay_table,
+ /* init dpm */
+ .get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
+ /* btc */
+ .run_btc = arcturus_run_btc,
+ /* dpm/clk tables */
+ .set_default_dpm_table = arcturus_set_default_dpm_table,
+ .populate_umd_state_clk = arcturus_populate_umd_state_clk,
+ .get_thermal_temperature_range = arcturus_get_thermal_temperature_range,
+ .get_current_clk_freq_by_table = arcturus_get_current_clk_freq_by_table,
+ .print_clk_levels = arcturus_print_clk_levels,
+ .force_clk_levels = arcturus_force_clk_levels,
+ .read_sensor = arcturus_read_sensor,
+ .get_fan_speed_percent = arcturus_get_fan_speed_percent,
+ .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
+ .force_dpm_limit_value = arcturus_force_dpm_limit_value,
+ .unforce_dpm_levels = arcturus_unforce_dpm_levels,
+ .get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
+ .get_power_profile_mode = arcturus_get_power_profile_mode,
+ .set_power_profile_mode = arcturus_set_power_profile_mode,
+ .set_performance_level = smu_v11_0_set_performance_level,
+ /* debug (internal used) */
+ .dump_pptable = arcturus_dump_pptable,
+ .get_power_limit = arcturus_get_power_limit,
+ .is_dpm_running = arcturus_is_dpm_running,
+ .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
+ .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
+ .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_enter = smu_v11_0_baco_enter,
+ .baco_exit = smu_v11_0_baco_exit,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .get_pptable_power_limit = arcturus_get_pptable_power_limit,
+};
+
+void arcturus_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &arcturus_ppt_funcs;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h
new file mode 100644
index 000000000000..d756b16924b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ARCTURUS_PPT_H__
+#define __ARCTURUS_PPT_H__
+
+#define ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define ARCTURUS_UMD_PSTATE_MCLK_LEVEL 0x2
+
+#define MAX_DPM_NUMBER 16
+#define MAX_PCIE_CONF 2
+
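+/* Driver-side bookkeeping for DPM levels and clock ranges */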
+struct arcturus_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+struct arcturus_dpm_state {
+ uint32_t soft_min_level;
+ uint32_t soft_max_level;
+ uint32_t hard_min_level;
+ uint32_t hard_max_level;
+};
+
+struct arcturus_single_dpm_table {
+ uint32_t count;
+ struct arcturus_dpm_state dpm_state;
+ struct arcturus_dpm_level dpm_levels[MAX_DPM_NUMBER];
+};
+
+struct arcturus_pcie_table {
+ uint16_t count;
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint32_t lclk[MAX_PCIE_CONF];
+};
+
+struct arcturus_dpm_table {
+ struct arcturus_single_dpm_table soc_table;
+ struct arcturus_single_dpm_table gfx_table;
+ struct arcturus_single_dpm_table mem_table;
+ struct arcturus_single_dpm_table eclk_table;
+ struct arcturus_single_dpm_table vclk_table;
+ struct arcturus_single_dpm_table dclk_table;
+ struct arcturus_single_dpm_table fclk_table;
+ struct arcturus_pcie_table pcie_table;
+};
+
+extern void arcturus_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index cc63705920dc..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
- vega12_baco.o smu9_baco.o
+ vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \
+ ci_baco.o smu7_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
new file mode 100644
index 000000000000..3be40114e63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "ci_baco.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
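+/*
+ * Scripted register sequences (writes, read-modify-writes and polls)
+ * replayed by baco_program_registers() during BACO entry/exit.
+ */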
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK + BACO_CNTL__PWRGOOD_MEM_MASK + BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+};
+
+int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
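+ /*
+ * Entry: program the GPIO, FB-request-reject, bypass-clock and
+ * PLL power-down sequences before requesting BACO.
+ */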
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
new file mode 100644
index 000000000000..17041f187020
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_BACO_H__
+#define __CI_BACO_H__
+#include "smu7_baco.h"
+
+extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
index 9c57c1f67749..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m
return ret;
}
+bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size)
+{
+ u32 i, reg = 0;
+
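+ /* Replay the command list; bail out on the first failed command */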
+ for (i = 0; i < array_size; i++) {
+ if ((entry[i].cmd == CMD_WRITE) ||
+ (entry[i].cmd == CMD_READMODIFYWRITE) ||
+ (entry[i].cmd == CMD_WAITFOR))
+ reg = entry[i].reg_offset;
+ if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
+ entry[i].shift, entry[i].val, entry[i].timeout))
+ return false;
+ }
+
+ return true;
+}
+
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 95296c916f4e..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -33,6 +33,15 @@ enum baco_cmd_type {
CMD_DELAY_US,
};
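+/*
+ * Command entry for the pre-soc15 BACO sequences: reg_offset is a plain
+ * MMIO register offset, unlike soc15_baco_cmd_entry which uses a
+ * hwip/instance/segment tuple.
+ */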
+struct baco_cmd_entry {
+ enum baco_cmd_type cmd;
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t timeout;
+ uint32_t val;
+};
+
struct soc15_baco_cmd_entry {
enum baco_cmd_type cmd;
uint32_t hwip;
@@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry {
uint32_t timeout;
uint32_t val;
};
+
+extern bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size);
extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
new file mode 100644
index 000000000000..c0368f2dfb21
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "fiji_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 }
+};
+
+int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
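All of the per-ASIC tables added in this patch are consumed by the shared baco_program_registers() helper declared near the top of the series. The stand-alone sketch below shows one plausible reading of that table-driven pattern; the field order (cmd, register, mask, shift, timeout, value) is inferred from the initializers above, and the CMD_* semantics, helper body, and register stand-ins are editorial assumptions rather than the in-tree common_baco implementation.

/* Hedged sketch of the table-driven BACO register programming pattern.
 * Field layout and command semantics are inferred from the initializers
 * in this patch, not copied from the in-tree common_baco helper. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum baco_cmd_type { CMD_WRITE, CMD_READMODIFYWRITE, CMD_WAITFOR };

struct baco_cmd_entry {
	enum baco_cmd_type cmd;
	uint32_t reg;      /* register offset */
	uint32_t mask;     /* field mask for RMW / WAITFOR */
	uint32_t shift;    /* field shift for RMW */
	uint32_t timeout;  /* poll budget for WAITFOR */
	uint32_t val;      /* value to write, or masked value to wait for */
};

static uint32_t regs[0x200];                     /* stand-in for MMIO space */
static uint32_t rreg(uint32_t r) { return regs[r]; }
static void wreg(uint32_t r, uint32_t v) { regs[r] = v; }

static bool program_registers(const struct baco_cmd_entry *e, uint32_t n)
{
	for (uint32_t i = 0; i < n; i++) {
		uint32_t v;
		bool hit;

		switch (e[i].cmd) {
		case CMD_WRITE:
			wreg(e[i].reg, e[i].val);
			break;
		case CMD_READMODIFYWRITE:
			v = rreg(e[i].reg) & ~e[i].mask;
			wreg(e[i].reg, v | ((e[i].val << e[i].shift) & e[i].mask));
			break;
		case CMD_WAITFOR:
			hit = false;
			for (uint32_t t = 0; t <= e[i].timeout && !hit; t++)
				hit = (rreg(e[i].reg) & e[i].mask) == e[i].val;
			if (!hit)
				return false;    /* poll budget exhausted */
			break;
		}
	}
	return true;
}

int main(void)
{
	/* Hypothetical three-step sequence: write, set a field, poll it. */
	const struct baco_cmd_entry tbl[] = {
		{ CMD_WRITE,           0x100, 0,    0, 0, 0xff77ffff },
		{ CMD_READMODIFYWRITE, 0x100, 0xf0, 4, 0, 0x3 },
		{ CMD_WAITFOR,         0x100, 0xf0, 0, 5, 0x30 },
	};

	printf("sequence %s\n", program_registers(tbl, 3) ? "ok" : "failed");
	return 0;
}

Under this reading, a row such as { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } means "poll BACO_CNTL until the masked value equals 0x40"; that is one consistent interpretation of the enter/exit tables above, not a statement of the actual helper's contract.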
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
new file mode 100644
index 000000000000..47f402900bdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __FIJI_BACO_H__
+#define __FIJI_BACO_H__
+#include "smu7_baco.h"
+
+extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index cc57fb953e62..9454ab50f9a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -81,8 +81,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
adev = hwmgr->adev;
/* Skip for suspend/resume case */
- if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
- && adev->in_suspend) {
+ if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
+ && !amdgpu_passthrough(adev) && adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
@@ -99,6 +99,9 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!smum_is_dpm_running(hwmgr)) {
pr_info("dpm has been disabled\n");
return 0;
@@ -200,6 +203,9 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
return -EINVAL;
@@ -237,6 +243,9 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
TEMP_RANGE_MAX};
struct amdgpu_device *adev = hwmgr->adev;
+ if (!hwmgr->not_vf)
+ return 0;
+
if (hwmgr->hwmgr_func->get_thermal_temperature_range)
hwmgr->hwmgr_func->get_thermal_temperature_range(
hwmgr, &range);
@@ -263,6 +272,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
+ if (hwmgr->pp_one_vf)
+ return false;
if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
return false;
@@ -482,6 +493,9 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index a24beaa4fb01..f48fdc7f0382 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev;
+
if (!hwmgr)
return -EINVAL;
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_workload_prority(hwmgr);
hwmgr->gfxoff_state_changed_by_workload = false;
+ adev = hwmgr->adev;
+
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
vega12_hwmgr_init(hwmgr);
break;
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega20_smu_funcs;
vega20_hwmgr_init(hwmgr);
@@ -212,6 +221,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
int ret = 0;
+ hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
+ hwmgr->pm_en = amdgpu_dpm &&
+ (hwmgr->not_vf || hwmgr->pp_one_vf);
if (!hwmgr->pm_en)
return 0;
@@ -270,7 +282,7 @@ err:
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
return 0;
phm_stop_thermal_controller(hwmgr);
@@ -290,7 +302,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
return 0;
phm_disable_smc_firmware_ctf(hwmgr);
@@ -312,7 +324,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en)
+ if (!hwmgr->not_vf || !hwmgr->pm_en)
return 0;
ret = phm_setup_asic(hwmgr);
@@ -356,6 +368,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ if (!hwmgr->not_vf)
+ return ret;
ret = phm_pre_display_configuration_changed(hwmgr);
if (ret)
return ret;
@@ -372,6 +386,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
enum PP_StateUILabel requested_ui_label;
struct pp_power_state *requested_ps = NULL;
+ if (!hwmgr->not_vf)
+ return ret;
if (user_state == NULL) {
ret = -EINVAL;
break;
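The hardwaremanager.c and hwmgr.c hunks above gate most DPM, thermal, and display paths on two flags: not_vf (bare metal rather than an SR-IOV virtual function) and pp_one_vf (the single-VF SR-IOV mode in which the guest is allowed to drive power management). A minimal stand-alone illustration of the pm_en computation added to hwmgr_hw_init() follows; it is an editorial sketch of the boolean logic only, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the pm_en gating added in hwmgr_hw_init(): power management is
 * enabled when the dpm module parameter is set and the function is either
 * bare metal or the one-VF SR-IOV configuration. */
static bool compute_pm_en(bool amdgpu_dpm, bool not_vf, bool pp_one_vf)
{
	return amdgpu_dpm && (not_vf || pp_one_vf);
}

int main(void)
{
	printf("bare metal:      %d\n", compute_pm_en(true, true,  false)); /* 1 */
	printf("one-VF SR-IOV:   %d\n", compute_pm_en(true, false, true));  /* 1 */
	printf("multi-VF SR-IOV: %d\n", compute_pm_en(true, false, false)); /* 0 */
	return 0;
}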
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
new file mode 100644
index 000000000000..8f8e296f2fe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "polaris_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl_vg[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg));
+ baco_program_registers(hwmgr, turn_off_plls_tbl_vg,
+ ARRAY_SIZE(turn_off_plls_tbl_vg));
+ } else {
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ }
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
new file mode 100644
index 000000000000..87a5fa0a157a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __POLARIS_BACO_H__
+#define __POLARIS_BACO_H__
+#include "smu7_baco.h"
+
+extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 6bf48934fdc4..31a32a79cfc2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -262,20 +262,22 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
uint32_t index;
long workload;
- if (!skip_display_settings)
- phm_display_configuration_changed(hwmgr);
-
- if (hwmgr->ps)
- power_state_management(hwmgr, new_ps);
- else
- /*
- * for vega12/vega20 which does not support power state manager
- * DAL clock limits should also be honoured
- */
- phm_apply_clock_adjust_rules(hwmgr);
-
- if (!skip_display_settings)
- phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ if (hwmgr->not_vf) {
+ if (!skip_display_settings)
+ phm_display_configuration_changed(hwmgr);
+
+ if (hwmgr->ps)
+ power_state_management(hwmgr, new_ps);
+ else
+ /*
+ * for vega12/vega20, which do not support the power state manager,
+ * DAL clock limits should also be honoured
+ */
+ phm_apply_clock_adjust_rules(hwmgr);
+
+ if (!skip_display_settings)
+ phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ }
if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
hwmgr->dpm_level = hwmgr->request_dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 18e780f566fa..689072a312a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
- clocks->data[i].latency_in_us = latency_required ?
- smu10_get_mem_latency(hwmgr,
- pclk_vol_table->entries[i].clk) :
- 0;
- clocks->num_levels++;
+ if (pclk_vol_table->entries[i].clk) {
+ clocks->data[clocks->num_levels].clocks_in_khz =
+ pclk_vol_table->entries[i].clk * 10;
+ clocks->data[clocks->num_levels].latency_in_us = latency_required ?
+ smu10_get_mem_latency(hwmgr,
+ pclk_vol_table->entries[i].clk) :
+ 0;
+ clocks->num_levels++;
+ }
}
return 0;
@@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
- clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
- clocks->num_levels++;
+ if (pclk_vol_table->entries[i].clk) {
+ clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
+ clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
+ clocks->num_levels++;
+ }
}
return 0;
@@ -1151,12 +1156,11 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct smu10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
- int result = 0;
smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
data->water_marks_exist = true;
- return result;
+ return 0;
}
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
@@ -1311,6 +1315,12 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
return 0;
}
+static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
+{
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DeviceDriverReset,
+ mode);
+}
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
@@ -1355,6 +1365,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
.get_power_profile_mode = smu10_get_power_profile_mode,
.set_power_profile_mode = smu10_set_power_profile_mode,
+ .asic_reset = smu10_asic_reset,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
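The smu10 get_clock_by_type hunks above change the copy loops so that rows with a zero clock are skipped and the destination is indexed by clocks->num_levels instead of the source index, producing a dense output array. Below is a stand-alone sketch of that compaction idiom; the types and the 10 kHz source-unit assumption are illustrative, not taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

struct level { uint32_t clocks_in_khz; };

/* Copy only non-zero clock entries, writing to dst[n] so the result has no
 * holes; the source is assumed (per the *10 in the hunk) to be in 10 kHz. */
static uint32_t compact_levels(const uint32_t *clk, uint32_t count,
			       struct level *dst)
{
	uint32_t n = 0;

	for (uint32_t i = 0; i < count; i++) {
		if (!clk[i])
			continue;                /* skip invalid rows */
		dst[n].clocks_in_khz = clk[i] * 10;
		n++;
	}
	return n;
}

int main(void)
{
	const uint32_t src[] = { 30000, 0, 60000, 0, 90000 };
	struct level out[5];
	uint32_t n = compact_levels(src, 5, out);

	printf("%u levels, first at %u kHz\n", n, out[0].clocks_in_khz);
	return 0;
}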
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
new file mode 100644
index 000000000000..044cda005aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smu7_baco.h"
+#include "tonga_baco.h"
+#include "fiji_baco.h"
+#include "polaris_baco.h"
+#include "ci_baco.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
+
+ if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
+ *cap = true;
+
+ return 0;
+}
+
+int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32(mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
+
+int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ return tonga_baco_set_state(hwmgr, state);
+ case CHIP_FIJI:
+ return fiji_baco_set_state(hwmgr, state);
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ return polaris_baco_set_state(hwmgr, state);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return ci_baco_set_state(hwmgr, state);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
new file mode 100644
index 000000000000..be0d98abb536
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU7_BACO_H__
+#define __SMU7_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 487aeee1cf8a..bf04cfefb283 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
+#include "smu7_baco.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -719,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
/* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+ data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
}
data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
@@ -729,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
/* Initialize Vddci DPM table based on allow Mclk values */
for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+ data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
}
data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
}
@@ -743,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
*/
for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+ data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
}
data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
}
@@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
table_size = lookup_table->count;
PP_ASSERT_WITH_CODE(0 != lookup_table->count,
@@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -2956,9 +2955,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
- disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
- disable_mclk_switching_for_frame_lock ||
- smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ disable_mclk_switching_for_frame_lock ||
+ smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -3477,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- *query = tmp;
+ /*
+ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+ * - Hawaii
+ * - Bonaire
+ * - Fiji
+ * - Tonga
+ */
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA)) {
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- if (tmp != 0)
- return 0;
+ if (tmp != 0)
+ return 0;
+ }
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -3969,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+ * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables the AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
@@ -4068,6 +4088,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
data->frame_time_x2 = frame_time_in_us * 2 / 100;
+ if (data->frame_time_x2 < 280) {
+ pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
+ data->frame_time_x2 = 280;
+ }
+
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
@@ -4213,7 +4238,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vbios_version;
uint32_t tmp;
/* Read MC indirect register offset 0x9F bits [3:0] to see
@@ -4222,7 +4246,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
*/
smu7_get_mc_microcode_version(hwmgr);
- vbios_version = hwmgr->microcode_version_info.MC & 0xf;
data->need_long_memory_training = false;
@@ -4920,7 +4943,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
title[0], title[1], title[2], title[3],
title[4], title[5], title[6], title[7]);
- len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
+ len = ARRAY_SIZE(smu7_profiling);
for (i = 0; i < len; i++) {
if (i == hwmgr->power_profile_mode) {
@@ -5052,13 +5075,11 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw
PHM_PerformanceLevel *level)
{
const struct smu7_power_state *ps;
- struct smu7_hwmgr *data;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = hwmgr->backend;
ps = cast_const_phw_smu7_power_state(state);
i = index > ps->performance_level_count - 1 ?
@@ -5139,6 +5160,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_asic_baco_state = smu7_baco_get_state,
+ .set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
};
@@ -5161,13 +5185,11 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
- int ret = 0;
-
hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
if (hwmgr->pp_table_version == PP_TABLE_V0)
hwmgr->pptable_func = &pptable_funcs;
else if (hwmgr->pp_table_version == PP_TABLE_V1)
hwmgr->pptable_func = &pptable_v1_0_funcs;
- return ret;
+ return 0;
}
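Both the smu7 and vega10 hunks replace the open-coded three-line exchange in their voltage-lookup insertion sorts with the kernel's swap() helper. The stand-alone sketch below shows the same idiom with a userspace stand-in for that macro; the table values are made up for illustration.

#include <stdio.h>

/* Userspace stand-in for the kernel's swap() helper (GNU C typeof). */
#define swap(a, b) do { typeof(a) _t = (a); (a) = (b); (b) = _t; } while (0)

int main(void)
{
	unsigned int vdd[] = { 1100, 900, 1000, 800 };
	const int n = 4;

	/* Insertion sort mirroring smu7_sort_lookup_table()'s inner loop. */
	for (int i = 0; i < n - 1; i++)
		for (int j = i + 1; j > 0; j--)
			if (vdd[j] < vdd[j - 1])
				swap(vdd[j - 1], vdd[j]);

	for (int i = 0; i < n; i++)
		printf("%u ", vdd[i]);
	printf("\n");
	return 0;
}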
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
new file mode 100644
index 000000000000..ea743bea8e29
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "tonga_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry gpio_tbl_iceland[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
+};
+
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (hwmgr->chip_id == CHIP_TOPAZ)
+ baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland));
+ else
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (hwmgr->chip_id == CHIP_TOPAZ) {
+ if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
+ ARRAY_SIZE(exit_baco_tbl_iceland))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
+ ARRAY_SIZE(clean_baco_tbl_iceland)))
+ return 0;
+ }
+ } else {
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
new file mode 100644
index 000000000000..5dc16cc8a295
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TONGA_BACO_H__
+#define __TONGA_BACO_H__
+#include "smu7_baco.h"
+
+extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 3be8eb21fd6e..92a65e3daff4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
@@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -914,6 +912,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
+ if (!hwmgr->not_vf)
+ return result;
+
/* Setup default Overdrive Fan control settings */
data->odn_fan_table.target_fan_speed =
hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
@@ -981,6 +982,9 @@ static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
+ if (!hwmgr->not_vf)
+ return 0;
+
PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
"Failed to init sclk threshold!",
return -EINVAL);
@@ -2505,6 +2509,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to setup default DPM tables!",
return result);
+ if (!hwmgr->not_vf)
+ return 0;
+
/* initialize ODN table */
if (hwmgr->od_enabled) {
if (odn_table->max_vddc) {
@@ -2828,6 +2835,8 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, feature_mask = 0;
+ if (!hwmgr->not_vf)
+ return 0;
if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2934,61 +2943,73 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
int tmp_result, result = 0;
- vega10_enable_disable_PCC_limit_feature(hwmgr, true);
+ if (hwmgr->not_vf) {
+ vega10_enable_disable_PCC_limit_feature(hwmgr, true);
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
-
- tmp_result = vega10_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to construct voltage tables!",
- result = tmp_result);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
- tmp_result = vega10_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to initialize SMC table!",
- result = tmp_result);
+ tmp_result = vega10_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to construct voltage tables!",
+ result = tmp_result);
+ }
- if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
- tmp_result = vega10_enable_thermal_protection(hwmgr);
+ if (hwmgr->not_vf || hwmgr->pp_one_vf) {
+ tmp_result = vega10_init_smc_table(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable thermal protection!",
- result = tmp_result);
+ "Failed to initialize SMC table!",
+ result = tmp_result);
}
- tmp_result = vega10_enable_vrhot_feature(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable VR hot feature!",
- result = tmp_result);
+ if (hwmgr->not_vf) {
+ if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
+ tmp_result = vega10_enable_thermal_protection(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable thermal protection!",
+ result = tmp_result);
+ }
- tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable deep sleep master switch!",
- result = tmp_result);
+ tmp_result = vega10_enable_vrhot_feature(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable VR hot feature!",
+ result = tmp_result);
- tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to start DPM!", result = tmp_result);
+ tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable deep sleep master switch!",
+ result = tmp_result);
+ }
- /* enable didt, do not abort if failed didt */
- tmp_result = vega10_enable_didt_config(hwmgr);
- PP_ASSERT(!tmp_result,
- "Failed to enable didt config!");
+ if (hwmgr->not_vf) {
+ tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to start DPM!", result = tmp_result);
+ }
+
+ if (hwmgr->not_vf) {
+ /* enable didt, do not abort if failed didt */
+ tmp_result = vega10_enable_didt_config(hwmgr);
+ PP_ASSERT(!tmp_result,
+ "Failed to enable didt config!");
+ }
tmp_result = vega10_enable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable power containment!",
- result = tmp_result);
+ "Failed to enable power containment!",
+ result = tmp_result);
- tmp_result = vega10_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to power control set level!",
- result = tmp_result);
+ if (hwmgr->not_vf) {
+ tmp_result = vega10_power_control_set_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to power control set level!",
+ result = tmp_result);
- tmp_result = vega10_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable ULV!",
- result = tmp_result);
+ tmp_result = vega10_enable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable ULV!",
+ result = tmp_result);
+ }
return result;
}
@@ -3082,11 +3103,22 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
performance_level->soc_clock = socclk_dep_table->entries
[state_entry->ucSocClockIndexHigh].ulClk;
if (gfxclk_dep_table->ucRevId == 0) {
- performance_level->gfx_clock = gfxclk_dep_table->entries
- [state_entry->ucGfxClockIndexHigh].ulClk;
+ /* under vega10 pp one vf mode, the gfx clk dpm needs to be lowered
+ * to level-4 due to the limited 110w power budget
+ */
+ if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
+ performance_level->gfx_clock =
+ gfxclk_dep_table->entries[4].ulClk;
+ else
+ performance_level->gfx_clock = gfxclk_dep_table->entries
+ [state_entry->ucGfxClockIndexHigh].ulClk;
} else if (gfxclk_dep_table->ucRevId == 1) {
patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
- performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
+ if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
+ performance_level->gfx_clock = patom_record_V2[4].ulClk;
+ else
+ performance_level->gfx_clock =
+ patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
}
performance_level->mem_clock = mclk_dep_table->entries
@@ -3220,7 +3252,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
- disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_vr ||
force_mclk_high;
@@ -3496,6 +3529,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
data->smc_state_table.gfx_boot_level);
+
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
}
@@ -3504,7 +3538,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
- if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+ if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
+ && hwmgr->not_vf) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
@@ -3519,6 +3554,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
}
}
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!data->registry_data.socclk_dpm_key_disabled) {
if (data->smc_state_table.soc_boot_level !=
data->dpm_table.soc_table.dpm_state.soft_min_level) {
@@ -3561,6 +3599,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
}
}
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!data->registry_data.socclk_dpm_key_disabled) {
if (data->smc_state_table.soc_max_level !=
data->dpm_table.soc_table.dpm_state.soft_max_level) {
@@ -3690,6 +3731,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables the AVFS feature.
+ */
+ if(hwmgr->hardcode_pp_table != NULL)
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
vega10_update_avfs(hwmgr);
/*
@@ -4048,15 +4096,25 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+ /* under vega10 pp one vf mode, the gfx clk dpm needs to be lowered
+ * to level-4 due to the limited power budget
+ */
+ if (hwmgr->pp_one_vf)
+ *sclk_mask = 4;
+ else
+ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
}
+
return 0;
}
static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
+ if (!hwmgr->not_vf)
+ return;
+
switch (mode) {
case AMD_FAN_CTRL_NONE:
vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
@@ -4170,6 +4228,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
break;
}
+ if (!hwmgr->not_vf)
+ return ret;
+
if (!ret) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
@@ -4354,14 +4415,13 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
- int result = 0;
if (!data->registry_data.disable_water_mark) {
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap = WaterMarksExist;
}
- return result;
+ return 0;
}
static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
@@ -4474,7 +4534,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
- int i, now, size = 0;
+ int i, now, size = 0, count = 0;
switch (type) {
case PP_SCLK:
@@ -4484,7 +4544,12 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
now = smum_get_argument(hwmgr);
- for (i = 0; i < sclk_table->count; i++)
+ if (hwmgr->pp_one_vf &&
+ (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+ count = 5;
+ else
+ count = sclk_table->count;
+ for (i = 0; i < count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
@@ -4695,6 +4760,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
+ if (!hwmgr->not_vf)
+ return 0;
+
if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
@@ -5097,9 +5165,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
- for (i = 0; i < podn_vdd_dep->count - 1; i++)
- od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
- if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+ for (i = 0; i < podn_vdd_dep->count; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
@@ -5219,18 +5285,40 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ msg = PPSMC_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_SHUTDOWN:
+ case PP_MP1_STATE_RESET:
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ "[PrepareMp1] Failed!",
+ return ret);
+
+ return 0;
+}
+
static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
const struct vega10_power_state *ps;
- struct vega10_hwmgr *data;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = hwmgr->backend;
ps = cast_const_phw_vega10_power_state(state);
i = index > ps->performance_level_count - 1 ?
@@ -5242,6 +5330,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t feature_mask = 0;
+
+ if (disable) {
+ feature_mask |= data->smu_features[GNLD_ULV].enabled ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ } else {
+ feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ }
+
+ if (feature_mask)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ !disable, feature_mask),
+ "enable/disable power features for compute performance Failed!",
+ return -EINVAL);
+
+ if (disable) {
+ data->smu_features[GNLD_ULV].enabled = false;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = false;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = false;
+ data->smu_features[GNLD_DS_LCLK].enabled = false;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
+ } else {
+ data->smu_features[GNLD_ULV].enabled = true;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = true;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = true;
+ data->smu_features[GNLD_DS_LCLK].enabled = true;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
+ }
+
+ return 0;
+
+}
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5308,6 +5449,9 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
+ .set_mp1_state = vega10_set_mp1_state,
+ .disable_power_features_for_compute_performance =
+ vega10_disable_power_features_for_compute_performance,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 6f26cb241ecc..0a677d4bc87b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1343,6 +1343,9 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
hwmgr->default_power_limit = hwmgr->power_limit =
(uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index efb6d3762feb..aca61d1ff3c2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -499,8 +499,6 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
- int result = 0;
-
/*
*SMU expects the Clock ID to be in the top 16 bits.
*Lower 16 bits specify the level
@@ -512,7 +510,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*clock = smum_get_argument(hwmgr);
- return result;
+ return 0;
}
static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
@@ -2639,6 +2637,30 @@ static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ msg = PPSMC_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_SHUTDOWN:
+ case PP_MP1_STATE_RESET:
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ "[PrepareMp1] Failed!",
+ return ret);
+
+ return 0;
+}
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2695,7 +2717,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
-
+ .set_mp1_state = vega12_set_mp1_state,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index df6ff9252401..9b5e72bdceca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -29,7 +29,7 @@
#include "vega20_baco.h"
#include "vega20_smumgr.h"
-
+#include "amdgpu_ras.h"
static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{
@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum BACO_STATE cur_state;
uint32_t data;
@@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
if (state == BACO_STATE_IN) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-
-
- if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -EINVAL;
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ if(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+ } else {
+ if(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 1))
+ return -EINVAL;
+ }
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f27c6fbb192e..3b3ec5666051 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_BACO);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
@@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- if (adev->in_baco_reset) {
- adev->in_baco_reset = 0;
+ if (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
@@ -869,7 +872,7 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
- data->pcie_parameters_override = 1;
+ data->pcie_parameters_override = true;
data->pcie_gen_level1 = pcie_gen;
data->pcie_width_level1 = pcie_width;
@@ -2101,7 +2104,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- *query = metrics_table.CurrSocketPower << 8;
+ /* The 40.46 SMU firmware release renamed this field */
+ if (hwmgr->smu_version == 0x282e00)
+ *query = metrics_table.AverageSocketPower << 8;
+ else
+ *query = metrics_table.CurrSocketPower << 8;
return ret;
}
@@ -2349,12 +2356,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2387,12 +2398,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2403,14 +2418,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ uint32_t soft_min_level, soft_max_level;
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ /* gfxclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+ /* uclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ /* socclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
+
+ data->dpm_table.soc_table.dpm_state.soft_min_level =
+ data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.soc_table.dpm_state.soft_max_level =
+ data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
@@ -3063,6 +3118,34 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = PPSMC_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = PPSMC_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = PPSMC_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ "[PrepareMp1] Failed!",
+ return ret);
+
+ return 0;
+}
+
static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
static const char *ppfeature_name[] = {
@@ -4057,6 +4140,56 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
+{
+ int res;
+
+ /* I2C bus access can happen very early, when the SMU is not loaded yet */
+ if (!vega20_is_smc_ram_running(hwmgr))
+ return 0;
+
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ (acquire ?
+ PPSMC_MSG_RequestI2CBus :
+ PPSMC_MSG_ReleaseI2CBus),
+ 0);
+
+ PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
+ return res;
+}
+
+static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
+ enum pp_df_cstate state)
+{
+ int ret;
+
+ /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
+ if (hwmgr->smu_version < 0x283200) {
+ pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ if (ret)
+ pr_err("SetDfCstate failed!\n");
+
+ return ret;
+}
+
+static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
+ uint32_t pstate)
+{
+ int ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetXgmiMode,
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ if (ret)
+ pr_err("SetXgmiPstate failed!\n");
+
+ return ret;
+}
+
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4123,6 +4256,10 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.get_asic_baco_capability = vega20_baco_get_capability,
.get_asic_baco_state = vega20_baco_get_state,
.set_asic_baco_state = vega20_baco_set_state,
+ .set_mp1_state = vega20_set_mp1_state,
+ .smu_i2c_bus_access = vega20_smu_i2c_bus_access,
+ .set_df_cstate = vega20_set_df_cstate,
+ .set_xgmi_pstate = vega20_set_xgmi_pstate,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a0f52c86d8c7..97b6714e83e6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -26,6 +26,7 @@
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
#include "dm_pp_smu.h"
+#include "smu_types.h"
#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255
@@ -150,124 +151,6 @@ struct smu_power_state {
struct smu_hw_power_state hardware;
};
-enum smu_message_type
-{
- SMU_MSG_TestMessage = 0,
- SMU_MSG_GetSmuVersion,
- SMU_MSG_GetDriverIfVersion,
- SMU_MSG_SetAllowedFeaturesMaskLow,
- SMU_MSG_SetAllowedFeaturesMaskHigh,
- SMU_MSG_EnableAllSmuFeatures,
- SMU_MSG_DisableAllSmuFeatures,
- SMU_MSG_EnableSmuFeaturesLow,
- SMU_MSG_EnableSmuFeaturesHigh,
- SMU_MSG_DisableSmuFeaturesLow,
- SMU_MSG_DisableSmuFeaturesHigh,
- SMU_MSG_GetEnabledSmuFeaturesLow,
- SMU_MSG_GetEnabledSmuFeaturesHigh,
- SMU_MSG_SetWorkloadMask,
- SMU_MSG_SetPptLimit,
- SMU_MSG_SetDriverDramAddrHigh,
- SMU_MSG_SetDriverDramAddrLow,
- SMU_MSG_SetToolsDramAddrHigh,
- SMU_MSG_SetToolsDramAddrLow,
- SMU_MSG_TransferTableSmu2Dram,
- SMU_MSG_TransferTableDram2Smu,
- SMU_MSG_UseDefaultPPTable,
- SMU_MSG_UseBackupPPTable,
- SMU_MSG_RunBtc,
- SMU_MSG_RequestI2CBus,
- SMU_MSG_ReleaseI2CBus,
- SMU_MSG_SetFloorSocVoltage,
- SMU_MSG_SoftReset,
- SMU_MSG_StartBacoMonitor,
- SMU_MSG_CancelBacoMonitor,
- SMU_MSG_EnterBaco,
- SMU_MSG_SetSoftMinByFreq,
- SMU_MSG_SetSoftMaxByFreq,
- SMU_MSG_SetHardMinByFreq,
- SMU_MSG_SetHardMaxByFreq,
- SMU_MSG_GetMinDpmFreq,
- SMU_MSG_GetMaxDpmFreq,
- SMU_MSG_GetDpmFreqByIndex,
- SMU_MSG_GetDpmClockFreq,
- SMU_MSG_GetSsVoltageByDpm,
- SMU_MSG_SetMemoryChannelConfig,
- SMU_MSG_SetGeminiMode,
- SMU_MSG_SetGeminiApertureHigh,
- SMU_MSG_SetGeminiApertureLow,
- SMU_MSG_SetMinLinkDpmByIndex,
- SMU_MSG_OverridePcieParameters,
- SMU_MSG_OverDriveSetPercentage,
- SMU_MSG_SetMinDeepSleepDcefclk,
- SMU_MSG_ReenableAcDcInterrupt,
- SMU_MSG_NotifyPowerSource,
- SMU_MSG_SetUclkFastSwitch,
- SMU_MSG_SetUclkDownHyst,
- SMU_MSG_GfxDeviceDriverReset,
- SMU_MSG_GetCurrentRpm,
- SMU_MSG_SetVideoFps,
- SMU_MSG_SetTjMax,
- SMU_MSG_SetFanTemperatureTarget,
- SMU_MSG_PrepareMp1ForUnload,
- SMU_MSG_DramLogSetDramAddrHigh,
- SMU_MSG_DramLogSetDramAddrLow,
- SMU_MSG_DramLogSetDramSize,
- SMU_MSG_SetFanMaxRpm,
- SMU_MSG_SetFanMinPwm,
- SMU_MSG_ConfigureGfxDidt,
- SMU_MSG_NumOfDisplays,
- SMU_MSG_RemoveMargins,
- SMU_MSG_ReadSerialNumTop32,
- SMU_MSG_ReadSerialNumBottom32,
- SMU_MSG_SetSystemVirtualDramAddrHigh,
- SMU_MSG_SetSystemVirtualDramAddrLow,
- SMU_MSG_WaflTest,
- SMU_MSG_SetFclkGfxClkRatio,
- SMU_MSG_AllowGfxOff,
- SMU_MSG_DisallowGfxOff,
- SMU_MSG_GetPptLimit,
- SMU_MSG_GetDcModeMaxDpmFreq,
- SMU_MSG_GetDebugData,
- SMU_MSG_SetXgmiMode,
- SMU_MSG_RunAfllBtc,
- SMU_MSG_ExitBaco,
- SMU_MSG_PrepareMp1ForReset,
- SMU_MSG_PrepareMp1ForShutdown,
- SMU_MSG_SetMGpuFanBoostLimitRpm,
- SMU_MSG_GetAVFSVoltageByDpm,
- SMU_MSG_PowerUpVcn,
- SMU_MSG_PowerDownVcn,
- SMU_MSG_PowerUpJpeg,
- SMU_MSG_PowerDownJpeg,
- SMU_MSG_BacoAudioD3PME,
- SMU_MSG_ArmD3,
- SMU_MSG_MAX_COUNT,
-};
-
-enum smu_clk_type
-{
- SMU_GFXCLK,
- SMU_VCLK,
- SMU_DCLK,
- SMU_ECLK,
- SMU_SOCCLK,
- SMU_UCLK,
- SMU_DCEFCLK,
- SMU_DISPCLK,
- SMU_PIXCLK,
- SMU_PHYCLK,
- SMU_FCLK,
- SMU_SCLK,
- SMU_MCLK,
- SMU_PCIE,
- SMU_OD_SCLK,
- SMU_OD_MCLK,
- SMU_OD_VDDC_CURVE,
- SMU_OD_RANGE,
- SMU_CLK_COUNT,
-};
-
enum smu_power_src_type
{
SMU_POWER_SOURCE_AC,
@@ -275,63 +158,6 @@ enum smu_power_src_type
SMU_POWER_SOURCE_COUNT,
};
-enum smu_feature_mask
-{
- SMU_FEATURE_DPM_PREFETCHER_BIT,
- SMU_FEATURE_DPM_GFXCLK_BIT,
- SMU_FEATURE_DPM_UCLK_BIT,
- SMU_FEATURE_DPM_SOCCLK_BIT,
- SMU_FEATURE_DPM_UVD_BIT,
- SMU_FEATURE_DPM_VCE_BIT,
- SMU_FEATURE_ULV_BIT,
- SMU_FEATURE_DPM_MP0CLK_BIT,
- SMU_FEATURE_DPM_LINK_BIT,
- SMU_FEATURE_DPM_DCEFCLK_BIT,
- SMU_FEATURE_DS_GFXCLK_BIT,
- SMU_FEATURE_DS_SOCCLK_BIT,
- SMU_FEATURE_DS_LCLK_BIT,
- SMU_FEATURE_PPT_BIT,
- SMU_FEATURE_TDC_BIT,
- SMU_FEATURE_THERMAL_BIT,
- SMU_FEATURE_GFX_PER_CU_CG_BIT,
- SMU_FEATURE_RM_BIT,
- SMU_FEATURE_DS_DCEFCLK_BIT,
- SMU_FEATURE_ACDC_BIT,
- SMU_FEATURE_VR0HOT_BIT,
- SMU_FEATURE_VR1HOT_BIT,
- SMU_FEATURE_FW_CTF_BIT,
- SMU_FEATURE_LED_DISPLAY_BIT,
- SMU_FEATURE_FAN_CONTROL_BIT,
- SMU_FEATURE_GFX_EDC_BIT,
- SMU_FEATURE_GFXOFF_BIT,
- SMU_FEATURE_CG_BIT,
- SMU_FEATURE_DPM_FCLK_BIT,
- SMU_FEATURE_DS_FCLK_BIT,
- SMU_FEATURE_DS_MP1CLK_BIT,
- SMU_FEATURE_DS_MP0CLK_BIT,
- SMU_FEATURE_XGMI_BIT,
- SMU_FEATURE_DPM_GFX_PACE_BIT,
- SMU_FEATURE_MEM_VDDCI_SCALING_BIT,
- SMU_FEATURE_MEM_MVDD_SCALING_BIT,
- SMU_FEATURE_DS_UCLK_BIT,
- SMU_FEATURE_GFX_ULV_BIT,
- SMU_FEATURE_FW_DSTATE_BIT,
- SMU_FEATURE_BACO_BIT,
- SMU_FEATURE_VCN_PG_BIT,
- SMU_FEATURE_JPEG_PG_BIT,
- SMU_FEATURE_USB_PG_BIT,
- SMU_FEATURE_RSMU_SMN_CG_BIT,
- SMU_FEATURE_APCC_PLUS_BIT,
- SMU_FEATURE_GTHR_BIT,
- SMU_FEATURE_GFX_DCS_BIT,
- SMU_FEATURE_GFX_SS_BIT,
- SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT,
- SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT,
- SMU_FEATURE_MMHUB_PG_BIT,
- SMU_FEATURE_ATHUB_PG_BIT,
- SMU_FEATURE_COUNT,
-};
-
enum smu_memory_pool_size
{
SMU_MEMORY_POOL_SIZE_ZERO = 0,
@@ -396,12 +222,17 @@ struct smu_bios_boot_up_values
uint16_t vdd_gfx;
uint8_t cooling_id;
uint32_t pp_table_id;
+ uint32_t format_revision;
+ uint32_t content_revision;
+ uint32_t fclk;
};
enum smu_table_id
{
SMU_TABLE_PPTABLE = 0,
SMU_TABLE_WATERMARKS,
+ SMU_TABLE_CUSTOM_DPM,
+ SMU_TABLE_DPMCLOCKS,
SMU_TABLE_AVFS,
SMU_TABLE_AVFS_PSM_DEBUG,
SMU_TABLE_AVFS_FUSE_OVERRIDE,
@@ -422,17 +253,27 @@ struct smu_table_context
void *hardcode_pptable;
unsigned long metrics_time;
void *metrics_table;
+ void *clocks_table;
+ void *watermarks_table;
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
- uint32_t table_count;
+ /*
+ * The driver table is just a staging buffer for
+ * uploading/downloading content from the SMU.
+ *
+ * The table_id passed to SMU_MSG_TransferTableSmu2Dram/
+ * SMU_MSG_TransferTableDram2Smu tells the SMU which
+ * content the driver is interested in.
+ */
+ struct smu_table driver_table;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
- uint16_t TDPODLimit;
void *overdrive_table;
+ void *boot_overdrive_table;
};
struct smu_dpm_context {
@@ -452,6 +293,7 @@ struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
bool vcn_gated;
+ bool jpeg_gated;
};
struct smu_power_context {
@@ -490,6 +332,13 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
+enum smu_reset_mode
+{
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+};
+
enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
@@ -509,9 +358,10 @@ struct smu_context
struct amdgpu_device *adev;
struct amdgpu_irq_src *irq_source;
- const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
+ struct mutex sensor_lock;
+ struct mutex metrics_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -540,6 +390,8 @@ struct smu_context
#define WATERMARKS_EXIST (1 << 0)
#define WATERMARKS_LOADED (1 << 1)
uint32_t watermarks_bitmap;
+ uint32_t hard_min_uclk_req_from_dal;
+ bool disable_uclk_switch;
uint32_t workload_mask;
uint32_t workload_prority[WORKLOAD_POLICY_MAX];
@@ -547,11 +399,15 @@ struct smu_context
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
bool pm_enabled;
+ bool is_apu;
uint32_t smc_if_version;
+ bool uploading_custom_pp_table;
};
+struct i2c_adapter;
+
struct pptable_funcs {
int (*alloc_dpm_context)(struct smu_context *smu);
int (*store_powerplay_table)(struct smu_context *smu);
@@ -563,7 +419,7 @@ struct pptable_funcs {
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
- int (*run_afll_btc)(struct smu_context *smu);
+ int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
int (*set_default_dpm_table)(struct smu_context *smu);
@@ -593,12 +449,13 @@ struct pptable_funcs {
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable);
int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable);
+ int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
int (*pre_display_config_changed)(struct smu_context *smu);
int (*display_config_changed)(struct smu_context *smu);
int (*apply_clocks_adjust_rules)(struct smu_context *smu);
- int (*notify_smc_dispaly_config)(struct smu_context *smu);
+ int (*notify_smc_display_config)(struct smu_context *smu);
int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
int (*unforce_dpm_levels)(struct smu_context *smu);
int (*get_profiling_clk_mask)(struct smu_context *smu,
@@ -607,8 +464,6 @@ struct pptable_funcs {
uint32_t *mclk_mask,
uint32_t *soc_mask);
int (*set_cpu_power_state)(struct smu_context *smu);
- int (*set_ppfeature_status)(struct smu_context *smu, uint64_t ppfeatures);
- int (*get_ppfeature_status)(struct smu_context *smu, char *buf);
bool (*is_dpm_running)(struct smu_context *smu);
int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
int (*set_thermal_fan_table)(struct smu_context *smu);
@@ -623,46 +478,53 @@ struct pptable_funcs {
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
int (*set_default_od_settings)(struct smu_context *smu, bool initialize);
int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
-};
-
-struct smu_funcs
-{
+ int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
+ void (*dump_pptable)(struct smu_context *smu);
+ int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
+ int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq);
+ int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+ int (*i2c_eeprom_init)(struct i2c_adapter *control);
+ void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
int (*init_microcode)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
int (*init_smc_tables)(struct smu_context *smu);
int (*fini_smc_tables)(struct smu_context *smu);
int (*init_power)(struct smu_context *smu);
int (*fini_power)(struct smu_context *smu);
- int (*load_microcode)(struct smu_context *smu);
int (*check_fw_status)(struct smu_context *smu);
int (*setup_pptable)(struct smu_context *smu);
int (*get_vbios_bootup_values)(struct smu_context *smu);
int (*get_clk_info_from_vbios)(struct smu_context *smu);
int (*check_pptable)(struct smu_context *smu);
int (*parse_pptable)(struct smu_context *smu);
- int (*populate_smc_pptable)(struct smu_context *smu);
+ int (*populate_smc_tables)(struct smu_context *smu);
int (*check_fw_version)(struct smu_context *smu);
+ int (*powergate_sdma)(struct smu_context *smu, bool gate);
+ int (*powergate_vcn)(struct smu_context *smu, bool gate);
+ int (*powergate_jpeg)(struct smu_context *smu, bool gate);
+ int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
int (*write_pptable)(struct smu_context *smu);
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_driver_table_location)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
- int (*write_watermarks_table)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
- int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
- int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
+ int (*send_smc_msg_with_param)(struct smu_context *smu,
+ enum smu_message_type msg, uint32_t param);
int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
- int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled);
int (*notify_display_change)(struct smu_context *smu);
- int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def);
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
- int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
+ int (*stop_thermal_control)(struct smu_context *smu);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
@@ -684,8 +546,6 @@ struct smu_funcs
int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
struct smu_clock_info *clocks);
int (*notify_smu_enable_pwe)(struct smu_context *smu);
- int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
@@ -699,229 +559,94 @@ struct smu_funcs
bool (*baco_is_support)(struct smu_context *smu);
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
- int (*baco_reset)(struct smu_context *smu);
-
-};
-
-#define smu_init_microcode(smu) \
- ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
-#define smu_init_smc_tables(smu) \
- ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
-#define smu_fini_smc_tables(smu) \
- ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
-#define smu_init_power(smu) \
- ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
-#define smu_fini_power(smu) \
- ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
-#define smu_load_microcode(smu) \
- ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
-#define smu_check_fw_status(smu) \
- ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
-#define smu_setup_pptable(smu) \
- ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
-#define smu_get_vbios_bootup_values(smu) \
- ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
-#define smu_get_clk_info_from_vbios(smu) \
- ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
-#define smu_check_pptable(smu) \
- ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
-#define smu_parse_pptable(smu) \
- ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
-#define smu_populate_smc_pptable(smu) \
- ((smu)->funcs->populate_smc_pptable ? (smu)->funcs->populate_smc_pptable((smu)) : 0)
-#define smu_check_fw_version(smu) \
- ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
-#define smu_write_pptable(smu) \
- ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
-#define smu_set_min_dcef_deep_sleep(smu) \
- ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
-#define smu_set_tool_table_location(smu) \
- ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
-#define smu_notify_memory_pool_location(smu) \
- ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
-#define smu_gfx_off_control(smu, enable) \
- ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0)
-
-#define smu_write_watermarks_table(smu) \
- ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
-#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
- ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
-#define smu_system_features_control(smu, en) \
- ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
-#define smu_init_max_sustainable_clocks(smu) \
- ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
-#define smu_set_default_od_settings(smu, initialize) \
- ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_set_fan_speed_rpm(smu, speed) \
- ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
-#define smu_send_smc_msg(smu, msg) \
- ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
-#define smu_alloc_dpm_context(smu) \
- ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
-#define smu_init_display_count(smu, count) \
- ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0)
-#define smu_feature_set_allowed_mask(smu) \
- ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
-#define smu_feature_get_enabled_mask(smu, mask, num) \
- ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
-#define smu_is_dpm_running(smu) \
- ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
-#define smu_feature_update_enable_state(smu, feature_id, enabled) \
- ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0)
-#define smu_notify_display_change(smu) \
- ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
-#define smu_store_powerplay_table(smu) \
- ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
-#define smu_check_powerplay_table(smu) \
- ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
-#define smu_append_powerplay_table(smu) \
- ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
-#define smu_set_default_dpm_table(smu) \
- ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
-#define smu_populate_umd_state_clk(smu) \
- ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
-#define smu_set_default_od8_settings(smu) \
- ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
-#define smu_get_power_limit(smu, limit, def) \
- ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0)
-#define smu_set_power_limit(smu, limit) \
- ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
-#define smu_get_current_clk_freq(smu, clk_id, value) \
- ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
-#define smu_print_clk_levels(smu, clk_type, buf) \
- ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, clk_type, level) \
- ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
-#define smu_get_od_percentage(smu, type) \
- ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
-#define smu_set_od_percentage(smu, type, value) \
- ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
-#define smu_od_edit_dpm_table(smu, type, input, size) \
- ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
-#define smu_tables_init(smu, tab) \
- ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
-#define smu_set_thermal_fan_table(smu) \
- ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
-#define smu_start_thermal_control(smu) \
- ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
-#define smu_read_sensor(smu, sensor, data, size) \
- ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_asic_read_sensor(smu, sensor, data, size) \
- ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_get_power_profile_mode(smu, buf) \
- ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
-#define smu_set_power_profile_mode(smu, param, param_size) \
- ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
-#define smu_pre_display_config_changed(smu) \
- ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
-#define smu_display_config_changed(smu) \
- ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
-#define smu_apply_clocks_adjust_rules(smu) \
- ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
-#define smu_force_dpm_limit_value(smu, highest) \
- ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
-#define smu_unforce_dpm_levels(smu) \
- ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
-#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
- ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
-#define smu_set_cpu_power_state(smu) \
- ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
-#define smu_get_fan_control_mode(smu) \
- ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
-#define smu_set_fan_control_mode(smu, value) \
- ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
-#define smu_get_fan_speed_percent(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
-#define smu_set_fan_speed_percent(smu, speed) \
- ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
-#define smu_get_fan_speed_rpm(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
-
-#define smu_msg_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_clk_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_feature_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_table_get_index(smu, tab) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
-#define smu_power_get_index(smu, src) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
-#define smu_workload_get_type(smu, profile) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
-#define smu_run_afll_btc(smu) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
-#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
-#define smu_set_deep_sleep_dcefclk(smu, clk) \
- ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
-#define smu_set_active_display_count(smu, count) \
- ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
-#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
- ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
-#define smu_get_clock_by_type(smu, type, clocks) \
- ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
-#define smu_get_max_high_clocks(smu, clocks) \
- ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
-#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0)
-#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
-#define smu_display_clock_voltage_request(smu, clock_req) \
- ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
-#define smu_get_dal_power_level(smu, clocks) \
- ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
-#define smu_get_perf_level(smu, designation, level) \
- ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
-#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
- ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
-#define smu_notify_smu_enable_pwe(smu) \
- ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
-#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
- ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
-#define smu_dpm_set_uvd_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
-#define smu_dpm_set_vce_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
-#define smu_set_xgmi_pstate(smu, pstate) \
- ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-#define smu_set_ppfeature_status(smu, ppfeatures) \
- ((smu)->ppt_funcs->set_ppfeature_status ? (smu)->ppt_funcs->set_ppfeature_status((smu), (ppfeatures)) : -EINVAL)
-#define smu_get_ppfeature_status(smu, buf) \
- ((smu)->ppt_funcs->get_ppfeature_status ? (smu)->ppt_funcs->get_ppfeature_status((smu), (buf)) : -EINVAL)
-#define smu_set_watermarks_table(smu, tab, clock_ranges) \
- ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
-#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
- ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
-#define smu_thermal_temperature_range_update(smu, range, rw) \
- ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
-#define smu_get_thermal_temperature_range(smu, range) \
- ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
-#define smu_register_irq_handler(smu) \
- ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
-#define smu_set_azalia_d3_pme(smu) \
- ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
-#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
- ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
-#define smu_baco_is_support(smu) \
- ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
-#define smu_baco_get_state(smu, state) \
- ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
-#define smu_baco_reset(smu) \
- ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
-#define smu_asic_set_performance_level(smu, level) \
- ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
+ int (*baco_enter)(struct smu_context *smu);
+ int (*baco_exit)(struct smu_context *smu);
+ int (*mode2_reset)(struct smu_context *smu);
+ int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
+ int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+};
+
+int smu_load_microcode(struct smu_context *smu);
+
+int smu_check_fw_status(struct smu_context *smu);
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed);
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed);
+int smu_get_fan_control_mode(struct smu_context *smu);
+int smu_set_fan_control_mode(struct smu_context *smu, int value);
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks);
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req);
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
+int smu_notify_smu_enable_pwe(struct smu_context *smu);
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_set_azalia_d3_pme(struct smu_context *smu);
+
+bool smu_baco_is_support(struct smu_context *smu);
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
+
+int smu_baco_enter(struct smu_context *smu);
+int smu_baco_exit(struct smu_context *smu);
+
+int smu_mode2_reset(struct smu_context *smu);
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -930,6 +655,8 @@ extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
extern const struct amd_ip_funcs smu_ip_funcs;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
+
extern int smu_feature_init_dpm(struct smu_context *smu);
extern int smu_feature_is_enabled(struct smu_context *smu,
@@ -945,6 +672,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
void *table_data, bool drv2smu);
bool is_support_sw_smu(struct amdgpu_device *adev);
+bool is_support_sw_smu_xgmi(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
@@ -952,6 +680,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+int smu_write_watermarks_table(struct smu_context *smu);
+int smu_set_watermarks_for_clock_ranges(
+ struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -962,21 +694,51 @@ extern int smu_get_current_clocks(struct smu_context *smu,
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id);
+ enum amd_pp_task task_id,
+ bool lock_needed);
+int smu_switch_power_profile(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en);
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
uint16_t level, uint32_t *value);
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max);
+ uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min_value, uint32_t *max_value);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
+const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
+const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
+size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
+int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed);
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state);
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table);
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
#endif
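
The hunk above removes the long list of NULL-guarded dispatch macros in favor of exported smu_* functions, keeping only a couple of guarded macros such as smu_i2c_eeprom_init/fini. The following is a minimal, self-contained sketch of that guard pattern; the struct and function names are illustrative stand-ins, not the kernel's real smu_context/ppt_funcs layout.

/* Minimal sketch of the NULL-guarded callback pattern used by the
 * smu_* macros above; names here are illustrative only. */
#include <errno.h>
#include <stdio.h>

struct demo_ppt_funcs {
	int (*i2c_eeprom_init)(void *control);	/* optional backend hook */
};

struct demo_smu_context {
	const struct demo_ppt_funcs *ppt_funcs;
};

/* Mirrors: ((smu)->ppt_funcs->i2c_eeprom_init ? ...(control) : -EINVAL) */
static int demo_i2c_eeprom_init(struct demo_smu_context *smu, void *control)
{
	if (smu->ppt_funcs && smu->ppt_funcs->i2c_eeprom_init)
		return smu->ppt_funcs->i2c_eeprom_init(control);
	return -EINVAL;	/* backend does not implement the hook */
}

static int fake_init(void *control)
{
	(void)control;
	return 0;
}

int main(void)
{
	const struct demo_ppt_funcs with_hook = { .i2c_eeprom_init = fake_init };
	const struct demo_ppt_funcs without_hook = { 0 };
	struct demo_smu_context a = { &with_hook }, b = { &without_hook };

	printf("with hook: %d, without hook: %d\n",
	       demo_i2c_eeprom_init(&a, NULL), demo_i2c_eeprom_init(&b, NULL));
	return 0;
}
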
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
new file mode 100644
index 000000000000..e3291259b249
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ARCTURUS_PP_SMC_H
+#define ARCTURUS_PP_SMC_H
+
+#pragma pack(push, 1)
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+#define PPSMC_MSG_UseBackupPPTable 0x15
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x18
+#define PPSMC_MSG_ExitBaco 0x19
+#define PPSMC_MSG_ArmD3 0x1A
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x1B
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1C
+#define PPSMC_MSG_SetHardMinByFreq 0x1D
+#define PPSMC_MSG_SetHardMaxByFreq 0x1E
+#define PPSMC_MSG_GetMinDpmFreq 0x1F
+#define PPSMC_MSG_GetMaxDpmFreq 0x20
+#define PPSMC_MSG_GetDpmFreqByIndex 0x21
+
+#define PPSMC_MSG_SetWorkloadMask 0x22
+#define PPSMC_MSG_SetDfSwitchType 0x23
+#define PPSMC_MSG_GetVoltageByDpm 0x24
+#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x25
+
+#define PPSMC_MSG_SetPptLimit 0x26
+#define PPSMC_MSG_GetPptLimit 0x27
+
+//Power Gating
+#define PPSMC_MSG_PowerUpVcn0 0x28
+#define PPSMC_MSG_PowerDownVcn0 0x29
+#define PPSMC_MSG_PowerUpVcn1 0x2A
+#define PPSMC_MSG_PowerDownVcn1 0x2B
+
+//Resets and reload
+#define PPSMC_MSG_PrepareMp1ForUnload 0x2C
+#define PPSMC_MSG_PrepareMp1ForReset 0x2D
+#define PPSMC_MSG_PrepareMp1ForShutdown 0x2E
+#define PPSMC_MSG_SoftReset 0x2F
+
+//BTC
+#define PPSMC_MSG_RunAfllBtc 0x30
+#define PPSMC_MSG_RunDcBtc 0x31
+
+//Debug
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x34
+#define PPSMC_MSG_DramLogSetDramSize 0x35
+#define PPSMC_MSG_GetDebugData 0x36
+
+//WAFL and XGMI
+#define PPSMC_MSG_WaflTest 0x37
+#define PPSMC_MSG_SetXgmiMode 0x38
+
+//Others
+#define PPSMC_MSG_SetMemoryChannelEnable 0x39
+
+#define PPSMC_Message_Count 0x3A
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_Msg;
+#pragma pack(pop)
+
+#endif
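
The new arcturus_ppsmc.h above only defines message IDs and response codes; how a caller reacts to PPSMC_Result_* is left to the driver. Below is a hedged illustration of mapping those response codes to errno-style returns. The code values are copied from the header above, but the helper name and the particular errno choices are assumptions, not the driver's actual policy.

/* Illustrative mapping of the PPSMC response codes defined above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PPSMC_Result_OK                0x1
#define PPSMC_Result_Failed            0xFF
#define PPSMC_Result_UnknownCmd        0xFE
#define PPSMC_Result_CmdRejectedPrereq 0xFD
#define PPSMC_Result_CmdRejectedBusy   0xFC

static int demo_ppsmc_result_to_errno(uint32_t result)
{
	switch (result) {
	case PPSMC_Result_OK:                return 0;
	case PPSMC_Result_CmdRejectedBusy:   return -EBUSY;   /* retry later */
	case PPSMC_Result_CmdRejectedPrereq: return -EINVAL;  /* bad ordering/args */
	case PPSMC_Result_UnknownCmd:        return -EOPNOTSUPP;
	case PPSMC_Result_Failed:
	default:                             return -EIO;
	}
}

int main(void)
{
	printf("busy -> %d, ok -> %d\n",
	       demo_ppsmc_result_to_errno(PPSMC_Result_CmdRejectedBusy),
	       demo_ppsmc_result_to_errno(PPSMC_Result_OK));
	return 0;
}
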
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c5989cb38b1b..2ffb666b97e6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -189,6 +189,14 @@ struct phm_vce_clock_voltage_dependency_table {
struct phm_vce_clock_voltage_dependency_record entries[1];
};
+
+enum SMU_ASIC_RESET_MODE
+{
+ SMU_ASIC_RESET_MODE_0,
+ SMU_ASIC_RESET_MODE_1,
+ SMU_ASIC_RESET_MODE_2,
+};
+
struct pp_smumgr_func {
char *name;
int (*smu_init)(struct pp_hwmgr *hwmgr);
@@ -344,6 +352,13 @@ struct pp_hwmgr_func {
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks);
+ int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
+ int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
+ int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool acquire);
+ int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
+ int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
+ bool disable);
};
struct pp_table_func {
@@ -726,6 +741,7 @@ struct pp_hwmgr {
uint32_t smu_version;
bool not_vf;
bool pm_en;
+ bool pp_one_vf;
struct mutex smu_lock;
uint32_t pp_table_version;
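
The hwmgr.h hunk above adds an SMU_ASIC_RESET_MODE enum and an asic_reset callback. A short sketch of how a backend might dispatch on that mode follows; the function body is hypothetical and only the enum values come from the header.

/* Sketch of dispatching on the new SMU_ASIC_RESET_MODE enum. */
#include <errno.h>
#include <stdio.h>

enum SMU_ASIC_RESET_MODE {
	SMU_ASIC_RESET_MODE_0,
	SMU_ASIC_RESET_MODE_1,
	SMU_ASIC_RESET_MODE_2,
};

static int demo_asic_reset(enum SMU_ASIC_RESET_MODE mode)
{
	switch (mode) {
	case SMU_ASIC_RESET_MODE_2:
		/* e.g. request a mode-2 reset from the firmware */
		return 0;
	default:
		/* other modes left unimplemented in this sketch */
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	printf("mode2 reset: %d\n", demo_asic_reset(SMU_ASIC_RESET_MODE_2));
	return 0;
}
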
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 90879e4092a3..df4677da736c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -59,7 +59,7 @@
#define PPSMC_MSG_SetDriverDramAddrLow 0x1B
#define PPSMC_MSG_TransferTableSmu2Dram 0x1C
#define PPSMC_MSG_TransferTableDram2Smu 0x1D
-#define PPSMC_MSG_ControlGfxRM 0x1E
+#define PPSMC_MSG_DeviceDriverReset 0x1E
#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F
#define PPSMC_MSG_SetHardMinDcefclkByFreq 0x20
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 755d51f9c6a9..fdc6b7a57bc9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -27,7 +27,9 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x13
+// Be aware that the version should be updated in
+// smu_v11_0.h; a rename may also be needed.
+// #define SMU11_DRIVER_IF_VERSION 0x13
#define PPTABLE_V20_SMU_VERSION 3
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
new file mode 100644
index 000000000000..ce5b5011c122
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -0,0 +1,912 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU11_DRIVER_IF_ARCTURUS_H
+#define SMU11_DRIVER_IF_ARCTURUS_H
+
+// *** IMPORTANT ***
+// SMU TEAM: Always increment the interface version if
+// any structure is changed in this file
+//#define SMU11_DRIVER_IF_VERSION 0x09
+
+#define PPTABLE_ARCTURUS_SMU_VERSION 4
+
+#define NUM_GFXCLK_DPM_LEVELS 16
+#define NUM_VCLK_DPM_LEVELS 8
+#define NUM_DCLK_DPM_LEVELS 8
+#define NUM_MP0CLK_DPM_LEVELS 2
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 8
+#define NUM_XGMI_LEVELS 2
+#define NUM_XGMI_PSTATE_LEVELS 4
+
+#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1)
+#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1)
+#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1)
+#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1)
+#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1)
+#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1)
+#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1)
+#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1)
+#define MAX_XGMI_PSTATE_LEVEL (NUM_XGMI_PSTATE_LEVELS - 1)
+
+// Feature Control Defines
+// DPM
+#define FEATURE_DPM_PREFETCHER_BIT 0
+#define FEATURE_DPM_GFXCLK_BIT 1
+#define FEATURE_DPM_UCLK_BIT 2
+#define FEATURE_DPM_SOCCLK_BIT 3
+#define FEATURE_DPM_FCLK_BIT 4
+#define FEATURE_DPM_MP0CLK_BIT 5
+#define FEATURE_DPM_XGMI_BIT 6
+// Idle
+#define FEATURE_DS_GFXCLK_BIT 7
+#define FEATURE_DS_SOCCLK_BIT 8
+#define FEATURE_DS_LCLK_BIT 9
+#define FEATURE_DS_FCLK_BIT 10
+#define FEATURE_DS_UCLK_BIT 11
+#define FEATURE_GFX_ULV_BIT 12
+#define FEATURE_DPM_VCN_BIT 13
+#define FEATURE_RSMU_SMN_CG_BIT 14
+#define FEATURE_WAFL_CG_BIT 15
+// Throttler/Response
+#define FEATURE_PPT_BIT 16
+#define FEATURE_TDC_BIT 17
+#define FEATURE_APCC_PLUS_BIT 18
+#define FEATURE_VR0HOT_BIT 19
+#define FEATURE_VR1HOT_BIT 20
+#define FEATURE_FW_CTF_BIT 21
+#define FEATURE_FAN_CONTROL_BIT 22
+#define FEATURE_THERMAL_BIT 23
+// Other
+#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
+#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+
+#define FEATURE_SPARE_26_BIT 26
+#define FEATURE_SPARE_27_BIT 27
+#define FEATURE_SPARE_28_BIT 28
+#define FEATURE_SPARE_29_BIT 29
+#define FEATURE_SPARE_30_BIT 30
+#define FEATURE_SPARE_31_BIT 31
+#define FEATURE_SPARE_32_BIT 32
+#define FEATURE_SPARE_33_BIT 33
+#define FEATURE_SPARE_34_BIT 34
+#define FEATURE_SPARE_35_BIT 35
+#define FEATURE_SPARE_36_BIT 36
+#define FEATURE_SPARE_37_BIT 37
+#define FEATURE_SPARE_38_BIT 38
+#define FEATURE_SPARE_39_BIT 39
+#define FEATURE_SPARE_40_BIT 40
+#define FEATURE_SPARE_41_BIT 41
+#define FEATURE_SPARE_42_BIT 42
+#define FEATURE_SPARE_43_BIT 43
+#define FEATURE_SPARE_44_BIT 44
+#define FEATURE_SPARE_45_BIT 45
+#define FEATURE_SPARE_46_BIT 46
+#define FEATURE_SPARE_47_BIT 47
+#define FEATURE_SPARE_48_BIT 48
+#define FEATURE_SPARE_49_BIT 49
+#define FEATURE_SPARE_50_BIT 50
+#define FEATURE_SPARE_51_BIT 51
+#define FEATURE_SPARE_52_BIT 52
+#define FEATURE_SPARE_53_BIT 53
+#define FEATURE_SPARE_54_BIT 54
+#define FEATURE_SPARE_55_BIT 55
+#define FEATURE_SPARE_56_BIT 56
+#define FEATURE_SPARE_57_BIT 57
+#define FEATURE_SPARE_58_BIT 58
+#define FEATURE_SPARE_59_BIT 59
+#define FEATURE_SPARE_60_BIT 60
+#define FEATURE_SPARE_61_BIT 61
+#define FEATURE_SPARE_62_BIT 62
+#define FEATURE_SPARE_63_BIT 63
+
+#define NUM_FEATURES 64
+
+
+#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT )
+#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT )
+#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT )
+#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT )
+#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT )
+#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT )
+#define FEATURE_DPM_XGMI_MASK (1 << FEATURE_DPM_XGMI_BIT )
+
+#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT )
+#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
+#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
+#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
+#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
+#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
+
+#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
+#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
+#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
+#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
+#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
+#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
+#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
+
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+
+
+//FIXME need updating
+// Debug Overrides Bitmask
+#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001
+#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002
+
+// I2C Config Bit Defines
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+// VR Mapping Bit Defines
+#define VR_MAPPING_VR_SELECT_MASK 0x01
+#define VR_MAPPING_VR_SELECT_SHIFT 0x00
+
+#define VR_MAPPING_PLANE_SELECT_MASK 0x02
+#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
+
+// PSI Bit Defines
+#define PSI_SEL_VR0_PLANE0_PSI0 0x01
+#define PSI_SEL_VR0_PLANE0_PSI1 0x02
+#define PSI_SEL_VR0_PLANE1_PSI0 0x04
+#define PSI_SEL_VR0_PLANE1_PSI1 0x08
+#define PSI_SEL_VR1_PLANE0_PSI0 0x10
+#define PSI_SEL_VR1_PLANE0_PSI1 0x20
+#define PSI_SEL_VR1_PLANE1_PSI0 0x40
+#define PSI_SEL_VR1_PLANE1_PSI1 0x80
+
+// Throttler Control/Status Bits
+#define THROTTLER_PADDING_BIT 0
+#define THROTTLER_TEMP_EDGE_BIT 1
+#define THROTTLER_TEMP_HOTSPOT_BIT 2
+#define THROTTLER_TEMP_MEM_BIT 3
+#define THROTTLER_TEMP_VR_GFX_BIT 4
+#define THROTTLER_TEMP_VR_MEM_BIT 5
+#define THROTTLER_TEMP_VR_SOC_BIT 6
+#define THROTTLER_TDC_GFX_BIT 7
+#define THROTTLER_TDC_SOC_BIT 8
+#define THROTTLER_PPT0_BIT 9
+#define THROTTLER_PPT1_BIT 10
+#define THROTTLER_PPT2_BIT 11
+#define THROTTLER_PPT3_BIT 12
+#define THROTTLER_PPM_BIT 13
+#define THROTTLER_FIT_BIT 14
+#define THROTTLER_APCC_BIT 15
+
+// Table transfer status
+#define TABLE_TRANSFER_OK 0x0
+#define TABLE_TRANSFER_FAILED 0xFF
+#define TABLE_TRANSFER_PENDING 0xAB
+
+// Workload bits
+#define WORKLOAD_PPLIB_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 1
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_COMPUTE_BIT 3
+#define WORKLOAD_PPLIB_CUSTOM_BIT 4
+#define WORKLOAD_PPLIB_COUNT 5
+
+//XGMI performance states
+#define XGMI_STATE_D0 1
+#define XGMI_STATE_D3 0
+
+#define NUM_I2C_CONTROLLERS 8
+
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 8
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ I2C_CONTROLLER_NAME_VR_GFX = 0,
+ I2C_CONTROLLER_NAME_VR_SOC,
+ I2C_CONTROLLER_NAME_VR_MEM,
+ I2C_CONTROLLER_NAME_SPARE,
+ I2C_CONTROLLER_NAME_COUNT,
+} I2cControllerName_e;
+
+typedef enum {
+ I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
+ I2C_CONTROLLER_THROTTLER_VR_GFX,
+ I2C_CONTROLLER_THROTTLER_VR_SOC,
+ I2C_CONTROLLER_THROTTLER_VR_MEM,
+ I2C_CONTROLLER_THROTTLER_COUNT,
+} I2cControllerThrottler_e;
+
+typedef enum {
+ I2C_CONTROLLER_PROTOCOL_VR_0,
+ I2C_CONTROLLER_PROTOCOL_VR_1,
+ I2C_CONTROLLER_PROTOCOL_TMP_0,
+ I2C_CONTROLLER_PROTOCOL_TMP_1,
+ I2C_CONTROLLER_PROTOCOL_SPARE_0,
+ I2C_CONTROLLER_PROTOCOL_SPARE_1,
+ I2C_CONTROLLER_PROTOCOL_COUNT,
+} I2cControllerProtocol_e;
+
+typedef struct {
+ uint8_t Enabled;
+ uint8_t Speed;
+ uint8_t Padding[2];
+ uint32_t SlaveAddress;
+ uint8_t ControllerPort;
+ uint8_t ControllerName;
+ uint8_t ThermalThrotter;
+ uint8_t I2cProtocol;
+} I2cControllerConfig_t;
+
+typedef enum {
+ I2C_PORT_SVD_SCL = 0,
+ I2C_PORT_GPIO,
+} I2cPort_e;
+
+typedef enum {
+ I2C_SPEED_FAST_50K = 0, //50 Kbits/s
+ I2C_SPEED_FAST_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode)
+ I2C_SPEED_HIGH_2M, //2.3 Mbits/s
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ = 0,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+
+typedef struct {
+ uint8_t RegisterAddr; //only valid for write, ignored for read
+ uint8_t Cmd; //Read(0) or Write(1)
+ uint8_t Data; //Return data for read. Data to send for write
+ uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or CKSVII2C1(1)
+ uint8_t I2CSpeed; //Slow(0) or Fast(1)
+ uint16_t SlaveAddress;
+ uint8_t NumCmds; //Number of commands
+ uint8_t Padding[3];
+
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+
+} SwI2cRequest_t; // SW I2C Request Table
+
+//D3HOT sequences
+typedef enum {
+ BACO_SEQUENCE,
+ MSR_SEQUENCE,
+ BAMACO_SEQUENCE,
+ ULPS_SEQUENCE,
+ D3HOT_SEQUENCE_COUNT,
+}D3HOTSequence_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_DYNAMIC_MODE = 0,
+ PG_STATIC_MODE,
+} PowerGatingMode_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_POWER_DOWN = 0,
+ PG_POWER_UP,
+} PowerGatingSettings_e;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} QuadraticInt_t;
+
+typedef struct {
+ uint32_t m; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+} LinearInt_t;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} DroopInt_t;
+
+typedef enum {
+ GFXCLK_SOURCE_PLL = 0,
+ GFXCLK_SOURCE_AFLL,
+ GFXCLK_SOURCE_COUNT,
+} GfxclkSrc_e;
+
+typedef enum {
+ PPCLK_GFXCLK,
+ PPCLK_VCLK,
+ PPCLK_DCLK,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_COUNT,
+} POWER_SOURCE_e;
+
+typedef enum {
+ TEMP_EDGE,
+ TEMP_HOTSPOT,
+ TEMP_MEM,
+ TEMP_VR_GFX,
+ TEMP_VR_SOC,
+ TEMP_VR_MEM,
+ TEMP_COUNT
+} TEMP_TYPE_e;
+
+typedef enum {
+ PPT_THROTTLER_PPT0,
+ PPT_THROTTLER_PPT1,
+ PPT_THROTTLER_PPT2,
+ PPT_THROTTLER_PPT3,
+ PPT_THROTTLER_COUNT
+} PPT_THROTTLER_e;
+
+typedef enum {
+ VOLTAGE_MODE_AVFS = 0,
+ VOLTAGE_MODE_AVFS_SS,
+ VOLTAGE_MODE_SS,
+ VOLTAGE_MODE_COUNT,
+} VOLTAGE_MODE_e;
+
+typedef enum {
+ AVFS_VOLTAGE_GFX = 0,
+ AVFS_VOLTAGE_SOC,
+ AVFS_VOLTAGE_COUNT,
+} AVFS_VOLTAGE_TYPE_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW = 0,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+typedef enum {
+ MEMORY_TYPE_GDDR6 = 0,
+ MEMORY_TYPE_HBM,
+} MemoryType_e;
+
+typedef enum {
+ PWR_CONFIG_TDP = 0,
+ PWR_CONFIG_TGP,
+ PWR_CONFIG_TCP_ESTIMATED,
+ PWR_CONFIG_TCP_MEASURED,
+} PwrConfig_e;
+
+typedef enum {
+ XGMI_LINK_RATE_2 = 2, // 2Gbps
+ XGMI_LINK_RATE_4 = 4, // 4Gbps
+ XGMI_LINK_RATE_8 = 8, // 8Gbps
+ XGMI_LINK_RATE_12 = 12, // 12Gbps
+ XGMI_LINK_RATE_16 = 16, // 16Gbps
+ XGMI_LINK_RATE_17 = 17, // 17Gbps
+ XGMI_LINK_RATE_18 = 18, // 18Gbps
+ XGMI_LINK_RATE_19 = 19, // 19Gbps
+ XGMI_LINK_RATE_20 = 20, // 20Gbps
+ XGMI_LINK_RATE_21 = 21, // 21Gbps
+ XGMI_LINK_RATE_22 = 22, // 22Gbps
+ XGMI_LINK_RATE_23 = 23, // 23Gbps
+ XGMI_LINK_RATE_24 = 24, // 24Gbps
+ XGMI_LINK_RATE_25 = 25, // 25Gbps
+ XGMI_LINK_RATE_COUNT
+} XGMI_LINK_RATE_e;
+
+typedef enum {
+ XGMI_LINK_WIDTH_1 = 1, // x1
+ XGMI_LINK_WIDTH_2 = 2, // x2
+ XGMI_LINK_WIDTH_4 = 4, // x4
+ XGMI_LINK_WIDTH_8 = 8, // x8
+ XGMI_LINK_WIDTH_9 = 9, // x9
+ XGMI_LINK_WIDTH_16 = 16, // x16
+ XGMI_LINK_WIDTH_COUNT
+} XGMI_LINK_WIDTH_e;
+
+typedef struct {
+ uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only
+ uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM
+ uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used
+ uint8_t padding;
+ LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz)
+ QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V)
+ uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin
+ uint16_t Padding16;
+} DpmDescriptor_t;
+
+typedef struct {
+ uint32_t Version;
+
+ // SECTION: Feature Enablement
+ uint32_t FeaturesToRun[2];
+
+ // SECTION: Infrastructure Limits
+ uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT];
+ uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT];
+ uint16_t TdcLimitSoc; // Amps
+ uint16_t TdcLimitSocTau; // Time constant of LPF in ms
+ uint16_t TdcLimitGfx; // Amps
+ uint16_t TdcLimitGfxTau; // Time constant of LPF in ms
+
+ uint16_t TedgeLimit; // Celsius
+ uint16_t ThotspotLimit; // Celsius
+ uint16_t TmemLimit; // Celsius
+ uint16_t Tvr_gfxLimit; // Celsius
+ uint16_t Tvr_memLimit; // Celsius
+ uint16_t Tvr_socLimit; // Celsius
+ uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime)
+
+ uint16_t PpmPowerLimit; // Switch to this power limit when temperature is above PpmTempThreshold
+ uint16_t PpmTemperatureThreshold;
+
+ // SECTION: Throttler settings
+ uint32_t ThrottlerControlMask; // See the THROTTLER_*_BIT defines
+
+ // SECTION: ULV Settings
+ uint16_t UlvVoltageOffsetGfx; // In mV(Q2)
+ uint16_t UlvPadding; // Padding
+
+ uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV
+ uint8_t Padding234[3];
+
+ // SECTION: Voltage Control Parameters
+ uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX
+ uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC
+ uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX
+ uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC
+
+ uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits
+ uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits
+
+ //SECTION: DPM Config 1
+ DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
+
+ uint32_t Paddingclks[16];
+
+ // SECTION: DPM Config 2
+ uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz
+ uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2)
+
+ // GFXCLK DPM
+ uint16_t GfxclkFidle; // In MHz
+ uint16_t GfxclkSlewRate; // for PLL babystepping???
+ uint8_t Padding567[4];
+ uint16_t GfxclkDsMaxFreq; // In MHz
+ uint8_t GfxclkSource; // 0 = PLL, 1 = AFLL
+ uint8_t Padding456;
+
+ // GFXCLK Thermal DPM (formerly 'Boost' Settings)
+ uint16_t EnableTdpm;
+ uint16_t TdpmHighHystTemperature;
+ uint16_t TdpmLowHystTemperature;
+ uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability.
+
+ // SECTION: Fan Control
+ uint16_t FanStopTemp; //Celsius
+ uint16_t FanStartTemp; //Celsius
+
+ uint16_t FanGainEdge;
+ uint16_t FanGainHotspot;
+ uint16_t FanGainVrGfx;
+ uint16_t FanGainVrSoc;
+ uint16_t FanGainVrMem;
+ uint16_t FanGainHbm;
+ uint16_t FanPwmMin;
+ uint16_t FanAcousticLimitRpm;
+ uint16_t FanThrottlingRpm;
+ uint16_t FanMaximumRpm;
+ uint16_t FanTargetTemperature;
+ uint16_t FanTargetGfxclk;
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanTachEdgePerRev;
+ uint8_t FanTempInputSelect;
+ uint8_t padding8_Fan;
+
+ // The following are AFC override parameters. Leave at 0 to use FW defaults.
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t FuzzyFan_Reserved;
+
+
+ // SECTION: AVFS
+ // Overrides
+ uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT];
+ uint8_t Padding8_Avfs[2];
+
+ QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve
+ DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb
+ DroopInt_t dBtcGbGfxAfll; // GHz->V BtcGb
+ DroopInt_t dBtcGbSoc; // GHz->V BtcGb
+ LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V
+
+ QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V
+
+ uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2
+
+ uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT];
+ uint8_t Padding8_GfxBtc[2];
+
+ uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2
+ uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2
+
+ uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2
+
+ // SECTION: XGMI
+ uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3.
+ uint8_t XgmiDpmSpare[2];
+
+ // Temperature Dependent Vmin
+ uint16_t VDDGFX_TVmin; //Celsius
+ uint16_t VDDSOC_TVmin; //Celsius
+ uint16_t VDDGFX_Vmin_HiTemp; // mV Q2
+ uint16_t VDDGFX_Vmin_LoTemp; // mV Q2
+ uint16_t VDDSOC_Vmin_HiTemp; // mV Q2
+ uint16_t VDDSOC_Vmin_LoTemp; // mV Q2
+
+ uint16_t VDDGFX_TVminHystersis; // Celsius
+ uint16_t VDDSOC_TVminHystersis; // Celsius
+
+
+ // SECTION: Advanced Options
+ uint32_t DebugOverrides;
+ QuadraticInt_t ReservedEquation0;
+ QuadraticInt_t ReservedEquation1;
+ QuadraticInt_t ReservedEquation2;
+ QuadraticInt_t ReservedEquation3;
+
+ uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode
+ uint16_t PaddingUlv; // Padding
+
+ // Total Power configuration, use defines from PwrConfig_e
+ uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured
+ uint8_t TotalPowerSpare1;
+ uint16_t TotalPowerSpare2;
+
+ // APCC Settings
+ uint16_t PccThresholdLow;
+ uint16_t PccThresholdHigh;
+ uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+
+ // OOB Settings
+ uint16_t BasePerformanceCardPower;
+ uint16_t MaxPerformanceCardPower;
+ uint16_t BasePerformanceFrequencyCap; //In MHz
+ uint16_t MaxPerformanceFrequencyCap; //In MHz
+
+ // SECTION: Reserved
+ uint32_t Reserved[9];
+
+ // SECTION: BOARD PARAMETERS
+
+ // SVI2 Board Parameters
+ uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+ uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+ uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMemVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t BoardVrMapping; // Use VR_MAPPING* bitfields
+
+ uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+ uint8_t Padding8_V[2];
+
+ // Telemetry Settings
+ uint16_t GfxMaxCurrent; // in Amps
+ int8_t GfxOffset; // in Amps
+ uint8_t Padding_TelemetryGfx;
+
+ uint16_t SocMaxCurrent; // in Amps
+ int8_t SocOffset; // in Amps
+ uint8_t Padding_TelemetrySoc;
+
+ uint16_t MemMaxCurrent; // in Amps
+ int8_t MemOffset; // in Amps
+ uint8_t Padding_TelemetryMem;
+
+ uint16_t BoardMaxCurrent; // in Amps
+ int8_t BoardOffset; // in Amps
+ uint8_t Padding_TelemetryBoardInput;
+
+ // GPIO Settings
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+ uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event
+ uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event
+
+ // GFXCLK PLL Spread Spectrum
+ uint8_t PllGfxclkSpreadEnabled; // on or off
+ uint8_t PllGfxclkSpreadPercent; // Q4.4
+ uint16_t PllGfxclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadEnabled; // on or off
+ uint8_t UclkSpreadPercent; // Q4.4
+ uint16_t UclkSpreadFreq; // kHz
+
+ // FCLK Spread Spectrum
+ uint8_t FclkSpreadEnabled; // on or off
+ uint8_t FclkSpreadPercent; // Q4.4
+ uint16_t FclkSpreadFreq; // kHz
+
+ // GFXCLK Fll Spread Spectrum
+ uint8_t FllGfxclkSpreadEnabled; // on or off
+ uint8_t FllGfxclkSpreadPercent; // Q4.4
+ uint16_t FllGfxclkSpreadFreq; // kHz
+
+ // I2C Controller Structure
+ I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];
+
+ // Memory section
+ uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask.
+
+ uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines
+ uint8_t PaddingMem[3];
+
+ // Total board power
+ uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+ uint16_t BoardPadding;
+
+ // SECTION: XGMI Training
+ uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS];
+ uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS];
+
+ uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS];
+ uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS];
+
+ // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+ uint8_t GpioI2cScl; // Serial Clock
+ uint8_t GpioI2cSda; // Serial Data
+ uint16_t GpioPadding;
+
+ // Platform input telemetry voltage coefficient
+ uint32_t BoardVoltageCoeffA; // decode by /1000
+ uint32_t BoardVoltageCoeffB; // decode by /1000
+
+ uint32_t BoardReserved[7];
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8]; // SMU internal use
+
+} PPTable_t;
+
+typedef struct {
+ // Time constant parameters for clock averages in ms
+ uint16_t GfxclkAverageLpfTau;
+ uint16_t SocclkAverageLpfTau;
+ uint16_t UclkAverageLpfTau;
+ uint16_t GfxActivityLpfTau;
+ uint16_t UclkActivityLpfTau;
+
+ uint16_t SocketPowerLpfTau;
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DriverSmuConfig_t;
+
+typedef struct {
+ uint16_t CurrClock[PPCLK_COUNT];
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageSocclkFrequency;
+ uint16_t AverageUclkFrequency ;
+ uint16_t AverageGfxActivity ;
+ uint16_t AverageUclkActivity ;
+ uint8_t CurrSocVoltageOffset ;
+ uint8_t CurrGfxVoltageOffset ;
+ uint8_t CurrMemVidOffset ;
+ uint8_t Padding8 ;
+ uint16_t AverageSocketPower ;
+ uint16_t TemperatureEdge ;
+ uint16_t TemperatureHotspot ;
+ uint16_t TemperatureHBM ;
+ uint16_t TemperatureVrGfx ;
+ uint16_t TemperatureVrSoc ;
+ uint16_t TemperatureVrMem ;
+ uint32_t ThrottlerStatus ;
+
+ uint16_t CurrFanSpeed ;
+ uint16_t Padding16;
+
+ uint32_t Padding[4];
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SmuMetrics_t;
+
+
+typedef struct {
+ uint16_t avgPsmCount[75];
+ uint16_t minPsmCount[75];
+ float avgPsmVoltage[75];
+ float minPsmVoltage[75];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} AvfsDebugTable_t;
+
+typedef struct {
+ uint8_t AvfsVersion;
+ uint8_t Padding;
+ uint8_t AvfsEn[AVFS_VOLTAGE_COUNT];
+
+ uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT];
+
+ uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT];
+
+ int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16
+ int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16
+ int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT];
+ uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT];
+ uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT];
+
+ uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits
+
+
+ int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
+
+ uint32_t EnabledAvfsModules[3];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} AvfsFuseOverride_t;
+
+typedef struct {
+ uint8_t Gfx_ActiveHystLimit;
+ uint8_t Gfx_IdleHystLimit;
+ uint8_t Gfx_FPS;
+ uint8_t Gfx_MinActiveFreqType;
+ uint8_t Gfx_BoosterFreqType;
+ uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequency in order for FW to change clock.
+ uint8_t Gfx_UseRlcBusy;
+ uint8_t PaddingGfx[3];
+ uint16_t Gfx_MinActiveFreq; // MHz
+ uint16_t Gfx_BoosterFreq; // MHz
+ uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Gfx_PD_Data_limit_a; // Q16
+ uint32_t Gfx_PD_Data_limit_b; // Q16
+ uint32_t Gfx_PD_Data_limit_c; // Q16
+ uint32_t Gfx_PD_Data_error_coeff; // Q16
+ uint32_t Gfx_PD_Data_error_rate_coeff; // Q16
+
+ uint8_t Mem_ActiveHystLimit;
+ uint8_t Mem_IdleHystLimit;
+ uint8_t Mem_FPS;
+ uint8_t Mem_MinActiveFreqType;
+ uint8_t Mem_BoosterFreqType;
+ uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequency in order for FW to change clock.
+ uint8_t Mem_UseRlcBusy;
+ uint8_t PaddingMem[3];
+ uint16_t Mem_MinActiveFreq; // MHz
+ uint16_t Mem_BoosterFreq; // MHz
+ uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Mem_PD_Data_limit_a; // Q16
+ uint32_t Mem_PD_Data_limit_b; // Q16
+ uint32_t Mem_PD_Data_limit_c; // Q16
+ uint32_t Mem_PD_Data_error_coeff; // Q16
+ uint32_t Mem_PD_Data_error_rate_coeff; // Q16
+
+ uint32_t Mem_UpThreshold_Limit; // Q16
+ uint8_t Mem_UpHystLimit;
+ uint8_t Mem_DownHystLimit;
+ uint16_t Mem_Fps;
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DpmActivityMonitorCoeffInt_t;
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+#define TABLE_PPTABLE 0
+#define TABLE_AVFS 1
+#define TABLE_AVFS_PSM_DEBUG 2
+#define TABLE_AVFS_FUSE_OVERRIDE 3
+#define TABLE_PMSTATUSLOG 4
+#define TABLE_SMU_METRICS 5
+#define TABLE_DRIVER_SMU_CONFIG 6
+#define TABLE_OVERDRIVE 7
+#define TABLE_WAFL_XGMI_TOPOLOGY 8
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_ACTIVITY_MONITOR_COEFF 10
+#define TABLE_COUNT 11
+
+// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
+typedef enum {
+ DF_SWITCH_TYPE_FAST = 0,
+ DF_SWITCH_TYPE_SLOW,
+ DF_SWITCH_TYPE_COUNT,
+} DF_SWITCH_TYPE_e;
+
+typedef enum {
+ DRAM_BIT_WIDTH_DISABLED = 0,
+ DRAM_BIT_WIDTH_X_8,
+ DRAM_BIT_WIDTH_X_16,
+ DRAM_BIT_WIDTH_X_32,
+ DRAM_BIT_WIDTH_X_64, // NOT USED.
+ DRAM_BIT_WIDTH_X_128,
+ DRAM_BIT_WIDTH_COUNT,
+} DRAM_BIT_WIDTH_TYPE_e;
+
+#define REMOVE_FMAX_MARGIN_BIT 0x0
+#define REMOVE_DCTOL_MARGIN_BIT 0x1
+#define REMOVE_PLATFORM_MARGIN_BIT 0x2
+
+#endif
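
The PPTable_t comments above repeatedly reference fixed-point encodings: "mV(Q2)" voltages carry 2 fractional bits, and the spread-spectrum percentages are Q4.4. A small self-contained decoder follows as an illustration; the helper names and example values are assumptions, only the encodings come from the header.

/* Decoding the fixed-point formats described in the comments above. */
#include <stdint.h>
#include <stdio.h>

static double demo_decode_mv_q2(uint16_t raw)
{
	return raw / 4.0;	/* 2 fractional bits -> divide by 2^2 */
}

static double demo_decode_q4_4(uint8_t raw)
{
	return raw / 16.0;	/* 4 fractional bits -> divide by 2^4 */
}

int main(void)
{
	/* e.g. a raw voltage of 3400 in mV(Q2) is 850.0 mV,
	 * and a spread percent of 0x18 in Q4.4 is 1.50 %. */
	printf("%.1f mV, %.2f %%\n", demo_decode_mv_q2(3400),
	       demo_decode_q4_4(0x18));
	return 0;
}
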
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index adbbfebbb1e5..ac0120e384be 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -26,7 +26,9 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x33
+// Be aware that the version should be updated in
+// smu_v11_0.h; a rename may also be needed.
+// #define SMU11_DRIVER_IF_VERSION 0x33
#define PPTABLE_NV10_SMU_VERSION 8
@@ -504,10 +506,11 @@ typedef struct {
uint32_t Status;
uint16_t DieTemperature;
- uint16_t MemoryTemperature;
+ uint16_t CurrentMemoryTemperature;
- uint16_t SelectedCardPower;
- uint16_t Reserved4;
+ uint16_t MemoryTemperature;
+ uint8_t MemoryHotspotPosition;
+ uint8_t Reserved4;
uint32_t BoardLevelEnergyAccumulator;
} OutOfBandMonitor_t;
@@ -799,7 +802,12 @@ typedef struct {
// Mvdd Svi2 Div Ratio Setting
uint32_t MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
- uint32_t BoardReserved[9];
+ uint8_t RenesesLoadLineEnabled;
+ uint8_t GfxLoadlineResistance;
+ uint8_t SocLoadlineResistance;
+ uint8_t Padding8_Loadline;
+
+ uint32_t BoardReserved[8];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -903,13 +911,22 @@ typedef struct {
} Watermarks_t;
typedef struct {
+ uint16_t avgPsmCount[28];
+ uint16_t minPsmCount[28];
+ float avgPsmVoltage[28];
+ float minPsmVoltage[28];
+
+ uint32_t MmHubPadding[32]; // SMU internal use
+} AvfsDebugTable_t_NV14;
+
+typedef struct {
uint16_t avgPsmCount[36];
uint16_t minPsmCount[36];
float avgPsmVoltage[36];
float minPsmVoltage[36];
uint32_t MmHubPadding[8]; // SMU internal use
-} AvfsDebugTable_t;
+} AvfsDebugTable_t_NV10;
typedef struct {
uint8_t AvfsVersion;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
new file mode 100644
index 000000000000..2f85a34c0591
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU12_DRIVER_IF_H
+#define SMU12_DRIVER_IF_H
+
+// *** IMPORTANT ***
+// SMU TEAM: Always increment the interface version if
+// any structure is changed in this file
+#define SMU12_DRIVER_IF_VERSION 11
+
+typedef struct {
+ int32_t value;
+ uint32_t numFractionalBits;
+} FloatInIntFormat_t;
+
+typedef enum {
+ DSPCLK_DCFCLK = 0,
+ DSPCLK_DISPCLK,
+ DSPCLK_PIXCLK,
+ DSPCLK_PHYCLK,
+ DSPCLK_COUNT,
+} DSPCLK_e;
+
+typedef struct {
+ uint16_t Freq; // in MHz
+ uint16_t Vid; // min voltage in SVI2 VID
+} DisplayClockTable_t;
+
+typedef struct {
+ uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MinMclk;
+ uint16_t MaxMclk;
+
+ uint8_t WmSetting;
+ uint8_t WmType; // Used for normal pstate change or memory retraining
+ uint8_t Padding[2];
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+#define WM_PSTATE_CHG 0
+#define WM_RETRAINING 1
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+
+ uint32_t MmHubPadding[7]; // SMU internal use
+} Watermarks_t;
+
+typedef enum {
+ CUSTOM_DPM_SETTING_GFXCLK,
+ CUSTOM_DPM_SETTING_CCLK,
+ CUSTOM_DPM_SETTING_FCLK_CCX,
+ CUSTOM_DPM_SETTING_FCLK_GFX,
+ CUSTOM_DPM_SETTING_FCLK_STALLS,
+ CUSTOM_DPM_SETTING_LCLK,
+ CUSTOM_DPM_SETTING_COUNT,
+} CUSTOM_DPM_SETTING_e;
+
+typedef struct {
+ uint8_t ActiveHystLimit;
+ uint8_t IdleHystLimit;
+ uint8_t FPS;
+ uint8_t MinActiveFreqType;
+ FloatInIntFormat_t MinActiveFreq;
+ FloatInIntFormat_t PD_Data_limit;
+ FloatInIntFormat_t PD_Data_time_constant;
+ FloatInIntFormat_t PD_Data_error_coeff;
+ FloatInIntFormat_t PD_Data_error_rate_coeff;
+} DpmActivityMonitorCoeffExt_t;
+
+typedef struct {
+ DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT];
+} CustomDpmSettings_t;
+
+
+#define NUM_DCFCLK_DPM_LEVELS 8
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_MEMCLK_DPM_LEVELS 4
+#define NUM_VCN_DPM_LEVELS 8
+
+typedef struct {
+ uint32_t Freq; // In MHz
+ uint32_t Vol; // Millivolts with 2 fractional bits
+} DpmClock_t;
+
+typedef struct {
+ DpmClock_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ DpmClock_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ DpmClock_t FClocks[NUM_FCLK_DPM_LEVELS];
+ DpmClock_t MemClocks[NUM_MEMCLK_DPM_LEVELS];
+ DpmClock_t VClocks[NUM_VCN_DPM_LEVELS];
+ DpmClock_t DClocks[NUM_VCN_DPM_LEVELS];
+
+ uint8_t NumDcfClkDpmEnabled;
+ uint8_t NumSocClkDpmEnabled;
+ uint8_t NumFClkDpmEnabled;
+ uint8_t NumMemClkDpmEnabled;
+ uint8_t NumVClkDpmEnabled;
+ uint8_t NumDClkDpmEnabled;
+ uint8_t spare[2];
+} DpmClocks_t;
+
+
+typedef enum {
+ CLOCK_SMNCLK = 0,
+ CLOCK_SOCCLK,
+ CLOCK_MP0CLK,
+ CLOCK_MP1CLK,
+ CLOCK_MP2CLK,
+ CLOCK_VCLK,
+ CLOCK_LCLK,
+ CLOCK_DCLK,
+ CLOCK_ACLK,
+ CLOCK_ISPCLK,
+ CLOCK_SHUBCLK,
+ CLOCK_DISPCLK,
+ CLOCK_DPPCLK,
+ CLOCK_DPREFCLK,
+ CLOCK_DCFCLK,
+ CLOCK_FCLK,
+ CLOCK_UMCCLK,
+ CLOCK_GFXCLK,
+ CLOCK_COUNT,
+} CLOCK_IDs_e;
+
+// Throttler Status Bitmask
+#define THROTTLER_STATUS_BIT_SPL 0
+#define THROTTLER_STATUS_BIT_FPPT 1
+#define THROTTLER_STATUS_BIT_SPPT 2
+#define THROTTLER_STATUS_BIT_SPPT_APU 3
+#define THROTTLER_STATUS_BIT_THM_CORE 4
+#define THROTTLER_STATUS_BIT_THM_GFX 5
+#define THROTTLER_STATUS_BIT_THM_SOC 6
+#define THROTTLER_STATUS_BIT_TDC_VDD 7
+#define THROTTLER_STATUS_BIT_TDC_SOC 8
+
+typedef struct {
+ uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
+
+ uint16_t AverageGfxclkFrequency; //[MHz]
+ uint16_t AverageSocclkFrequency; //[MHz]
+ uint16_t AverageVclkFrequency; //[MHz]
+ uint16_t AverageFclkFrequency; //[MHz]
+
+ uint16_t AverageGfxActivity; //[centi]
+ uint16_t AverageUvdActivity; //[centi]
+
+ uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
+ uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
+ uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
+
+ uint16_t FanPwm; //[milli]
+ uint16_t CurrentSocketPower; //[mW]
+
+ uint16_t CoreFrequency[8]; //[MHz]
+ uint16_t CorePower[8]; //[mW]
+ uint16_t CoreTemperature[8]; //[centi-Celsius]
+ uint16_t L3Frequency[2]; //[MHz]
+ uint16_t L3Temperature[2]; //[centi-Celsius]
+
+ uint16_t GfxTemperature; //[centi-Celsius]
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t ThrottlerStatus;
+ uint16_t spare;
+
+ uint16_t StapmOriginalLimit; //[mW]
+ uint16_t StapmCurrentLimit; //[mW]
+ uint16_t ApuPower; //[mW]
+ uint16_t dGpuPower; //[mW]
+} SmuMetrics_t;
+
+
+// Workload bits
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
+
+#define TABLE_BIOS_IF 0 // Called by BIOS
+#define TABLE_WATERMARKS 1 // Called by Driver
+#define TABLE_CUSTOM_DPM 2 // Called by Driver
+#define TABLE_SPARE1 3
+#define TABLE_DPMCLOCKS 4 // Called by Driver
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
+#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
+#define TABLE_SMU_METRICS 7 // Called by Driver
+#define TABLE_COUNT 8
+
+
+#endif
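
The SMU12 SmuMetrics_t above reports ThrottlerStatus as a bitmask keyed by the THROTTLER_STATUS_BIT_* defines. A minimal decoder is sketched below; the bit positions are copied from the header, while the helper name and sample status value are illustrative.

/* Decoding SmuMetrics_t.ThrottlerStatus with the bit defines above. */
#include <stdint.h>
#include <stdio.h>

#define THROTTLER_STATUS_BIT_SPL      0
#define THROTTLER_STATUS_BIT_FPPT     1
#define THROTTLER_STATUS_BIT_SPPT     2
#define THROTTLER_STATUS_BIT_THM_CORE 4
#define THROTTLER_STATUS_BIT_THM_GFX  5

static int demo_throttling_on(uint16_t status, unsigned int bit)
{
	return (status >> bit) & 1;
}

int main(void)
{
	uint16_t status = (1u << THROTTLER_STATUS_BIT_FPPT) |
			  (1u << THROTTLER_STATUS_BIT_THM_GFX);

	printf("FPPT=%d THM_GFX=%d THM_CORE=%d\n",
	       demo_throttling_on(status, THROTTLER_STATUS_BIT_FPPT),
	       demo_throttling_on(status, THROTTLER_STATUS_BIT_THM_GFX),
	       demo_throttling_on(status, THROTTLER_STATUS_BIT_THM_CORE));
	return 0;
}
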
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
new file mode 100644
index 000000000000..a5b4df146713
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_TYPES_H__
+#define __SMU_TYPES_H__
+
+#define SMU_MESSAGE_TYPES \
+ __SMU_DUMMY_MAP(TestMessage), \
+ __SMU_DUMMY_MAP(GetSmuVersion), \
+ __SMU_DUMMY_MAP(GetDriverIfVersion), \
+ __SMU_DUMMY_MAP(SetAllowedFeaturesMaskLow), \
+ __SMU_DUMMY_MAP(SetAllowedFeaturesMaskHigh), \
+ __SMU_DUMMY_MAP(EnableAllSmuFeatures), \
+ __SMU_DUMMY_MAP(DisableAllSmuFeatures), \
+ __SMU_DUMMY_MAP(EnableSmuFeaturesLow), \
+ __SMU_DUMMY_MAP(EnableSmuFeaturesHigh), \
+ __SMU_DUMMY_MAP(DisableSmuFeaturesLow), \
+ __SMU_DUMMY_MAP(DisableSmuFeaturesHigh), \
+ __SMU_DUMMY_MAP(GetEnabledSmuFeaturesLow), \
+ __SMU_DUMMY_MAP(GetEnabledSmuFeaturesHigh), \
+ __SMU_DUMMY_MAP(SetWorkloadMask), \
+ __SMU_DUMMY_MAP(SetPptLimit), \
+ __SMU_DUMMY_MAP(SetDriverDramAddrHigh), \
+ __SMU_DUMMY_MAP(SetDriverDramAddrLow), \
+ __SMU_DUMMY_MAP(SetToolsDramAddrHigh), \
+ __SMU_DUMMY_MAP(SetToolsDramAddrLow), \
+ __SMU_DUMMY_MAP(TransferTableSmu2Dram), \
+ __SMU_DUMMY_MAP(TransferTableDram2Smu), \
+ __SMU_DUMMY_MAP(UseDefaultPPTable), \
+ __SMU_DUMMY_MAP(UseBackupPPTable), \
+ __SMU_DUMMY_MAP(RunBtc), \
+ __SMU_DUMMY_MAP(RequestI2CBus), \
+ __SMU_DUMMY_MAP(ReleaseI2CBus), \
+ __SMU_DUMMY_MAP(SetFloorSocVoltage), \
+ __SMU_DUMMY_MAP(SoftReset), \
+ __SMU_DUMMY_MAP(StartBacoMonitor), \
+ __SMU_DUMMY_MAP(CancelBacoMonitor), \
+ __SMU_DUMMY_MAP(EnterBaco), \
+ __SMU_DUMMY_MAP(SetSoftMinByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMaxByFreq), \
+ __SMU_DUMMY_MAP(SetHardMinByFreq), \
+ __SMU_DUMMY_MAP(SetHardMaxByFreq), \
+ __SMU_DUMMY_MAP(GetMinDpmFreq), \
+ __SMU_DUMMY_MAP(GetMaxDpmFreq), \
+ __SMU_DUMMY_MAP(GetDpmFreqByIndex), \
+ __SMU_DUMMY_MAP(GetDpmClockFreq), \
+ __SMU_DUMMY_MAP(GetSsVoltageByDpm), \
+ __SMU_DUMMY_MAP(SetMemoryChannelConfig), \
+ __SMU_DUMMY_MAP(SetGeminiMode), \
+ __SMU_DUMMY_MAP(SetGeminiApertureHigh), \
+ __SMU_DUMMY_MAP(SetGeminiApertureLow), \
+ __SMU_DUMMY_MAP(SetMinLinkDpmByIndex), \
+ __SMU_DUMMY_MAP(OverridePcieParameters), \
+ __SMU_DUMMY_MAP(OverDriveSetPercentage), \
+ __SMU_DUMMY_MAP(SetMinDeepSleepDcefclk), \
+ __SMU_DUMMY_MAP(ReenableAcDcInterrupt), \
+ __SMU_DUMMY_MAP(NotifyPowerSource), \
+ __SMU_DUMMY_MAP(SetUclkFastSwitch), \
+ __SMU_DUMMY_MAP(SetUclkDownHyst), \
+ __SMU_DUMMY_MAP(GfxDeviceDriverReset), \
+ __SMU_DUMMY_MAP(GetCurrentRpm), \
+ __SMU_DUMMY_MAP(SetVideoFps), \
+ __SMU_DUMMY_MAP(SetTjMax), \
+ __SMU_DUMMY_MAP(SetFanTemperatureTarget), \
+ __SMU_DUMMY_MAP(PrepareMp1ForUnload), \
+ __SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \
+ __SMU_DUMMY_MAP(DramLogSetDramAddrLow), \
+ __SMU_DUMMY_MAP(DramLogSetDramSize), \
+ __SMU_DUMMY_MAP(SetFanMaxRpm), \
+ __SMU_DUMMY_MAP(SetFanMinPwm), \
+ __SMU_DUMMY_MAP(ConfigureGfxDidt), \
+ __SMU_DUMMY_MAP(NumOfDisplays), \
+ __SMU_DUMMY_MAP(RemoveMargins), \
+ __SMU_DUMMY_MAP(ReadSerialNumTop32), \
+ __SMU_DUMMY_MAP(ReadSerialNumBottom32), \
+ __SMU_DUMMY_MAP(SetSystemVirtualDramAddrHigh), \
+ __SMU_DUMMY_MAP(SetSystemVirtualDramAddrLow), \
+ __SMU_DUMMY_MAP(WaflTest), \
+ __SMU_DUMMY_MAP(SetFclkGfxClkRatio), \
+ __SMU_DUMMY_MAP(AllowGfxOff), \
+ __SMU_DUMMY_MAP(DisallowGfxOff), \
+ __SMU_DUMMY_MAP(GetPptLimit), \
+ __SMU_DUMMY_MAP(GetDcModeMaxDpmFreq), \
+ __SMU_DUMMY_MAP(GetDebugData), \
+ __SMU_DUMMY_MAP(SetXgmiMode), \
+ __SMU_DUMMY_MAP(RunAfllBtc), \
+ __SMU_DUMMY_MAP(ExitBaco), \
+ __SMU_DUMMY_MAP(PrepareMp1ForReset), \
+ __SMU_DUMMY_MAP(PrepareMp1ForShutdown), \
+ __SMU_DUMMY_MAP(SetMGpuFanBoostLimitRpm), \
+ __SMU_DUMMY_MAP(GetAVFSVoltageByDpm), \
+ __SMU_DUMMY_MAP(PowerUpVcn), \
+ __SMU_DUMMY_MAP(PowerDownVcn), \
+ __SMU_DUMMY_MAP(PowerUpJpeg), \
+ __SMU_DUMMY_MAP(PowerDownJpeg), \
+ __SMU_DUMMY_MAP(BacoAudioD3PME), \
+ __SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunDcBtc), \
+ __SMU_DUMMY_MAP(RunGfxDcBtc), \
+ __SMU_DUMMY_MAP(RunSocDcBtc), \
+ __SMU_DUMMY_MAP(SetMemoryChannelEnable), \
+ __SMU_DUMMY_MAP(SetDfSwitchType), \
+ __SMU_DUMMY_MAP(GetVoltageByDpm), \
+ __SMU_DUMMY_MAP(GetVoltageByDpmOverdrive), \
+ __SMU_DUMMY_MAP(PowerUpVcn0), \
+ __SMU_DUMMY_MAP(PowerDownVcn0), \
+ __SMU_DUMMY_MAP(PowerUpVcn1), \
+ __SMU_DUMMY_MAP(PowerDownVcn1), \
+ __SMU_DUMMY_MAP(PowerUpGfx), \
+ __SMU_DUMMY_MAP(PowerDownIspByTile), \
+ __SMU_DUMMY_MAP(PowerUpIspByTile), \
+ __SMU_DUMMY_MAP(PowerDownSdma), \
+ __SMU_DUMMY_MAP(PowerUpSdma), \
+ __SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
+ __SMU_DUMMY_MAP(SetHardMinVcn), \
+ __SMU_DUMMY_MAP(Spare1), \
+ __SMU_DUMMY_MAP(Spare2), \
+ __SMU_DUMMY_MAP(SetAllowFclkSwitch), \
+ __SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
+ __SMU_DUMMY_MAP(ActiveProcessNotify), \
+ __SMU_DUMMY_MAP(SetCustomPolicy), \
+ __SMU_DUMMY_MAP(QueryPowerLimit), \
+ __SMU_DUMMY_MAP(SetGfxclkOverdriveByFreqVid), \
+ __SMU_DUMMY_MAP(SetHardMinDcfclkByFreq), \
+ __SMU_DUMMY_MAP(SetHardMinSocclkByFreq), \
+ __SMU_DUMMY_MAP(ControlIgpuATS), \
+ __SMU_DUMMY_MAP(SetMinVideoFclkFreq), \
+ __SMU_DUMMY_MAP(SetMinDeepSleepDcfclk), \
+ __SMU_DUMMY_MAP(ForcePowerDownGfx), \
+ __SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \
+ __SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn), \
+ __SMU_DUMMY_MAP(EnablePostCode), \
+ __SMU_DUMMY_MAP(GetGfxclkFrequency), \
+ __SMU_DUMMY_MAP(GetFclkFrequency), \
+ __SMU_DUMMY_MAP(GetMinGfxclkFrequency), \
+ __SMU_DUMMY_MAP(GetMaxGfxclkFrequency), \
+ __SMU_DUMMY_MAP(SetGfxCGPG), \
+ __SMU_DUMMY_MAP(SetSoftMaxGfxClk), \
+ __SMU_DUMMY_MAP(SetHardMinGfxClk), \
+ __SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn), \
+ __SMU_DUMMY_MAP(PowerGateMmHub), \
+ __SMU_DUMMY_MAP(UpdatePmeRestore), \
+ __SMU_DUMMY_MAP(GpuChangeState), \
+ __SMU_DUMMY_MAP(SetPowerLimitPercentage), \
+ __SMU_DUMMY_MAP(ForceGfxContentSave), \
+ __SMU_DUMMY_MAP(EnableTmdp48MHzRefclkPwrDown), \
+ __SMU_DUMMY_MAP(PowerGateAtHub), \
+ __SMU_DUMMY_MAP(SetSoftMinJpeg), \
+ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
+ __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
+ __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+enum smu_message_type {
+ SMU_MESSAGE_TYPES
+ SMU_MSG_MAX_COUNT,
+};
+
+enum smu_clk_type {
+ SMU_GFXCLK,
+ SMU_VCLK,
+ SMU_DCLK,
+ SMU_ECLK,
+ SMU_SOCCLK,
+ SMU_UCLK,
+ SMU_DCEFCLK,
+ SMU_DISPCLK,
+ SMU_PIXCLK,
+ SMU_PHYCLK,
+ SMU_FCLK,
+ SMU_SCLK,
+ SMU_MCLK,
+ SMU_PCIE,
+ SMU_OD_SCLK,
+ SMU_OD_MCLK,
+ SMU_OD_VDDC_CURVE,
+ SMU_OD_RANGE,
+ SMU_CLK_COUNT,
+};
+
+#define SMU_FEATURE_MASKS \
+ __SMU_DUMMY_MAP(DPM_PREFETCHER), \
+ __SMU_DUMMY_MAP(DPM_GFXCLK), \
+ __SMU_DUMMY_MAP(DPM_UCLK), \
+ __SMU_DUMMY_MAP(DPM_SOCCLK), \
+ __SMU_DUMMY_MAP(DPM_UVD), \
+ __SMU_DUMMY_MAP(DPM_VCE), \
+ __SMU_DUMMY_MAP(ULV), \
+ __SMU_DUMMY_MAP(DPM_MP0CLK), \
+ __SMU_DUMMY_MAP(DPM_LINK), \
+ __SMU_DUMMY_MAP(DPM_DCEFCLK), \
+ __SMU_DUMMY_MAP(DS_GFXCLK), \
+ __SMU_DUMMY_MAP(DS_SOCCLK), \
+ __SMU_DUMMY_MAP(DS_LCLK), \
+ __SMU_DUMMY_MAP(PPT), \
+ __SMU_DUMMY_MAP(TDC), \
+ __SMU_DUMMY_MAP(THERMAL), \
+ __SMU_DUMMY_MAP(GFX_PER_CU_CG), \
+ __SMU_DUMMY_MAP(RM), \
+ __SMU_DUMMY_MAP(DS_DCEFCLK), \
+ __SMU_DUMMY_MAP(ACDC), \
+ __SMU_DUMMY_MAP(VR0HOT), \
+ __SMU_DUMMY_MAP(VR1HOT), \
+ __SMU_DUMMY_MAP(FW_CTF), \
+ __SMU_DUMMY_MAP(LED_DISPLAY), \
+ __SMU_DUMMY_MAP(FAN_CONTROL), \
+ __SMU_DUMMY_MAP(GFX_EDC), \
+ __SMU_DUMMY_MAP(GFXOFF), \
+ __SMU_DUMMY_MAP(CG), \
+ __SMU_DUMMY_MAP(DPM_FCLK), \
+ __SMU_DUMMY_MAP(DS_FCLK), \
+ __SMU_DUMMY_MAP(DS_MP1CLK), \
+ __SMU_DUMMY_MAP(DS_MP0CLK), \
+ __SMU_DUMMY_MAP(XGMI), \
+ __SMU_DUMMY_MAP(DPM_GFX_PACE), \
+ __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \
+ __SMU_DUMMY_MAP(MEM_MVDD_SCALING), \
+ __SMU_DUMMY_MAP(DS_UCLK), \
+ __SMU_DUMMY_MAP(GFX_ULV), \
+ __SMU_DUMMY_MAP(FW_DSTATE), \
+ __SMU_DUMMY_MAP(BACO), \
+ __SMU_DUMMY_MAP(VCN_PG), \
+ __SMU_DUMMY_MAP(JPEG_PG), \
+ __SMU_DUMMY_MAP(USB_PG), \
+ __SMU_DUMMY_MAP(RSMU_SMN_CG), \
+ __SMU_DUMMY_MAP(APCC_PLUS), \
+ __SMU_DUMMY_MAP(GTHR), \
+ __SMU_DUMMY_MAP(GFX_DCS), \
+ __SMU_DUMMY_MAP(GFX_SS), \
+ __SMU_DUMMY_MAP(OUT_OF_BAND_MONITOR), \
+ __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
+ __SMU_DUMMY_MAP(MMHUB_PG), \
+ __SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(APCC_DFLL), \
+ __SMU_DUMMY_MAP(WAFL_CG),
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
+enum smu_feature_mask {
+ SMU_FEATURE_MASKS
+ SMU_FEATURE_COUNT,
+};
+
+#endif
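smu_types.h builds its lists as X-macros: SMU_MESSAGE_TYPES and SMU_FEATURE_MASKS expand __SMU_DUMMY_MAP over every entry, and the header immediately redefines that macro to emit SMU_MSG_* or SMU_FEATURE_*_BIT enumerators. The same list can be expanded again into other tables; a minimal sketch, not part of the patch, that stringifies each message for debug printing (the array name is hypothetical):

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type	/* expand each entry to its name string */
static const char * const smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

Because every entry in the list ends with a comma, the expansion drops cleanly into either an enum body or an array initializer.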
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 2fff4b16cb4e..d5314d12628a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -25,6 +25,12 @@
#include "amdgpu_smu.h"
+#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU11_DRIVER_IF_VERSION_VG20 0x13
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
+#define SMU11_DRIVER_IF_VERSION_NV10 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -42,20 +48,33 @@
#define SMU11_TOOL_SIZE 0x19000
+#define MAX_PCIE_CONF 2
+
#define CLK_MAP(clk, index) \
- [SMU_##clk] = index
+ [SMU_##clk] = {1, (index)}
#define FEA_MAP(fea) \
- [SMU_FEATURE_##fea##_BIT] = FEATURE_##fea##_BIT
+ [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
#define TAB_MAP(tab) \
- [SMU_TABLE_##tab] = TABLE_##tab
+ [SMU_TABLE_##tab] = {1, TABLE_##tab}
#define PWR_MAP(tab) \
- [SMU_POWER_SOURCE_##tab] = POWER_SOURCE_##tab
+ [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
#define WORKLOAD_MAP(profile, workload) \
- [profile] = workload
+ [profile] = {1, (workload)}
+
+static const struct smu_temperature_range smu11_thermal_policy[] =
+{
+ {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+ { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
+};
+
+struct smu_11_0_cmn2aisc_mapping {
+ int valid_mapping;
+ int map_to;
+};
struct smu_11_0_max_sustainable_clocks {
uint32_t display_clock;
@@ -71,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};
+struct smu_11_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+};
+
struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;
@@ -83,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
+ struct smu_11_0_pcie_table pcie_table;
};
struct smu_11_0_dpm_context {
@@ -113,6 +138,133 @@ enum smu_v11_0_baco_seq {
BACO_SEQ_COUNT,
};
-void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+int smu_v11_0_init_microcode(struct smu_context *smu);
+
+int smu_v11_0_load_microcode(struct smu_context *smu);
+
+int smu_v11_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_init_power(struct smu_context *smu);
+
+int smu_v11_0_fini_power(struct smu_context *smu);
+
+int smu_v11_0_check_fw_status(struct smu_context *smu);
+
+int smu_v11_0_setup_pptable(struct smu_context *smu);
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
+
+int smu_v11_0_check_pptable(struct smu_context *smu);
+
+int smu_v11_0_parse_pptable(struct smu_context *smu);
+
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu);
+
+int smu_v11_0_check_fw_version(struct smu_context *smu);
+
+int smu_v11_0_write_pptable(struct smu_context *smu);
+
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+
+int smu_v11_0_set_driver_table_location(struct smu_context *smu);
+
+int smu_v11_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t param);
+
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_v11_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v11_0_notify_display_change(struct smu_context *smu);
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
+
+int smu_v11_0_start_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_stop_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
+
+int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req);
+
+uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu);
+
+int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode);
+
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed);
+
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v11_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
+
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+bool smu_v11_0_baco_is_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
+
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v11_0_baco_enter(struct smu_context *smu);
+int smu_v11_0_baco_exit(struct smu_context *smu);
+
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
#endif
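The CLK_MAP/FEA_MAP/TAB_MAP/PWR_MAP/WORKLOAD_MAP rework above replaces bare index tables with struct smu_11_0_cmn2aisc_mapping designated initializers, so an array slot that was never listed stays zero-initialized and can be distinguished from a legitimate mapping to index 0. A minimal sketch of the intended lookup pattern, not part of the patch; the table contents and helper name are hypothetical and assume the ASIC's PPCLK_* enum from its driver-interface header:

static struct smu_11_0_cmn2aisc_mapping example_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),	/* expands to [SMU_GFXCLK] = {1, (PPCLK_GFXCLK)} */
};

static int example_get_clk_index(enum smu_clk_type clk)
{
	if (clk >= SMU_CLK_COUNT)
		return -EINVAL;
	if (!example_clk_map[clk].valid_mapping)	/* slot never populated */
		return -EINVAL;
	return example_clk_map[clk].map_to;
}

The navi10_ppt.c hunks later in this patch convert their per-ASIC tables and lookup helpers to this shape.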
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
index 373861ddccd0..406bfd187ce8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -120,7 +120,10 @@
#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45
#define PPSMC_MSG_BacoAudioD3PME 0x48
-#define PPSMC_Message_Count 0x49
+#define PPSMC_MSG_DALDisableDummyPstateChange 0x49
+#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A
+
+#define PPSMC_Message_Count 0x4B
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index 86cdc3393eac..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -39,21 +39,39 @@
#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+enum SMU_11_0_ODFEATURE_CAP {
+ SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
+ SMU_11_0_ODCAP_GFXCLK_CURVE,
+ SMU_11_0_ODCAP_UCLK_MAX,
+ SMU_11_0_ODCAP_POWER_LIMIT,
+ SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
+ SMU_11_0_ODCAP_FAN_SPEED_MIN,
+ SMU_11_0_ODCAP_TEMPERATURE_FAN,
+ SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
+ SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
+ SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODCAP_AUTO_UV_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_MEMORY,
+ SMU_11_0_ODCAP_FAN_CURVE,
+ SMU_11_0_ODCAP_COUNT,
+};
+
enum SMU_11_0_ODFEATURE_ID {
- SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
- SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
- SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
- SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
- SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
- SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
- SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
- SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
- SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
- SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
- SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
- SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_11_0_ODFEATURE_COUNT = 14,
};
#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
@@ -141,7 +159,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;
+#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+#endif
} __attribute__((packed));
#endif
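The new SMU_11_0_ODFEATURE_CAP enum gives each overdrive feature a small index, while SMU_11_0_ODFEATURE_ID keeps the bitmask view; since every bit is now derived from its cap index, the two cannot drift apart. A one-line sketch, not part of the patch, of converting between them (the helper name is hypothetical):

static inline uint32_t odcap_to_odfeature(enum SMU_11_0_ODFEATURE_CAP cap)
{
	/* e.g. SMU_11_0_ODCAP_UCLK_MAX -> SMU_11_0_ODFEATURE_UCLK_MAX */
	return 1U << cap;
}

The cap index is what navi10_od_feature_is_supported() later in this patch uses to index the overdrive table's capability array; the bitmask form remains for code that tests ODFEATURE bits directly.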
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
new file mode 100644
index 000000000000..d79e54b5ebf6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V12_0_H__
+#define __SMU_V12_0_H__
+
+#include "amdgpu_smu.h"
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+
+struct smu_12_0_cmn2aisc_mapping {
+ int valid_mapping;
+ int map_to;
+};
+
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg);
+
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v12_0_wait_for_response(struct smu_context *smu);
+
+int
+smu_v12_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t param);
+
+int smu_v12_0_check_fw_status(struct smu_context *smu);
+
+int smu_v12_0_check_fw_version(struct smu_context *smu);
+
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
+
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
+
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate);
+
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
+
+int smu_v12_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
+
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v12_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_populate_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v12_0_mode2_reset(struct smu_context *smu);
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h
new file mode 100644
index 000000000000..9ac9f3bd3664
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_12_0_PPSMC_H
+#define SMU_12_0_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_PowerUpGfx 0x6
+#define PPSMC_MSG_EnableGfxOff 0x7
+#define PPSMC_MSG_DisableGfxOff 0x8
+#define PPSMC_MSG_PowerDownIspByTile 0x9 // ISP is power gated by default
+#define PPSMC_MSG_PowerUpIspByTile 0xA
+#define PPSMC_MSG_PowerDownVcn 0xB // VCN is power gated by default
+#define PPSMC_MSG_PowerUpVcn 0xC
+#define PPSMC_MSG_PowerDownSdma 0xD // SDMA is power gated by default
+#define PPSMC_MSG_PowerUpSdma 0xE
+#define PPSMC_MSG_SetHardMinIspclkByFreq 0xF
+#define PPSMC_MSG_SetHardMinVcn 0x10 // For wireless display
+#define PPSMC_MSG_spare1 0x11
+#define PPSMC_MSG_spare2 0x12
+#define PPSMC_MSG_SetAllowFclkSwitch 0x13
+#define PPSMC_MSG_SetMinVideoGfxclkFreq 0x14
+#define PPSMC_MSG_ActiveProcessNotify 0x15
+#define PPSMC_MSG_SetCustomPolicy 0x16
+#define PPSMC_MSG_SetVideoFps 0x17
+#define PPSMC_MSG_SetDisplayCount 0x18 // Moved to VBIOS
+#define PPSMC_MSG_QueryPowerLimit 0x19 //Driver to look up sustainable clocks for VQ
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x1A
+#define PPSMC_MSG_SetDriverDramAddrLow 0x1B
+#define PPSMC_MSG_TransferTableSmu2Dram 0x1C
+#define PPSMC_MSG_TransferTableDram2Smu 0x1D
+#define PPSMC_MSG_GfxDeviceDriverReset 0x1E
+#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F
+#define PPSMC_MSG_SetHardMinDcfclkByFreq 0x20 // Moved to VBIOS
+#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21
+#define PPSMC_MSG_ControlIgpuATS 0x22
+#define PPSMC_MSG_SetMinVideoFclkFreq 0x23
+#define PPSMC_MSG_SetMinDeepSleepDcfclk 0x24 // Moved to VBIOS
+#define PPSMC_MSG_ForcePowerDownGfx 0x25
+#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 // Moved to VBIOS
+#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 // Moved to VBIOS and is SetDppclkFreq
+#define PPSMC_MSG_SetSoftMinVcn 0x28
+#define PPSMC_MSG_EnablePostCode 0x29
+#define PPSMC_MSG_GetGfxclkFrequency 0x2A
+#define PPSMC_MSG_GetFclkFrequency 0x2B
+#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
+#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
+#define PPSMC_MSG_SoftReset 0x2E // Not supported
+#define PPSMC_MSG_SetGfxCGPG 0x2F
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x30
+#define PPSMC_MSG_SetHardMinGfxClk 0x31
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
+#define PPSMC_MSG_SetSoftMaxVcn 0x34
+#define PPSMC_MSG_PowerGateMmHub 0x35
+#define PPSMC_MSG_UpdatePmeRestore 0x36 // Moved to VBIOS
+#define PPSMC_MSG_GpuChangeState 0x37
+#define PPSMC_MSG_SetPowerLimitPercentage 0x38
+#define PPSMC_MSG_ForceGfxContentSave 0x39
+#define PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x3A // Moved to VBIOS
+#define PPSMC_MSG_PowerDownJpeg 0x3B
+#define PPSMC_MSG_PowerUpJpeg 0x3C
+#define PPSMC_MSG_PowerGateAtHub 0x3D
+#define PPSMC_MSG_SetSoftMinJpeg 0x3E
+#define PPSMC_MSG_SetHardMinFclkByFreq 0x3F
+#define PPSMC_Message_Count 0x40
+
+
+//Argument for PPSMC_MSG_GpuChangeState
+enum {
+ eGpuChangeState_D0Entry = 1,
+ eGpuChangeState_D3Entry,
+};
+
+#endif
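The SMU 12 PPSMC header above pairs message IDs with their arguments; PPSMC_MSG_GpuChangeState, for instance, takes one of the eGpuChangeState_* values defined at the bottom. A minimal sketch, not part of the patch, of issuing a D0-entry request through the common helper declared in smu_v12_0.h earlier in this series; the wrapper name is hypothetical and it assumes the ASIC maps SMU_MSG_GpuChangeState to this PPSMC message:

static int example_request_d0_entry(struct smu_context *smu)
{
	/* param carries the eGpuChangeState_* argument defined above */
	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GpuChangeState,
					     eGpuChangeState_D0Entry);
}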
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index a0883038f3c3..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -120,7 +120,8 @@
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
-#define PPSMC_Message_Count 0x61
+#define PPSMC_MSG_DFCstateControl 0x63
+#define PPSMC_Message_Count 0x64
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index b81c7e715dc9..0d73a49166af 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -35,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -50,9 +52,9 @@
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
#define MSG_MAP(msg, index) \
- [SMU_MSG_##msg] = index
+ [SMU_MSG_##msg] = {1, (index)}
-static int navi10_message_map[SMU_MSG_MAX_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
@@ -117,9 +119,13 @@ static int navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
+ MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
+ MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange),
+ MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm),
+ MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive),
};
-static int navi10_clk_map[SMU_CLK_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(SCLK, PPCLK_GFXCLK),
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -134,7 +140,7 @@ static int navi10_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
-static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_GFX_PACE),
@@ -177,9 +183,10 @@ static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(TEMP_DEPENDENT_VMIN),
FEA_MAP(MMHUB_PG),
FEA_MAP(ATHUB_PG),
+ FEA_MAP(APCC_DFLL),
};
-static int navi10_table_map[SMU_TABLE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(WATERMARKS),
TAB_MAP(AVFS),
@@ -194,96 +201,110 @@ static int navi10_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PACE),
};
-static int navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static int navi10_workload_map[] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
- int val;
- if (index > SMU_MSG_MAX_COUNT)
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
- val = navi10_message_map[index];
- if (val > PPSMC_Message_Count)
+ mapping = navi10_message_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_CLK_COUNT)
return -EINVAL;
- val = navi10_clk_map[index];
- if (val >= PPCLK_COUNT)
+ mapping = navi10_clk_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
- val = navi10_feature_mask_map[index];
- if (val > 64)
+ mapping = navi10_feature_mask_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
- val = navi10_table_map[index];
- if (val >= TABLE_COUNT)
+ mapping = navi10_table_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
- val = navi10_pwr_src_map[index];
- if (val >= POWER_SOURCE_COUNT)
+ mapping = navi10_pwr_src_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
- val = navi10_workload_map[profile];
+ mapping = navi10_workload_map[profile];
+ if (!(mapping.valid_mapping)) {
+ return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static bool is_asic_secure(struct smu_context *smu)
@@ -313,40 +334,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
memset(feature_mask, 0, sizeof(uint32_t) * num);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_LINK_BIT)
- | FEATURE_MASK(FEATURE_GFX_ULV_BIT)
| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
+ | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
| FEATURE_MASK(FEATURE_VR0HOT_BIT)
| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
| FEATURE_MASK(FEATURE_THERMAL_BIT)
| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
+ | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
| FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT);
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT)
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+
+ if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
- if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
- /* TODO: remove it once fw fix the bug */
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- }
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
@@ -357,6 +390,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
+ if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
/* only for navi10 A0 */
@@ -523,6 +559,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -532,17 +572,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
- if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+ mutex_lock(&smu->metrics_lock);
+ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
@@ -570,6 +613,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
+ int i;
driver_ppt = table_context->driver_pptable;
@@ -600,6 +644,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
+ for (i = 0; i < MAX_PCIE_CONF; i++) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
+ }
+
return 0;
}
@@ -629,6 +678,31 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
return ret;
}
+static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = true;
+ }
+
+ return ret;
+}
+
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -662,13 +736,39 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
+static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
+ enum SMU_11_0_ODSETTING_ID setting,
+ uint32_t *min, uint32_t *max)
+{
+ if (min)
+ *min = od_table->min[setting];
+ if (max)
+ *max = od_table->max[setting];
+}
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
+ uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+ uint32_t min_value, max_value;
switch (clk_type) {
case SMU_GFXCLK:
@@ -719,6 +819,118 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
+ case SMU_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
+ break;
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
+ break;
+ size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ min_value, max_value);
+ }
+
+ break;
default:
break;
}
@@ -744,6 +956,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+ /* There are only 2 levels for fine grained DPM */
+ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
@@ -768,13 +986,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
- ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
if (ret)
return ret;
smu->pstate_sclk = min_sclk_freq * 100;
- ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
if (ret)
return ret;
@@ -794,6 +1012,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
case SMU_GFXCLK:
case SMU_DCEFCLK:
case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
if (ret)
return ret;
@@ -827,7 +1047,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
if (ret)
return ret;
ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
@@ -877,7 +1097,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -904,7 +1124,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -927,8 +1147,6 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
- if (ret)
- return ret;
*value = metrics.AverageSocketPower << 8;
@@ -987,8 +1205,6 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
- if (ret)
- return ret;
*speed = metrics.CurrFanSpeed;
@@ -1017,7 +1233,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
- uint16_t workload_type = 0;
+ int16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1050,6 +1266,9 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ return -EINVAL;
+
result = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
@@ -1178,6 +1397,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type);
@@ -1225,7 +1446,7 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
return ret;
}
-static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
+static int navi10_notify_smc_display_config(struct smu_context *smu)
{
struct smu_clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
@@ -1238,7 +1459,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu_display_clock_voltage_request(smu, &clock_req)) {
+
+ ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
+ if (!ret) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -1367,6 +1590,10 @@ static int navi10_read_sensor(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
+ if(!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1388,8 +1615,9 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- return -EINVAL;
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1423,194 +1651,94 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
return 0;
}
-static int navi10_get_ppfeature_status(struct smu_context *smu,
- char *buf)
-{
- static const char *ppfeature_name[] = {
- "DPM_PREFETCHER",
- "DPM_GFXCLK",
- "DPM_GFX_PACE",
- "DPM_UCLK",
- "DPM_SOCCLK",
- "DPM_MP0CLK",
- "DPM_LINK",
- "DPM_DCEFCLK",
- "MEM_VDDCI_SCALING",
- "MEM_MVDD_SCALING",
- "DS_GFXCLK",
- "DS_SOCCLK",
- "DS_LCLK",
- "DS_DCEFCLK",
- "DS_UCLK",
- "GFX_ULV",
- "FW_DSTATE",
- "GFXOFF",
- "BACO",
- "VCN_PG",
- "JPEG_PG",
- "USB_PG",
- "RSMU_SMN_CG",
- "PPT",
- "TDC",
- "GFX_EDC",
- "APCC_PLUS",
- "GTHR",
- "ACDC",
- "VR0HOT",
- "VR1HOT",
- "FW_CTF",
- "FAN_CONTROL",
- "THERMAL",
- "GFX_DCS",
- "RM",
- "LED_DISPLAY",
- "GFX_SS",
- "OUT_OF_BAND_MONITOR",
- "TEMP_DEPENDENT_VMIN",
- "MMHUB_PG",
- "ATHUB_PG"};
- static const char *output_title[] = {
- "FEATURES",
- "BITMASK",
- "ENABLEMENT"};
- uint64_t features_enabled;
- uint32_t feature_mask[2];
- int i;
- int ret = 0;
- int size = 0;
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- PP_ASSERT_WITH_CODE(!ret,
- "[GetPPfeatureStatus] Failed to get enabled smc features!",
- return ret);
- features_enabled = (uint64_t)feature_mask[0] |
- (uint64_t)feature_mask[1] << 32;
-
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
- output_title[0],
- output_title[1],
- output_title[2]);
- for (i = 0; i < (sizeof(ppfeature_name) / sizeof(ppfeature_name[0])); i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
- ppfeature_name[i],
- 1ULL << i,
- (features_enabled & (1ULL << i)) ? "Y" : "N");
- }
-
- return size;
-}
+static int navi10_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
-static int navi10_enable_smc_features(struct smu_context *smu,
- bool enabled,
- uint64_t feature_masks)
+static int navi10_set_standard_performance_level(struct smu_context *smu)
{
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_low, feature_high;
- uint32_t feature_mask[2];
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
- feature_low = (uint32_t)(feature_masks & 0xFFFFFFFF);
- feature_high = (uint32_t)((feature_masks & 0xFFFFFFFF00000000ULL) >> 32);
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
+ uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
+ break;
+ case CHIP_NAVI14:
+ sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK;
+ uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK;
+ break;
+ default:
+ /* by default, this is the same as auto performance level */
+ return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
if (ret)
return ret;
- mutex_lock(&feature->mutex);
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- mutex_unlock(&feature->mutex);
-
- return 0;
-}
-
-static int navi10_set_ppfeature_status(struct smu_context *smu,
- uint64_t new_ppfeature_masks)
-{
- uint64_t features_enabled;
- uint32_t feature_mask[2];
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- PP_ASSERT_WITH_CODE(!ret,
- "[SetPPfeatureStatus] Failed to get enabled smc features!",
- return ret);
- features_enabled = (uint64_t)feature_mask[0] |
- (uint64_t)feature_mask[1] << 32;
-
- features_to_disable =
- features_enabled & ~new_ppfeature_masks;
- features_to_enable =
- ~features_enabled & new_ppfeature_masks;
-
- pr_debug("features_to_disable 0x%llx\n", features_to_disable);
- pr_debug("features_to_enable 0x%llx\n", features_to_enable);
-
- if (features_to_disable) {
- ret = navi10_enable_smc_features(smu, false, features_to_disable);
- PP_ASSERT_WITH_CODE(!ret,
- "[SetPPfeatureStatus] Failed to disable smc features!",
- return ret);
- }
-
- if (features_to_enable) {
- ret = navi10_enable_smc_features(smu, true, features_to_enable);
- PP_ASSERT_WITH_CODE(!ret,
- "[SetPPfeatureStatus] Failed to enable smc features!",
- return ret);
- }
-
- return 0;
+ return ret;
}
-static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+static int navi10_set_peak_performance_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t sclk_freq = 0, uclk_freq = 0;
- uint32_t uclk_level = 0;
- switch (adev->pdev->revision) {
- case 0xf0: /* XTX */
- case 0xc0:
- sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
break;
- case 0xf1: /* XT */
- case 0xc1:
- sclk_freq = NAVI10_PEAK_SCLK_XT;
+ case CHIP_NAVI14:
+ switch (adev->pdev->revision) {
+ case 0xc7: /* XT */
+ case 0xf4:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+ break;
+ case 0xc1: /* XTM */
+ case 0xf2:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+ break;
+ case 0xc3: /* XLM */
+ case 0xf3:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ case 0xc5: /* XTX */
+ case 0xf6:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+ break;
+ }
break;
- default: /* XL */
- sclk_freq = NAVI10_PEAK_SCLK_XL;
+ case CHIP_NAVI12:
+ sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
break;
+ default:
+ ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
+ if (ret)
+ return ret;
}
- ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
- if (ret)
- return ret;
- ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+ ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
if (ret)
return ret;
@@ -1624,19 +1752,45 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
return ret;
}
-static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int navi10_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
{
int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = navi10_set_standard_performance_level(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = navi10_set_peak_clock_by_device(smu);
+ ret = navi10_set_peak_performance_level(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- ret = -EINVAL;
break;
}
-
return ret;
}
@@ -1649,13 +1803,474 @@ static int navi10_get_thermal_temperature_range(struct smu_context *smu,
if (!range || !powerplay_table)
return -EINVAL;
- /* The unit is temperature */
- range->min = 0;
- range->max = powerplay_table->software_shutdown_temp;
+ range->max = powerplay_table->software_shutdown_temp *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
+ bool disable_memory_clock_switch)
+{
+ int ret = 0;
+ struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
+ (struct smu_11_0_max_sustainable_clocks *)
+ smu->smu_table.max_sustainable_clocks;
+ uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
+ uint32_t max_memory_clock = max_sustainable_clocks->uclock;
+
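+ /*
+ * Disabling uclk switching pins the hard minimum at the max sustainable
+ * memory clock; re-enabling restores the hard minimum requested by DAL.
+ */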
+ if (smu->disable_uclk_switch == disable_memory_clock_switch)
+ return 0;
+
+ if (disable_memory_clock_switch)
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
+ else
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
+
+ if (!ret)
+ smu->disable_uclk_switch = disable_memory_clock_switch;
+
+ return ret;
+}
+
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
+static int navi10_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t asic_default_power_limit = 0;
+ int ret = 0;
+ int power_src;
+
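+ /*
+ * Establish the default power limit once: query the AC PPT limit from the
+ * SMU when the PPT feature is enabled, otherwise fall back to the pptable.
+ */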
+ if (!smu->power_limit) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ power_src << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, &asic_default_power_limit);
+ } else {
+ /* the last hope to figure out the ppt limit */
+ if (!pptable) {
+ pr_err("Cannot get PPT limit due to pptable missing!");
+ return -EINVAL;
+ }
+ asic_default_power_limit =
+ pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+ }
+
+ smu->power_limit = asic_default_power_limit;
+ }
+
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
+ else
+ *limit = smu->power_limit;
+
+ return 0;
+}
+
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
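+ /*
+ * Message argument layout: bits 31:16 = link level, 15:8 = PCIe gen
+ * speed and 7:0 = lane count, each clamped to the reported caps.
+ */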
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+
+ if (ret)
+ return ret;
+
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+ if (pptable->PcieLaneCount[i] > pcie_width_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table)
+{
+ pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+ pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+ pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+ pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+ pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+ uint16_t *voltage,
+ uint32_t freq)
+{
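+ /* argument: clock id in the upper 16 bits, requested frequency in the lower 16 bits */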
+ uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
+ uint32_t value = 0;
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetVoltageByDpm,
+ param);
+ if (ret) {
+ pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &value);
+ *voltage = (uint16_t)value;
return 0;
}
+static int navi10_setup_od_limits(struct smu_context *smu)
+{
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+ if (!smu->smu_table.power_play_table) {
+ pr_err("powerplay table uninitialized!\n");
+ return -ENOENT;
+ }
+ powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ overdrive_table = &powerplay_table->overdrive_table;
+ if (!smu->od_settings) {
+ smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+ if (!smu->od_settings)
+ return -ENOMEM;
+ } else {
+ memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+ }
+ return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize)
+{
+ OverDriveTable_t *od_table, *boot_od_table;
+ int ret = 0;
+
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
+ if (od_table) {
+ if (!od_table->GfxclkVolt1) {
+ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+ &od_table->GfxclkVolt1,
+ od_table->GfxclkFreq1);
+ if (ret)
+ od_table->GfxclkVolt1 = 0;
+ if (boot_od_table)
+ boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1;
+ }
+
+ if (!od_table->GfxclkVolt2) {
+ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+ &od_table->GfxclkVolt2,
+ od_table->GfxclkFreq2);
+ if (ret)
+ od_table->GfxclkVolt2 = 0;
+ if (boot_od_table)
+ boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2;
+ }
+
+ if (!od_table->GfxclkVolt3) {
+ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+ &od_table->GfxclkVolt3,
+ od_table->GfxclkFreq3);
+ if (ret)
+ od_table->GfxclkVolt3 = 0;
+ if (boot_od_table)
+ boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3;
+ }
+ }
+ }
+
+ if (od_table) {
+ navi10_dump_od_table(od_table);
+ }
+
+ return ret;
+}
+
+static int navi10_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int i;
+ int ret = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table;
+ struct smu_11_0_overdrive_table *od_settings;
+ enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+ uint16_t *freq_ptr, *voltage_ptr;
+ od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+ if (!smu->od_enabled) {
+ pr_warn("OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ pr_err("OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ od_settings = smu->od_settings;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+ pr_warn("GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (!table_context->overdrive_table) {
+ pr_err("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
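+ /* user input comes as (index, frequency) pairs: index 0 = GfxclkFmin, 1 = GfxclkFmax */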
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+ switch (input[i]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ pr_info("Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[i + 1];
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+ pr_warn("UCLK_MAX not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 2) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (input[0] != 1) {
+ pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+ pr_info("Supported indices: [1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+ if (ret)
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ pr_err("Overdrive table was not initialized!\n");
+ return -EINVAL;
+ }
+ memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ /* no lock needed because smu_od_edit_dpm_table has it */
+ ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ if (ret)
+ return ret;
+ break;
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+ pr_warn("GFXCLK_CURVE not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 3) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (!od_table) {
+ pr_info("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+
+ switch (input[0]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+ freq_ptr = &od_table->GfxclkFreq1;
+ voltage_ptr = &od_table->GfxclkVolt1;
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+ freq_ptr = &od_table->GfxclkFreq2;
+ voltage_ptr = &od_table->GfxclkVolt2;
+ break;
+ case 2:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+ freq_ptr = &od_table->GfxclkFreq3;
+ voltage_ptr = &od_table->GfxclkVolt3;
+ break;
+ default:
+ pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+ pr_info("Supported indices: [0, 1, 2]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+ if (ret)
+ return ret;
+ /* Allow setting zero to disable the OverDrive VDDC curve */
+ if (input[2] != 0) {
+ ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[1];
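+ /* the user-supplied voltage is in mV; the OD table stores it scaled by NAVI10_VOLTAGE_SCALE */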
+ *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+ pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+ } else {
+ /* If setting 0, disable all voltage curve settings */
+ od_table->GfxclkVolt1 = 0;
+ od_table->GfxclkVolt2 = 0;
+ od_table->GfxclkVolt3 = 0;
+ }
+ navi10_dump_od_table(od_table);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return ret;
+}
+
+static int navi10_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ if (ret)
+ pr_err("RunBtc failed!\n");
+
+ return ret;
+}
+
+static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
+{
+ int result = 0;
+
+ if (!enable)
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+ else
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+
+ return result;
+}
+
+static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
+{
+ uint32_t uclk_count, uclk_min, uclk_max;
+ uint32_t smu_version;
+ int ret = 0;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ /* This workaround is available only for SMC firmware 42.50 or later */
+ if (smu_version < 0x2A3200)
+ return 0;
+
+ ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+ if (ret)
+ return ret;
+
+ /* Force UCLK out of the highest DPM */
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
+ if (ret)
+ return ret;
+
+ /* Revert the UCLK Hardmax */
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
+ if (ret)
+ return ret;
+
+ /*
+ * In this case, the SMU already disabled the dummy pstate during the
+ * enablement of UCLK DPM, so we have to re-enable it here.
+ */
+ return navi10_dummy_pstate_control(smu, true);
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1671,6 +2286,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
+ .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
.print_clk_levels = navi10_print_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
@@ -1678,7 +2294,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
.pre_display_config_changed = navi10_pre_display_config_changed,
.display_config_changed = navi10_display_config_changed,
- .notify_smc_dispaly_config = navi10_notify_smc_dispaly_config,
+ .notify_smc_display_config = navi10_notify_smc_display_config,
.force_dpm_limit_value = navi10_force_dpm_limit_value,
.unforce_dpm_levels = navi10_unforce_dpm_levels,
.is_dpm_running = navi10_is_dpm_running,
@@ -1690,17 +2306,69 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_watermarks_table = navi10_set_watermarks_table,
.read_sensor = navi10_read_sensor,
.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
- .get_ppfeature_status = navi10_get_ppfeature_status,
- .set_ppfeature_status = navi10_set_ppfeature_status,
.set_performance_level = navi10_set_performance_level,
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
+ .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
+ .get_power_limit = navi10_get_power_limit,
+ .update_pcie_parameters = navi10_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_enter = smu_v11_0_baco_enter,
+ .baco_exit = smu_v11_0_baco_exit,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .set_default_od_settings = navi10_set_default_od_settings,
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
+ .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &navi10_ppt_funcs;
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..2abb4ba01db1 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,31 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI10_UMD_PSTATE_PROFILING_GFXCLK (1300)
+#define NAVI10_UMD_PSTATE_PROFILING_SOCCLK (980)
+#define NAVI10_UMD_PSTATE_PROFILING_MEMCLK (625)
+#define NAVI10_UMD_PSTATE_PROFILING_VCLK (980)
+#define NAVI10_UMD_PSTATE_PROFILING_DCLK (850)
+
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+
+#define NAVI14_UMD_PSTATE_PROFILING_GFXCLK (1200)
+#define NAVI14_UMD_PSTATE_PROFILING_SOCCLK (900)
+#define NAVI14_UMD_PSTATE_PROFILING_MEMCLK (600)
+#define NAVI14_UMD_PSTATE_PROFILING_VCLK (900)
+#define NAVI14_UMD_PSTATE_PROFILING_DCLK (800)
+
+#define NAVI12_UMD_PSTATE_PEAK_GFXCLK (1100)
+
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
new file mode 100644
index 000000000000..861e6410363b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -0,0 +1,931 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_internal.h"
+#include "soc15_common.h"
+#include "smu_v12_0_ppsmc.h"
+#include "smu12_driver_if.h"
+#include "smu_v12_0.h"
+#include "renoir_ppt.h"
+
+
+#define CLK_MAP(clk, index) \
+ [SMU_##clk] = {1, (index)}
+
+#define MSG_MAP(msg, index) \
+ [SMU_MSG_##msg] = {1, (index)}
+
+#define TAB_MAP_VALID(tab) \
+ [SMU_TABLE_##tab] = {1, TABLE_##tab}
+
+#define TAB_MAP_INVALID(tab) \
+ [SMU_TABLE_##tab] = {0, TABLE_##tab}
+
+static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
+ MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff),
+ MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile),
+ MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn),
+ MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma),
+ MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma),
+ MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq),
+ MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn),
+ MSG_MAP(Spare1, PPSMC_MSG_spare1),
+ MSG_MAP(Spare2, PPSMC_MSG_spare2),
+ MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch),
+ MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq),
+ MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify),
+ MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy),
+ MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps),
+ MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount),
+ MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset),
+ MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid),
+ MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq),
+ MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq),
+ MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS),
+ MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq),
+ MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk),
+ MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx),
+ MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq),
+ MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq),
+ MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn),
+ MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode),
+ MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency),
+ MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency),
+ MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
+ MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk),
+ MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk),
+ MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq),
+ MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq),
+ MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn),
+ MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub),
+ MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore),
+ MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState),
+ MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage),
+ MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave),
+ MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg),
+ MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub),
+ MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg),
+ MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq),
+};
+
+static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, CLOCK_GFXCLK),
+ CLK_MAP(SCLK, CLOCK_GFXCLK),
+ CLK_MAP(SOCCLK, CLOCK_SOCCLK),
+ CLK_MAP(UCLK, CLOCK_UMCCLK),
+ CLK_MAP(MCLK, CLOCK_UMCCLK),
+};
+
+static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(WATERMARKS),
+ TAB_MAP_INVALID(CUSTOM_DPM),
+ TAB_MAP_VALID(DPMCLOCKS),
+ TAB_MAP_VALID(SMU_METRICS),
+};
+
+static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_12_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ mapping = renoir_message_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_12_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ mapping = renoir_clk_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_12_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ mapping = renoir_table_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int renoir_get_metrics_table(struct smu_context *smu,
+ SmuMetrics_t *metrics_table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
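+ /* refresh the cached metrics table at most once every 100 ms */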
+ mutex_lock(&smu->metrics_lock);
+ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)smu_table->metrics_table, false);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+ smu_table->metrics_time = jiffies;
+ }
+
+ memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ if (!smu_table->clocks_table)
+ return -ENOMEM;
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * This interface is just for getting the ultimate uclk frequency and shouldn't
+ * introduce other similar helpers, which would result in too many callbacks.
+ */
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
+
+ return 0;
+}
+
+static int renoir_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* the clock values retrieved from the metrics table are in MHz */
+ cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
+ if (!ret) {
+ /* the driver only knows the min/max gfx_clk; add level 1 for all other gfx clocks */
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+
+ size += sprintf(buf + size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sprintf(buf + size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sprintf(buf + size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ }
+ return size;
+ case SMU_SOCCLK:
+ count = NUM_SOCCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_SOCCLK];
+ break;
+ case SMU_MCLK:
+ count = NUM_MEMCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+ break;
+ case SMU_DCEFCLK:
+ count = NUM_DCFCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_DCFCLK];
+ break;
+ case SMU_FCLK:
+ count = NUM_FCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_FCLK];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ }
+
+ return size;
+}
+
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+
+ return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ SmuMetrics_t metrics;
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ *value = metrics.ClockFrequency[clk_id];
+
+ return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+ {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ *value = (metrics.GfxTemperature / 100) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int renoir_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *value = metrics.AverageGfxActivity / 100;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+
+ uint32_t pplib_workload = 0;
+
+ switch (profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_COUNT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (sclk_mask)
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (sclk_mask)
+ /* sclk is the gfxclk and only has three levels: min, current and max */
+ *sclk_mask = 3 - 1;
+
+ if (mclk_mask)
+ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+ if (soc_mask)
+ *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This interface gets the dpm clock table for DC.
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+ clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+ clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+ clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+ clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+ }
+
+ return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+
+ int ret = 0;
+ uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (soft_min_level > 2 || soft_max_level > 2) {
+ pr_info("Currently sclk only support 3 levels on APU\n");
+ return -EINVAL;
+ }
+
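+ /* gfxclk exposes three levels: 0 = min freq, 1 = RENOIR_UMD_PSTATE_GFXCLK, 2 = max freq */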
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ soft_max_level == 0 ? min_freq :
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ soft_min_level == 2 ? max_freq :
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_workload_get_type(smu, profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on RENOIR\n", profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = renoir_set_peak_clock_by_device(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Save the watermark settings into the pplib smu structure and
+ * also pass the data to the smu controller.
+ */
+static int renoir_set_watermarks_table(
+ struct smu_context *smu,
+ void *watermarks,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = watermarks;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ /* pass data to smu controller */
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ return 0;
+}
+
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on renoir.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
+static int renoir_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ ret = renoir_get_gpu_temperature(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = smu_v12_0_read_sensor(smu, sensor, data, size);
+ }
+ mutex_unlock(&smu->sensor_lock);
+
+ return ret;
+}
+
+static const struct pptable_funcs renoir_ppt_funcs = {
+ .get_smu_msg_index = renoir_get_smu_msg_index,
+ .get_smu_clk_index = renoir_get_smu_clk_index,
+ .get_smu_table_index = renoir_get_smu_table_index,
+ .tables_init = renoir_tables_init,
+ .set_power_state = NULL,
+ .get_dpm_clk_limited = renoir_get_dpm_clk_limited,
+ .print_clk_levels = renoir_print_clk_levels,
+ .get_current_power_state = renoir_get_current_power_state,
+ .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
+ .get_current_clk_freq_by_table = renoir_get_current_clk_freq_by_table,
+ .force_dpm_limit_value = renoir_force_dpm_limit_value,
+ .unforce_dpm_levels = renoir_unforce_dpm_levels,
+ .get_workload_type = renoir_get_workload_type,
+ .get_profiling_clk_mask = renoir_get_profiling_clk_mask,
+ .force_clk_levels = renoir_force_clk_levels,
+ .set_power_profile_mode = renoir_set_power_profile_mode,
+ .set_performance_level = renoir_set_performance_level,
+ .get_dpm_clock_table = renoir_get_dpm_clock_table,
+ .set_watermarks_table = renoir_set_watermarks_table,
+ .get_power_profile_mode = renoir_get_power_profile_mode,
+ .read_sensor = renoir_read_sensor,
+ .check_fw_status = smu_v12_0_check_fw_status,
+ .check_fw_version = smu_v12_0_check_fw_version,
+ .powergate_sdma = smu_v12_0_powergate_sdma,
+ .powergate_vcn = smu_v12_0_powergate_vcn,
+ .powergate_jpeg = smu_v12_0_powergate_jpeg,
+ .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .read_smc_arg = smu_v12_0_read_arg,
+ .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
+ .gfx_off_control = smu_v12_0_gfx_off_control,
+ .init_smc_tables = smu_v12_0_init_smc_tables,
+ .fini_smc_tables = smu_v12_0_fini_smc_tables,
+ .populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_enabled_mask = smu_v12_0_get_enabled_mask,
+ .get_current_clk_freq = smu_v12_0_get_current_clk_freq,
+ .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+ .set_driver_table_location = smu_v12_0_set_driver_table_location,
+};
+
+void renoir_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &renoir_ppt_funcs;
+ smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+ smu->is_apu = true;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
new file mode 100644
index 000000000000..2a390ddd37dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RENOIR_PPT_H__
+#define __RENOIR_PPT_H__
+
+extern void renoir_set_ppt_funcs(struct smu_context *smu);
+
+/* UMD PState Renoir Msg Parameters in MHz */
+#define RENOIR_UMD_PSTATE_GFXCLK 700
+#define RENOIR_UMD_PSTATE_SOCCLK 678
+#define RENOIR_UMD_PSTATE_FCLK 800
+
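+/* Look up the frequency of the given dpm_level for clk_type in the DPM clocks table. */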
+#define GET_DPM_CUR_FREQ(table, clk_type, dpm_level, freq) \
+ do { \
+ switch (clk_type) { \
+ case SMU_SOCCLK: \
+ freq = table->SocClocks[dpm_level].Freq; \
+ break; \
+ case SMU_MCLK: \
+ freq = table->MemClocks[dpm_level].Freq; \
+ break; \
+ case SMU_DCEFCLK: \
+ freq = table->DcfClocks[dpm_level].Freq; \
+ break; \
+ case SMU_FCLK: \
+ freq = table->FClocks[dpm_level].Freq; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
new file mode 100644
index 000000000000..7bd200ffcda8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_INTERNAL_H__
+#define __SMU_INTERNAL_H__
+
+#include "amdgpu_smu.h"
+
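+/*
+ * Convenience wrappers around the ASIC specific ppt_funcs callbacks: each
+ * macro dispatches to the corresponding callback when it is implemented and
+ * otherwise evaluates to a benign default (0, or -EINVAL for read_sensor).
+ */
+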
+#define smu_init_microcode(smu) \
+ ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0)
+
+#define smu_setup_pptable(smu) \
+ ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+ ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+ ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+#define smu_powergate_jpeg(smu, gate) \
+ ((smu)->ppt_funcs->powergate_jpeg ? (smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0)
+
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+ ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_driver_table_location(smu) \
+ ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_gfx_off_control(smu, enable) \
+ ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0)
+
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_default_od_settings(smu, initialize) \
+ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+
+int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display_count(smu, count) \
+ ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+
+#define smu_tables_init(smu, tab) \
+ ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+#define smu_set_thermal_fan_table(smu) \
+ ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+ ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0)
+
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+ ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_display_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_display_config ? (smu)->ppt_funcs->notify_smc_display_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_clk_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_feature_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_table_get_index(smu, tab) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
+#define smu_power_get_index(smu, src) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
+#define smu_workload_get_type(smu, profile) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
+#define smu_run_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+
+
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_dpm_set_jpeg_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_jpeg_enable ? (smu)->ppt_funcs->dpm_set_jpeg_enable((smu), (enable)) : 0)
+
+#define smu_set_watermarks_table(smu, tab, clock_ranges) \
+ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
+#define smu_thermal_temperature_range_update(smu, range, rw) \
+ ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
+#define smu_get_thermal_temperature_range(smu, range) \
+ ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+#define smu_register_irq_handler(smu) \
+ ((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0)
+
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+
+#define smu_asic_set_performance_level(smu, level) \
+ ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+#define smu_dump_pptable(smu) \
+ ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+ ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+ ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
+
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
+#define smu_disable_umc_cdr_12gbps_workaround(smu) \
+ ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+
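+/*
+ * Usage sketch (hypothetical caller code, not part of this header): each
+ * wrapper dispatches to the ASIC-specific ppt_funcs callback only when it is
+ * implemented, e.g.
+ *
+ *	ret = smu_init_microcode(smu);
+ *	if (ret)
+ *		return ret;
+ *
+ * A missing callback quietly evaluates to 0 (or -EINVAL for the lookup
+ * helpers and a few mandatory ops), so common code needs no per-ASIC
+ * NULL checks.
+ */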
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 5fde5cf65b42..0dc49479a7eb 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,16 +24,20 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define SMU_11_0_PARTIAL_PPTABLE
+
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
-#include "vega20_ppt.h"
-#include "navi10_ppt.h"
+#include "amd_pcie.h"
+#include "amdgpu_ras.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -45,7 +49,10 @@
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
+MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
+MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
#define SMU11_VOLTAGE_SCALE 4
@@ -57,7 +64,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -73,47 +80,20 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- break;
+ return cur_value == 0x1 ? 0 : -EIO;
+
udelay(1);
}
/* timeout means wrong logic */
- if (i == timeout)
- return -ETIME;
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
-}
-
-static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0, index = 0;
-
- index = smu_msg_get_index(smu, msg);
- if (index < 0)
- return index;
-
- smu_v11_0_wait_for_response(smu);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
- ret = smu_v11_0_wait_for_response(smu);
-
- if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x\n", index,
- ret);
-
- return ret;
-
+ return -ETIME;
}
-static int
-smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
uint32_t param)
{
-
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -122,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return index;
ret = smu_v11_0_wait_for_response(smu);
- if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
- index, ret, param);
+ if (ret) {
+ pr_err("Msg issuing pre-check failed and "
+ "SMU may be not in the right state!\n");
+ return ret;
+ }
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -134,13 +116,13 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
ret = smu_v11_0_wait_for_response(smu);
if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
- index, ret, param);
+ pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
return ret;
}
-static int smu_v11_0_init_microcode(struct smu_context *smu)
+int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const char *chip_name;
@@ -154,9 +136,18 @@ static int smu_v11_0_init_microcode(struct smu_context *smu)
case CHIP_VEGA20:
chip_name = "vega20";
break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
case CHIP_NAVI10:
chip_name = "navi10";
break;
+ case CHIP_NAVI14:
+ chip_name = "navi14";
+ break;
+ case CHIP_NAVI12:
+ chip_name = "navi12";
+ break;
default:
BUG();
}
@@ -193,7 +184,7 @@ out:
return err;
}
-static int smu_v11_0_load_microcode(struct smu_context *smu)
+int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
@@ -202,7 +193,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
uint32_t i;
uint32_t mp1_fw_flags;
- hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -231,7 +222,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_check_fw_status(struct smu_context *smu)
+int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -246,7 +237,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v11_0_check_fw_version(struct smu_context *smu)
+int smu_v11_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -261,6 +252,25 @@ static int smu_v11_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
+ switch (smu->adev->asic_type) {
+ case CHIP_VEGA20:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+ break;
+ case CHIP_ARCTURUS:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ break;
+ case CHIP_NAVI10:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ break;
+ case CHIP_NAVI14:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ break;
+ default:
+ pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+ break;
+ }
+
/*
* 1. if_version mismatch is not critical as our fw is designed
* to be backward compatible.
@@ -295,7 +305,8 @@ static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uin
return 0;
}
-static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, uint32_t *size, uint32_t pptable_id)
+static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
+ uint32_t *size, uint32_t pptable_id)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v2_1 *v2_1;
@@ -321,12 +332,13 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, uin
return 0;
}
-static int smu_v11_0_setup_pptable(struct smu_context *smu)
+int smu_v11_0_setup_pptable(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
int ret, index;
- uint32_t size;
+ uint32_t size = 0;
+ uint16_t atom_table_size;
uint8_t frev, crev;
void *table;
uint16_t version_major, version_minor;
@@ -335,6 +347,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+ pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -351,13 +364,15 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
return ret;
} else {
+ pr_info("use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
- ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
+ ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
+ size = atom_table_size;
}
if (!smu->smu_table.power_play_table)
@@ -398,13 +413,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
int ret = 0;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -425,19 +440,20 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
smu_table->tables = NULL;
- smu_table->table_count = 0;
smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
ret = smu_v11_0_fini_dpm_context(smu);
@@ -446,7 +462,7 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_power(struct smu_context *smu)
+int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -464,7 +480,7 @@ static int smu_v11_0_init_power(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_power(struct smu_context *smu)
+int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -535,10 +551,13 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
}
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
return 0;
}
-static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
int ret, index;
struct amdgpu_device *adev = smu->adev;
@@ -614,10 +633,28 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+ if ((smu->smu_table.boot_values.format_revision == 3) &&
+ (smu->smu_table.boot_values.content_revision >= 2)) {
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
+ input.syspll_id = SMU11_SYSPLL1_2_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+ }
+
return 0;
}
-static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
@@ -663,7 +700,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_check_pptable(struct smu_context *smu)
+int smu_v11_0_check_pptable(struct smu_context *smu)
{
int ret;
@@ -671,7 +708,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_parse_pptable(struct smu_context *smu)
+int smu_v11_0_parse_pptable(struct smu_context *smu)
{
int ret;
@@ -695,7 +732,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
int ret;
@@ -704,7 +741,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_pptable(struct smu_context *smu)
+int smu_v11_0_write_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
@@ -715,26 +752,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
- if (!table)
- return -EINVAL;
-
- if (!table->cpu_addr)
- return -EINVAL;
-
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
- true);
-
- return ret;
-}
-
-static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
@@ -746,7 +764,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl
return ret;
}
-static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -755,11 +773,28 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
if (!table_context)
return -EINVAL;
- return smu_set_deep_sleep_dcefclk(smu,
- table_context->boot_values.dcefclk / 100);
+ return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
+}
+
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
}
-static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
@@ -777,7 +812,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
@@ -788,46 +823,8 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
-static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
- if (feature_id >= 0 && feature_id < 31)
- feature_low = (1 << feature_id);
- else if (feature_id > 31 && feature_id < 63)
- feature_high = (1 << feature_id);
- else
- return -EINVAL;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- }
- return ret;
-}
-
-static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
@@ -854,62 +851,71 @@ failed:
return ret;
}
-static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
- if (ret)
- return ret;
+ if (bitmap_empty(feature->enabled, feature->feature_num)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
- if (ret)
- return ret;
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+ } else {
+ bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+ feature->feature_num);
+ }
return ret;
}
-static int smu_v11_0_system_features_control(struct smu_context *smu,
+int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
struct smu_feature *feature = &smu->smu_feature;
uint32_t feature_mask[2];
int ret = 0;
- if (smu->pm_enabled) {
- ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
- if (ret)
- return ret;
- }
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
if (ret)
return ret;
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
+ if (en) {
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ } else {
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+ }
return ret;
}
-static int smu_v11_0_notify_display_change(struct smu_context *smu)
+int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
@@ -927,11 +933,21 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
enum smu_clk_type clock_select)
{
int ret = 0;
+ int clk_id;
if (!smu->pm_enabled)
return ret;
+
+ if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
+ (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clock_select);
+ if (clk_id < 0)
+ return -EINVAL;
+
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- smu_clk_get_index(smu, clock_select) << 16);
+ clk_id << 16);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
@@ -946,7 +962,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- smu_clk_get_index(smu, clock_select) << 16);
+ clk_id << 16);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
@@ -957,7 +973,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return ret;
}
-static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
@@ -1037,75 +1053,86 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_get_power_limit(struct smu_context *smu,
- uint32_t *limit,
- bool get_default)
-{
- int ret = 0;
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
+{
+ uint32_t od_limit, max_power_limit;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ powerplay_table = table_context->power_play_table;
- if (get_default) {
- mutex_lock(&smu->mutex);
- *limit = smu->default_power_limit;
- if (smu->od_enabled) {
- *limit *= (100 + smu->smu_table.TDPODLimit);
- *limit /= 100;
- }
- mutex_unlock(&smu->mutex);
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16);
- if (ret) {
- pr_err("[%s] get PPT limit failed!", __func__);
- return ret;
- }
- smu_read_smc_arg(smu, limit);
- smu->power_limit = *limit;
+ max_power_limit = smu_get_pptable_power_limit(smu);
+
+ if (!max_power_limit) {
+ /* If we couldn't get the table limit, fall back on the first-read value */
+ if (!smu->default_power_limit)
+ smu->default_power_limit = smu->power_limit;
+ max_power_limit = smu->default_power_limit;
}
- return ret;
+ if (smu->od_enabled) {
+ od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+ pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
+ max_power_limit *= (100 + od_limit);
+ max_power_limit /= 100;
+ }
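+ /*
+ * Worked example (illustrative numbers only): a 300 W pptable limit with a
+ * 20% overdrive allowance gives 300 * (100 + 20) / 100 = 360 W.
+ */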
+
+ return max_power_limit;
}
-static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
- uint32_t max_power_limit;
int ret = 0;
+ uint32_t max_power_limit;
+
+ max_power_limit = smu_v11_0_get_max_power_limit(smu);
+
+ if (n > max_power_limit) {
+ pr_err("New power limit (%d) is over the max allowed %d\n",
+ n,
+ max_power_limit);
+ return -EINVAL;
+ }
if (n == 0)
n = smu->default_power_limit;
- max_power_limit = smu->default_power_limit;
-
- if (smu->od_enabled) {
- max_power_limit *= (100 + smu->smu_table.TDPODLimit);
- max_power_limit /= 100;
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ pr_err("Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
if (ret) {
- pr_err("[%s] Set power limit Failed!", __func__);
+ pr_err("[%s] Set power limit Failed!\n", __func__);
return ret;
}
+ smu->power_limit = n;
- return ret;
+ return 0;
}
-static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_id,
uint32_t *value)
{
int ret = 0;
uint32_t freq = 0;
+ int asic_clk_id;
if (clk_id >= SMU_CLK_COUNT || !value)
return -EINVAL;
+ asic_clk_id = smu_clk_get_index(smu, clk_id);
+ if (asic_clk_id < 0)
+ return -EINVAL;
+
 /* if the GetDpmClockFreq message is not available, get the current clock from SmuMetrics_t instead */
- if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0)
+ if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (smu_clk_get_index(smu, clk_id) << 16));
+ (asic_clk_id << 16));
if (ret)
return ret;
@@ -1121,23 +1148,18 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
}
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
- struct smu_temperature_range *range)
+ struct smu_temperature_range range)
{
struct amdgpu_device *adev = smu->adev;
int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
- if (!range)
- return -EINVAL;
-
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
-
- low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
- high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+ range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);
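+ /* range.min arrives in millidegrees (it originates from smu11_thermal_policy
+ * or the ASIC callback), so it is scaled down to whole degrees C here before
+ * being programmed as the THM alert limits below. */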
if (low > high)
return -EINVAL;
@@ -1170,30 +1192,23 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
- struct smu_temperature_range range = {
- TEMP_RANGE_MIN,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MIN,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MIN,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MAX};
+ struct smu_temperature_range range;
struct amdgpu_device *adev = smu->adev;
if (!smu->pm_enabled)
return ret;
+ memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
+
ret = smu_get_thermal_temperature_range(smu, &range);
if (ret)
return ret;
if (smu->smu_table.thermal_controller_type) {
- ret = smu_v11_0_set_thermal_range(smu, &range);
+ ret = smu_v11_0_set_thermal_range(smu, range);
if (ret)
return ret;
@@ -1206,21 +1221,28 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
- adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_temp = range.min;
+ adev->pm.dpm.thermal.max_temp = range.max;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
return ret;
}
+int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ return 0;
+}
+
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
@@ -1245,11 +1267,15 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-static int smu_v11_0_read_sensor(struct smu_context *smu,
+int smu_v11_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
@@ -1272,17 +1298,13 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
break;
}
- /* try get sensor data by asic */
- if (ret)
- ret = smu_asic_read_sensor(smu, sensor, data, size);
-
if (ret)
*size = 0;
return ret;
}
-static int
+int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
*clock_req)
@@ -1322,37 +1344,20 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (ret)
goto failed;
- mutex_lock(&smu->mutex);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- (smu_clk_get_index(smu, clk_select) << 16) | clk_freq);
- mutex_unlock(&smu->mutex);
- }
+ if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
+ return 0;
-failed:
- return ret;
-}
+ ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
-static int
-smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
-{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
-
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ if (clk_select == SMU_UCLK)
+ smu->hard_min_uclk_req_from_dal = clk_freq;
}
+failed:
return ret;
}
-static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
struct amdgpu_device *adev = smu->adev;
@@ -1361,14 +1366,14 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_VEGA20:
break;
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
- mutex_lock(&smu->mutex);
if (enable)
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
else
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
- mutex_unlock(&smu->mutex);
break;
default:
break;
@@ -1377,7 +1382,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static uint32_t
+uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1387,17 +1392,17 @@ smu_v11_0_get_fan_control_mode(struct smu_context *smu)
}
static int
-smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
+smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
int ret = 0;
if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
if (ret)
pr_err("[%s]%s smc FAN CONTROL feature failed!",
- __func__, (start ? "Start" : "Stop"));
+ __func__, (auto_fan_control ? "Start" : "Stop"));
return ret;
}
@@ -1417,20 +1422,19 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
return 0;
}
-static int
+int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t duty100;
- uint32_t duty;
+ uint32_t duty100, duty;
uint64_t tmp64;
- bool stop = 0;
if (speed > 100)
speed = 100;
- if (smu_v11_0_smc_fan_control(smu, stop))
+ if (smu_v11_0_auto_fan_control(smu, 0))
return -EINVAL;
+
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
if (!duty100)
@@ -1447,23 +1451,21 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
-static int
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
int ret = 0;
- bool start = 1;
- bool stop = 0;
switch (mode) {
case AMD_FAN_CTRL_NONE:
ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
- ret = smu_v11_0_smc_fan_control(smu, stop);
+ ret = smu_v11_0_auto_fan_control(smu, 0);
break;
case AMD_FAN_CTRL_AUTO:
- ret = smu_v11_0_smc_fan_control(smu, start);
+ ret = smu_v11_0_auto_fan_control(smu, 1);
break;
default:
break;
@@ -1477,21 +1479,19 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
int ret;
uint32_t tach_period, crystal_clock_freq;
- bool stop = 0;
if (!speed)
return -EINVAL;
- mutex_lock(&(smu->mutex));
- ret = smu_v11_0_smc_fan_control(smu, stop);
+ ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret)
- goto set_fan_speed_rpm_failed;
+ return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
@@ -1502,23 +1502,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-set_fan_speed_rpm_failed:
- mutex_unlock(&(smu->mutex));
return ret;
}
-#define XGMI_STATE_D0 1
-#define XGMI_STATE_D3 0
-
-static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- mutex_lock(&(smu->mutex));
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
- mutex_unlock(&(smu->mutex));
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
return ret;
}
@@ -1565,7 +1558,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
.process = smu_v11_0_irq_process,
};
-static int smu_v11_0_register_irq_handler(struct smu_context *smu)
+int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = smu->irq_source;
@@ -1597,7 +1590,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -1627,13 +1620,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- mutex_lock(&smu->mutex);
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1643,7 +1634,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}
-static bool smu_v11_0_baco_is_support(struct smu_context *smu)
+bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1657,7 +1648,9 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
if (!baco_support)
return false;
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ /* Arcturus does not support this bit mask */
+ if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -1667,10 +1660,10 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
}
-static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
- enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;
+ enum smu_baco_state baco_state;
mutex_lock(&smu_baco->mutex);
baco_state = smu_baco->state;
@@ -1679,10 +1672,14 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
return baco_state;
}
-static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint32_t bif_doorbell_intr_cntl;
+ uint32_t data;
int ret = 0;
if (smu_v11_0_baco_get_state(smu) == state)
@@ -1690,10 +1687,37 @@ static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state
mutex_lock(&smu_baco->mutex);
- if (state == SMU_BACO_STATE_ENTER)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
- else
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
+ if (state == SMU_BACO_STATE_ENTER) {
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+ }
+ } else {
ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+ if (ret)
+ goto out;
+
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /* clear vbios scratch 6 and 7 for coming asic reinit */
+ WREG32(adev->bios_scratch_reg_offset + 6, 0);
+ WREG32(adev->bios_scratch_reg_offset + 7, 0);
+ }
if (ret)
goto out;
@@ -1703,13 +1727,17 @@ out:
return ret;
}
-static int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_enter(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
- if (ret)
- return ret;
+ /* Arcturus does not need this audio workaround */
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ if (ret)
+ return ret;
+ }
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
if (ret)
@@ -1717,6 +1745,13 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
msleep(10);
+ return ret;
+}
+
+int smu_v11_0_baco_exit(struct smu_context *smu)
+{
+ int ret = 0;
+
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
if (ret)
return ret;
@@ -1724,72 +1759,179 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
return ret;
}
-static const struct smu_funcs smu_v11_0_funcs = {
- .init_microcode = smu_v11_0_init_microcode,
- .load_microcode = smu_v11_0_load_microcode,
- .check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
- .send_smc_msg = smu_v11_0_send_msg,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
- .setup_pptable = smu_v11_0_setup_pptable,
- .init_smc_tables = smu_v11_0_init_smc_tables,
- .fini_smc_tables = smu_v11_0_fini_smc_tables,
- .init_power = smu_v11_0_init_power,
- .fini_power = smu_v11_0_fini_power,
- .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
- .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .check_pptable = smu_v11_0_check_pptable,
- .parse_pptable = smu_v11_0_parse_pptable,
- .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
- .write_pptable = smu_v11_0_write_pptable,
- .write_watermarks_table = smu_v11_0_write_watermarks_table,
- .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
- .set_tool_table_location = smu_v11_0_set_tool_table_location,
- .init_display_count = smu_v11_0_init_display_count,
- .set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
- .system_features_control = smu_v11_0_system_features_control,
- .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
- .notify_display_change = smu_v11_0_notify_display_change,
- .get_power_limit = smu_v11_0_get_power_limit,
- .set_power_limit = smu_v11_0_set_power_limit,
- .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
- .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
- .start_thermal_control = smu_v11_0_start_thermal_control,
- .read_sensor = smu_v11_0_read_sensor,
- .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
- .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
- .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
- .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
- .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
- .gfx_off_control = smu_v11_0_gfx_off_control,
- .register_irq_handler = smu_v11_0_register_irq_handler,
- .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
- .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
-};
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
-void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ param = (clk_id & 0xffff) << 16;
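+ /* The clock index is carried in the upper 16 bits of the message argument;
+ * the SMU returns the requested limit through the argument register. */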
+
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ return ret;
+}
+
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
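+ /* Each limit is sent as its own message: clock index in the upper 16 bits,
+ * the frequency cap in the lower 16 bits. */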
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ int ret;
- smu->funcs = &smu_v11_0_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- vega20_set_ppt_funcs(smu);
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+ return ret;
+}
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (initialize) {
+ if (table_context->overdrive_table) {
+ return -EINVAL;
+ }
+ table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+ if (!table_context->overdrive_table) {
+ return -ENOMEM;
+ }
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
+ if (!table_context->boot_overdrive_table) {
+ table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
+ if (!table_context->boot_overdrive_table) {
+ return -ENOMEM;
+ }
+ }
+ }
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ return ret;
+}
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
break;
- case CHIP_NAVI10:
- navi10_set_ppt_funcs(smu);
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
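+ /* Pin each clock domain to the single DPM level selected for this profile. */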
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- pr_warn("Unknown asic for smu11\n");
+ break;
}
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
new file mode 100644
index 000000000000..870e6db2907e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_internal.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v12_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+
+#include "asic_reg/mp/mp_12_0_0_offset.h"
+#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ return 0;
+}
+
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ return 0;
+}
+
+int smu_v12_0_wait_for_response(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t cur_value, i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+ return cur_value == 0x1 ? 0 : -EIO;
+
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ return -ETIME;
+}
+
+int
+smu_v12_0_send_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t param)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ ret = smu_v12_0_wait_for_response(smu);
+ if (ret) {
+ pr_err("Msg issuing pre-check failed and "
+ "SMU may be not in the right state!\n");
+ return ret;
+ }
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+ smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v12_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+
+ return ret;
+}
+
+int smu_v12_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+int smu_v12_0_check_fw_version(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint16_t smu_major;
+ uint8_t smu_minor, smu_debug;
+ int ret = 0;
+
+ ret = smu_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
+ smu_major = (smu_version >> 16) & 0xffff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+
+ /*
+ * 1. if_version mismatch is not critical as our fw is designed
+ * to be backward compatible.
+ * 2. New fw usually brings some optimizations. But that's visible
+ * only on the paired driver.
+ * Considering the above, we just leave the user a warning message
+ * instead of halting driver loading.
+ */
+ if (if_version != smu->smc_if_version) {
+ pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_if_version, if_version,
+ smu_version, smu_major, smu_minor, smu_debug);
+ pr_warn("SMU driver if version not matched\n");
+ }
+
+ return ret;
+}
+
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
+{
+ if (!smu->is_apu)
+ return 0;
+
+ if (gate)
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+ else
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+}
+
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
+{
+ if (!smu->is_apu)
+ return 0;
+
+ if (gate)
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ else
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+}
+
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
+{
+ if (!smu->is_apu)
+ return 0;
+
+ if (gate)
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ else
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+}
+
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
+{
+ if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ return 0;
+
+ return smu_v12_0_send_msg_with_param(smu,
+ SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+}
+
+int smu_v12_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ *(uint32_t *)data = 0;
+ *size = 4;
+ break;
+ default:
+ ret = smu_common_read_sensor(smu, sensor, data, size);
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
+
+/**
+ * smu_v12_0_get_gfxoff_status - get gfxoff status
+ *
+ * @smu: pointer to the smu context
+ *
+ * This function is used to get the current gfxoff status
+ *
+ * Returns 0=GFXOFF(default).
+ * Returns 1=Transition out of GFX State.
+ * Returns 2=Not in GFXOFF.
+ * Returns 3=Transition into GFXOFF.
+ */
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+{
+ uint32_t reg;
+ uint32_t gfxOff_Status = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
+ gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
+ >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
+
+ return gfxOff_Status;
+}
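The status codes documented above translate naturally into a small enum; the following is an editorial sketch (the enum and helper are invented for illustration, not part of the patch) showing why smu_v12_0_gfx_off_control() below waits for the value 2 when gfxoff is being disabled:

	/* Hypothetical names, mirroring the return values documented above. */
	enum smu_v12_0_gfxoff_state {
		GFXOFF_STATE_IN_GFXOFF       = 0, /* default */
		GFXOFF_STATE_EXITING_GFXOFF  = 1,
		GFXOFF_STATE_NOT_IN_GFXOFF   = 2, /* GFX fully on */
		GFXOFF_STATE_ENTERING_GFXOFF = 3,
	};

	static inline bool example_gfx_is_fully_on(struct smu_context *smu)
	{
		return smu_v12_0_get_gfxoff_status(smu) == GFXOFF_STATE_NOT_IN_GFXOFF;
	}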
+
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0, timeout = 500;
+
+ if (enable) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+
+ } else {
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+
+ /* confirm gfx is back to "on" state, timeout is 0.5 second */
+ while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
+ msleep(1);
+ timeout--;
+ if (timeout == 0) {
+ DRM_ERROR("disable gfxoff timeout and failed!\n");
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+int smu_v12_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = NULL;
+
+ if (smu_table->tables)
+ return -EINVAL;
+
+ tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
+ GFP_KERNEL);
+ if (!tables)
+ return -ENOMEM;
+
+ smu_table->tables = tables;
+
+ return smu_tables_init(smu, tables);
+}
+
+int smu_v12_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ if (!smu_table->tables)
+ return -EINVAL;
+
+ kfree(smu_table->clocks_table);
+ kfree(smu_table->tables);
+
+ smu_table->clocks_table = NULL;
+ smu_table->tables = NULL;
+
+ return 0;
+}
+
+int smu_v12_0_populate_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
+int smu_v12_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ int ret = 0;
+
+ if (!feature_mask || num < 2)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
+
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+
+ return ret;
+}
+
+int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value)
+{
+ int ret = 0;
+ uint32_t freq = 0;
+
+ if (clk_id >= SMU_CLK_COUNT || !value)
+ return -EINVAL;
+
+ ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
+ if (ret)
+ return ret;
+
+ freq *= 100;
+ *value = freq;
+
+ return ret;
+}
+
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0;
+ uint32_t mclk_mask, soc_mask;
+
+ if (max) {
+ ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ NULL,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+ if (ret) {
+ pr_err("Attempt to get max GX frequency from SMC Failed !\n");
+ goto failed;
+ }
+ ret = smu_read_smc_arg(smu, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_UCLK:
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+ if (ret) {
+ pr_err("Attempt to get min GX frequency from SMC Failed !\n");
+ goto failed;
+ }
+ ret = smu_read_smc_arg(smu, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_UCLK:
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+failed:
+ return ret;
+}
+
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+}
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0;
+
+ if (max < min)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
+}
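(Editorial sketch, not part of the patch.) Taken together, the helpers in this new file implement a simple mailbox handshake: wait for the previous response, clear the response register, write the argument, write the message index, then wait again and read the result. A minimal hypothetical caller, using a real message ID from this file but an invented wrapper name:

	/* Sketch only: query the maximum GFX clock through the v12 mailbox. */
	static int example_get_max_gfxclk(struct smu_context *smu, uint32_t *freq)
	{
		int ret;

		ret = smu_v12_0_send_msg_with_param(smu, SMU_MSG_GetMaxGfxclkFrequency, 0);
		if (ret)
			return ret;

		return smu_v12_0_read_arg(smu, freq);
	}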
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 15590fd86ef4..868e2d5f6e62 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -653,8 +653,8 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ uint16_t HiSidd;
+ uint16_t LoSidd;
struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index da025b1d302d..32ebb383c456 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
{
int result;
/* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
+ uint32_t mvdd;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
level->VoltageDownHyst = 0;
level->PowerThrottle = 0;
- threshold = clock * data->fast_watermark_threshold / 100;
-
data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
@@ -1501,7 +1499,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
uint32_t dram_timing;
uint32_t dram_timing2;
uint32_t burstTime;
- ULONG state, trrds, trrdl;
+ ULONG trrds, trrdl;
int result;
result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
@@ -1513,7 +1511,6 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index dc754447f0dd..23c12018dbc1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 7fb3e57cfc41..2319400a3fcb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -118,6 +118,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -135,6 +136,9 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
+ /* flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -146,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -157,6 +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
smu10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 8189fe402c6d..7dca04a89217 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int ret = 0;
struct cgs_firmware_info info = {0};
- struct smu8_smumgr *smu8_smu;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- smu8_smu = hwmgr->smu_backend;
ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
@@ -722,16 +720,17 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
static int smu8_start_smu(struct pp_hwmgr *hwmgr)
{
- struct amdgpu_device *adev = hwmgr->adev;
+ struct amdgpu_device *adev;
uint32_t index = SMN_MP1_SRAM_START_ADDR +
SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, Version);
-
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
+ adev = hwmgr->adev;
+
cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
pr_info("smu version %02d.%02d.%02d\n",
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
index 742b3dc1f6cb..adfbcbe5d113 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -61,15 +61,29 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
uint32_t reg;
uint32_t ret;
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ /* Due to the L1 policy problem under SRIOV, we have to use
+ * mmMP1_SMN_C2PMSG_103 as the driver response register
+ */
+ if (hwmgr->pp_one_vf) {
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103);
- ret = phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_103__CONTENT_MASK);
- if (ret)
- pr_err("No response from smu\n");
+ if (ret)
+ pr_err("No response from smu\n");
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103);
+ } else {
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ if (ret)
+ pr_err("No response from smu\n");
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ }
}
/*
@@ -83,7 +97,11 @@ static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
{
struct amdgpu_device *adev = hwmgr->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ if (hwmgr->pp_one_vf) {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg);
+ } else {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ }
return 0;
}
@@ -101,7 +119,10 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
smu9_wait_for_response(hwmgr);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ if (hwmgr->pp_one_vf)
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
+ else
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -127,9 +148,17 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
smu9_wait_for_response(hwmgr);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+ /* Due to the L1 policy problem under SRIOV, we have to use
+ * mmMP1_SMN_C2PMSG_101 as the driver message register and
+ * mmMP1_SMN_C2PMSG_102 as the driver parameter register.
+ */
+ if (hwmgr->pp_one_vf) {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
+ } else {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+ }
smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -144,5 +173,8 @@ uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ if (hwmgr->pp_one_vf)
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102);
+ else
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
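For reference, the net effect of the SRIOV (pp_one_vf) changes in this file is a plain register substitution; the table below is an editorial summary, not part of the patch:

	/*
	 * smu9 mailbox registers (summary):
	 *
	 *                        default (bare metal)    pp_one_vf (SRIOV)
	 *   message register     MP1_SMN_C2PMSG_66       MP1_SMN_C2PMSG_101
	 *   parameter/argument   MP1_SMN_C2PMSG_82       MP1_SMN_C2PMSG_102
	 *   response register    MP1_SMN_C2PMSG_90       MP1_SMN_C2PMSG_103
	 */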
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 967d34b1dc51..715564009089 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -39,6 +39,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv = hwmgr->smu_backend;
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -56,6 +57,9 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
+ /* flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -66,6 +70,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv = hwmgr->smu_backend;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ /* Under SRIOV the vbios or hypervisor driver has already copied
+ * the table to the SMC, so just skip it here.
+ */
+ if (!hwmgr->not_vf)
+ return 0;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -77,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
@@ -96,6 +109,14 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
PPSMC_MSG_DisableSmuFeatures;
+ /* The VF has no permission to change SMU features due to security
+ * concerns; even under pp one vf mode it still can't do it. For
+ * vega10, the SMU in the vbios will enable the appropriate features.
+ */
+ if (!hwmgr->not_vf)
+ return 0;
+
return smum_send_msg_to_smc_with_parameter(hwmgr,
msg, feature_mask);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index bab3df85fdcd..275dbf65f1a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -42,6 +42,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
"Invalid SMU Table ID!", return -EINVAL);
@@ -64,6 +65,9 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
+ /* flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -80,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
"Invalid SMU Table ID!", return -EINVAL);
@@ -91,6 +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
@@ -121,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
if (enable) {
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return -EINVAL);
}
@@ -151,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return -EINVAL);
smc_features_low = smu9_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return -EINVAL);
smc_features_high = smu9_get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 957446cf467e..49e5ef3e3876 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -44,7 +44,7 @@
#define smnMP0_FW_INTF 0x30101c0
#define smnMP1_PUB_CTRL 0x3010b14
-static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t mp1_fw_flags;
@@ -163,6 +163,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -187,6 +188,9 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
+ /* flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -203,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -215,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
@@ -238,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
@@ -266,6 +276,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
@@ -284,6 +295,9 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
+ /* flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
@@ -302,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
if (enable) {
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return ret);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return ret);
} else {
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return ret);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return ret);
}
@@ -333,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return ret);
smc_features_low = vega20_get_argument(hwmgr);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return ret);
smc_features_high = vega20_get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
index ec953ab13e87..62ebbfd6068f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
@@ -57,5 +57,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr);
+bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 7c960b07746f..b0e0d67cd54b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
- data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
@@ -1114,7 +1114,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMIO_Pattern vol_level;
uint32_t mvdd;
- uint16_t us_mvdd;
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
@@ -1168,17 +1167,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
"in Clock Dependency Table",
);
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!vegam_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
else
@@ -1383,11 +1371,16 @@ static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
result = phm_find_boot_level(&(data->dpm_table.sclk_table),
data->vbios_boot_state.sclk_bootup_value,
(uint32_t *)&(table->GraphicsBootLevel));
+ if (result)
+ return result;
result = phm_find_boot_level(&(data->dpm_table.mclk_table),
data->vbios_boot_state.mclk_bootup_value,
(uint32_t *)&(table->MemoryBootLevel));
+ if (result)
+ return result;
+
table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
VOLTAGE_SCALE;
table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
@@ -1493,7 +1486,7 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
struct vegam_smumgr *smu_data =
(struct vegam_smumgr *)(hwmgr->smu_backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1532,11 +1525,9 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
(table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (!(stretch_amount == 1 || stretch_amount == 2 ||
+ stretch_amount == 5 || stretch_amount == 3 ||
+ stretch_amount == 4)) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dd6fd1c8bf24..4ad8d6c14ee5 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -47,7 +48,7 @@
#define CTF_OFFSET_HBM 5
#define MSG_MAP(msg) \
- [SMU_MSG_##msg] = PPSMC_MSG_##msg
+ [SMU_MSG_##msg] = {1, PPSMC_MSG_##msg}
#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
FEATURE_DPM_GFXCLK_MASK | \
@@ -59,7 +60,7 @@
FEATURE_DPM_LINK_MASK | \
FEATURE_DPM_DCEFCLK_MASK)
-static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage),
MSG_MAP(GetSmuVersion),
MSG_MAP(GetDriverIfVersion),
@@ -143,9 +144,10 @@ static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PrepareMp1ForShutdown),
MSG_MAP(SetMGpuFanBoostLimitRpm),
MSG_MAP(GetAVFSVoltageByDpm),
+ MSG_MAP(DFCstateControl),
};
-static int vega20_clk_map[SMU_CLK_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(VCLK, PPCLK_VCLK),
CLK_MAP(DCLK, PPCLK_DCLK),
@@ -159,7 +161,7 @@ static int vega20_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(FCLK, PPCLK_FCLK),
};
-static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_UCLK),
@@ -195,7 +197,7 @@ static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(XGMI),
};
-static int vega20_table_map[SMU_TABLE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(WATERMARKS),
TAB_MAP(AVFS),
@@ -208,96 +210,109 @@ static int vega20_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(OVERDRIVE),
};
-static int vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static int vega20_workload_map[] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
- val = vega20_table_map[index];
- if (val >= TABLE_COUNT)
+ mapping = vega20_table_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
- val = vega20_pwr_src_map[index];
- if (val >= POWER_SOURCE_COUNT)
+ mapping = vega20_pwr_src_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
- val = vega20_feature_mask_map[index];
- if (val > 64)
+ mapping = vega20_feature_mask_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_CLK_COUNT)
return -EINVAL;
- val = vega20_clk_map[index];
- if (val >= PPCLK_COUNT)
+ mapping = vega20_clk_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
- val = vega20_message_map[index];
- if (val > PPSMC_Message_Count)
+ mapping = vega20_message_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
- val = vega20_workload_map[profile];
+ mapping = vega20_workload_map[profile];
+ if (!(mapping.valid_mapping)) {
+ return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
@@ -323,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -451,7 +470,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
- table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
return 0;
}
@@ -621,7 +639,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
!smu_dpm_ctx->dpm_current_power_state)
return -EINVAL;
- mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY;
@@ -639,7 +656,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&(smu->mutex));
return pm_type;
}
@@ -1261,16 +1277,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
- if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
- return -EINVAL;
- }
-
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1423,7 +1431,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1438,8 +1445,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
dpm_table = smu_dpm->dpm_context;
- mutex_lock(&smu->mutex);
-
switch (clk_type) {
case SMU_GFXCLK:
single_dpm_table = &(dpm_table->gfx_table);
@@ -1461,7 +1466,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
ret = -EINVAL;
}
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1678,17 +1682,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
@@ -1699,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
int ret;
- if (initialize) {
- if (table_context->overdrive_table)
- return -EINVAL;
-
- table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
-
- if (!table_context->overdrive_table)
- return -ENOMEM;
-
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
- table_context->overdrive_table, false);
- if (ret) {
- pr_err("Failed to export over drive table!\n");
- return ret;
- }
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+ if (initialize) {
ret = vega20_set_default_od8_setttings(smu);
if (ret)
return ret;
@@ -1770,7 +1766,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
- uint16_t workload_type = 0;
+ int16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1803,6 +1799,9 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ return -EINVAL;
+
result = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
@@ -1955,6 +1954,8 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type);
@@ -2227,7 +2228,7 @@ static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
}
static int
-vega20_notify_smc_dispaly_config(struct smu_context *smu)
+vega20_notify_smc_display_config(struct smu_context *smu)
{
struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
@@ -2242,7 +2243,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -2529,8 +2530,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
int feature_enabled;
PPCLK_e clk_id;
- mutex_lock(&(smu->mutex));
-
dpm_table = smu_dpm->dpm_context;
golden_table = smu_dpm->golden_dpm_context;
@@ -2580,11 +2579,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
}
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
set_od_failed:
- mutex_unlock(&(smu->mutex));
-
return ret;
}
@@ -2769,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
- if (ret) {
- pr_err("Failed to export over drive table!\n");
- return ret;
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ pr_err("Overdrive table was not initialized!\n");
+ return -EINVAL;
}
-
+ memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2809,10 +2806,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
- mutex_lock(&(smu->mutex));
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
- mutex_unlock(&(smu->mutex));
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
}
return ret;
@@ -2840,157 +2836,6 @@ static int vega20_dpm_set_vce_enable(struct smu_context *smu, bool enable)
return smu_feature_set_enabled(smu, SMU_FEATURE_DPM_VCE_BIT, enable);
}
-static int vega20_get_enabled_smc_features(struct smu_context *smu,
- uint64_t *features_enabled)
-{
- uint32_t feature_mask[2] = {0, 0};
- int ret = 0;
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- *features_enabled = ((((uint64_t)feature_mask[0] << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
- (((uint64_t)feature_mask[1] << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
-
- return ret;
-}
-
-static int vega20_enable_smc_features(struct smu_context *smu,
- bool enable, uint64_t feature_mask)
-{
- uint32_t smu_features_low, smu_features_high;
- int ret = 0;
-
- smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
- smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
-
- if (enable) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- smu_features_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- smu_features_high);
- if (ret)
- return ret;
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- smu_features_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- smu_features_high);
- if (ret)
- return ret;
- }
-
- return 0;
-
-}
-
-static int vega20_get_ppfeature_status(struct smu_context *smu, char *buf)
-{
- static const char *ppfeature_name[] = {
- "DPM_PREFETCHER",
- "GFXCLK_DPM",
- "UCLK_DPM",
- "SOCCLK_DPM",
- "UVD_DPM",
- "VCE_DPM",
- "ULV",
- "MP0CLK_DPM",
- "LINK_DPM",
- "DCEFCLK_DPM",
- "GFXCLK_DS",
- "SOCCLK_DS",
- "LCLK_DS",
- "PPT",
- "TDC",
- "THERMAL",
- "GFX_PER_CU_CG",
- "RM",
- "DCEFCLK_DS",
- "ACDC",
- "VR0HOT",
- "VR1HOT",
- "FW_CTF",
- "LED_DISPLAY",
- "FAN_CONTROL",
- "GFX_EDC",
- "GFXOFF",
- "CG",
- "FCLK_DPM",
- "FCLK_DS",
- "MP1CLK_DS",
- "MP0CLK_DS",
- "XGMI",
- "ECC"};
- static const char *output_title[] = {
- "FEATURES",
- "BITMASK",
- "ENABLEMENT"};
- uint64_t features_enabled;
- int i;
- int ret = 0;
- int size = 0;
-
- ret = vega20_get_enabled_smc_features(smu, &features_enabled);
- if (ret)
- return ret;
-
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
- output_title[0],
- output_title[1],
- output_title[2]);
- for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
- ppfeature_name[i],
- 1ULL << i,
- (features_enabled & (1ULL << i)) ? "Y" : "N");
- }
-
- return size;
-}
-
-static int vega20_set_ppfeature_status(struct smu_context *smu, uint64_t new_ppfeature_masks)
-{
- uint64_t features_enabled;
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
-
- if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
- return -EINVAL;
-
- ret = vega20_get_enabled_smc_features(smu, &features_enabled);
- if (ret)
- return ret;
-
- features_to_disable =
- features_enabled & ~new_ppfeature_masks;
- features_to_enable =
- ~features_enabled & new_ppfeature_masks;
-
- pr_debug("features_to_disable 0x%llx\n", features_to_disable);
- pr_debug("features_to_enable 0x%llx\n", features_to_enable);
-
- if (features_to_disable) {
- ret = vega20_enable_smc_features(smu, false, features_to_disable);
- if (ret)
- return ret;
- }
-
- if (features_to_enable) {
- ret = vega20_enable_smc_features(smu, true, features_to_enable);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static bool vega20_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -3050,6 +2895,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
+ uint32_t smu_version;
int ret = 0;
SmuMetrics_t metrics;
@@ -3060,7 +2906,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (ret)
return ret;
- *value = metrics.CurrSocketPower << 8;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ /* With the 40.46 SMU firmware, CurrSocketPower was renamed to AverageSocketPower */
+ if (smu_version == 0x282e00)
+ *value = metrics.AverageSocketPower << 8;
+ else
+ *value = metrics.CurrSocketPower << 8;
return 0;
}
@@ -3144,6 +2998,10 @@ static int vega20_read_sensor(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3167,8 +3025,9 @@ static int vega20_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- return -EINVAL;
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -3243,19 +3102,66 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
if (!range || !powerplay_table)
return -EINVAL;
- /* The unit is temperature */
- range->min = 0;
- range->max = powerplay_table->usSoftwareShutdownTemp;
- range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
- range->hotspot_crit_max = pptable->ThotspotLimit;
- range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
- range->mem_crit_max = pptable->ThbmLimit;
- range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
+ range->max = powerplay_table->usSoftwareShutdownTemp *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_crit_max = pptable->ThotspotLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = pptable->ThbmLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
+static int vega20_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later firmware */
+ if (smu_version < 0x283200) {
+ pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+}
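The raw version checks in this file (0x282e00 above for 40.46, 0x283200 here for 40.50) follow the usual packing of major/minor/debug into a single 32-bit word, matching the shifts used by the check_fw_version helpers earlier in this series; a worked sketch (the helper itself is illustrative only):

	/* Sketch: 0x283200 -> major 0x28 = 40, minor 0x32 = 50, debug 0x00, i.e. 40.50.0 */
	static void example_decode_smu_version(uint32_t smu_version)
	{
		uint16_t major = (smu_version >> 16) & 0xffff;
		uint8_t  minor = (smu_version >>  8) & 0xff;
		uint8_t  debug =  smu_version        & 0xff;

		pr_info("SMU firmware %u.%u.%u\n", major, minor, debug);
	}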
+
+static int vega20_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ }
+
+ return ret;
+}
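The smu_pcie_arg encoding above places the link level in bits 16 and up, the (capped) PCIe generation value in bits 8-15 and the (capped) lane-count value in bits 0-7. A concrete, made-up example:

	/*
	 * Illustration only: for link level i = 1, an effective gen value of 3
	 * and an effective width value of 6 (whichever of pptable value or cap
	 * is smaller), the message argument becomes:
	 *
	 *   smu_pcie_arg = (1 << 16) | (3 << 8) | 6 = 0x00010306
	 */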
+
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
.alloc_dpm_context = vega20_allocate_dpm_context,
@@ -3268,7 +3174,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_smu_table_index = vega20_get_smu_table_index,
.get_smu_power_index = vega20_get_pwr_src_index,
.get_workload_type = vega20_get_workload_type,
- .run_afll_btc = vega20_run_btc_afll,
+ .run_btc = vega20_run_btc_afll,
.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
.get_current_power_state = vega20_get_current_power_state,
.set_default_dpm_table = vega20_set_default_dpm_table,
@@ -3280,6 +3186,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_od_percentage = vega20_get_od_percentage,
.get_power_profile_mode = vega20_get_power_profile_mode,
.set_power_profile_mode = vega20_set_power_profile_mode,
+ .set_performance_level = smu_v11_0_set_performance_level,
.set_od_percentage = vega20_set_od_percentage,
.set_default_od_settings = vega20_set_default_od_settings,
.od_edit_dpm_table = vega20_odn_edit_dpm_table,
@@ -3289,25 +3196,71 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.pre_display_config_changed = vega20_pre_display_config_changed,
.display_config_changed = vega20_display_config_changed,
.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
- .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+ .notify_smc_display_config = vega20_notify_smc_display_config,
.force_dpm_limit_value = vega20_force_dpm_limit_value,
.unforce_dpm_levels = vega20_unforce_dpm_levels,
.get_profiling_clk_mask = vega20_get_profiling_clk_mask,
- .set_ppfeature_status = vega20_set_ppfeature_status,
- .get_ppfeature_status = vega20_get_ppfeature_status,
.is_dpm_running = vega20_is_dpm_running,
.set_thermal_fan_table = vega20_set_thermal_fan_table,
.get_fan_speed_percent = vega20_get_fan_speed_percent,
.get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
- .get_thermal_temperature_range = vega20_get_thermal_temperature_range
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .set_df_cstate = vega20_set_df_cstate,
+ .update_pcie_parameters = vega20_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_enter = smu_v11_0_baco_enter,
+ .baco_exit = smu_v11_0_baco_exit,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
};
void vega20_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &vega20_ppt_funcs;
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
}
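
For reference, the per-link-level argument that vega20_update_pcie_parameters() above sends with SMU_MSG_OverridePcieParameters packs the link level, the (capped) gen speed and the (capped) lane-count code into a single dword. A minimal standalone sketch of that packing, using made-up pptable and cap values rather than anything read from real hardware:

#include <stdint.h>
#include <stdio.h>

/* bits 31:16 = link level, bits 15:8 = gen speed, bits 7:0 = lane-count code */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t gen_cap,
			      uint32_t lanes, uint32_t lane_cap)
{
	uint32_t capped_gen = (gen <= gen_cap) ? gen : gen_cap;
	uint32_t capped_lanes = (lanes <= lane_cap) ? lanes : lane_cap;

	return (level << 16) | (capped_gen << 8) | capped_lanes;
}

int main(void)
{
	/* hypothetical: level 1, pptable asks for gen 4 / lane code 6,
	 * the platform caps it at gen 3 / lane code 5 -> 0x00010305 */
	printf("0x%08x\n", (unsigned int)pack_pcie_arg(1, 4, 3, 6, 5));
	return 0;
}
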
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index dfaddbb7da0d..8ae1e1f97a73 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -20,9 +20,10 @@
#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
-static struct simplefb_format supported_formats[] = {
- { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 },
- { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 },
+static const u32 arc_pgu_supported_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
};
static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
@@ -30,22 +31,24 @@ static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
uint32_t pixel_format = fb->format->format;
- struct simplefb_format *format = NULL;
+ u32 format = DRM_FORMAT_INVALID;
int i;
+ u32 reg_ctrl;
- for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
+ if (arc_pgu_supported_formats[i] == pixel_format)
+ format = arc_pgu_supported_formats[i];
}
- if (WARN_ON(!format))
+ if (WARN_ON(format == DRM_FORMAT_INVALID))
return;
- if (format->fourcc == DRM_FORMAT_RGB888)
- arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
- arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
- ARCPGU_MODE_RGB888_MASK);
-
+ reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+ if (format == DRM_FORMAT_RGB565)
+ reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
+ else
+ reg_ctrl |= ARCPGU_MODE_XRGB8888;
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
}
static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
@@ -193,18 +196,15 @@ static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
struct drm_plane *plane = NULL;
- u32 formats[ARRAY_SIZE(supported_formats)], i;
int ret;
plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
- formats[i] = supported_formats[i].fourcc;
-
ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
- formats, ARRAY_SIZE(formats),
+ arc_pgu_supported_formats,
+ ARRAY_SIZE(arc_pgu_supported_formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
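
The reworked arc_pgu_set_pxl_fmt() above reduces format handling to toggling a single mode bit: RGB565 clears it, the 32-bit XRGB/ARGB formats set it. A small standalone sketch of that control-word update, assuming only the bit position from the patch (the driver's register I/O helpers are omitted):

#include <stdint.h>
#include <stdio.h>

#define MODE_XRGB8888	(1u << 2)	/* ARCPGU_MODE_XRGB8888 in the patch */

static uint32_t update_ctrl(uint32_t reg_ctrl, int is_rgb565)
{
	if (is_rgb565)
		reg_ctrl &= ~MODE_XRGB8888;	/* 16 bpp path */
	else
		reg_ctrl |= MODE_XRGB8888;	/* 32 bpp XRGB/ARGB path */
	return reg_ctrl;
}

int main(void)
{
	printf("RGB565:   0x%02x\n", (unsigned int)update_ctrl(0x04, 1));
	printf("XRGB8888: 0x%02x\n", (unsigned int)update_ctrl(0x00, 0));
	return 0;
}
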
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index af60c6d7a5f4..d6a6692db0ac 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct arcpgu_drm_private *arcpgu;
- struct device_node *encoder_node;
+ struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct resource *res;
int ret;
@@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm)
if (arc_pgu_setup_crtc(drm) < 0)
return -ENODEV;
- /* find the encoder node and initialize it */
- encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
+ /*
+ * There is only one output port inside each device; it is linked to the
+ * encoder endpoint.
+ */
+ endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (endpoint_node) {
+ encoder_node = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ }
+
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
+ dev_info(drm->dev, "no encoder found, assuming a virtual LCD on a simulation platform\n");
ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
@@ -135,8 +145,7 @@ static int arcpgu_debugfs_init(struct drm_minor *minor)
#endif
static struct drm_driver arcpgu_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
@@ -150,8 +159,6 @@ static struct drm_driver arcpgu_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 98aac743cc26..8fd7094beece 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h
index dab2c380f7f3..b689a382d556 100644
--- a/drivers/gpu/drm/arc/arcpgu_regs.h
+++ b/drivers/gpu/drm/arc/arcpgu_regs.h
@@ -25,7 +25,7 @@
#define ARCPGU_CTRL_VS_POL_OFST 0x3
#define ARCPGU_CTRL_HS_POL_MASK 0x1
#define ARCPGU_CTRL_HS_POL_OFST 0x4
-#define ARCPGU_MODE_RGB888_MASK 0x04
+#define ARCPGU_MODE_XRGB8888 BIT(2)
#define ARCPGU_STAT_BUSY_MASK 0x02
#endif
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
index 1053b11352eb..16a8a2c22c42 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_product.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -18,7 +18,8 @@
#define MALIDP_CORE_ID_STATUS(__core_id) (((__u32)(__core_id)) & 0xFF)
/* Mali-display product IDs */
-#define MALIDP_D71_PRODUCT_ID 0x0071
+#define MALIDP_D71_PRODUCT_ID 0x0071
+#define MALIDP_D32_PRODUCT_ID 0x0032
union komeda_config_id {
struct {
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 5c3900c2e764..1931a7fa1a14 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -16,7 +16,8 @@ komeda-y := \
komeda_crtc.o \
komeda_plane.o \
komeda_wb_connector.o \
- komeda_private_obj.o
+ komeda_private_obj.o \
+ komeda_event.o
komeda-y += \
d71/d71_dev.o \
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 4073a452e24a..8a02ade369db 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -4,8 +4,6 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
-
-#include <drm/drm_print.h>
#include "d71_dev.h"
#include "komeda_kms.h"
#include "malidp_io.h"
@@ -108,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg)
i, hdr.output_ids[i]);
}
+/* On D71, we use the global line size. From D32 onwards, every component has
+ * a line size register that indicates its FIFO size.
+ */
+static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg,
+ u32 max_default)
+{
+ if (!d71->periph_addr)
+ max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+
+ return max_default;
+}
+
+static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg)
+{
+ return __get_blk_line_size(d71, reg, d71->max_line_size);
+}
+
static u32 to_rot_ctrl(u32 rot)
{
u32 lr_ctrl = 0;
@@ -334,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
+static int d71_layer_validate(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct komeda_layer *layer = to_layer(c);
+ struct drm_plane_state *plane_st;
+ struct drm_framebuffer *fb;
+ u32 fourcc, line_sz, max_line_sz;
+
+ plane_st = drm_atomic_get_new_plane_state(state->obj.state,
+ state->plane);
+ fb = plane_st->fb;
+ fourcc = fb->format->format;
+
+ if (drm_rotation_90_or_270(st->rot))
+ line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b;
+ else
+ line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r;
+
+ if (fb->modifier) {
+ if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ max_line_sz = layer->line_sz;
+ else
+ max_line_sz = layer->line_sz / 2;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceed the max afbc line_sz: %d.\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+ }
+
+ if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) {
+ DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceed the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) {
+ DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceed the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct komeda_component_funcs d71_layer_funcs = {
+ .validate = d71_layer_validate,
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -367,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71,
else
layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
- set_range(&layer->hsize_in, 4, d71->max_line_size);
+ if (!d71->periph_addr) {
+ /* D32 or newer product */
+ layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+ layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info);
+ } else if (d71->max_line_size > 2048) {
+ /* D71 4K */
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ /* D71 2K */
+ if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) {
+ /* rich layer is 4K configuration */
+ layer->line_sz = d71->max_line_size * 2;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = 0;
+ }
+ }
+
+ set_range(&layer->hsize_in, 4, layer->line_sz);
+
set_range(&layer->vsize_in, 4, d71->max_vsize);
malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
@@ -458,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71,
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
+ wb_layer->line_sz = get_blk_line_size(d71, reg);
+ wb_layer->yuv_line_sz = wb_layer->line_sz;
- set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
+ set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
return 0;
}
@@ -597,8 +684,8 @@ static int d71_compiz_init(struct d71_dev *d71,
compiz = to_compiz(c);
- set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
+ set_range(&compiz->vsize, 64, d71->max_vsize);
return 0;
}
@@ -705,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c,
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
- u32 v[9];
+ u32 v[10];
dump_block_header(sf, c->reg);
@@ -725,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
+
+ get_values_from_reg(c->reg, 0x130, 10, v);
+ seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
+ seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
+ seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
@@ -755,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71,
}
scaler = to_scaler(c);
- set_range(&scaler->hsize, 4, 2048);
+ set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
@@ -804,7 +903,7 @@ static int d71_downscaling_clk_check(struct komeda_pipeline *pipe,
denominator = (mode->htotal - 1) * v_out - 2 * v_in;
}
- return aclk_rate * denominator >= mode->clock * 1000 * fraction ?
+ return aclk_rate * denominator >= mode->crtc_clock * 1000 * fraction ?
0 : -EINVAL;
}
@@ -864,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71,
splitter = to_splitter(c);
- set_range(&splitter->hsize, 4, d71->max_line_size);
+ set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
@@ -935,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71,
merger = to_merger(c);
- set_range(&merger->hsize_merged, 4, 4032);
+ set_range(&merger->hsize_merged, 4,
+ __get_blk_line_size(d71, reg, 4032));
set_range(&merger->vsize_merged, 4, 4096);
return 0;
@@ -944,15 +1044,48 @@ static int d71_merger_init(struct d71_dev *d71,
static void d71_improc_update(struct komeda_component *c,
struct komeda_component_state *state)
{
+ struct drm_crtc_state *crtc_st = state->crtc->state;
struct komeda_improc_state *st = to_improc_st(state);
+ struct d71_pipeline *pipe = to_d71_pipeline(c->pipeline);
u32 __iomem *reg = c->reg;
- u32 index;
+ u32 index, mask = 0, ctrl = 0;
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+ malidp_write32(reg, IPS_DEPTH, st->color_depth);
+
+ if (crtc_st->color_mgmt_changed) {
+ mask |= IPS_CTRL_FT | IPS_CTRL_RGB;
+
+ if (crtc_st->gamma_lut) {
+ malidp_write_group(pipe->dou_ft_coeff_addr, FT_COEFF0,
+ KOMEDA_N_GAMMA_COEFFS,
+ st->fgamma_coeffs);
+ ctrl |= IPS_CTRL_FT; /* enable gamma */
+ }
+
+ if (crtc_st->ctm) {
+ malidp_write_group(reg, IPS_RGB_RGB_COEFF0,
+ KOMEDA_N_CTM_COEFFS,
+ st->ctm_coeffs);
+ ctrl |= IPS_CTRL_RGB; /* enable gamut */
+ }
+ }
+
+ mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+
+ /* config color format */
+ if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+ ctrl |= IPS_CTRL_YUV;
+
+ malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
@@ -1032,21 +1165,31 @@ static void d71_timing_ctrlr_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct drm_crtc_state *crtc_st = state->crtc->state;
+ struct drm_display_mode *mode = &crtc_st->adjusted_mode;
u32 __iomem *reg = c->reg;
- struct videomode vm;
+ u32 hactive, hfront_porch, hback_porch, hsync_len;
+ u32 vactive, vfront_porch, vback_porch, vsync_len;
u32 value;
- drm_display_mode_to_videomode(&crtc_st->adjusted_mode, &vm);
-
- malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(vm.hactive, vm.vactive));
- malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(vm.hfront_porch,
- vm.hback_porch));
- malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vm.vfront_porch,
- vm.vback_porch));
-
- value = BS_SYNC_VSW(vm.vsync_len) | BS_SYNC_HSW(vm.hsync_len);
- value |= vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? BS_SYNC_VSP : 0;
- value |= vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? BS_SYNC_HSP : 0;
+ hactive = mode->crtc_hdisplay;
+ hfront_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hback_porch = mode->crtc_htotal - mode->crtc_hsync_end;
+
+ vactive = mode->crtc_vdisplay;
+ vfront_porch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vback_porch = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(hactive, vactive));
+ malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(hfront_porch,
+ hback_porch));
+ malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vfront_porch,
+ vback_porch));
+
+ value = BS_SYNC_VSW(vsync_len) | BS_SYNC_HSW(hsync_len);
+ value |= mode->flags & DRM_MODE_FLAG_PVSYNC ? BS_SYNC_VSP : 0;
+ value |= mode->flags & DRM_MODE_FLAG_PHSYNC ? BS_SYNC_HSP : 0;
malidp_write32(reg, BS_SYNC, value);
malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
@@ -1054,6 +1197,10 @@ static void d71_timing_ctrlr_update(struct komeda_component *c,
/* configure bs control register */
value = BS_CTRL_EN | BS_CTRL_VM;
+ if (c->pipeline->dual_link) {
+ malidp_write32(reg, BS_DRIFT_TO, hfront_porch + 16);
+ value |= BS_CTRL_DL;
+ }
malidp_write32(reg, BLK_CONTROL, value);
}
@@ -1123,7 +1270,7 @@ static int d71_timing_ctrlr_init(struct d71_dev *d71,
ctrlr = to_ctrlr(c);
- ctrlr->supports_dual_link = true;
+ ctrlr->supports_dual_link = d71->supports_dual_link;
return 0;
}
@@ -1206,6 +1353,90 @@ int d71_probe_block(struct d71_dev *d71,
return err;
}
+static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_puts(sf, "\n------ GCU ------\n");
+
+ get_values_from_reg(d71->gcu_addr, 0, 3, v);
+ seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(d71->gcu_addr, 0x10, 1, v);
+ seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(d71->gcu_addr, 0xA0, 5, v);
+ seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(d71->gcu_addr, 0xD0, 3, v);
+ seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]);
+}
+
+static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[6];
+
+ seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->lpu_addr);
+
+ get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v);
+ seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v);
+ seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v);
+ seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]);
+}
+
+static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->dou_addr);
+
+ get_values_from_reg(pipe->dou_addr, 0xA0, 5, v);
+ seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]);
+}
+
+static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf)
+{
+ struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe);
+
+ d71_lpu_dump(d71_pipe, sf);
+ d71_dou_dump(d71_pipe, sf);
+}
+
const struct komeda_pipeline_funcs d71_pipeline_funcs = {
- .downscaling_clk_check = d71_downscaling_clk_check,
+ .downscaling_clk_check = d71_downscaling_clk_check,
+ .dump_register = d71_pipeline_dump,
};
+
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ d71_gcu_dump(d71, sf);
+}
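
The timing-controller hunk above now derives porches and sync lengths directly from the crtc_* fields of struct drm_display_mode instead of converting to struct videomode first. A worked sketch of that arithmetic with an illustrative 1920x1080 mode (the numbers are not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920, hsync_start = 2008,
		     hsync_end = 2052, htotal = 2200;

	unsigned int hfront_porch = hsync_start - hdisplay;	/* 88 */
	unsigned int hsync_len = hsync_end - hsync_start;	/* 44 */
	unsigned int hback_porch = htotal - hsync_end;		/* 148 */

	printf("front %u, sync %u, back %u\n",
	       hfront_porch, hsync_len, hback_porch);
	return 0;
}
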
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index d567ab7ed314..00fa56c29b3e 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -20,8 +20,10 @@ static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
evts |= KOMEDA_EVENT_IBSY;
if (raw_status & LPU_IRQ_EOW)
evts |= KOMEDA_EVENT_EOW;
+ if (raw_status & LPU_IRQ_OVR)
+ evts |= KOMEDA_EVENT_OVR;
- if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
+ if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
u32 restore = 0, tbu_status;
/* Check error of LPU status */
status = malidp_read32(reg, BLK_STATUS);
@@ -45,6 +47,15 @@ static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
restore |= LPU_STATUS_ACE3;
evts |= KOMEDA_ERR_ACE3;
}
+ if (status & LPU_STATUS_FEMPTY) {
+ restore |= LPU_STATUS_FEMPTY;
+ evts |= KOMEDA_EVENT_EMPTY;
+ }
+ if (status & LPU_STATUS_FFULL) {
+ restore |= LPU_STATUS_FFULL;
+ evts |= KOMEDA_EVENT_FULL;
+ }
+
if (restore != 0)
malidp_write32_mask(reg, BLK_STATUS, restore, 0);
@@ -195,7 +206,7 @@ d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
if (gcu_status & GLB_IRQ_STATUS_PIPE1)
evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
- return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(gcu_status);
}
#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
@@ -371,23 +382,33 @@ static int d71_enum_resources(struct komeda_dev *mdev)
goto err_cleanup;
}
- /* probe PERIPH */
+ /* Only the legacy HW has the periph block; newer HW merges the periph
+ * into the GCU
+ */
value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
- if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
- DRM_ERROR("access blk periph but got blk: %d.\n",
- BLOCK_INFO_BLK_TYPE(value));
- err = -EINVAL;
- goto err_cleanup;
+ if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
+ d71->periph_addr = NULL;
+
+ if (d71->periph_addr) {
+ /* probe PERIPHERAL in legacy HW */
+ value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
+
+ d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
+ d71->max_vsize = 4096;
+ d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
+ d71->supports_dual_link = !!(value & PERIPH_SPLIT_EN);
+ d71->integrates_tbu = !!(value & PERIPH_TBU_EN);
+ } else {
+ value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
+ d71->max_line_size = GCU_MAX_LINE_SIZE(value);
+ d71->max_vsize = GCU_MAX_NUM_LINES(value);
+
+ value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
+ d71->num_rich_layers = GCU_NUM_RICH_LAYERS(value);
+ d71->supports_dual_link = GCU_DISPLAY_SPLIT_EN(value);
+ d71->integrates_tbu = GCU_DISPLAY_TBU_EN(value);
}
- value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
-
- d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
- d71->max_vsize = 4096;
- d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
- d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false;
- d71->integrates_tbu = value & PERIPH_TBU_EN ? true : false;
-
for (i = 0; i < d71->num_pipelines; i++) {
pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
&d71_pipeline_funcs);
@@ -395,11 +416,30 @@ static int d71_enum_resources(struct komeda_dev *mdev)
err = PTR_ERR(pipe);
goto err_cleanup;
}
+
+ /* D71 HW doesn't update shadow registers when the display output
+ * is turning off, so if we disable all pipeline components together
+ * with the display output in a single flush, the register updates
+ * from the disable will not be flushed to or take effect in HW,
+ * which can lead to problems.
+ * To work around this, introduce a two-phase disable:
+ * Phase 1: disable the components while the display is still on, so
+ * the disable can be flushed to HW.
+ * Phase 2: only then turn off the display output.
+ */
+ value = KOMEDA_PIPELINE_IMPROCS |
+ BIT(KOMEDA_COMPONENT_TIMING_CTRLR);
+
+ pipe->standalone_disabled_comps = value;
+
d71->pipes[i] = to_d71_pipeline(pipe);
}
- /* loop the register blks and probe */
- i = 2; /* exclude GCU and PERIPH */
+ /* Loop over the register blocks and probe them.
+ * NOTE: d71->num_blocks includes reserved blocks.
+ * d71->num_blocks = GCU + valid blocks + reserved blocks
+ */
+ i = 1; /* exclude GCU */
offset = D71_BLOCK_SIZE; /* skip GCU */
while (i < d71->num_blocks) {
blk_base = mdev->reg_base + (offset >> 2);
@@ -409,9 +449,9 @@ static int d71_enum_resources(struct komeda_dev *mdev)
err = d71_probe_block(d71, &blk, blk_base);
if (err)
goto err_cleanup;
- i++;
}
+ i++;
offset += D71_BLOCK_SIZE;
}
@@ -561,26 +601,43 @@ static int d71_disconnect_iommu(struct komeda_dev *mdev)
}
static const struct komeda_dev_funcs d71_chip_funcs = {
- .init_format_table = d71_init_fmt_tbl,
- .enum_resources = d71_enum_resources,
- .cleanup = d71_cleanup,
- .irq_handler = d71_irq_handler,
- .enable_irq = d71_enable_irq,
- .disable_irq = d71_disable_irq,
- .on_off_vblank = d71_on_off_vblank,
- .change_opmode = d71_change_opmode,
- .flush = d71_flush,
- .connect_iommu = d71_connect_iommu,
- .disconnect_iommu = d71_disconnect_iommu,
+ .init_format_table = d71_init_fmt_tbl,
+ .enum_resources = d71_enum_resources,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
+ .connect_iommu = d71_connect_iommu,
+ .disconnect_iommu = d71_disconnect_iommu,
+ .dump_register = d71_dump,
};
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
+ const struct komeda_dev_funcs *funcs;
+ u32 product_id;
+
+ chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
+
+ product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
+
+ switch (product_id) {
+ case MALIDP_D71_PRODUCT_ID:
+ case MALIDP_D32_PRODUCT_ID:
+ funcs = &d71_chip_funcs;
+ break;
+ default:
+ DRM_ERROR("Unsupported product: 0x%x\n", product_id);
+ return NULL;
+ }
+
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
- chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
chip->bus_width = D71_BUS_WIDTH_16_BYTES;
- return &d71_chip_funcs;
+ return funcs;
}
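
On periph-less hardware the capabilities in d71_enum_resources() above now come from the two GCU configuration words, decoded with the field macros added to d71_regs.h further below. A standalone sketch of that unpacking with hypothetical register values:

#include <stdio.h>

int main(void)
{
	unsigned int id0 = 0x10001000;	/* hypothetical GCU_CONFIGURATION_ID0 */
	unsigned int id1 = 0x00030002;	/* hypothetical GCU_CONFIGURATION_ID1 */

	printf("max_line_size  = %u\n", id0 & 0xFFFF);		/* 4096 */
	printf("max_vsize      = %u\n", id0 >> 16);		/* 4096 */
	printf("rich layers    = %u\n", id1 & 0x7);		/* 2 */
	printf("dual link      = %u\n", (id1 >> 16) & 0x1);	/* 1 */
	printf("integrated TBU = %u\n", (id1 >> 17) & 0x1);	/* 1 */
	return 0;
}
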
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
index 84f1878b647d..c7357c2b9e62 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -49,4 +49,6 @@ int d71_probe_block(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg);
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf);
+
#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 2d5e6d00b42c..e80172a0b320 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -10,6 +10,7 @@
/* Common block registers offset */
#define BLK_BLOCK_INFO 0x000
#define BLK_PIPELINE_INFO 0x004
+#define BLK_MAX_LINE_SIZE 0x008
#define BLK_VALID_INPUT_ID0 0x020
#define BLK_OUTPUT_ID0 0x060
#define BLK_INPUT_ID0 0x080
@@ -71,6 +72,19 @@
#define GCU_CONTROL_MODE(x) ((x) & 0x7)
#define GCU_CONTROL_SRST BIT(16)
+/* GCU_CONFIGURATION registers */
+#define GCU_CONFIGURATION_ID0 0x100
+#define GCU_CONFIGURATION_ID1 0x104
+
+/* GCU configuration */
+#define GCU_MAX_LINE_SIZE(x) ((x) & 0xFFFF)
+#define GCU_MAX_NUM_LINES(x) ((x) >> 16)
+#define GCU_NUM_RICH_LAYERS(x) ((x) & 0x7)
+#define GCU_NUM_PIPELINES(x) (((x) >> 3) & 0x7)
+#define GCU_NUM_SCALERS(x) (((x) >> 6) & 0x7)
+#define GCU_DISPLAY_SPLIT_EN(x) (((x) >> 16) & 0x1)
+#define GCU_DISPLAY_TBU_EN(x) (((x) >> 17) & 0x1)
+
/* GCU opmode */
#define INACTIVE_MODE 0
#define TBU_CONNECT_MODE 1
@@ -161,6 +175,7 @@
#define TBU_DOUTSTDCAPB_MASK 0x3F
/* LPU_IRQ_BITS */
+#define LPU_IRQ_OVR BIT(9)
#define LPU_IRQ_IBSY BIT(10)
#define LPU_IRQ_ERR BIT(11)
#define LPU_IRQ_EOW BIT(12)
@@ -171,6 +186,8 @@
#define LPU_STATUS_AXIE BIT(4)
#define LPU_STATUS_AXIRP BIT(5)
#define LPU_STATUS_AXIWP BIT(6)
+#define LPU_STATUS_FEMPTY BIT(11)
+#define LPU_STATUS_FFULL BIT(14)
#define LPU_STATUS_ACE0 BIT(16)
#define LPU_STATUS_ACE1 BIT(17)
#define LPU_STATUS_ACE2 BIT(18)
@@ -321,6 +338,7 @@
#define L_INFO_RF BIT(0)
#define L_INFO_CM BIT(1)
#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF)
/* Scaler registers */
#define SC_COEFFTAB 0x0DC
@@ -494,13 +512,6 @@ enum d71_blk_type {
#define D71_DEFAULT_PREPRETCH_LINE 5
#define D71_BUS_WIDTH_16_BYTES 16
-#define D71_MIN_LINE_SIZE 64
-#define D71_MIN_VERTICAL_SIZE 64
-#define D71_SC_MIN_LIN_SIZE 4
-#define D71_SC_MIN_VERTICAL_SIZE 4
-#define D71_SC_MAX_LIN_SIZE 2048
-#define D71_SC_MAX_VERTICAL_SIZE 4096
-
#define D71_SC_MAX_UPSCALING 64
#define D71_SC_MAX_DOWNSCALING 6
#define D71_SC_SPLIT_OVERLAP 8
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
index 9d14a92dbb17..d8e449e6ebda 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
@@ -65,3 +65,69 @@ const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range)
return coeffs;
}
+
+struct gamma_curve_sector {
+ u32 boundary_start;
+ u32 num_of_segments;
+ u32 segment_width;
+};
+
+struct gamma_curve_segment {
+ u32 start;
+ u32 end;
+};
+
+static struct gamma_curve_sector sector_tbl[] = {
+ { 0, 4, 4 },
+ { 16, 4, 4 },
+ { 32, 4, 8 },
+ { 64, 4, 16 },
+ { 128, 4, 32 },
+ { 256, 4, 64 },
+ { 512, 16, 32 },
+ { 1024, 24, 128 },
+};
+
+static void
+drm_lut_to_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs,
+ struct gamma_curve_sector *sector_tbl, u32 num_sectors)
+{
+ struct drm_color_lut *lut;
+ u32 i, j, in, num = 0;
+
+ if (!lut_blob)
+ return;
+
+ lut = lut_blob->data;
+
+ for (i = 0; i < num_sectors; i++) {
+ for (j = 0; j < sector_tbl[i].num_of_segments; j++) {
+ in = sector_tbl[i].boundary_start +
+ j * sector_tbl[i].segment_width;
+
+ coeffs[num++] = drm_color_lut_extract(lut[in].red,
+ KOMEDA_COLOR_PRECISION);
+ }
+ }
+
+ coeffs[num] = BIT(KOMEDA_COLOR_PRECISION);
+}
+
+void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs)
+{
+ drm_lut_to_coeffs(lut_blob, coeffs, sector_tbl, ARRAY_SIZE(sector_tbl));
+}
+
+void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs)
+{
+ struct drm_color_ctm *ctm;
+ u32 i;
+
+ if (!ctm_blob)
+ return;
+
+ ctm = ctm_blob->data;
+
+ for (i = 0; i < KOMEDA_N_CTM_COEFFS; i++)
+ coeffs[i] = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 3, 12);
+}
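
drm_lut_to_coeffs() above samples the DRM gamma LUT at the piecewise boundaries described by sector_tbl. The sketch below only walks that table to show how the eight sectors yield 64 sample points plus one closing entry, i.e. KOMEDA_N_GAMMA_COEFFS == 65; the LUT access itself is left as a comment:

#include <stdio.h>

struct sector { unsigned int start, segs, width; };

int main(void)
{
	static const struct sector tbl[] = {	/* copied from sector_tbl above */
		{   0,  4,   4 }, {  16, 4,  4 }, {  32,  4,  8 }, {   64,  4,  16 },
		{ 128,  4,  32 }, { 256, 4, 64 }, { 512, 16, 32 }, { 1024, 24, 128 },
	};
	unsigned int i, j, n = 0;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		for (j = 0; j < tbl[i].segs; j++) {
			unsigned int in = tbl[i].start + j * tbl[i].width;

			/* coeffs[n] = lut[in].red scaled to 12-bit precision */
			(void)in;
			n++;
		}

	printf("%u sampled points + 1 closing coeff = %u\n", n, n + 1);
	return 0;
}
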
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
index a2df218f58e7..2f4668466112 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
@@ -11,7 +11,15 @@
#include <drm/drm_color_mgmt.h>
#define KOMEDA_N_YUV2RGB_COEFFS 12
+#define KOMEDA_N_RGB2YUV_COEFFS 12
+#define KOMEDA_COLOR_PRECISION 12
+#define KOMEDA_N_GAMMA_COEFFS 65
+#define KOMEDA_COLOR_LUT_SIZE BIT(KOMEDA_COLOR_PRECISION)
+#define KOMEDA_N_CTM_COEFFS 9
+
+void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs);
+void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs);
const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range);
-#endif
+#endif /*_KOMEDA_COLOR_MGMT_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index f4400788ab94..56bd938961ee 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -18,6 +18,33 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_st;
+ u32 conn_color_formats = ~0u;
+ int i, min_bpc = 31, conn_bpc = 0;
+
+ for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) {
+ if (conn_st->crtc != crtc_st->crtc)
+ continue;
+
+ conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8;
+ conn_color_formats &= conn->display_info.color_formats;
+
+ if (conn_bpc < min_bpc)
+ min_bpc = conn_bpc;
+ }
+
+ /* if no connector configures a color_format, use RGB444 as the default */
+ if (!conn_color_formats)
+ conn_color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ *color_depths = GENMASK(min_bpc, 0);
+ *color_formats = conn_color_formats;
+}
+
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
@@ -27,8 +54,8 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
return;
}
- pxlclk = kcrtc_st->base.adjusted_mode.clock * 1000;
- aclk = komeda_calc_aclk(kcrtc_st);
+ pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL;
+ aclk = komeda_crtc_get_aclk(kcrtc_st);
kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
}
@@ -74,14 +101,6 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-unsigned long komeda_calc_aclk(struct komeda_crtc_state *kcrtc_st)
-{
- struct komeda_dev *mdev = kcrtc_st->base.crtc->dev->dev_private;
- unsigned long pxlclk = kcrtc_st->base.adjusted_mode.clock;
-
- return clk_round_rate(mdev->aclk, pxlclk * 1000);
-}
-
/* For active a crtc, mainly need two parts of preparation
* 1. adjust display operation mode.
* 2. enable needed clk
@@ -92,7 +111,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
- unsigned long pxlclk_rate = kcrtc_st->base.adjusted_mode.clock * 1000;
+ struct drm_display_mode *mode = &kcrtc_st->base.adjusted_mode;
u32 new_mode;
int err;
@@ -118,7 +137,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
* to enable it again.
*/
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
- err = clk_set_rate(mdev->aclk, komeda_calc_aclk(kcrtc_st));
+ err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
if (err)
DRM_ERROR("failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
@@ -126,7 +145,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
DRM_ERROR("failed to enable aclk.\n");
}
- err = clk_set_rate(master->pxlclk, pxlclk_rate);
+ err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000);
if (err)
DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
err = clk_prepare_enable(master->pxlclk);
@@ -256,25 +275,60 @@ static void
komeda_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
+ pm_runtime_get_sync(crtc->dev->dev);
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc));
komeda_crtc_do_flush(crtc, old);
}
static void
+komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
+ struct completion *input_flip_done)
+{
+ struct drm_device *drm = kcrtc->base.dev;
+ struct komeda_dev *mdev = kcrtc->master->mdev;
+ struct completion *flip_done;
+ struct completion temp;
+ int timeout;
+
+ /* if the caller doesn't pass a flip_done, use a private one */
+ if (input_flip_done) {
+ flip_done = input_flip_done;
+ } else {
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ flip_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, kcrtc->master->id, 0);
+
+ /* wait for the flip to take effect. */
+ timeout = wait_for_completion_timeout(flip_done, HZ);
+ if (timeout == 0) {
+ DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
+ if (!input_flip_done) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+ }
+}
+
+static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
- struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
- struct completion *disable_done = &crtc->state->commit->flip_done;
- struct completion temp;
- int timeout;
+ struct completion *disable_done;
+ bool needs_phase2 = false;
- DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n",
drm_crtc_index(crtc),
old_st->active_pipes, old_st->affected_pipes);
@@ -282,7 +336,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
komeda_pipeline_disable(slave, old->state);
if (has_bit(master->id, old_st->active_pipes))
- komeda_pipeline_disable(master, old->state);
+ needs_phase2 = komeda_pipeline_disable(master, old->state);
/* crtc_disable has two scenarios according to the state->active switch.
* 1. active -> inactive
@@ -301,34 +355,26 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
* That's also the reason why skip modeset commit in
* komeda_crtc_atomic_flush()
*/
- if (crtc->state->active) {
- struct komeda_pipeline_state *pipe_st;
- /* clear the old active_comps to zero */
- pipe_st = komeda_pipeline_get_old_state(master, old->state);
- pipe_st->active_comps = 0;
+ disable_done = (needs_phase2 || crtc->state->active) ?
+ NULL : &crtc->state->commit->flip_done;
- init_completion(&temp);
- kcrtc->disable_done = &temp;
- disable_done = &temp;
- }
+ /* wait phase 1 disable done */
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
- mdev->funcs->flush(mdev, master->id, 0);
+ /* phase 2 */
+ if (needs_phase2) {
+ komeda_pipeline_disable(kcrtc->master, old->state);
- /* wait the disable take affect.*/
- timeout = wait_for_completion_timeout(disable_done, HZ);
- if (timeout == 0) {
- DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
- if (crtc->state->active) {
- unsigned long flags;
+ disable_done = crtc->state->active ?
+ NULL : &crtc->state->commit->flip_done;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- kcrtc->disable_done = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
}
+ drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
komeda_crtc_unprepare(kcrtc);
+ pm_runtime_put(crtc->dev->dev);
}
static void
@@ -342,29 +388,58 @@ komeda_crtc_atomic_flush(struct drm_crtc *crtc,
komeda_crtc_do_flush(crtc, old);
}
+/* Returns the minimum required aclk (main engine clock) rate in Hz */
+static unsigned long
+komeda_calc_min_aclk_rate(struct komeda_crtc *kcrtc,
+ unsigned long pxlclk)
+{
+ /* With dual-link, one display pipeline drives two display outputs,
+ * so the aclk needs to run at double the pxlclk rate
+ */
+ if (kcrtc->master->dual_link)
+ return pxlclk * 2;
+ else
+ return pxlclk;
+}
+
+/* Get the aclk rate specified by the given state */
+unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ unsigned long pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000;
+ unsigned long min_aclk;
+
+ min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), pxlclk);
+
+ return clk_round_rate(mdev->aclk, min_aclk);
+}
+
static enum drm_mode_status
komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
{
struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_pipeline *master = kcrtc->master;
- long mode_clk, pxlclk;
+ unsigned long min_pxlclk, min_aclk;
if (m->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- mode_clk = m->clock * 1000;
- pxlclk = clk_round_rate(master->pxlclk, mode_clk);
- if (pxlclk != mode_clk) {
- DRM_DEBUG_ATOMIC("pxlclk doesn't support %ld Hz\n", mode_clk);
+ min_pxlclk = m->clock * 1000;
+ if (master->dual_link)
+ min_pxlclk /= 2;
+
+ if (min_pxlclk != clk_round_rate(master->pxlclk, min_pxlclk)) {
+ DRM_DEBUG_ATOMIC("pxlclk doesn't support %lu Hz\n", min_pxlclk);
return MODE_NOCLOCK;
}
- /* main engine clock must be faster than pxlclk*/
- if (clk_round_rate(mdev->aclk, mode_clk) < pxlclk) {
- DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %ld.\n",
- m->name, pxlclk);
+ min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), min_pxlclk);
+ if (clk_round_rate(mdev->aclk, min_aclk) < min_aclk) {
+ DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %lu.\n",
+ m->name, min_pxlclk);
return MODE_CLOCK_HIGH;
}
@@ -377,10 +452,22 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
- struct komeda_pipeline *master = kcrtc->master;
- long mode_clk = m->clock * 1000;
+ unsigned long clk_rate;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* In dual-link mode, halve the horizontal timings */
+ if (kcrtc->master->dual_link) {
+ adjusted_mode->crtc_clock /= 2;
+ adjusted_mode->crtc_hdisplay /= 2;
+ adjusted_mode->crtc_hsync_start /= 2;
+ adjusted_mode->crtc_hsync_end /= 2;
+ adjusted_mode->crtc_htotal /= 2;
+ }
- adjusted_mode->clock = clk_round_rate(master->pxlclk, mode_clk) / 1000;
+ clk_rate = adjusted_mode->crtc_clock * 1000;
+ /* crtc_clock will be used as the komeda output pixel clock */
+ adjusted_mode->crtc_clock = clk_round_rate(kcrtc->master->pxlclk,
+ clk_rate) / 1000;
return true;
}
@@ -488,10 +575,8 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
else
sprintf(str, "None");
- DRM_INFO("crtc%d: master(pipe-%d) slave(%s) output: %s.\n",
- kms->n_crtcs, master->id, str,
- master->of_output_dev ?
- master->of_output_dev->full_name : "None");
+ DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n",
+ kms->n_crtcs, master->id, str);
kms->n_crtcs++;
}
@@ -535,6 +620,8 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
crtc->port = kcrtc->master->of_output_port;
+ drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
+
return err;
}
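
The dual-link changes above halve the per-pipe horizontal timings and pixel clock in komeda_crtc_mode_fixup() and make komeda_calc_min_aclk_rate() require aclk to run at least twice that per-pipe pxlclk. A standalone sketch of the arithmetic with an illustrative 4K mode:

#include <stdio.h>

int main(void)
{
	unsigned long crtc_clock_khz = 594000;	/* hypothetical 4K@60 mode */
	unsigned int crtc_hdisplay = 3840;
	int dual_link = 1;

	if (dual_link) {
		crtc_clock_khz /= 2;	/* 297000 kHz per pipe */
		crtc_hdisplay /= 2;	/* 1920 pixels per pipe */
	}

	unsigned long pxlclk_hz = crtc_clock_khz * 1000UL;
	unsigned long min_aclk_hz = dual_link ? pxlclk_hz * 2 : pxlclk_hz;

	printf("per-pipe pxlclk %lu Hz, min aclk %lu Hz, hdisplay %u\n",
	       pxlclk_hz, min_aclk_hz, crtc_hdisplay);
	return 0;
}
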
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..1d767473ba8a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,7 +8,9 @@
#include <linux/iommu.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -24,12 +26,18 @@ static int komeda_register_show(struct seq_file *sf, void *x)
struct komeda_dev *mdev = sf->private;
int i;
+ seq_puts(sf, "\n====== Komeda register dump =========\n");
+
+ pm_runtime_get_sync(mdev->dev);
+
if (mdev->funcs->dump_register)
mdev->funcs->dump_register(mdev, sf);
for (i = 0; i < mdev->n_pipelines; i++)
komeda_pipeline_dump_register(mdev->pipelines[i], sf);
+ pm_runtime_put(mdev->dev);
+
return 0;
}
@@ -55,6 +63,8 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
debugfs_create_file("register", 0444, mdev->debugfs_root,
mdev, &komeda_register_fops);
+ debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
+ &mdev->err_verbosity);
}
#endif
@@ -90,9 +100,19 @@ config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(config_id);
+static ssize_t
+aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk));
+}
+static DEVICE_ATTR_RO(aclk_hz);
+
static struct attribute *komeda_sysfs_entries[] = {
&dev_attr_core_id.attr,
&dev_attr_config_id.attr,
+ &dev_attr_aclk_hz.attr,
NULL,
};
@@ -100,33 +120,27 @@ static struct attribute_group komeda_sysfs_attr_group = {
.attrs = komeda_sysfs_entries,
};
-static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
+static int komeda_parse_pipe_dt(struct komeda_pipeline *pipe)
{
- struct komeda_pipeline *pipe;
+ struct device_node *np = pipe->of_node;
struct clk *clk;
- u32 pipe_id;
- int ret = 0;
-
- ret = of_property_read_u32(np, "reg", &pipe_id);
- if (ret != 0 || pipe_id >= mdev->n_pipelines)
- return -EINVAL;
-
- pipe = mdev->pipelines[pipe_id];
clk = of_clk_get_by_name(np, "pxclk");
if (IS_ERR(clk)) {
- DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe_id);
+ DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe->id);
return PTR_ERR(clk);
}
pipe->pxlclk = clk;
/* enum ports */
- pipe->of_output_dev =
+ pipe->of_output_links[0] =
of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0);
+ pipe->of_output_links[1] =
+ of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 1);
pipe->of_output_port =
of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
- pipe->of_node = np;
+ pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1];
return 0;
}
@@ -135,7 +149,9 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
- int ret;
+ struct komeda_pipeline *pipe;
+ u32 pipe_id = U32_MAX;
+ int ret = -1;
mdev->irq = platform_get_irq(pdev, 0);
if (mdev->irq < 0) {
@@ -143,37 +159,50 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
return mdev->irq;
}
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+ ret = 0;
+
for_each_available_child_of_node(np, child) {
- if (of_node_cmp(child->name, "pipeline") == 0) {
- ret = komeda_parse_pipe_dt(mdev, child);
- if (ret) {
- DRM_ERROR("parse pipeline dt error!\n");
- of_node_put(child);
- break;
+ if (of_node_name_eq(child, "pipeline")) {
+ of_property_read_u32(child, "reg", &pipe_id);
+ if (pipe_id >= mdev->n_pipelines) {
+ DRM_WARN("Skip the redundant DT node: pipeline-%u.\n",
+ pipe_id);
+ continue;
}
+ mdev->pipelines[pipe_id]->of_node = of_node_get(child);
+ }
+ }
+
+ for (pipe_id = 0; pipe_id < mdev->n_pipelines; pipe_id++) {
+ pipe = mdev->pipelines[pipe_id];
+
+ if (!pipe->of_node) {
+ DRM_ERROR("Pipeline-%d doesn't have a DT node.\n",
+ pipe->id);
+ return -EINVAL;
}
+ ret = komeda_parse_pipe_dt(pipe);
+ if (ret)
+ return ret;
}
- return ret;
+ return 0;
}
struct komeda_dev *komeda_dev_create(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- const struct komeda_product_data *product;
+ komeda_identify_func komeda_identify;
struct komeda_dev *mdev;
- struct resource *io_res;
int err = 0;
- product = of_device_get_match_data(dev);
- if (!product)
- return ERR_PTR(-ENODEV);
-
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io_res) {
- DRM_ERROR("No registers defined.\n");
+ komeda_identify = of_device_get_match_data(dev);
+ if (!komeda_identify)
return ERR_PTR(-ENODEV);
- }
mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
@@ -182,7 +211,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
mutex_init(&mdev->lock);
mdev->dev = dev;
- mdev->reg_base = devm_ioremap_resource(dev, io_res);
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->reg_base)) {
DRM_ERROR("Map register space failed.\n");
err = PTR_ERR(mdev->reg_base);
@@ -200,13 +229,11 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
clk_prepare_enable(mdev->aclk);
- mdev->funcs = product->identify(mdev->reg_base, &mdev->chip);
- if (!komeda_product_match(mdev, product->product_id)) {
- DRM_ERROR("DT configured %x mismatch with real HW %x.\n",
- product->product_id,
- MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
+ mdev->funcs = komeda_identify(mdev->reg_base, &mdev->chip);
+ if (!mdev->funcs) {
+ DRM_ERROR("Failed to identify the HW.\n");
err = -ENODEV;
- goto err_cleanup;
+ goto disable_clk;
}
DRM_INFO("Found ARM Mali-D%x version r%dp%d\n",
@@ -219,19 +246,19 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
err = mdev->funcs->enum_resources(mdev);
if (err) {
DRM_ERROR("enumerate display resource failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_parse_dt(dev, mdev);
if (err) {
DRM_ERROR("parse device tree failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_assemble_pipelines(mdev);
if (err) {
DRM_ERROR("assemble display pipelines failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
dev->dma_parms = &mdev->dma_parms;
@@ -241,13 +268,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (!mdev->iommu)
DRM_INFO("continue without IOMMU support!\n");
- if (mdev->iommu && mdev->funcs->connect_iommu) {
- err = mdev->funcs->connect_iommu(mdev);
- if (err) {
- mdev->iommu = NULL;
- goto err_cleanup;
- }
- }
+ clk_disable_unprepare(mdev->aclk);
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
@@ -255,12 +276,16 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
+
#ifdef CONFIG_DEBUG_FS
komeda_debugfs_init(mdev);
#endif
return mdev;
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
err_cleanup:
komeda_dev_destroy(mdev);
return ERR_PTR(err);
@@ -278,9 +303,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
debugfs_remove_recursive(mdev->debugfs_root);
#endif
- if (mdev->iommu && mdev->funcs->disconnect_iommu)
- mdev->funcs->disconnect_iommu(mdev);
- mdev->iommu = NULL;
+ if (mdev->aclk)
+ clk_prepare_enable(mdev->aclk);
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
@@ -289,6 +313,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
mdev->n_pipelines = 0;
+ of_reserved_mem_device_release(dev);
+
if (funcs && funcs->cleanup)
funcs->cleanup(mdev);
@@ -305,3 +331,29 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
devm_kfree(dev, mdev);
}
+
+int komeda_dev_resume(struct komeda_dev *mdev)
+{
+ clk_prepare_enable(mdev->aclk);
+
+ mdev->funcs->enable_irq(mdev);
+
+ if (mdev->iommu && mdev->funcs->connect_iommu)
+ if (mdev->funcs->connect_iommu(mdev))
+ DRM_ERROR("connect iommu failed.\n");
+
+ return 0;
+}
+
+int komeda_dev_suspend(struct komeda_dev *mdev)
+{
+ if (mdev->iommu && mdev->funcs->disconnect_iommu)
+ if (mdev->funcs->disconnect_iommu(mdev))
+ DRM_ERROR("disconnect iommu failed.\n");
+
+ mdev->funcs->disable_irq(mdev);
+
+ clk_disable_unprepare(mdev->aclk);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index d1c86b6174c8..ce27f2f27c24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -20,6 +20,8 @@
#define KOMEDA_EVENT_OVR BIT_ULL(4)
#define KOMEDA_EVENT_EOW BIT_ULL(5)
#define KOMEDA_EVENT_MODE BIT_ULL(6)
+#define KOMEDA_EVENT_FULL BIT_ULL(7)
+#define KOMEDA_EVENT_EMPTY BIT_ULL(8)
#define KOMEDA_ERR_TETO BIT_ULL(14)
#define KOMEDA_ERR_TEMR BIT_ULL(15)
@@ -40,10 +42,24 @@
#define KOMEDA_ERR_TTNG BIT_ULL(30)
#define KOMEDA_ERR_TTF BIT_ULL(31)
-/* malidp device id */
-enum {
- MALI_D71 = 0,
-};
+#define KOMEDA_ERR_EVENTS \
+ (KOMEDA_EVENT_URUN | KOMEDA_EVENT_IBSY | KOMEDA_EVENT_OVR |\
+ KOMEDA_ERR_TETO | KOMEDA_ERR_TEMR | KOMEDA_ERR_TITR |\
+ KOMEDA_ERR_CPE | KOMEDA_ERR_CFGE | KOMEDA_ERR_AXIE |\
+ KOMEDA_ERR_ACE0 | KOMEDA_ERR_ACE1 | KOMEDA_ERR_ACE2 |\
+ KOMEDA_ERR_ACE3 | KOMEDA_ERR_DRIFTTO | KOMEDA_ERR_FRAMETO |\
+ KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\
+ KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF)
+
+#define KOMEDA_WARN_EVENTS \
+ (KOMEDA_ERR_CSCE | KOMEDA_EVENT_FULL | KOMEDA_EVENT_EMPTY)
+
+#define KOMEDA_INFO_EVENTS (0 \
+ | KOMEDA_EVENT_VSYNC \
+ | KOMEDA_EVENT_FLIP \
+ | KOMEDA_EVENT_EOW \
+ | KOMEDA_EVENT_MODE \
+ )
/* pipeline DT ports */
enum {
@@ -58,12 +74,6 @@ struct komeda_chip_info {
u32 bus_width;
};
-struct komeda_product_data {
- u32 product_id;
- const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
- struct komeda_chip_info *info);
-};
-
struct komeda_dev;
struct komeda_events {
@@ -191,6 +201,23 @@ struct komeda_dev {
/** @debugfs_root: root directory of komeda debugfs */
struct dentry *debugfs_root;
+ /**
+ * @err_verbosity: bitmask for how much extra info to print on error
+ *
+ * See KOMEDA_DEV_* macros for details. Low byte contains the debug
+ * level categories, the high byte contains extra debug options.
+ */
+ u16 err_verbosity;
+ /* Print a single line per error per frame with error events. */
+#define KOMEDA_DEV_PRINT_ERR_EVENTS BIT(0)
+ /* Print a single line per warning per frame with error events. */
+#define KOMEDA_DEV_PRINT_WARN_EVENTS BIT(1)
+ /* Print a single line per info event per frame with error events. */
+#define KOMEDA_DEV_PRINT_INFO_EVENTS BIT(2)
+ /* Dump DRM state on an error or warning event. */
+#define KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT BIT(8)
+ /* Disable rate limiting of event prints (normally one per commit) */
+#define KOMEDA_DEV_PRINT_DISABLE_RATELIMIT BIT(12)
};
static inline bool
@@ -199,6 +226,9 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
}
+typedef const struct komeda_dev_funcs *
+(*komeda_identify_func)(u32 __iomem *reg, struct komeda_chip_info *chip);
+
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
@@ -207,4 +237,9 @@ void komeda_dev_destroy(struct komeda_dev *mdev);
struct komeda_dev *dev_to_mdev(struct device *dev);
+void komeda_print_events(struct komeda_events *evts, struct drm_device *dev);
+
+int komeda_dev_resume(struct komeda_dev *mdev);
+int komeda_dev_suspend(struct komeda_dev *mdev);
+
#endif /*_KOMEDA_DEV_H_*/
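
The err_verbosity field added above splits its bitmask into a low byte of print categories and a high byte of extra options. A hedged sketch of how such a mask could be consulted; the report() helper is hypothetical, only the bit assignments come from the patch:

#include <stdio.h>

#define PRINT_ERR_EVENTS	(1u << 0)	/* KOMEDA_DEV_PRINT_ERR_EVENTS */
#define PRINT_WARN_EVENTS	(1u << 1)	/* KOMEDA_DEV_PRINT_WARN_EVENTS */
#define PRINT_INFO_EVENTS	(1u << 2)	/* KOMEDA_DEV_PRINT_INFO_EVENTS */
#define DUMP_STATE_ON_EVENT	(1u << 8)	/* KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT */

static void report(unsigned short verbosity, int is_err, int is_warn)
{
	if (is_err && (verbosity & PRINT_ERR_EVENTS))
		printf("error event line\n");
	else if (is_warn && (verbosity & PRINT_WARN_EVENTS))
		printf("warning event line\n");

	if ((is_err || is_warn) && (verbosity & DUMP_STATE_ON_EVENT))
		printf("dump DRM state\n");
}

int main(void)
{
	report(PRINT_ERR_EVENTS | DUMP_STATE_ON_EVENT, 1, 0);
	return 0;
}
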
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index cfa5068d9d1e..ea5cd1e17304 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
@@ -32,6 +33,12 @@ static void komeda_unbind(struct device *dev)
return;
komeda_kms_detach(mdrv->kms);
+
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ komeda_dev_suspend(mdrv->mdev);
+
komeda_dev_destroy(mdrv->mdev);
dev_set_drvdata(dev, NULL);
@@ -53,6 +60,10 @@ static int komeda_bind(struct device *dev)
goto free_mdrv;
}
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev))
+ komeda_dev_resume(mdrv->mdev);
+
mdrv->kms = komeda_kms_attach(mdrv->mdev);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
@@ -64,6 +75,11 @@ static int komeda_bind(struct device *dev)
return 0;
destroy_mdev:
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ komeda_dev_suspend(mdrv->mdev);
+
komeda_dev_destroy(mdrv->mdev);
free_mdrv:
@@ -83,11 +99,12 @@ static int compare_of(struct device *dev, void *data)
static void komeda_add_slave(struct device *master,
struct component_match **match,
- struct device_node *np, int port)
+ struct device_node *np,
+ u32 port, u32 endpoint)
{
struct device_node *remote;
- remote = of_graph_get_remote_node(np, port, 0);
+ remote = of_graph_get_remote_node(np, port, endpoint);
if (remote) {
drm_of_component_match_add(master, match, compare_of, remote);
of_node_put(remote);
@@ -108,7 +125,8 @@ static int komeda_platform_probe(struct platform_device *pdev)
continue;
/* add connector */
- komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT);
+ komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 0);
+ komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 1);
}
return component_master_add_with_match(dev, &komeda_master_ops, match);
@@ -120,27 +138,63 @@ static int komeda_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct komeda_product_data komeda_products[] = {
- [MALI_D71] = {
- .product_id = MALIDP_D71_PRODUCT_ID,
- .identify = d71_identify,
- },
-};
-
static const struct of_device_id komeda_of_match[] = {
- { .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
+ { .compatible = "arm,mali-d71", .data = d71_identify, },
+ { .compatible = "arm,mali-d32", .data = d71_identify, },
{},
};
MODULE_DEVICE_TABLE(of, komeda_of_match);
+static int komeda_rt_pm_suspend(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return komeda_dev_suspend(mdrv->mdev);
+}
+
+static int komeda_rt_pm_resume(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return komeda_dev_resume(mdrv->mdev);
+}
+
+static int __maybe_unused komeda_pm_suspend(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ int res;
+
+ res = drm_mode_config_helper_suspend(&mdrv->kms->base);
+
+ if (!pm_runtime_status_suspended(dev))
+ komeda_dev_suspend(mdrv->mdev);
+
+ return res;
+}
+
+static int __maybe_unused komeda_pm_resume(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ if (!pm_runtime_status_suspended(dev))
+ komeda_dev_resume(mdrv->mdev);
+
+ return drm_mode_config_helper_resume(&mdrv->kms->base);
+}
+
+static const struct dev_pm_ops komeda_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
+ SET_RUNTIME_PM_OPS(komeda_rt_pm_suspend, komeda_rt_pm_resume, NULL)
+};
+
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
.remove = komeda_platform_remove,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
- .pm = NULL,
+ .pm = &komeda_pm_ops,
},
};
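
With komeda_product_data removed, the OF match data is now the identify callback itself (see the komeda_identify_func typedef added to komeda_dev.h above). A sketch of how that match data could be consumed; the real lookup lives in komeda_dev.c, which is not part of this excerpt, and the helper name below is hypothetical:

	#include <linux/of_device.h>

	/* Hypothetical helper: resolve the chip-specific funcs from OF match data. */
	static const struct komeda_dev_funcs *
	komeda_identify(struct device *dev, u32 __iomem *reg,
			struct komeda_chip_info *chip)
	{
		komeda_identify_func identify = of_device_get_match_data(dev);

		return identify ? identify(reg, chip) : NULL;
	}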
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
new file mode 100644
index 000000000000..53f944e66dfc
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+
+#include "komeda_dev.h"
+
+struct komeda_str {
+ char *str;
+ u32 sz;
+ u32 len;
+};
+
+/* Return 0 on success, or -ENOSPC when the buffer has no space left. */
+__printf(2, 3)
+static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
+{
+ va_list args;
+ int num, free_sz;
+ int err;
+
+ free_sz = str->sz - str->len - 1;
+ if (free_sz <= 0)
+ return -ENOSPC;
+
+ va_start(args, fmt);
+
+ num = vsnprintf(str->str + str->len, free_sz, fmt, args);
+
+ va_end(args);
+
+ if (num < free_sz) {
+ str->len += num;
+ err = 0;
+ } else {
+ str->len = str->sz - 1;
+ err = -ENOSPC;
+ }
+
+ return err;
+}
+
+static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg)
+{
+ if (evt)
+ komeda_sprintf(str, msg);
+}
+
+static void evt_str(struct komeda_str *str, u64 events)
+{
+ if (events == 0ULL) {
+ komeda_sprintf(str, "None");
+ return;
+ }
+
+ evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|");
+ evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|");
+ evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|");
+ evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|");
+
+ evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|");
+ evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|");
+
+ /* GLB error */
+ evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+
+ /* DOU error */
+ evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|");
+
+ /* LPU errors or events */
+ evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
+ evt_sprintf(str, events & KOMEDA_EVENT_EMPTY, "EMPTY|");
+ evt_sprintf(str, events & KOMEDA_EVENT_FULL, "FULL|");
+ evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|");
+
+	/* LPU TBU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|");
+ evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|");
+
+	/* CU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|");
+ evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|");
+ evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+
+ if (str->len > 0 && (str->str[str->len - 1] == '|')) {
+ str->str[str->len - 1] = 0;
+ str->len--;
+ }
+}
+
+static bool is_new_frame(struct komeda_events *a)
+{
+ return (a->pipes[0] | a->pipes[1]) &
+ (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
+}
+
+void komeda_print_events(struct komeda_events *evts, struct drm_device *dev)
+{
+ u64 print_evts = 0;
+ static bool en_print = true;
+ struct komeda_dev *mdev = dev->dev_private;
+ u16 const err_verbosity = mdev->err_verbosity;
+ u64 evts_mask = evts->global | evts->pipes[0] | evts->pipes[1];
+
+	/* Reduce duplicate messages: only print the first event of each frame. */
+ if (evts->global || is_new_frame(evts))
+ en_print = true;
+ if (!(err_verbosity & KOMEDA_DEV_PRINT_DISABLE_RATELIMIT) && !en_print)
+ return;
+
+ if (err_verbosity & KOMEDA_DEV_PRINT_ERR_EVENTS)
+ print_evts |= KOMEDA_ERR_EVENTS;
+ if (err_verbosity & KOMEDA_DEV_PRINT_WARN_EVENTS)
+ print_evts |= KOMEDA_WARN_EVENTS;
+ if (err_verbosity & KOMEDA_DEV_PRINT_INFO_EVENTS)
+ print_evts |= KOMEDA_INFO_EVENTS;
+
+ if (evts_mask & print_evts) {
+ char msg[256];
+ struct komeda_str str;
+ struct drm_printer p = drm_info_printer(dev->dev);
+
+ str.str = msg;
+ str.sz = sizeof(msg);
+ str.len = 0;
+
+ komeda_sprintf(&str, "gcu: ");
+ evt_str(&str, evts->global);
+ komeda_sprintf(&str, ", pipes[0]: ");
+ evt_str(&str, evts->pipes[0]);
+ komeda_sprintf(&str, ", pipes[1]: ");
+ evt_str(&str, evts->pipes[1]);
+
+ DRM_ERROR("err detect: %s\n", msg);
+ if ((err_verbosity & KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT) &&
+ (evts_mask & (KOMEDA_ERR_EVENTS | KOMEDA_WARN_EVENTS)))
+ drm_state_dump(dev, &p);
+
+ en_print = false;
+ }
+}
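
To illustrate the string builder above, an illustrative-only sketch (not part of the patch) of how komeda_sprintf() and evt_str() compose one log line:

	char msg[256];
	struct komeda_str str = { .str = msg, .sz = sizeof(msg), .len = 0 };

	komeda_sprintf(&str, "gcu: ");
	evt_str(&str, KOMEDA_EVENT_VSYNC | KOMEDA_ERR_MERR);
	/* msg now holds "gcu: VSYNC|MERR"; the trailing '|' has been stripped. */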
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
return NULL;
}
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
+{
+ u32 bpp;
+
+ switch (info->format) {
+ case DRM_FORMAT_YUV420_8BIT:
+ bpp = 12;
+ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+ default:
+ bpp = info->cpp[0] * 8;
+ break;
+ }
+
+ return bpp;
+}
+
/* Two assumptions
* 1. RGB always has YTR
* 2. Tiled RGB always has SC
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier);
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
+ u64 modifier);
+
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
u32 layer_type, u32 *n_fmts);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
- u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks;
+ u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
u64 min_size;
obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
alignment_header);
+ bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
kfb->afbc_size = kfb->offset_payload + n_blocks *
- ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS,
+ ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
AFBC_SUPERBLK_ALIGNMENT);
min_size = kfb->afbc_size + fb->offsets[0];
if (min_size > obj->size) {
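
A short worked example of the corrected AFBC payload sizing above, assuming a 16x16 AFBC superblock, i.e. AFBC_SUPERBLK_PIXELS == 256 (an assumption; that constant is not shown in this excerpt): for DRM_FORMAT_YUV420_8BIT the new helper returns bpp = 12, so each superblock needs ALIGN(12 * 256 / 8, AFBC_SUPERBLK_ALIGNMENT) = ALIGN(384, ...) bytes of payload, whereas the old info->cpp[0]-based formula could not represent a packed 1.5-bytes-per-pixel YUV layout at all.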
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..442d4656150a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
@@ -47,6 +48,8 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
memset(&evts, 0, sizeof(evts));
status = mdev->funcs->irq_handler(mdev, &evts);
+ komeda_print_events(&evts, drm);
+
/* Notify the crtc to handle the events */
for (i = 0; i < kms->n_crtcs; i++)
komeda_crtc_handle_event(&kms->crtcs[i], &evts);
@@ -55,16 +58,13 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
}
static struct drm_driver komeda_kms_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME | DRIVER_HAVE_IRQ,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = komeda_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -84,7 +84,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
@@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct komeda_plane_state *kplane_st;
struct drm_plane_state *plane_st;
- struct drm_framebuffer *fb;
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
@@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
plane_st = &kplane_st->base;
- fb = plane_st->fb;
plane = plane_st->plane;
plane_st->normalized_zpos = order++;
@@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+ struct drm_crtc_state *new_crtc_st;
int i, err;
err = drm_atomic_helper_check_modeset(dev, state);
@@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
* so need to add all affected_planes (even unchanged) to
* drm_atomic_state.
*/
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
err = drm_atomic_add_affected_planes(state, crtc);
if (err)
return err;
@@ -307,24 +306,28 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
komeda_kms_irq_handler, IRQF_SHARED,
drm->driver->name, drm);
if (err)
- goto cleanup_mode_config;
-
- err = mdev->funcs->enable_irq(mdev);
- if (err)
- goto cleanup_mode_config;
+ goto free_component_binding;
drm->irq_enabled = true;
+ drm_kms_helper_poll_init(drm);
+
err = drm_dev_register(drm, 0);
if (err)
- goto cleanup_mode_config;
+ goto free_interrupts;
return kms;
-cleanup_mode_config:
+free_interrupts:
+ drm_kms_helper_poll_fini(drm);
drm->irq_enabled = false;
+free_component_binding:
+ component_unbind_all(mdev->dev, drm);
+cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
+ drm->dev_private = NULL;
+ drm_dev_put(drm);
free_kms:
kfree(kms);
return ERR_PTR(err);
@@ -335,12 +338,13 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
- drm->irq_enabled = false;
- mdev->funcs->disable_irq(mdev);
drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm->irq_enabled = false;
component_unbind_all(mdev->dev, drm);
- komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);
+ komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 8c89fc245b83..456f3c435719 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -14,8 +14,6 @@
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
#include <drm/drm_print.h>
-#include <video/videomode.h>
-#include <video/display_timing.h>
/**
* struct komeda_plane - komeda instance of drm_plane
@@ -168,7 +166,9 @@ static inline bool has_flip_h(u32 rot)
return !!(rotation & DRM_MODE_REFLECT_X);
}
-unsigned long komeda_calc_aclk(struct komeda_crtc_state *kcrtc_st);
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats);
+unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 78e44d9e1520..452e505a1fd3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -54,7 +54,8 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
clk_put(pipe->pxlclk);
- of_node_put(pipe->of_output_dev);
+ of_node_put(pipe->of_output_links[0]);
+ of_node_put(pipe->of_output_links[1]);
of_node_put(pipe->of_output_port);
of_node_put(pipe->of_node);
@@ -246,9 +247,15 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
struct komeda_component *c;
int id;
- DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s\n",
+ DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
pipe->id, pipe->n_layers, pipe->n_scalers,
- pipe->of_output_dev ? pipe->of_output_dev->full_name : "none");
+ pipe->dual_link ? "dual-link" : "single-link");
+ DRM_INFO(" output_link[0]: %s.\n",
+ pipe->of_output_links[0] ?
+ pipe->of_output_links[0]->full_name : "none");
+ DRM_INFO(" output_link[1]: %s.\n",
+ pipe->of_output_links[1] ?
+ pipe->of_output_links[1]->full_name : "none");
dp_for_each_set_bit(id, pipe->avail_comps) {
c = komeda_pipeline_get_component(pipe, id);
@@ -305,6 +312,12 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
layer->right = komeda_get_layer_split_right_layer(pipe, layer);
}
+
+ if (pipe->dual_link && !pipe->ctrlr->supports_dual_link) {
+ pipe->dual_link = false;
+ DRM_WARN("PIPE-%d doesn't support dual-link, ignore DT dual-link configuration.\n",
+ pipe->id);
+ }
}
/* if pipeline_A accept another pipeline_B's component as input, treat
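
The pipeline now tracks two output links instead of a single output device, matching the endpoint-aware komeda_add_slave() change in komeda_drv.c above. A sketch of how both links could be resolved from the DT graph, given the pipeline's DT node np; the actual parsing lives in komeda_dev.c, outside this excerpt:

	pipe->of_output_links[0] =
		of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0);
	pipe->of_output_links[1] =
		of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 1);
	pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1];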
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..ac8725e24853 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include "malidp_utils.h"
+#include "komeda_color_mgmt.h"
#define KOMEDA_MAX_PIPELINES 2
#define KOMEDA_PIPELINE_MAX_LAYERS 4
@@ -227,6 +228,8 @@ struct komeda_layer {
/* accepted h/v input range before rotation */
struct malidp_range hsize_in, vsize_in;
u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 line_sz;
+ u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */
u32 supported_rots;
/* komeda supports layer split which splits a whole image to two parts
* left and right and handle them by two individual layer processors
@@ -323,7 +326,10 @@ struct komeda_improc {
struct komeda_improc_state {
struct komeda_component_state base;
+ u8 color_format, color_depth;
u16 hsize, vsize;
+ u32 fgamma_coeffs[KOMEDA_N_GAMMA_COEFFS];
+ u32 ctm_coeffs[KOMEDA_N_CTM_COEFFS];
};
/* display timing controller */
@@ -389,6 +395,18 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /**
+ * @standalone_disabled_comps:
+ *
+	 * When disabling the pipeline, some components cannot be disabled
+	 * together with the others and need a separate, standalone disable.
+	 * standalone_disabled_comps is the mask of components that must be
+	 * disabled on their own, which introduces a two-phase disable:
+	 * phase 1: disable the common components.
+	 * phase 2: disable the standalone_disabled_comps.
+ */
+ u32 standalone_disabled_comps;
/** @n_layers: the number of layer on @layers */
int n_layers;
/** @layers: the pipeline layers */
@@ -416,8 +434,10 @@ struct komeda_pipeline {
struct device_node *of_node;
/** @of_output_port: pipeline output port */
struct device_node *of_output_port;
- /** @of_output_dev: output connector device node */
- struct device_node *of_output_dev;
+ /** @of_output_links: output connector device nodes */
+ struct device_node *of_output_links[2];
+ /** @dual_link: true if of_output_links[0] and [1] are both valid */
+ bool dual_link;
};
/**
@@ -480,6 +500,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
struct seq_file *sf);
/* component APIs */
+extern __printf(10, 11)
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
@@ -532,7 +553,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *
komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state);
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
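
komeda_pipeline_disable() now returns whether a second pass is needed for the components in @standalone_disabled_comps. A hypothetical caller sketch; the real caller is the komeda CRTC code, which is not part of this excerpt:

	/* Phase 1: disable the common components and flush. */
	if (komeda_pipeline_disable(pipe, old_state)) {
		/* ... flush the phase-1 configuration to the hardware ... */
		/* Phase 2: disable the remaining standalone components. */
		komeda_pipeline_disable(pipe, old_state);
	}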
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 950235af1e79..8f32ae7c25d0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_data_flow_cfg *dflow)
{
u32 src_x, src_y, src_w, src_h;
+ u32 line_sz, max_line_sz;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
@@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
return -EINVAL;
}
+ if (drm_rotation_90_or_270(dflow->rot))
+ line_sz = dflow->in_h;
+ else
+ line_sz = dflow->in_w;
+
+ if (kfb->base.format->hsub > 1)
+ max_line_sz = layer->yuv_line_sz;
+ else
+ max_line_sz = layer->line_sz;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -473,7 +490,7 @@ komeda_scaler_check_cfg(struct komeda_scaler *scaler,
err = pipe->funcs->downscaling_clk_check(pipe,
&kcrtc_st->base.adjusted_mode,
- komeda_calc_aclk(kcrtc_st), dflow);
+ komeda_crtc_get_aclk(kcrtc_st), dflow);
if (err) {
DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
return err;
@@ -564,8 +581,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
}
if (!in_range(&splitter->vsize, dflow->in_h)) {
- DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
- dflow->in_w);
+ DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+ dflow->in_h);
return -EINVAL;
}
@@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct drm_crtc_state *crtc_st = &kcrtc_st->base;
struct komeda_component_state *c_st;
struct komeda_improc_state *st;
@@ -756,6 +774,40 @@ komeda_improc_validate(struct komeda_improc *improc,
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
+ if (drm_atomic_crtc_needs_modeset(crtc_st)) {
+ u32 output_depths, output_formats;
+ u32 avail_depths, avail_formats;
+
+ komeda_crtc_get_color_config(crtc_st, &output_depths,
+ &output_formats);
+
+ avail_depths = output_depths & improc->supported_color_depths;
+ if (avail_depths == 0) {
+ DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
+ output_depths,
+ improc->supported_color_depths);
+ return -EINVAL;
+ }
+
+ avail_formats = output_formats &
+ improc->supported_color_formats;
+ if (!avail_formats) {
+ DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
+ output_formats,
+ improc->supported_color_formats);
+ return -EINVAL;
+ }
+
+ st->color_depth = __fls(avail_depths);
+ st->color_format = BIT(__ffs(avail_formats));
+ }
+
+ if (kcrtc_st->base.color_mgmt_changed) {
+ drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut,
+ st->fgamma_coeffs);
+ drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs);
+ }
+
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
@@ -1218,7 +1270,17 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
return 0;
}
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+/* Since standalone disabled components must be disabled separately and last,
+ * a complete disable operation may need to call pipeline_disable twice
+ * (two-phase disabling).
+ * Phase 1: disable the common components and flush the configuration.
+ * Phase 2: disable the standalone disabled components and flush again.
+ *
+ * RETURNS:
+ * true: disable is not complete, needs a phase 2 disable.
+ * false: disable is complete.
+ */
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state)
{
struct komeda_pipeline_state *old;
@@ -1228,9 +1290,14 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
old = komeda_pipeline_get_old_state(pipe, old_state);
- disabling_comps = old->active_comps;
- DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
- pipe->id, disabling_comps);
+ disabling_comps = old->active_comps &
+ (~pipe->standalone_disabled_comps);
+ if (!disabling_comps)
+ disabling_comps = old->active_comps &
+ pipe->standalone_disabled_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
+ pipe->id, old->active_comps, disabling_comps);
dp_for_each_set_bit(id, disabling_comps) {
c = komeda_pipeline_get_component(pipe, id);
@@ -1248,6 +1315,13 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
c->funcs->disable(c);
}
+
+	/* Update the pipeline state; if some components are still active,
+	 * return true so the caller performs the phase 2 disable.
+ */
+ old->active_comps &= ~disabling_comps;
+
+ return old->active_comps ? true : false;
}
void komeda_pipeline_update(struct komeda_pipeline *pipe,
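
A worked example of the color selection bit math in komeda_improc_validate() above, with purely hypothetical masks: assuming depths are encoded as BIT(bits-per-component), avail_depths == BIT(8) | BIT(10) makes __fls() return 10, the deepest depth both sides support; and if avail_formats == 0x6 (two common format bits set), BIT(__ffs(0x6)) == 0x2 keeps only the lowest-numbered common format.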
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index c095af154216..98e915e325dd 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -158,7 +158,7 @@ static void komeda_plane_reset(struct drm_plane *plane)
static struct drm_plane_state *
komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- struct komeda_plane_state *new, *old;
+ struct komeda_plane_state *new;
if (WARN_ON(!plane->state))
return NULL;
@@ -169,8 +169,6 @@ komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
- old = to_kplane_st(plane->state);
-
return &new->base;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 617e1f7b8472..e465cc4879c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
struct komeda_data_flow_cfg dflow;
int err;
- if (!writeback_job || !writeback_job->fb) {
+ if (!writeback_job)
return 0;
- }
if (!crtc_st->active) {
DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n");
@@ -142,13 +141,14 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
+ struct drm_display_info *info;
u32 *formats, n_formats = 0;
int err;
if (!kcrtc->master->wb_layer)
return 0;
- kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL);
+ kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
if (!kwb_conn)
return -ENOMEM;
@@ -166,11 +166,17 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
&komeda_wb_encoder_helper_funcs,
formats, n_formats);
komeda_put_fourcc_list(formats);
- if (err)
+ if (err) {
+ kfree(kwb_conn);
return err;
+ }
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
+ info = &kwb_conn->base.base.display_info;
+ info->bpc = __fls(kcrtc->master->improc->supported_color_depths);
+ info->color_formats = kcrtc->master->improc->supported_color_formats;
+
kcrtc->wb_conn = kwb_conn;
return 0;
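
The kzalloc() fix above matters because komeda_wb_connector embeds a drm_writeback_connector rather than being one. A rough layout sketch; the full definition lives in komeda_kms.h, outside this excerpt:

	struct komeda_wb_connector {
		struct drm_writeback_connector base;	/* all that sizeof(*wb_conn) covered */
		/* ... additional komeda-specific members ... */
	};

With sizeof(*wb_conn) the allocation only covered the embedded base, so writes to the extra members landed past the end of the buffer.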
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index a3efa28436ea..af67fefed38d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -9,7 +9,12 @@
* Implementation of a CRTC class for the HDLCD driver.
*/
-#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+
+#include <video/videomode.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -19,10 +24,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 8fc0b884c428..2e053815b54a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -14,21 +14,26 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/console.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
@@ -229,9 +234,7 @@ static int hdlcd_debugfs_init(struct drm_minor *minor)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver hdlcd_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
@@ -242,8 +245,6 @@ static struct drm_driver hdlcd_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index db4451260fff..587d94798f5c 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -6,14 +6,17 @@
* ARM Mali DP500/DP550/DP650 driver (crtc operations)
*/
-#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <video/videomode.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "malidp_drv.h"
#include "malidp_hw.h"
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index f25ec4382277..37d92a06318e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -15,17 +15,19 @@
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "malidp_drv.h"
#include "malidp_mw.h"
@@ -366,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
return false;
}
-struct drm_framebuffer *
+static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -489,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp,
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}
-void malidp_error_stats_dump(const char *prefix,
- struct malidp_error_stats error_stats,
- struct seq_file *m)
+static void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
{
seq_printf(m, "[%s] num_errors : %d\n", prefix,
error_stats.num_errors);
@@ -561,15 +563,12 @@ static int malidp_debugfs_init(struct drm_minor *minor)
#endif //CONFIG_DEBUG_FS
static struct drm_driver malidp_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = malidp_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -666,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}
-DEVICE_ATTR_RO(core_id);
+static DEVICE_ATTR_RO(core_id);
static int malidp_init_sysfs(struct device *dev)
{
@@ -818,6 +817,12 @@ static int malidp_bind(struct device *dev)
malidp->core_id = version;
+ ret = of_property_read_u32(dev->of_node,
+ "arm,malidp-arqos-value",
+ &hwdev->arqos_value);
+ if (ret)
+ hwdev->arqos_value = 0x0;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 0a639af8337e..cdfddfabf2d1 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -9,12 +9,13 @@
#ifndef __MALIDP_DRV_H__
#define __MALIDP_DRV_H__
-#include <drm/drm_writeback.h>
-#include <drm/drm_encoder.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_writeback.h>
+#include <drm/drm_encoder.h>
+
#include "malidp_hw.h"
#define MALIDP_CONFIG_VALID_INIT 0
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 50af399d7f6f..ca570b135478 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -9,12 +9,17 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/io.h>
-#include <drm/drmP.h>
+
#include <video/videomode.h>
#include <video/display_timing.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_print.h>
+
#include "malidp_drv.h"
#include "malidp_hw.h"
#include "malidp_mw.h"
@@ -374,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+
+ /*
+	 * Program the RQoS register to avoid flicker issues at high
+	 * resolutions on the LS1028A.
+ */
+ if (hwdev->arqos_value) {
+ val = hwdev->arqos_value;
+ malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
+ }
}
int malidp_format_get_bpp(u32 fmt)
@@ -385,6 +399,7 @@ int malidp_format_get_bpp(u32 fmt)
switch (fmt) {
case DRM_FORMAT_VUY101010:
bpp = 30;
+ break;
case DRM_FORMAT_YUV420_10BIT:
bpp = 15;
break;
@@ -1309,7 +1324,7 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
break;
case MW_RESTART:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
- /* fall through to a new start */
+ /* fall through - to a new start */
case MW_START:
/* writeback started, need to emulate one-shot mode */
hw->disable_memwrite(hwdev);
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 968a65eed371..e4c36bc90bda 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -251,6 +251,9 @@ struct malidp_hw_device {
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
+
+	/* priority level of the RQOS register used to drive the ARQOS signal */
+ u32 arqos_value;
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 2e812525025d..7d0e7b031e44 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -5,13 +5,14 @@
*
* ARM Mali DP Writeback connector implementation
*/
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
#include "malidp_drv.h"
@@ -55,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
.get_modes = malidp_mw_connector_get_modes,
.mode_valid = malidp_mw_connector_mode_valid,
};
@@ -130,7 +131,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_framebuffer *fb;
int i, n_planes;
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
@@ -247,7 +248,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
mw_state = to_mw_state(conn_state);
- if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+ if (conn_state->writeback_job) {
struct drm_framebuffer *fb = conn_state->writeback_job->fb;
DRM_DEV_DEBUG_DRIVER(drm->dev,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 488375bd133d..37715cc6064e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -7,11 +7,13 @@
*/
#include <linux/iommu.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
@@ -510,7 +512,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
int i, ret;
unsigned int block_w, block_h;
- if (!state->crtc || !state->fb)
+ if (!state->crtc || WARN_ON(!state->fb))
return 0;
fb = state->fb;
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 993031542fa1..514c50dcb74d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -210,6 +210,16 @@
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
+/*
+ * The quality of service (QoS) register on the DP500. The RQOS register
+ * values drive the ARQOS signal on AXI transactions, dependent on the
+ * FIFO input level.
+ * The RQOS register can also set QoS levels for:
+ * - RED_ARQOS @ a 4-bit signal value for close-to-underflow conditions
+ * - GREEN_ARQOS @ a 4-bit signal value for normal conditions
+ */
+#define MALIDP500_RQOS_QUALITY 0x00500
+
/* register offsets and bits specific to DP550/DP650 */
#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index d44fca4e1655..c2b92acd1e9a 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -3,15 +3,19 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
+
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index dc3716dbb2c0..c6fc2f1d58e9 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -3,11 +3,15 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
+
#include <linux/ctype.h>
-#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index c7794c8bdd90..a11bdaccbb33 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -8,11 +8,14 @@
#include <linux/kfifo.h>
#include <linux/io.h>
#include <linux/workqueue.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
struct armada_crtc;
struct armada_gem_object;
struct clk;
+struct drm_display_mode;
struct drm_fb_helper;
static inline void
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 521464f08ccd..197dca3fc84c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -2,14 +2,22 @@
/*
* Copyright (C) 2012 Russell King
*/
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_vblank.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
@@ -40,8 +48,7 @@ static struct drm_driver armada_drm_driver = {
.name = "armada-drm",
.desc = "Armada SoC DRM",
.date = "20120730",
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index de030cb0aa90..426ca383d696 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -2,9 +2,12 @@
/*
* Copyright (C) 2012 Russell King
*/
+
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 096aff530b01..ac8a78bfda03 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -3,17 +3,20 @@
* Copyright (C) 2012 Russell King
* Written from the i915 driver.
*/
+
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
-static /*const*/ struct fb_ops armada_fb_ops = {
+static const struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 874b2968a866..976685f2939e 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -2,12 +2,17 @@
/*
* Copyright (C) 2012 Russell King
*/
+
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
+#include <linux/mman.h>
#include <linux/shmem_fs.h>
+
+#include <drm/armada_drm.h>
+#include <drm/drm_prime.h>
+
#include "armada_drm.h"
#include "armada_gem.h"
-#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
@@ -456,16 +461,6 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
kfree(sgt);
}
-static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
-{
- return NULL;
-}
-
-static void
-armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
-{
-}
-
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
@@ -476,14 +471,11 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map = armada_gem_dmabuf_no_kmap,
- .unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
};
struct dma_buf *
-armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
- int flags)
+armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -492,7 +484,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
exp_info.flags = O_RDWR;
exp_info.priv = obj;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
struct drm_gem_object *
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index 1dd80540b8ce..de04cc2c8f0e 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -32,8 +32,7 @@ struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
size_t);
int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *);
-struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *armada_gem_prime_export(struct drm_gem_object *obj, int flags);
struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
struct dma_buf *);
int armada_gem_map_import(struct armada_gem_object *);
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index e8060216b389..07f0da4d9ba1 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -3,12 +3,14 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
-#include <drm/drmP.h>
+
+#include <drm/armada_drm.h>
#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
-#include <drm/armada_drm.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index f08b4f37816d..e7cc2b343bcb 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -3,10 +3,12 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index f03a56bda596..528f20fe3147 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -3,7 +3,10 @@
#define ARMADA_TRACE_H
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
+
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_plane;
#undef TRACE_SYSTEM
#define TRACE_SYSTEM armada
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 15db9e426ec4..2184b8be6fd4 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -215,7 +215,7 @@ static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
}
-static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
+static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
.enable = aspeed_gfx_pipe_enable,
.disable = aspeed_gfx_pipe_disable,
.update = aspeed_gfx_pipe_update,
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index eeb22eccd1fc..ada2f6aca906 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -194,8 +194,7 @@ static void aspeed_gfx_unload(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver aspeed_gfx_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_create_object = drm_cma_gem_create_object_default_funcs,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 829620d5326c..fbcf2f45cef5 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index b086dae17013..561f7c4199e4 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_ttm.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 4c7375b45281..98cd69269263 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "ast_drv.h"
+
MODULE_FIRMWARE("ast_dp501_fw.bin");
static int ast_load_dp501_microcode(struct drm_device *dev)
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 3811997e78c4..30aa73a5d9b7 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -25,11 +25,14 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <linux/module.h>
+
#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -82,9 +85,42 @@ static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct drm_device *dev;
+ int ret;
+
ast_kick_out_firmware_fb(pdev);
- return drm_get_pci_dev(pdev, ent, &driver);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = ast_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_ast_driver_unload;
+
+ return 0;
+
+err_ast_driver_unload:
+ ast_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+
}
static void
@@ -92,36 +128,27 @@ ast_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+ drm_dev_unregister(dev);
+ ast_driver_unload(dev);
+ drm_dev_put(dev);
}
-
-
static int ast_drm_freeze(struct drm_device *dev)
{
- drm_kms_helper_poll_disable(dev);
+ int error;
+ error = drm_mode_config_helper_suspend(dev);
+ if (error)
+ return error;
pci_save_state(dev->pdev);
-
- console_lock();
- ast_fbdev_set_suspend(dev, 1);
- console_unlock();
return 0;
}
static int ast_drm_thaw(struct drm_device *dev)
{
- int error = 0;
-
ast_post_gpu(dev);
- drm_mode_config_reset(dev);
- drm_helper_resume_force_mode(dev);
-
- console_lock();
- ast_fbdev_set_suspend(dev, 0);
- console_unlock();
- return error;
+ return drm_mode_config_helper_resume(dev);
}
static int ast_drm_resume(struct drm_device *dev)
@@ -134,8 +161,6 @@ static int ast_drm_resume(struct drm_device *dev)
ret = ast_drm_thaw(dev);
if (ret)
return ret;
-
- drm_kms_helper_poll_enable(dev);
return 0;
}
@@ -153,6 +178,7 @@ static int ast_pm_suspend(struct device *dev)
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
+
static int ast_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -168,7 +194,6 @@ static int ast_pm_freeze(struct device *dev)
if (!ddev || !ddev->dev_private)
return -ENODEV;
return ast_drm_freeze(ddev);
-
}
static int ast_pm_thaw(struct device *dev)
@@ -203,16 +228,12 @@ static struct pci_driver ast_pci_driver = {
.driver.pm = &ast_pm_ops,
};
-static const struct file_operations ast_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
-
- .load = ast_driver_load,
- .unload = ast_driver_unload,
+ .driver_features = DRIVER_ATOMIC |
+ DRIVER_GEM |
+ DRIVER_MODESET,
.fops = &ast_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 684e15e64a62..f5d8780776ae 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,17 +28,18 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_vram_helper.h>
-
-#include <drm/drm_vram_mm_helper.h>
-
+#include <linux/types.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_fb_helper.h>
+
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
@@ -81,7 +82,24 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
-struct ast_fbdev;
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
struct ast_private {
struct drm_device *dev;
@@ -96,12 +114,16 @@ struct ast_private {
uint32_t mclk;
uint32_t vram_size;
- struct ast_fbdev *fbdev;
-
int fb_mtrr;
- struct drm_gem_object *cursor_cache;
- int next_cursor;
+ struct {
+ struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
+ unsigned int next_index;
+ } cursor;
+
+ struct drm_plane primary_plane;
+ struct drm_plane cursor_plane;
+
bool support_wide_screen;
enum {
ast_use_p2a,
@@ -118,8 +140,6 @@ struct ast_private {
int ast_driver_load(struct drm_device *dev, unsigned long flags);
void ast_driver_unload(struct drm_device *dev);
-struct ast_gem_object;
-
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
#define AST_IO_VGA_ENABLE_PORT (0x43)
@@ -202,23 +222,6 @@ static inline void ast_open_key(struct ast_private *ast)
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-#define AST_MAX_HWC_WIDTH 64
-#define AST_MAX_HWC_HEIGHT 64
-
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
-#define AST_HWC_SIGNATURE_SIZE 32
-
-#define AST_DEFAULT_HWC_NUM 2
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
-
-
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -239,24 +242,9 @@ struct ast_encoder {
struct drm_encoder base;
};
-struct ast_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
-struct ast_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct ast_framebuffer afb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
#define to_ast_connector(x) container_of(x, struct ast_connector, base)
#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
-#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
struct ast_vbios_stdtable {
u8 misc;
@@ -293,18 +281,19 @@ struct ast_vbios_mode_info {
const struct ast_vbios_enhtable *enh_table;
};
-extern int ast_mode_init(struct drm_device *dev);
-extern void ast_mode_fini(struct drm_device *dev);
+struct ast_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Last known format of primary plane */
+ const struct drm_format_info *format;
+
+ struct ast_vbios_mode_info vbios_mode_info;
+};
-int ast_framebuffer_init(struct drm_device *dev,
- struct ast_framebuffer *ast_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
+#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_fbdev_init(struct drm_device *dev);
-void ast_fbdev_fini(struct drm_device *dev);
-void ast_fbdev_set_suspend(struct drm_device *dev, int state);
-void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -312,10 +301,6 @@ void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
int ast_mm_init(struct ast_private *ast);
void ast_mm_fini(struct ast_private *ast);
-int ast_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
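
The hardware-cursor constants moved into this header describe one cursor slot: a 64x64, 2-bytes-per-pixel image (AST_HWC_SIZE) followed by a small signature block at the offsets listed above. As an illustrative sketch only (the helper name is made up; the real update code lives in ast_mode.c below), writing a slot's signature looks roughly like this:

/* Illustrative: 'slot' points at one cursor buffer in VRAM. */
static void example_write_hwc_signature(void __iomem *slot, u32 csum,
					u32 width, u32 height)
{
	void __iomem *sig = slot + AST_HWC_SIZE;

	writel(csum, sig + AST_HWC_SIGNATURE_CHECKSUM);
	writel(width, sig + AST_HWC_SIGNATURE_SizeX);
	writel(height, sig + AST_HWC_SIGNATURE_SizeY);
	writel(0, sig + AST_HWC_SIGNATURE_HOTSPOTX);
	writel(0, sig + AST_HWC_SIGNATURE_HOTSPOTY);
}
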
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
deleted file mode 100644
index 8200b25dad16..000000000000
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_util.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "ast_drv.h"
-
-static void ast_dirty_update(struct ast_fbdev *afbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_vram_object *gbo;
- int src_offset, dst_offset;
- int bpp = afbdev->afb.base.format->cpp[0];
- int ret;
- u8 *dst;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- gbo = drm_gem_vram_of_gem(afbdev->afb.obj);
-
- if (drm_can_sleep()) {
- /* We pin the BO so it won't be moved during the
- * update. The actual location, video RAM or system
- * memory, is not important.
- */
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
- } else {
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&afbdev->dirty_lock, flags);
-
- if (afbdev->y1 < y)
- y = afbdev->y1;
- if (afbdev->y2 > y2)
- y2 = afbdev->y2;
- if (afbdev->x1 < x)
- x = afbdev->x1;
- if (afbdev->x2 > x2)
- x2 = afbdev->x2;
-
- if (store_for_later) {
- afbdev->x1 = x;
- afbdev->x2 = x2;
- afbdev->y1 = y;
- afbdev->y2 = y2;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
- return;
- }
-
- afbdev->x1 = afbdev->y1 = INT_MAX;
- afbdev->x2 = afbdev->y2 = 0;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
-
- dst = drm_gem_vram_kmap(gbo, false, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- } else if (!dst) {
- dst = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- }
- unmap = true;
- }
-
- for (i = y; i <= y2; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset =
- i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(dst + dst_offset, afbdev->sysram + src_offset,
- (x2 - x + 1) * bpp);
- }
-
- if (unmap)
- drm_gem_vram_kunmap(gbo);
-
-out:
- drm_gem_vram_unpin(gbo);
-}
-
-static void ast_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct ast_fbdev *afbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void ast_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct ast_fbdev *afbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- ast_dirty_update(afbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void ast_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct ast_fbdev *afbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- ast_dirty_update(afbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-static struct fb_ops astfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = ast_fillrect,
- .fb_copyarea = ast_copyarea,
- .fb_imageblit = ast_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int astfb_create_object(struct ast_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = ast_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int astfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct ast_fbdev *afbdev =
- container_of(helper, struct ast_fbdev, helper);
- struct drm_device *dev = afbdev->helper.dev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- int size, ret;
- void *sysram;
- struct drm_gem_object *gobj = NULL;
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram)
- return -ENOMEM;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
- ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
- if (ret)
- goto out;
-
- afbdev->sysram = sysram;
- afbdev->size = size;
-
- fb = &afbdev->afb.base;
- afbdev->helper.fb = fb;
-
- info->fbops = &astfb_ops;
-
- info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
-
- drm_fb_helper_fill_info(info, &afbdev->helper, sizes);
-
- info->screen_base = sysram;
- info->screen_size = size;
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n",
- fb->width, fb->height);
-
- return 0;
-
-out:
- vfree(sysram);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
- .fb_probe = astfb_create,
-};
-
-static void ast_fbdev_destroy(struct drm_device *dev,
- struct ast_fbdev *afbdev)
-{
- struct ast_framebuffer *afb = &afbdev->afb;
-
- drm_helper_force_disable_all(dev);
- drm_fb_helper_unregister_fbi(&afbdev->helper);
-
- if (afb->obj) {
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&afbdev->helper);
-
- vfree(afbdev->sysram);
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
-
-int ast_fbdev_init(struct drm_device *dev)
-{
- struct ast_private *ast = dev->dev_private;
- struct ast_fbdev *afbdev;
- int ret;
-
- afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
- if (!afbdev)
- return -ENOMEM;
-
- ast->fbdev = afbdev;
- spin_lock_init(&afbdev->dirty_lock);
-
- drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, &afbdev->helper, 1);
- if (ret)
- goto free;
-
- ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper);
- if (ret)
- goto fini;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(&afbdev->helper, 32);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&afbdev->helper);
-free:
- kfree(afbdev);
- return ret;
-}
-
-void ast_fbdev_fini(struct drm_device *dev)
-{
- struct ast_private *ast = dev->dev_private;
-
- if (!ast->fbdev)
- return;
-
- ast_fbdev_destroy(dev, ast->fbdev);
- kfree(ast->fbdev);
- ast->fbdev = NULL;
-}
-
-void ast_fbdev_set_suspend(struct drm_device *dev, int state)
-{
- struct ast_private *ast = dev->dev_private;
-
- if (!ast->fbdev)
- return;
-
- drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
-}
-
-void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
-{
- ast->fbdev->helper.fbdev->fix.smem_start =
- ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
- ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
-}
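
All of the fbdev emulation deleted above (shadow buffer, dirty-rectangle tracking, fb_ops) is superseded by the core's generic fbdev client. A minimal sketch of what a driver now does instead, assuming the mode configuration has already been initialized (this mirrors the ast_main.c hunk further below):

	drm_mode_config_reset(dev);

	/* Generic fbdev emulation; 32 is the preferred bpp. */
	ret = drm_fbdev_generic_setup(dev, 32);
	if (ret)
		return ret;
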
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index a5d1494a3dc4..b79f484e9bd2 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -25,12 +25,17 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
-#include "ast_drv.h"
+#include <linux/pci.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_vram_helper.h>
+
+#include "ast_drv.h"
void ast_set_index_reg_mask(struct ast_private *ast,
uint32_t base, uint8_t index,
@@ -383,67 +388,33 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
{
- struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
-
- drm_gem_object_put_unlocked(ast_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(ast_fb);
-}
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
-static const struct drm_framebuffer_funcs ast_fb_funcs = {
- .destroy = ast_user_framebuffer_destroy,
-};
+ struct ast_private *ast = dev->dev_private;
+ unsigned long fbsize, fbpages, max_fbpages;
+ /* To support double buffering, a framebuffer may not
+ * consume more than half of the available VRAM.
+ */
+ max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
-int ast_framebuffer_init(struct drm_device *dev,
- struct ast_framebuffer *ast_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, &ast_fb->base, mode_cmd);
- ast_fb->obj = obj;
- ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
- return 0;
-}
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-static struct drm_framebuffer *
-ast_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct ast_framebuffer *ast_fb;
- int ret;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
- if (!ast_fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
- ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(ast_fb);
- return ERR_PTR(ret);
- }
- return &ast_fb->base;
+ return MODE_OK;
}
static const struct drm_mode_config_funcs ast_mode_funcs = {
- .fb_create = ast_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create,
+ .mode_valid = ast_mode_config_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
static u32 ast_get_vram_info(struct drm_device *dev)
@@ -561,7 +532,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_free;
- ret = ast_fbdev_init(dev);
+ drm_mode_config_reset(dev);
+
+ ret = drm_fbdev_generic_setup(dev, 32);
if (ret)
goto out_free;
@@ -582,7 +555,6 @@ void ast_driver_unload(struct drm_device *dev)
ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
- ast_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
@@ -591,27 +563,3 @@ void ast_driver_unload(struct drm_device *dev)
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
}
-
-int ast_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &gbo->gem;
- return 0;
-}
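
As a worked example of the check in ast_mode_config_mode_valid() above: a 1920x1080 mode at 4 bytes per pixel needs 1920 * 1080 * 4 = 8,294,400 bytes, i.e. 2025 pages of 4 KiB. With 32 MiB of VRAM the framebuffer may use at most 16 MiB, or 4096 pages, so the mode is accepted; on an 8 MiB board the limit is 1024 pages and the same mode is rejected with MODE_MEM.
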
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a1cb020e07e5..34608f0499eb 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -27,23 +27,33 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
+
#include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "ast_drv.h"
+#include "ast_drv.h"
#include "ast_tables.h"
static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
-static int ast_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height);
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+ unsigned int height);
+static void ast_cursor_set_base(struct ast_private *ast, u64 address);
static int ast_cursor_move(struct drm_crtc *crtc,
int x, int y);
@@ -61,9 +71,8 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
-static void ast_crtc_load_lut(struct drm_crtc *crtc)
+static void ast_crtc_load_lut(struct ast_private *ast, struct drm_crtc *crtc)
{
- struct ast_private *ast = crtc->dev->dev_private;
u16 *r, *g, *b;
int i;
@@ -78,36 +87,32 @@ static void ast_crtc_load_lut(struct drm_crtc *crtc)
ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
}
-static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = crtc->dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
- u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 refresh_rate_index = 0, refresh_rate;
const struct ast_vbios_enhtable *best = NULL;
u32 hborder, vborder;
bool check_sync;
- switch (fb->format->cpp[0] * 8) {
+ switch (format->cpp[0] * 8) {
case 8:
vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
- color_index = VGAModeIndex - 1;
break;
case 16:
vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
- color_index = HiCModeIndex;
break;
case 24:
case 32:
vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
- color_index = TrueCModeIndex;
break;
default:
return false;
}
- switch (crtc->mode.crtc_hdisplay) {
+ switch (mode->crtc_hdisplay) {
case 640:
vbios_mode->enh_table = &res_640x480[refresh_rate_index];
break;
@@ -118,7 +123,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
break;
case 1280:
- if (crtc->mode.crtc_vdisplay == 800)
+ if (mode->crtc_vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
@@ -130,7 +135,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
- if (crtc->mode.crtc_vdisplay == 900)
+ if (mode->crtc_vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
@@ -139,7 +144,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
break;
case 1920:
- if (crtc->mode.crtc_vdisplay == 1080)
+ if (mode->crtc_vdisplay == 1080)
vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
else
vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
@@ -150,7 +155,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
refresh_rate = drm_mode_vrefresh(mode);
check_sync = vbios_mode->enh_table->flags & WideScreenMode;
- do {
+
+ while (1) {
const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
while (loop->refresh_rate != 0xff) {
@@ -174,7 +180,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
if (best || !check_sync)
break;
check_sync = 0;
- } while (1);
+ }
+
if (best)
vbios_mode->enh_table = best;
@@ -199,38 +206,67 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table->vfp +
vbios_mode->enh_table->vsync);
- refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
- mode_id = vbios_mode->enh_table->mode_id;
+ return true;
+}
- if (ast->chip == AST1180) {
- /* TODO 1180 */
- } else {
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92,
- fb->format->cpp[0] * 8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
- }
+static void ast_set_vbios_color_reg(struct ast_private *ast,
+ const struct drm_format_info *format,
+ const struct ast_vbios_mode_info *vbios_mode)
+{
+ u32 color_index;
+
+ switch (format->cpp[0]) {
+ case 1:
+ color_index = VGAModeIndex - 1;
+ break;
+ case 2:
+ color_index = HiCModeIndex;
+ break;
+ case 3:
+ case 4:
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return;
}
- return true;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0x0f) << 4));
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+
+ if (vbios_mode->enh_table->flags & NewModeInfo) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, format->cpp[0] * 8);
+ }
+}
+
+static void ast_set_vbios_mode_reg(struct ast_private *ast,
+ const struct drm_display_mode *adjusted_mode,
+ const struct ast_vbios_mode_info *vbios_mode)
+{
+ u32 refresh_rate_index, mode_id;
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+ if (vbios_mode->enh_table->flags & NewModeInfo) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
}
-static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+
+static void ast_set_std_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = crtc->dev->dev_private;
const struct ast_vbios_stdtable *stdtable;
u32 i;
u8 jreg;
@@ -240,18 +276,21 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
- /* Set SEQ */
+ /* Set SEQ; except Screen Disable field */
ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
- for (i = 0; i < 4; i++) {
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, stdtable->seq[0]);
+ for (i = 1; i < 4; i++) {
jreg = stdtable->seq[i];
- if (!i)
- jreg |= 0x20;
ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
}
- /* Set CRTC */
+ /* Set CRTC; except base address and offset */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
- for (i = 0; i < 25; i++)
+ for (i = 0; i < 12; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+ for (i = 14; i < 19; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+ for (i = 20; i < 25; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
/* set AR */
@@ -272,10 +311,10 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static void ast_set_crtc_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = crtc->dev->dev_private;
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
@@ -381,11 +420,9 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
-static void ast_set_offset_reg(struct drm_crtc *crtc)
+static void ast_set_offset_reg(struct ast_private *ast,
+ struct drm_framebuffer *fb)
{
- struct ast_private *ast = crtc->dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
-
u16 offset;
offset = fb->pitches[0] >> 3;
@@ -393,10 +430,10 @@ static void ast_set_offset_reg(struct drm_crtc *crtc)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
-static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_dclk_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = dev->dev_private;
const struct ast_vbios_dclk_info *clk_info;
if (ast->chip == AST2500)
@@ -411,14 +448,12 @@ static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mo
((clk_info->param3 & 0x3) << 4));
}
-static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_color_reg(struct ast_private *ast,
+ const struct drm_format_info *format)
{
- struct ast_private *ast = crtc->dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
- switch (fb->format->cpp[0] * 8) {
+ switch (format->cpp[0] * 8) {
case 8:
jregA0 = 0x70;
jregA3 = 0x01;
@@ -440,7 +475,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+}
+static void ast_set_crtthd_reg(struct ast_private *ast)
+{
/* Set Threshold */
if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
@@ -458,10 +496,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
}
}
-static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_sync_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = dev->dev_private;
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
@@ -471,23 +509,9 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
-{
- const struct drm_framebuffer *fb = crtc->primary->fb;
-
- switch (fb->format->cpp[0] * 8) {
- case 8:
- break;
- default:
- return false;
- }
- return true;
-}
-
-static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+static void ast_set_start_address_crt1(struct ast_private *ast,
+ unsigned offset)
{
- struct ast_private *ast = crtc->dev->dev_private;
u32 addr;
addr = offset >> 2;
@@ -497,183 +521,366 @@ static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
}
-static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+/*
+ * Primary plane
+ */
+
+static const uint32_t ast_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_C8,
+};
+
+static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_crtc_state *crtc_state;
+ struct ast_crtc_state *ast_crtc_state;
+ int ret;
- if (ast->chip == AST1180)
- return;
+ if (!state->crtc)
+ return 0;
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
- if (ast->tx_chip_type == AST_TX_DP501)
- ast_set_dp501_video_output(crtc->dev, 1);
- ast_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_OFF:
- if (ast->tx_chip_type == AST_TX_DP501)
- ast_set_dp501_video_output(crtc->dev, 0);
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
- break;
- }
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
+
+ if (!state->visible)
+ return 0;
+
+ ast_crtc_state = to_ast_crtc_state(crtc_state);
+
+ ast_crtc_state->format = state->fb->format;
+
+ return 0;
}
-static int ast_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
+void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- struct ast_private *ast = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct ast_framebuffer *ast_fb;
+ struct ast_private *ast = plane->dev->dev_private;
+ struct drm_plane_state *state = plane->state;
struct drm_gem_vram_object *gbo;
- int ret;
s64 gpu_addr;
- void *base;
- if (!atomic && fb) {
- ast_fb = to_ast_framebuffer(fb);
- obj = ast_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
+ gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
+
+ ast_set_offset_reg(ast, state->fb);
+ ast_set_start_address_crt1(ast, (u32)gpu_addr);
- /* unmap if console */
- if (&ast->fbdev->afb == ast_fb)
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- }
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
+}
- ast_fb = to_ast_framebuffer(crtc->primary->fb);
- obj = ast_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
+static void
+ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct ast_private *ast = plane->dev->dev_private;
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+}
+
+static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
+ .atomic_check = ast_primary_plane_helper_atomic_check,
+ .atomic_update = ast_primary_plane_helper_atomic_update,
+ .atomic_disable = ast_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Cursor plane
+ */
+
+static const uint32_t ast_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static int
+ast_cursor_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_gem_vram_object *gbo;
+ struct ast_private *ast;
+ int ret;
+ void *src, *dst;
+
+ if (!crtc || !fb)
+ return 0;
+
+ if (WARN_ON_ONCE(fb->width > AST_MAX_HWC_WIDTH) ||
+ WARN_ON_ONCE(fb->height > AST_MAX_HWC_HEIGHT))
+ return -EINVAL; /* BUG: didn't test in atomic_check() */
+
+ ast = crtc->dev->dev_private;
+
+ gbo = drm_gem_vram_of_gem(fb->obj[0]);
+ src = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
goto err_drm_gem_vram_unpin;
}
- if (&ast->fbdev->afb == ast_fb) {
- /* if pushing console in kmap it */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- DRM_ERROR("failed to kmap fbcon\n");
- } else {
- ast_fbdev_set_base(ast, gpu_addr);
- }
+ dst = drm_gem_vram_vmap(ast->cursor.gbo[ast->cursor.next_index]);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto err_drm_gem_vram_vunmap_src;
}
- ast_set_offset_reg(crtc);
- ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+ ret = ast_cursor_update(dst, src, fb->width, fb->height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap_dst;
+
+ /* Always unmap buffers here. Destination buffers are
+ * perma-pinned while the driver is active. We're only
+ * changing ref-counters here.
+ */
+ drm_gem_vram_vunmap(ast->cursor.gbo[ast->cursor.next_index], dst);
+ drm_gem_vram_vunmap(gbo, src);
return 0;
+err_drm_gem_vram_vunmap_dst:
+ drm_gem_vram_vunmap(ast->cursor.gbo[ast->cursor.next_index], dst);
+err_drm_gem_vram_vunmap_src:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_vram_unpin:
drm_gem_vram_unpin(gbo);
return ret;
}
-static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc_state *crtc_state;
+ int ret;
-static int ast_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_vbios_mode_info vbios_mode;
- bool ret;
- if (ast->chip == AST1180) {
- DRM_ERROR("AST 1180 modesetting not supported\n");
- return -EINVAL;
- }
+ if (!state->crtc)
+ return 0;
- ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
- if (ret == false)
- return -EINVAL;
- ast_open_key(ast);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
- ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
- ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
- ast_set_offset_reg(crtc);
- ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
- ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
- ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
- ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+ if (!state->visible)
+ return 0;
- ast_crtc_mode_set_base(crtc, x, y, old_fb);
+ if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
return 0;
}
-static void ast_crtc_disable(struct drm_crtc *crtc)
+static void
+ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- DRM_DEBUG_KMS("\n");
- ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
- struct drm_gem_object *obj = ast_fb->obj;
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&ast->fbdev->afb == ast_fb)
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ struct drm_plane_state *state = plane->state;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct ast_private *ast = plane->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_vram_object *gbo;
+ s64 off;
+ u8 jreg;
+
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - fb->width;
+ ast_crtc->offset_y = AST_MAX_HWC_WIDTH - fb->height;
+
+ if (state->fb != old_state->fb) {
+ /* A new cursor image was installed. */
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ off = drm_gem_vram_offset(gbo);
+ if (WARN_ON_ONCE(off < 0))
+ return; /* Bug: we didn't pin cursor HW BO to VRAM. */
+ ast_cursor_set_base(ast, off);
+
+ ++ast->cursor.next_index;
+ ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
}
- crtc->primary->fb = NULL;
+
+ ast_cursor_move(crtc, state->crtc_x, state->crtc_y);
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
}
-static void ast_crtc_prepare(struct drm_crtc *crtc)
+static void
+ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
+ struct ast_private *ast = plane->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
}
-static void ast_crtc_commit(struct drm_crtc *crtc)
+static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
+ .prepare_fb = ast_cursor_plane_helper_prepare_fb,
+ .cleanup_fb = NULL, /* not required for cursor plane */
+ .atomic_check = ast_cursor_plane_helper_atomic_check,
+ .atomic_update = ast_cursor_plane_helper_atomic_update,
+ .atomic_disable = ast_cursor_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * CRTC
+ */
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct ast_private *ast = crtc->dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
- ast_crtc_load_lut(crtc);
+
+ if (ast->chip == AST1180)
+ return;
+
+ /* TODO: Maybe control display signal generation with
+ * Sync Enable (bit CR17.7).
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ if (ast->tx_chip_type == AST_TX_DP501)
+ ast_set_dp501_video_output(crtc->dev, 1);
+ ast_crtc_load_lut(ast, crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ if (ast->tx_chip_type == AST_TX_DP501)
+ ast_set_dp501_video_output(crtc->dev, 0);
+ break;
+ }
}
+static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc_state *ast_state;
+ const struct drm_format_info *format;
+ bool succ;
-static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
- .dpms = ast_crtc_dpms,
- .mode_set = ast_crtc_mode_set,
- .mode_set_base = ast_crtc_mode_set_base,
- .disable = ast_crtc_disable,
- .prepare = ast_crtc_prepare,
- .commit = ast_crtc_commit,
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
+ }
-};
+ ast_state = to_ast_crtc_state(state);
+
+ format = ast_state->format;
+ if (!format)
+ return 0;
+
+ succ = ast_get_vbios_mode_info(format, &state->mode,
+ &state->adjusted_mode,
+ &ast_state->vbios_mode_info);
+ if (!succ)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ ast_open_key(ast);
+}
-static void ast_crtc_reset(struct drm_crtc *crtc)
+static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = dev->dev_private;
+ struct ast_crtc_state *ast_state;
+ const struct drm_format_info *format;
+ struct ast_vbios_mode_info *vbios_mode_info;
+ struct drm_display_mode *adjusted_mode;
+
+ crtc->state->no_vblank = true;
+
+ ast_state = to_ast_crtc_state(crtc->state);
+
+ format = ast_state->format;
+ if (!format)
+ return;
+
+ vbios_mode_info = &ast_state->vbios_mode_info;
+ ast_set_color_reg(ast, format);
+ ast_set_vbios_color_reg(ast, format, vbios_mode_info);
+
+ if (!crtc->state->mode_changed)
+ return;
+
+ adjusted_mode = &crtc->state->adjusted_mode;
+
+ ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+ ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_crtthd_reg(ast);
+ ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
}
-static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
+static void
+ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
- ast_crtc_load_lut(crtc);
+ ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
- return 0;
+static void
+ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .atomic_check = ast_crtc_helper_atomic_check,
+ .atomic_begin = ast_crtc_helper_atomic_begin,
+ .atomic_flush = ast_crtc_helper_atomic_flush,
+ .atomic_enable = ast_crtc_helper_atomic_enable,
+ .atomic_disable = ast_crtc_helper_atomic_disable,
+};
static void ast_crtc_destroy(struct drm_crtc *crtc)
{
@@ -681,79 +888,87 @@ static void ast_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
+static struct drm_crtc_state *
+ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct ast_crtc_state *new_ast_state, *ast_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ new_ast_state = kmalloc(sizeof(*new_ast_state), GFP_KERNEL);
+ if (!new_ast_state)
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ast_state->base);
+
+ ast_state = to_ast_crtc_state(crtc->state);
+
+ new_ast_state->format = ast_state->format;
+ memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
+ sizeof(new_ast_state->vbios_mode_info));
+
+ return &new_ast_state->base;
+}
+
+static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct ast_crtc_state *ast_state = to_ast_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&ast_state->base);
+ kfree(ast_state);
+}
+
static const struct drm_crtc_funcs ast_crtc_funcs = {
- .cursor_set = ast_cursor_set,
- .cursor_move = ast_cursor_move,
- .reset = ast_crtc_reset,
+ .reset = drm_atomic_helper_crtc_reset,
.set_config = drm_crtc_helper_set_config,
- .gamma_set = ast_crtc_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = ast_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = ast_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = ast_crtc_atomic_destroy_state,
};
static int ast_crtc_init(struct drm_device *dev)
{
+ struct ast_private *ast = dev->dev_private;
struct ast_crtc *crtc;
+ int ret;
crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
if (!crtc)
return -ENOMEM;
- drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, &crtc->base, &ast->primary_plane,
+ &ast->cursor_plane, &ast_crtc_funcs,
+ NULL);
+ if (ret)
+ goto err_kfree;
+
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
return 0;
+
+err_kfree:
+ kfree(crtc);
+ return ret;
}
+/*
+ * Encoder
+ */
+
static void ast_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
-
-static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-
static const struct drm_encoder_funcs ast_enc_funcs = {
.destroy = ast_encoder_destroy,
};
-static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static void ast_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void ast_encoder_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void ast_encoder_commit(struct drm_encoder *encoder)
-{
-
-}
-
-
-static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
- .dpms = ast_encoder_dpms,
- .prepare = ast_encoder_prepare,
- .commit = ast_encoder_commit,
- .mode_set = ast_encoder_mode_set,
-};
-
static int ast_encoder_init(struct drm_device *dev)
{
struct ast_encoder *ast_encoder;
@@ -764,12 +979,15 @@ static int ast_encoder_init(struct drm_device *dev)
drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
ast_encoder->base.possible_crtcs = 1;
return 0;
}
+/*
+ * Connector
+ */
+
static int ast_get_modes(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -868,15 +1086,16 @@ static void ast_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
- .mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
- .best_encoder = ast_best_single_encoder,
+ .mode_valid = ast_mode_valid,
};
static const struct drm_connector_funcs ast_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_connector_init(struct drm_device *dev)
@@ -890,7 +1109,14 @@ static int ast_connector_init(struct drm_device *dev)
return -ENOMEM;
connector = &ast_connector->base;
- drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ drm_connector_init_with_ddc(dev, connector,
+ &ast_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &ast_connector->i2c->adapter);
drm_connector_helper_add(connector, &ast_connector_helper_funcs);
@@ -904,10 +1130,6 @@ static int ast_connector_init(struct drm_device *dev)
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
drm_connector_attach_encoder(connector, encoder);
- ast_connector->i2c = ast_i2c_create(dev);
- if (!ast_connector->i2c)
- DRM_ERROR("failed to add ddc bus for connector\n");
-
return 0;
}
@@ -915,58 +1137,89 @@ static int ast_connector_init(struct drm_device *dev)
static int ast_cursor_init(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- int size;
- int ret;
- struct drm_gem_object *obj;
+ size_t size, i;
struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
- void *base;
+ int ret;
- size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- ret = ast_gem_create(dev, size, true, &obj);
- if (ret)
- return ret;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- ret = (int)gpu_addr;
- goto fail;
- }
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = drm_gem_vram_create(dev, size, 0);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
- /* kmap the object */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto fail;
+ ast->cursor.gbo[i] = gbo;
}
- ast->cursor_cache = obj;
return 0;
-fail:
+
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ ast->cursor.gbo[i] = NULL;
+ }
return ret;
}
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(ast->cursor_cache);
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
}
int ast_mode_init(struct drm_device *dev)
{
+ struct ast_private *ast = dev->dev_private;
+ int ret;
+
+ memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
+ ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
+ &ast_primary_plane_funcs,
+ ast_primary_plane_formats,
+ ARRAY_SIZE(ast_primary_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ DRM_ERROR("ast: drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(&ast->primary_plane,
+ &ast_primary_plane_helper_funcs);
+
+ ret = drm_universal_plane_init(dev, &ast->cursor_plane, 0x01,
+ &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats,
+ ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret) {
+ DRM_ERROR("drm_universal_plane_failed(): %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(&ast->cursor_plane,
+ &ast_cursor_plane_helper_funcs);
+
ast_cursor_init(dev);
ast_crtc_init(dev);
ast_encoder_init(dev);
ast_connector_init(dev);
+
return 0;
}
@@ -1096,23 +1349,6 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
kfree(i2c);
}
-static void ast_show_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- u8 jreg;
-
- jreg = 0x2;
- /* enable ARGB cursor */
- jreg |= 1;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-}
-
-static void ast_hide_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
-
static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
{
union {
@@ -1169,103 +1405,34 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
return csum;
}
-static int ast_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+ unsigned int height)
{
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo;
- s64 dst_gpu;
- u64 gpu_addr;
u32 csum;
- int ret;
- u8 *src, *dst;
-
- if (!handle) {
- ast_hide_cursor(crtc);
- return 0;
- }
-
- if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
- return -EINVAL;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
- return -ENOENT;
- }
- gbo = drm_gem_vram_of_gem(obj);
-
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- goto err_drm_gem_object_put_unlocked;
- src = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- goto err_drm_gem_vram_unpin;
- }
-
- dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- goto err_drm_gem_vram_kunmap;
- }
- dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache));
- if (dst_gpu < 0) {
- ret = (int)dst_gpu;
- goto err_drm_gem_vram_kunmap;
- }
-
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
/* do data transfer to cursor cache */
csum = copy_cursor_image(src, dst, width, height);
/* write checksum + signature */
- {
- struct drm_gem_vram_object *dst_gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL);
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-
- /* set pattern offset */
- gpu_addr = (u64)dst_gpu;
- gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
- gpu_addr >>= 3;
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
- }
- ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
- ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
-
- ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
-
- ast_show_cursor(crtc);
-
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(obj);
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
return 0;
+}
-err_drm_gem_vram_kunmap:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(obj);
- return ret;
+static void ast_cursor_set_base(struct ast_private *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
static int ast_cursor_move(struct drm_crtc *crtc,
@@ -1273,12 +1440,17 @@ static int ast_cursor_move(struct drm_crtc *crtc,
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_vram_object *gbo;
int x_offset, y_offset;
- u8 *sig;
+ u8 *dst, *sig;
+ u8 jreg;
- sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
@@ -1301,7 +1473,11 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_show_cursor(crtc);
+ jreg = 0x02 |
+ 0x01; /* enable ARGB4444 cursor */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ drm_gem_vram_vunmap(gbo, dst);
return 0;
}
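
To summarize the cursor path introduced above: the driver keeps two permanently pinned VRAM buffers (ast->cursor.gbo[]) and alternates between them, so a new image is never written into the buffer the hardware is currently scanning out. The rotation, condensed from atomic_update above (sketch only; the real code also checks drm_gem_vram_offset() for errors):

	gbo = ast->cursor.gbo[ast->cursor.next_index];
	ast_cursor_set_base(ast, drm_gem_vram_offset(gbo));

	/* Advance so the other buffer becomes the spare for the next image. */
	ast->cursor.next_index = (ast->cursor.next_index + 1) %
				 ARRAY_SIZE(ast->cursor.gbo);
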
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index c1d1ac51d1c2..2d1b18619743 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -26,10 +26,13 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
-#include "ast_drv.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
#include "ast_dram_tables.h"
+#include "ast_drv.h"
static void ast_post_chip_2300(struct drm_device *dev);
static void ast_post_chip_2500(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 779c53efee8e..fad34106083a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -25,7 +25,11 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_gem_vram_helper.h>
#include "ast_drv.h"
@@ -37,7 +41,7 @@ int ast_mm_init(struct ast_private *ast)
vmm = drm_vram_helper_alloc_mm(
dev, pci_resource_start(dev->pdev, 0),
- ast->vram_size, &drm_gem_vram_mm_funcs);
+ ast->vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 6c6c7cf3c3e8..10985134ce0b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -8,15 +8,19 @@
*/
#include <linux/clk.h>
+#include <linux/mfd/atmel-hlcdc.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
+#include <video/videomode.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
-
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "atmel_hlcdc_dc.h"
@@ -69,7 +73,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned long prate;
unsigned int mask = ATMEL_HLCDC_CLKDIV_MASK | ATMEL_HLCDC_CLKPOL;
unsigned int cfg = 0;
- int div;
+ int div, ret;
+
+ ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+ if (ret)
+ return;
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
@@ -91,14 +99,14 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
+ prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
+ mode_rate = adj->crtc_clock * 1000;
if (!crtc->dc->desc->fixed_clksrc) {
+ prate *= 2;
cfg |= ATMEL_HLCDC_CLKSEL;
mask |= ATMEL_HLCDC_CLKSEL;
}
- prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk);
- mode_rate = adj->crtc_clock * 1000;
-
div = DIV_ROUND_UP(prate, mode_rate);
if (div < 2) {
div = 2;
@@ -113,8 +121,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
int div_low = prate / mode_rate;
if (div_low >= 2 &&
- ((prate / div_low - mode_rate) <
- 10 * (mode_rate - prate / div)))
+ (10 * (prate / div_low - mode_rate) <
+ (mode_rate - prate / div)))
/*
* At least 10 times better when using a higher
* frequency than requested, instead of a lower.
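A worked example of the new divider test, with hypothetical numbers (sys_clk doubled to 132 MHz, requested pixel clock 25.175 MHz):

	/*
	 * div     = DIV_ROUND_UP(132000000, 25175000) = 6 -> 22.000 MHz (3.175 MHz too slow)
	 * div_low = 132000000 / 25175000              = 5 -> 26.400 MHz (1.225 MHz too fast)
	 *
	 * New test: 10 * 1225000 < 3175000 is false, so div = 6 is kept and the
	 * slower pixel clock wins.  The old test (1225000 < 10 * 3175000) was true,
	 * so it would have picked the faster clock instead.
	 */
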
@@ -143,6 +151,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
cfg);
+
+ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 9bab6e5ba76b..112aa5066cee 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -11,8 +11,20 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/mfd/atmel-hlcdc.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "atmel_hlcdc_dc.h"
@@ -545,12 +557,6 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-
struct atmel_hlcdc_dc_commit {
struct work_struct work;
struct drm_device *dev;
@@ -645,7 +651,7 @@ error:
}
static const struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = atmel_hlcdc_fb_create,
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
@@ -715,18 +721,10 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
- if (dc->desc->fixed_clksrc) {
- ret = clk_prepare_enable(dc->hlcdc->sys_clk);
- if (ret) {
- dev_err(dev->dev, "failed to enable sys_clk\n");
- goto err_destroy_wq;
- }
- }
-
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
dev_err(dev->dev, "failed to enable periph_clk\n");
- goto err_sys_clk_disable;
+ goto err_destroy_wq;
}
pm_runtime_enable(dev->dev);
@@ -762,9 +760,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
err_periph_clk_disable:
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
-err_sys_clk_disable:
- if (dc->desc->fixed_clksrc)
- clk_disable_unprepare(dc->hlcdc->sys_clk);
err_destroy_wq:
destroy_workqueue(dc->wq);
@@ -789,8 +784,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
- if (dc->desc->fixed_clksrc)
- clk_disable_unprepare(dc->hlcdc->sys_clk);
destroy_workqueue(dc->wq);
}
@@ -823,9 +816,7 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver atmel_hlcdc_dc_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
@@ -834,8 +825,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -908,8 +897,6 @@ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
clk_disable_unprepare(dc->hlcdc->periph_clk);
- if (dc->desc->fixed_clksrc)
- clk_disable_unprepare(dc->hlcdc->sys_clk);
return 0;
}
@@ -919,8 +906,6 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
- if (dc->desc->fixed_clksrc)
- clk_prepare_enable(dc->hlcdc->sys_clk);
clk_prepare_enable(dc->hlcdc->periph_clk);
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 7300e3fd273e..469d4507e576 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -11,23 +11,9 @@
#ifndef DRM_ATMEL_HLCDC_H
#define DRM_ATMEL_HLCDC_H
-#include <linux/clk.h>
-#include <linux/dmapool.h>
-#include <linux/irqdomain.h>
-#include <linux/mfd/atmel-hlcdc.h>
-#include <linux/pwm.h>
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drmP.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_plane.h>
#define ATMEL_HLCDC_LAYER_CHER 0x0
#define ATMEL_HLCDC_LAYER_CHDR 0x4
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 7e08318b262e..121b62682d80 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -8,9 +8,10 @@
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*/
+#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include <drm/drm_bridge.h>
@@ -106,7 +107,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->encoder.possible_crtcs = 0x1;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 4127aca212bb..40800ec5700a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -6,6 +6,16 @@
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*/
+#include <linux/dmapool.h>
+#include <linux/mfd/atmel-hlcdc.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
#include "atmel_hlcdc_dc.h"
/**
@@ -361,7 +371,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
atmel_hlcdc_layer_write_cfg(&plane->layer, ATMEL_HLCDC_LAYER_DMA_CFG,
cfg);
- cfg = ATMEL_HLCDC_LAYER_DMA;
+ cfg = ATMEL_HLCDC_LAYER_DMA | ATMEL_HLCDC_LAYER_REP;
if (plane->base.type != DRM_PLANE_TYPE_PRIMARY) {
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
@@ -591,11 +601,10 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
struct drm_framebuffer *fb = state->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
- unsigned int tmp;
int ret;
int i;
- if (!state->base.crtc || !fb)
+ if (!state->base.crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
@@ -684,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
* Swap width and size in case of 90 or 270 degrees rotation
*/
if (drm_rotation_90_or_270(state->base.rotation)) {
- tmp = state->src_w;
- state->src_w = state->src_h;
- state->src_h = tmp;
+ swap(state->src_w, state->src_h);
}
if (!desc->layout.size &&
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 32b043abb668..7bcdf294fed8 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -4,6 +4,8 @@ config DRM_BOCHS
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option for qemu.
If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 2a65434500ee..917767173ee6 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,18 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#include <linux/io.h>
#include <linux/console.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_simple_kms_helper.h>
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 8f3a5bda9d03..10460878414e 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -2,11 +2,10 @@
/*
*/
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_probe_helper.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
#include <drm/drm_atomic_helper.h>
#include "bochs.h"
@@ -59,14 +58,10 @@ err:
return ret;
}
-static const struct file_operations bochs_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(bochs_fops);
static struct drm_driver bochs_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -74,7 +69,6 @@ static struct drm_driver bochs_driver = {
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
- DRM_GEM_VRAM_DRIVER_PRIME,
};
/* ---------------------------------------------------------------------- */
@@ -83,16 +77,14 @@ static struct drm_driver bochs_driver = {
#ifdef CONFIG_PM_SLEEP
static int bochs_pm_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int bochs_pm_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
@@ -119,7 +111,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index ebfea8744fe6..b615b7dfdd9d 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -2,6 +2,10 @@
/*
*/
+#include <linux/pci.h>
+
+#include <drm/drm_fourcc.h>
+
#include "bochs.h"
/* ---------------------------------------------------------------------- */
@@ -251,7 +255,7 @@ void bochs_hw_setformat(struct bochs_device *bochs,
DRM_ERROR("%s: Huh? Got framebuffer format 0x%x",
__func__, format->format);
break;
- };
+ }
}
void bochs_hw_setbase(struct bochs_device *bochs,
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 359030d5d818..3f0006c2470d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -2,12 +2,14 @@
/*
*/
-#include "bochs.h"
+#include <linux/moduleparam.h>
+
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "bochs.h"
static int defx = 1024;
static int defy = 768;
@@ -67,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
}
}
-static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-}
-
-static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
- gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
- drm_gem_vram_unpin(gbo);
-}
-
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.update = bochs_pipe_update,
- .prepare_fb = bochs_pipe_prepare_fb,
- .cleanup_fb = bochs_pipe_cleanup_fb,
+ .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
+ .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)
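The driver-local prepare_fb/cleanup_fb hooks are replaced by the generic VRAM helpers, which do the same pinning: the framebuffer's buffer object is pinned into VRAM before scanout and unpinned when the old state is torn down. For reference, this is essentially what the removed hook did (taken from the code deleted above):

	gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
	/* keep the BO resident in VRAM while it is being scanned out */
	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
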
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 8f9bb886f7ad..1b74f530b07c 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -11,8 +11,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct drm_vram_mm *vmm;
vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base,
- bochs->fb_size,
- &drm_gem_vram_mm_funcs);
+ bochs->fb_size);
return PTR_ERR_OR_ZERO(vmm);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index e4e22bbae2a7..0b9ca5862455 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,16 +16,6 @@ config DRM_PANEL_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
-config DRM_ANALOGIX_ANX78XX
- tristate "Analogix ANX78XX bridge"
- select DRM_KMS_HELPER
- select REGMAP_I2C
- ---help---
- ANX78XX is an ultra-low Full-HD SlimPort transmitter
- designed for portable devices. The ANX78XX transforms
- the HDMI output of an application processor to MyDP
- or DisplayPort.
-
config DRM_CDNS_DSI
tristate "Cadence DPI/DSI bridge"
select DRM_KMS_HELPER
@@ -45,14 +35,14 @@ config DRM_DUMB_VGA_DAC
Support for non-programmable RGB to VGA DAC bridges, such as ADI
ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
-config DRM_LVDS_ENCODER
- tristate "Transparent parallel to LVDS encoder support"
+config DRM_LVDS_CODEC
+ tristate "Transparent LVDS encoders and decoders support"
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
- Support for transparent parallel to LVDS encoders that don't require
- any configuration.
+ Support for transparent LVDS encoders and decoders that don't
+ require any configuration.
config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
tristate "MegaChips stdp4028-ge-b850v3-fw and stdp2690-ge-b850v3-fw"
@@ -60,10 +50,10 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
select DRM_KMS_HELPER
select DRM_PANEL
---help---
- This is a driver for the display bridges of
- GE B850v3 that convert dual channel LVDS
- to DP++. This is used with the i.MX6 imx-ldb
- driver. You are likely to say N here.
+ This is a driver for the display bridges of
+ GE B850v3 that convert dual channel LVDS
+ to DP++. This is used with the i.MX6 imx-ldb
+ driver. You are likely to say N here.
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
@@ -87,8 +77,7 @@ config DRM_SIL_SII8620
depends on OF
select DRM_KMS_HELPER
imply EXTCON
- select INPUT
- select RC_CORE
+ depends on RC_CORE || !RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 4934fcf5a6f8..cd16ce830270 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
-obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
+obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
@@ -12,8 +11,9 @@ obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
-obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+
+obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index f6d2681f6927..9e13e466e72c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -981,10 +981,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
{
int ret;
- adv->i2c_cec = i2c_new_secondary_device(adv->i2c_main, "cec",
+ adv->i2c_cec = i2c_new_ancillary_device(adv->i2c_main, "cec",
ADV7511_CEC_I2C_ADDR_DEFAULT);
- if (!adv->i2c_cec)
- return -EINVAL;
+ if (IS_ERR(adv->i2c_cec))
+ return PTR_ERR(adv->i2c_cec);
i2c_set_clientdata(adv->i2c_cec, adv);
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1165,20 +1165,20 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_packet_disable(adv7511, 0xffff);
- adv7511->i2c_edid = i2c_new_secondary_device(i2c, "edid",
+ adv7511->i2c_edid = i2c_new_ancillary_device(i2c, "edid",
ADV7511_EDID_I2C_ADDR_DEFAULT);
- if (!adv7511->i2c_edid) {
- ret = -EINVAL;
+ if (IS_ERR(adv7511->i2c_edid)) {
+ ret = PTR_ERR(adv7511->i2c_edid);
goto uninit_regulators;
}
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
adv7511->i2c_edid->addr << 1);
- adv7511->i2c_packet = i2c_new_secondary_device(i2c, "packet",
+ adv7511->i2c_packet = i2c_new_ancillary_device(i2c, "packet",
ADV7511_PACKET_I2C_ADDR_DEFAULT);
- if (!adv7511->i2c_packet) {
- ret = -EINVAL;
+ if (IS_ERR(adv7511->i2c_packet)) {
+ ret = PTR_ERR(adv7511->i2c_packet);
goto err_i2c_unregister_edid;
}
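i2c_new_ancillary_device() reports failure as an ERR_PTR-encoded errno rather than the NULL that i2c_new_secondary_device() returned, so every call site above moves from a NULL test to IS_ERR()/PTR_ERR(). A short sketch of the resulting pattern (taken from the hunks above):

	adv->i2c_cec = i2c_new_ancillary_device(adv->i2c_main, "cec",
						ADV7511_CEC_I2C_ADDR_DEFAULT);
	if (IS_ERR(adv->i2c_cec))
		return PTR_ERR(adv->i2c_cec);	/* propagate the real error code */
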
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
deleted file mode 100644
index 25e063bcecbc..000000000000
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ /dev/null
@@ -1,710 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
- */
-
-#ifndef __ANX78xx_H
-#define __ANX78xx_H
-
-#define TX_P0 0x70
-#define TX_P1 0x7a
-#define TX_P2 0x72
-
-#define RX_P0 0x7e
-#define RX_P1 0x80
-
-/***************************************************************/
-/* Register definition of device address 0x7e */
-/***************************************************************/
-
-/*
- * System Control and Status
- */
-
-/* Software Reset Register 1 */
-#define SP_SOFTWARE_RESET1_REG 0x11
-#define SP_VIDEO_RST BIT(4)
-#define SP_HDCP_MAN_RST BIT(2)
-#define SP_TMDS_RST BIT(1)
-#define SP_SW_MAN_RST BIT(0)
-
-/* System Status Register */
-#define SP_SYSTEM_STATUS_REG 0x14
-#define SP_TMDS_CLOCK_DET BIT(1)
-#define SP_TMDS_DE_DET BIT(0)
-
-/* HDMI Status Register */
-#define SP_HDMI_STATUS_REG 0x15
-#define SP_HDMI_AUD_LAYOUT BIT(3)
-#define SP_HDMI_DET BIT(0)
-# define SP_DVI_MODE 0
-# define SP_HDMI_MODE 1
-
-/* HDMI Mute Control Register */
-#define SP_HDMI_MUTE_CTRL_REG 0x16
-#define SP_AUD_MUTE BIT(1)
-#define SP_VID_MUTE BIT(0)
-
-/* System Power Down Register 1 */
-#define SP_SYSTEM_POWER_DOWN1_REG 0x18
-#define SP_PWDN_CTRL BIT(0)
-
-/*
- * Audio and Video Auto Control
- */
-
-/* Auto Audio and Video Control register */
-#define SP_AUDVID_CTRL_REG 0x20
-#define SP_AVC_OE BIT(7)
-#define SP_AAC_OE BIT(6)
-#define SP_AVC_EN BIT(1)
-#define SP_AAC_EN BIT(0)
-
-/* Audio Exception Enable Registers */
-#define SP_AUD_EXCEPTION_ENABLE_BASE (0x24 - 1)
-/* Bits for Audio Exception Enable Register 3 */
-#define SP_AEC_EN21 BIT(5)
-
-/*
- * Interrupt
- */
-
-/* Interrupt Status Register 1 */
-#define SP_INT_STATUS1_REG 0x31
-/* Bits for Interrupt Status Register 1 */
-#define SP_HDMI_DVI BIT(7)
-#define SP_CKDT_CHG BIT(6)
-#define SP_SCDT_CHG BIT(5)
-#define SP_PCLK_CHG BIT(4)
-#define SP_PLL_UNLOCK BIT(3)
-#define SP_CABLE_PLUG_CHG BIT(2)
-#define SP_SET_MUTE BIT(1)
-#define SP_SW_INTR BIT(0)
-/* Bits for Interrupt Status Register 2 */
-#define SP_HDCP_ERR BIT(5)
-#define SP_AUDIO_SAMPLE_CHG BIT(0) /* undocumented */
-/* Bits for Interrupt Status Register 3 */
-#define SP_AUD_MODE_CHG BIT(0)
-/* Bits for Interrupt Status Register 5 */
-#define SP_AUDIO_RCV BIT(0)
-/* Bits for Interrupt Status Register 6 */
-#define SP_INT_STATUS6_REG 0x36
-#define SP_CTS_RCV BIT(7)
-#define SP_NEW_AUD_PKT BIT(4)
-#define SP_NEW_AVI_PKT BIT(1)
-#define SP_NEW_CP_PKT BIT(0)
-/* Bits for Interrupt Status Register 7 */
-#define SP_NO_VSI BIT(7)
-#define SP_NEW_VS BIT(4)
-
-/* Interrupt Mask 1 Status Registers */
-#define SP_INT_MASK1_REG 0x41
-
-/* HDMI US TIMER Control Register */
-#define SP_HDMI_US_TIMER_CTRL_REG 0x49
-#define SP_MS_TIMER_MARGIN_10_8_MASK 0x07
-
-/*
- * TMDS Control
- */
-
-/* TMDS Control Registers */
-#define SP_TMDS_CTRL_BASE (0x50 - 1)
-/* Bits for TMDS Control Register 7 */
-#define SP_PD_RT BIT(0)
-
-/*
- * Video Control
- */
-
-/* Video Status Register */
-#define SP_VIDEO_STATUS_REG 0x70
-#define SP_COLOR_DEPTH_MASK 0xf0
-#define SP_COLOR_DEPTH_SHIFT 4
-# define SP_COLOR_DEPTH_MODE_LEGACY 0x00
-# define SP_COLOR_DEPTH_MODE_24BIT 0x04
-# define SP_COLOR_DEPTH_MODE_30BIT 0x05
-# define SP_COLOR_DEPTH_MODE_36BIT 0x06
-# define SP_COLOR_DEPTH_MODE_48BIT 0x07
-
-/* Video Data Range Control Register */
-#define SP_VID_DATA_RANGE_CTRL_REG 0x83
-#define SP_R2Y_INPUT_LIMIT BIT(1)
-
-/* Pixel Clock High Resolution Counter Registers */
-#define SP_PCLK_HIGHRES_CNT_BASE (0x8c - 1)
-
-/*
- * Audio Control
- */
-
-/* Number of Audio Channels Status Registers */
-#define SP_AUD_CH_STATUS_REG_NUM 6
-
-/* Audio IN S/PDIF Channel Status Registers */
-#define SP_AUD_SPDIF_CH_STATUS_BASE 0xc7
-
-/* Audio IN S/PDIF Channel Status Register 4 */
-#define SP_FS_FREQ_MASK 0x0f
-# define SP_FS_FREQ_44100HZ 0x00
-# define SP_FS_FREQ_48000HZ 0x02
-# define SP_FS_FREQ_32000HZ 0x03
-# define SP_FS_FREQ_88200HZ 0x08
-# define SP_FS_FREQ_96000HZ 0x0a
-# define SP_FS_FREQ_176400HZ 0x0c
-# define SP_FS_FREQ_192000HZ 0x0e
-
-/*
- * Micellaneous Control Block
- */
-
-/* CHIP Control Register */
-#define SP_CHIP_CTRL_REG 0xe3
-#define SP_MAN_HDMI5V_DET BIT(3)
-#define SP_PLLLOCK_CKDT_EN BIT(2)
-#define SP_ANALOG_CKDT_EN BIT(1)
-#define SP_DIGITAL_CKDT_EN BIT(0)
-
-/* Packet Receiving Status Register */
-#define SP_PACKET_RECEIVING_STATUS_REG 0xf3
-#define SP_AVI_RCVD BIT(5)
-#define SP_VSI_RCVD BIT(1)
-
-/***************************************************************/
-/* Register definition of device address 0x80 */
-/***************************************************************/
-
-/* HDCP BCAPS Shadow Register */
-#define SP_HDCP_BCAPS_SHADOW_REG 0x2a
-#define SP_BCAPS_REPEATER BIT(5)
-
-/* HDCP Status Register */
-#define SP_RX_HDCP_STATUS_REG 0x3f
-#define SP_AUTH_EN BIT(4)
-
-/*
- * InfoFrame and Control Packet Registers
- */
-
-/* AVI InfoFrame packet checksum */
-#define SP_AVI_INFOFRAME_CHECKSUM 0xa3
-
-/* AVI InfoFrame Registers */
-#define SP_AVI_INFOFRAME_DATA_BASE 0xa4
-
-#define SP_AVI_COLOR_F_MASK 0x60
-#define SP_AVI_COLOR_F_SHIFT 5
-
-/* Audio InfoFrame Registers */
-#define SP_AUD_INFOFRAME_DATA_BASE 0xc4
-#define SP_AUD_INFOFRAME_LAYOUT_MASK 0x0f
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
-#define SP_MPEG_VS_INFOFRAME_TYPE_REG 0xe0
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
-#define SP_MPEG_VS_INFOFRAME_LEN_REG 0xe2
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
-#define SP_MPEG_VS_INFOFRAME_VER_REG 0xe1
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
-#define SP_MPEG_VS_INFOFRAME_DATA_BASE 0xe4
-
-/* General Control Packet Register */
-#define SP_GENERAL_CTRL_PACKET_REG 0x9f
-#define SP_CLEAR_AVMUTE BIT(4)
-#define SP_SET_AVMUTE BIT(0)
-
-/***************************************************************/
-/* Register definition of device address 0x70 */
-/***************************************************************/
-
-/* HDCP Status Register */
-#define SP_TX_HDCP_STATUS_REG 0x00
-#define SP_AUTH_FAIL BIT(5)
-#define SP_AUTHEN_PASS BIT(1)
-
-/* HDCP Control Register 0 */
-#define SP_HDCP_CTRL0_REG 0x01
-#define SP_RX_REPEATER BIT(6)
-#define SP_RE_AUTH BIT(5)
-#define SP_SW_AUTH_OK BIT(4)
-#define SP_HARD_AUTH_EN BIT(3)
-#define SP_HDCP_ENC_EN BIT(2)
-#define SP_BKSV_SRM_PASS BIT(1)
-#define SP_KSVLIST_VLD BIT(0)
-/* HDCP Function Enabled */
-#define SP_HDCP_FUNCTION_ENABLED (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-/* HDCP Receiver BSTATUS Register 0 */
-#define SP_HDCP_RX_BSTATUS0_REG 0x1b
-/* HDCP Receiver BSTATUS Register 1 */
-#define SP_HDCP_RX_BSTATUS1_REG 0x1c
-
-/* HDCP Embedded "Blue Screen" Content Registers */
-#define SP_HDCP_VID0_BLUE_SCREEN_REG 0x2c
-#define SP_HDCP_VID1_BLUE_SCREEN_REG 0x2d
-#define SP_HDCP_VID2_BLUE_SCREEN_REG 0x2e
-
-/* HDCP Wait R0 Timing Register */
-#define SP_HDCP_WAIT_R0_TIME_REG 0x40
-
-/* HDCP Link Integrity Check Timer Register */
-#define SP_HDCP_LINK_CHECK_TIMER_REG 0x41
-
-/* HDCP Repeater Ready Wait Timer Register */
-#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG 0x42
-
-/* HDCP Auto Timer Register */
-#define SP_HDCP_AUTO_TIMER_REG 0x51
-
-/* HDCP Key Status Register */
-#define SP_HDCP_KEY_STATUS_REG 0x5e
-
-/* HDCP Key Command Register */
-#define SP_HDCP_KEY_COMMAND_REG 0x5f
-#define SP_DISABLE_SYNC_HDCP BIT(2)
-
-/* OTP Memory Key Protection Registers */
-#define SP_OTP_KEY_PROTECT1_REG 0x60
-#define SP_OTP_KEY_PROTECT2_REG 0x61
-#define SP_OTP_KEY_PROTECT3_REG 0x62
-#define SP_OTP_PSW1 0xa2
-#define SP_OTP_PSW2 0x7e
-#define SP_OTP_PSW3 0xc6
-
-/* DP System Control Registers */
-#define SP_DP_SYSTEM_CTRL_BASE (0x80 - 1)
-/* Bits for DP System Control Register 2 */
-#define SP_CHA_STA BIT(2)
-/* Bits for DP System Control Register 3 */
-#define SP_HPD_STATUS BIT(6)
-#define SP_STRM_VALID BIT(2)
-/* Bits for DP System Control Register 4 */
-#define SP_ENHANCED_MODE BIT(3)
-
-/* DP Video Control Register */
-#define SP_DP_VIDEO_CTRL_REG 0x84
-#define SP_COLOR_F_MASK 0x06
-#define SP_COLOR_F_SHIFT 1
-#define SP_BPC_MASK 0xe0
-#define SP_BPC_SHIFT 5
-# define SP_BPC_6BITS 0x00
-# define SP_BPC_8BITS 0x01
-# define SP_BPC_10BITS 0x02
-# define SP_BPC_12BITS 0x03
-
-/* DP Audio Control Register */
-#define SP_DP_AUDIO_CTRL_REG 0x87
-#define SP_AUD_EN BIT(0)
-
-/* 10us Pulse Generate Timer Registers */
-#define SP_I2C_GEN_10US_TIMER0_REG 0x88
-#define SP_I2C_GEN_10US_TIMER1_REG 0x89
-
-/* Packet Send Control Register */
-#define SP_PACKET_SEND_CTRL_REG 0x90
-#define SP_AUD_IF_UP BIT(7)
-#define SP_AVI_IF_UD BIT(6)
-#define SP_MPEG_IF_UD BIT(5)
-#define SP_SPD_IF_UD BIT(4)
-#define SP_AUD_IF_EN BIT(3)
-#define SP_AVI_IF_EN BIT(2)
-#define SP_MPEG_IF_EN BIT(1)
-#define SP_SPD_IF_EN BIT(0)
-
-/* DP HDCP Control Register */
-#define SP_DP_HDCP_CTRL_REG 0x92
-#define SP_AUTO_EN BIT(7)
-#define SP_AUTO_START BIT(5)
-#define SP_LINK_POLLING BIT(1)
-
-/* DP Main Link Bandwidth Setting Register */
-#define SP_DP_MAIN_LINK_BW_SET_REG 0xa0
-#define SP_LINK_BW_SET_MASK 0x1f
-#define SP_INITIAL_SLIM_M_AUD_SEL BIT(5)
-
-/* DP Training Pattern Set Register */
-#define SP_DP_TRAINING_PATTERN_SET_REG 0xa2
-
-/* DP Lane 0 Link Training Control Register */
-#define SP_DP_LANE0_LT_CTRL_REG 0xa3
-#define SP_TX_SW_SET_MASK 0x1b
-#define SP_MAX_PRE_REACH BIT(5)
-#define SP_MAX_DRIVE_REACH BIT(4)
-#define SP_PRE_EMP_LEVEL1 BIT(3)
-#define SP_DRVIE_CURRENT_LEVEL1 BIT(0)
-
-/* DP Link Training Control Register */
-#define SP_DP_LT_CTRL_REG 0xa8
-#define SP_LT_ERROR_TYPE_MASK 0x70
-# define SP_LT_NO_ERROR 0x00
-# define SP_LT_AUX_WRITE_ERROR 0x01
-# define SP_LT_MAX_DRIVE_REACHED 0x02
-# define SP_LT_WRONG_LANE_COUNT_SET 0x03
-# define SP_LT_LOOP_SAME_5_TIME 0x04
-# define SP_LT_CR_FAIL_IN_EQ 0x05
-# define SP_LT_EQ_LOOP_5_TIME 0x06
-#define SP_LT_EN BIT(0)
-
-/* DP CEP Training Control Registers */
-#define SP_DP_CEP_TRAINING_CTRL0_REG 0xa9
-#define SP_DP_CEP_TRAINING_CTRL1_REG 0xaa
-
-/* DP Debug Register 1 */
-#define SP_DP_DEBUG1_REG 0xb0
-#define SP_DEBUG_PLL_LOCK BIT(4)
-#define SP_POLLING_EN BIT(1)
-
-/* DP Polling Control Register */
-#define SP_DP_POLLING_CTRL_REG 0xb4
-#define SP_AUTO_POLLING_DISABLE BIT(0)
-
-/* DP Link Debug Control Register */
-#define SP_DP_LINK_DEBUG_CTRL_REG 0xb8
-#define SP_M_VID_DEBUG BIT(5)
-#define SP_NEW_PRBS7 BIT(4)
-#define SP_INSERT_ER BIT(1)
-#define SP_PRBS31_EN BIT(0)
-
-/* AUX Misc control Register */
-#define SP_AUX_MISC_CTRL_REG 0xbf
-
-/* DP PLL control Register */
-#define SP_DP_PLL_CTRL_REG 0xc7
-#define SP_PLL_RST BIT(6)
-
-/* DP Analog Power Down Register */
-#define SP_DP_ANALOG_POWER_DOWN_REG 0xc8
-#define SP_CH0_PD BIT(0)
-
-/* DP Misc Control Register */
-#define SP_DP_MISC_CTRL_REG 0xcd
-#define SP_EQ_TRAINING_LOOP BIT(6)
-
-/* DP Extra I2C Device Address Register */
-#define SP_DP_EXTRA_I2C_DEV_ADDR_REG 0xce
-#define SP_I2C_STRETCH_DISABLE BIT(7)
-
-#define SP_I2C_EXTRA_ADDR 0x50
-
-/* DP Downspread Control Register 1 */
-#define SP_DP_DOWNSPREAD_CTRL1_REG 0xd0
-
-/* DP M Value Calculation Control Register */
-#define SP_DP_M_CALCULATION_CTRL_REG 0xd9
-#define SP_M_GEN_CLK_SEL BIT(0)
-
-/* AUX Channel Access Status Register */
-#define SP_AUX_CH_STATUS_REG 0xe0
-#define SP_AUX_STATUS 0x0f
-
-/* AUX Channel DEFER Control Register */
-#define SP_AUX_DEFER_CTRL_REG 0xe2
-#define SP_DEFER_CTRL_EN BIT(7)
-
-/* DP Buffer Data Count Register */
-#define SP_BUF_DATA_COUNT_REG 0xe4
-#define SP_BUF_DATA_COUNT_MASK 0x1f
-#define SP_BUF_CLR BIT(7)
-
-/* DP AUX Channel Control Register 1 */
-#define SP_DP_AUX_CH_CTRL1_REG 0xe5
-#define SP_AUX_TX_COMM_MASK 0x0f
-#define SP_AUX_LENGTH_MASK 0xf0
-#define SP_AUX_LENGTH_SHIFT 4
-
-/* DP AUX CH Address Register 0 */
-#define SP_AUX_ADDR_7_0_REG 0xe6
-
-/* DP AUX CH Address Register 1 */
-#define SP_AUX_ADDR_15_8_REG 0xe7
-
-/* DP AUX CH Address Register 2 */
-#define SP_AUX_ADDR_19_16_REG 0xe8
-#define SP_AUX_ADDR_19_16_MASK 0x0f
-
-/* DP AUX Channel Control Register 2 */
-#define SP_DP_AUX_CH_CTRL2_REG 0xe9
-#define SP_AUX_SEL_RXCM BIT(6)
-#define SP_AUX_CHSEL BIT(3)
-#define SP_AUX_PN_INV BIT(2)
-#define SP_ADDR_ONLY BIT(1)
-#define SP_AUX_EN BIT(0)
-
-/* DP Video Stream Control InfoFrame Register */
-#define SP_DP_3D_VSC_CTRL_REG 0xea
-#define SP_INFO_FRAME_VSC_EN BIT(0)
-
-/* DP Video Stream Data Byte 1 Register */
-#define SP_DP_VSC_DB1_REG 0xeb
-
-/* DP AUX Channel Control Register 3 */
-#define SP_DP_AUX_CH_CTRL3_REG 0xec
-#define SP_WAIT_COUNTER_7_0_MASK 0xff
-
-/* DP AUX Channel Control Register 4 */
-#define SP_DP_AUX_CH_CTRL4_REG 0xed
-
-/* DP AUX Buffer Data Registers */
-#define SP_DP_BUF_DATA0_REG 0xf0
-
-/***************************************************************/
-/* Register definition of device address 0x72 */
-/***************************************************************/
-
-/*
- * Core Register Definitions
- */
-
-/* Device ID Low Byte Register */
-#define SP_DEVICE_IDL_REG 0x02
-
-/* Device ID High Byte Register */
-#define SP_DEVICE_IDH_REG 0x03
-
-/* Device version register */
-#define SP_DEVICE_VERSION_REG 0x04
-
-/* Power Down Control Register */
-#define SP_POWERDOWN_CTRL_REG 0x05
-#define SP_REGISTER_PD BIT(7)
-#define SP_HDCP_PD BIT(5)
-#define SP_AUDIO_PD BIT(4)
-#define SP_VIDEO_PD BIT(3)
-#define SP_LINK_PD BIT(2)
-#define SP_TOTAL_PD BIT(1)
-
-/* Reset Control Register 1 */
-#define SP_RESET_CTRL1_REG 0x06
-#define SP_MISC_RST BIT(7)
-#define SP_VIDCAP_RST BIT(6)
-#define SP_VIDFIF_RST BIT(5)
-#define SP_AUDFIF_RST BIT(4)
-#define SP_AUDCAP_RST BIT(3)
-#define SP_HDCP_RST BIT(2)
-#define SP_SW_RST BIT(1)
-#define SP_HW_RST BIT(0)
-
-/* Reset Control Register 2 */
-#define SP_RESET_CTRL2_REG 0x07
-#define SP_AUX_RST BIT(2)
-#define SP_SERDES_FIFO_RST BIT(1)
-#define SP_I2C_REG_RST BIT(0)
-
-/* Video Control Register 1 */
-#define SP_VID_CTRL1_REG 0x08
-#define SP_VIDEO_EN BIT(7)
-#define SP_VIDEO_MUTE BIT(2)
-#define SP_DE_GEN BIT(1)
-#define SP_DEMUX BIT(0)
-
-/* Video Control Register 2 */
-#define SP_VID_CTRL2_REG 0x09
-#define SP_IN_COLOR_F_MASK 0x03
-#define SP_IN_YC_BIT_SEL BIT(2)
-#define SP_IN_BPC_MASK 0x70
-#define SP_IN_BPC_SHIFT 4
-# define SP_IN_BPC_12BIT 0x03
-# define SP_IN_BPC_10BIT 0x02
-# define SP_IN_BPC_8BIT 0x01
-# define SP_IN_BPC_6BIT 0x00
-#define SP_IN_D_RANGE BIT(7)
-
-/* Video Control Register 3 */
-#define SP_VID_CTRL3_REG 0x0a
-#define SP_HPD_OUT BIT(6)
-
-/* Video Control Register 5 */
-#define SP_VID_CTRL5_REG 0x0c
-#define SP_CSC_STD_SEL BIT(7)
-#define SP_XVYCC_RNG_LMT BIT(6)
-#define SP_RANGE_Y2R BIT(5)
-#define SP_CSPACE_Y2R BIT(4)
-#define SP_RGB_RNG_LMT BIT(3)
-#define SP_Y_RNG_LMT BIT(2)
-#define SP_RANGE_R2Y BIT(1)
-#define SP_CSPACE_R2Y BIT(0)
-
-/* Video Control Register 6 */
-#define SP_VID_CTRL6_REG 0x0d
-#define SP_TEST_PATTERN_EN BIT(7)
-#define SP_VIDEO_PROCESS_EN BIT(6)
-#define SP_VID_US_MODE BIT(3)
-#define SP_VID_DS_MODE BIT(2)
-#define SP_UP_SAMPLE BIT(1)
-#define SP_DOWN_SAMPLE BIT(0)
-
-/* Video Control Register 8 */
-#define SP_VID_CTRL8_REG 0x0f
-#define SP_VID_VRES_TH BIT(0)
-
-/* Total Line Status Low Byte Register */
-#define SP_TOTAL_LINE_STAL_REG 0x24
-
-/* Total Line Status High Byte Register */
-#define SP_TOTAL_LINE_STAH_REG 0x25
-
-/* Active Line Status Low Byte Register */
-#define SP_ACT_LINE_STAL_REG 0x26
-
-/* Active Line Status High Byte Register */
-#define SP_ACT_LINE_STAH_REG 0x27
-
-/* Vertical Front Porch Status Register */
-#define SP_V_F_PORCH_STA_REG 0x28
-
-/* Vertical SYNC Width Status Register */
-#define SP_V_SYNC_STA_REG 0x29
-
-/* Vertical Back Porch Status Register */
-#define SP_V_B_PORCH_STA_REG 0x2a
-
-/* Total Pixel Status Low Byte Register */
-#define SP_TOTAL_PIXEL_STAL_REG 0x2b
-
-/* Total Pixel Status High Byte Register */
-#define SP_TOTAL_PIXEL_STAH_REG 0x2c
-
-/* Active Pixel Status Low Byte Register */
-#define SP_ACT_PIXEL_STAL_REG 0x2d
-
-/* Active Pixel Status High Byte Register */
-#define SP_ACT_PIXEL_STAH_REG 0x2e
-
-/* Horizontal Front Porch Status Low Byte Register */
-#define SP_H_F_PORCH_STAL_REG 0x2f
-
-/* Horizontal Front Porch Statys High Byte Register */
-#define SP_H_F_PORCH_STAH_REG 0x30
-
-/* Horizontal SYNC Width Status Low Byte Register */
-#define SP_H_SYNC_STAL_REG 0x31
-
-/* Horizontal SYNC Width Status High Byte Register */
-#define SP_H_SYNC_STAH_REG 0x32
-
-/* Horizontal Back Porch Status Low Byte Register */
-#define SP_H_B_PORCH_STAL_REG 0x33
-
-/* Horizontal Back Porch Status High Byte Register */
-#define SP_H_B_PORCH_STAH_REG 0x34
-
-/* InfoFrame AVI Packet DB1 Register */
-#define SP_INFOFRAME_AVI_DB1_REG 0x70
-
-/* Bit Control Specific Register */
-#define SP_BIT_CTRL_SPECIFIC_REG 0x80
-#define SP_BIT_CTRL_SELECT_SHIFT 1
-#define SP_ENABLE_BIT_CTRL BIT(0)
-
-/* InfoFrame Audio Packet DB1 Register */
-#define SP_INFOFRAME_AUD_DB1_REG 0x83
-
-/* InfoFrame MPEG Packet DB1 Register */
-#define SP_INFOFRAME_MPEG_DB1_REG 0xb0
-
-/* Audio Channel Status Registers */
-#define SP_AUD_CH_STATUS_BASE 0xd0
-
-/* Audio Channel Num Register 5 */
-#define SP_I2S_CHANNEL_NUM_MASK 0xe0
-# define SP_I2S_CH_NUM_1 (0x00 << 5)
-# define SP_I2S_CH_NUM_2 (0x01 << 5)
-# define SP_I2S_CH_NUM_3 (0x02 << 5)
-# define SP_I2S_CH_NUM_4 (0x03 << 5)
-# define SP_I2S_CH_NUM_5 (0x04 << 5)
-# define SP_I2S_CH_NUM_6 (0x05 << 5)
-# define SP_I2S_CH_NUM_7 (0x06 << 5)
-# define SP_I2S_CH_NUM_8 (0x07 << 5)
-#define SP_EXT_VUCP BIT(2)
-#define SP_VBIT BIT(1)
-#define SP_AUDIO_LAYOUT BIT(0)
-
-/* Analog Debug Register 2 */
-#define SP_ANALOG_DEBUG2_REG 0xdd
-#define SP_FORCE_SW_OFF_BYPASS 0x20
-#define SP_XTAL_FRQ 0x1c
-# define SP_XTAL_FRQ_19M2 (0x00 << 2)
-# define SP_XTAL_FRQ_24M (0x01 << 2)
-# define SP_XTAL_FRQ_25M (0x02 << 2)
-# define SP_XTAL_FRQ_26M (0x03 << 2)
-# define SP_XTAL_FRQ_27M (0x04 << 2)
-# define SP_XTAL_FRQ_38M4 (0x05 << 2)
-# define SP_XTAL_FRQ_52M (0x06 << 2)
-#define SP_POWERON_TIME_1P5MS 0x03
-
-/* Analog Control 0 Register */
-#define SP_ANALOG_CTRL0_REG 0xe1
-
-/* Common Interrupt Status Register 1 */
-#define SP_COMMON_INT_STATUS_BASE (0xf1 - 1)
-#define SP_PLL_LOCK_CHG 0x40
-
-/* Common Interrupt Status Register 2 */
-#define SP_COMMON_INT_STATUS2 0xf2
-#define SP_HDCP_AUTH_CHG BIT(1)
-#define SP_HDCP_AUTH_DONE BIT(0)
-
-#define SP_HDCP_LINK_CHECK_FAIL BIT(0)
-
-/* Common Interrupt Status Register 4 */
-#define SP_COMMON_INT_STATUS4_REG 0xf4
-#define SP_HPD_IRQ BIT(6)
-#define SP_HPD_ESYNC_ERR BIT(4)
-#define SP_HPD_CHG BIT(2)
-#define SP_HPD_LOST BIT(1)
-#define SP_HPD_PLUG BIT(0)
-
-/* DP Interrupt Status Register */
-#define SP_DP_INT_STATUS1_REG 0xf7
-#define SP_TRAINING_FINISH BIT(5)
-#define SP_POLLING_ERR BIT(4)
-
-/* Common Interrupt Mask Register */
-#define SP_COMMON_INT_MASK_BASE (0xf8 - 1)
-
-#define SP_COMMON_INT_MASK4_REG 0xfb
-
-/* DP Interrupts Mask Register */
-#define SP_DP_INT_MASK1_REG 0xfe
-
-/* Interrupt Control Register */
-#define SP_INT_CTRL_REG 0xff
-
-/***************************************************************/
-/* Register definition of device address 0x7a */
-/***************************************************************/
-
-/* DP TX Link Training Control Register */
-#define SP_DP_TX_LT_CTRL0_REG 0x30
-
-/* PD 1.2 Lint Training 80bit Pattern Register */
-#define SP_DP_LT_80BIT_PATTERN0_REG 0x80
-#define SP_DP_LT_80BIT_PATTERN_REG_NUM 10
-
-/* Audio Interface Control Register 0 */
-#define SP_AUD_INTERFACE_CTRL0_REG 0x5f
-#define SP_AUD_INTERFACE_DISABLE 0x80
-
-/* Audio Interface Control Register 2 */
-#define SP_AUD_INTERFACE_CTRL2_REG 0x60
-#define SP_M_AUD_ADJUST_ST 0x04
-
-/* Audio Interface Control Register 3 */
-#define SP_AUD_INTERFACE_CTRL3_REG 0x62
-
-/* Audio Interface Control Register 4 */
-#define SP_AUD_INTERFACE_CTRL4_REG 0x67
-
-/* Audio Interface Control Register 5 */
-#define SP_AUD_INTERFACE_CTRL5_REG 0x68
-
-/* Audio Interface Control Register 6 */
-#define SP_AUD_INTERFACE_CTRL6_REG 0x69
-
-/* Firmware Version Register */
-#define SP_FW_VER_REG 0xb7
-
-#endif
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index e930ff9b5cd4..e1fa7d820373 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -1,4 +1,27 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_ANALOGIX_ANX6345
+ tristate "Analogix ANX6345 bridge"
+ depends on OF
+ select DRM_ANALOGIX_DP
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+	  ANX6345 is an ultra-low power Full-HD DisplayPort/eDP
+ transmitter designed for portable devices. The
+ ANX6345 transforms the LVTTL RGB output of an
+ application processor to eDP or DisplayPort.
+
+config DRM_ANALOGIX_ANX78XX
+ tristate "Analogix ANX78XX bridge"
+ select DRM_ANALOGIX_DP
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ ANX78XX is an ultra-low power Full-HD SlimPort transmitter
+ designed for portable devices. The ANX78XX transforms
+ the HDMI output of an application processor to MyDP
+ or DisplayPort.
+
config DRM_ANALOGIX_DP
tristate
depends on DRM
diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile
index fdbf3fd2f087..97669b374098 100644
--- a/drivers/gpu/drm/bridge/analogix/Makefile
+++ b/drivers/gpu/drm/bridge/analogix/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o
+analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
new file mode 100644
index 000000000000..56f55c53abfd
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ * Copyright(c) 2017, Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "analogix-i2c-dptx.h"
+#include "analogix-i2c-txcommon.h"
+
+#define POLL_DELAY 50000 /* us */
+#define POLL_TIMEOUT 5000000 /* us */
+
+#define I2C_IDX_DPTX 0
+#define I2C_IDX_TXCOM 1
+
+static const u8 anx6345_i2c_addresses[] = {
+ [I2C_IDX_DPTX] = 0x70,
+ [I2C_IDX_TXCOM] = 0x72,
+};
+#define I2C_NUM_ADDRESSES ARRAY_SIZE(anx6345_i2c_addresses)
+
+struct anx6345 {
+ struct drm_dp_aux aux;
+ struct drm_bridge bridge;
+ struct i2c_client *client;
+ struct edid *edid;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+ struct regulator *dvdd12;
+ struct regulator *dvdd25;
+ struct gpio_desc *gpiod_reset;
+ struct mutex lock; /* protect EDID access */
+
+ /* I2C Slave addresses of ANX6345 are mapped as DPTX and SYS */
+ struct i2c_client *i2c_clients[I2C_NUM_ADDRESSES];
+ struct regmap *map[I2C_NUM_ADDRESSES];
+
+ u16 chipid;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ bool powered;
+};
+
+static inline struct anx6345 *connector_to_anx6345(struct drm_connector *c)
+{
+ return container_of(c, struct anx6345, connector);
+}
+
+static inline struct anx6345 *bridge_to_anx6345(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct anx6345, bridge);
+}
+
+static int anx6345_set_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, mask);
+}
+
+static int anx6345_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, 0);
+}
+
+static ssize_t anx6345_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct anx6345 *anx6345 = container_of(aux, struct anx6345, aux);
+
+ return anx_dp_aux_transfer(anx6345->map[I2C_IDX_DPTX], msg);
+}
+
+static int anx6345_dp_link_training(struct anx6345 *anx6345)
+{
+ unsigned int value;
+ u8 dp_bw, dpcd[2];
+ int err;
+
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_POWERDOWN_CTRL_REG,
+ SP_TOTAL_PD);
+ if (err)
+ return err;
+
+ err = drm_dp_dpcd_readb(&anx6345->aux, DP_MAX_LINK_RATE, &dp_bw);
+ if (err < 0)
+ return err;
+
+ switch (dp_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
+ return -EINVAL;
+ }
+
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+ SP_VIDEO_MUTE);
+ if (err)
+ return err;
+
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_VID_CTRL1_REG, SP_VIDEO_EN);
+ if (err)
+ return err;
+
+ /* Get DPCD info */
+ err = drm_dp_dpcd_read(&anx6345->aux, DP_DPCD_REV,
+ &anx6345->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DPCD: %d\n", err);
+ return err;
+ }
+
+ /* Clear channel x SERDES power down */
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+ if (err)
+ return err;
+
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
+
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+		 * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+ }
+
+ /* Possibly enable downspread on the sink */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_DOWNSPREAD_CTRL1_REG, 0);
+ if (err)
+ return err;
+
+ if (anx6345->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+ DRM_DEBUG("Enable downspread on the sink\n");
+ /* 4000PPM */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_DOWNSPREAD_CTRL1_REG, 8);
+ if (err)
+ return err;
+
+ err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ if (err < 0)
+ return err;
+ } else {
+ err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL, 0);
+ if (err < 0)
+ return err;
+ }
+
+ /* Set the lane count and the link rate on the sink */
+ if (drm_dp_enhanced_frame_cap(anx6345->dpcd))
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 4,
+ SP_ENHANCED_MODE);
+ else
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 4,
+ SP_ENHANCED_MODE);
+ if (err)
+ return err;
+
+ dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+ if (err)
+ return err;
+
+ dpcd[1] = drm_dp_max_lane_count(anx6345->dpcd);
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LANE_COUNT_SET_REG, dpcd[1]);
+ if (err)
+ return err;
+
+ if (drm_dp_enhanced_frame_cap(anx6345->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx6345->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
+
+ if (err < 0) {
+ DRM_ERROR("Failed to configure link: %d\n", err);
+ return err;
+ }
+
+ /* Start training on the source */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LT_CTRL_REG,
+ SP_LT_EN);
+ if (err)
+ return err;
+
+ return regmap_read_poll_timeout(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LT_CTRL_REG,
+ value, !(value & SP_DP_LT_INPROGRESS),
+ POLL_DELAY, POLL_TIMEOUT);
+}
+
+static int anx6345_tx_initialization(struct anx6345 *anx6345)
+{
+ int err, i;
+
+ /* FIXME: colordepth is hardcoded for now */
+ err = regmap_write(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL2_REG,
+ SP_IN_BPC_6BIT << SP_IN_BPC_SHIFT);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_PLL_CTRL_REG, 0);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_TXCOM],
+ SP_ANALOG_DEBUG1_REG, 0);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LINK_DEBUG_CTRL_REG,
+ SP_NEW_PRBS7 | SP_M_VID_DEBUG);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_ANALOG_POWER_DOWN_REG, 0);
+ if (err)
+ return err;
+
+ /* Force HPD */
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 3,
+ SP_HPD_FORCE | SP_HPD_CTRL);
+ if (err)
+ return err;
+
+ for (i = 0; i < 4; i++) {
+ /* 4 lanes */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LANE0_LT_CTRL_REG + i, 0);
+ if (err)
+ return err;
+ }
+
+ /* Reset AUX */
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_RESET_CTRL2_REG, SP_AUX_RST);
+ if (err)
+ return err;
+
+ return anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_RESET_CTRL2_REG, SP_AUX_RST);
+}
+
+static void anx6345_poweron(struct anx6345 *anx6345)
+{
+ int err;
+
+ /* Ensure reset is asserted before starting power on sequence */
+ gpiod_set_value_cansleep(anx6345->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+
+ err = regulator_enable(anx6345->dvdd12);
+ if (err) {
+ DRM_ERROR("Failed to enable dvdd12 regulator: %d\n",
+ err);
+ return;
+ }
+
+ /* T1 - delay between VDD12 and VDD25 should be 0-2ms */
+ usleep_range(1000, 2000);
+
+ err = regulator_enable(anx6345->dvdd25);
+ if (err) {
+ DRM_ERROR("Failed to enable dvdd25 regulator: %d\n",
+ err);
+ return;
+ }
+
+	/* T2 - delay between RESETN and all power rails stable,
+ * should be 2-5ms
+ */
+ usleep_range(2000, 5000);
+
+ gpiod_set_value_cansleep(anx6345->gpiod_reset, 0);
+
+ /* Power on registers module */
+ anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+ anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+ SP_REGISTER_PD | SP_TOTAL_PD);
+
+ if (anx6345->panel)
+ drm_panel_prepare(anx6345->panel);
+
+ anx6345->powered = true;
+}
+
+static void anx6345_poweroff(struct anx6345 *anx6345)
+{
+ int err;
+
+ gpiod_set_value_cansleep(anx6345->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+
+ if (anx6345->panel)
+ drm_panel_unprepare(anx6345->panel);
+
+ err = regulator_disable(anx6345->dvdd25);
+ if (err) {
+ DRM_ERROR("Failed to disable dvdd25 regulator: %d\n",
+ err);
+ return;
+ }
+
+ usleep_range(5000, 10000);
+
+ err = regulator_disable(anx6345->dvdd12);
+ if (err) {
+ DRM_ERROR("Failed to disable dvdd12 regulator: %d\n",
+ err);
+ return;
+ }
+
+ usleep_range(1000, 2000);
+
+ anx6345->powered = false;
+}
+
+static int anx6345_start(struct anx6345 *anx6345)
+{
+ int err;
+
+ if (!anx6345->powered)
+ anx6345_poweron(anx6345);
+
+ /* Power on needed modules */
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_POWERDOWN_CTRL_REG,
+ SP_VIDEO_PD | SP_LINK_PD);
+
+ err = anx6345_tx_initialization(anx6345);
+ if (err) {
+ DRM_ERROR("Failed eDP transmitter initialization: %d\n", err);
+ anx6345_poweroff(anx6345);
+ return err;
+ }
+
+ err = anx6345_dp_link_training(anx6345);
+ if (err) {
+ DRM_ERROR("Failed link training: %d\n", err);
+ anx6345_poweroff(anx6345);
+ return err;
+ }
+
+ /*
+ * This delay seems to help keep the hardware in a good state. Without
+ * it, there are times where it fails silently.
+ */
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static int anx6345_config_dp_output(struct anx6345 *anx6345)
+{
+ int err;
+
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+ SP_VIDEO_MUTE);
+ if (err)
+ return err;
+
+ /* Enable DP output */
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+ SP_VIDEO_EN);
+ if (err)
+ return err;
+
+ /* Force stream valid */
+ return anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 3,
+ SP_STRM_FORCE | SP_STRM_CTRL);
+}
+
+static int anx6345_get_downstream_info(struct anx6345 *anx6345)
+{
+ u8 value;
+ int err;
+
+ err = drm_dp_dpcd_readb(&anx6345->aux, DP_SINK_COUNT, &value);
+ if (err < 0) {
+ DRM_ERROR("Get sink count failed %d\n", err);
+ return err;
+ }
+
+ if (!DP_GET_SINK_COUNT(value)) {
+ DRM_ERROR("Downstream disconnected\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int anx6345_get_modes(struct drm_connector *connector)
+{
+ struct anx6345 *anx6345 = connector_to_anx6345(connector);
+ int err, num_modes = 0;
+ bool power_off = false;
+
+ mutex_lock(&anx6345->lock);
+
+ if (!anx6345->edid) {
+ if (!anx6345->powered) {
+ anx6345_poweron(anx6345);
+ power_off = true;
+ }
+
+ err = anx6345_get_downstream_info(anx6345);
+ if (err) {
+ DRM_ERROR("Failed to get downstream info: %d\n", err);
+ goto unlock;
+ }
+
+ anx6345->edid = drm_get_edid(connector, &anx6345->aux.ddc);
+ if (!anx6345->edid)
+ DRM_ERROR("Failed to read EDID from panel\n");
+
+ err = drm_connector_update_edid_property(connector,
+ anx6345->edid);
+ if (err) {
+ DRM_ERROR("Failed to update EDID property: %d\n", err);
+ goto unlock;
+ }
+ }
+
+ num_modes += drm_add_edid_modes(connector, anx6345->edid);
+
+unlock:
+ if (power_off)
+ anx6345_poweroff(anx6345);
+
+ mutex_unlock(&anx6345->lock);
+
+ if (!num_modes && anx6345->panel)
+ num_modes += drm_panel_get_modes(anx6345->panel, connector);
+
+ return num_modes;
+}
+
+static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs = {
+ .get_modes = anx6345_get_modes,
+};
+
+static void
+anx6345_connector_destroy(struct drm_connector *connector)
+{
+ struct anx6345 *anx6345 = connector_to_anx6345(connector);
+
+ if (anx6345->panel)
+ drm_panel_detach(anx6345->panel);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs anx6345_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = anx6345_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int anx6345_bridge_attach(struct drm_bridge *bridge)
+{
+ struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+ int err;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ /* Register aux channel */
+ anx6345->aux.name = "DP-AUX";
+ anx6345->aux.dev = &anx6345->client->dev;
+ anx6345->aux.transfer = anx6345_aux_transfer;
+
+ err = drm_dp_aux_register(&anx6345->aux);
+ if (err < 0) {
+ DRM_ERROR("Failed to register aux channel: %d\n", err);
+ return err;
+ }
+
+ err = drm_connector_init(bridge->dev, &anx6345->connector,
+ &anx6345_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (err) {
+ DRM_ERROR("Failed to initialize connector: %d\n", err);
+ return err;
+ }
+
+ drm_connector_helper_add(&anx6345->connector,
+ &anx6345_connector_helper_funcs);
+
+ err = drm_connector_register(&anx6345->connector);
+ if (err) {
+ DRM_ERROR("Failed to register connector: %d\n", err);
+ return err;
+ }
+
+ anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ err = drm_connector_attach_encoder(&anx6345->connector,
+ bridge->encoder);
+ if (err) {
+ DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
+ return err;
+ }
+
+ if (anx6345->panel) {
+ err = drm_panel_attach(anx6345->panel, &anx6345->connector);
+ if (err) {
+ DRM_ERROR("Failed to attach panel: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static enum drm_mode_status
+anx6345_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /* Max 1200p at 5.4 GHz, one lane */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void anx6345_bridge_disable(struct drm_bridge *bridge)
+{
+ struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+
+ /* Power off all modules except configuration registers access */
+ anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+ if (anx6345->panel)
+ drm_panel_disable(anx6345->panel);
+
+ if (anx6345->powered)
+ anx6345_poweroff(anx6345);
+}
+
+static void anx6345_bridge_enable(struct drm_bridge *bridge)
+{
+ struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+ int err;
+
+ if (anx6345->panel)
+ drm_panel_enable(anx6345->panel);
+
+ err = anx6345_start(anx6345);
+ if (err) {
+ DRM_ERROR("Failed to initialize: %d\n", err);
+ return;
+ }
+
+ err = anx6345_config_dp_output(anx6345);
+ if (err)
+ DRM_ERROR("Failed to enable DP output: %d\n", err);
+}
+
+static const struct drm_bridge_funcs anx6345_bridge_funcs = {
+ .attach = anx6345_bridge_attach,
+ .mode_valid = anx6345_bridge_mode_valid,
+ .disable = anx6345_bridge_disable,
+ .enable = anx6345_bridge_enable,
+};
+
+static void unregister_i2c_dummy_clients(struct anx6345 *anx6345)
+{
+ unsigned int i;
+
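+ /*
+ * Entry 0 is normally the main client registered by the I2C core;
+ * only the dummy devices created in probe (entries whose address
+ * differs from the main client) are unregistered here.
+ */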
+ for (i = 1; i < ARRAY_SIZE(anx6345->i2c_clients); i++)
+ if (anx6345->i2c_clients[i] &&
+ anx6345->i2c_clients[i]->addr != anx6345->client->addr)
+ i2c_unregister_device(anx6345->i2c_clients[i]);
+}
+
+static const struct regmap_config anx6345_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const u16 anx6345_chipid_list[] = {
+ 0x6345,
+};
+
+static bool anx6345_get_chip_id(struct anx6345 *anx6345)
+{
+ unsigned int i, idl, idh, version;
+
+ if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDL_REG, &idl))
+ return false;
+
+ if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDH_REG, &idh))
+ return false;
+
+ anx6345->chipid = (u8)idl | ((u8)idh << 8);
+
+ if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_VERSION_REG,
+ &version))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(anx6345_chipid_list); i++) {
+ if (anx6345->chipid == anx6345_chipid_list[i]) {
+ DRM_INFO("Found ANX%x (ver. %d) eDP Transmitter\n",
+ anx6345->chipid, version);
+ return true;
+ }
+ }
+
+ DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
+ anx6345->chipid, version);
+
+ return false;
+}
+
+static int anx6345_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct anx6345 *anx6345;
+ struct device *dev;
+ int i, err;
+
+ anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
+ if (!anx6345)
+ return -ENOMEM;
+
+ mutex_init(&anx6345->lock);
+
+ anx6345->bridge.of_node = client->dev.of_node;
+
+ anx6345->client = client;
+ i2c_set_clientdata(client, anx6345);
+
+ dev = &anx6345->client->dev;
+
+ err = drm_of_find_panel_or_bridge(client->dev.of_node, 1, 0,
+ &anx6345->panel, NULL);
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ if (err)
+ DRM_DEBUG("No panel found\n");
+
+ /* 1.2V digital core power regulator */
+ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
+ if (IS_ERR(anx6345->dvdd12)) {
+ DRM_ERROR("dvdd12-supply not found\n");
+ return PTR_ERR(anx6345->dvdd12);
+ }
+
+ /* 2.5V digital core power regulator */
+ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
+ if (IS_ERR(anx6345->dvdd25)) {
+ DRM_ERROR("dvdd25-supply not found\n");
+ return PTR_ERR(anx6345->dvdd25);
+ }
+
+ /* GPIO for chip reset */
+ anx6345->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(anx6345->gpiod_reset)) {
+ DRM_ERROR("Reset gpio not found\n");
+ return PTR_ERR(anx6345->gpiod_reset);
+ }
+
+ /* Map slave addresses of ANX6345 */
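+ /* Addresses in the table are in 8-bit form, hence the shift to 7-bit */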
+ for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
+ if (anx6345_i2c_addresses[i] >> 1 != client->addr)
+ anx6345->i2c_clients[i] = i2c_new_dummy_device(client->adapter,
+ anx6345_i2c_addresses[i] >> 1);
+ else
+ anx6345->i2c_clients[i] = client;
+
+ if (IS_ERR(anx6345->i2c_clients[i])) {
+ err = PTR_ERR(anx6345->i2c_clients[i]);
+ DRM_ERROR("Failed to reserve I2C bus %02x\n",
+ anx6345_i2c_addresses[i]);
+ goto err_unregister_i2c;
+ }
+
+ anx6345->map[i] = devm_regmap_init_i2c(anx6345->i2c_clients[i],
+ &anx6345_regmap_config);
+ if (IS_ERR(anx6345->map[i])) {
+ err = PTR_ERR(anx6345->map[i]);
+ DRM_ERROR("Failed regmap initialization %02x\n",
+ anx6345_i2c_addresses[i]);
+ goto err_unregister_i2c;
+ }
+ }
+
+ /* Look for supported chip ID */
+ anx6345_poweron(anx6345);
+ if (anx6345_get_chip_id(anx6345)) {
+ anx6345->bridge.funcs = &anx6345_bridge_funcs;
+ drm_bridge_add(&anx6345->bridge);
+
+ return 0;
+ } else {
+ anx6345_poweroff(anx6345);
+ err = -ENODEV;
+ }
+
+err_unregister_i2c:
+ unregister_i2c_dummy_clients(anx6345);
+ return err;
+}
+
+static int anx6345_i2c_remove(struct i2c_client *client)
+{
+ struct anx6345 *anx6345 = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&anx6345->bridge);
+
+ unregister_i2c_dummy_clients(anx6345);
+
+ kfree(anx6345->edid);
+
+ mutex_destroy(&anx6345->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id anx6345_id[] = {
+ { "anx6345", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, anx6345_id);
+
+static const struct of_device_id anx6345_match_table[] = {
+ { .compatible = "analogix,anx6345", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, anx6345_match_table);
+
+static struct i2c_driver anx6345_driver = {
+ .driver = {
+ .name = "anx6345",
+ .of_match_table = of_match_ptr(anx6345_match_table),
+ },
+ .probe = anx6345_i2c_probe,
+ .remove = anx6345_i2c_remove,
+ .id_table = anx6345_id,
+};
+module_i2c_driver(anx6345_driver);
+
+MODULE_DESCRIPTION("ANX6345 eDP Transmitter driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 3c7cc5af735c..41867be03751 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@@ -35,15 +36,21 @@
#define I2C_IDX_RX_P1 4
#define XTAL_CLK 270 /* 27M */
-#define AUX_CH_BUFFER_SIZE 16
-#define AUX_WAIT_TIMEOUT_MS 15
-
-static const u8 anx78xx_i2c_addresses[] = {
- [I2C_IDX_TX_P0] = TX_P0,
- [I2C_IDX_TX_P1] = TX_P1,
- [I2C_IDX_TX_P2] = TX_P2,
- [I2C_IDX_RX_P0] = RX_P0,
- [I2C_IDX_RX_P1] = RX_P1,
+
+static const u8 anx7808_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x78,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
+};
+
+static const u8 anx781x_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x70,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
};
struct anx78xx_platform_data {
@@ -62,7 +69,6 @@ struct anx78xx {
struct i2c_client *client;
struct edid *edid;
struct drm_connector connector;
- struct drm_dp_link link;
struct anx78xx_platform_data pdata;
struct mutex lock;
@@ -99,153 +105,11 @@ static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
return regmap_update_bits(map, reg, mask, 0);
}
-static bool anx78xx_aux_op_finished(struct anx78xx *anx78xx)
-{
- unsigned int value;
- int err;
-
- err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
- &value);
- if (err < 0)
- return false;
-
- return (value & SP_AUX_EN) == 0;
-}
-
-static int anx78xx_aux_wait(struct anx78xx *anx78xx)
-{
- unsigned long timeout;
- unsigned int status;
- int err;
-
- timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
-
- while (!anx78xx_aux_op_finished(anx78xx)) {
- if (time_after(jiffies, timeout)) {
- if (!anx78xx_aux_op_finished(anx78xx)) {
- DRM_ERROR("Timed out waiting AUX to finish\n");
- return -ETIMEDOUT;
- }
-
- break;
- }
-
- usleep_range(1000, 2000);
- }
-
- /* Read the AUX channel access status */
- err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_CH_STATUS_REG,
- &status);
- if (err < 0) {
- DRM_ERROR("Failed to read from AUX channel: %d\n", err);
- return err;
- }
-
- if (status & SP_AUX_STATUS) {
- DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
- status);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int anx78xx_aux_address(struct anx78xx *anx78xx, unsigned int addr)
-{
- int err;
-
- err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_7_0_REG,
- addr & 0xff);
- if (err)
- return err;
-
- err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_15_8_REG,
- (addr & 0xff00) >> 8);
- if (err)
- return err;
-
- /*
- * DP AUX CH Address Register #2, only update bits[3:0]
- * [7:4] RESERVED
- * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
- */
- err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
- SP_AUX_ADDR_19_16_REG,
- SP_AUX_ADDR_19_16_MASK,
- (addr & 0xf0000) >> 16);
-
- if (err)
- return err;
-
- return 0;
-}
-
static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);
- u8 ctrl1 = msg->request;
- u8 ctrl2 = SP_AUX_EN;
- u8 *buffer = msg->buffer;
- int err;
-
- /* The DP AUX transmit and receive buffer has 16 bytes. */
- if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
- return -E2BIG;
-
- /* Zero-sized messages specify address-only transactions. */
- if (msg->size < 1)
- ctrl2 |= SP_ADDR_ONLY;
- else /* For non-zero-sized set the length field. */
- ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
-
- if ((msg->request & DP_AUX_I2C_READ) == 0) {
- /* When WRITE | MOT write values to data buffer */
- err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_BUF_DATA0_REG, buffer,
- msg->size);
- if (err)
- return err;
- }
-
- /* Write address and request */
- err = anx78xx_aux_address(anx78xx, msg->address);
- if (err)
- return err;
-
- err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL1_REG,
- ctrl1);
- if (err)
- return err;
-
- /* Start transaction */
- err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY |
- SP_AUX_EN, ctrl2);
- if (err)
- return err;
-
- err = anx78xx_aux_wait(anx78xx);
- if (err)
- return err;
-
- msg->reply = DP_AUX_I2C_REPLY_ACK;
-
- if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
- /* Read values from data buffer */
- err = regmap_bulk_read(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_BUF_DATA0_REG, buffer,
- msg->size);
- if (err)
- return err;
- }
-
- err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY);
- if (err)
- return err;
-
- return msg->size;
+ return anx_dp_aux_transfer(anx78xx->map[I2C_IDX_TX_P0], msg);
}
static int anx78xx_set_hpd(struct anx78xx *anx78xx)
@@ -715,7 +579,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
/* 1.0V digital core power regulator */
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
if (IS_ERR(pdata->dvdd10)) {
- DRM_ERROR("DVDD10 regulator not found\n");
+ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+ DRM_ERROR("DVDD10 regulator not found\n");
+
return PTR_ERR(pdata->dvdd10);
}
@@ -737,7 +603,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
{
- u8 dp_bw, value;
+ u8 dp_bw, dpcd[2];
int err;
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
@@ -790,18 +656,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /* Check link capabilities */
- err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to probe link capabilities: %d\n", err);
- return err;
- }
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
- /* Power up the sink */
- err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
- return err;
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field", register 0x600).
+ */
+ usleep_range(1000, 2000);
}
/* Possibly enable downspread on the sink */
@@ -840,15 +722,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+ dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, value);
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
return err;
- err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+ dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
+
+ if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
if (err < 0) {
- DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+ DRM_ERROR("Failed to configure link: %d\n", err);
return err;
}
@@ -1301,6 +1190,7 @@ static const struct regmap_config anx78xx_regmap_config = {
};
static const u16 anx78xx_chipid_list[] = {
+ 0x7808,
0x7812,
0x7814,
0x7818,
@@ -1312,6 +1202,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
unsigned int i, idl, idh, version;
+ const u8 *i2c_addresses;
bool found = false;
int err;
@@ -1332,7 +1223,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
err = anx78xx_init_pdata(anx78xx);
if (err) {
- DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
return err;
}
@@ -1349,22 +1242,26 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
}
/* Map slave addresses of ANX7814 */
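+ /* Per-variant I2C address table, provided via the device match data */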
+ i2c_addresses = device_get_match_data(&client->dev);
for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
- anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter,
- anx78xx_i2c_addresses[i] >> 1);
- if (!anx78xx->i2c_dummy[i]) {
- err = -ENOMEM;
- DRM_ERROR("Failed to reserve I2C bus %02x\n",
- anx78xx_i2c_addresses[i]);
+ struct i2c_client *i2c_dummy;
+
+ i2c_dummy = i2c_new_dummy_device(client->adapter,
+ i2c_addresses[i] >> 1);
+ if (IS_ERR(i2c_dummy)) {
+ err = PTR_ERR(i2c_dummy);
+ DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
+ i2c_addresses[i], err);
goto err_unregister_i2c;
}
+ anx78xx->i2c_dummy[i] = i2c_dummy;
anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
&anx78xx_regmap_config);
if (IS_ERR(anx78xx->map[i])) {
err = PTR_ERR(anx78xx->map[i]);
DRM_ERROR("Failed regmap initialization %02x\n",
- anx78xx_i2c_addresses[i]);
+ i2c_addresses[i]);
goto err_unregister_i2c;
}
}
@@ -1463,7 +1360,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id anx78xx_match_table[] = {
- { .compatible = "analogix,anx7814", },
+ { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
+ { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, anx78xx_match_table);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h
new file mode 100644
index 000000000000..db2a2725acb2
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ */
+
+#ifndef __ANX78xx_H
+#define __ANX78xx_H
+
+#include "analogix-i2c-dptx.h"
+#include "analogix-i2c-txcommon.h"
+
+/***************************************************************/
+/* Register definitions for RX_P0 */
+/***************************************************************/
+
+/*
+ * System Control and Status
+ */
+
+/* Software Reset Register 1 */
+#define SP_SOFTWARE_RESET1_REG 0x11
+#define SP_VIDEO_RST BIT(4)
+#define SP_HDCP_MAN_RST BIT(2)
+#define SP_TMDS_RST BIT(1)
+#define SP_SW_MAN_RST BIT(0)
+
+/* System Status Register */
+#define SP_SYSTEM_STATUS_REG 0x14
+#define SP_TMDS_CLOCK_DET BIT(1)
+#define SP_TMDS_DE_DET BIT(0)
+
+/* HDMI Status Register */
+#define SP_HDMI_STATUS_REG 0x15
+#define SP_HDMI_AUD_LAYOUT BIT(3)
+#define SP_HDMI_DET BIT(0)
+# define SP_DVI_MODE 0
+# define SP_HDMI_MODE 1
+
+/* HDMI Mute Control Register */
+#define SP_HDMI_MUTE_CTRL_REG 0x16
+#define SP_AUD_MUTE BIT(1)
+#define SP_VID_MUTE BIT(0)
+
+/* System Power Down Register 1 */
+#define SP_SYSTEM_POWER_DOWN1_REG 0x18
+#define SP_PWDN_CTRL BIT(0)
+
+/*
+ * Audio and Video Auto Control
+ */
+
+/* Auto Audio and Video Control register */
+#define SP_AUDVID_CTRL_REG 0x20
+#define SP_AVC_OE BIT(7)
+#define SP_AAC_OE BIT(6)
+#define SP_AVC_EN BIT(1)
+#define SP_AAC_EN BIT(0)
+
+/* Audio Exception Enable Registers */
+#define SP_AUD_EXCEPTION_ENABLE_BASE (0x24 - 1)
+/* Bits for Audio Exception Enable Register 3 */
+#define SP_AEC_EN21 BIT(5)
+
+/*
+ * Interrupt
+ */
+
+/* Interrupt Status Register 1 */
+#define SP_INT_STATUS1_REG 0x31
+/* Bits for Interrupt Status Register 1 */
+#define SP_HDMI_DVI BIT(7)
+#define SP_CKDT_CHG BIT(6)
+#define SP_SCDT_CHG BIT(5)
+#define SP_PCLK_CHG BIT(4)
+#define SP_PLL_UNLOCK BIT(3)
+#define SP_CABLE_PLUG_CHG BIT(2)
+#define SP_SET_MUTE BIT(1)
+#define SP_SW_INTR BIT(0)
+/* Bits for Interrupt Status Register 2 */
+#define SP_HDCP_ERR BIT(5)
+#define SP_AUDIO_SAMPLE_CHG BIT(0) /* undocumented */
+/* Bits for Interrupt Status Register 3 */
+#define SP_AUD_MODE_CHG BIT(0)
+/* Bits for Interrupt Status Register 5 */
+#define SP_AUDIO_RCV BIT(0)
+/* Bits for Interrupt Status Register 6 */
+#define SP_INT_STATUS6_REG 0x36
+#define SP_CTS_RCV BIT(7)
+#define SP_NEW_AUD_PKT BIT(4)
+#define SP_NEW_AVI_PKT BIT(1)
+#define SP_NEW_CP_PKT BIT(0)
+/* Bits for Interrupt Status Register 7 */
+#define SP_NO_VSI BIT(7)
+#define SP_NEW_VS BIT(4)
+
+/* Interrupt Mask 1 Status Registers */
+#define SP_INT_MASK1_REG 0x41
+
+/* HDMI US TIMER Control Register */
+#define SP_HDMI_US_TIMER_CTRL_REG 0x49
+#define SP_MS_TIMER_MARGIN_10_8_MASK 0x07
+
+/*
+ * TMDS Control
+ */
+
+/* TMDS Control Registers */
+#define SP_TMDS_CTRL_BASE (0x50 - 1)
+/* Bits for TMDS Control Register 7 */
+#define SP_PD_RT BIT(0)
+
+/*
+ * Video Control
+ */
+
+/* Video Status Register */
+#define SP_VIDEO_STATUS_REG 0x70
+#define SP_COLOR_DEPTH_MASK 0xf0
+#define SP_COLOR_DEPTH_SHIFT 4
+# define SP_COLOR_DEPTH_MODE_LEGACY 0x00
+# define SP_COLOR_DEPTH_MODE_24BIT 0x04
+# define SP_COLOR_DEPTH_MODE_30BIT 0x05
+# define SP_COLOR_DEPTH_MODE_36BIT 0x06
+# define SP_COLOR_DEPTH_MODE_48BIT 0x07
+
+/* Video Data Range Control Register */
+#define SP_VID_DATA_RANGE_CTRL_REG 0x83
+#define SP_R2Y_INPUT_LIMIT BIT(1)
+
+/* Pixel Clock High Resolution Counter Registers */
+#define SP_PCLK_HIGHRES_CNT_BASE (0x8c - 1)
+
+/*
+ * Audio Control
+ */
+
+/* Number of Audio Channels Status Registers */
+#define SP_AUD_CH_STATUS_REG_NUM 6
+
+/* Audio IN S/PDIF Channel Status Registers */
+#define SP_AUD_SPDIF_CH_STATUS_BASE 0xc7
+
+/* Audio IN S/PDIF Channel Status Register 4 */
+#define SP_FS_FREQ_MASK 0x0f
+# define SP_FS_FREQ_44100HZ 0x00
+# define SP_FS_FREQ_48000HZ 0x02
+# define SP_FS_FREQ_32000HZ 0x03
+# define SP_FS_FREQ_88200HZ 0x08
+# define SP_FS_FREQ_96000HZ 0x0a
+# define SP_FS_FREQ_176400HZ 0x0c
+# define SP_FS_FREQ_192000HZ 0x0e
+
+/*
+ * Miscellaneous Control Block
+ */
+
+/* CHIP Control Register */
+#define SP_CHIP_CTRL_REG 0xe3
+#define SP_MAN_HDMI5V_DET BIT(3)
+#define SP_PLLLOCK_CKDT_EN BIT(2)
+#define SP_ANALOG_CKDT_EN BIT(1)
+#define SP_DIGITAL_CKDT_EN BIT(0)
+
+/* Packet Receiving Status Register */
+#define SP_PACKET_RECEIVING_STATUS_REG 0xf3
+#define SP_AVI_RCVD BIT(5)
+#define SP_VSI_RCVD BIT(1)
+
+/***************************************************************/
+/* Register definitions for RX_P1 */
+/***************************************************************/
+
+/* HDCP BCAPS Shadow Register */
+#define SP_HDCP_BCAPS_SHADOW_REG 0x2a
+#define SP_BCAPS_REPEATER BIT(5)
+
+/* HDCP Status Register */
+#define SP_RX_HDCP_STATUS_REG 0x3f
+#define SP_AUTH_EN BIT(4)
+
+/*
+ * InfoFrame and Control Packet Registers
+ */
+
+/* AVI InfoFrame packet checksum */
+#define SP_AVI_INFOFRAME_CHECKSUM 0xa3
+
+/* AVI InfoFrame Registers */
+#define SP_AVI_INFOFRAME_DATA_BASE 0xa4
+
+#define SP_AVI_COLOR_F_MASK 0x60
+#define SP_AVI_COLOR_F_SHIFT 5
+
+/* Audio InfoFrame Registers */
+#define SP_AUD_INFOFRAME_DATA_BASE 0xc4
+#define SP_AUD_INFOFRAME_LAYOUT_MASK 0x0f
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
+#define SP_MPEG_VS_INFOFRAME_TYPE_REG 0xe0
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
+#define SP_MPEG_VS_INFOFRAME_LEN_REG 0xe2
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
+#define SP_MPEG_VS_INFOFRAME_VER_REG 0xe1
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
+#define SP_MPEG_VS_INFOFRAME_DATA_BASE 0xe4
+
+/* General Control Packet Register */
+#define SP_GENERAL_CTRL_PACKET_REG 0x9f
+#define SP_CLEAR_AVMUTE BIT(4)
+#define SP_SET_AVMUTE BIT(0)
+
+/***************************************************************/
+/* Register definitions for TX_P1 */
+/***************************************************************/
+
+/* DP TX Link Training Control Register */
+#define SP_DP_TX_LT_CTRL0_REG 0x30
+
+/* DP 1.2 Link Training 80-bit Pattern Register */
+#define SP_DP_LT_80BIT_PATTERN0_REG 0x80
+#define SP_DP_LT_80BIT_PATTERN_REG_NUM 10
+
+/* Audio Interface Control Register 0 */
+#define SP_AUD_INTERFACE_CTRL0_REG 0x5f
+#define SP_AUD_INTERFACE_DISABLE 0x80
+
+/* Audio Interface Control Register 2 */
+#define SP_AUD_INTERFACE_CTRL2_REG 0x60
+#define SP_M_AUD_ADJUST_ST 0x04
+
+/* Audio Interface Control Register 3 */
+#define SP_AUD_INTERFACE_CTRL3_REG 0x62
+
+/* Audio Interface Control Register 4 */
+#define SP_AUD_INTERFACE_CTRL4_REG 0x67
+
+/* Audio Interface Control Register 5 */
+#define SP_AUD_INTERFACE_CTRL5_REG 0x68
+
+/* Audio Interface Control Register 6 */
+#define SP_AUD_INTERFACE_CTRL6_REG 0x69
+
+/* Firmware Version Register */
+#define SP_FW_VER_REG 0xb7
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
new file mode 100644
index 000000000000..fe40bab21530
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/regmap.h>
+
+#include <drm/drm.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "analogix-i2c-dptx.h"
+
+#define AUX_WAIT_TIMEOUT_MS 15
+#define AUX_CH_BUFFER_SIZE 16
+
+static int anx_i2c_dp_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, 0);
+}
+
+static bool anx_dp_aux_op_finished(struct regmap *map_dptx)
+{
+ unsigned int value;
+ int err;
+
+ err = regmap_read(map_dptx, SP_DP_AUX_CH_CTRL2_REG, &value);
+ if (err < 0)
+ return false;
+
+ return (value & SP_AUX_EN) == 0;
+}
+
+static int anx_dp_aux_wait(struct regmap *map_dptx)
+{
+ unsigned long timeout;
+ unsigned int status;
+ int err;
+
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ while (!anx_dp_aux_op_finished(map_dptx)) {
+ if (time_after(jiffies, timeout)) {
+ if (!anx_dp_aux_op_finished(map_dptx)) {
+ DRM_ERROR("Timed out waiting AUX to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ /* Read the AUX channel access status */
+ err = regmap_read(map_dptx, SP_AUX_CH_STATUS_REG, &status);
+ if (err < 0) {
+ DRM_ERROR("Failed to read from AUX channel: %d\n", err);
+ return err;
+ }
+
+ if (status & SP_AUX_STATUS) {
+ DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
+ status);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int anx_dp_aux_address(struct regmap *map_dptx, unsigned int addr)
+{
+ int err;
+
+ err = regmap_write(map_dptx, SP_AUX_ADDR_7_0_REG, addr & 0xff);
+ if (err)
+ return err;
+
+ err = regmap_write(map_dptx, SP_AUX_ADDR_15_8_REG,
+ (addr & 0xff00) >> 8);
+ if (err)
+ return err;
+
+ /*
+ * DP AUX CH Address Register #2, only update bits[3:0]
+ * [7:4] RESERVED
+ * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
+ */
+ err = regmap_update_bits(map_dptx, SP_AUX_ADDR_19_16_REG,
+ SP_AUX_ADDR_19_16_MASK,
+ (addr & 0xf0000) >> 16);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
+ struct drm_dp_aux_msg *msg)
+{
+ u8 ctrl1 = msg->request;
+ u8 ctrl2 = SP_AUX_EN;
+ u8 *buffer = msg->buffer;
+ int err;
+
+ /* The DP AUX transmit and receive buffer has 16 bytes. */
+ if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
+ return -E2BIG;
+
+ /* Zero-sized messages specify address-only transactions. */
+ if (msg->size < 1)
+ ctrl2 |= SP_ADDR_ONLY;
+ else /* For non-zero-sized set the length field. */
+ ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
+
+ if ((msg->size > 0) && ((msg->request & DP_AUX_I2C_READ) == 0)) {
+ /* When WRITE | MOT write values to data buffer */
+ err = regmap_bulk_write(map_dptx,
+ SP_DP_BUF_DATA0_REG, buffer,
+ msg->size);
+ if (err)
+ return err;
+ }
+
+ /* Write address and request */
+ err = anx_dp_aux_address(map_dptx, msg->address);
+ if (err)
+ return err;
+
+ err = regmap_write(map_dptx, SP_DP_AUX_CH_CTRL1_REG, ctrl1);
+ if (err)
+ return err;
+
+ /* Start transaction */
+ err = regmap_update_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
+ SP_ADDR_ONLY | SP_AUX_EN, ctrl2);
+ if (err)
+ return err;
+
+ err = anx_dp_aux_wait(map_dptx);
+ if (err)
+ return err;
+
+ msg->reply = DP_AUX_I2C_REPLY_ACK;
+
+ if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
+ /* Read values from data buffer */
+ err = regmap_bulk_read(map_dptx,
+ SP_DP_BUF_DATA0_REG, buffer,
+ msg->size);
+ if (err)
+ return err;
+ }
+
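+ /* Clear the address-only flag so subsequent transfers run normally */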
+ err = anx_i2c_dp_clear_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
+ SP_ADDR_ONLY);
+ if (err)
+ return err;
+
+ return msg->size;
+}
+EXPORT_SYMBOL_GPL(anx_dp_aux_transfer);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h
new file mode 100644
index 000000000000..663c4bea6e70
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#ifndef _ANALOGIX_I2C_DPTX_H_
+#define _ANALOGIX_I2C_DPTX_H_
+
+/***************************************************************/
+/* Register definitions for TX_P0 */
+/***************************************************************/
+
+/* HDCP Status Register */
+#define SP_TX_HDCP_STATUS_REG 0x00
+#define SP_AUTH_FAIL BIT(5)
+#define SP_AUTHEN_PASS BIT(1)
+
+/* HDCP Control Register 0 */
+#define SP_HDCP_CTRL0_REG 0x01
+#define SP_RX_REPEATER BIT(6)
+#define SP_RE_AUTH BIT(5)
+#define SP_SW_AUTH_OK BIT(4)
+#define SP_HARD_AUTH_EN BIT(3)
+#define SP_HDCP_ENC_EN BIT(2)
+#define SP_BKSV_SRM_PASS BIT(1)
+#define SP_KSVLIST_VLD BIT(0)
+/* HDCP Function Enabled */
+#define SP_HDCP_FUNCTION_ENABLED (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* HDCP Receiver BSTATUS Register 0 */
+#define SP_HDCP_RX_BSTATUS0_REG 0x1b
+/* HDCP Receiver BSTATUS Register 1 */
+#define SP_HDCP_RX_BSTATUS1_REG 0x1c
+
+/* HDCP Embedded "Blue Screen" Content Registers */
+#define SP_HDCP_VID0_BLUE_SCREEN_REG 0x2c
+#define SP_HDCP_VID1_BLUE_SCREEN_REG 0x2d
+#define SP_HDCP_VID2_BLUE_SCREEN_REG 0x2e
+
+/* HDCP Wait R0 Timing Register */
+#define SP_HDCP_WAIT_R0_TIME_REG 0x40
+
+/* HDCP Link Integrity Check Timer Register */
+#define SP_HDCP_LINK_CHECK_TIMER_REG 0x41
+
+/* HDCP Repeater Ready Wait Timer Register */
+#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG 0x42
+
+/* HDCP Auto Timer Register */
+#define SP_HDCP_AUTO_TIMER_REG 0x51
+
+/* HDCP Key Status Register */
+#define SP_HDCP_KEY_STATUS_REG 0x5e
+
+/* HDCP Key Command Register */
+#define SP_HDCP_KEY_COMMAND_REG 0x5f
+#define SP_DISABLE_SYNC_HDCP BIT(2)
+
+/* OTP Memory Key Protection Registers */
+#define SP_OTP_KEY_PROTECT1_REG 0x60
+#define SP_OTP_KEY_PROTECT2_REG 0x61
+#define SP_OTP_KEY_PROTECT3_REG 0x62
+#define SP_OTP_PSW1 0xa2
+#define SP_OTP_PSW2 0x7e
+#define SP_OTP_PSW3 0xc6
+
+/* DP System Control Registers */
+#define SP_DP_SYSTEM_CTRL_BASE (0x80 - 1)
+/* Bits for DP System Control Register 2 */
+#define SP_CHA_STA BIT(2)
+/* Bits for DP System Control Register 3 */
+#define SP_HPD_STATUS BIT(6)
+#define SP_HPD_FORCE BIT(5)
+#define SP_HPD_CTRL BIT(4)
+#define SP_STRM_VALID BIT(2)
+#define SP_STRM_FORCE BIT(1)
+#define SP_STRM_CTRL BIT(0)
+/* Bits for DP System Control Register 4 */
+#define SP_ENHANCED_MODE BIT(3)
+
+/* DP Video Control Register */
+#define SP_DP_VIDEO_CTRL_REG 0x84
+#define SP_COLOR_F_MASK 0x06
+#define SP_COLOR_F_SHIFT 1
+#define SP_BPC_MASK 0xe0
+#define SP_BPC_SHIFT 5
+# define SP_BPC_6BITS 0x00
+# define SP_BPC_8BITS 0x01
+# define SP_BPC_10BITS 0x02
+# define SP_BPC_12BITS 0x03
+
+/* DP Audio Control Register */
+#define SP_DP_AUDIO_CTRL_REG 0x87
+#define SP_AUD_EN BIT(0)
+
+/* 10us Pulse Generate Timer Registers */
+#define SP_I2C_GEN_10US_TIMER0_REG 0x88
+#define SP_I2C_GEN_10US_TIMER1_REG 0x89
+
+/* Packet Send Control Register */
+#define SP_PACKET_SEND_CTRL_REG 0x90
+#define SP_AUD_IF_UP BIT(7)
+#define SP_AVI_IF_UD BIT(6)
+#define SP_MPEG_IF_UD BIT(5)
+#define SP_SPD_IF_UD BIT(4)
+#define SP_AUD_IF_EN BIT(3)
+#define SP_AVI_IF_EN BIT(2)
+#define SP_MPEG_IF_EN BIT(1)
+#define SP_SPD_IF_EN BIT(0)
+
+/* DP HDCP Control Register */
+#define SP_DP_HDCP_CTRL_REG 0x92
+#define SP_AUTO_EN BIT(7)
+#define SP_AUTO_START BIT(5)
+#define SP_LINK_POLLING BIT(1)
+
+/* DP Main Link Bandwidth Setting Register */
+#define SP_DP_MAIN_LINK_BW_SET_REG 0xa0
+#define SP_LINK_BW_SET_MASK 0x1f
+#define SP_INITIAL_SLIM_M_AUD_SEL BIT(5)
+
+/* DP Lane Count Setting Register */
+#define SP_DP_LANE_COUNT_SET_REG 0xa1
+
+/* DP Training Pattern Set Register */
+#define SP_DP_TRAINING_PATTERN_SET_REG 0xa2
+
+/* DP Lane 0 Link Training Control Register */
+#define SP_DP_LANE0_LT_CTRL_REG 0xa3
+#define SP_TX_SW_SET_MASK 0x1b
+#define SP_MAX_PRE_REACH BIT(5)
+#define SP_MAX_DRIVE_REACH BIT(4)
+#define SP_PRE_EMP_LEVEL1 BIT(3)
+#define SP_DRVIE_CURRENT_LEVEL1 BIT(0)
+
+/* DP Link Training Control Register */
+#define SP_DP_LT_CTRL_REG 0xa8
+#define SP_DP_LT_INPROGRESS 0x80
+#define SP_LT_ERROR_TYPE_MASK 0x70
+# define SP_LT_NO_ERROR 0x00
+# define SP_LT_AUX_WRITE_ERROR 0x01
+# define SP_LT_MAX_DRIVE_REACHED 0x02
+# define SP_LT_WRONG_LANE_COUNT_SET 0x03
+# define SP_LT_LOOP_SAME_5_TIME 0x04
+# define SP_LT_CR_FAIL_IN_EQ 0x05
+# define SP_LT_EQ_LOOP_5_TIME 0x06
+#define SP_LT_EN BIT(0)
+
+/* DP CEP Training Control Registers */
+#define SP_DP_CEP_TRAINING_CTRL0_REG 0xa9
+#define SP_DP_CEP_TRAINING_CTRL1_REG 0xaa
+
+/* DP Debug Register 1 */
+#define SP_DP_DEBUG1_REG 0xb0
+#define SP_DEBUG_PLL_LOCK BIT(4)
+#define SP_POLLING_EN BIT(1)
+
+/* DP Polling Control Register */
+#define SP_DP_POLLING_CTRL_REG 0xb4
+#define SP_AUTO_POLLING_DISABLE BIT(0)
+
+/* DP Link Debug Control Register */
+#define SP_DP_LINK_DEBUG_CTRL_REG 0xb8
+#define SP_M_VID_DEBUG BIT(5)
+#define SP_NEW_PRBS7 BIT(4)
+#define SP_INSERT_ER BIT(1)
+#define SP_PRBS31_EN BIT(0)
+
+/* AUX Misc control Register */
+#define SP_AUX_MISC_CTRL_REG 0xbf
+
+/* DP PLL control Register */
+#define SP_DP_PLL_CTRL_REG 0xc7
+#define SP_PLL_RST BIT(6)
+
+/* DP Analog Power Down Register */
+#define SP_DP_ANALOG_POWER_DOWN_REG 0xc8
+#define SP_CH0_PD BIT(0)
+
+/* DP Misc Control Register */
+#define SP_DP_MISC_CTRL_REG 0xcd
+#define SP_EQ_TRAINING_LOOP BIT(6)
+
+/* DP Extra I2C Device Address Register */
+#define SP_DP_EXTRA_I2C_DEV_ADDR_REG 0xce
+#define SP_I2C_STRETCH_DISABLE BIT(7)
+
+#define SP_I2C_EXTRA_ADDR 0x50
+
+/* DP Downspread Control Register 1 */
+#define SP_DP_DOWNSPREAD_CTRL1_REG 0xd0
+
+/* DP M Value Calculation Control Register */
+#define SP_DP_M_CALCULATION_CTRL_REG 0xd9
+#define SP_M_GEN_CLK_SEL BIT(0)
+
+/* AUX Channel Access Status Register */
+#define SP_AUX_CH_STATUS_REG 0xe0
+#define SP_AUX_STATUS 0x0f
+
+/* AUX Channel DEFER Control Register */
+#define SP_AUX_DEFER_CTRL_REG 0xe2
+#define SP_DEFER_CTRL_EN BIT(7)
+
+/* DP Buffer Data Count Register */
+#define SP_BUF_DATA_COUNT_REG 0xe4
+#define SP_BUF_DATA_COUNT_MASK 0x1f
+#define SP_BUF_CLR BIT(7)
+
+/* DP AUX Channel Control Register 1 */
+#define SP_DP_AUX_CH_CTRL1_REG 0xe5
+#define SP_AUX_TX_COMM_MASK 0x0f
+#define SP_AUX_LENGTH_MASK 0xf0
+#define SP_AUX_LENGTH_SHIFT 4
+
+/* DP AUX CH Address Register 0 */
+#define SP_AUX_ADDR_7_0_REG 0xe6
+
+/* DP AUX CH Address Register 1 */
+#define SP_AUX_ADDR_15_8_REG 0xe7
+
+/* DP AUX CH Address Register 2 */
+#define SP_AUX_ADDR_19_16_REG 0xe8
+#define SP_AUX_ADDR_19_16_MASK 0x0f
+
+/* DP AUX Channel Control Register 2 */
+#define SP_DP_AUX_CH_CTRL2_REG 0xe9
+#define SP_AUX_SEL_RXCM BIT(6)
+#define SP_AUX_CHSEL BIT(3)
+#define SP_AUX_PN_INV BIT(2)
+#define SP_ADDR_ONLY BIT(1)
+#define SP_AUX_EN BIT(0)
+
+/* DP Video Stream Control InfoFrame Register */
+#define SP_DP_3D_VSC_CTRL_REG 0xea
+#define SP_INFO_FRAME_VSC_EN BIT(0)
+
+/* DP Video Stream Data Byte 1 Register */
+#define SP_DP_VSC_DB1_REG 0xeb
+
+/* DP AUX Channel Control Register 3 */
+#define SP_DP_AUX_CH_CTRL3_REG 0xec
+#define SP_WAIT_COUNTER_7_0_MASK 0xff
+
+/* DP AUX Channel Control Register 4 */
+#define SP_DP_AUX_CH_CTRL4_REG 0xed
+
+/* DP AUX Buffer Data Registers */
+#define SP_DP_BUF_DATA0_REG 0xf0
+
+ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
+ struct drm_dp_aux_msg *msg);
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h b/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h
new file mode 100644
index 000000000000..3c843497d835
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ */
+#ifndef _ANALOGIX_I2C_TXCOMMON_H_
+#define _ANALOGIX_I2C_TXCOMMON_H_
+
+/***************************************************************/
+/* Register definitions for TX_P2 */
+/***************************************************************/
+
+/*
+ * Core Register Definitions
+ */
+
+/* Device ID Low Byte Register */
+#define SP_DEVICE_IDL_REG 0x02
+
+/* Device ID High Byte Register */
+#define SP_DEVICE_IDH_REG 0x03
+
+/* Device version register */
+#define SP_DEVICE_VERSION_REG 0x04
+
+/* Power Down Control Register */
+#define SP_POWERDOWN_CTRL_REG 0x05
+#define SP_REGISTER_PD BIT(7)
+#define SP_HDCP_PD BIT(5)
+#define SP_AUDIO_PD BIT(4)
+#define SP_VIDEO_PD BIT(3)
+#define SP_LINK_PD BIT(2)
+#define SP_TOTAL_PD BIT(1)
+
+/* Reset Control Register 1 */
+#define SP_RESET_CTRL1_REG 0x06
+#define SP_MISC_RST BIT(7)
+#define SP_VIDCAP_RST BIT(6)
+#define SP_VIDFIF_RST BIT(5)
+#define SP_AUDFIF_RST BIT(4)
+#define SP_AUDCAP_RST BIT(3)
+#define SP_HDCP_RST BIT(2)
+#define SP_SW_RST BIT(1)
+#define SP_HW_RST BIT(0)
+
+/* Reset Control Register 2 */
+#define SP_RESET_CTRL2_REG 0x07
+#define SP_AUX_RST BIT(2)
+#define SP_SERDES_FIFO_RST BIT(1)
+#define SP_I2C_REG_RST BIT(0)
+
+/* Video Control Register 1 */
+#define SP_VID_CTRL1_REG 0x08
+#define SP_VIDEO_EN BIT(7)
+#define SP_VIDEO_MUTE BIT(2)
+#define SP_DE_GEN BIT(1)
+#define SP_DEMUX BIT(0)
+
+/* Video Control Register 2 */
+#define SP_VID_CTRL2_REG 0x09
+#define SP_IN_COLOR_F_MASK 0x03
+#define SP_IN_YC_BIT_SEL BIT(2)
+#define SP_IN_BPC_MASK 0x70
+#define SP_IN_BPC_SHIFT 4
+# define SP_IN_BPC_12BIT 0x03
+# define SP_IN_BPC_10BIT 0x02
+# define SP_IN_BPC_8BIT 0x01
+# define SP_IN_BPC_6BIT 0x00
+#define SP_IN_D_RANGE BIT(7)
+
+/* Video Control Register 3 */
+#define SP_VID_CTRL3_REG 0x0a
+#define SP_HPD_OUT BIT(6)
+
+/* Video Control Register 5 */
+#define SP_VID_CTRL5_REG 0x0c
+#define SP_CSC_STD_SEL BIT(7)
+#define SP_XVYCC_RNG_LMT BIT(6)
+#define SP_RANGE_Y2R BIT(5)
+#define SP_CSPACE_Y2R BIT(4)
+#define SP_RGB_RNG_LMT BIT(3)
+#define SP_Y_RNG_LMT BIT(2)
+#define SP_RANGE_R2Y BIT(1)
+#define SP_CSPACE_R2Y BIT(0)
+
+/* Video Control Register 6 */
+#define SP_VID_CTRL6_REG 0x0d
+#define SP_TEST_PATTERN_EN BIT(7)
+#define SP_VIDEO_PROCESS_EN BIT(6)
+#define SP_VID_US_MODE BIT(3)
+#define SP_VID_DS_MODE BIT(2)
+#define SP_UP_SAMPLE BIT(1)
+#define SP_DOWN_SAMPLE BIT(0)
+
+/* Video Control Register 8 */
+#define SP_VID_CTRL8_REG 0x0f
+#define SP_VID_VRES_TH BIT(0)
+
+/* Total Line Status Low Byte Register */
+#define SP_TOTAL_LINE_STAL_REG 0x24
+
+/* Total Line Status High Byte Register */
+#define SP_TOTAL_LINE_STAH_REG 0x25
+
+/* Active Line Status Low Byte Register */
+#define SP_ACT_LINE_STAL_REG 0x26
+
+/* Active Line Status High Byte Register */
+#define SP_ACT_LINE_STAH_REG 0x27
+
+/* Vertical Front Porch Status Register */
+#define SP_V_F_PORCH_STA_REG 0x28
+
+/* Vertical SYNC Width Status Register */
+#define SP_V_SYNC_STA_REG 0x29
+
+/* Vertical Back Porch Status Register */
+#define SP_V_B_PORCH_STA_REG 0x2a
+
+/* Total Pixel Status Low Byte Register */
+#define SP_TOTAL_PIXEL_STAL_REG 0x2b
+
+/* Total Pixel Status High Byte Register */
+#define SP_TOTAL_PIXEL_STAH_REG 0x2c
+
+/* Active Pixel Status Low Byte Register */
+#define SP_ACT_PIXEL_STAL_REG 0x2d
+
+/* Active Pixel Status High Byte Register */
+#define SP_ACT_PIXEL_STAH_REG 0x2e
+
+/* Horizontal Front Porch Status Low Byte Register */
+#define SP_H_F_PORCH_STAL_REG 0x2f
+
+/* Horizontal Front Porch Status High Byte Register */
+#define SP_H_F_PORCH_STAH_REG 0x30
+
+/* Horizontal SYNC Width Status Low Byte Register */
+#define SP_H_SYNC_STAL_REG 0x31
+
+/* Horizontal SYNC Width Status High Byte Register */
+#define SP_H_SYNC_STAH_REG 0x32
+
+/* Horizontal Back Porch Status Low Byte Register */
+#define SP_H_B_PORCH_STAL_REG 0x33
+
+/* Horizontal Back Porch Status High Byte Register */
+#define SP_H_B_PORCH_STAH_REG 0x34
+
+/* InfoFrame AVI Packet DB1 Register */
+#define SP_INFOFRAME_AVI_DB1_REG 0x70
+
+/* Bit Control Specific Register */
+#define SP_BIT_CTRL_SPECIFIC_REG 0x80
+#define SP_BIT_CTRL_SELECT_SHIFT 1
+#define SP_ENABLE_BIT_CTRL BIT(0)
+
+/* InfoFrame Audio Packet DB1 Register */
+#define SP_INFOFRAME_AUD_DB1_REG 0x83
+
+/* InfoFrame MPEG Packet DB1 Register */
+#define SP_INFOFRAME_MPEG_DB1_REG 0xb0
+
+/* Audio Channel Status Registers */
+#define SP_AUD_CH_STATUS_BASE 0xd0
+
+/* Audio Channel Num Register 5 */
+#define SP_I2S_CHANNEL_NUM_MASK 0xe0
+# define SP_I2S_CH_NUM_1 (0x00 << 5)
+# define SP_I2S_CH_NUM_2 (0x01 << 5)
+# define SP_I2S_CH_NUM_3 (0x02 << 5)
+# define SP_I2S_CH_NUM_4 (0x03 << 5)
+# define SP_I2S_CH_NUM_5 (0x04 << 5)
+# define SP_I2S_CH_NUM_6 (0x05 << 5)
+# define SP_I2S_CH_NUM_7 (0x06 << 5)
+# define SP_I2S_CH_NUM_8 (0x07 << 5)
+#define SP_EXT_VUCP BIT(2)
+#define SP_VBIT BIT(1)
+#define SP_AUDIO_LAYOUT BIT(0)
+
+/* Analog Debug Register 1 */
+#define SP_ANALOG_DEBUG1_REG 0xdc
+
+/* Analog Debug Register 2 */
+#define SP_ANALOG_DEBUG2_REG 0xdd
+#define SP_FORCE_SW_OFF_BYPASS 0x20
+#define SP_XTAL_FRQ 0x1c
+# define SP_XTAL_FRQ_19M2 (0x00 << 2)
+# define SP_XTAL_FRQ_24M (0x01 << 2)
+# define SP_XTAL_FRQ_25M (0x02 << 2)
+# define SP_XTAL_FRQ_26M (0x03 << 2)
+# define SP_XTAL_FRQ_27M (0x04 << 2)
+# define SP_XTAL_FRQ_38M4 (0x05 << 2)
+# define SP_XTAL_FRQ_52M (0x06 << 2)
+#define SP_POWERON_TIME_1P5MS 0x03
+
+/* Analog Control 0 Register */
+#define SP_ANALOG_CTRL0_REG 0xe1
+
+/* Common Interrupt Status Register 1 */
+#define SP_COMMON_INT_STATUS_BASE (0xf1 - 1)
+#define SP_PLL_LOCK_CHG 0x40
+
+/* Common Interrupt Status Register 2 */
+#define SP_COMMON_INT_STATUS2 0xf2
+#define SP_HDCP_AUTH_CHG BIT(1)
+#define SP_HDCP_AUTH_DONE BIT(0)
+
+#define SP_HDCP_LINK_CHECK_FAIL BIT(0)
+
+/* Common Interrupt Status Register 4 */
+#define SP_COMMON_INT_STATUS4_REG 0xf4
+#define SP_HPD_IRQ BIT(6)
+#define SP_HPD_ESYNC_ERR BIT(4)
+#define SP_HPD_CHG BIT(2)
+#define SP_HPD_LOST BIT(1)
+#define SP_HPD_PLUG BIT(0)
+
+/* DP Interrupt Status Register */
+#define SP_DP_INT_STATUS1_REG 0xf7
+#define SP_TRAINING_FINISH BIT(5)
+#define SP_POLLING_ERR BIT(4)
+
+/* Common Interrupt Mask Register */
+#define SP_COMMON_INT_MASK_BASE (0xf8 - 1)
+
+#define SP_COMMON_INT_MASK4_REG 0xfb
+
+/* DP Interrupts Mask Register */
+#define SP_DP_INT_MASK1_REG 0xfe
+
+/* Interrupt Control Register */
+#define SP_INT_CTRL_REG 0xff
+
+#endif /* _ANALOGIX_I2C_TXCOMMON_H_ */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 3f7f4880be09..6effe532f820 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -19,7 +19,9 @@
#include <linux/platform_device.h>
#include <drm/bridge/analogix_dp.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
@@ -101,63 +103,7 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
return 0;
}
-int analogix_dp_psr_enabled(struct analogix_dp_device *dp)
-{
-
- return dp->psr_enable;
-}
-EXPORT_SYMBOL_GPL(analogix_dp_psr_enabled);
-
-int analogix_dp_enable_psr(struct analogix_dp_device *dp)
-{
- struct dp_sdp psr_vsc;
-
- if (!dp->psr_enable)
- return 0;
-
- /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
-
- psr_vsc.db[0] = 0;
- psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
-
- return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
-}
-EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
-
-int analogix_dp_disable_psr(struct analogix_dp_device *dp)
-{
- struct dp_sdp psr_vsc;
- int ret;
-
- if (!dp->psr_enable)
- return 0;
-
- /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
-
- psr_vsc.db[0] = 0;
- psr_vsc.db[1] = 0;
-
- ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- if (ret != 1) {
- dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
- return ret;
- }
-
- return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
-}
-EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
-
-static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
{
unsigned char psr_version;
int ret;
@@ -165,14 +111,11 @@ static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
if (ret != 1) {
dev_err(dp->dev, "failed to get PSR version, disable it\n");
- return ret;
+ return false;
}
dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
-
- dp->psr_enable = (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
-
- return 0;
+ return psr_version & DP_PSR_IS_SUPPORTED;
}
static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
@@ -195,7 +138,7 @@ static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
}
/* Main-Link transmitter remains active during PSR active states */
- psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
+ psr_en = DP_PSR_CRC_VERIFICATION;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
if (ret != 1) {
dev_err(dp->dev, "failed to set panel psr\n");
@@ -203,8 +146,7 @@ static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
}
/* Enable psr function */
- psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
- DP_PSR_CRC_VERIFICATION;
+ psr_en = DP_PSR_ENABLE | DP_PSR_CRC_VERIFICATION;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
if (ret != 1) {
dev_err(dp->dev, "failed to set panel psr\n");
@@ -213,10 +155,11 @@ static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
analogix_dp_enable_psr_crc(dp);
+ dp->psr_supported = true;
+
return 0;
end:
dev_err(dp->dev, "enable psr fail, force to disable psr\n");
- dp->psr_enable = false;
return ret;
}
@@ -1031,24 +974,90 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
}
- ret = analogix_dp_detect_sink_psr(dp);
+ /* Check whether panel supports fast training */
+ ret = analogix_dp_fast_link_train_detection(dp);
if (ret)
return ret;
- if (dp->psr_enable) {
+ if (analogix_dp_detect_sink_psr(dp)) {
ret = analogix_dp_enable_sink_psr(dp);
if (ret)
return ret;
}
- /* Check whether panel supports fast training */
- ret = analogix_dp_fast_link_train_detection(dp);
- if (ret)
- dp->psr_enable = false;
+ return ret;
+}
+
+static int analogix_dp_enable_psr(struct analogix_dp_device *dp)
+{
+ struct dp_sdp psr_vsc;
+ int ret;
+ u8 sink;
+
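+ /* Nothing to do if the sink already reports PSR active (displaying from its RFB) */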
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &sink);
+ if (ret != 1)
+ DRM_DEV_ERROR(dp->dev, "Failed to read psr status %d\n", ret);
+ else if (sink == DP_PSR_SINK_ACTIVE_RFB)
+ return 0;
+
+ /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ psr_vsc.db[0] = 0;
+ psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
+
+ ret = analogix_dp_send_psr_spd(dp, &psr_vsc, true);
+ if (!ret)
+ analogix_dp_set_analog_power_down(dp, POWER_ALL, true);
return ret;
}
+static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
+{
+ struct dp_sdp psr_vsc;
+ int ret;
+ u8 sink;
+
+ analogix_dp_set_analog_power_down(dp, POWER_ALL, false);
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (ret != 1) {
+ DRM_DEV_ERROR(dp->dev, "Failed to set DP Power0 %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &sink);
+ if (ret != 1) {
+ DRM_DEV_ERROR(dp->dev, "Failed to read psr status %d\n", ret);
+ return ret;
+ } else if (sink == DP_PSR_SINK_INACTIVE) {
+ DRM_DEV_ERROR(dp->dev, "sink inactive, skip disable psr");
+ return 0;
+ }
+
+ ret = analogix_dp_train_link(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to train the link %d\n", ret);
+ return ret;
+ }
+
+ /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+
+ psr_vsc.db[0] = 0;
+ psr_vsc.db[1] = 0;
+
+ return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
+}
+
/*
* This function is a bit of a catch-all for panel preparation, hopefully
* simplifying the logic of functions that need to prepare/unprepare the panel
@@ -1102,7 +1111,7 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
int ret, num_modes = 0;
if (dp->plat_data->panel) {
- num_modes += drm_panel_get_modes(dp->plat_data->panel);
+ num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
ret = analogix_dp_prepare_panel(dp, true, false);
if (ret) {
@@ -1139,9 +1148,37 @@ analogix_dp_best_encoder(struct drm_connector *connector)
return dp->encoder;
}
+
+static int analogix_dp_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct analogix_dp_device *dp = to_dp(connector);
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return -ENODEV;
+
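+ /* This connector handles self-refresh (PSR) entry/exit itself */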
+ conn_state->self_refresh_aware = true;
+
+ if (!conn_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (!crtc_state)
+ return 0;
+
+ if (crtc_state->self_refresh_active && !dp->psr_supported)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs analogix_dp_connector_helper_funcs = {
.get_modes = analogix_dp_get_modes,
.best_encoder = analogix_dp_best_encoder,
+ .atomic_check = analogix_dp_atomic_check,
};
static enum drm_connector_status
@@ -1233,11 +1270,42 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
return 0;
}
-static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
+static
+struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
+ struct drm_atomic_state *state)
+{
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int ret;
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ /* Don't touch the panel if we're coming back from PSR */
+ if (old_crtc_state && old_crtc_state->self_refresh_active)
+ return;
+
ret = analogix_dp_prepare_panel(dp, true, true);
if (ret)
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
@@ -1298,10 +1366,27 @@ out_dp_clk_pre:
return ret;
}
-static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
+ int ret;
+
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ /* Not a full enable, just disable PSR and continue */
+ if (old_crtc_state && old_crtc_state->self_refresh_active) {
+ ret = analogix_dp_disable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to disable psr %d\n", ret);
+ return;
+ }
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
@@ -1350,11 +1435,56 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
if (ret)
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
- dp->psr_enable = false;
dp->fast_train_enable = false;
+ dp->psr_supported = false;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
+static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
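+ /* Without atomic state for this connector, fall back to a full disable */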
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ goto out;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state)
+ goto out;
+
+ /* Don't do a full disable on PSR transitions */
+ if (new_crtc_state->self_refresh_active)
+ return;
+
+out:
+ analogix_dp_bridge_disable(bridge);
+}
+
+static
+void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ int ret;
+
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ return;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state || !new_crtc_state->self_refresh_active)
+ return;
+
+ ret = analogix_dp_enable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to enable psr (%d)\n", ret);
+}
+
static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
@@ -1432,16 +1562,11 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
video->interlaced = true;
}
-static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
-{
- /* do nothing */
-}
-
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
- .pre_enable = analogix_dp_bridge_pre_enable,
- .enable = analogix_dp_bridge_enable,
- .disable = analogix_dp_bridge_disable,
- .post_disable = analogix_dp_bridge_nop,
+ .atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
+ .atomic_enable = analogix_dp_bridge_atomic_enable,
+ .atomic_disable = analogix_dp_bridge_atomic_disable,
+ .atomic_post_disable = analogix_dp_bridge_atomic_post_disable,
.mode_set = analogix_dp_bridge_mode_set,
.attach = analogix_dp_bridge_attach,
};
@@ -1656,8 +1781,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
if (dp->plat_data->panel) {
if (drm_panel_unprepare(dp->plat_data->panel))
DRM_ERROR("failed to turnoff the panel\n");
- if (drm_panel_detach(dp->plat_data->panel))
- DRM_ERROR("failed to detach the panel\n");
+ drm_panel_detach(dp->plat_data->panel);
}
drm_dp_aux_unregister(&dp->aux);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index da058252dcaf..c051502d7fbf 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -171,8 +171,8 @@ struct analogix_dp_device {
int dpms_mode;
struct gpio_desc *hpd_gpiod;
bool force_hpd;
- bool psr_enable;
bool fast_train_enable;
+ bool psr_supported;
struct mutex panel_lock;
bool panel_is_modeset;
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 6166dca6be81..b7c97f060241 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -512,7 +512,7 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
struct cdns_dsi_output *output = &dsi->output;
unsigned int tmp;
bool sync_pulse = false;
- int bpp, nlanes;
+ int bpp;
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
@@ -520,7 +520,6 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
sync_pulse = true;
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
- nlanes = output->dev->lanes;
if (mode_valid_check)
tmp = mode->htotal -
@@ -785,13 +784,12 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
u32 tmp, reg_wakeup, div;
- int bpp, nlanes;
+ int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
mode = &bridge->encoder->crtc->state->adjusted_mode;
- bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
nlanes = output->dev->lanes;
WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
@@ -956,7 +954,8 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
panel = of_drm_find_panel(np);
if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
if (!bridge)
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index d32885b906ae..cc33dc411b9e 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -42,7 +43,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (IS_ERR(vga->ddc))
+ if (!vga->ddc)
goto fallback;
edid = drm_get_edid(connector, vga->ddc);
@@ -84,7 +85,7 @@ dumb_vga_connector_detect(struct drm_connector *connector, bool force)
* wire the DDC pins, or the I2C bus might not be working at
* all.
*/
- if (!IS_ERR(vga->ddc) && drm_probe_ddc(vga->ddc))
+ if (vga->ddc && drm_probe_ddc(vga->ddc))
return connector_status_connected;
return connector_status_unknown;
@@ -111,8 +112,10 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&vga->connector,
&dumb_vga_con_helper_funcs);
- ret = drm_connector_init(bridge->dev, &vga->connector,
- &dumb_vga_con_funcs, DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
+ &dumb_vga_con_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ vga->ddc);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
return ret;
@@ -195,6 +198,7 @@ static int dumb_vga_probe(struct platform_device *pdev)
if (PTR_ERR(vga->ddc) == -ENODEV) {
dev_dbg(&pdev->dev,
"No i2c bus specified. Disabling EDID readout\n");
+ vga->ddc = NULL;
} else {
dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
return PTR_ERR(vga->ddc);
@@ -216,7 +220,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
drm_bridge_remove(&vga->bridge);
- if (!IS_ERR(vga->ddc))
+ if (vga->ddc)
i2c_put_adapter(vga->ddc);
return 0;
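
The NULL-means-absent convention adopted above can also be applied at lookup time. A hedged sketch, where my_vga_retrieve_ddc() is an illustrative helper and the DT property name matches the one the driver already parses:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>

/* Return the DDC adapter, NULL when the optional ddc-i2c-bus property
 * is simply absent, or an ERR_PTR() for real failures. */
static struct i2c_adapter *my_vga_retrieve_ddc(struct device *dev)
{
	struct device_node *phandle;
	struct i2c_adapter *ddc;

	phandle = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
	if (!phandle)
		return NULL;

	ddc = of_get_i2c_adapter_by_node(phandle);
	of_node_put(phandle);

	/* Adapter described but not registered yet: defer the probe. */
	return ddc ?: ERR_PTR(-EPROBE_DEFER);
}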
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
new file mode 100644
index 000000000000..5f04cc11227e
--- /dev/null
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+
+struct lvds_codec {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+ struct gpio_desc *powerdown_gpio;
+ u32 connector_type;
+};
+
+static int lvds_codec_attach(struct drm_bridge *bridge)
+{
+ struct lvds_codec *lvds_codec = container_of(bridge,
+ struct lvds_codec, bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ bridge);
+}
+
+static void lvds_codec_enable(struct drm_bridge *bridge)
+{
+ struct lvds_codec *lvds_codec = container_of(bridge,
+ struct lvds_codec, bridge);
+
+ if (lvds_codec->powerdown_gpio)
+ gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
+}
+
+static void lvds_codec_disable(struct drm_bridge *bridge)
+{
+ struct lvds_codec *lvds_codec = container_of(bridge,
+ struct lvds_codec, bridge);
+
+ if (lvds_codec->powerdown_gpio)
+ gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
+}
+
+static struct drm_bridge_funcs funcs = {
+ .attach = lvds_codec_attach,
+ .enable = lvds_codec_enable,
+ .disable = lvds_codec_disable,
+};
+
+static int lvds_codec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct lvds_codec *lvds_codec;
+
+ lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
+ if (!lvds_codec)
+ return -ENOMEM;
+
+ lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
+ lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(lvds_codec->powerdown_gpio)) {
+ int err = PTR_ERR(lvds_codec->powerdown_gpio);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "powerdown GPIO failure: %d\n", err);
+ return err;
+ }
+
+ /* Locate the panel DT node. */
+ panel_node = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (!panel_node) {
+ dev_dbg(dev, "panel DT node not found\n");
+ return -ENXIO;
+ }
+
+ panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (IS_ERR(panel)) {
+ dev_dbg(dev, "panel not found, deferring probe\n");
+ return PTR_ERR(panel);
+ }
+
+ lvds_codec->panel_bridge =
+ devm_drm_panel_bridge_add_typed(dev, panel,
+ lvds_codec->connector_type);
+ if (IS_ERR(lvds_codec->panel_bridge))
+ return PTR_ERR(lvds_codec->panel_bridge);
+
+ /*
+ * The panel_bridge bridge is attached to the panel's of_node,
+ * but we need a bridge attached to our of_node for our user
+ * to look up.
+ */
+ lvds_codec->bridge.of_node = dev->of_node;
+ lvds_codec->bridge.funcs = &funcs;
+ drm_bridge_add(&lvds_codec->bridge);
+
+ platform_set_drvdata(pdev, lvds_codec);
+
+ return 0;
+}
+
+static int lvds_codec_remove(struct platform_device *pdev)
+{
+ struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&lvds_codec->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id lvds_codec_match[] = {
+ {
+ .compatible = "lvds-decoder",
+ .data = (void *)DRM_MODE_CONNECTOR_DPI,
+ },
+ {
+ .compatible = "lvds-encoder",
+ .data = (void *)DRM_MODE_CONNECTOR_LVDS,
+ },
+ {
+ .compatible = "thine,thc63lvdm83d",
+ .data = (void *)DRM_MODE_CONNECTOR_LVDS,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lvds_codec_match);
+
+static struct platform_driver lvds_codec_driver = {
+ .probe = lvds_codec_probe,
+ .remove = lvds_codec_remove,
+ .driver = {
+ .name = "lvds-codec",
+ .of_match_table = lvds_codec_match,
+ },
+};
+module_platform_driver(lvds_codec_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("LVDS encoders and decoders");
+MODULE_LICENSE("GPL");
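
A display-controller driver consuming the new lvds-codec bridge is expected to find it through the OF graph and attach it to its encoder. A minimal sketch of that consumer side, assuming output port 1 and the illustrative helper name my_encoder_attach_bridge():

#include <linux/device.h>
#include <linux/of_graph.h>

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

static int my_encoder_attach_bridge(struct device *dev,
				    struct drm_encoder *encoder)
{
	struct device_node *remote;
	struct drm_bridge *bridge;

	/* Follow the OF graph from this encoder's output port (1 here)
	 * to the lvds-codec node. */
	remote = of_graph_get_remote_node(dev->of_node, 1, 0);
	if (!remote)
		return -ENODEV;

	bridge = of_drm_find_bridge(remote);
	of_node_put(remote);
	if (!bridge)
		return -EPROBE_DEFER;	/* codec not probed yet */

	return drm_bridge_attach(encoder, bridge, NULL);
}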
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
deleted file mode 100644
index 2ab2c234f26c..000000000000
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/platform_device.h>
-
-#include <drm/drm_bridge.h>
-#include <drm/drm_panel.h>
-
-struct lvds_encoder {
- struct drm_bridge bridge;
- struct drm_bridge *panel_bridge;
- struct gpio_desc *powerdown_gpio;
-};
-
-static int lvds_encoder_attach(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds_encoder = container_of(bridge,
- struct lvds_encoder,
- bridge);
-
- return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
- bridge);
-}
-
-static void lvds_encoder_enable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds_encoder = container_of(bridge,
- struct lvds_encoder,
- bridge);
-
- if (lvds_encoder->powerdown_gpio)
- gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 0);
-}
-
-static void lvds_encoder_disable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds_encoder = container_of(bridge,
- struct lvds_encoder,
- bridge);
-
- if (lvds_encoder->powerdown_gpio)
- gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 1);
-}
-
-static struct drm_bridge_funcs funcs = {
- .attach = lvds_encoder_attach,
- .enable = lvds_encoder_enable,
- .disable = lvds_encoder_disable,
-};
-
-static int lvds_encoder_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *port;
- struct device_node *endpoint;
- struct device_node *panel_node;
- struct drm_panel *panel;
- struct lvds_encoder *lvds_encoder;
-
- lvds_encoder = devm_kzalloc(dev, sizeof(*lvds_encoder), GFP_KERNEL);
- if (!lvds_encoder)
- return -ENOMEM;
-
- lvds_encoder->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
- GPIOD_OUT_HIGH);
- if (IS_ERR(lvds_encoder->powerdown_gpio)) {
- int err = PTR_ERR(lvds_encoder->powerdown_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "powerdown GPIO failure: %d\n", err);
- return err;
- }
-
- /* Locate the panel DT node. */
- port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- dev_dbg(dev, "port 1 not found\n");
- return -ENXIO;
- }
-
- endpoint = of_get_child_by_name(port, "endpoint");
- of_node_put(port);
- if (!endpoint) {
- dev_dbg(dev, "no endpoint for port 1\n");
- return -ENXIO;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_dbg(dev, "no remote endpoint for port 1\n");
- return -ENXIO;
- }
-
- panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
- if (IS_ERR(panel)) {
- dev_dbg(dev, "panel not found, deferring probe\n");
- return PTR_ERR(panel);
- }
-
- lvds_encoder->panel_bridge =
- devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_LVDS);
- if (IS_ERR(lvds_encoder->panel_bridge))
- return PTR_ERR(lvds_encoder->panel_bridge);
-
- /* The panel_bridge bridge is attached to the panel's of_node,
- * but we need a bridge attached to our of_node for our user
- * to look up.
- */
- lvds_encoder->bridge.of_node = dev->of_node;
- lvds_encoder->bridge.funcs = &funcs;
- drm_bridge_add(&lvds_encoder->bridge);
-
- platform_set_drvdata(pdev, lvds_encoder);
-
- return 0;
-}
-
-static int lvds_encoder_remove(struct platform_device *pdev)
-{
- struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
-
- drm_bridge_remove(&lvds_encoder->bridge);
-
- return 0;
-}
-
-static const struct of_device_id lvds_encoder_match[] = {
- { .compatible = "lvds-encoder" },
- { .compatible = "thine,thc63lvdm83d" },
- {},
-};
-MODULE_DEVICE_TABLE(of, lvds_encoder_match);
-
-static struct platform_driver lvds_encoder_driver = {
- .probe = lvds_encoder_probe,
- .remove = lvds_encoder_remove,
- .driver = {
- .name = "lvds-encoder",
- .of_match_table = lvds_encoder_match,
- },
-};
-module_platform_driver(lvds_encoder_driver);
-
-MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
-MODULE_DESCRIPTION("Transparent parallel to LVDS encoder");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 79311f8354bd..e8a49f6146c6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -19,13 +19,13 @@
* Host -> LVDS|--(STDP4028)--|DP -> DP|--(STDP2690)--|DP++ -> Video output
*/
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 98bc650b8c95..57ff01339559 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -6,14 +6,12 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index b12ae3a4c5f1..f66777e24968 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -36,7 +37,7 @@ static int panel_bridge_connector_get_modes(struct drm_connector *connector)
struct panel_bridge *panel_bridge =
drm_connector_to_panel_bridge(connector);
- return drm_panel_get_modes(panel_bridge->panel);
+ return drm_panel_get_modes(panel_bridge->panel, connector);
}
static const struct drm_connector_helper_funcs
@@ -133,8 +134,6 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* just calls the appropriate functions from &drm_panel.
*
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* For drivers converting from directly using drm_panel: The expected
* usage pattern is that during either encoder module probe or DSI
@@ -148,11 +147,37 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* drm_mode_config_cleanup() if the bridge has already been attached), then
* drm_panel_bridge_remove() to free it.
*
+ * The connector type is set to @panel->connector_type, which must be set to a
+ * known type. Calling this function with a panel whose connector type is
+ * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ *
* See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
-struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
- u32 connector_type)
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return drm_panel_bridge_add_typed(panel, panel->connector_type);
+}
+EXPORT_SYMBOL(drm_panel_bridge_add);
+
+/**
+ * drm_panel_bridge_add_typed - Creates a &drm_bridge and &drm_connector with
+ * an explicit connector type.
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like drm_panel_bridge_add(), but forces the connector type to
+ * @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * drm_panel_bridge_add() instead, and fix panel drivers as necessary if they
+ * don't report a connector type.
+ */
+struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
+ u32 connector_type)
{
struct panel_bridge *panel_bridge;
@@ -176,7 +201,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
return &panel_bridge->bridge;
}
-EXPORT_SYMBOL(drm_panel_bridge_add);
+EXPORT_SYMBOL(drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_remove - Unregisters and frees a drm_bridge
@@ -213,15 +238,38 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
* that just calls the appropriate functions from &drm_panel.
* @dev: device to tie the bridge lifetime to
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* This is the managed version of drm_panel_bridge_add() which automatically
* calls drm_panel_bridge_remove() when @dev is unbound.
*/
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
- struct drm_panel *panel,
- u32 connector_type)
+ struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return devm_drm_panel_bridge_add_typed(dev, panel,
+ panel->connector_type);
+}
+EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+
+/**
+ * devm_drm_panel_bridge_add_typed - Creates a managed &drm_bridge and
+ * &drm_connector with an explicit connector type.
+ * @dev: device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like devm_drm_panel_bridge_add(), but forces the connector type
+ * to @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_panel_bridge_add() instead, and fix panel drivers as necessary if
+ * they don't report a connector type.
+ */
+struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type)
{
struct drm_bridge **ptr, *bridge;
@@ -230,7 +278,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- bridge = drm_panel_bridge_add(panel, connector_type);
+ bridge = drm_panel_bridge_add_typed(panel, connector_type);
if (!IS_ERR(bridge)) {
*ptr = bridge;
devres_add(dev, ptr);
@@ -240,4 +288,22 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
return bridge;
}
-EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
+
+/**
+ * drm_panel_bridge_connector - return the connector for the panel bridge
+ *
+ * drm_panel_bridge creates the connector.
+ * This function gives external access to the connector.
+ *
+ * Returns: Pointer to drm_connector
+ */
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge;
+
+ panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return &panel_bridge->connector;
+}
+EXPORT_SYMBOL(drm_panel_bridge_connector);
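
Taken together, the panel.c changes let an encoder driver have the panel dictate the connector type and then reach the wrapped connector through the new accessor. A short sketch under those assumptions; my_encoder_bind_panel() is illustrative and error handling is minimal:

#include <linux/err.h>

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_panel.h>

static int my_encoder_bind_panel(struct device *dev,
				 struct drm_encoder *encoder,
				 struct drm_panel *panel)
{
	struct drm_connector *connector;
	struct drm_bridge *bridge;
	int ret;

	/* Preferred form: the connector type comes from the panel.
	 * NULL means the panel still reports
	 * DRM_MODE_CONNECTOR_Unknown and should be fixed (or the
	 * deprecated _typed variant used as a stopgap). */
	bridge = devm_drm_panel_bridge_add(dev, panel);
	if (IS_ERR_OR_NULL(bridge))
		return bridge ? PTR_ERR(bridge) : -EINVAL;

	ret = drm_bridge_attach(encoder, bridge, NULL);
	if (ret)
		return ret;

	/* The wrapper creates and owns the connector; the new accessor
	 * exposes it for encoder-side tweaks. */
	connector = drm_panel_bridge_connector(bridge);
	connector->interlace_allowed = false;

	return 0;
}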
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 2d88146e4836..10c47c008b40 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -8,7 +8,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -18,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -461,7 +461,7 @@ static int ps8622_get_modes(struct drm_connector *connector)
ps8622 = connector_to_ps8622(connector);
- return drm_panel_get_modes(ps8622->panel);
+ return drm_panel_get_modes(ps8622->panel, connector);
}
static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index dd7aa466b280..b70e8c5cf2e1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
@@ -158,6 +159,8 @@
#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
+#define SII902X_AUDIO_PORT_INDEX 3
+
struct sii902x {
struct i2c_client *i2c;
struct regmap *regmap;
@@ -568,13 +571,14 @@ static int sii902x_audio_hw_params(struct device *dev, void *data,
return ret;
}
- mclk_rate = clk_get_rate(sii902x->audio.mclk);
-
- ret = sii902x_select_mclk_div(&i2s_config_reg, params->sample_rate,
- mclk_rate);
- if (mclk_rate != ret * params->sample_rate)
- dev_dbg(dev, "Inaccurate reference clock (%ld/%d != %u)\n",
- mclk_rate, ret, params->sample_rate);
+ if (sii902x->audio.mclk) {
+ mclk_rate = clk_get_rate(sii902x->audio.mclk);
+ ret = sii902x_select_mclk_div(&i2s_config_reg,
+ params->sample_rate, mclk_rate);
+ if (mclk_rate != ret * params->sample_rate)
+ dev_dbg(dev, "Inaccurate reference clock (%ld/%d != %u)\n",
+ mclk_rate, ret, params->sample_rate);
+ }
mutex_lock(&sii902x->mutex);
@@ -662,7 +666,8 @@ static void sii902x_audio_shutdown(struct device *dev, void *data)
clk_disable_unprepare(sii902x->audio.mclk);
}
-int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int sii902x_audio_digital_mute(struct device *dev,
+ void *data, bool enable)
{
struct sii902x *sii902x = dev_get_drvdata(dev);
@@ -690,11 +695,32 @@ static int sii902x_audio_get_eld(struct device *dev, void *data,
return 0;
}
+static int sii902x_audio_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * HDMI audio is expected on the endpoint at reg = <3>;
+	 * return the expected DAI index 0 for it.
+ */
+ if (of_ep.port == SII902X_AUDIO_PORT_INDEX)
+ return 0;
+
+ return -EINVAL;
+}
+
static const struct hdmi_codec_ops sii902x_audio_codec_ops = {
.hw_params = sii902x_audio_hw_params,
.audio_shutdown = sii902x_audio_shutdown,
.digital_mute = sii902x_audio_digital_mute,
.get_eld = sii902x_audio_get_eld,
+ .get_dai_id = sii902x_audio_get_dai_id,
};
static int sii902x_audio_codec_init(struct sii902x *sii902x,
@@ -750,10 +776,11 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
sii902x->audio.i2s_fifo_sequence[i] |= audio_fifo_id[i] |
i2s_lane_id[lanes[i]] | SII902X_TPI_I2S_FIFO_ENABLE;
+ sii902x->audio.mclk = devm_clk_get_optional(dev, "mclk");
if (IS_ERR(sii902x->audio.mclk)) {
dev_err(dev, "%s: No clock (audio mclk) found: %ld\n",
__func__, PTR_ERR(sii902x->audio.mclk));
- return 0;
+ return PTR_ERR(sii902x->audio.mclk);
}
sii902x->audio.pdev = platform_device_register_data(
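
The mclk handling above relies on devm_clk_get_optional() returning NULL for an absent clock and ERR_PTR() only for real failures. A small sketch of that convention, with my_get_optional_mclk() as an illustrative helper:

#include <linux/clk.h>
#include <linux/err.h>

static int my_get_optional_mclk(struct device *dev, struct clk **out)
{
	struct clk *mclk;

	mclk = devm_clk_get_optional(dev, "mclk");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);	/* real error, e.g. -EPROBE_DEFER */

	/* NULL just means the clock is not described; callers must
	 * test for it before clk_get_rate() and friends. */
	*out = mclk;
	return 0;
}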
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 25d4ad8c7ad6..f81f81b7051f 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -13,6 +13,7 @@
* Dharam Kumar <dharam.kr@samsung.com>
*/
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -841,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->client[I2C_MHL] = client;
- ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
- if (!ctx->client[I2C_TPI]) {
+ ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_TPI_ADDR);
+ if (IS_ERR(ctx->client[I2C_TPI])) {
dev_err(ctx->dev, "failed to create TPI client\n");
- return -ENODEV;
+ return PTR_ERR(ctx->client[I2C_TPI]);
}
- ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
- if (!ctx->client[I2C_HDMI]) {
+ ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_HDMI_ADDR);
+ if (IS_ERR(ctx->client[I2C_HDMI])) {
dev_err(ctx->dev, "failed to create HDMI RX client\n");
- goto fail_tpi;
+ return PTR_ERR(ctx->client[I2C_HDMI]);
}
- ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
- if (!ctx->client[I2C_CBUS]) {
+ ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_CBUS_ADDR);
+ if (IS_ERR(ctx->client[I2C_CBUS])) {
dev_err(ctx->dev, "failed to create CBUS client\n");
- goto fail_hdmi;
+ return PTR_ERR(ctx->client[I2C_CBUS]);
}
return 0;
-
-fail_hdmi:
- i2c_unregister_device(ctx->client[I2C_HDMI]);
-fail_tpi:
- i2c_unregister_device(ctx->client[I2C_TPI]);
-
- return -ENODEV;
-}
-
-static void sii9234_deinit_resources(struct sii9234 *ctx)
-{
- i2c_unregister_device(ctx->client[I2C_CBUS]);
- i2c_unregister_device(ctx->client[I2C_HDMI]);
- i2c_unregister_device(ctx->client[I2C_TPI]);
}
static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
@@ -950,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client)
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
- sii9234_deinit_resources(ctx);
return 0;
}
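
With devm_i2c_new_dummy_device() the secondary clients are released automatically, which is why the hand-rolled unwind path disappears above. A hedged sketch of the pattern; MY_SECONDARY_ADDR and my_create_secondary() are made up for illustration:

#include <linux/i2c.h>

#define MY_SECONDARY_ADDR	0x7a	/* illustrative address only */

static struct i2c_client *my_create_secondary(struct i2c_client *client)
{
	/* Managed allocation: no i2c_unregister_device() in the error
	 * path or in remove(); failures come back as ERR_PTR(). */
	return devm_i2c_new_dummy_device(&client->dev, client->adapter,
					 MY_SECONDARY_ADDR);
}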
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index bd3165ee5354..4c0eef406eb1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -9,6 +9,7 @@
#include <asm/unaligned.h>
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -1759,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
scancode &= MHL_RCP_KEY_ID_MASK;
- if (!ctx->rc_dev) {
- dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev)
return false;
- }
if (pressed)
rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
@@ -2099,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
struct rc_dev *rc_dev;
int ret;
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev) {
dev_err(ctx->dev, "Failed to allocate RC device\n");
@@ -2213,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_unregister_device(ctx->rc_dev);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index a494186ae6ce..dd56996fe9c7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -63,10 +63,6 @@ enum {
HDMI_REVISION_ID = 0x0001,
HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
- HDMI_FC_AUDICONF2 = 0x1027,
- HDMI_FC_AUDSCONF = 0x1063,
- HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0,
- HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0,
HDMI_AHB_DMA_CONF0 = 0x3600,
HDMI_AHB_DMA_START = 0x3601,
HDMI_AHB_DMA_STOP = 0x3602,
@@ -295,7 +291,7 @@ static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static struct snd_pcm_hardware dw_hdmi_hw = {
+static const struct snd_pcm_hardware dw_hdmi_hw = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
@@ -403,7 +399,7 @@ static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_dw_hdmi *dw = substream->private_data;
- u8 threshold, conf0, conf1, layout, ca;
+ u8 threshold, conf0, conf1, ca;
/* Setup as per 3.0.5 FSL 4.1.0 BSP */
switch (dw->revision) {
@@ -434,20 +430,12 @@ static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
ca = default_hdmi_channel_config[runtime->channels - 2].ca;
- /*
- * For >2 channel PCM audio, we need to select layout 1
- * and set an appropriate channel map.
- */
- if (runtime->channels > 2)
- layout = HDMI_FC_AUDSCONF_LAYOUT1;
- else
- layout = HDMI_FC_AUDSCONF_LAYOUT0;
-
writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
- writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF);
- writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2);
+
+ dw_hdmi_set_channel_count(dw->data.hdmi, runtime->channels);
+ dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
switch (runtime->format) {
case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index 63b5756f463b..cb07dc0da5a7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -14,6 +14,7 @@ struct dw_hdmi_audio_data {
struct dw_hdmi_i2s_audio_data {
struct dw_hdmi *hdmi;
+ u8 *eld;
void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
u8 (*read)(struct dw_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 0f949978d3fc..70ab4fbdc23e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -256,8 +256,8 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);
cec->adap = cec_allocate_adapter(&dw_hdmi_cec_ops, cec, "dw_hdmi",
- CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
- CEC_CAP_RC | CEC_CAP_PASSTHROUGH,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
@@ -278,13 +278,14 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- cec->notify = cec_notifier_get(pdev->dev.parent);
+ cec->notify = cec_notifier_cec_adap_register(pdev->dev.parent,
+ NULL, cec->adap);
if (!cec->notify)
return -ENOMEM;
ret = cec_register_adapter(cec->adap, pdev->dev.parent);
if (ret < 0) {
- cec_notifier_put(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
return ret;
}
@@ -294,8 +295,6 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
*/
devm_remove_action(&pdev->dev, dw_hdmi_cec_del, cec);
- cec_register_cec_notifier(cec->adap, cec->notify);
-
return 0;
}
@@ -303,8 +302,8 @@ static int dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
cec_unregister_adapter(cec->adap);
- cec_notifier_put(cec->notify);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 5cbb71a866d5..d7e65c869415 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_crtc.h>
#include <sound/hdmi-codec.h>
@@ -44,14 +45,30 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
u8 inputclkfs = 0;
/* it cares I2S only */
- if ((fmt->fmt != HDMI_I2S) ||
- (fmt->bit_clk_master | fmt->frame_clk_master)) {
- dev_err(dev, "unsupported format/settings\n");
+ if (fmt->bit_clk_master | fmt->frame_clk_master) {
+ dev_err(dev, "unsupported clock settings\n");
return -EINVAL;
}
+ /* Reset the FIFOs before applying new params */
+ hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+ hdmi_write(audio, (u8)~HDMI_MC_SWRSTZ_I2SSWRST_REQ, HDMI_MC_SWRSTZ);
+
inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
- conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE;
+ conf0 = (HDMI_AUD_CONF0_I2S_SELECT | HDMI_AUD_CONF0_I2S_EN0);
+
+ /* Enable the required i2s lanes */
+ switch (hparms->channels) {
+ case 7 ... 8:
+ conf0 |= HDMI_AUD_CONF0_I2S_EN3;
+ /* Fall-thru */
+ case 5 ... 6:
+ conf0 |= HDMI_AUD_CONF0_I2S_EN2;
+ /* Fall-thru */
+ case 3 ... 4:
+ conf0 |= HDMI_AUD_CONF0_I2S_EN1;
+ /* Fall-thru */
+ }
switch (hparms->sample_width) {
case 16:
@@ -63,12 +80,44 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
break;
}
+ switch (fmt->fmt) {
+ case HDMI_I2S:
+ conf1 |= HDMI_AUD_CONF1_MODE_I2S;
+ break;
+ case HDMI_RIGHT_J:
+ conf1 |= HDMI_AUD_CONF1_MODE_RIGHT_J;
+ break;
+ case HDMI_LEFT_J:
+ conf1 |= HDMI_AUD_CONF1_MODE_LEFT_J;
+ break;
+ case HDMI_DSP_A:
+ conf1 |= HDMI_AUD_CONF1_MODE_BURST_1;
+ break;
+ case HDMI_DSP_B:
+ conf1 |= HDMI_AUD_CONF1_MODE_BURST_2;
+ break;
+ default:
+ dev_err(dev, "unsupported format\n");
+ return -EINVAL;
+ }
+
dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+ dw_hdmi_set_channel_status(hdmi, hparms->iec.status);
+ dw_hdmi_set_channel_count(hdmi, hparms->channels);
+ dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);
hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
hdmi_write(audio, conf0, HDMI_AUD_CONF0);
hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+ return 0;
+}
+
+static int dw_hdmi_i2s_audio_startup(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
dw_hdmi_audio_enable(hdmi);
return 0;
@@ -80,8 +129,15 @@ static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
struct dw_hdmi *hdmi = audio->hdmi;
dw_hdmi_audio_disable(hdmi);
+}
- hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+
+ memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len));
+ return 0;
}
static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -104,10 +160,23 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
return -EINVAL;
}
+static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
+ .audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+ .get_eld = dw_hdmi_i2s_get_eld,
.get_dai_id = dw_hdmi_i2s_get_dai_id,
+ .hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
@@ -119,7 +188,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
pdata.ops = &dw_hdmi_i2s_ops;
pdata.i2s = 1;
- pdata.max_i2s_channels = 6;
+ pdata.max_i2s_channels = 8;
pdata.data = audio;
memset(&pdevinfo, 0, sizeof(pdevinfo));
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index c6490949d9db..67fca439bbfb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -25,9 +25,10 @@
#include <uapi/linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -37,6 +38,7 @@
#include "dw-hdmi-cec.h"
#include "dw-hdmi.h"
+#define DDC_CI_ADDR 0x37
#define DDC_SEGMENT_ADDR 0x30
#define HDMI_EDID_LEN 512
@@ -190,7 +192,12 @@ struct dw_hdmi {
void (*enable_audio)(struct dw_hdmi *hdmi);
void (*disable_audio)(struct dw_hdmi *hdmi);
+ struct mutex cec_notifier_mutex;
struct cec_notifier *cec_notifier;
+
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ enum drm_connector_status last_connector_result;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -215,6 +222,28 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset)
return val;
}
+static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged)
+{
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, plugged);
+}
+
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ bool plugged;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ plugged = hdmi->last_connector_result == connector_status_connected;
+ handle_plugged_change(hdmi, plugged);
+ mutex_unlock(&hdmi->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb);
+
static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data);
@@ -398,6 +427,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
u8 addr = msgs[0].addr;
int i, ret = 0;
+ if (addr == DDC_CI_ADDR)
+ /*
+ * The internal I2C controller does not support the multi-byte
+ * read and write operations needed for DDC/CI.
+ * TOFIX: Blacklist the DDC/CI address until we filter out
+ * unsupported I2C operations.
+ */
+ return -EOPNOTSUPP;
+
dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
for (i = 0; i < num; i++) {
@@ -508,8 +546,14 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
/* nshift factor = 0 */
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
- hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
- HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+ /* Use automatic CTS generation mode when CTS is not set */
+ if (cts)
+ hdmi_writeb(hdmi, ((cts >> 16) &
+ HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
+ HDMI_AUD_CTS3_CTS_MANUAL,
+ HDMI_AUD_CTS3);
+ else
+ hdmi_writeb(hdmi, 0, HDMI_AUD_CTS3);
hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
@@ -574,29 +618,58 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
return n;
}
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information of all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, General Purpose Audio (GPA),
+ * or AHB audio DMA (AHBAUDDMA) interface is active
+ * (for the S/PDIF interface this information comes from the stream).
+ */
+void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi,
+ u8 *channel_status)
+{
+ /*
+ * Set channel status register for frequency and word length.
+ * Use default values for other registers.
+ */
+ hdmi_writeb(hdmi, channel_status[3], HDMI_FC_AUDSCHNLS7);
+ hdmi_writeb(hdmi, channel_status[4], HDMI_FC_AUDSCHNLS8);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_status);
+
static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
unsigned long pixel_clk, unsigned int sample_rate)
{
unsigned long ftdms = pixel_clk;
unsigned int n, cts;
+ u8 config3;
u64 tmp;
n = hdmi_compute_n(sample_rate, pixel_clk);
- /*
- * Compute the CTS value from the N value. Note that CTS and N
- * can be up to 20 bits in total, so we need 64-bit math. Also
- * note that our TDMS clock is not fully accurate; it is accurate
- * to kHz. This can introduce an unnecessary remainder in the
- * calculation below, so we don't try to warn about that.
- */
- tmp = (u64)ftdms * n;
- do_div(tmp, 128 * sample_rate);
- cts = tmp;
+ config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID);
- dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
- __func__, sample_rate, ftdms / 1000000, (ftdms / 1000) % 1000,
- n, cts);
+ /* Only compute CTS when using internal AHB audio */
+ if (config3 & HDMI_CONFIG3_AHBAUDDMA) {
+ /*
+ * Compute the CTS value from the N value. Note that CTS and N
+ * can be up to 20 bits in total, so we need 64-bit math. Also
+ * note that our TDMS clock is not fully accurate; it is
+ * accurate to kHz. This can introduce an unnecessary remainder
+ * in the calculation below, so we don't try to warn about that.
+ */
+ tmp = (u64)ftdms * n;
+ do_div(tmp, 128 * sample_rate);
+ cts = tmp;
+
+ dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
+ __func__, sample_rate,
+ ftdms / 1000000, (ftdms / 1000) % 1000,
+ n, cts);
+ } else {
+ cts = 0;
+ }
spin_lock_irq(&hdmi->audio_lock);
hdmi->audio_n = n;
@@ -630,6 +703,42 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
+void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt)
+{
+ u8 layout;
+
+ mutex_lock(&hdmi->audio_mutex);
+
+ /*
+ * For >2 channel PCM audio, we need to select layout 1
+ * and set an appropriate channel map.
+ */
+ if (cnt > 2)
+ layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1;
+ else
+ layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0;
+
+ hdmi_modb(hdmi, layout, HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK,
+ HDMI_FC_AUDSCONF);
+
+ /* Set the audio infoframes channel count */
+ hdmi_modb(hdmi, (cnt - 1) << HDMI_FC_AUDICONF0_CC_OFFSET,
+ HDMI_FC_AUDICONF0_CC_MASK, HDMI_FC_AUDICONF0);
+
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_count);
+
+void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca)
+{
+ mutex_lock(&hdmi->audio_mutex);
+
+ hdmi_writeb(hdmi, ca, HDMI_FC_AUDICONF2);
+
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_allocation);
+
static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
{
if (enable)
@@ -1661,6 +1770,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
HDMI_FC_DATAUTO0_VSD_MASK);
}
+static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi)
+{
+ const struct drm_connector_state *conn_state = hdmi->connector.state;
+ struct hdmi_drm_infoframe frame;
+ u8 buffer[30];
+ ssize_t err;
+ int i;
+
+ if (!hdmi->plat_data->use_drm_infoframe)
+ return;
+
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+
+ err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
+ if (err < 0)
+ return;
+
+ err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err);
+ return;
+ }
+
+ hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0);
+ hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1);
+
+ for (i = 0; i < frame.length; i++)
+ hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i);
+
+ hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+}
+
static void hdmi_av_composer(struct dw_hdmi *hdmi,
const struct drm_display_mode *mode)
{
@@ -1972,7 +2116,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
- hdmi_enable_audio_clk(hdmi, true);
+ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
}
/* not for DVI mode */
@@ -1982,6 +2126,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step F - Configure AVI InfoFrame */
hdmi_config_AVI(hdmi, mode);
hdmi_config_vendor_specific_infoframe(hdmi, mode);
+ hdmi_config_drm_infoframe(hdmi);
} else {
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
}
@@ -2110,6 +2255,7 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
+ enum drm_connector_status result;
mutex_lock(&hdmi->mutex);
hdmi->force = DRM_FORCE_UNSPECIFIED;
@@ -2117,7 +2263,18 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
dw_hdmi_update_phy_mask(hdmi);
mutex_unlock(&hdmi->mutex);
- return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+
+ mutex_lock(&hdmi->mutex);
+ if (result != hdmi->last_connector_result) {
+ dev_dbg(hdmi->dev, "read_hpd result: %d", result);
+ handle_plugged_change(hdmi,
+ result == connector_status_connected);
+ hdmi->last_connector_result = result;
+ }
+ mutex_unlock(&hdmi->mutex);
+
+ return result;
}
static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -2148,6 +2305,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
+ const struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (!old_blob || !new_blob)
+ return old_blob == new_blob;
+
+ if (old_blob->length != new_blob->length)
+ return false;
+
+ return !memcmp(old_blob->data, new_blob->data, old_blob->length);
+}
+
+static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ if (!hdr_metadata_equal(old_state, new_state)) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -2172,6 +2368,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
+ .atomic_check = dw_hdmi_connector_atomic_check,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2179,20 +2376,48 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
struct drm_connector *connector = &hdmi->connector;
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_helper_add(connector, &dw_hdmi_connector_helper_funcs);
- drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(bridge->dev, connector,
+ &dw_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc);
+
+ if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
drm_connector_attach_encoder(connector, encoder);
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(hdmi->dev, NULL, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&hdmi->cec_notifier_mutex);
+ hdmi->cec_notifier = notifier;
+ mutex_unlock(&hdmi->cec_notifier_mutex);
+
return 0;
}
+static void dw_hdmi_bridge_detach(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ mutex_lock(&hdmi->cec_notifier_mutex);
+ cec_notifier_conn_unregister(hdmi->cec_notifier);
+ hdmi->cec_notifier = NULL;
+ mutex_unlock(&hdmi->cec_notifier_mutex);
+}
+
static enum drm_mode_status
dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
@@ -2249,6 +2474,7 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.attach = dw_hdmi_bridge_attach,
+ .detach = dw_hdmi_bridge_detach,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -2356,9 +2582,11 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
phy_stat & HDMI_PHY_HPD,
phy_stat & HDMI_PHY_RX_SENSE);
- if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
- cec_notifier_set_phys_addr(hdmi->cec_notifier,
- CEC_PHYS_ADDR_INVALID);
+ if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) {
+ mutex_lock(&hdmi->cec_notifier_mutex);
+ cec_notifier_phys_addr_invalidate(hdmi->cec_notifier);
+ mutex_unlock(&hdmi->cec_notifier_mutex);
+ }
}
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
@@ -2541,9 +2769,11 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
hdmi->mc_clkdis = 0x7f;
+ hdmi->last_connector_result = connector_status_disconnected;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
+ mutex_init(&hdmi->cec_notifier_mutex);
spin_lock_init(&hdmi->audio_lock);
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
@@ -2676,12 +2906,6 @@ __dw_hdmi_probe(struct platform_device *pdev,
if (ret)
goto err_iahb;
- hdmi->cec_notifier = cec_notifier_get(dev);
- if (!hdmi->cec_notifier) {
- ret = -ENOMEM;
- goto err_iahb;
- }
-
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
* N and cts values before enabling phy
@@ -2746,6 +2970,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
struct dw_hdmi_i2s_audio_data audio;
audio.hdmi = hdmi;
+ audio.eld = hdmi->connector.eld;
audio.write = hdmi_writeb;
audio.read = hdmi_readb;
hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
@@ -2779,9 +3004,6 @@ err_iahb:
hdmi->ddc = NULL;
}
- if (hdmi->cec_notifier)
- cec_notifier_put(hdmi->cec_notifier);
-
clk_disable_unprepare(hdmi->iahb_clk);
if (hdmi->cec_clk)
clk_disable_unprepare(hdmi->cec_clk);
@@ -2803,9 +3025,6 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
- if (hdmi->cec_notifier)
- cec_notifier_put(hdmi->cec_notifier);
-
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
if (hdmi->cec_clk)
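
The DRM (HDR metadata) infoframe support is opt-in per platform via a flag in dw_hdmi_plat_data, so existing glue drivers are unaffected until they set it. A minimal, hypothetical platform-data sketch; my_soc_hdmi_drv_data and the omitted PHY tables are assumptions, not part of this diff:

#include <drm/bridge/dw_hdmi.h>

/* Hypothetical SoC glue: opt in to the HDR metadata infoframe. The
 * core still skips it on controllers older than v2.0a. */
static const struct dw_hdmi_plat_data my_soc_hdmi_drv_data = {
	.use_drm_infoframe = true,
	/* .mpll_cfg, .cur_ctr, .phy_config, etc. omitted here */
};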
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 4e3ec09d3ca4..1999db05bc3b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -158,6 +158,8 @@
#define HDMI_FC_SPDDEVICEINF 0x1062
#define HDMI_FC_AUDSCONF 0x1063
#define HDMI_FC_AUDSSTAT 0x1064
+#define HDMI_FC_AUDSCHNLS7 0x106e
+#define HDMI_FC_AUDSCHNLS8 0x106f
#define HDMI_FC_DATACH0FILL 0x1070
#define HDMI_FC_DATACH1FILL 0x1071
#define HDMI_FC_DATACH2FILL 0x1072
@@ -252,6 +254,7 @@
#define HDMI_FC_POL2 0x10DB
#define HDMI_FC_PRCONF 0x10E0
#define HDMI_FC_SCRAMBLER_CTRL 0x10E1
+#define HDMI_FC_PACKET_TX_EN 0x10E3
#define HDMI_FC_GMD_STAT 0x1100
#define HDMI_FC_GMD_EN 0x1101
@@ -287,6 +290,37 @@
#define HDMI_FC_GMD_PB26 0x111F
#define HDMI_FC_GMD_PB27 0x1120
+#define HDMI_FC_DRM_UP 0x1167
+#define HDMI_FC_DRM_HB0 0x1168
+#define HDMI_FC_DRM_HB1 0x1169
+#define HDMI_FC_DRM_PB0 0x116A
+#define HDMI_FC_DRM_PB1 0x116B
+#define HDMI_FC_DRM_PB2 0x116C
+#define HDMI_FC_DRM_PB3 0x116D
+#define HDMI_FC_DRM_PB4 0x116E
+#define HDMI_FC_DRM_PB5 0x116F
+#define HDMI_FC_DRM_PB6 0x1170
+#define HDMI_FC_DRM_PB7 0x1171
+#define HDMI_FC_DRM_PB8 0x1172
+#define HDMI_FC_DRM_PB9 0x1173
+#define HDMI_FC_DRM_PB10 0x1174
+#define HDMI_FC_DRM_PB11 0x1175
+#define HDMI_FC_DRM_PB12 0x1176
+#define HDMI_FC_DRM_PB13 0x1177
+#define HDMI_FC_DRM_PB14 0x1178
+#define HDMI_FC_DRM_PB15 0x1179
+#define HDMI_FC_DRM_PB16 0x117A
+#define HDMI_FC_DRM_PB17 0x117B
+#define HDMI_FC_DRM_PB18 0x117C
+#define HDMI_FC_DRM_PB19 0x117D
+#define HDMI_FC_DRM_PB20 0x117E
+#define HDMI_FC_DRM_PB21 0x117F
+#define HDMI_FC_DRM_PB22 0x1180
+#define HDMI_FC_DRM_PB23 0x1181
+#define HDMI_FC_DRM_PB24 0x1182
+#define HDMI_FC_DRM_PB25 0x1183
+#define HDMI_FC_DRM_PB26 0x1184
+
#define HDMI_FC_DBGFORCE 0x1200
#define HDMI_FC_DBGAUD0CH0 0x1201
#define HDMI_FC_DBGAUD1CH0 0x1202
@@ -742,6 +776,11 @@ enum {
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+/* FC_PACKET_TX_EN field values */
+ HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00,
+
/* FC_AVICONF0-FC_AVICONF3 field values */
HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
@@ -865,12 +904,18 @@ enum {
/* AUD_CONF0 field values */
HDMI_AUD_CONF0_SW_RESET = 0x80,
- HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F,
+ HDMI_AUD_CONF0_I2S_SELECT = 0x20,
+ HDMI_AUD_CONF0_I2S_EN3 = 0x08,
+ HDMI_AUD_CONF0_I2S_EN2 = 0x04,
+ HDMI_AUD_CONF0_I2S_EN1 = 0x02,
+ HDMI_AUD_CONF0_I2S_EN0 = 0x01,
/* AUD_CONF1 field values */
HDMI_AUD_CONF1_MODE_I2S = 0x00,
- HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02,
- HDMI_AUD_CONF1_MODE_LEFT_J = 0x04,
+ HDMI_AUD_CONF1_MODE_RIGHT_J = 0x20,
+ HDMI_AUD_CONF1_MODE_LEFT_J = 0x40,
+ HDMI_AUD_CONF1_MODE_BURST_1 = 0x60,
+ HDMI_AUD_CONF1_MODE_BURST_2 = 0x80,
HDMI_AUD_CONF1_WIDTH_16 = 0x10,
HDMI_AUD_CONF1_WIDTH_24 = 0x18,
@@ -938,6 +983,7 @@ enum {
HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
/* MC_SWRSTZ field values */
+ HDMI_MC_SWRSTZ_I2SSWRST_REQ = 0x08,
HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
/* MC_FLOWCTRL field values */
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 281c58bab1a1..b18351b6760a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -89,6 +90,8 @@
#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
#define VID_MODE_TYPE_BURST 0x2
#define VID_MODE_TYPE_MASK 0x3
+#define VID_MODE_VPG_ENABLE BIT(16)
+#define VID_MODE_VPG_HORIZONTAL BIT(24)
#define DSI_VID_PKT_SIZE 0x3c
#define VID_PKT_SIZE(p) ((p) & 0x3fff)
@@ -233,6 +236,13 @@ struct dw_mipi_dsi {
u32 format;
unsigned long mode_flags;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+
+ bool vpg;
+ bool vpg_horizontal;
+#endif /* CONFIG_DEBUG_FS */
+
struct dw_mipi_dsi *master; /* dual-dsi master ptr */
struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
@@ -306,7 +316,8 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
@@ -518,6 +529,13 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
else
val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+#ifdef CONFIG_DEBUG_FS
+ if (dsi->vpg) {
+ val |= VID_MODE_VPG_ENABLE;
+ val |= dsi->vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0;
+ }
+#endif /* CONFIG_DEBUG_FS */
+
dsi_write(dsi, DSI_VID_MODE_CFG, val);
}
@@ -701,7 +719,15 @@ static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
{
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+ struct dw_mipi_dsi_dphy_timing timing;
u32 hw_version;
+ int ret;
+
+ ret = phy_ops->get_timing(dsi->plat_data->priv_data,
+ dsi->lane_mbps, &timing);
+ if (ret)
+ DRM_DEV_ERROR(dsi->dev, "Retrieving phy timings failed\n");
/*
* TODO dw drv improvements
@@ -714,16 +740,20 @@ static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
if (hw_version >= HWVER_131) {
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(0x40) |
- PHY_LP2HS_TIME_V131(0x40));
+ dsi_write(dsi, DSI_PHY_TMR_CFG,
+ PHY_HS2LP_TIME_V131(timing.data_hs2lp) |
+ PHY_LP2HS_TIME_V131(timing.data_lp2hs));
dsi_write(dsi, DSI_PHY_TMR_RD_CFG, MAX_RD_TIME_V131(10000));
} else {
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40) |
- PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+ dsi_write(dsi, DSI_PHY_TMR_CFG,
+ PHY_HS2LP_TIME(timing.data_hs2lp) |
+ PHY_LP2HS_TIME(timing.data_lp2hs) |
+ MAX_RD_TIME(10000));
}
- dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
- | PHY_CLKLP2HS_TIME(0x40));
+ dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG,
+ PHY_CLKHS2LP_TIME(timing.clk_hs2lp) |
+ PHY_CLKLP2HS_TIME(timing.clk_lp2hs));
}
static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
@@ -780,9 +810,6 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
- if (phy_ops->power_off)
- phy_ops->power_off(dsi->plat_data->priv_data);
-
/*
* Switch to command mode before panel-bridge post_disable &
* panel unprepare.
@@ -799,6 +826,9 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
*/
dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (phy_ops->power_off)
+ phy_ops->power_off(dsi->plat_data->priv_data);
+
if (dsi->slave) {
dw_mipi_dsi_disable(dsi->slave);
clk_disable_unprepare(dsi->slave->pclk);
@@ -865,6 +895,9 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
/* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
dw_mipi_dsi_set_mode(dsi, 0);
+
+ if (phy_ops->power_on)
+ phy_ops->power_on(dsi->plat_data->priv_data);
}
static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
@@ -881,15 +914,11 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
- const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
/* Switch to video mode for panel-bridge enable & panel enable */
dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
if (dsi->slave)
dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
-
- if (phy_ops->power_on)
- phy_ops->power_on(dsi->plat_data->priv_data);
}
static enum drm_mode_status
@@ -930,6 +959,33 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
.attach = dw_mipi_dsi_bridge_attach,
};
+#ifdef CONFIG_DEBUG_FS
+
+static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi)
+{
+ dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL);
+ if (IS_ERR(dsi->debugfs)) {
+ dev_err(dsi->dev, "failed to create debugfs root\n");
+ return;
+ }
+
+ debugfs_create_bool("vpg", 0660, dsi->debugfs, &dsi->vpg);
+ debugfs_create_bool("vpg_horizontal", 0660, dsi->debugfs,
+ &dsi->vpg_horizontal);
+}
+
+static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi)
+{
+ debugfs_remove_recursive(dsi->debugfs);
+}
+
+#else
+
+static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) { }
+static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi) { }
+
+#endif /* CONFIG_DEBUG_FS */
+
static struct dw_mipi_dsi *
__dw_mipi_dsi_probe(struct platform_device *pdev,
const struct dw_mipi_dsi_plat_data *plat_data)
@@ -937,7 +993,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct reset_control *apb_rst;
struct dw_mipi_dsi *dsi;
- struct resource *res;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -947,17 +1002,14 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
dsi->dev = dev;
dsi->plat_data = plat_data;
- if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps) {
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps ||
+ !plat_data->phy_ops->get_timing) {
DRM_ERROR("Phy not properly configured\n");
return ERR_PTR(-ENODEV);
}
if (!plat_data->base) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return ERR_PTR(-ENODEV);
-
- dsi->base = devm_ioremap_resource(dev, res);
+ dsi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->base))
return ERR_PTR(-ENODEV);
@@ -1000,6 +1052,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
clk_disable_unprepare(dsi->pclk);
}
+ dw_mipi_dsi_debugfs_init(dsi);
pm_runtime_enable(dev);
dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
@@ -1007,6 +1060,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ dw_mipi_dsi_debugfs_remove(dsi);
return ERR_PTR(ret);
}
@@ -1024,6 +1078,7 @@ static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
mipi_dsi_host_unregister(&dsi->dsi_host);
pm_runtime_disable(dsi->dev);
+ dw_mipi_dsi_debugfs_remove(dsi);
}
void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave)
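For reference, a minimal sketch (hypothetical foo_* driver, not part of this patch) of the two idioms the dw-mipi-dsi hunks above adopt: devm_platform_ioremap_resource() collapsing the platform_get_resource() + devm_ioremap_resource() pair, and CONFIG_DEBUG_FS-guarded debugfs helpers that compile to empty stubs when debugfs is disabled:

/* Hypothetical example only; the foo_* names are not from this patch. */
#include <linux/debugfs.h>
#include <linux/platform_device.h>

struct foo_dev {
        struct device *dev;
        void __iomem *base;
        struct dentry *debugfs;
        bool vpg;
};

#ifdef CONFIG_DEBUG_FS
static void foo_debugfs_init(struct foo_dev *foo)
{
        foo->debugfs = debugfs_create_dir(dev_name(foo->dev), NULL);
        debugfs_create_bool("vpg", 0660, foo->debugfs, &foo->vpg);
}

static void foo_debugfs_remove(struct foo_dev *foo)
{
        debugfs_remove_recursive(foo->debugfs);
}
#else
static void foo_debugfs_init(struct foo_dev *foo) { }
static void foo_debugfs_remove(struct foo_dev *foo) { }
#endif

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dev *foo;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        foo->dev = &pdev->dev;
        /* One call replaces platform_get_resource() + devm_ioremap_resource(). */
        foo->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(foo->base))
                return PTR_ERR(foo->base);

        foo_debugfs_init(foo);
        platform_set_drvdata(pdev, foo);
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_dev *foo = platform_get_drvdata(pdev);

        foo_debugfs_remove(foo);
        return 0;
}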
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 170f162ffa55..96207fcfde19 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -16,6 +16,7 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
@@ -281,7 +282,7 @@ static int tc358764_get_modes(struct drm_connector *connector)
{
struct tc358764 *ctx = connector_to_tc358764(connector);
- return drm_panel_get_modes(ctx->panel);
+ return drm_panel_get_modes(ctx->panel, connector);
}
static const
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 13ade28a36a8..3709e5ace724 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -15,6 +15,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -25,6 +26,7 @@
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -47,6 +49,7 @@
/* Video Path */
#define VPCTRL0 0x0450
+#define VSDELAY GENMASK(31, 20)
#define OPXLFMT_RGB666 (0 << 8)
#define OPXLFMT_RGB888 (1 << 8)
#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */
@@ -54,9 +57,17 @@
#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */
#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */
#define HTIM01 0x0454
+#define HPW GENMASK(8, 0)
+#define HBPR GENMASK(24, 16)
#define HTIM02 0x0458
+#define HDISPR GENMASK(10, 0)
+#define HFPR GENMASK(24, 16)
#define VTIM01 0x045c
+#define VSPR GENMASK(7, 0)
+#define VBPR GENMASK(23, 16)
#define VTIM02 0x0460
+#define VFPR GENMASK(23, 16)
+#define VDISPR GENMASK(10, 0)
#define VFUEN0 0x0464
#define VFUEN BIT(0) /* Video Frame Timing Upload */
@@ -70,6 +81,13 @@
#define DP0_VIDSRC_DSI_RX (1 << 0)
#define DP0_VIDSRC_DPI_RX (2 << 0)
#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+#define SYSRSTENB 0x050c
+#define ENBI2C (1 << 0)
+#define ENBLCD0 (1 << 2)
+#define ENBBM (1 << 3)
+#define ENBDSIRX (1 << 4)
+#define ENBREG (1 << 5)
+#define ENBHDCP (1 << 8)
#define GPIOM 0x0540
#define GPIOC 0x0544
#define GPIOO 0x0548
@@ -99,19 +117,35 @@
/* Main Channel */
#define DP0_SECSAMPLE 0x0640
#define DP0_VIDSYNCDELAY 0x0644
+#define VID_SYNC_DLY GENMASK(15, 0)
+#define THRESH_DLY GENMASK(31, 16)
+
#define DP0_TOTALVAL 0x0648
+#define H_TOTAL GENMASK(15, 0)
+#define V_TOTAL GENMASK(31, 16)
#define DP0_STARTVAL 0x064c
+#define H_START GENMASK(15, 0)
+#define V_START GENMASK(31, 16)
#define DP0_ACTIVEVAL 0x0650
+#define H_ACT GENMASK(15, 0)
+#define V_ACT GENMASK(31, 16)
+
#define DP0_SYNCVAL 0x0654
+#define VS_WIDTH GENMASK(30, 16)
+#define HS_WIDTH GENMASK(14, 0)
#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+#define MAX_TU_SYMBOL GENMASK(28, 23)
+#define TU_SIZE GENMASK(21, 16)
#define BPC_6 (0 << 5)
#define BPC_8 (1 << 5)
/* AUX channel */
#define DP0_AUXCFG0 0x0660
+#define DP0_AUXCFG0_BSIZE GENMASK(11, 8)
+#define DP0_AUXCFG0_ADDR_ONLY BIT(4)
#define DP0_AUXCFG1 0x0664
#define AUX_RX_FILTER_EN BIT(16)
@@ -119,10 +153,10 @@
#define DP0_AUXWDATA(i) (0x066c + (i) * 4)
#define DP0_AUXRDATA(i) (0x067c + (i) * 4)
#define DP0_AUXSTATUS 0x068c
-#define AUX_STATUS_MASK 0xf0
-#define AUX_STATUS_SHIFT 4
-#define AUX_TIMEOUT BIT(1)
-#define AUX_BUSY BIT(0)
+#define AUX_BYTES GENMASK(15, 8)
+#define AUX_STATUS GENMASK(7, 4)
+#define AUX_TIMEOUT BIT(1)
+#define AUX_BUSY BIT(0)
#define DP0_AUXI2CADR 0x0698
/* Link Training */
@@ -183,13 +217,21 @@
/* Test & Debug */
#define TSTCTL 0x0a00
+#define COLOR_R GENMASK(31, 24)
+#define COLOR_G GENMASK(23, 16)
+#define COLOR_B GENMASK(15, 8)
+#define ENI2CFILTER BIT(4)
+#define COLOR_BAR_MODE GENMASK(1, 0)
+#define COLOR_BAR_MODE_BARS 2
#define PLL_DBG 0x0a04
static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
struct tc_edp_link {
- struct drm_dp_link base;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned int rate;
+ u8 num_lanes;
u8 assr;
bool scrambler_dis;
bool spread;
@@ -241,137 +283,131 @@ static inline struct tc_data *connector_to_tc(struct drm_connector *c)
return container_of(c, struct tc_data, connector);
}
-/* Simple macros to avoid repeated error checks */
-#define tc_write(reg, var) \
- do { \
- ret = regmap_write(tc->regmap, reg, var); \
- if (ret) \
- goto err; \
- } while (0)
-#define tc_read(reg, var) \
- do { \
- ret = regmap_read(tc->regmap, reg, var); \
- if (ret) \
- goto err; \
- } while (0)
-
-static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
+static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
unsigned int cond_mask,
unsigned int cond_value,
unsigned long sleep_us, u64 timeout_us)
{
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
unsigned int val;
- int ret;
- for (;;) {
- ret = regmap_read(map, addr, &val);
- if (ret)
- break;
- if ((val & cond_mask) == cond_value)
- break;
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
- ret = regmap_read(map, addr, &val);
- break;
- }
- if (sleep_us)
- usleep_range((sleep_us >> 2) + 1, sleep_us);
- }
- return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
+ return regmap_read_poll_timeout(tc->regmap, addr, val,
+ (val & cond_mask) == cond_value,
+ sleep_us, timeout_us);
}
-static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
+static int tc_aux_wait_busy(struct tc_data *tc)
{
- return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
- 1000, 1000 * timeout_ms);
+ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
}
-static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
+static int tc_aux_write_data(struct tc_data *tc, const void *data,
+ size_t size)
{
- int ret;
- u32 value;
+ u32 auxwdata[DP_AUX_MAX_PAYLOAD_BYTES / sizeof(u32)] = { 0 };
+ int ret, count = ALIGN(size, sizeof(u32));
- ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
- if (ret < 0)
+ memcpy(auxwdata, data, size);
+
+ ret = regmap_raw_write(tc->regmap, DP0_AUXWDATA(0), auxwdata, count);
+ if (ret)
return ret;
- if (value & AUX_BUSY) {
- dev_err(tc->dev, "aux busy!\n");
- return -EBUSY;
- }
+ return size;
+}
- if (value & AUX_TIMEOUT) {
- dev_err(tc->dev, "aux access timeout!\n");
- return -ETIMEDOUT;
- }
+static int tc_aux_read_data(struct tc_data *tc, void *data, size_t size)
+{
+ u32 auxrdata[DP_AUX_MAX_PAYLOAD_BYTES / sizeof(u32)];
+ int ret, count = ALIGN(size, sizeof(u32));
- *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
- return 0;
+ ret = regmap_raw_read(tc->regmap, DP0_AUXRDATA(0), auxrdata, count);
+ if (ret)
+ return ret;
+
+ memcpy(data, auxrdata, size);
+
+ return size;
+}
+
+static u32 tc_auxcfg0(struct drm_dp_aux_msg *msg, size_t size)
+{
+ u32 auxcfg0 = msg->request;
+
+ if (size)
+ auxcfg0 |= FIELD_PREP(DP0_AUXCFG0_BSIZE, size - 1);
+ else
+ auxcfg0 |= DP0_AUXCFG0_ADDR_ONLY;
+
+ return auxcfg0;
}
static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct tc_data *tc = aux_to_tc(aux);
- size_t size = min_t(size_t, 8, msg->size);
+ size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
u8 request = msg->request & ~DP_AUX_I2C_MOT;
- u8 *buf = msg->buffer;
- u32 tmp = 0;
- int i = 0;
+ u32 auxstatus;
int ret;
- if (size == 0)
- return 0;
-
- ret = tc_aux_wait_busy(tc, 100);
+ ret = tc_aux_wait_busy(tc);
if (ret)
- goto err;
+ return ret;
- if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
- /* Store data */
- while (i < size) {
- if (request == DP_AUX_NATIVE_WRITE)
- tmp = tmp | (buf[i] << (8 * (i & 0x3)));
- else
- tmp = (tmp << 8) | buf[i];
- i++;
- if (((i % 4) == 0) || (i == size)) {
- tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
- tmp = 0;
- }
+ switch (request) {
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ if (size) {
+ ret = tc_aux_write_data(tc, msg->buffer, size);
+ if (ret < 0)
+ return ret;
}
- } else if (request != DP_AUX_I2C_READ &&
- request != DP_AUX_NATIVE_READ) {
+ break;
+ default:
return -EINVAL;
}
/* Store address */
- tc_write(DP0_AUXADDR, msg->address);
+ ret = regmap_write(tc->regmap, DP0_AUXADDR, msg->address);
+ if (ret)
+ return ret;
/* Start transfer */
- tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
+ ret = regmap_write(tc->regmap, DP0_AUXCFG0, tc_auxcfg0(msg, size));
+ if (ret)
+ return ret;
- ret = tc_aux_wait_busy(tc, 100);
+ ret = tc_aux_wait_busy(tc);
if (ret)
- goto err;
+ return ret;
- ret = tc_aux_get_status(tc, &msg->reply);
+ ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &auxstatus);
if (ret)
- goto err;
+ return ret;
- if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
- /* Read data */
- while (i < size) {
- if ((i % 4) == 0)
- tc_read(DP0_AUXRDATA(i >> 2), &tmp);
- buf[i] = tmp & 0xff;
- tmp = tmp >> 8;
- i++;
- }
+ if (auxstatus & AUX_TIMEOUT)
+ return -ETIMEDOUT;
+ /*
+ * For some reason an address-only DP_AUX_I2C_WRITE (MOT) still
+ * reports 1 byte transferred in its status. To deal with that,
+ * we ignore the aux_bytes field if we know this was an
+ * address-only transfer.
+ */
+ if (size)
+ size = FIELD_GET(AUX_BYTES, auxstatus);
+ msg->reply = FIELD_GET(AUX_STATUS, auxstatus);
+
+ switch (request) {
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ if (size)
+ return tc_aux_read_data(tc, msg->buffer, size);
+ break;
}
return size;
-err:
- return ret;
}
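As an aside, a minimal standalone sketch of the <linux/bitfield.h> idiom this file is being converted to; the register name and layout below are made up, only the GENMASK()/FIELD_PREP()/FIELD_GET() usage mirrors the patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 32-bit register layout, not a real TC358767 register. */
#define EXAMPLE_BYTES	GENMASK(15, 8)
#define EXAMPLE_STATUS	GENMASK(7, 4)

static u32 example_pack(u8 bytes, u8 status)
{
        /* FIELD_PREP() shifts the value into the position named by the mask. */
        return FIELD_PREP(EXAMPLE_BYTES, bytes) |
               FIELD_PREP(EXAMPLE_STATUS, status);
}

static u8 example_unpack_status(u32 reg)
{
        /* FIELD_GET() masks and shifts the field back down. */
        return FIELD_GET(EXAMPLE_STATUS, reg);
}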
static const char * const training_pattern1_errors[] = {
@@ -404,17 +440,25 @@ static u32 tc_srcctrl(struct tc_data *tc)
reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
if (tc->link.spread)
reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
- if (tc->link.base.rate != 162000)
+ if (tc->link.rate != 162000)
reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
return reg;
}
-static void tc_wait_pll_lock(struct tc_data *tc)
+static int tc_pllupdate(struct tc_data *tc, unsigned int pllctrl)
{
+ int ret;
+
+ ret = regmap_write(tc->regmap, pllctrl, PLLUPDATE | PLLEN);
+ if (ret)
+ return ret;
+
/* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
usleep_range(3000, 6000);
+
+ return 0;
}
static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
@@ -428,6 +472,7 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
int ext_div[] = {1, 2, 3, 5, 7};
int best_pixelclock = 0;
int vco_hi = 0;
+ u32 pxl_pllparam;
dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
refclk);
@@ -497,24 +542,23 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
best_mul = 0;
/* Power up PLL and switch to bypass */
- tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
-
- tc_write(PXL_PLLPARAM,
- (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */
- (ext_div[best_pre] << 20) | /* External Pre-divider */
- (ext_div[best_post] << 16) | /* External Post-divider */
- IN_SEL_REFCLK | /* Use RefClk as PLL input */
- (best_div << 8) | /* Divider for PLL RefClk */
- (best_mul << 0)); /* Multiplier for PLL */
+ ret = regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP | PLLEN);
+ if (ret)
+ return ret;
- /* Force PLL parameter update and disable bypass */
- tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
+ pxl_pllparam = vco_hi << 24; /* For PLL VCO >= 300 MHz = 1 */
+ pxl_pllparam |= ext_div[best_pre] << 20; /* External Pre-divider */
+ pxl_pllparam |= ext_div[best_post] << 16; /* External Post-divider */
+ pxl_pllparam |= IN_SEL_REFCLK; /* Use RefClk as PLL input */
+ pxl_pllparam |= best_div << 8; /* Divider for PLL RefClk */
+ pxl_pllparam |= best_mul; /* Multiplier for PLL */
- tc_wait_pll_lock(tc);
+ ret = regmap_write(tc->regmap, PXL_PLLPARAM, pxl_pllparam);
+ if (ret)
+ return ret;
- return 0;
-err:
- return ret;
+ /* Force PLL parameter update and disable bypass */
+ return tc_pllupdate(tc, PXL_PLLCTRL);
}
static int tc_pxl_pll_dis(struct tc_data *tc)
@@ -525,7 +569,6 @@ static int tc_pxl_pll_dis(struct tc_data *tc)
static int tc_stream_clock_calc(struct tc_data *tc)
{
- int ret;
/*
* If the Stream clock and Link Symbol clock are
* asynchronous with each other, the value of M changes over
@@ -541,56 +584,63 @@ static int tc_stream_clock_calc(struct tc_data *tc)
* M/N = f_STRMCLK / f_LSCLK
*
*/
- tc_write(DP0_VIDMNGEN1, 32768);
-
- return 0;
-err:
- return ret;
+ return regmap_write(tc->regmap, DP0_VIDMNGEN1, 32768);
}
-static int tc_aux_link_setup(struct tc_data *tc)
+static int tc_set_syspllparam(struct tc_data *tc)
{
unsigned long rate;
- u32 value;
- int ret;
+ u32 pllparam = SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
rate = clk_get_rate(tc->refclk);
switch (rate) {
case 38400000:
- value = REF_FREQ_38M4;
+ pllparam |= REF_FREQ_38M4;
break;
case 26000000:
- value = REF_FREQ_26M;
+ pllparam |= REF_FREQ_26M;
break;
case 19200000:
- value = REF_FREQ_19M2;
+ pllparam |= REF_FREQ_19M2;
break;
case 13000000:
- value = REF_FREQ_13M;
+ pllparam |= REF_FREQ_13M;
break;
default:
dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
return -EINVAL;
}
- /* Setup DP-PHY / PLL */
- value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
- tc_write(SYS_PLLPARAM, value);
+ return regmap_write(tc->regmap, SYS_PLLPARAM, pllparam);
+}
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | PHY_A0_EN);
+static int tc_aux_link_setup(struct tc_data *tc)
+{
+ int ret;
+ u32 dp0_auxcfg1;
+ /* Setup DP-PHY / PLL */
+ ret = tc_set_syspllparam(tc);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL,
+ BGREN | PWR_SW_EN | PHY_A0_EN);
+ if (ret)
+ goto err;
/*
* Initially PLLs are in bypass. Force PLL parameter update,
* disable PLL bypass, enable PLL
*/
- tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP0_PLLCTRL);
+ if (ret)
+ goto err;
- tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP1_PLLCTRL);
+ if (ret)
+ goto err;
- ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
- 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
if (ret == -ETIMEDOUT) {
dev_err(tc->dev, "Timeout waiting for PHY to become ready");
return ret;
@@ -599,9 +649,13 @@ static int tc_aux_link_setup(struct tc_data *tc)
}
/* Setup AUX link */
- tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
- (0x06 << 8) | /* Aux Bit Period Calculator Threshold */
- (0x3f << 0)); /* Aux Response Timeout Timer */
+ dp0_auxcfg1 = AUX_RX_FILTER_EN;
+ dp0_auxcfg1 |= 0x06 << 8; /* Aux Bit Period Calculator Threshold */
+ dp0_auxcfg1 |= 0x3f << 0; /* Aux Response Timeout Timer */
+
+ ret = regmap_write(tc->regmap, DP0_AUXCFG1, dp0_auxcfg1);
+ if (ret)
+ goto err;
return 0;
err:
@@ -611,46 +665,57 @@ err:
static int tc_get_display_props(struct tc_data *tc)
{
+ u8 revision, num_lanes;
+ unsigned int rate;
int ret;
- /* temp buffer */
- u8 tmp[8];
+ u8 reg;
/* Read DP Rx Link Capability */
- ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd,
+ DP_RECEIVER_CAP_SIZE);
if (ret < 0)
goto err_dpcd_read;
- if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+
+ revision = tc->link.dpcd[DP_DPCD_REV];
+ rate = drm_dp_max_link_rate(tc->link.dpcd);
+ num_lanes = drm_dp_max_lane_count(tc->link.dpcd);
+
+ if (rate != 162000 && rate != 270000) {
dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
- tc->link.base.rate = 270000;
+ rate = 270000;
}
- if (tc->link.base.num_lanes > 2) {
+ tc->link.rate = rate;
+
+ if (num_lanes > 2) {
dev_dbg(tc->dev, "Falling to 2 lanes\n");
- tc->link.base.num_lanes = 2;
+ num_lanes = 2;
}
- ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ tc->link.num_lanes = num_lanes;
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
if (ret < 0)
goto err_dpcd_read;
- tc->link.spread = tmp[0] & DP_MAX_DOWNSPREAD_0_5;
+ tc->link.spread = reg & DP_MAX_DOWNSPREAD_0_5;
- ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, &reg);
if (ret < 0)
goto err_dpcd_read;
tc->link.scrambler_dis = false;
/* read assr */
- ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, &reg);
if (ret < 0)
goto err_dpcd_read;
- tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+ tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
- tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
- (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
- tc->link.base.num_lanes,
- (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
- "enhanced" : "non-enhanced");
+ revision >> 4, revision & 0x0f,
+ (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.num_lanes,
+ drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ "enhanced" : "default");
dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
tc->link.spread ? "0.5%" : "0.0%",
tc->link.scrambler_dis ? "disabled" : "enabled");
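For context, a minimal sketch (hypothetical helper, error handling trimmed) of the drm_dp_dpcd_read() pattern that replaces drm_dp_link_probe() above: cache the receiver capability block once, then derive rate, lane count and enhanced-framing support from it with the drm_dp_* helpers:

#include <drm/drm_dp_helper.h>

/* Hypothetical helper; "aux" is any initialised struct drm_dp_aux. */
static int example_read_sink_caps(struct drm_dp_aux *aux,
                                  unsigned int *rate, u8 *lanes)
{
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        ssize_t ret;

        /* One burst read caches the receiver capability block. */
        ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
        if (ret < 0)
                return ret;

        *rate = drm_dp_max_link_rate(dpcd);	/* in kHz, e.g. 162000 or 270000 */
        *lanes = drm_dp_max_lane_count(dpcd);	/* 1, 2 or 4 */

        /* Enhanced framing support is read from the same cached block. */
        return drm_dp_enhanced_frame_cap(dpcd) ? 1 : 0;
}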
@@ -677,6 +742,9 @@ static int tc_set_video_mode(struct tc_data *tc,
int upper_margin = mode->vtotal - mode->vsync_end;
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
+ u32 dp0_syncval;
+ u32 bits_per_pixel = 24;
+ u32 in_bw, out_bw;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
@@ -684,7 +752,10 @@ static int tc_set_video_mode(struct tc_data *tc,
* (output active video bandwidth in bytes))
* Must be less than tu_size.
*/
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
+ in_bw = mode->clock * bits_per_pixel / 8;
+ out_bw = tc->link.num_lanes * tc->link.rate;
+ max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
@@ -701,156 +772,193 @@ static int tc_set_video_mode(struct tc_data *tc,
* assume we do not need any delay when DPI is a source of
* sync signals
*/
- tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
- OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
- tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
- (ALIGN(hsync_len, 2) << 0)); /* Hsync */
- tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
- (ALIGN(mode->hdisplay, 2) << 0)); /* width */
- tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
- (vsync_len << 0)); /* Vsync */
- tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
- (mode->vdisplay << 0)); /* height */
- tc_write(VFUEN0, VFUEN); /* update settings */
+ ret = regmap_write(tc->regmap, VPCTRL0,
+ FIELD_PREP(VSDELAY, 0) |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, HTIM01,
+ FIELD_PREP(HBPR, ALIGN(left_margin, 2)) |
+ FIELD_PREP(HPW, ALIGN(hsync_len, 2)));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, HTIM02,
+ FIELD_PREP(HDISPR, ALIGN(mode->hdisplay, 2)) |
+ FIELD_PREP(HFPR, ALIGN(right_margin, 2)));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, VTIM01,
+ FIELD_PREP(VBPR, upper_margin) |
+ FIELD_PREP(VSPR, vsync_len));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, VTIM02,
+ FIELD_PREP(VFPR, lower_margin) |
+ FIELD_PREP(VDISPR, mode->vdisplay));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, VFUEN0, VFUEN); /* update settings */
+ if (ret)
+ return ret;
/* Test pattern settings */
- tc_write(TSTCTL,
- (120 << 24) | /* Red Color component value */
- (20 << 16) | /* Green Color component value */
- (99 << 8) | /* Blue Color component value */
- (1 << 4) | /* Enable I2C Filter */
- (2 << 0) | /* Color bar Mode */
- 0);
+ ret = regmap_write(tc->regmap, TSTCTL,
+ FIELD_PREP(COLOR_R, 120) |
+ FIELD_PREP(COLOR_G, 20) |
+ FIELD_PREP(COLOR_B, 99) |
+ ENI2CFILTER |
+ FIELD_PREP(COLOR_BAR_MODE, COLOR_BAR_MODE_BARS));
+ if (ret)
+ return ret;
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
- tc_write(DP0_VIDSYNCDELAY,
- (max_tu_symbol << 16) | /* thresh_dly */
- (vid_sync_dly << 0));
+ ret = regmap_write(tc->regmap, DP0_VIDSYNCDELAY,
+ FIELD_PREP(THRESH_DLY, max_tu_symbol) |
+ FIELD_PREP(VID_SYNC_DLY, vid_sync_dly));
- tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+ ret = regmap_write(tc->regmap, DP0_TOTALVAL,
+ FIELD_PREP(H_TOTAL, mode->htotal) |
+ FIELD_PREP(V_TOTAL, mode->vtotal));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, DP0_STARTVAL,
+ FIELD_PREP(H_START, left_margin + hsync_len) |
+ FIELD_PREP(V_START, upper_margin + vsync_len));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, DP0_ACTIVEVAL,
+ FIELD_PREP(V_ACT, mode->vdisplay) |
+ FIELD_PREP(H_ACT, mode->hdisplay));
+ if (ret)
+ return ret;
+
+ dp0_syncval = FIELD_PREP(VS_WIDTH, vsync_len) |
+ FIELD_PREP(HS_WIDTH, hsync_len);
- tc_write(DP0_STARTVAL,
- ((upper_margin + vsync_len) << 16) |
- ((left_margin + hsync_len) << 0));
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dp0_syncval |= SYNCVAL_VS_POL_ACTIVE_LOW;
- tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dp0_syncval |= SYNCVAL_HS_POL_ACTIVE_LOW;
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
- ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
- ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
+ ret = regmap_write(tc->regmap, DP0_SYNCVAL, dp0_syncval);
+ if (ret)
+ return ret;
- tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
- DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+ ret = regmap_write(tc->regmap, DPIPXLFMT,
+ VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 |
+ DPI_BPP_RGB888);
+ if (ret)
+ return ret;
- tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+ ret = regmap_write(tc->regmap, DP0_MISC,
+ FIELD_PREP(MAX_TU_SYMBOL, max_tu_symbol) |
+ FIELD_PREP(TU_SIZE, TU_SIZE_RECOMMENDED) |
BPC_8);
+ if (ret)
+ return ret;
return 0;
-err:
- return ret;
}
static int tc_wait_link_training(struct tc_data *tc)
{
- u32 timeout = 1000;
u32 value;
int ret;
- do {
- udelay(1);
- tc_read(DP0_LTSTAT, &value);
- } while ((!(value & LT_LOOPDONE)) && (--timeout));
-
- if (timeout == 0) {
+ ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
+ LT_LOOPDONE, 1, 1000);
+ if (ret) {
dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
- return -ETIMEDOUT;
+ return ret;
}
- return (value >> 8) & 0x7;
+ ret = regmap_read(tc->regmap, DP0_LTSTAT, &value);
+ if (ret)
+ return ret;
-err:
- return ret;
+ return (value >> 8) & 0x7;
}
static int tc_main_link_enable(struct tc_data *tc)
{
struct drm_dp_aux *aux = &tc->aux;
struct device *dev = tc->dev;
- unsigned int rate;
u32 dp_phy_ctrl;
- int timeout;
u32 value;
int ret;
- u8 tmp[8];
+ u8 tmp[DP_LINK_STATUS_SIZE];
dev_dbg(tc->dev, "link enable\n");
- tc_read(DP0CTL, &value);
- if (WARN_ON(value & DP_EN))
- tc_write(DP0CTL, 0);
+ ret = regmap_read(tc->regmap, DP0CTL, &value);
+ if (ret)
+ return ret;
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+ if (WARN_ON(value & DP_EN)) {
+ ret = regmap_write(tc->regmap, DP0CTL, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc));
+ if (ret)
+ return ret;
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
- tc_write(DP1_SRCCTRL,
+ ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ if (ret)
+ return ret;
- rate = clk_get_rate(tc->refclk);
- switch (rate) {
- case 38400000:
- value = REF_FREQ_38M4;
- break;
- case 26000000:
- value = REF_FREQ_26M;
- break;
- case 19200000:
- value = REF_FREQ_19M2;
- break;
- case 13000000:
- value = REF_FREQ_13M;
- break;
- default:
- return -EINVAL;
- }
- value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
- tc_write(SYS_PLLPARAM, value);
+ ret = tc_set_syspllparam(tc);
+ if (ret)
+ return ret;
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
+ if (ret)
+ return ret;
/* PLL setup */
- tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP0_PLLCTRL);
+ if (ret)
+ return ret;
- tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP1_PLLCTRL);
+ if (ret)
+ return ret;
/* Reset/Enable Main Links */
dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
usleep_range(100, 200);
dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
-
- timeout = 1000;
- do {
- tc_read(DP_PHY_CTRL, &value);
- udelay(1);
- } while ((!(value & PHY_RDY)) && (--timeout));
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
- if (timeout == 0) {
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ if (ret) {
dev_err(dev, "timeout waiting for phy become ready");
- return -ETIMEDOUT;
+ return ret;
}
/* Set misc: 8 bits per color */
ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
if (ret)
- goto err;
+ return ret;
/*
* ASSR mode
@@ -881,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc)
}
/* Setup Link & DPRx Config for Training */
- ret = drm_dp_link_configure(aux, &tc->link.base);
+ tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate);
+ tmp[1] = tc->link.num_lanes;
+
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
+ tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -903,53 +1017,70 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Clock-Recovery */
/* Set DPCD 0x102 for Training Pattern 1 */
- tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE |
- DP_TRAINING_PATTERN_1);
+ ret = regmap_write(tc->regmap, DP0_SNKLTCTRL,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
- tc_write(DP0_LTLOOPCTRL,
- (15 << 28) | /* Defer Iteration Count */
- (15 << 24) | /* Loop Iteration Count */
- (0xd << 0)); /* Loop Timer Delay */
+ ret = regmap_write(tc->regmap, DP0_LTLOOPCTRL,
+ (15 << 28) | /* Defer Iteration Count */
+ (15 << 24) | /* Loop Iteration Count */
+ (0xd << 0)); /* Loop Timer Delay */
+ if (ret)
+ return ret;
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
- DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP1);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT |
+ DP0_SRCCTRL_TP1);
+ if (ret)
+ return ret;
/* Enable DP0 to start Link Training */
- tc_write(DP0CTL,
- ((tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
- DP_EN);
+ ret = regmap_write(tc->regmap, DP0CTL,
+ (drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ EF_EN : 0) | DP_EN);
+ if (ret)
+ return ret;
/* wait */
+
ret = tc_wait_link_training(tc);
if (ret < 0)
- goto err;
+ return ret;
if (ret) {
dev_err(tc->dev, "Link training phase 1 failed: %s\n",
training_pattern1_errors[ret]);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/* Channel Equalization */
/* Set DPCD 0x102 for Training Pattern 2 */
- tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE |
- DP_TRAINING_PATTERN_2);
+ ret = regmap_write(tc->regmap, DP0_SNKLTCTRL,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
+ if (ret)
+ return ret;
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
- DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP2);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT |
+ DP0_SRCCTRL_TP2);
+ if (ret)
+ return ret;
/* wait */
ret = tc_wait_link_training(tc);
if (ret < 0)
- goto err;
+ return ret;
if (ret) {
dev_err(tc->dev, "Link training phase 2 failed: %s\n",
training_pattern2_errors[ret]);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/*
@@ -962,7 +1093,10 @@ static int tc_main_link_enable(struct tc_data *tc)
*/
/* Clear Training Pattern, set AutoCorrect Mode = 1 */
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) |
+ DP0_SRCCTRL_AUTOCORRECT);
+ if (ret)
+ return ret;
/* Clear DPCD 0x102 */
/* Note: Can Not use DP0_SNKLTCTRL (0x06E4) short cut */
@@ -985,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = -ENODEV;
}
- if (tc->link.base.num_lanes == 2) {
+ if (tc->link.num_lanes == 2) {
value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS;
if (value != DP_CHANNEL_EQ_BITS) {
@@ -1006,7 +1140,7 @@ static int tc_main_link_enable(struct tc_data *tc)
dev_err(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[3]);
dev_err(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[4]);
dev_err(dev, "0x0207 ADJUST_REQUEST_LANE2_3: 0x%02x\n", tmp[5]);
- goto err;
+ return ret;
}
return 0;
@@ -1015,7 +1149,6 @@ err_dpcd_read:
return ret;
err_dpcd_write:
dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
-err:
return ret;
}
@@ -1025,12 +1158,11 @@ static int tc_main_link_disable(struct tc_data *tc)
dev_dbg(tc->dev, "link disable\n");
- tc_write(DP0_SRCCTRL, 0);
- tc_write(DP0CTL, 0);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL, 0);
+ if (ret)
+ return ret;
- return 0;
-err:
- return ret;
+ return regmap_write(tc->regmap, DP0CTL, 0);
}
static int tc_stream_enable(struct tc_data *tc)
@@ -1045,7 +1177,7 @@ static int tc_stream_enable(struct tc_data *tc)
ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
1000 * tc->mode.clock);
if (ret)
- goto err;
+ return ret;
}
ret = tc_set_video_mode(tc, &tc->mode);
@@ -1058,9 +1190,11 @@ static int tc_stream_enable(struct tc_data *tc)
return ret;
value = VID_MN_GEN | DP_EN;
- if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
value |= EF_EN;
- tc_write(DP0CTL, value);
+ ret = regmap_write(tc->regmap, DP0CTL, value);
+ if (ret)
+ return ret;
/*
* VID_EN assertion should be delayed by at least N * LSCLK
* cycles from the time VID_MN_GEN is enabled in order to
@@ -1070,36 +1204,35 @@ static int tc_stream_enable(struct tc_data *tc)
*/
usleep_range(500, 1000);
value |= VID_EN;
- tc_write(DP0CTL, value);
+ ret = regmap_write(tc->regmap, DP0CTL, value);
+ if (ret)
+ return ret;
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
else
value |= DP0_VIDSRC_DPI_RX;
- tc_write(SYSCTRL, value);
+ ret = regmap_write(tc->regmap, SYSCTRL, value);
+ if (ret)
+ return ret;
return 0;
-err:
- return ret;
}
static int tc_stream_disable(struct tc_data *tc)
{
int ret;
- u32 val;
dev_dbg(tc->dev, "disable video stream\n");
- tc_read(DP0CTL, &val);
- val &= ~VID_EN;
- tc_write(DP0CTL, val);
+ ret = regmap_update_bits(tc->regmap, DP0CTL, VID_EN, 0);
+ if (ret)
+ return ret;
tc_pxl_pll_dis(tc);
return 0;
-err:
- return ret;
}
static void tc_bridge_pre_enable(struct drm_bridge *bridge)
@@ -1183,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
return MODE_CLOCK_HIGH;
req = mode->clock * bits_per_pixel / 8;
- avail = tc->link.base.num_lanes * tc->link.base.rate;
+ avail = tc->link.num_lanes * tc->link.rate;
if (req > avail)
return MODE_BAD;
@@ -1204,7 +1337,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
{
struct tc_data *tc = connector_to_tc(connector);
struct edid *edid;
- unsigned int count;
+ int count;
int ret;
ret = tc_get_display_props(tc);
@@ -1213,11 +1346,9 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return 0;
}
- if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
- count = tc->panel->funcs->get_modes(tc->panel);
- if (count > 0)
- return count;
- }
+ count = drm_panel_get_modes(tc->panel, connector);
+ if (count > 0)
+ return count;
edid = drm_get_edid(connector, &tc->aux.ddc);
@@ -1251,7 +1382,9 @@ static enum drm_connector_status tc_connector_detect(struct drm_connector *conne
return connector_status_unknown;
}
- tc_read(GPIOI, &val);
+ ret = regmap_read(tc->regmap, GPIOI, &val);
+ if (ret)
+ return connector_status_unknown;
conn = val & BIT(tc->hpd_pin);
@@ -1259,9 +1392,6 @@ static enum drm_connector_status tc_connector_detect(struct drm_connector *conne
return connector_status_connected;
else
return connector_status_disconnected;
-
-err:
- return connector_status_unknown;
}
static const struct drm_connector_funcs tc_connector_funcs = {
@@ -1497,6 +1627,22 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+ if (!tc->reset_gpio) {
+ /*
+ * If the reset pin isn't present, do a software reset. It isn't
+ * as thorough as the hardware reset, as we can't reset the I2C
+ * communication block for obvious reasons, but it's getting the
+ * chip into a defined state.
+ */
+ regmap_update_bits(tc->regmap, SYSRSTENB,
+ ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP,
+ 0);
+ regmap_update_bits(tc->regmap, SYSRSTENB,
+ ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP,
+ ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP);
+ usleep_range(5000, 10000);
+ }
+
if (tc->hpd_pin >= 0) {
u32 lcnt_reg = tc->hpd_pin == 0 ? INT_GP0_LCNT : INT_GP1_LCNT;
u32 h_lc = INT_GPIO_H(tc->hpd_pin) | INT_GPIO_LC(tc->hpd_pin);
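To make the new max_tu_symbol computation in tc_set_video_mode() concrete, a rough worked example with assumed numbers (1080p60, 24 bpp, two 2.7 Gbps lanes), not taken from the patch:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static unsigned int example_max_tu_symbol(void)
{
        /*
         * Assumed figures, not from the patch: 1920x1080@60 with a
         * 148500 kHz pixel clock, 24 bpp, 2 lanes at 2.7 Gbps.
         */
        unsigned int in_bw = 148500 * 24 / 8;	/* 445500 kByte/s into the bridge */
        unsigned int out_bw = 2 * 270000;	/* 540000 kByte/s on the DP link */

        /* DIV_ROUND_UP(445500 * 63, 540000) = 52, below TU_SIZE_RECOMMENDED (63). */
        return DIV_ROUND_UP(in_bw * 63, out_bw);
}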
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index b77a52d05061..9a2dd986afa5 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * datasheet: http://www.ti.com/lit/ds/symlink/sn65dsi86.pdf
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
@@ -15,6 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -90,6 +93,7 @@ struct ti_sn_bridge {
struct drm_dp_aux aux;
struct drm_bridge bridge;
struct drm_connector connector;
+ struct dentry *debugfs;
struct device_node *host_node;
struct mipi_dsi_device *dsi;
struct clk *refclk;
@@ -155,6 +159,42 @@ static const struct dev_pm_ops ti_sn_bridge_pm_ops = {
SET_RUNTIME_PM_OPS(ti_sn_bridge_suspend, ti_sn_bridge_resume, NULL)
};
+static int status_show(struct seq_file *s, void *data)
+{
+ struct ti_sn_bridge *pdata = s->private;
+ unsigned int reg, val;
+
+ seq_puts(s, "STATUS REGISTERS:\n");
+
+ pm_runtime_get_sync(pdata->dev);
+
+ /* IRQ Status Registers, see Table 31 in datasheet */
+ for (reg = 0xf0; reg <= 0xf8; reg++) {
+ regmap_read(pdata->regmap, reg, &val);
+ seq_printf(s, "[0x%02x] = 0x%08x\n", reg, val);
+ }
+
+ pm_runtime_put(pdata->dev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(status);
+
+static void ti_sn_debugfs_init(struct ti_sn_bridge *pdata)
+{
+ pdata->debugfs = debugfs_create_dir(dev_name(pdata->dev), NULL);
+
+ debugfs_create_file("status", 0600, pdata->debugfs, pdata,
+ &status_fops);
+}
+
+static void ti_sn_debugfs_remove(struct ti_sn_bridge *pdata)
+{
+ debugfs_remove_recursive(pdata->debugfs);
+ pdata->debugfs = NULL;
+}
+
/* Connector funcs */
static struct ti_sn_bridge *
connector_to_ti_sn_bridge(struct drm_connector *connector)
@@ -166,7 +206,7 @@ static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
{
struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
- return drm_panel_get_modes(pdata->panel);
+ return drm_panel_get_modes(pdata->panel, connector);
}
static enum drm_mode_status
@@ -275,8 +315,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
/* TODO: setting to 4 lanes always for now */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
/* check if continuous dsi clock is required or not */
pm_runtime_get_sync(pdata->dev);
@@ -731,6 +770,8 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
drm_bridge_add(&pdata->bridge);
+ ti_sn_debugfs_init(pdata);
+
return 0;
}
@@ -741,6 +782,8 @@ static int ti_sn_bridge_remove(struct i2c_client *client)
if (!pdata)
return -EINVAL;
+ ti_sn_debugfs_remove(pdata);
+
of_node_put(pdata->host_node);
pm_runtime_disable(pdata->dev);
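A minimal sketch (hypothetical foo names) of the DEFINE_SHOW_ATTRIBUTE() pattern behind the new status debugfs file above: one seq_file show callback, the generated foo_fops, and a debugfs_create_file() registration:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical example; the foo names are not from this patch. */
static int foo_value;

static int foo_show(struct seq_file *s, void *data)
{
        /* s->private is the pointer handed to debugfs_create_file(). */
        seq_printf(s, "value: %d\n", *(int *)s->private);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* generates foo_open() and foo_fops */

static void foo_debugfs_init(struct dentry *parent)
{
        debugfs_create_file("foo", 0400, parent, &foo_value, &foo_fops);
}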
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index dbf35c7bc85e..6f6d6d1e60ae 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -134,8 +135,10 @@ static int tfp410_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
- ret = drm_connector_init(bridge->dev, &dvi->connector,
- &tfp410_con_funcs, dvi->connector_type);
+ ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
+ &tfp410_con_funcs,
+ dvi->connector_type,
+ dvi->ddc);
if (ret) {
dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
return ret;
@@ -282,8 +285,8 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
else
dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
- dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
- "hpd-gpios", 0, GPIOD_IN, "hpd");
+ dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
+ "hpd", 0, GPIOD_IN, "hpd");
if (IS_ERR(dvi->hpd)) {
ret = PTR_ERR(dvi->hpd);
dvi->hpd = NULL;
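A small sketch (placeholder names, assuming a valid struct i2c_adapter) of drm_connector_init_with_ddc(), which behaves like drm_connector_init() but additionally records which i2c bus carries the connector's DDC/EDID traffic:

#include <linux/i2c.h>
#include <drm/drm_connector.h>

/* Hypothetical attach path; funcs and ddc are placeholders. */
static int example_init_connector(struct drm_device *drm,
                                  struct drm_connector *conn,
                                  const struct drm_connector_funcs *funcs,
                                  struct i2c_adapter *ddc)
{
        /* Same as drm_connector_init(), plus the DDC i2c adapter. */
        return drm_connector_init_with_ddc(drm, conn, funcs,
                                           DRM_MODE_CONNECTOR_DVID, ddc);
}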
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index be4ea370ba31..248c9f765c45 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
/* cirrus (simple) display pipe */
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
@@ -510,10 +510,10 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
-DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
deleted file mode 100644
index 1f73916e528e..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#ifndef __CIRRUS_DRV_H__
-#define __CIRRUS_DRV_H__
-
-#include <video/vga.h>
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
-#include <drm/drm_gem.h>
-
-#define DRIVER_AUTHOR "Matthew Garrett"
-
-#define DRIVER_NAME "cirrus"
-#define DRIVER_DESC "qemu Cirrus emulation"
-#define DRIVER_DATE "20110418"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-#define CIRRUSFB_CONN_LIMIT 1
-
-#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
-#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
-
-#define SEQ_INDEX 4
-#define SEQ_DATA 5
-
-#define WREG_SEQ(reg, v) \
- do { \
- WREG8(SEQ_INDEX, reg); \
- WREG8(SEQ_DATA, v); \
- } while (0) \
-
-#define CRT_INDEX 0x14
-#define CRT_DATA 0x15
-
-#define WREG_CRT(reg, v) \
- do { \
- WREG8(CRT_INDEX, reg); \
- WREG8(CRT_DATA, v); \
- } while (0) \
-
-#define GFX_INDEX 0xe
-#define GFX_DATA 0xf
-
-#define WREG_GFX(reg, v) \
- do { \
- WREG8(GFX_INDEX, reg); \
- WREG8(GFX_DATA, v); \
- } while (0) \
-
-/*
- * Cirrus has a "hidden" DAC register that can be accessed by writing to
- * the pixel mask register to reset the state, then reading from the register
- * four times. The next write will then pass to the DAC
- */
-#define VGA_DAC_MASK 0x6
-
-#define WREG_HDR(v) \
- do { \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- WREG8(VGA_DAC_MASK, v); \
- } while (0) \
-
-
-#define CIRRUS_MAX_FB_HEIGHT 4096
-#define CIRRUS_MAX_FB_WIDTH 4096
-
-#define CIRRUS_DPMS_CLEARED (-1)
-
-#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
-#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-
-struct cirrus_crtc {
- struct drm_crtc base;
- int last_dpms;
- bool enabled;
-};
-
-struct cirrus_fbdev;
-struct cirrus_mode_info {
- struct cirrus_crtc *crtc;
- /* pointer to fbdev info structure */
- struct cirrus_fbdev *gfbdev;
-};
-
-struct cirrus_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-struct cirrus_connector {
- struct drm_connector base;
-};
-
-struct cirrus_mc {
- resource_size_t vram_size;
- resource_size_t vram_base;
-};
-
-struct cirrus_device {
- struct drm_device *dev;
- unsigned long flags;
-
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
- void __iomem *rmmio;
-
- struct cirrus_mc mc;
- struct cirrus_mode_info mode_info;
-
- int num_crtc;
- int fb_mtrr;
-
- struct {
- struct ttm_bo_device bdev;
- } ttm;
- bool mm_inited;
-};
-
-
-struct cirrus_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct drm_framebuffer *gfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
-struct cirrus_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
-
-static inline struct cirrus_bo *
-cirrus_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct cirrus_bo, bo);
-}
-
-
-#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-
- /* cirrus_main.c */
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags);
-void cirrus_device_fini(struct cirrus_device *cdev);
-void cirrus_gem_free_object(struct drm_gem_object *obj);
-int cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch);
-
- /* cirrus_display.c */
-int cirrus_modeset_init(struct cirrus_device *cdev);
-void cirrus_modeset_fini(struct cirrus_device *cdev);
-
- /* cirrus_fbdev.c */
-int cirrus_fbdev_init(struct cirrus_device *cdev);
-void cirrus_fbdev_fini(struct cirrus_device *cdev);
-
-
-
- /* cirrus_irq.c */
-void cirrus_driver_irq_preinstall(struct drm_device *dev);
-int cirrus_driver_irq_postinstall(struct drm_device *dev);
-void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
-
- /* cirrus_kms.c */
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
-void cirrus_driver_unload(struct drm_device *dev);
-extern struct drm_ioctl_desc cirrus_ioctls[];
-extern int cirrus_max_ioctl;
-
-int cirrus_mm_init(struct cirrus_device *cirrus);
-void cirrus_mm_fini(struct cirrus_device *cirrus);
-void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
-int cirrus_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct cirrus_bo **pcirrusbo);
-int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
-
-static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-int cirrus_bo_push_sysram(struct cirrus_bo *bo);
-int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
-
-extern int cirrus_bpp;
-
-#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 117b8ee98243..4c7ad46fdd21 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
@@ -212,7 +212,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
if (!entry)
return -ENOMEM;
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
type = (u32) request->type;
memory = agp_allocate_memory(dev->agp->bridge, pages, type);
if (!memory) {
@@ -325,7 +325,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
entry = drm_agp_lookup_entry(dev, request->handle);
if (!entry || entry->bound)
return -EINVAL;
- page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
retcode = drm_bind_agp(entry->memory, page);
if (retcode)
return retcode;
@@ -465,46 +465,3 @@ void drm_legacy_agp_clear(struct drm_device *dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
-
-/**
- * Binds a collection of pages into AGP memory at the given offset, returning
- * the AGP memory structure containing them.
- *
- * No reference is held on the pages during this time -- it is up to the
- * caller to handle that.
- */
-struct agp_memory *
-drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- u32 type)
-{
- struct agp_memory *mem;
- int ret, i;
-
- DRM_DEBUG("\n");
-
- mem = agp_allocate_memory(dev->agp->bridge, num_pages,
- type);
- if (mem == NULL) {
- DRM_ERROR("Failed to allocate memory for %ld pages\n",
- num_pages);
- return NULL;
- }
-
- for (i = 0; i < num_pages; i++)
- mem->pages[i] = pages[i];
- mem->page_count = num_pages;
-
- mem->is_flushed = true;
- ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
- if (ret != 0) {
- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
- agp_free_memory(mem);
- return NULL;
- }
-
- return mem;
-}
-EXPORT_SYMBOL(drm_agp_bind_pages);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 419381abbdd1..d33691512a8e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -251,7 +251,7 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
* @ref: This atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
- * per-object state for planes, crtcs and connectors.
+ * per-object state for planes, CRTCs and connectors.
*/
void __drm_atomic_state_free(struct kref *ref)
{
@@ -272,12 +272,12 @@ void __drm_atomic_state_free(struct kref *ref)
EXPORT_SYMBOL(__drm_atomic_state_free);
/**
- * drm_atomic_get_crtc_state - get crtc state
+ * drm_atomic_get_crtc_state - get CRTC state
* @state: global atomic state object
- * @crtc: crtc to get state object for
+ * @crtc: CRTC to get state object for
*
- * This function returns the crtc state for the given crtc, allocating it if
- * needed. It will also grab the relevant crtc lock to make sure that the state
+ * This function returns the CRTC state for the given CRTC, allocating it if
+ * needed. It will also grab the relevant CRTC lock to make sure that the state
* is consistent.
*
* Returns:
@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
return -EINVAL;
}
- if (writeback_job->out_fence && !writeback_job->fb) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
- connector->base.id, connector->name);
- return -EINVAL;
+ if (!writeback_job->fb) {
+ if (writeback_job->out_fence) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ drm_writeback_cleanup_job(writeback_job);
+ state->writeback_job = NULL;
}
return 0;
@@ -683,10 +688,12 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
* associated state struct &drm_private_state.
*
* Similar to userspace-exposed objects, private state structures can be
- * acquired by calling drm_atomic_get_private_obj_state(). Since this function
- * does not take care of locking, drivers should wrap it for each type of
- * private state object they have with the required call to drm_modeset_lock()
- * for the corresponding &drm_modeset_lock.
+ * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
+ * of locking, hence drivers should not need to call drm_modeset_lock()
+ * directly. The sequencing of the actual hardware state commit is not handled;
+ * drivers might need to keep track of struct drm_crtc_commit within their
+ * subclassed structure of &drm_private_state, e.g. similar to
+ * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
*
* All private state structures contained in a &drm_atomic_state update can be
* iterated using for_each_oldnew_private_obj_in_state(),
@@ -1011,14 +1018,14 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
- * drm_atomic_add_affected_connectors - add connectors for crtc
+ * drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
*
* This function walks the current configuration and adds all connectors
* currently using @crtc to the atomic configuration @state. Note that this
* function must acquire the connection mutex. This can potentially cause
- * unneeded seralization if the update is just for the planes on one crtc. Hence
+ * unneeded serialization if the update is just for the planes on one CRTC. Hence
* drivers and helpers should only call this when really needed (e.g. when a
* full modeset needs to happen due to some change).
*
@@ -1071,9 +1078,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
/**
- * drm_atomic_add_affected_planes - add planes for crtc
+ * drm_atomic_add_affected_planes - add planes for CRTC
* @state: atomic state
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
*
* This function walks the current configuration and adds all planes
* currently used by @crtc to the atomic configuration @state. This is useful
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index aa16ea17ff9b..4511c2e07bb9 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -26,10 +26,12 @@
*/
#include <linux/dma-fence.h>
+#include <linux/ktime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_plane_helper.h>
@@ -96,17 +98,6 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
-/*
- * For connectors that support multiple encoders, either the
- * .atomic_best_encoder() or .best_encoder() operation must be implemented.
- */
-static struct drm_encoder *
-pick_single_encoder_for_connector(struct drm_connector *connector)
-{
- WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
@@ -134,7 +125,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -159,8 +150,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
* is not set, an error is returned. Userspace can provide a solution
* through the atomic ioctl.
*
- * If the flag is set conflicting connectors are removed from the crtc
- * and the crtc is disabled if no encoder is left. This preserves
+ * If the flag is set conflicting connectors are removed from the CRTC
+ * and the CRTC is disabled if no encoder is left. This preserves
* compatibility with the legacy set_config behavior.
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
@@ -229,7 +220,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc = conn_state->connector->state->crtc;
/* A NULL crtc is an error here because we should have
- * duplicated a NULL best_encoder when crtc was NULL.
+ * duplicated a NULL best_encoder when crtc was NULL.
* As an exception restoring duplicated atomic state
* during resume is allowed, so don't warn when
* best_encoder is equal to encoder we intend to set.
@@ -358,7 +349,7 @@ update_connector_routing(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -428,6 +419,7 @@ mode_fixup(struct drm_atomic_state *state)
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
@@ -444,8 +436,10 @@ mode_fixup(struct drm_atomic_state *state)
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
- ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ ret = drm_bridge_chain_mode_fixup(bridge,
+ &new_crtc_state->mode,
+ &new_crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
@@ -481,7 +475,7 @@ mode_fixup(struct drm_atomic_state *state)
continue;
funcs = crtc->helper_private;
- if (!funcs->mode_fixup)
+ if (!funcs || !funcs->mode_fixup)
continue;
ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
@@ -501,6 +495,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
+ struct drm_bridge *bridge;
enum drm_mode_status ret;
ret = drm_encoder_mode_valid(encoder, mode);
@@ -510,7 +505,8 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
return ret;
}
- ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ ret = drm_bridge_chain_mode_valid(bridge, mode);
if (ret != MODE_OK) {
DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
return ret;
@@ -565,27 +561,27 @@ mode_valid(struct drm_atomic_state *state)
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
- * This does all the crtc and connector related computations for an atomic
+ * This does all the CRTC and connector related computations for an atomic
* update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
*
* 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
* 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
- * 3. If it's determined a modeset is needed then all connectors on the affected crtc
- * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
+ * 3. If it's determined a modeset is needed then all connectors on the affected
+ * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
* 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
* &drm_crtc_helper_funcs.mode_valid are called on the affected components.
* 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
* 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
- * This function is only called when the encoder will be part of a configured crtc,
+ * This function is only called when the encoder will be part of a configured CRTC,
* it must not be used for implementing connector property validation.
* If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
* instead.
- * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
+ * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
*
* &drm_crtc_state.mode_changed is set when the input mode is changed.
* &drm_crtc_state.connectors_changed is set when a connector is added or
- * removed from the crtc. &drm_crtc_state.active_changed is set when
+ * removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
* See also: drm_atomic_crtc_needs_modeset()
*
@@ -696,7 +692,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
/*
* After all the routing has been prepared we need to add in any
- * connector which is itself unchanged, but whose crtc changes its
+ * connector which is itself unchanged, but whose CRTC changes its
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
@@ -745,13 +741,13 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
* @plane_state: plane state to check
- * @crtc_state: crtc state to check
+ * @crtc_state: CRTC state to check
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
- * doesn't cover the entire crtc? This will generally
+ * doesn't cover the entire CRTC? This will generally
* only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
+ * @can_update_disabled: can the plane be updated while the CRTC
* is disabled?
*
* Checks that a desired plane update is valid, and updates various
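
As a usage sketch (the foo_ names are hypothetical), a plane's &drm_plane_helper_funcs.atomic_check hook typically forwards to this helper with its scaling limits:

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/*
	 * No scaling, the plane may be positioned freely (can_position),
	 * but no updates while the CRTC is off (can_update_disabled).
	 */
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   true, false);
}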
@@ -848,7 +844,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
* &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
* hooks provided by the driver.
*
- * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
+ * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
* updated planes.
*
* RETURNS:
@@ -912,7 +908,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
- * Only crtcs and planes have check callbacks, so for any additional (global)
+ * Only CRTCs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
* Drivers without such needs can directly use this as their
* &drm_mode_config_funcs.atomic_check callback.
@@ -965,14 +961,14 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
struct drm_crtc_state *new_state)
{
/*
+ * No new_state means the CRTC is off, so the only criterion is whether
+ * No new_state means the CRTC is off, so the only criteria is whether
* it's currently active or in self refresh mode.
*/
if (!new_state)
return drm_atomic_crtc_effectively_active(old_state);
/*
- * We need to run through the crtc_funcs->disable() function if the crtc
+ * We need to run through the crtc_funcs->disable() function if the CRTC
* is currently on, if it's transitioning to self refresh mode, or if
* it's in self refresh mode and needs to be fully disabled.
*/
@@ -993,6 +989,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
/* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state. */
@@ -1029,7 +1026,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
- drm_atomic_bridge_disable(encoder->bridge, old_state);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_disable(bridge, old_state);
/* Right function depends upon target state. */
if (funcs) {
@@ -1043,7 +1041,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
- drm_atomic_bridge_post_disable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_post_disable(bridge, old_state);
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1089,7 +1087,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* @old_state: atomic state object with old state structures
*
* This function updates all the various legacy modeset state pointers in
- * connectors, encoders and crtcs. It also updates the timestamping constants
+ * connectors, encoders and CRTCs. It also updates the timestamping constants
* used for precise vblank timestamps by calling
* drm_calc_timestamping_constants().
*
@@ -1197,6 +1195,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
+ struct drm_bridge *bridge;
if (!new_conn_state->best_encoder)
continue;
@@ -1224,7 +1223,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->mode_set(encoder, mode, adjusted_mode);
}
- drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
}
}
@@ -1236,7 +1236,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
*
- * For compatibility with legacy crtc helpers this should be called before
+ * For compatibility with legacy CRTC helpers this should be called before
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
@@ -1282,7 +1282,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
*
- * For compatibility with legacy crtc helpers this should be called after
+ * For compatibility with legacy CRTC helpers this should be called after
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
@@ -1323,6 +1323,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
if (!new_conn_state->best_encoder)
continue;
@@ -1341,7 +1342,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
- drm_atomic_bridge_pre_enable(encoder->bridge, old_state);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, old_state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1352,7 +1354,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(encoder);
}
- drm_atomic_bridge_enable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_enable(bridge, old_state);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
@@ -1412,12 +1414,12 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
- * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
- * Helper to, after atomic commit, wait for vblanks on all effected
- * crtcs (ie. before cleaning up old framebuffers using
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * CRTCs (ie. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
* framebuffers have actually changed to optimize for the legacy cursor and
* plane update use-case.
@@ -1476,10 +1478,10 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
- * Helper to, after atomic commit, wait for page flips on all effected
+ * Helper to, after atomic commit, wait for page flips on all affected
* crtcs (ie. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). Compared to
- * drm_atomic_helper_wait_for_vblanks() this waits for the completion of on all
+ * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
* CRTCs, assuming that cursors-only updates are signalling their completion
* immediately (or using a different path).
*
@@ -1580,18 +1582,50 @@ static void commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
const struct drm_mode_config_helper_funcs *funcs;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ ktime_t start;
+ s64 commit_time_ms;
+ unsigned int i, new_self_refresh_mask = 0;
funcs = dev->mode_config.helper_private;
+ /*
+ * We're measuring the _entire_ commit, so the time will vary depending
+ * on how many fences and objects are involved. For the purposes of self
+ * refresh, this is desirable since it'll give us an idea of how
+ * congested things are. This will inform our decision on how often we
+ * should enter self refresh after idle.
+ *
+ * These times will be averaged out in the self refresh helpers to avoid
+ * overreacting to a single outlier frame.
+ */
+ start = ktime_get();
+
drm_atomic_helper_wait_for_fences(dev, old_state, false);
drm_atomic_helper_wait_for_dependencies(old_state);
+ /*
+ * We cannot safely access new_crtc_state after
+ * drm_atomic_helper_commit_hw_done(), so figure out which CRTCs have
+ * self-refresh active beforehand:
+ */
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+ if (new_crtc_state->self_refresh_active)
+ new_self_refresh_mask |= BIT(i);
+
if (funcs && funcs->atomic_commit_tail)
funcs->atomic_commit_tail(old_state);
else
drm_atomic_helper_commit_tail(old_state);
+ commit_time_ms = ktime_ms_delta(ktime_get(), start);
+ if (commit_time_ms > 0)
+ drm_self_refresh_helper_update_avg_times(old_state,
+ (unsigned long)commit_time_ms,
+ new_self_refresh_mask);
+
drm_atomic_helper_commit_cleanup_done(old_state);
drm_atomic_state_put(old_state);
@@ -1811,17 +1845,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
/**
* DOC: implementing nonblocking commit
*
- * Nonblocking atomic commits have to be implemented in the following sequence:
+ * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
+ * different operations against one another. Locks, especially struct
+ * &drm_modeset_lock, should not be held in worker threads or any other
+ * asynchronous context used to commit the hardware state.
+ *
+ * drm_atomic_helper_commit() implements the recommended sequence for
+ * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
*
- * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
- * which commit needs to call which can fail, so we want to run it first and
+ * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
+ * need to propagate out of memory/VRAM errors to userspace, it must be called
* synchronously.
*
* 2. Synchronize with any outstanding nonblocking commit worker threads which
- * might be affected the new state update. This can be done by either cancelling
- * or flushing the work items, depending upon whether the driver can deal with
- * cancelled updates. Note that it is important to ensure that the framebuffer
- * cleanup is still done when cancelling.
+ * might be affected by the new state update. This is handled by
+ * drm_atomic_helper_setup_commit().
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
@@ -1832,21 +1870,29 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* must be done as one global operation, and enabling or disabling a CRTC can
* take a long time. But even that is not required.
*
+ * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
+ * against all CRTCs therein. Therefore, for atomic state updates which only flip
+ * planes, the driver must not get the struct &drm_crtc_state of unrelated CRTCs
+ * in its atomic check code: this would prevent committing atomic updates to
+ * multiple CRTCs in parallel. In general, adding additional state structures
+ * should be avoided as much as possible, because this reduces parallelism in
+ * (nonblocking) commits, both due to locking and due to commit sequencing
+ * requirements.
+ *
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
- * locks means concurrent callers never see inconsistent state. And doing this
- * while it's guaranteed that no relevant nonblocking worker runs means that
- * nonblocking workers do not need grab any locks. Actually they must not grab
- * locks, for otherwise the work flushing will deadlock.
+ * locks means concurrent callers never see inconsistent state. Note that commit
+ * workers do not hold any locks; their access is only coordinated through
+ * ordering. If workers would access state only through the pointers in the
+ * free-standing state objects (currently not the case for any driver) then even
+ * multiple pending commits could be in-flight at the same time.
*
* 4. Schedule a work item to do all subsequent steps, using the split-out
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
* then cleaning up the framebuffers after the old framebuffer is no longer
- * being displayed.
- *
- * The above scheme is implemented in the atomic helper libraries in
- * drm_atomic_helper_commit() using a bunch of helper functions. See
- * drm_atomic_helper_setup_commit() for a starting point.
+ * being displayed. The scheduled work should synchronize against other workers
+ * using the &drm_crtc_commit infrastructure as needed. See
+ * drm_atomic_helper_setup_commit() for more details.
*/
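
The split-out commit helpers mentioned above are usually combined in a driver's &drm_mode_config_helper_funcs.atomic_commit_tail hook. A minimal sketch, mirroring what the default drm_atomic_helper_commit_tail() does (the foo_ names are placeholders):

static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	/* a) pre-plane commit, b) plane commit, c) post-plane commit */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* Signal completion of the hardware commit to concurrent commits. */
	drm_atomic_helper_commit_hw_done(old_state);

	/* Only then clean up the old framebuffers. */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
	.atomic_commit_tail = foo_atomic_commit_tail,
};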
static int stall_checks(struct drm_crtc *crtc, bool nonblock)
@@ -2075,7 +2121,7 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
*
 * This function waits for all preceding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
- * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
+ * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
@@ -2162,7 +2208,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
* @old_state: atomic state object with old state structures
*
- * This function walks all CRTCs and fake VBLANK events on those with
+ * This function walks all CRTCs and fakes VBLANK events on those with
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
* The primary use of this function is writeback connectors working in oneshot
* mode and faking VBLANK events. In this case they only fake the VBLANK event
@@ -2358,7 +2404,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
- * functions for planes and crtcs. It assumes that the atomic state has already
+ * functions for planes and CRTCs. It assumes that the atomic state has already
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
@@ -2479,15 +2525,15 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
/**
- * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
- * @old_crtc_state: atomic state object with the old crtc state
+ * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
*
* This function commits the new plane state using the plane and atomic helper
- * functions for planes on the specific crtc. It assumes that the atomic state
+ * functions for planes on the specific CRTC. It assumes that the atomic state
* has already been pushed into the relevant object state pointers, since this
* step can no longer fail.
*
- * This function is useful when plane updates should be done crtc-by-crtc
+ * This function is useful when plane updates should be done CRTC-by-CRTC
* instead of one global step like drm_atomic_helper_commit_planes() does.
*
 * This function can only be safely used when planes are not allowed to move
@@ -2777,10 +2823,10 @@ EXPORT_SYMBOL(drm_atomic_helper_swap_state);
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
+ * @crtc_x: x offset of primary plane on @crtc
+ * @crtc_y: y offset of primary plane on @crtc
+ * @crtc_w: width of primary plane rectangle on @crtc
+ * @crtc_h: height of primary plane rectangle on @crtc
* @src_x: x offset of @fb for panning
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
@@ -2886,7 +2932,7 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
* @set: mode set configuration
* @ctx: lock acquisition context
*
- * Provides a default crtc set_config handler using the atomic driver interface.
+ * Provides a default CRTC set_config handler using the atomic driver interface.
*
* NOTE: For backwards compatibility with old userspace this automatically
* resets the "link-status" property to GOOD, to force any link
@@ -3275,7 +3321,7 @@ static int page_flip_common(struct drm_atomic_state *state,
return PTR_ERR(crtc_state);
crtc_state->event = event;
- crtc_state->pageflip_flags = flags;
+ crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
@@ -3299,7 +3345,7 @@ static int page_flip_common(struct drm_atomic_state *state,
/**
* drm_atomic_helper_page_flip - execute a legacy page flip
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
@@ -3343,7 +3389,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
/**
* drm_atomic_helper_page_flip_target - do page flip on target vblank period.
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 46dc264a248b..7cf3cf936547 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -58,6 +58,22 @@
*/
/**
+ * __drm_atomic_helper_crtc_state_reset - reset the CRTC state
+ * @crtc_state: atomic CRTC state, must not be NULL
+ * @crtc: CRTC object, must not be NULL
+ *
+ * Initializes the newly allocated @crtc_state with default
+ * values. This is useful for drivers that subclass the CRTC state.
+ */
+void
+__drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc)
+{
+ crtc_state->crtc = crtc;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_state_reset);
+
+/**
* __drm_atomic_helper_crtc_reset - reset state on CRTC
* @crtc: drm CRTC
* @crtc_state: CRTC state to assign
@@ -74,7 +90,7 @@ __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
if (crtc_state)
- crtc_state->crtc = crtc;
+ __drm_atomic_helper_crtc_state_reset(crtc_state, crtc);
crtc->state = crtc_state;
}
@@ -128,7 +144,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->zpos_changed = false;
state->commit = NULL;
state->event = NULL;
- state->pageflip_flags = 0;
+ state->async_flip = false;
/* Self refresh should be canceled when a new update is available */
state->active = drm_atomic_crtc_effectively_active(state);
@@ -212,23 +228,43 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
/**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * __drm_atomic_helper_plane_state_reset - resets plane state to default values
+ * @plane_state: atomic plane state, must not be NULL
* @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
*
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
+ * Initializes the newly allocated @plane_state with default
+ * values. This is useful for drivers that subclass the plane state.
*/
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state)
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
+ struct drm_plane *plane)
{
- state->plane = plane;
- state->rotation = DRM_MODE_ROTATE_0;
+ plane_state->plane = plane;
+ plane_state->rotation = DRM_MODE_ROTATE_0;
- state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
- plane->state = state;
+/**
+ * __drm_atomic_helper_plane_reset - reset state on plane
+ * @plane: drm plane
+ * @plane_state: plane state to assign
+ *
+ * Initializes the newly allocated @plane_state and assigns it to
+ * the &drm_plane->state pointer of @plane, usually required when
+ * initializing the drivers or when called from the &drm_plane_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ if (plane_state)
+ __drm_atomic_helper_plane_state_reset(plane_state, plane);
+
+ plane->state = plane_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
@@ -336,6 +372,22 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
/**
+ * __drm_atomic_helper_connector_state_reset - reset the connector state
+ * @conn_state: atomic connector state, must not be NULL
+ * @connector: connector object, must not be NULL
+ *
+ * Initializes the newly allocated @conn_state with default
+ * values. This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+ struct drm_connector *connector)
+{
+ conn_state->connector = connector;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_state_reset);
+
+/**
* __drm_atomic_helper_connector_reset - reset state on connector
* @connector: drm connector
* @conn_state: connector state to assign
@@ -352,7 +404,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state)
{
if (conn_state)
- conn_state->connector = connector;
+ __drm_atomic_helper_connector_state_reset(conn_state, connector);
connector->state = conn_state;
}
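
The new *_state_reset() variants target drivers that embed the core state in a larger structure. A hedged sketch of how a driver might use __drm_atomic_helper_crtc_state_reset() from its &drm_crtc_funcs.reset hook (the foo_ names and the extra field are hypothetical):

struct foo_crtc_state {
	struct drm_crtc_state base;
	bool gamma_dirty;	/* hypothetical driver-private flag */
};

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	__drm_atomic_helper_crtc_state_reset(&state->base, crtc);
	crtc->state = &state->base;
}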
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index abe38bdf85ae..a1e5e262bae2 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -160,12 +160,12 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
- * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * drm_atomic_set_crtc_for_plane - set CRTC for plane
* @plane_state: the plane whose incoming state to update
- * @crtc: crtc to use for the plane
+ * @crtc: CRTC to use for the plane
*
- * Changing the assigned crtc for a plane requires us to grab the lock and state
- * for the new crtc, as needed. This function takes care of all these details
+ * Changing the assigned CRTC for a plane requires us to grab the lock and state
+ * for the new CRTC, as needed. This function takes care of all these details
* besides updating the pointer in the state object itself.
*
* Returns:
@@ -279,12 +279,12 @@ drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
/**
- * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * drm_atomic_set_crtc_for_connector - set CRTC for connector
* @conn_state: atomic state object for the connector
- * @crtc: crtc to use for the connector
+ * @crtc: CRTC to use for the connector
*
- * Changing the assigned crtc for a connector requires us to grab the lock and
- * state for the new crtc, as needed. This function takes care of all these
+ * Changing the assigned CRTC for a connector requires us to grab the lock and
+ * state for the new CRTC, as needed. This function takes care of all these
* details besides updating the pointer in the state object itself.
*
* Returns:
@@ -747,6 +747,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->hdcp_content_type_property) {
+ state->hdcp_content_type = val;
} else if (property == connector->colorspace_property) {
state->colorspace = val;
} else if (property == config->writeback_fb_id_property) {
@@ -831,6 +833,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
state->hdr_output_metadata->base.id : 0;
} else if (property == config->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->hdcp_content_type_property) {
+ *val = state->hdcp_content_type;
} else if (property == config->writeback_fb_id_property) {
/* Writeback framebuffer is one-shot, write and forget */
*val = 0;
@@ -1033,7 +1037,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
* As a contrast, with implicit fencing the kernel keeps track of any
* ongoing rendering, and automatically ensures that the atomic update waits
* for any pending rendering to complete. For shared buffers represented with
- * a &struct dma_buf this is tracked in &struct reservation_object.
+ * a &struct dma_buf this is tracked in &struct dma_resv.
* Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
* whereas explicit fencing is what Android wants.
*
@@ -1301,8 +1305,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
if (arg->reserved)
return -EINVAL;
- if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
- !dev->mode_config.async_page_flip)
+ if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
return -EINVAL;
/* can't test and expect an event at the same time. */
@@ -1402,7 +1405,7 @@ retry:
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
- if (unlikely(drm_debug & DRM_UT_STATE))
+ if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_state(state);
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 37ac168fcb60..121481f6aa71 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -130,7 +130,12 @@
* Z position is set up with drm_plane_create_zpos_immutable_property() and
* drm_plane_create_zpos_property(). It controls the visibility of overlapping
* planes. Without this property the primary plane is always below the cursor
- * plane, and ordering between all other planes is undefined.
+ * plane, and ordering between all other planes is undefined. The positive
+ * Z axis points towards the user, i.e. planes with lower Z position values
+ * are underneath planes with higher Z position values. Two planes with the
+ * same Z position value have undefined ordering. Note that the Z position
+ * value can also be immutable, to inform userspace about the hard-coded
+ * stacking of planes, see drm_plane_create_zpos_immutable_property().
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
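
As a brief illustration of the zpos semantics described above (a sketch with hypothetical plane pointers), a driver could pin its primary plane at the bottom of the stack and let userspace reorder an overlay:

static int foo_attach_zpos_properties(struct drm_plane *primary,
				      struct drm_plane *overlay)
{
	int ret;

	/* Primary plane has a fixed, immutable Z position of 0 (bottom). */
	ret = drm_plane_create_zpos_immutable_property(primary, 0);
	if (ret)
		return ret;

	/* Overlay starts at 1 and may be moved anywhere in [1, 255]. */
	return drm_plane_create_zpos_property(overlay, 1, 1, 255);
}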
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index cba537c99e43..c2cf0c90fa26 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -55,7 +55,7 @@
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
*
- * Bridges can also be chained up using the &drm_bridge.next pointer.
+ * Bridges can also be chained up using the &drm_bridge.chain_node field.
*
* Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
@@ -128,20 +128,21 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
bridge->dev = encoder->dev;
bridge->encoder = encoder;
+ if (previous)
+ list_add(&bridge->chain_node, &previous->chain_node);
+ else
+ list_add(&bridge->chain_node, &encoder->bridge_chain);
+
if (bridge->funcs->attach) {
ret = bridge->funcs->attach(bridge);
if (ret < 0) {
+ list_del(&bridge->chain_node);
bridge->dev = NULL;
bridge->encoder = NULL;
return ret;
}
}
- if (previous)
- previous->next = bridge;
- else
- encoder->bridge = bridge;
-
return 0;
}
EXPORT_SYMBOL(drm_bridge_attach);
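
With the chain now kept in &drm_encoder.bridge_chain, attaching cascaded bridges still uses the same calling convention as before; a short sketch (the foo_ wrapper and bridge pointers are placeholders):

static int foo_attach_bridge_chain(struct drm_encoder *encoder,
				   struct drm_bridge *first,
				   struct drm_bridge *second)
{
	int ret;

	/* The first bridge hangs directly off the encoder ... */
	ret = drm_bridge_attach(encoder, first, NULL);
	if (ret)
		return ret;

	/* ... and the second one is linked after the first. */
	return drm_bridge_attach(encoder, second, first);
}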
@@ -157,6 +158,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
+ list_del(&bridge->chain_node);
bridge->dev = NULL;
}
@@ -172,8 +174,8 @@ void drm_bridge_detach(struct drm_bridge *bridge)
*/
/**
- * drm_bridge_mode_fixup - fixup proposed mode for all bridges in the
- * encoder chain
+ * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
+ * encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
@@ -186,27 +188,31 @@ void drm_bridge_detach(struct drm_bridge *bridge)
* RETURNS:
* true on success, false on failure
*/
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- bool ret = true;
+ struct drm_encoder *encoder;
if (!bridge)
return true;
- if (bridge->funcs->mode_fixup)
- ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (!bridge->funcs->mode_fixup)
+ continue;
- ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode);
+ if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
+ return false;
+ }
- return ret;
+ return true;
}
-EXPORT_SYMBOL(drm_bridge_mode_fixup);
+EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
/**
- * drm_bridge_mode_valid - validate the mode against all bridges in the
- * encoder chain.
+ * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
+ * encoder chain.
* @bridge: bridge control structure
* @mode: desired mode to be validated
*
@@ -219,26 +225,33 @@ EXPORT_SYMBOL(drm_bridge_mode_fixup);
* RETURNS:
* MODE_OK on success, drm_mode_status Enum error code on failure
*/
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_mode *mode)
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- enum drm_mode_status ret = MODE_OK;
+ struct drm_encoder *encoder;
if (!bridge)
- return ret;
+ return MODE_OK;
- if (bridge->funcs->mode_valid)
- ret = bridge->funcs->mode_valid(bridge, mode);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ enum drm_mode_status ret;
- if (ret != MODE_OK)
- return ret;
+ if (!bridge->funcs->mode_valid)
+ continue;
- return drm_bridge_mode_valid(bridge->next, mode);
+ ret = bridge->funcs->mode_valid(bridge, mode);
+ if (ret != MODE_OK)
+ return ret;
+ }
+
+ return MODE_OK;
}
-EXPORT_SYMBOL(drm_bridge_mode_valid);
+EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
- * drm_bridge_disable - disables all bridges in the encoder chain
+ * drm_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
@@ -247,20 +260,28 @@ EXPORT_SYMBOL(drm_bridge_mode_valid);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_disable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_bridge_disable(bridge->next);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->disable)
+ iter->funcs->disable(iter);
- if (bridge->funcs->disable)
- bridge->funcs->disable(bridge);
+ if (iter == bridge)
+ break;
+ }
}
-EXPORT_SYMBOL(drm_bridge_disable);
+EXPORT_SYMBOL(drm_bridge_chain_disable);
/**
- * drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain
+ * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
+ * encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.post_disable op for all the bridges in the
@@ -269,47 +290,53 @@ EXPORT_SYMBOL(drm_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_post_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
-
- drm_bridge_post_disable(bridge->next);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_bridge_post_disable);
+EXPORT_SYMBOL(drm_bridge_chain_post_disable);
/**
- * drm_bridge_mode_set - set proposed mode for all bridges in the
- * encoder chain
+ * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
+ * encoder chain
* @bridge: bridge control structure
- * @mode: desired mode to be set for the bridge
- * @adjusted_mode: updated mode that works for this bridge
+ * @mode: desired mode to be set for the encoder chain
+ * @adjusted_mode: updated mode that works for this encoder chain
*
* Calls &drm_bridge_funcs.mode_set op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->mode_set)
- bridge->funcs->mode_set(bridge, mode, adjusted_mode);
-
- drm_bridge_mode_set(bridge->next, mode, adjusted_mode);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->mode_set)
+ bridge->funcs->mode_set(bridge, mode, adjusted_mode);
+ }
}
-EXPORT_SYMBOL(drm_bridge_mode_set);
+EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
- * drm_bridge_pre_enable - prepares for enabling all
- * bridges in the encoder chain
+ * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
+ * encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
@@ -318,20 +345,24 @@ EXPORT_SYMBOL(drm_bridge_mode_set);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_pre_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_bridge_pre_enable(bridge->next);
-
- if (bridge->funcs->pre_enable)
- bridge->funcs->pre_enable(bridge);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
+ }
}
-EXPORT_SYMBOL(drm_bridge_pre_enable);
+EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
/**
- * drm_bridge_enable - enables all bridges in the encoder chain
+ * drm_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
@@ -340,22 +371,25 @@ EXPORT_SYMBOL(drm_bridge_pre_enable);
*
* Note that the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_enable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
-
- drm_bridge_enable(bridge->next);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_bridge_enable);
+EXPORT_SYMBOL(drm_bridge_chain_enable);
/**
- * drm_atomic_bridge_disable - disables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_disable (falls back on
* &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
@@ -364,26 +398,33 @@ EXPORT_SYMBOL(drm_bridge_enable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_atomic_bridge_disable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->atomic_disable)
+ iter->funcs->atomic_disable(iter, old_state);
+ else if (iter->funcs->disable)
+ iter->funcs->disable(iter);
- if (bridge->funcs->atomic_disable)
- bridge->funcs->atomic_disable(bridge, state);
- else if (bridge->funcs->disable)
- bridge->funcs->disable(bridge);
+ if (iter == bridge)
+ break;
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
/**
- * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the
- * encoder chain
+ * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
+ * in the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_post_disable (falls back on
* &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
@@ -392,26 +433,29 @@ EXPORT_SYMBOL(drm_atomic_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->atomic_post_disable)
- bridge->funcs->atomic_post_disable(bridge, state);
- else if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
-
- drm_atomic_bridge_post_disable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->atomic_post_disable)
+ bridge->funcs->atomic_post_disable(bridge, old_state);
+ else if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
/**
- * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the
- * encoder chain
+ * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
+ * the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
* &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
@@ -420,25 +464,32 @@ EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_atomic_bridge_pre_enable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->atomic_pre_enable)
+ iter->funcs->atomic_pre_enable(iter, old_state);
+ else if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
- if (bridge->funcs->atomic_pre_enable)
- bridge->funcs->atomic_pre_enable(bridge, state);
- else if (bridge->funcs->pre_enable)
- bridge->funcs->pre_enable(bridge);
+ if (iter == bridge)
+ break;
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
- * drm_atomic_bridge_enable - enables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_enable (falls back on
* &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
@@ -447,20 +498,23 @@ EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->atomic_enable)
- bridge->funcs->atomic_enable(bridge, state);
- else if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
-
- drm_atomic_bridge_enable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->atomic_enable)
+ bridge->funcs->atomic_enable(bridge, old_state);
+ else if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
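
The chain helpers above fall back from the atomic ops to the legacy ones, so a bridge driver only needs to implement one set. A minimal, hedged skeleton of the atomic variants as they look in this kernel version, where they still receive the full &drm_atomic_state (bodies left as comments):

static void foo_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *old_state)
{
	/* Power up the bridge (regulators, clocks) before video starts. */
}

static void foo_bridge_atomic_disable(struct drm_bridge *bridge,
				      struct drm_atomic_state *old_state)
{
	/* Stop the video signal and power the bridge back down. */
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.atomic_pre_enable = foo_bridge_atomic_pre_enable,
	.atomic_disable = foo_bridge_atomic_disable,
};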
#ifdef CONFIG_OF
/**
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d..03e01b000f7a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
{
unsigned long i;
- mb();
+ mb(); /* Full memory barrier used before so that CLFLUSH is ordered */
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
- mb();
+ mb(); /* Also used after CLFLUSH so that all cache lines are flushed */
}
#endif
@@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#elif defined(__powerpc__)
unsigned long i;
+
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
@@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
- mb();
+ mb(); /* CLFLUSH is ordered only by using memory barriers */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
- mb();
+ mb(); /* Make sure that every cache line entry is flushed */
return;
}
@@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
+
addr = (void *)(((unsigned long)addr) & -size);
- mb();
+ mb(); /* CLFLUSH is only ordered with a full memory barrier */
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
- mb();
+ mb(); /* Ensure that every data cache line entry is flushed */
return;
}
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index e1dafb0cc5e2..b031b45aa8ef 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -59,7 +59,6 @@ static void drm_client_close(struct drm_client_dev *client)
drm_file_free(client->file);
}
-EXPORT_SYMBOL(drm_client_close);
/**
* drm_client_init - Initialise a DRM client
@@ -151,7 +150,7 @@ void drm_client_release(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
- DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+ drm_dbg_kms(dev, "%s\n", client->name);
drm_client_modeset_free(client);
drm_client_close(client);
@@ -204,7 +203,7 @@ void drm_client_dev_hotplug(struct drm_device *dev)
continue;
ret = client->funcs->hotplug(client);
- DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
}
mutex_unlock(&dev->clientlist_mutex);
}
@@ -224,7 +223,7 @@ void drm_client_dev_restore(struct drm_device *dev)
continue;
ret = client->funcs->restore(client);
- DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
}
@@ -352,8 +351,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
if (ret)
- DRM_DEV_ERROR(buffer->client->dev->dev,
- "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+ drm_err(buffer->client->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
buffer->fb = NULL;
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index c8922b7cac09..6d4a29e99ae2 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -115,6 +115,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
}
static struct drm_display_mode *
+drm_connector_get_tiled_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay == connector->tile_h_size &&
+ mode->vdisplay == connector->tile_v_size)
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay == connector->tile_h_size &&
+ mode->vdisplay == connector->tile_v_size)
+ continue;
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
@@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
+ int num_tiled_conns = 0;
int i;
+ for (i = 0; i < connector_count; i++) {
+ if (connectors[i]->has_tile &&
+ connectors[i]->status == connector_status_connected)
+ num_tiled_conns++;
+ }
+
retry:
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
@@ -399,6 +433,28 @@ retry:
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
+ /*
+ * In case of tiled mode, if all tiles are not present, fall back to
+ * the first available non-tiled mode.
+ * Once all tiles are present, try to find the tiled mode for all of
+ * them; if the tiled mode is not available due to fbcon size
+ * limitations, use the first non-tiled mode only for
+ * tile 0,0 and set no mode for all other tiles.
+ */
+ if (connector->has_tile) {
+ if (num_tiled_conns <
+ connector->num_h_tile * connector->num_v_tile ||
+ (connector->tile_h_loc == 0 &&
+ connector->tile_v_loc == 0 &&
+ !drm_connector_get_tiled_mode(connector))) {
+ DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+ connector->base.id);
+ modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ } else {
+ modes[i] = drm_connector_get_tiled_mode(connector);
+ }
+ }
+
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
conn_configured |= BIT_ULL(i);
@@ -415,9 +471,8 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->possible_crtcs & drm_crtc_mask(crtc))
return true;
}
@@ -516,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ int num_tiled_conns = 0;
struct drm_modeset_acquire_ctx ctx;
if (!drm_drv_uses_atomic_modeset(dev))
@@ -533,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
+ for (i = 0; i < count; i++) {
+ if (connectors[i]->has_tile &&
+ connectors[i]->status == connector_status_connected)
+ num_tiled_conns++;
+ }
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
@@ -632,6 +693,16 @@ retry:
connector->name);
modes[i] = &connector->state->crtc->mode;
}
+ /*
+ * In case of tiled modes, if all tiles are not present
+ * then fall back to a non-tiled mode.
+ */
+ if (connector->has_tile &&
+ num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+ DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+ connector->base.id);
+ modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ }
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 4ce5c6d8de99..c93123ff7c21 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -109,28 +109,38 @@
*/
/**
- * drm_color_lut_extract - clamp and round LUT entries
+ * drm_color_ctm_s31_32_to_qm_n - convert and clamp S31.32 sign-magnitude to Qm.n
+ *
* @user_input: input value
- * @bit_precision: number of bits the hw LUT supports
+ * @m: number of integer bits, only m <= 32 is supported, including the sign bit
+ * @n: number of fractional bits, only n <= 32 is supported
+ *
+ * Convert and clamp S31.32 sign-magnitude to Qm.n (signed 2's complement).
+ * The sign bit BIT(m+n-1) and above are 0 for positive values and 1 for negative
+ * ones; the range of representable values is [-2^(m-1), 2^(m-1) - 2^-n]
+ *
+ * For example
+ * A Q3.12 format number:
+ * - required bits: 3 + 12 = 15 bits
+ * - range: [-2^2, 2^2 - 2^-12]
*
- * Extract a degamma/gamma LUT value provided by user (in the form of
- * &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * NOTE: m can be zero if all bits are used to represent the fractional
+ * part, as in Q0.32
*/
-uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
+u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n)
{
- uint32_t val = user_input;
- uint32_t max = 0xffff >> (16 - bit_precision);
+ u64 mag = (user_input & ~BIT_ULL(63)) >> (32 - n);
+ bool negative = !!(user_input & BIT_ULL(63));
+ s64 val;
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
+ WARN_ON(m > 32 || n > 32);
+
+ val = clamp_val(mag, 0, negative ?
+ BIT_ULL(n + m - 1) : BIT_ULL(n + m - 1) - 1);
- return clamp_val(val, 0, max);
+ return negative ? -val : val;
}
-EXPORT_SYMBOL(drm_color_lut_extract);
+EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
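
A hedged usage sketch: converting the S31.32 entries of a &drm_color_ctm for a hypothetical piece of hardware that expects signed Q4.12 coefficients (the foo_ helper is a placeholder):

static void foo_crtc_convert_ctm(const struct drm_color_ctm *ctm,
				 u16 coeffs[9])
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++)
		/* Keep only the low m + n = 16 bits of the Q4.12 result. */
		coeffs[i] = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 4, 12);
}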
/**
* drm_crtc_enable_color_mgmt - enable color management properties
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b3f2cf7eae9c..2166000ed057 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -92,6 +92,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
{ DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
+ { DRM_MODE_CONNECTOR_SPI, "SPI" },
};
void drm_connector_ida_init(void)
@@ -140,8 +141,7 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
}
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
- connector->name,
- mode->name,
+ connector->name, mode->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
@@ -298,6 +298,41 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_init_with_ddc - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Ensures that the ddc field of the connector is correctly set.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init_with_ddc(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ int ret;
+
+ ret = drm_connector_init(dev, connector, funcs, connector_type);
+ if (ret)
+ return ret;
+
+ /* provide ddc symlink in sysfs */
+ connector->ddc = ddc;
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_connector_init_with_ddc);
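
A brief usage sketch from a hypothetical driver that already holds the i2c_adapter backing its DDC bus:

static int foo_hdmi_connector_create(struct drm_device *dev,
				     struct drm_connector *connector,
				     const struct drm_connector_funcs *funcs,
				     struct i2c_adapter *ddc)
{
	/* Initialises the connector and records @ddc for the sysfs symlink. */
	return drm_connector_init_with_ddc(dev, connector, funcs,
					   DRM_MODE_CONNECTOR_HDMIA, ddc);
}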
+
+/**
* drm_connector_attach_edid_property - attach edid property.
* @connector: the connector
*
@@ -330,8 +365,6 @@ EXPORT_SYMBOL(drm_connector_attach_edid_property);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- int i;
-
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
@@ -346,18 +379,15 @@ int drm_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
+ connector->possible_encoders |= drm_encoder_mask(encoder);
+
+ return 0;
}
EXPORT_SYMBOL(drm_connector_attach_encoder);
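With possible_encoders now a bitmask, a simple 1:1 driver still just attaches its single encoder; checking the association afterwards becomes a plain bit test. A hedged sketch (names are illustrative):

	ret = drm_connector_attach_encoder(&priv->connector, &priv->encoder);
	if (ret)
		return ret;

	WARN_ON(!drm_connector_has_possible_encoder(&priv->connector,
						    &priv->encoder));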
/**
- * drm_connector_has_possible_encoder - check if the connector and encoder are assosicated with each other
+ * drm_connector_has_possible_encoder - check if the connector and encoder are
+ * associated with each other
* @connector: the connector
* @encoder: the encoder
*
@@ -367,15 +397,7 @@ EXPORT_SYMBOL(drm_connector_attach_encoder);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- struct drm_encoder *enc;
- int i;
-
- drm_connector_for_each_possible_encoder(connector, enc, i) {
- if (enc == encoder)
- return true;
- }
-
- return false;
+ return connector->possible_encoders & drm_encoder_mask(encoder);
}
EXPORT_SYMBOL(drm_connector_has_possible_encoder);
@@ -445,7 +467,10 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * which can be hotplugged after drm_dev_register() has been called already,
+ * e.g. DP MST connectors. All other connectors will be registered automatically
+ * when calling drm_dev_register().
*
* Returns:
* Zero on success, error code on failure.
@@ -491,7 +516,10 @@ EXPORT_SYMBOL(drm_connector_register);
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector
+ * Unregister userspace interfaces for a connector. Only call this for
+ * connectors which have been registered explicitly by calling
+ * drm_connector_register(), since all other connectors are unregistered
+ * automatically when drm_dev_unregister() is called.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
@@ -684,7 +712,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
__drm_connector_put_safe(iter->conn);
spin_unlock_irqrestore(&config->connector_list_lock, flags);
}
- lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
+ lock_release(&connector_list_iter_dep_map, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -847,6 +875,38 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
};
+/*
+ * As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry
+ * Format Table 2-120
+ */
+static const struct drm_prop_enum_list dp_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" },
+ /* Colorimetry based on scRGB (IEC 61966-2-2) */
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on SMPTE RP 431-2 */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+};
+
/**
* DOC: standard connector properties
*
@@ -948,10 +1008,72 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
* - If the state is DESIRED, kernel should attempt to re-authenticate the
* link whenever possible. This includes across disable/enable, dpms,
* hotplug, downstream device changes, link status failures, etc..
- * - Userspace is responsible for polling the property to determine when
- * the value transitions from ENABLED to DESIRED. This signifies the link
- * is no longer protected and userspace should take appropriate action
- * (whatever that might be).
+ * - The kernel sends a uevent with the connector id and property id through
+ * drm_hdcp_update_content_protection(), upon the kernel-triggered scenarios
+ * below:
+ *
+ * - DESIRED -> ENABLED (authentication success)
+ * - ENABLED -> DESIRED (termination of authentication)
+ * - Please note that no uevents are sent for userspace-triggered property
+ * state changes, which cannot fail, such as
+ *
+ * - DESIRED/ENABLED -> UNDESIRED
+ * - UNDESIRED -> DESIRED
+ * - Userspace is responsible for polling the property or listening to uevents
+ * to determine when the value transitions from ENABLED to DESIRED.
+ * This signifies the link is no longer protected and userspace should
+ * take appropriate action (whatever that might be).
+ *
+ * HDCP Content Type:
+ * This enum property is used by userspace to declare to the kernel the
+ * content type of the display stream. Here, "display stream" means any
+ * display content that userspace intends to display through HDCP
+ * encryption.
+ *
+ * Content Type of a stream is decided by the owner of the stream, as
+ * "HDCP Type0" or "HDCP Type1".
+ *
+ * The value of the property can be one of the below:
+ * - "HDCP Type0": DRM_MODE_HDCP_CONTENT_TYPE0 = 0
+ * - "HDCP Type1": DRM_MODE_HDCP_CONTENT_TYPE1 = 1
+ *
+ * When the kernel starts HDCP authentication (see "Content Protection"
+ * for details), it uses the content type from "HDCP Content Type" when
+ * performing the HDCP authentication with the display sink.
+ *
+ * Please note that a link can be authenticated with HDCP 2.2 for either
+ * Content Type 0 or Content Type 1, whereas a link can be authenticated
+ * with HDCP 1.4 only for Content Type 0 (implicitly, since HDCP 1.4 has
+ * no notion of Content Type).
+ *
+ * The HDCP 2.2 authentication protocol itself takes the "Content Type" as
+ * a parameter, which is an input to the DP HDCP 2.2 encryption algorithm.
+ *
+ * For a Type 0 content protection request, the kernel driver can choose
+ * either HDCP version, 1.4 or 2.2. When HDCP 2.2 is used for "HDCP Type 0",
+ * an HDCP 2.2 capable repeater downstream can forward that content to an
+ * HDCP 1.4 authenticated sink (a Type 0 link). But if the content is
+ * classified as "HDCP Type 1", the above-mentioned HDCP 2.2 repeater won't
+ * send the content to the HDCP sink, as it can't authenticate an HDCP 1.4
+ * capable sink for "HDCP Type 1".
+ *
+ * Please note that userspace does not need to know which HDCP version(s)
+ * the kernel driver uses to achieve the requested "HDCP Content Type".
+ *
+ * Currently, classifying content as Type 1 ensures that it will be
+ * displayed only through an HDCP 2.2 encrypted link.
+ *
+ * Note that the HDCP Content Type property was introduced with HDCP 2.2 and
+ * defaults to Type 0. It is only exposed by drivers supporting HDCP 2.2
+ * (and hence both Type 0 and Type 1). Depending on how future versions of
+ * the HDCP specs are defined, Content Type could be used for higher
+ * versions too.
+ *
+ * If the content type is changed while "Content Protection" is not
+ * UNDESIRED, the kernel will disable HDCP and re-enable it with the new
+ * type in the same atomic commit. And when "Content Protection" is ENABLED,
+ * it means the link is HDCP authenticated and encrypted for transmitting a
+ * stream of the type given in "HDCP Content Type".
*
* HDR_OUTPUT_METADATA:
* Connector property to enable userspace to send HDR Metadata to
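A hedged userspace sketch of the HDCP property protocol documented above, using libdrm's legacy property interface (an atomic commit works as well). The property IDs are assumed to have been looked up by name beforehand; the enum values match the ones documented above.

	/* Declare the stream as "HDCP Type1" (1), then request protection. */
	drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
				 hdcp_content_type_prop_id, 1);
	/* "Content Protection" DESIRED (1); poll or listen for uevents to see ENABLED. */
	drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
				 content_protection_prop_id, 1);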
@@ -1577,7 +1699,6 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * drm_mode_create_colorspace_property - create colorspace property
* This property helps select a suitable colorspace based on the sink
* capability. Modern sink devices support wider gamut like BT2020.
* This helps switch to BT2020 mode if the BT2020 encoded video stream
@@ -1597,32 +1718,68 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* - This property is just to inform sink what colorspace
* source is trying to drive.
*
+ * Because HDMI and DP have different colorspaces,
+ * drm_mode_create_hdmi_colorspace_property() is used for HDMI connectors and
+ * drm_mode_create_dp_colorspace_property() is used for DP connectors.
+ */
+
+/**
+ * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
* Called by a driver the first time it's needed, must be attached to desired
- * connectors.
+ * HDMI connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
-int drm_mode_create_colorspace_property(struct drm_connector *connector)
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Colorspace",
- hdmi_colorspaces,
- ARRAY_SIZE(hdmi_colorspaces));
- if (!prop)
- return -ENOMEM;
- } else {
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ if (connector->colorspace_property)
return 0;
- }
- connector->colorspace_property = prop;
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
+
+/**
+ * drm_mode_create_dp_colorspace_property - create dp colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * DP connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (connector->colorspace_property)
+ return 0;
+
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ dp_colorspaces,
+ ARRAY_SIZE(dp_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property);
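A hedged sketch of how a DP driver would wire up the new property; the attach step and the initial "Default" value are shown, error handling is illustrative:

	ret = drm_mode_create_dp_colorspace_property(connector);
	if (ret)
		return ret;

	drm_object_attach_property(&connector->base,
				   connector->colorspace_property,
				   DRM_MODE_COLORIMETRY_DEFAULT);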
/**
* drm_mode_create_content_type_property - create content type property
@@ -2024,7 +2181,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
int encoders_count = 0;
int ret = 0;
int copied = 0;
- int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *encoder_ptr;
@@ -2039,14 +2195,13 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- drm_connector_for_each_possible_encoder(connector, encoder, i)
- encoders_count++;
+ encoders_count = hweight32(connector->possible_encoders);
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (put_user(encoder->base.id, encoder_ptr + copied)) {
ret = -EFAULT;
goto out;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6dd49a60deac..93a4eec429e8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -47,6 +48,8 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include "drm_crtc_helper_internal.h"
+
/**
* DOC: overview
*
@@ -159,14 +162,10 @@ drm_encoder_disable(struct drm_encoder *encoder)
if (!encoder_funcs)
return;
- drm_bridge_disable(encoder->bridge);
-
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else if (encoder_funcs->dpms)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-
- drm_bridge_post_disable(encoder->bridge);
}
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
@@ -326,13 +325,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- ret = drm_bridge_mode_fixup(encoder->bridge,
- mode, adjusted_mode);
- if (!ret) {
- DRM_DEBUG_KMS("Bridge fixup failed\n");
- goto done;
- }
-
encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
@@ -364,13 +356,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- drm_bridge_disable(encoder->bridge);
-
/* Disable the encoders as the first thing we do. */
if (encoder_funcs->prepare)
encoder_funcs->prepare(encoder);
-
- drm_bridge_post_disable(encoder->bridge);
}
drm_crtc_prepare_encoders(dev);
@@ -397,8 +385,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder->base.id, encoder->name, mode->name);
if (encoder_funcs->mode_set)
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
-
- drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -413,12 +399,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- drm_bridge_pre_enable(encoder->bridge);
-
if (encoder_funcs->commit)
encoder_funcs->commit(encoder);
-
- drm_bridge_enable(encoder->bridge);
}
/* Calculate and store various constants which
@@ -480,6 +462,22 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
__drm_helper_disable_unused_functions(dev);
}
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ WARN_ON(hweight32(connector->possible_encoders) > 1);
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
@@ -645,7 +643,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
- new_encoder = connector_funcs->best_encoder(connector);
+ if (connector_funcs->best_encoder)
+ new_encoder = connector_funcs->best_encoder(connector);
+ else
+ new_encoder = drm_connector_get_single_encoder(connector);
+
/* if we can't get an encoder for a connector
we are setting now - then fail */
if (new_encoder == NULL)
@@ -817,25 +819,14 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
/* Helper which handles bridge ordering around encoder dpms */
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct drm_bridge *bridge = encoder->bridge;
const struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
return;
- if (mode == DRM_MODE_DPMS_ON)
- drm_bridge_pre_enable(bridge);
- else
- drm_bridge_disable(bridge);
-
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
-
- if (mode == DRM_MODE_DPMS_ON)
- drm_bridge_enable(bridge);
- else
- drm_bridge_post_disable(bridge);
}
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index b5ac1581e623..f0a66ef47e5a 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -75,3 +75,6 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
+
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 8230dac01a89..3a4126dc2520 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -212,8 +212,14 @@ retry:
drm_for_each_plane(plane, fb->dev) {
struct drm_plane_state *plane_state;
- if (plane->state->fb != fb)
+ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+ if (ret)
+ goto out;
+
+ if (plane->state->fb != fb) {
+ drm_modeset_unlock(&plane->mutex);
continue;
+ }
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 7ca486d750e9..e22b812c4b80 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -66,9 +66,18 @@
* the reported CRCs of frames that should have the same contents.
*
* On the driver side the implementation effort is minimal, drivers only need to
- * implement &drm_crtc_funcs.set_crc_source. The debugfs files are automatically
- * set up if that vfunc is set. CRC samples need to be captured in the driver by
- * calling drm_crtc_add_crc_entry().
+ * implement &drm_crtc_funcs.set_crc_source and &drm_crtc_funcs.verify_crc_source.
+ * The debugfs files are automatically set up if those vfuncs are set. CRC samples
+ * need to be captured in the driver by calling drm_crtc_add_crc_entry().
+ * Depending on the driver and HW requirements, &drm_crtc_funcs.set_crc_source
+ * may result in a commit (even a full modeset).
+ *
+ * CRC results must be reliable across non-full-modeset atomic commits, so if a
+ * commit via DRM_IOCTL_MODE_ATOMIC would disable or otherwise interfere with
+ * CRC generation, then the driver must mark that commit as a full modeset
+ * (drm_atomic_crtc_needs_modeset() should return true). As a result, to ensure
+ * consistent results, generic userspace must re-setup CRC generation after a
+ * legacy SETCRTC or an atomic commit with DRM_MODE_ATOMIC_ALLOW_MODESET.
*/
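A hedged userspace sketch of the re-setup requirement described above. The debugfs path follows the crc/control layout; the card/CRTC indices and the "auto" source name are illustrative and driver-dependent.

	#include <fcntl.h>
	#include <unistd.h>

	/* Re-arm CRC generation after a SETCRTC or an ALLOW_MODESET commit. */
	int fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);

	if (fd >= 0) {
		write(fd, "auto", 4);
		close(fd);
	}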
static int crc_control_show(struct seq_file *m, void *data)
@@ -131,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
if (IS_ERR(source))
return PTR_ERR(source);
- if (source[len] == '\n')
- source[len] = '\0';
+ if (source[len - 1] == '\n')
+ source[len - 1] = '\0';
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
if (ret)
@@ -249,6 +258,11 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
+ /* terminate the infinite while loop if 'drm_dp_aux_crc_work' is running */
+ spin_lock_irq(&crc->lock);
+ crc->opened = false;
+ spin_unlock_irq(&crc->lock);
+
crtc->funcs->set_crc_source(crtc, NULL);
spin_lock_irq(&crc->lock);
@@ -325,19 +339,17 @@ static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
return LINE_LEN(crc->values_cnt);
}
-static unsigned int crtc_crc_poll(struct file *file, poll_table *wait)
+static __poll_t crtc_crc_poll(struct file *file, poll_table *wait)
{
struct drm_crtc *crtc = file->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
- unsigned ret;
+ __poll_t ret = 0;
poll_wait(file, &crc->wq, wait);
spin_lock_irq(&crc->lock);
if (crc->source && crtc_crc_data_count(crc))
- ret = POLLIN | POLLRDNORM;
- else
- ret = 0;
+ ret |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irq(&crc->lock);
return ret;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 5ef0227eaa0e..e45b07890c5a 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_dma.c
* DMA IOCTL and function support
*
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 5be28e3295f3..2510717d5a08 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "drm_crtc_helper_internal.h"
@@ -82,8 +83,7 @@ static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
kref_init(&aux_dev->refcount);
mutex_lock(&aux_idr_mutex);
- index = idr_alloc_cyclic(&aux_idr, aux_dev, 0, DRM_AUX_MINORS,
- GFP_KERNEL);
+ index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL);
mutex_unlock(&aux_idr_mutex);
if (index < 0) {
kfree(aux_dev);
@@ -164,6 +164,7 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+
if (res <= 0)
break;
@@ -211,6 +212,7 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+
if (res <= 0)
break;
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index b15cee85b702..3ab2609f9ec7 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -8,9 +8,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <drm/drm_dp_helper.h>
+
#include <media/cec.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_helper.h>
+
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
@@ -295,7 +299,10 @@ static void drm_dp_cec_unregister_work(struct work_struct *work)
*/
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
- u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ struct drm_connector *connector = aux->cec.connector;
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
+ CEC_CAP_CONNECTOR_INFO;
+ struct cec_connector_info conn_info;
unsigned int num_las = 1;
u8 cap;
@@ -344,13 +351,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
/* Create a new adapter */
aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
- aux, aux->cec.name, cec_caps,
+ aux, connector->name, cec_caps,
num_las);
if (IS_ERR(aux->cec.adap)) {
aux->cec.adap = NULL;
goto unlock;
}
- if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(aux->cec.adap, &conn_info);
+
+ if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
cec_delete_adapter(aux->cec.adap);
aux->cec.adap = NULL;
} else {
@@ -406,22 +417,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid);
/**
* drm_dp_cec_register_connector() - register a new connector
* @aux: DisplayPort AUX channel
- * @name: name of the CEC device
- * @parent: parent device
+ * @connector: drm connector
*
* A new connector was registered with associated CEC adapter name and
* CEC adapter parent device. After registering the name and parent
* drm_dp_cec_set_edid() is called to check if the connector supports
* CEC and to register a CEC adapter if that is the case.
*/
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
- struct device *parent)
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
{
WARN_ON(aux->cec.adap);
if (WARN_ON(!aux->transfer))
return;
- aux->cec.name = name;
- aux->cec.parent = parent;
+ aux->cec.connector = connector;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 0b994d083a89..a5364b5192b8 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_dp_mst_helper.h>
#include "drm_crtc_helper_internal.h"
@@ -120,70 +121,63 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane)
+{
+ unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2;
+ u8 value = dp_link_status(link_status, offset);
+
+ return (value >> (lane << 1)) & 0x3;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
- udelay(100);
+ rd_interval = 100;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0)
- udelay(400);
+ rd_interval = 400;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
- switch (link_rate) {
- default:
- WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
- DP_LINK_BW_1_62);
- /* fall through */
- case 162000:
- return DP_LINK_BW_1_62;
- case 270000:
- return DP_LINK_BW_2_7;
- case 540000:
- return DP_LINK_BW_5_4;
- case 810000:
- return DP_LINK_BW_8_1;
- }
+ /* Spec says link_bw = link_rate / 0.27Gbps */
+ return link_rate / 27000;
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
- switch (link_bw) {
- default:
- WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
- /* fall through */
- case DP_LINK_BW_1_62:
- return 162000;
- case DP_LINK_BW_2_7:
- return 270000;
- case DP_LINK_BW_5_4:
- return 540000;
- case DP_LINK_BW_8_1:
- return 810000;
- }
+ /* Spec says link_rate = link_bw * 0.27Gbps */
+ return link_bw * 27000;
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
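Worked examples of the linear mapping that replaces the switch statements (link rates in kHz, bandwidth codes in units of 0.27 Gbps):

	drm_dp_link_rate_to_bw_code(270000)         == 270000 / 27000 == 0x0a (DP_LINK_BW_2_7)
	drm_dp_link_rate_to_bw_code(540000)         == 540000 / 27000 == 0x14 (DP_LINK_BW_5_4)
	drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1) == 0x1e * 27000   == 810000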
@@ -243,7 +237,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
ret = aux->transfer(aux, &msg);
-
if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
@@ -274,7 +267,7 @@ unlock:
/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
* @buffer: buffer to store the register values
* @size: number of bytes in @buffer
@@ -303,13 +296,18 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* We just have to do it before any DPCD access and hope that the
* monitor doesn't power down exactly after the throw away read.
*/
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
- 1);
- if (ret != 1)
- goto out;
+ if (!aux->is_remote) {
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
+ buffer, 1);
+ if (ret != 1)
+ goto out;
+ }
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
- size);
+ if (aux->is_remote)
+ ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size);
+ else
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
+ buffer, size);
out:
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
@@ -319,7 +317,7 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
/**
* drm_dp_dpcd_write() - write a series of bytes to the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to write
* @buffer: buffer containing the values to write
* @size: number of bytes in @buffer
@@ -336,8 +334,12 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
{
int ret;
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
- size);
+ if (aux->is_remote)
+ ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
+ else
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset,
+ buffer, size);
+
drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
return ret;
}
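A hedged caller-side note: with the is_remote routing above, the same helpers serve both the SST AUX channel and an MST port's remote AUX, e.g. when reading the sink's DPCD revision:

	u8 rev;

	if (drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev) == 1)
		DRM_DEBUG_KMS("DPCD revision %d.%d\n", rev >> 4, rev & 0xf);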
@@ -360,134 +362,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
- * drm_dp_link_probe() - probe a DisplayPort link for capabilities
- * @aux: DisplayPort AUX channel
- * @link: pointer to structure in which to return link capabilities
- *
- * The structure filled in by this function can usually be passed directly
- * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
- * configure the link based on the link's capabilities.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[3];
- int err;
-
- memset(link, 0, sizeof(*link));
-
- err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
- if (err < 0)
- return err;
-
- link->revision = values[0];
- link->rate = drm_dp_bw_code_to_link_rate(values[1]);
- link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
-
- if (values[2] & DP_ENHANCED_FRAME_CAP)
- link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_probe);
-
-/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_up);
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_down);
-
-/**
- * drm_dp_link_configure() - configure a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[2];
- int err;
-
- values[0] = drm_dp_link_rate_to_bw_code(link->rate);
- values[1] = link->num_lanes;
-
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_configure);
-
-/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -1105,6 +979,19 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
}
/**
+ * drm_dp_remote_aux_init() - minimally initialise a remote aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Used for remote aux channels in general. Merely initializes the CRC work
+ * struct.
+ */
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux)
+{
+ INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
+}
+EXPORT_SYMBOL(drm_dp_remote_aux_init);
+
+/**
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
@@ -1132,6 +1019,14 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* @aux: DisplayPort AUX channel
*
* Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ * This should only be called when the underlying &struct drm_connector is
+ * initialized already. Therefore the best place to call this is from
+ * &drm_connector_funcs.late_register. Note that drivers which don't follow
+ * this will Oops when CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ *
+ * Drivers which need to use the aux channel before that point (e.g. at driver
+ * load time, before drm_dev_register() has been called) need to call
+ * drm_dp_aux_init().
*
* Returns 0 on success or a negative error code on failure.
*/
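A hedged sketch of the recommended flow, registering the AUX chardev from &drm_connector_funcs.late_register; the example_ container and lookup helper are assumptions:

	static int example_connector_late_register(struct drm_connector *connector)
	{
		struct example_dp *dp = connector_to_example_dp(connector);

		return drm_dp_aux_register(&dp->aux);
	}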
@@ -1283,6 +1178,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
/* CH7511 seems to leave SINK_COUNT zeroed */
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
+ /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
};
#undef OUI
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0984b9a34d55..cce0b1bba591 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -28,14 +28,23 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stacktrace.h>
+#include <linux/sort.h>
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+#endif
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include "drm_crtc_helper_internal.h"
+#include "drm_dp_mst_topology_internal.h"
+
/**
* DOC: dp mst helper
*
@@ -43,9 +52,14 @@
* protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and received of sideband msgs.
*/
+struct drm_dp_pending_up_req {
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct list_head next;
+};
+
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
-static int test_calc_pbn_mode(void);
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
@@ -53,12 +67,20 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload);
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
+
+static void
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
+
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -69,6 +91,8 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+#define DBG_PREFIX "[dp_mst]"
+
#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
@@ -125,6 +149,43 @@ static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
}
#undef DP_STR
+#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
+
+static const char *drm_dp_mst_sideband_tx_state_str(int state)
+{
+ static const char * const sideband_reason_str[] = {
+ DP_STR(QUEUED),
+ DP_STR(START_SEND),
+ DP_STR(SENT),
+ DP_STR(RX),
+ DP_STR(TIMEOUT),
+ };
+
+ if (state >= ARRAY_SIZE(sideband_reason_str) ||
+ !sideband_reason_str[state])
+ return "unknown";
+
+ return sideband_reason_str[state];
+}
+
+static int
+drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
+{
+ int i;
+ u8 unpacked_rad[16];
+
+ for (i = 0; i < lct; i++) {
+ if (i % 2)
+ unpacked_rad[i] = rad[i / 2] >> 4;
+ else
+ unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
+ }
+
+ /* TODO: Eventually add something to printk so we can format the rad
+ * like this: 1.2.3
+ */
+ return snprintf(out, len, "%*phC", lct, unpacked_rad);
+}
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
@@ -257,8 +318,9 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
return true;
}
-static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
- struct drm_dp_sideband_msg_tx *raw)
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
int i;
@@ -267,6 +329,8 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
idx++;
break;
@@ -334,7 +398,7 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
@@ -354,14 +418,255 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+ }
+ raw->cur_len = idx;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
+
+/* Decode a sideband request we've encoded, mainly used for debugging */
+int
+drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req)
+{
+ const u8 *buf = raw->msg;
+ int i, idx = 0;
+ req->req_type = buf[idx++] & 0x7f;
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
- buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
- idx++;
+ req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ {
+ struct drm_dp_allocate_payload *a =
+ &req->u.allocate_payload;
+
+ a->number_sdp_streams = buf[idx] & 0xf;
+ a->port_number = (buf[idx] >> 4) & 0xf;
+
+ WARN_ON(buf[++idx] & 0x80);
+ a->vcpi = buf[idx] & 0x7f;
+
+ a->pbn = buf[++idx] << 8;
+ a->pbn |= buf[++idx];
+
+ idx++;
+ for (i = 0; i < a->number_sdp_streams; i++) {
+ a->sdp_stream_sink[i] =
+ (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
+ }
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
+ WARN_ON(buf[++idx] & 0x80);
+ req->u.query_payload.vcpi = buf[idx] & 0x7f;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ {
+ struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
+
+ r->port_number = (buf[idx] >> 4) & 0xf;
+
+ r->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ r->dpcd_address |= buf[++idx] & 0xff;
+
+ r->num_bytes = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ {
+ struct drm_dp_remote_dpcd_write *w =
+ &req->u.dpcd_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+
+ w->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ w->dpcd_address |= buf[++idx] & 0xff;
+
+ w->num_bytes = buf[++idx];
+
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ case DP_REMOTE_I2C_READ:
+ {
+ struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
+ struct drm_dp_remote_i2c_read_tx *tx;
+ bool failed = false;
+
+ r->num_transactions = buf[idx] & 0x3;
+ r->port_number = (buf[idx] >> 4) & 0xf;
+ for (i = 0; i < r->num_transactions; i++) {
+ tx = &r->transactions[i];
+
+ tx->i2c_dev_id = buf[++idx] & 0x7f;
+ tx->num_bytes = buf[++idx];
+ tx->bytes = kmemdup(&buf[++idx],
+ tx->num_bytes,
+ GFP_KERNEL);
+ if (!tx->bytes) {
+ failed = true;
+ break;
+ }
+ idx += tx->num_bytes;
+ tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
+ tx->i2c_transaction_delay = buf[idx] & 0xf;
+ }
+
+ if (failed) {
+ for (i = 0; i < r->num_transactions; i++) {
+ tx = &r->transactions[i];
+ kfree(tx->bytes);
+ }
+ return -ENOMEM;
+ }
+
+ r->read_i2c_device_id = buf[++idx] & 0x7f;
+ r->num_bytes_read = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ {
+ struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+ w->write_i2c_device_id = buf[++idx] & 0x7f;
+ w->num_bytes = buf[++idx];
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
+
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer)
+{
+ int i;
+
+#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
+ if (req->req_type == DP_LINK_ADDRESS) {
+ /* No contents to print */
+ P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
+ return;
+ }
+
+ P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
+ indent++;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ P("port=%d\n", req->u.port_num.port_number);
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
+ req->u.allocate_payload.port_number,
+ req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.sdp_stream_sink);
+ break;
+ case DP_QUERY_PAYLOAD:
+ P("port=%d vcpi=%d\n",
+ req->u.query_payload.port_number,
+ req->u.query_payload.vcpi);
+ break;
+ case DP_REMOTE_DPCD_READ:
+ P("port=%d dpcd_addr=%05x len=%d\n",
+ req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
+ req->u.dpcd_read.num_bytes);
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ P("port=%d addr=%05x len=%d: %*ph\n",
+ req->u.dpcd_write.port_number,
+ req->u.dpcd_write.dpcd_address,
+ req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
+ req->u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ P("port=%d num_tx=%d id=%d size=%d:\n",
+ req->u.i2c_read.port_number,
+ req->u.i2c_read.num_transactions,
+ req->u.i2c_read.read_i2c_device_id,
+ req->u.i2c_read.num_bytes_read);
+
+ indent++;
+ for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
+ const struct drm_dp_remote_i2c_read_tx *rtx =
+ &req->u.i2c_read.transactions[i];
+
+ P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
+ i, rtx->i2c_dev_id, rtx->num_bytes,
+ rtx->no_stop_bit, rtx->i2c_transaction_delay,
+ rtx->num_bytes, rtx->bytes);
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ P("port=%d id=%d size=%d: %*ph\n",
+ req->u.i2c_write.port_number,
+ req->u.i2c_write.write_i2c_device_id,
+ req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
+ req->u.i2c_write.bytes);
+ break;
+ default:
+ P("???\n");
+ break;
+ }
+#undef P
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
+
+static inline void
+drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
+ const struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ char buf[64];
+ int ret;
+ int i;
+
+ drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
+ sizeof(buf));
+ drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
+ txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
+ drm_dp_mst_sideband_tx_state_str(txmsg->state),
+ txmsg->path_msg, buf);
+
+ ret = drm_dp_decode_sideband_req(txmsg, &req);
+ if (ret) {
+ drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
+ return;
+ }
+ drm_dp_dump_sideband_msg_req_body(&req, 1, p);
+
+ switch (req.req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(req.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < req.u.i2c_read.num_transactions; i++)
+ kfree(req.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(req.u.i2c_write.bytes);
break;
}
- raw->cur_len = idx;
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
@@ -548,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
{
int idx = 1;
repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+ repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
idx++;
if (idx > raw->curlen)
goto fail_len;
@@ -652,6 +958,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
+ case DP_CLEAR_PAYLOAD_ID_TABLE:
+ return true; /* since there's nothing to parse */
default:
DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
drm_dp_mst_req_type_str(msg->req_type));
@@ -750,6 +1058,15 @@ static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
return 0;
}
+static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
+ drm_dp_encode_sideband_req(&req, msg);
+ return 0;
+}
+
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
struct drm_dp_sideband_msg_req_body req;
@@ -837,11 +1154,11 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
clear_bit(vcpi - 1, &mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i])
- if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
- mgr->proposed_vcpis[i] = NULL;
- clear_bit(i + 1, &mgr->payload_mask);
- }
+ if (mgr->proposed_vcpis[i] &&
+ mgr->proposed_vcpis[i]->vcpi == vcpi) {
+ mgr->proposed_vcpis[i] = NULL;
+ clear_bit(i + 1, &mgr->payload_mask);
+ }
}
mutex_unlock(&mgr->payload_lock);
}
@@ -892,10 +1209,18 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
+ mgr->is_waiting_for_dwn_reply = false;
+
}
out:
+ if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
mutex_unlock(&mgr->qlock);
+ drm_dp_mst_kick_tx(mgr);
return ret;
}
@@ -1103,39 +1428,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
-static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+
+#define STACK_DEPTH 8
+
+static noinline void
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_ref_history *history,
+ enum drm_dp_mst_topology_ref_type type)
{
- struct drm_dp_mst_branch *mstb =
- container_of(kref, struct drm_dp_mst_branch, topology_kref);
- struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
- bool wake_tx = false;
+ struct drm_dp_mst_topology_ref_entry *entry = NULL;
+ depot_stack_handle_t backtrace;
+ ulong stack_entries[STACK_DEPTH];
+ uint n;
+ int i;
- mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
- list_del(&port->next);
- drm_dp_mst_topology_put_port(port);
+ n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
+ backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
+ if (!backtrace)
+ return;
+
+ /* Try to find an existing entry for this backtrace */
+ for (i = 0; i < history->len; i++) {
+ if (history->entries[i].backtrace == backtrace) {
+ entry = &history->entries[i];
+ break;
+ }
}
- mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
- mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
+ /* Otherwise add one */
+ if (!entry) {
+ struct drm_dp_mst_topology_ref_entry *new;
+ int new_len = history->len + 1;
+
+ new = krealloc(history->entries, sizeof(*new) * new_len,
+ GFP_KERNEL);
+ if (!new)
+ return;
+
+ entry = &new[history->len];
+ history->len = new_len;
+ history->entries = new;
+
+ entry->backtrace = backtrace;
+ entry->type = type;
+ entry->count = 0;
}
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
- wake_tx = true;
+ entry->count++;
+ entry->ts_nsec = ktime_get_ns();
+}
+
+static int
+topology_ref_history_cmp(const void *a, const void *b)
+{
+ const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
+
+ if (entry_a->ts_nsec > entry_b->ts_nsec)
+ return 1;
+ else if (entry_a->ts_nsec < entry_b->ts_nsec)
+ return -1;
+ else
+ return 0;
+}
+
+static inline const char *
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
+{
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
+ return "get";
+ else
+ return "put";
+}
+
+static void
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+ void *ptr, const char *type_str)
+{
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ int i;
+
+ if (!buf)
+ return;
+
+ if (!history->len)
+ goto out;
+
+ /* First, sort the list so that it goes from oldest to newest
+ * reference entry
+ */
+ sort(history->entries, history->len, sizeof(*history->entries),
+ topology_ref_history_cmp, NULL);
+
+ drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
+ type_str, ptr);
+
+ for (i = 0; i < history->len; i++) {
+ const struct drm_dp_mst_topology_ref_entry *entry =
+ &history->entries[i];
+ ulong *entries;
+ uint nr_entries;
+ u64 ts_nsec = entry->ts_nsec;
+ u32 rem_nsec = do_div(ts_nsec, 1000000000);
+
+ nr_entries = stack_depot_fetch(entry->backtrace, &entries);
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+
+ drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
+ entry->count,
+ topology_ref_type_to_str(entry->type),
+ ts_nsec, rem_nsec / 1000, buf);
}
- mutex_unlock(&mstb->mgr->qlock);
- if (wake_tx)
- wake_up_all(&mstb->mgr->tx_waitq);
+ /* Now free the history, since this is the only time we expose it */
+ kfree(history->entries);
+out:
+ kfree(buf);
+}
- drm_dp_mst_put_mstb_malloc(mstb);
+static __always_inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
+{
+ __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
+ "MSTB");
+}
+
+static __always_inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
+{
+ __dump_topology_ref_history(&port->topology_ref_history, port,
+ "Port");
+}
+
+static __always_inline void
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
+}
+
+static __always_inline void
+save_port_topology_ref(struct drm_dp_mst_port *port,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(port->mgr, &port->topology_ref_history, type);
+}
+
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->topology_ref_history_lock);
+}
+
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_unlock(&mgr->topology_ref_history_lock);
+}
+#else
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
+static inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+#define save_mstb_topology_ref(mstb, type)
+#define save_port_topology_ref(port, type)
+#endif
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+
+ drm_dp_mst_dump_mstb_topology_history(mstb);
+
+ INIT_LIST_HEAD(&mstb->destroy_next);
+
+ /*
+ * This can get called under mgr->mutex, so we need to perform the
+ * actual destruction of the mstb in another worker
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1163,11 +1643,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- int ret = kref_get_unless_zero(&mstb->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("mstb %p (%d)\n", mstb,
- kref_read(&mstb->topology_kref));
+ topology_ref_history_lock(mstb->mgr);
+ ret = kref_get_unless_zero(&mstb->topology_kref);
+ if (ret) {
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref));
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+
+ topology_ref_history_unlock(mstb->mgr);
return ret;
}
@@ -1188,9 +1674,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
*/
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+
+ topology_ref_history_unlock(mstb->mgr);
}
/**
@@ -1208,27 +1699,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref) - 1);
- kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
-}
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
-{
- struct drm_dp_mst_branch *mstb;
-
- switch (old_pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mstb = port->mstb;
- port->mstb = NULL;
- drm_dp_mst_topology_put_mstb(mstb);
- break;
- }
+ topology_ref_history_unlock(mstb->mgr);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
@@ -1237,31 +1715,24 @@ static void drm_dp_destroy_port(struct kref *kref)
container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
- if (!port->input) {
- kfree(port->cached_edid);
+ drm_dp_mst_dump_port_topology_history(port);
- /*
- * The only time we don't have a connector
- * on an output port is if the connector init
- * fails.
- */
- if (port->connector) {
- /* we can't destroy the connector here, as
- * we might be holding the mode_config.mutex
- * from an EDID retrieval */
-
- mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->next, &mgr->destroy_connector_list);
- mutex_unlock(&mgr->destroy_connector_lock);
- schedule_work(&mgr->destroy_connector_work);
- return;
- }
- /* no need to clean up vcpi
- * as if we have no connector we never setup a vcpi */
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ /* There's nothing that needs locking to destroy an input port yet */
+ if (port->input) {
+ drm_dp_mst_put_port_malloc(port);
+ return;
}
- drm_dp_mst_put_port_malloc(port);
+
+ kfree(port->cached_edid);
+
+ /*
+ * we can't destroy the connector here, as we might be holding the
+ * mode_config.mutex from an EDID retrieval
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&port->next, &mgr->destroy_port_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1289,12 +1760,17 @@ static void drm_dp_destroy_port(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- int ret = kref_get_unless_zero(&port->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("port %p (%d)\n", port,
- kref_read(&port->topology_kref));
+ topology_ref_history_lock(port->mgr);
+ ret = kref_get_unless_zero(&port->topology_kref);
+ if (ret) {
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+ topology_ref_history_unlock(port->mgr);
return ret;
}
@@ -1313,9 +1789,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+
+ topology_ref_history_unlock(port->mgr);
}
/**
@@ -1331,8 +1812,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref) - 1);
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+ topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port);
}
@@ -1449,38 +1935,142 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-/*
- * return sends link address for new mstb
- */
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
{
- int ret;
- u8 rad[6], lct;
- bool send_link = false;
- switch (port->pdt) {
+ switch (pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
- /* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
- break;
+ return true;
case DP_PEER_DEVICE_MST_BRANCHING:
- lct = drm_dp_calculate_rad(port, rad);
+ /* For sst branch device */
+ if (!mcs)
+ return true;
+
+ return false;
+ }
+ return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+ bool new_mcs)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ struct drm_dp_mst_branch *mstb;
+ u8 rad[8], lct;
+ int ret = 0;
+
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
+ return 0;
+
+ /* Teardown the old pdt, if there is one */
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /*
+ * If the new PDT would also have an i2c bus,
+ * don't bother with reregistering it
+ */
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+ return 0;
+ }
+
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ } else {
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ }
+ }
+
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ } else {
+ lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p",
+ port);
+ goto out;
+ }
+
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (port->mstb) {
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
/*
* Make sure this port's memory allocation stays
* around until its child MSTB releases it
*/
drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
- send_link = true;
+ /* And make sure we send a link address for this */
+ ret = 1;
}
- break;
}
- return send_link;
+
+out:
+ if (ret < 0)
+ port->pdt = DP_PEER_DEVICE_NONE;
+ return ret;
+}
+
+/**
+ * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
+ * @aux: Fake sideband AUX CH
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Performs the same functionality for remote devices via
+ * sideband messaging as drm_dp_dpcd_read() does for local
+ * devices via actual AUX CH.
+ *
+ * Return: Number of bytes read, or negative error code on failure.
+ */
+ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
+ aux);
+
+ return drm_dp_send_dpcd_read(port->mgr, port,
+ offset, size, buffer);
+}
+
+/**
+ * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
+ * @aux: Fake sideband AUX CH
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Performs the same functionality for remote devices via
+ * sideband messaging as drm_dp_dpcd_write() does for local
+ * devices via actual AUX CH.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
+ aux);
+
+ return drm_dp_send_dpcd_write(port->mgr, port,
+ offset, size, buffer);
}
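
These two helpers back the fake AUX channel that drm_dp_mst_add_port() marks with aux.is_remote further down, so a driver can usually go through the ordinary DPCD accessors with &port->aux instead of calling them directly. A minimal sketch under that assumption; the sink-count read is only an illustration, and the caller is assumed to already hold a reference on the port:

/*
 * Minimal sketch, assuming the generic DPCD helpers dispatch to the
 * sideband variants above when aux->is_remote is set. The caller is
 * assumed to already hold a reference on @port.
 */
static int example_remote_sink_count(struct drm_dp_mst_port *port)
{
	u8 count;
	ssize_t ret;

	ret = drm_dp_dpcd_readb(&port->aux, DP_SINK_COUNT, &count);
	if (ret < 0)
		return ret;

	return DP_GET_SINK_COUNT(count);
}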
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
@@ -1526,44 +2116,176 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
strlcat(proppath, temp, proppath_size);
}
-static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
- struct drm_device *dev,
- struct drm_dp_link_addr_reply_port *port_msg)
+/**
+ * drm_dp_mst_connector_late_register() - Late MST connector registration
+ * @connector: The MST connector
+ * @port: The MST port for this connector
+ *
+ * Helper to register the remote aux device for this MST port. Drivers should
+ * call this from their mst connector's late_register hook to enable MST aux
+ * devices.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_dp_mst_connector_late_register(struct drm_connector *connector,
+ struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG_KMS("registering %s remote bus for %s\n",
+ port->aux.name, connector->kdev->kobj.name);
+
+ port->aux.dev = connector->kdev;
+ return drm_dp_aux_register_devnode(&port->aux);
+}
+EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
+
+/**
+ * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
+ * @connector: The MST connector
+ * @port: The MST port for this connector
+ *
+ * Helper to unregister the remote aux device for this MST port, registered by
+ * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
+ * connector's early_unregister hook.
+ */
+void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
+ struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
+ port->aux.name, connector->kdev->kobj.name);
+ drm_dp_aux_unregister_devnode(&port->aux);
+}
+EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
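
A hedged sketch of how a driver might wire these two helpers into its MST connector funcs; mst_connector_to_port() is a hypothetical accessor standing in for however the driver maps its connector back to the drm_dp_mst_port:

/*
 * Sketch only: mst_connector_to_port() is a hypothetical driver helper,
 * and the funcs table omits the usual fill_modes/destroy/atomic hooks.
 */
static int example_mst_connector_late_register(struct drm_connector *connector)
{
	struct drm_dp_mst_port *port = mst_connector_to_port(connector);

	return drm_dp_mst_connector_late_register(connector, port);
}

static void example_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct drm_dp_mst_port *port = mst_connector_to_port(connector);

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs example_mst_connector_funcs = {
	/* ... remaining connector hooks elided ... */
	.late_register = example_mst_connector_late_register,
	.early_unregister = example_mst_connector_early_unregister,
};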
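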
+
+static void
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ char proppath[255];
+ int ret;
+
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
+ if (!port->connector) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
+ drm_connector_set_tile_property(port->connector);
+ }
+
+ mgr->cbs->register_connector(port->connector);
+ return;
+
+error:
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
+}
+
+/*
+ * Drop a topology reference, and unlink the port from the in-memory topology
+ * layout
+ */
+static void
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
+{
+ mutex_lock(&mgr->lock);
+ port->parent->num_ports--;
+ list_del(&port->next);
+ mutex_unlock(&mgr->lock);
+ drm_dp_mst_topology_put_port(port);
+}
+
+static struct drm_dp_mst_port *
+drm_dp_mst_add_port(struct drm_device *dev,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb, u8 port_number)
+{
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+
+ if (!port)
+ return NULL;
+
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
+ port->parent = mstb;
+ port->port_num = port_number;
+ port->mgr = mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev->dev;
+ port->aux.is_remote = true;
+
+ /* initialize the MST downstream port's AUX crc work queue */
+ drm_dp_remote_aux_init(&port->aux);
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
+ return port;
+}
+
+static int
+drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
+ struct drm_device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- bool ret;
- bool created = false;
- int old_pdt = 0;
- int old_ddps = 0;
+ int old_ddps = 0, ret;
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool new_mcs = 0;
+ bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
if (!port)
- return;
- kref_init(&port->topology_kref);
- kref_init(&port->malloc_kref);
- port->parent = mstb;
- port->port_num = port_msg->port_number;
- port->mgr = mstb->mgr;
- port->aux.name = "DPMST";
- port->aux.dev = dev->dev;
-
- /*
- * Make sure the memory allocation for our parent branch stays
- * around until our own memory allocation is released
+ return -ENOMEM;
+ created = true;
+ changed = true;
+ } else if (!port->input && port_msg->input_port && port->connector) {
+ /* Since port->connector can't be changed here, we create a
+ * new port if input_port changes from 0 to 1
*/
- drm_dp_mst_get_mstb_malloc(mstb);
-
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ drm_dp_mst_topology_put_port(port);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
+ if (!port)
+ return -ENOMEM;
+ changed = true;
created = true;
- } else {
- old_pdt = port->pdt;
+ } else if (port->input && !port_msg->input_port) {
+ changed = true;
+ } else if (port->connector) {
+ /* We're updating a port that's exposed to userspace, so do it
+ * under lock
+ */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+
old_ddps = port->ddps;
+ changed = port->ddps != port_msg->ddps ||
+ (port->ddps &&
+ (port->ldps != port_msg->legacy_device_plug_status ||
+ port->dpcd_rev != port_msg->dpcd_revision ||
+ port->mcs != port_msg->mcs ||
+ port->pdt != port_msg->peer_device_type ||
+ port->num_sdp_stream_sinks !=
+ port_msg->num_sdp_stream_sinks));
}
- port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port;
- port->mcs = port_msg->mcs;
+ if (!port->input)
+ new_pdt = port_msg->peer_device_type;
+ new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
@@ -1573,78 +2295,108 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
- mutex_lock(&mstb->mgr->lock);
+ mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
- mutex_unlock(&mstb->mgr->lock);
+ mstb->num_ports++;
+ mutex_unlock(&mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
if (!port->input) {
- drm_dp_send_enum_path_resources(mstb->mgr,
- mstb, port);
+ drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
}
} else {
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
-
- ret = drm_dp_port_setup_pdt(port);
- if (ret == true)
- drm_dp_send_link_address(mstb->mgr, port->mstb);
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
+ if (ret == 1) {
+ send_link_addr = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
+ port, ret);
+ goto fail;
}
- if (created && !port->input) {
- char proppath[255];
-
- build_mst_prop_path(mstb, port->port_num, proppath,
- sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
- port,
- proppath);
- if (!port->connector) {
- /* remove it from the port list */
- mutex_lock(&mstb->mgr->lock);
- list_del(&port->next);
- mutex_unlock(&mstb->mgr->lock);
- /* drop port list reference */
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector,
- &port->aux.ddc);
- drm_connector_set_tile_property(port->connector);
- }
- (*mstb->mgr->cbs->register_connector)(port->connector);
+ /*
+ * If this port wasn't just created, then we're reprobing because
+ * we're coming out of suspend. In this case, always resend the link
+ * address if there's an MSTB on this port
+ */
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
+ send_link_addr = true;
+
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (!port->input)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+ if (send_link_addr && port->mstb) {
+ ret = drm_dp_send_link_address(mgr, port->mstb);
+ if (ret == 1) /* MSTB below us changed */
+ changed = true;
+ else if (ret < 0)
+ goto fail_put;
}
-out:
/* put reference to this port */
drm_dp_mst_topology_put_port(port);
+ return changed;
+
+fail:
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+fail_put:
+ drm_dp_mst_topology_put_port(port);
+ return ret;
}
-static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
- struct drm_dp_connection_status_notify *conn_stat)
+static void
+drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_pdt;
- int old_ddps;
- bool dowork = false;
+ int old_ddps, old_input, ret, i;
+ u8 new_pdt;
+ bool new_mcs;
+ bool dowork = false, create_connector = false;
+
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return;
+ if (port->connector) {
+ if (!port->input && conn_stat->input_port) {
+ /*
+ * We can't remove a connector from an already exposed
+ * port, so just throw the port out and make sure we
+			 * reprobe the link address of its parent MSTB
+ */
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ mstb->link_address_sent = false;
+ dowork = true;
+ goto out;
+ }
+
+ /* Locking is only needed if the port's exposed to userspace */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ } else if (port->input && !conn_stat->input_port) {
+ create_connector = true;
+ /* Reprobe link address so we get num_sdp_streams */
+ mstb->link_address_sent = false;
+ dowork = true;
+ }
+
old_ddps = port->ddps;
- old_pdt = port->pdt;
- port->pdt = conn_stat->peer_device_type;
- port->mcs = conn_stat->message_capability_status;
+ old_input = port->input;
+ port->input = conn_stat->input_port;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -1655,17 +2407,49 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
- if (drm_dp_port_setup_pdt(port))
- dowork = true;
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
+ new_mcs = conn_stat->message_capability_status;
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
+ if (ret == 1) {
+ dowork = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
+ port, ret);
+ dowork = false;
+ }
+
+ if (!old_input && old_ddps != port->ddps && !port->ddps) {
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_mst_port *port_validated;
+
+ if (!vcpi)
+ continue;
+
+ port_validated =
+ container_of(vcpi, struct drm_dp_mst_port, vcpi);
+ port_validated =
+ drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+ if (!port_validated) {
+ mutex_lock(&mgr->payload_lock);
+ vcpi->num_slots = 0;
+ mutex_unlock(&mgr->payload_lock);
+ } else {
+ drm_dp_mst_topology_put_port(port_validated);
+ }
+ }
}
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (create_connector)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+out:
drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
-
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -1708,7 +2492,7 @@ out:
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *mstb,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -1732,7 +2516,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -1751,42 +2535,67 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
return mstb;
}
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
- struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent)
- drm_dp_send_link_address(mgr, mstb);
+ int ret;
+ bool changed = false;
+
+ if (!mstb->link_address_sent) {
+ ret = drm_dp_send_link_address(mgr, mstb);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
+ }
list_for_each_entry(port, &mstb->ports, next) {
- if (port->input)
- continue;
+ struct drm_dp_mst_branch *mstb_child = NULL;
- if (!port->ddps)
+ if (port->input || !port->ddps)
continue;
- if (!port->available_pbn)
+ if (!port->available_pbn) {
+ drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port);
+ drm_modeset_unlock(&mgr->base.lock);
+ changed = true;
+ }
- if (port->mstb) {
+ if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
- if (mstb_child) {
- drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- }
+
+ if (mstb_child) {
+ ret = drm_dp_check_and_send_link_address(mgr,
+ mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
}
}
+
+ return changed;
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ bool clear_payload_id_table;
+
+ mutex_lock(&mgr->probe_lock);
mutex_lock(&mgr->lock);
+ clear_payload_id_table = !mgr->payload_id_table_cleared;
+ mgr->payload_id_table_cleared = true;
+
mstb = mgr->mst_primary;
if (mstb) {
ret = drm_dp_mst_topology_try_get_mstb(mstb);
@@ -1794,10 +2603,30 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
- if (mstb) {
- drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_mst_topology_put_mstb(mstb);
+ if (!mstb) {
+ mutex_unlock(&mgr->probe_lock);
+ return;
+ }
+
+ /*
+ * Certain branch devices seem to incorrectly report an available_pbn
+ * of 0 on downstream sinks, even after clearing the
+ * DP_PAYLOAD_ALLOCATE_* registers in
+ * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
+ * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
+ * things work again.
+ */
+ if (clear_payload_id_table) {
+ DRM_DEBUG_KMS("Clearing payload ID table\n");
+ drm_dp_send_clear_payload_id_table(mgr, mstb);
}
+
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ mutex_unlock(&mgr->probe_lock);
+ if (ret)
+ drm_kms_helper_hotplug_event(dev);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1816,7 +2645,6 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
-#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1829,7 +2657,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
return 0;
}
-#endif
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
bool up, u8 *msg, int len)
@@ -1945,8 +2772,11 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
idx += tosend + 1;
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
- if (ret) {
- DRM_DEBUG_KMS("sideband msg failed to send\n");
+ if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_printf(&p, "sideband msg failed to send\n");
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
return ret;
}
@@ -1973,9 +2803,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
/* txmsg is sent it should be in the slots now */
+ mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2008,21 +2840,53 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
- if (list_is_singular(&mgr->tx_msg_downq))
+
+ if (drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
+
+ if (list_is_singular(&mgr->tx_msg_downq) &&
+ !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static void
+drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
+{
+ struct drm_dp_link_addr_reply_port *port_reply;
+ int i;
+
+ for (i = 0; i < reply->nports; i++) {
+ port_reply = &reply->ports[i];
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
+ i,
+ port_reply->input_port,
+ port_reply->peer_device_type,
+ port_reply->port_number,
+ port_reply->dpcd_revision,
+ port_reply->mcs,
+ port_reply->ddps,
+ port_reply->legacy_device_plug_status,
+ port_reply->num_sdp_streams,
+ port_reply->num_sdp_stream_sinks);
+ }
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
- int len;
struct drm_dp_sideband_msg_tx *txmsg;
- int ret;
+ struct drm_dp_link_address_ack_reply *reply;
+ struct drm_dp_mst_port *port, *tmp;
+ int i, len, ret, port_mask = 0;
+ bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return;
+ return -ENOMEM;
txmsg->dst = mstb;
len = build_link_address(txmsg);
@@ -2030,48 +2894,89 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
+ /* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- int i;
+ if (ret <= 0) {
+ DRM_ERROR("Sending link address failed with %d\n", ret);
+ goto out;
+ }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+ DRM_ERROR("link address NAK received\n");
+ ret = -EIO;
+ goto out;
+ }
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_DEBUG_KMS("link address nak received\n");
- } else {
- DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
- txmsg->reply.u.link_addr.ports[i].input_port,
- txmsg->reply.u.link_addr.ports[i].peer_device_type,
- txmsg->reply.u.link_addr.ports[i].port_number,
- txmsg->reply.u.link_addr.ports[i].dpcd_revision,
- txmsg->reply.u.link_addr.ports[i].mcs,
- txmsg->reply.u.link_addr.ports[i].ddps,
- txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
- txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
- txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
- }
+ reply = &txmsg->reply.u.link_addr;
+ DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
+ drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+ drm_dp_check_mstb_guid(mstb, reply->guid);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
- }
- drm_kms_helper_hotplug_event(mgr->dev);
- }
- } else {
- mstb->link_address_sent = false;
- DRM_DEBUG_KMS("link address failed %d\n", ret);
+ for (i = 0; i < reply->nports; i++) {
+ port_mask |= BIT(reply->ports[i].port_number);
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
+ &reply->ports[i]);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ goto out;
}
+ /* Prune any ports that are currently a part of mstb in our in-memory
+ * topology, but were not seen in this link address. Usually this
+ * means that they were removed while the topology was out of sync,
+ * e.g. during suspend/resume
+ */
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ if (port_mask & BIT(port->port_num))
+ continue;
+
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
+ port->port_num);
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ changed = true;
+ }
+ mutex_unlock(&mgr->lock);
+
+out:
+ if (ret <= 0)
+ mstb->link_address_sent = false;
kfree(txmsg);
+ return ret < 0 ? ret : changed;
}
-static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
- int len;
struct drm_dp_sideband_msg_tx *txmsg;
+ int len, ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return;
+
+ txmsg->dst = mstb;
+ len = build_clear_payload_id_table(txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("clear payload table id nak received\n");
+
+ kfree(txmsg);
+}
+
+static int
+drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_enum_path_resources_ack_reply *path_res;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2085,14 +2990,21 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
+ path_res = &txmsg->reply.u.path_resources;
+
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("enum path resources nak received\n");
} else {
- if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ if (port->port_num != path_res->port_number)
DRM_ERROR("got incorrect port in response\n");
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
- txmsg->reply.u.path_resources.avail_payload_bw_number);
- port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
+ path_res->port_number,
+ path_res->full_payload_bw_number,
+ path_res->avail_payload_bw_number);
+ port->available_pbn =
+ path_res->avail_payload_bw_number;
+ port->fec_capable = path_res->fec_capable;
}
}
@@ -2375,9 +3287,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_mst_topology_put_port(port);
}
- for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
+ for (i = 0; i < mgr->max_payloads; /* do nothing */) {
+ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
+ i++;
continue;
+ }
DRM_DEBUG_KMS("removing payload %d\n", i);
for (j = i; j < mgr->max_payloads - 1; j++) {
@@ -2441,26 +3355,58 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
-#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
- int offset, int size)
+ int offset, int size, u8 *bytes)
{
int len;
+ int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
- if (!txmsg)
- return -ENOMEM;
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
- len = build_dpcd_read(txmsg, port->port_num, 0, 8);
+ len = build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
- return 0;
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret < 0)
+ goto fail_free;
+
+	/* DPCD read should never be NAKed */
+ if (txmsg->reply.reply_type == 1) {
+ DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
+ mstb, port->port_num, offset, size);
+ ret = -EIO;
+ goto fail_free;
+ }
+
+ if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
+ ret = -EPROTO;
+ goto fail_free;
+ }
+
+ ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
+ size);
+ memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
+
+fail_free:
+ kfree(txmsg);
+fail_put:
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ return ret;
}
-#endif
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
@@ -2489,7 +3435,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- ret = -EINVAL;
+ ret = -EIO;
else
ret = 0;
}
@@ -2533,30 +3479,13 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
- int dp_link_count,
- int *out)
+static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
{
- switch (dp_link_bw) {
- default:
+ if (dp_link_bw == 0 || dp_link_count == 0)
DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
- return false;
- case DP_LINK_BW_1_62:
- *out = 3 * dp_link_count;
- break;
- case DP_LINK_BW_2_7:
- *out = 5 * dp_link_count;
- break;
- case DP_LINK_BW_5_4:
- *out = 10 * dp_link_count;
- break;
- case DP_LINK_BW_8_1:
- *out = 15 * dp_link_count;
- break;
- }
- return true;
+ return dp_link_bw * dp_link_count / 2;
}
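
The closed form gives the same per-lane figures as the switch statement it replaces, because the DPCD link-rate codes are exactly twice the old per-lane multipliers; a couple of spot checks:

/*
 * Spot checks against the old lookup table, taking the DPCD codes at
 * face value (DP_LINK_BW_1_62 == 0x06, DP_LINK_BW_5_4 == 0x14,
 * DP_LINK_BW_8_1 == 0x1e):
 *
 *   1.62 Gbps x 4 lanes:  6 * 4 / 2 = 12   (old table: 3 * 4)
 *   5.4  Gbps x 2 lanes: 20 * 2 / 2 = 20   (old table: 10 * 2)
 *   8.1  Gbps x 4 lanes: 30 * 4 / 2 = 60   (old table: 15 * 4)
 */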
/**
@@ -2570,6 +3499,7 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
+ int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
mutex_lock(&mgr->lock);
@@ -2588,9 +3518,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
- mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
- &mgr->pbn_div)) {
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (mgr->pbn_div == 0) {
ret = -EINVAL;
goto out_unlock;
}
@@ -2630,10 +3560,23 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
+ mutex_lock(&mgr->payload_lock);
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+
+ if (vcpi) {
+ vcpi->vcpi = 0;
+ vcpi->num_slots = 0;
+ }
+ mgr->proposed_vcpis[i] = NULL;
+ }
mgr->vcpi_mask = 0;
+ mutex_unlock(&mgr->payload_lock);
+
+ mgr->payload_id_table_cleared = false;
}
out_unlock:
@@ -2645,6 +3588,23 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+static void
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ /* The link address will need to be re-sent on resume */
+ mstb->link_address_sent = false;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ /* The PBN for each port will also need to be re-probed */
+ port->available_pbn = 0;
+
+ if (port->mstb)
+ drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
+ }
+}
+
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
@@ -2658,62 +3618,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ flush_work(&mgr->delayed_destroy_work);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_state && mgr->mst_primary)
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
+ * @sync: whether or not to perform topology reprobing synchronously
*
* This will fetch DPCD and see if the device is still there,
* if it is, it will rewrite the MSTM control bits, and return.
*
- * if the device fails this returns -1, and the driver should do
+ * If the device fails, this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked.
+ *
+ * During system resume (where it is assumed that the driver will be calling
+ * drm_atomic_helper_resume()) this function should be called beforehand with
+ * @sync set to true. In contexts like runtime resume where the driver is not
+ * expected to be calling drm_atomic_helper_resume(), this function should be
+ * called with @sync set to false in order to avoid deadlocking.
+ *
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
+ * otherwise.
*/
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync)
{
- int ret = 0;
+ int ret;
+ u8 guid[16];
mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
- if (mgr->mst_primary) {
- int sret;
- u8 guid[16];
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
+ DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (sret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
- /* Some hubs forget their guids after they resume */
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (sret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ /*
+ * For the final step of resuming the topology, we need to bring the
+ * state of our in-memory topology back into sync with reality. So,
+ * restart the probing process as if we're probing a new hub
+ */
+ queue_work(system_long_wq, &mgr->work);
+ mutex_unlock(&mgr->lock);
- ret = 0;
- } else
- ret = -1;
+ if (sync) {
+ DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+ flush_work(&mgr->work);
+ }
-out_unlock:
+ return 0;
+
+out_fail:
mutex_unlock(&mgr->lock);
- return ret;
+ return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
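
A hedged sketch of the system-resume ordering the kernel-doc above describes; the example_* names and the fields on "priv" are placeholders rather than any real driver:

/*
 * Sketch of a driver's system-resume path: re-sync the MST topology
 * synchronously before handing the saved atomic state back to
 * drm_atomic_helper_resume(). "priv" and its fields are hypothetical.
 */
static int example_driver_resume(struct example_driver *priv)
{
	int ret;

	ret = drm_dp_mst_topology_mgr_resume(&priv->mst_mgr, true);
	if (ret < 0) {
		/* Topology went away while suspended; tear down MST state */
		drm_dp_mst_topology_mgr_set_mst(&priv->mst_mgr, false);
	}

	return drm_atomic_helper_resume(priv->drm_dev, priv->suspend_state);
}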
@@ -2768,136 +3755,203 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
- int ret = 0;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
+ int slot = -1;
+
+ if (!drm_dp_get_one_sb_msg(mgr, false))
+ goto clear_down_rep_recv;
- if (!drm_dp_get_one_sb_msg(mgr, false)) {
- memset(&mgr->down_rep_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
+ if (!mgr->down_rep_recv.have_eomt)
return 0;
+
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ goto clear_down_rep_recv;
}
- if (mgr->down_rep_recv.have_eomt) {
- struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- int slot = -1;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad);
+ /* find the message */
+ slot = hdr->seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb, hdr->seqno, hdr->lct, hdr->rad[0],
+ mgr->down_rep_recv.msg[0]);
+ goto no_msg;
+ }
- /* find the message */
- slot = mgr->down_rep_recv.initial_hdr.seqno;
- mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
- mutex_unlock(&mgr->qlock);
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
- if (!txmsg) {
- DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
- mstb,
- mgr->down_rep_recv.initial_hdr.seqno,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad[0],
- mgr->down_rep_recv.msg[0]);
- drm_dp_mst_topology_put_mstb(mstb);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+ txmsg->reply.req_type,
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
+ txmsg->reply.u.nak.reason,
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+ txmsg->reply.u.nak.nak_data);
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ drm_dp_mst_topology_put_mstb(mstb);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
- txmsg->reply.req_type,
- drm_dp_mst_req_type_str(txmsg->reply.req_type),
- txmsg->reply.u.nak.reason,
- drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
- txmsg->reply.u.nak.nak_data);
-
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
- mutex_lock(&mgr->qlock);
- txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mutex_unlock(&mgr->qlock);
+ wake_up_all(&mgr->tx_waitq);
- wake_up_all(&mgr->tx_waitq);
- }
- return ret;
+ return 0;
+
+no_msg:
+ drm_dp_mst_topology_put_mstb(mstb);
+clear_down_rep_recv:
+ mutex_lock(&mgr->qlock);
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+
+ return 0;
}
-static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+static inline bool
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_pending_up_req *up_req)
{
- int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
+ struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
+ bool hotplug = false;
- if (!drm_dp_get_one_sb_msg(mgr, true)) {
- memset(&mgr->up_req_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+ if (hdr->broadcast) {
+ const u8 *guid = NULL;
+
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
+ guid = msg->u.conn_stat.guid;
+ else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+ guid = msg->u.resource_stat.guid;
+
+ if (guid)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ } else {
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
- if (mgr->up_req_recv.have_eomt) {
- struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb = NULL;
- bool seqno;
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ return false;
+ }
- if (!mgr->up_req_recv.initial_hdr.broadcast) {
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
- }
+ /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
- seqno = mgr->up_req_recv.initial_hdr.seqno;
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ drm_dp_mst_topology_put_mstb(mstb);
+ return hotplug;
+}
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+static void drm_dp_mst_up_req_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ up_req_work);
+ struct drm_dp_pending_up_req *up_req;
+ bool send_hotplug = false;
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+ mutex_lock(&mgr->probe_lock);
+ while (true) {
+ mutex_lock(&mgr->up_req_lock);
+ up_req = list_first_entry_or_null(&mgr->up_req_list,
+ struct drm_dp_pending_up_req,
+ next);
+ if (up_req)
+ list_del(&up_req->next);
+ mutex_unlock(&mgr->up_req_lock);
+
+ if (!up_req)
+ break;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
+ kfree(up_req);
+ }
+ mutex_unlock(&mgr->probe_lock);
- drm_dp_update_port(mstb, &msg.u.conn_stat);
+ if (send_hotplug)
+ drm_kms_helper_hotplug_event(mgr->dev);
+}
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
- drm_kms_helper_hotplug_event(mgr->dev);
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
+ struct drm_dp_pending_up_req *up_req;
+ bool seqno;
- } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+ if (!drm_dp_get_one_sb_msg(mgr, true))
+ goto out;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (!mgr->up_req_recv.have_eomt)
+ return 0;
- DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
- }
+ up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
+ if (!up_req) {
+ DRM_ERROR("Not enough memory to process MST up req\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&up_req->next);
- if (mstb)
- drm_dp_mst_topology_put_mstb(mstb);
+ seqno = hdr->seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
+ up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
+ DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
+ up_req->msg.req_type);
+ kfree(up_req);
+ goto out;
}
- return ret;
+
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ seqno, false);
+
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
+
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+ conn_stat->legacy_device_plug_status,
+ conn_stat->displayport_device_plug_status,
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
+
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
+ res_stat->port_number,
+ res_stat->available_pbn);
+ }
+
+ up_req->hdr = *hdr;
+ mutex_lock(&mgr->up_req_lock);
+ list_add_tail(&up_req->next, &mgr->up_req_list);
+ mutex_unlock(&mgr->up_req_lock);
+ queue_work(system_long_wq, &mgr->up_req_work);
+
+out:
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
}
/**
@@ -2941,32 +3995,43 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
+ * @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port
- * @port: unverified pointer to a port
+ * @port: pointer to a port
*
- * This returns the current connection state for a port. It validates the
- * port pointer still exists so the caller doesn't require a reference
+ * This returns the current connection state for a port.
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
- struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ goto out;
+
+ ret = connector_status_disconnected;
+
if (!port->ddps)
goto out;
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
+ if (!port->mcs)
+ ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
- status = connector_status_connected;
+ ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
@@ -2974,12 +4039,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
- status = connector_status_connected;
+ ret = connector_status_connected;
break;
}
out:
drm_dp_mst_topology_put_port(port);
- return status;
+ return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
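
Since the function now takes an acquisition context and can propagate a negative error such as -EDEADLK from drm_modeset_lock(), the natural caller is a connector's .detect_ctx hook. A hedged sketch, with the two accessors being hypothetical driver helpers:

/*
 * Sketch only: mst_connector_to_port() and mst_connector_to_mgr() are
 * hypothetical driver accessors for the connector's port and manager.
 */
static int example_mst_detect_ctx(struct drm_connector *connector,
				  struct drm_modeset_acquire_ctx *ctx,
				  bool force)
{
	struct drm_dp_mst_port *port = mst_connector_to_port(connector);
	struct drm_dp_mst_topology_mgr *mgr = mst_connector_to_mgr(connector);

	return drm_dp_mst_detect_port(connector, ctx, mgr, port);
}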
@@ -3085,6 +4150,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
* @mgr: MST topology manager for the port
* @port: port to find vcpi slots for
* @pbn: bandwidth required for the mode in PBN
+ * @pbn_div: divider for DSC mode that takes FEC into account
*
* Allocates VCPI slots to @port, replacing any previous VCPI allocations it
* may have had. Any atomic drivers which support MST must call this function
@@ -3111,11 +4177,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
*/
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn)
+ struct drm_dp_mst_port *port, int pbn,
+ int pbn_div)
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots, ret;
+ int prev_slots, prev_bw, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -3126,6 +4193,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
if (pos->port == port) {
vcpi = pos;
prev_slots = vcpi->vcpi;
+ prev_bw = vcpi->pbn;
/*
* This should never happen, unless the driver tries
@@ -3141,14 +4209,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
break;
}
}
- if (!vcpi)
+ if (!vcpi) {
prev_slots = 0;
+ prev_bw = 0;
+ }
+
+ if (pbn_div <= 0)
+ pbn_div = mgr->pbn_div;
- req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+ req_slots = DIV_ROUND_UP(pbn, pbn_div);
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_slots, req_slots);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_bw, pbn);
/* Add the new allocation to the state */
if (!vcpi) {
@@ -3161,9 +4237,9 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
list_add(&vcpi->next, &topology_state->vcpis);
}
vcpi->vcpi = req_slots;
+ vcpi->pbn = pbn;
- ret = req_slots;
- return ret;
+ return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
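
A hedged sketch of the atomic_check side, relying on the new @pbn_div parameter falling back to mgr->pbn_div when a non-positive value is passed; the function name, the fixed 24 bpp, and the way mgr/port are obtained are illustrative only:

/*
 * Sketch of an encoder's atomic_check allocating VCPI slots for a mode.
 * Passing 0 for pbn_div uses mgr->pbn_div, as shown above.
 */
static int example_mst_encoder_atomic_check(struct drm_atomic_state *state,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port,
					    struct drm_crtc_state *crtc_state)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock,
				   24 /* bpp, illustrative */,
				   false /* no DSC */);

	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
	if (slots < 0)
		return slots;

	return 0;
}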
@@ -3412,18 +4488,12 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
* @clock: dot clock for the mode
* @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
- u64 kbps;
- s64 peak_kbps;
- u32 numerator;
- u32 denominator;
-
- kbps = clock * bpp;
-
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
@@ -3433,42 +4503,21 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
+ *
+ * If the bpp is in units of 1/16, further divide by 16. Put this
+ * factor in the numerator rather than the denominator to avoid
+ * integer overflow
*/
- numerator = 64 * 1006;
- denominator = 54 * 8 * 1000 * 1000;
-
- kbps *= numerator;
- peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+ if (dsc)
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+ 8 * 54 * 1000 * 1000);
- return drm_fixp2int_ceil(peak_kbps);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
+ 8 * 54 * 1000 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
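
A worked instance of the arithmetic in the comment above, matching the first case of the self-test removed just below:

/*
 * drm_dp_calc_pbn_mode(154000, 30, false)
 *   = DIV_ROUND_UP(154000 * 30 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *   = DIV_ROUND_UP(297454080000, 432000000)
 *   = 689
 */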
-static int test_calc_pbn_mode(void)
-{
- int ret;
- ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 154000, 30, 689, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 234000, 30, 1047, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(297000, 24);
- if (ret != 1063) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 297000, 24, 1063, ret);
- return -EINVAL;
- }
- return 0;
-}
-
/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
@@ -3602,41 +4651,108 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq))
+ if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_destroy_connector_work(struct work_struct *work)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_dp_mst_port *port;
- bool send_hotplug = false;
+ if (port->connector)
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
+ drm_dp_mst_put_port_malloc(port);
+}
+
+static inline void
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ }
+ mutex_unlock(&mgr->lock);
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up_all(&mstb->mgr->tx_waitq);
+
+ drm_dp_mst_put_mstb_malloc(mstb);
+}
+
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ delayed_destroy_work);
+ bool send_hotplug = false, go_again;
+
/*
* Not a regular list traverse as we have to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
+ struct drm_dp_mst_branch,
+ destroy_next);
+ if (mstb)
+ list_del(&mstb->destroy_next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+
+ if (!mstb)
+ break;
+
+ drm_dp_delayed_destroy_mstb(mstb);
+ go_again = true;
}
- list_del(&port->next);
- mutex_unlock(&mgr->destroy_connector_lock);
- INIT_LIST_HEAD(&port->next);
+ for (;;) {
+ struct drm_dp_mst_port *port;
- mgr->cbs->destroy_connector(mgr, port->connector);
+ mutex_lock(&mgr->delayed_destroy_lock);
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
+ struct drm_dp_mst_port,
+ next);
+ if (port)
+ list_del(&port->next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ if (!port)
+ break;
+
+ drm_dp_delayed_destroy_port(port);
+ send_hotplug = true;
+ go_again = true;
+ }
+ } while (go_again);
- drm_dp_mst_put_port_malloc(port);
- send_hotplug = true;
- }
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
@@ -3698,9 +4814,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
kfree(mst_state);
}
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *branch)
+{
+ while (port->parent) {
+ if (port->parent == branch)
+ return true;
+
+ if (port->parent->port_parent)
+ port = port->parent->port_parent;
+ else
+ break;
+ }
+ return false;
+}
+
+static inline
+int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
+ struct drm_dp_mst_topology_state *mst_state)
+{
+ struct drm_dp_mst_port *port;
+ struct drm_dp_vcpi_allocation *vcpi;
+ int pbn_limit = 0, pbn_used = 0;
+
+ list_for_each_entry(port, &branch->ports, next) {
+ if (port->mstb)
+ if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
+ return -ENOSPC;
+
+ if (port->available_pbn > 0)
+ pbn_limit = port->available_pbn;
+ }
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
+ branch, pbn_limit);
+
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+ if (!vcpi->pbn)
+ continue;
+
+ if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
+ pbn_used += vcpi->pbn;
+ }
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
+ branch, pbn_used);
+
+ if (pbn_used > pbn_limit) {
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
+ branch);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
static inline int
-drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
{
struct drm_dp_vcpi_allocation *vcpi;
int avail_slots = 63, payload_count = 0;
@@ -3738,6 +4906,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
}
/**
+ * drm_dp_mst_add_affected_dsc_crtcs() - Add DSC-affected CRTCs to the atomic state
+ * @state: Pointer to the new struct drm_dp_mst_topology_state
+ * @mgr: MST topology manager
+ *
+ * Whenever there is a change in the MST topology, the DSC configuration
+ * has to be recalculated, so we need to trigger a modeset on all affected
+ * CRTCs in that topology.
+ *
+ * See also:
+ * drm_dp_mst_atomic_enable_dsc()
+ */
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_vcpi_allocation *pos;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+
+ if (IS_ERR(mst_state))
+ return -EINVAL;
+
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
+
+ connector = pos->port->connector;
+
+ if (!connector)
+ return -EINVAL;
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ crtc = conn_state->crtc;
+
+ if (WARN_ON(!crtc))
+ return -EINVAL;
+
+ if (!drm_dp_mst_dsc_aux_for_port(pos->port))
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
+
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+ mgr, crtc);
+
+ crtc_state->mode_changed = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
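
A sketch of how a driver's atomic check might use this helper; example_get_mst_mgr() is a hypothetical accessor standing in for however the driver looks up its topology manager:

static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr = example_get_mst_mgr(dev); /* assumed */
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	/* Any topology change invalidates the DSC configuration of every
	 * stream on that topology, so force a modeset on all of its CRTCs. */
	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
	if (ret)
		return ret;

	return drm_atomic_helper_check_planes(dev, state);
}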
+
+/**
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
+ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables or disables DSC on the given port. When enabling,
+ * the port's VCPI is recalculated from the provided PBN, and the
+ * dsc_enabled flag is updated to keep track of which ports have DSC enabled.
+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, int pbn_div,
+ bool enable)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_vcpi_allocation *pos;
+ bool found = false;
+ int vcpi = 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
+ if (pos->port == port) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+ port, mst_state);
+ return -EINVAL;
+ }
+
+ if (pos->dsc_enabled == enable) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+ port, enable, pos->vcpi);
+ vcpi = pos->vcpi;
+ }
+
+ if (enable) {
+ vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+ port, vcpi);
+ if (vcpi < 0)
+ return -EINVAL;
+ }
+
+ pos->dsc_enabled = enable;
+
+ return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
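
A sketch of enabling DSC for a port during atomic check, assuming the driver has already computed pbn and pbn_div for the compressed stream; example_enable_dsc() is illustrative only:

static int example_enable_dsc(struct drm_atomic_state *state,
			      struct drm_dp_mst_port *port,
			      int pbn, int pbn_div)
{
	int slots;

	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn, pbn_div, true);
	if (slots < 0)
		return slots;

	/* 'slots' is the number of VCPI slots now allocated for the DSC stream */
	return 0;
}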
+/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
* @state: Pointer to the new &struct drm_dp_mst_topology_state
@@ -3765,7 +5055,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
int i, ret = 0;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+ if (!mgr->mst_state)
+ continue;
+
+ ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+ if (ret)
+ break;
+ ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
if (ret)
break;
}
@@ -3798,9 +5094,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_device *dev = mgr->dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -3826,12 +5119,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
- mutex_init(&mgr->destroy_connector_lock);
+ mutex_init(&mgr->delayed_destroy_lock);
+ mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_init(&mgr->topology_ref_history_lock);
+#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
+ INIT_LIST_HEAD(&mgr->up_req_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
@@ -3848,8 +5149,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
- if (test_calc_pbn_mode() < 0)
- DRM_ERROR("MST PBN self-test failed\n");
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
@@ -3874,7 +5173,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ cancel_work_sync(&mgr->delayed_destroy_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
@@ -3885,6 +5184,16 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
+
+ mutex_destroy(&mgr->delayed_destroy_lock);
+ mutex_destroy(&mgr->payload_lock);
+ mutex_destroy(&mgr->qlock);
+ mutex_destroy(&mgr->lock);
+ mutex_destroy(&mgr->up_req_lock);
+ mutex_destroy(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_destroy(&mgr->topology_ref_history_lock);
+#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
@@ -4016,3 +5325,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
+
+/**
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
+ * @port: The port to check
+ *
+ * A single physical MST hub object can be represented in the topology
+ * by multiple branches, with virtual ports between those branches.
+ *
+ * As of DP 1.4, an MST hub with internal (virtual) ports must expose
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
+ * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
+ *
+ * May acquire mgr->lock
+ *
+ * Returns:
+ * true if the port is a virtual DP peer device, false otherwise
+ */
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *downstream_port;
+
+ if (!port || port->dpcd_rev < DP_DPCD_REV_14)
+ return false;
+
+ /* Virtual DP Sink (Internal Display Panel) */
+ if (port->port_num >= 8)
+ return true;
+
+ /* DP-to-HDMI Protocol Converter */
+ if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ !port->mcs &&
+ port->ldps)
+ return true;
+
+ /* DP-to-DP */
+ mutex_lock(&port->mgr->lock);
+ if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mstb &&
+ port->mstb->num_ports == 2) {
+ list_for_each_entry(downstream_port, &port->mstb->ports, next) {
+ if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
+ !downstream_port->input) {
+ mutex_unlock(&port->mgr->lock);
+ return true;
+ }
+ }
+ }
+ mutex_unlock(&port->mgr->lock);
+
+ return false;
+}
+
+/**
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
+ * @port: The port to check. A leaf of the MST tree with an attached display.
+ *
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
+ * the immediately upstream aux, or the connector's physical aux.
+ *
+ * This is both the correct aux to read DSC_CAPABILITY and the
+ * correct aux to write DSC_ENABLED.
+ *
+ * This operation can be expensive (up to four aux reads), so
+ * the caller should cache the return.
+ *
+ * Returns:
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
+ */
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *immediate_upstream_port;
+ struct drm_dp_mst_port *fec_port;
+ struct drm_dp_desc desc = { 0 };
+ u8 endpoint_fec;
+ u8 endpoint_dsc;
+
+ if (!port)
+ return NULL;
+
+ if (port->parent->port_parent)
+ immediate_upstream_port = port->parent->port_parent;
+ else
+ immediate_upstream_port = NULL;
+
+ fec_port = immediate_upstream_port;
+ while (fec_port) {
+ /*
+ * Each physical link (i.e. not a virtual port) between the
+ * output and the primary device must support FEC
+ */
+ if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
+ !fec_port->fec_capable)
+ return NULL;
+
+ fec_port = fec_port->parent->port_parent;
+ }
+
+ /* DP-to-DP peer device */
+ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
+ u8 upstream_dsc;
+
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ return NULL;
+
+ /* Endpoint decompression with DP-to-DP peer device */
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+ (endpoint_fec & DP_FEC_CAPABLE) &&
+ (upstream_dsc & 0x2) /* DSC passthrough */)
+ return &port->aux;
+
+ /* Virtual DPCD decompression with DP-to-DP peer device */
+ return &immediate_upstream_port->aux;
+ }
+
+ /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
+ if (drm_dp_mst_is_virtual_dpcd(port))
+ return &port->aux;
+
+ /*
+ * Synaptics quirk
+ * Applies to ports for which:
+ * - Physical aux has Synaptics OUI
+ * - DPv1.4 or higher
+ * - Port is on primary branch device
+ * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
+ */
+ if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+ return NULL;
+
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+ port->parent == port->mgr->mst_primary) {
+ u8 downstreamport;
+
+ if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
+ &downstreamport, 1) < 0)
+ return NULL;
+
+ if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
+ ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+ != DP_DWN_STRM_PORT_TYPE_ANALOG))
+ return port->mgr->aux;
+ }
+
+ /*
+ * The check below verifies if the MST sink
+ * connected to the GPU is capable of DSC -
+ * therefore the endpoint needs to be
+ * both DSC and FEC capable.
+ */
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ return NULL;
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+ (endpoint_fec & DP_FEC_CAPABLE))
+ return &port->aux;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
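
A sketch of how a driver might use the returned aux to toggle decompression; the error handling and function name are illustrative only:

static int example_set_dsc_decompression(struct drm_dp_mst_port *port,
					 bool enable)
{
	struct drm_dp_aux *aux = drm_dp_mst_dsc_aux_for_port(port);
	u8 val = enable ? DP_DECOMPRESSION_EN : 0;

	if (!aux)
		return -ENODEV;	/* DSC cannot be enabled for this port */

	/* The helper may have returned the endpoint aux, an upstream
	 * virtual-DPCD aux, or the topology manager's physical aux. */
	if (drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val) != 1)
		return -EIO;

	return 0;
}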
diff --git a/drivers/gpu/drm/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
new file mode 100644
index 000000000000..eeda9a61c657
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Declarations for DP MST related functions which are only used in selftests
+ *
+ * Copyright © 2018 Red Hat
+ * Authors:
+ * Lyude Paul <lyude@redhat.com>
+ */
+
+#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
+#define _DRM_DP_MST_HELPER_INTERNAL_H_
+
+#include <drm/drm_dp_mst_helper.h>
+
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw);
+int drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req);
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer);
+
+#endif /* !_DRM_DP_MST_HELPER_INTERNAL_H_ */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9d00947ca447..7c18a980cd4b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,26 +46,9 @@
#include "drm_internal.h"
#include "drm_legacy.h"
-/*
- * drm_debug: Enable debug output.
- * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
- */
-unsigned int drm_debug = 0;
-EXPORT_SYMBOL(drm_debug);
-
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
-"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
@@ -328,11 +311,9 @@ void drm_minor_release(struct drm_minor *minor)
* struct drm_device *drm;
* int ret;
*
- * [
- * devm_kzalloc() can't be used here because the drm_device
- * lifetime can exceed the device lifetime if driver unbind
- * happens when userspace still has open file descriptors.
- * ]
+ * // devm_kzalloc() can't be used here because the drm_device
+ * // lifetime can exceed the device lifetime if driver unbind
+ * // happens when userspace still has open file descriptors.
* priv = kzalloc(sizeof(*priv), GFP_KERNEL);
* if (!priv)
* return -ENOMEM;
@@ -355,7 +336,7 @@ void drm_minor_release(struct drm_minor *minor)
* if (IS_ERR(priv->pclk))
* return PTR_ERR(priv->pclk);
*
- * [ Further setup, display pipeline etc ]
+ * // Further setup, display pipeline etc
*
* platform_set_drvdata(pdev, drm);
*
@@ -370,7 +351,7 @@ void drm_minor_release(struct drm_minor *minor)
* return 0;
* }
*
- * [ This function is called before the devm_ resources are released ]
+ * // This function is called before the devm_ resources are released
* static int driver_remove(struct platform_device *pdev)
* {
* struct drm_device *drm = platform_get_drvdata(pdev);
@@ -381,7 +362,7 @@ void drm_minor_release(struct drm_minor *minor)
* return 0;
* }
*
- * [ This function is called on kernel restart and shutdown ]
+ * // This function is called on kernel restart and shutdown
* static void driver_shutdown(struct platform_device *pdev)
* {
* drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
@@ -641,7 +622,8 @@ int drm_dev_init(struct drm_device *dev,
return -ENODEV;
}
- BUG_ON(!parent);
+ if (WARN_ON(!parent))
+ return -EINVAL;
kref_init(&dev->ref);
dev->dev = get_device(parent);
@@ -744,7 +726,7 @@ int devm_drm_dev_init(struct device *parent,
{
int ret;
- if (WARN_ON(!parent || !driver->release))
+ if (WARN_ON(!driver->release))
return -EINVAL;
ret = drm_dev_init(dev, driver, parent);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 77f4e5ae4197..4a475d9696ff 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -216,13 +216,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
pps_payload->rc_range_parameters[i] =
- ((dsc_cfg->rc_range_params[i].range_min_qp <<
- DSC_PPS_RC_RANGE_MINQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_max_qp <<
- DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_payload->rc_range_parameters[i] =
- cpu_to_be16(pps_payload->rc_range_parameters[i]);
+ cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
}
/* PPS 88 */
@@ -336,12 +334,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
else
vdsc_cfg->nfl_bpg_offset = 0;
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
/* Number of groups used to code the entire slice */
groups_total = groups_per_line * vdsc_cfg->slice_height;
@@ -371,11 +363,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->scale_increment_interval = 0;
}
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
/*
* DSC spec mentions that bits_per_pixel specifies the target
* bits/pixel (bpp) rate that is used by the encoder,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 82a4ceed3fcf..805fb004c8eb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -159,6 +159,9 @@ static const struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ /* Lenovo G50 */
+ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
@@ -707,14 +710,11 @@ static const struct minimode extra_modes[] = {
};
/*
- * Probably taken from CEA-861 spec.
- * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ * From CEA/CTA-861 spec.
*
- * Index using the VIC.
+ * Do not access directly, instead always use cea_mode_for_vic().
*/
-static const struct drm_display_mode edid_cea_modes[] = {
- /* 0 - dummy, VICs start at 1 */
- { },
+static const struct drm_display_mode edid_cea_modes_1[] = {
/* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
@@ -1275,6 +1275,249 @@ static const struct drm_display_mode edid_cea_modes[] = {
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 108 - 1280x720@48Hz 16:9 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 109 - 1280x720@48Hz 64:27 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 110 - 1680x720@48Hz 64:27 */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490,
+ 2530, 2750, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 111 - 1920x1080@48Hz 16:9 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 112 - 1920x1080@48Hz 64:27 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 113 - 2560x1080@48Hz 64:27 */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 114 - 3840x2160@48Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 115 - 4096x2160@48Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 116 - 3840x2160@48Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 117 - 3840x2160@100Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 118 - 3840x2160@120Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 119 - 3840x2160@100Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 120 - 3840x2160@120Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 121 - 5120x2160@24Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116,
+ 7204, 7500, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 122 - 5120x2160@25Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816,
+ 6904, 7200, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 123 - 5120x2160@30Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784,
+ 5872, 6000, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 124 - 5120x2160@48Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866,
+ 5954, 6250, 0, 2160, 2168, 2178, 2475, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 125 - 5120x2160@50Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 126 - 5120x2160@60Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284,
+ 5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 127 - 5120x2160@100Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+};
+
+/*
+ * From CEA/CTA-861 spec.
+ *
+ * Do not access directly, instead always use cea_mode_for_vic().
+ */
+static const struct drm_display_mode edid_cea_modes_193[] = {
+ /* 193 - 5120x2160@120Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 5284,
+ 5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 194 - 7680x4320@24Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 195 - 7680x4320@25Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 196 - 7680x4320@30Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 197 - 7680x4320@48Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 198 - 7680x4320@50Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 199 - 7680x4320@60Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 200 - 7680x4320@100Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
+ 9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 201 - 7680x4320@120Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
+ 8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 202 - 7680x4320@24Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 203 - 7680x4320@25Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 204 - 7680x4320@30Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 205 - 7680x4320@48Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 206 - 7680x4320@50Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 207 - 7680x4320@60Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 208 - 7680x4320@100Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
+ 9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 209 - 7680x4320@120Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
+ 8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 210 - 10240x4320@24Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 11732,
+ 11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 211 - 10240x4320@25Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 12732,
+ 12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 212 - 10240x4320@30Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 10528,
+ 10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 213 - 10240x4320@48Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 11732,
+ 11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 214 - 10240x4320@50Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 12732,
+ 12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 215 - 10240x4320@60Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 10528,
+ 10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 216 - 10240x4320@100Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 12432,
+ 12608, 13200, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 217 - 10240x4320@120Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 10528,
+ 10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 218 - 4096x2160@100Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 219 - 4096x2160@120Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
/*
@@ -1288,25 +1531,25 @@ static const struct drm_display_mode edid_4k_modes[] = {
3840, 4016, 4104, 4400, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4896, 4984, 5280, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
4096, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
/*** DDC fetch and block validation ***/
@@ -1551,7 +1794,7 @@ static void connector_bad_edid(struct drm_connector *connector,
{
int i;
- if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
dev_warn(connector->dev->dev,
@@ -2089,7 +2332,8 @@ static int standard_timing_level(struct edid *edid)
return LEVEL_CVT;
if (drm_gtf2_hbreak(edid))
return LEVEL_GTF2;
- return LEVEL_GTF;
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ return LEVEL_GTF;
}
return LEVEL_DMT;
}
@@ -2967,6 +3211,30 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
+static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+{
+ BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
+ BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
+
+ if (vic >= 1 && vic < 1 + ARRAY_SIZE(edid_cea_modes_1))
+ return &edid_cea_modes_1[vic - 1];
+ if (vic >= 193 && vic < 193 + ARRAY_SIZE(edid_cea_modes_193))
+ return &edid_cea_modes_193[vic - 193];
+ return NULL;
+}
+
+static u8 cea_num_vics(void)
+{
+ return 193 + ARRAY_SIZE(edid_cea_modes_193);
+}
+
+static u8 cea_next_vic(u8 vic)
+{
+ if (++vic == 1 + ARRAY_SIZE(edid_cea_modes_1))
+ vic = 193;
+ return vic;
+}
+
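
A quick illustration of why callers must now go through the helper: the split tables leave a gap between VIC 127 and 193, which cea_mode_for_vic() maps to NULL. example_vic_clock() is a hypothetical internal user:

static int example_vic_clock(u8 vic)
{
	const struct drm_display_mode *mode = cea_mode_for_vic(vic);

	/* VIC 0, VICs 128-192 and anything above 219 fall in the gaps */
	return mode ? mode->clock : 0;
}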
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
@@ -3004,14 +3272,14 @@ cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
* get the other variants by simply increasing the
* vertical front porch length.
*/
- BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 ||
- edid_cea_modes[9].vtotal != 262 ||
- edid_cea_modes[12].vtotal != 262 ||
- edid_cea_modes[13].vtotal != 262 ||
- edid_cea_modes[23].vtotal != 312 ||
- edid_cea_modes[24].vtotal != 312 ||
- edid_cea_modes[27].vtotal != 312 ||
- edid_cea_modes[28].vtotal != 312);
+ BUILD_BUG_ON(cea_mode_for_vic(8)->vtotal != 262 ||
+ cea_mode_for_vic(9)->vtotal != 262 ||
+ cea_mode_for_vic(12)->vtotal != 262 ||
+ cea_mode_for_vic(13)->vtotal != 262 ||
+ cea_mode_for_vic(23)->vtotal != 312 ||
+ cea_mode_for_vic(24)->vtotal != 312 ||
+ cea_mode_for_vic(27)->vtotal != 312 ||
+ cea_mode_for_vic(28)->vtotal != 312);
if (((vic == 8 || vic == 9 ||
vic == 12 || vic == 13) && mode->vtotal < 263) ||
@@ -3039,8 +3307,8 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
- for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- struct drm_display_mode cea_mode = edid_cea_modes[vic];
+ for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
+ struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
@@ -3078,8 +3346,8 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
- for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- struct drm_display_mode cea_mode = edid_cea_modes[vic];
+ for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
+ struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
@@ -3102,36 +3370,31 @@ EXPORT_SYMBOL(drm_match_cea_mode);
static bool drm_valid_cea_vic(u8 vic)
{
- return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
+ return cea_mode_for_vic(vic) != NULL;
}
-/**
- * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
- * the input VIC from the CEA mode list
- * @video_code: ID given to each of the CEA modes
- *
- * Returns picture aspect ratio
- */
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+{
+ const struct drm_display_mode *mode = cea_mode_for_vic(video_code);
+
+ if (mode)
+ return mode->picture_aspect_ratio;
+
+ return HDMI_PICTURE_ASPECT_NONE;
+}
+
+static enum hdmi_picture_aspect drm_get_hdmi_aspect_ratio(const u8 video_code)
{
- return edid_cea_modes[video_code].picture_aspect_ratio;
+ return edid_4k_modes[video_code].picture_aspect_ratio;
}
-EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
- *
- * It's almost like cea_mode_alternate_clock(), we just need to add an
- * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
- * one.
*/
static unsigned int
hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
{
- if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160)
- return hdmi_mode->clock;
-
return cea_mode_alternate_clock(hdmi_mode);
}
@@ -3144,6 +3407,9 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
if (!to_match->clock)
return 0;
+ if (to_match->picture_aspect_ratio)
+ match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
@@ -3179,6 +3445,9 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
if (!to_match->clock)
return 0;
+ if (to_match->picture_aspect_ratio)
+ match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
@@ -3223,7 +3492,7 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
unsigned int clock1, clock2;
if (drm_valid_cea_vic(vic)) {
- cea_mode = &edid_cea_modes[vic];
+ cea_mode = cea_mode_for_vic(vic);
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
vic = drm_match_hdmi_mode(mode);
@@ -3298,7 +3567,7 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
if (!drm_valid_cea_vic(vic))
return NULL;
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+ newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
return NULL;
@@ -3332,7 +3601,7 @@ static int do_y420vdb_modes(struct drm_connector *connector,
if (!drm_valid_cea_vic(vic))
continue;
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+ newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
break;
bitmap_set(hdmi->y420_vdb_modes, vic, 1);
@@ -3719,7 +3988,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
if (*end < 4 || *end > 127)
return -ERANGE;
} else {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -3901,7 +4170,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
vic = drm_match_cea_mode_clock_tolerance(mode, 5);
if (drm_valid_cea_vic(vic)) {
type = "CEA";
- cea_mode = &edid_cea_modes[vic];
+ cea_mode = cea_mode_for_vic(vic);
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
@@ -4183,12 +4452,12 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
- return -ENOENT;
+ return 0;
}
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return 0;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4244,12 +4513,12 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
- return -ENOENT;
+ return 0;
}
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return 0;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4477,7 +4746,7 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
if (scdc->supported) {
scdc->scrambling.supported = true;
- /* Few sinks support scrambling for cloks < 340M */
+ /* Few sinks support scrambling for clocks < 340M */
if ((hf_vsdb[6] & 0x8))
scdc->scrambling.low_rates = true;
}
@@ -5068,6 +5337,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+static u8 drm_mode_hdmi_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
+
+ if (!has_hdmi_infoframe)
+ return 0;
+
+ /* No HDMI VIC when signalling 3D video format */
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ return 0;
+
+ return drm_match_hdmi_mode(mode);
+}
+
+static u8 drm_mode_cea_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ u8 vic;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Let's check if this mode is present in
+ * HDMI 1.4b 4K modes
+ */
+ if (drm_mode_hdmi_vic(connector, mode))
+ return 0;
+
+ vic = drm_match_cea_mode(mode);
+
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink(connector) && vic > 64)
+ return 0;
+
+ return vic;
+}
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@@ -5083,6 +5395,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode)
{
enum hdmi_picture_aspect picture_aspect;
+ u8 vic, hdmi_vic;
int err;
if (!frame || !mode)
@@ -5095,29 +5408,8 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
- frame->video_code = drm_match_cea_mode(mode);
-
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && frame->video_code > 64)
- frame->video_code = 0;
-
- /*
- * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
- * we should send its VIC in vendor infoframes, else send the
- * VIC in AVI infoframes. Lets check if this mode is present in
- * HDMI 1.4b 4K modes
- */
- if (frame->video_code) {
- u8 vendor_if_vic = drm_match_hdmi_mode(mode);
- bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
-
- if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
- frame->video_code = 0;
- }
+ vic = drm_mode_cea_vic(connector, mode);
+ hdmi_vic = drm_mode_hdmi_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
@@ -5131,11 +5423,15 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
/*
* Populate picture aspect ratio from either
- * user input (if specified) or from the CEA mode list.
+ * user input (if specified) or from the CEA/HDMI mode lists.
*/
picture_aspect = mode->picture_aspect_ratio;
- if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
- picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
+ if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) {
+ if (vic)
+ picture_aspect = drm_get_cea_aspect_ratio(vic);
+ else if (hdmi_vic)
+ picture_aspect = drm_get_hdmi_aspect_ratio(hdmi_vic);
+ }
/*
* The infoframe can't convey anything but none, 4:3
@@ -5143,12 +5439,20 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
* we can only satisfy it by specifying the right VIC.
*/
if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
- if (picture_aspect !=
- drm_get_cea_aspect_ratio(frame->video_code))
+ if (vic) {
+ if (picture_aspect != drm_get_cea_aspect_ratio(vic))
+ return -EINVAL;
+ } else if (hdmi_vic) {
+ if (picture_aspect != drm_get_hdmi_aspect_ratio(hdmi_vic))
+ return -EINVAL;
+ } else {
return -EINVAL;
+ }
+
picture_aspect = HDMI_PICTURE_ASPECT_NONE;
}
+ frame->video_code = vic;
frame->picture_aspect = picture_aspect;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
@@ -5282,6 +5586,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+/**
+ * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
+ * bar information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ frame->right_bar = conn_state->tv.margins.right;
+ frame->left_bar = conn_state->tv.margins.left;
+ frame->top_bar = conn_state->tv.margins.top;
+ frame->bottom_bar = conn_state->tv.margins.bottom;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
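
A sketch of how an encoder might combine this with the existing AVI infoframe helper; example_fill_avi() and its arguments are illustrative, not part of the helper API:

static int example_fill_avi(struct hdmi_avi_infoframe *frame,
			    struct drm_connector *connector,
			    const struct drm_connector_state *conn_state,
			    const struct drm_display_mode *mode)
{
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
	if (ret)
		return ret;

	/* Copy the user-configured TV margins into the bar fields */
	drm_hdmi_avi_infoframe_bars(frame, conn_state);
	return 0;
}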
+
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
@@ -5334,8 +5655,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
int err;
- u32 s3d_flags;
- u8 vic;
if (!frame || !mode)
return -EINVAL;
@@ -5343,8 +5662,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (!has_hdmi_infoframe)
return -EINVAL;
- vic = drm_match_hdmi_mode(mode);
- s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
/*
* Even if it's not absolutely necessary to send the infoframe
@@ -5355,15 +5675,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
* mode if the source simply stops sending the infoframe when
* it wants to switch from 3D to 2D.
*/
-
- if (vic && s3d_flags)
- return -EINVAL;
-
- err = hdmi_vendor_infoframe_init(frame);
- if (err < 0)
- return err;
-
- frame->vic = vic;
+ frame->vic = drm_mode_hdmi_vic(connector, mode);
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index d38b3b255926..37d8ba3ddb46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -175,7 +175,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
u8 *edid;
int fwsize, builtin;
int i, valid_extensions = 0;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS);
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 7fb47b7b8b44..e555281f43d4 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
@@ -139,6 +140,7 @@ int drm_encoder_init(struct drm_device *dev,
goto out_put;
}
+ INIT_LIST_HEAD(&encoder->bridge_chain);
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
encoder->index = dev->mode_config.num_encoder++;
@@ -159,22 +161,16 @@ EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ struct drm_bridge *bridge, *next;
/* Note that the encoder_list is considered to be static; should we
* remove the drm_encoder at runtime we would have to decrement all
* the indices on the drm_encoder after us in the encoder_list.
*/
- if (encoder->bridge) {
- struct drm_bridge *bridge = encoder->bridge;
- struct drm_bridge *next;
-
- while (bridge) {
- next = bridge->next;
- drm_bridge_detach(bridge);
- bridge = next;
- }
- }
+ list_for_each_entry_safe(bridge, next, &encoder->bridge_chain,
+ chain_node)
+ drm_bridge_detach(bridge);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c0b0f603af63..9801c0333eca 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -9,6 +9,7 @@
* Copyright (C) 2012 Red Hat
*/
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a7ba5b4902d6..4c7cbce7bae7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -46,6 +46,7 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include "drm_crtc_helper_internal.h"
#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
@@ -91,9 +92,8 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
- *
- * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
- * down by calling drm_fb_helper_fbdev_teardown().
+ * It will automatically set up deferred I/O if the driver requires a shadow
+ * buffer.
*
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
@@ -126,8 +126,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup
- * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
+ * mmap page writes.
+ *
+ * Deferred I/O is not compatible with SHMEM. Such drivers should request an
+ * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
@@ -189,6 +191,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_client_dev *client = &helper->client;
+ struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
const struct drm_crtc_helper_funcs *funcs;
struct drm_mode_set *mode_set;
@@ -207,7 +210,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
if (!fb) {
- DRM_ERROR("no fb to restore??\n");
+ drm_err(dev, "no fb to restore?\n");
continue;
}
@@ -561,8 +564,7 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
* drm_fb_helper_fini - finialize a &struct drm_fb_helper
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
- * This cleans up all remaining resources associated with @fb_helper. Must be
- * called after drm_fb_helper_unlink_fbi() was called.
+ * This cleans up all remaining resources associated with @fb_helper.
*/
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
@@ -602,19 +604,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
-/**
- * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * A wrapper around unlink_framebuffer implemented by fbdev core
- */
-void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
-{
- if (fb_helper && fb_helper->fbdev)
- unlink_framebuffer(fb_helper->fbdev);
-}
-EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
-
static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@@ -679,49 +668,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
- * drm_fb_helper_defio_init - fbdev deferred I/O initialization
- * @fb_helper: driver-allocated fbdev helper
- *
- * This function allocates &fb_deferred_io, sets callback to
- * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init().
- * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
- * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
- *
- * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
- * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
- * affect other instances of that &fb_ops.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- struct fb_info *info = fb_helper->fbdev;
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- info->fbdefio = fbdefio;
- fbdefio->delay = msecs_to_jiffies(50);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
-
- *fbops = *info->fbops;
- info->fbops = fbops;
-
- fb_deferred_io_init(info);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_defio_init);
-
-/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -1303,12 +1249,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev = fb_helper->dev;
if (in_dbg_master())
return -EINVAL;
if (var->pixclock != 0) {
- DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+ drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
var->pixclock = 0;
}
@@ -1320,10 +1267,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+ if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
- DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
+ drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@@ -1346,11 +1293,16 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
+ * Likewise, bits_per_pixel should be rounded up to a supported value.
+ */
+ var->bits_per_pixel = fb->format->cpp[0] * 8;
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
if (!drm_fb_pixel_format_equal(var, &info->var)) {
- DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
+ drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel format\n");
return -EINVAL;
}
@@ -1375,7 +1327,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EBUSY;
if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLOCK SET\n");
+ drm_err(fb_helper->dev, "PIXEL CLOCK SET\n");
return -EINVAL;
}
@@ -1485,6 +1437,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
{
struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
int ret = 0;
int crtc_count = 0;
struct drm_connector_list_iter conn_iter;
@@ -1548,7 +1501,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_plane *plane = crtc->primary;
int j;
- DRM_DEBUG("test CRTC %u primary plane\n", drm_crtc_index(crtc));
+ drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
for (j = 0; j < plane->format_count; j++) {
const struct drm_format_info *fmt;
@@ -1581,7 +1534,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
}
if (sizes.surface_depth != best_depth && best_depth) {
- DRM_INFO("requested bpp %d, scaled depth down to %d",
+ drm_info(dev, "requested bpp %d, scaled depth down to %d",
sizes.surface_bpp, best_depth);
sizes.surface_depth = best_depth;
}
@@ -1613,7 +1566,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
- if (connector->has_tile) {
+ if (connector->has_tile &&
+ desired_mode->hdisplay == connector->tile_h_size &&
+ desired_mode->vdisplay == connector->tile_v_size) {
lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
/* cloning to multiple tiles is just crazy-talk, so: */
@@ -1629,7 +1584,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
mutex_unlock(&client->modeset_mutex);
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
- DRM_INFO("Cannot find any crtc or sizes\n");
+ drm_info(dev, "Cannot find any crtc or sizes\n");
/* First time: disable all crtc's.. */
if (!fb_helper->deferred_setup)
@@ -1944,7 +1899,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
drm_master_internal_release(fb_helper->dev);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(fb_helper->dev, "\n");
drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height);
drm_setup_crtcs_fb(fb_helper);
@@ -1957,108 +1912,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/**
- * drm_fb_helper_fbdev_setup() - Setup fbdev emulation
- * @dev: DRM device
- * @fb_helper: fbdev helper structure to set up
- * @funcs: fbdev helper functions
- * @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors (not used)
- *
- * This function sets up fbdev emulation and registers fbdev for access by
- * userspace. If all connectors are disconnected, setup is deferred to the next
- * time drm_fb_helper_hotplug_event() is called.
- * The caller must to provide a &drm_fb_helper_funcs->fb_probe callback
- * function.
- *
- * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev.
- *
- * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup().
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_helper_fbdev_setup(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- const struct drm_fb_helper_funcs *funcs,
- unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- int ret;
-
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
-
- drm_fb_helper_prepare(dev, fb_helper, funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper, 0);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to initialize (ret=%d)\n", ret);
- return ret;
- }
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to set configuration (ret=%d)\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fbdev_teardown(dev);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_fb_helper_fbdev_setup);
-
-/**
- * drm_fb_helper_fbdev_teardown - Tear down fbdev emulation
- * @dev: DRM device
- *
- * This function unregisters fbdev if not already done and cleans up the
- * associated resources including the &drm_framebuffer.
- * The driver is responsible for freeing the &drm_fb_helper structure which is
- * stored in &drm_device->fb_helper. Do note that this pointer has been cleared
- * when this function returns.
- *
- * In order to support device removal/unplug while file handles are still open,
- * drm_fb_helper_unregister_fbi() should be called on device removal and
- * drm_fb_helper_fbdev_teardown() in the &drm_driver->release callback when
- * file handles are closed.
- */
-void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
-{
- struct drm_fb_helper *fb_helper = dev->fb_helper;
- struct fb_ops *fbops = NULL;
-
- if (!fb_helper)
- return;
-
- /* Unregister if it hasn't been done already */
- if (fb_helper->fbdev && fb_helper->fbdev->dev)
- drm_fb_helper_unregister_fbi(fb_helper);
-
- if (fb_helper->fbdev && fb_helper->fbdev->fbdefio) {
- fb_deferred_io_cleanup(fb_helper->fbdev);
- kfree(fb_helper->fbdev->fbdefio);
- fbops = fb_helper->fbdev->fbops;
- }
-
- drm_fb_helper_fini(fb_helper);
- kfree(fbops);
-
- if (fb_helper->fb)
- drm_framebuffer_remove(fb_helper->fb);
-}
-EXPORT_SYMBOL(drm_fb_helper_fbdev_teardown);
-
-/**
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
@@ -2111,7 +1964,6 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
struct fb_info *fbi = fb_helper->fbdev;
- struct fb_ops *fbops = NULL;
void *shadow = NULL;
if (!fb_helper->dev)
@@ -2120,15 +1972,11 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
if (fbi && fbi->fbdefio) {
fb_deferred_io_cleanup(fbi);
shadow = fbi->screen_buffer;
- fbops = fbi->fbops;
}
drm_fb_helper_fini(fb_helper);
- if (shadow) {
- vfree(shadow);
- kfree(fbops);
- }
+ vfree(shadow);
drm_client_framebuffer_delete(fb_helper->buffer);
}
@@ -2159,7 +2007,7 @@ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return -ENODEV;
}
-static struct fb_ops drm_fbdev_fb_ops = {
+static const struct fb_ops drm_fbdev_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = drm_fbdev_fb_open,
@@ -2178,32 +2026,26 @@ static struct fb_deferred_io drm_fbdev_defio = {
.deferred_io = drm_fb_helper_deferred_io,
};
-/**
- * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper
- * @fb_helper: fbdev helper structure
- * @sizes: describes fbdev size and scanout surface size
- *
+/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*
* The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
* fb_copyarea, fb_imageblit.
- *
- * Returns:
- * Zero on success or negative error code on failure.
*/
-int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
void *vaddr;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
+ drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
@@ -2226,24 +2068,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
if (drm_fbdev_use_shadow_fb(fb_helper)) {
- struct fb_ops *fbops;
- void *shadow;
-
- /*
- * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a per
- * instance version is necessary.
- */
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- shadow = vzalloc(fbi->screen_size);
- if (!fbops || !shadow) {
- kfree(fbops);
- vfree(shadow);
+ fbi->screen_buffer = vzalloc(fbi->screen_size);
+ if (!fbi->screen_buffer)
return -ENOMEM;
- }
- *fbops = *fbi->fbops;
- fbi->fbops = fbops;
- fbi->screen_buffer = shadow;
fbi->fbdefio = &drm_fbdev_defio;
fb_deferred_io_init(fbi);
@@ -2264,7 +2092,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_generic_probe);
static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
.fb_probe = drm_fb_helper_generic_probe,
@@ -2302,7 +2129,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
return drm_fb_helper_hotplug_event(dev->fb_helper);
if (!dev->mode_config.num_connector) {
- DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n");
+ drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
return 0;
}
@@ -2327,7 +2154,7 @@ err:
fb_helper->dev = NULL;
fb_helper->fbdev = NULL;
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
+ drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
return ret;
}
@@ -2346,8 +2173,7 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* @dev->mode_config.preferred_depth is used if this is zero.
*
 * This function sets up generic fbdev emulation for drivers that support
- * dumb buffers with a virtual address and that can be mmap'ed. If the driver
- * does not support these functions, it could use drm_fb_helper_fbdev_setup().
+ * dumb buffers with a virtual address and that can be mmap'ed.
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2355,7 +2181,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* Drivers that set the dirty callback on their framebuffer will get a shadow
* fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers.
+ * make deferred I/O work with all kinds of buffers. A shadow buffer can be
+ * requested explicitly by setting struct drm_mode_config.prefer_shadow or
+ * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
+ * required to use generic fbdev emulation with SHMEM helpers.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
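/*
 * Editorial sketch (not part of the patch): a SHMEM-backed driver would
 * typically request the shadow buffer and register the generic fbdev client
 * at the end of its probe path; the "foo_" name is hypothetical.
 */
static int foo_setup_fbdev(struct drm_device *dev)
{
	/* blit through a shadow so deferred I/O never maps the SHMEM pages */
	dev->mode_config.prefer_shadow_fbdev = true;

	/* 0 would fall back to dev->mode_config.preferred_depth */
	return drm_fbdev_generic_setup(dev, 32);
}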
@@ -2382,7 +2211,7 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
- DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret);
+ drm_err(dev, "Failed to register client: %d\n", ret);
return ret;
}
@@ -2394,7 +2223,7 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
ret = drm_fbdev_client_hotplug(&fb_helper->client);
if (ret)
- DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 754af25fe255..92d16724f949 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -31,7 +31,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
@@ -147,8 +149,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_open(file);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_init_file_private(&file->prime);
+ drm_prime_init_file_private(&file->prime);
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
@@ -159,8 +160,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
return file;
out_prime_destroy:
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file->prime);
+ drm_prime_destroy_file_private(&file->prime);
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -253,8 +253,7 @@ void drm_file_free(struct drm_file *file)
if (dev->driver->postclose)
dev->driver->postclose(dev, file);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file->prime);
+ drm_prime_destroy_file_private(&file->prime);
WARN_ON(!list_empty(&file->event_list));
@@ -288,7 +287,7 @@ static int drm_cpu_valid(void)
}
/*
- * Called whenever a process opens /dev/drm.
+ * Called whenever a process opens a DRM device node.
*
* \param filp file pointer.
* \param minor acquired minor-object.
@@ -757,3 +756,43 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
+
+/**
+ * mock_drm_getfile - Create a new struct file for the drm device
+ * @minor: drm minor to wrap (e.g. #drm_device.primary)
+ * @flags: file creation mode (O_RDWR etc)
+ *
+ * This creates a new struct file that wraps a DRM file context around a
+ * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
+ * invoking userspace. The struct file may be operated on using its f_op
+ * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
+ * to userspace-facing functions as an internal/anonymous client.
+ *
+ * RETURNS:
+ * Pointer to newly created struct file, ERR_PTR on failure.
+ */
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_file *priv;
+ struct file *file;
+
+ priv = drm_file_alloc(minor);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
+ if (IS_ERR(file)) {
+ drm_file_free(priv);
+ return file;
+ }
+
+ /* Everyone shares a single global address space */
+ file->f_mapping = dev->anon_inode->i_mapping;
+
+ drm_dev_get(dev);
+ priv->filp = file;
+
+ return file;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
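/*
 * Editorial sketch (not part of the patch): how an in-kernel self-test might
 * use the new helper; the test function and its error handling are
 * hypothetical.
 */
static int example_selftest_open_client(struct drm_device *dev)
{
	struct file *file;

	file = mock_drm_getfile(dev->primary, O_RDWR);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... exercise dev->driver->fops through file->f_op here ... */

	fput(file);	/* releases the drm_file and the device reference */
	return 0;
}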
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index c630064ccf41..b234bfaeda06 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -253,17 +253,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P010, .depth = 0, .num_planes = 2,
- .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P012, .depth = 0, .num_planes = 2,
- .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
- .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P210, .depth = 0,
.num_planes = 2, .char_per_block = { 2, 4, 0 },
- .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
+ .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VUY101010, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a8c4468f03d9..a9e4a610445a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -39,6 +39,7 @@
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
+#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -158,7 +159,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
- reservation_object_init(&obj->_resv);
+ dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
@@ -254,8 +255,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -633,6 +633,9 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
pagevec_init(&pvec);
for (i = 0; i < npages; i++) {
+ if (!pages[i])
+ continue;
+
if (dirty)
set_page_dirty(pages[i]);
@@ -752,7 +755,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
- * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects
+ * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
* shared and/or exclusive fences.
* @filep: DRM file private date
* @handle: userspace handle
@@ -764,7 +767,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than 0 on success.
*/
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
long ret;
@@ -776,7 +779,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+ ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
true, timeout);
if (ret == 0)
ret = -ETIME;
@@ -787,7 +790,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
return ret;
}
-EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -953,7 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
if (obj->filp)
fput(obj->filp);
- reservation_object_fini(&obj->_resv);
+ dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1096,23 +1099,12 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
+ int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (obj->funcs && obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
- else
- return -EINVAL;
-
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
@@ -1121,6 +1113,30 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_get(obj);
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret) {
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
+ WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+ } else {
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
+
+ vma->vm_private_data = obj;
+
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
@@ -1288,8 +1304,8 @@ retry:
if (contended != -1) {
struct drm_gem_object *obj = objs[contended];
- ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
- acquire_ctx);
+ ret = dma_resv_lock_slow_interruptible(obj->resv,
+ acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
return ret;
@@ -1300,16 +1316,16 @@ retry:
if (i == contended)
continue;
- ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
- acquire_ctx);
+ ret = dma_resv_lock_interruptible(objs[i]->resv,
+ acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++)
- ww_mutex_unlock(&objs[j]->resv->lock);
+ dma_resv_unlock(objs[j]->resv);
if (contended != -1 && contended >= i)
- ww_mutex_unlock(&objs[contended]->resv->lock);
+ dma_resv_unlock(objs[contended]->resv);
if (ret == -EDEADLK) {
contended = i;
@@ -1334,7 +1350,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
int i;
for (i = 0; i < count; i++)
- ww_mutex_unlock(&objs[i]->resv->lock);
+ dma_resv_unlock(objs[i]->resv);
ww_acquire_fini(acquire_ctx);
}
@@ -1410,12 +1426,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
if (!write) {
struct dma_fence *fence =
- reservation_object_get_excl_rcu(obj->resv);
+ dma_resv_get_excl_rcu(obj->resv);
return drm_gem_fence_array_add(fence_array, fence);
}
- ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+ ret = dma_resv_get_fences_rcu(obj->resv, NULL,
&fence_count, &fences);
if (ret || !fence_count)
return ret;
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 8fcbabf02dfd..3a7ace19a902 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -7,7 +7,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
@@ -74,8 +74,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, funcs);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n",
- ret);
+ drm_err(dev, "Failed to init framebuffer: %d\n", ret);
kfree(fb);
return ERR_PTR(ret);
}
@@ -160,7 +159,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!objs[i]) {
- DRM_DEBUG_KMS("Failed to lookup GEM object\n");
+ drm_dbg_kms(dev, "Failed to lookup GEM object\n");
ret = -ENOENT;
goto err_gem_object_put;
}
@@ -271,11 +270,11 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
* @plane: Plane
* @state: Plane state the fence will be attached to
*
- * This function prepares a GEM backed framebuffer for scanout by checking if
- * the plane framebuffer has a DMA-BUF attached. If it does, it extracts the
- * exclusive fence and attaches it to the plane state for the atomic helper to
- * wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb
- * callback.
+ * This function extracts the exclusive fence from &drm_gem_object.resv and
+ * attaches it to plane state for the atomic helper to wait on. This is
+ * necessary to correctly implement implicit synchronization for any buffers
+ * shared as a &struct dma_buf. This function can be used as the
+ * &drm_plane_helper_funcs.prepare_fb callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
@@ -287,17 +286,15 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
struct dma_fence *fence;
if (!state->fb)
return 0;
- dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
- if (dma_buf) {
- fence = reservation_object_get_excl_rcu(dma_buf->resv);
- drm_atomic_set_fence_for_plane(state, fence);
- }
+ obj = drm_gem_fb_get_obj(state->fb, 0);
+ fence = dma_resv_get_excl_rcu(obj->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
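/*
 * Editorial sketch (not part of the patch): the helper is meant to be plugged
 * into a plane helper vtable; the driver's remaining callbacks are omitted
 * and the "foo_" name is hypothetical.
 */
static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	/* .atomic_check/.atomic_update: driver-specific, omitted */
	.prepare_fb = drm_gem_fb_prepare_fb,	/* attaches obj->resv's exclusive fence */
};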
@@ -309,10 +306,11 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
* @pipe: Simple display pipe
* @plane_state: Plane state
*
- * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a
- * &dma_buf attached, extracts the exclusive fence and attaches it to plane
- * state for the atomic helper to wait on. Drivers can use this as their
- * &drm_simple_display_pipe_funcs.prepare_fb callback.
+ * This function uses drm_gem_fb_prepare_fb() to extract the exclusive fence
+ * from &drm_gem_object.resv and attaches it to plane state for the atomic
+ * helper to wait on. This is necessary to correctly implement implicit
+ * synchronization for any buffers shared as a &struct dma_buf. Drivers can use
+ * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
*
* See drm_atomic_set_fence_for_plane() for a discussion of implicit and
* explicit fencing in atomic modeset updates.
@@ -323,46 +321,3 @@ int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pi
return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);
-
-/**
- * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
- * emulation
- * @dev: DRM device
- * @sizes: fbdev size description
- * @pitch_align: Optional pitch alignment
- * @obj: GEM object backing the framebuffer
- * @funcs: Optional vtable to be used for the new framebuffer object when the
- * dirty callback is needed.
- *
- * This function creates a framebuffer from a &drm_fb_helper_surface_size
- * description for use in the &drm_fb_helper_funcs.fb_probe callback.
- *
- * Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
- */
-struct drm_framebuffer *
-drm_gem_fbdev_fb_create(struct drm_device *dev,
- struct drm_fb_helper_surface_size *sizes,
- unsigned int pitch_align, struct drm_gem_object *obj,
- const struct drm_framebuffer_funcs *funcs)
-{
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width *
- DIV_ROUND_UP(sizes->surface_bpp, 8);
- if (pitch_align)
- mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
- pitch_align);
- mode_cmd.pixel_format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
- sizes->surface_depth);
- if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
- return ERR_PTR(-EINVAL);
-
- if (!funcs)
- funcs = &drm_gem_fb_funcs;
-
- return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
-}
-EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 472ea5d81f82..a421a2eed48a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -31,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
@@ -74,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
shmem = to_drm_gem_shmem_obj(obj);
mutex_init(&shmem->pages_lock);
mutex_init(&shmem->vmap_lock);
+ INIT_LIST_HEAD(&shmem->madv_list);
/*
* Our buffers are kept pinned, so allocating them
@@ -117,11 +119,11 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
if (shmem->sgt) {
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
shmem->sgt->nents, DMA_BIDIRECTIONAL);
-
- drm_gem_shmem_put_pages(shmem);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
+ if (shmem->pages)
+ drm_gem_shmem_put_pages(shmem);
}
WARN_ON(shmem->pages_use_count);
@@ -361,6 +363,71 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
+/* Update the madvise status; returns true if the object has not been purged,
+ * false if it has.
+ */
+int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->pages_lock);
+
+ if (shmem->madv >= 0)
+ shmem->madv = madv;
+
+ madv = shmem->madv;
+
+ mutex_unlock(&shmem->pages_lock);
+
+ return (madv >= 0);
+}
+EXPORT_SYMBOL(drm_gem_shmem_madvise);
+
+void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+
+ dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
+ shmem->sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(shmem->sgt);
+ kfree(shmem->sgt);
+ shmem->sgt = NULL;
+
+ drm_gem_shmem_put_pages_locked(shmem);
+
+ shmem->madv = -1;
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+ drm_gem_free_mmap_offset(obj);
+
+ /* Our goal here is to return as much of the memory as
+ * is possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
+
+bool drm_gem_shmem_purge(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ if (!mutex_trylock(&shmem->pages_lock))
+ return false;
+ drm_gem_shmem_purge_locked(obj);
+ mutex_unlock(&shmem->pages_lock);
+
+ return true;
+}
+EXPORT_SYMBOL(drm_gem_shmem_purge);
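/*
 * Editorial sketch (not part of the patch): a driver shrinker could combine
 * the new madvise/purge helpers roughly like this. The shrink list, its
 * locking and the "foo_" name are driver-specific assumptions; objects are
 * expected to land on the list when userspace marks them DONTNEED via
 * drm_gem_shmem_madvise().
 */
static unsigned long foo_shrinker_scan(struct list_head *shrink_list)
{
	struct drm_gem_shmem_object *shmem, *tmp;
	unsigned long freed = 0;

	list_for_each_entry_safe(shmem, tmp, shrink_list, madv_list) {
		struct drm_gem_object *obj = &shmem->base;

		if (!drm_gem_shmem_is_purgeable(shmem))
			continue;
		if (!drm_gem_shmem_purge(obj))	/* trylock failed, skip */
			continue;

		list_del_init(&shmem->madv_list);
		freed += obj->size >> PAGE_SHIFT;
	}

	return freed;
}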
+
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
@@ -438,39 +505,33 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
-const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
- * @filp: File object
+ * @obj: gem object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
* operation for shmem objects. Drivers which employ the shmem helpers should
- * use this function as their &file_operations.mmap handler in the DRM device file's
- * file_operations structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ * use this function as their &drm_gem_object_funcs.mmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct drm_gem_shmem_object *shmem;
int ret;
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+ shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
@@ -478,12 +539,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
- /* VM_PFNMAP was set by drm_gem_mmap() */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
}
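/*
 * Editorial sketch (not part of the patch): a driver that provides its own
 * object funcs can now point the per-object .mmap hook at the shmem helper
 * instead of routing mmap through file_operations; the "foo_" vtable is
 * hypothetical.
 */
static const struct drm_gem_object_funcs foo_shmem_gem_funcs = {
	.free		= drm_gem_shmem_free_object,
	.get_sg_table	= drm_gem_shmem_get_sg_table,
	.vmap		= drm_gem_shmem_vmap,
	.vunmap		= drm_gem_shmem_vunmap,
	.mmap		= drm_gem_shmem_mmap,	/* strips the fake offset itself */
};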
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
new file mode 100644
index 000000000000..605a8a3da7f9
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+
+#include <drm/drm_gem_ttm_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helper functions for gem objects backed by
+ * ttm.
+ */
+
+/**
+ * drm_gem_ttm_print_info() - Print &ttm_buffer_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @gem: GEM object
+ *
+ * This function can be used as &drm_gem_object_funcs.print_info
+ * callback.
+ */
+void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *gem)
+{
+ static const char * const plname[] = {
+ [ TTM_PL_SYSTEM ] = "system",
+ [ TTM_PL_TT ] = "tt",
+ [ TTM_PL_VRAM ] = "vram",
+ [ TTM_PL_PRIV ] = "priv",
+
+ [ 16 ] = "cached",
+ [ 17 ] = "uncached",
+ [ 18 ] = "wc",
+ [ 19 ] = "contig",
+
+ [ 21 ] = "pinned", /* NO_EVICT */
+ [ 22 ] = "topdown",
+ };
+ const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+ drm_printf_indent(p, indent, "placement=");
+ drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+ drm_printf(p, "\n");
+
+ if (bo->mem.bus.is_iomem) {
+ drm_printf_indent(p, indent, "bus.base=%lx\n",
+ (unsigned long)bo->mem.bus.base);
+ drm_printf_indent(p, indent, "bus.offset=%lx\n",
+ (unsigned long)bo->mem.bus.offset);
+ }
+}
+EXPORT_SYMBOL(drm_gem_ttm_print_info);
+
+/**
+ * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @vma: vm area.
+ *
+ * This function can be used as &drm_gem_object_funcs.mmap
+ * callback.
+ */
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ ret = ttm_bo_mmap_obj(vma, bo);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * ttm has its own object refcounting, so drop gem reference
+ * to avoid double accounting.
+ */
+ drm_gem_object_put_unlocked(gem);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_ttm_mmap);
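/*
 * Editorial sketch (not part of the patch): wiring the new TTM GEM helpers
 * into a driver's object funcs; the "foo_" vtable is hypothetical and the
 * driver's .free callback is omitted.
 */
static const struct drm_gem_object_funcs foo_ttm_gem_funcs = {
	/* .free: driver-specific, releases the underlying TTM BO */
	.print_info	= drm_gem_ttm_print_info,	/* dumps TTM placement bits to debugfs */
	.mmap		= drm_gem_ttm_mmap,		/* defers VMA setup to ttm_bo_mmap_obj() */
};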
+
+MODULE_DESCRIPTION("DRM gem ttm helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 4de782ca26b2..a4863326061a 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,17 +1,30 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
+#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
+static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+
/**
* DOC: overview
*
* This library provides a GEM buffer object that is backed by video RAM
* (VRAM). It can be used for framebuffer devices with dedicated memory.
+ *
+ * The data structure &struct drm_vram_mm and its helpers implement a memory
+ * manager for simple framebuffer devices with dedicated video memory. Buffer
+ * objects are either placed in video RAM or evicted to system memory. The
+ * respective buffer object is provided by &struct drm_gem_vram_object.
*/
/*
@@ -24,7 +37,11 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
- drm_gem_object_release(&gbo->gem);
+
+ WARN_ON(gbo->kmap_use_count);
+ WARN_ON(gbo->kmap.virtual);
+
+ drm_gem_object_release(&gbo->bo.base);
}
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
@@ -45,6 +62,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
{
unsigned int i;
unsigned int c = 0;
+ u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
@@ -52,15 +70,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
if (pl_flag & TTM_PL_FLAG_VRAM)
gbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_VRAM |
+ invariant_flags;
if (pl_flag & TTM_PL_FLAG_SYSTEM)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
if (!c)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
gbo->placement.num_placement = c;
gbo->placement.num_busy_placement = c;
@@ -72,15 +93,21 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
static int drm_gem_vram_init(struct drm_device *dev,
- struct ttm_bo_device *bdev,
struct drm_gem_vram_object *gbo,
- size_t size, unsigned long pg_align,
- bool interruptible)
+ size_t size, unsigned long pg_align)
{
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ struct ttm_bo_device *bdev;
int ret;
size_t acc_size;
- ret = drm_gem_object_init(dev, &gbo->gem, size);
+ if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+ return -EINVAL;
+ bdev = &vmm->bdev;
+
+ gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
+
+ ret = drm_gem_object_init(dev, &gbo->bo.base, size);
if (ret)
return ret;
@@ -90,7 +117,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, interruptible, acc_size,
+ &gbo->placement, pg_align, false, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
if (ret)
goto err_drm_gem_object_release;
@@ -98,36 +125,40 @@ static int drm_gem_vram_init(struct drm_device *dev,
return 0;
err_drm_gem_object_release:
- drm_gem_object_release(&gbo->gem);
+ drm_gem_object_release(&gbo->bo.base);
return ret;
}
/**
* drm_gem_vram_create() - Creates a VRAM-backed GEM object
* @dev: the DRM device
- * @bdev: the TTM BO device backing the object
* @size: the buffer size in bytes
* @pg_align: the buffer's alignment in multiples of the page size
- * @interruptible: sleep interruptible if waiting for memory
*
* Returns:
* A new instance of &struct drm_gem_vram_object on success, or
* an ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
- struct ttm_bo_device *bdev,
size_t size,
- unsigned long pg_align,
- bool interruptible)
+ unsigned long pg_align)
{
struct drm_gem_vram_object *gbo;
int ret;
- gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
- if (!gbo)
- return ERR_PTR(-ENOMEM);
+ if (dev->driver->gem_create_object) {
+ struct drm_gem_object *gem =
+ dev->driver->gem_create_object(dev, size);
+ if (!gem)
+ return ERR_PTR(-ENOMEM);
+ gbo = drm_gem_vram_of_gem(gem);
+ } else {
+ gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
+ if (!gbo)
+ return ERR_PTR(-ENOMEM);
+ }
- ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
+ ret = drm_gem_vram_init(dev, gbo, size, pg_align);
if (ret < 0)
goto err_kfree;
@@ -163,7 +194,7 @@ EXPORT_SYMBOL(drm_gem_vram_put);
*/
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
- return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+ return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
@@ -187,30 +218,12 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_offset);
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
+ unsigned long pl_flag)
{
int i, ret;
struct ttm_operation_ctx ctx = { false, false };
- ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
- return ret;
-
if (gbo->pin_count)
goto out;
@@ -222,62 +235,123 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
+ return ret;
out:
++gbo->pin_count;
- ttm_bo_unreserve(&gbo->bo);
return 0;
-
-err_ttm_bo_unreserve:
- ttm_bo_unreserve(&gbo->bo);
- return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
* @gbo: the GEM VRAM object
+ * @pl_flag: a bitmask of possible memory regions
+ *
+ * Pinning a buffer object ensures that it is not evicted from
+ * a memory region. A pinned buffer object has to be unpinned before
+ * it can be pinned to another region. If the pl_flag argument is 0,
+ * the buffer is pinned at its current location (video RAM or system
+ * memory).
+ *
+ * Small buffer objects, such as cursor images, can lead to memory
+ * fragmentation if they are pinned in the middle of video RAM. This
+ * is especially a problem on devices with only a small amount of
+ * video RAM. Fragmentation can prevent the primary framebuffer from
+ * fitting in, even though there's enough memory overall. The modifier
+ * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
+ * at the high end of the memory region to avoid fragmentation.
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
- int i, ret;
- struct ttm_operation_ctx ctx = { false, false };
+ int ret;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
+ if (ret)
return ret;
+ ret = drm_gem_vram_pin_locked(gbo, pl_flag);
+ ttm_bo_unreserve(&gbo->bo);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_pin);
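/*
 * Editorial sketch (not part of the patch): pinning a small cursor BO with
 * the top-down modifier keeps the middle of VRAM free for the primary
 * framebuffer; the "foo_" wrapper is hypothetical.
 */
static int foo_pin_cursor(struct drm_gem_vram_object *gbo)
{
	return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
				     DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
}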
+
+static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+{
+ int i, ret;
+ struct ttm_operation_ctx ctx = { false, false };
if (WARN_ON_ONCE(!gbo->pin_count))
- goto out;
+ return 0;
--gbo->pin_count;
if (gbo->pin_count)
- goto out;
+ return 0;
for (i = 0; i < gbo->placement.num_placement ; ++i)
gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
-
-out:
- ttm_bo_unreserve(&gbo->bo);
+ return ret;
return 0;
+}
-err_ttm_bo_unreserve:
+/**
+ * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * @gbo: the GEM VRAM object
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
+static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
+ bool map, bool *is_iomem)
+{
+ int ret;
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+
+ if (gbo->kmap_use_count > 0)
+ goto out;
+
+ if (kmap->virtual || !map)
+ goto out;
+
+ ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ if (ret)
+ return ERR_PTR(ret);
+
+out:
+ if (!kmap->virtual) {
+ if (is_iomem)
+ *is_iomem = false;
+ return NULL; /* not mapped; don't increment ref */
+ }
+ ++gbo->kmap_use_count;
+ if (is_iomem)
+ return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return kmap->virtual;
+}
+
/**
* drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
* @gbo: the GEM VRAM object
@@ -299,50 +373,127 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem)
{
int ret;
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+ void *virtual;
- if (kmap->virtual || !map)
- goto out;
-
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ERR_PTR(ret);
+ virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
+ ttm_bo_unreserve(&gbo->bo);
-out:
- if (!is_iomem)
- return kmap->virtual;
- if (!kmap->virtual) {
- *is_iomem = false;
- return NULL;
- }
- return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);
+static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
+{
+ if (WARN_ON_ONCE(!gbo->kmap_use_count))
+ return;
+ if (--gbo->kmap_use_count > 0)
+ return;
+
+ /*
+ * Permanently mapping and unmapping buffers adds overhead from
+ * updating the page tables and creates debugging output. Therefore,
+ * we delay the actual unmap operation until the BO gets evicted
+ * from memory. See drm_gem_vram_bo_driver_move_notify().
+ */
+}
+
/**
* drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
* @gbo: the GEM VRAM object
*/
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+ int ret;
- if (!kmap->virtual)
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
return;
-
- ttm_bo_kunmap(kmap);
- kmap->virtual = NULL;
+ drm_gem_vram_kunmap_locked(gbo);
+ ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
/**
+ * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
+ * space
+ * @gbo: The GEM VRAM object to map
+ *
+ * The vmap function pins a GEM VRAM object to its current location, either
+ * system or video memory, and maps its buffer into kernel address space.
+ * As pinned objects cannot be relocated, you should avoid pinning objects
+ * permanently. Call drm_gem_vram_vunmap() with the returned address to
+ * unmap and unpin the GEM VRAM object.
+ *
+ * If you have special requirements for the pinning or mapping operations,
+ * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
+ *
+ * Returns:
+ * The buffer's virtual address on success, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+ void *base;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_gem_vram_pin_locked(gbo, 0);
+ if (ret)
+ goto err_ttm_bo_unreserve;
+ base = drm_gem_vram_kmap_locked(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_drm_gem_vram_unpin_locked;
+ }
+
+ ttm_bo_unreserve(&gbo->bo);
+
+ return base;
+
+err_drm_gem_vram_unpin_locked:
+ drm_gem_vram_unpin_locked(gbo);
+err_ttm_bo_unreserve:
+ ttm_bo_unreserve(&gbo->bo);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_vram_vmap);
+
+/**
+ * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
+ * @gbo: The GEM VRAM object to unmap
+ * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
+ *
+ * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
+ * the documentation for drm_gem_vram_vmap() for more information.
+ */
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
+ return;
+
+ drm_gem_vram_kunmap_locked(gbo);
+ drm_gem_vram_unpin_locked(gbo);
+
+ ttm_bo_unreserve(&gbo->bo);
+}
+EXPORT_SYMBOL(drm_gem_vram_vunmap);
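/*
 * Editorial sketch (not part of the patch): using the new vmap/vunmap pair,
 * e.g. to copy a cursor image into VRAM; the "foo_" function and the image
 * layout are hypothetical.
 */
static int foo_write_cursor(struct drm_gem_vram_object *gbo,
			    const void *image, size_t size)
{
	void *dst;

	dst = drm_gem_vram_vmap(gbo);	/* pins and maps in one step */
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	memcpy(dst, image, size);	/* caller must ensure size fits the BO */

	drm_gem_vram_vunmap(gbo, dst);	/* unmaps and unpins again */
	return 0;
}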
+
+/**
* drm_gem_vram_fill_create_dumb() - \
Helper for implementing &struct drm_driver.dumb_create
* @file: the DRM file
* @dev: the DRM device
- * @bdev: the TTM BO device managing the buffer object
* @pg_align: the buffer's alignment in multiples of the page size
- * @interruptible: sleep interruptible if waiting for memory
+ * @pitch_align: the scanline's alignment in powers of 2
* @args: the arguments as provided to \
&struct drm_driver.dumb_create
*
@@ -357,9 +508,8 @@ EXPORT_SYMBOL(drm_gem_vram_kunmap);
*/
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
- struct ttm_bo_device *bdev,
unsigned long pg_align,
- bool interruptible,
+ unsigned long pitch_align,
struct drm_mode_create_dumb *args)
{
size_t pitch, size;
@@ -367,22 +517,27 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
int ret;
u32 handle;
- pitch = args->width * ((args->bpp + 7) / 8);
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ if (pitch_align) {
+ if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
+ return -EINVAL;
+ pitch = ALIGN(pitch, pitch_align);
+ }
size = pitch * args->height;
size = roundup(size, PAGE_SIZE);
if (!size)
return -EINVAL;
- gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
+ gbo = drm_gem_vram_create(dev, size, pg_align);
if (IS_ERR(gbo))
return PTR_ERR(gbo);
- ret = drm_gem_handle_create(file, &gbo->gem, &handle);
+ ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
if (ret)
goto err_drm_gem_object_put_unlocked;
- drm_gem_object_put_unlocked(&gbo->gem);
+ drm_gem_object_put_unlocked(&gbo->bo.base);
args->pitch = pitch;
args->size = size;
@@ -391,7 +546,7 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
return 0;
err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(&gbo->gem);
+ drm_gem_object_put_unlocked(&gbo->bo.base);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
@@ -405,76 +560,47 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
return (bo->destroy == ttm_buffer_object_destroy);
}
-/**
- * drm_gem_vram_bo_driver_evict_flags() - \
- Implements &struct ttm_bo_driver.evict_flags
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @pl: TTM placement information.
- */
-void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *pl)
+static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
+ struct ttm_placement *pl)
{
- struct drm_gem_vram_object *gbo;
-
- /* TTM may pass BOs that are not GEM VRAM BOs. */
- if (!drm_is_gem_vram(bo))
- return;
-
- gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
*pl = gbo->placement;
}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);
-/**
- * drm_gem_vram_bo_driver_verify_access() - \
- Implements &struct ttm_bo_driver.verify_access
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @filp: File pointer.
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
+static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
- return drm_vma_node_verify_access(&gbo->gem.vma_node,
- filp->private_data);
-}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
+ if (WARN_ON_ONCE(gbo->kmap_use_count))
+ return;
-/*
- * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
- *
- * Most users of @struct drm_gem_vram_object will also use
- * @struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
- * can be used to connect both.
- */
-const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
- .evict_flags = drm_gem_vram_bo_driver_evict_flags,
- .verify_access = drm_gem_vram_bo_driver_verify_access
-};
-EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
+ if (!kmap->virtual)
+ return;
+ ttm_bo_kunmap(kmap);
+ kmap->virtual = NULL;
+}
/*
- * Helpers for struct drm_driver
+ * Helpers for struct drm_gem_object_funcs
*/
/**
- * drm_gem_vram_driver_gem_free_object_unlocked() - \
- Implements &struct drm_driver.gem_free_object_unlocked
- * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
+ * drm_gem_vram_object_free() - \
+ Implements &struct drm_gem_object_funcs.free
+ * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
*/
-void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem)
+static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_put(gbo);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_free_object_unlocked);
+
+/*
+ * Helpers for dumb buffers
+ */
/**
* drm_gem_vram_driver_create_dumb() - \
@@ -498,8 +624,7 @@ int drm_gem_vram_driver_dumb_create(struct drm_file *file,
if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
return -EINVAL;
- return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
- false, args);
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
@@ -536,19 +661,142 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
- * PRIME helpers for struct drm_driver
+ * Helpers for struct drm_plane_helper_funcs
+ */
+
+/**
+ * drm_gem_vram_plane_helper_prepare_fb() - \
+ * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * @plane: a DRM plane
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
+ if (!new_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+ }
+
+ return 0;
+
+err_drm_gem_vram_unpin:
+ while (i) {
+ --i;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
+
+/**
+ * drm_gem_vram_plane_helper_cleanup_fb() - \
+ * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * @plane: a DRM plane
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_plane_helper_prepare_fb().
+ */
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ if (!old_state->fb)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
+ if (!old_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
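As a rough usage sketch (not part of this patch; the foo_* names are placeholders), a driver using the VRAM helpers would typically point its primary plane's helper funcs at these two callbacks:

static const struct drm_plane_helper_funcs foo_primary_plane_helper_funcs = {
	/* Pin the new framebuffer's GEM VRAM BOs before the plane update... */
	.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
	/* ...and unpin the old framebuffer's BOs afterwards. */
	.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
	/* .atomic_check / .atomic_update remain the driver's own callbacks. */
};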
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
*/
/**
- * drm_gem_vram_driver_gem_prime_pin() - \
- Implements &struct drm_driver.gem_prime_pin
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * @pipe: a simple display pipe
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
+{
+ return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
+
+/**
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * @pipe: a simple display pipe
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_simple_display_pipe_prepare_fb().
+ */
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
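For drivers built on the simple-KMS helpers, the equivalent hookup goes through &struct drm_simple_display_pipe_funcs; again a hedged sketch with placeholder names:

static const struct drm_simple_display_pipe_funcs bar_pipe_funcs = {
	/* .enable, .disable and .update are the driver's own callbacks. */
	.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
	.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};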
+
+/*
+ * PRIME helpers
+ */
+
+/**
+ * drm_gem_vram_object_pin() - \
+ Implements &struct drm_gem_object_funcs.pin
* @gem: The GEM object to pin
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
-int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
+static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
@@ -562,80 +810,334 @@ int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
*/
return drm_gem_vram_pin(gbo, 0);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_pin);
/**
- * drm_gem_vram_driver_gem_prime_unpin() - \
- Implements &struct drm_driver.gem_prime_unpin
+ * drm_gem_vram_object_unpin() - \
+ Implements &struct drm_gem_object_funcs.unpin
* @gem: The GEM object to unpin
*/
-void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *gem)
+static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_unpin(gbo);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_unpin);
/**
- * drm_gem_vram_driver_gem_prime_vmap() - \
- Implements &struct drm_driver.gem_prime_vmap
+ * drm_gem_vram_object_vmap() - \
+ Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
*
* Returns:
* The buffers virtual address on success, or
* NULL otherwise.
*/
-void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem)
+static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- int ret;
void *base;
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
+ base = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(base))
return NULL;
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- drm_gem_vram_unpin(gbo);
- return NULL;
- }
return base;
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vmap);
/**
- * drm_gem_vram_driver_gem_prime_vunmap() - \
- Implements &struct drm_driver.gem_prime_vunmap
+ * drm_gem_vram_object_vunmap() - \
+ Implements &struct drm_gem_object_funcs.vunmap
* @gem: The GEM object to unmap
* @vaddr: The mapping's base address
*/
-void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *gem,
- void *vaddr)
+static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
+ void *vaddr)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, vaddr);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap);
+
+/*
+ * GEM object funcs
+ */
+
+static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
+ .free = drm_gem_vram_object_free,
+ .pin = drm_gem_vram_object_pin,
+ .unpin = drm_gem_vram_object_unpin,
+ .vmap = drm_gem_vram_object_vmap,
+ .vunmap = drm_gem_vram_object_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
+/*
+ * VRAM memory manager
+ */
+
+/*
+ * TTM TT
+ */
+
+static void backend_func_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func backend_func = {
+ .destroy = backend_func_destroy
+};
+
+/*
+ * TTM BO device
+ */
+
+static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &backend_func;
+
+ ret = ttm_tt_init(tt, bo, page_flags);
+ if (ret < 0)
+ goto err_ttm_tt_init;
+
+ return tt;
+
+err_ttm_tt_init:
+ kfree(tt);
+ return NULL;
+}
+
+static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_evict_flags(gbo, placement);
+}
+
+static void bo_driver_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+}
+
+static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+
+ mem->bus.addr = NULL;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM: /* nothing to do */
+ mem->bus.offset = 0;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = vmm->vram_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{ }
+
+static struct ttm_bo_driver bo_driver = {
+ .ttm_tt_create = bo_driver_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bo_driver_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bo_driver_evict_flags,
+ .move_notify = bo_driver_move_notify,
+ .io_mem_reserve = bo_driver_io_mem_reserve,
+ .io_mem_free = bo_driver_io_mem_free,
+};
+
+/*
+ * struct drm_vram_mm
+ */
+
+#if defined(CONFIG_DEBUG_FS)
+static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ drm_mm_print(mm, &p);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ return 0;
+}
+
+static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+ { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
+};
+#endif
/**
- * drm_gem_vram_driver_gem_prime_mmap() - \
- Implements &struct drm_driver.gem_prime_mmap
- * @gem: The GEM object to map
- * @vma: The VMA describing the mapping
+ * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
+ *
+ * @minor: drm minor device.
*
* Returns:
* 0 on success, or
- * a negative errno code otherwise.
+ * a negative error code otherwise.
*/
-int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem,
- struct vm_area_struct *vma)
+int drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+ int ret = 0;
+
+#if defined(CONFIG_DEBUG_FS)
+ ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
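Since drm_vram_mm_debugfs_init() matches the &struct drm_driver.debugfs_init signature, a driver can point that hook directly at it; a minimal sketch with unrelated fields omitted:

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* Exposes the "vram-mm" file in the device's debugfs directory. */
	.debugfs_init = drm_vram_mm_debugfs_init,
	/* ... */
};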
+
+static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
+ uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ vmm->vram_base = vram_base;
+ vmm->vram_size = vram_size;
+
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
+ true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
+{
+ ttm_bo_device_release(&vmm->bdev);
+}
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+/**
+ * drm_vram_helper_alloc_mm - Allocates a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ * @vram_base: the base address of the video memory
+ * @vram_size: the size of the video memory in bytes
+ *
+ * Returns:
+ * The new instance of &struct drm_vram_mm on success, or
+ * an ERR_PTR()-encoded errno code otherwise.
+ */
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ if (WARN_ON(dev->vram_mm))
+ return dev->vram_mm;
+
+ dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
+ if (!dev->vram_mm)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
+ if (ret)
+ goto err_kfree;
+
+ return dev->vram_mm;
+
+err_kfree:
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
+
+/**
+ * drm_vram_helper_release_mm - Releases a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ */
+void drm_vram_helper_release_mm(struct drm_device *dev)
+{
+ if (!dev->vram_mm)
+ return;
- gbo->gem.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start;
- return drm_gem_prime_mmap(gem, vma);
+ drm_vram_mm_cleanup(dev->vram_mm);
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap);
+EXPORT_SYMBOL(drm_vram_helper_release_mm);
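A minimal sketch of how a PCI driver might pair these two helpers, assuming its VRAM sits behind BAR 0 (the foo_* functions are illustrative only, not part of this patch):

static int foo_load(struct drm_device *dev, struct pci_dev *pdev)
{
	struct drm_vram_mm *vmm;

	/* Manage the whole BAR as VRAM for GEM VRAM objects. */
	vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (IS_ERR(vmm))
		return PTR_ERR(vmm);

	return 0;
}

static void foo_unload(struct drm_device *dev)
{
	/* Tears down the TTM BO device and frees dev->vram_mm. */
	drm_vram_helper_release_mm(dev);
}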
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index cd837bd409f7..9191633a3c43 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -271,6 +271,13 @@ exit:
*
* SRM should be presented in the name of "display_hdcp_srm.bin".
*
+ * The format of the SRM table, which userspace needs to write into the binary
+ * file, is defined in:
+ * 1. Renewability chapter on 55th page of HDCP 1.4 specification
+ * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20Specification%20Rev1_4_Secure.pdf
+ * 2. Renewability chapter on 63rd page of HDCP 2.2 specification
+ * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
+ *
* Returns:
* TRUE on any of the KSV is revoked, else FALSE.
*/
@@ -344,23 +351,45 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+static struct drm_prop_enum_list drm_hdcp_content_type_enum_list[] = {
+ { DRM_MODE_HDCP_CONTENT_TYPE0, "HDCP Type0" },
+ { DRM_MODE_HDCP_CONTENT_TYPE1, "HDCP Type1" },
+};
+DRM_ENUM_NAME_FN(drm_get_hdcp_content_type_name,
+ drm_hdcp_content_type_enum_list)
+
/**
* drm_connector_attach_content_protection_property - attach content protection
* property
*
* @connector: connector to attach CP property on.
+ * @hdcp_content_type: whether the HDCP Content Type property is needed for this connector
*
* This is used to add support for content protection on select connectors.
* Content Protection is intentionally vague to allow for different underlying
* technologies, however it is most implemented by HDCP.
*
+ * When @hdcp_content_type is true, an enum property called "HDCP Content Type"
+ * is created (if it does not exist already) and attached to the connector.
+ *
+ * This property is used by userspace to pass the stream type of the protected
+ * content to the kernel on the selected connectors. The protected content
+ * provider decides the type of its content and declares it to the kernel.
+ *
+ * Content type will be used during the HDCP 2.2 authentication.
+ * Content type will be set to &drm_connector_state.hdcp_content_type.
+ *
* The content protection will be set to &drm_connector_state.content_protection
*
+ * For kernel-triggered content protection state changes, such as
+ * DESIRED->ENABLED and ENABLED->DESIRED, drivers use
+ * drm_hdcp_update_content_protection() to update the content protection state
+ * of a connector.
+ *
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_content_protection_property(
- struct drm_connector *connector)
+ struct drm_connector *connector, bool hdcp_content_type)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop =
@@ -377,6 +406,52 @@ int drm_connector_attach_content_protection_property(
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
dev->mode_config.content_protection_property = prop;
+ if (!hdcp_content_type)
+ return 0;
+
+ prop = dev->mode_config.hdcp_content_type_property;
+ if (!prop)
+ prop = drm_property_create_enum(dev, 0, "HDCP Content Type",
+ drm_hdcp_content_type_enum_list,
+ ARRAY_SIZE(
+ drm_hdcp_content_type_enum_list));
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&connector->base, prop,
+ DRM_MODE_HDCP_CONTENT_TYPE0);
+ dev->mode_config.hdcp_content_type_property = prop;
+
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
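A hedged sketch of how a driver might attach the property while initializing a connector; passing true additionally creates the "HDCP Content Type" property (foo_connector_init() is a placeholder, not part of this patch):

static int foo_connector_init(struct drm_connector *connector)
{
	int ret;

	/* true: also expose the "HDCP Content Type" enum for HDCP 2.2. */
	ret = drm_connector_attach_content_protection_property(connector, true);
	if (ret)
		return ret;

	return 0;
}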
+
+/**
+ * drm_hdcp_update_content_protection - Updates the content protection state
+ * of a connector
+ *
+ * @connector: drm_connector on which content protection state needs an update
+ * @val: New state of the content protection property
+ *
+ * This function can be used by display drivers to report kernel-triggered
+ * content protection state changes of a drm_connector, such as DESIRED->ENABLED
+ * and ENABLED->DESIRED. No uevent is generated for DESIRED->UNDESIRED or
+ * ENABLED->UNDESIRED, as userspace triggers those state changes and the kernel
+ * performs them without fail. This function updates the new state of the
+ * property in the connector's state and generates a uevent to notify userspace.
+ */
+void drm_hdcp_update_content_protection(struct drm_connector *connector,
+ u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_connector_state *state = connector->state;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ if (state->content_protection == val)
+ return;
+
+ state->content_protection = val;
+ drm_sysfs_connector_status_event(connector,
+ dev->mode_config.content_protection_property);
+}
+EXPORT_SYMBOL(drm_hdcp_update_content_protection);
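Illustrative only: after a successful kernel-side authentication, and with the connection_mutex already held as the WARN_ON() above requires, a driver would report the transition like this (foo_hdcp_authenticated() is a placeholder):

static void foo_hdcp_authenticated(struct drm_connector *connector)
{
	/* DESIRED -> ENABLED: updates connector state and emits a uevent. */
	drm_hdcp_update_content_protection(connector,
					   DRM_MODE_CONTENT_PROTECTION_ENABLED);
}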
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 51a2055c8f18..6937bf923f05 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -45,12 +45,34 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
+#ifdef CONFIG_PCI
+
/* drm_pci.c */
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+
+static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -EINVAL;
+}
+
+static inline void drm_pci_agp_destroy(struct drm_device *dev)
+{
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -EINVAL;
+}
+
+#endif
+
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index a16b6dc2fa47..22c7fd7196c8 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -108,7 +108,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
.desc = compat_ptr(v32.desc),
};
err = drm_ioctl_kernel(file, drm_version, &v,
- DRM_UNLOCKED|DRM_RENDER_ALLOW);
+ DRM_RENDER_ALLOW);
if (err)
return err;
@@ -142,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
.unique = compat_ptr(uq32.unique),
};
- err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_getunique, &uq, 0);
if (err)
return err;
@@ -181,7 +181,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
return -EFAULT;
map.offset = m32.offset;
- err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
if (err)
return err;
@@ -267,7 +267,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
client.idx = c32.idx;
- err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
if (err)
return err;
@@ -297,7 +297,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
drm_stats32_t __user *argp = (void __user *)arg;
int err;
- err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_noop, NULL, 0);
if (err)
return err;
@@ -895,8 +895,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
sizeof(req64.modifier)))
return -EFAULT;
- err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64,
- DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, 0);
if (err)
return err;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index bd810454d239..5afb39688b55 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -336,7 +336,12 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CLIENT_CAP_ATOMIC:
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EOPNOTSUPP;
- if (req->value > 1)
+ /* The modesetting DDX has a totally broken idea of atomic. */
+ if (current->comm[0] == 'X' && req->value == 1) {
+ pr_info("broken atomic modeset userspace detected, disabling atomic\n");
+ return -EOPNOTSUPP;
+ }
+ if (req->value > 2)
return -EINVAL;
file_priv->atomic = req->value;
file_priv->universal_planes = req->value;
@@ -570,24 +575,23 @@ EXPORT_SYMBOL(drm_ioctl_permit);
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -595,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -642,74 +646,74 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -777,7 +781,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
return retcode;
/* Enforce sane locking for modern driver ioctls. */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
+ if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
(flags & DRM_UNLOCKED))
retcode = func(dev, kdata, file_priv);
else {
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index d9a5ac81949e..221a8528c993 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -40,7 +40,7 @@ MODULE_LICENSE("GPL and additional rights");
/* Backward compatibility for drm_kms_helper.edid_firmware */
static int edid_firmware_set(const char *val, const struct kernel_param *kp)
{
- DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
+ DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
return __drm_set_edid_firmware_path(val);
}
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
index 4d3a11cfd979..8f54e6a78b6f 100644
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_legacy_misc.c
* Misc legacy support functions.
*
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 68b18b0e290c..2c79e8199e3c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_lock.c
* IOCTLs for locking
*
@@ -360,7 +360,8 @@ void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *m
/*
* Since the master is disappearing, so is the
* possibility to lock.
- */ mutex_lock(&dev->struct_mutex);
+ */
+ mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index b634e1670190..fbea69d6f909 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_memory.c
* Memory management wrappers for DRM
*
@@ -40,6 +40,7 @@
#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
+#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index ca9da654fc6f..16bff1be4b8a 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -13,17 +13,18 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
+#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -98,17 +99,17 @@ static const u8 mipi_dbi_dcs_read_commands[] = {
0, /* sentinel */
};
-static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
+static bool mipi_dbi_command_is_read(struct mipi_dbi *dbi, u8 cmd)
{
unsigned int i;
- if (!mipi->read_commands)
+ if (!dbi->read_commands)
return false;
for (i = 0; i < 0xff; i++) {
- if (!mipi->read_commands[i])
+ if (!dbi->read_commands[i])
return false;
- if (cmd == mipi->read_commands[i])
+ if (cmd == dbi->read_commands[i])
return true;
}
@@ -117,7 +118,7 @@ static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
/**
* mipi_dbi_command_read - MIPI DCS read command
- * @mipi: MIPI structure
+ * @dbi: MIPI DBI structure
* @cmd: Command
* @val: Value read
*
@@ -126,21 +127,21 @@ static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val)
+int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val)
{
- if (!mipi->read_commands)
+ if (!dbi->read_commands)
return -EACCES;
- if (!mipi_dbi_command_is_read(mipi, cmd))
+ if (!mipi_dbi_command_is_read(dbi, cmd))
return -EINVAL;
- return mipi_dbi_command_buf(mipi, cmd, val, 1);
+ return mipi_dbi_command_buf(dbi, cmd, val, 1);
}
EXPORT_SYMBOL(mipi_dbi_command_read);
/**
* mipi_dbi_command_buf - MIPI DCS command with parameter(s) in an array
- * @mipi: MIPI structure
+ * @dbi: MIPI DBI structure
* @cmd: Command
* @data: Parameter buffer
* @len: Buffer length
@@ -148,7 +149,7 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
{
u8 *cmdbuf;
int ret;
@@ -158,9 +159,9 @@ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
if (!cmdbuf)
return -ENOMEM;
- mutex_lock(&mipi->cmdlock);
- ret = mipi->command(mipi, cmdbuf, data, len);
- mutex_unlock(&mipi->cmdlock);
+ mutex_lock(&dbi->cmdlock);
+ ret = dbi->command(dbi, cmdbuf, data, len);
+ mutex_unlock(&dbi->cmdlock);
kfree(cmdbuf);
@@ -169,7 +170,7 @@ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
EXPORT_SYMBOL(mipi_dbi_command_buf);
/* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
{
u8 *buf;
int ret;
@@ -178,7 +179,7 @@ int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t le
if (!buf)
return -ENOMEM;
- ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+ ret = mipi_dbi_command_buf(dbi, cmd, buf, len);
kfree(buf);
@@ -199,8 +200,9 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
+ struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
+ struct dma_buf_attachment *import_attach = gem->import_attach;
struct drm_format_name_buf format_name;
void *src = cma_obj->vaddr;
int ret = 0;
@@ -238,16 +240,18 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy);
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
- bool swap = mipi->swap_bytes;
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ bool swap = dbi->swap_bytes;
int idx, ret = 0;
bool full;
void *tr;
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
if (!drm_dev_enter(fb->dev, &idx))
@@ -257,24 +261,24 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- if (!mipi->dc || !full || swap ||
+ if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
- tr = mipi->tx_buf;
- ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, rect, swap);
+ tr = dbidev->tx_buf;
+ ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
if (ret)
goto err_msg;
} else {
tr = cma_obj->vaddr;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
(rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
(rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
- ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
+ ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
err_msg:
if (ret)
@@ -312,7 +316,7 @@ EXPORT_SYMBOL(mipi_dbi_pipe_update);
/**
* mipi_dbi_enable_flush - MIPI DBI enable helper
- * @mipi: MIPI DBI structure
+ * @dbidev: MIPI DBI device structure
* @crtc_state: CRTC state
* @plane_state: Plane state
*
@@ -324,7 +328,7 @@ EXPORT_SYMBOL(mipi_dbi_pipe_update);
* framebuffer flushing, can't use this function since they both use the same
* flushing code.
*/
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
@@ -337,36 +341,37 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
};
int idx;
- if (!drm_dev_enter(&mipi->drm, &idx))
+ if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi->enabled = true;
+ dbidev->enabled = true;
mipi_dbi_fb_dirty(fb, &rect);
- backlight_enable(mipi->backlight);
+ backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_enable_flush);
-static void mipi_dbi_blank(struct mipi_dbi *mipi)
+static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
{
- struct drm_device *drm = &mipi->drm;
+ struct drm_device *drm = &dbidev->drm;
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
+ struct mipi_dbi *dbi = &dbidev->dbi;
size_t len = width * height * 2;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
- memset(mipi->tx_buf, 0, len);
+ memset(dbidev->tx_buf, 0, len);
- mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
- (width >> 8) & 0xFF, (width - 1) & 0xFF);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
- (height >> 8) & 0xFF, (height - 1) & 0xFF);
- mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
- (u8 *)mipi->tx_buf, len);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
+ ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
+ ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+ mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)dbidev->tx_buf, len);
drm_dev_exit(idx);
}
@@ -381,25 +386,79 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
DRM_DEBUG_KMS("\n");
- mipi->enabled = false;
+ dbidev->enabled = false;
- if (mipi->backlight)
- backlight_disable(mipi->backlight);
+ if (dbidev->backlight)
+ backlight_disable(dbidev->backlight);
else
- mipi_dbi_blank(mipi);
+ mipi_dbi_blank(dbidev);
- if (mipi->regulator)
- regulator_disable(mipi->regulator);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &dbidev->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to duplicate mode\n");
+ return 0;
+ }
+
+ if (mode->name[0] == '\0')
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ if (mode->width_mm) {
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ }
+
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs mipi_dbi_connector_hfuncs = {
+ .get_modes = mipi_dbi_connector_get_modes,
+};
+
+static const struct drm_connector_funcs mipi_dbi_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int mipi_dbi_rotate_mode(struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ if (rotation == 0 || rotation == 180) {
+ return 0;
+ } else if (rotation == 90 || rotation == 270) {
+ swap(mode->hdisplay, mode->vdisplay);
+ swap(mode->hsync_start, mode->vsync_start);
+ swap(mode->hsync_end, mode->vsync_end);
+ swap(mode->htotal, mode->vtotal);
+ swap(mode->width_mm, mode->height_mm);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
@@ -412,60 +471,111 @@ static const uint32_t mipi_dbi_formats[] = {
};
/**
- * mipi_dbi_init - MIPI DBI initialization
- * @mipi: &mipi_dbi structure to initialize
+ * mipi_dbi_dev_init_with_formats - MIPI DBI device initialization with custom formats
+ * @dbidev: MIPI DBI device structure to initialize
* @funcs: Display pipe functions
+ * @formats: Array of supported formats (DRM_FORMAT\_\*).
+ * @format_count: Number of elements in @formats
* @mode: Display mode
* @rotation: Initial rotation in degrees Counter Clock Wise
+ * @tx_buf_size: Allocate a transmit buffer of this size.
*
* This function sets up a &drm_simple_display_pipe with a &drm_connector that
* has one fixed &drm_display_mode which is rotated according to @rotation.
* This mode is used to set the mode config min/max width/height properties.
- * Additionally &mipi_dbi.tx_buf is allocated.
*
- * Supported formats: Native RGB565 and emulated XRGB8888.
+ * Use mipi_dbi_dev_init() if you don't need custom formats.
+ *
+ * Note:
+ * Some of the helper functions expect RGB565 to be the default format and the
+ * transmit buffer to be sized to fit that.
*
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_init(struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *funcs,
- const struct drm_display_mode *mode, unsigned int rotation)
+int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation, size_t tx_buf_size)
{
- size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
- struct drm_device *drm = &mipi->drm;
+ static const uint64_t modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+ };
+ struct drm_device *drm = &dbidev->drm;
int ret;
- if (!mipi->command)
+ if (!dbidev->dbi.command)
return -EINVAL;
- mutex_init(&mipi->cmdlock);
-
- mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
+ dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
+ if (!dbidev->tx_buf)
return -ENOMEM;
- /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
- ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- mipi_dbi_formats,
- ARRAY_SIZE(mipi_dbi_formats), mode,
- rotation);
+ drm_mode_copy(&dbidev->mode, mode);
+ ret = mipi_dbi_rotate_mode(&dbidev->mode, rotation);
+ if (ret) {
+ DRM_ERROR("Illegal rotation value %u\n", rotation);
+ return -EINVAL;
+ }
+
+ drm_connector_helper_add(&dbidev->connector, &mipi_dbi_connector_hfuncs);
+ ret = drm_connector_init(drm, &dbidev->connector, &mipi_dbi_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
if (ret)
return ret;
- drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
+ ret = drm_simple_display_pipe_init(drm, &dbidev->pipe, funcs, formats, format_count,
+ modifiers, &dbidev->connector);
+ if (ret)
+ return ret;
+
+ drm_plane_enable_fb_damage_clips(&dbidev->pipe.plane);
drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
- drm->mode_config.preferred_depth = 16;
- mipi->rotation = rotation;
+ drm->mode_config.min_width = dbidev->mode.hdisplay;
+ drm->mode_config.max_width = dbidev->mode.hdisplay;
+ drm->mode_config.min_height = dbidev->mode.vdisplay;
+ drm->mode_config.max_height = dbidev->mode.vdisplay;
+ dbidev->rotation = rotation;
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- drm->mode_config.preferred_depth, rotation);
+ DRM_DEBUG_KMS("rotation = %u\n", rotation);
return 0;
}
-EXPORT_SYMBOL(mipi_dbi_init);
+EXPORT_SYMBOL(mipi_dbi_dev_init_with_formats);
+
+/**
+ * mipi_dbi_dev_init - MIPI DBI device initialization
+ * @dbidev: MIPI DBI device structure to initialize
+ * @funcs: Display pipe functions
+ * @mode: Display mode
+ * @rotation: Initial rotation in degrees Counter Clock Wise
+ *
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ * This mode is used to set the mode config min/max width/height properties.
+ * Additionally &mipi_dbi_dev.tx_buf is allocated.
+ *
+ * Supported formats: Native RGB565 and emulated XRGB8888.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const struct drm_display_mode *mode, unsigned int rotation)
+{
+ size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+
+ dbidev->drm.mode_config.preferred_depth = 16;
+
+ return mipi_dbi_dev_init_with_formats(dbidev, funcs, mipi_dbi_formats,
+ ARRAY_SIZE(mipi_dbi_formats), mode,
+ rotation, bufsize);
+}
+EXPORT_SYMBOL(mipi_dbi_dev_init);
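A hedged probe-time sketch of the new init helper, assuming a 240x320 panel and an existing pipe-funcs table in the driver (the foo_* names and the DRM_SIMPLE_MODE dimensions are placeholders, not part of this patch):

static const struct drm_display_mode foo_mode = {
	DRM_SIMPLE_MODE(240, 320, 37, 49),
};

static int foo_init_modeset(struct mipi_dbi_dev *dbidev, unsigned int rotation)
{
	int ret;

	/* Sets up connector, pipe and a rotated copy of foo_mode; also
	 * allocates the RGB565-sized transmit buffer. foo_pipe_funcs is the
	 * driver's drm_simple_display_pipe_funcs table (assumed to exist). */
	ret = mipi_dbi_dev_init(dbidev, &foo_pipe_funcs, &foo_mode, rotation);
	if (ret)
		return ret;

	drm_mode_config_reset(&dbidev->drm);

	return 0;
}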
/**
* mipi_dbi_release - DRM driver release helper
@@ -477,37 +587,37 @@ EXPORT_SYMBOL(mipi_dbi_init);
*/
void mipi_dbi_release(struct drm_device *drm)
{
- struct mipi_dbi *dbi = drm_to_mipi_dbi(drm);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
DRM_DEBUG_DRIVER("\n");
drm_mode_config_cleanup(drm);
drm_dev_fini(drm);
- kfree(dbi);
+ kfree(dbidev);
}
EXPORT_SYMBOL(mipi_dbi_release);
/**
* mipi_dbi_hw_reset - Hardware reset of controller
- * @mipi: MIPI DBI structure
+ * @dbi: MIPI DBI structure
*
* Reset controller if the &mipi_dbi->reset gpio is set.
*/
-void mipi_dbi_hw_reset(struct mipi_dbi *mipi)
+void mipi_dbi_hw_reset(struct mipi_dbi *dbi)
{
- if (!mipi->reset)
+ if (!dbi->reset)
return;
- gpiod_set_value_cansleep(mipi->reset, 0);
+ gpiod_set_value_cansleep(dbi->reset, 0);
usleep_range(20, 1000);
- gpiod_set_value_cansleep(mipi->reset, 1);
+ gpiod_set_value_cansleep(dbi->reset, 1);
msleep(120);
}
EXPORT_SYMBOL(mipi_dbi_hw_reset);
/**
* mipi_dbi_display_is_on - Check if display is on
- * @mipi: MIPI DBI structure
+ * @dbi: MIPI DBI structure
*
* This function checks the Power Mode register (if readable) to see if
* display output is turned on. This can be used to see if the bootloader
@@ -517,11 +627,11 @@ EXPORT_SYMBOL(mipi_dbi_hw_reset);
* Returns:
* true if the display can be verified to be on, false otherwise.
*/
-bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
+bool mipi_dbi_display_is_on(struct mipi_dbi *dbi)
{
u8 val;
- if (mipi_dbi_command_read(mipi, MIPI_DCS_GET_POWER_MODE, &val))
+ if (mipi_dbi_command_read(dbi, MIPI_DCS_GET_POWER_MODE, &val))
return false;
val &= ~DCS_POWER_MODE_RESERVED_MASK;
@@ -537,28 +647,29 @@ bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
}
EXPORT_SYMBOL(mipi_dbi_display_is_on);
-static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
+static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool cond)
{
- struct device *dev = mipi->drm.dev;
+ struct device *dev = dbidev->drm.dev;
+ struct mipi_dbi *dbi = &dbidev->dbi;
int ret;
- if (mipi->regulator) {
- ret = regulator_enable(mipi->regulator);
+ if (dbidev->regulator) {
+ ret = regulator_enable(dbidev->regulator);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to enable regulator (%d)\n", ret);
return ret;
}
}
- if (cond && mipi_dbi_display_is_on(mipi))
+ if (cond && mipi_dbi_display_is_on(dbi))
return 1;
- mipi_dbi_hw_reset(mipi);
- ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
+ mipi_dbi_hw_reset(dbi);
+ ret = mipi_dbi_command(dbi, MIPI_DCS_SOFT_RESET);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
- if (mipi->regulator)
- regulator_disable(mipi->regulator);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
return ret;
}
@@ -567,7 +678,7 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
* per MIPI DSC spec should wait 5ms after soft reset. If we didn't,
* we assume worst case and wait 120ms.
*/
- if (mipi->reset)
+ if (dbi->reset)
usleep_range(5000, 20000);
else
msleep(120);
@@ -577,7 +688,7 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
/**
* mipi_dbi_poweron_reset - MIPI DBI poweron and reset
- * @mipi: MIPI DBI structure
+ * @dbidev: MIPI DBI device structure
*
* This function enables the regulator if used and does a hardware and software
* reset.
@@ -585,15 +696,15 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
* Returns:
* Zero on success, or a negative error code.
*/
-int mipi_dbi_poweron_reset(struct mipi_dbi *mipi)
+int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev)
{
- return mipi_dbi_poweron_reset_conditional(mipi, false);
+ return mipi_dbi_poweron_reset_conditional(dbidev, false);
}
EXPORT_SYMBOL(mipi_dbi_poweron_reset);
/**
* mipi_dbi_poweron_conditional_reset - MIPI DBI poweron and conditional reset
- * @mipi: MIPI DBI structure
+ * @dbidev: MIPI DBI device structure
*
* This function enables the regulator if used and if the display is off, it
* does a hardware and software reset. If mipi_dbi_display_is_on() determines
@@ -603,9 +714,9 @@ EXPORT_SYMBOL(mipi_dbi_poweron_reset);
* Zero if the controller was reset, 1 if the display was already on, or a
* negative error code.
*/
-int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi)
+int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev)
{
- return mipi_dbi_poweron_reset_conditional(mipi, true);
+ return mipi_dbi_poweron_reset_conditional(dbidev, true);
}
EXPORT_SYMBOL(mipi_dbi_poweron_conditional_reset);
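Sketch of a typical pipe-enable path combining the renamed helpers, with placeholder init commands (foo_pipe_enable() is not from this patch):

static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
			    struct drm_crtc_state *crtc_state,
			    struct drm_plane_state *plane_state)
{
	struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
	struct mipi_dbi *dbi = &dbidev->dbi;
	int ret;

	ret = mipi_dbi_poweron_conditional_reset(dbidev);
	if (ret < 0)
		return;
	if (ret == 0) {
		/* Controller was reset: send the panel's init sequence. */
		mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
		msleep(120);
		mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
	}

	/* Mark the pipe enabled, flush the framebuffer, enable the backlight. */
	mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
}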
@@ -629,6 +740,15 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len)
}
EXPORT_SYMBOL(mipi_dbi_spi_cmd_max_speed);
+static bool mipi_dbi_machine_little_endian(void)
+{
+#if defined(__LITTLE_ENDIAN)
+ return true;
+#else
+ return false;
+#endif
+}
+
/*
* MIPI DBI Type C Option 1
*
@@ -647,15 +767,15 @@ EXPORT_SYMBOL(mipi_dbi_spi_cmd_max_speed);
* 76543210
*/
-static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
+static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc,
const void *buf, size_t len,
unsigned int bpw)
{
- bool swap_bytes = (bpw == 16 && tinydrm_machine_little_endian());
- size_t chunk, max_chunk = mipi->tx_buf9_len;
- struct spi_device *spi = mipi->spi;
+ bool swap_bytes = (bpw == 16 && mipi_dbi_machine_little_endian());
+ size_t chunk, max_chunk = dbi->tx_buf9_len;
+ struct spi_device *spi = dbi->spi;
struct spi_transfer tr = {
- .tx_buf = mipi->tx_buf9,
+ .tx_buf = dbi->tx_buf9,
.bits_per_word = 8,
};
struct spi_message m;
@@ -663,7 +783,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
int i, ret;
u8 *dst;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -675,13 +795,11 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
return -EINVAL;
/* Command: pad no-op's (zeroes) at beginning of block */
- dst = mipi->tx_buf9;
+ dst = dbi->tx_buf9;
memset(dst, 0, 9);
dst[8] = *src;
tr.len = 9;
- tinydrm_dbg_spi_message(spi, &m);
-
return spi_sync(spi, &m);
}
@@ -697,7 +815,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
chunk = min(len, max_chunk);
len -= chunk;
- dst = mipi->tx_buf9;
+ dst = dbi->tx_buf9;
if (chunk < 8) {
u8 val, carry = 0;
@@ -759,7 +877,6 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
tr.len = chunk + added;
- tinydrm_dbg_spi_message(spi, &m);
ret = spi_sync(spi, &m);
if (ret)
return ret;
@@ -768,11 +885,11 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
+static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
const void *buf, size_t len,
unsigned int bpw)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
struct spi_transfer tr = {
.bits_per_word = 9,
};
@@ -783,14 +900,14 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
u16 *dst16;
int ret;
- if (!tinydrm_spi_bpw_supported(spi, 9))
- return mipi_dbi_spi1e_transfer(mipi, dc, buf, len, bpw);
+ if (!spi_is_bpw_supported(spi, 9))
+ return mipi_dbi_spi1e_transfer(dbi, dc, buf, len, bpw);
tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len);
- max_chunk = mipi->tx_buf9_len;
- dst16 = mipi->tx_buf9;
+ max_chunk = dbi->tx_buf9_len;
+ dst16 = dbi->tx_buf9;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -803,7 +920,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
size_t chunk = min(len, max_chunk);
unsigned int i;
- if (bpw == 16 && tinydrm_machine_little_endian()) {
+ if (bpw == 16 && mipi_dbi_machine_little_endian()) {
for (i = 0; i < (chunk * 2); i += 2) {
dst16[i] = *src16 >> 8;
dst16[i + 1] = *src16++ & 0xFF;
@@ -823,7 +940,6 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
tr.len = chunk;
len -= chunk;
- tinydrm_dbg_spi_message(spi, &m);
ret = spi_sync(spi, &m);
if (ret)
return ret;
@@ -832,30 +948,30 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
u8 *parameters, size_t num)
{
unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
int ret;
- if (mipi_dbi_command_is_read(mipi, *cmd))
- return -ENOTSUPP;
+ if (mipi_dbi_command_is_read(dbi, *cmd))
+ return -EOPNOTSUPP;
MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
- ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
+ ret = mipi_dbi_spi1_transfer(dbi, 0, cmd, 1, 8);
if (ret || !num)
return ret;
- return mipi_dbi_spi1_transfer(mipi, 1, parameters, num, bpw);
+ return mipi_dbi_spi1_transfer(dbi, 1, parameters, num, bpw);
}
/* MIPI DBI Type C Option 3 */
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
u8 *data, size_t len)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED,
spi->max_speed_hz / 2);
struct spi_transfer tr[2] = {
@@ -892,22 +1008,20 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
return -ENOMEM;
tr[1].rx_buf = buf;
- gpiod_set_value_cansleep(mipi->dc, 0);
+ gpiod_set_value_cansleep(dbi->dc, 0);
spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
ret = spi_sync(spi, &m);
if (ret)
goto err_free;
- tinydrm_dbg_spi_message(spi, &m);
-
if (tr[1].len == len) {
memcpy(data, buf, len);
} else {
unsigned int i;
for (i = 0; i < len; i++)
- data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ data[i] = (buf[i] << 1) | (buf[i + 1] >> 7);
}
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
@@ -918,42 +1032,42 @@ err_free:
return ret;
}
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *dbi, u8 *cmd,
u8 *par, size_t num)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
unsigned int bpw = 8;
u32 speed_hz;
int ret;
- if (mipi_dbi_command_is_read(mipi, *cmd))
- return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
+ if (mipi_dbi_command_is_read(dbi, *cmd))
+ return mipi_dbi_typec3_command_read(dbi, cmd, par, num);
MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
- gpiod_set_value_cansleep(mipi->dc, 0);
+ gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
if (ret || !num)
return ret;
- if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !dbi->swap_bytes)
bpw = 16;
- gpiod_set_value_cansleep(mipi->dc, 1);
+ gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
- return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
+ return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
}
/**
- * mipi_dbi_spi_init - Initialize MIPI DBI SPI interfaced controller
+ * mipi_dbi_spi_init - Initialize MIPI DBI SPI interface
* @spi: SPI device
- * @mipi: &mipi_dbi structure to initialize
+ * @dbi: MIPI DBI structure to initialize
* @dc: D/C gpio (optional)
*
- * This function sets &mipi_dbi->command, enables &mipi->read_commands for the
- * usual read commands. It should be followed by a call to mipi_dbi_init() or
+ * This function sets &mipi_dbi->command, enables &mipi_dbi->read_commands for the
+ * usual read commands. It should be followed by a call to mipi_dbi_dev_init() or
* a driver-specific init.
*
* If @dc is set, a Type C Option 3 interface is assumed, if not
@@ -968,18 +1082,12 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
+int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
struct gpio_desc *dc)
{
- size_t tx_size = tinydrm_spi_max_transfer_size(spi, 0);
struct device *dev = &spi->dev;
int ret;
- if (tx_size < 16) {
- DRM_ERROR("SPI transmit buffer too small: %zu\n", tx_size);
- return -EINVAL;
- }
-
/*
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in
@@ -998,29 +1106,75 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
}
}
- mipi->spi = spi;
- mipi->read_commands = mipi_dbi_dcs_read_commands;
+ dbi->spi = spi;
+ dbi->read_commands = mipi_dbi_dcs_read_commands;
if (dc) {
- mipi->command = mipi_dbi_typec3_command;
- mipi->dc = dc;
- if (tinydrm_machine_little_endian() &&
- !tinydrm_spi_bpw_supported(spi, 16))
- mipi->swap_bytes = true;
+ dbi->command = mipi_dbi_typec3_command;
+ dbi->dc = dc;
+ if (mipi_dbi_machine_little_endian() && !spi_is_bpw_supported(spi, 16))
+ dbi->swap_bytes = true;
} else {
- mipi->command = mipi_dbi_typec1_command;
- mipi->tx_buf9_len = tx_size;
- mipi->tx_buf9 = devm_kmalloc(dev, tx_size, GFP_KERNEL);
- if (!mipi->tx_buf9)
+ dbi->command = mipi_dbi_typec1_command;
+ dbi->tx_buf9_len = SZ_16K;
+ dbi->tx_buf9 = devm_kmalloc(dev, dbi->tx_buf9_len, GFP_KERNEL);
+ if (!dbi->tx_buf9)
return -ENOMEM;
}
+ mutex_init(&dbi->cmdlock);
+
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_init);
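For illustration only (not part of the patch): a probe sketch showing how a driver that embeds the new struct mipi_dbi_dev might call mipi_dbi_spi_init() after this rework. The "hypo_" names and the way dbidev is allocated are assumptions.

#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <drm/drm_mipi_dbi.h>

static int hypo_display_probe(struct spi_device *spi, struct mipi_dbi_dev *dbidev)
{
	struct gpio_desc *dc;

	/* Optional D/C gpio: present -> Type C option 3, absent -> option 1 */
	dc = devm_gpiod_get_optional(&spi->dev, "dc", GPIOD_OUT_LOW);
	if (IS_ERR(dc))
		return PTR_ERR(dc);

	/* Sets up dbi->command, read_commands and the SPI plumbing */
	return mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
}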
+/**
+ * mipi_dbi_spi_transfer - SPI transfer helper
+ * @spi: SPI device
+ * @speed_hz: Override speed (optional)
+ * @bpw: Bits per word
+ * @buf: Buffer to transfer
+ * @len: Buffer length
+ *
+ * This SPI transfer helper breaks up the transfer of @buf into chunks which
+ * the SPI controller driver can handle.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
+ u8 bpw, const void *buf, size_t len)
+{
+ size_t max_chunk = spi_max_transfer_size(spi);
+ struct spi_transfer tr = {
+ .bits_per_word = bpw,
+ .speed_hz = speed_hz,
+ };
+ struct spi_message m;
+ size_t chunk;
+ int ret;
+
+ spi_message_init_with_transfers(&m, &tr, 1);
+
+ while (len) {
+ chunk = min(len, max_chunk);
+
+ tr.tx_buf = buf;
+ tr.len = chunk;
+ buf += chunk;
+ len -= chunk;
+
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dbi_spi_transfer);
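A hedged usage sketch for the new exported helper: the wrapper name and the 16 bits-per-word pixel format are assumptions, and passing 0 as speed_hz leaves the SPI core at the device's default clock rate.

static int hypo_send_pixels(struct spi_device *spi, const u16 *pixels, size_t npixels)
{
	/* The helper splits the buffer into controller-sized chunks internally. */
	return mipi_dbi_spi_transfer(spi, 0, 16, pixels, npixels * sizeof(*pixels));
}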
+
#endif /* CONFIG_SPI */
#ifdef CONFIG_DEBUG_FS
@@ -1030,13 +1184,12 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
- struct mipi_dbi *mipi = m->private;
+ struct mipi_dbi_dev *dbidev = m->private;
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
- unsigned int i;
- int ret, idx;
+ int i, ret, idx;
- if (!drm_dev_enter(&mipi->drm, &idx))
+ if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
buf = memdup_user_nul(ubuf, count);
@@ -1075,7 +1228,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
}
}
- ret = mipi_dbi_command_buf(mipi, cmd, parameters, i);
+ ret = mipi_dbi_command_buf(&dbidev->dbi, cmd, parameters, i);
err_free:
kfree(buf);
@@ -1087,16 +1240,17 @@ err_exit:
static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
- struct mipi_dbi *mipi = m->private;
+ struct mipi_dbi_dev *dbidev = m->private;
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 cmd, val[4];
int ret, idx;
size_t len;
- if (!drm_dev_enter(&mipi->drm, &idx))
+ if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
for (cmd = 0; cmd < 255; cmd++) {
- if (!mipi_dbi_command_is_read(mipi, cmd))
+ if (!mipi_dbi_command_is_read(dbi, cmd))
continue;
switch (cmd) {
@@ -1116,7 +1270,7 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
}
seq_printf(m, "%02x: ", cmd);
- ret = mipi_dbi_command_buf(mipi, cmd, val, len);
+ ret = mipi_dbi_command_buf(dbi, cmd, val, len);
if (ret) {
seq_puts(m, "XX\n");
continue;
@@ -1158,12 +1312,12 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
*/
int mipi_dbi_debugfs_init(struct drm_minor *minor)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
- if (mipi->read_commands)
+ if (dbidev->dbi.read_commands)
mode |= S_IRUGO;
- debugfs_create_file("command", mode, minor->debugfs_root, mipi,
+ debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
&mipi_dbi_debugfs_command_fops);
return 0;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index ad19df0686c9..55531895dde6 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <drm/drm_dsc.h>
#include <video/mipi_display.h>
/**
@@ -93,11 +94,6 @@ static struct bus_type mipi_dsi_bus_type = {
.pm = &mipi_dsi_device_pm_ops,
};
-static int of_device_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/**
* of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
* device tree node
@@ -110,7 +106,7 @@ struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
{
struct device *dev;
- dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
+ dev = bus_find_device_by_of_node(&mipi_dsi_bus_type, np);
return dev ? to_mipi_dsi_device(dev) : NULL;
}
@@ -378,6 +374,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_V_SYNC_END:
case MIPI_DSI_H_SYNC_START:
case MIPI_DSI_H_SYNC_END:
+ case MIPI_DSI_COMPRESSION_MODE:
case MIPI_DSI_END_OF_TRANSMISSION:
case MIPI_DSI_COLOR_MODE_OFF:
case MIPI_DSI_COLOR_MODE_ON:
@@ -392,7 +389,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
- case MIPI_DSI_DCS_COMPRESSION_MODE:
+ case MIPI_DSI_EXECUTE_QUEUE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
@@ -411,11 +408,12 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
- case MIPI_DSI_PPS_LONG_WRITE:
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_PICTURE_PARAMETER_SET:
+ case MIPI_DSI_COMPRESSED_PIXEL_STREAM:
case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
@@ -552,6 +550,56 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
/**
+ * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * @dsi: DSI peripheral device
+ * @enable: Whether to enable or disable the DSC
+ *
+ * Enable or disable Display Stream Compression on the peripheral using the
+ * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+{
+ /* Note: Needs updating for non-default PPS or algorithm */
+ u8 tx[2] = { enable << 0, 0 };
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_COMPRESSION_MODE,
+ .tx_len = sizeof(tx),
+ .tx_buf = tx,
+ };
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
+
+ return (ret < 0) ? ret : 0;
+}
+EXPORT_SYMBOL(mipi_dsi_compression_mode);
+
+/**
+ * mipi_dsi_picture_parameter_set() - transmit the DSC PPS to the peripheral
+ * @dsi: DSI peripheral device
+ * @pps: VESA DSC 1.1 Picture Parameter Set
+ *
+ * Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_PICTURE_PARAMETER_SET,
+ .tx_len = sizeof(*pps),
+ .tx_buf = pps,
+ };
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
+
+ return (ret < 0) ? ret : 0;
+}
+EXPORT_SYMBOL(mipi_dsi_picture_parameter_set);
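A sketch of how the two new DSC helpers might be combined by a panel driver. How the Picture Parameter Set gets filled in is device specific, and the function name is hypothetical.

static int hypo_panel_enable_dsc(struct mipi_dsi_device *dsi,
				 const struct drm_dsc_picture_parameter_set *pps)
{
	ssize_t err;

	/* Program the PPS first, then switch compression on. */
	err = mipi_dsi_picture_parameter_set(dsi, pps);
	if (err < 0)
		return err;

	err = mipi_dsi_compression_mode(dsi, true);
	return err < 0 ? err : 0;
}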
+
+/**
* mipi_dsi_generic_write() - transmit data using a generic write packet
* @dsi: DSI peripheral device
* @payload: buffer containing the payload
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 9a59865ce574..2a6e34663146 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
node->__subtree_last = LAST(node);
- if (hole_node->allocated) {
+ if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
node->hole_size = 0;
rm_hole(hole);
@@ -472,7 +472,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
u64 remainder_mask;
bool once;
- DRM_MM_BUG_ON(range_start >= range_end);
+ DRM_MM_BUG_ON(range_start > range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
rm_hole(hole);
if (adj_start > hole_start)
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+ return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+}
+
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
@@ -584,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = false;
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
+
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
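Since node->allocated is now a flag bit, driver code that peeked at the field needs the accessor instead. A minimal sketch, assuming the drm_mm instance was already set up with drm_mm_init():

#include <linux/sizes.h>
#include <drm/drm_mm.h>

static void hypo_reserve_and_release(struct drm_mm *mm)
{
	struct drm_mm_node node = {};

	node.start = 0;
	node.size = SZ_4K;

	if (drm_mm_reserve_node(mm, &node))
		return;

	if (drm_mm_node_allocated(&node))	/* replaces reading node.allocated */
		drm_mm_remove_node(&node);
}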
@@ -605,10 +611,11 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
struct drm_mm *mm = old->mm;
- DRM_MM_BUG_ON(!old->allocated);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
*new = *old;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
list_replace(&old->node_list, &new->node_list);
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
@@ -622,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
&mm->holes_addr);
}
- old->allocated = false;
- new->allocated = true;
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -731,9 +737,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = true;
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+ __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
@@ -818,8 +824,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
- DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = false;
+ DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
+ __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
@@ -917,7 +923,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = false;
+ mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 7bc03c3c154f..08e6eff6a179 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -27,6 +27,7 @@
#include <drm/drm_file.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
+#include <linux/dma-resv.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -415,6 +416,33 @@ void drm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_total_plane = 0;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ struct drm_modeset_acquire_ctx modeset_ctx;
+ struct ww_acquire_ctx resv_ctx;
+ struct dma_resv resv;
+ int ret;
+
+ dma_resv_init(&resv);
+
+ drm_modeset_acquire_init(&modeset_ctx, 0);
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &modeset_ctx);
+ if (ret == -EDEADLK)
+ ret = drm_modeset_backoff(&modeset_ctx);
+
+ ww_acquire_init(&resv_ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&resv, &resv_ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&resv, &resv_ctx);
+
+ dma_resv_unlock(&resv);
+ ww_acquire_fini(&resv_ctx);
+
+ drm_modeset_drop_locks(&modeset_ctx);
+ drm_modeset_acquire_fini(&modeset_ctx);
+ dma_resv_fini(&resv);
+ }
}
EXPORT_SYMBOL(drm_mode_config_init);
@@ -428,8 +456,6 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 1c6e51135962..35c2719407a8 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -42,6 +42,8 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
{
int ret;
+ WARN_ON(!dev->driver->load && dev->registered && !obj_free_cb);
+
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
1, 0, GFP_KERNEL);
@@ -102,6 +104,8 @@ void drm_mode_object_register(struct drm_device *dev,
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object)
{
+ WARN_ON(!dev->driver->load && dev->registered && !object->free_cb);
+
mutex_lock(&dev->mode_config.idr_mutex);
if (object->id) {
idr_remove(&dev->mode_config.object_idr, object->id);
@@ -220,12 +224,26 @@ EXPORT_SYMBOL(drm_mode_object_get);
* This attaches the given property to the modeset object with the given initial
* value. Currently this function cannot fail since the properties are stored in
* a statically sized array.
+ *
+ * Note that all properties must be attached before the object itself is
+ * registered and accessible from userspace.
*/
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
{
int count = obj->properties->count;
+ struct drm_device *dev = property->dev;
+
+
+ if (obj->type == DRM_MODE_OBJECT_CONNECTOR) {
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ WARN_ON(!dev->driver->load &&
+ connector->registration_state == DRM_CONNECTOR_REGISTERED);
+ } else {
+ WARN_ON(!dev->driver->load && dev->registered);
+ }
if (count == DRM_OBJECT_MAX_PROPERTY) {
WARN(1, "Failed to attach object property (type: 0x%x). Please "
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0369e690f36..10336b144c72 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -233,7 +233,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
/* 3) Nominal HSync width (% of line period) - default 8 */
#define CVT_HSYNC_PERCENTAGE 8
unsigned int hblank_percentage;
- int vsyncandback_porch, vback_porch, hblank;
+ int vsyncandback_porch, __maybe_unused vback_porch, hblank;
/* estimated the horizontal period */
tmp1 = HV_FACTOR * 1000000 -
@@ -386,9 +386,10 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
int top_margin, bottom_margin;
int interlace;
unsigned int hfreq_est;
- int vsync_plus_bp, vback_porch;
- unsigned int vtotal_lines, vfieldrate_est, hperiod;
- unsigned int vfield_rate, vframe_rate;
+ int vsync_plus_bp, __maybe_unused vback_porch;
+ unsigned int vtotal_lines, __maybe_unused vfieldrate_est;
+ unsigned int __maybe_unused hperiod;
+ unsigned int vfield_rate, __maybe_unused vframe_rate;
int left_margin, right_margin;
unsigned int total_active_pixels, ideal_duty_cycle;
unsigned int hblank, total_pixels, pixel_freq;
@@ -1454,6 +1455,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
}
static int drm_mode_parse_cmdline_extra(const char *str, int length,
+ bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
@@ -1462,9 +1464,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
for (i = 0; i < length; i++) {
switch (str[i]) {
case 'i':
+ if (freestanding)
+ return -EINVAL;
+
mode->interlace = true;
break;
case 'm':
+ if (freestanding)
+ return -EINVAL;
+
mode->margins = true;
break;
case 'D':
@@ -1542,6 +1550,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
if (extras) {
int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
1,
+ false,
connector,
mode);
if (ret)
@@ -1560,33 +1569,76 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
return 0;
}
-static int drm_mode_parse_cmdline_options(char *str, size_t len,
+static int drm_mode_parse_cmdline_int(const char *delim, unsigned int *int_ret)
+{
+ const char *value;
+ char *endp;
+
+ /*
+ * delim must point to the '=', otherwise it is a syntax error and
+ * if delim points to the terminating zero, then delim + 1 will point
+ * past the end of the string.
+ */
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ *int_ret = simple_strtol(value, &endp, 10);
+
+ /* Make sure we have parsed something */
+ if (endp == value)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int drm_mode_parse_panel_orientation(const char *delim,
+ struct drm_cmdline_mode *mode)
+{
+ const char *value;
+
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ delim = strchr(value, ',');
+ if (!delim)
+ delim = value + strlen(value);
+
+ if (!strncmp(value, "normal", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ else if (!strncmp(value, "upside_down", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ else if (!strncmp(value, "left_side_up", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ else if (!strncmp(value, "right_side_up", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int drm_mode_parse_cmdline_options(const char *str,
+ bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
- unsigned int rotation = 0;
- char *sep = str;
-
- while ((sep = strchr(sep, ','))) {
- char *delim, *option;
+ unsigned int deg, margin, rotation = 0;
+ const char *delim, *option, *sep;
- option = sep + 1;
+ option = str;
+ do {
delim = strchr(option, '=');
if (!delim) {
delim = strchr(option, ',');
if (!delim)
- delim = str + len;
+ delim = option + strlen(option);
}
if (!strncmp(option, "rotate", delim - option)) {
- const char *value = delim + 1;
- unsigned int deg;
-
- deg = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &deg))
return -EINVAL;
switch (deg) {
@@ -1611,64 +1663,51 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
}
} else if (!strncmp(option, "reflect_x", delim - option)) {
rotation |= DRM_MODE_REFLECT_X;
- sep = delim;
} else if (!strncmp(option, "reflect_y", delim - option)) {
rotation |= DRM_MODE_REFLECT_Y;
- sep = delim;
} else if (!strncmp(option, "margin_right", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.right = margin;
} else if (!strncmp(option, "margin_left", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.left = margin;
} else if (!strncmp(option, "margin_top", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.top = margin;
} else if (!strncmp(option, "margin_bottom", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.bottom = margin;
+ } else if (!strncmp(option, "panel_orientation", delim - option)) {
+ if (drm_mode_parse_panel_orientation(delim, mode))
+ return -EINVAL;
} else {
return -EINVAL;
}
- }
+ sep = strchr(delim, ',');
+ option = sep + 1;
+ } while (sep);
+
+ if (rotation && freestanding)
+ return -EINVAL;
mode->rotation_reflection = rotation;
return 0;
}
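For reference, the reworked option parser above accepts strings such as video=HDMI-A-1:720x480@60,rotate=180,panel_orientation=upside_down on the kernel command line; the connector name and option values here are purely illustrative, and rotation/reflection options are still rejected when given freestanding without a mode.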
+static const char * const drm_named_modes_whitelist[] = {
+ "NTSC",
+ "PAL",
+};
+
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
* @mode_option: optional per connector mode option
@@ -1699,58 +1738,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_cmdline_mode *mode)
{
const char *name;
- bool named_mode = false, parse_extras = false;
+ bool freestanding = false, parse_extras = false;
unsigned int bpp_off = 0, refresh_off = 0, options_off = 0;
unsigned int mode_end = 0;
- char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
- char *options_ptr = NULL;
+ const char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
+ const char *options_ptr = NULL;
char *bpp_end_ptr = NULL, *refresh_end_ptr = NULL;
- int ret;
+ int i, len, ret;
-#ifdef CONFIG_FB
- if (!mode_option)
- mode_option = fb_mode_option;
-#endif
+ memset(mode, 0, sizeof(*mode));
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- if (!mode_option) {
- mode->specified = false;
+ if (!mode_option)
return false;
- }
name = mode_option;
- /*
- * This is a bit convoluted. To differentiate between the
- * named modes and poorly formatted resolutions, we need a
- * bunch of things:
- * - We need to make sure that the first character (which
- * would be our resolution in X) is a digit.
- * - However, if the X resolution is missing, then we end up
- * with something like x<yres>, with our first character
- * being an alpha-numerical character, which would be
- * considered a named mode.
- *
- * If this isn't enough, we should add more heuristics here,
- * and matching unit-tests.
- */
- if (!isdigit(name[0]) && name[0] != 'x')
- named_mode = true;
-
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strchr(name, '-');
- if (bpp_ptr) {
+ if (bpp_ptr)
bpp_off = bpp_ptr - name;
- mode->bpp_specified = true;
- }
refresh_ptr = strchr(name, '@');
- if (refresh_ptr) {
- if (named_mode)
- return false;
-
+ if (refresh_ptr)
refresh_off = refresh_ptr - name;
- mode->refresh_specified = true;
- }
/* Locate the start of named options */
options_ptr = strchr(name, ',');
@@ -1764,29 +1775,58 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
mode_end = refresh_off;
} else if (options_ptr) {
mode_end = options_off;
+ parse_extras = true;
} else {
mode_end = strlen(name);
parse_extras = true;
}
- if (named_mode) {
- if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
- return false;
- strscpy(mode->name, name, mode_end + 1);
- } else {
+ /* First check for a named mode */
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++) {
+ ret = str_has_prefix(name, drm_named_modes_whitelist[i]);
+ if (ret == mode_end) {
+ if (refresh_ptr)
+ return false; /* named + refresh is invalid */
+
+ strcpy(mode->name, drm_named_modes_whitelist[i]);
+ mode->specified = true;
+ break;
+ }
+ }
+
+ /* No named mode? Check for a normal mode argument, e.g. 1024x768 */
+ if (!mode->specified && isdigit(name[0])) {
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
parse_extras,
connector,
mode);
if (ret)
return false;
+
+ mode->specified = true;
+ }
+
+ /* No mode? Check for freestanding extras and/or options */
+ if (!mode->specified) {
+ unsigned int len = strlen(mode_option);
+
+ if (bpp_ptr || refresh_ptr)
+ return false; /* syntax error */
+
+ if (len == 1 || (len >= 2 && mode_option[1] == ','))
+ extra_ptr = mode_option;
+ else
+ options_ptr = mode_option - 1;
+
+ freestanding = true;
}
- mode->specified = true;
if (bpp_ptr) {
ret = drm_mode_parse_cmdline_bpp(bpp_ptr, &bpp_end_ptr, mode);
if (ret)
return false;
+
+ mode->bpp_specified = true;
}
if (refresh_ptr) {
@@ -1794,6 +1834,8 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
&refresh_end_ptr, mode);
if (ret)
return false;
+
+ mode->refresh_specified = true;
}
/*
@@ -1807,20 +1849,21 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
else if (refresh_ptr)
extra_ptr = refresh_end_ptr;
- if (extra_ptr &&
- extra_ptr != options_ptr) {
- int len = strlen(name) - (extra_ptr - name);
+ if (extra_ptr) {
+ if (options_ptr)
+ len = options_ptr - extra_ptr;
+ else
+ len = strlen(extra_ptr);
- ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
+ ret = drm_mode_parse_cmdline_extra(extra_ptr, len, freestanding,
connector, mode);
if (ret)
return false;
}
if (options_ptr) {
- int len = strlen(name) - (options_ptr - name);
-
- ret = drm_mode_parse_cmdline_options(options_ptr, len,
+ ret = drm_mode_parse_cmdline_options(options_ptr + 1,
+ freestanding,
connector, mode);
if (ret)
return false;
@@ -1914,8 +1957,11 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
case HDMI_PICTURE_ASPECT_256_135:
out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
break;
- case HDMI_PICTURE_ASPECT_RESERVED:
default:
+ WARN(1, "Invalid aspect ratio (0%x) on mode\n",
+ in->picture_aspect_ratio);
+ /* fall through */
+ case HDMI_PICTURE_ASPECT_NONE:
out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
break;
}
@@ -1974,20 +2020,22 @@ int drm_mode_convert_umode(struct drm_device *dev,
switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
case DRM_MODE_FLAG_PIC_AR_4_3:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
break;
case DRM_MODE_FLAG_PIC_AR_16_9:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
break;
case DRM_MODE_FLAG_PIC_AR_64_27:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
break;
case DRM_MODE_FLAG_PIC_AR_256_135:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
break;
- default:
+ case DRM_MODE_FLAG_PIC_AR_NONE:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
break;
+ default:
+ return -EINVAL;
}
out->status = drm_mode_validate_driver(dev, out);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 43d89dd59c6b..b50b44e76279 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -250,11 +250,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
- if (!of_device_is_available(remote)) {
- of_node_put(remote);
- return -ENODEV;
- }
-
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
@@ -279,3 +274,119 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
+
+enum drm_of_lvds_pixels {
+ DRM_OF_LVDS_EVEN = BIT(0),
+ DRM_OF_LVDS_ODD = BIT(1),
+};
+
+static int drm_of_lvds_get_port_pixels_type(struct device_node *port_node)
+{
+ bool even_pixels =
+ of_property_read_bool(port_node, "dual-lvds-even-pixels");
+ bool odd_pixels =
+ of_property_read_bool(port_node, "dual-lvds-odd-pixels");
+
+ return (even_pixels ? DRM_OF_LVDS_EVEN : 0) |
+ (odd_pixels ? DRM_OF_LVDS_ODD : 0);
+}
+
+static int drm_of_lvds_get_remote_pixels_type(
+ const struct device_node *port_node)
+{
+ struct device_node *endpoint = NULL;
+ int pixels_type = -EPIPE;
+
+ for_each_child_of_node(port_node, endpoint) {
+ struct device_node *remote_port;
+ int current_pt;
+
+ if (!of_node_name_eq(endpoint, "endpoint"))
+ continue;
+
+ remote_port = of_graph_get_remote_port(endpoint);
+ if (!remote_port) {
+ of_node_put(remote_port);
+ return -EPIPE;
+ }
+
+ current_pt = drm_of_lvds_get_port_pixels_type(remote_port);
+ of_node_put(remote_port);
+ if (pixels_type < 0)
+ pixels_type = current_pt;
+
+ /*
+ * Sanity check: ensure that all remote endpoints have the same
+ * pixel type. We may lift this restriction later if we need to
+ * support multiple sinks with different dual-link
+ * configurations by passing the endpoints explicitly to
+ * drm_of_lvds_get_dual_link_pixel_order().
+ */
+ if (!current_pt || pixels_type != current_pt) {
+ of_node_put(remote_port);
+ return -EINVAL;
+ }
+ }
+
+ return pixels_type;
+}
+
+/**
+ * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order
+ * @port1: First DT port node of the Dual-link LVDS source
+ * @port2: Second DT port node of the Dual-link LVDS source
+ *
+ * An LVDS dual-link connection is made of two links, with even pixels
+ * transitting on one link, and odd pixels on the other link. This function
+ * returns, for two ports of an LVDS dual-link source, which port shall transmit
+ * the even and odd pixels, based on the requirements of the connected sink.
+ *
+ * The pixel order is determined from the dual-lvds-even-pixels and
+ * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those
+ * properties are not present, or if their usage is not valid, this function
+ * returns -EINVAL.
+ *
+ * If either port is not connected, this function returns -EPIPE.
+ *
+ * @port1 and @port2 are typically DT sibling nodes, but may have different
+ * parents when, for instance, two separate LVDS encoders carry the even and odd
+ * pixels.
+ *
+ * Return:
+ * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 carries even pixels and @port2
+ * carries odd pixels
+ * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 carries odd pixels and @port2
+ * carries even pixels
+ * * -EINVAL - @port1 and @port2 are not connected to a dual-link LVDS sink, or
+ * the sink configuration is invalid
+ * * -EPIPE - when @port1 or @port2 are not connected
+ */
+int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2)
+{
+ int remote_p1_pt, remote_p2_pt;
+
+ if (!port1 || !port2)
+ return -EINVAL;
+
+ remote_p1_pt = drm_of_lvds_get_remote_pixels_type(port1);
+ if (remote_p1_pt < 0)
+ return remote_p1_pt;
+
+ remote_p2_pt = drm_of_lvds_get_remote_pixels_type(port2);
+ if (remote_p2_pt < 0)
+ return remote_p2_pt;
+
+ /*
+ * A valid dual-LVDS bus is found when one remote port is marked with
+ * "dual-lvds-even-pixels" and the other remote port is marked with
+ * "dual-lvds-odd-pixels"; bail out if the markers are not right.
+ */
+ if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD)
+ return -EINVAL;
+
+ return remote_p1_pt == DRM_OF_LVDS_EVEN ?
+ DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS :
+ DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+}
+EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
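A sketch of a dual-link LVDS encoder driver querying the new helper; the port numbers and the surrounding names are assumptions about that driver's DT binding.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_of.h>

static int hypo_lvds_dual_link_init(struct device *dev)
{
	struct device_node *port1, *port2;
	int order;

	port1 = of_graph_get_port_by_id(dev->of_node, 1);
	port2 = of_graph_get_port_by_id(dev->of_node, 2);
	order = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
	of_node_put(port1);
	of_node_put(port2);

	if (order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS)
		dev_dbg(dev, "port@1 drives the even pixels\n");

	return order < 0 ? order : 0;
}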
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index dbd5b873e8f2..8c7bac85a793 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -21,11 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
static DEFINE_MUTEX(panel_lock);
static LIST_HEAD(panel_list);
@@ -44,13 +46,21 @@ static LIST_HEAD(panel_list);
/**
* drm_panel_init - initialize a panel
* @panel: DRM panel
+ * @dev: parent device of the panel
+ * @funcs: panel operations
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
*
- * Sets up internal fields of the panel so that it can subsequently be added
- * to the registry.
+ * Initialize the panel structure for subsequent registration with
+ * drm_panel_add().
*/
-void drm_panel_init(struct drm_panel *panel)
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
+ panel->dev = dev;
+ panel->funcs = funcs;
+ panel->connector_type = connector_type;
}
EXPORT_SYMBOL(drm_panel_init);
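A sketch of the new four-argument drm_panel_init() in a hypothetical panel driver; the funcs table and connector type are whatever that driver really provides.

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct hypo_panel {
	struct drm_panel panel;
};

static int hypo_panel_register(struct device *dev, struct hypo_panel *ctx,
			       const struct drm_panel_funcs *funcs)
{
	drm_panel_init(&ctx->panel, dev, funcs, DRM_MODE_CONNECTOR_DPI);

	return drm_panel_add(&ctx->panel);
}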
@@ -104,12 +114,6 @@ EXPORT_SYMBOL(drm_panel_remove);
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
- if (panel->connector)
- return -EBUSY;
-
- panel->connector = connector;
- panel->drm = connector->dev;
-
return 0;
}
EXPORT_SYMBOL(drm_panel_attach);
@@ -123,17 +127,141 @@ EXPORT_SYMBOL(drm_panel_attach);
*
* This function should not be called by the panel device itself. It
* is only for the drm device that called drm_panel_attach().
+ */
+void drm_panel_detach(struct drm_panel *panel)
+{
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+/**
+ * drm_panel_prepare - power on a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will enable power and deassert any reset signals to
+ * the panel. After this has completed it is possible to communicate with any
+ * integrated circuitry via a command bus.
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_detach(struct drm_panel *panel)
+int drm_panel_prepare(struct drm_panel *panel)
{
- panel->connector = NULL;
- panel->drm = NULL;
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->prepare)
+ return panel->funcs->prepare(panel);
return 0;
}
-EXPORT_SYMBOL(drm_panel_detach);
+EXPORT_SYMBOL(drm_panel_prepare);
+
+/**
+ * drm_panel_unprepare - power off a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will completely power off a panel (assert the panel's
+ * reset, turn off power supplies, ...). After this function has completed, it
+ * is usually no longer possible to communicate with the panel until another
+ * call to drm_panel_prepare().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_unprepare(struct drm_panel *panel)
+{
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->unprepare)
+ return panel->funcs->unprepare(panel);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_unprepare);
+
+/**
+ * drm_panel_enable - enable a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will cause the panel display drivers to be turned on
+ * and the backlight to be enabled. Content will be visible on screen after
+ * this call completes.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_enable(struct drm_panel *panel)
+{
+ int ret;
+
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->enable) {
+ ret = panel->funcs->enable(panel);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = backlight_enable(panel->backlight);
+ if (ret < 0)
+ DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
+ ret);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_enable);
+
+/**
+ * drm_panel_disable - disable a panel
+ * @panel: DRM panel
+ *
+ * This will typically turn off the panel's backlight or disable the display
+ * drivers. For smart panels it should still be possible to communicate with
+ * the integrated circuitry via any command bus after this call.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_disable(struct drm_panel *panel)
+{
+ int ret;
+
+ if (!panel)
+ return -EINVAL;
+
+ ret = backlight_disable(panel->backlight);
+ if (ret < 0)
+ DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
+ ret);
+
+ if (panel->funcs && panel->funcs->disable)
+ return panel->funcs->disable(panel);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_disable);
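Taken together, the helpers above give pipeline drivers a symmetric power sequence. A sketch of the usual call order; the surrounding driver is hypothetical and the panel pointer typically comes from of_drm_find_panel().

static void hypo_pipe_enable(struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* power on, release reset, command bus usable */
	drm_panel_enable(panel);	/* display drivers and backlight on */
}

static void hypo_pipe_disable(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight off, display drivers off */
	drm_panel_unprepare(panel);	/* power back down */
}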
+
+/**
+ * drm_panel_get_modes - probe the available display modes of a panel
+ * @panel: DRM panel
+ * @connector: DRM connector
+ *
+ * The modes probed from the panel are automatically added to the connector
+ * that the panel is attached to.
+ *
+ * Return: The number of modes available from the panel on success or a
+ * negative error code on failure.
+ */
+int drm_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->get_modes)
+ return panel->funcs->get_modes(panel, connector);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(drm_panel_get_modes);
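Since drm_panel_get_modes() now takes the connector explicitly, a connector's .get_modes hook can simply delegate to the panel. Sketch with hypothetical names:

#include <linux/kernel.h>
#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct hypo_output {
	struct drm_connector connector;
	struct drm_panel *panel;
};

static int hypo_connector_get_modes(struct drm_connector *connector)
{
	struct hypo_output *out = container_of(connector, struct hypo_output,
					       connector);

	return drm_panel_get_modes(out->panel, connector);
}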
#ifdef CONFIG_OF
/**
@@ -174,6 +302,45 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
EXPORT_SYMBOL(of_drm_find_panel);
#endif
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+/**
+ * drm_panel_of_backlight - use backlight device node for backlight
+ * @panel: DRM panel
+ *
+ * Use this function to enable backlight handling if your panel
+ * uses device tree and has a backlight phandle.
+ *
+ * When the panel is enabled backlight will be enabled after a
+ * successful call to &drm_panel_funcs.enable()
+ *
+ * When the panel is disabled backlight will be disabled before the
+ * call to &drm_panel_funcs.disable().
+ *
+ * A typical implementation for a panel driver supporting device tree
+ * will call this function at probe time. Backlight will then be handled
+ * transparently without requiring any intervention from the driver.
+ * drm_panel_of_backlight() must be called after the call to drm_panel_init().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_of_backlight(struct drm_panel *panel)
+{
+ struct backlight_device *backlight;
+
+ if (!panel || !panel->dev)
+ return -EINVAL;
+
+ backlight = devm_of_find_backlight(panel->dev);
+
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ panel->backlight = backlight;
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_of_backlight);
+#endif
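Continuing the registration sketch from drm_panel_init() above, the backlight hookup is one extra call between init and add; again hypothetical, and only available when the backlight class is reachable.

static int hypo_panel_register_with_backlight(struct device *dev,
					      struct hypo_panel *ctx,
					      const struct drm_panel_funcs *funcs)
{
	int err;

	drm_panel_init(&ctx->panel, dev, funcs, DRM_MODE_CONNECTOR_DPI);

	/* Picks up the panel's "backlight" phandle; must follow drm_panel_init(). */
	err = drm_panel_of_backlight(&ctx->panel);
	if (err)
		return err;

	return drm_panel_add(&ctx->panel);
}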
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("DRM panel infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a86a3ab2771c..f2e43d341980 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,8 +125,6 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
-#ifdef CONFIG_PCI
-
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
@@ -284,6 +282,8 @@ err_free:
}
EXPORT_SYMBOL(drm_get_pci_dev);
+#ifdef CONFIG_DRM_LEGACY
+
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
* @driver: DRM device driver
@@ -331,17 +331,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
EXPORT_SYMBOL(drm_legacy_pci_init);
-#else
-
-void drm_pci_agp_destroy(struct drm_device *dev) {}
-
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-#endif
-
/**
* drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
* @driver: DRM device driver
@@ -367,3 +356,5 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);
+
+#endif
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d0c01318076b..86d9b0e45c8c 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -30,6 +30,7 @@
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
+#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
@@ -38,47 +39,52 @@
#include "drm_internal.h"
-/*
- * DMA-BUF/GEM Object references and lifetime overview:
- *
- * On the export the dma_buf holds a reference to the exporting GEM
- * object. It takes this reference in handle_to_fd_ioctl, when it
- * first calls .prime_export and stores the exporting GEM object in
- * the dma_buf priv. This reference needs to be released when the
- * final reference to the &dma_buf itself is dropped and its
- * &dma_buf_ops.release function is called. For GEM-based drivers,
- * the dma_buf should be exported using drm_gem_dmabuf_export() and
- * then released by drm_gem_dmabuf_release().
- *
- * On the import the importing GEM object holds a reference to the
- * dma_buf (which in turn holds a ref to the exporting GEM object).
- * It takes that reference in the fd_to_handle ioctl.
- * It calls dma_buf_get, creates an attachment to it and stores the
- * attachment in the GEM object. When this attachment is destroyed
- * when the imported object is destroyed, we remove the attachment
- * and drop the reference to the dma_buf.
- *
- * When all the references to the &dma_buf are dropped, i.e. when
- * userspace has closed both handles to the imported GEM object (through the
- * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
- * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
- * are also gone, then the dma_buf gets destroyed. This can also happen as a
- * part of the clean up procedure in the drm_release() function if userspace
- * fails to properly clean up. Note that both the kernel and userspace (by
- * keeeping the PRIME file descriptors open) can hold references onto a
- * &dma_buf.
- *
- * Thus the chain of references always flows in one direction
- * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
- *
- * Self-importing: if userspace is using PRIME as a replacement for flink
- * then it will get a fd->handle request for a GEM object that it created.
- * Drivers should detect this situation and return back the gem object
- * from the dma-buf private. Prime will do this automatically for drivers that
- * use the drm_gem_prime_{import,export} helpers.
- *
- * GEM struct &dma_buf_ops symbols are now exported. They can be resued by
- * drivers which implement GEM interface.
+/**
+ * DOC: overview and lifetime rules
+ *
+ * Similar to GEM global names, PRIME file descriptors are also used to share
+ * buffer objects across processes. They offer additional security: as file
+ * descriptors must be explicitly sent over UNIX domain sockets to be shared
+ * between applications, they can't be guessed like the globally unique GEM
+ * names.
+ *
+ * Drivers that support the PRIME API implement the
+ * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
+ * GEM based drivers must use drm_gem_prime_handle_to_fd() and
+ * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
+ * actual driver interface is provided through the &drm_gem_object_funcs.export
+ * and &drm_driver.gem_prime_import hooks.
+ *
+ * &dma_buf_ops implementations for GEM drivers are all individually exported
+ * for drivers which need to overwrite or reimplement some of them.
+ *
+ * Reference Counting for GEM Drivers
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * On the export the &dma_buf holds a reference to the exported buffer object,
+ * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
+ * IOCTL, when it first calls &drm_gem_object_funcs.export
+ * and stores the exporting GEM object in the &dma_buf.priv field. This
+ * reference needs to be released when the final reference to the &dma_buf
+ * itself is dropped and its &dma_buf_ops.release function is called. For
+ * GEM-based drivers, the &dma_buf should be exported using
+ * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
+ *
+ * Thus the chain of references always flows in one direction, avoiding loops:
+ * importing GEM object -> dma-buf -> exported GEM bo. A further complication
+ * are the lookup caches for import and export. These are required to guarantee
+ * that any given object will always have only one unique userspace handle. This
+ * is required to allow userspace to detect duplicated imports, since some GEM
+ * drivers do fail command submissions if a given buffer object is listed more
+ * than once. These import and export caches in &drm_prime_file_private only
+ * retain a weak reference, which is cleaned up when the corresponding object is
+ * released.
+ *
+ * Self-importing: If userspace is using PRIME as a replacement for flink then
+ * it will get a fd->handle request for a GEM object that it created. Drivers
+ * should detect this situation and return back the underlying object from the
+ * dma-buf private. For GEM based drivers this is handled in
+ * drm_gem_prime_import() already.
*/
struct drm_prime_member {
@@ -181,42 +187,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-/**
- * drm_gem_map_attach - dma_buf attach implementation for GEM
- * @dma_buf: buffer to attach device to
- * @attach: buffer attachment data
- *
- * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
- * used as the &dma_buf_ops.attach callback.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int drm_gem_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dma_buf->priv;
-
- return drm_gem_pin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_attach);
-
-/**
- * drm_gem_map_detach - dma_buf detach implementation for GEM
- * @dma_buf: buffer to detach from
- * @attach: attachment to be detached
- *
- * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
- * callback.
- */
-void drm_gem_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dma_buf->priv;
-
- drm_gem_unpin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_detach);
-
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf)
{
@@ -242,67 +212,21 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
}
-/**
- * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
- * @attach: attachment whose scatterlist is to be returned
- * @dir: direction of DMA transfer
- *
- * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
- * can be used as the &dma_buf_ops.map_dma_buf callback.
- *
- * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
- * on error. May return -EINTR if it is interrupted by a signal.
- */
-
-struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct sg_table *sgt;
-
- if (WARN_ON(dir == DMA_NONE))
- return ERR_PTR(-EINVAL);
-
- if (obj->funcs)
- sgt = obj->funcs->get_sg_table(obj);
- else
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
-
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
- sg_free_table(sgt);
- kfree(sgt);
- sgt = ERR_PTR(-ENOMEM);
- }
-
- return sgt;
+ mutex_init(&prime_fpriv->lock);
+ prime_fpriv->dmabufs = RB_ROOT;
+ prime_fpriv->handles = RB_ROOT;
}
-EXPORT_SYMBOL(drm_gem_map_dma_buf);
-/**
- * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
- * @attach: attachment to unmap buffer from
- * @sgt: scatterlist info of the buffer to unmap
- * @dir: direction of DMA transfer
- *
- * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
- */
-void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
- if (!sgt)
- return;
-
- dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
- sg_free_table(sgt);
- kfree(sgt);
+ /* by now drm_gem_release should've made sure the list is empty */
+ WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
-EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
- * drm_gem_dmabuf_export - dma_buf export implementation for GEM
+ * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
* @dev: parent device for the exported dmabuf
* @exp_info: the export information used by dma_buf_export()
*
@@ -316,6 +240,7 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info)
{
+ struct drm_gem_object *obj = exp_info->priv;
struct dma_buf *dma_buf;
dma_buf = dma_buf_export(exp_info);
@@ -323,18 +248,19 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
return dma_buf;
drm_dev_get(dev);
- drm_gem_object_get(exp_info->priv);
+ drm_gem_object_get(obj);
+ dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
/**
- * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
* @dma_buf: buffer to be released
*
* Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
- * must use this in their dma_buf ops structure as the release callback.
+ * must use this in their &dma_buf_ops structure as the release callback.
* drm_gem_dmabuf_release() should be used in conjunction with
* drm_gem_dmabuf_export().
*/
@@ -351,128 +277,100 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
EXPORT_SYMBOL(drm_gem_dmabuf_release);
/**
- * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
- * @dma_buf: buffer to be mapped
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
*
- * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
- * callback.
+ * This is the PRIME import function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object.
+ * The actual importing of GEM object from the dma-buf is done through the
+ * &drm_driver.gem_prime_import driver callback.
*
- * Returns the kernel virtual address.
+ * Returns 0 on success or a negative error code on failure.
*/
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
- struct drm_gem_object *obj = dma_buf->priv;
- void *vaddr;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ int ret;
- vaddr = drm_gem_vmap(obj);
- if (IS_ERR(vaddr))
- vaddr = NULL;
+ dma_buf = dma_buf_get(prime_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
- return vaddr;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+ mutex_lock(&file_priv->prime.lock);
-/**
- * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
- *
- * Releases a kernel virtual mapping. This can be used as the
- * &dma_buf_ops.vunmap callback.
- */
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
-{
- struct drm_gem_object *obj = dma_buf->priv;
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+ dma_buf, handle);
+ if (ret == 0)
+ goto out_put;
- drm_gem_vunmap(obj, vaddr);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
+ /* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
+ if (dev->driver->gem_prime_import)
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ else
+ obj = drm_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_unlock;
+ }
-/**
- * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
- * @dma_buf: buffer to be mapped
- * @vma: virtual address range
- *
- * Provides memory mapping for the buffer. This can be used as the
- * &dma_buf_ops.mmap callback.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ }
- if (!dev->driver->gem_prime_mmap)
- return -ENOSYS;
+ /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
+ drm_gem_object_put_unlocked(obj);
+ if (ret)
+ goto out_put;
- return dev->driver->gem_prime_mmap(obj, vma);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ dma_buf, *handle);
+ mutex_unlock(&file_priv->prime.lock);
+ if (ret)
+ goto fail;
-static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
- .attach = drm_gem_map_attach,
- .detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
- .release = drm_gem_dmabuf_release,
- .mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
-};
+ dma_buf_put(dma_buf);
-/**
- * DOC: PRIME Helpers
- *
- * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
- * simpler APIs by using the helper functions @drm_gem_prime_export and
- * @drm_gem_prime_import. These functions implement dma-buf support in terms of
- * six lower-level driver callbacks:
- *
- * Export callbacks:
- *
- * * @gem_prime_pin (optional): prepare a GEM object for exporting
- * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
- * * @gem_prime_vmap: vmap a buffer exported by your driver
- * * @gem_prime_vunmap: vunmap a buffer exported by your driver
- * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
- *
- * Import callback:
- *
- * * @gem_prime_import_sg_table (import): produce a GEM object from another
- * driver's scatter/gather table
- */
+ return 0;
-/**
- * drm_gem_prime_export - helper library implementation of the export callback
- * @dev: drm_device to export from
- * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC and DRM_RDWR
- *
- * This is the implementation of the gem_prime_export functions for GEM drivers
- * using the PRIME helpers.
- */
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+fail:
+ /* hmm, if driver attached, we are relying on the free-object path
+ * to detach.. which seems ok..
+ */
+ drm_gem_handle_delete(file_priv, *handle);
+ dma_buf_put(dma_buf);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
+out_put:
+ mutex_unlock(&file_priv->prime.lock);
+ dma_buf_put(dma_buf);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct dma_buf_export_info exp_info = {
- .exp_name = KBUILD_MODNAME, /* white lie for debug */
- .owner = dev->driver->fops->owner,
- .ops = &drm_gem_prime_dmabuf_ops,
- .size = obj->size,
- .flags = flags,
- .priv = obj,
- .resv = obj->resv,
- };
+ struct drm_prime_handle *args = data;
- if (dev->driver->gem_prime_res_obj)
- exp_info.resv = dev->driver->gem_prime_res_obj(obj);
+ if (!dev->driver->prime_fd_to_handle)
+ return -ENOSYS;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return dev->driver->prime_fd_to_handle(dev, file_priv,
+ args->fd, &args->handle);
}
-EXPORT_SYMBOL(drm_gem_prime_export);
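For context, a minimal sketch of how a driver wires these entry points into its &drm_driver; the foo_ prefix is hypothetical and only the PRIME-related fields are shown, everything else is the driver's usual setup:

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM,
	/* PRIME ioctls: forward straight to the generic helpers. */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	/* Default import/export paths provided by the PRIME helper library. */
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_mmap		= drm_gem_prime_mmap,
	/* ... */
};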
static struct dma_buf *export_and_register_object(struct drm_device *dev,
struct drm_gem_object *obj,
@@ -489,9 +387,9 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
if (obj->funcs && obj->funcs->export)
dmabuf = obj->funcs->export(obj, flags);
else if (dev->driver->gem_prime_export)
- dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ dmabuf = dev->driver->gem_prime_export(obj, flags);
else
- dmabuf = drm_gem_prime_export(dev, obj, flags);
+ dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
@@ -521,7 +419,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* This is the PRIME export function which must be used mandatorily by GEM
* drivers to ensure correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
- * gem_prime_export driver callback.
+ * &drm_driver.gem_prime_export driver callback.
*/
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
@@ -610,6 +508,195 @@ out_unlock:
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+
+ if (!dev->driver->prime_handle_to_fd)
+ return -ENOSYS;
+
+ /* check flags are valid */
+ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
+ return -EINVAL;
+
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, args->flags, &args->fd);
+}
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement &drm_gem_object_funcs.export and
+ * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
+ * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
+ * implement dma-buf support in terms of some lower-level helpers, which are
+ * again exported for drivers to use individually:
+ *
+ * Exporting buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Optional pinning of buffers is handled at dma-buf attach and detach time in
+ * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
+ * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
+ * &drm_gem_object_funcs.get_sg_table.
+ *
+ * For kernel-internal access there's drm_gem_dmabuf_vmap() and
+ * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
+ * drm_gem_dmabuf_mmap().
+ *
+ * Note that these export helpers can only be used if the underlying backing
+ * storage is fully coherent and either permanently pinned, or it is safe to pin
+ * it indefinitely.
+ *
+ * FIXME: The underlying helper functions are named rather inconsistently.
+ *
+ * Importing buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Importing dma-bufs using drm_gem_prime_import() relies on
+ * &drm_driver.gem_prime_import_sg_table.
+ *
+ * Note that, similarly to the export helpers, this permanently pins the
+ * underlying backing storage. That is fine for scanout, but not the best
+ * option for sharing lots of buffers for rendering.
+ */
+
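As a hedged illustration of the object-side hooks these helpers rely on, a &drm_gem_object_funcs table might look as follows; all foo_gem_* implementations are hypothetical and only the callbacks used by the export helpers are listed:

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free		= foo_gem_free_object,	/* hypothetical */
	.pin		= foo_gem_pin,		/* called via drm_gem_map_attach() */
	.unpin		= foo_gem_unpin,	/* called via drm_gem_map_detach() */
	.get_sg_table	= foo_gem_get_sg_table,	/* called via drm_gem_map_dma_buf() */
	.vmap		= foo_gem_vmap,		/* called via drm_gem_dmabuf_vmap() */
	.vunmap		= foo_gem_vunmap,	/* called via drm_gem_dmabuf_vunmap() */
	.export		= drm_gem_prime_export,	/* optional, this is the default anyway */
};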
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @attach: buffer attachment data
+ *
+ * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback. Must be used together with
+ * drm_gem_map_detach().
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_gem_map_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return drm_gem_pin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_attach);
+
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
+ * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
+ * &dma_buf_ops.detach callback.
+ */
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ drm_gem_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_detach);
+
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
+ * with drm_gem_unmap_dma_buf().
+ *
+ * Returns: the sg_table containing the scatterlist to be returned, or an
+ * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
+ */
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(dir == DMA_NONE))
+ return ERR_PTR(-EINVAL);
+
+ if (obj->funcs)
+ sgt = obj->funcs->get_sg_table(obj);
+ else
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+ }
+
+ return sgt;
+}
+EXPORT_SYMBOL(drm_gem_map_dma_buf);
+
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
+ *
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (!sgt)
+ return;
+
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
+
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
+ *
+ * Returns the kernel virtual address or NULL on failure.
+ */
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ void *vaddr;
+
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr))
+ vaddr = NULL;
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
+ * device specific handling.
+ */
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ drm_gem_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
+
/**
* drm_gem_prime_mmap - PRIME mmap function for GEM drivers
* @obj: GEM object
@@ -628,6 +715,18 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct file *fil;
int ret;
+ /* Add the fake offset */
+ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret)
+ return ret;
+ vma->vm_private_data = obj;
+ drm_gem_object_get(obj);
+ return 0;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
fil = kzalloc(sizeof(*fil), GFP_KERNEL);
if (!priv || !fil) {
@@ -643,8 +742,6 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
if (ret)
goto out;
- vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
-
ret = obj->dev->driver->fops->mmap(fil, vma);
drm_vma_node_revoke(&obj->vma_node, priv);
@@ -657,14 +754,117 @@ out:
EXPORT_SYMBOL(drm_gem_prime_mmap);
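A hedged userspace-side sketch of what this enables, assuming libdrm's drmPrimeHandleToFD() wrapper and a driver whose &drm_driver.gem_prime_mmap points at drm_gem_prime_mmap(); error handling is minimal:

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *foo_map_bo(int drm_fd, uint32_t handle, size_t size)
{
	int prime_fd;
	void *ptr;

	/* Export the GEM handle as a dma-buf fd; DRM_RDWR allows writable mappings. */
	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR, &prime_fd))
		return NULL;

	/* The dma-buf mmap lands in drm_gem_dmabuf_mmap() and from there in
	 * drm_gem_prime_mmap(), which applies the fake offset for the object. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, prime_fd, 0);
	return ptr == MAP_FAILED ? NULL : ptr;
}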
/**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range
+ *
+ * Provides memory mapping for the buffer. This can be used as the
+ * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
+ * which should be set to drm_gem_prime_mmap().
+ *
+ * FIXME: There's really no point to this wrapper, drivers that need anything
+ * other than drm_gem_prime_mmap() can roll their own &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ if (!dev->driver->gem_prime_mmap)
+ return -ENOSYS;
+
+ return dev->driver->gem_prime_mmap(obj, vma);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .cache_sgt_mapping = true,
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
+ *
+ * This helper creates an sg table object from a set of pages. The driver is
+ * responsible for mapping the pages into the importer's address space for use
+ * with dma_buf itself.
+ *
+ * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+{
+ struct sg_table *sg = NULL;
+ int ret;
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return sg;
+out:
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
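For illustration, a hedged sketch of a &drm_gem_object_funcs.get_sg_table implementation built on this helper; struct foo_gem_object and its page array are assumptions about the driver:

struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* base.size >> PAGE_SHIFT entries, pinned */
};

static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
{
	struct foo_gem_object *foo = container_of(obj, struct foo_gem_object, base);

	return drm_prime_pages_to_sg(foo->pages, obj->size >> PAGE_SHIFT);
}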
+
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * This is the implementation of the &drm_gem_object_funcs.export function for
+ * GEM drivers using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
+ */
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
+ int flags)
+{
+ struct drm_device *dev = obj->dev;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &drm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ .resv = obj->resv,
+ };
+
+ return drm_gem_dmabuf_export(dev, &exp_info);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
* @attach_dev: struct device to dma_buf attach
*
- * This is the core of drm_gem_prime_import. It's designed to be called by
- * drivers who want to use a different device structure than dev->dev for
- * attaching via dma_buf.
+ * This is the core of drm_gem_prime_import(). It's designed to be called by
+ * drivers that want to use a different device structure than &drm_device.dev for
+ * attaching via dma_buf. This function calls
+ * &drm_driver.gem_prime_import_sg_table internally.
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
*/
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
@@ -709,6 +909,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
}
obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
return obj;
@@ -728,7 +929,12 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dma_buf: dma-buf object to import
*
* This is the implementation of the gem_prime_import functions for GEM drivers
- * using the PRIME helpers.
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
*/
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
@@ -738,154 +944,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_prime_import);
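A hedged sketch of the matching &drm_gem_object_funcs.free hook; foo_gem_object and its sgt field (the sg_table remembered by the driver's &drm_driver.gem_prime_import_sg_table implementation) are assumptions:

static void foo_gem_free_object(struct drm_gem_object *obj)
{
	struct foo_gem_object *foo = container_of(obj, struct foo_gem_object, base);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, foo->sgt);	/* drops the attachment and dma-buf ref */

	drm_gem_object_release(obj);
	kfree(foo);
}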
/**
- * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
- * @dev: dev to export the buffer from
- * @file_priv: drm file-private structure
- * @prime_fd: fd id of the dma-buf which should be imported
- * @handle: pointer to storage for the handle of the imported buffer object
- *
- * This is the PRIME import function which must be used mandatorily by GEM
- * drivers to ensure correct lifetime management of the underlying GEM object.
- * The actual importing of GEM object from the dma-buf is done through the
- * gem_import_export driver callback.
- */
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle)
-{
- struct dma_buf *dma_buf;
- struct drm_gem_object *obj;
- int ret;
-
- dma_buf = dma_buf_get(prime_fd);
- if (IS_ERR(dma_buf))
- return PTR_ERR(dma_buf);
-
- mutex_lock(&file_priv->prime.lock);
-
- ret = drm_prime_lookup_buf_handle(&file_priv->prime,
- dma_buf, handle);
- if (ret == 0)
- goto out_put;
-
- /* never seen this one, need to import */
- mutex_lock(&dev->object_name_lock);
- if (dev->driver->gem_prime_import)
- obj = dev->driver->gem_prime_import(dev, dma_buf);
- else
- obj = drm_gem_prime_import(dev, dma_buf);
- if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto out_unlock;
- }
-
- if (obj->dma_buf) {
- WARN_ON(obj->dma_buf != dma_buf);
- } else {
- obj->dma_buf = dma_buf;
- get_dma_buf(dma_buf);
- }
-
- /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
- ret = drm_gem_handle_create_tail(file_priv, obj, handle);
- drm_gem_object_put_unlocked(obj);
- if (ret)
- goto out_put;
-
- ret = drm_prime_add_buf_handle(&file_priv->prime,
- dma_buf, *handle);
- mutex_unlock(&file_priv->prime.lock);
- if (ret)
- goto fail;
-
- dma_buf_put(dma_buf);
-
- return 0;
-
-fail:
- /* hmm, if driver attached, we are relying on the free-object path
- * to detach.. which seems ok..
- */
- drm_gem_handle_delete(file_priv, *handle);
- dma_buf_put(dma_buf);
- return ret;
-
-out_unlock:
- mutex_unlock(&dev->object_name_lock);
-out_put:
- mutex_unlock(&file_priv->prime.lock);
- dma_buf_put(dma_buf);
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
-
-int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_prime_handle *args = data;
-
- if (!drm_core_check_feature(dev, DRIVER_PRIME))
- return -EOPNOTSUPP;
-
- if (!dev->driver->prime_handle_to_fd)
- return -ENOSYS;
-
- /* check flags are valid */
- if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
- return -EINVAL;
-
- return dev->driver->prime_handle_to_fd(dev, file_priv,
- args->handle, args->flags, &args->fd);
-}
-
-int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_prime_handle *args = data;
-
- if (!drm_core_check_feature(dev, DRIVER_PRIME))
- return -EOPNOTSUPP;
-
- if (!dev->driver->prime_fd_to_handle)
- return -ENOSYS;
-
- return dev->driver->prime_fd_to_handle(dev, file_priv,
- args->fd, &args->handle);
-}
-
-/**
- * drm_prime_pages_to_sg - converts a page array into an sg list
- * @pages: pointer to the array of page pointers to convert
- * @nr_pages: length of the page vector
- *
- * This helper creates an sg table object from a set of pages
- * the driver is responsible for mapping the pages into the
- * importers address space for use with dma_buf itself.
- */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
-{
- struct sg_table *sg = NULL;
- int ret;
-
- sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sg) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
- if (ret)
- goto out;
-
- return sg;
-out:
- kfree(sg);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_prime_pages_to_sg);
-
-/**
* drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
* @sgt: scatter-gather table to convert
* @pages: optional array of page pointers to store the page array in
@@ -894,6 +952,9 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
*
* Exports an sg table into an array of pages and addresses. This is currently
* required by the TTM driver in order to do correct fault handling.
+ *
+ * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * implementation.
*/
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_entries)
@@ -934,7 +995,7 @@ EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
* @sg: the sg-table which was pinned at import time
*
* This is the cleanup functions which GEM drivers need to call when they use
- * @drm_gem_prime_import to import dma-bufs.
+ * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
*/
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
@@ -949,16 +1010,3 @@ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
-
-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
-{
- mutex_init(&prime_fpriv->lock);
- prime_fpriv->dmabufs = RB_ROOT;
- prime_fpriv->handles = RB_ROOT;
-}
-
-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
-{
- /* by now drm_gem_release should've made sure the list is empty */
- WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
-}
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index a17c8a14dba4..111b932cf2a9 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -28,6 +28,7 @@
#include <stdarg.h>
#include <linux/io.h>
+#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -35,6 +36,24 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+/*
+ * __drm_debug: Enable debug output.
+ * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
+ */
+unsigned int __drm_debug;
+EXPORT_SYMBOL(__drm_debug);
+
+MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
+module_param_named(debug, __drm_debug, int, 0600);
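As a usage note, the mask can be set at boot with the drm.debug= parameter (for example drm.debug=0x1e enables DRIVER, KMS, PRIME and ATOMIC messages) or changed at runtime by writing to /sys/module/drm/parameters/debug, since the parameter is registered with mode 0600.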
+
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
struct drm_print_iterator *iterator = p->arg;
@@ -147,6 +166,12 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_debug);
+void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ pr_err("*ERROR* %s %pV", p->prefix, vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_err);
+
/**
* drm_puts - print a const string to a &drm_printer stream
* @p: the &drm printer
@@ -179,6 +204,37 @@ void drm_printf(struct drm_printer *p, const char *f, ...)
}
EXPORT_SYMBOL(drm_printf);
+/**
+ * drm_print_bits - print bits to a &drm_printer stream
+ * @p: the &drm_printer
+ * @value: field value
+ * @bits: array with bit names
+ * @nbits: size of the bit names array
+ *
+ * Print bits (in flag fields for example) in human-readable form.
+ */
+void drm_print_bits(struct drm_printer *p, unsigned long value,
+ const char * const bits[], unsigned int nbits)
+{
+ bool first = true;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(nbits > BITS_PER_TYPE(value)))
+ nbits = BITS_PER_TYPE(value);
+
+ for_each_set_bit(i, &value, nbits) {
+ if (WARN_ON_ONCE(!bits[i]))
+ continue;
+ drm_printf(p, "%s%s", first ? "" : ",",
+ bits[i]);
+ first = false;
+ }
+ if (first)
+ drm_printf(p, "(none)");
+}
+EXPORT_SYMBOL(drm_print_bits);
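As a usage illustration (hedged; the foo_* names and the seq_file context are assumptions), the helper pairs naturally with one of the existing &drm_printer constructors:

#include <linux/seq_file.h>
#include <drm/drm_print.h>

static const char * const foo_flag_names[] = {
	"enabled", "async", "gamma",
};

static void foo_dump_flags(struct seq_file *m, unsigned long flags)
{
	struct drm_printer p = drm_seq_file_printer(m);

	drm_print_bits(&p, flags, foo_flag_names, ARRAY_SIZE(foo_flag_names));
	drm_printf(&p, "\n");
}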
+
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...)
{
@@ -200,13 +256,13 @@ void drm_dev_printk(const struct device *dev, const char *level,
}
EXPORT_SYMBOL(drm_dev_printk);
-void drm_dev_dbg(const struct device *dev, unsigned int category,
+void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
@@ -224,12 +280,12 @@ void drm_dev_dbg(const struct device *dev, unsigned int category,
}
EXPORT_SYMBOL(drm_dev_dbg);
-void drm_dbg(unsigned int category, const char *format, ...)
+void __drm_dbg(enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
@@ -241,9 +297,9 @@ void drm_dbg(unsigned int category, const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(drm_dbg);
+EXPORT_SYMBOL(__drm_dbg);
-void drm_err(const char *format, ...)
+void __drm_err(const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -257,7 +313,7 @@ void drm_err(const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(drm_err);
+EXPORT_SYMBOL(__drm_err);
/**
* drm_print_regset32 - print the contents of registers to a
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ef2c468205a2..576b4b7dcd89 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -92,7 +93,6 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_device *dev = connector->dev;
enum drm_mode_status ret = MODE_OK;
struct drm_encoder *encoder;
- int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -100,7 +100,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
+ struct drm_bridge *bridge;
struct drm_crtc *crtc;
ret = drm_encoder_mode_valid(encoder, mode);
@@ -112,7 +113,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
continue;
}
- ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ ret = drm_bridge_chain_mode_valid(bridge, mode);
if (ret != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 892ce636ef72..6ee04803c362 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
struct drm_property_blob *blob;
int ret;
- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+ if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index b8363aaa9032..0460e874896e 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -52,9 +52,17 @@ bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
}
EXPORT_SYMBOL(drm_rect_intersect);
-static u32 clip_scaled(u32 src, u32 dst, u32 clip)
+static u32 clip_scaled(int src, int dst, int *clip)
{
- u64 tmp = mul_u32_u32(src, dst - clip);
+ u64 tmp;
+
+ if (dst == 0)
+ return 0;
+
+ /* Only clip what we have. Keeps the result bounded. */
+ *clip = min(*clip, dst);
+
+ tmp = mul_u32_u32(src, dst - *clip);
/*
* Round toward 1.0 when clipping so that we don't accidentally
@@ -73,11 +81,13 @@ static u32 clip_scaled(u32 src, u32 dst, u32 clip)
* @clip: clip rectangle
*
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
- * same amounts multiplied by @hscale and @vscale.
+ * corresponding amounts, retaining the vertical and horizontal scaling
+ * factors from @src to @dst.
*
* RETURNS:
+ *
* %true if rectangle @dst is still visible after being clipped,
- * %false otherwise
+ * %false otherwise.
*/
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip)
@@ -87,34 +97,34 @@ bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
diff = clip->x1 - dst->x1;
if (diff > 0) {
u32 new_src_w = clip_scaled(drm_rect_width(src),
- drm_rect_width(dst), diff);
+ drm_rect_width(dst), &diff);
- src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX);
- dst->x1 = clip->x1;
+ src->x1 = src->x2 - new_src_w;
+ dst->x1 += diff;
}
diff = clip->y1 - dst->y1;
if (diff > 0) {
u32 new_src_h = clip_scaled(drm_rect_height(src),
- drm_rect_height(dst), diff);
+ drm_rect_height(dst), &diff);
- src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX);
- dst->y1 = clip->y1;
+ src->y1 = src->y2 - new_src_h;
+ dst->y1 += diff;
}
diff = dst->x2 - clip->x2;
if (diff > 0) {
u32 new_src_w = clip_scaled(drm_rect_width(src),
- drm_rect_width(dst), diff);
+ drm_rect_width(dst), &diff);
- src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX);
- dst->x2 = clip->x2;
+ src->x2 = src->x1 + new_src_w;
+ dst->x2 -= diff;
}
diff = dst->y2 - clip->y2;
if (diff > 0) {
u32 new_src_h = clip_scaled(drm_rect_height(src),
- drm_rect_height(dst), diff);
+ drm_rect_height(dst), &diff);
- src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX);
- dst->y2 = clip->y2;
+ src->y2 = src->y1 + new_src_h;
+ dst->y2 -= diff;
}
return drm_rect_visible(dst);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 2d7790f14b0c..d5c386154246 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_scatter.c
* IOCTLs to manage scatter/gather memory
*
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index 4b9424a8f1f1..dd33fec5aabd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -5,6 +5,7 @@
* Authors:
* Sean Paul <seanpaul@chromium.org>
*/
+#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -50,11 +51,17 @@
* atomic_check when &drm_crtc_state.self_refresh_active is true.
*/
+#define SELF_REFRESH_AVG_SEED_MS 200
+
+DECLARE_EWMA(psr_time, 4, 4)
+
struct drm_self_refresh_data {
struct drm_crtc *crtc;
struct delayed_work entry_work;
- struct drm_atomic_state *save_state;
- unsigned int entry_delay_ms;
+
+ struct mutex avg_mutex;
+ struct ewma_psr_time entry_avg_ms;
+ struct ewma_psr_time exit_avg_ms;
};
static void drm_self_refresh_helper_entry_work(struct work_struct *work)
@@ -123,6 +130,48 @@ out_drop_locks:
}
/**
+ * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
+ * @state: the state which has just been applied to hardware
+ * @commit_time_ms: the amount of time in ms that this commit took to complete
+ * @new_self_refresh_mask: bitmask of crtcs that have self_refresh_active in
+ * new state
+ *
+ * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
+ * update the average entry/exit self refresh times on self refresh transitions.
+ * These averages will be used when calculating how long to delay before
+ * entering self refresh mode after activity.
+ */
+void
+drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+ unsigned int commit_time_ms,
+ unsigned int new_self_refresh_mask)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
+ struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
+ struct ewma_psr_time *time;
+
+ if (old_crtc_state->self_refresh_active ==
+ new_self_refresh_active)
+ continue;
+
+ if (new_self_refresh_active)
+ time = &sr_data->entry_avg_ms;
+ else
+ time = &sr_data->exit_avg_ms;
+
+ mutex_lock(&sr_data->avg_mutex);
+ ewma_psr_time_add(time, commit_time_ms);
+ mutex_unlock(&sr_data->avg_mutex);
+ }
+}
+EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times);
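A hedged sketch of a call site; foo_commit_tail() is hypothetical, the new_self_refresh_mask is assumed to have been collected from the new CRTC states before they were swapped, and the headers drm_atomic_helper.h / drm_self_refresh_helper.h are assumed to be included:

static void foo_commit_tail(struct drm_atomic_state *state,
			    unsigned int new_self_refresh_mask)
{
	ktime_t start = ktime_get();

	drm_atomic_helper_commit_tail(state);

	/* Feed the measured commit duration into the entry/exit averages. */
	drm_self_refresh_helper_update_avg_times(state,
			ktime_to_ms(ktime_sub(ktime_get(), start)),
			new_self_refresh_mask);
}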
+
+/**
* drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit
* @state: the state currently being checked
*
@@ -153,6 +202,7 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_self_refresh_data *sr_data;
+ unsigned int delay;
/* Don't trigger the entry timer when we're already in SR */
if (crtc_state->self_refresh_active)
@@ -162,8 +212,13 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
if (!sr_data)
continue;
+ mutex_lock(&sr_data->avg_mutex);
+ delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) +
+ ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
+ mutex_unlock(&sr_data->avg_mutex);
+
mod_delayed_work(system_wq, &sr_data->entry_work,
- msecs_to_jiffies(sr_data->entry_delay_ms));
+ msecs_to_jiffies(delay));
}
}
EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
@@ -171,12 +226,10 @@ EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
/**
* drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
* @crtc: the crtc which supports self refresh supported displays
- * @entry_delay_ms: amount of inactivity to wait before entering self refresh
*
* Returns zero if successful or -errno on failure
*/
-int drm_self_refresh_helper_init(struct drm_crtc *crtc,
- unsigned int entry_delay_ms)
+int drm_self_refresh_helper_init(struct drm_crtc *crtc)
{
struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
@@ -190,8 +243,18 @@ int drm_self_refresh_helper_init(struct drm_crtc *crtc,
INIT_DELAYED_WORK(&sr_data->entry_work,
drm_self_refresh_helper_entry_work);
- sr_data->entry_delay_ms = entry_delay_ms;
sr_data->crtc = crtc;
+ mutex_init(&sr_data->avg_mutex);
+ ewma_psr_time_init(&sr_data->entry_avg_ms);
+ ewma_psr_time_init(&sr_data->exit_avg_ms);
+
+ /*
+ * Seed the averages so they're non-zero (and sufficiently large
+ * for even poorly performing panels). As time goes on, this will be
+ * averaged out and the values will trend to their true value.
+ */
+ ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS);
+ ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS);
crtc->self_refresh_data = sr_data;
return 0;
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index b11910f14c46..15fb516ae2d8 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -42,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
/* Anything goes */
return MODE_OK;
- return pipe->funcs->mode_valid(crtc, mode);
+ return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index a199c8d56b95..669c93fe2500 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -29,21 +29,97 @@
/**
* DOC: Overview
*
- * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
- * persistent objects that contain an optional fence. The fence can be updated
- * with a new fence, or be NULL.
+ * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
+ * container for a synchronization primitive which can be used by userspace
+ * to explicitly synchronize GPU commands, can be shared between userspace
+ * processes, and can be shared between different DRM drivers.
+ * Their primary use-case is to implement Vulkan fences and semaphores.
+ * The syncobj userspace API provides ioctls for several operations:
*
- * syncobj's can be waited upon, where it will wait for the underlying
- * fence.
+ * - Creation and destruction of syncobjs
+ * - Import and export of syncobjs to/from a syncobj file descriptor
+ * - Import and export a syncobj's underlying fence to/from a sync file
+ * - Reset a syncobj (set its fence to NULL)
+ * - Signal a syncobj (set a trivially signaled fence)
+ * - Wait for a syncobj's fence to appear and be signaled
*
- * syncobj's can be export to fd's and back, these fd's are opaque and
- * have no other use case, except passing the syncobj between processes.
+ * At its core, a syncobj is simply a wrapper around a pointer to a struct
+ * &dma_fence which may be NULL.
+ * When a syncobj is first created, its pointer is either NULL or a pointer
+ * to an already signaled fence depending on whether the
+ * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
+ * &DRM_IOCTL_SYNCOBJ_CREATE.
+ * When GPU work which signals a syncobj is enqueued in a DRM driver,
+ * the syncobj fence is replaced with a fence which will be signaled by the
+ * completion of that work.
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
+ * driver retrieves the syncobj's current fence at the time the work is
+ * enqueued and waits on that fence before submitting the work to hardware.
+ * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
+ * All manipulation of the syncobj's fence happens in terms of the current
+ * fence at the time the ioctl is called by userspace regardless of whether
+ * that operation is an immediate host-side operation (signal or reset) or
+ * an operation which is enqueued in some driver queue.
+ * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
+ * manipulate a syncobj from the host by resetting its pointer to NULL or
+ * setting its pointer to a fence which is already signaled.
*
- * Their primary use-case is to implement Vulkan fences and semaphores.
*
- * syncobj have a kref reference count, but also have an optional file.
- * The file is only created once the syncobj is exported.
- * The file takes a reference on the kref.
+ * Host-side wait on syncobjs
+ * --------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
+ * host-side wait on all of the syncobj fences simultaneously.
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
+ * all of the syncobj fences to be signaled before it returns.
+ * Otherwise, it returns once at least one syncobj fence has been signaled
+ * and the index of a signaled fence is written back to the client.
+ *
+ * Unlike the enqueued GPU work dependencies which fail if they see a NULL
+ * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
+ * the host-side wait will first wait for the syncobj to receive a non-NULL
+ * fence and then wait on that fence.
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
+ * syncobjs in the array has a NULL fence, -EINVAL will be returned.
+ * Assuming the syncobj starts off with a NULL fence, this allows a client
+ * to do a host wait in one thread (or process) which waits on GPU work
+ * submitted in another thread (or process) without having to manually
+ * synchronize between the two.
+ * This requirement is inherited from the Vulkan fence API.
+ *
+ *
+ * Import/export of syncobjs
+ * -------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
+ * provide two mechanisms for import/export of syncobjs.
+ *
+ * The first lets the client import or export an entire syncobj to a file
+ * descriptor.
+ * These fds are opaque and have no use other than passing the syncobj
+ * between processes.
+ * All exported file descriptors and any syncobj handles created as a
+ * result of importing those file descriptors own a reference to the
+ * same underlying struct &drm_syncobj and the syncobj can be used
+ * persistently across all the processes with which it is shared.
+ * The syncobj is freed only once the last reference is dropped.
+ * Unlike dma-buf, importing a syncobj creates a new handle (with its own
+ * reference) for every import instead of de-duplicating.
+ * The primary use-case of this persistent import/export is for shared
+ * Vulkan fences and semaphores.
+ *
+ * The second import/export mechanism, which is indicated by
+ * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
+ * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
+ * import/export the syncobj's current fence from/to a &sync_file.
+ * When a syncobj is exported to a sync file, that sync file wraps the
+ * syncobj's fence at the time of export and any later signal or reset
+ * operations on the syncobj will not affect the exported sync file.
+ * When a sync file is imported into a syncobj, the syncobj's fence is set
+ * to the fence wrapped by that sync file.
+ * Because sync files are immutable, resetting or signaling the syncobj
+ * will not affect any sync files whose fences have been imported into the
+ * syncobj.
*/
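A hedged userspace sketch of the basic flow described above, assuming libdrm's syncobj wrappers from xf86drm.h; error handling is omitted:

#include <stdint.h>
#include <xf86drm.h>

static void foo_syncobj_example(int drm_fd)
{
	uint32_t handle;
	int obj_fd;

	/* Create an (initially fence-less) syncobj and export it as an opaque fd. */
	drmSyncobjCreate(drm_fd, 0, &handle);
	drmSyncobjHandleToFD(drm_fd, handle, &obj_fd);

	/* Host-side wait; WAIT_FOR_SUBMIT also waits for the fence to appear. */
	drmSyncobjWait(drm_fd, &handle, 1, INT64_MAX,
		       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, NULL);

	drmSyncobjDestroy(drm_fd, handle);
}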
#include <linux/anon_inodes.h>
@@ -53,11 +129,13 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
#include "drm_internal.h"
@@ -1202,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags != 0)
return -EINVAL;
if (args->count_handles == 0)
@@ -1273,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
return -EINVAL;
if (args->count_handles == 0)
@@ -1294,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
fence = drm_syncobj_fence_get(syncobjs[i]);
chain = to_dma_fence_chain(fence);
if (chain) {
- struct dma_fence *iter, *last_signaled = NULL;
-
- dma_fence_chain_for_each(iter, fence) {
- if (!iter)
- break;
- dma_fence_put(last_signaled);
- last_signaled = dma_fence_get(iter);
- if (!to_dma_fence_chain(last_signaled)->prev_seqno)
- /* It is most likely that timeline has
- * unorder points. */
- break;
+ struct dma_fence *iter, *last_signaled =
+ dma_fence_get(fence);
+
+ if (args->flags &
+ DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
+ point = fence->seqno;
+ } else {
+ dma_fence_chain_for_each(iter, fence) {
+ if (iter->context != fence->context) {
+ dma_fence_put(iter);
+ /* It is most likely that the timeline has
+ * unordered points. */
+ break;
+ }
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
+ }
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
}
- point = dma_fence_is_signaled(last_signaled) ?
- last_signaled->seqno :
- to_dma_fence_chain(last_signaled)->prev_seqno;
dma_fence_put(last_signaled);
} else {
point = 0;
}
+ dma_fence_put(fence);
ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
ret = ret ? -EFAULT : 0;
if (ret)
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index ad10810bc972..dd2bc85f43cc 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/i2c.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
@@ -26,6 +27,7 @@
#include <drm/drm_sysfs.h>
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#define to_drm_minor(d) dev_get_drvdata(d)
#define to_drm_connector(d) dev_get_drvdata(d)
@@ -294,6 +296,9 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(dev);
+ if (connector->ddc)
+ return sysfs_create_link(&connector->kdev->kobj,
+ &connector->ddc->dev.kobj, "ddc");
return 0;
}
@@ -301,6 +306,10 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
if (!connector->kdev)
return;
+
+ if (connector->ddc)
+ sysfs_remove_link(&connector->kdev->kobj, "ddc");
+
DRM_DEBUG("removing \"%s\" from sysfs\n",
connector->name);
@@ -325,6 +334,9 @@ void drm_sysfs_lease_event(struct drm_device *dev)
* Send a uevent for the DRM device specified by @dev. Currently we only
* set HOTPLUG=1 in the uevent environment, but this could be expanded to
* deal with other types of events.
+ *
+ * Any new uapi should use drm_sysfs_connector_status_event() for uevents
+ * on connector status change.
*/
void drm_sysfs_hotplug_event(struct drm_device *dev)
{
@@ -337,6 +349,37 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+/**
+ * drm_sysfs_connector_status_event - generate a DRM uevent for connector
+ * property status change
+ * @connector: connector on which property status changed
+ * @property: connector property whose status changed.
+ *
+ * Send a uevent for the DRM device that @connector belongs to. Currently we
+ * set HOTPLUG=1 and connector id along with the attached property id
+ * related to the status change.
+ */
+void drm_sysfs_connector_status_event(struct drm_connector *connector,
+ struct drm_property *property)
+{
+ struct drm_device *dev = connector->dev;
+ char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21];
+ char *envp[4] = { hotplug_str, conn_id, prop_id, NULL };
+
+ WARN_ON(!drm_mode_obj_find_prop_id(&connector->base,
+ property->base.id));
+
+ snprintf(conn_id, ARRAY_SIZE(conn_id),
+ "CONNECTOR=%u", connector->base.id);
+ snprintf(prop_id, ARRAY_SIZE(prop_id),
+ "PROPERTY=%u", property->base.id);
+
+ DRM_DEBUG("generating connector status event\n");
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_status_event);
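A hedged sketch of the intended call site; my_prop and new_value are hypothetical and the property is assumed to already be attached to the connector:

static void foo_update_connector_prop(struct drm_connector *connector,
				      struct drm_property *my_prop,
				      uint64_t new_value)
{
	drm_object_property_set_value(&connector->base, my_prop, new_value);
	drm_sysfs_connector_status_event(connector, my_prop);
}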
+
static void drm_sysfs_release(struct device *dev)
{
kfree(dev);
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 471eb927474b..11c6dd577e8e 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -13,17 +13,23 @@ struct drm_file;
#define TRACE_INCLUDE_FILE drm_trace
TRACE_EVENT(drm_vblank_event,
- TP_PROTO(int crtc, unsigned int seq),
- TP_ARGS(crtc, seq),
+ TP_PROTO(int crtc, unsigned int seq, ktime_t time, bool high_prec),
+ TP_ARGS(crtc, seq, time, high_prec),
TP_STRUCT__entry(
__field(int, crtc)
__field(unsigned int, seq)
+ __field(ktime_t, time)
+ __field(bool, high_prec)
),
TP_fast_assign(
__entry->crtc = crtc;
__entry->seq = seq;
- ),
- TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
+ __entry->time = time;
+ __entry->high_prec = high_prec;
+ ),
+ TP_printk("crtc=%d, seq=%u, time=%lld, high-prec=%s",
+ __entry->crtc, __entry->seq, __entry->time,
+ __entry->high_prec ? "true" : "false")
);
TRACE_EVENT(drm_vblank_event_queued,
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 603ab105125d..1659b13b178c 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -31,7 +31,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
-#include <drm/drm_os_linux.h>
#include <drm/drm_vblank.h>
#include "drm_internal.h"
@@ -107,7 +106,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
write_seqlock(&vblank->seqlock);
vblank->time = t_vblank;
- vblank->count += vblank_count_inc;
+ atomic64_add(vblank_count_inc, &vblank->count);
write_sequnlock(&vblank->seqlock);
}
@@ -273,7 +272,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, vblank->count, diff, cur_vblank, vblank->last);
+ pipe, atomic64_read(&vblank->count), diff,
+ cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -295,11 +295,23 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ u64 count;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- return vblank->count;
+ count = atomic64_read(&vblank->count);
+
+ /*
+ * This read barrier corresponds to the implicit write barrier of the
+ * write seqlock in store_vblank(). Note that this is the only place
+ * where we need an explicit barrier, since all other access goes
+ * through drm_vblank_count_and_time(), which already has the required
+ * read barrier courtesy of the read seqlock.
+ */
+ smp_rmb();
+
+ return count;
}
/**
@@ -320,7 +332,7 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -694,7 +706,7 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
*/
*vblank_time = ktime_sub_ns(etime, delta_ns);
- if ((drm_debug & DRM_UT_VBL) == 0)
+ if (!drm_debug_enabled(DRM_UT_VBL))
return true;
ts_etime = ktime_to_timespec64(etime);
@@ -764,6 +776,14 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* vblank interrupt (since it only reports the software vblank counter), see
* drm_crtc_accurate_vblank_count() for such use-cases.
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* The software vblank counter.
*/
@@ -801,7 +821,7 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
do {
seq = read_seqbegin(&vblank->seqlock);
- vblank_count = vblank->count;
+ vblank_count = atomic64_read(&vblank->count);
*vblanktime = vblank->time;
} while (read_seqretry(&vblank->seqlock, seq));
@@ -818,6 +838,14 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
*/
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime)
@@ -1324,7 +1352,7 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
assert_spin_locked(&dev->vblank_time_lock);
vblank = &dev->vblank[pipe];
- WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
framedur_ns = vblank->framedur_ns;
@@ -1582,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@@ -1670,12 +1698,28 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
}
if (req_seq != seq) {
+ int wait;
+
DRM_DEBUG("waiting on vblank count %llu, crtc %u\n",
req_seq, pipe);
- DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
- vblank_passed(drm_vblank_count(dev, pipe),
- req_seq) ||
- !READ_ONCE(vblank->enabled));
+ wait = wait_event_interruptible_timeout(vblank->queue,
+ vblank_passed(drm_vblank_count(dev, pipe), req_seq) ||
+ !READ_ONCE(vblank->enabled),
+ msecs_to_jiffies(3000));
+
+ switch (wait) {
+ case 0:
+ /* timeout */
+ ret = -EBUSY;
+ break;
+ case -ERESTARTSYS:
+ /* interrupted by signal */
+ ret = -EINTR;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
}
if (ret != -EINTR) {
@@ -1716,7 +1760,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq);
+ trace_drm_vblank_event(pipe, seq, now,
+ dev->driver->get_vblank_timestamp != NULL);
}
/**
@@ -1791,6 +1836,14 @@ EXPORT_SYMBOL(drm_handle_vblank);
*
* This is the native KMS version of drm_handle_vblank().
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* True if the event was successfully handled, false on failure.
*/
@@ -1823,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
if (!crtc)
@@ -1881,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
if (!crtc)
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 05f7c5833946..52e87e4869a5 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_vm.c
* Memory mapping for DRM
*
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
index e9c9f9a80ba3..2000d9b33fd5 100644
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ b/drivers/gpu/drm/drm_vram_helper_common.c
@@ -7,9 +7,8 @@
*
* This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
* buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM can be
- * managed with &struct drm_vram_mm (VRAM MM). Both data structures are
- * supposed to be used together, but can also be used individually.
+ * framebuffer devices with dedicated memory. The video RAM is managed
+ * by &struct drm_vram_mm (VRAM MM).
*
* With the GEM interface userspace applications create, manage and destroy
* graphics buffers, such as an on-screen framebuffer. GEM does not provide
@@ -50,8 +49,7 @@
* // setup device, vram base and size
* // ...
*
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
- * &drm_gem_vram_mm_funcs);
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
* if (ret)
* return ret;
* return 0;
diff --git a/drivers/gpu/drm/drm_vram_mm_helper.c b/drivers/gpu/drm/drm_vram_mm_helper.c
deleted file mode 100644
index c911781d6728..000000000000
--- a/drivers/gpu/drm/drm_vram_mm_helper.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_vram_mm_helper.h>
-
-#include <drm/ttm/ttm_page_alloc.h>
-
-/**
- * DOC: overview
- *
- * The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. These
- * helper functions work well with &struct drm_gem_vram_object.
- */
-
-/*
- * TTM TT
- */
-
-static void backend_func_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func backend_func = {
- .destroy = backend_func_destroy
-};
-
-/*
- * TTM BO device
- */
-
-static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
- int ret;
-
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
- return NULL;
-
- tt->func = &backend_func;
-
- ret = ttm_tt_init(tt, bo, page_flags);
- if (ret < 0)
- goto err_ttm_tt_init;
-
- return tt;
-
-err_ttm_tt_init:
- kfree(tt);
- return NULL;
-}
-
-static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (vmm->funcs && vmm->funcs->evict_flags)
- vmm->funcs->evict_flags(bo, placement);
-}
-
-static int bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (!vmm->funcs || !vmm->funcs->verify_access)
- return 0;
- return vmm->funcs->verify_access(bo, filp);
-}
-
-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
-
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- mem->bus.addr = NULL;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
-
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM: /* nothing to do */
- mem->bus.offset = 0;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- break;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = vmm->vram_base;
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{ }
-
-static struct ttm_bo_driver bo_driver = {
- .ttm_tt_create = bo_driver_ttm_tt_create,
- .ttm_tt_populate = ttm_pool_populate,
- .ttm_tt_unpopulate = ttm_pool_unpopulate,
- .init_mem_type = bo_driver_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = bo_driver_evict_flags,
- .verify_access = bo_driver_verify_access,
- .io_mem_reserve = bo_driver_io_mem_reserve,
- .io_mem_free = bo_driver_io_mem_free,
-};
-
-/*
- * struct drm_vram_mm
- */
-
-/**
- * drm_vram_mm_init() - Initialize an instance of VRAM MM.
- * @vmm: the VRAM MM instance to initialize
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
- uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- vmm->vram_base = vram_base;
- vmm->vram_size = vram_size;
- vmm->funcs = funcs;
-
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret)
- return ret;
-
- ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_vram_mm_init);
-
-/**
- * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
- * @vmm: the VRAM MM instance to clean up
- */
-void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
-{
- ttm_bo_device_release(&vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_cleanup);
-
-/**
- * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- * @vmm: the VRAM MM instance
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm)
-{
- return ttm_bo_mmap(filp, vma, &vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_mmap);
-
-/*
- * Helpers for integration with struct drm_device
- */
-
-/**
- * drm_vram_helper_alloc_mm - Allocates a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * The new instance of &struct drm_vram_mm on success, or
- * an ERR_PTR()-encoded errno code otherwise.
- */
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- if (WARN_ON(dev->vram_mm))
- return dev->vram_mm;
-
- dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
- if (!dev->vram_mm)
- return ERR_PTR(-ENOMEM);
-
- ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
- if (ret)
- goto err_kfree;
-
- return dev->vram_mm;
-
-err_kfree:
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
-
-/**
- * drm_vram_helper_release_mm - Releases a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- */
-void drm_vram_helper_release_mm(struct drm_device *dev)
-{
- if (!dev->vram_mm)
- return;
-
- drm_vram_mm_cleanup(dev->vram_mm);
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
-}
-EXPORT_SYMBOL(drm_vram_helper_release_mm);
-
-/*
- * Helpers for &struct file_operations
- */
-
-/**
- * drm_vram_mm_file_operations_mmap() - \
- Implements &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
-
- if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
- return -EINVAL;
-
- return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
-}
-EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index ff138b6ec48b..43d9e3bb3a94 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
if (job->fb)
drm_framebuffer_put(job->fb);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+
kfree(job);
}
EXPORT_SYMBOL(drm_writeback_cleanup_job);
@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
{
unsigned long flags;
struct drm_writeback_job *job;
+ struct dma_fence *out_fence;
spin_lock_irqsave(&wb_connector->job_lock, flags);
job = list_first_entry_or_null(&wb_connector->job_queue,
struct drm_writeback_job,
list_entry);
- if (job) {
+ if (job)
list_del(&job->list_entry);
- if (job->out_fence) {
- if (status)
- dma_fence_set_error(job->out_fence, status);
- dma_fence_signal(job->out_fence);
- dma_fence_put(job->out_fence);
- }
- }
+
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
if (WARN_ON(!job))
return;
+ out_fence = job->out_fence;
+ if (out_fence) {
+ if (status)
+ dma_fence_set_error(out_fence, status);
+ dma_fence_signal(out_fence);
+ dma_fence_put(out_fence);
+ job->out_fence = NULL;
+ }
+
INIT_WORK(&job->cleanup_work, cleanup_work);
queue_work(system_long_wq, &job->cleanup_work);
}
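
The rework above only removes the job from the queue while job_lock is held and signals the fence afterwards, since dma_fence_signal() may take further locks and run callbacks. A generic sketch of that dequeue-under-lock, signal-outside pattern; queue_lock, queue and struct my_item are placeholders:

	spin_lock_irqsave(&queue_lock, flags);
	item = list_first_entry_or_null(&queue, struct my_item, list);
	if (item)
		list_del(&item->list);
	spin_unlock_irqrestore(&queue_lock, flags);

	if (item && item->fence) {
		dma_fence_signal(item->fence);	/* outside the spinlock */
		dma_fence_put(item->fence);
		item->fence = NULL;
	}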
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 160ce3c060a5..32d9fac587f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -3,6 +3,8 @@
* Copyright (C) 2014-2018 Etnaviv Project
*/
+#include <drm/drm_drv.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
@@ -116,7 +118,9 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
u32 *ptr = buf->vaddr + off;
dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
- ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
+ ptr, etnaviv_cmdbuf_get_va(buf,
+ &gpu->mmu_context->cmdbuf_mapping) +
+ off, size - len * 4 - off);
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
ptr, len * 4, 0);
@@ -149,7 +153,9 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
buffer->user_size = 0;
- return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
+ return etnaviv_cmdbuf_get_va(buffer,
+ &gpu->mmu_context->cmdbuf_mapping) +
+ buffer->user_size;
}
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -162,8 +168,9 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
- buffer->user_size - 4);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
return buffer->user_size / 8;
}
@@ -203,7 +210,7 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
return buffer->user_size / 8;
}
-u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
@@ -212,7 +219,7 @@ u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
- VIVS_MMUv2_PTA_CONFIG_INDEX(0));
+ VIVS_MMUv2_PTA_CONFIG_INDEX(id));
CMD_END(buffer);
@@ -289,8 +296,9 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
/* Append waitlink */
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
- buffer->user_size - 4);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
/*
* Kick off the 'sync point' command by replacing the previous
@@ -304,36 +312,41 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
- unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
+ struct etnaviv_iommu_context *mmu_context, unsigned int event,
+ struct etnaviv_cmdbuf *cmdbuf)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
+ bool switch_mmu_context = gpu->mmu_context != mmu_context;
+ unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
lockdep_assert_held(&gpu->lock);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
- link_target = etnaviv_cmdbuf_get_va(cmdbuf);
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping);
link_dwords = cmdbuf->size / 8;
/*
- * If we need maintanence prior to submitting this buffer, we will
+ * If we need maintenance prior to submitting this buffer, we will
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || switch_context) {
+ if (need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
extra_dwords = 1;
/* flush command */
- if (gpu->mmu->need_flush) {
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+ if (need_flush) {
+ if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
extra_dwords += 3;
@@ -343,11 +356,28 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (switch_context)
extra_dwords += 4;
+ /* PTA load command */
+ if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
+ extra_dwords += 1;
+
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
+ /*
+ * Switch MMU context if necessary. Must be done after the
+ * link target has been calculated, as the jump forward in the
+ * kernel ring still uses the last active MMU context before
+ * the switch.
+ */
+ if (switch_mmu_context) {
+ struct etnaviv_iommu_context *old_context = gpu->mmu_context;
+
+ etnaviv_iommu_context_get(mmu_context);
+ gpu->mmu_context = mmu_context;
+ etnaviv_iommu_context_put(old_context);
+ }
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
/* Add the MMU flush */
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+ if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
@@ -355,17 +385,30 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
} else {
+ u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
+ VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;
+
+ if (switch_mmu_context &&
+ gpu->sec_mode == ETNA_SEC_KERNEL) {
+ unsigned short id =
+ etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
+ CMD_LOAD_STATE(buffer,
+ VIVS_MMUv2_PTA_CONFIG,
+ VIVS_MMUv2_PTA_CONFIG_INDEX(id));
+ }
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);
+
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
- VIVS_MMUv2_CONFIGURATION_MODE_MASK |
- VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
- VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
+ flush);
CMD_SEM(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
}
- gpu->mmu->need_flush = false;
+ gpu->flush_seq = new_flush_seq;
}
if (switch_context) {
@@ -374,6 +417,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
}
/* And the link to the submitted buffer */
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping);
CMD_LINK(buffer, link_dwords, link_target);
/* Update the link target to point to above instructions */
@@ -410,15 +455,17 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
- buffer->user_size - 4);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
- return_target, etnaviv_cmdbuf_get_va(cmdbuf),
+ return_target,
+ etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
cmdbuf->vaddr, cmdbuf->size, 0);
@@ -437,6 +484,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
link_target);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
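
need_flush is now driven by a per-context sequence counter instead of a need_flush flag: the MMU code is expected to bump flush_seq whenever mappings change, and etnaviv_buffer_queue() compares its cached copy under the GPU lock. A sketch of the writer side; the function name is a placeholder, as the actual increment lives in the MMU mapping paths outside this file:

	static void my_mark_mappings_dirty(struct etnaviv_iommu_context *context)
	{
		/* paired with the READ_ONCE() of flush_seq in etnaviv_buffer_queue() */
		context->flush_seq++;
	}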
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index a3c44f145c1d..9dc20d892c15 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -3,27 +3,26 @@
* Copyright (C) 2017-2018 Etnaviv Project
*/
+#include <linux/dma-mapping.h>
+
#include <drm/drm_mm.h>
#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
-#define SUBALLOC_SIZE SZ_256K
+#define SUBALLOC_SIZE SZ_512K
#define SUBALLOC_GRANULE SZ_4K
#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)
struct etnaviv_cmdbuf_suballoc {
/* suballocated dma buffer properties */
- struct etnaviv_gpu *gpu;
+ struct device *dev;
void *vaddr;
dma_addr_t paddr;
- /* GPU mapping */
- u32 iova;
- struct drm_mm_node vram_node; /* only used on MMUv2 */
-
/* allocation management */
struct mutex lock;
DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
@@ -32,7 +31,7 @@ struct etnaviv_cmdbuf_suballoc {
};
struct etnaviv_cmdbuf_suballoc *
-etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
+etnaviv_cmdbuf_suballoc_new(struct device *dev)
{
struct etnaviv_cmdbuf_suballoc *suballoc;
int ret;
@@ -41,36 +40,44 @@ etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
if (!suballoc)
return ERR_PTR(-ENOMEM);
- suballoc->gpu = gpu;
+ suballoc->dev = dev;
mutex_init(&suballoc->lock);
init_waitqueue_head(&suballoc->free_event);
- suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
+ BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE);
+ suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
&suballoc->paddr, GFP_KERNEL);
- if (!suballoc->vaddr)
+ if (!suballoc->vaddr) {
+ ret = -ENOMEM;
goto free_suballoc;
-
- ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
- &suballoc->vram_node, SUBALLOC_SIZE,
- &suballoc->iova);
- if (ret)
- goto free_dma;
+ }
return suballoc;
-free_dma:
- dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
free_suballoc:
kfree(suballoc);
- return NULL;
+ return ERR_PTR(ret);
+}
+
+int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base)
+{
+ return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
+ suballoc->paddr, SUBALLOC_SIZE);
+}
+
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
+{
+ etnaviv_iommu_put_suballoc_va(context, mapping);
}
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
{
- etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
- SUBALLOC_SIZE, suballoc->iova);
- dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
+ dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
suballoc->paddr);
kfree(suballoc);
}
@@ -95,7 +102,7 @@ retry:
suballoc->free_space,
msecs_to_jiffies(10 * 1000));
if (!ret) {
- dev_err(suballoc->gpu->dev,
+ dev_err(suballoc->dev,
"Timeout waiting for cmdbuf space\n");
return -ETIMEDOUT;
}
@@ -123,9 +130,10 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
wake_up_all(&suballoc->free_event);
}
-u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
+ struct etnaviv_vram_mapping *mapping)
{
- return buf->suballoc->iova + buf->suballoc_offset;
+ return mapping->iova + buf->suballoc_offset;
}
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index 4d5d1a77eb2a..ad6fd8eb0378 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -8,7 +8,9 @@
#include <linux/types.h>
-struct etnaviv_gpu;
+struct device;
+struct etnaviv_iommu_context;
+struct etnaviv_vram_mapping;
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_perfmon_request;
@@ -23,15 +25,22 @@ struct etnaviv_cmdbuf {
};
struct etnaviv_cmdbuf_suballoc *
-etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
+etnaviv_cmdbuf_suballoc_new(struct device *dev);
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
+int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base);
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping);
int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
struct etnaviv_cmdbuf *cmdbuf, u32 size);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
-u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
+ struct etnaviv_vram_mapping *mapping);
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
#endif /* __ETNAVIV_CMDBUF_H__ */
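
With the prototypes above, the suballocator is created once per device and then mapped into each IOMMU context separately; command-buffer GPU addresses are resolved against a per-context mapping. A minimal usage sketch; ctx, ctx_mapping, memory_base and cmdbuf are placeholder variables and error paths are shortened:

	suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(suballoc))
		return PTR_ERR(suballoc);

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx_mapping, memory_base);
	if (ret)
		return ret;

	ret = etnaviv_cmdbuf_init(suballoc, &cmdbuf, PAGE_SIZE);
	if (ret)
		return ret;

	gpu_va = etnaviv_cmdbuf_get_va(&cmdbuf, &ctx_mapping);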
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 7eb7cf9c3fa8..6b43c1c94e8f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -4,8 +4,17 @@
*/
#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
+#include <drm/drm_prime.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -41,26 +50,38 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx;
- int i;
+ int ret, i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
+ priv->cmdbuf_suballoc);
+ if (!ctx->mmu) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
if (gpu) {
- rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ sched = &gpu->sched;
drm_sched_entity_init(&ctx->sched_entity[i],
- &rq, 1, NULL);
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
}
}
file->driver_priv = ctx;
return 0;
+
+out_free:
+ kfree(ctx);
+ return ret;
}
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
@@ -76,6 +97,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
+ etnaviv_iommu_context_put(ctx->mmu);
+
kfree(ctx);
}
@@ -107,12 +130,29 @@ static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
struct drm_printer p = drm_seq_file_printer(m);
+ struct etnaviv_iommu_context *mmu_context;
seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
- mutex_lock(&gpu->mmu->lock);
- drm_mm_print(&gpu->mmu->mm, &p);
- mutex_unlock(&gpu->mmu->lock);
+ /*
+ * Lock the GPU to avoid a MMU context switch just now and elevate
+ * the refcount of the current context to avoid it disappearing from
+ * under our feet.
+ */
+ mutex_lock(&gpu->lock);
+ mmu_context = gpu->mmu_context;
+ if (mmu_context)
+ etnaviv_iommu_context_get(mmu_context);
+ mutex_unlock(&gpu->lock);
+
+ if (!mmu_context)
+ return 0;
+
+ mutex_lock(&mmu_context->lock);
+ drm_mm_print(&mmu_context->mm, &p);
+ mutex_unlock(&mmu_context->lock);
+
+ etnaviv_iommu_context_put(mmu_context);
return 0;
}
@@ -243,11 +283,6 @@ static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
args->flags, &args->handle);
}
-#define TS(t) ((struct timespec){ \
- .tv_sec = (t).tv_sec, \
- .tv_nsec = (t).tv_nsec \
-})
-
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -262,7 +297,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
drm_gem_object_put_unlocked(obj);
@@ -315,7 +350,7 @@ static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
{
struct drm_etnaviv_wait_fence *args = data;
struct etnaviv_drm_private *priv = dev->dev_private;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct etnaviv_gpu *gpu;
if (args->flags & ~(ETNA_WAIT_NONBLOCK))
@@ -364,7 +399,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_wait *args = data;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct drm_gem_object *obj;
struct etnaviv_gpu *gpu;
int ret;
@@ -430,17 +465,17 @@ static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
- ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -462,17 +497,13 @@ static const struct file_operations fops = {
};
static struct drm_driver etnaviv_drm_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_PRIME |
- DRIVER_RENDER,
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
@@ -490,7 +521,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 2,
+ .minor = 3,
};
/*
@@ -521,23 +552,32 @@ static int etnaviv_bind(struct device *dev)
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
+ priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
+ if (IS_ERR(priv->cmdbuf_suballoc)) {
+ dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
+ ret = PTR_ERR(priv->cmdbuf_suballoc);
+ goto out_free_priv;
+ }
+
dev_set_drvdata(dev, drm);
ret = component_bind_all(dev, drm);
if (ret < 0)
- goto out_bind;
+ goto out_destroy_suballoc;
load_gpu(drm);
ret = drm_dev_register(drm, 0);
if (ret)
- goto out_register;
+ goto out_unbind;
return 0;
-out_register:
+out_unbind:
component_unbind_all(dev, drm);
-out_bind:
+out_destroy_suballoc:
+ etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+out_free_priv:
kfree(priv);
out_put:
drm_dev_put(drm);
@@ -556,6 +596,8 @@ static void etnaviv_unbind(struct device *dev)
dev->dma_parms = NULL;
+ etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+
drm->dev_private = NULL;
kfree(priv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 8798423705e1..efc656efeb0f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -6,21 +6,12 @@
#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>
-#include <linux/sizes.h>
-#include <linux/mm_types.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
@@ -31,12 +22,12 @@ struct etnaviv_gpu;
struct etnaviv_mmu;
struct etnaviv_gem_object;
struct etnaviv_gem_submit;
+struct etnaviv_iommu_global;
+
+#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
struct etnaviv_file_private {
- /*
- * When per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
+ struct etnaviv_iommu_context *mmu;
struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
@@ -45,6 +36,9 @@ struct etnaviv_drm_private {
struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+ struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+ struct etnaviv_iommu_global *mmu_global;
+
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
@@ -67,7 +61,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -76,10 +70,11 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
-u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ struct etnaviv_iommu_context *mmu,
unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
@@ -112,11 +107,12 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
* between the specified timeout and the current CLOCK_MONOTONIC time.
*/
static inline unsigned long etnaviv_timeout_to_jiffies(
- const struct timespec *timeout)
+ const struct drm_etnaviv_timespec *timeout)
{
- struct timespec64 ts, to;
-
- to = timespec_to_timespec64(*timeout);
+ struct timespec64 ts, to = {
+ .tv_sec = timeout->tv_sec,
+ .tv_nsec = timeout->tv_nsec,
+ };
ktime_get_ts64(&ts);
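
The hunk above only shows the start of the conversion; the tail of the helper is untouched by this patch. Assuming it still subtracts the current CLOCK_MONOTONIC time and clamps already-expired timeouts, as its kernel-doc states, the remainder would look roughly like:

	ktime_get_ts64(&ts);

	/* timeouts before "now" have already expired */
	if (timespec64_compare(&to, &ts) <= 0)
		return 0;

	ts = timespec64_sub(to, ts);

	return timespec64_to_jiffies(&ts);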
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9a6f5b65488f..648cf0207309 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -4,6 +4,8 @@
*/
#include <linux/devcoredump.h>
+#include <linux/moduleparam.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
@@ -91,9 +93,9 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
}
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
- struct etnaviv_gpu *gpu, size_t mmu_size)
+ struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
- etnaviv_iommu_dump(gpu->mmu, iter->data);
+ etnaviv_iommu_dump(mmu, iter->data);
etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
@@ -108,46 +110,35 @@ static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
etnaviv_core_dump_header(iter, type, iter->data + size);
}
-void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
+ struct etnaviv_gpu *gpu = submit->gpu;
struct core_dump_iterator iter;
- struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
- struct etnaviv_gem_submit *submit;
- struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ int i;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
return;
etnaviv_dump_core = false;
- mutex_lock(&gpu->mmu->lock);
+ mutex_lock(&gpu->mmu_context->lock);
- mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
+ mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
- /* We always dump registers, mmu, ring and end marker */
- n_obj = 4;
+ /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+ n_obj = 5;
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
- mmu_size + gpu->buffer.size;
-
- /* Add in the active command buffers */
- list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
- submit = to_etnaviv_submit(s_job);
- file_size += submit->cmdbuf.size;
- n_obj++;
- }
+ mmu_size + gpu->buffer.size + submit->cmdbuf.size;
/* Add in the active buffer objects */
- list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
- if (!vram->use)
- continue;
-
- obj = vram->object;
+ for (i = 0; i < submit->nr_bos; i++) {
+ obj = submit->bos[i].obj;
file_size += obj->base.size;
n_bomap_pages += obj->base.size >> PAGE_SHIFT;
n_obj++;
@@ -166,7 +157,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!iter.start) {
- mutex_unlock(&gpu->mmu->lock);
+ mutex_unlock(&gpu->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -178,17 +169,18 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
memset(iter.hdr, 0, iter.data - iter.start);
etnaviv_core_dump_registers(&iter, gpu);
- etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
+ etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
gpu->buffer.size,
- etnaviv_cmdbuf_get_va(&gpu->buffer));
+ etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping));
- list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
- submit = to_etnaviv_submit(s_job);
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
- submit->cmdbuf.vaddr, submit->cmdbuf.size,
- etnaviv_cmdbuf_get_va(&submit->cmdbuf));
- }
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping));
+
+ mutex_unlock(&gpu->mmu_context->lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {
@@ -201,14 +193,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
bomap_start = bomap = NULL;
}
- list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_vram_mapping *vram;
struct page **pages;
void *vaddr;
- if (vram->use == 0)
- continue;
-
- obj = vram->object;
+ obj = submit->bos[i].obj;
+ vram = submit->bos[i].mapping;
mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
@@ -232,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
obj->base.size);
}
- mutex_unlock(&gpu->mmu->lock);
-
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
index 2d916c2667ee..a125c46b895b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -35,8 +35,8 @@ struct etnaviv_dump_registers {
};
#ifdef __KERNEL__
-struct etnaviv_gpu;
-void etnaviv_core_dump(struct etnaviv_gpu *gpu);
+struct etnaviv_gem_submit;
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit);
#endif
#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index e8778ebb72e6..6adea180d629 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -3,10 +3,11 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
-#include <linux/spinlock.h>
+#include <drm/drm_prime.h>
+#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
@@ -222,30 +223,18 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
- struct etnaviv_iommu *mmu)
+ struct etnaviv_iommu_context *context)
{
struct etnaviv_vram_mapping *mapping;
list_for_each_entry(mapping, &obj->vram_list, obj_node) {
- if (mapping->mmu == mmu)
+ if (mapping->context == context)
return mapping;
}
return NULL;
}
-void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
-{
- struct etnaviv_gem_object *etnaviv_obj = mapping->object;
-
- drm_gem_object_get(&etnaviv_obj->base);
-
- mutex_lock(&etnaviv_obj->lock);
- WARN_ON(mapping->use == 0);
- mapping->use += 1;
- mutex_unlock(&etnaviv_obj->lock);
-}
-
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
@@ -259,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
- struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
+ struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+ u64 va)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct etnaviv_vram_mapping *mapping;
@@ -267,7 +257,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
int ret = 0;
mutex_lock(&etnaviv_obj->lock);
- mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
if (mapping) {
/*
* Holding the object lock prevents the use count changing
@@ -276,12 +266,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
* the MMU owns this mapping to close this race.
*/
if (mapping->use == 0) {
- mutex_lock(&gpu->mmu->lock);
- if (mapping->mmu == gpu->mmu)
+ mutex_lock(&mmu_context->lock);
+ if (mapping->context == mmu_context)
mapping->use += 1;
else
mapping = NULL;
- mutex_unlock(&gpu->mmu->lock);
+ mutex_unlock(&mmu_context->lock);
if (mapping)
goto out;
} else {
@@ -314,15 +304,19 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node);
}
- mapping->mmu = gpu->mmu;
+ etnaviv_iommu_context_get(mmu_context);
+ mapping->context = mmu_context;
mapping->use = 1;
- ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
- mapping);
- if (ret < 0)
+ ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+ mmu_context->global->memory_base,
+ mapping, va);
+ if (ret < 0) {
+ etnaviv_iommu_context_put(mmu_context);
kfree(mapping);
- else
+ } else {
list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+ }
out:
mutex_unlock(&etnaviv_obj->lock);
@@ -379,7 +373,7 @@ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
}
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
@@ -397,13 +391,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(obj->resv,
+ if (!dma_resv_test_signaled_rcu(obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = reservation_object_wait_timeout_rcu(obj->resv,
+ ret = dma_resv_wait_timeout_rcu(obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
@@ -437,7 +431,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
}
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
@@ -459,8 +453,8 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- struct reservation_object *robj = obj->resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj = obj->resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -536,12 +530,14 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
obj_node) {
- struct etnaviv_iommu *mmu = mapping->mmu;
+ struct etnaviv_iommu_context *context = mapping->context;
WARN_ON(mapping->use);
- if (mmu)
- etnaviv_iommu_unmap_gem(mmu, mapping);
+ if (context) {
+ etnaviv_iommu_unmap_gem(context, mapping);
+ etnaviv_iommu_context_put(context);
+ }
list_del(&mapping->obj_node);
kfree(mapping);
@@ -565,8 +561,7 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
}
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
- struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
- struct drm_gem_object **obj)
+ const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
struct etnaviv_gem_object *etnaviv_obj;
unsigned sz = sizeof(*etnaviv_obj);
@@ -594,8 +589,6 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
etnaviv_obj->flags = flags;
etnaviv_obj->ops = ops;
- if (robj)
- etnaviv_obj->base.resv = robj;
mutex_init(&etnaviv_obj->lock);
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
@@ -614,7 +607,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
size = PAGE_ALIGN(size);
- ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
+ ret = etnaviv_gem_new_impl(dev, size, flags,
&etnaviv_gem_shmem_ops, &obj);
if (ret)
goto fail;
@@ -646,13 +639,12 @@ fail:
}
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
- struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
- struct etnaviv_gem_object **res)
+ const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
struct drm_gem_object *obj;
int ret;
- ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
+ ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
if (ret)
return ret;
@@ -734,7 +726,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
struct etnaviv_gem_object *etnaviv_obj;
int ret;
- ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
&etnaviv_gem_userptr_ops, &etnaviv_obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 753c458497d0..6b68fe16041b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -6,7 +6,7 @@
#ifndef __ETNAVIV_GEM_H__
#define __ETNAVIV_GEM_H__
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -25,7 +25,7 @@ struct etnaviv_vram_mapping {
struct list_head scan_node;
struct list_head mmu_node;
struct etnaviv_gem_object *object;
- struct etnaviv_iommu *mmu;
+ struct etnaviv_iommu_context *context;
struct drm_mm_node vram_node;
unsigned int use;
u32 iova;
@@ -77,6 +77,7 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
struct etnaviv_gem_submit_bo {
u32 flags;
+ u64 va;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
struct dma_fence *excl;
@@ -93,6 +94,7 @@ struct etnaviv_gem_submit {
struct kref refcount;
struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
+ struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
struct dma_fence *out_fence, *in_fence;
int out_fence_id;
struct list_head node; /* GPU active submit list */
@@ -110,17 +112,16 @@ struct etnaviv_gem_submit {
void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
- struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
- struct etnaviv_gem_object **res);
+ const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
- struct drm_gem_object *obj, struct etnaviv_gpu *gpu);
-void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping);
+ struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+ u64 va);
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
#endif /* __ETNAVIV_GEM_H__ */
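
etnaviv_gem_mapping_get() now resolves a BO against a specific IOMMU context plus an optional softpin address, as used by the submit path later in this series. A short sketch mirroring that use; the variable names follow the structs above but the fragment itself is illustrative:

	mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base, submit->mmu_context,
					  submit->bos[i].va);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* With softpin the resolved IOVA must match the address userspace asked for. */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && submit->bos[i].va != mapping->iova)
		return -EINVAL;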
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 00e8b6a817e3..f24dd21c2363 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -3,7 +3,9 @@
* Copyright (C) 2014-2018 Etnaviv Project
*/
+#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
+
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
@@ -109,7 +111,6 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
int ret, npages;
ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
- attach->dmabuf->resv,
&etnaviv_gem_prime_ops, &etnaviv_obj);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1a636469eeda..3b0afa156d92 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -3,9 +3,15 @@
* Copyright (C) 2015 Etnaviv Project
*/
+#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
-#include <linux/reservation.h>
+#include <linux/file.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-resv.h>
#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -66,6 +72,14 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
}
submit->bos[i].flags = bo->flags;
+ if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
+ if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
+ DRM_ERROR("invalid softpin address\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ submit->bos[i].va = bo->presumed;
+ }
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
@@ -99,7 +113,7 @@ static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
if (submit->bos[i].flags & BO_LOCKED) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
- ww_mutex_unlock(&obj->resv->lock);
+ dma_resv_unlock(obj->resv);
submit->bos[i].flags &= ~BO_LOCKED;
}
}
@@ -119,8 +133,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&obj->resv->lock,
- ticket);
+ ret = dma_resv_lock_interruptible(obj->resv, ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
i);
@@ -147,8 +160,7 @@ fail:
obj = &submit->bos[contended].obj->base;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
- ticket);
+ ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
@@ -165,10 +177,10 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- struct reservation_object *robj = bo->obj->base.resv;
+ struct dma_resv *robj = bo->obj->base.resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
- ret = reservation_object_reserve_shared(robj, 1);
+ ret = dma_resv_reserve_shared(robj, 1);
if (ret)
return ret;
}
@@ -177,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+ ret = dma_resv_get_fences_rcu(robj, &bo->excl,
&bo->nr_shared,
&bo->shared);
if (ret)
return ret;
} else {
- bo->excl = reservation_object_get_excl_rcu(robj);
+ bo->excl = dma_resv_get_excl_rcu(robj);
}
}
@@ -199,10 +211,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(obj->resv,
+ dma_resv_add_excl_fence(obj->resv,
submit->out_fence);
else
- reservation_object_add_shared_fence(obj->resv,
+ dma_resv_add_shared_fence(obj->resv,
submit->out_fence);
submit_unlock_object(submit, i);
@@ -218,11 +230,17 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
struct etnaviv_vram_mapping *mapping;
mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
- submit->gpu);
+ submit->mmu_context,
+ submit->bos[i].va);
if (IS_ERR(mapping)) {
ret = PTR_ERR(mapping);
break;
}
+
+ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
+ submit->bos[i].va != mapping->iova)
+ return -EINVAL;
+
atomic_inc(&etnaviv_obj->gpu_active);
submit->bos[i].flags |= BO_PINNED;
@@ -255,6 +273,10 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
u32 *ptr = stream;
int ret;
+ /* Submits using softpin don't blend with relocs */
+ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
+ return -EINVAL;
+
for (i = 0; i < nr_relocs; i++) {
const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
struct etnaviv_gem_submit_bo *bo;
@@ -355,6 +377,12 @@ static void submit_cleanup(struct kref *kref)
if (submit->cmdbuf.suballoc)
etnaviv_cmdbuf_free(&submit->cmdbuf);
+ if (submit->mmu_context)
+ etnaviv_iommu_context_put(submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
@@ -433,6 +461,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
+ priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
+ DRM_ERROR("softpin requested on incompatible MMU\n");
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
@@ -490,12 +524,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_ww_acquire;
}
- ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8);
if (ret)
goto err_submit_objects;
submit->ctx = file->driver_priv;
+ etnaviv_iommu_context_get(submit->ctx->mmu);
+ submit->mmu_context = submit->ctx->mmu;
submit->exec_state = args->exec_state;
submit->flags = args->flags;
@@ -503,7 +539,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto err_submit_objects;
- if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
+ if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
+ !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
goto err_submit_objects;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 5418a1a87b2c..799ec20b267d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -5,9 +5,13 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/delay.h>
#include <linux/dma-fence.h>
-#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
@@ -38,6 +42,8 @@ static const struct platform_device_id gpu_ids[] = {
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+
switch (param) {
case ETNAVIV_PARAM_GPU_MODEL:
*value = gpu->identity.model;
@@ -143,6 +149,13 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.varyings_count;
break;
+ case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
+ if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
+ *value = ETNAVIV_SOFTPIN_START_ADDRESS;
+ else
+ *value = ~0ULL;
+ break;
+
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -596,6 +609,21 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
}
}
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+{
+ u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
+ u16 prefetch;
+
+ /* setup the MMU */
+ etnaviv_iommu_restore(gpu, gpu->mmu_context);
+
+ /* Start command processor */
+ prefetch = etnaviv_buffer_init(gpu);
+
+ etnaviv_gpu_start_fe(gpu, address, prefetch);
+}
+
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
/*
@@ -629,8 +657,6 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
- u16 prefetch;
-
if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
@@ -676,19 +702,12 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
/* setup the pulse eater */
etnaviv_gpu_setup_pulse_eater(gpu);
- /* setup the MMU */
- etnaviv_iommu_restore(gpu);
-
- /* Start command processor */
- prefetch = etnaviv_buffer_init(gpu);
-
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
- prefetch);
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
int ret, i;
ret = pm_runtime_get_sync(gpu->dev);
@@ -714,6 +733,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/*
+ * On cores with security features supported, we claim control over the
+ * security states.
+ */
+ if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
+ (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
+ gpu->sec_mode = ETNA_SEC_KERNEL;
+
+ ret = etnaviv_hw_reset(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset failed\n");
+ goto fail;
+ }
+
+ ret = etnaviv_iommu_global_init(gpu);
+ if (ret)
+ goto fail;
+
+ /*
* Set the GPU linear window to be at the end of the DMA window, where
* the CMA area is likely to reside. This ensures that we are able to
* map the command buffers while having the linear window overlap as
@@ -726,57 +763,21 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
(gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
if (dma_mask < PHYS_OFFSET + SZ_2G)
- gpu->memory_base = PHYS_OFFSET;
+ priv->mmu_global->memory_base = PHYS_OFFSET;
else
- gpu->memory_base = dma_mask - SZ_2G + 1;
+ priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
} else if (PHYS_OFFSET >= SZ_2G) {
dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
- gpu->memory_base = PHYS_OFFSET;
+ priv->mmu_global->memory_base = PHYS_OFFSET;
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
- /*
- * On cores with security features supported, we claim control over the
- * security states.
- */
- if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
- (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
- gpu->sec_mode = ETNA_SEC_KERNEL;
-
- ret = etnaviv_hw_reset(gpu);
- if (ret) {
- dev_err(gpu->dev, "GPU reset failed\n");
- goto fail;
- }
-
- gpu->mmu = etnaviv_iommu_new(gpu);
- if (IS_ERR(gpu->mmu)) {
- dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
- ret = PTR_ERR(gpu->mmu);
- goto fail;
- }
-
- gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
- if (IS_ERR(gpu->cmdbuf_suballoc)) {
- dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
- ret = PTR_ERR(gpu->cmdbuf_suballoc);
- goto destroy_iommu;
- }
-
/* Create buffer: */
- ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
PAGE_SIZE);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
- goto destroy_suballoc;
- }
-
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
- etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
- ret = -EINVAL;
- dev_err(gpu->dev,
- "command buffer outside valid memory window\n");
- goto free_buffer;
+ goto fail;
}
/* Setup event management */
@@ -795,17 +796,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
+ gpu->initialized = true;
+
return 0;
-free_buffer:
- etnaviv_cmdbuf_free(&gpu->buffer);
- gpu->buffer.suballoc = NULL;
-destroy_suballoc:
- etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
- gpu->cmdbuf_suballoc = NULL;
-destroy_iommu:
- etnaviv_iommu_destroy(gpu->mmu);
- gpu->mmu = NULL;
fail:
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
@@ -999,6 +993,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
etnaviv_gpu_hw_init(gpu);
gpu->exec_state = -1;
+ gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1137,7 +1132,7 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
* Cmdstream submission/retirement:
*/
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 id, struct timespec *timeout)
+ u32 id, struct drm_etnaviv_timespec *timeout)
{
struct dma_fence *fence;
int ret;
@@ -1184,7 +1179,8 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* that lock in this function while waiting.
*/
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout)
{
unsigned long remaining;
long ret;
@@ -1305,6 +1301,15 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
+ if (!gpu->mmu_context) {
+ etnaviv_iommu_context_get(submit->mmu_context);
+ gpu->mmu_context = submit->mmu_context;
+ etnaviv_gpu_start_fe_idleloop(gpu);
+ } else {
+ etnaviv_iommu_context_get(gpu->mmu_context);
+ submit->prev_mmu_context = gpu->mmu_context;
+ }
+
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
kref_get(&submit->refcount);
@@ -1314,8 +1319,8 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
gpu->event[event[0]].fence = gpu_fence;
submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
- etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
- &submit->cmdbuf);
+ etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
+ event[0], &submit->cmdbuf);
if (submit->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
@@ -1517,7 +1522,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->buffer.suballoc) {
+ if (gpu->initialized && gpu->mmu_context) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1529,8 +1534,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
* we fail, just warn and continue.
*/
etnaviv_gpu_wait_idle(gpu, 100);
+
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
}
+ gpu->exec_state = -1;
+
return etnaviv_gpu_clk_disable(gpu);
}
@@ -1546,8 +1556,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
-
mutex_unlock(&gpu->lock);
return 0;
@@ -1676,17 +1684,10 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
- if (gpu->buffer.suballoc)
+ if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
-
- if (gpu->cmdbuf_suballoc) {
- etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
- gpu->cmdbuf_suballoc = NULL;
- }
-
- if (gpu->mmu) {
- etnaviv_iommu_destroy(gpu->mmu);
- gpu->mmu = NULL;
+ etnaviv_iommu_global_fini(gpu);
+ gpu->initialized = false;
}
gpu->drm = NULL;
@@ -1714,7 +1715,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- struct resource *res;
int err;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1726,8 +1726,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
mutex_init(&gpu->fence_lock);
/* Map registers: */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpu->mmio = devm_ioremap_resource(&pdev->dev, res);
+ gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpu->mmio))
return PTR_ERR(gpu->mmio);
@@ -1825,7 +1824,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return ret;
/* Re-initialise the basic hardware state */
- if (gpu->drm && gpu->buffer.suballoc) {
+ if (gpu->drm && gpu->initialized) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 9bcf151f706b..97bb48042b4d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -7,6 +7,8 @@
#define __ETNAVIV_GPU_H__
#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
@@ -84,7 +86,6 @@ struct etnaviv_event {
};
struct etnaviv_cmdbuf_suballoc;
-struct etnaviv_cmdbuf;
struct regulator;
struct clk;
@@ -99,14 +100,12 @@ struct etnaviv_gpu {
enum etnaviv_sec_mode sec_mode;
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
+ bool initialized;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
int exec_state;
- /* bus base address of memory */
- u32 memory_base;
-
/* event management: */
DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
struct etnaviv_event event[ETNA_NR_EVENTS];
@@ -134,8 +133,8 @@ struct etnaviv_gpu {
void __iomem *mmio;
int irq;
- struct etnaviv_iommu *mmu;
- struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+ struct etnaviv_iommu_context *mmu_context;
+ unsigned int flush_seq;
/* Power Control: */
struct clk *clk_bus;
@@ -170,9 +169,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 fence, struct timespec *timeout);
+ u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index b163bdbcb880..1a7c89a67bea 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -3,15 +3,14 @@
* Copyright (C) 2014-2018 Etnaviv Project
*/
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
#include "state_hi.xml.h"
#define PT_SIZE SZ_2M
@@ -19,124 +18,89 @@
#define GPU_MEM_START 0x80000000
-struct etnaviv_iommuv1_domain {
- struct etnaviv_iommu_domain base;
+struct etnaviv_iommuv1_context {
+ struct etnaviv_iommu_context base;
u32 *pgtable_cpu;
dma_addr_t pgtable_dma;
};
-static struct etnaviv_iommuv1_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv1_context *
+to_v1_context(struct etnaviv_iommu_context *context)
{
- return container_of(domain, struct etnaviv_iommuv1_domain, base);
+ return container_of(context, struct etnaviv_iommuv1_context, base);
}
-static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
+static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
- u32 *p;
- int i;
-
- etnaviv_domain->base.bad_page_cpu =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->base.bad_page_cpu)
- return -ENOMEM;
-
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
-
- etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- PT_SIZE,
- &etnaviv_domain->pgtable_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->pgtable_cpu) {
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
- return -ENOMEM;
- }
-
- memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
- PT_ENTRIES);
-
- return 0;
-}
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
-static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
-{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ drm_mm_takedown(&context->mm);
- dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
- etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
+ dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
+ v1_context->pgtable_dma);
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
+ context->global->v1.shared_context = NULL;
- kfree(etnaviv_domain);
+ kfree(v1_context);
}
-static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- etnaviv_domain->pgtable_cpu[index] = paddr;
+ v1_context->pgtable_cpu[index] = paddr;
return 0;
}
-static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
+ v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
return SZ_4K;
}
-static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
return PT_SIZE;
}
-static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
+ void *buf)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
- memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
+ memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}
-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
/* set base addresses */
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);
/* set page table address in MC */
- pgtable = (u32)etnaviv_domain->pgtable_dma;
+ pgtable = (u32)v1_context->pgtable_dma;
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -145,39 +109,64 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
- .free = etnaviv_iommuv1_domain_free,
+const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
+ .free = etnaviv_iommuv1_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
.dump_size = etnaviv_iommuv1_dump_size,
.dump = etnaviv_iommuv1_dump,
+ .restore = etnaviv_iommuv1_restore,
};
-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain;
- struct etnaviv_iommu_domain *domain;
- int ret;
+ struct etnaviv_iommuv1_context *v1_context;
+ struct etnaviv_iommu_context *context;
+
+ mutex_lock(&global->lock);
+
+ /*
+ * MMUv1 does not support switching between different contexts without
+ * a stop-the-world operation, so we only support a single shared
+ * context with this version.
+ */
+ if (global->v1.shared_context) {
+ context = global->v1.shared_context;
+ etnaviv_iommu_context_get(context);
+ mutex_unlock(&global->lock);
+ return context;
+ }
- etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
- if (!etnaviv_domain)
+ v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
+ if (!v1_context) {
+ mutex_unlock(&global->lock);
return NULL;
+ }
+
+ v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
+ &v1_context->pgtable_dma,
+ GFP_KERNEL);
+ if (!v1_context->pgtable_cpu)
+ goto out_free;
- domain = &etnaviv_domain->base;
+ memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
- domain->dev = gpu->dev;
- domain->base = GPU_MEM_START;
- domain->size = PT_ENTRIES * SZ_4K;
- domain->ops = &etnaviv_iommuv1_ops;
+ context = &v1_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
+ context->global->v1.shared_context = context;
- ret = __etnaviv_iommu_init(etnaviv_domain);
- if (ret)
- goto out_free;
+ mutex_unlock(&global->lock);
- return &etnaviv_domain->base;
+ return context;
out_free:
- kfree(etnaviv_domain);
+ mutex_unlock(&global->lock);
+ kfree(v1_context);
return NULL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
deleted file mode 100644
index b279404ce91a..000000000000
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2014-2018 Etnaviv Project
- */
-
-#ifndef __ETNAVIV_IOMMU_H__
-#define __ETNAVIV_IOMMU_H__
-
-struct etnaviv_gpu;
-struct etnaviv_iommu_domain;
-
-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
-
-struct etnaviv_iommu_domain *
-etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
-
-#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f794e04be9e6..f8bf488e9d71 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -3,16 +3,16 @@
* Copyright (C) 2016-2018 Etnaviv Project
*/
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/vmalloc.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"
@@ -27,11 +27,9 @@
#define MMUv2_MAX_STLB_ENTRIES 1024
-struct etnaviv_iommuv2_domain {
- struct etnaviv_iommu_domain base;
- /* P(age) T(able) A(rray) */
- u64 *pta_cpu;
- dma_addr_t pta_dma;
+struct etnaviv_iommuv2_context {
+ struct etnaviv_iommu_context base;
+ unsigned short id;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -40,41 +38,62 @@ struct etnaviv_iommuv2_domain {
dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};
-static struct etnaviv_iommuv2_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv2_context *
+to_v2_context(struct etnaviv_iommu_context *context)
{
- return container_of(domain, struct etnaviv_iommuv2_domain, base);
+ return container_of(context, struct etnaviv_iommuv2_context, base);
}
+static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ int i;
+
+ drm_mm_takedown(&context->mm);
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+ if (v2_context->stlb_cpu[i])
+ dma_free_wc(context->global->dev, SZ_4K,
+ v2_context->stlb_cpu[i],
+ v2_context->stlb_dma[i]);
+ }
+
+ dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
+ v2_context->mtlb_dma);
+
+ clear_bit(v2_context->id, context->global->v2.pta_alloc);
+
+ vfree(v2_context);
+}
static int
-etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
int stlb)
{
- if (etnaviv_domain->stlb_cpu[stlb])
+ if (v2_context->stlb_cpu[stlb])
return 0;
- etnaviv_domain->stlb_cpu[stlb] =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->stlb_dma[stlb],
+ v2_context->stlb_cpu[stlb] =
+ dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
+ &v2_context->stlb_dma[stlb],
GFP_KERNEL);
- if (!etnaviv_domain->stlb_cpu[stlb])
+ if (!v2_context->stlb_cpu[stlb])
return -ENOMEM;
- memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+ memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
SZ_4K / sizeof(u32));
- etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
- MMUv2_PTE_PRESENT;
+ v2_context->mtlb_cpu[stlb] =
+ v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
+
return 0;
}
-static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
int mtlb_entry, stlb_entry, ret;
u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
@@ -90,20 +109,19 @@ static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
- ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
+ ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
if (ret)
return ret;
- etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+ v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
return 0;
}
-static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
int mtlb_entry, stlb_entry;
if (size != SZ_4K)
@@ -117,118 +135,37 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
return SZ_4K;
}
-static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
{
- int ret;
-
- /* allocate scratch page */
- etnaviv_domain->base.bad_page_cpu =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->base.bad_page_cpu) {
- ret = -ENOMEM;
- goto fail_mem;
- }
-
- memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
- SZ_4K / sizeof(u32));
-
- etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- SZ_4K, &etnaviv_domain->pta_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->pta_cpu) {
- ret = -ENOMEM;
- goto fail_mem;
- }
-
- etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- SZ_4K, &etnaviv_domain->mtlb_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->mtlb_cpu) {
- ret = -ENOMEM;
- goto fail_mem;
- }
-
- memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
- MMUv2_MAX_STLB_ENTRIES);
-
- return 0;
-
-fail_mem:
- if (etnaviv_domain->base.bad_page_cpu)
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
-
- if (etnaviv_domain->pta_cpu)
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
- if (etnaviv_domain->mtlb_cpu)
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
- return ret;
-}
-
-static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
-{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
- int i;
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
- for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
- if (etnaviv_domain->stlb_cpu[i])
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->stlb_cpu[i],
- etnaviv_domain->stlb_dma[i]);
- }
-
- vfree(etnaviv_domain);
-}
-
-static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
-{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
size_t dump_size = SZ_4K;
int i;
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
- if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
dump_size += SZ_4K;
return dump_size;
}
-static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
int i;
- memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+ memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
buf += SZ_4K;
- for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
- if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
- memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
+ memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+ buf += SZ_4K;
+ }
}
-static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
u16 prefetch;
/* If the MMU is already enabled the state is still there. */
@@ -236,8 +173,8 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
return;
prefetch = etnaviv_buffer_config_mmuv2(gpu,
- (u32)etnaviv_domain->mtlb_dma,
- (u32)etnaviv_domain->base.bad_page_dma);
+ (u32)v2_context->mtlb_dma,
+ (u32)context->global->bad_page_dma);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
@@ -245,10 +182,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
-static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
u16 prefetch;
/* If the MMU is already enabled the state is still there. */
@@ -256,26 +193,26 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
return;
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
- lower_32_bits(etnaviv_domain->pta_dma));
+ lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
- upper_32_bits(etnaviv_domain->pta_dma));
+ upper_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
- lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ lower_32_bits(context->global->bad_page_dma));
gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
- lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ lower_32_bits(context->global->bad_page_dma));
gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
- upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
+ upper_32_bits(context->global->bad_page_dma)) |
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
- upper_32_bits(etnaviv_domain->base.bad_page_dma)));
+ upper_32_bits(context->global->bad_page_dma)));
- etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
- VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+ context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
/* trigger a PTA load through the FE */
- prefetch = etnaviv_buffer_config_pta(gpu);
+ prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
@@ -283,14 +220,28 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ return v2_context->mtlb_dma;
+}
+
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ return v2_context->id;
+}
+
+static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
switch (gpu->sec_mode) {
case ETNA_SEC_NONE:
- etnaviv_iommuv2_restore_nonsec(gpu);
+ etnaviv_iommuv2_restore_nonsec(gpu, context);
break;
case ETNA_SEC_KERNEL:
- etnaviv_iommuv2_restore_sec(gpu);
+ etnaviv_iommuv2_restore_sec(gpu, context);
break;
default:
WARN(1, "unhandled GPU security mode\n");
@@ -298,39 +249,58 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
}
}
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
- .free = etnaviv_iommuv2_domain_free,
+const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
+ .free = etnaviv_iommuv2_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
.dump_size = etnaviv_iommuv2_dump_size,
.dump = etnaviv_iommuv2_dump,
+ .restore = etnaviv_iommuv2_restore,
};
-struct etnaviv_iommu_domain *
-etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain;
- struct etnaviv_iommu_domain *domain;
- int ret;
+ struct etnaviv_iommuv2_context *v2_context;
+ struct etnaviv_iommu_context *context;
- etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
- if (!etnaviv_domain)
+ v2_context = vzalloc(sizeof(*v2_context));
+ if (!v2_context)
return NULL;
- domain = &etnaviv_domain->base;
+ mutex_lock(&global->lock);
+ v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
+ ETNAVIV_PTA_ENTRIES);
+ if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
+ set_bit(v2_context->id, global->v2.pta_alloc);
+ } else {
+ mutex_unlock(&global->lock);
+ goto out_free;
+ }
+ mutex_unlock(&global->lock);
- domain->dev = gpu->dev;
- domain->base = SZ_4K;
- domain->size = (u64)SZ_1G * 4 - SZ_4K;
- domain->ops = &etnaviv_iommuv2_ops;
+ v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
+ &v2_context->mtlb_dma, GFP_KERNEL);
+ if (!v2_context->mtlb_cpu)
+ goto out_free_id;
- ret = etnaviv_iommuv2_init(etnaviv_domain);
- if (ret)
- goto out_free;
+ memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+ MMUv2_MAX_STLB_ENTRIES);
+
+ global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;
+
+ context = &v2_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
- return &etnaviv_domain->base;
+ return context;
+out_free_id:
+ clear_bit(v2_context->id, global->v2.pta_alloc);
out_free:
- vfree(etnaviv_domain);
+ vfree(v2_context);
return NULL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 8069f9f36a2e..3607d348c298 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -3,15 +3,17 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
-#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
-static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
+static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
@@ -24,7 +26,8 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
}
while (unmapped < size) {
- unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ unmapped_page = context->global->ops->unmap(context, iova,
+ pgsize);
if (!unmapped_page)
break;
@@ -33,7 +36,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
}
}
-static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_context_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
@@ -49,7 +52,8 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
}
while (size) {
- ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ ret = context->global->ops->map(context, iova, paddr, pgsize,
+ prot);
if (ret)
break;
@@ -60,21 +64,19 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
/* unroll mapping in case something went wrong */
if (ret)
- etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
+ etnaviv_context_unmap(context, orig_iova, orig_size - size);
return ret;
}
-static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
-{
- struct etnaviv_iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
+{
+ struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
int ret;
- if (!domain || !sgt)
+ if (!context || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -83,7 +85,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
- ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
+ ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
goto fail;
@@ -98,16 +100,15 @@ fail:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- etnaviv_domain_unmap(domain, da, bytes);
+ etnaviv_context_unmap(context, da, bytes);
da += bytes;
}
return ret;
}
-static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len)
{
- struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
@@ -115,7 +116,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- etnaviv_domain_unmap(domain, da, bytes);
+ etnaviv_context_unmap(context, da, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
@@ -125,24 +126,24 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
}
}
-static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+ etnaviv_iommu_unmap(context, mapping->vram_node.start,
etnaviv_obj->sgt, etnaviv_obj->base.size);
drm_mm_remove_node(&mapping->vram_node);
}
-static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
int ret;
- lockdep_assert_held(&mmu->lock);
+ lockdep_assert_held(&context->lock);
while (1) {
struct etnaviv_vram_mapping *m, *n;
@@ -150,17 +151,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
- ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+ ret = drm_mm_insert_node_in_range(&context->mm, node,
size, 0, 0, 0, U64_MAX, mode);
if (ret != -ENOSPC)
break;
/* Try to retire some entries */
- drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
+ drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
found = 0;
INIT_LIST_HEAD(&list);
- list_for_each_entry(free, &mmu->mappings, mmu_node) {
+ list_for_each_entry(free, &context->mappings, mmu_node) {
/* If this vram node has not been used, skip this. */
if (!free->vram_node.mm)
continue;
@@ -202,8 +203,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* this mapping.
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
- etnaviv_iommu_remove_mapping(mmu, m);
- m->mmu = NULL;
+ etnaviv_iommu_remove_mapping(context, m);
+ m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
}
@@ -219,9 +220,16 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
return ret;
}
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+ struct drm_mm_node *node, size_t size, u64 va)
+{
+ return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
- struct etnaviv_vram_mapping *mapping)
+ struct etnaviv_vram_mapping *mapping, u64 va)
{
struct sg_table *sgt = etnaviv_obj->sgt;
struct drm_mm_node *node;
@@ -229,17 +237,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
lockdep_assert_held(&etnaviv_obj->lock);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* v1 MMU can optimize single entry (contiguous) scatterlists */
- if (mmu->version == ETNAVIV_IOMMU_V1 &&
+ if (context->global->version == ETNAVIV_IOMMU_V1 &&
sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
u32 iova;
iova = sg_dma_address(sgt->sgl) - memory_base;
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
ret = 0;
goto unlock;
}
@@ -247,12 +255,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
node = &mapping->vram_node;
- ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
+ if (va)
+ ret = etnaviv_iommu_insert_exact(context, node,
+ etnaviv_obj->base.size, va);
+ else
+ ret = etnaviv_iommu_find_iova(context, node,
+ etnaviv_obj->base.size);
if (ret < 0)
goto unlock;
mapping->iova = node->start;
- ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+ ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
@@ -260,130 +273,244 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
goto unlock;
}
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mmu->need_flush = true;
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ context->flush_seq++;
unlock:
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
WARN_ON(mapping->use);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* If the vram node is on the mm, unmap and remove the node */
- if (mapping->vram_node.mm == &mmu->mm)
- etnaviv_iommu_remove_mapping(mmu, mapping);
+ if (mapping->vram_node.mm == &context->mm)
+ etnaviv_iommu_remove_mapping(context, mapping);
list_del(&mapping->mmu_node);
- mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
+ context->flush_seq++;
+ mutex_unlock(&context->lock);
}
-void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+static void etnaviv_iommu_context_free(struct kref *kref)
{
- drm_mm_takedown(&mmu->mm);
- mmu->domain->ops->free(mmu->domain);
- kfree(mmu);
+ struct etnaviv_iommu_context *context =
+ container_of(kref, struct etnaviv_iommu_context, refcount);
+
+ etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
+
+ context->global->ops->free(context);
+}
+
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
+{
+ kref_put(&context->refcount, etnaviv_iommu_context_free);
}
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc)
{
- enum etnaviv_iommu_version version;
- struct etnaviv_iommu *mmu;
+ struct etnaviv_iommu_context *ctx;
+ int ret;
- mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
- if (!mmu)
- return ERR_PTR(-ENOMEM);
+ if (global->version == ETNAVIV_IOMMU_V1)
+ ctx = etnaviv_iommuv1_context_alloc(global);
+ else
+ ctx = etnaviv_iommuv2_context_alloc(global);
- if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
- mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V1;
- } else {
- mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V2;
- }
+ if (!ctx)
+ return NULL;
- if (!mmu->domain) {
- dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
- kfree(mmu);
- return ERR_PTR(-ENOMEM);
- }
+ ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
+ global->memory_base);
+ if (ret)
+ goto out_free;
- mmu->gpu = gpu;
- mmu->version = version;
- mutex_init(&mmu->lock);
- INIT_LIST_HEAD(&mmu->mappings);
+ if (global->version == ETNAVIV_IOMMU_V1 &&
+ ctx->cmdbuf_mapping.iova > 0x80000000) {
+ dev_err(global->dev,
+ "command buffer outside valid memory window\n");
+ goto out_unmap;
+ }
- drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
+ return ctx;
- return mmu;
+out_unmap:
+ etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+ global->ops->free(ctx);
+ return NULL;
}
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
- etnaviv_iommuv1_restore(gpu);
- else
- etnaviv_iommuv2_restore(gpu);
+ context->global->ops->restore(gpu, context);
}
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
- struct drm_mm_node *vram_node, size_t size,
- u32 *iova)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ mutex_lock(&context->lock);
- if (mmu->version == ETNAVIV_IOMMU_V1) {
- *iova = paddr - gpu->memory_base;
+ if (mapping->use > 0) {
+ mapping->use++;
+ mutex_unlock(&context->lock);
return 0;
+ }
+
+ /*
+ * For MMUv1 we don't add the suballoc region to the pagetables, as
+ * those GPUs can only work with cmdbufs accessed through the linear
+ * window. Instead we manufacture a mapping to make it look uniform
+ * to the upper layers.
+ */
+ if (context->global->version == ETNAVIV_IOMMU_V1) {
+ mapping->iova = paddr - memory_base;
} else {
+ struct drm_mm_node *node = &mapping->vram_node;
int ret;
- mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
+ ret = etnaviv_iommu_find_iova(context, node, size);
if (ret < 0) {
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
- ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
- size, ETNAVIV_PROT_READ);
+
+ mapping->iova = node->start;
+ ret = etnaviv_context_map(context, node->start, paddr, size,
+ ETNAVIV_PROT_READ);
if (ret < 0) {
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
return ret;
}
- gpu->mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
- *iova = (u32)vram_node->start;
- return 0;
+ context->flush_seq++;
}
+
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ mapping->use = 1;
+
+ mutex_unlock(&context->lock);
+
+ return 0;
}
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
- struct drm_mm_node *vram_node, size_t size,
- u32 iova)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ struct drm_mm_node *node = &mapping->vram_node;
- if (mmu->version == ETNAVIV_IOMMU_V2) {
- mutex_lock(&mmu->lock);
- etnaviv_domain_unmap(mmu->domain, iova, size);
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ mutex_lock(&context->lock);
+ mapping->use--;
+
+ if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
+ mutex_unlock(&context->lock);
+ return;
}
+
+ etnaviv_context_unmap(context, node->start, node->size);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
}
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
- return iommu->domain->ops->dump_size(iommu->domain);
+ return context->global->ops->dump_size(context);
}
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
- iommu->domain->ops->dump(iommu->domain, buf);
+ context->global->ops->dump(context, buf);
+}
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
+{
+ enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global;
+ struct device *dev = gpu->drm->dev;
+
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
+ version = ETNAVIV_IOMMU_V2;
+
+ if (priv->mmu_global) {
+ if (priv->mmu_global->version != version) {
+ dev_err(gpu->dev,
+ "MMU version doesn't match global version\n");
+ return -ENXIO;
+ }
+
+ priv->mmu_global->use++;
+ return 0;
+ }
+
+ global = kzalloc(sizeof(*global), GFP_KERNEL);
+ if (!global)
+ return -ENOMEM;
+
+ global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
+ GFP_KERNEL);
+ if (!global->bad_page_cpu)
+ goto free_global;
+
+ memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
+
+ if (version == ETNAVIV_IOMMU_V2) {
+ global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
+ &global->v2.pta_dma, GFP_KERNEL);
+ if (!global->v2.pta_cpu)
+ goto free_bad_page;
+ }
+
+ global->dev = dev;
+ global->version = version;
+ global->use = 1;
+ mutex_init(&global->lock);
+
+ if (version == ETNAVIV_IOMMU_V1)
+ global->ops = &etnaviv_iommuv1_ops;
+ else
+ global->ops = &etnaviv_iommuv2_ops;
+
+ priv->mmu_global = global;
+
+ return 0;
+
+free_bad_page:
+ dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
+free_global:
+ kfree(global);
+
+ return -ENOMEM;
+}
+
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global = priv->mmu_global;
+
+ if (--global->use > 0)
+ return;
+
+ if (global->v2.pta_cpu)
+ dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
+ global->v2.pta_cpu, global->v2.pta_dma);
+
+ if (global->bad_page_cpu)
+ dma_free_wc(global->dev, SZ_4K,
+ global->bad_page_cpu, global->bad_page_dma);
+
+ mutex_destroy(&global->lock);
+ kfree(global);
+
+ priv->mmu_global = NULL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index a0db17ffb686..d1d6902fd13b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -16,61 +16,109 @@ enum etnaviv_iommu_version {
struct etnaviv_gpu;
struct etnaviv_vram_mapping;
-struct etnaviv_iommu_domain;
+struct etnaviv_iommu_global;
+struct etnaviv_iommu_context;
-struct etnaviv_iommu_domain_ops {
- void (*free)(struct etnaviv_iommu_domain *);
- int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+struct etnaviv_iommu_ops {
+ struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
+ void (*free)(struct etnaviv_iommu_context *);
+ int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
- size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+ size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
size_t size);
- size_t (*dump_size)(struct etnaviv_iommu_domain *);
- void (*dump)(struct etnaviv_iommu_domain *, void *);
+ size_t (*dump_size)(struct etnaviv_iommu_context *);
+ void (*dump)(struct etnaviv_iommu_context *, void *);
+ void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
};
-struct etnaviv_iommu_domain {
+extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
+extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
+
+#define ETNAVIV_PTA_SIZE SZ_4K
+#define ETNAVIV_PTA_ENTRIES (ETNAVIV_PTA_SIZE / sizeof(u64))
+
+struct etnaviv_iommu_global {
struct device *dev;
+ enum etnaviv_iommu_version version;
+ const struct etnaviv_iommu_ops *ops;
+ unsigned int use;
+ struct mutex lock;
+
void *bad_page_cpu;
dma_addr_t bad_page_dma;
- u64 base;
- u64 size;
- const struct etnaviv_iommu_domain_ops *ops;
+ u32 memory_base;
+
+ /*
+ * This union holds members needed by either MMUv1 or MMUv2, which
+ * cannot exist at the same time.
+ */
+ union {
+ struct {
+ struct etnaviv_iommu_context *shared_context;
+ } v1;
+ struct {
+ /* P(age) T(able) A(rray) */
+ u64 *pta_cpu;
+ dma_addr_t pta_dma;
+ struct spinlock pta_lock;
+ DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
+ } v2;
+ };
};
-struct etnaviv_iommu {
- struct etnaviv_gpu *gpu;
- struct etnaviv_iommu_domain *domain;
-
- enum etnaviv_iommu_version version;
+struct etnaviv_iommu_context {
+ struct kref refcount;
+ struct etnaviv_iommu_global *global;
/* memory manager for GPU address area */
struct mutex lock;
struct list_head mappings;
struct drm_mm mm;
- bool need_flush;
+ unsigned int flush_seq;
+
+ /* Not part of the context, but needs to have the same lifetime */
+ struct etnaviv_vram_mapping cmdbuf_mapping;
};
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);
+
struct etnaviv_gem_object;
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping, u64 va);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping);
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
- struct etnaviv_vram_mapping *mapping);
-
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
- struct drm_mm_node *vram_node, size_t size,
- u32 *iova);
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
- struct drm_mm_node *vram_node, size_t size,
- u32 iova);
-
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
-void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size);
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
+ struct etnaviv_vram_mapping *mapping);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
+
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc);
+static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+{
+ kref_get(&ctx->refcount);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *ctx);
+
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
+
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context);
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context);
#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 4227a4006c34..8adbf2861bff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -4,6 +4,7 @@
* Copyright (C) 2017 Zodiac Inflight Innovations
*/
+#include "common.xml.h"
#include "etnaviv_gpu.h"
#include "etnaviv_perfmon.h"
#include "state_hi.xml.h"
@@ -15,8 +16,8 @@ struct etnaviv_pm_signal {
u32 data;
u32 (*sample)(struct etnaviv_gpu *gpu,
- const struct etnaviv_pm_domain *domain,
- const struct etnaviv_pm_signal *signal);
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal);
};
struct etnaviv_pm_domain {
@@ -35,13 +36,6 @@ struct etnaviv_pm_domain_meta {
u32 nr_domains;
};
-static u32 simple_reg_read(struct etnaviv_gpu *gpu,
- const struct etnaviv_pm_domain *domain,
- const struct etnaviv_pm_signal *signal)
-{
- return gpu_read(gpu, signal->data);
-}
-
static u32 perf_reg_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
@@ -75,6 +69,34 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
return value;
}
+static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_MC_PROFILE_CYCLE_COUNTER;
+
+ return gpu_read(gpu, reg);
+}
+
+static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ return gpu_read(gpu, reg);
+}
+
static const struct etnaviv_pm_domain doms_3d[] = {
{
.name = "HI",
@@ -84,13 +106,13 @@ static const struct etnaviv_pm_domain doms_3d[] = {
.signal = (const struct etnaviv_pm_signal[]) {
{
"TOTAL_CYCLES",
- VIVS_HI_PROFILE_TOTAL_CYCLES,
- &simple_reg_read
+ 0,
+ &hi_total_cycle_read
},
{
"IDLE_CYCLES",
- VIVS_HI_PROFILE_IDLE_CYCLES,
- &simple_reg_read
+ 0,
+ &hi_total_idle_cycle_read
},
{
"AXI_CYCLES_READ_REQUEST_STALLED",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a813c824e154..4e3e95dce6d8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -3,7 +3,7 @@
* Copyright (C) 2017 Etnaviv Project
*/
-#include <linux/kthread.h>
+#include <linux/moduleparam.h>
#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
@@ -115,7 +115,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
- etnaviv_core_dump(gpu);
+ etnaviv_core_dump(submit);
etnaviv_gpu_recover_hang(gpu);
drm_sched_resubmit_jobs(&gpu->sched);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6f7d3b3b3628..6417f374b923 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_EXYNOS
- tristate "DRM Support for Samsung SoC EXYNOS Series"
+ tristate "DRM Support for Samsung SoC Exynos Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
help
- Choose this option if you have a Samsung SoC EXYNOS chipset.
+ Choose this option if you have a Samsung SoC Exynos chipset.
If M is selected the module will be called exynosdrm.
if DRM_EXYNOS
@@ -62,7 +62,7 @@ config DRM_EXYNOS_DSI
This enables support for Exynos MIPI-DSI device.
config DRM_EXYNOS_DP
- bool "EXYNOS specific extensions for Analogix DP driver"
+ bool "Exynos specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
select DRM_ANALOGIX_DP
default DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 2d5cbfda3ca7..8428ae12dfa5 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -510,7 +510,7 @@ static void decon_swreset(struct decon_context *ctx)
ctx->addr + DECON_CRCCTRL);
}
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -523,7 +523,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
decon_commit(ctx->crtc);
}
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
@@ -599,8 +599,8 @@ static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
- .enable = decon_enable,
- .disable = decon_disable,
+ .atomic_enable = decon_atomic_enable,
+ .atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -651,7 +651,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
- decon_disable(ctx->crtc);
+ decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f0640950bd46..ff59c641fa80 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,7 +526,7 @@ static void decon_init(struct decon_context *ctx)
writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -546,7 +546,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
ctx->suspended = false;
}
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
@@ -568,8 +568,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
- .enable = decon_enable,
- .disable = decon_disable,
+ .atomic_enable = decon_atomic_enable,
+ .atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -653,7 +653,7 @@ static void decon_unbind(struct device *dev, struct device *master,
{
struct decon_context *ctx = dev_get_drvdata(dev);
- decon_disable(ctx->crtc);
+ decon_atomic_disable(ctx->crtc);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3a0f0ba8c63a..4785885c0f4f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -19,6 +19,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -109,7 +110,6 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
- bridge->next = NULL;
return ret;
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 77ce78986408..1c03485676ef 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -23,8 +23,8 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (exynos_crtc->ops->enable)
- exynos_crtc->ops->enable(exynos_crtc);
+ if (exynos_crtc->ops->atomic_enable)
+ exynos_crtc->ops->atomic_enable(exynos_crtc);
drm_crtc_vblank_on(crtc);
}
@@ -36,8 +36,8 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- if (exynos_crtc->ops->disable)
- exynos_crtc->ops->disable(exynos_crtc);
+ if (exynos_crtc->ops->atomic_disable)
+ exynos_crtc->ops->atomic_disable(exynos_crtc);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3cebb19ec1c4..43fa0f26c052 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -43,7 +43,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- if (ctx->panel && !ctx->panel->connector)
+ if (ctx->panel)
drm_panel_attach(ctx->panel, &ctx->connector);
return connector_status_connected;
@@ -85,7 +85,7 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
}
if (ctx->panel)
- return ctx->panel->funcs->get_modes(ctx->panel);
+ return drm_panel_get_modes(ctx->panel, connector);
return 0;
}
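
[Annotation, not part of the patch] The hunk above (and the fsl-dcu one further down) replaces direct panel->funcs->get_modes() calls with the drm_panel_get_modes() helper, which now takes the connector the modes should be added to. A minimal sketch of the resulting connector callback, reusing the driver's own exynos_dpi/connector_to_dpi names purely for illustration:

#include <drm/drm_panel.h>

/* Illustrative ->get_modes(): let the attached panel populate the connector. */
static int dpi_connector_get_modes_sketch(struct drm_connector *connector)
{
	struct exynos_dpi *ctx = connector_to_dpi(connector);

	if (!ctx->panel)
		return 0;

	/* drm_panel_get_modes() now needs to know which connector to fill. */
	return drm_panel_get_modes(ctx->panel, connector);
}
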
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 58baf49d9926..ba0f868b2477 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -8,12 +8,20 @@
*/
#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -75,29 +83,29 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
exynos_drm_ipp_get_res_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
exynos_drm_ipp_get_limits_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -112,7 +120,7 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+ .driver_features = DRIVER_MODESET | DRIVER_GEM
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
.lastclose = drm_fb_helper_lastclose,
@@ -122,7 +130,6 @@ static struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
@@ -242,9 +249,7 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
continue;
- while ((d = bus_find_device(&platform_bus_type, p,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
+ while ((d = platform_find_device_by_driver(p, &info->driver->driver))) {
put_device(p);
if (!(info->flags & DRM_FIMC_DEVICE) ||
@@ -412,9 +417,8 @@ static void exynos_drm_unregister_devices(void)
if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
continue;
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
+ while ((dev = platform_find_device_by_driver(NULL,
+ &info->driver->driver))) {
put_device(dev);
platform_device_unregister(to_platform_device(dev));
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d4014ba592fd..d4d21d8cfb90 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -118,8 +118,8 @@ struct exynos_drm_plane_config {
/*
* Exynos drm crtc ops
*
- * @enable: enable the device
- * @disable: disable the device
+ * @atomic_enable: enable the device
+ * @atomic_disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @mode_valid: specific driver callback for mode validation
@@ -133,8 +133,8 @@ struct exynos_drm_plane_config {
*/
struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
- void (*enable)(struct exynos_drm_crtc *crtc);
- void (*disable)(struct exynos_drm_crtc *crtc);
+ void (*atomic_enable)(struct exynos_drm_crtc *crtc);
+ void (*atomic_disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6926cee91b36..33628d85edad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -24,6 +24,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -254,6 +255,7 @@ struct exynos_dsi {
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
struct drm_panel *panel;
+ struct list_head bridge_chain;
struct drm_bridge *out_bridge;
struct device *dev;
@@ -1376,6 +1378,7 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_bridge *iter;
int ret;
if (dsi->state & DSIM_STATE_ENABLED)
@@ -1389,7 +1392,11 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (ret < 0)
goto err_put_sync;
} else {
- drm_bridge_pre_enable(dsi->out_bridge);
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain,
+ chain_node) {
+ if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
+ }
}
exynos_dsi_set_display_mode(dsi);
@@ -1400,7 +1407,10 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (ret < 0)
goto err_display_disable;
} else {
- drm_bridge_enable(dsi->out_bridge);
+ list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->enable)
+ iter->funcs->enable(iter);
+ }
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1418,6 +1428,7 @@ err_put_sync:
static void exynos_dsi_disable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_bridge *iter;
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
@@ -1425,10 +1436,20 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
drm_panel_disable(dsi->panel);
- drm_bridge_disable(dsi->out_bridge);
+
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->disable)
+ iter->funcs->disable(iter);
+ }
+
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
- drm_bridge_post_disable(dsi->out_bridge);
+
+ list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->post_disable)
+ iter->funcs->post_disable(iter);
+ }
+
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put_sync(dsi->dev);
}
@@ -1460,7 +1481,7 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
struct exynos_dsi *dsi = connector_to_dsi(connector);
if (dsi->panel)
- return dsi->panel->funcs->get_modes(dsi->panel);
+ return drm_panel_get_modes(dsi->panel, connector);
return 0;
}
@@ -1521,7 +1542,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
if (out_bridge) {
drm_bridge_attach(encoder, out_bridge, NULL);
dsi->out_bridge = out_bridge;
- encoder->bridge = NULL;
+ list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
int ret = exynos_dsi_create_connector(encoder);
@@ -1587,6 +1608,7 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
if (dsi->out_bridge->funcs->detach)
dsi->out_bridge->funcs->detach(dsi->out_bridge);
dsi->out_bridge = NULL;
+ INIT_LIST_HEAD(&dsi->bridge_chain);
}
if (drm->mode_config.poll_enabled)
@@ -1734,6 +1756,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
+ INIT_LIST_HEAD(&dsi->bridge_chain);
dsi->dsi_host.ops = &exynos_dsi_ops;
dsi->dsi_host.dev = dev;
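
[Annotation, not part of the patch] The DSI hunks above open-code the bridge-chain walk that drm_bridge_pre_enable()/drm_bridge_enable() used to perform from the single out_bridge pointer: the chain is spliced off encoder->bridge_chain at attach time and then iterated directly. A condensed sketch of the enable-side ordering, using the same bridge_chain/chain_node fields as the patch; assume the list was initialised in probe as shown above:

#include <linux/list.h>
#include <drm/drm_bridge.h>

static void dsi_bridge_chain_enable_sketch(struct exynos_dsi *dsi)
{
	struct drm_bridge *iter;

	/* pre_enable runs from the panel end of the chain back towards the encoder */
	list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
		if (iter->funcs->pre_enable)
			iter->funcs->pre_enable(iter);
	}

	/* ... the DSI link is programmed here in the real driver ... */

	/* enable runs from the encoder outwards */
	list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
		if (iter->funcs->enable)
			iter->funcs->enable(iter);
	}
}

The disable path in the hunk mirrors this: ->disable in reverse order, ->post_disable in forward order.
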
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index b0877b97291c..647a1fd1d815 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -60,7 +60,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
return 0;
}
-static struct fb_ops exynos_drm_fb_ops = {
+static const struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 164d914cbe9a..8ea2e1d77802 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -17,6 +17,8 @@
#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 8d0a929104e5..21aec38702fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -894,7 +894,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
fimd_enable_shadow_channel_path(ctx, win, false);
}
-static void fimd_enable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -912,7 +912,7 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
fimd_commit(ctx->crtc);
}
-static void fimd_disable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
int i;
@@ -1006,8 +1006,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
- .enable = fimd_enable,
- .disable = fimd_disable,
+ .atomic_enable = fimd_atomic_enable,
+ .atomic_disable = fimd_atomic_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.atomic_begin = fimd_atomic_begin,
@@ -1098,7 +1098,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
{
struct fimd_context *ctx = dev_get_drvdata(dev);
- fimd_disable(ctx->crtc);
+ fimd_atomic_disable(ctx->crtc);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1c524db9570f..88b6fcaa20be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -16,6 +16,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -1311,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d45bfab6fe40..4f2b7551b251 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,10 @@
* all copies or substantial portions of the Software.
*/
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 9cbbc301bec9..67a0805ee009 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -6,8 +6,6 @@
#ifndef _EXYNOS_DRM_IPP_H_
#define _EXYNOS_DRM_IPP_H_
-#include <drm/drmP.h>
-
struct exynos_drm_ipp;
struct exynos_drm_ipp_task;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b78e8c5ba553..f41d75923557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 8ebad2740ad5..b98482990d1a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -15,7 +15,9 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index b24ba948b725..497973e9b2c5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 65b891cb9c50..b320b3a21ad4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -120,7 +120,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
-static void vidi_enable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -133,7 +133,7 @@ static void vidi_enable(struct exynos_drm_crtc *crtc)
drm_crtc_vblank_on(&crtc->base);
}
-static void vidi_disable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -147,8 +147,8 @@ static void vidi_disable(struct exynos_drm_crtc *crtc)
}
static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
- .enable = vidi_enable,
- .disable = vidi_disable,
+ .atomic_enable = vidi_atomic_enable,
+ .atomic_disable = vidi_atomic_disable,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
.update_plane = vidi_update_plane,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index bc1565f1822a..9ff921f43a93 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <media/cec-notifier.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -852,6 +853,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
static void hdmi_connector_destroy(struct drm_connector *connector)
{
+ struct hdmi_context *hdata = connector_to_hdmi(connector);
+
+ cec_notifier_conn_unregister(hdata->notifier);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -935,13 +940,16 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
+ struct cec_connector_info conn_info;
int ret;
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(hdata->drm_dev, connector,
- &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ ret = drm_connector_init_with_ddc(hdata->drm_dev, connector,
+ &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdata->ddc_adpt);
if (ret) {
DRM_DEV_ERROR(hdata->dev,
"Failed to initialize connector with drm\n");
@@ -957,6 +965,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
+ &conn_info);
+ if (!hdata->notifier) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n");
+ }
+
return ret;
}
@@ -1528,8 +1545,8 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
mutex_unlock(&hdata->mutex);
cancel_delayed_work(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier,
- CEC_PHYS_ADDR_INVALID);
+ if (hdata->notifier)
+ cec_notifier_phys_addr_invalidate(hdata->notifier);
return;
}
@@ -2006,12 +2023,6 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
- hdata->notifier = cec_notifier_get(&pdev->dev);
- if (hdata->notifier == NULL) {
- ret = -ENOMEM;
- goto err_hdmiphy;
- }
-
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2023,7 +2034,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_register_audio_device(hdata);
if (ret)
- goto err_notifier_put;
+ goto err_rpm_disable;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
@@ -2034,8 +2045,7 @@ static int hdmi_probe(struct platform_device *pdev)
err_unregister_audio:
platform_device_unregister(hdata->audio.pdev);
-err_notifier_put:
- cec_notifier_put(hdata->notifier);
+err_rpm_disable:
pm_runtime_disable(dev);
err_hdmiphy:
@@ -2054,12 +2064,10 @@ static int hdmi_remove(struct platform_device *pdev)
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
platform_device_unregister(hdata->audio.pdev);
- cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
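
[Annotation, not part of the patch] The HDMI changes move CEC notifier handling from probe/remove to connector create/destroy, switching to the connector-aware API. A stripped-down sketch of that pairing, with error handling and the rest of connector setup elided; hdata->notifier and hdata->dev are the driver's own fields as above:

#include <media/cec-notifier.h>

static int hdmi_bind_cec_sketch(struct hdmi_context *hdata,
				struct drm_connector *connector)
{
	struct cec_connector_info conn_info;

	cec_fill_conn_info_from_drm(&conn_info, connector);

	hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
						     &conn_info);
	return hdata->notifier ? 0 : -ENOMEM;
}

static void hdmi_unbind_cec_sketch(struct hdmi_context *hdata)
{
	/* counterpart, called from the connector ->destroy hook */
	cec_notifier_conn_unregister(hdata->notifier);
}
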
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7b24338fad3c..38ae9c32feef 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -986,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
exynos_crtc_handle_event(crtc);
}
-static void mixer_enable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
@@ -1015,7 +1015,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
set_bit(MXR_BIT_POWERED, &ctx->flags);
}
-static void mixer_disable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
int i;
@@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
int width = mode->hdisplay, height = mode->vdisplay, i;
- struct {
+ static const struct {
int hdisplay, vdisplay, htotal, vtotal, scan_val;
- } static const modes[] = {
+ } modes[] = {
{ 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
{ 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
{ 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
@@ -1109,8 +1109,8 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
}
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
- .enable = mixer_enable,
- .disable = mixer_disable,
+ .atomic_enable = mixer_atomic_enable,
+ .atomic_disable = mixer_atomic_disable,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.atomic_begin = mixer_atomic_begin,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index f4635bea0265..b9ca81a6f80f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -8,12 +8,13 @@
#include <linux/clk.h>
#include <linux/regmap.h>
-#include <drm/drmP.h>
+#include <video/videomode.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e81daaaa5965..f15d2e7967a3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -18,13 +18,15 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -133,8 +135,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
static struct drm_driver fsl_dcu_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET
- | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
.irq_handler = fsl_dcu_drm_irq,
@@ -144,8 +145,6 @@ static struct drm_driver fsl_dcu_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index 2467c8934405..d763f53f480c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -5,7 +5,6 @@
* Freescale DCU drm device driver
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 6f2f65030dd1..86fac677fe69 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -7,10 +7,10 @@
#include <linux/regmap.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c49e9e3740f8..9598ee3cc4d2 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -8,8 +8,8 @@
#include <linux/backlight.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -66,17 +66,9 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
{
struct fsl_dcu_drm_connector *fsl_connector;
- int (*get_modes)(struct drm_panel *panel);
- int num_modes = 0;
fsl_connector = to_fsl_dcu_connector(connector);
- if (fsl_connector->panel && fsl_connector->panel->funcs &&
- fsl_connector->panel->funcs->get_modes) {
- get_modes = fsl_connector->panel->funcs->get_modes;
- num_modes = get_modes(fsl_connector->panel);
- }
-
- return num_modes;
+ return drm_panel_get_modes(fsl_connector->panel, connector);
}
static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index 45ad5ffedc93..adc0507545bf 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -21,9 +21,9 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_reg.h"
@@ -226,11 +226,10 @@ static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
static void psbfb_copyarea_accel(struct fb_info *info,
const struct fb_copyarea *a)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
- struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
uint32_t offset;
uint32_t stride;
uint32_t src_format;
@@ -239,6 +238,8 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
+ dev = fb->dev;
+ dev_priv = dev->dev_private;
offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
@@ -309,9 +310,9 @@ void psbfb_copyarea(struct fb_info *info,
*/
int psbfb_sync(struct fb_info *info)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev = fb->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long _end = jiffies + HZ;
int busy = 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index f56852a503e8..1ed854f498b7 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock));
+
switch (refclk) {
case 27000:
if (target < 200000) {
@@ -580,8 +582,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
- bool is_crt = false, is_lvds = false, is_tv = false;
- bool is_hdmi = false, is_dp = false;
+ bool is_lvds = false, is_tv = false;
+ bool is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const struct gma_limit_t *limit;
@@ -605,10 +607,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_HDMI:
- is_hdmi = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
@@ -977,6 +976,7 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
+ .page_flip = gma_crtc_page_flip,
};
const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 570b59520fd1..5772b2dce0d6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1594,7 +1594,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
- bool channel_eq = false;
int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
@@ -1602,7 +1601,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
/* channel equalization */
tries = 0;
cr_tries = 0;
- channel_eq = false;
DRM_DEBUG_KMS("\n");
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1648,7 +1646,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
if (cdv_intel_channel_eq_ok(encoder)) {
DRM_DEBUG_KMS("PT2 train is done\n");
- channel_eq = true;
break;
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 218f3bb15276..1459076d1980 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -40,8 +40,8 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
- struct psb_fbdev *fbdev = info->par;
- struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
uint32_t v;
if (!fb)
@@ -77,10 +77,10 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
- struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev = fb->dev;
+ struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
@@ -99,10 +99,10 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct psb_framebuffer *psbfb = vma->vm_private_data;
- struct drm_device *dev = psbfb->base.dev;
+ struct drm_framebuffer *fb = vma->vm_private_data;
+ struct drm_device *dev = fb->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
+ struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int page_num;
int i;
unsigned long address;
@@ -145,23 +145,21 @@ static const struct vm_operations_struct psbfb_vm_ops = {
static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
if (vma->vm_pgoff != 0)
return -EINVAL;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
- if (!psbfb->addr_space)
- psbfb->addr_space = vma->vm_file->f_mapping;
/*
* If this is a GEM object then info->screen_base is the virtual
* kernel remapping of the object. FIXME: Review if this is
* suitable for our mmap work
*/
vma->vm_ops = &psbfb_vm_ops;
- vma->vm_private_data = (void *)psbfb;
+ vma->vm_private_data = (void *)fb;
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
@@ -209,9 +207,9 @@ static struct fb_ops psbfb_unaccel_ops = {
* 0 on success or an error code if we fail.
*/
static int psb_framebuffer_init(struct drm_device *dev,
- struct psb_framebuffer *fb,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct gtt_range *gt)
+ struct drm_gem_object *obj)
{
const struct drm_format_info *info;
int ret;
@@ -227,9 +225,9 @@ static int psb_framebuffer_init(struct drm_device *dev,
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->base.obj[0] = &gt->gem;
- ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ fb->obj[0] = obj;
+ ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
return ret;
@@ -252,21 +250,21 @@ static int psb_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *psb_framebuffer_create
(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct gtt_range *gt)
+ struct drm_gem_object *obj)
{
- struct psb_framebuffer *fb;
+ struct drm_framebuffer *fb;
int ret;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+ ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
}
- return &fb->base;
+ return fb;
}
/**
@@ -300,14 +298,13 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
*
* Create a framebuffer to the specifications provided
*/
-static int psbfb_create(struct psb_fbdev *fbdev,
+static int psbfb_create(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct drm_device *dev = fbdev->psb_fb_helper.dev;
+ struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd2 mode_cmd;
int size;
int ret;
@@ -372,7 +369,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
memset(dev_priv->vram_addr + backing->offset, 0, size);
- info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+ info = drm_fb_helper_alloc_fbi(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out;
@@ -380,14 +377,13 @@ static int psbfb_create(struct psb_fbdev *fbdev,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
- ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
- if (ret)
+ fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto out;
+ }
- fb = &psbfb->base;
- psbfb->fbdev = info;
-
- fbdev->psb_fb_helper.fb = fb;
+ fb_helper->fb = fb;
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
@@ -411,15 +407,14 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_dbg(dev->dev, "allocated %dx%d fb\n",
- psbfb->base.width, psbfb->base.height);
+ dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
return 0;
out:
@@ -439,7 +434,6 @@ static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
const struct drm_mode_fb_cmd2 *cmd)
{
- struct gtt_range *r;
struct drm_gem_object *obj;
/*
@@ -451,17 +445,15 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- r = container_of(obj, struct gtt_range, gem);
- return psb_framebuffer_create(dev, cmd, r);
+ return psb_framebuffer_create(dev, cmd, obj);
}
-static int psbfb_probe(struct drm_fb_helper *helper,
+static int psbfb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct psb_fbdev *psb_fbdev =
- container_of(helper, struct psb_fbdev, psb_fb_helper);
- struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned int fb_size;
int bytespp;
bytespp = sizes->surface_bpp / 8;
@@ -471,72 +463,77 @@ static int psbfb_probe(struct drm_fb_helper *helper,
/* If the mode will not fit in 32bit then switch to 16bit to get
a console on full resolution. The X mode setting server will
allocate its own 32bit GEM framebuffer */
- if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
- dev_priv->vram_stolen_size) {
+ fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
+ sizes->surface_height;
+ fb_size = ALIGN(fb_size, PAGE_SIZE);
+
+ if (fb_size > dev_priv->vram_stolen_size) {
sizes->surface_bpp = 16;
sizes->surface_depth = 16;
}
- return psbfb_create(psb_fbdev, sizes);
+ return psbfb_create(fb_helper, sizes);
}
static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.fb_probe = psbfb_probe,
};
-static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+static int psb_fbdev_destroy(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper)
{
- struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ drm_fb_helper_unregister_fbi(fb_helper);
- drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+ drm_fb_helper_fini(fb_helper);
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_cleanup(fb);
- drm_fb_helper_fini(&fbdev->psb_fb_helper);
- drm_framebuffer_unregister_private(&psbfb->base);
- drm_framebuffer_cleanup(&psbfb->base);
+ if (fb->obj[0])
+ drm_gem_object_put_unlocked(fb->obj[0]);
+ kfree(fb);
- if (psbfb->base.obj[0])
- drm_gem_object_put_unlocked(psbfb->base.obj[0]);
return 0;
}
int psb_fbdev_init(struct drm_device *dev)
{
- struct psb_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
struct drm_psb_private *dev_priv = dev->dev_private;
int ret;
- fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
- if (!fbdev) {
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper) {
dev_err(dev->dev, "no memory\n");
return -ENOMEM;
}
- dev_priv->fbdev = fbdev;
+ dev_priv->fb_helper = fb_helper;
- drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
- INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret)
goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ ret = drm_fb_helper_initial_config(fb_helper, 32);
if (ret)
goto fini;
return 0;
fini:
- drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_fb_helper_fini(fb_helper);
free:
- kfree(fbdev);
+ kfree(fb_helper);
return ret;
}
@@ -544,12 +541,12 @@ static void psb_fbdev_fini(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- if (!dev_priv->fbdev)
+ if (!dev_priv->fb_helper)
return;
- psb_fbdev_destroy(dev, dev_priv->fbdev);
- kfree(dev_priv->fbdev);
- dev_priv->fbdev = NULL;
+ psb_fbdev_destroy(dev, dev_priv->fb_helper);
+ kfree(dev_priv->fb_helper);
+ dev_priv->fb_helper = NULL;
}
static const struct drm_mode_config_funcs psb_mode_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index ae8a02639fd9..2fbba4b48841 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -9,23 +9,8 @@
#ifndef _FRAMEBUFFER_H_
#define _FRAMEBUFFER_H_
-#include <drm/drm_fb_helper.h>
-
#include "psb_drv.h"
-struct psb_framebuffer {
- struct drm_framebuffer base;
- struct address_space *addr_space;
- struct fb_info *fbdev;
-};
-
-struct psb_fbdev {
- struct drm_fb_helper psb_fb_helper; /* must be first */
- struct psb_framebuffer pfb;
-};
-
-#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
-
extern int gma_connector_clones(struct drm_device *dev, int type_mask);
#endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index e20ccb5d10fd..17f136985d21 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -255,6 +255,8 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+ drm_crtc_vblank_on(crtc);
break;
case DRM_MODE_DPMS_OFF:
if (!gma_crtc->active)
@@ -501,6 +503,52 @@ void gma_crtc_destroy(struct drm_crtc *crtc)
kfree(gma_crtc);
}
+int gma_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct drm_framebuffer *current_fb = crtc->primary->fb;
+ struct drm_framebuffer *old_fb = crtc->primary->old_fb;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ int ret;
+
+ if (!crtc_funcs->mode_set_base)
+ return -EINVAL;
+
+ /* Using mode_set_base requires the new fb to be set already. */
+ crtc->primary->fb = fb;
+
+ if (event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ gma_crtc->page_flip_event = event;
+
+ /* Call this locked if we want an event at vblank interrupt. */
+ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ if (ret) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ }
+
+ /* Restore previous fb in case of failure. */
+ if (ret)
+ crtc->primary->fb = current_fb;
+
+ return ret;
+}
+
int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index fdbd7ecaa59c..7bd6c1ee8b21 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -11,6 +11,7 @@
#define _GMA_DISPLAY_H_
#include <linux/pm_runtime.h>
+#include <drm/drm_vblank.h>
struct drm_encoder;
struct drm_mode_set;
@@ -71,6 +72,11 @@ extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
extern void gma_crtc_disable(struct drm_crtc *crtc);
extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx);
extern int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx);
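
[Annotation, not part of the patch] gma_crtc_page_flip() above only arms the flip: it takes a vblank reference, stores the event and calls mode_set_base. The event itself is completed from the vblank interrupt, as the psb_irq.c hunk further down shows. That completion step, pulled out as a sketch with the same locking and field names as the patch:

#include <drm/drm_vblank.h>

static void gma_complete_page_flip_sketch(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (gma_crtc->page_flip_event) {
		drm_crtc_send_vblank_event(crtc, gma_crtc->page_flip_event);
		gma_crtc->page_flip_event = NULL;
		drm_crtc_vblank_put(crtc);	/* balances the get in page_flip */
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
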
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index afaf4bea21cf..9278bcfad1bf 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
* Map the GTT and the stolen memory area
*/
if (!resume)
- dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 03023fa0fb6f..f350ac1ead18 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
return;
}
- /*create a new connetor*/
+ /*create a new connector*/
dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
if (!dsi_connector) {
DRM_ERROR("No memory");
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index b8bfb96008b8..4fff110c4921 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -113,27 +113,6 @@ static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
return (pfit_control >> 29) & 0x3;
}
-static struct drm_device globle_dev;
-
-void mdfld__intel_plane_set_alpha(int enable)
-{
- struct drm_device *dev = &globle_dev;
- int dspcntr_reg = DSPACNTR;
- u32 dspcntr;
-
- dspcntr = REG_READ(dspcntr_reg);
-
- if (enable) {
- dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
- dspcntr |= DISPPLANE_32BPP;
- } else {
- dspcntr &= ~DISPPLANE_32BPP;
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- }
-
- REG_WRITE(dspcntr_reg, dspcntr);
-}
-
static int check_fb(struct drm_framebuffer *fb)
{
if (!fb)
@@ -164,8 +143,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
u32 dspcntr;
int ret;
- memcpy(&globle_dev, dev, sizeof(struct drm_device));
-
dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
/* no fb bound */
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 167c10767dd4..900e5499249d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f4c520893ceb..f4370232767d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -159,9 +159,7 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
static unsigned int htotal_calculate(struct drm_display_mode *mode)
{
- u32 htotal, new_crtc_htotal;
-
- htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+ u32 new_crtc_htotal;
/*
* 1024 x 768 new_crtc_htotal = 0x1024;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 7390403ea1b7..582e09597500 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -117,6 +117,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
if (!connector) {
DRM_ERROR("Couldn't find connector when setting mode");
+ gma_power_end(dev);
return;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 7005f8f69c68..6956c8e7501c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,10 +19,10 @@
#include <drm/drm.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_vblank.h>
@@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
- dev_priv->aux_reg = ioremap_nocache(resource_start,
+ dev_priv->aux_reg = ioremap(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
@@ -426,14 +426,48 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_pci_dev(pdev, ent, &driver);
-}
+ struct drm_device *dev;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = psb_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_psb_driver_unload;
+
+ return 0;
+
+err_psb_driver_unload:
+ psb_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
static void psb_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+
+ drm_dev_unregister(dev);
+ psb_driver_unload(dev);
+ drm_dev_put(dev);
}
static const struct dev_pm_ops psb_pm_ops = {
@@ -466,8 +500,6 @@ static const struct file_operations psb_gem_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = psb_driver_load,
- .unload = psb_driver_unload,
.lastclose = drm_fb_helper_lastclose,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
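
[Annotation, not part of the patch] With .load/.unload gone from the drm_driver, psb_pci_probe() above owns the whole lifecycle: pci_enable_device(), drm_dev_alloc(), psb_driver_load(), drm_dev_register(), unwinding in reverse on failure. The remove path relies on the same symmetry; as an annotated restatement of the hunk above:

static void psb_pci_remove_sketch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);	/* undoes drm_dev_register() */
	psb_driver_unload(dev);		/* undoes psb_driver_load() */
	drm_dev_put(dev);		/* drops the drm_dev_alloc() reference */
}
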
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 9b3c03f4a38d..3d4ef3071d45 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -229,6 +229,8 @@ enum {
#define KSEL_BYPASS_25 6
#define KSEL_BYPASS_83_100 7
+struct drm_fb_helper;
+
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -432,7 +434,7 @@ struct drm_psb_private {
struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
const struct psb_offset *regmap;
-
+
struct child_device_config *child_dev;
int child_dev_num;
@@ -540,7 +542,7 @@ struct drm_psb_private {
/* Oaktrail HDMI state */
struct oaktrail_hdmi_dev *hdmi_priv;
-
+
/* Register state */
struct psb_save_area regs;
@@ -572,7 +574,7 @@ struct drm_psb_private {
uint32_t blc_adj1;
uint32_t blc_adj2;
- void *fbdev;
+ struct drm_fb_helper *fb_helper;
/* 2D acceleration */
spinlock_t lock_2d;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 4256410535f0..fed3b563e62e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -432,6 +432,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
+ .page_flip = gma_crtc_page_flip,
};
const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index cdf10333d1c2..16c6136f778b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -12,6 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <linux/gpio.h>
#include "gma_display.h"
@@ -182,6 +183,8 @@ struct gma_crtc {
struct psb_intel_crtc_state *crtc_state;
const struct gma_clock_funcs *clock_funcs;
+
+ struct drm_pending_vblank_event *page_flip_event;
};
#define to_gma_crtc(x) \
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6265fb85626..91f90016dba9 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -165,11 +165,23 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
__func__, pipe, PSB_RVDC32(pipe_stat_reg));
- if (pipe_stat_val & PIPE_VBLANK_STATUS)
- drm_handle_vblank(dev, pipe);
+ if (pipe_stat_val & PIPE_VBLANK_STATUS ||
+ (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ unsigned long flags;
- if (pipe_stat_val & PIPE_TE_STATUS)
drm_handle_vblank(dev, pipe);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ drm_crtc_send_vblank_event(crtc,
+ gma_crtc->page_flip_event);
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
/*
@@ -194,7 +206,6 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 val, addr;
- int error = false;
if (stat_1 & _PSB_CE_TWOD_COMPLETE)
val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
@@ -229,7 +240,6 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
DRM_ERROR("\tMMU failing address is 0x%08x.\n",
(unsigned int)addr);
- error = true;
}
}
@@ -460,12 +470,11 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
- u32 hist_reg;
u32 pwm_reg;
if (gma_power_begin(dev, false)) {
PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
- hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ PSB_RVDC32(HISTOGRAM_INT_CONTROL);
psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 7de3ce637c7f..9e8224456ea2 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_data/tc35876x.h>
+#include <linux/gpio/consumer.h>
#include <asm/intel_scu_ipc.h>
@@ -36,6 +36,11 @@
static struct i2c_client *tc35876x_client;
static struct i2c_client *cmi_lcd_i2c_client;
+/* Panel GPIOs */
+static struct gpio_desc *bridge_reset;
+static struct gpio_desc *bridge_bl_enable;
+static struct gpio_desc *backlight_voltage;
+
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
@@ -316,27 +321,23 @@ static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
{
- struct tc35876x_platform_data *pdata;
-
if (WARN(!tc35876x_client, "%s called before probe", __func__))
return;
dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
- pdata = dev_get_platdata(&tc35876x_client->dev);
-
- if (pdata->gpio_bridge_reset == -1)
+ if (!bridge_reset)
return;
if (state) {
- gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ gpiod_set_value_cansleep(bridge_reset, 0);
mdelay(10);
} else {
/* Pull MIPI Bridge reset pin to Low */
- gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ gpiod_set_value_cansleep(bridge_reset, 0);
mdelay(20);
/* Pull MIPI Bridge reset pin to High */
- gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
+ gpiod_set_value_cansleep(bridge_reset, 1);
mdelay(40);
}
}
@@ -510,25 +511,20 @@ void tc35876x_brightness_control(struct drm_device *dev, int level)
void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
{
- struct tc35876x_platform_data *pdata;
-
if (WARN(!tc35876x_client, "%s called before probe", __func__))
return;
dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
- pdata = dev_get_platdata(&tc35876x_client->dev);
-
- if (pdata->gpio_panel_bl_en != -1)
- gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
+ if (bridge_bl_enable)
+ gpiod_set_value_cansleep(bridge_bl_enable, 0);
- if (pdata->gpio_panel_vadd != -1)
- gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
+ if (backlight_voltage)
+ gpiod_set_value_cansleep(backlight_voltage, 0);
}
void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
{
- struct tc35876x_platform_data *pdata;
struct drm_psb_private *dev_priv = dev->dev_private;
if (WARN(!tc35876x_client, "%s called before probe", __func__))
@@ -536,10 +532,8 @@ void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
- pdata = dev_get_platdata(&tc35876x_client->dev);
-
- if (pdata->gpio_panel_vadd != -1) {
- gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
+ if (backlight_voltage) {
+ gpiod_set_value_cansleep(backlight_voltage, 1);
msleep(260);
}
@@ -571,8 +565,8 @@ void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
"i2c write failed (%d)\n", ret);
}
- if (pdata->gpio_panel_bl_en != -1)
- gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
+ if (bridge_bl_enable)
+ gpiod_set_value_cansleep(bridge_bl_enable, 1);
tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
}
@@ -635,8 +629,6 @@ static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
static int tc35876x_bridge_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tc35876x_platform_data *pdata;
-
dev_info(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -645,26 +637,23 @@ static int tc35876x_bridge_probe(struct i2c_client *client,
return -ENODEV;
}
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_err(&client->dev, "%s: no platform data\n", __func__);
- return -ENODEV;
- }
+ bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW);
+ if (IS_ERR(bridge_reset))
+ return PTR_ERR(bridge_reset);
+ if (bridge_reset)
+ gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset");
- if (pdata->gpio_bridge_reset != -1) {
- gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
- gpio_direction_output(pdata->gpio_bridge_reset, 0);
- }
-
- if (pdata->gpio_panel_bl_en != -1) {
- gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
- gpio_direction_output(pdata->gpio_panel_bl_en, 0);
- }
+ bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW);
+ if (IS_ERR(bridge_bl_enable))
+ return PTR_ERR(bridge_bl_enable);
+ if (bridge_bl_enable)
+ gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en");
- if (pdata->gpio_panel_vadd != -1) {
- gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
- gpio_direction_output(pdata->gpio_panel_vadd, 0);
- }
+ backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW);
+ if (IS_ERR(backlight_voltage))
+ return PTR_ERR(backlight_voltage);
+ if (backlight_voltage)
+ gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd");
tc35876x_client = client;
@@ -673,19 +662,8 @@ static int tc35876x_bridge_probe(struct i2c_client *client,
static int tc35876x_bridge_remove(struct i2c_client *client)
{
- struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
-
dev_dbg(&client->dev, "%s\n", __func__);
- if (pdata->gpio_bridge_reset != -1)
- gpio_free(pdata->gpio_bridge_reset);
-
- if (pdata->gpio_panel_bl_en != -1)
- gpio_free(pdata->gpio_panel_bl_en);
-
- if (pdata->gpio_panel_vadd != -1)
- gpio_free(pdata->gpio_panel_vadd);
-
tc35876x_client = NULL;
return 0;
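The conversion above moves tc35876x from legacy platform-data GPIO numbers to GPIO descriptors: devm_gpiod_get_optional() returns NULL (not an error) when a line is not described, the devm_* lifetime replaces the gpio_free() calls removed from the remove() path, and GPIOD_OUT_LOW stands in for gpio_direction_output(..., 0). A hedged sketch of that consumer pattern; the "reset" con_id and example_* names are illustrative only:

#include <linux/gpio/consumer.h>

static struct gpio_desc *example_reset;

static int example_probe(struct device *dev)
{
	/* NULL when firmware does not describe the line; callers just skip it. */
	example_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(example_reset))
		return PTR_ERR(example_reset);

	if (example_reset)
		gpiod_set_consumer_name(example_reset, "example reset");
	return 0;
}

static void example_set_reset(int asserted)
{
	if (example_reset)
		gpiod_set_value_cansleep(example_reset, asserted);
}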
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index f20eedf0073a..dfc5aef62f7b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI && MMU && ARM64
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
-
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 0c2d4296bccd..f99132715597 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 08657a3627f3..7fa7d4933f60 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -11,10 +11,16 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <linux/delay.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
@@ -90,29 +96,19 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
u32 reg;
- int ret;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
- struct hibmc_framebuffer *hibmc_fb;
struct drm_gem_vram_object *gbo;
if (!state->fb)
return;
- hibmc_fb = to_hibmc_framebuffer(state->fb);
- gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
+ gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin bo: %d", ret);
- return;
- }
gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- return;
- }
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
@@ -151,6 +147,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = {
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};
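With this hunk, buffer pinning moves out of atomic_update: drm_gem_vram_plane_helper_prepare_fb() pins the framebuffer BO into VRAM before the commit and cleanup_fb() unpins it afterwards, so the update callback only reads back an offset that is already valid. A sketch of the resulting wiring, with example_* placeholders standing in for the driver's callbacks:

#include <drm/drm_gem_vram_helper.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_gem_vram_object *gbo;
	s64 gpu_addr;

	if (!state->fb)
		return;

	gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
	gpu_addr = drm_gem_vram_offset(gbo);	/* valid: prepare_fb pinned it */
	if (WARN_ON_ONCE(gpu_addr < 0))
		return;

	/* ... program the scanout base address from gpu_addr ... */
}

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.prepare_fb	= drm_gem_vram_plane_helper_prepare_fb,	/* pins to VRAM */
	.cleanup_fb	= drm_gem_vram_plane_helper_cleanup_fb,	/* unpins */
	.atomic_update	= example_plane_atomic_update,
	/* .atomic_check omitted for brevity */
};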
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index ce89e56937b0..4a8a4cfb4b75 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -13,17 +13,21 @@
#include <linux/console.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-static const struct file_operations hibmc_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
@@ -51,25 +55,23 @@ static struct drm_driver hibmc_driver = {
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
- .gem_free_object_unlocked =
- drm_gem_vram_driver_gem_free_object_unlocked,
+ .debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.irq_handler = hibmc_drm_interrupt,
};
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int __maybe_unused hibmc_pm_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
@@ -211,7 +213,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
- priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
@@ -247,8 +249,6 @@ static int hibmc_unload(struct drm_device *dev)
{
struct hibmc_drm_private *priv = dev->dev_private;
- hibmc_fbdev_fini(priv);
-
drm_atomic_helper_shutdown(dev);
if (dev->irq_enabled)
@@ -307,7 +307,7 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- ret = hibmc_fbdev_init(priv);
+ ret = drm_fbdev_generic_setup(dev, 16);
if (ret) {
DRM_ERROR("failed to initialize fbdev: %d\n", ret);
goto err;
@@ -388,18 +388,7 @@ static struct pci_driver hibmc_pci_driver = {
.driver.pm = &hibmc_pm_ops,
};
-static int __init hibmc_init(void)
-{
- return pci_register_driver(&hibmc_pci_driver);
-}
-
-static void __exit hibmc_exit(void)
-{
- return pci_unregister_driver(&hibmc_pci_driver);
-}
-
-module_init(hibmc_init);
-module_exit(hibmc_exit);
+module_pci_driver(hibmc_pci_driver);
MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
MODULE_AUTHOR("RongrongZou <zourongrong@huawei.com>");
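Two simplifications above are worth spelling out. The PM hooks read the drm_device straight from dev_get_drvdata() because pci_set_drvdata() stores it on the PCI device's embedded struct device, so the to_pci_dev() round trip adds nothing. And DEFINE_DRM_GEM_FOPS() replaces the open-coded file_operations; roughly what the macro expands to in this era of the tree (a sketch of the field set, not a verbatim quote of include/drm/drm_gem.h):

static const struct file_operations example_gem_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,	/* generic GEM mmap path */
};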
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 69348bf54a84..50a0c1f9d211 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -14,23 +14,10 @@
#ifndef HIBMC_DRM_DRV_H
#define HIBMC_DRM_DRV_H
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_framebuffer.h>
-struct hibmc_framebuffer {
- struct drm_framebuffer fb;
- struct drm_gem_object *obj;
-};
-
-struct hibmc_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct hibmc_framebuffer *fb;
- int size;
-};
+struct drm_device;
struct hibmc_drm_private {
/* hw */
@@ -43,13 +30,8 @@ struct hibmc_drm_private {
/* drm */
struct drm_device *dev;
bool mode_config_initialized;
-
- /* fbdev */
- struct hibmc_fbdev *fbdev;
};
-#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
-
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
unsigned int power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
@@ -57,15 +39,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_fbdev_init(struct hibmc_drm_private *priv);
-void hibmc_fbdev_fini(struct hibmc_drm_private *priv);
-
-int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
- struct drm_gem_object **obj);
-struct hibmc_framebuffer *
-hibmc_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
int hibmc_mm_init(struct hibmc_drm_private *hibmc);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
deleted file mode 100644
index af1ea4cceffa..000000000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- * Rongrong Zou <zourongrong@huawei.com>
- * Rongrong Zou <zourongrong@gmail.com>
- * Jianhua Li <lijianhua@huawei.com>
- */
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "hibmc_drm_drv.h"
-
-static int hibmcfb_create_object(
- struct hibmc_drm_private *priv,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_gem_object *gobj;
- struct drm_device *dev = priv->dev;
- u32 size;
- int ret = 0;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = hibmc_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static struct fb_ops hibmc_drm_fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct hibmc_fbdev *hi_fbdev =
- container_of(helper, struct hibmc_fbdev, helper);
- struct hibmc_drm_private *priv = helper->dev->dev_private;
- struct fb_info *info;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_gem_object *gobj = NULL;
- int ret = 0;
- size_t size;
- unsigned int bytes_per_pixel;
- struct drm_gem_vram_object *gbo = NULL;
- void *base;
-
- DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
-
- ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
- return -ENOMEM;
- }
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin fbcon: %d\n", ret);
- goto out_unref_gem;
- }
-
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- DRM_ERROR("failed to kmap fbcon: %d\n", ret);
- goto out_unpin_bo;
- }
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- DRM_ERROR("failed to allocate fbi: %d\n", ret);
- goto out_release_fbi;
- }
-
- hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
- if (IS_ERR(hi_fbdev->fb)) {
- ret = PTR_ERR(hi_fbdev->fb);
- hi_fbdev->fb = NULL;
- DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
- goto out_release_fbi;
- }
-
- priv->fbdev->size = size;
- hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
-
- info->fbops = &hibmc_drm_fb_ops;
-
- drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
-
- info->screen_base = base;
- info->screen_size = size;
-
- info->fix.smem_start = gbo->bo.mem.bus.offset + gbo->bo.mem.bus.base;
- info->fix.smem_len = size;
- return 0;
-
-out_release_fbi:
- drm_gem_vram_kunmap(gbo);
-out_unpin_bo:
- drm_gem_vram_unpin(gbo);
-out_unref_gem:
- drm_gem_object_put_unlocked(gobj);
-
- return ret;
-}
-
-static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
-{
- struct hibmc_framebuffer *gfb = fbdev->fb;
- struct drm_fb_helper *fbh = &fbdev->helper;
-
- drm_fb_helper_unregister_fbi(fbh);
-
- drm_fb_helper_fini(fbh);
-
- if (gfb)
- drm_framebuffer_put(&gfb->fb);
-}
-
-static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
- .fb_probe = hibmc_drm_fb_create,
-};
-
-int hibmc_fbdev_init(struct hibmc_drm_private *priv)
-{
- int ret;
- struct fb_var_screeninfo *var;
- struct fb_fix_screeninfo *fix;
- struct hibmc_fbdev *hifbdev;
-
- hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL);
- if (!hifbdev) {
- DRM_ERROR("failed to allocate hibmc_fbdev\n");
- return -ENOMEM;
- }
-
- priv->fbdev = hifbdev;
- drm_fb_helper_prepare(priv->dev, &hifbdev->helper,
- &hibmc_fbdev_helper_funcs);
-
- /* Now just one crtc and one channel */
- ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
- if (ret) {
- DRM_ERROR("failed to initialize fb helper: %d\n", ret);
- return ret;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper);
- if (ret) {
- DRM_ERROR("failed to add all connectors: %d\n", ret);
- goto fini;
- }
-
- ret = drm_fb_helper_initial_config(&hifbdev->helper, 16);
- if (ret) {
- DRM_ERROR("failed to setup initial conn config: %d\n", ret);
- goto fini;
- }
-
- var = &hifbdev->helper.fbdev->var;
- fix = &hifbdev->helper.fbdev->fix;
-
- DRM_DEBUG_DRIVER("Member of info->var is :\n"
- "xres=%d\n"
- "yres=%d\n"
- "xres_virtual=%d\n"
- "yres_virtual=%d\n"
- "xoffset=%d\n"
- "yoffset=%d\n"
- "bits_per_pixel=%d\n"
- "...\n", var->xres, var->yres, var->xres_virtual,
- var->yres_virtual, var->xoffset, var->yoffset,
- var->bits_per_pixel);
- DRM_DEBUG_DRIVER("Member of info->fix is :\n"
- "smem_start=%lx\n"
- "smem_len=%d\n"
- "type=%d\n"
- "type_aux=%d\n"
- "visual=%d\n"
- "xpanstep=%d\n"
- "ypanstep=%d\n"
- "ywrapstep=%d\n"
- "line_length=%d\n"
- "accel=%d\n"
- "capabilities=%d\n"
- "...\n", fix->smem_start, fix->smem_len, fix->type,
- fix->type_aux, fix->visual, fix->xpanstep,
- fix->ypanstep, fix->ywrapstep, fix->line_length,
- fix->accel, fix->capabilities);
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&hifbdev->helper);
- return ret;
-}
-
-void hibmc_fbdev_fini(struct hibmc_drm_private *priv)
-{
- if (!priv->fbdev)
- return;
-
- hibmc_fbdev_destroy(priv->fbdev);
- priv->fbdev = NULL;
-}
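The ~240 lines of fbdev emulation deleted above are superseded by the generic fbdev client: hibmc_load() now calls drm_fbdev_generic_setup(dev, 16), which allocates the console framebuffer through the driver's dumb-buffer path instead of a hand-rolled BO, fb_ops table and fb_probe hook. A minimal sketch of the replacement call site, mirroring the hunk in hibmc_drm_drv.c; example_load() is a placeholder name:

#include <drm/drm_fb_helper.h>

static int example_load(struct drm_device *dev)
{
	int ret;

	/* ... mode config, CRTC/encoder/connector and IRQ setup ... */

	drm_mode_config_reset(dev);

	ret = drm_fbdev_generic_setup(dev, 16);	/* 16 bpp console, as before */
	if (ret)
		DRM_ERROR("failed to initialize fbdev: %d\n", ret);

	return ret;
}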
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 634a3bf018b2..6d98fdc06f6c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 5d4a03cd7d50..50b988fdd5cc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -11,7 +11,13 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <linux/pci.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
@@ -23,7 +29,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
vmm = drm_vram_helper_alloc_mm(dev,
pci_resource_start(dev->pdev, 0),
- hibmc->fb_size, &drm_gem_vram_mm_funcs);
+ hibmc->fb_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
@@ -41,125 +47,14 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
drm_vram_helper_release_mm(hibmc->dev);
}
-int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object: %d\n", ret);
- return ret;
- }
- *obj = &gbo->gem;
- return 0;
-}
-
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct drm_gem_object *gobj;
- u32 handle;
- int ret;
-
- args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
- args->size = args->pitch * args->height;
-
- ret = hibmc_gem_create(dev, args->size, false,
- &gobj);
- if (ret) {
- DRM_ERROR("failed to create GEM object: %d\n", ret);
- return ret;
- }
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret) {
- DRM_ERROR("failed to unreference GEM object: %d\n", ret);
- return ret;
- }
-
- args->handle = handle;
- return 0;
-}
-
-static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
-
- drm_gem_object_put_unlocked(hibmc_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(hibmc_fb);
-}
-
-static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
- .destroy = hibmc_user_framebuffer_destroy,
-};
-
-struct hibmc_framebuffer *
-hibmc_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- struct hibmc_framebuffer *hibmc_fb;
- int ret;
-
- hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
- if (!hibmc_fb) {
- DRM_ERROR("failed to allocate hibmc_fb\n");
- return ERR_PTR(-ENOMEM);
- }
-
- drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd);
- hibmc_fb->obj = obj;
- ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- kfree(hibmc_fb);
- return ERR_PTR(ret);
- }
-
- return hibmc_fb;
-}
-
-static struct drm_framebuffer *
-hibmc_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct hibmc_framebuffer *hibmc_fb;
-
- DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
- mode_cmd->width, mode_cmd->height,
- (mode_cmd->pixel_format) & 0xff,
- (mode_cmd->pixel_format >> 8) & 0xff,
- (mode_cmd->pixel_format >> 16) & 0xff,
- (mode_cmd->pixel_format >> 24) & 0xff);
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(hibmc_fb)) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR((long)hibmc_fb);
- }
- return &hibmc_fb->fb;
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = hibmc_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create,
};
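The bespoke GEM and framebuffer code removed above collapses into two helpers: dumb-buffer creation goes through drm_gem_vram_fill_create_dumb() (default page alignment, 16-byte scanout pitch alignment, matching the ALIGN(..., 16) the old code computed by hand), and .fb_create switches to the generic drm_gem_fb_create(), which wraps the GEM object in a plain drm_framebuffer and makes the hibmc_framebuffer wrapper unnecessary. Sketch of the resulting hooks with example_* placeholder names:

#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	/* pg_align = 0 (default), pitch_align = 16 bytes, as in the hunk above */
	return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
}

static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create	= drm_gem_fb_create,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};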
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index 0fa29af08ad0..290553e2f6b4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -5,16 +5,8 @@ config DRM_HISI_KIRIN
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- select HISI_KIRIN_DW_DSI
+ select DRM_MIPI_DSI
help
Choose this option if you have a hisilicon Kirin chipsets(hi6220).
If M is selected the module will be called kirin-drm.
-config HISI_KIRIN_DW_DSI
- tristate "HiSilicon Kirin specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_HISI_KIRIN
- select DRM_MIPI_DSI
- help
- This selects support for HiSilicon Kirin SoC specific extensions for
- the Synopsys DesignWare DSI driver. If you want to enable MIPI DSI on
- hi6220 based SoC, you should selet this option.
diff --git a/drivers/gpu/drm/hisilicon/kirin/Makefile b/drivers/gpu/drm/hisilicon/kirin/Makefile
index c0501fa3fe53..d9323f66a7d4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Makefile
+++ b/drivers/gpu/drm/hisilicon/kirin/Makefile
@@ -2,6 +2,5 @@
kirin-drm-y := kirin_drm_drv.o \
kirin_drm_ade.o
-obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o dw_drm_dsi.o
-obj-$(CONFIG_HISI_KIRIN_DW_DSI) += dw_drm_dsi.o
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5bf8138941de..bdcf9c6ae9e9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
index e2ac09894a6d..0da860200410 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -83,6 +83,7 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
+#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index ad7042ae2241..73cd28a6ea07 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -13,32 +13,31 @@
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <video/display_timing.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <video/display_timing.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "kirin_drm_drv.h"
#include "kirin_ade_reg.h"
-#define PRIMARY_CH ADE_CH1 /* primary plane */
#define OUT_OVLY ADE_OVLY2 /* output overlay compositor */
#define ADE_DEBUG 1
-#define to_ade_crtc(crtc) \
- container_of(crtc, struct ade_crtc, base)
-
-#define to_ade_plane(plane) \
- container_of(plane, struct ade_plane, base)
struct ade_hw_ctx {
void __iomem *base;
@@ -47,36 +46,14 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
+ struct work_struct display_reset_wq;
bool power_on;
int irq;
-};
-
-struct ade_crtc {
- struct drm_crtc base;
- struct ade_hw_ctx *ctx;
- bool enable;
- u32 out_format;
-};
-
-struct ade_plane {
- struct drm_plane base;
- void *ctx;
- u8 ch; /* channel */
-};
-
-struct ade_data {
- struct ade_crtc acrtc;
- struct ade_plane aplane[ADE_CH_NUM];
- struct ade_hw_ctx ctx;
-};
-/* ade-format info: */
-struct ade_format {
- u32 pixel_format;
- enum ade_fb_format ade_format;
+ struct drm_crtc *crtc;
};
-static const struct ade_format ade_formats[] = {
+static const struct kirin_format ade_formats[] = {
/* 16bpp RGB: */
{ DRM_FORMAT_RGB565, ADE_RGB_565 },
{ DRM_FORMAT_BGR565, ADE_BGR_565 },
@@ -92,7 +69,7 @@ static const struct ade_format ade_formats[] = {
{ DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
};
-static const u32 channel_formats1[] = {
+static const u32 channel_formats[] = {
/* channel 1,2,3,4 */
DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
@@ -100,19 +77,6 @@ static const u32 channel_formats1[] = {
DRM_FORMAT_ABGR8888
};
-u32 ade_get_channel_formats(u8 ch, const u32 **formats)
-{
- switch (ch) {
- case ADE_CH1:
- *formats = channel_formats1;
- return ARRAY_SIZE(channel_formats1);
- default:
- DRM_ERROR("no this channel %d\n", ch);
- *formats = NULL;
- return 0;
- }
-}
-
/* convert from fourcc format to ade format */
static u32 ade_get_format(u32 pixel_format)
{
@@ -120,7 +84,7 @@ static u32 ade_get_format(u32 pixel_format)
for (i = 0; i < ARRAY_SIZE(ade_formats); i++)
if (ade_formats[i].pixel_format == pixel_format)
- return ade_formats[i].ade_format;
+ return ade_formats[i].hw_format;
/* not found */
DRM_ERROR("Not found pixel format!!fourcc_format= %d\n",
@@ -172,14 +136,15 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
+ ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
adjusted_mode->clock =
clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
@@ -204,11 +169,10 @@ static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
}
-static void ade_ldi_set_mode(struct ade_crtc *acrtc,
+static void ade_ldi_set_mode(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct ade_hw_ctx *ctx = acrtc->ctx;
void __iomem *base = ctx->base;
u32 width = mode->hdisplay;
u32 height = mode->vdisplay;
@@ -295,9 +259,8 @@ static void ade_power_down(struct ade_hw_ctx *ctx)
ctx->power_on = false;
}
-static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
+static void ade_set_medianoc_qos(struct ade_hw_ctx *ctx)
{
- struct ade_hw_ctx *ctx = acrtc->ctx;
struct regmap *map = ctx->noc_regmap;
regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
@@ -313,8 +276,8 @@ static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
static int ade_crtc_enable_vblank(struct drm_crtc *crtc)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
void __iomem *base = ctx->base;
if (!ctx->power_on)
@@ -328,8 +291,8 @@ static int ade_crtc_enable_vblank(struct drm_crtc *crtc)
static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
void __iomem *base = ctx->base;
if (!ctx->power_on) {
@@ -341,11 +304,21 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
+static void drm_underflow_wq(struct work_struct *work)
+{
+ struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
+ display_reset_wq);
+ struct drm_device *drm_dev = ctx->crtc->dev;
+ struct drm_atomic_state *state;
+
+ state = drm_atomic_helper_suspend(drm_dev);
+ drm_atomic_helper_resume(drm_dev, state);
+}
+
static irqreturn_t ade_irq_handler(int irq, void *data)
{
- struct ade_crtc *acrtc = data;
- struct ade_hw_ctx *ctx = acrtc->ctx;
- struct drm_crtc *crtc = &acrtc->base;
+ struct ade_hw_ctx *ctx = data;
+ struct drm_crtc *crtc = ctx->crtc;
void __iomem *base = ctx->base;
u32 status;
@@ -358,15 +331,20 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
+ if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
+ ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
+ MASK(1), 1);
+ DRM_ERROR("LDI underflow!");
+ schedule_work(&ctx->display_reset_wq);
+ }
return IRQ_HANDLED;
}
-static void ade_display_enable(struct ade_crtc *acrtc)
+static void ade_display_enable(struct ade_hw_ctx *ctx)
{
- struct ade_hw_ctx *ctx = acrtc->ctx;
void __iomem *base = ctx->base;
- u32 out_fmt = acrtc->out_format;
+ u32 out_fmt = LDI_OUT_RGB_888;
/* enable output overlay compositor */
writel(ADE_ENABLE, base + ADE_OVLYX_CTL(OUT_OVLY));
@@ -479,11 +457,11 @@ static void ade_dump_regs(void __iomem *base) { }
static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
int ret;
- if (acrtc->enable)
+ if (kcrtc->enable)
return;
if (!ctx->power_on) {
@@ -492,63 +470,63 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
- ade_set_medianoc_qos(acrtc);
- ade_display_enable(acrtc);
+ ade_set_medianoc_qos(ctx);
+ ade_display_enable(ctx);
ade_dump_regs(ctx->base);
drm_crtc_vblank_on(crtc);
- acrtc->enable = true;
+ kcrtc->enable = true;
}
static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
- if (!acrtc->enable)
+ if (!kcrtc->enable)
return;
drm_crtc_vblank_off(crtc);
ade_power_down(ctx);
- acrtc->enable = false;
+ kcrtc->enable = false;
}
static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
- ade_ldi_set_mode(acrtc, mode, adj_mode);
+ ade_ldi_set_mode(ctx, mode, adj_mode);
}
static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
- ade_ldi_set_mode(acrtc, mode, adj_mode);
+ ade_ldi_set_mode(ctx, mode, adj_mode);
}
static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_pending_vblank_event *event = crtc->state->event;
void __iomem *base = ctx->base;
/* only crtc is enabled regs take effect */
- if (acrtc->enable) {
+ if (kcrtc->enable) {
ade_dump_regs(base);
/* flush ade registers */
writel(ADE_ENABLE, base + ADE_EN);
@@ -586,35 +564,6 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
.disable_vblank = ade_crtc_disable_vblank,
};
-static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *plane)
-{
- struct device_node *port;
- int ret;
-
- /* set crtc port so that
- * drm_of_find_possible_crtcs call works
- */
- port = of_get_child_by_name(dev->dev->of_node, "port");
- if (!port) {
- DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
- return -EINVAL;
- }
- of_node_put(port);
- crtc->port = port;
-
- ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
- &ade_crtc_funcs, NULL);
- if (ret) {
- DRM_ERROR("failed to init crtc.\n");
- return ret;
- }
-
- drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
-
- return 0;
-}
-
static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
@@ -776,16 +725,16 @@ static void ade_compositor_routing_disable(void __iomem *base, u32 ch)
/*
* Typicaly, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
*/
-static void ade_update_channel(struct ade_plane *aplane,
+static void ade_update_channel(struct kirin_plane *kplane,
struct drm_framebuffer *fb, int crtc_x,
int crtc_y, unsigned int crtc_w,
unsigned int crtc_h, u32 src_x,
u32 src_y, u32 src_w, u32 src_h)
{
- struct ade_hw_ctx *ctx = aplane->ctx;
+ struct ade_hw_ctx *ctx = kplane->hw_ctx;
void __iomem *base = ctx->base;
u32 fmt = ade_get_format(fb->format->format);
- u32 ch = aplane->ch;
+ u32 ch = kplane->ch;
u32 in_w;
u32 in_h;
@@ -809,11 +758,11 @@ static void ade_update_channel(struct ade_plane *aplane,
ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
}
-static void ade_disable_channel(struct ade_plane *aplane)
+static void ade_disable_channel(struct kirin_plane *kplane)
{
- struct ade_hw_ctx *ctx = aplane->ctx;
+ struct ade_hw_ctx *ctx = kplane->hw_ctx;
void __iomem *base = ctx->base;
- u32 ch = aplane->ch;
+ u32 ch = kplane->ch;
DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
@@ -875,10 +824,10 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
static void ade_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_plane_state *state = plane->state;
- struct ade_plane *aplane = to_ade_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct kirin_plane *kplane = to_kirin_plane(plane);
- ade_update_channel(aplane, state->fb, state->crtc_x, state->crtc_y,
+ ade_update_channel(kplane, state->fb, state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16);
@@ -887,9 +836,9 @@ static void ade_plane_atomic_update(struct drm_plane *plane,
static void ade_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct ade_plane *aplane = to_ade_plane(plane);
+ struct kirin_plane *kplane = to_kirin_plane(plane);
- ade_disable_channel(aplane);
+ ade_disable_channel(kplane);
}
static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
@@ -907,144 +856,124 @@ static struct drm_plane_funcs ade_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
-static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
- enum drm_plane_type type)
-{
- const u32 *fmts;
- u32 fmts_cnt;
- int ret = 0;
-
- /* get properties */
- fmts_cnt = ade_get_channel_formats(aplane->ch, &fmts);
- if (ret)
- return ret;
-
- ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
- fmts, fmts_cnt, NULL, type, NULL);
- if (ret) {
- DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
- return ret;
- }
-
- drm_plane_helper_add(&aplane->base, &ade_plane_helper_funcs);
-
- return 0;
-}
-
-static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
+static void *ade_hw_ctx_alloc(struct platform_device *pdev,
+ struct drm_crtc *crtc)
{
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct ade_hw_ctx *ctx = NULL;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc ade_hw_ctx\n");
+ return ERR_PTR(-ENOMEM);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
- return PTR_ERR(ctx->base);
+ return ERR_PTR(-EIO);
}
ctx->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(ctx->reset))
- return PTR_ERR(ctx->reset);
+ return ERR_PTR(-ENODEV);
ctx->noc_regmap =
syscon_regmap_lookup_by_phandle(np, "hisilicon,noc-syscon");
if (IS_ERR(ctx->noc_regmap)) {
DRM_ERROR("failed to get noc regmap\n");
- return PTR_ERR(ctx->noc_regmap);
+ return ERR_PTR(-ENODEV);
}
ctx->irq = platform_get_irq(pdev, 0);
if (ctx->irq < 0) {
DRM_ERROR("failed to get irq\n");
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
if (IS_ERR(ctx->ade_core_clk)) {
DRM_ERROR("failed to parse clk ADE_CORE\n");
- return PTR_ERR(ctx->ade_core_clk);
+ return ERR_PTR(-ENODEV);
}
ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
if (IS_ERR(ctx->media_noc_clk)) {
DRM_ERROR("failed to parse clk CODEC_JPEG\n");
- return PTR_ERR(ctx->media_noc_clk);
+ return ERR_PTR(-ENODEV);
}
ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
if (IS_ERR(ctx->ade_pix_clk)) {
DRM_ERROR("failed to parse clk ADE_PIX\n");
- return PTR_ERR(ctx->ade_pix_clk);
- }
-
- return 0;
-}
-
-static int ade_drm_init(struct platform_device *pdev)
-{
- struct drm_device *dev = platform_get_drvdata(pdev);
- struct ade_data *ade;
- struct ade_hw_ctx *ctx;
- struct ade_crtc *acrtc;
- struct ade_plane *aplane;
- enum drm_plane_type type;
- int ret;
- int i;
-
- ade = devm_kzalloc(dev->dev, sizeof(*ade), GFP_KERNEL);
- if (!ade) {
- DRM_ERROR("failed to alloc ade_data\n");
- return -ENOMEM;
- }
- platform_set_drvdata(pdev, ade);
-
- ctx = &ade->ctx;
- acrtc = &ade->acrtc;
- acrtc->ctx = ctx;
- acrtc->out_format = LDI_OUT_RGB_888;
-
- ret = ade_dts_parse(pdev, ctx);
- if (ret)
- return ret;
-
- /*
- * plane init
- * TODO: Now only support primary plane, overlay planes
- * need to do.
- */
- for (i = 0; i < ADE_CH_NUM; i++) {
- aplane = &ade->aplane[i];
- aplane->ch = i;
- aplane->ctx = ctx;
- type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
- DRM_PLANE_TYPE_OVERLAY;
-
- ret = ade_plane_init(dev, aplane, type);
- if (ret)
- return ret;
+ return ERR_PTR(-ENODEV);
}
- /* crtc init */
- ret = ade_crtc_init(dev, &acrtc->base, &ade->aplane[PRIMARY_CH].base);
- if (ret)
- return ret;
-
/* vblank irq init */
- ret = devm_request_irq(dev->dev, ctx->irq, ade_irq_handler,
- IRQF_SHARED, dev->driver->name, acrtc);
+ ret = devm_request_irq(dev, ctx->irq, ade_irq_handler,
+ IRQF_SHARED, dev->driver->name, ctx);
if (ret)
- return ret;
+ return ERR_PTR(-EIO);
- return 0;
+ INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
+ ctx->crtc = crtc;
+
+ return ctx;
}
-static void ade_drm_cleanup(struct platform_device *pdev)
+static void ade_hw_ctx_cleanup(void *hw_ctx)
{
}
-const struct kirin_dc_ops ade_dc_ops = {
- .init = ade_drm_init,
- .cleanup = ade_drm_cleanup
+static const struct drm_mode_config_funcs ade_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
+
+static struct drm_driver ade_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ade_fops,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create_internal,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = "kirin",
+ .desc = "Hisilicon Kirin620 SoC DRM Driver",
+ .date = "20150718",
+ .major = 1,
+ .minor = 0,
+};
+
+struct kirin_drm_data ade_driver_data = {
+ .register_connects = false,
+ .num_planes = ADE_CH_NUM,
+ .prim_plane = ADE_CH1,
+ .channel_formats = channel_formats,
+ .channel_formats_cnt = ARRAY_SIZE(channel_formats),
+ .config_max_width = 2048,
+ .config_max_height = 2048,
+ .driver = &ade_driver,
+ .crtc_helper_funcs = &ade_crtc_helper_funcs,
+ .crtc_funcs = &ade_crtc_funcs,
+ .plane_helper_funcs = &ade_plane_helper_funcs,
+ .plane_funcs = &ade_plane_funcs,
+ .mode_config_funcs = &ade_mode_config_funcs,
+
+ .alloc_hw_ctx = ade_hw_ctx_alloc,
+ .cleanup_hw_ctx = ade_hw_ctx_cleanup,
};
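Besides folding the per-SoC bits into ade_driver_data, the rework above arms the LDI underflow interrupt and recovers from it by scheduling a work item that suspends and immediately resumes the full atomic state, i.e. a display re-init done from process context rather than from the IRQ handler. A sketch of that recovery pattern; the example_* types are placeholders, and the error check on the suspend return value is an extra precaution added here, not part of the driver code above:

#include <drm/drm_atomic_helper.h>

struct example_hw_ctx {
	struct work_struct	display_reset_wq;
	struct drm_crtc		*crtc;
};

static void example_display_reset(struct work_struct *work)
{
	struct example_hw_ctx *ctx =
		container_of(work, struct example_hw_ctx, display_reset_wq);
	struct drm_device *drm = ctx->crtc->dev;
	struct drm_atomic_state *state;

	state = drm_atomic_helper_suspend(drm);	/* disables all CRTCs */
	if (IS_ERR(state))
		return;
	drm_atomic_helper_resume(drm, state);	/* re-commits the saved state */
}

/* From the IRQ handler, after clearing the underflow status bit: */
static void example_on_underflow(struct example_hw_ctx *ctx)
{
	schedule_work(&ctx->display_reset_wq);
}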
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 4a7fe10a37cb..d3145ae877d7 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -13,59 +13,162 @@
#include <linux/of_platform.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "kirin_drm_drv.h"
-static struct kirin_dc_ops *dc_ops;
+#define KIRIN_MAX_PLANE 2
-static int kirin_drm_kms_cleanup(struct drm_device *dev)
+struct kirin_drm_private {
+ struct kirin_crtc crtc;
+ struct kirin_plane planes[KIRIN_MAX_PLANE];
+ void *hw_ctx;
+};
+
+static int kirin_drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ const struct kirin_drm_data *driver_data)
{
- drm_kms_helper_poll_fini(dev);
- dc_ops->cleanup(to_platform_device(dev->dev));
- drm_mode_config_cleanup(dev);
+ struct device_node *port;
+ int ret;
+
+ /* set crtc port so that
+ * drm_of_find_possible_crtcs call works
+ */
+ port = of_get_child_by_name(dev->dev->of_node, "port");
+ if (!port) {
+ DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
+ return -EINVAL;
+ }
+ of_node_put(port);
+ crtc->port = port;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ driver_data->crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc.\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, driver_data->crtc_helper_funcs);
return 0;
}
-static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
+static int kirin_drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ enum drm_plane_type type,
+ const struct kirin_drm_data *data)
+{
+ int ret = 0;
+
+ ret = drm_universal_plane_init(dev, plane, 1, data->plane_funcs,
+ data->channel_formats,
+ data->channel_formats_cnt,
+ NULL, type, NULL);
+ if (ret) {
+ DRM_ERROR("fail to init plane, ch=%d\n", 0);
+ return ret;
+ }
+
+ drm_plane_helper_add(plane, data->plane_helper_funcs);
-static void kirin_drm_mode_config_init(struct drm_device *dev)
+ return 0;
+}
+
+static void kirin_drm_private_cleanup(struct drm_device *dev)
{
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
+ struct kirin_drm_private *kirin_priv = dev->dev_private;
+ struct kirin_drm_data *data;
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ data = (struct kirin_drm_data *)of_device_get_match_data(dev->dev);
+ if (data->cleanup_hw_ctx)
+ data->cleanup_hw_ctx(kirin_priv->hw_ctx);
- dev->mode_config.funcs = &kirin_drm_mode_config_funcs;
+ devm_kfree(dev->dev, kirin_priv);
+ dev->dev_private = NULL;
}
-static int kirin_drm_kms_init(struct drm_device *dev)
+static int kirin_drm_private_init(struct drm_device *dev,
+ const struct kirin_drm_data *driver_data)
{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct kirin_drm_private *kirin_priv;
+ struct drm_plane *prim_plane;
+ enum drm_plane_type type;
+ void *ctx;
int ret;
+ u32 ch;
+
+ kirin_priv = devm_kzalloc(dev->dev, sizeof(*kirin_priv), GFP_KERNEL);
+ if (!kirin_priv) {
+ DRM_ERROR("failed to alloc kirin_drm_private\n");
+ return -ENOMEM;
+ }
+
+ ctx = driver_data->alloc_hw_ctx(pdev, &kirin_priv->crtc.base);
+ if (IS_ERR(ctx)) {
+ DRM_ERROR("failed to initialize kirin_priv hw ctx\n");
+ return -EINVAL;
+ }
+ kirin_priv->hw_ctx = ctx;
+
+ /*
+ * plane init
+ * TODO: Now only support primary plane, overlay planes
+ * need to do.
+ */
+ for (ch = 0; ch < driver_data->num_planes; ch++) {
+ if (ch == driver_data->prim_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = kirin_drm_plane_init(dev, &kirin_priv->planes[ch].base,
+ type, driver_data);
+ if (ret)
+ return ret;
+ kirin_priv->planes[ch].ch = ch;
+ kirin_priv->planes[ch].hw_ctx = ctx;
+ }
- dev_set_drvdata(dev->dev, dev);
+ /* crtc init */
+ prim_plane = &kirin_priv->planes[driver_data->prim_plane].base;
+ ret = kirin_drm_crtc_init(dev, &kirin_priv->crtc.base,
+ prim_plane, driver_data);
+ if (ret)
+ return ret;
+ kirin_priv->crtc.hw_ctx = ctx;
+ dev->dev_private = kirin_priv;
+
+ return 0;
+}
+
+static int kirin_drm_kms_init(struct drm_device *dev,
+ const struct kirin_drm_data *driver_data)
+{
+ int ret;
/* dev->mode_config initialization */
drm_mode_config_init(dev);
- kirin_drm_mode_config_init(dev);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = driver_data->config_max_width;
+ dev->mode_config.max_height = driver_data->config_max_height;
+ dev->mode_config.funcs = driver_data->mode_config_funcs;
/* display controller init */
- ret = dc_ops->init(to_platform_device(dev->dev));
+ ret = kirin_drm_private_init(dev, driver_data);
if (ret)
goto err_mode_config_cleanup;
@@ -73,7 +176,7 @@ static int kirin_drm_kms_init(struct drm_device *dev)
ret = component_bind_all(dev->dev, dev);
if (ret) {
DRM_ERROR("failed to bind all component.\n");
- goto err_dc_cleanup;
+ goto err_private_cleanup;
}
/* vblank init */
@@ -95,65 +198,78 @@ static int kirin_drm_kms_init(struct drm_device *dev)
err_unbind_all:
component_unbind_all(dev->dev, dev);
-err_dc_cleanup:
- dc_ops->cleanup(to_platform_device(dev->dev));
+err_private_cleanup:
+ kirin_drm_private_cleanup(dev);
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
-
return ret;
}
-DEFINE_DRM_GEM_CMA_FOPS(kirin_drm_fops);
-
-static int kirin_gem_cma_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
+static int compare_of(struct device *dev, void *data)
{
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return dev->of_node == data;
}
-static struct drm_driver kirin_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
- .fops = &kirin_drm_fops,
-
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = kirin_gem_cma_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
-
- .name = "kirin",
- .desc = "Hisilicon Kirin SoCs' DRM Driver",
- .date = "20150718",
- .major = 1,
- .minor = 0,
-};
+static int kirin_drm_kms_cleanup(struct drm_device *dev)
+{
+ drm_kms_helper_poll_fini(dev);
+ kirin_drm_private_cleanup(dev);
+ drm_mode_config_cleanup(dev);
-static int compare_of(struct device *dev, void *data)
+ return 0;
+}
+
+static int kirin_drm_connectors_register(struct drm_device *dev)
{
- return dev->of_node == data;
+ struct drm_connector *connector;
+ struct drm_connector *failed_connector;
+ struct drm_connector_list_iter conn_iter;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ failed_connector = connector;
+ goto err;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+
+err:
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (failed_connector == connector)
+ break;
+ drm_connector_unregister(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
}
static int kirin_drm_bind(struct device *dev)
{
- struct drm_driver *driver = &kirin_drm_driver;
+ struct kirin_drm_data *driver_data;
struct drm_device *drm_dev;
int ret;
- drm_dev = drm_dev_alloc(driver, dev);
+ driver_data = (struct kirin_drm_data *)of_device_get_match_data(dev);
+ if (!driver_data)
+ return -EINVAL;
+
+ drm_dev = drm_dev_alloc(driver_data->driver, dev);
if (IS_ERR(drm_dev))
return PTR_ERR(drm_dev);
+ dev_set_drvdata(dev, drm_dev);
- ret = kirin_drm_kms_init(drm_dev);
+ /* display controller init */
+ ret = kirin_drm_kms_init(drm_dev, driver_data);
if (ret)
goto err_drm_dev_put;
@@ -163,8 +279,17 @@ static int kirin_drm_bind(struct device *dev)
drm_fbdev_generic_setup(drm_dev, 32);
+ /* connectors should be registered after drm device register */
+ if (driver_data->register_connects) {
+ ret = kirin_drm_connectors_register(drm_dev);
+ if (ret)
+ goto err_drm_dev_unregister;
+ }
+
return 0;
+err_drm_dev_unregister:
+ drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
@@ -194,12 +319,6 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
struct component_match *match = NULL;
struct device_node *remote;
- dc_ops = (struct kirin_dc_ops *)of_device_get_match_data(dev);
- if (!dc_ops) {
- DRM_ERROR("failed to get dt id data\n");
- return -EINVAL;
- }
-
remote = of_graph_get_remote_node(np, 0, 0);
if (!remote)
return -ENODEV;
@@ -208,20 +327,17 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
-
- return 0;
}
static int kirin_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &kirin_drm_ops);
- dc_ops = NULL;
return 0;
}
static const struct of_device_id kirin_drm_dt_ids[] = {
{ .compatible = "hisilicon,hi6220-ade",
- .data = &ade_dc_ops,
+ .data = &ade_driver_data,
},
{ /* end node */ },
};
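kirin_drm_drv.c stops keeping a global dc_ops pointer and instead pulls a per-SoC struct kirin_drm_data from the OF match table at bind time; formats, mode-config limits, funcs tables and the hardware-context alloc/cleanup hooks all hang off that structure. A condensed sketch of the lookup, assuming the ade_driver_data table from the previous file; example_* names are placeholders:

#include <linux/of_device.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "hisilicon,hi6220-ade", .data = &ade_driver_data },
	{ /* sentinel */ }
};

static int example_bind(struct device *dev)
{
	const struct kirin_drm_data *data = of_device_get_match_data(dev);
	struct drm_device *drm;

	if (!data)
		return -EINVAL;

	drm = drm_dev_alloc(data->driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);
	dev_set_drvdata(dev, drm);

	/* ... mode-config limits and funcs come from data, then planes,
	 *     the CRTC and the hw context via data->alloc_hw_ctx() ... */
	return 0;
}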
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 22d1291668cd..4d5c05a24065 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -7,14 +7,52 @@
#ifndef __KIRIN_DRM_DRV_H__
#define __KIRIN_DRM_DRV_H__
-#define MAX_CRTC 2
+#define to_kirin_crtc(crtc) \
+ container_of(crtc, struct kirin_crtc, base)
+
+#define to_kirin_plane(plane) \
+ container_of(plane, struct kirin_plane, base)
+
+/* kirin-format translate table */
+struct kirin_format {
+ u32 pixel_format;
+ u32 hw_format;
+};
+
+struct kirin_crtc {
+ struct drm_crtc base;
+ void *hw_ctx;
+ bool enable;
+};
+
+struct kirin_plane {
+ struct drm_plane base;
+ void *hw_ctx;
+ u32 ch;
+};
/* display controller init/cleanup ops */
-struct kirin_dc_ops {
- int (*init)(struct platform_device *pdev);
- void (*cleanup)(struct platform_device *pdev);
+struct kirin_drm_data {
+ const u32 *channel_formats;
+ u32 channel_formats_cnt;
+ int config_max_width;
+ int config_max_height;
+ bool register_connects;
+ u32 num_planes;
+ u32 prim_plane;
+
+ struct drm_driver *driver;
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs;
+ const struct drm_crtc_funcs *crtc_funcs;
+ const struct drm_plane_helper_funcs *plane_helper_funcs;
+ const struct drm_plane_funcs *plane_funcs;
+ const struct drm_mode_config_funcs *mode_config_funcs;
+
+ void *(*alloc_hw_ctx)(struct platform_device *pdev,
+ struct drm_crtc *crtc);
+ void (*cleanup_hw_ctx)(void *hw_ctx);
};
-extern const struct kirin_dc_ops ade_dc_ops;
+extern struct kirin_drm_data ade_driver_data;
#endif /* __KIRIN_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index b6e091935977..986b04599906 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -27,7 +27,6 @@
#ifndef __DRM_I2C_CH7006_PRIV_H__
#define __DRM_I2C_CH7006_PRIV_H__
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 878ba8d06ce2..a839f78a4c8a 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -26,8 +26,9 @@
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/sil164.h>
@@ -43,7 +44,7 @@ struct sil164_priv {
((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
#define sil164_dbg(client, format, ...) do { \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
dev_printk(KERN_DEBUG, &client->dev, \
"%s: " format, __func__, ## __VA_ARGS__); \
} while (0)
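sil164's debug macro switches from peeking at the drm_debug mask directly to the drm_debug_enabled() accessor from <drm/drm_print.h>. A sketch of an equivalent category-gated debug helper; the helper name and message are illustrative:

#include <drm/drm_print.h>

static inline void example_kms_dbg(struct device *dev, const char *msg)
{
	/* Print only when the DRM_UT_KMS debug category is enabled. */
	if (drm_debug_enabled(DRM_UT_KMS))
		dev_printk(KERN_DEBUG, dev, "%s\n", msg);
}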
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 8039fc0d83db..5b03fdd1eaa4 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -420,7 +420,8 @@ static int tda9950_probe(struct i2c_client *client,
priv->hdmi = glue->parent;
priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
- CEC_CAP_DEFAULTS,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(priv->adap))
return PTR_ERR(priv->adap);
@@ -457,13 +458,14 @@ static int tda9950_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- priv->notify = cec_notifier_get(priv->hdmi);
+ priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL,
+ priv->adap);
if (!priv->notify)
return -ENOMEM;
ret = cec_register_adapter(priv->adap, priv->hdmi);
if (ret < 0) {
- cec_notifier_put(priv->notify);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
return ret;
}
@@ -473,8 +475,6 @@ static int tda9950_probe(struct i2c_client *client,
*/
devm_remove_action(dev, tda9950_cec_del, priv);
- cec_register_cec_notifier(priv->adap, priv->notify);
-
return 0;
}
@@ -482,8 +482,8 @@ static int tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
- cec_notifier_put(priv->notify);
return 0;
}
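The CEC conversion pairs cec_notifier_cec_adap_register() in probe with cec_notifier_cec_adap_unregister() in remove, matching the calls in the hunks above; a condensed sketch of that pairing (the glue struct below only stands in for the driver's private data):

#include <media/cec.h>
#include <media/cec-notifier.h>

/* Sketch only: this struct mirrors the priv->adap / priv->notify
 * fields used in the hunks above. */
struct cec_glue_sketch {
	struct cec_adapter *adap;
	struct cec_notifier *notify;
};

static int cec_glue_probe_sketch(struct cec_glue_sketch *priv,
				 struct device *hdmi_dev)
{
	priv->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
						      priv->adap);
	if (!priv->notify)
		return -ENOMEM;

	return cec_register_adapter(priv->adap, hdmi_dev);
}

static void cec_glue_remove_sketch(struct cec_glue_sketch *priv)
{
	/* Tear down in reverse: notifier first, then the adapter. */
	cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
	cec_unregister_adapter(priv->adap);
}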
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 61e042918a7f..a63790d32d75 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -13,10 +13,11 @@
#include <sound/asoundef.h>
#include <sound/hdmi-codec.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/tda998x.h>
@@ -805,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
tda998x_edid_delay_start(priv);
} else {
schedule_work(&priv->detect_work);
- cec_notifier_set_phys_addr(priv->cec_notify,
- CEC_PHYS_ADDR_INVALID);
+ cec_notifier_phys_addr_invalidate(
+ priv->cec_notify);
}
handled = true;
@@ -1790,8 +1791,7 @@ static void tda998x_destroy(struct device *dev)
i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
+ cec_notifier_conn_unregister(priv->cec_notify);
}
static int tda998x_create(struct device *dev)
@@ -1916,7 +1916,7 @@ static int tda998x_create(struct device *dev)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(dev);
+ priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 3b378936f575..b88c3d5f92b4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -30,13 +30,20 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_print.h>
#include <drm/i810_drm.h>
+
#include "i810_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
@@ -721,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
@@ -1041,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
sarea_priv->dirty = 0x7f;
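The added "used < 0" tests reject negative, user-controlled byte counts as well as oversized ones; a standalone sketch of the same validation (names are illustrative):

/* Illustrative only: validate a signed, user-supplied byte count
 * against the 4 KiB buffer limit checked above. */
static int i810_clamp_used_sketch(int used)
{
	if (used < 0 || used > 4 * 1024)
		return 0;	/* treat bogus sizes as "nothing to emit" */

	return used;
}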
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index c69d5c487f51..0e53a066d4db 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -30,13 +30,16 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include "i810_drv.h"
+
#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pciids.h>
#include <drm/i810_drm.h>
-#include "i810_drv.h"
-#include <drm/drm_pciids.h>
static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c73d2f2da57b..9df3981ffc66 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -32,7 +32,9 @@
#ifndef _I810_DRV_H_
#define _I810_DRV_H_
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/i810_drm.h>
/* General customization:
*/
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 000000000000..d9a77f3b59b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 0d21402945ab..ba9595960bbe 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -76,7 +76,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang to
- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
for triaging.
If in doubt, say "Y".
@@ -105,11 +105,11 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
- bool "Enable Intel GVT-g graphics virtualization host support"
- depends on DRM_I915
- depends on 64BIT
- default n
- help
+ bool "Enable Intel GVT-g graphics virtualization host support"
+ depends on DRM_I915
+ depends on 64BIT
+ default n
+ help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
@@ -148,3 +148,9 @@ menu "drm/i915 Profile Guided Optimisation"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.profile"
endmenu
+
+menu "drm/i915 Unstable Evolution"
+ visible if EXPERT && STAGING && BROKEN
+ depends on DRM_I915
+ source "drivers/gpu/drm/i915/Kconfig.unstable"
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8d922bb4d953..1cb28c20807c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -1,47 +1,48 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_I915_WERROR
- bool "Force GCC to throw an error instead of a warning when compiling"
- # As this may inadvertently break the build, only allow the user
- # to shoot oneself in the foot iff they aim really hard
- depends on EXPERT
- # We use the dependency on !COMPILE_TEST to not be enabled in
- # allmodconfig or allyesconfig configurations
- depends on !COMPILE_TEST
- default n
- help
- Add -Werror to the build flags for (and only for) i915.ko.
- Do not enable this unless you are writing code for the i915.ko module.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for (and only for) i915.ko.
+ Do not enable this unless you are writing code for the i915.ko module.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG
- bool "Enable additional driver debugging"
- depends on DRM_I915
- select DEBUG_FS
- select PREEMPT_COUNT
- select REFCOUNT_FULL
- select I2C_CHARDEV
- select STACKDEPOT
- select DRM_DP_AUX_CHARDEV
- select X86_MSR # used by igt/pm_rpm
- select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
- select DRM_DEBUG_MM if DRM=y
+ bool "Enable additional driver debugging"
+ depends on DRM_I915
+ select DEBUG_FS
+ select PREEMPT_COUNT
+ select I2C_CHARDEV
+ select STACKDEPOT
+ select DRM_DP_AUX_CHARDEV
+ select X86_MSR # used by igt/pm_rpm
+ select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
+ select DRM_EXPORT_FOR_TESTS if m
select DRM_DEBUG_SELFTEST
+ select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_DEBUG_MMIO
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_MMIO
bool "Always insert extra checks around mmio access by default"
@@ -57,16 +58,16 @@ config DRM_I915_DEBUG_MMIO
If in doubt, say "N".
config DRM_I915_DEBUG_GEM
- bool "Insert extra checks into the GEM internals"
- default n
- depends on DRM_I915_WERROR
- help
- Enable extra sanity checks (including BUGs) along the GEM driver
- paths that may slow the system down and if hit hang the machine.
+ bool "Insert extra checks into the GEM internals"
+ default n
+ depends on DRM_I915_WERROR
+ help
+ Enable extra sanity checks (including BUGs) along the GEM driver
+ paths that may slow the system down and if hit hang the machine.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
@@ -94,47 +95,62 @@ config DRM_I915_TRACE_GEM
If in doubt, say "N".
+config DRM_I915_TRACE_GTT
+ bool "Insert extra ftrace output from the GTT internals"
+ depends on DRM_I915_DEBUG_GEM
+ select TRACING
+ default n
+ help
+ Enable additional and verbose debugging output that will spam
+ ordinary tests, but may be vital for post-mortem debugging when
+ used with /proc/sys/kernel/ftrace_dump_on_oops
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
- bool "Enable additional driver debugging for fence objects"
- depends on DRM_I915
- select DEBUG_OBJECTS
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for fence objects"
+ depends on DRM_I915
+ select DEBUG_OBJECTS
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SW_FENCE_CHECK_DAG
- bool "Enable additional driver debugging for detecting dependency cycles"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for detecting dependency cycles"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_GUC
- bool "Enable additional driver debugging for GuC"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will help resolve GuC related issues.
+ bool "Enable additional driver debugging for GuC"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will help resolve GuC related issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
depends on DRM_I915
default n
+ select DRM_EXPORT_FOR_TESTS if m
select FAULT_INJECTION
select PRIME_NUMBERS
help
@@ -162,15 +178,15 @@ config DRM_I915_SELFTEST_BROKEN
If in doubt, say "N".
config DRM_I915_LOW_LEVEL_TRACEPOINTS
- bool "Enable low level request tracing events"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on low level request tracing events.
- This provides the ability to precisely monitor engine utilisation
- and also analyze the request dependency resolving timeline.
-
- If in doubt, say "N".
+ bool "Enable low level request tracing events"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on low level request tracing events.
+ This provides the ability to precisely monitor engine utilisation
+ and also analyze the request dependency resolving timeline.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG_VBLANK_EVADE
bool "Enable extra debug warnings for vblank evasion"
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 48df8889a88a..c280b6ae38eb 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -12,6 +12,29 @@ config DRM_I915_USERFAULT_AUTOSUSPEND
May be 0 to disable the extra delay and solely use the device level
runtime pm autosuspend delay tunable.
+config DRM_I915_HEARTBEAT_INTERVAL
+ int "Interval between heartbeat pulses (ms)"
+ default 2500 # milliseconds
+ help
+ The driver sends a periodic heartbeat down all active engines to
+ check the health of the GPU and undertake regular house-keeping of
+ internal driver state.
+
+ May be 0 to disable heartbeats and therefore disable automatic GPU
+ hang detection.
+
+config DRM_I915_PREEMPT_TIMEOUT
+ int "Preempt timeout (ms, jiffy granularity)"
+ default 640 # milliseconds
+ help
+ How long to wait (in milliseconds) for a preemption event to occur
+ when submitting a new context via execlists. If the current context
+ does not hit an arbitration point and yield to HW before the timer
+ expires, the HW will be reset to allow the more important context
+ to execute.
+
+ May be 0 to disable the timeout.
+
config DRM_I915_SPIN_REQUEST
int "Busywait for request completion (us)"
default 5 # microseconds
@@ -25,3 +48,29 @@ config DRM_I915_SPIN_REQUEST
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
+
+config DRM_I915_STOP_TIMEOUT
+ int "How long to wait for an engine to quiesce gracefully before reset (ms)"
+ default 100 # milliseconds
+ help
+ By stopping submission and sleeping for a short time before resetting
+ the GPU, we allow the innocent contexts also on the system to quiesce.
+ It is then less likely for a hanging context to cause collateral
+ damage as the system is reset in order to recover. The corollary is
+ that the reset itself may take longer and so be more disruptive to
+ interactive or low latency workloads.
+
+config DRM_I915_TIMESLICE_DURATION
+ int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
+ default 1 # milliseconds
+ help
+ When two user batches of equal priority are executing, we will
+ alternate execution of each batch to ensure forward progress of
+ all users. This is necessary in some cases where there may be
+ an implicit dependency between those batches that requires
+ concurrent execution in order for them to proceed, e.g. they
+ interact with each other via userspace semaphores. Each context
+ is scheduled for execution for the timeslice duration, before
+ switching to the next context.
+
+ May be 0 to disable timeslicing.
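These profile options are plain millisecond integers; a hedged sketch of how such a value can be consumed at jiffy granularity as the help texts describe (the wrapper is illustrative, not the driver's actual code):

#include <linux/jiffies.h>

/* Illustrative only: convert a CONFIG_DRM_I915_* millisecond tunable
 * to jiffies, treating 0 as "disabled" per the help texts above. */
static unsigned long profile_ms_to_jiffies_sketch(unsigned int timeout_ms)
{
	if (!timeout_ms)
		return 0;	/* 0 disables the mechanism entirely */

	return msecs_to_jiffies(timeout_ms);
}

/* e.g. profile_ms_to_jiffies_sketch(CONFIG_DRM_I915_PREEMPT_TIMEOUT) */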
diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable
new file mode 100644
index 000000000000..0c2276155c2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.unstable
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_I915_UNSTABLE
+ bool "Enable unstable API for early prototype development"
+ depends on EXPERT
+ depends on STAGING
+ depends on BROKEN # should never be enabled by distros!
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Enable prototype uAPI under general discussion before they are
+ finalized. Such prototypes may be withdrawn or substantially
+ changed before release. They are only enabled here so that a wide
+ number of interested parties (userspace driver developers) can
+ verify that the uAPI meet their expectations. These uAPI should
+ never be used in production.
+
+ Recommended for driver developers _only_.
+
+ If in the slightest bit of doubt, say "N".
+
+config DRM_I915_UNSTABLE_FAKE_LMEM
+ bool "Enable the experimental fake lmem"
+ depends on DRM_I915_UNSTABLE
+ default n
+ help
+ Convert some system memory into a fake local memory region for
+ testing.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 8cace65f50ce..b8c5f8934dbd 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,28 +26,30 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
+CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
-# Extra header tests
-include $(src)/Makefile.header-test
-
-subdir-ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
+ i915_getparam.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
+ i915_switcheroo.o \
i915_sysfs.o \
+ i915_utils.o \
intel_csr.o \
intel_device_info.o \
+ intel_memory_region.o \
+ intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
@@ -59,6 +61,7 @@ i915-y += \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
+ i915_sw_fence_work.o \
i915_syncmap.o \
i915_user_extensions.o
@@ -67,26 +70,48 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
-obj-y += gt/
gt-y += \
+ gt/debugfs_engines.o \
+ gt/debugfs_gt.o \
+ gt/debugfs_gt_pm.o \
+ gt/gen6_ppgtt.o \
+ gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
+ gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
+ gt/intel_engine_pool.o \
+ gt/intel_engine_user.o \
+ gt/intel_ggtt.o \
+ gt/intel_gt.o \
+ gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
- gt/intel_hangcheck.o \
+ gt/intel_gt_pm_irq.o \
+ gt/intel_gt_requests.o \
+ gt/intel_gtt.o \
+ gt/intel_llc.o \
gt/intel_lrc.o \
- gt/intel_reset.o \
- gt/intel_ringbuffer.o \
gt/intel_mocs.o \
+ gt/intel_ppgtt.o \
+ gt/intel_rc6.o \
+ gt/intel_renderstate.o \
+ gt/intel_reset.o \
+ gt/intel_ring.o \
+ gt/intel_ring_submission.o \
+ gt/intel_rps.o \
gt/intel_sseu.o \
+ gt/intel_timeline.o \
gt/intel_workarounds.o
-gt-$(CONFIG_DRM_I915_SELFTEST) += \
- gt/mock_engine.o
+# autogenerated null render state
+gt-y += \
+ gt/gen6_renderstate.o \
+ gt/gen7_renderstate.o \
+ gt/gen8_renderstate.o \
+ gt/gen9_renderstate.o
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
-obj-y += gem/
gem-y += \
gem/i915_gem_busy.o \
gem/i915_gem_clflush.o \
@@ -99,10 +124,12 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+ gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+ gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
@@ -114,42 +141,34 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
+ i915_buddy.o \
i915_cmd_parser.o \
- i915_gem_batch_pool.o \
i915_gem_evict.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
- i915_gem_render_state.o \
i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
- i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
+ intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
-i915-y += intel_uc.o \
- intel_uc_fw.o \
- intel_guc.o \
- intel_guc_ads.o \
- intel_guc_ct.o \
- intel_guc_fw.o \
- intel_guc_log.o \
- intel_guc_submission.o \
- intel_huc.o \
- intel_huc_fw.o
-
-# autogenerated null render state
-i915-y += intel_renderstate_gen6.o \
- intel_renderstate_gen7.o \
- intel_renderstate_gen8.o \
- intel_renderstate_gen9.o
+i915-y += gt/uc/intel_uc.o \
+ gt/uc/intel_uc_fw.o \
+ gt/uc/intel_guc.o \
+ gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_submission.o \
+ gt/uc/intel_huc.o \
+ gt/uc/intel_huc_fw.o
# modesetting core code
-obj-y += display/
i915-y += \
display/intel_atomic.o \
display/intel_atomic_plane.o \
@@ -164,6 +183,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_dpio_phy.o \
display/intel_dpll_mgr.o \
+ display/intel_dsb.o \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
@@ -173,7 +193,9 @@ i915-y += \
display/intel_overlay.o \
display/intel_psr.o \
display/intel_quirks.o \
- display/intel_sprite.o
+ display/intel_sprite.o \
+ display/intel_tc.o \
+ display/intel_vga.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -210,37 +232,41 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
+# perf code
+i915-y += \
+ oa/i915_oa_hsw.o \
+ oa/i915_oa_bdw.o \
+ oa/i915_oa_chv.o \
+ oa/i915_oa_sklgt2.o \
+ oa/i915_oa_sklgt3.o \
+ oa/i915_oa_sklgt4.o \
+ oa/i915_oa_bxt.o \
+ oa/i915_oa_kblgt2.o \
+ oa/i915_oa_kblgt3.o \
+ oa/i915_oa_glk.o \
+ oa/i915_oa_cflgt2.o \
+ oa/i915_oa_cflgt3.o \
+ oa/i915_oa_cnl.o \
+ oa/i915_oa_icl.o \
+ oa/i915_oa_tgl.o
+i915-y += i915_perf.o
+
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
gem/selftests/igt_gem_utils.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
+ selftests/igt_atomic.o \
selftests/igt_flush_test.o \
selftests/igt_live_test.o \
+ selftests/igt_mmap.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
# virtual gpu code
i915-y += i915_vgpu.o
-# perf code
-i915-y += i915_perf.o \
- i915_oa_hsw.o \
- i915_oa_bdw.o \
- i915_oa_chv.o \
- i915_oa_sklgt2.o \
- i915_oa_sklgt3.o \
- i915_oa_sklgt4.o \
- i915_oa_bxt.o \
- i915_oa_kblgt2.o \
- i915_oa_kblgt3.o \
- i915_oa_glk.o \
- i915_oa_cflgt2.o \
- i915_oa_cflgt3.o \
- i915_oa_cnl.o \
- i915_oa_icl.o
-
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
include $(src)/gvt/Makefile
@@ -248,3 +274,27 @@ endif
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+ display/intel_vbt_defs.h \
+ gvt/execlist.h \
+ gvt/fb_decoder.h \
+ gvt/gtt.h \
+ gvt/gvt.h \
+ gvt/interrupt.h \
+ gvt/mmio_context.h \
+ gvt/mpt.h \
+ gvt/scheduler.h
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+ $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
deleted file mode 100644
index 7cde0ec34615..000000000000
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header-test-$(CONFIG_DRM_I915_WERROR) := \
- i915_active_types.h \
- i915_debugfs.h \
- i915_drv.h \
- i915_irq.h \
- i915_params.h \
- i915_priolist_types.h \
- i915_reg.h \
- i915_scheduler_types.h \
- i915_timeline_types.h \
- i915_utils.h \
- intel_csr.h \
- intel_drv.h \
- intel_pm.h \
- intel_runtime_pm.h \
- intel_sideband.h \
- intel_uncore.h \
- intel_wakeref.h
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
deleted file mode 100644
index 1c75b5c9790c..000000000000
--- a/drivers/gpu/drm/i915/display/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Extra header tests
-include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test
deleted file mode 100644
index fc7d4e5bd2c6..000000000000
--- a/drivers/gpu/drm/i915/display/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 602380fe74f3..0589994dde11 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,7 +25,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index e070bebee7b5..54f58ba44b9f 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 09dba35f3ffa..f43d8c610d3f 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,7 +29,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/*
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index c83a5d88d62b..a724a8755673 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -28,7 +28,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 04698eaeb632..0dfa0a0209ff 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 623114ee73cd..009d65b0f3e9 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,7 +25,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 74448e6bf749..f8e882101396 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -34,6 +34,7 @@
#include "intel_ddi.h"
#include "intel_dsi.h"
#include "intel_panel.h"
+#include "intel_vdsc.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -76,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
@@ -201,64 +202,63 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum phy phy;
u32 tmp;
int lane;
- for_each_dsi_port(port, intel_dsi->ports) {
-
+ for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
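The conversion repeated through this file swaps the DSI port iterator for the combo-PHY iterator so the ICL_PORT_* registers are indexed by phy; a minimal sketch of the new loop shape, assuming the driver's for_each_dsi_phy() helper and intel_dsi layout:

/* Sketch only: iterate the combo PHYs backing the DSI ports instead of
 * the ports themselves; the register access is elided. */
static void dsi_phy_loop_sketch(struct intel_dsi *intel_dsi)
{
	enum phy phy;

	for_each_dsi_phy(phy, intel_dsi->phys) {
		/* read-modify-write an ICL_PORT_* register keyed by phy */
	}
}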
@@ -267,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 dss_ctl1;
dss_ctl1 = I915_READ(DSS_CTL1);
@@ -277,7 +277,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -302,18 +302,31 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
I915_WRITE(DSS_CTL1, dss_ctl1);
}
-static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+/* aka DSI 8X clock */
+static int afe_clk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count);
+}
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- u32 afe_clk_khz; /* 8X Clock */
+ int afe_clk_khz;
u32 esc_clk_div_m;
- afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
- intel_dsi->lane_count);
-
+ afe_clk_khz = afe_clk(encoder, crtc_state);
esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -347,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -363,46 +376,59 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum phy phy;
- for_each_dsi_port(port, intel_dsi->ports)
- intel_combo_phy_power_up_lanes(dev_priv, port, true,
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_combo_phy_power_up_lanes(dev_priv, phy, true,
intel_dsi->lane_count, false);
}
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum phy phy;
u32 tmp;
int lane;
/* Step 4b(i) set loadgen select for transmit and aux lanes */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+
+ /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
+ if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0);
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0x1);
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ }
}
}
@@ -410,18 +436,18 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
- enum port port;
+ enum phy phy;
/* clear common keeper enable bit */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -429,40 +455,40 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select program is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_CL_DW5(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
dsi_program_swing_and_deemphasis(encoder);
/* Set training enable to trigger update */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
@@ -478,12 +504,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+static void
+gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
+ enum phy phy;
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -517,18 +546,28 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
- if (intel_dsi_bitrate(intel_dsi) <= 800000) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
-
- /* shadow register inside display core */
- tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ if (IS_GEN(dev_priv, 11)) {
+ if (afe_clk(encoder, crtc_state) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+ /* shadow register inside display core */
+ tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ }
+ }
+ }
+
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_DPHY_CHKN(phy));
+ tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
+ I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
}
}
}
@@ -536,34 +575,32 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
- enum port port;
+ enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- }
+ tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
- enum port port;
+ enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- }
+ tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -571,26 +608,29 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum port port;
+ enum phy phy;
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- for_each_dsi_port(port, intel_dsi->ports) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ else
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
+ POSTING_READ(ICL_DPCLKA_CFGCR0);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -600,8 +640,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
enum port port;
@@ -617,7 +657,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= EOTP_DISABLED;
/* enable link calibration if freq > 1.5Gbps */
- if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+ if (afe_clk(encoder, pipe_config) >= 1500 * 1000) {
tmp &= ~LINK_CALIBRATION_MASK;
tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
}
@@ -643,22 +683,31 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select pixel format */
tmp &= ~PIX_FMT_MASK;
- switch (intel_dsi->pixel_format) {
- default:
- MISSING_CASE(intel_dsi->pixel_format);
- /* fallthrough */
- case MIPI_DSI_FMT_RGB565:
- tmp |= PIX_FMT_RGB565;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- tmp |= PIX_FMT_RGB666_PACKED;
- break;
- case MIPI_DSI_FMT_RGB666:
- tmp |= PIX_FMT_RGB666_LOOSE;
- break;
- case MIPI_DSI_FMT_RGB888:
- tmp |= PIX_FMT_RGB888;
- break;
+ if (pipe_config->dsc.compression_enable) {
+ tmp |= PIX_FMT_COMPRESSED;
+ } else {
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ /* fallthrough */
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
+ }
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_vid_mode(intel_dsi))
+ tmp |= BLANKING_PACKET_ENABLE;
}
/* program DSI operation mode */
@@ -716,6 +765,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
case PIPE_C:
tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
+ case PIPE_D:
+ tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF;
+ break;
}
/* enable DDI buffer */
@@ -734,27 +786,39 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
static void
gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum port port;
enum transcoder dsi_trans;
/* horizontal timings */
u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
- u16 hfront_porch, hback_porch;
+ u16 hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+ int mul = 1, div = 1;
+
+ /*
+ * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account
+ * for slower link speed if DSC is enabled.
+ *
+ * The compression frequency ratio is the ratio between compressed and
+ * non-compressed link speeds, and simplifies down to the ratio between
+ * compressed and non-compressed bpp.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ mul = crtc_state->dsc.compressed_bpp;
+ div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ }
hactive = adjusted_mode->crtc_hdisplay;
- htotal = adjusted_mode->crtc_htotal;
- hsync_start = adjusted_mode->crtc_hsync_start;
- hsync_end = adjusted_mode->crtc_hsync_end;
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
hsync_size = hsync_end - hsync_start;
- hfront_porch = (adjusted_mode->crtc_hsync_start -
- adjusted_mode->crtc_hdisplay);
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
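Worked numbers for the ratio above: with RGB888 (24 bpp) compressed to 8 bpp the transcoder's horizontal timings scale by 8/24; the htotal value below is purely illustrative.

#include <linux/kernel.h>	/* DIV_ROUND_UP */

/* Worked example only: an uncompressed crtc_htotal of 2200 with RGB888
 * (24 bpp) compressed to 8 bpp, following the scaling in the hunk above. */
static int dsc_scaled_htotal_example(void)
{
	const int mul = 8;	/* crtc_state->dsc.compressed_bpp */
	const int div = 24;	/* mipi_dsi_pixel_format_to_bpp(RGB888) */

	return DIV_ROUND_UP(2200 * mul, div);	/* 17600 / 24 -> 734 */
}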
@@ -845,12 +909,21 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
}
+
+ /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+ }
}
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -862,18 +935,17 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
- if (intel_wait_for_register(&dev_priv->uncore,
- PIPECONF(dsi_trans),
- I965_PIPECONF_ACTIVE,
- I965_PIPECONF_ACTIVE, 10))
+ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
}
}
-static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
@@ -885,7 +957,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
* TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
* ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
*/
- divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000;
mul = 8 * 1000000;
hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
divisor);
@@ -921,8 +993,10 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
@@ -936,22 +1010,23 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_enable_ddi_buffer(encoder);
/* setup D-PHY timings */
- gen11_dsi_setup_dphy_timings(encoder);
+ gen11_dsi_setup_dphy_timings(encoder, crtc_state);
/* step 4h: setup DSI protocol timeouts */
- gen11_dsi_setup_timeouts(encoder);
+ gen11_dsi_setup_timeouts(encoder, crtc_state);
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
- gen11_dsi_configure_transcoder(encoder, pipe_config);
+ gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
- gen11_dsi_gate_clocks(encoder);
+ if (IS_GEN(dev_priv, 11))
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
@@ -988,21 +1063,21 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
- gen11_dsi_program_esc_clk_div(encoder);
+ gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}
static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
/* step3b */
gen11_dsi_map_pll(encoder, pipe_config);
@@ -1013,6 +1088,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
/* step5: program and powerup panel */
gen11_dsi_powerup_panel(encoder);
+ intel_dsc_enable(encoder, pipe_config);
+
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
@@ -1027,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -1041,16 +1118,15 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
- if (intel_wait_for_register(&dev_priv->uncore,
- PIPECONF(dsi_trans),
- I965_PIPECONF_ACTIVE, 0, 50))
+ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
}
static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
@@ -1063,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -1104,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
@@ -1126,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -1153,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@@ -1175,12 +1251,42 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ skl_scaler_disable(old_crtc_state);
+}
+
+static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: DSC? */
+ return intel_dsi_mode_valid(connector, mode);
+}
+
static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
+
+ if (pipe_config->dsc.compressed_bpp) {
+ int div = pipe_config->dsc.compressed_bpp;
+ int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ adjusted_mode->crtc_htotal =
+ DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ adjusted_mode->crtc_hsync_start =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ adjusted_mode->crtc_hsync_end =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
+ }
if (intel_dsi->dual_link) {
adjusted_mode->crtc_hdisplay *= 2;
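
When DSC is enabled, the transcoder timings read back from hardware are in compressed-pixel units, so the horizontal values are scaled back to the uncompressed mode by pixel bpp over compressed bpp, rounded up. A minimal standalone sketch of that arithmetic (the 24/8 bpp and htotal figures are illustrative only, not taken from real hardware):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* scale a value programmed in compressed-clock units back to pixels */
static int scale_to_uncompressed(int hw_value, int pixel_bpp, int compressed_bpp)
{
	return DIV_ROUND_UP(hw_value * pixel_bpp, compressed_bpp);
}

int main(void)
{
	int pixel_bpp = 24;      /* RGB888 panel format */
	int compressed_bpp = 8;  /* example 3:1 DSC compression */
	int hw_htotal = 740;     /* htotal as programmed, in compressed units */

	printf("uncompressed crtc_htotal = %d\n",
	       scale_to_uncompressed(hw_htotal, pixel_bpp, compressed_bpp));
	return 0;
}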
@@ -1206,22 +1312,66 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pipe_config->port_clock =
cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
- pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
- pipe_config->base.adjusted_mode.crtc_clock *= 2;
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
}
+static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 12 : 10;
+ bool use_dsc;
+ int ret;
+
+ use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc);
+ if (!use_dsc)
+ return 0;
+
+ if (crtc_state->pipe_bpp < 8 * 3)
+ return -EINVAL;
+
+ /* FIXME: split only when necessary */
+ if (crtc_state->dsc.slice_count > 1)
+ crtc_state->dsc.dsc_split = true;
+
+ vdsc_cfg->convert_rgb = true;
+
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
+ /* DSI specific sanity checks on the common code */
+ WARN_ON(vdsc_cfg->vbr_enable);
+ WARN_ON(vdsc_cfg->simple_422);
+ WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ WARN_ON(vdsc_cfg->slice_height < 8);
+ WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+
+ ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+ if (ret)
+ return ret;
+
+ crtc_state->dsc.compression_enable = true;
+
+ return 0;
+}
+
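
The WARN_ON()s above capture the DSI-specific constraints on the DSC configuration produced by the common code: no VBR, no simple 4:2:2, an integer number of slices per picture, and slices at least 8 lines tall. A standalone sketch of the same checks (the struct below only mirrors a few drm_dsc_config fields and is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct dsc_cfg_sketch {
	int pic_width, pic_height;
	int slice_width, slice_height;
	bool vbr_enable, simple_422;
};

/* return true when the config satisfies the DSI-side sanity rules */
static bool dsi_dsc_config_ok(const struct dsc_cfg_sketch *c)
{
	return !c->vbr_enable && !c->simple_422 &&
	       c->slice_height >= 8 &&
	       c->pic_width % c->slice_width == 0 &&
	       c->pic_height % c->slice_height == 0;
}

int main(void)
{
	struct dsc_cfg_sketch cfg = {
		.pic_width = 1080, .pic_height = 2340,
		.slice_width = 540, .slice_height = 20,
	};

	printf("config %s\n", dsi_dsc_config_ok(&cfg) ? "ok" : "rejected");
	return 0;
}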
static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1229,11 +1379,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode =
intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -1247,8 +1397,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
pipe_config->clock_set = true;
- pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
+ DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+
+ pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
return 0;
}
@@ -1256,15 +1415,21 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- get_dsi_io_power_domains(to_i915(encoder->base.dev),
- enc_to_intel_dsi(&encoder->base));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ get_dsi_io_power_domains(i915,
+ enc_to_intel_dsi(encoder));
+
+ if (crtc_state->dsc.compression_enable)
+ intel_display_power_get(i915,
+ intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
enum port port;
@@ -1289,6 +1454,9 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
*pipe = PIPE_C;
break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ *pipe = PIPE_D;
+ break;
default:
DRM_ERROR("Invalid PIPE input\n");
goto out;
@@ -1324,7 +1492,7 @@ static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
- .mode_valid = intel_dsi_mode_valid,
+ .mode_valid = gen11_dsi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
@@ -1487,6 +1655,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
+static void icl_dsi_add_properties(struct intel_connector *connector)
+{
+ u32 allowed_scalers;
+
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
+ BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
+
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ connector->base.display_info.panel_orientation =
+ intel_dsi_get_panel_orientation(connector);
+ drm_connector_init_panel_orientation_property(&connector->base,
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
+}
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1521,6 +1709,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
encoder->pre_enable = gen11_dsi_pre_enable;
encoder->disable = gen11_dsi_disable;
+ encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->update_pipe = intel_panel_update_backlight;
@@ -1528,7 +1717,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->type = INTEL_OUTPUT_DSI;
encoder->cloneable = 0;
- encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
@@ -1580,6 +1769,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
icl_dphy_param_init(intel_dsi);
+
+ icl_dsi_add_properties(intel_connector);
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 90ca11a4ae88..c362eecdd414 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,8 +35,9 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_psr.h"
#include "intel_sprite.h"
/**
@@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_crtc_state *crtc_state;
intel_hdcp_atomic_check(conn, old_state, new_state);
+ intel_psr_atomic_check(conn, old_state, new_state);
if (!new_state->crtc)
return 0;
@@ -175,6 +177,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
}
/**
+ * intel_connector_needs_modeset - check if connector needs a modeset
+ */
+bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state, *new_conn_state;
+
+ old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
+ new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);
+
+ return old_conn_state->crtc != new_conn_state->crtc ||
+ (new_conn_state->crtc &&
+ drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
+ new_conn_state->crtc)));
+}
+
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ &connector->base);
+ if (IS_ERR(conn_state))
+ return ERR_CAST(conn_state);
+
+ return to_intel_digital_connector_state(conn_state);
+}
+
+/**
* intel_crtc_duplicate_state - duplicate crtc state
* @crtc: drm crtc
*
@@ -186,26 +220,57 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
struct intel_crtc_state *crtc_state;
- crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+ crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return NULL;
- __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);
+
+ /* copy color blobs */
+ if (crtc_state->hw.degamma_lut)
+ drm_property_blob_get(crtc_state->hw.degamma_lut);
+ if (crtc_state->hw.ctm)
+ drm_property_blob_get(crtc_state->hw.ctm);
+ if (crtc_state->hw.gamma_lut)
+ drm_property_blob_get(crtc_state->hw.gamma_lut);
crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
- crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
+ crtc_state->preload_luts = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- return &crtc_state->base;
+ return &crtc_state->uapi;
+}
+
+static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_blob_put(crtc_state->hw.degamma_lut);
+ drm_property_blob_put(crtc_state->hw.gamma_lut);
+ drm_property_blob_put(crtc_state->hw.ctm);
+}
+
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_put_color_blobs(crtc_state);
+}
+
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->uapi.degamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->uapi.gamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.ctm,
+ crtc_state->uapi.ctm);
}
/**
@@ -220,7 +285,11 @@ void
intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- drm_atomic_helper_crtc_destroy_state(crtc, state);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ kfree(crtc_state);
}
static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
@@ -249,10 +318,10 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
return;
/* set scaler mode */
- if (plane_state && plane_state->base.fb &&
- plane_state->base.fb->format->is_yuv &&
- plane_state->base.fb->format->num_planes > 1) {
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ if (plane_state && plane_state->hw.fb &&
+ plane_state->hw.fb->format->is_yuv &&
+ plane_state->hw.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
@@ -264,10 +333,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
*/
mode = PS_SCALER_MODE_NORMAL;
} else {
+ struct intel_plane *linked =
+ plane_state->planar_linked_plane;
+
mode = PS_SCALER_MODE_PLANAR;
- if (plane_state->linked_plane)
- mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ if (linked)
+ mode |= PS_PLANE_Y_SEL(linked->id);
}
} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
mode = PS_SCALER_MODE_NORMAL;
@@ -316,7 +388,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->base.state;
+ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i;
@@ -371,6 +443,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
if (!plane) {
struct drm_plane_state *state;
+
+ /*
+ * GLK+ scalers don't have a HQ mode so it
+ * isn't necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ continue;
+
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
@@ -378,13 +459,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane->base.id);
return PTR_ERR(state);
}
-
- /*
- * the plane is added after plane checks are run,
- * but since this plane is unchanged just do the
- * minimum required validation.
- */
- crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);
@@ -425,6 +499,13 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = state->modeset = false;
+ state->global_state_changed = false;
+ state->active_pipes = 0;
+ memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
+ memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
+ memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
+ memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
+ state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -438,3 +519,40 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+
+int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 58065d3161a3..74c749dbfb4f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -16,6 +16,8 @@ struct drm_crtc_state;
struct drm_device;
struct drm_i915_private;
struct drm_property;
+struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
@@ -31,10 +33,17 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state);
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector);
+bool intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector);
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector);
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *state);
@@ -46,4 +55,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
+int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 30bd4e76fff9..3e97af682b1b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -35,11 +35,22 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "i915_trace.h"
#include "intel_atomic_plane.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+static void intel_plane_state_reset(struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ memset(plane_state, 0, sizeof(*plane_state));
+
+ __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
+
+ plane_state->scaler_id = -1;
+}
+
struct intel_plane *intel_plane_alloc(void)
{
struct intel_plane_state *plane_state;
@@ -55,8 +66,9 @@ struct intel_plane *intel_plane_alloc(void)
return ERR_PTR(-ENOMEM);
}
- __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
- plane_state->scaler_id = -1;
+ intel_plane_state_reset(plane_state, plane);
+
+ plane->base.state = &plane_state->uapi;
return plane;
}
@@ -79,22 +91,24 @@ void intel_plane_free(struct intel_plane *plane)
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
- struct drm_plane_state *state;
struct intel_plane_state *intel_state;
- intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
+ intel_state = to_intel_plane_state(plane->state);
+ intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
if (!intel_state)
return NULL;
- state = &intel_state->base;
-
- __drm_atomic_helper_plane_duplicate_state(plane, state);
+ __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
intel_state->vma = NULL;
intel_state->flags = 0;
- return state;
+ /* add reference to fb */
+ if (intel_state->hw.fb)
+ drm_framebuffer_get(intel_state->hw.fb);
+
+ return &intel_state->uapi;
}
/**
@@ -109,18 +123,22 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(to_intel_plane_state(state)->vma);
+ struct intel_plane_state *plane_state = to_intel_plane_state(state);
+ WARN_ON(plane_state->vma);
- drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+ kfree(plane_state);
}
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
cpp = fb->format->cpp[0];
@@ -137,21 +155,90 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
return cpp * crtc_state->pixel_rate;
}
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
+ struct intel_crtc_state *crtc_state;
+
+ if (!plane_state->uapi.visible || !plane->min_cdclk)
+ return false;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(crtc_state, plane_state);
+
+ /*
+ * Does the cdclk need to be bumped up?
+ *
+ * Note: we obviously need to be called before the new
+ * cdclk frequency is calculated so state->cdclk.logical
+ * hasn't been populated yet. Hence we look at the old
+ * cdclk state under dev_priv->cdclk.logical. This is
+ * safe as long we hold at least one crtc mutex (which
+ * must be true since we have crtc_state).
+ */
+ if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id],
+ dev_priv->cdclk.logical.cdclk);
+ return true;
+ }
+
+ return false;
+}
+
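
The helper above compares each plane's minimum CDCLK requirement against the logical CDCLK of the currently committed state (the new value has not been computed yet at this point) and only asks for a full CDCLK recomputation when some plane exceeds it. A plain-C sketch of that decision, with illustrative kHz values:

#include <stdbool.h>
#include <stdio.h>

/* true when any plane needs more CDCLK than the committed logical value */
static bool needs_cdclk_recompute(const int *plane_min_cdclk, int n_planes,
				  int current_logical_cdclk)
{
	for (int i = 0; i < n_planes; i++) {
		if (plane_min_cdclk[i] > current_logical_cdclk)
			return true;
	}
	return false;
}

int main(void)
{
	int min_cdclk[] = { 192000, 307200, 326400 }; /* kHz, one per plane */

	printf("recompute: %d\n",
	       needs_cdclk_recompute(min_cdclk, 3, 312000 /* kHz */));
	return 0;
}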
+static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+
+ memset(&plane_state->hw, 0, sizeof(plane_state->hw));
+}
+
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state)
+{
+ intel_plane_clear_hw_state(plane_state);
+
+ plane_state->hw.crtc = from_plane_state->uapi.crtc;
+ plane_state->hw.fb = from_plane_state->uapi.fb;
+ if (plane_state->hw.fb)
+ drm_framebuffer_get(plane_state->hw.fb);
+
+ plane_state->hw.alpha = from_plane_state->uapi.alpha;
+ plane_state->hw.pixel_blend_mode =
+ from_plane_state->uapi.pixel_blend_mode;
+ plane_state->hw.rotation = from_plane_state->uapi.rotation;
+ plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
+ plane_state->hw.color_range = from_plane_state->uapi.color_range;
+}
+
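
This copy is the heart of the new uapi/hw split: the uapi half of the plane state holds what userspace requested, the hw half holds what will actually be programmed, and the hw half is re-derived from uapi during atomic check so the two can later diverge when the driver needs to program something other than the literal uapi values. A toy illustration with made-up types:

#include <stdio.h>

struct sketch_plane_state {
	struct { int rotation, alpha; } uapi; /* what userspace asked for */
	struct { int rotation, alpha; } hw;   /* what gets programmed */
};

/* refresh the hw side from the uapi side, as done at check time */
static void copy_uapi_to_hw(struct sketch_plane_state *s)
{
	s->hw.rotation = s->uapi.rotation;
	s->hw.alpha = s->uapi.alpha;
}

int main(void)
{
	struct sketch_plane_state s = {
		.uapi = { .rotation = 90, .alpha = 0xffff },
	};

	copy_uapi_to_hw(&s);
	printf("hw rotation=%d alpha=0x%x\n", s.hw.rotation, s.hw.alpha);
	return 0;
}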
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
- struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ const struct drm_framebuffer *fb;
int ret;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+ fb = new_plane_state->hw.fb;
+
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_crtc_state->data_rate[plane->id] = 0;
- new_plane_state->base.visible = false;
+ new_crtc_state->min_cdclk[plane->id] = 0;
+ new_plane_state->uapi.visible = false;
- if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
+ if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
ret = plane->check_plane(new_crtc_state, new_plane_state);
@@ -159,50 +246,63 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
return ret;
/* FIXME pre-g4x don't work like this */
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
new_crtc_state->active_planes |= BIT(plane->id);
- if (new_plane_state->base.visible &&
- is_planar_yuv_format(new_plane_state->base.fb->format->format))
+ if (new_plane_state->uapi.visible &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
new_crtc_state->nv12_planes |= BIT(plane->id);
- if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ if (new_plane_state->uapi.visible &&
+ fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
- if (new_plane_state->base.visible || old_plane_state->base.visible)
+ if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
new_crtc_state->update_planes |= BIT(plane->id);
new_crtc_state->data_rate[plane->id] =
intel_plane_data_rate(new_crtc_state, new_plane_state);
- return intel_plane_atomic_calc_changes(old_crtc_state,
- &new_crtc_state->base,
- old_plane_state,
- &new_plane_state->base);
+ return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
}
-static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
+static struct intel_crtc *
+get_crtc_from_states(const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
{
- struct drm_atomic_state *state = new_plane_state->state;
- const struct drm_plane_state *old_plane_state =
- drm_atomic_get_old_plane_state(state, plane);
- struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
- const struct drm_crtc_state *old_crtc_state;
- struct drm_crtc_state *new_crtc_state;
-
- new_plane_state->visible = false;
+ if (new_plane_state->uapi.crtc)
+ return to_intel_crtc(new_plane_state->uapi.crtc);
+
+ if (old_plane_state->uapi.crtc)
+ return to_intel_crtc(old_plane_state->uapi.crtc);
+
+ return NULL;
+}
+
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct intel_crtc *crtc =
+ get_crtc_from_states(old_plane_state, new_plane_state);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+
+ new_plane_state->uapi.visible = false;
if (!crtc)
return 0;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
- to_intel_crtc_state(new_crtc_state),
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ return intel_plane_atomic_check_with_state(old_crtc_state,
+ new_crtc_state,
+ old_plane_state,
+ new_plane_state);
}
static struct intel_plane *
@@ -253,26 +353,16 @@ void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_update_plane(&plane->base, crtc);
plane->update_plane(plane, crtc_state, plane_state);
}
-void intel_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_slave(plane, crtc_state, plane_state);
-}
-
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc_state);
@@ -301,25 +391,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
- if (new_plane_state->base.visible) {
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else if (new_plane_state->slave) {
- struct intel_plane *master =
- new_plane_state->linked_plane;
-
- /*
- * We update the slave plane from this function because
- * programming it from the master plane's update_plane
- * callback runs into issues when the Y plane is
- * reassigned, disabled or used by a different plane.
- *
- * The slave plane is updated with the master plane's
- * plane_state.
- */
- new_plane_state =
- intel_atomic_get_new_plane_state(state, master);
-
- intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
intel_disable_plane(plane, new_crtc_state);
}
@@ -341,7 +415,7 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
intel_update_plane(plane, new_crtc_state, new_plane_state);
else
intel_disable_plane(plane, new_crtc_state);
@@ -351,5 +425,4 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
- .atomic_check = intel_plane_atomic_check,
};
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 1437a8797e10..5cedafdddb55 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-struct drm_crtc_state;
struct drm_plane;
struct drm_property;
struct intel_atomic_state;
@@ -21,12 +20,11 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state);
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
@@ -42,9 +40,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state);
+ struct intel_plane_state *plane_state);
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 840daff12246..b18040793d9e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -28,8 +28,9 @@
#include <drm/i915_component.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_lpe_audio.h"
/**
@@ -72,6 +73,13 @@ struct dp_aud_n_m {
u16 n;
};
+struct hdmi_aud_ncts {
+ int sample_rate;
+ int clock;
+ int n;
+ int cts;
+};
+
/* Values according to DP 1.4 Table 2-104 */
static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 32000, LC_162M, 1024, 10125 },
@@ -148,12 +156,7 @@ static const struct {
#define TMDS_594M 594000
#define TMDS_593M 593407
-static const struct {
- int sample_rate;
- int clock;
- int n;
- int cts;
-} hdmi_aud_ncts[] = {
+static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = {
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
{ 32000, TMDS_593M, 5824, 843750 },
@@ -184,11 +187,54 @@ static const struct {
{ 192000, TMDS_594M, 24576, 594000 },
};
+/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec */
+/* HDMI N/CTS table for 10 bit deep color (30 bpp) */
+#define TMDS_371M 371250
+#define TMDS_370M 370878
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = {
+ { 32000, TMDS_370M, 5824, 527344 },
+ { 32000, TMDS_371M, 6144, 556875 },
+ { 44100, TMDS_370M, 8918, 585938 },
+ { 44100, TMDS_371M, 4704, 309375 },
+ { 88200, TMDS_370M, 17836, 585938 },
+ { 88200, TMDS_371M, 9408, 309375 },
+ { 176400, TMDS_370M, 35672, 585938 },
+ { 176400, TMDS_371M, 18816, 309375 },
+ { 48000, TMDS_370M, 11648, 703125 },
+ { 48000, TMDS_371M, 5120, 309375 },
+ { 96000, TMDS_370M, 23296, 703125 },
+ { 96000, TMDS_371M, 10240, 309375 },
+ { 192000, TMDS_370M, 46592, 703125 },
+ { 192000, TMDS_371M, 20480, 309375 },
+};
+
+/* HDMI N/CTS table for 12 bit deep color (36 bpp) */
+#define TMDS_445_5M 445500
+#define TMDS_445M 445054
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
+ { 32000, TMDS_445M, 5824, 632813 },
+ { 32000, TMDS_445_5M, 4096, 445500 },
+ { 44100, TMDS_445M, 8918, 703125 },
+ { 44100, TMDS_445_5M, 4704, 371250 },
+ { 88200, TMDS_445M, 17836, 703125 },
+ { 88200, TMDS_445_5M, 9408, 371250 },
+ { 176400, TMDS_445M, 35672, 703125 },
+ { 176400, TMDS_445_5M, 18816, 371250 },
+ { 48000, TMDS_445M, 5824, 421875 },
+ { 48000, TMDS_445_5M, 5120, 371250 },
+ { 96000, TMDS_445M, 11648, 421875 },
+ { 96000, TMDS_445_5M, 10240, 371250 },
+ { 192000, TMDS_445M, 23296, 421875 },
+ { 192000, TMDS_445_5M, 20480, 371250 },
+};
+
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@@ -212,14 +258,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- int i;
+ const struct hdmi_aud_ncts *hdmi_ncts_table;
+ int i, size;
+
+ if (crtc_state->pipe_bpp == 36) {
+ hdmi_ncts_table = hdmi_aud_ncts_36bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_36bpp);
+ } else if (crtc_state->pipe_bpp == 30) {
+ hdmi_ncts_table = hdmi_aud_ncts_30bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_30bpp);
+ } else {
+ hdmi_ncts_table = hdmi_aud_ncts_24bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_24bpp);
+ }
- for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
- if (rate == hdmi_aud_ncts[i].sample_rate &&
- adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
- return hdmi_aud_ncts[i].n;
+ for (i = 0; i < size; i++) {
+ if (rate == hdmi_ncts_table[i].sample_rate &&
+ crtc_state->port_clock == hdmi_ncts_table[i].clock) {
+ return hdmi_ncts_table[i].n;
}
}
return 0;
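
The lookup above now selects the N/CTS table by pipe bpp (24, 30 or 36) and matches on the port (TMDS) clock rather than the pixel clock. A minimal sketch of that selection, reproducing only a couple of entries from the tables added in this patch (the 36 bpp case is omitted for brevity):

#include <stdio.h>

struct ncts { int sample_rate, clock, n, cts; };

static const struct ncts ncts_24bpp[] = {
	{ 32000, 297000, 3072, 222750 },
	{ 192000, 594000, 24576, 594000 },
};

static const struct ncts ncts_30bpp[] = {
	{ 32000, 371250, 6144, 556875 },
	{ 48000, 371250, 5120, 309375 },
};

static int hdmi_get_n(int pipe_bpp, int port_clock, int rate)
{
	const struct ncts *table;
	int i, size;

	if (pipe_bpp == 30) {
		table = ncts_30bpp;
		size = sizeof(ncts_30bpp) / sizeof(ncts_30bpp[0]);
	} else {
		table = ncts_24bpp;
		size = sizeof(ncts_24bpp) / sizeof(ncts_24bpp[0]);
	}

	for (i = 0; i < size; i++) {
		if (table[i].sample_rate == rate && table[i].clock == port_clock)
			return table[i].n;
	}
	return 0; /* 0 means "let hardware pick a default N" */
}

int main(void)
{
	printf("N = %d\n", hdmi_get_n(30, 371250, 48000));
	return 0;
}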
@@ -499,14 +555,15 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
if (WARN_ON(port == PORT_A))
return;
@@ -545,7 +602,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
@@ -554,8 +611,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
- port_name(port), pipe_name(pipe), drm_eld_size(eld));
+ DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
if (WARN_ON(port == PORT_A))
return;
@@ -634,10 +692,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
@@ -649,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
connector->name,
- connector->encoder->base.id,
- connector->encoder->name);
+ encoder->base.base.id,
+ encoder->base.name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -695,7 +753,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
@@ -761,13 +819,8 @@ retry:
to_intel_atomic_state(state)->cdclk.force_min_cdclk =
enable ? 2 * 96000 : 0;
- /*
- * Protects dev_priv->cdclk.force_min_cdclk
- * Need to lock this here in case we have no active pipes
- * and thus wouldn't lock it during the commit otherwise.
- */
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- &ctx);
+ /* Protects dev_priv->cdclk.force_min_cdclk */
+ ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
if (!ret)
ret = drm_atomic_commit(state);
@@ -795,11 +848,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
- if (dev_priv->audio_power_refcount++ == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (dev_priv->audio_power_refcount++ == 0) {
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
+ DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
+ /* Force CDCLK to 2*BCLK as long as we need audio powered. */
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(AUD_PIN_BUF_CTL,
+ (I915_READ(AUD_PIN_BUF_CTL) |
+ AUD_PIN_BUF_ENABLE));
+ }
+
return ret;
}
@@ -810,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
@@ -1059,6 +1124,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
+ DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
dev_priv->audio_component_registered = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3ef4e9f573cf..ef4017a1baba 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -28,6 +28,8 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
+#include "display/intel_display.h"
+#include "display/intel_display_types.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
@@ -57,6 +59,13 @@
* that.
*/
+/* Wrapper for VBT child device config */
+struct display_device_data {
+ struct child_device_config child;
+ struct dsc_compression_parameters_entry *dsc;
+ struct list_head node;
+};
+
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
@@ -201,17 +210,12 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
}
-/* Try to find integrated panel data */
+/* Parse general panel options */
static void
-parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_panel_options(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
const struct bdb_lvds_options *lvds_options;
- const struct bdb_lvds_lfp_data *lvds_lfp_data;
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- const struct lvds_dvo_timing *panel_dvo_timing;
- const struct lvds_fp_timing *fp_timing;
- struct drm_display_mode *panel_fixed_mode;
int panel_type;
int drrs_mode;
int ret;
@@ -260,6 +264,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
break;
}
+}
+
+/* Try to find integrated panel timing data */
+static void
+parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int panel_type = dev_priv->vbt.panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
@@ -281,7 +298,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -299,6 +316,100 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
}
static void
+parse_generic_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_generic_dtd *generic_dtd;
+ const struct generic_dtd_entry *dtd;
+ struct drm_display_mode *panel_fixed_mode;
+ int num_dtd;
+
+ generic_dtd = find_section(bdb, BDB_GENERIC_DTD);
+ if (!generic_dtd)
+ return;
+
+ if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
+ DRM_ERROR("GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
+ return;
+ } else if (generic_dtd->gdtd_size !=
+ sizeof(struct generic_dtd_entry)) {
+ DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+ /* DTD has unknown fields, but keep going */
+ }
+
+ num_dtd = (get_blocksize(generic_dtd) -
+ sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
+ if (dev_priv->vbt.panel_type >= num_dtd) {
+ DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
+ dev_priv->vbt.panel_type, num_dtd);
+ return;
+ }
+
+ dtd = &generic_dtd->dtd[dev_priv->vbt.panel_type];
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ panel_fixed_mode->hdisplay = dtd->hactive;
+ panel_fixed_mode->hsync_start =
+ panel_fixed_mode->hdisplay + dtd->hfront_porch;
+ panel_fixed_mode->hsync_end =
+ panel_fixed_mode->hsync_start + dtd->hsync;
+ panel_fixed_mode->htotal =
+ panel_fixed_mode->hdisplay + dtd->hblank;
+
+ panel_fixed_mode->vdisplay = dtd->vactive;
+ panel_fixed_mode->vsync_start =
+ panel_fixed_mode->vdisplay + dtd->vfront_porch;
+ panel_fixed_mode->vsync_end =
+ panel_fixed_mode->vsync_start + dtd->vsync;
+ panel_fixed_mode->vtotal =
+ panel_fixed_mode->vdisplay + dtd->vblank;
+
+ panel_fixed_mode->clock = dtd->pixel_clock;
+ panel_fixed_mode->width_mm = dtd->width_mm;
+ panel_fixed_mode->height_mm = dtd->height_mm;
+
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(panel_fixed_mode);
+
+ if (dtd->hsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dtd->vsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+ dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+}
+
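
The generic-DTD parsing above rebuilds a display mode from active size, front porch, sync width and blanking: sync start/end are offsets from the active region, and the total is active plus blanking. A standalone sketch of the horizontal arithmetic with illustrative 1080p numbers (not taken from a real VBT):

#include <stdio.h>

struct dtd_sketch {
	int hactive, hfront_porch, hsync, hblank;
};

int main(void)
{
	struct dtd_sketch d = {
		.hactive = 1920, .hfront_porch = 88, .hsync = 44, .hblank = 280,
	};
	int hsync_start = d.hactive + d.hfront_porch; /* 2008 */
	int hsync_end   = hsync_start + d.hsync;      /* 2052 */
	int htotal      = d.hactive + d.hblank;       /* 2200 */

	printf("hsync_start=%d hsync_end=%d htotal=%d\n",
	       hsync_start, hsync_end, htotal);
	return 0;
}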
+static void
+parse_panel_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ /*
+ * Older VBTs provided DTD information for internal displays
+ * through the "LFP panel DTD" block (42). As of VBT revision 229,
+ * that block is now deprecated and DTD information should be provided
+ * via a newer "generic DTD" block (58). Just to be safe, we'll
+ * try the new generic DTD block first on VBT >= 229, but still fall
+ * back to trying the old LFP block if that fails.
+ */
+ if (bdb->version >= 229)
+ parse_generic_dtd(dev_priv, bdb);
+ if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(dev_priv, bdb);
+}
+
+static void
parse_lfp_backlight(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
@@ -448,8 +559,9 @@ static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
{
struct sdvo_device_mapping *mapping;
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i, count = 0;
+ int count = 0;
/*
* Only parse SDVO mappings on gens that could have SDVO. This isn't
@@ -460,8 +572,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
return;
}
- for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -551,16 +663,45 @@ parse_driver_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.int_lvds_support = 0;
}
- DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ if (bdb->version < 228) {
+ DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+ * This is because VBT is configured in such a way that
+ * static DRRS is 0 and DRRS not supported is represented by
+ * driver->drrs_enabled=false
+ */
+ if (!driver->drrs_enabled)
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+ dev_priv->vbt.psr.enable = driver->psr_enabled;
+ }
+}
+
+static void
+parse_power_conservation_features(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_lfp_power *power;
+ u8 panel_type = dev_priv->vbt.panel_type;
+
+ if (bdb->version < 228)
+ return;
+
+ power = find_section(bdb, BDB_LVDS_POWER);
+ if (!power)
+ return;
+
+ dev_priv->vbt.psr.enable = power->psr & BIT(panel_type);
+
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because VBT is configured in such a way that
* static DRRS is 0 and DRRS not supported is represented by
- * driver->drrs_enabled=false
+ * power->drrs & BIT(panel_type)=false
*/
- if (!driver->drrs_enabled)
+ if (!(power->drrs & BIT(panel_type)))
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- dev_priv->vbt.psr.enable = driver->psr_enabled;
}
static void
@@ -1229,6 +1370,57 @@ err:
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
+static void
+parse_compression_parameters(struct drm_i915_private *i915,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_compression_parameters *params;
+ struct display_device_data *devdata;
+ const struct child_device_config *child;
+ u16 block_size;
+ int index;
+
+ if (bdb->version < 198)
+ return;
+
+ params = find_section(bdb, BDB_COMPRESSION_PARAMETERS);
+ if (params) {
+ /* Sanity checks */
+ if (params->entry_size != sizeof(params->data[0])) {
+ DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+ return;
+ }
+
+ block_size = get_blocksize(params);
+ if (block_size < sizeof(*params)) {
+ DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+ return;
+ }
+ }
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!child->compression_enable)
+ continue;
+
+ if (!params) {
+ DRM_DEBUG_KMS("VBT: compression params not available\n");
+ continue;
+ }
+
+ if (child->compression_method_cps) {
+ DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+ continue;
+ }
+
+ index = child->compression_structure_index;
+
+ devdata->dsc = kmemdup(&params->data[index],
+ sizeof(*devdata->dsc), GFP_KERNEL);
+ }
+}
+
static u8 translate_iboost(u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
@@ -1245,7 +1437,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
const struct ddi_vbt_port_info *info;
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
if (info->child && ddc_pin == info->alternate_ddc_pin)
@@ -1269,7 +1461,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(port));
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1277,9 +1469,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
- * Give child device order the priority, first come first
- * served.
+ * Give inverse child device order the priority,
+ * last one wins. Yes, there are real machines
+ * (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and
+ * we must pick port E :(
*/
+ info = &dev_priv->vbt.ddi_port_info[p];
+
info->supports_dvi = false;
info->supports_hdmi = false;
info->alternate_ddc_pin = 0;
@@ -1291,7 +1488,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
const struct ddi_vbt_port_info *info;
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
if (info->child && aux_ch == info->alternate_aux_channel)
@@ -1315,7 +1512,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(port));
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1323,9 +1520,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Give child device order the priority, first come first
- * served.
+ * Give inverse child device order the priority,
+ * last one wins. Yes, there are real machines
+ * (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and
+ * we must pick port E :(
*/
+ info = &dev_priv->vbt.ddi_port_info[p];
+
info->supports_dp = false;
info->alternate_aux_channel = 0;
}
@@ -1342,16 +1544,13 @@ static const u8 cnp_ddc_pin_map[] = {
static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
[ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
[ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
[ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
-};
-
-static const u8 mcc_ddc_pin_map[] = {
- [MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
+ [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
@@ -1359,10 +1558,7 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_MCC(dev_priv)) {
- ddc_pin_map = mcc_ddc_pin_map;
- n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
- } else if (HAS_PCH_ICP(dev_priv)) {
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} else if (HAS_PCH_CNP(dev_priv)) {
@@ -1394,6 +1590,7 @@ static enum port dvo_port_to_port(u8 dvo_port)
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
};
enum port port;
int i;
@@ -1412,9 +1609,10 @@ static enum port dvo_port_to_port(u8 dvo_port)
}
static void parse_ddi_port(struct drm_i915_private *dev_priv,
- const struct child_device_config *child,
+ struct display_device_data *devdata,
u8 bdb_version)
{
+ const struct child_device_config *child = &devdata->child;
struct ddi_vbt_port_info *info;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
@@ -1437,7 +1635,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
- if (port == PORT_A && is_dvi) {
+ if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
is_dvi = false;
@@ -1455,26 +1653,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+ DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt);
-
- if (is_edp && is_dvi)
- DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
- port_name(port));
- if (is_crt && port != PORT_E)
- DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
- if (is_crt && (is_dvi || is_dp))
- DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
- port_name(port));
- if (is_dvi && (port == PORT_A || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
- if (!is_dvi && !is_dp && !is_crt)
- DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
- port_name(port));
- if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+ info->supports_typec_usb, info->supports_tbt,
+ devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
@@ -1503,6 +1686,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
+ info->hdmi_level_shift_set = true;
}
if (bdb_version >= 204) {
@@ -1565,8 +1749,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- const struct child_device_config *child;
- int i;
+ struct display_device_data *devdata;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
@@ -1574,11 +1757,8 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
if (bdb_version < 155)
return;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- parse_ddi_port(dev_priv, child, bdb_version);
- }
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node)
+ parse_ddi_port(dev_priv, devdata, bdb_version);
}
static void
@@ -1586,8 +1766,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *defs;
+ struct display_device_data *devdata;
const struct child_device_config *child;
- int i, child_device_num, count;
+ int i, child_device_num;
u8 expected_size;
u16 block_size;
int bus_pin;
@@ -1620,7 +1801,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
expected_size = 37;
} else if (bdb->version <= 215) {
expected_size = 38;
- } else if (bdb->version <= 216) {
+ } else if (bdb->version <= 229) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -1643,48 +1824,38 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
/* get the number of child devices */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
- count = 0;
- /* get the number of child device that is present */
- for (i = 0; i < child_device_num; i++) {
- child = child_device_ptr(defs, i);
- if (!child->device_type)
- continue;
- count++;
- }
- if (!count) {
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
- return;
- }
- dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
- if (!dev_priv->vbt.child_dev) {
- DRM_DEBUG_KMS("No memory space for child device\n");
- return;
- }
- dev_priv->vbt.child_dev_num = count;
- count = 0;
for (i = 0; i < child_device_num; i++) {
child = child_device_ptr(defs, i);
if (!child->device_type)
continue;
+ DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
+ child->device_type);
+
+ devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+ if (!devdata)
+ break;
+
/*
* Copy as much as we know (sizeof) and as is available
- * (child_dev_size) of the child device. Accessing the data must
- * depend on VBT version.
+ * (child_dev_size) of the child device config. Accessing the
+ * data must depend on VBT version.
*/
- memcpy(dev_priv->vbt.child_dev + count, child,
+ memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- count++;
+
+ list_add_tail(&devdata->node, &dev_priv->vbt.display_devices);
}
+
+ if (list_empty(&dev_priv->vbt.display_devices))
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
}
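
A minimal standalone sketch of the pattern introduced above: instead of counting records up front and filling a kcalloc'd array, parse each valid record into its own heap allocation and chain it onto a list. The struct layouts, field names and sample values here are illustrative assumptions, not the VBT definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct child_record {          /* stand-in for child_device_config */
        unsigned short device_type;
        unsigned char dvo_port;
};

struct devdata {               /* stand-in for display_device_data */
        struct child_record child;
        struct devdata *next;
};

/* Copy every record with a non-zero device_type into a new list. */
static struct devdata *parse_children(const struct child_record *recs, int n)
{
        struct devdata *head = NULL, **tail = &head;
        int i;

        for (i = 0; i < n; i++) {
                struct devdata *d;

                if (!recs[i].device_type)
                        continue;

                d = calloc(1, sizeof(*d));
                if (!d)
                        break;

                memcpy(&d->child, &recs[i], sizeof(d->child));
                *tail = d;
                tail = &d->next;
        }

        return head;
}

static void free_children(struct devdata *head)
{
        while (head) {
                struct devdata *next = head->next;

                free(head);
                head = next;
        }
}

int main(void)
{
        struct child_record recs[] = { { 0x68c6, 1 }, { 0, 0 }, { 0x60d2, 2 } };
        struct devdata *list = parse_children(recs, 3), *d;

        for (d = list; d; d = d->next)
                printf("child device type 0x%x on port %u\n",
                       (unsigned)d->child.device_type,
                       (unsigned)d->child.dvo_port);

        free_children(list);
        return 0;
}

The teardown loop mirrors the list_for_each_entry_safe() walk used by the driver remove path further down.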
/* Common defaults which may be overridden by VBT. */
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
- enum port port;
-
dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* Default to having backlight */
@@ -1712,13 +1883,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
-
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
- struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
-
- info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
- }
}
/* Defaults to initialize only if there is no VBT. */
@@ -1727,15 +1891,16 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
{
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
+ enum phy phy = intel_port_to_phy(dev_priv, port);
/*
* VBT has the TypeC mode (native, TBT/USB) and we don't want
* to detect it.
*/
- if (intel_port_is_tc(dev_priv, port))
+ if (intel_phy_is_tc(dev_priv, phy))
continue;
info->supports_dvi = (port != PORT_A && port != PORT_E);
@@ -1777,6 +1942,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
}
+ if (vbt->vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ return false;
+ }
+
+ size = vbt->vbt_size;
+
if (range_overflows_t(size_t,
vbt->bdb_offset,
sizeof(struct bdb_header),
@@ -1794,28 +1966,61 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return vbt;
}
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
{
- size_t i;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ void __iomem *p = NULL, *oprom;
+ struct vbt_header *vbt;
+ u16 vbt_size;
+ size_t i, size;
- /* Scour memory looking for the VBT signature. */
- for (i = 0; i + 4 < size; i++) {
- void *vbt;
+ oprom = pci_map_rom(pdev, &size);
+ if (!oprom)
+ return NULL;
- if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+ /* Scour memory looking for the VBT signature. */
+ for (i = 0; i + 4 < size; i += 4) {
+ if (ioread32(oprom + i) != *((const u32 *)"$VBT"))
continue;
- /*
- * This is the one place where we explicitly discard the address
- * space (__iomem) of the BIOS/VBT.
- */
- vbt = (void __force *) bios + i;
- if (intel_bios_is_valid_vbt(vbt, size - i))
- return vbt;
-
+ p = oprom + i;
+ size -= i;
break;
}
+ if (!p)
+ goto err_unmap_oprom;
+
+ if (sizeof(struct vbt_header) > size) {
+ DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ goto err_unmap_oprom;
+ }
+
+ vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
+ if (vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ goto err_unmap_oprom;
+ }
+
+ /* The rest will be validated by intel_bios_is_valid_vbt() */
+ vbt = kmalloc(vbt_size, GFP_KERNEL);
+ if (!vbt)
+ goto err_unmap_oprom;
+
+ memcpy_fromio(vbt, p, vbt_size);
+
+ if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ goto err_free_vbt;
+
+ pci_unmap_rom(pdev, oprom);
+
+ return vbt;
+
+err_free_vbt:
+ kfree(vbt);
+err_unmap_oprom:
+ pci_unmap_rom(pdev, oprom);
+
return NULL;
}
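
A standalone sketch of the validation order used by oprom_get_vbt() above: read the 16-bit size field out of the mapped blob, reject it if it overruns the mapping, and only then copy the blob out for further checks. The header layout, offset and helper name are assumptions for illustration (host byte order is also assumed), not the real VBT header.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SIZE_FIELD_OFFSET 2     /* hypothetical offset of the size field */

static void *copy_validated_blob(const uint8_t *rom, size_t rom_size,
                                 size_t *out_size)
{
        uint16_t blob_size;
        void *copy;

        if (rom_size < SIZE_FIELD_OFFSET + sizeof(blob_size))
                return NULL;            /* header itself incomplete */

        memcpy(&blob_size, rom + SIZE_FIELD_OFFSET, sizeof(blob_size));

        if (blob_size > rom_size)
                return NULL;            /* size field overflows the mapping */

        copy = malloc(blob_size);
        if (!copy)
                return NULL;

        memcpy(copy, rom, blob_size);   /* safe: bounded by both checks */
        *out_size = blob_size;
        return copy;
}

int main(void)
{
        uint8_t rom[64] = { '$', 'X', 16, 0 }; /* claims 16 bytes, fits in 64 */
        size_t size = 0;
        void *blob = copy_validated_blob(rom, sizeof(rom), &size);

        printf("copied %zu bytes (%s)\n", size, blob ? "ok" : "rejected");
        free(blob);
        return 0;
}

Copying the blob before detailed parsing also lets the ROM mapping be released early, which is the point of the kmalloc/memcpy_fromio sequence in the hunk above.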
@@ -1829,12 +2034,13 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
*/
void intel_bios_init(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
+ struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- u8 __iomem *bios = NULL;
- if (!HAS_DISPLAY(dev_priv)) {
+ INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
+
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -1843,15 +2049,11 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) {
- size_t size;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
+ oprom_vbt = oprom_get_vbt(dev_priv);
+ if (!oprom_vbt)
goto out;
- vbt = find_vbt(bios, size);
- if (!vbt)
- goto out;
+ vbt = oprom_vbt;
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
}
@@ -1864,15 +2066,20 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
- parse_lfp_panel_data(dev_priv, bdb);
+ parse_panel_options(dev_priv, bdb);
+ parse_panel_dtd(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_power_conservation_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
+ /* Depends on child device list */
+ parse_compression_parameters(dev_priv, bdb);
+
/* Further processing on pre-parsed data */
parse_sdvo_device_mapping(dev_priv, bdb->version);
parse_ddi_ports(dev_priv, bdb->version);
@@ -1883,19 +2090,23 @@ out:
init_vbt_missing_defaults(dev_priv);
}
- if (bios)
- pci_unmap_rom(pdev, bios);
+ kfree(oprom_vbt);
}
/**
- * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @dev_priv: i915 device instance
*/
-void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
{
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
+ struct display_device_data *devdata, *n;
+
+ list_for_each_entry_safe(devdata, n, &dev_priv->vbt.display_devices, node) {
+ list_del(&devdata->node);
+ kfree(devdata->dsc);
+ kfree(devdata);
+ }
+
kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
@@ -1919,17 +2130,18 @@ void intel_bios_cleanup(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i;
if (!dev_priv->vbt.int_tv_support)
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if (list_empty(&dev_priv->vbt.display_devices))
return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
+
/*
* If the device type is not TV, continue.
*/
@@ -1961,14 +2173,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i;
- if (!dev_priv->vbt.child_dev_num)
+ if (list_empty(&dev_priv->vbt.display_devices))
return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -2010,6 +2222,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
@@ -2020,7 +2233,6 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
- int i;
if (HAS_DDI(dev_priv)) {
const struct ddi_vbt_port_info *port_info =
@@ -2035,11 +2247,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if ((child->dvo_port == port_mapping[port].dp ||
child->dvo_port == port_mapping[port].hdmi) &&
@@ -2060,6 +2269,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
@@ -2068,16 +2278,12 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
[PORT_E] = DVO_PORT_DPE,
[PORT_F] = DVO_PORT_DPF,
};
- int i;
if (HAS_DDI(dev_priv))
return dev_priv->vbt.ddi_port_info[port].supports_edp;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (child->dvo_port == port_mapping[port] &&
(child->device_type & DEVICE_TYPE_eDP_BITS) ==
@@ -2126,13 +2332,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct child_device_config *child;
- int i;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ const struct display_device_data *devdata;
- if (child_dev_is_dp_dual_mode(child, port))
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ if (child_dev_is_dp_dual_mode(&devdata->child, port))
return true;
}
@@ -2149,12 +2352,12 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
u8 dvo_port;
- int i;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
@@ -2178,6 +2381,104 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
return false;
}
+static void fill_dsc(struct intel_crtc_state *crtc_state,
+ struct dsc_compression_parameters_entry *dsc,
+ int dsc_max_bpc)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int bpc = 8;
+
+ vdsc_cfg->dsc_version_major = dsc->version_major;
+ vdsc_cfg->dsc_version_minor = dsc->version_minor;
+
+ if (dsc->support_12bpc && dsc_max_bpc >= 12)
+ bpc = 12;
+ else if (dsc->support_10bpc && dsc_max_bpc >= 10)
+ bpc = 10;
+ else if (dsc->support_8bpc && dsc_max_bpc >= 8)
+ bpc = 8;
+ else
+ DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n",
+ dsc_max_bpc);
+
+ crtc_state->pipe_bpp = bpc * 3;
+
+ crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp));
+
+ /*
+ * FIXME: This is ugly, and slice count should take DSC engine
+ * throughput etc. into account.
+ *
+ * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+ */
+ if (dsc->slices_per_line & BIT(2)) {
+ crtc_state->dsc.slice_count = 4;
+ } else if (dsc->slices_per_line & BIT(1)) {
+ crtc_state->dsc.slice_count = 2;
+ } else {
+ /* FIXME */
+ if (!(dsc->slices_per_line & BIT(0)))
+ DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+
+ crtc_state->dsc.slice_count = 1;
+ }
+
+ if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
+ crtc_state->dsc.slice_count != 0)
+ DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ crtc_state->hw.adjusted_mode.crtc_hdisplay,
+ crtc_state->dsc.slice_count);
+
+ /*
+ * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
+ * implementation specific physical rate buffer size. Currently we use
+ * the required rate buffer model size calculated in
+ * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
+ *
+ * The VBT rc_buffer_block_size and rc_buffer_size definitions
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
+ * implementation should also use the DPCD (or perhaps VBT for eDP)
+ * provided value for the buffer size.
+ */
+
+ /* FIXME: DSI spec says bpc + 1 for this one */
+ vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
+
+ vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
+
+ vdsc_cfg->slice_height = dsc->slice_height;
+}
+
+/* FIXME: initially DSI specific */
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct display_device_data *devdata;
+ const struct child_device_config *child;
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (!devdata->dsc)
+ return false;
+
+ if (crtc_state)
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+
+ return true;
+ }
+ }
+
+ return false;
+}
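
A standalone sketch of the two selections made in fill_dsc() above: pick the highest colour depth that both the panel capabilities and the caller's limit allow, and map the slices-per-line capability bits onto a slice count. The bit layout, struct and helper names are simplified assumptions, not the VBT definitions.

#include <stdio.h>

struct dsc_caps {
        unsigned support_8bpc : 1;
        unsigned support_10bpc : 1;
        unsigned support_12bpc : 1;
        unsigned slices_per_line : 3;   /* bit0 = 1 slice, bit1 = 2, bit2 = 4 */
};

static int pick_bpc(const struct dsc_caps *caps, int max_bpc)
{
        if (caps->support_12bpc && max_bpc >= 12)
                return 12;
        if (caps->support_10bpc && max_bpc >= 10)
                return 10;
        if (caps->support_8bpc && max_bpc >= 8)
                return 8;
        return 8;                       /* conservative fallback */
}

static int pick_slice_count(const struct dsc_caps *caps)
{
        if (caps->slices_per_line & (1 << 2))
                return 4;
        if (caps->slices_per_line & (1 << 1))
                return 2;
        return 1;                       /* includes the "unsupported" fallback */
}

int main(void)
{
        struct dsc_caps caps = { .support_8bpc = 1, .support_10bpc = 1,
                                 .slices_per_line = 0x2 };

        printf("bpc=%d slices=%d\n",
               pick_bpc(&caps, 12), pick_slice_count(&caps));
        return 0;
}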
+
/**
* intel_bios_is_port_hpd_inverted - is HPD inverted for %port
* @i915: i915 device instance
@@ -2249,6 +2550,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
+ case DP_AUX_G:
+ aux_ch = AUX_CH_G;
+ break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_ch = AUX_CH_A;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4e42cfaf61a7..d6a0c29d37ac 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2016 Intel Corporation
+ * Copyright © 2016-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,9 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+enum port;
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
@@ -42,6 +45,7 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_DISPLAY_DDI,
INTEL_BACKLIGHT_DSI_DCS,
INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
struct edp_power_seq {
@@ -227,7 +231,7 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -240,5 +244,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7b908e10d32e..b228671d5a5d 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -6,7 +6,7 @@
#include <drm/drm_atomic_state_helper.h>
#include "intel_bw.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
@@ -15,7 +15,7 @@ struct intel_qgv_point {
};
struct intel_qgv_info {
- struct intel_qgv_point points[3];
+ struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
u8 num_channels;
u8 t_bl;
@@ -35,28 +35,54 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else if (IS_GEN(dev_priv, 11)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
}
qi->num_channels = (val & 0xf0) >> 4;
qi->num_points = (val & 0xf00) >> 8;
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
return 0;
}
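
A small standalone sketch of unpacking the pcode memory-info word handled above: the low nibble selects the DRAM type, the next nibble is the channel count, and bits 8-11 are the number of QGV points. The enum values and sample input are assumptions for illustration; only the gen11-style field layout is taken from the hunk.

#include <stdio.h>

enum dram_type { DRAM_DDR3, DRAM_DDR4, DRAM_LPDDR3, DRAM_LPDDR4, DRAM_UNKNOWN };

struct mem_info {
        enum dram_type type;
        unsigned int num_channels;
        unsigned int num_points;
};

/* Decode using the gen11-style encoding shown above. */
static struct mem_info decode_mem_info_gen11(unsigned int val)
{
        static const enum dram_type map[] = {
                DRAM_DDR4, DRAM_DDR3, DRAM_LPDDR3, DRAM_LPDDR4,
        };
        struct mem_info info;
        unsigned int type = val & 0xf;

        info.type = type < 4 ? map[type] : DRAM_UNKNOWN;
        info.num_channels = (val & 0xf0) >> 4;
        info.num_points = (val & 0xf00) >> 8;
        return info;
}

int main(void)
{
        struct mem_info info = decode_mem_info_gen11(0x323);

        printf("type=%d channels=%u points=%u\n",
               info.type, info.num_channels, info.num_points);
        return 0;
}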
@@ -65,7 +91,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
{
- u32 val = 0, val2;
+ u32 val = 0, val2 = 0;
int ret;
ret = sandybridge_pcode_read(dev_priv,
@@ -132,20 +158,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
}
struct intel_sa_info {
- u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+ u16 displayrtids;
+ u8 deburst, deprogbwlimit;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
- .mpagesize = 16,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+static const struct intel_sa_info tgl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 34, /* GB/s */
+ .displayrtids = 256,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- const struct intel_sa_info *sa = &icl_sa_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels;
int deinterleave;
@@ -233,24 +264,41 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
- icl_get_bw_info(dev_priv);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ if (IS_GEN(dev_priv, 12))
+ icl_get_bw_info(dev_priv, &tgl_sa_info);
+ else if (IS_GEN(dev_priv, 11))
+ icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11) {
+ /*
+ * Any bw group has same amount of QGV points
+ */
+ const struct intel_bw_info *bi =
+ &dev_priv->max_bw[0];
+ unsigned int min_bw = UINT_MAX;
+ int i;
+
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match
* the behaviour observed in the wild.
*/
- return min3(icl_max_bw(dev_priv, num_planes, 0),
- icl_max_bw(dev_priv, num_planes, 1),
- icl_max_bw(dev_priv, num_planes, 2));
- else
+ for (i = 0; i < bi->num_qgv_points; i++) {
+ unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
+
+ min_bw = min(bw, min_bw);
+ }
+ return min_bw;
+ } else {
return UINT_MAX;
+ }
}
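
A standalone sketch of the loop that replaces the hard-coded min3() above: take the minimum allowed bandwidth over however many QGV points the platform reports, rather than assuming exactly three. The numbers and the per-point helper are illustrative stand-ins for icl_max_bw().

#include <limits.h>
#include <stdio.h>

static unsigned int max_bw_for_point(const unsigned int *points, int point)
{
        return points[point];           /* stand-in for icl_max_bw() */
}

static unsigned int max_data_rate(const unsigned int *points, int num_points)
{
        unsigned int min_bw = UINT_MAX;
        int i;

        for (i = 0; i < num_points; i++) {
                unsigned int bw = max_bw_for_point(points, i);

                if (bw < min_bw)
                        min_bw = bw;
        }

        return min_bw;
}

int main(void)
{
        unsigned int points[] = { 38400, 25600, 51200 };  /* MB/s, made up */

        printf("limit = %u\n", max_data_rate(points, 3)); /* -> 25600 */
        return 0;
}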
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -264,7 +312,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -285,7 +333,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
@@ -322,6 +370,20 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
+static struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_private_state *bw_state;
+
+ bw_state = drm_atomic_get_private_obj_state(&state->base,
+ &dev_priv->bw_obj);
+ if (IS_ERR(bw_state))
+ return ERR_CAST(bw_state);
+
+ return to_intel_bw_state(bw_state);
+}
+
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -424,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
return 0;
}
+
+void intel_bw_cleanup(struct drm_i915_private *dev_priv)
+{
+ drm_atomic_private_obj_fini(&dev_priv->bw_obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index e9d9c6d63bc3..20b9ad241802 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,7 +8,6 @@
#include <drm/drm_atomic.h>
-#include "i915_drv.h"
#include "intel_display.h"
struct drm_i915_private;
@@ -24,22 +23,9 @@ struct intel_bw_state {
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
-static inline struct intel_bw_state *
-intel_atomic_get_bw_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_private_state *bw_state;
-
- bw_state = drm_atomic_get_private_obj_state(&state->base,
- &dev_priv->bw_obj);
- if (IS_ERR(bw_state))
- return ERR_CAST(bw_state);
-
- return to_intel_bw_state(bw_state);
-}
-
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_cleanup(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0d19bbd08122..0ce5926006ca 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,8 +21,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_atomic.h"
#include "intel_cdclk.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_sideband.h"
/**
@@ -545,10 +546,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
- * a system suspend. So grab the PIPE-A domain, which covers
+ * a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) |
@@ -606,7 +607,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -631,10 +632,10 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
- * a system suspend. So grab the PIPE-A domain, which covers
+ * a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -653,7 +654,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -969,9 +970,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
- 5))
+ if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -983,9 +982,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1165,28 +1162,88 @@ static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static int bxt_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 576000)
- return 624000;
- else if (min_cdclk > 384000)
- return 576000;
- else if (min_cdclk > 288000)
- return 384000;
- else if (min_cdclk > 144000)
- return 288000;
- else
- return 144000;
+static const struct intel_cdclk_vals bxt_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ {}
+};
+
+static const struct intel_cdclk_vals glk_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ {}
+};
+
+static const struct intel_cdclk_vals cnl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 },
+
+ { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 },
+ {}
+};
+
+static const struct intel_cdclk_vals icl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk >= min_cdclk)
+ return table[i].cdclk;
+
+ WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
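
A standalone sketch of the table-driven lookup that replaces the old if/else ladders: walk a zero-terminated table and return the first entry that matches the reference clock and satisfies the minimum. The values below are copied from the BXT table purely for illustration, and the struct is a simplified stand-in for intel_cdclk_vals.

#include <stdio.h>

struct cdclk_vals {
        unsigned int refclk;
        unsigned int cdclk;
};

static const struct cdclk_vals bxt_table[] = {
        { 19200, 144000 }, { 19200, 288000 }, { 19200, 384000 },
        { 19200, 576000 }, { 19200, 624000 },
        {}                              /* zero refclk terminates the table */
};

static unsigned int calc_cdclk(const struct cdclk_vals *table,
                               unsigned int refclk, unsigned int min_cdclk)
{
        int i;

        for (i = 0; table[i].refclk; i++)
                if (table[i].refclk == refclk && table[i].cdclk >= min_cdclk)
                        return table[i].cdclk;

        return 0;                       /* nothing in the table satisfies this */
}

int main(void)
{
        printf("%u\n", calc_cdclk(bxt_table, 19200, 300000));  /* -> 384000 */
        printf("%u\n", calc_cdclk(bxt_table, 19200, 700000));  /* -> 0 */
        return 0;
}

Because the table also carries the PLL ratio per entry, the same lookup doubles as the cdclk-to-VCO mapping in bxt_calc_cdclk_pll_vco() below.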
-static int glk_calc_cdclk(int min_cdclk)
+static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- if (min_cdclk > 158400)
- return 316800;
- else if (min_cdclk > 79200)
- return 158400;
- else
- return 79200;
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return dev_priv->cdclk.hw.ref * table[i].ratio;
+
+ WARN(1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
static u8 bxt_calc_voltage_level(int cdclk)
@@ -1194,69 +1251,101 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static u8 cnl_calc_voltage_level(int cdclk)
{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk > 336000)
+ return 2;
+ else if (cdclk > 168000)
+ return 1;
+ else
return 0;
+}
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 144000:
- case 288000:
- case 384000:
- case 576000:
- ratio = 60;
- break;
- case 624000:
- ratio = 65;
- break;
- }
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
- return dev_priv->cdclk.hw.ref * ratio;
+static u8 ehl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 326400)
+ return 3;
+ else if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
}
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- int ratio;
+ if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_state->ref = 24000;
+ else
+ cdclk_state->ref = 19200;
+}
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+static void icl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
- switch (cdclk) {
+ switch (dssm) {
default:
- MISSING_CASE(cdclk);
+ MISSING_CASE(dssm);
/* fall through */
- case 79200:
- case 158400:
- case 316800:
- ratio = 33;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_state->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_state->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_state->ref = 38400;
break;
}
-
- return dev_priv->cdclk.hw.ref * ratio;
}
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- u32 val;
+ u32 val, ratio;
- cdclk_state->ref = 19200;
- cdclk_state->vco = 0;
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_readout_refclk(dev_priv, cdclk_state);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_readout_refclk(dev_priv, cdclk_state);
+ else
+ cdclk_state->ref = 19200;
val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+ * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_state->vco = 0;
return;
+ }
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
+ /*
+ * CNL+ have the ratio directly in the PLL enable register, gen9lp had
+ * it in a separate PLL control register.
+ */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
+ else
+ ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- val = I915_READ(BXT_DE_PLL_CTL);
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+ cdclk_state->vco = ratio * cdclk_state->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1265,12 +1354,19 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
u32 divider;
int div;
- bxt_de_pll_update(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ if (INTEL_GEN(dev_priv) >= 12)
+ cdclk_state->bypass = cdclk_state->ref / 2;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ cdclk_state->bypass = 50000;
+ else
+ cdclk_state->bypass = cdclk_state->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_state->vco == 0) {
+ cdclk_state->cdclk = cdclk_state->bypass;
goto out;
+ }
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1279,13 +1375,15 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
div = 8;
break;
default:
@@ -1301,7 +1399,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
- bxt_calc_voltage_level(cdclk_state->cdclk);
+ dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1309,9 +1407,8 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1330,269 +1427,13 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK,
- BXT_DE_PLL_LOCK,
- 1))
+ if (intel_de_wait_for_set(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
- u32 val, divider;
- int ret;
-
- /* cdclk = vco / 2 / div{1,1.5,2,4} */
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
- /* fall through */
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- case 3:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- }
-
- /*
- * Inform power controller of upcoming frequency change. BSpec
- * requires us to wait up to 150usec, but that leads to timeouts;
- * the 2ms used here is based on experiment.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_enable(dev_priv, vco);
-
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
-
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
-
- /*
- * The timeout isn't specified, the 2ms used here is based on
- * experiment.
- * FIXME: Waiting for the request completion could be delayed until
- * the next PCODE request based on BSpec.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- intel_update_cdclk(dev_priv);
-}
-
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
- u32 cdctl, expected;
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
- * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
- * so sanitize this register.
- */
- cdctl = I915_READ(CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (dev_priv->cdclk.hw.cdclk >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
-
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
-
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state;
-
- bxt_sanitize_cdclk(dev_priv);
-
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
- return;
-
- cdclk_state = dev_priv->cdclk.hw;
-
- /*
- * FIXME:
- * - The initial CDCLK needs to be read from VBT.
- * Need to make this change after VBT has changes for BXT.
- */
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk_state.cdclk = glk_calc_cdclk(0);
- cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
- } else {
- cdclk_state.cdclk = bxt_calc_cdclk(0);
- cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
- }
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static int cnl_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 336000)
- return 528000;
- else if (min_cdclk > 168000)
- return 336000;
- else
- return 168000;
-}
-
-static u8 cnl_calc_voltage_level(int cdclk)
-{
- if (cdclk > 336000)
- return 2;
- else if (cdclk > 168000)
- return 1;
- else
- return 0;
-}
-
-static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
- else
- cdclk_state->ref = 19200;
-
- cdclk_state->vco = 0;
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
- return;
-
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
-
- cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
-}
-
-static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 divider;
- int div;
-
- cnl_cdclk_pll_update(dev_priv, cdclk_state);
-
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
- if (cdclk_state->vco == 0)
- goto out;
-
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
- switch (divider) {
- case BXT_CDCLK_CD2X_DIV_SEL_1:
- div = 2;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_2:
- div = 4;
- break;
- default:
- MISSING_CASE(divider);
- return;
- }
-
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
-
- out:
- /*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
- */
- cdclk_state->voltage_level =
- cnl_calc_voltage_level(cdclk_state->cdclk);
-}
-
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -1626,7 +1467,27 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
+static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (pipe == INVALID_PIPE)
+ return TGL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return TGL_CDCLK_CD2X_PIPE(pipe);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ if (pipe == INVALID_PIPE)
+ return ICL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return ICL_CDCLK_CD2X_PIPE(pipe);
+ } else {
+ if (pipe == INVALID_PIPE)
+ return BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ return BXT_CDCLK_CD2X_PIPE(pipe);
+ }
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
@@ -1635,17 +1496,28 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider;
int ret;
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ /* Inform power controller of upcoming frequency change. */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
+
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
- /* cdclk = vco / 2 / div{1,2} */
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
@@ -1654,67 +1526,87 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
+ case 3:
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
+ case 8:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+ break;
}
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
+ if (INTEL_GEN(dev_priv) >= 10) {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_disable(dev_priv);
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- I915_WRITE(CDCLK_CTL, val);
+ if (dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_enable(dev_priv, vco);
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
+ } else {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /* inform PCU of the change */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ if (dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
+ }
- intel_update_cdclk(dev_priv);
+ val = divider | skl_cdclk_decimal(cdclk) |
+ bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
+ if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
-static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+ if (INTEL_GEN(dev_priv) >= 10) {
+ ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
+ } else {
+ /*
+ * The timeout isn't specified, the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed
+ * until the next PCODE request based on BSpec.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level,
+ 150, 2);
+ }
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 168000:
- case 336000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
- break;
- case 528000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
- break;
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
}
- return dev_priv->cdclk.hw.ref * ratio;
+ intel_update_cdclk(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
-static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
+ int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -1735,239 +1627,65 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
-
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
-
-static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
-{
- int ranges_24[] = { 312000, 552000, 648000 };
- int ranges_19_38[] = { 307200, 556800, 652800 };
- int *ranges;
-
- switch (ref) {
- default:
- MISSING_CASE(ref);
- /* fall through */
- case 24000:
- ranges = ranges_24;
- break;
- case 19200:
- case 38400:
- ranges = ranges_19_38;
- break;
- }
-
- if (min_cdclk > ranges[1])
- return ranges[2];
- else if (min_cdclk > ranges[0])
- return ranges[1];
- else
- return ranges[0];
-}
+ /* Make sure this is a legal cdclk value for the platform */
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
+ if (cdclk != dev_priv->cdclk.hw.cdclk)
+ goto sanitize;
-static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
+ /* Make sure the VCO is correct for the cdclk */
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ if (vco != dev_priv->cdclk.hw.vco)
+ goto sanitize;
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+ expected = skl_cdclk_decimal(cdclk);
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 307200:
- case 556800:
- case 652800:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400);
+ /* Figure out what CD2X divider we should be using for this cdclk */
+ switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco,
+ dev_priv->cdclk.hw.cdclk)) {
+ case 2:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1;
break;
- case 312000:
- case 552000:
- case 648000:
- WARN_ON(dev_priv->cdclk.hw.ref != 24000);
- }
-
- ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
-
- return dev_priv->cdclk.hw.ref * ratio;
-}
-
-static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- unsigned int cdclk = cdclk_state->cdclk;
- unsigned int vco = cdclk_state->vco;
- int ret;
-
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
-
- /*
- * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
- * divider here synchronized to a pipe while CDCLK is on, nor will we
- * need the corresponding vblank wait.
- */
- I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
- skl_cdclk_decimal(cdclk));
-
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
-
- intel_update_cdclk(dev_priv);
-
- /*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
- */
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
-
-static u8 icl_calc_voltage_level(int cdclk)
-{
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
-}
-
-static void icl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- cdclk_state->bypass = 50000;
-
- val = I915_READ(SKL_DSSM);
- switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
- default:
- MISSING_CASE(val);
- /* fall through */
- case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
+ case 3:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
+ case 4:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_2;
break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
+ case 8:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_4;
break;
+ default:
+ goto sanitize;
}
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
- (val & BXT_DE_PLL_LOCK) == 0) {
- /*
- * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
- * setting it to zero is a way to signal that.
- */
- cdclk_state->vco = 0;
- cdclk_state->cdclk = cdclk_state->bypass;
- goto out;
- }
-
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
-
- val = I915_READ(CDCLK_CTL);
- WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
-
- cdclk_state->cdclk = cdclk_state->vco / 2;
-
-out:
/*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- cdclk_state->voltage_level =
- icl_calc_voltage_level(cdclk_state->cdclk);
-}
-
-static void icl_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state sanitized_state;
- u32 val;
-
- /* This sets dev_priv->cdclk.hw. */
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- /* This means CDCLK disabled. */
- if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- val = I915_READ(CDCLK_CTL);
-
- if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
- goto sanitize;
-
- if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
- goto sanitize;
+ if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return;
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
sanitize:
DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- sanitized_state.ref = dev_priv->cdclk.hw.ref;
- sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
- sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
- sanitized_state.cdclk);
- sanitized_state.voltage_level =
- icl_calc_voltage_level(sanitized_state.cdclk);
-
- icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
-}
-
-static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
+ /* force cdclk programming */
+ dev_priv->cdclk.hw.cdclk = 0;
- icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ /* force full PLL disable + enable */
+ dev_priv->cdclk.hw.vco = -1;
}
-static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
- cnl_sanitize_cdclk(dev_priv);
+ bxt_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk.hw.cdclk != 0 &&
dev_priv->cdclk.hw.vco != 0)
@@ -1975,22 +1693,29 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cnl_calc_cdclk(0);
- cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from VBT.
+ * Make this change once the VBT changes for BXT are in place.
+ */
+ cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
@@ -2004,14 +1729,10 @@ static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_cdclk_init(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_init_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_init_cdclk(i915);
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ bxt_init_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_init_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_init_cdclk(i915);
}
/**
@@ -2023,14 +1744,10 @@ void intel_cdclk_init(struct drm_i915_private *i915)
*/
void intel_cdclk_uninit(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_uninit_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_uninit_cdclk(i915);
+ if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_uninit_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
}
/**
@@ -2058,9 +1775,9 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
* Returns:
* True if the CDCLK states require just a cd2x divider update, false if not.
*/
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -2079,8 +1796,8 @@ bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
* Returns:
* True if the CDCLK states don't match, false if they do.
*/
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
@@ -2185,9 +1902,11 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
intel_set_cdclk(dev_priv, new_state, pipe);
}
-static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
- int pixel_rate)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int pixel_rate = crtc_state->pixel_rate;
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEN(dev_priv, 9) ||
@@ -2195,20 +1914,35 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else if (crtc_state->double_wide)
+ return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
else
return DIV_ROUND_UP(pixel_rate * 100, 90);
}
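
The conversion above encodes the per-platform cdclk guardband: GEN10+/GLK can push two pixels per cdclk cycle, GEN9/BDW/HSW need one cdclk per pixel, CHV needs cdclk to be at least ~95% of the pixel rate, and the older 90% rule is halved again for double-wide pipes (which scan out two pixels per clock). A standalone sketch of the same arithmetic, with the platform checks reduced to an enum for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

enum platform { PLAT_GEN10_OR_GLK, PLAT_GEN9_BDW_HSW, PLAT_CHV, PLAT_OLDER };

/* Minimum cdclk (kHz) needed to drive the given pixel rate (kHz). */
static int pixel_rate_to_min_cdclk(enum platform p, int pixel_rate, int double_wide)
{
        if (p == PLAT_GEN10_OR_GLK)             /* two pixels per cdclk */
                return DIV_ROUND_UP(pixel_rate, 2);
        else if (p == PLAT_GEN9_BDW_HSW)        /* one pixel per cdclk */
                return pixel_rate;
        else if (p == PLAT_CHV)                 /* 95% guardband */
                return DIV_ROUND_UP(pixel_rate * 100, 95);
        else if (double_wide)                   /* 90% guardband, two pixels/clock */
                return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
        else                                    /* 90% guardband */
                return DIV_ROUND_UP(pixel_rate * 100, 90);
}

int main(void)
{
        /* e.g. a 148500 kHz (1080p60) pixel rate */
        printf("gen10+/glk: %d kHz\n", pixel_rate_to_min_cdclk(PLAT_GEN10_OR_GLK, 148500, 0));
        printf("older gmch: %d kHz\n", pixel_rate_to_min_cdclk(PLAT_OLDER, 148500, 0));
        return 0;
}
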
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+
+ return min_cdclk;
+}
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ to_i915(crtc_state->uapi.crtc->dev);
int min_cdclk;
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
return 0;
- min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
+ min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
@@ -2267,6 +2001,21 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
IS_GEMINILAKE(dev_priv))
min_cdclk = max(158400, min_cdclk);
+ /* Account for additional needs from the planes */
+ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+
+ /*
+ * HACK. Currently for TGL platforms we calculate
+ * min_cdclk initially based on pixel_rate divided
+ * by 2, also accounting for plane requirements.
+ * However, in some cases the lowest possible CDCLK
+ * doesn't work and causes underruns. Bumping the
+ * minimum to the full pixel rate is, for now, a
+ * workaround rather than a final solution.
+ */
+ if (IS_TIGERLAKE(dev_priv))
+ min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -2288,11 +2037,20 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
sizeof(state->min_cdclk));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
+ if (state->min_cdclk[i] == min_cdclk)
+ continue;
+
state->min_cdclk[i] = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_cdclk = state->cdclk.force_min_cdclk;
@@ -2303,6 +2061,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
}
/*
+ * Account for port clock min voltage level requirements.
+ * This only really does something on CNL+ but can be
+ * called on earlier platforms as well.
+ *
 * Note that this function assumes that 0 is
* the lowest voltage value, and higher values
* correspond to increasingly higher voltages.
@@ -2311,7 +2073,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -2324,11 +2086,21 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
sizeof(state->min_voltage_level));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc_state->base.enable)
- state->min_voltage_level[i] =
- crtc_state->min_voltage_level;
+ int ret;
+
+ if (crtc_state->hw.enable)
+ min_voltage_level = crtc_state->min_voltage_level;
else
- state->min_voltage_level[i] = 0;
+ min_voltage_level = 0;
+
+ if (state->min_voltage_level[i] == min_voltage_level)
+ continue;
+
+ state->min_voltage_level[i] = min_voltage_level;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_voltage_level = 0;
@@ -2354,7 +2126,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2385,7 +2157,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2410,7 +2182,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
vco = dev_priv->skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
continue;
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
@@ -2455,7 +2227,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
state->cdclk.actual.vco = vco;
@@ -2472,38 +2244,33 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ int min_cdclk, min_voltage_level, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ min_voltage_level = bxt_compute_min_voltage_level(state);
+ if (min_voltage_level < 0)
+ return min_voltage_level;
+
+ cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
- bxt_calc_voltage_level(cdclk);
-
- if (!state->active_crtcs) {
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ max_t(int, min_voltage_level,
+ dev_priv->display.calc_voltage_level(cdclk));
+
+ if (!state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
- bxt_calc_voltage_level(cdclk);
+ dev_priv->display.calc_voltage_level(cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2511,70 +2278,138 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
-static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ struct intel_crtc *crtc;
- min_cdclk = intel_compute_min_cdclk(state);
- if (min_cdclk < 0)
- return min_cdclk;
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
- cdclk = cnl_calc_cdclk(min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(cnl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(state));
+ if (!crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ continue;
- if (!state->active_crtcs) {
- cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state->uapi.mode_changed = true;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- cnl_calc_voltage_level(cdclk);
- } else {
- state->cdclk.actual = state->cdclk.logical;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ crtc_state->update_planes |= crtc_state->active_planes;
}
return 0;
}
-static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- unsigned int ref = state->cdclk.logical.ref;
- int min_cdclk, cdclk, vco;
+ int min_cdclk;
+ /*
+ * We can't change the cdclk frequency, but we still want to
+ * check that the required minimum frequency doesn't exceed
+ * the actual cdclk frequency.
+ */
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- cdclk = icl_calc_cdclk(min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ return 0;
+}
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(icl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(state));
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum pipe pipe;
+ int ret;
- if (!state->active_crtcs) {
- cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- icl_calc_voltage_level(cdclk);
+ /*
+ * Writes to dev_priv->cdclk.{actual,logical} must be protected
+ * by holding all the crtc mutexes even if we don't end up
+ * touching the hardware.
+ */
+ if (intel_cdclk_changed(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /*
+ * Also serialize commits across all crtcs
+ * if the actual hw needs to be poked.
+ */
+ ret = intel_atomic_serialize_global_state(state);
+ if (ret)
+ return ret;
+ } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &state->cdclk.logical)) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
} else {
- state->cdclk.actual = state->cdclk.logical;
+ return 0;
+ }
+
+ if (is_power_of_2(state->active_pipes) &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+
+ pipe = ilog2(state->active_pipes);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
+ if (pipe != INVALID_PIPE) {
+ state->cdclk.pipe = pipe;
+
+ DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /* All pipes must be switched off while we change the cdclk. */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+
+ state->cdclk.pipe = INVALID_PIPE;
+
+ DRM_DEBUG_KMS("Modeset required for cdclk change\n");
}
+ DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ state->cdclk.logical.cdclk,
+ state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ state->cdclk.logical.voltage_level,
+ state->cdclk.actual.voltage_level);
+
return 0;
}
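
The pipe selection above hinges on the active_pipes bitmask: a cd2x-only divider change can be done with the screen on, but only when exactly one pipe is active, and that pipe is recovered from the mask with ilog2(); the driver additionally drops back to INVALID_PIPE if that pipe is itself undergoing a full modeset. A standalone sketch of just the bitmask part:

#include <stdio.h>

#define INVALID_PIPE    (-1)

static int is_power_of_2(unsigned int n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

static int ilog2_u32(unsigned int n)
{
        int r = -1;

        while (n) {
                n >>= 1;
                r++;
        }
        return r;
}

/*
 * Return the single active pipe if a cd2x-only update is possible,
 * INVALID_PIPE otherwise (all pipes must then be shut down first).
 */
static int cd2x_update_pipe(unsigned int active_pipes, int needs_cd2x_only)
{
        if (needs_cd2x_only && is_power_of_2(active_pipes))
                return ilog2_u32(active_pipes);
        return INVALID_PIPE;
}

int main(void)
{
        printf("%d\n", cd2x_update_pipe(0x4, 1));       /* only pipe C active -> 2 */
        printf("%d\n", cd2x_update_pipe(0x5, 1));       /* pipes A and C -> -1 */
        return 0;
}
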
@@ -2605,7 +2440,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (dev_priv->cdclk.hw.ref == 24000)
+ dev_priv->max_cdclk_freq = 552000;
+ else
+ dev_priv->max_cdclk_freq = 556800;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
@@ -2789,15 +2629,29 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
+ dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
+ if (IS_GEMINILAKE(dev_priv))
+ dev_priv->cdclk.table = glk_cdclk_table;
+ else
+ dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
@@ -2810,13 +2664,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+ } else {
+ dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
- if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.get_cdclk = icl_get_cdclk;
- else if (IS_CANNONLAKE(dev_priv))
- dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 4d6f7f5f8930..cf71394cc79c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -15,6 +15,13 @@ struct intel_atomic_state;
struct intel_cdclk_state;
struct intel_crtc_state;
+struct intel_cdclk_vals {
+ u16 refclk;
+ u32 cdclk;
+ u8 divider; /* CD2X divider * 2 */
+ u8 ratio;
+};
+
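
Given that the divider field stores the CD2X divider times two (so the 1.5 divider fits in an integer) and the PLL VCO is refclk * ratio, each table entry should satisfy cdclk = refclk * ratio / divider. A quick standalone self-check of that invariant; the entries below are illustrative, not copied from the driver's tables:

#include <stdint.h>
#include <stdio.h>

struct cdclk_vals {
        uint16_t refclk;        /* kHz */
        uint32_t cdclk;         /* kHz */
        uint8_t divider;        /* CD2X divider * 2 */
        uint8_t ratio;          /* PLL ratio: vco = refclk * ratio */
};

static const struct cdclk_vals example_table[] = {
        { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
        { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
        { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
};

int main(void)
{
        for (unsigned int i = 0; i < sizeof(example_table) / sizeof(example_table[0]); i++) {
                const struct cdclk_vals *v = &example_table[i];
                uint32_t vco = (uint32_t)v->refclk * v->ratio;

                /* divider already carries the extra factor of two */
                printf("ref %5u ratio %3u -> vco %7u kHz, cdclk %6u kHz (table: %6u)\n",
                       v->refclk, v->ratio, vco, vco / v->divider, v->cdclk);
        }
        return 0;
}
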
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init(struct drm_i915_private *i915);
void intel_cdclk_uninit(struct drm_i915_private *i915);
@@ -22,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
void intel_cdclk_swap_state(struct intel_atomic_state *state);
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
@@ -42,5 +44,6 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 23a84dd7989f..3980e8b50c28 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -23,7 +23,7 @@
*/
#include "intel_color.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -43,6 +43,21 @@
#define LEGACY_LUT_LENGTH 256
/*
+ * ILK+ csc matrix:
+ *
+ * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
+ * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
+ * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
+ *
+ * ILK/SNB don't have explicit post offsets, and instead
+ * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
+ */
+
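
As a plain-number illustration of the pipeline described in the comment above (output = coefficients x (input + pre-offsets) + post-offsets), here is a standalone float version; the hardware works in its own fixed-point register formats rather than floats, and the limited-range numbers below are the usual 16-235 video range expressed on a 0..1 scale:

#include <stdio.h>

/* out = M * (in + preoff) + postoff, per channel triplet */
static void csc_apply(const float m[9], const float preoff[3],
                      const float postoff[3], const float in[3], float out[3])
{
        float tmp[3];
        int i;

        for (i = 0; i < 3; i++)
                tmp[i] = in[i] + preoff[i];

        for (i = 0; i < 3; i++)
                out[i] = m[3 * i + 0] * tmp[0] +
                         m[3 * i + 1] * tmp[1] +
                         m[3 * i + 2] * tmp[2] + postoff[i];
}

int main(void)
{
        /* Full range RGB -> limited range RGB: scale by 219/255, offset by 16/255 */
        const float m[9] = {
                219.0f / 255.0f, 0.0f, 0.0f,
                0.0f, 219.0f / 255.0f, 0.0f,
                0.0f, 0.0f, 219.0f / 255.0f,
        };
        const float preoff[3] = { 0.0f, 0.0f, 0.0f };
        const float postoff[3] = { 16.0f / 255.0f, 16.0f / 255.0f, 16.0f / 255.0f };
        const float white[3] = { 1.0f, 1.0f, 1.0f };
        float out[3];

        csc_apply(m, preoff, postoff, white, out);
        printf("white -> %.3f %.3f %.3f\n", out[0], out[1], out[2]);    /* ~0.922 */
        return 0;
}
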
+/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
* number of fractional bits.
@@ -59,37 +74,38 @@
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+/* Nop pre/post offsets */
static const u16 ilk_csc_off_zero[3] = {};
+/* Identity matrix */
static const u16 ilk_csc_coeff_identity[9] = {
ILK_CSC_COEFF_1_0, 0, 0,
0, ILK_CSC_COEFF_1_0, 0,
0, 0, ILK_CSC_COEFF_1_0,
};
+/* Limited range RGB post offsets */
static const u16 ilk_csc_postoff_limited_range[3] = {
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
};
+/* Full range RGB -> limited range RGB matrix */
static const u16 ilk_csc_coeff_limited_range[9] = {
ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
};
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
+/* BT.709 full range RGB -> limited range YCbCr matrix */
static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
0x1e08, 0x9cc0, 0xb528,
0x2ba8, 0x09d8, 0x37e8,
0xbce8, 0x9ad8, 0x1e08,
};
-/* Post offset values for RGB->YCBCR conversion */
+/* Limited range YCbCr post offsets */
static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
0x0800, 0x0100, 0x0800,
};
@@ -101,10 +117,10 @@ static bool lut_is_legacy(const struct drm_property_blob *lut)
static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
{
- return !crtc_state->base.degamma_lut &&
- !crtc_state->base.ctm &&
- crtc_state->base.gamma_lut &&
- lut_is_legacy(crtc_state->base.gamma_lut);
+ return !crtc_state->hw.degamma_lut &&
+ !crtc_state->hw.ctm &&
+ crtc_state->hw.gamma_lut &&
+ lut_is_legacy(crtc_state->hw.gamma_lut);
}
/*
@@ -189,7 +205,7 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
@@ -203,7 +219,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
u16 coeffs[9])
{
- const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
const u64 *input;
u64 temp[9];
int i;
@@ -254,11 +270,11 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
- if (crtc_state->base.ctm) {
+ if (crtc_state->hw.ctm) {
u16 coeff[9];
ilk_csc_convert_ctm(crtc_state, coeff);
@@ -293,10 +309,10 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (crtc_state->base.ctm) {
+ if (crtc_state->hw.ctm) {
u16 coeff[9];
ilk_csc_convert_ctm(crtc_state, coeff);
@@ -322,12 +338,12 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
*/
static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (crtc_state->base.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ if (crtc_state->hw.ctm) {
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
u16 coeffs[9] = {};
int i;
@@ -388,7 +404,7 @@ static u32 ilk_lut_10(const struct drm_color_lut *color)
static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
int i;
@@ -419,12 +435,12 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
+ i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -437,7 +453,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -452,7 +468,7 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
@@ -462,7 +478,7 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -508,8 +524,8 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
i9xx_load_luts(crtc_state);
@@ -531,8 +547,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
i9xx_load_luts(crtc_state);
@@ -611,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -624,17 +641,22 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ 1 << 16);
}
+
+ intel_dsb_put(dsb);
}
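
The LUT writes in this hunk are being rerouted through the DSB (Display State Buffer) helpers: grab a buffer with intel_dsb_get(), queue register writes with intel_dsb_reg_write()/intel_dsb_indexed_reg_write(), and drop the reference with intel_dsb_put(); where the caller owns the whole sequence (icl_load_luts() further down), intel_dsb_commit() flushes the batch. The toy sketch below only mimics that batch-then-flush shape with stand-in types; it is not the i915 DSB implementation:

#include <stdint.h>
#include <stdio.h>

/* Stand-in types; the real driver batches the writes for the hardware. */
struct batch_write { uint32_t reg, val; };
struct dsb { struct batch_write writes[64]; int count; };

static struct dsb *dsb_get(struct dsb *d) { d->count = 0; return d; }

static void dsb_reg_write(struct dsb *d, uint32_t reg, uint32_t val)
{
        if (d->count < 64)
                d->writes[d->count++] = (struct batch_write){ reg, val };
}

static void dsb_commit(struct dsb *d)   /* flush the queued writes in one go */
{
        for (int i = 0; i < d->count; i++)
                printf("write 0x%05x <- 0x%08x\n", d->writes[i].reg, d->writes[i].val);
        d->count = 0;
}

static void dsb_put(struct dsb *d) { (void)d; }

int main(void)
{
        struct dsb storage, *dsb = dsb_get(&storage);

        /* queue a couple of writes, then let the whole batch go out at once */
        dsb_reg_write(dsb, 0x4a010, 1u << 16);
        dsb_reg_write(dsb, 0x4a014, 1u << 16);
        dsb_commit(dsb);
        dsb_put(dsb);
        return 0;
}
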
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
@@ -655,9 +677,9 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
@@ -678,11 +700,11 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+ const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
u32 i;
/*
@@ -717,7 +739,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -744,8 +766,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/*
* On GLK+ both pipe CSC and degamma LUT are controlled
@@ -755,7 +777,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 * the degamma LUT so that we don't have to reload
* it every time the pipe CSC is being enabled.
*/
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
else
glk_load_degamma_lut_linear(crtc_state);
@@ -786,79 +808,84 @@ static void
icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- * Every entry in the multi-segment LUT is corresponding to a superfine
- * segment step which is 1/(8 * 128 * 256).
+ * Program Super Fine segment (let's call it seg1)...
*
- * Superfine segment has 9 entries, corresponding to values
- * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+ * Super Fine segment's step is 1/(8 * 128 * 256) and it has
+ * 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
+ * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- *
* Program Fine segment (let's call it seg2)...
*
- * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256)
- * ... 256/(128*256). So in order to program fine segment of LUT we
- * need to pick every 8'th entry in LUT, and program 256 indexes.
+ * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
+ * ... 256/(128 * 256). So in order to program fine segment of LUT we
+ * need to pick every 8th entry in the LUT, and program 256 indexes.
*
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
- * with seg2[0] being unused by the hardware.
+ * seg2[0] being unused by the hardware.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
* Program Coarse segment (let's call it seg3)...
*
- * Coarse segment's starts from index 0 and it's step is 1/256 ie 0,
- * 1/256, 2/256 ...256/256. As per the description of each entry in LUT
+ * Coarse segment starts from index 0 and its step is 1/256, i.e. 0,
+ * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
* above, we need to pick every (8 * 128)th entry in LUT, and
* program 256 of those.
*
@@ -868,38 +895,43 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
ivb_load_lut_ext_max(crtc);
+ intel_dsb_put(dsb);
}
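
Taken together, the three segments sample the same multi-segment gamma blob at three densities: the super fine segment takes the first 9 entries, the fine segment every 8th entry (seg2[0] unused), the coarse segment every (8 * 128)th entry, and lut[256 * 8 * 128] ends up in GCMAX. A standalone sketch of just the index arithmetic used by the loops above:

#include <stdio.h>

int main(void)
{
        int i;

        /* Super fine segment: the first 9 entries (step 1/(8 * 128 * 256)). */
        for (i = 0; i < 9; i++)
                printf("seg1[%3d] = lut[%6d]\n", i, i);

        /* Fine segment: every 8th entry (step 1/(128 * 256)), seg2[0] unused. */
        for (i = 1; i < 257; i++)
                printf("seg2[%3d] = lut[%6d]\n", i, i * 8);

        /* Coarse segment: every (8 * 128)th entry (step 1/256). */
        for (i = 0; i < 256; i++)
                printf("seg3[%3d] = lut[%6d]\n", i, i * 8 * 128);

        /* The very last entry is programmed into the GCMAX registers. */
        printf("gcmax     = lut[%6d]\n", 256 * 8 * 128);
        return 0;
}
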
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
break;
-
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
break;
-
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
}
+
+ intel_dsb_commit(dsb);
+ intel_dsb_put(dsb);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -958,9 +990,9 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
cherryview_load_csc_matrix(crtc_state);
@@ -978,28 +1010,77 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
dev_priv->display.load_luts(crtc_state);
}
void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
dev_priv->display.color_commit(crtc_state);
}
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->hw.gamma_lut &&
+ !old_crtc_state->hw.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * CGM_PIPE_MODE is itself single buffered. We'd have to
+ * somehow split it out from chv_load_luts() if we wanted
+ * the ability to preload the CGM LUTs/CSC without tearing.
+ */
+ if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+ return false;
+
+ return !old_crtc_state->hw.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * The hardware degamma is active whenever the pipe
+ * CSC is active. Thus even if the old state has no
+ * software degamma we need to avoid clobbering the
+ * linear hardware degamma mid scanout.
+ */
+ return !old_crtc_state->csc_enable &&
+ !old_crtc_state->hw.gamma_lut;
+}
+
int intel_color_check(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
return dev_priv->display.color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (dev_priv->display.read_luts)
dev_priv->display.read_luts(crtc_state);
@@ -1023,16 +1104,16 @@ static bool need_plane_update(struct intel_plane *plane,
static int
intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_plane *plane;
- if (!new_crtc_state->base.active ||
- drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
return 0;
if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
@@ -1074,9 +1155,9 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
static int check_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
@@ -1124,7 +1205,7 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
@@ -1133,6 +1214,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1143,11 +1226,11 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
- if (crtc_state->base.ctm)
+ if (crtc_state->hw.ctm)
cgm_mode |= CGM_PIPE_MODE_CSC;
- if (crtc_state->base.gamma_lut)
+ if (crtc_state->hw.gamma_lut)
cgm_mode |= CGM_PIPE_MODE_GAMMA;
return cgm_mode;
@@ -1185,6 +1268,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1197,6 +1282,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT;
}
+static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * CSC comes after the LUT in RGB->YCbCr mode.
+ * RGB->YCbCr needs the limited range offsets added to
+ * the output. RGB limited range output is handled by
+ * the hw automagically elsewhere.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return CSC_BLACK_SCREEN_OFFSET;
+
+ return CSC_MODE_YUV_TO_RGB |
+ CSC_POSITION_BEFORE_GAMMA;
+}
+
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1206,24 +1306,26 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/*
- * We don't expose the ctm on ilk/snb currently,
- * nor do we enable YCbCr output. Also RGB limited
- * range output is handled by the hw automagically.
+ * We don't expose the ctm on ilk/snb currently. Also, RGB
+ * limited range output is handled by the hw automagically.
*/
- crtc_state->csc_enable = false;
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
- crtc_state->csc_mode = 0;
+ crtc_state->csc_mode = ilk_csc_mode(crtc_state);
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1232,8 +1334,8 @@ static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
if (!crtc_state->gamma_enable ||
crtc_state_is_legacy_gamma(crtc_state))
return GAMMA_MODE_MODE_8BIT;
- else if (crtc_state->base.gamma_lut &&
- crtc_state->base.degamma_lut)
+ else if (crtc_state->hw.gamma_lut &&
+ crtc_state->hw.degamma_lut)
return GAMMA_MODE_MODE_SPLIT;
else
return GAMMA_MODE_MODE_10BIT;
@@ -1247,7 +1349,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
* CSC comes after the LUT in degamma, RGB->YCbCr,
* and RGB full->limited range mode.
*/
- if (crtc_state->base.degamma_lut ||
+ if (crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
limited_color_range)
return 0;
@@ -1265,13 +1367,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- (crtc_state->base.gamma_lut ||
- crtc_state->base.degamma_lut) &&
+ (crtc_state->hw.gamma_lut ||
+ crtc_state->hw.degamma_lut) &&
!crtc_state->c8_planes;
crtc_state->csc_enable =
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->base.ctm || limited_color_range;
+ crtc_state->hw.ctm || limited_color_range;
crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
@@ -1281,6 +1383,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1302,14 +1406,14 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/* On GLK+ degamma LUT is controlled by csc_enable */
crtc_state->csc_enable =
- crtc_state->base.degamma_lut ||
+ crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->base.ctm || crtc_state->limited_color_range;
+ crtc_state->hw.ctm || crtc_state->limited_color_range;
crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
@@ -1319,6 +1423,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1326,14 +1432,14 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
u32 gamma_mode = 0;
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
gamma_mode |= PRE_CSC_GAMMA_ENABLE;
- if (crtc_state->base.gamma_lut &&
+ if (crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes)
gamma_mode |= POST_CSC_GAMMA_ENABLE;
- if (!crtc_state->base.gamma_lut ||
+ if (!crtc_state->hw.gamma_lut ||
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
else
@@ -1346,7 +1452,7 @@ static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
{
u32 csc_mode = 0;
- if (crtc_state->base.ctm)
+ if (crtc_state->hw.ctm)
csc_mode |= ICL_CSC_ENABLE;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
@@ -1368,9 +1474,408 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
crtc_state->csc_mode = icl_csc_mode(crtc_state);
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+ else
+ return i9xx_gamma_precision(crtc_state);
+}
+
+static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return chv_gamma_precision(crtc_state);
+ else
+ return i9xx_gamma_precision(crtc_state);
+ } else {
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ return glk_gamma_precision(crtc_state);
+ else if (IS_IRONLAKE(dev_priv))
+ return ilk_gamma_precision(crtc_state);
+ }
+
return 0;
}
+static bool err_check(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2, u32 err)
+{
+ return ((abs((long)lut2->red - lut1->red)) <= err) &&
+ ((abs((long)lut2->blue - lut1->blue)) <= err) &&
+ ((abs((long)lut2->green - lut1->green)) <= err);
+}
+
+static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
+{
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (!err_check(&lut1[i], &lut2[i], err))
+ return false;
+ }
+
+ return true;
+}
+
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision)
+{
+ struct drm_color_lut *lut1, *lut2;
+ int lut_size1, lut_size2;
+ u32 err;
+
+ if (!blob1 != !blob2)
+ return false;
+
+ if (!blob1)
+ return true;
+
+ lut_size1 = drm_color_lut_size(blob1);
+ lut_size2 = drm_color_lut_size(blob2);
+
+ /* check sw and hw lut size */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (lut_size1 != lut_size2)
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ lut1 = blob1->data;
+ lut2 = blob2->data;
+
+ err = 0xffff >> bit_precision;
+
+ /* check sw and hw lut entry to be equal */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (!intel_color_lut_entry_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ return true;
+}
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
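
intel_color_lut_pack() above widens an N-bit hardware value back to the 16-bit property range by clamping and left-shifting, and intel_color_lut_equal() earlier in this hunk compares software and hardware LUTs with a tolerance of err = 0xffff >> bit_precision. A standalone illustration of both, assuming a 10-bit hardware LUT:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Widen an N-bit hardware value to the 16-bit LUT property range. */
static uint32_t lut_pack(uint32_t val, unsigned int bit_precision)
{
        uint32_t max = 0xffff >> (16 - bit_precision);

        if (val > max)
                val = max;
        if (bit_precision < 16)
                val <<= 16 - bit_precision;
        return val;
}

/* Two 16-bit channel values match if they differ by at most the precision error. */
static int channel_matches(uint16_t sw, uint16_t hw, unsigned int bit_precision)
{
        uint32_t err = 0xffff >> bit_precision;

        return (uint32_t)abs((int)sw - (int)hw) <= err;
}

int main(void)
{
        /* A 10-bit hardware value of 0x3ff widens to 0xffc0... */
        uint16_t hw = (uint16_t)lut_pack(0x3ff, 10);

        /* ...which still matches a software value of 0xffff within err = 0x3f. */
        printf("hw = 0x%04x, matches 0xffff: %d\n", hw, channel_matches(0xffff, hw, 10));
        return 0;
}
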
+
+static struct drm_property_blob *
+i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ if (HAS_GMCH(dev_priv))
+ val = I915_READ(PALETTE(pipe, i));
+ else
+ val = I915_READ(LGC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_RED_MASK, val), 8);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_GREEN_MASK, val), 8);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_BLUE_MASK, val), 8);
+ }
+
+ return blob;
+}
+
+static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+}
+
+static struct drm_property_blob *
+i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val1, val2;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
+ val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
+
+ blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, val1);
+ blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
+ blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ }
+
+ blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 0)));
+ blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 1)));
+ blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 2)));
+
+ return blob;
+}
+
+static void i965_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+}
+
+static struct drm_property_blob *
+chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
+
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ }
+
+ return blob;
+}
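
chv_read_cgm_lut() reflects the CGM gamma layout: each entry spans two dwords, with green and blue packed into the first and red alone in the second, each a 10-bit field widened back to the 16-bit property range. An illustrative unpack, with field positions assumed for the sketch:

#include <stdint.h>

/* Illustrative only: green and blue share dword 0, red sits in dword 1.
 * The shifts are assumptions for this sketch; the driver goes through the
 * CGM_PIPE_GAMMA_*_MASK definitions and REG_FIELD_GET() instead. */
static void chv_cgm_unpack(uint32_t w0, uint32_t w1,
			   uint16_t *r, uint16_t *g, uint16_t *b)
{
	*g = (uint16_t)(((w0 >> 16) & 0x3ff) << 6);	/* widen 10 -> 16 bits */
	*b = (uint16_t)((w0 & 0x3ff) << 6);
	*r = (uint16_t)((w1 & 0x3ff) << 6);
}
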
+
+static void chv_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+ else
+ i965_read_luts(crtc_state);
+}
+
+static struct drm_property_blob *
+ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(PREC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_BLUE_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void ilk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+}
+
+static struct drm_property_blob *
+glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * hw_lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ val = I915_READ(PREC_PAL_DATA(pipe));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_BLUE_MASK, val), 10);
+ }
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+
+ return blob;
+}
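
Unlike the ilk path, the GLK+ precision palette is not directly indexed: glk_read_lut_10() programs PREC_PAL_INDEX with PAL_PREC_AUTO_INCREMENT, streams all entries out of the single PREC_PAL_DATA register, then resets the index. A standalone sketch of that access pattern against a faked index/data register pair (none of this is the real MMIO interface):

#include <stdint.h>

#define SKETCH_AUTO_INC	(1u << 15)	/* placeholder bit for the sketch */

static uint32_t fake_lut[1024];
static uint32_t fake_index;

static uint32_t read_pal_data(void)
{
	uint32_t v = fake_lut[fake_index & 0x3ff];

	if (fake_index & SKETCH_AUTO_INC)
		fake_index++;			/* hardware bumps the index for us */
	return v;
}

static void read_prec_pal(uint32_t *out, int n)
{
	fake_index = 0 | SKETCH_AUTO_INC;	/* PREC_PAL_INDEX write */
	for (int i = 0; i < n; i++)
		out[i] = read_pal_data();	/* repeated PREC_PAL_DATA reads */
	fake_index = 0;				/* reset, as glk_read_lut_10() does */
}
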
+
+static void glk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1383,14 +1888,17 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = chv_load_luts;
+ dev_priv->display.read_luts = chv_read_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
+ dev_priv->display.read_luts = i965_read_luts;
} else {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i9xx_load_luts;
+ dev_priv->display.read_luts = i9xx_read_luts;
}
} else {
if (INTEL_GEN(dev_priv) >= 11)
@@ -1409,16 +1917,19 @@ void intel_color_init(struct intel_crtc *crtc)
else
dev_priv->display.color_commit = ilk_color_commit;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
- else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.read_luts = glk_read_luts;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.load_luts = bdw_load_luts;
- else if (INTEL_GEN(dev_priv) >= 7)
+ } else if (INTEL_GEN(dev_priv) >= 7) {
dev_priv->display.load_luts = ivb_load_luts;
- else
+ } else {
dev_priv->display.load_luts = ilk_load_luts;
+ dev_priv->display.read_luts = ilk_read_luts;
+ }
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 057e8ac63555..173727aaa24d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -6,13 +6,20 @@
#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__
+#include <linux/types.h>
+
struct intel_crtc_state;
struct intel_crtc;
+struct drm_property_blob;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision);
#endif /* __INTEL_COLOR_H__ */
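
The new intel_color_lut_equal()/intel_color_get_gamma_bit_precision() declarations let state readout compare a software gamma blob against what was read back from hardware at reduced precision. A hedged sketch of the kind of masked comparison such a helper implies (an illustration, not the driver's actual implementation):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct color_lut { uint16_t red, green, blue; };

/* Compare two LUTs while ignoring bits the hardware cannot store
 * (bit_precision <= 16); a simplified stand-in for intel_color_lut_equal(). */
static bool lut_equal(const struct color_lut *a, const struct color_lut *b,
		      size_t len, unsigned int bit_precision)
{
	uint16_t mask = (uint16_t)(0xffff << (16 - bit_precision));

	for (size_t i = 0; i < len; i++) {
		if ((a[i].red   & mask) != (b[i].red   & mask) ||
		    (a[i].green & mask) != (b[i].green & mask) ||
		    (a[i].blue  & mask) != (b[i].blue  & mask))
			return false;
	}
	return true;
}
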
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 841708da5a56..44bbc7e74fc3 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -4,15 +4,15 @@
*/
#include "intel_combo_phy.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
-#define for_each_combo_port(__dev_priv, __port) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if(intel_port_is_combophy(__dev_priv, __port))
+#define for_each_combo_phy(__dev_priv, __phy) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
-#define for_each_combo_port_reverse(__dev_priv, __port) \
- for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
- for_each_if(intel_port_is_combophy(__dev_priv, __port))
+#define for_each_combo_phy_reverse(__dev_priv, __phy) \
+ for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
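
The rename from port- to PHY-based iteration matters because on gen11+ several DDIs can share one combo PHY, so init/teardown must walk PHYs rather than ports. Usage sketch (dev_priv is the usual struct drm_i915_private pointer; the loop bodies are placeholders):

/* Walk every combo PHY, forwards on init and backwards on teardown,
 * exactly as icl_combo_phys_init()/_uninit() below do. */
enum phy phy;

for_each_combo_phy(dev_priv, phy) {
	/* program COMP/procmon values for this PHY */
}

for_each_combo_phy_reverse(dev_priv, phy) {
	/* power the PHY back down */
}
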
enum {
PROCMON_0_85V_DOT_0,
@@ -38,18 +38,17 @@ static const struct cnl_procmon {
};
/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
+ * CNL has just one set of registers, while gen11 has a set for each combo PHY.
+ * The CNL registers are equivalent to the gen11 PHY A registers, which is why we
+ * call the ICL macros even though the function has CNL in its name.
*/
static const struct cnl_procmon *
-cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(ICL_PORT_COMP_DW3(port));
+ val = I915_READ(ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -75,32 +74,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
}
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+ procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- val = I915_READ(ICL_PORT_COMP_DW1(port));
+ val = I915_READ(ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
- I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+ I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
- enum port port, i915_reg_t reg, u32 mask,
+ enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
u32 val = I915_READ(reg);
if ((val & mask) != expected_val) {
- DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+ DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
"current %08x mask %08x expected %08x\n",
- port_name(port),
+ phy_name(phy),
reg.reg, val, mask, expected_val);
return false;
}
@@ -109,18 +108,18 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv,
}
static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
const struct cnl_procmon *procmon;
bool ret;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+ procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+ ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
-1U, procmon->dw9);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
-1U, procmon->dw10);
return ret;
@@ -134,15 +133,15 @@ static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
{
- enum port port = PORT_A;
+ enum phy phy = PHY_A;
bool ret;
if (!cnl_combo_phy_enabled(dev_priv))
return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+ ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5,
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
@@ -157,7 +156,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
- cnl_set_procmon_ref_values(dev_priv, PORT_A);
+ cnl_set_procmon_ref_values(dev_priv, PHY_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -181,35 +180,39 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
- return !(I915_READ(ICL_PHY_MISC(port)) &
- ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+ /* The PHY C added by EHL has no PHY_MISC register */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ else
+ return !(I915_READ(ICL_PHY_MISC(phy)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
bool ret;
- if (!icl_combo_phy_enabled(dev_priv, port))
+ if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (port == PORT_A)
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+ if (phy == PHY_A)
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
- enum port port, bool is_dsi,
+ enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
@@ -254,66 +257,120 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = I915_READ(ICL_PORT_CL_DW10(port));
+ val = I915_READ(ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
- I915_WRITE(ICL_PORT_CL_DW10(port), val);
+ I915_WRITE(ICL_PORT_CL_DW10(phy), val);
+}
+
+static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
+{
+ bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
+ bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return val | ICL_PHY_MISC_MUX_DDID;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI, leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return val & ~ICL_PHY_MISC_MUX_DDID;
}
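
ehl_combo_phy_a_mux() reduces to a small truth table over the three VBT presence bits: only the "DDI-D present, DDI-A absent, no DSI" case selects the external mux setting; every other combination keeps (or falls back to) the internal routing. A standalone sketch of the same decision, register bits omitted:

#include <stdbool.h>
#include <stdio.h>

/* Returns true when combo PHY A should be muxed to DDI-D (external display). */
static bool phy_a_muxed_to_ddid(bool ddi_a_present, bool ddi_d_present,
				bool dsi_present)
{
	return ddi_d_present && !ddi_a_present && !dsi_present;
}

int main(void)
{
	printf("%d\n", phy_a_muxed_to_ddid(false, true, false)); /* 1: external */
	printf("%d\n", phy_a_muxed_to_ddid(true, true, false));  /* 0: conflict, internal wins */
	return 0;
}
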
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
- enum port port;
+ enum phy phy;
- for_each_combo_port(dev_priv, port) {
+ for_each_combo_phy(dev_priv, phy) {
u32 val;
- if (icl_combo_phy_verify_state(dev_priv, port)) {
- DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
- port_name(port));
+ if (icl_combo_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
continue;
}
- val = I915_READ(ICL_PHY_MISC(port));
+ /*
+ * Although EHL adds a combo PHY C, there's no PHY_MISC
+ * register for it and no need to program the
+ * DE_IO_COMP_PWR_DOWN setting on PHY C.
+ */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ goto skip_phy_misc;
+
+ /*
+ * EHL's combo PHY A can be hooked up to either an external
+ * display (via DDI-D) or an internal display (via DDI-A or
+ * the DSI DPHY). This is a motherboard design decision that
+ * can't be changed on the fly, so initialize the PHY's mux
+ * based on whether our VBT indicates the presence of any
+ * "internal" child devices.
+ */
+ val = I915_READ(ICL_PHY_MISC(phy));
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
+ val = ehl_combo_phy_a_mux(dev_priv, val);
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
+ I915_WRITE(ICL_PHY_MISC(phy), val);
- cnl_set_procmon_ref_values(dev_priv, port);
+skip_phy_misc:
+ cnl_set_procmon_ref_values(dev_priv, phy);
- if (port == PORT_A) {
- val = I915_READ(ICL_PORT_COMP_DW8(port));
+ if (phy == PHY_A) {
+ val = I915_READ(ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
- I915_WRITE(ICL_PORT_COMP_DW8(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
}
- val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val = I915_READ(ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
- val = I915_READ(ICL_PORT_CL_DW5(port));
+ val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), val);
}
}
static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
- enum port port;
+ enum phy phy;
- for_each_combo_port_reverse(dev_priv, port) {
+ for_each_combo_phy_reverse(dev_priv, phy) {
u32 val;
- if (port == PORT_A &&
- !icl_combo_phy_verify_state(dev_priv, port))
- DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
- port_name(port));
+ if (phy == PHY_A &&
+ !icl_combo_phy_verify_state(dev_priv, phy))
+ DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+
+ /*
+ * Although EHL adds a combo PHY C, there's no PHY_MISC
+ * register for it and no need to program the
+ * DE_IO_COMP_PWR_DOWN setting on PHY C.
+ */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ goto skip_phy_misc;
- val = I915_READ(ICL_PHY_MISC(port));
+ val = I915_READ(ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
+ I915_WRITE(ICL_PHY_MISC(phy), val);
- val = I915_READ(ICL_PORT_COMP_DW0(port));
+skip_phy_misc:
+ val = I915_READ(ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
index e6e195a83b19..660886f86c59 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -7,14 +7,14 @@
#define __INTEL_COMBO_PHY_H__
#include <linux/types.h>
-#include <drm/i915_drm.h>
struct drm_i915_private;
+enum phy;
void intel_combo_phy_init(struct drm_i915_private *dev_priv);
void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
- enum port port, bool is_dsi,
+ enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal);
#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 41310f8e5a2a..1133c4e97bb4 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
int intel_connector_init(struct intel_connector *connector)
@@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector)
if (ret)
goto err;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure(to_i915(connector->dev))) {
ret = -EFAULT;
goto err_backlight;
}
@@ -277,7 +277,22 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
void
intel_attach_colorspace_property(struct drm_connector *connector)
{
- if (!drm_mode_create_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (drm_mode_create_hdmi_colorspace_property(connector))
+ return;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (drm_mode_create_dp_colorspace_property(connector))
+ return;
+ break;
+ default:
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return;
+ }
+
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 3fcf2f84bcce..f976b800b245 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,7 +38,7 @@
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
return container_of(encoder, struct intel_crt, base);
}
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
{
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
@@ -132,9 +132,9 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
{
pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -144,13 +144,13 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, pipe_config);
- pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
- pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -161,8 +161,8 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 adpa;
if (INTEL_GEN(dev_priv) >= 5)
@@ -241,6 +241,14 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ ilk_pfit_disable(old_crtc_state);
+
intel_ddi_disable_pipe_clock(old_crtc_state);
pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
@@ -271,14 +279,14 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+ hsw_fdi_link_train(encoder, crtc_state);
intel_ddi_enable_pipe_clock(crtc_state);
}
@@ -288,7 +296,7 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!crtc_state->has_pch_encoder);
@@ -343,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev_priv) &&
- (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
/* HSW/BDW FDI limited to 4k */
@@ -358,7 +366,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -373,7 +381,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -390,7 +398,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -419,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return 0;
}
-static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(dev);
u32 adpa;
bool ret;
@@ -432,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
- crt->force_hotplug_required = 0;
+ crt->force_hotplug_required = false;
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -443,9 +451,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(&dev_priv->uncore,
+ if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
@@ -469,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(dev);
bool reenable_hpd;
u32 adpa;
@@ -497,10 +505,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(&dev_priv->uncore,
- crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
- 1000)) {
+ if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
@@ -529,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
int i, tries = 0;
if (HAS_PCH_SPLIT(dev_priv))
- return intel_ironlake_crt_detect_hotplug(connector);
+ return ilk_crt_detect_hotplug(connector);
if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
@@ -550,9 +556,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
- CRT_HOTPLUG_FORCE_DETECT, 0,
- 1000))
+ if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
@@ -604,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
@@ -790,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector,
bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
int status, ret;
@@ -847,7 +852,7 @@ load_detect:
}
/* for pre-945g platforms use load detect */
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret > 0) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
@@ -867,6 +872,13 @@ load_detect:
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
return status;
}
@@ -874,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
struct i2c_adapter *i2c;
@@ -913,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
+ crt->force_hotplug_required = true;
}
}
@@ -997,9 +1009,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev_priv))
- crt->base.crtc_mask = (1 << 0);
+ crt->base.pipe_mask = BIT(PIPE_A);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ crt->base.pipe_mask = ~0;
if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
@@ -1051,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
/*
* Configure the automatic hotplug detection stuff
*/
- crt->force_hotplug_required = 0;
+ crt->force_hotplug_required = false;
/*
* TODO: find a proper way to discover whether we need to set the
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..33f1dc3d7c1a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -32,10 +32,11 @@
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
@@ -45,6 +46,8 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "intel_tc.h"
#include "intel_vdsc.h"
struct ddi_buf_trans {
@@ -585,6 +588,40 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
{ 0x0, 0x00, 0x00 }, /* 3 0 */
};
+struct tgl_dkl_phy_ddi_buf_trans {
+ u32 dkl_vswing_control;
+ u32 dkl_preshoot_control;
+ u32 dkl_de_emphasis_control;
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
+};
+
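The two Dekel (DKL) PHY tables are indexed by the same "level" the DP/HDMI code derives from voltage swing and pre-emphasis (or from the VBT HDMI level shift), mirroring how the existing cnl/icl tables are consumed. A hedged lookup sketch, written as if it sat in this file and clamping the level the way the callers clamp against n_entries:

/* Sketch only: pick a DKL DP entry by level, clamping out-of-range values. */
static const struct tgl_dkl_phy_ddi_buf_trans *
dkl_dp_entry_sketch(int level)
{
	int n = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);

	if (level < 0 || level >= n)
		level = n - 1;

	return &tgl_dkl_phy_dp_ddi_trans[level];
}
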
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -846,8 +883,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
- int type, int rate, int *n_entries)
+icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
@@ -866,13 +903,20 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
+ struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
int n_entries, level, default_entry;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
- level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ 0, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ default_entry = n_entries - 1;
+ } else if (INTEL_GEN(dev_priv) == 11) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -897,12 +941,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
return 0;
}
- /* Choose a good default if VBT is badly populated */
- if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
- level = default_entry;
-
if (WARN_ON_ONCE(n_entries == 0))
return 0;
+
+ if (port_info->hdmi_level_shift_set)
+ level = port_info->hdmi_level_shift;
+ else
+ level = default_entry;
+
if (WARN_ON_ONCE(level >= n_entries))
level = n_entries - 1;
@@ -1047,6 +1093,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
+ case DPLL_ID_TGL_MGPLL5:
+ case DPLL_ID_TGL_MGPLL6:
return DDI_CLK_SEL_MG;
}
}
@@ -1060,18 +1108,14 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
* DDI A (which is used for eDP)
*/
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 temp, i, rx_ctl_val, ddi_pll_sel;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- }
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
@@ -1194,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
@@ -1411,11 +1455,30 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
ref_clock = dev_priv->cdclk.hw.ref;
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
switch (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
@@ -1465,8 +1528,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
- dotclock = pipe_config->port_clock * 2 / 3;
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
else
dotclock = pipe_config->port_clock;
@@ -1477,7 +1540,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
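
The new deep-color branch in ddi_dotclock_get() generalises the old 12 bpc special case: for any HDMI sink depth above 24 bpp the pixel clock is the port clock scaled by 24/pipe_bpp, so 36 bpp still yields the old 2/3 factor and 30 bpp now yields 4/5. A quick arithmetic check with example link clocks of my own choosing:

#include <stdio.h>

/* dotclock = port_clock * 24 / pipe_bpp, as in ddi_dotclock_get() above */
int main(void)
{
	printf("%d\n", 445500 * 24 / 36); /* 12 bpc link -> 297000 kHz */
	printf("%d\n", 371250 * 24 / 30); /* 10 bpc link -> 297000 kHz */
	return 0;
}
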
static void icl_ddi_clock_get(struct intel_encoder *encoder,
@@ -1486,9 +1549,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int link_clock;
- if (intel_port_is_combophy(dev_priv, port)) {
+ if (intel_phy_is_combo(dev_priv, phy)) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
@@ -1689,9 +1753,10 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
}
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
@@ -1701,66 +1766,63 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
- temp = TRANS_MSA_SYNC_CLK;
-
- if (crtc_state->limited_color_range)
- temp |= TRANS_MSA_CEA_RANGE;
+ temp = DP_MSA_MISC_SYNC_CLOCK;
switch (crtc_state->pipe_bpp) {
case 18:
- temp |= TRANS_MSA_6_BPC;
+ temp |= DP_MSA_MISC_6_BPC;
break;
case 24:
- temp |= TRANS_MSA_8_BPC;
+ temp |= DP_MSA_MISC_8_BPC;
break;
case 30:
- temp |= TRANS_MSA_10_BPC;
+ temp |= DP_MSA_MISC_10_BPC;
break;
case 36:
- temp |= TRANS_MSA_12_BPC;
+ temp |= DP_MSA_MISC_12_BPC;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
break;
}
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->limited_color_range)
+ temp |= DP_MSA_MISC_COLOR_CEA_RGB;
+
/*
* As per DP 1.2 spec section 2.3.4.3 while sending
* YCBCR 444 signals we should program MSA MISC1/0 fields with
- * colorspace information. The output colorspace encoding is BT601.
+ * colorspace information.
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
+ temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+
/*
* As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
* of Color Encoding Format and Content Color Gamut] while sending
- * YCBCR 420 signals we should program MSA MISC1 fields which
- * indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
+ * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields
+ * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
*/
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- temp |= TRANS_MSA_USE_VSC_SDP;
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
-}
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ temp |= DP_MSA_MISC_COLOR_VSC_SDP;
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
- bool state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
-
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (state == true)
- temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- else
- temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
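
The MSA MISC word written above ends up as an OR of the sync-clock bit, a BPC code, and the optional CEA-range, colorimetry and VSC-SDP flags. A standalone sketch of the pipe_bpp mapping alone, with placeholder constants standing in for the DP_MSA_MISC_*_BPC definitions:

/* Sketch of the pipe_bpp -> MSA MISC bpc field mapping used above.
 * The MISC_*_BPC values are placeholders, not the real register encoding. */
enum { MISC_6_BPC = 0, MISC_8_BPC = 1, MISC_10_BPC = 2, MISC_12_BPC = 3 };

static int msa_bpc_code(int pipe_bpp)
{
	switch (pipe_bpp) {
	case 18: return MISC_6_BPC;	/* 6 bits per channel * 3 channels */
	case 24: return MISC_8_BPC;
	case 30: return MISC_10_BPC;
	case 36: return MISC_12_BPC;
	default: return -1;		/* driver warns via MISSING_CASE() */
	}
}
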
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+/*
+ * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
+ *
+ * Only intended to be used by intel_ddi_enable_transcoder_func() and
+ * intel_ddi_config_transcoder_func().
+ */
+static u32
+intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -1770,7 +1832,10 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- temp |= TRANS_DDI_SELECT_PORT(port);
+ if (INTEL_GEN(dev_priv) >= 12)
+ temp |= TGL_TRANS_DDI_SELECT_PORT(port);
+ else
+ temp |= TRANS_DDI_SELECT_PORT(port);
switch (crtc_state->pipe_bpp) {
case 18:
@@ -1789,9 +1854,9 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
BUG();
}
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1834,25 +1899,69 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder master;
+
+ master = crtc_state->mst_master_transcoder;
+ WARN_ON(master == INVALID_TRANSCODER);
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
+ }
} else {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ return temp;
+}
+
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+/*
+ * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
+ * bit.
+ */
+static void
+intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ temp &= ~TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
- u32 val = I915_READ(reg);
+ u32 val;
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- val |= TRANS_DDI_PORT_NONE;
- I915_WRITE(reg, val);
+ val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~TRANS_DDI_FUNC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (!intel_dp_mst_is_master_trans(crtc_state))
+ val &= ~TGL_TRANS_DDI_PORT_MASK;
+ } else {
+ val &= ~TRANS_DDI_PORT_MASK;
+ }
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -2003,10 +2112,27 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
+ unsigned int port_mask, ddi_select;
+ intel_wakeref_t trans_wakeref;
+
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!trans_wakeref)
+ continue;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ port_mask = TGL_TRANS_DDI_PORT_MASK;
+ ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
+ } else {
+ port_mask = TRANS_DDI_PORT_MASK;
+ ddi_select = TRANS_DDI_SELECT_PORT(port);
+ }
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
+ trans_wakeref);
- if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+ if ((tmp & port_mask) != ddi_select)
continue;
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
@@ -2017,18 +2143,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
- port_name(port));
+ DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
- port_name(port), *pipe_mask);
+ DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
- port_name(port), *pipe_mask, mst_pipe_mask);
+ DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2038,8 +2166,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("Port %c enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", port_name(port), tmp);
+ DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
+ "(PHY_CTL %08x)\n", encoder->base.base.id,
+ encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2085,6 +2214,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2094,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
/*
@@ -2102,39 +2232,49 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ intel_phy_is_tc(dev_priv, phy))
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
/*
* VDSC power is needed when DSC is enabled
*/
- if (crtc_state->dsc_params.compression_enable)
+ if (crtc_state->dsc.compression_enable)
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_PORT(port));
+ else
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+ }
}
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
+ else
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+ }
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -2154,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u8 iboost;
@@ -2225,13 +2365,20 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
- if (INTEL_GEN(dev_priv) >= 11) {
- if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ } else if (INTEL_GEN(dev_priv) == 11) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -2357,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
width = 4;
rate = 0; /* Rate is always < than 6GHz for HDMI */
} else {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
@@ -2413,15 +2560,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type,
+ u32 level, enum phy phy, int type,
int rate)
{
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
- rate, &n_entries);
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
if (!ddi_translations)
return;
@@ -2431,41 +2578,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2473,7 +2620,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int width = 0;
int rate = 0;
u32 val;
@@ -2483,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
width = 4;
/* Rate is always < than 6GHz for HDMI */
} else {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
@@ -2494,12 +2641,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2509,33 +2656,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(ICL_PORT_CL_DW5(port));
+ val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
+ icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2543,7 +2690,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
int ln;
@@ -2559,33 +2706,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2593,9 +2740,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2603,7 +2750,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2614,17 +2761,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, port));
+ val = I915_READ(MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, port), val);
+ I915_WRITE(MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, port));
+ val = I915_READ(MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2632,9 +2779,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, port), val);
+ I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, port));
+ val = I915_READ(MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2642,18 +2789,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, port), val);
+ I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
}
}
@@ -2663,14 +2810,77 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (intel_port_is_combophy(dev_priv, port))
+ if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
+static void
+tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
+ u32 level)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
+ u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
+ } else {
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ ddi_translations = tgl_dkl_phy_dp_ddi_trans;
+ }
+
+ if (level >= n_entries)
+ level = n_entries - 1;
+
+ dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
+
+ for (ln = 0; ln < 2; ln++) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+
+ I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val &= ~DKL_TX_DP20BITMODE;
+ I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ }
+}
+
+static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int link_clock,
+ u32 level,
+ enum intel_output_type type)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
+ else
+ tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+}
+
static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2702,7 +2912,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
+ else if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2728,12 +2941,13 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp)
static inline
u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
- if (intel_port_is_combophy(dev_priv, port)) {
- return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- } else if (intel_port_is_tc(dev_priv, port)) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ } else if (intel_phy_is_tc(dev_priv, phy)) {
+ enum tc_port tc_port = intel_port_to_tc(dev_priv,
+ (enum port)phy);
return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
}
@@ -2746,23 +2960,33 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ /*
+ * Even though this register references DDIs, note that we
+ * want to pass the PHY rather than the port (DDI). For
+ * ICL, port=phy in all cases so it doesn't matter, but for
+ * EHL the bspec notes the following:
+ *
+ * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
+ * Clock Select chooses the PLL for both DDIA and DDID and
+ * drives port A in all cases."
+ */
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ POSTING_READ(ICL_DPCLKA_CFGCR0);
}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2770,23 +2994,50 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
+static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
+ u32 port_mask, bool ddi_clk_needed)
+{
+ enum port port;
+ u32 val;
+
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_port_masked(port, port_mask) {
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
+ phy);
+
+ if (ddi_clk_needed == !ddi_clk_off)
+ continue;
+
+ /*
+ * Punt on the case now where clock is gated, but it would
+ * be needed by the port. Something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ }
+}
+
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
- enum port port;
u32 port_mask;
bool ddi_clk_needed;
@@ -2835,27 +3086,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_port_masked(port, port_mask) {
- bool ddi_clk_ungated = !(val &
- icl_dpclka_cfgcr0_clk_off(dev_priv,
- port));
-
- if (ddi_clk_needed == ddi_clk_ungated)
- continue;
-
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (WARN_ON(ddi_clk_needed))
- continue;
-
- DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- port_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- }
+ icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -2863,6 +3094,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -2872,9 +3104,15 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_port_is_combophy(dev_priv, port))
+ if (!intel_phy_is_combo(dev_priv, phy))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+ else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
+ /*
+ * MG does not exist but the programming is required
+ * to ungate DDIC and DDID
+ */
+ I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2912,9 +3150,11 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_port_is_combophy(dev_priv, port))
+ if (!intel_phy_is_combo(dev_priv, phy) ||
+ (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -2927,133 +3167,90 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
-static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
- int ln;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(MG_DP_MODE(ln, port), val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
- int ln;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(MG_DP_MODE(ln, port), val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+static void
+icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 ln0, ln1, lane_info;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+ u32 ln0, ln1, pin_assignment;
+ u8 width;
- if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
- ln0 = I915_READ(MG_DP_MODE(0, port));
- ln1 = I915_READ(MG_DP_MODE(1, port));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = I915_READ(DKL_DP_MODE(tc_port));
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ ln0 = I915_READ(MG_DP_MODE(0, tc_port));
+ ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ }
- switch (intel_dig_port->tc_type) {
- case TC_PORT_TYPEC:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ /* DPPATC */
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+ width = crtc_state->lane_count;
- switch (lane_info) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
+ switch (pin_assignment) {
+ case 0x0:
+ WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ if (width == 1) {
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x1:
+ if (width == 4) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x2:
+ if (width == 2) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x3:
+ case 0x5:
+ if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_info);
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ case 0x4:
+ case 0x6:
+ if (width == 1) {
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
break;
-
default:
- MISSING_CASE(intel_dig_port->tc_type);
- return;
+ MISSING_CASE(pin_assignment);
}
- I915_WRITE(MG_DP_MODE(0, port), ln0);
- I915_WRITE(MG_DP_MODE(1, port), ln1);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln0);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ } else {
+ I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
+ I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ }
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3070,20 +3267,19 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(encoder);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
- DP_TP_STATUS_FEC_ENABLE_LIVE,
- DP_TP_STATUS_FEC_ENABLE_LIVE,
- 1))
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@@ -3091,42 +3287,273 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(encoder);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
+ u32 val;
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void
+tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ u32 val, exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ exit_scanlines = cstate->dc3co_exitline;
+ exit_scanlines <<= EXITLINE_SHIFT;
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ val |= exit_scanlines;
+ val |= EXITLINE_ENABLE;
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *cstate)
+{
+ u32 exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
+ u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
+
+ cstate->dc3co_exitline = 0;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
+ if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
+ encoder->port != PORT_A)
+ return;
+
+ if (!cstate->has_psr2 || !cstate->hw.active)
+ return;
+
+ /*
+ * DC3CO Exit time 200us B.Spec 49196
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
+
+ if (WARN_ON(exit_scanlines > crtc_vdisplay))
+ return;
+
+ cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+ DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
+}
+
+static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
+{
+ u32 val;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return;
+
+ val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
+
+ if (val & EXITLINE_ENABLE)
+ crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+}
+
+static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int level = intel_ddi_dp_level(intel_dp);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+
+ tgl_set_psr2_transcoder_exitline(crtc_state);
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
+
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
+
+ /*
+ * 1. Enable Power Wells
+ *
+ * This was handled at the beginning of intel_atomic_commit_tail(),
+ * before we called down into this function.
+ */
+
+ /* 2. Enable Panel Power if PPS is required */
+ intel_edp_panel_on(intel_dp);
+
+ /*
+ * 3. For non-TBT Type-C ports, set FIA lane count
+ * (DFLEXDPSP.DPX4TXLATC)
+ *
+ * This was done before tgl_ddi_pre_enable_dp by
+ * hsw_crtc_enable()->intel_encoders_pre_pll_enable().
+ */
+
+ /*
+ * 4. Enable the port PLL.
+ *
+ * The PLL enabling itself was already done before this function by
+ * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * configure the PLL to port mapping here.
+ */
+ intel_ddi_clk_select(encoder, crtc_state);
+
+ /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+
+ /* 6. Program DP_MODE */
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ /*
+ * 7. The rest of the below are substeps under the bspec's "Enable and
+ * Train Display Port" step. Note that steps that are specific to
+ * MST will be handled by intel_mst_pre_enable_dp() before/after it
+ * calls into this function. Also intel_mst_pre_enable_dp() only calls
+ * us when active_mst_links==0, so any steps designated for "single
+ * stream or multi-stream master transcoder" can just be performed
+ * unconditionally here.
+ */
+
+ /*
+ * 7.a Configure Transcoder Clock Select to direct the Port clock to the
+ * Transcoder.
+ */
+ intel_ddi_enable_pipe_clock(crtc_state);
+
+ /*
+ * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+ * Transport Select
+ */
+ intel_ddi_config_transcoder_func(crtc_state);
+
+ /*
+ * 7.c Configure & enable DP_TP_CTL with link training pattern 1
+ * selected
+ *
+ * This will be handled by the intel_dp_start_link_train() farther
+ * down this function.
+ */
+
+ /* 7.e Configure voltage swing and related IO settings */
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
+ encoder->type);
+
+ /*
+ * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
+ * the used lanes of the DDI.
+ */
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+
+ /*
+ * 7.g Configure and enable DDI_BUF_CTL
+ * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
+ * after 500 us.
+ *
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder);
+
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+ * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
+ * in the FEC_CONFIGURATION register to 1 before initiating link
+ * training"
+ */
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+
+ /*
+ * 7.i Follow DisplayPort specification training sequence (see notes for
+ * failure handling)
+ * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+ * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+ * (timeout after 800 us)
+ */
+ intel_dp_start_link_train(intel_dp);
+
+ /* 7.k Set DP_TP_CTL link training to Normal */
+ if (!is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp);
+
+ /* 7.l Configure and enable FEC if needed */
+ intel_ddi_enable_fec(encoder, crtc_state);
+ intel_dsc_enable(encoder, crtc_state);
+}
+
+static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ if (INTEL_GEN(dev_priv) < 11)
+ WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ else
+ WARN_ON(is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3138,11 +3565,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_port_is_combophy(dev_priv, port)) {
+ if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
- intel_combo_phy_power_up_lanes(dev_priv, port, false,
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
@@ -3154,39 +3581,58 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+ if ((port != PORT_A || INTEL_GEN(dev_priv) >= 9) &&
+ !is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
intel_ddi_enable_fec(encoder, crtc_state);
- icl_enable_phy_clock_gating(dig_port);
-
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ else
+ hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+
+ /* For MST, the MSA is set from the MST encoder's pre_enable callback,
+ * after a Virtual Channel has been allocated.
+ */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
+}
+
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int level = intel_ddi_hdmi_level(dev_priv, port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+ level, INTEL_OUTPUT_HDMI);
+ else if (INTEL_GEN(dev_priv) == 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3196,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- icl_enable_phy_clock_gating(dig_port);
-
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@@ -3212,7 +3656,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -3240,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
} else {
struct intel_lspcon *lspcon =
- enc_to_intel_lspcon(&encoder->base);
+ enc_to_intel_lspcon(encoder);
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3269,10 +3713,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ }
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -3286,29 +3734,52 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (!is_mst) {
- intel_ddi_disable_pipe_clock(old_crtc_state);
- /*
- * Power down sink before disabling the port, otherwise we end
- * up getting interrupts from the sink on detecting link loss.
- */
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_mst) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ u32 val;
+
+ val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~TGL_TRANS_DDI_PORT_MASK;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ }
+ } else {
+ if (!is_mst)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
}
intel_disable_ddi_buf(encoder, old_crtc_state);
+ /*
+ * From TGL spec: "If single stream or multi-stream master transcoder:
+ * Configure Transcoder Clock select to direct no clock to the
+ * transcoder"
+ */
+ if (INTEL_GEN(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+ tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3316,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
dig_port->set_infoframes(encoder, false,
@@ -3334,11 +3805,46 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
+
+ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+ transcoder_name(old_crtc_state->cpu_transcoder));
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+}
+
static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+ }
/*
* When called from DP MST code:
@@ -3362,6 +3868,13 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
if (INTEL_GEN(dev_priv) >= 11)
icl_unmap_plls_to_ports(encoder);
+
+ if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
+ intel_display_power_put_unchecked(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+
+ if (is_tc_port)
+ intel_tc_port_put_link(dig_port);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3403,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -3411,7 +3924,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state);
- intel_dp_ycbcr_420_enable(intel_dp, crtc_state);
+ intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
+ intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -3422,12 +3936,12 @@ static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
{
- static const i915_reg_t regs[] = {
- [PORT_A] = CHICKEN_TRANS_EDP,
- [PORT_B] = CHICKEN_TRANS_A,
- [PORT_C] = CHICKEN_TRANS_B,
- [PORT_D] = CHICKEN_TRANS_C,
- [PORT_E] = CHICKEN_TRANS_A,
+ static const enum transcoder trans[] = {
+ [PORT_A] = TRANSCODER_EDP,
+ [PORT_B] = TRANSCODER_A,
+ [PORT_C] = TRANSCODER_B,
+ [PORT_D] = TRANSCODER_C,
+ [PORT_E] = TRANSCODER_A,
};
WARN_ON(INTEL_GEN(dev_priv) < 9);
@@ -3435,7 +3949,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
if (WARN_ON(port < PORT_A || port > PORT_E))
port = PORT_A;
- return regs[port];
+ return CHICKEN_TRANS(trans[port]);
}
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3443,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3511,14 +4025,16 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ crtc_state->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp->link_trained = false;
@@ -3566,9 +4082,9 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_ddi_set_pipe_settings(crtc_state);
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
@@ -3580,44 +4096,68 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ /*
+ * During the HDCP encryption session if Type change is requested,
+ * disable the HDCP and reenable it with new TYPE value.
+ */
if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
- else if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- intel_hdcp_disable(to_intel_connector(conn_state->connector));
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the hdcp state as DESIRED after the hdcp disable of type
+ * change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector,
+ crtc_state->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
}
-static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- enum port port)
+static void
+intel_ddi_update_prepare(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
- switch (pipe_config->lane_count) {
- case 1:
- val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML0(tc_port);
- break;
- case 2:
- val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
- break;
- case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
- break;
- default:
- MISSING_CASE(pipe_config->lane_count);
- }
- I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+ struct intel_crtc_state *crtc_state =
+ crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
+ int required_lanes = crtc_state ? crtc_state->lane_count : 1;
+
+ WARN_ON(crtc && crtc->active);
+
+ intel_tc_port_get_link(enc_to_dig_port(encoder),
+ required_lanes);
+ if (crtc_state && crtc_state->hw.active)
+ intel_update_active_dpll(state, crtc, encoder);
+}
+
+static void
+intel_ddi_update_complete(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
+{
+ intel_tc_port_put_link(enc_to_dig_port(encoder));
}
static void
@@ -3626,41 +4166,26 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum port port = encoder->port;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ if (is_tc_port)
+ intel_tc_port_get_link(dig_port, crtc_state->lane_count);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
- if (IS_GEN9_LP(dev_priv))
+ if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
+ /*
+ * Program the lane count for static/dynamic connections on
+ * Type-C ports. Skip this step for TBT.
+ */
+ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
-
- /*
- * Program the lane count for static/dynamic connections on Type-C ports.
- * Skip this step for TBT.
- */
- if (dig_port->tc_type == TC_PORT_UNKNOWN ||
- dig_port->tc_type == TC_PORT_TBT)
- return;
-
- intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
-}
-
-static void
-intel_ddi_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
-
- if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3669,38 +4194,39 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- u32 val;
+ u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
- if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
- val = I915_READ(DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
+ dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+
+ if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
+ ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+ if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
+ I915_WRITE(DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ dp_tp_ctl = DP_TP_CTL_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
if (intel_dp->link_mst)
- val |= DP_TP_CTL_MODE_MST;
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
else {
- val |= DP_TP_CTL_MODE_SST;
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
@@ -3735,15 +4261,16 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
return;
+ intel_dsc_get_config(encoder, pipe_config);
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3754,7 +4281,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
@@ -3776,7 +4303,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
- intel_dig_port = enc_to_dig_port(&encoder->base);
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@ -3804,17 +4330,42 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_reg_t dp_tp_ctl;
+
+ if (IS_GEN(dev_priv, 11))
+ dp_tp_ctl = DP_TP_CTL(encoder->port);
+ else
+ dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
+
+ pipe_config->fec_enable =
+ I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
+ }
+
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ pipe_config->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
+
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
break;
}
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ tgl_dc3co_exitline_get_config(pipe_config);
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -3884,7 +4435,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
@@ -3892,10 +4443,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
- else
+ } else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ tgl_dc3co_exitline_compute_config(encoder, pipe_config);
+ }
+
if (ret)
return ret;
@@ -3914,49 +4468,18 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
-static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- intel_dp_encoder_suspend(encoder);
-
- /*
- * TODO: disconnect also from USB DP alternate mode once we have a
- * way to handle the modeset restore in that mode during resume
- * even if the sink has disappeared while being suspended.
- */
- if (dig_port->tc_legacy_port)
- icl_tc_phy_disconnect(i915, dig_port);
-}
-
-static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
- struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
-
- if (intel_port_is_tc(i915, dig_port->base.port))
- intel_digital_port_connected(&dig_port->base);
-
- intel_dp_encoder_reset(drm_encoder);
-}
-
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
- if (intel_port_is_tc(i915, dig_port->base.port))
- icl_tc_phy_disconnect(i915, dig_port);
-
drm_encoder_cleanup(encoder);
kfree(dig_port);
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_ddi_encoder_reset,
+ .reset = intel_dp_encoder_reset,
.destroy = intel_ddi_encoder_destroy,
};
@@ -4014,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -4046,7 +4569,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (!crtc_state->hdmi_high_tmds_clock_ratio &&
@@ -4081,14 +4604,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
return modeset_pipe(&crtc->base, ctx);
}
-static bool intel_ddi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_ddi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_modeset_acquire_ctx ctx;
- bool changed;
+ enum intel_hotplug_state state;
int ret;
- changed = intel_encoder_hotplug(encoder, connector);
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4110,7 +4636,27 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- return changed;
+ /*
+	 * Unpowered type-c dongles can take some time to boot and be
+	 * responsive, so give those dongles some time to power up and
+	 * then retry the probe.
+ *
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+	 * disconnected (due to how the required lengths of the DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+ !dig_port->dp.is_mst)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
}
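
The retry condition added above is small enough to trace directly; the following standalone sketch (stand-in names only, not driver code) mirrors it and shows the two interesting cases.

#include <assert.h>
#include <stdbool.h>

/* stand-ins for enum intel_hotplug_state */
enum hp_state { HP_UNCHANGED, HP_CHANGED, HP_RETRY };

static enum hp_state maybe_retry(enum hp_state state, bool irq_received, bool is_mst)
{
	/* same condition as in intel_ddi_hotplug() above */
	if (state == HP_UNCHANGED && irq_received && !is_mst)
		return HP_RETRY;
	return state;
}

int main(void)
{
	/* HPD irq fired but detection saw no change: ask for another probe */
	assert(maybe_retry(HP_UNCHANGED, true, false) == HP_RETRY);
	/* MST ports are excluded from the retry path */
	assert(maybe_retry(HP_UNCHANGED, true, true) == HP_UNCHANGED);
	return 0;
}
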
static struct intel_connector *
@@ -4194,10 +4740,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
- enum pipe pipe;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
init_dp = port_info->supports_dp;
@@ -4224,32 +4769,30 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (!intel_dig_port)
return;
- intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
+ encoder = &intel_dig_port->base;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
- intel_encoder->hotplug = intel_ddi_hotplug;
- intel_encoder->compute_output_type = intel_ddi_compute_output_type;
- intel_encoder->compute_config = intel_ddi_compute_config;
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
- intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
- intel_encoder->pre_enable = intel_ddi_pre_enable;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->post_disable = intel_ddi_post_disable;
- intel_encoder->update_pipe = intel_ddi_update_pipe;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_ddi_encoder_suspend;
- intel_encoder->get_power_domains = intel_ddi_get_power_domains;
- intel_encoder->type = INTEL_OUTPUT_DDI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- intel_encoder->port = port;
- intel_encoder->cloneable = 0;
- for_each_pipe(dev_priv, pipe)
- intel_encoder->crtc_mask |= BIT(pipe);
+ encoder->hotplug = intel_ddi_hotplug;
+ encoder->compute_output_type = intel_ddi_compute_output_type;
+ encoder->compute_config = intel_ddi_compute_config;
+ encoder->enable = intel_enable_ddi;
+ encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ encoder->pre_enable = intel_ddi_pre_enable;
+ encoder->disable = intel_disable_ddi;
+ encoder->post_disable = intel_ddi_post_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->get_hw_state = intel_ddi_get_hw_state;
+ encoder->get_config = intel_ddi_get_config;
+ encoder->suspend = intel_dp_encoder_suspend;
+ encoder->get_power_domains = intel_ddi_get_power_domains;
+
+ encoder->type = INTEL_OUTPUT_DDI;
+ encoder->power_domain = intel_port_to_power_domain(port);
+ encoder->port = port;
+ encoder->cloneable = 0;
+ encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4257,43 +4800,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
else
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
- !port_info->supports_typec_usb &&
- !port_info->supports_tbt;
+ if (intel_phy_is_tc(dev_priv, phy)) {
+ bool is_legacy = !port_info->supports_typec_usb &&
+ !port_info->supports_tbt;
- switch (port) {
- case PORT_A:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_A_IO;
- break;
- case PORT_B:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_B_IO;
- break;
- case PORT_C:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_C_IO;
- break;
- case PORT_D:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_D_IO;
- break;
- case PORT_E:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_E_IO;
- break;
- case PORT_F:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_F_IO;
- break;
- default:
- MISSING_CASE(port);
+ intel_tc_port_init(intel_dig_port, is_legacy);
+
+ encoder->update_prepare = intel_ddi_update_prepare;
+ encoder->update_complete = intel_ddi_update_complete;
}
+ WARN_ON(port > PORT_I);
+ intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+ port - PORT_A;
+
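
The per-port switch this hunk removes is replaced by plain enum arithmetic; a minimal sketch of the assumption that makes this valid (per-port IO power domains laid out contiguously, hypothetical enum names):

#include <assert.h>

/* stand-ins for POWER_DOMAIN_PORT_DDI_A_IO..D_IO, assumed contiguous */
enum fake_io_domain { FAKE_DDI_A_IO, FAKE_DDI_B_IO, FAKE_DDI_C_IO, FAKE_DDI_D_IO };

int main(void)
{
	int port_a = 0, port_d = 3;	/* stand-ins for PORT_A..PORT_D */

	/* base domain + (port - PORT_A), the pattern used in the diff */
	assert(FAKE_DDI_A_IO + (port_d - port_a) == FAKE_DDI_D_IO);
	return 0;
}
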
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
@@ -4303,7 +4828,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
if (!intel_ddi_init_hdmi_connector(intel_dig_port))
goto err;
}
@@ -4324,12 +4849,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(intel_dig_port);
- if (intel_port_is_tc(dev_priv, port))
- intel_digital_port_connected(intel_encoder);
-
return;
err:
- drm_encoder_cleanup(encoder);
+ drm_encoder_cleanup(&encoder->base);
kfree(intel_dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index a08365da2643..167c6579d972 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
@@ -30,7 +30,8 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 592b92782fab..064dd99bbc49 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -29,9 +29,8 @@
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -47,6 +46,7 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
+#include "display/intel_dp_mst.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
@@ -56,15 +56,18 @@
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
-#include "intel_color.h"
#include "intel_cdclk.h"
-#include "intel_drv.h"
+#include "intel_color.h"
+#include "intel_display_types.h"
+#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
@@ -78,16 +81,28 @@
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
+#include "intel_tc.h"
+#include "intel_vga.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
};
-/* Primary plane formats for gen >= 4 */
+/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -95,6 +110,22 @@ static const u32 i965_primary_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
};
static const u64 i9xx_format_modifiers[] = {
@@ -115,8 +146,8 @@ static const u64 cursor_format_modifiers[] = {
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
@@ -127,23 +158,18 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
struct intel_limit {
struct {
@@ -344,7 +370,7 @@ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
},
};
-static const struct intel_limit intel_limits_pineview_sdvo = {
+static const struct intel_limit pnv_limits_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
@@ -359,7 +385,7 @@ static const struct intel_limit intel_limits_pineview_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const struct intel_limit intel_limits_pineview_lvds = {
+static const struct intel_limit pnv_limits_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
@@ -377,7 +403,7 @@ static const struct intel_limit intel_limits_pineview_lvds = {
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
-static const struct intel_limit intel_limits_ironlake_dac = {
+static const struct intel_limit ilk_limits_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
@@ -390,7 +416,7 @@ static const struct intel_limit intel_limits_ironlake_dac = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const struct intel_limit intel_limits_ironlake_single_lvds = {
+static const struct intel_limit ilk_limits_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -403,7 +429,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const struct intel_limit intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit ilk_limits_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -417,7 +443,7 @@ static const struct intel_limit intel_limits_ironlake_dual_lvds = {
};
/* LVDS 100mhz refclk limits. */
-static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit ilk_limits_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
@@ -430,7 +456,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -489,7 +515,7 @@ static const struct intel_limit intel_limits_bxt = {
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -515,9 +541,22 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct drm_crtc_state *state)
+needs_modeset(const struct intel_crtc_state *state)
{
- return drm_atomic_crtc_needs_modeset(state);
+ return drm_atomic_crtc_needs_modeset(&state->uapi);
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
+ crtc_state->sync_mode_slaves_mask);
+}
+
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
/*
@@ -631,7 +670,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -667,7 +706,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int err = target;
@@ -725,7 +764,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int err = target;
@@ -781,7 +820,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int max_n;
bool found = false;
@@ -875,7 +914,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct dpll clock;
unsigned int bestppm = 1000000;
@@ -935,7 +974,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
struct dpll clock;
@@ -998,33 +1037,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
NULL, best_clock);
}
-bool intel_crtc_active(struct intel_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- *
- * We can ditch the adjusted_mode.crtc_clock check as soon
- * as Haswell has gained clock readout/fastboot support.
- *
- * We can ditch the crtc->primary->state->fb check as soon as we can
- * properly reconstruct framebuffers.
- *
- * FIXME: The intel_crtc->active here should be switched to
- * crtc->state->active once we have proper CRTC states wired up
- * for atomic.
- */
- return crtc->active && crtc->base.primary->state->fb &&
- crtc->config->base.adjusted_mode.crtc_clock;
-}
-
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- return crtc->config->cpu_transcoder;
-}
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1068,7 +1080,7 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1076,9 +1088,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, I965_PIPECONF_ACTIVE, 0,
- 100))
+ if (intel_de_wait_for_clear(dev_priv, reg,
+ I965_PIPECONF_ACTIVE, 100))
WARN(1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -1119,11 +1130,15 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
if (HAS_DDI(dev_priv)) {
- /* DDI does not have a specific FDI_TX register */
+ /*
+ * DDI does not have a specific FDI_TX register.
+ *
+ * FDI is never fed from EDP transcoder
+ * so pipe->transcoder cast is fine here.
+ */
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
@@ -1240,11 +1255,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
}
void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1264,8 +1277,9 @@ void assert_pipe(struct drm_i915_private *dev_priv,
}
I915_STATE_WARN(cur_state != state,
- "pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), onoff(state), onoff(cur_state));
+ "transcoder %s assertion failure (expected %s, current %s)\n",
+ transcoder_name(cpu_transcoder),
+ onoff(state), onoff(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
@@ -1382,11 +1396,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL(pipe),
- DPLL_LOCK_VLV,
- DPLL_LOCK_VLV,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
@@ -1396,7 +1406,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
@@ -1435,9 +1445,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
}
@@ -1447,7 +1455,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
@@ -1494,7 +1502,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
u32 dpll = crtc_state->dpll_hw_state.dpll;
int i;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
@@ -1534,7 +1542,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -1543,7 +1551,7 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
return;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
@@ -1554,7 +1562,7 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1571,7 +1579,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1616,17 +1624,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (intel_wait_for_register(&dev_priv->uncore,
- dpll_reg, port_mask, expected_mask,
- 1000))
- WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->base.port),
+ if (intel_de_wait_for_register(dev_priv, dpll_reg,
+ port_mask, expected_mask, 1000))
+ WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -1640,11 +1647,16 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
assert_fdi_rx_enabled(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
- /* Workaround: Set the timing override bit before enabling the
- * pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
+ /*
+ * Workaround: Set the timing override bit
+ * before enabling the pch transcoder.
+ */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
I915_WRITE(reg, val);
}
@@ -1653,6 +1665,10 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(0);
+
/*
* Make the BPC in transcoder be consistent with
* that in pipeconf reg. For HDMI we must use 8bpc
@@ -1677,9 +1693,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
I915_WRITE(reg, val | TRANS_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
- 100))
+ if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
@@ -1692,9 +1706,12 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
- /* Workaround: set timing override bit. */
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ /* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -1707,16 +1724,13 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (intel_wait_for_register(&dev_priv->uncore,
- LPT_TRANSCONF,
- TRANS_STATE_ENABLE,
- TRANS_STATE_ENABLE,
- 100))
+ if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
i915_reg_t reg;
u32 val;
@@ -1733,9 +1747,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, TRANS_STATE_ENABLE, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
@@ -1755,9 +1767,8 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(&dev_priv->uncore,
- LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
@@ -1778,7 +1789,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* On i965gm the hardware frame counter reads
@@ -1798,16 +1809,25 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
drm_crtc_vblank_on(&crtc->base);
}
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1863,9 +1883,9 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
intel_wait_for_pipe_scanline_moving(crtc);
}
-static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1908,6 +1928,74 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
+static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (!is_ccs_modifier(fb->modifier))
+ return false;
+
+ return plane >= fb->format->num_planes / 2;
+}
+
+static bool is_gen12_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+}
+
+static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+}
+
+static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return is_ccs_plane(fb, plane);
+
+ return plane == 1;
+}
+
+static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ WARN_ON(!is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
+
+ return fb->format->num_planes / 2 + main_plane;
+}
+
+static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+ WARN_ON(!is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
+
+ return ccs_plane - fb->format->num_planes / 2;
+}
+
+/* Return either the main plane's CCS plane or, if not a CCS FB, the UV plane */
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return main_to_ccs_plane(fb, main_plane);
+
+ return 1;
+}
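
The plane indexing used by the CCS helpers above can be checked with literal values; a sketch assuming a four-plane semiplanar CCS layout (Y, UV, Y-CCS, UV-CCS):

#include <assert.h>

int main(void)
{
	int num_planes = 4;	/* planes 0..1 are main, 2..3 are their CCS */

	/* main_to_ccs_plane(): num_planes / 2 + main_plane */
	assert(num_planes / 2 + 0 == 2);	/* Y  -> Y-CCS  */
	assert(num_planes / 2 + 1 == 3);	/* UV -> UV-CCS */

	/* ccs_to_main_plane(): ccs_plane - num_planes / 2 */
	assert(3 - num_planes / 2 == 1);	/* UV-CCS -> UV */
	return 0;
}
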
+
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ uint64_t modifier)
+{
+ return info->is_yuv &&
+ info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
+}
+
+static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ color_plane == 1;
+}
+
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
@@ -1923,16 +2011,21 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
- if (color_plane == 1)
+ if (is_ccs_plane(fb, color_plane))
return 128;
/* fall through */
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 64;
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (color_plane == 1)
+ if (is_ccs_plane(fb, color_plane))
return 128;
/* fall through */
case I915_FORMAT_MOD_Yf_TILED:
@@ -1959,6 +2052,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
+ if (is_gen12_ccs_plane(fb, color_plane))
+ return 1;
+
return intel_tile_size(to_i915(fb->dev)) /
intel_tile_width_bytes(fb, color_plane);
}
@@ -1972,7 +2068,17 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
unsigned int cpp = fb->format->cpp[color_plane];
*tile_width = tile_width_bytes / cpp;
- *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
+ *tile_height = intel_tile_height(fb, color_plane);
+}
+
+static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ unsigned int tile_width, tile_height;
+
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ return fb->pitches[color_plane] * tile_height;
}
unsigned int
@@ -2049,7 +2155,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (color_plane == 1)
+ if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
+ is_ccs_plane(fb, color_plane))
return 4096;
switch (fb->modifier) {
@@ -2059,9 +2166,19 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_semiplanar_uv_plane(fb, color_plane))
+ return intel_tile_row_size(fb, color_plane);
+ /* Fall-through */
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ return intel_tile_row_size(fb, color_plane);
+ /* Fall-through */
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
@@ -2072,7 +2189,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return INTEL_GEN(dev_priv) < 4 ||
@@ -2094,9 +2211,12 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
+ if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
@@ -2114,19 +2234,18 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* pin/unpin/fence and not more.
*/
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- pinctl = 0;
-
- /* Valleyview is definitely limited to scanning out the first
+ /*
+ * Valleyview is definitely limited to scanning out the first
* 512MiB. Lets presume this behaviour was inherited from the
* g4x display engine and that all earlier gen are similarly
* limited. Testing suggests that it is a little more
* complicated than this. For example, Cherryview appears quite
* happy to scanout from anywhere within its global aperture.
*/
+ pinctl = 0;
if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
@@ -2138,7 +2257,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
int ret;
- /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always, when
* possible, install a fence as the cost is not that onerous.
@@ -2168,16 +2288,12 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
i915_vma_get(vma);
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- i915_gem_object_unlock(obj);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
@@ -2206,7 +2322,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int color_plane)
{
- const struct drm_framebuffer *fb = state->base.fb;
+ const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
unsigned int pitch = state->color_plane[color_plane].stride;
@@ -2254,9 +2370,10 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
-static bool is_surface_linear(u64 modifier, int color_plane)
+static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
- return modifier == DRM_FORMAT_MOD_LINEAR;
+ return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+ is_gen12_ccs_plane(fb, color_plane);
}
static u32 intel_adjust_aligned_offset(int *x, int *y,
@@ -2271,7 +2388,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (!is_surface_linear(fb->modifier, color_plane)) {
+ if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2307,8 +2424,8 @@ static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
int color_plane,
u32 old_offset, u32 new_offset)
{
- return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
- state->base.rotation,
+ return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
+ state->hw.rotation,
state->color_plane[color_plane].stride,
old_offset, new_offset);
}
@@ -2338,10 +2455,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
- if (alignment)
- alignment--;
-
- if (!is_surface_linear(fb->modifier, color_plane)) {
+ if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2362,17 +2476,24 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
*x %= tile_width;
offset = (tile_rows * pitch_tiles + tiles) * tile_size;
- offset_aligned = offset & ~alignment;
+
+ offset_aligned = offset;
+ if (alignment)
+ offset_aligned = rounddown(offset_aligned, alignment);
intel_adjust_tile_offset(x, y, tile_width, tile_height,
tile_size, pitch_tiles,
offset, offset_aligned);
} else {
offset = *y * pitch + *x * cpp;
- offset_aligned = offset & ~alignment;
-
- *y = (offset & alignment) / pitch;
- *x = ((offset & alignment) - *y * pitch) / cpp;
+ offset_aligned = offset;
+ if (alignment) {
+ offset_aligned = rounddown(offset_aligned, alignment);
+ *y = (offset % alignment) / pitch;
+ *x = ((offset % alignment) - *y * pitch) / cpp;
+ } else {
+ *y = *x = 0;
+ }
}
return offset_aligned;
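
The switch from masking to rounddown() above matters once the alignment can be a tile row size rather than a power of two; a small worked example with illustrative numbers:

#include <assert.h>

static unsigned int rounddown_u32(unsigned int x, unsigned int y)
{
	return x - (x % y);	/* same result as the kernel's rounddown() */
}

int main(void)
{
	unsigned int offset = 5000;
	unsigned int alignment = 3072;	/* e.g. a tile row, not a power of two */

	/* the old mask-based rounding silently yields a non-multiple */
	assert((offset & ~(alignment - 1)) == 4096);
	assert(4096 % alignment != 0);

	/* rounddown() stays correct for any alignment */
	assert(rounddown_u32(offset, alignment) == 3072);
	return 0;
}
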
@@ -2382,10 +2503,10 @@ static u32 intel_plane_compute_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane)
{
- struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+ struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- const struct drm_framebuffer *fb = state->base.fb;
- unsigned int rotation = state->base.rotation;
+ const struct drm_framebuffer *fb = state->hw.fb;
+ unsigned int rotation = state->hw.rotation;
int pitch = state->color_plane[color_plane].stride;
u32 alignment;
@@ -2405,9 +2526,17 @@ static int intel_fb_offset_to_xy(int *x, int *y,
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int height;
+ u32 alignment;
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ alignment = intel_tile_row_size(fb, color_plane);
+ else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ alignment = intel_tile_size(dev_priv);
+ else
+ alignment = 0;
+
+ if (alignment != 0 && fb->offsets[color_plane] % alignment) {
DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
fb->offsets[color_plane], color_plane);
return -EINVAL;
@@ -2443,6 +2572,8 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
@@ -2463,7 +2594,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
* us a ratio of one byte in the CCS for each 8x16 pixels in the
* main surface.
*/
-static const struct drm_format_info ccs_formats[] = {
+static const struct drm_format_info skl_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
@@ -2474,6 +2605,52 @@ static const struct drm_format_info ccs_formats[] = {
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
+/*
+ * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
+ * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
+ * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
+ * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
+ * the main surface.
+ */
+static const struct drm_format_info gen12_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUYV, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .num_planes = 4,
+ .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P010, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P012, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P016, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+};
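
The ratio stated in the comment above gen12_ccs_formats can be double-checked with the numbers it gives (4 KiB Y-tiles of 32x32 pixels at 4 bytes per pixel, one 64 B CCS cache line per 4x1 tile group):

#include <assert.h>

int main(void)
{
	const unsigned int cpp = 4;			/* 4 byte pixels */
	const unsigned int tile_w = 32, tile_h = 32;	/* Y-tile, in pixels */
	const unsigned int tiles_per_line = 4;		/* 4x1 Y-tile group */
	const unsigned int ccs_line = 64;		/* bytes */

	unsigned int main_bytes = tiles_per_line * tile_w * tile_h * cpp;

	/* 16 KiB of main surface per CCS cache line -> 256:1 byte ratio */
	assert(main_bytes / ccs_line == 256);
	/* which is exactly one CCS byte per 2x32 pixel area at 4 Bpp */
	assert(2 * 32 * cpp == 256);
	return 0;
}
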
+
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2494,8 +2671,13 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
switch (cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
- return lookup_format_info(ccs_formats,
- ARRAY_SIZE(ccs_formats),
+ return lookup_format_info(skl_ccs_formats,
+ ARRAY_SIZE(skl_ccs_formats),
+ cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return lookup_format_info(gen12_ccs_formats,
+ ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
default:
return NULL;
@@ -2504,10 +2686,18 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
- return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
+static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
+{
+ return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
+ 512) * 64;
+}
+
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -2519,6 +2709,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
* the highest stride limits of them all.
*/
crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ if (!crtc)
+ return 0;
+
plane = to_intel_plane(crtc->base.primary);
return plane->max_stride(plane, pixel_format, modifier,
@@ -2549,8 +2742,9 @@ static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 tile_width;
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ if (is_surface_linear(fb, color_plane)) {
u32 max_stride = intel_plane_fb_max_stride(dev_priv,
fb->format->format,
fb->modifier);
@@ -2559,20 +2753,41 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* To make remapping with linear generally feasible
* we need the stride to be page aligned.
*/
- if (fb->pitches[color_plane] > max_stride)
+ if (fb->pitches[color_plane] > max_stride &&
+ !is_ccs_modifier(fb->modifier))
return intel_tile_size(dev_priv);
else
return 64;
- } else {
- return intel_tile_width_bytes(fb, color_plane);
}
+
+ tile_width = intel_tile_width_bytes(fb, color_plane);
+ if (is_ccs_modifier(fb->modifier)) {
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+	 * plane stride to be a multiple of 4 tile widths. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+ tile_width *= 4;
+ /*
+ * The main surface pitch must be padded to a multiple of four
+ * tile widths.
+ */
+ else if (INTEL_GEN(dev_priv) >= 12)
+ tile_width *= 4;
+ }
+ return tile_width;
}
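
A worked example of the gen12 CCS padding rule above, assuming the usual 128 byte Y-tile row width (illustrative numbers only):

#include <assert.h>

int main(void)
{
	unsigned int tile_width = 128;		/* Y-tile row, in bytes */
	unsigned int align = tile_width * 4;	/* gen12 CCS: 4 tile widths */
	unsigned int pitch = 1000 * 4;		/* a 1000 px wide fb at 4 Bpp */

	/* the fb pitch would have to be padded up to the next multiple */
	unsigned int padded = (pitch + align - 1) / align * align;

	assert(align == 512);
	assert(padded == 4096);
	return 0;
}
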
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int i;
/* We don't want to deal with remapping with cursors */
@@ -2610,16 +2825,16 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride, max_stride;
/*
* No remapping for invisible planes since we don't have
* an actual source viewport to remap.
*/
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return false;
if (!intel_plane_can_remap(plane_state))
@@ -2636,12 +2851,171 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
return stride > max_stride;
}
+static void
+intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ int main_plane;
+
+ if (color_plane == 0) {
+ *hsub = 1;
+ *vsub = 1;
+
+ return;
+ }
+
+ /*
+	 * TODO: Deduce the subsampling from the char block for all CCS
+ * formats and planes.
+ */
+ if (!is_gen12_ccs_plane(fb, color_plane)) {
+ *hsub = fb->format->hsub;
+ *vsub = fb->format->vsub;
+
+ return;
+ }
+
+ main_plane = ccs_to_main_plane(fb, color_plane);
+ *hsub = drm_format_info_block_width(fb->format, color_plane) /
+ drm_format_info_block_width(fb->format, main_plane);
+
+ /*
+ * The min stride check in the core framebuffer_check() function
+ * assumes that format->hsub applies to every plane except for the
+ * first plane. That's incorrect for the CCS AUX plane of the first
+ * plane, but for the above check to pass we must define the block
+ * width with that subsampling applied to it. Adjust the width here
+ * accordingly, so we can calculate the actual subsampling factor.
+ */
+ if (main_plane == 0)
+ *hsub *= fb->format->hsub;
+
+ *vsub = 32;
+}
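
Tracing intel_fb_plane_get_subsampling() with the NV12 entry from gen12_ccs_formats (block_w = {1, 1, 4, 4}, hsub = 2) gives the values below; the sketch simply replays that arithmetic with literals.

#include <assert.h>

int main(void)
{
	int block_w[4] = { 1, 1, 4, 4 };	/* from the NV12 entry above */
	int fmt_hsub = 2, num_planes = 4;
	int ccs_plane = 2;			/* CCS plane of the Y plane */
	int main_plane = ccs_plane - num_planes / 2;

	int hsub = block_w[ccs_plane] / block_w[main_plane];
	if (main_plane == 0)
		hsub *= fmt_hsub;	/* compensate for the core min-stride check */

	assert(main_plane == 0 && hsub == 8);
	/* vsub is hardcoded to 32 for gen12 CCS planes */
	return 0;
}
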
+static int
+intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ int main_plane;
+ int hsub, vsub;
+ int tile_width, tile_height;
+ int ccs_x, ccs_y;
+ int main_x, main_y;
+
+ if (!is_ccs_plane(fb, ccs_plane))
+ return 0;
+
+ intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ tile_width *= hsub;
+ tile_height *= vsub;
+
+ ccs_x = (x * hsub) % tile_width;
+ ccs_y = (y * vsub) % tile_height;
+
+ main_plane = ccs_to_main_plane(fb, ccs_plane);
+ main_x = intel_fb->normal[main_plane].x % tile_width;
+ main_y = intel_fb->normal[main_plane].y % tile_height;
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal[main_plane].x,
+ intel_fb->normal[main_plane].y,
+ x, y);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
+{
+ int main_plane = is_ccs_plane(fb, color_plane) ?
+ ccs_to_main_plane(fb, color_plane) : 0;
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
+ *w = fb->width / main_hsub / hsub;
+ *h = fb->height / main_vsub / vsub;
+}
+
+/*
+ * Set up the rotated view for an FB plane and return the size the GTT mapping
+ * requires for this view.
+ */
+static u32
+setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
+ u32 gtt_offset_rotated, int x, int y,
+ unsigned int width, unsigned int height,
+ unsigned int tile_size,
+ unsigned int tile_width, unsigned int tile_height,
+ struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ unsigned int pitch_tiles;
+ struct drm_rect r;
+
+ /* Y or Yf modifiers required for 90/270 rotation */
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED)
+ return 0;
+
+ if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+ return 0;
+
+ rot_info->plane[plane] = *plane_info;
+
+ intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
+
+ /* rotate the x/y offsets to match the GTT view */
+ drm_rect_init(&r, x, y, width, height);
+ drm_rect_rotate(&r,
+ plane_info->width * tile_width,
+ plane_info->height * tile_height,
+ DRM_MODE_ROTATE_270);
+ x = r.x1;
+ y = r.y1;
+
+ /* rotate the tile dimensions to match the GTT view */
+ pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
+ swap(tile_width, tile_height);
+
+ /*
+ * We only keep the x/y offsets, so push all of the
+ * gtt offset into the x/y offsets.
+ */
+ intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset_rotated * tile_size, 0);
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the rotated gtt mapping.
+ */
+ intel_fb->rotated[plane].x = x;
+ intel_fb->rotated[plane].y = y;
+
+ return plane_info->width * plane_info->height;
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_rotation_info *rot_info = &intel_fb->rot_info;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
@@ -2656,8 +3030,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int ret;
cpp = fb->format->cpp[i];
- width = drm_framebuffer_plane_width(fb->width, fb, i);
- height = drm_framebuffer_plane_height(fb->height, fb, i);
+ intel_fb_plane_dims(&width, &height, fb, i);
ret = intel_fb_offset_to_xy(&x, &y, fb, i);
if (ret) {
@@ -2666,36 +3039,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
return ret;
}
- if (is_ccs_modifier(fb->modifier) && i == 1) {
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int tile_width, tile_height;
- int main_x, main_y;
- int ccs_x, ccs_y;
-
- intel_tile_dims(fb, i, &tile_width, &tile_height);
- tile_width *= hsub;
- tile_height *= vsub;
-
- ccs_x = (x * hsub) % tile_width;
- ccs_y = (y * vsub) % tile_height;
- main_x = intel_fb->normal[0].x % tile_width;
- main_y = intel_fb->normal[0].y % tile_height;
-
- /*
- * CCS doesn't have its own x/y offset register, so the intra CCS tile
- * x/y offsets must match between CCS and the main surface.
- */
- if (main_x != ccs_x || main_y != ccs_y) {
- DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
- main_x, main_y,
- ccs_x, ccs_y,
- intel_fb->normal[0].x,
- intel_fb->normal[0].y,
- x, y);
- return -EINVAL;
- }
- }
+ ret = intel_fb_check_ccs_xy(fb, i, x, y);
+ if (ret)
+ return ret;
/*
* The fence (if used) is aligned to the start of the object
@@ -2726,23 +3072,21 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
tile_size);
offset /= tile_size;
- if (!is_surface_linear(fb->modifier, i)) {
+ if (!is_surface_linear(fb, i)) {
+ struct intel_remapped_plane_info plane_info;
unsigned int tile_width, tile_height;
- unsigned int pitch_tiles;
- struct drm_rect r;
intel_tile_dims(fb, i, &tile_width, &tile_height);
- rot_info->plane[i].offset = offset;
- rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
- rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
- rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
- intel_fb->rotated[i].pitch =
- rot_info->plane[i].height * tile_height;
+ plane_info.offset = offset;
+ plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
+ tile_width * cpp);
+ plane_info.width = DIV_ROUND_UP(x + width, tile_width);
+ plane_info.height = DIV_ROUND_UP(y + height,
+ tile_height);
/* how many tiles does this plane need */
- size = rot_info->plane[i].stride * rot_info->plane[i].height;
+ size = plane_info.stride * plane_info.height;
/*
* If the plane isn't horizontally tile aligned,
* we need one more tile.
@@ -2750,39 +3094,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
if (x != 0)
size++;
- /* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
- drm_rect_rotate(&r,
- rot_info->plane[i].width * tile_width,
- rot_info->plane[i].height * tile_height,
- DRM_MODE_ROTATE_270);
- x = r.x1;
- y = r.y1;
-
- /* rotate the tile dimensions to match the GTT view */
- pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
- swap(tile_width, tile_height);
-
- /*
- * We only keep the x/y offsets, so push all of the
- * gtt offset into the x/y offsets.
- */
- intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset_rotated * tile_size, 0);
-
- gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
-
- /*
- * First pixel of the framebuffer from
- * the start of the rotated gtt mapping.
- */
- intel_fb->rotated[i].x = x;
- intel_fb->rotated[i].y = y;
+ gtt_offset_rotated +=
+ setup_fb_rotation(i, &plane_info,
+ gtt_offset_rotated,
+ x, y, width, height,
+ tile_size,
+ tile_width, tile_height,
+ fb);
} else {
size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
x * cpp, tile_size);
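
A small illustration (assumed numbers, not from the patch) of the tile-column math in the hunk above: DIV_ROUND_UP(x + width, tile_width) counts the tile columns the plane touches, which is one more than the width alone would suggest whenever the plane does not start on a tile boundary, hence the extra "size++" when x != 0.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Assumed numbers: a 1000 pixel wide plane starting 100 pixels into a
 * row of 128 pixel wide tiles touches 9 tile columns instead of 8. */
int main(void)
{
	unsigned int x = 100, width = 1000, tile_width = 128;

	printf("tile columns: %u\n", DIV_ROUND_UP(x + width, tile_width));	/* 9 */
	printf("tile columns at x == 0: %u\n", DIV_ROUND_UP(width, tile_width));	/* 8 */
	return 0;
}
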
@@ -2805,11 +3123,11 @@ static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *info = &plane_state->view.rotated;
- unsigned int rotation = plane_state->base.rotation;
+ unsigned int rotation = plane_state->hw.rotation;
int i, num_planes = fb->format->num_planes;
unsigned int tile_size = intel_tile_size(dev_priv);
unsigned int src_x, src_y;
@@ -2820,20 +3138,20 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
plane_state->view.type = drm_rotation_90_or_270(rotation) ?
I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
WARN_ON(is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
- drm_rect_translate(&plane_state->base.src,
+ drm_rect_translate(&plane_state->uapi.src,
-(src_x << 16), -(src_y << 16));
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
+ drm_rect_rotate(&plane_state->uapi.src,
src_w << 16, src_h << 16,
DRM_MODE_ROTATE_270);
@@ -2866,6 +3184,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
+ WARN_ON(i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -2876,10 +3195,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
struct drm_rect r;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
info->plane[i].width * tile_width,
info->plane[i].height * tile_height,
@@ -2918,8 +3234,8 @@ static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
- to_intel_framebuffer(plane_state->base.fb);
- unsigned int rotation = plane_state->base.rotation;
+ to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
int i, num_planes;
if (!fb)
@@ -2956,7 +3272,7 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state)
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
+ drm_rect_rotate(&plane_state->uapi.src,
fb->base.width << 16, fb->base.height << 16,
DRM_MODE_ROTATE_270);
@@ -2968,6 +3284,8 @@ static int i9xx_format_to_fourcc(int format)
switch (format) {
case DISPPLANE_8BPP:
return DRM_FORMAT_C8;
+ case DISPPLANE_BGRA555:
+ return DRM_FORMAT_ARGB1555;
case DISPPLANE_BGRX555:
return DRM_FORMAT_XRGB1555;
case DISPPLANE_BGRX565:
@@ -2977,10 +3295,20 @@ static int i9xx_format_to_fourcc(int format)
return DRM_FORMAT_XRGB8888;
case DISPPLANE_RGBX888:
return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRA888:
+ return DRM_FORMAT_ARGB8888;
+ case DISPPLANE_RGBA888:
+ return DRM_FORMAT_ABGR8888;
case DISPPLANE_BGRX101010:
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_BGRA101010:
+ return DRM_FORMAT_ARGB2101010;
+ case DISPPLANE_RGBA101010:
+ return DRM_FORMAT_ABGR2101010;
+ case DISPPLANE_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
}
}
@@ -3023,10 +3351,17 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_XRGB8888;
}
case PLANE_CTL_FORMAT_XRGB_2101010:
- if (rgb_order)
- return DRM_FORMAT_XBGR2101010;
- else
- return DRM_FORMAT_XRGB2101010;
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR2101010;
+ else
+ return DRM_FORMAT_XBGR2101010;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
case PLANE_CTL_FORMAT_XRGB_16161616F:
if (rgb_order) {
if (alpha)
@@ -3048,12 +3383,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
+ struct drm_i915_gem_object *obj;
+ bool ret = false;
size_aligned -= base_aligned;
@@ -3077,13 +3413,11 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
}
- mutex_lock(&dev->struct_mutex);
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
- mutex_unlock(&dev->struct_mutex);
- if (!obj)
+ if (IS_ERR(obj))
return false;
switch (plane_config->tiling) {
@@ -3095,7 +3429,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
break;
default:
MISSING_CASE(plane_config->tiling);
- return false;
+ goto out;
}
mode_cmd.pixel_format = fb->format->format;
@@ -3107,16 +3441,15 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
DRM_DEBUG_KMS("intel fb init failed\n");
- goto out_unref_obj;
+ goto out;
}
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
- return true;
-
-out_unref_obj:
+ ret = true;
+out:
i915_gem_object_put(obj);
- return false;
+ return ret;
}
static void
@@ -3124,19 +3457,19 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- plane_state->base.visible = visible;
+ plane_state->uapi.visible = visible;
if (visible)
- crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
+ crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
else
- crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
+ crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
/*
@@ -3147,13 +3480,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->base.plane_mask)
+ crtc_state->uapi.plane_mask)
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -3166,13 +3500,40 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
- intel_pre_disable_primary_noatomic(&crtc->base);
+ hsw_disable_ips(crtc_state);
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, first
+ * disable the self-refresh mode. The self-refresh enable bit in turn
+ * moment. So to make sure the plane gets truly disabled, first
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH(dev_priv) &&
+ intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ */
+ if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
}
+static struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3180,7 +3541,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
- struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
struct intel_plane *intel_plane = to_intel_plane(primary);
@@ -3216,7 +3576,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (intel_plane_ggtt_offset(state) == plane_config->base) {
- fb = state->base.fb;
+ fb = state->hw.fb;
drm_framebuffer_get(fb);
goto valid_fb;
}
@@ -3234,19 +3594,17 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
- intel_state->base.rotation = plane_config->rotation;
+ intel_state->hw.rotation = plane_config->rotation;
intel_fill_fb_ggtt_view(&intel_state->view, fb,
- intel_state->base.rotation);
+ intel_state->hw.rotation);
intel_state->color_plane[0].stride =
- intel_fb_pitch(fb, 0, intel_state->base.rotation);
+ intel_fb_pitch(fb, 0, intel_state->hw.rotation);
- mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
&intel_state->view,
intel_plane_uses_fence(intel_state),
&intel_state->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
intel_crtc->pipe, PTR_ERR(intel_state->vma));
@@ -3256,8 +3614,7 @@ valid_fb:
return;
}
- obj = intel_fb_obj(fb);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
plane_state->src_x = 0;
plane_state->src_y = 0;
@@ -3269,17 +3626,18 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src = drm_plane_state_src(plane_state);
- intel_state->base.dst = drm_plane_state_dest(plane_state);
+ intel_state->uapi.src = drm_plane_state_src(plane_state);
+ intel_state->uapi.dst = drm_plane_state_dest(plane_state);
- if (i915_gem_object_is_tiled(obj))
+ if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
+ intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
- &obj->frontbuffer_bits);
+ &to_intel_frontbuffer(fb)->bits);
}
static int skl_max_plane_width(const struct drm_framebuffer *fb,
@@ -3291,9 +3649,23 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
- return 4096;
+ /*
+ * Validated limit is 4k, but 5k should
+ * work apart from the following features:
+ * - Ytile (already limited to 4k)
+ * - FP16 (already limited to 4k)
+ * - render compression (already limited to 4k)
+ * - KVMR sprite and cursor (don't care)
+ * - horizontal panning (TODO verify this)
+ * - pipe and plane scaling (TODO verify this)
+ */
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
/* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
@@ -3342,17 +3714,30 @@ static int icl_max_plane_width(const struct drm_framebuffer *fb,
return 5120;
}
-static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
- int main_x, int main_y, u32 main_offset)
+static int skl_max_plane_height(void)
+{
+ return 4096;
+}
+
+static int icl_max_plane_height(void)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int aux_x = plane_state->color_plane[1].x;
- int aux_y = plane_state->color_plane[1].y;
- u32 aux_offset = plane_state->color_plane[1].offset;
- u32 alignment = intel_surf_alignment(fb, 1);
+ return 4320;
+}
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset,
+ int ccs_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_x = plane_state->color_plane[ccs_plane].x;
+ int aux_y = plane_state->color_plane[ccs_plane].y;
+ u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, ccs_plane);
+ int hsub;
+ int vsub;
+
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
while (aux_offset >= main_offset && aux_y <= main_y) {
int x, y;
@@ -3364,8 +3749,12 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
x = aux_x / hsub;
y = aux_y / vsub;
- aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
- aux_offset, aux_offset - alignment);
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane,
+ aux_offset,
+ aux_offset -
+ alignment);
aux_x = x * hsub + aux_x % hsub;
aux_y = y * vsub + aux_y % vsub;
}
@@ -3373,25 +3762,28 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
if (aux_x != main_x || aux_y != main_y)
return false;
- plane_state->color_plane[1].offset = aux_offset;
- plane_state->color_plane[1].x = aux_x;
- plane_state->color_plane[1].y = aux_y;
+ plane_state->color_plane[ccs_plane].offset = aux_offset;
+ plane_state->color_plane[ccs_plane].x = aux_x;
+ plane_state->color_plane[ccs_plane].y = aux_y;
return true;
}
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->base.src) >> 16;
- int h = drm_rect_height(&plane_state->base.src) >> 16;
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int max_width;
- int max_height = 4096;
- u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+ int max_height;
+ u32 alignment;
+ u32 offset;
+ int aux_plane = intel_main_to_aux_plane(fb, 0);
+ u32 aux_offset = plane_state->color_plane[aux_plane].offset;
if (INTEL_GEN(dev_priv) >= 11)
max_width = icl_max_plane_width(fb, 0, rotation);
@@ -3400,6 +3792,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
else
max_width = skl_max_plane_width(fb, 0, rotation);
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_height = icl_max_plane_height();
+ else
+ max_height = skl_max_plane_height();
+
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
w, h, max_width, max_height);
@@ -3409,6 +3806,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
+ if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ return -EINVAL;
/*
* AUX surface offset is specified as the distance from the
@@ -3444,7 +3843,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* they match with the main surface x/y offsets.
*/
if (is_ccs_modifier(fb->modifier)) {
- while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, aux_plane)) {
if (offset == 0)
break;
@@ -3452,7 +3852,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
offset, offset - alignment);
}
- if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
+ if (x != plane_state->color_plane[aux_plane].x ||
+ y != plane_state->color_plane[aux_plane].y) {
DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
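
For illustration only (made-up numbers): the alignment handling in these surface checks relies on the alignment being a power of two, which the WARN_ON(!is_power_of_2(alignment)) added above enforces. That is what lets an offset be rounded down with a simple mask, as the CCS path below does with aux_offset & ~(alignment - 1).

#include <assert.h>
#include <stdio.h>

/* Align-down only works as a mask when align is a power of two. */
static unsigned int align_down(unsigned int offset, unsigned int align)
{
	assert(align && !(align & (align - 1)));
	return offset & ~(align - 1);
}

int main(void)
{
	printf("0x%x\n", align_down(0x493e0, 0x1000));	/* 0x49000 */
	return 0;
}
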
@@ -3466,27 +3867,28 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (x << 16) - plane_state->base.src.x1,
- (y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->uapi.src,
+ x << 16, y << 16);
return 0;
}
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int max_width = skl_max_plane_width(fb, 1, rotation);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int uv_plane = 1;
+ int max_width = skl_max_plane_width(fb, uv_plane, rotation);
int max_height = 4096;
- int x = plane_state->base.src.x1 >> 17;
- int y = plane_state->base.src.y1 >> 17;
- int w = drm_rect_width(&plane_state->base.src) >> 17;
- int h = drm_rect_height(&plane_state->base.src) >> 17;
+ int x = plane_state->uapi.src.x1 >> 17;
+ int y = plane_state->uapi.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 17;
u32 offset;
- intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+ intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state, uv_plane);
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
@@ -3495,62 +3897,126 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
- plane_state->color_plane[1].offset = offset;
- plane_state->color_plane[1].x = x;
- plane_state->color_plane[1].y = y;
+ if (is_ccs_modifier(fb->modifier)) {
+ int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+ int aux_offset = plane_state->color_plane[ccs_plane].offset;
+ int alignment = intel_surf_alignment(fb, uv_plane);
+
+ if (offset > aux_offset)
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset,
+ aux_offset & ~(alignment - 1));
+
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, ccs_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->color_plane[ccs_plane].x ||
+ y != plane_state->color_plane[ccs_plane].y) {
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ plane_state->color_plane[uv_plane].offset = offset;
+ plane_state->color_plane[uv_plane].x = x;
+ plane_state->color_plane[uv_plane].y = y;
return 0;
}
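
A short sketch (assumed value, not from the patch) of the ">> 17" shifts above: source coordinates are kept in 16.16 fixed point, and the UV plane of a semiplanar format is subsampled 2x, so shifting by 17 is equivalent to converting to integer pixels and then halving.

#include <stdio.h>

/* Assumed value: a luma x coordinate of 200 pixels in 16.16 fixed point
 * maps to chroma sample 100 on a 2x horizontally subsampled plane. */
int main(void)
{
	int src_x1 = 200 << 16;		/* 16.16 fixed point, as in uapi.src */
	int uv_x = src_x1 >> 17;	/* same as (src_x1 >> 16) / 2 */

	printf("uv_x = %d\n", uv_x);	/* 100 */
	return 0;
}
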
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int src_x = plane_state->base.src.x1 >> 16;
- int src_y = plane_state->base.src.y1 >> 16;
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int x = src_x / hsub;
- int y = src_y / vsub;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x = plane_state->uapi.src.x1 >> 16;
+ int src_y = plane_state->uapi.src.y1 >> 16;
u32 offset;
+ int ccs_plane;
+
+ for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+ int x, y;
+
+ if (!is_ccs_plane(fb, ccs_plane))
+ continue;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+ ccs_to_main_plane(fb, ccs_plane));
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
- intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+ hsub *= main_hsub;
+ vsub *= main_vsub;
+ x = src_x / hsub;
+ y = src_y / vsub;
+
+ intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
+
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane);
- plane_state->color_plane[1].offset = offset;
- plane_state->color_plane[1].x = x * hsub + src_x % hsub;
- plane_state->color_plane[1].y = y * vsub + src_y % vsub;
+ plane_state->color_plane[ccs_plane].offset = offset;
+ plane_state->color_plane[ccs_plane].x = (x * hsub +
+ src_x % hsub) /
+ main_hsub;
+ plane_state->color_plane[ccs_plane].y = (y * vsub +
+ src_y % vsub) /
+ main_vsub;
+ }
return 0;
}
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
+ bool needs_aux = false;
ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
/*
- * Handle the AUX surface first since
- * the main surface setup depends on it.
+ * Handle the AUX surface first since the main surface setup depends on
+ * it.
*/
- if (is_planar_yuv_format(fb->format->format)) {
- ret = skl_check_nv12_aux_surface(plane_state);
+ if (is_ccs_modifier(fb->modifier)) {
+ needs_aux = true;
+ ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
- } else if (is_ccs_modifier(fb->modifier)) {
- ret = skl_check_ccs_aux_surface(plane_state);
+ }
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format,
+ fb->modifier)) {
+ needs_aux = true;
+ ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
- } else {
- plane_state->color_plane[1].offset = ~0xfff;
- plane_state->color_plane[1].x = 0;
- plane_state->color_plane[1].y = 0;
+ }
+
+ if (!needs_aux) {
+ int i;
+
+ for (i = 1; i < fb->format->num_planes; i++) {
+ plane_state->color_plane[i].offset = ~0xfff;
+ plane_state->color_plane[i].x = 0;
+ plane_state->color_plane[i].y = 0;
+ }
}
ret = skl_check_main_surface(plane_state);
@@ -3560,6 +4026,53 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
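
A worked example (hypothetical pixel rate, not from the patch) of the ratio used above: with num/den = 10/8 a 64bpp plane needs cdclk to be at least 125% of the pixel rate, which is the same constraint as the pixel rate not exceeding 80% of cdclk.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Assumed: 148500 kHz pixel rate, cpp == 8, single-wide pipe. */
int main(void)
{
	unsigned int pixel_rate = 148500;	/* kHz */
	unsigned int num = 10, den = 8;		/* 64bpp: cdclk >= 10/8 of the pixel rate */

	printf("min cdclk: %u kHz\n", DIV_ROUND_UP(pixel_rate * num, den));	/* 185625 */
	return 0;
}
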
unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -3589,7 +4102,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
@@ -3609,9 +4122,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 dspcntr;
dspcntr = DISPLAY_PLANE_ENABLE;
@@ -3627,6 +4140,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB1555:
dspcntr |= DISPPLANE_BGRX555;
break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
@@ -3636,12 +4152,27 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
case DRM_FORMAT_XRGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
default:
MISSING_CASE(fb->format->format);
return 0;
@@ -3663,8 +4194,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int src_x, src_y;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x, src_y, src_w;
u32 offset;
int ret;
@@ -3672,11 +4204,16 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
@@ -3690,15 +4227,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (src_x << 16) - plane_state->base.src.x1,
- (src_y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->base.rotation;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
if (rotation & DRM_MODE_ROTATE_180) {
src_x += src_w - 1;
@@ -3715,21 +4251,39 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
int ret;
ret = chv_plane_check_rotation(plane_state);
if (ret)
return ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ i9xx_plane_has_windowing(plane),
+ true);
if (ret)
return ret;
@@ -3737,7 +4291,7 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -3758,6 +4312,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
u32 linear_offset;
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
unsigned long irqflags;
u32 dspaddr_offset;
u32 dspcntr;
@@ -3776,18 +4334,18 @@ static void i9xx_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (INTEL_GEN(dev_priv) < 4) {
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
*/
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+ I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
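
A toy encoding (made-up window, register names only echo the hunk above) of the position/size packing being programmed: y, or height minus one, goes in the high 16 bits and x, or width minus one, in the low 16 bits.

#include <stdint.h>
#include <stdio.h>

/* Made-up window: 1920x1080 at (64, 32). Sizes are programmed minus one. */
int main(void)
{
	int crtc_x = 64, crtc_y = 32, crtc_w = 1920, crtc_h = 1080;
	uint32_t pos  = ((uint32_t)crtc_y << 16) | (uint32_t)crtc_x;
	uint32_t size = ((uint32_t)(crtc_h - 1) << 16) | (uint32_t)(crtc_w - 1);

	printf("POS=0x%08x SIZE=0x%08x\n", pos, size);	/* 0x00200040 0x0437077f */
	return 0;
}
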
@@ -3897,7 +4455,7 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
*/
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
int i;
@@ -3916,7 +4474,7 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ if (is_surface_linear(fb, color_plane))
return 64;
else if (drm_rotation_90_or_270(rotation))
return intel_tile_height(fb, color_plane);
@@ -3927,8 +4485,8 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride = plane_state->color_plane[color_plane].stride;
if (color_plane >= fb->format->num_planes)
@@ -3950,10 +4508,12 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
- case DRM_FORMAT_XBGR2101010:
- return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
@@ -3997,10 +4557,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
- if (!plane_state->base.fb->format->has_alpha)
+ if (!plane_state->hw.fb->format->has_alpha)
return PLANE_CTL_ALPHA_DISABLE;
- switch (plane_state->base.pixel_blend_mode) {
+ switch (plane_state->hw.pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return PLANE_CTL_ALPHA_DISABLE;
case DRM_MODE_BLEND_PREMULTI:
@@ -4008,17 +4568,17 @@ static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
case DRM_MODE_BLEND_COVERAGE:
return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
default:
- MISSING_CASE(plane_state->base.pixel_blend_mode);
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
return PLANE_CTL_ALPHA_DISABLE;
}
}
static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
- if (!plane_state->base.fb->format->has_alpha)
+ if (!plane_state->hw.fb->format->has_alpha)
return PLANE_COLOR_ALPHA_DISABLE;
- switch (plane_state->base.pixel_blend_mode) {
+ switch (plane_state->hw.pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return PLANE_COLOR_ALPHA_DISABLE;
case DRM_MODE_BLEND_PREMULTI:
@@ -4026,7 +4586,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
case DRM_MODE_BLEND_COVERAGE:
return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
default:
- MISSING_CASE(plane_state->base.pixel_blend_mode);
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
return PLANE_COLOR_ALPHA_DISABLE;
}
}
@@ -4042,6 +4602,12 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return PLANE_CTL_TILED_Y |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -4092,7 +4658,7 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4111,9 +4677,9 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 plane_ctl;
@@ -4123,10 +4689,10 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
}
@@ -4148,7 +4714,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_color_ctl = 0;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4167,21 +4733,21 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
u32 plane_color_ctl = 0;
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
@@ -4200,7 +4766,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev, ctx);
- i915_redisable_vga(to_i915(dev));
+ intel_vga_redisable(to_i915(dev));
if (!state)
return 0;
@@ -4232,7 +4798,7 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(dev_priv));
+ intel_has_gpu_reset(&dev_priv->gt));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
@@ -4248,12 +4814,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.wait_queue);
+ set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
}
/*
@@ -4299,7 +4866,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -4318,7 +4885,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -4339,7 +4906,7 @@ unlock:
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
- clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
@@ -4366,50 +4933,41 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
-static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
- crtc->base.mode = new_crtc_state->base.mode;
+ u32 trans_ddi_func_ctl2_val;
+ u8 master_select;
/*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
+ * Configure the master select and enable Transcoder Port Sync for
+ * the slave CRTC's transcoder.
*/
+ if (crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
- I915_WRITE(PIPESRC(crtc->pipe),
- ((new_crtc_state->pipe_src_w - 1) << 16) |
- (new_crtc_state->pipe_src_h - 1));
+ if (crtc_state->master_transcoder == TRANSCODER_EDP)
+ master_select = 0;
+ else
+ master_select = crtc_state->master_transcoder + 1;
- /* on skylake this is done by detaching scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(new_crtc_state);
+ /* Set the master select bits for Transcoder Port Sync */
+ trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
+ PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
+ /* Enable Transcoder Port Sync */
+ trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(new_crtc_state);
- else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(old_crtc_state);
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(crtc);
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4447,17 +5005,17 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
}
/* The FDI link training functions for ILK/Ibexpeak. */
-static void ironlake_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
/* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, pipe);
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -4553,7 +5111,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -4686,7 +5244,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -4800,11 +5358,11 @@ train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4837,11 +5395,11 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
}
}
-static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4867,12 +5425,10 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void ironlake_fdi_disable(struct drm_crtc *crtc)
+static void ilk_fdi_disable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4963,9 +5519,9 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->base.adjusted_mode.crtc_clock;
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -5076,10 +5632,10 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
desired_divisor << auxdiv);
}
-static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
- enum pipe pch_transcoder)
+static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+ enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -5120,9 +5676,9 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
switch (crtc->pipe) {
@@ -5152,7 +5708,7 @@ static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_connector_state *connector_state;
const struct drm_connector *connector;
struct intel_encoder *encoder = NULL;
@@ -5181,19 +5737,19 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+static void ilk_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(crtc_state);
+ ivb_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -5230,7 +5786,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(crtc_state, pipe);
+ ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -5238,7 +5794,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -5262,13 +5818,13 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(crtc_state);
+ ilk_enable_pch_transcoder(crtc_state);
}
static void lpt_pch_enable(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -5277,14 +5833,14 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
+ ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
@@ -5380,15 +5936,16 @@ static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h,
- const struct drm_format_info *format, bool need_scaler)
+ const struct drm_format_info *format,
+ u64 modifier, bool need_scaler)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
- to_intel_crtc(crtc_state->base.crtc);
+ to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Src coordinates are already rotated by 270 degrees for
@@ -5404,7 +5961,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
- if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
@@ -5434,7 +5991,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && is_planar_yuv_format(format->format) &&
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
@@ -5476,17 +6033,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
need_scaler = true;
- return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+ return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, NULL, need_scaler);
+ adjusted_mode->crtc_vdisplay, NULL, 0,
+ need_scaler);
}
/**
@@ -5502,26 +6060,28 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_plane *intel_plane =
- to_intel_plane(plane_state->base.plane);
+ to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
- bool force_detach = !fb || !plane_state->base.visible;
+ bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && is_planar_yuv_format(fb->format->format))
+ fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
- drm_rect_width(&plane_state->base.src) >> 16,
- drm_rect_height(&plane_state->base.src) >> 16,
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst),
- fb ? fb->format : NULL, need_scaler);
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -5543,10 +6103,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -5562,6 +6120,13 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, intel_plane->base.name,
@@ -5572,17 +6137,18 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
return 0;
}
-static void skylake_scaler_disable(struct intel_crtc *crtc)
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
int i;
for (i = 0; i < crtc->num_scalers; i++)
skl_detach_scaler(crtc, i);
}
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const struct intel_crtc_scaler_state *scaler_state =
@@ -5617,11 +6183,11 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
}
}
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
@@ -5640,7 +6206,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5669,16 +6235,14 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (intel_wait_for_register(&dev_priv->uncore,
- IPS_CTL, IPS_ENABLE, IPS_ENABLE,
- 50))
+ if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
DRM_ERROR("Timed out waiting for IPS enable\n");
}
}
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5692,9 +6256,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- IPS_CTL, IPS_ENABLE, 0,
- 100))
+ if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
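
Both IPS paths above drop the open-coded intel_wait_for_register() calls in favour of intel_de_wait_for_set()/intel_de_wait_for_clear(). Judging only from how the arguments map at these two call sites (mask == value for "set", value == 0 for "clear"), the helpers plausibly reduce to wrappers like the following — a minimal sketch, not the actual i915 implementation:

	static inline int intel_de_wait_for_set(struct drm_i915_private *i915,
						i915_reg_t reg, u32 mask,
						unsigned int timeout_ms)
	{
		/* wait until (reg & mask) == mask, i.e. all requested bits set */
		return intel_wait_for_register(&i915->uncore, reg, mask, mask,
					       timeout_ms);
	}

	static inline int intel_de_wait_for_clear(struct drm_i915_private *i915,
						  i915_reg_t reg, u32 mask,
						  unsigned int timeout_ms)
	{
		/* wait until (reg & mask) == 0, i.e. all requested bits clear */
		return intel_wait_for_register(&i915->uncore, reg, mask, 0,
					       timeout_ms);
	}
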
@@ -5707,96 +6269,24 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
- if (intel_crtc->overlay) {
- struct drm_device *dev = intel_crtc->base.dev;
-
- mutex_lock(&dev->struct_mutex);
+ if (intel_crtc->overlay)
(void) intel_overlay_switch_off(intel_crtc->overlay);
- mutex_unlock(&dev->struct_mutex);
- }
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
*/
}
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- * @new_crtc_state: the enabling state
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS. Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-static void
-intel_post_enable_primary(struct drm_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So don't enable underrun reporting before at least some planes
- * are enabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
- /* Underruns don't always raise interrupts, so check manually. */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So disable underrun reporting before all the planes get disabled.
- */
- if (IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
- hsw_disable_ips(to_intel_crtc_state(crtc->state));
-
- /*
- * Vblank time updates from the shadow to live plane control register
- * are blocked if the memory self-refresh mode is active at that
- * moment. So to make sure the plane gets truly disabled, disable
- * first the self-refresh mode. The self-refresh enable bit in turn
- * will be checked/applied by the HW only at the next frame start
- * event which is after the vblank start event, so we need to have a
- * wait-for-vblank between disabling the plane and the pipe.
- */
- if (HAS_GMCH(dev_priv) &&
- intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, pipe);
-}
-
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
return true;
/*
@@ -5806,7 +6296,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
* Disable IPS before we program the LUT.
*/
if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -5817,13 +6307,13 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
return true;
/*
@@ -5833,7 +6323,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* Re-enable IPS after the LUT has been programmed.
*/
if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -5843,15 +6333,16 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* forcibly enable IPS on the first fastset.
*/
if (new_crtc_state->update_pipe &&
- old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
return true;
return !old_crtc_state->ips_enabled;
}
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
if (!crtc_state->nv12_planes)
return false;
@@ -5862,9 +6353,10 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
}
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
/* Wa_2006604312:icl */
if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
return true;
@@ -5872,89 +6364,82 @@ static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
return false;
}
-static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *old_state = old_crtc_state->base.state;
- struct intel_crtc_state *pipe_config =
- intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
- crtc);
- struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(old_state, primary);
+ return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ new_crtc_state->active_planes;
+}
- intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
+static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->active_planes &&
+ (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+}
- if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(crtc);
+static void intel_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *new_primary_state =
+ intel_atomic_get_new_plane_state(state, primary);
+ enum pipe pipe = crtc->pipe;
- if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
- hsw_enable_ips(pipe_config);
+ intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
- if (old_primary_state) {
- struct drm_plane_state *new_primary_state =
- drm_atomic_get_new_plane_state(old_state, primary);
+ if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
+ intel_update_watermarks(crtc);
- intel_fbc_post_update(crtc);
+ if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
+ hsw_enable_ips(new_crtc_state);
- if (new_primary_state->visible &&
- (needs_modeset(&pipe_config->base) ||
- !old_primary_state->visible))
- intel_post_enable_primary(&crtc->base, pipe_config);
- }
+ if (new_primary_state)
+ intel_fbc_post_update(crtc);
- if (needs_nv12_wa(dev_priv, old_crtc_state) &&
- !needs_nv12_wa(dev_priv, pipe_config))
- skl_wa_827(dev_priv, crtc->pipe, false);
+ if (needs_nv12_wa(old_crtc_state) &&
+ !needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, false);
- if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
- !needs_scalerclk_wa(dev_priv, pipe_config))
- icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+ if (needs_scalerclk_wa(old_crtc_state) &&
+ !needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, false);
}
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *pipe_config)
+static void intel_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *old_state = old_crtc_state->base.state;
- struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(old_state, primary);
- bool modeset = needs_modeset(&pipe_config->base);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
-
- if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
- hsw_disable_ips(old_crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *new_primary_state =
+ intel_atomic_get_new_plane_state(state, primary);
+ enum pipe pipe = crtc->pipe;
- if (old_primary_state) {
- struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(old_intel_state,
- to_intel_plane(primary));
+ if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
+ hsw_disable_ips(old_crtc_state);
- intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So disable underrun reporting before all the planes get disabled.
- */
- if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
- (modeset || !new_primary_state->base.visible))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- }
+ if (new_primary_state &&
+ intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+ intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
- if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
- needs_nv12_wa(dev_priv, pipe_config))
- skl_wa_827(dev_priv, crtc->pipe, true);
+ if (!needs_nv12_wa(old_crtc_state) &&
+ needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, true);
/* Wa_2006604312:icl */
- if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
- needs_scalerclk_wa(dev_priv, pipe_config))
- icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+ if (!needs_scalerclk_wa(old_crtc_state) &&
+ needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -5965,9 +6450,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
- pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, pipe);
/*
* IVB workaround: must disable low power watermarks for at least
@@ -5976,36 +6461,45 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
- old_crtc_state->base.active)
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (old_crtc_state->hw.active &&
+ new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+ intel_wait_for_vblank(dev_priv, pipe);
/*
- * If we're doing a modeset, we're done. No need to do any pre-vblank
- * watermark programming here.
+ * If we're doing a modeset we don't need to do any
+ * pre-vblank watermark programming here.
*/
- if (needs_modeset(&pipe_config->base))
- return;
+ if (!needs_modeset(new_crtc_state)) {
+ /*
+ * For platforms that support atomic watermarks, program the
+ * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
+ * will be the intermediate values that are safe for both pre- and
+ * post- vblank; when vblank happens, the 'active' values will be set
+ * to the final 'target' values and we'll do this again to get the
+ * optimal watermarks. For gen9+ platforms, the values we program here
+ * will be the final target values which will get automatically latched
+ * at vblank time; no further programming will be necessary.
+ *
+ * If a platform hasn't been transitioned to atomic watermarks yet,
+ * we'll continue to update watermarks the old way, if flags tell
+ * us to.
+ */
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+ else if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(crtc);
+ }
/*
- * For platforms that support atomic watermarks, program the
- * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
- * will be the intermediate values that are safe for both pre- and
- * post- vblank; when vblank happens, the 'active' values will be set
- * to the final 'target' values and we'll do this again to get the
- * optimal watermarks. For gen9+ platforms, the values we program here
- * will be the final target values which will get automatically latched
- * at vblank time; no further programming will be necessary.
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
*
- * If a platform hasn't been transitioned to atomic watermarks yet,
- * we'll continue to update watermarks the old way, if flags tell
- * us to.
+ * We do this after .initial_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the old plane configuration.
*/
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
- else if (pipe_config->update_wm_pre)
- intel_update_watermarks(crtc);
+ if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -6029,26 +6523,102 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
intel_disable_plane(plane, new_crtc_state);
- if (old_plane_state->base.visible)
+ if (old_plane_state->uapi.visible)
fb_bits |= plane->frontbuffer_bit;
}
intel_frontbuffer_flip(dev_priv, fb_bits);
}
-static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+/*
+ * intel_connector_primary_encoder - get the primary encoder for a connector
+ * @connector: connector for which to return the encoder
+ *
+ * Returns the primary encoder for a connector. There is a 1:1 mapping from
+ * all connectors to their encoder, except for DP-MST connectors which have
+ * both a virtual and a primary encoder. These DP-MST primary encoders can be
+ * pointed to by as many DP-MST connectors as there are pipes.
+ */
+static struct intel_encoder *
+intel_connector_primary_encoder(struct intel_connector *connector)
{
- struct drm_connector_state *conn_state;
+ struct intel_encoder *encoder;
+
+ if (connector->mst_port)
+ return &dp_to_dig_port(connector->mst_port)->base;
+
+ encoder = intel_attached_encoder(connector);
+ WARN_ON(!encoder);
+
+ return encoder;
+}
+
+static void intel_encoders_update_prepare(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
+ if (!encoder->update_prepare)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_prepare(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_update_complete(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
+ if (!encoder->update_complete)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_complete(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->pre_pll_enable)
@@ -6056,19 +6626,20 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_pre_enable(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void intel_encoders_pre_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->pre_enable)
@@ -6076,19 +6647,20 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_enable(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void intel_encoders_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->enable)
@@ -6097,19 +6669,20 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_disable(struct drm_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+static void intel_encoders_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
intel_opregion_notify_encoder(encoder, false);
@@ -6118,19 +6691,20 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_post_disable(struct drm_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+static void intel_encoders_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
if (encoder->post_disable)
@@ -6138,19 +6712,20 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
if (encoder->post_pll_disable)
@@ -6158,19 +6733,20 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_update_pipe(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void intel_encoders_update_pipe(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->update_pipe)
@@ -6180,24 +6756,21 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc,
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
plane->disable_plane(plane, crtc_state);
}
-static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+static void ilk_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
/*
@@ -6213,61 +6786,59 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (pipe_config->has_pch_encoder)
- intel_prepare_shared_dpll(pipe_config);
+ if (new_crtc_state->has_pch_encoder)
+ intel_prepare_shared_dpll(new_crtc_state);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
- if (pipe_config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(pipe_config,
- &pipe_config->fdi_m_n, NULL);
- }
+ if (new_crtc_state->has_pch_encoder)
+ intel_cpu_transcoder_set_m_n(new_crtc_state,
+ &new_crtc_state->fdi_m_n, NULL);
- ironlake_set_pipeconf(pipe_config);
+ ilk_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(state, crtc);
- if (pipe_config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(pipe_config);
+ ilk_fdi_pll_enable(new_crtc_state);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(pipe_config);
+ ilk_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
- intel_enable_pipe(pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+ intel_enable_pipe(new_crtc_state);
- if (pipe_config->has_pch_encoder)
- ironlake_pch_enable(old_intel_state, pipe_config);
+ if (new_crtc_state->has_pch_encoder)
+ ilk_pch_enable(state, new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(dev_priv))
- cpt_verify_modeset(dev, intel_crtc->pipe);
+ cpt_verify_modeset(dev_priv, pipe);
/*
* Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -6275,7 +6846,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* some interlaced HDMI modes. Let's do the double wait always
* in case there are more corner cases we don't know about.
*/
- if (pipe_config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -6310,109 +6881,124 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
u32 val;
val = MBUS_DBOX_A_CREDIT(2);
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
-static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe, hsw_workaround_pipe;
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+}
+
+static void hsw_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_pll_enable(state, crtc);
- if (pipe_config->shared_dpll)
- intel_enable_shared_dpll(pipe_config);
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(state, crtc);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
- intel_set_pipe_src_size(pipe_config);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_enable_trans_port_sync(new_crtc_state);
+
+ intel_set_pipe_src_size(new_crtc_state);
if (cpu_transcoder != TRANSCODER_EDP &&
- !transcoder_is_dsi(cpu_transcoder)) {
+ !transcoder_is_dsi(cpu_transcoder))
I915_WRITE(PIPE_MULT(cpu_transcoder),
- pipe_config->pixel_multiplier - 1);
- }
+ new_crtc_state->pixel_multiplier - 1);
- if (pipe_config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(pipe_config,
- &pipe_config->fdi_m_n, NULL);
- }
+ if (new_crtc_state->has_pch_encoder)
+ intel_cpu_transcoder_set_m_n(new_crtc_state,
+ &new_crtc_state->fdi_m_n, NULL);
- if (!transcoder_is_dsi(cpu_transcoder))
- haswell_set_pipeconf(pipe_config);
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ hsw_set_frame_start_delay(new_crtc_state);
+ hsw_set_pipeconf(new_crtc_state);
+ }
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(pipe_config);
+ bdw_set_pipemisc(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- pipe_config->pch_pfit.enabled;
+ new_crtc_state->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(pipe_config);
+ skl_pfit_enable(new_crtc_state);
else
- ironlake_pfit_enable(pipe_config);
+ ilk_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma/csc for pipe bottom color */
if (INTEL_GEN(dev_priv) < 9)
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(intel_crtc);
+ icl_set_pipe_chicken(crtc);
- intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(pipe_config);
+ intel_ddi_enable_transcoder_func(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
if (INTEL_GEN(dev_priv) >= 11)
- icl_pipe_mbus_enable(intel_crtc);
+ icl_pipe_mbus_enable(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(pipe_config);
-
- if (pipe_config->has_pch_encoder)
- lpt_pch_enable(old_intel_state, pipe_config);
+ intel_enable_pipe(new_crtc_state);
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(pipe_config, true);
+ if (new_crtc_state->has_pch_encoder)
+ lpt_pch_enable(state, new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
@@ -6421,16 +7007,16 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
- hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -6443,14 +7029,13 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
}
-static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+static void ilk_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
* Sometimes spurious CPU pipe underruns happen when the
@@ -6460,22 +7045,21 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(state, crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
+ intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
if (old_crtc_state->has_pch_encoder)
- ironlake_fdi_disable(crtc);
+ ilk_fdi_disable(crtc);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(state, crtc);
if (old_crtc_state->has_pch_encoder) {
- ironlake_disable_pch_transcoder(dev_priv, pipe);
+ ilk_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
@@ -6495,51 +7079,27 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
I915_WRITE(PCH_DPLL_SEL, temp);
}
- ironlake_fdi_pll_disable(intel_crtc);
+ ilk_fdi_pll_disable(crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+static void hsw_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-
- intel_encoders_disable(crtc, old_crtc_state, old_state);
-
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
- /* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_disable_pipe(old_crtc_state);
-
- if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_transcoder_func(old_crtc_state);
-
- intel_dsc_disable(old_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9)
- skylake_scaler_disable(intel_crtc);
- else
- ironlake_pfit_disable(old_crtc_state);
-
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
- intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+ /*
+ * FIXME collapse everything to one hook.
+ * Need care with mst->ddi interactions.
+ */
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!crtc_state->gmch_pfit.control)
@@ -6550,7 +7110,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* according to register description and PRM.
*/
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
@@ -6560,33 +7120,47 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (port == PORT_NONE)
+ if (phy == PHY_NONE)
return false;
if (IS_ELKHARTLAKE(dev_priv))
- return port <= PORT_C;
+ return phy <= PHY_C;
if (INTEL_GEN(dev_priv) >= 11)
- return port <= PORT_B;
+ return phy <= PHY_B;
return false;
}
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
+ if (INTEL_GEN(dev_priv) >= 12)
+ return phy >= PHY_D && phy <= PHY_I;
+
if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
- return port >= PORT_C && port <= PORT_F;
+ return phy >= PHY_C && phy <= PHY_F;
return false;
}
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+{
+ if (IS_ELKHARTLAKE(i915) && port == PORT_D)
+ return PHY_A;
+
+ return (enum phy)port;
+}
+
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
- if (!intel_port_is_tc(dev_priv, port))
+ if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
return PORT_TC_NONE;
+ if (INTEL_GEN(dev_priv) >= 12)
+ return port - PORT_D;
+
return port - PORT_C;
}
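
The hunk above splits the old port-based checks into a port→PHY mapping (intel_port_to_phy()) plus PHY classification (intel_phy_is_combo()/intel_phy_is_tc()), and rebases intel_port_to_tc() onto PORT_D for gen12. As an illustration of the mapping only — this helper is not part of the diff — resolving all three identifiers for a DDI port would look roughly like:

	/* Hypothetical debug helper, for illustration of the mapping only. */
	static void intel_dump_port_mapping(struct drm_i915_private *i915,
					    enum port port)
	{
		enum phy phy = intel_port_to_phy(i915, port);
		enum tc_port tc_port = intel_port_to_tc(i915, port);

		DRM_DEBUG_KMS("port %c -> phy %c (%s), tc_port %d\n",
			      port_name(port), 'A' + phy,
			      intel_phy_is_tc(i915, phy) ? "Type-C" :
			      intel_phy_is_combo(i915, phy) ? "combo" : "other",
			      tc_port);
	}
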
@@ -6605,6 +7179,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_E_LANES;
case PORT_F:
return POWER_DOMAIN_PORT_DDI_F_LANES;
+ case PORT_G:
+ return POWER_DOMAIN_PORT_DDI_G_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
@@ -6614,6 +7190,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT) {
+ switch (dig_port->aux_ch) {
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_C_TBT;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_D_TBT;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E_TBT;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_F_TBT;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G_TBT;
+ default:
+ MISSING_CASE(dig_port->aux_ch);
+ return POWER_DOMAIN_AUX_C_TBT;
+ }
+ }
+
switch (dig_port->aux_ch) {
case AUX_CH_A:
return POWER_DOMAIN_AUX_A;
@@ -6627,24 +7225,24 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G;
default:
MISSING_CASE(dig_port->aux_ch);
return POWER_DOMAIN_AUX_A;
}
}
-static u64 get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u64 mask;
enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
@@ -6653,7 +7251,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
- drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+ drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
mask |= BIT_ULL(intel_encoder->power_domain);
@@ -6669,17 +7268,16 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
}
static u64
-modeset_get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = intel_crtc->enabled_power_domains;
- intel_crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc, crtc_state);
+ old_domains = crtc->enabled_power_domains;
+ crtc->enabled_power_domains = new_domains =
+ get_crtc_power_domains(crtc_state);
domains = new_domains & ~old_domains;
@@ -6698,151 +7296,140 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
intel_display_power_put_unchecked(dev_priv, domain);
}
-static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+static void valleyview_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(pipe_config);
+ i9xx_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_pll_enable(state, crtc);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(intel_crtc, pipe_config);
- chv_enable_pll(intel_crtc, pipe_config);
+ chv_prepare_pll(crtc, new_crtc_state);
+ chv_enable_pll(crtc, new_crtc_state);
} else {
- vlv_prepare_pll(intel_crtc, pipe_config);
- vlv_enable_pll(intel_crtc, pipe_config);
+ vlv_prepare_pll(crtc, new_crtc_state);
+ vlv_enable_pll(crtc, new_crtc_state);
}
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(state, crtc);
- i9xx_pfit_enable(pipe_config);
+ i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
- intel_enable_pipe(pipe_config);
+ dev_priv->display.initial_watermarks(state, crtc);
+ intel_enable_pipe(new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(state, crtc);
}
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
-static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+static void i9xx_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- i9xx_set_pll_dividers(pipe_config);
+ i9xx_set_pll_dividers(new_crtc_state);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
- i9xx_set_pipeconf(pipe_config);
+ i9xx_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(state, crtc);
- i9xx_enable_pll(intel_crtc, pipe_config);
+ i9xx_enable_pll(crtc, new_crtc_state);
- i9xx_pfit_enable(pipe_config);
+ i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
else
- intel_update_watermarks(intel_crtc);
- intel_enable_pipe(pipe_config);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(state, crtc);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->gmch_pfit.control)
return;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
I915_READ(PFIT_CONTROL));
I915_WRITE(PFIT_CONTROL, 0);
}
-static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+static void i9xx_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
@@ -6851,16 +7438,15 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(state, crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
+ intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
@@ -6871,92 +7457,97 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_disable_pll(old_crtc_state);
}
- intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(state, crtc);
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
- intel_update_watermarks(intel_crtc);
+ intel_update_watermarks(crtc);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
i830_enable_pipe(dev_priv, pipe);
}
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(dev_priv->bw_obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
struct intel_plane *plane;
- u64 domains;
struct drm_atomic_state *state;
- struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *temp_crtc_state;
+ enum pipe pipe = crtc->pipe;
+ u64 domains;
int ret;
- if (!intel_crtc->active)
+ if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible)
- intel_plane_disable_noatomic(intel_crtc, plane);
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(crtc->dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.id, crtc->name);
+ crtc->base.base.id, crtc->base.name);
return;
}
state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- ret = drm_atomic_add_affected_connectors(state, crtc);
+ temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(crtc_state) || ret);
+ WARN_ON(IS_ERR(temp_crtc_state) || ret);
- dev_priv->display.crtc_disable(crtc_state, state);
+ dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.id, crtc->name);
+ crtc->base.base.id, crtc->base.name);
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
- crtc->state->active = false;
- intel_crtc->active = false;
- crtc->enabled = false;
- crtc->state->connector_mask = 0;
- crtc->state->encoder_mask = 0;
+ crtc->active = false;
+ crtc->base.enabled = false;
- for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ crtc_state->uapi.active = false;
+ crtc_state->uapi.connector_mask = 0;
+ crtc_state->uapi.encoder_mask = 0;
+ intel_crtc_free_hw_state(crtc_state);
+ memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
+
+ for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
encoder->base.crtc = NULL;
- intel_fbc_disable(intel_crtc);
- intel_update_watermarks(intel_crtc);
- intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
+ intel_fbc_disable(crtc);
+ intel_update_watermarks(crtc);
+ intel_disable_shared_dpll(crtc_state);
- domains = intel_crtc->enabled_power_domains;
+ domains = crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
intel_display_power_put_unchecked(dev_priv, domain);
- intel_crtc->enabled_power_domains = 0;
+ crtc->enabled_power_domains = 0;
- dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
- dev_priv->min_cdclk[intel_crtc->pipe] = 0;
- dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+ dev_priv->active_pipes &= ~BIT(pipe);
+ dev_priv->min_cdclk[pipe] = 0;
+ dev_priv->min_voltage_level[pipe] = 0;
- bw_state->data_rate[intel_crtc->pipe] = 0;
- bw_state->num_active_planes[intel_crtc->pipe] = 0;
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
}
/*
@@ -6988,7 +7579,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and its
* internal consistency). */
-static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
@@ -7006,8 +7597,8 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
if (!crtc_state)
return;
- I915_STATE_WARN(!crtc_state->active,
- "connector is active, but attached crtc isn't\n");
+ I915_STATE_WARN(!crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
return;
@@ -7018,8 +7609,8 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(crtc_state && crtc_state->active,
- "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
}
@@ -7027,17 +7618,17 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
return crtc_state->fdi_lanes;
return 0;
}
-static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7059,7 +7650,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev_priv)->num_pipes == 2)
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7106,11 +7697,11 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
#define RETRY 1
-static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
+static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
@@ -7126,15 +7717,15 @@ retry:
fdi_dotclock = adjusted_mode->crtc_clock;
- lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false);
+ link_bw, &pipe_config->fdi_m_n, false, false);
- ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EDEADLK)
return ret;
@@ -7156,7 +7747,7 @@ retry:
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -7186,9 +7777,9 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ to_i915(crtc_state->uapi.crtc->dev);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
@@ -7227,7 +7818,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
u32 pixel_rate;
- pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+ pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* We only use IF-ID interlacing. If we ever use
@@ -7260,12 +7851,12 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (HAS_GMCH(dev_priv))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
- crtc_state->base.adjusted_mode.crtc_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock;
else
crtc_state->pixel_rate =
ilk_pipe_pixel_rate(crtc_state);
@@ -7275,7 +7866,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
if (INTEL_GEN(dev_priv) < 4) {
@@ -7301,7 +7892,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->base.ctm) {
+ pipe_config->hw.ctm) {
/*
* There is only one pipe CSC unit per pipe, and we need that
* for output conversion from RGB->YCBCR. So if CTM is already
@@ -7340,7 +7931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
if (pipe_config->has_pch_encoder)
- return ironlake_fdi_compute_config(crtc, pipe_config);
+ return ilk_fdi_compute_config(crtc, pipe_config);
return 0;
}
@@ -7379,11 +7970,15 @@ void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n)
+ bool constant_n, bool fec_enable)
{
- m_n->tu = 64;
+ u32 data_clock = bits_per_pixel * pixel_clock;
- compute_m_n(bits_per_pixel * pixel_clock,
+ if (fec_enable)
+ data_clock = intel_dp_mode_to_fec_clock(data_clock);
+
+ m_n->tu = 64;
+ compute_m_n(data_clock,
link_clock * nlanes * 8,
&m_n->gmch_m, &m_n->gmch_n,
constant_n);
@@ -7393,6 +7988,27 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
constant_n);
}
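For context on the new fec_enable parameter: the M/N pair encodes the ratio of the stream's data clock to the total link bandwidth (link_clock * nlanes * 8), and when FEC is enabled the data clock is first inflated by intel_dp_mode_to_fec_clock() so that ratio accounts for the FEC overhead. The sketch below is illustrative only and not part of this patch; it shows roughly how a DP encoder path would be expected to pass its FEC state through the extra argument (the intel_crtc_state fields used here, lane_count, port_clock, dp_m_n and fec_enable, are assumed from the existing driver structures rather than taken from this hunk), while paths that never use FEC, such as the FDI path above, simply pass false.

	/*
	 * Illustrative sketch, not part of this patch: feeding a DP
	 * encoder's FEC state into the new last argument of
	 * intel_link_compute_m_n().
	 */
	static void example_dp_compute_m_n(struct intel_crtc_state *crtc_state,
					   bool constant_n)
	{
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;

		intel_link_compute_m_n(crtc_state->pipe_bpp,
				       crtc_state->lane_count,
				       adjusted_mode->crtc_clock,
				       crtc_state->port_clock,
				       &crtc_state->dp_m_n,
				       constant_n,
				       crtc_state->fec_enable);
	}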
+static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+{
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE;
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+}
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
@@ -7470,7 +8086,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -7497,7 +8113,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -7806,11 +8422,11 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ pipe_config = intel_crtc_state_alloc(crtc);
if (!pipe_config)
return -ENOMEM;
- pipe_config->base.crtc = &crtc->base;
+ pipe_config->cpu_transcoder = (enum transcoder)pipe;
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
@@ -7970,11 +8586,11 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -8032,7 +8648,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -8044,6 +8660,21 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
(crtc_state->pipe_src_h - 1));
}
+static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (IS_GEN(dev_priv, 2))
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ else
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+}
+
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8053,39 +8684,39 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
u32 tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hblank_start =
+ pipe_config->hw.adjusted_mode.crtc_hblank_start =
(tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hblank_end =
+ pipe_config->hw.adjusted_mode.crtc_hblank_end =
((tmp >> 16) & 0xffff) + 1;
}
tmp = I915_READ(HSYNC(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VTOTAL(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vblank_start =
+ pipe_config->hw.adjusted_mode.crtc_vblank_start =
(tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end =
+ pipe_config->hw.adjusted_mode.crtc_vblank_end =
((tmp >> 16) & 0xffff) + 1;
}
tmp = I915_READ(VSYNC(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
- if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->base.adjusted_mode.crtc_vtotal += 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
+ if (intel_pipe_is_interlaced(pipe_config)) {
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
}
}
@@ -8100,27 +8731,27 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
- pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
- pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
+ pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
}
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config)
{
- mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
- mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
- mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
- mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
- mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
- mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
- mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
- mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
- mode->flags = pipe_config->base.adjusted_mode.flags;
+ mode->flags = pipe_config->hw.adjusted_mode.flags;
mode->type = DRM_MODE_TYPE_DRIVER;
- mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+ mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -8129,7 +8760,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pipeconf;
@@ -8166,7 +8797,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
}
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -8182,6 +8813,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
@@ -8281,9 +8914,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- limit = &intel_limits_pineview_lvds;
+ limit = &pnv_limits_lvds;
} else {
- limit = &intel_limits_pineview_sdvo;
+ limit = &pnv_limits_sdvo;
}
if (!crtc_state->clock_set &&
@@ -8414,7 +9047,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8524,7 +9157,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
@@ -8553,52 +9186,29 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
-static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static enum intel_output_format
+bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
-
- pipe_config->lspcon_downsampling = false;
+ u32 tmp;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
- if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
- bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
- bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+ if (tmp & PIPEMISC_YUV420_ENABLE) {
+ /* We support 4:2:0 in full blend mode only */
+ WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
- if (ycbcr420_enabled) {
- /* We support 4:2:0 in full blend mode only */
- if (!blend)
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else if (!(IS_GEMINILAKE(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10))
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else
- output = INTEL_OUTPUT_FORMAT_YCBCR420;
- } else {
- /*
- * Currently there is no interface defined to
- * check user preference between RGB/YCBCR444
- * or YCBCR420. So the only possible case for
- * YCBCR444 usage is driving YCBCR420 output
- * with LSPCON, when pipe is configured for
- * YCBCR444 output and LSPCON takes care of
- * downsampling it.
- */
- pipe_config->lspcon_downsampling = true;
- output = INTEL_OUTPUT_FORMAT_YCBCR444;
- }
- }
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ return INTEL_OUTPUT_FORMAT_YCBCR444;
+ } else {
+ return INTEL_OUTPUT_FORMAT_RGB;
}
-
- pipe_config->output_format = output;
}
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -8631,6 +9241,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -8721,7 +9332,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* but in case the pipe is enabled w/o any ports we need a sane
* default.
*/
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
ret = true;
@@ -8732,7 +9343,7 @@ out:
return ret;
}
-static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
int i;
@@ -9169,7 +9780,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- bool pch_ssc_in_use = false;
bool has_fdi = false;
for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -9197,22 +9807,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
+ dev_priv->pch_ssc_use = 0;
+
if (spll_uses_pch_ssc(dev_priv)) {
DRM_DEBUG_KMS("SPLL using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (pch_ssc_in_use)
+ if (dev_priv->pch_ssc_use)
return;
if (has_fdi) {
@@ -9229,14 +9841,14 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ironlake_init_pch_refclk(dev_priv);
+ ilk_init_pch_refclk(dev_priv);
else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev_priv);
}
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -9264,23 +9876,35 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
+ /*
+ * This would end up with an odd purple hue over
+ * the entire display. Make sure we don't do it.
+ */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= PIPECONF_FRAME_START_DELAY(0);
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -9288,18 +9912,22 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
+ if (IS_HASWELL(dev_priv) &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
}
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
@@ -9362,7 +9990,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
}
}
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
* Account for spread spectrum to avoid
@@ -9373,14 +10001,14 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static void ironlake_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
@@ -9400,7 +10028,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
fp |= FP_CB_TUNE;
if (reduced_clock) {
@@ -9443,7 +10071,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -9480,10 +10108,12 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp1 = fp2;
}
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
const struct intel_limit *limit;
int refclk = 120000;
@@ -9503,17 +10133,17 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
- limit = &intel_limits_ironlake_dual_lvds_100m;
+ limit = &ilk_limits_dual_lvds_100m;
else
- limit = &intel_limits_ironlake_dual_lvds;
+ limit = &ilk_limits_dual_lvds;
} else {
if (refclk == 100000)
- limit = &intel_limits_ironlake_single_lvds_100m;
+ limit = &ilk_limits_single_lvds_100m;
else
- limit = &intel_limits_ironlake_single_lvds;
+ limit = &ilk_limits_single_lvds;
}
} else {
- limit = &intel_limits_ironlake_dac;
+ limit = &ilk_limits_dac;
}
if (!crtc_state->clock_set &&
@@ -9523,9 +10153,9 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- ironlake_compute_dpll(crtc, crtc_state, NULL);
+ ilk_compute_dpll(crtc, crtc_state, NULL);
- if (!intel_get_shared_dpll(crtc_state, NULL)) {
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9598,15 +10228,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
&pipe_config->dp_m2_n2);
}
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
}
-static void skylake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void skl_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9637,8 +10267,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
}
static void
-skylake_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9696,7 +10326,11 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
case PLANE_CTL_TILED_Y:
plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+ fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
+ I915_FORMAT_MOD_Y_TILED_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
@@ -9740,8 +10374,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
val = I915_READ(PLANE_SIZE(pipe, plane_id));
- fb->height = ((val >> 16) & 0xfff) + 1;
- fb->width = ((val >> 0) & 0x1fff) + 1;
+ fb->height = ((val >> 16) & 0xffff) + 1;
+ fb->width = ((val >> 0) & 0xffff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
@@ -9763,8 +10397,8 @@ error:
kfree(intel_fb);
}
-static void ironlake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9787,8 +10421,8 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool ilk_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9802,9 +10436,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9831,6 +10465,16 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
+ case PIPECONF_OUTPUT_COLORSPACE_YUV601:
+ case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ break;
+ default:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ break;
+ }
+
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
@@ -9849,7 +10493,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ ilk_get_fdi_m_n_config(crtc, pipe_config);
if (HAS_PCH_IBX(dev_priv)) {
/*
@@ -9877,7 +10521,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
- ironlake_pch_clock_get(crtc, pipe_config);
+ ilk_pch_clock_get(crtc, pipe_config);
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -9885,7 +10529,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ironlake_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(crtc, pipe_config);
ret = true;
@@ -9894,19 +10538,20 @@ out:
return ret;
}
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
INTEL_GEN(dev_priv) >= 11) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (!intel_get_shared_dpll(crtc_state, encoder)) {
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9916,9 +10561,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
-static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
@@ -9932,26 +10576,40 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum icl_port_dpll_id port_dpll_id;
enum intel_dpll_id id;
u32 temp;
- /* TODO: TBT pll not implemented. */
- if (intel_port_is_combophy(dev_priv, port)) {
- temp = I915_READ(DPCLKA_CFGCR0_ICL) &
- DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- } else if (intel_port_is_tc(dev_priv, port)) {
- id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ } else if (intel_phy_is_tc(dev_priv, phy)) {
+ u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+ if (clk_sel == DDI_CLK_SEL_MG) {
+ id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ port));
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+ } else {
+ WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ id = DPLL_ID_ICL_TBTPLL;
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ }
} else {
WARN(1, "Invalid port %x\n", port);
return;
}
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+ pipe_config->icl_port_dplls[port_dpll_id].pll =
+ intel_get_shared_dpll_by_id(dev_priv, id);
+
+ icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -9978,9 +10636,8 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
@@ -9994,9 +10651,8 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -10097,6 +10753,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
trans_pipe = PIPE_C;
break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ trans_pipe = PIPE_D;
+ break;
}
if (trans_pipe == crtc->pipe) {
@@ -10181,28 +10840,36 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
-static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_shared_dpll *pll;
enum port port;
u32 tmp;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-
- port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+ if (transcoder_is_dsi(cpu_transcoder)) {
+ port = (cpu_transcoder == TRANSCODER_DSI_A) ?
+ PORT_A : PORT_B;
+ } else {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (INTEL_GEN(dev_priv) >= 12)
+ port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ else
+ port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ }
if (INTEL_GEN(dev_priv) >= 11)
- icelake_get_ddi_pll(dev_priv, port, pipe_config);
+ icl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
- cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
+ cnl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
- skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ skl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
- haswell_get_ddi_pll(dev_priv, port, pipe_config);
+ hsw_get_ddi_pll(dev_priv, port, pipe_config);
pll = pipe_config->shared_dpll;
if (pll) {
@@ -10223,12 +10890,65 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ ilk_get_fdi_m_n_config(crtc, pipe_config);
}
}
-static bool haswell_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 trans_port_sync, master_select;
+
+ trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = trans_port_sync &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK;
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
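The readout above masks out the MASTER_SELECT field of TRANS_DDI_FUNC_CTL2 and uses the result directly, so the field has to sit in the low bits of the register. As an illustrative note, and assuming enum transcoder numbers TRANSCODER_A through TRANSCODER_D contiguously starting at 0 (consistent with the transcoders mask built in the next function), the mapping works out to:

	/*
	 * Illustrative decode, not part of this patch:
	 *   master_select == 0 -> TRANSCODER_EDP
	 *   master_select == 1 -> TRANSCODER_A
	 *   master_select == 2 -> TRANSCODER_B
	 *   master_select == 3 -> TRANSCODER_C
	 *   master_select == 4 -> TRANSCODER_D
	 */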
+static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 transcoders;
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
+ crtc_state->cpu_transcoder);
+
+ transcoders = BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) |
+ BIT(TRANSCODER_D);
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
+static bool hsw_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
@@ -10236,7 +10956,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
- intel_crtc_init_scalers(crtc, pipe_config);
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
@@ -10263,12 +10983,35 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
INTEL_GEN(dev_priv) >= 11) {
- haswell_get_ddi_port_state(crtc, pipe_config);
+ hsw_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
intel_get_pipe_src_size(crtc, pipe_config);
- intel_get_crtc_ycbcr_config(crtc, pipe_config);
+
+ if (IS_HASWELL(dev_priv)) {
+ u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+ if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ else
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ } else {
+ pipe_config->output_format =
+ bdw_get_pipemisc_output_format(crtc);
+
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling =
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
@@ -10297,9 +11040,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_get_pfit_config(crtc, pipe_config);
+ skl_get_pfit_config(crtc, pipe_config);
else
- ironlake_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(crtc, pipe_config);
}
if (hsw_crtc_supports_ips(crtc)) {
@@ -10323,6 +11066,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ icl_get_trans_port_sync_config(pipe_config);
+
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -10334,8 +11081,8 @@ out:
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
@@ -10344,21 +11091,13 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
else
base = intel_plane_ggtt_offset(plane_state);
- base += plane_state->color_plane[0].offset;
-
- /* ILK+ do this automagically */
- if (HAS_GMCH(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_180)
- base += (plane_state->base.crtc_h *
- plane_state->base.crtc_w - 1) * fb->format->cpp[0];
-
- return base;
+ return base + plane_state->color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
- int x = plane_state->base.crtc_x;
- int y = plane_state->base.crtc_y;
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
u32 pos = 0;
if (x < 0) {
@@ -10379,9 +11118,9 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
const struct drm_mode_config *config =
- &plane_state->base.plane->dev->mode_config;
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
return width > 0 && width <= config->cursor_width &&
height > 0 && height <= config->cursor_height;
@@ -10389,6 +11128,9 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
int src_x, src_y;
u32 offset;
int ret;
@@ -10397,11 +11139,11 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- src_x = plane_state->base.src_x >> 16;
- src_y = plane_state->base.src_y >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10412,7 +11154,25 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
return 0;
}
@@ -10420,7 +11180,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -10428,19 +11188,23 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
if (ret)
return ret;
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
+ plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
+
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -10478,7 +11242,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- int width = plane_state->base.crtc_w;
+ int width = drm_rect_width(&plane_state->uapi.dst);
/*
* 845g/865g are only limited by the width of their cursors,
@@ -10490,7 +11254,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -10504,12 +11268,12 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->base.visible &&
+ WARN_ON(plane_state->uapi.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
@@ -10537,9 +11301,9 @@ static void i845_update_cursor(struct intel_plane *plane,
u32 cntl = 0, base = 0, pos = 0, size = 0;
unsigned long irqflags;
- if (plane_state && plane_state->base.visible) {
- unsigned int width = plane_state->base.crtc_w;
- unsigned int height = plane_state->base.crtc_h;
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
cntl = plane_state->ctl |
i845_cursor_ctl_crtc(crtc_state);
@@ -10612,7 +11376,7 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
@@ -10635,13 +11399,13 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
+ to_i915(plane_state->uapi.plane->dev);
u32 cntl = 0;
if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- switch (plane_state->base.crtc_w) {
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
@@ -10652,11 +11416,11 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(plane_state->base.crtc_w);
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
return 0;
}
- if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
cntl |= MCURSOR_ROTATE_180;
return cntl;
@@ -10665,9 +11429,9 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
if (!intel_cursor_size_ok(plane_state))
return false;
@@ -10689,7 +11453,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
* cursors.
*/
if (HAS_CUR_FBC(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
if (height < 8 || height > width)
return false;
} else {
@@ -10703,9 +11467,9 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int ret;
@@ -10720,17 +11484,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->base.visible &&
+ WARN_ON(plane_state->uapi.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
- if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0], plane_state->base.crtc_w);
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -10745,7 +11511,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->base.visible && plane_state->base.crtc_x < 0) {
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -10764,12 +11530,15 @@ static void i9xx_update_cursor(struct intel_plane *plane,
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
unsigned long irqflags;
- if (plane_state && plane_state->base.visible) {
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned height = drm_rect_height(&plane_state->uapi.dst);
+
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (plane_state->base.crtc_h != plane_state->base.crtc_w)
- fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
base = intel_cursor_base(plane_state);
pos = intel_cursor_position(plane_state);
@@ -10914,13 +11683,12 @@ static int intel_modeset_disable_planes(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
- intel_attached_encoder(connector);
+ intel_attached_encoder(to_intel_connector(connector));
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
@@ -11019,12 +11787,10 @@ found:
goto fail;
}
- crtc_state->base.active = crtc_state->base.enable = true;
-
- if (!mode)
- mode = &load_detect_mode;
+ crtc_state->uapi.active = true;
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
+ &load_detect_mode);
if (ret)
goto fail;
@@ -11076,7 +11842,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *intel_encoder =
- intel_attached_encoder(connector);
+ intel_attached_encoder(to_intel_connector(connector));
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_atomic_state *state = old->restore_state;
int ret;
@@ -11116,7 +11882,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -11219,8 +11985,8 @@ int intel_dotclock_calculate(int link_freq,
return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -11232,11 +11998,38 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
* we may need some idea for the dotclock anyway.
* Calculate one based on the FDI configuration.
*/
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
}
+static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
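The two helpers above replace an open-coded kzalloc of a crtc state followed by manual field setup; a minimal usage sketch, mirroring the intel_encoder_current_mode hunk later in this diff (the surrounding function is assumed, not shown), could look like this:

	struct intel_crtc_state *crtc_state;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state)
		return NULL;	/* or -ENOMEM, depending on the caller */

	/* Transcoder/pipe/scaler fields start at their INVALID_* defaults
	 * rather than zero, so callers no longer have to set them by hand. */

	kfree(crtc_state);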
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -11256,14 +12049,12 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
if (!mode)
return NULL;
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ crtc_state = intel_crtc_state_alloc(crtc);
if (!crtc_state) {
kfree(mode);
return NULL;
}
- crtc_state->base.crtc = &crtc->base;
-
if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
kfree(crtc_state);
kfree(mode);
@@ -11297,22 +12088,22 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
*
* Returns true or false.
*/
-static bool intel_wm_need_update(struct intel_plane_state *cur,
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
struct intel_plane_state *new)
{
/* Update watermarks on tiling or size changes. */
- if (new->base.visible != cur->base.visible)
+ if (new->uapi.visible != cur->uapi.visible)
return true;
- if (!cur->base.fb || !new->base.fb)
+ if (!cur->hw.fb || !new->hw.fb)
return false;
- if (cur->base.fb->modifier != new->base.fb->modifier ||
- cur->base.rotation != new->base.rotation ||
- drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
- drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
- drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
- drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
+ if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+ cur->hw.rotation != new->hw.rotation ||
+ drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+ drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+ drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+ drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
return true;
return false;
@@ -11320,42 +12111,36 @@ static bool intel_wm_need_update(struct intel_plane_state *cur,
static bool needs_scaling(const struct intel_plane_state *state)
{
- int src_w = drm_rect_width(&state->base.src) >> 16;
- int src_h = drm_rect_height(&state->base.src) >> 16;
- int dst_w = drm_rect_width(&state->base.dst);
- int dst_h = drm_rect_height(&state->base.dst);
+ int src_w = drm_rect_width(&state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&state->uapi.src) >> 16;
+ int dst_w = drm_rect_width(&state->uapi.dst);
+ int dst_h = drm_rect_height(&state->uapi.dst);
return (src_w != dst_w || src_h != dst_h);
}
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state)
+ struct intel_plane_state *plane_state)
{
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
- struct drm_crtc *crtc = crtc_state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->plane);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = needs_modeset(crtc_state);
- bool was_crtc_enabled = old_crtc_state->base.active;
- bool is_crtc_enabled = crtc_state->active;
+ bool was_crtc_enabled = old_crtc_state->hw.active;
+ bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
- struct drm_framebuffer *fb = plane_state->fb;
int ret;
if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(
- to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state));
+ ret = skl_update_scaler_plane(crtc_state, plane_state);
if (ret)
return ret;
}
- was_visible = old_plane_state->base.visible;
- visible = plane_state->visible;
+ was_visible = old_plane_state->uapi.visible;
+ visible = plane_state->uapi.visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -11371,55 +12156,48 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->visible = visible = false;
- to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
- to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
+ plane_state->uapi.visible = visible = false;
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
}
if (!was_visible && !visible)
return 0;
- if (fb != old_plane_state->base.fb)
- pipe_config->fb_changed = true;
-
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
- plane->base.base.id, plane->base.name,
- fb ? fb->base.id : -1);
-
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
was_visible, visible,
turn_off, turn_on, mode_changed);
if (turn_on) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- pipe_config->update_wm_pre = true;
+ crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- pipe_config->disable_cxsr = true;
+ crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- pipe_config->update_wm_post = true;
+ crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- pipe_config->disable_cxsr = true;
- } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
- to_intel_plane_state(plane_state))) {
+ crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, plane_state)) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
- pipe_config->update_wm_pre = true;
- pipe_config->update_wm_post = true;
+ crtc_state->update_wm_pre = true;
+ crtc_state->update_wm_post = true;
}
}
if (visible || was_visible)
- pipe_config->fb_bits |= plane->frontbuffer_bit;
+ crtc_state->fb_bits |= plane->frontbuffer_bit;
/*
* ILK/SNB DVSACNTR/Sprite Enable
@@ -11458,8 +12236,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
(IS_GEN_RANGE(dev_priv, 5, 6) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(to_intel_plane_state(plane_state)))))
- pipe_config->disable_lp_wm = true;
+ needs_scaling(plane_state))))
+ crtc_state->disable_lp_wm = true;
return 0;
}
@@ -11501,7 +12279,7 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- linked = plane_state->linked_plane;
+ linked = plane_state->planar_linked_plane;
if (!linked)
continue;
@@ -11510,8 +12288,8 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
- WARN_ON(linked_plane_state->linked_plane != plane);
- WARN_ON(linked_plane_state->slave == plane_state->slave);
+ WARN_ON(linked_plane_state->planar_linked_plane != plane);
+ WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -11519,9 +12297,9 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_plane *plane, *linked;
struct intel_plane_state *plane_state;
int i;
@@ -11534,16 +12312,16 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
* in the crtc_state->active_planes mask.
*/
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
continue;
- plane_state->linked_plane = NULL;
- if (plane_state->slave && !plane_state->base.visible) {
+ plane_state->planar_linked_plane = NULL;
+ if (plane_state->planar_slave && !plane_state->uapi.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
- plane_state->slave = false;
+ plane_state->planar_slave = false;
}
if (!crtc_state->nv12_planes)
@@ -11577,13 +12355,33 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
return -EINVAL;
}
- plane_state->linked_plane = linked;
+ plane_state->planar_linked_plane = linked;
- linked_state->slave = true;
- linked_state->linked_plane = plane;
+ linked_state->planar_slave = true;
+ linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+
+ /* Copy parameters to slave plane */
+ linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
+ linked_state->color_ctl = plane_state->color_ctl;
+ linked_state->view = plane_state->view;
+ memcpy(linked_state->color_plane, plane_state->color_plane,
+ sizeof(linked_state->color_plane));
+
+ intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
+ linked_state->uapi.src = plane_state->uapi.src;
+ linked_state->uapi.dst = plane_state->uapi.dst;
+
+ if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ if (linked->id == PLANE_SPRITE5)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+ else if (linked->id == PLANE_SPRITE4)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ else
+ MISSING_CASE(linked->id);
+ }
}
return 0;
@@ -11591,34 +12389,150 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static int intel_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+static bool
+intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc_state);
- int ret;
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+ if (connector->has_tile &&
+ connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ return true;
+ }
+
+ return false;
+}
+
+static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask = 0;
+}
+
+static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state,
+ int num_tiled_conns)
+{
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_connector *master_connector;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_crtc *master_crtc = NULL;
+ struct drm_crtc_state *master_crtc_state;
+ struct intel_crtc_state *master_pipe_config;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
+ /*
+ * In case of tiled displays there could be one or more slaves, but there is
+ * only one master. Let's make the CRTC used by the connector corresponding
+ * to the last horizontal and last vertical tile a master/genlock CRTC.
+ * All the other CRTCs corresponding to other tiles of the same tile group
+ * are the slave CRTCs and hold a pointer to their genlock CRTC.
+ * If not all tiles are present, do not make master/slave assignments.
+ */
+ if (!connector->has_tile ||
+ crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
+ num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+ reset_port_sync_mode_state(crtc_state);
+ return 0;
+ }
+ /* The last horizontal and last vertical tile connector is the master;
+ * its crtc state is already populated in the slave for port sync.
+ */
+ if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ return 0;
+
+ /* Loop through all connectors and configure the slave crtc_state
+ * to point to the correct master.
+ */
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(master_connector, &conn_iter) {
+ struct drm_connector_state *master_conn_state = NULL;
+
+ if (!(master_connector->has_tile &&
+ master_connector->tile_group->id == connector->tile_group->id))
+ continue;
+ if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+ master_connector->tile_v_loc != master_connector->num_v_tile - 1)
+ continue;
+
+ master_conn_state = drm_atomic_get_connector_state(&state->base,
+ master_connector);
+ if (IS_ERR(master_conn_state)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return PTR_ERR(master_conn_state);
+ }
+ if (master_conn_state->crtc) {
+ master_crtc = master_conn_state->crtc;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!master_crtc) {
+ DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+
+ master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+ master_crtc);
+ if (IS_ERR(master_crtc_state))
+ return PTR_ERR(master_crtc_state);
+
+ master_pipe_config = to_intel_crtc_state(master_crtc_state);
+ crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+ master_pipe_config->sync_mode_slaves_mask |=
+ BIT(crtc_state->cpu_transcoder);
+ DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+ transcoder_name(crtc_state->master_transcoder),
+ crtc->base.id,
+ master_pipe_config->sync_mode_slaves_mask);
+
+ return 0;
+}
+
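A side note on the tile bookkeeping used by intel_atomic_is_master_connector() and icl_compute_port_sync_crtc_state() above: the master is simply the connector sitting in the last horizontal and last vertical tile position. A self-contained sketch of that rule follows; tile_info and tile_is_master are hypothetical names for illustration, not i915 symbols:

	#include <linux/types.h>

	struct tile_info {
		int h_loc, v_loc;	/* this connector's tile coordinates */
		int num_h, num_v;	/* tile counts in each direction */
	};

	static bool tile_is_master(const struct tile_info *t)
	{
		return t->h_loc == t->num_h - 1 &&
		       t->v_loc == t->num_v - 1;
	}

	/*
	 * Example: on a 2x2 tiled panel only the connector at (1, 1) is the
	 * master; the other tiles are slaves and record the master's
	 * transcoder in crtc_state->master_transcoder.
	 */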
+static int intel_crtc_atomic_check(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
bool mode_changed = needs_modeset(crtc_state);
+ int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->active)
- pipe_config->update_wm_post = true;
+ mode_changed && !crtc_state->hw.active)
+ crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->enable &&
+ if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(pipe_config->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- pipe_config);
+ !WARN_ON(crtc_state->shared_dpll)) {
+ ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
}
@@ -11627,19 +12541,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* May need to update pipe gamma enable bits
* when C8 planes are getting enabled/disabled.
*/
- if (c8_planes_changed(pipe_config))
- crtc_state->color_mgmt_changed = true;
+ if (c8_planes_changed(crtc_state))
+ crtc_state->uapi.color_mgmt_changed = true;
- if (mode_changed || pipe_config->update_pipe ||
- crtc_state->color_mgmt_changed) {
- ret = intel_color_check(pipe_config);
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->uapi.color_mgmt_changed) {
+ ret = intel_color_check(crtc_state);
if (ret)
return ret;
}
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(pipe_config);
+ ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
return ret;
@@ -11655,7 +12569,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(pipe_config);
+ ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
@@ -11663,29 +12577,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || pipe_config->update_pipe)
- ret = skl_update_scaler_crtc(pipe_config);
-
- if (!ret)
- ret = icl_check_nv12_planes(pipe_config);
- if (!ret)
- ret = skl_check_pipe_max_pixel_rate(intel_crtc,
- pipe_config);
+ if (mode_changed || crtc_state->update_pipe)
+ ret = skl_update_scaler_crtc(crtc_state);
if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
- pipe_config);
+ ret = intel_atomic_setup_scalers(dev_priv, crtc,
+ crtc_state);
}
if (HAS_IPS(dev_priv))
- pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+ crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_check = intel_crtc_atomic_check,
-};
-
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -11754,7 +12658,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -11811,7 +12715,7 @@ static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
const union hdmi_infoframe *frame)
{
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
@@ -11879,14 +12783,14 @@ static const char *output_formats(enum intel_output_format format)
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
- yesno(plane_state->base.visible));
+ yesno(plane_state->uapi.visible));
return;
}
@@ -11894,20 +12798,20 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height,
drm_get_format_name(fb->format->format, &format_name),
- yesno(plane_state->base.visible));
+ yesno(plane_state->uapi.visible));
DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
- plane_state->base.rotation, plane_state->scaler_id);
- if (plane_state->base.visible)
+ plane_state->hw.rotation, plane_state->scaler_id);
+ if (plane_state->uapi.visible)
DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->base.src),
- DRM_RECT_ARG(&plane_state->base.dst));
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
@@ -11916,14 +12820,14 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->base.enable), context);
+ yesno(pipe_config->hw.enable), context);
- if (!pipe_config->base.enable)
+ if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->base.active),
+ yesno(pipe_config->hw.active),
buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
@@ -11963,10 +12867,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
DRM_DEBUG_KMS("requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->base.mode);
+ drm_mode_debug_printmodeline(&pipe_config->hw.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
+ drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h,
@@ -11995,6 +12899,18 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
+ if (IS_CHERRYVIEW(dev_priv))
+ DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ DRM_DEBUG_KMS("MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
dump_planes:
if (!state)
return;
@@ -12015,6 +12931,12 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
bool ret = true;
/*
+ * We're going to peek into connector->state,
+ * hence connection_mutex must be held.
+ */
+ drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+
+ /*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
@@ -12071,55 +12993,98 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
return ret;
}
+static void
+intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_copy_color_blobs(crtc_state);
+}
+
+static void
+intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->hw.enable = crtc_state->uapi.enable;
+ crtc_state->hw.active = crtc_state->uapi.active;
+ crtc_state->hw.mode = crtc_state->uapi.mode;
+ crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
+}
+
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->uapi.enable = crtc_state->hw.enable;
+ crtc_state->uapi.active = crtc_state->hw.active;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+ crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+
+ /* copy color blobs to uapi */
+ drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.ctm,
+ crtc_state->hw.ctm);
+}
+
static int
-clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
- saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+ saved_state = intel_crtc_state_alloc(crtc);
if (!saved_state)
return -ENOMEM;
+ /* free the old crtc_state->hw members */
+ intel_crtc_free_hw_state(crtc_state);
+
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
* only fields that are known not to cause problems are preserved. */
+ saved_state->uapi = crtc_state->uapi;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+ memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
+ sizeof(saved_state->icl_port_dplls));
saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
+ /*
+ * Save the slave bitmask which gets filled for the master crtc state during
+ * the slave atomic check call. For all other CRTCs reset the port sync
+ * variables; crtc_state->master_transcoder needs to be set to INVALID.
+ */
+ reset_port_sync_mode_state(saved_state);
+ if (intel_atomic_is_master_connector(crtc_state))
+ saved_state->sync_mode_slaves_mask =
+ crtc_state->sync_mode_slaves_mask;
- /* Keep base drm_crtc_state intact, only clear our extended struct */
- BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
- memcpy(&crtc_state->base + 1, &saved_state->base + 1,
- sizeof(*crtc_state) - sizeof(crtc_state->base));
-
+ memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
+
+ intel_crtc_copy_uapi_to_hw_state(crtc_state);
+
return 0;
}
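The FIXME in intel_crtc_prepare_cleared_state() describes the pattern used here: rather than memcpy'ing past a base member, the function rebuilds the state from a freshly reset copy into which an explicit whitelist of fields is carried over. A toy sketch of the same pattern; toy_state and its fields are made up for illustration and are not i915 symbols:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct toy_state {
		int keep_me;	/* stands in for scaler_state, dpll state, wm, ... */
		int scratch;	/* everything else must go back to defaults */
	};

	static int toy_prepare_cleared_state(struct toy_state *s)
	{
		struct toy_state *saved = kzalloc(sizeof(*saved), GFP_KERNEL);

		if (!saved)
			return -ENOMEM;

		saved->keep_me = s->keep_me;	/* explicit whitelist */
		memcpy(s, saved, sizeof(*s));	/* everything else reset to defaults */
		kfree(saved);

		return 0;
	}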
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc = pipe_config->uapi.crtc;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int base_bpp, ret;
- int i;
+ int i, tile_group_id = -1, num_tiled_conns = 0;
bool retry = true;
- ret = clear_intel_crtc_state(pipe_config);
- if (ret)
- return ret;
-
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -12128,13 +13093,13 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->base.adjusted_mode.flags &
+ if (!(pipe_config->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->base.adjusted_mode.flags &
+ if (!(pipe_config->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
pipe_config);
@@ -12151,7 +13116,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_get_hv_timing(&pipe_config->base.mode,
+ drm_mode_get_hv_timing(&pipe_config->hw.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
@@ -12184,9 +13149,27 @@ encoder_retry:
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+ drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
+ /* Get tile_group_id of tiled connector */
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc == crtc &&
+ connector->has_tile) {
+ tile_group_id = connector->tile_group->id;
+ break;
+ }
+ }
+
+ /* Get total number of tiled connectors in state that belong to
+ * this tile group.
+ */
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector->has_tile &&
+ connector->tile_group->id == tile_group_id)
+ num_tiled_conns++;
+ }
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -12195,6 +13178,14 @@ encoder_retry:
if (connector_state->crtc != crtc)
continue;
+ ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
+ num_tiled_conns);
+ if (ret) {
+ DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+ ret);
+ return ret;
+ }
+
encoder = to_intel_encoder(connector_state->best_encoder);
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
@@ -12209,7 +13200,7 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
+ pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -12238,6 +13229,12 @@ encoder_retry:
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ /*
+ * Make drm_calc_timestamping_constants in
+ * drm_atomic_helper_update_legacy_modeset_state() happy
+ */
+ pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
+
return 0;
}
@@ -12316,25 +13313,26 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
const union hdmi_infoframe *b)
{
if (fastset) {
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
- drm_dbg(DRM_UT_KMS, "expected:");
+ DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
+ DRM_DEBUG_KMS("expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg(DRM_UT_KMS, "found");
+ DRM_DEBUG_KMS("found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- drm_err("mismatch in %s infoframe", name);
- drm_err("expected:");
+ DRM_ERROR("mismatch in %s infoframe\n", name);
+ DRM_ERROR("expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err("found");
+ DRM_ERROR("found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
-static void __printf(3, 4)
-pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
+static void __printf(4, 5)
+pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -12344,9 +13342,11 @@ pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
vaf.va = &args;
if (fastset)
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- drm_err("mismatch in %s %pV", name, &vaf);
+ DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
@@ -12373,11 +13373,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
- struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
+ u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
- (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
- !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
+ (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+ !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
@@ -12386,8 +13388,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected 0x%08x, found 0x%08x)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12396,8 +13398,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12406,8 +13408,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12423,8 +13425,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12433,8 +13435,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %p, found %p)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12445,9 +13447,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
!fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12472,10 +13474,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
&pipe_config->name, !fastset) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, !fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12497,8 +13499,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(%x) (expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -12508,8 +13510,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12526,6 +13528,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
+ if (current_config->name1 != pipe_config->name1) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name1), \
+ "(expected %i, found %i, won't compare lut values)", \
+ current_config->name1, \
+ pipe_config->name1); \
+ ret = false;\
+ } else { \
+ if (!intel_color_lut_equal(current_config->name2, \
+ pipe_config->name2, pipe_config->name1, \
+ bit_precision)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name2), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
+ } \
+ } \
+} while (0)
+
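Because the new PIPE_CONF_CHECK_COLOR_LUT macro is used only once further down, as PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma), a rough hand-expansion of that single invocation may help; this block illustrates what the preprocessor produces (minus the do { } while (0) wrapper) and is not additional patch content:

	if (current_config->gamma_mode != pipe_config->gamma_mode) {
		pipe_config_mismatch(fastset, crtc, "gamma_mode",
				     "(expected %i, found %i, won't compare lut values)",
				     current_config->gamma_mode,
				     pipe_config->gamma_mode);
		ret = false;
	} else {
		if (!intel_color_lut_equal(current_config->hw.gamma_lut,
					   pipe_config->hw.gamma_lut,
					   pipe_config->gamma_mode,
					   bp_gamma)) {
			pipe_config_mismatch(fastset, crtc, "hw.gamma_lut",
					     "hw_state doesn't match sw_state");
			ret = false;
		}
	}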
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -12548,22 +13568,23 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(output_types);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12572,20 +13593,21 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
@@ -12621,6 +13643,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(csc_mode);
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+
+ bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
+ if (bp_gamma)
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
+
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12664,7 +13691,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
@@ -12676,6 +13703,15 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_I(master_transcoder);
+
+ PIPE_CONF_CHECK_I(dsc.compression_enable);
+ PIPE_CONF_CHECK_I(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+
+ PIPE_CONF_CHECK_I(mst_master_transcoder);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12683,6 +13719,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK
return ret;
@@ -12694,7 +13731,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
if (pipe_config->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
- int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+ int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* FDI already provided one idea for the dotclock.
@@ -12706,10 +13743,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
}
}
-static void verify_wm_state(struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+static void verify_wm_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
@@ -12719,21 +13756,20 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const enum pipe pipe = intel_crtc->pipe;
+ const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return;
- skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
- sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+ sw_wm = &new_crtc_state->wm.skl.optimal;
- skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -12781,7 +13817,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
/* DDB */
hw_ddb_entry = &hw->ddb_y[plane];
- sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -12833,7 +13869,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
/* DDB */
hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
- sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12847,23 +13883,22 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
static void
-verify_connector_state(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_crtc *crtc)
+verify_connector_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc_state *crtc_state = NULL;
+ struct intel_crtc_state *crtc_state = NULL;
- if (new_conn_state->crtc != crtc)
+ if (new_conn_state->crtc != &crtc->base)
continue;
if (crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
intel_connector_verify_state(crtc_state, new_conn_state);
@@ -12873,14 +13908,14 @@ verify_connector_state(struct drm_device *dev,
}
static void
-verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
@@ -12888,7 +13923,7 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
encoder->base.base.id,
encoder->base.name);
- for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+ for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
if (old_conn_state->best_encoder == &encoder->base)
found = true;
@@ -12922,50 +13957,50 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
}
static void
-verify_crtc_state(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+verify_crtc_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config, *sw_config;
- struct drm_atomic_state *old_state;
+ struct intel_crtc_state *pipe_config = old_crtc_state;
+ struct drm_atomic_state *state = old_crtc_state->uapi.state;
bool active;
- old_state = old_crtc_state->state;
- __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
- pipe_config = to_intel_crtc_state(old_crtc_state);
- memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = crtc;
- pipe_config->base.state = old_state;
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+ intel_crtc_free_hw_state(old_crtc_state);
+ intel_crtc_state_reset(old_crtc_state, crtc);
+ old_crtc_state->uapi.state = state;
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
- active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
+ active = dev_priv->display.get_pipe_config(crtc, pipe_config);
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- active = new_crtc_state->active;
+ active = new_crtc_state->hw.active;
- I915_STATE_WARN(new_crtc_state->active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", new_crtc_state->active, active);
+ I915_STATE_WARN(new_crtc_state->hw.active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n",
+ new_crtc_state->hw.active, active);
- I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
+ I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n",
+ new_crtc_state->hw.active, crtc->active);
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enum pipe pipe;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, new_crtc_state->active);
+ I915_STATE_WARN(active != new_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ new_crtc_state->hw.active);
- I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ I915_STATE_WARN(active && crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
@@ -12975,16 +14010,16 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (!new_crtc_state->active)
+ if (!new_crtc_state->hw.active)
return;
intel_pipe_config_sanity_check(dev_priv, pipe_config);
- sw_config = to_intel_crtc_state(new_crtc_state);
- if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
+ if (!intel_pipe_config_compare(new_crtc_state,
+ pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
- intel_dump_pipe_config(sw_config, NULL, "[sw state]");
+ intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
}
}
@@ -12997,15 +14032,15 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->slave ||
- plane_state->base.visible);
+ assert_plane(plane, plane_state->planar_slave ||
+ plane_state->uapi.visible);
}
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
- struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state;
unsigned int crtc_mask;
@@ -13035,16 +14070,16 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
return;
}
- crtc_mask = drm_crtc_mask(crtc);
+ crtc_mask = drm_crtc_mask(&crtc->base);
- if (new_state->active)
+ if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -13057,51 +14092,47 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
}
static void
-verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+verify_shared_dpll_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
- struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (new_state->shared_dpll)
- verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+ if (new_crtc_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
- if (old_state->shared_dpll &&
- old_state->shared_dpll != new_state->shared_dpll) {
- unsigned int crtc_mask = drm_crtc_mask(crtc);
- struct intel_shared_dpll *pll = old_state->shared_dpll;
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(crtc)));
+ pipe_name(drm_crtc_index(&crtc->base)));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(crtc)));
+ pipe_name(drm_crtc_index(&crtc->base)));
}
}
static void
-intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_state) &&
- !to_intel_crtc_state(new_state)->update_pipe)
+ if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
- verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, state, crtc);
- verify_crtc_state(crtc, old_state, new_state);
- verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+ verify_wm_state(crtc, new_crtc_state);
+ verify_connector_state(state, crtc);
+ verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+ verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}
static void
-verify_disabled_dpll_state(struct drm_device *dev)
+verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13109,18 +14140,23 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev,
- struct drm_atomic_state *state)
+intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state)
{
- verify_encoder_state(dev, state);
- verify_connector_state(dev, state, NULL);
- verify_disabled_dpll_state(dev);
+ verify_encoder_state(dev_priv, state);
+ verify_connector_state(state, NULL);
+ verify_disabled_dpll_state(dev_priv);
}
-static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
+static void
+intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13150,7 +14186,6 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
* answer that's slightly in the future.
*/
if (IS_GEN(dev_priv, 2)) {
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -13161,34 +14196,26 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
} else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
- } else
+ } else {
crtc->scanline_offset = 1;
+ }
}
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
if (!dev_priv->display.crtc_compute_clock)
return;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct intel_shared_dpll *old_dpll =
- old_crtc_state->shared_dpll;
-
- if (!needs_modeset(&new_crtc_state->base))
- continue;
-
- new_crtc_state->shared_dpll = NULL;
-
- if (!old_dpll)
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
continue;
- intel_release_shared_dpll(old_dpll, crtc, &state->base);
+ intel_release_shared_dplls(state, crtc);
}
}
@@ -13198,7 +14225,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
-static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
+static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
@@ -13209,8 +14236,8 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled during the modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->base.active ||
- !needs_modeset(&crtc_state->base))
+ if (!crtc_state->hw.active ||
+ !needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -13234,8 +14261,8 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- if (!crtc_state->base.active ||
- needs_modeset(&crtc_state->base))
+ if (!crtc_state->hw.active ||
+ needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -13253,158 +14280,47 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_lock_all_pipes(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
-
- /* Add all pipes to the state */
- for_each_crtc(state->dev, crtc) {
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int intel_modeset_all_pipes(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
-
- /*
- * Add all pipes to the state, and force
- * a modeset on all the active ones.
- */
- for_each_crtc(state->dev, crtc) {
- struct drm_crtc_state *crtc_state;
- int ret;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (!crtc_state->active || needs_modeset(crtc_state))
- continue;
-
- crtc_state->mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ret;
-
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int intel_modeset_checks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
- int ret = 0, i;
-
- if (!check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return -EINVAL;
- }
+ int ret, i;
/* keep the current setting */
if (!state->cdclk.force_min_cdclk_changed)
state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
state->modeset = true;
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
state->cdclk.logical = dev_priv->cdclk.logical;
state->cdclk.actual = dev_priv->cdclk.actual;
- state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->base.active)
- state->active_crtcs |= 1 << i;
+ if (new_crtc_state->hw.active)
+ state->active_pipes |= BIT(crtc->pipe);
else
- state->active_crtcs &= ~(1 << i);
+ state->active_pipes &= ~BIT(crtc->pipe);
- if (old_crtc_state->base.active != new_crtc_state->base.active)
- state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
+ if (old_crtc_state->hw.active != new_crtc_state->hw.active)
+ state->active_pipe_changes |= BIT(crtc->pipe);
}
- /*
- * See if the config requires any additional preparation, e.g.
- * to adjust global state with pipes off. We need to do this
- * here so we can get the modeset_pipe updated config for the new
- * mode set on this crtc. For other crtcs we need to use the
- * adjusted_mode bits in the crtc directly.
- */
- if (dev_priv->display.modeset_calc_cdclk) {
- enum pipe pipe;
-
- ret = dev_priv->display.modeset_calc_cdclk(state);
- if (ret < 0)
+ if (state->active_pipe_changes) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
return ret;
-
- /*
- * Writes to dev_priv->cdclk.logical must protected by
- * holding all the crtc locks, even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_lock_all_pipes(&state->base);
- if (ret < 0)
- return ret;
- }
-
- if (is_power_of_2(state->active_crtcs)) {
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
-
- pipe = ilog2(state->active_crtcs);
- crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
- crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
- if (crtc_state && needs_modeset(crtc_state))
- pipe = INVALID_PIPE;
- } else {
- pipe = INVALID_PIPE;
- }
-
- /* All pipes must be switched off while we change the cdclk. */
- if (pipe != INVALID_PIPE &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_lock_all_pipes(&state->base);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = pipe;
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(&state->base);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = INVALID_PIPE;
- }
-
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
}
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
- return haswell_mode_set_planes_workaround(state);
+ return hsw_mode_set_planes_workaround(state);
return 0;
}
@@ -13432,9 +14348,13 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
return;
- new_crtc_state->base.mode_changed = false;
+ new_crtc_state->uapi.mode_changed = false;
new_crtc_state->update_pipe = true;
+}
+static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
/*
* If we're not doing the full modeset we want to
* keep the current M/N values as they may be
@@ -13449,6 +14369,201 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
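
A small, self-contained sketch of the check/copy split above may help: one helper decides whether a full modeset can be downgraded to a fastset, a second carries over the values the hardware is already running with. Every name here (fake_crtc_state, configs_match_fastset) is invented for illustration; the real code compares the complete pipe config via intel_pipe_config_compare() and also preserves the M/N link values elided between the hunks.

#include <stdbool.h>

struct fake_crtc_state {
	bool mode_changed;	/* full modeset requested */
	bool update_pipe;	/* fastset: update the pipe without a full modeset */
	int link_m, link_n;	/* link parameters that must survive a fastset */
	bool has_drrs;
};

/* stand-in for intel_pipe_config_compare(old, new, fastset = true) */
bool configs_match_fastset(const struct fake_crtc_state *old_state,
			   const struct fake_crtc_state *new_state)
{
	return old_state->has_drrs == new_state->has_drrs;
}

void check_fastset(const struct fake_crtc_state *old_state,
		   struct fake_crtc_state *new_state)
{
	if (!configs_match_fastset(old_state, new_state))
		return;

	new_state->mode_changed = false;	/* downgrade to a fastset */
	new_state->update_pipe = true;
}

void copy_fastset(const struct fake_crtc_state *old_state,
		  struct fake_crtc_state *new_state)
{
	/* keep whatever the hardware is currently programmed with */
	new_state->link_m = old_state->link_m;
	new_state->link_n = old_state->link_n;
	new_state->has_drrs = old_state->has_drrs;
}
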
+static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ u8 plane_ids_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if ((plane_ids_mask & BIT(plane->id)) == 0)
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
+{
+ /* See {hsw,vlv,ivb}_plane_ratio() */
+ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv);
+}
+
+static int intel_atomic_check_planes(struct intel_atomic_state *state,
+ bool *need_modeset)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = icl_add_linked_planes(state);
+ if (ret)
+ return ret;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_atomic_check(state, plane);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
+ return ret;
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ u8 old_active_planes, new_active_planes;
+
+ ret = icl_check_nv12_planes(new_crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * On some platforms the number of active planes affects
+ * the planes' minimum cdclk calculation. Add such planes
+ * to the state before we compute the minimum cdclk.
+ */
+ if (!active_planes_affects_min_cdclk(dev_priv))
+ continue;
+
+ old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ continue;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * active_planes bitmask has been updated, and potentially
+ * affected planes are part of the state. We can now
+ * compute the minimum cdclk for each plane.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+
+ return 0;
+}
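
The active_planes handling above reduces to a popcount comparison with the cursor masked out: only when the number of non-cursor planes changes do the remaining planes need to be added to the state before the minimum cdclk is recomputed. A standalone sketch of that test, with BIT()/hweight8() reimplemented so it compiles on its own and a made-up cursor plane id (not i915 code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_BIT(n)		(1u << (n))
#define EXAMPLE_CURSOR_ID	7

unsigned int popcount8(uint8_t mask)
{
	unsigned int count = 0;

	while (mask) {
		count += mask & 1;
		mask >>= 1;
	}
	return count;
}

/* true if the non-cursor plane count changed, i.e. min cdclk must be redone */
bool plane_count_changed(uint8_t old_active, uint8_t new_active)
{
	old_active &= ~EXAMPLE_BIT(EXAMPLE_CURSOR_ID);
	new_active &= ~EXAMPLE_BIT(EXAMPLE_CURSOR_ID);

	return popcount8(old_active) != popcount8(new_active);
}

int main(void)
{
	/* plane 1 off, plane 2 on: count unchanged, no recheck needed */
	printf("%d\n", plane_count_changed(EXAMPLE_BIT(0) | EXAMPLE_BIT(1),
					   EXAMPLE_BIT(0) | EXAMPLE_BIT(2)));
	/* plane 2 added on top: count changed, pull planes into the state */
	printf("%d\n", plane_count_changed(EXAMPLE_BIT(1),
					   EXAMPLE_BIT(1) | EXAMPLE_BIT(2)));
	return 0;
}
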
+
+static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret = intel_crtc_atomic_check(state, crtc);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
+ u8 transcoders)
+{
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ transcoders & BIT(new_crtc_state->cpu_transcoder) &&
+ needs_modeset(new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
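
intel_cpu_transcoders_need_modeset() is used further down in intel_atomic_check() to force a CRTC off the fastset path when a transcoder it depends on (its MST master transcoder, or any member of its port sync group) is itself doing a full modeset. A self-contained sketch of that dependency test, with an invented example_crtc struct standing in for the real CRTC state:

#include <stdbool.h>
#include <stdint.h>

struct example_crtc {
	unsigned int transcoder;	/* 0..7 */
	bool enabled;
	bool needs_modeset;
};

bool transcoders_need_modeset(const struct example_crtc *crtcs, int num_crtcs,
			      uint8_t transcoder_mask)
{
	int i;

	for (i = 0; i < num_crtcs; i++) {
		if (crtcs[i].enabled &&
		    (transcoder_mask & (1u << crtcs[i].transcoder)) &&
		    crtcs[i].needs_modeset)
			return true;
	}
	return false;
}

/* force a full modeset on @crtc if anything in @dependency_mask needs one */
void force_modeset_if_needed(struct example_crtc *crtc,
			     const struct example_crtc *crtcs, int num_crtcs,
			     uint8_t dependency_mask)
{
	if (!crtc->needs_modeset &&
	    transcoders_need_modeset(crtcs, num_crtcs, dependency_mask))
		crtc->needs_modeset = true;
}
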
+
+static int
+intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_grp_id)
+ continue;
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ if (!conn_state->crtc)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(&state->base,
+ conn_state->crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+ crtc_state->mode_changed = true;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ conn_state->crtc);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int
+intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i, ret;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * The connector is tiled and changed: mark all other CRTCs in
+ * its tile group as needing a modeset too.
+ */
+ for_each_oldnew_connector_in_state(&state->base, connector,
+ old_conn_state, new_conn_state, i) {
+ if (!connector->has_tile)
+ continue;
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ ret = intel_modeset_all_tiles(state, connector->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
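
The two helpers above propagate a modeset across a tile group: once one connector of a tiled display needs a modeset, every CRTC driving a connector of the same tile group is pulled into the modeset as well, so both halves of the panel switch together. A compact, self-contained sketch of that propagation with made-up types (not drm/i915 API):

#include <stdbool.h>

struct example_connector {
	bool has_tile;
	int tile_group_id;
	int crtc;		/* index of the driving CRTC, -1 if none */
	bool needs_modeset;	/* this connector changed in the commit */
};

void propagate_tile_modeset(const struct example_connector *conns,
			    int num_conns, bool *crtc_mode_changed)
{
	int i, j;

	for (i = 0; i < num_conns; i++) {
		if (!conns[i].has_tile || !conns[i].needs_modeset)
			continue;

		/* pull every sibling of this tile group into the modeset */
		for (j = 0; j < num_conns; j++) {
			if (!conns[j].has_tile ||
			    conns[j].tile_group_id != conns[i].tile_group_id ||
			    conns[j].crtc < 0)
				continue;

			crtc_mode_changed[conns[j].crtc] = true;
		}
	}
}
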
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13462,44 +14577,124 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = state->cdclk.force_min_cdclk_changed;
+ bool any_ms = false;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->base.mode.private_flags !=
- old_crtc_state->base.mode.private_flags)
- new_crtc_state->base.mode_changed = true;
+ if (new_crtc_state->hw.mode.private_flags !=
+ old_crtc_state->hw.mode.private_flags)
+ new_crtc_state->uapi.mode_changed = true;
}
ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
goto fail;
+ /*
+ * This check adds all the connectors in the current state that belong
+ * to the same tile group to a full modeset. It sets mode_changed to
+ * true directly and also calls drm_atomic_add_affected_connectors(),
+ * hence we do not explicitly call drm_atomic_helper_check_modeset()
+ * again after this.
+ *
+ * FIXME: Handle the corner case where one of the tiled connectors
+ * gets disconnected and its tile info is lost, but since it was
+ * previously synced to the other connectors it still needs to be
+ * added to the modeset.
+ */
+ ret = intel_atomic_check_tiled_conns(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(&new_crtc_state->base))
+ if (!needs_modeset(new_crtc_state)) {
+ /* Light copy */
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
+
continue;
+ }
- if (!new_crtc_state->base.enable) {
- any_ms = true;
+ if (!new_crtc_state->uapi.enable) {
+ intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
continue;
}
+ ret = intel_crtc_prepare_cleared_state(new_crtc_state);
+ if (ret)
+ goto fail;
+
ret = intel_modeset_pipe_config(new_crtc_state);
if (ret)
goto fail;
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ /*
+ * Check if fastset is allowed by external dependencies like other
+ * pipes and transcoders.
+ *
+ * Right now it only forces a fullmodeset when the MST master
+ * transcoder did not change but the pipe of the master transcoder
+ * needs a fullmodeset, so all slaves also need to do a fullmodeset.
+ * Likewise, in case of port synced crtcs, if one of the synced crtcs
+ * needs a full modeset, all the other synced crtcs are forced to do
+ * a full modeset as well.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
+ enum transcoder master = new_crtc_state->mst_master_transcoder;
+
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
- if (needs_modeset(&new_crtc_state->base))
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ u8 trans = new_crtc_state->sync_mode_slaves_mask |
+ BIT(new_crtc_state->master_transcoder);
+
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (needs_modeset(new_crtc_state)) {
any_ms = true;
+ continue;
+ }
+
+ if (!new_crtc_state->update_pipe)
+ continue;
+
+ intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ if (any_ms && !check_digital_port_conflicts(state)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ ret = -EINVAL;
+ goto fail;
}
ret = drm_dp_mst_atomic_check(&state->base);
if (ret)
goto fail;
+ any_ms |= state->cdclk.force_min_cdclk_changed;
+
+ ret = intel_atomic_check_planes(state, &any_ms);
+ if (ret)
+ goto fail;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
@@ -13508,11 +14703,7 @@ static int intel_atomic_check(struct drm_device *dev,
state->cdclk.logical = dev_priv->cdclk.logical;
}
- ret = icl_add_linked_planes(state);
- if (ret)
- goto fail;
-
- ret = drm_atomic_helper_check_planes(dev, &state->base);
+ ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
@@ -13527,12 +14718,12 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(&new_crtc_state->base) &&
+ if (!needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(&new_crtc_state->base) ?
+ needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -13553,10 +14744,10 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
-static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state)
+static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
- return drm_atomic_helper_prepare_planes(dev, state);
+ return drm_atomic_helper_prepare_planes(state->base.dev,
+ &state->base);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -13567,60 +14758,239 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
- return dev->driver->get_vblank_counter(dev, crtc->pipe);
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
-static void intel_update_crtc(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ if (crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+}
+
+static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ */
+ intel_set_pipe_src_size(new_crtc_state);
+
+ /* on skylake this is done by detaching scalers */
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_detach_scalers(new_crtc_state);
+
+ if (new_crtc_state->pch_pfit.enabled)
+ skl_pfit_enable(new_crtc_state);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (new_crtc_state->pch_pfit.enabled)
+ ilk_pfit_enable(new_crtc_state);
+ else if (old_crtc_state->pch_pfit.enabled)
+ ilk_pfit_disable(old_crtc_state);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_set_pipe_chicken(crtc);
+}
+
+static void commit_pipe_config(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ /*
+ * During modesets the pipe configuration was programmed as the
+ * CRTC was enabled.
+ */
+ if (!modeset) {
+ if (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_detach_scalers(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
+ if (new_crtc_state->update_pipe)
+ intel_pipe_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(state, crtc);
+}
+
+static void intel_update_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
- to_intel_plane(crtc->primary));
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
if (modeset) {
- update_scanline_offset(pipe_config);
- dev_priv->display.crtc_enable(pipe_config, state);
+ intel_crtc_update_active_timings(new_crtc_state);
+
+ dev_priv->display.crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
- intel_crtc_enable_pipe_crc(intel_crtc);
+ intel_crtc_enable_pipe_crc(crtc);
} else {
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
- pipe_config);
+ if (new_crtc_state->preload_luts &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
+
+ intel_pre_plane_update(state, crtc);
- if (pipe_config->update_pipe)
- intel_encoders_update_pipe(crtc, pipe_config, state);
+ if (new_crtc_state->update_pipe)
+ intel_encoders_update_pipe(state, crtc);
}
- if (pipe_config->update_pipe && !pipe_config->enable_fbc)
- intel_fbc_disable(intel_crtc);
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
else if (new_plane_state)
- intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
- intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
- skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ skl_update_planes_on_crtc(state, crtc);
else
- i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ i9xx_update_planes_on_crtc(state, crtc);
+
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ enum transcoder slave_transcoder;
+
+ WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
- intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+ slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
+ return intel_get_crtc_for_pipe(dev_priv,
+ (enum pipe)slave_transcoder);
}
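
intel_get_slave_crtc() relies on sync_mode_slaves_mask holding exactly one set bit, so ffs(mask) - 1 recovers the slave transcoder number, which is then reused as a pipe index. A tiny standalone sketch of that bit-to-index trick, with plain C stand-ins for the kernel's is_power_of_2()/ffs():

#include <stdbool.h>

bool single_bit_set(unsigned int mask)
{
	return mask != 0 && (mask & (mask - 1)) == 0;
}

int mask_to_index(unsigned int mask)
{
	int index = 0;

	if (!single_bit_set(mask))
		return -1;	/* zero or multiple slaves: caller bug */

	while (!(mask & 1)) {
		mask >>= 1;
		index++;
	}
	return index;		/* e.g. mask 0x4 -> index 2 */
}
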
-static void intel_update_crtcs(struct drm_atomic_state *state)
+static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_disable_planes(state, crtc);
+
+ /*
+ * We need to disable pipe CRC before disabling the pipe,
+ * or we race against vblank off.
+ */
+ intel_crtc_disable_pipe_crc(crtc);
+
+ dev_priv->display.crtc_disable(state, crtc);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
+
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->hw.active &&
+ !HAS_GMCH(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+}
+
+static void intel_commit_modeset_disables(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ u32 handled = 0;
int i;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active)
+ /* Only disable port sync and MST slaves */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+ if (!old_crtc_state->hw.active)
+ continue;
+
+ /* In case of Transcoder Port Sync, master and slave CRTCs can be
+ * assigned in any order, so we need to make sure that the slave
+ * CRTCs are disabled first and the master CRTC last, since slave
+ * vblanks are masked until the master's vblank.
+ */
+ if (!is_trans_port_sync_slave(old_crtc_state) &&
+ !intel_dp_mst_is_slave_trans(old_crtc_state))
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
+ handled |= BIT(crtc->pipe);
+ }
+
+ /* Disable everything else left on */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state) ||
+ (handled & BIT(crtc->pipe)))
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ if (old_crtc_state->hw.active)
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
+ }
+}
+
+static void intel_commit_modeset_enables(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
continue;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -13628,26 +14998,141 @@ static void intel_update_crtcs(struct drm_atomic_state *state)
}
}
-static void skl_update_crtcs(struct drm_atomic_state *state)
+static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc_state *cstate;
- unsigned int updated = 0;
- bool progress;
- enum pipe pipe;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_update_active_timings(new_crtc_state);
+ dev_priv->display.crtc_enable(state, crtc);
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct drm_connector *uninitialized_var(conn);
+ struct drm_connector_state *conn_state;
+ struct intel_dp *intel_dp;
int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ if (conn_state->crtc == &crtc->base)
+ break;
+ }
+ intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
+ intel_dp_stop_link_train(intel_dp);
+}
+
+/*
+ * TODO: This is only called from port sync and it is identical to what will be
+ * executed again in intel_update_crtc() over port sync pipes
+ */
+static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
+ else if (new_plane_state)
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ skl_update_planes_on_crtc(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
+ slave_crtc->base.name);
+
+ /* Enable seq for slave with DP_TP_CTL left Idle until the
+ * master is ready.
+ */
+ intel_crtc_enable_trans_port_sync(slave_crtc,
+ state,
+ new_slave_crtc_state);
+
+ /* Enable seq for master with DP_TP_CTL left Idle */
+ intel_crtc_enable_trans_port_sync(crtc,
+ state,
+ new_crtc_state);
+
+ /* Set Slave's DP_TP_CTL to Normal */
+ intel_set_dp_tp_ctl_normal(slave_crtc,
+ state);
+
+ /* Set Master's DP_TP_CTL To Normal */
+ usleep_range(200, 400);
+ intel_set_dp_tp_ctl_normal(crtc,
+ state);
+
+ /* Now do the post crtc enable updates for both master and slave */
+ intel_post_crtc_enable_updates(slave_crtc,
+ state);
+ intel_post_crtc_enable_updates(crtc,
+ state);
+}
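
The function above encodes an ordering protocol: both port sync pipes are brought up with their DP_TP_CTL left in the Idle pattern, link training is then completed slave first and master last (with a short delay in between), and only afterwards do the usual post-enable pipe/plane updates run for both. A condensed sketch of that ordering with hypothetical callbacks, meant only to show the sequence:

struct port_sync_pipe {
	void (*enable_with_tp_idle)(struct port_sync_pipe *pipe);
	void (*set_tp_normal)(struct port_sync_pipe *pipe);
	void (*post_enable_updates)(struct port_sync_pipe *pipe);
};

void enable_port_sync_pair(struct port_sync_pipe *master,
			   struct port_sync_pipe *slave)
{
	/* 1) enable both pipes while the link stays in the Idle pattern */
	slave->enable_with_tp_idle(slave);
	master->enable_with_tp_idle(master);

	/* 2) complete link training: slave first, then the master */
	slave->set_tp_normal(slave);
	master->set_tp_normal(master);	/* the driver sleeps ~200-400 us first */

	/* 3) regular post-enable plane/pipe updates for both */
	slave->post_enable_updates(slave);
	master->post_enable_updates(master);
}
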
+
+static void skl_commit_modeset_enables(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
+ u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
+ u8 update_pipes = 0, modeset_pipes = 0;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtc's that have been turned off. */
- if (new_crtc_state->active)
- entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ if (!needs_modeset(new_crtc_state)) {
+ entries[i] = old_crtc_state->wm.skl.ddb;
+ update_pipes |= BIT(crtc->pipe);
+ } else {
+ modeset_pipes |= BIT(crtc->pipe);
+ }
+ }
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -13656,30 +15141,29 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
- * never overlap with eachother inbetween CRTC updates. Otherwise we'll
+ * never overlap with each other between CRTC updates. Otherwise we'll
* cause pipe underruns and other bad stuff.
+ *
+ * So first let's enable all pipes that do not need a fullmodeset, as
+ * those don't have any external dependency.
*/
- do {
- progress = false;
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- bool vbl_wait = false;
- unsigned int cmask = drm_crtc_mask(crtc);
-
- intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(new_crtc_state);
- pipe = intel_crtc->pipe;
+ while (update_pipes) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
- if (updated & cmask || !cstate->base.active)
+ if ((update_pipes & BIT(pipe)) == 0)
continue;
- if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
- entries,
- INTEL_INFO(dev_priv)->num_pipes, i))
+ if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i))
continue;
- updated |= cmask;
- entries[i] = cstate->wm.skl.ddb;
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ update_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
/*
* If this is an already active pipe, its DDB changed,
@@ -13687,21 +15171,73 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
- &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
- !new_crtc_state->active_changed &&
- intel_state->wm_results.dirty_pipes != updated)
- vbl_wait = true;
+ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
+ &old_crtc_state->wm.skl.ddb) &&
+ (update_pipes | modeset_pipes))
+ intel_wait_for_vblank(dev_priv, pipe);
+ }
+ }
+
+ /*
+ * Enable all pipes that need a modeset and do not depend on other
+ * pipes.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
+ is_trans_port_sync_slave(new_crtc_state))
+ continue;
+
+ WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i));
+
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ modeset_pipes &= ~BIT(pipe);
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ struct intel_crtc *slave_crtc;
+ intel_update_trans_port_sync_crtcs(crtc, state,
+ old_crtc_state,
+ new_crtc_state);
+
+ slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ /* TODO: update entries[] of slave */
+ modeset_pipes &= ~BIT(slave_crtc->pipe);
+
+ } else {
intel_update_crtc(crtc, state, old_crtc_state,
new_crtc_state);
+ }
+ }
- if (vbl_wait)
- intel_wait_for_vblank(dev_priv, pipe);
+ /*
+ * Finally enable all pipes that need a modeset and depend on other
+ * pipes; right now that is only MST slaves, as both port sync slave
+ * and master are enabled together.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
- progress = true;
- }
- } while (progress);
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i));
+
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ }
+
+ WARN_ON(modeset_pipes);
/* If 2nd DBuf slice is no more required disable it */
if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
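
The update/modeset loops above implement a greedy ordering: a pipe's DDB allocation is only moved to its new position once it no longer overlaps the range any other pipe is still using, and repeatedly committing the pipes that are already safe drains the whole set without transient overlaps. A self-contained sketch of the same idea on plain [start, end) block ranges, with made-up types:

#include <stdbool.h>

struct ddb_range {
	int start, end;		/* end is exclusive */
};

bool ranges_overlap(const struct ddb_range *a, const struct ddb_range *b)
{
	if (a->start >= a->end || b->start >= b->end)
		return false;	/* an empty range never overlaps */

	return a->start < b->end && b->start < a->end;
}

/* commit new_ddb[] over cur[] one pipe at a time, never overlapping */
void commit_ddb(struct ddb_range *cur, const struct ddb_range *new_ddb,
		bool *pending, int num_pipes)
{
	bool progress = true;

	while (progress) {
		int i, j;

		progress = false;

		for (i = 0; i < num_pipes; i++) {
			bool blocked = false;

			if (!pending[i])
				continue;

			for (j = 0; j < num_pipes; j++)
				if (j != i && ranges_overlap(&new_ddb[i], &cur[j]))
					blocked = true;

			if (blocked)
				continue;

			cur[i] = new_ddb[i];	/* the driver programs the pipe here */
			pending[i] = false;
			progress = true;	/* may unblock another pipe */
		}
	}
}
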
@@ -13736,18 +15272,21 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+ prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);
- if (i915_sw_fence_done(&intel_state->commit_ready)
- || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ if (i915_sw_fence_done(&intel_state->commit_ready) ||
+ test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+ finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ I915_RESET_MODESET),
+ &wait_reset);
}
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -13763,85 +15302,46 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
-static void intel_atomic_commit_tail(struct drm_atomic_state *state)
+static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
u64 put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = 0;
int i;
- intel_atomic_commit_fence_wait(intel_state);
+ intel_atomic_commit_fence_wait(state);
- drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_wait_for_dependencies(&state->base);
- if (intel_state->modeset)
+ if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
- intel_crtc = to_intel_crtc(crtc);
-
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
if (needs_modeset(new_crtc_state) ||
- to_intel_crtc_state(new_crtc_state)->update_pipe) {
+ new_crtc_state->update_pipe) {
- put_domains[intel_crtc->pipe] =
- modeset_get_crtc_power_domains(crtc,
- new_intel_crtc_state);
- }
-
- if (!needs_modeset(new_crtc_state))
- continue;
-
- intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
-
- if (old_crtc_state->active) {
- intel_crtc_disable_planes(intel_state, intel_crtc);
-
- /*
- * We need to disable pipe CRC before disabling the pipe,
- * or we race against vblank off.
- */
- intel_crtc_disable_pipe_crc(intel_crtc);
-
- dev_priv->display.crtc_disable(old_intel_crtc_state, state);
- intel_crtc->active = false;
- intel_fbc_disable(intel_crtc);
- intel_disable_shared_dpll(old_intel_crtc_state);
-
- /*
- * Underruns don't always raise
- * interrupts, so check manually.
- */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-
- /* FIXME unify this for all platforms */
- if (!new_crtc_state->active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(intel_state,
- new_intel_crtc_state);
+ put_domains[crtc->pipe] =
+ modeset_get_crtc_power_domains(new_crtc_state);
}
}
- /* FIXME: Eventually get rid of our intel_crtc->config pointer */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
+ intel_commit_modeset_disables(state);
- if (intel_state->modeset) {
- drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+ /* FIXME: Eventually get rid of our crtc->config pointer */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ crtc->config = new_crtc_state;
+
+ if (state->modeset) {
+ drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
intel_set_cdclk_pre_plane_update(dev_priv,
- &intel_state->cdclk.actual,
+ &state->cdclk.actual,
&dev_priv->cdclk.actual,
- intel_state->cdclk.pipe);
+ state->cdclk.pipe);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -13850,31 +15350,38 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev, state);
+ intel_modeset_verify_disabled(dev_priv, state);
}
/* Complete the events for pipes that have now been disabled */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
bool modeset = needs_modeset(new_crtc_state);
/* Complete events for now disable pipes here. */
- if (modeset && !new_crtc_state->active && new_crtc_state->event) {
+ if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ drm_crtc_send_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
spin_unlock_irq(&dev->event_lock);
- new_crtc_state->event = NULL;
+ new_crtc_state->uapi.event = NULL;
}
}
+ if (state->modeset)
+ intel_encoders_update_prepare(state);
+
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.update_crtcs(state);
+ dev_priv->display.commit_modeset_enables(state);
+
+ if (state->modeset) {
+ intel_encoders_update_complete(state);
- if (intel_state->modeset)
intel_set_cdclk_post_plane_update(dev_priv,
- &intel_state->cdclk.actual,
+ &state->cdclk.actual,
&dev_priv->cdclk.actual,
- intel_state->cdclk.pipe);
+ state->cdclk.pipe);
+ }
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
@@ -13885,16 +15392,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need our special handling any more.
*/
- drm_atomic_helper_wait_for_flip_done(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, &state->base);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
- if (new_crtc_state->active &&
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.active &&
!needs_modeset(new_crtc_state) &&
- (new_intel_crtc_state->base.color_mgmt_changed ||
- new_intel_crtc_state->update_pipe))
- intel_color_load_luts(new_intel_crtc_state);
+ !new_crtc_state->preload_luts &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
}
/*
@@ -13904,16 +15410,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So re-enable underrun reporting after some planes get enabled.
+ *
+ * We do this before .optimize_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the new plane configuration.
+ */
+ if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_state,
- new_intel_crtc_state);
+ dev_priv->display.optimize_watermarks(state, crtc);
}
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ intel_post_plane_update(state, crtc);
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -13921,15 +15436,19 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- if (intel_state->modeset)
- intel_verify_planes(intel_state);
+ /* Underruns don't always raise interrupts, so check manually */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
- if (intel_state->modeset && intel_can_enable_sagv(state))
+ if (state->modeset)
+ intel_verify_planes(state);
+
+ if (state->modeset && intel_can_enable_sagv(state))
intel_enable_sagv(dev_priv);
- drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_commit_hw_done(&state->base);
- if (intel_state->modeset) {
+ if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
* likelihood of triggering bugs in unclaimed access. After we
* finish modesetting, see if an error has been flagged, and if
@@ -13939,7 +15458,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -13949,14 +15468,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* schedule point (cond_resched()) here anyway to keep latencies
* down.
*/
- INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
- queue_work(system_highpri_wq, &state->commit_work);
+ INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
+ queue_work(system_highpri_wq, &state->base.commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state =
- container_of(work, struct drm_atomic_state, commit_work);
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
intel_atomic_commit_tail(state);
}
@@ -13986,42 +15505,39 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
return NOTIFY_DONE;
}
-static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane *plane;
+ struct intel_plane_state *old_plane_state, *new_plane_state;
+ struct intel_plane *plane;
int i;
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
- i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
- intel_fb_obj(new_plane_state->fb),
- to_intel_plane(plane)->frontbuffer_bit);
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+}
+
+static void assert_global_state_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
}
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * RETURNS
- * Zero for success or -errno.
- */
static int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
+ struct drm_atomic_state *_state,
bool nonblock)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_atomic_state_get(state);
- i915_sw_fence_init(&intel_state->commit_ready,
+ drm_atomic_state_get(&state->base);
+ i915_sw_fence_init(&state->commit_ready,
intel_atomic_commit_ready);
/*
@@ -14041,63 +15557,63 @@ static int intel_atomic_commit(struct drm_device *dev,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+ if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->wm.need_postvbl_update ||
new_crtc_state->update_wm_post)
- state->legacy_cursor_update = false;
+ state->base.legacy_cursor_update = false;
}
- ret = intel_atomic_prepare_commit(dev, state);
+ ret = intel_atomic_prepare_commit(state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&intel_state->commit_ready);
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ i915_sw_fence_commit(&state->commit_ready);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- ret = drm_atomic_helper_setup_commit(state, nonblock);
+ ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (!ret)
- ret = drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(&state->base, true);
if (ret) {
- i915_sw_fence_commit(&intel_state->commit_ready);
+ i915_sw_fence_commit(&state->commit_ready);
- drm_atomic_helper_cleanup_planes(dev, state);
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ drm_atomic_helper_cleanup_planes(dev, &state->base);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
- sizeof(intel_state->min_cdclk));
- memcpy(dev_priv->min_voltage_level,
- intel_state->min_voltage_level,
- sizeof(intel_state->min_voltage_level));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->cdclk.force_min_cdclk =
- intel_state->cdclk.force_min_cdclk;
+ if (state->global_state_changed) {
+ assert_global_state_locked(dev_priv);
- intel_cdclk_swap_state(intel_state);
+ memcpy(dev_priv->min_cdclk, state->min_cdclk,
+ sizeof(state->min_cdclk));
+ memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
+ sizeof(state->min_voltage_level));
+ dev_priv->active_pipes = state->active_pipes;
+ dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
+
+ intel_cdclk_swap_state(state);
}
- drm_atomic_state_get(state);
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(&state->base);
+ INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
- i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock && intel_state->modeset) {
- queue_work(dev_priv->modeset_wq, &state->commit_work);
+ i915_sw_fence_commit(&state->commit_ready);
+ if (nonblock && state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->base.commit_work);
} else if (nonblock) {
- queue_work(system_unbound_wq, &state->commit_work);
+ queue_work(dev_priv->flip_wq, &state->base.commit_work);
} else {
- if (intel_state->modeset)
+ if (state->modeset)
flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
}
@@ -14105,18 +15621,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
- .set_config = drm_atomic_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = intel_crtc_duplicate_state,
- .atomic_destroy_state = intel_crtc_destroy_state,
- .set_crc_source = intel_crtc_set_crc_source,
- .verify_crc_source = intel_crtc_verify_crc_source,
- .get_crc_sources = intel_crtc_get_crc_sources,
-};
-
struct wait_rps_boost {
struct wait_queue_entry wait;
@@ -14136,7 +15640,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -14177,9 +15681,9 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
@@ -14217,7 +15721,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_DISPLAY,
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
};
i915_gem_object_wait_priority(obj, 0, &attr);
@@ -14226,33 +15730,33 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
- * @new_state: the plane state being prepared
+ * @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
* involves pinning the underlying object and updating the frontbuffer tracking
* bits. Some older platforms need special physical address handling for
* cursor planes.
*
- * Must be called with struct_mutex held.
- *
* Returns 0 on success, negative error code on failure.
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_state->state);
+ to_intel_atomic_state(new_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *fb = new_plane_state->hw.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
if (old_obj) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(new_state->state,
- plane->state->crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(intel_state,
+ to_intel_crtc(plane->state->crtc));
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14275,9 +15779,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
- if (new_state->fence) { /* explicit fencing */
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
- new_state->fence,
+ new_plane_state->uapi.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
@@ -14291,23 +15795,16 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
return ret;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- return ret;
- }
-
- ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
+ ret = intel_plane_pin_fb(new_plane_state);
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
fb_obj_bump_render_priority(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
- if (!new_state->fence) { /* implicit fencing */
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -14317,13 +15814,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
- add_rps_boost_after_vblank(new_state->crtc, fence);
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
dma_fence_put(fence);
}
} else {
- add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -14335,7 +15834,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, true);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
intel_state->rps_interactive = true;
}
@@ -14345,130 +15844,27 @@ intel_prepare_plane_fb(struct drm_plane *plane,
/**
* intel_cleanup_plane_fb - Cleans up an fb after plane use
* @plane: drm plane to clean up for
- * @old_state: the state from the previous modeset
+ * @_old_plane_state: the state from the previous modeset
*
* Cleans up a framebuffer that has just been removed from a plane.
- *
- * Must be called with struct_mutex held.
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *_old_plane_state)
{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(old_state->state);
+ to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, false);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
intel_state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_plane_unpin_fb(to_intel_plane_state(old_state));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int
-skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int max_scale, mult;
- int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
-
- if (!crtc_state->base.enable)
- return DRM_PLANE_HELPER_NO_SCALING;
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- max_dotclk *= 2;
-
- if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
- return DRM_PLANE_HELPER_NO_SCALING;
-
- /*
- * skl max scale is lower of:
- * close to 3 but not 3, -1 is for that purpose
- * or
- * cdclk/crtc_clock
- */
- mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
- tmpclk1 = (1 << 16) * mult - 1;
- tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
- max_scale = min(tmpclk1, tmpclk2);
-
- return max_scale;
-}
-
-static void intel_begin_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(&new_crtc_state->base);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
-
- if (modeset)
- goto out;
-
- if (new_crtc_state->base.color_mgmt_changed ||
- new_crtc_state->update_pipe)
- intel_color_commit(new_crtc_state);
-
- if (new_crtc_state->update_pipe)
- intel_update_pipe_config(old_crtc_state, new_crtc_state);
- else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(new_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
-
-out:
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state,
- new_crtc_state);
-}
-
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-
- if (crtc_state->has_pch_encoder) {
- enum pipe pch_transcoder =
- intel_crtc_pch_transcoder(crtc);
-
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
- }
-}
-
-static void intel_finish_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- intel_pipe_update_end(new_crtc_state);
-
- if (new_crtc_state->update_pipe &&
- !needs_modeset(&new_crtc_state->base) &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+ intel_plane_unpin_fb(old_plane_state);
}
/**
@@ -14523,8 +15919,13 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED;
default:
@@ -14558,8 +15959,8 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
};
static int
-intel_legacy_cursor_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -14567,31 +15968,31 @@ intel_legacy_cursor_update(struct drm_plane *plane,
u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- int ret;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *old_fb;
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->state);
+ to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
+ int ret;
/*
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+ if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
crtc_state->update_pipe)
goto slow;
- old_plane_state = plane->state;
/*
* Don't do an async update if there is an outstanding commit modifying
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- if (old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
goto slow;
/*
@@ -14599,57 +16000,52 @@ intel_legacy_cursor_update(struct drm_plane *plane,
* take the slowpath. Only changing fb or position should be
* in the fastpath.
*/
- if (old_plane_state->crtc != crtc ||
- old_plane_state->src_w != src_w ||
- old_plane_state->src_h != src_h ||
- old_plane_state->crtc_w != crtc_w ||
- old_plane_state->crtc_h != crtc_h ||
- !old_plane_state->fb != !fb)
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
goto slow;
- new_plane_state = intel_plane_duplicate_state(plane);
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
if (!new_plane_state)
return -ENOMEM;
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
if (!new_crtc_state) {
ret = -ENOMEM;
goto out_free;
}
- drm_atomic_set_fb_for_plane(new_plane_state, fb);
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
- new_plane_state->src_x = src_x;
- new_plane_state->src_y = src_y;
- new_plane_state->src_w = src_w;
- new_plane_state->src_h = src_h;
- new_plane_state->crtc_x = crtc_x;
- new_plane_state->crtc_y = crtc_y;
- new_plane_state->crtc_w = crtc_w;
- new_plane_state->crtc_h = crtc_h;
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ old_plane_state, new_plane_state);
if (ret)
goto out_free;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ ret = intel_plane_pin_fb(new_plane_state);
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
- if (ret)
- goto out_unlock;
-
- intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
-
- old_fb = old_plane_state->fb;
- i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
- intel_plane->frontbuffer_bit);
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
/* Swap plane state */
- plane->state = new_plane_state;
+ plane->base.state = &new_plane_state->uapi;
/*
* We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -14663,27 +16059,24 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible)
- intel_update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
+ if (new_plane_state->uapi.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
else
- intel_disable_plane(intel_plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
- intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
+ intel_plane_unpin_fb(old_plane_state);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
if (new_crtc_state)
- intel_crtc_destroy_state(crtc, &new_crtc_state->base);
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
if (ret)
- intel_plane_destroy_state(plane, new_plane_state);
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
else
- intel_plane_destroy_state(plane, old_plane_state);
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
return ret;
slow:
- return drm_atomic_helper_update_plane(plane, crtc, fb,
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
@@ -14721,10 +16114,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
unsigned int possible_crtcs;
- const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -14753,44 +16145,69 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- if (INTEL_GEN(dev_priv) >= 4) {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
- modifiers = i9xx_format_modifiers;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- plane_funcs = &i965_plane_funcs;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
} else {
formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
- modifiers = i9xx_format_modifiers;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
+ }
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
plane_funcs = &i8xx_plane_funcs;
- }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
possible_crtcs = BIT(pipe);
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, plane_funcs,
- formats, num_formats, modifiers,
+ formats, num_formats,
+ i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, plane_funcs,
- formats, num_formats, modifiers,
+ formats, num_formats,
+ i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
plane_name(plane->i9xx_plane));
@@ -14813,6 +16230,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_MODE_ROTATE_0,
supported_rotations);
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -14829,7 +16249,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
{
unsigned int possible_crtcs;
struct intel_plane *cursor;
- int ret;
+ int ret, zpos;
cursor = intel_plane_alloc();
if (IS_ERR(cursor))
@@ -14878,6 +16298,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
@@ -14888,54 +16311,120 @@ fail:
return ERR_PTR(ret);
}
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i;
+#define INTEL_CRTC_FUNCS \
+ .gamma_set = drm_atomic_helper_legacy_gamma_set, \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
- if (!crtc->num_scalers)
- return;
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
- for (i = 0; i < crtc->num_scalers; i++) {
- struct intel_scaler *scaler = &scaler_state->scalers[i];
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
- scaler->in_use = 0;
- scaler->mode = 0;
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+};
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
}
- scaler_state->scaler_id = -1;
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
}
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state = NULL;
- struct intel_plane *primary = NULL;
- struct intel_plane *cursor = NULL;
+ struct intel_plane *primary, *cursor;
+ const struct drm_crtc_funcs *funcs;
+ struct intel_crtc *crtc;
int sprite, ret;
- intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (!intel_crtc)
- return -ENOMEM;
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state) {
- ret = -ENOMEM;
- goto fail;
- }
- __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
- intel_crtc->config = crtc_state;
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(primary->id);
+ crtc->plane_ids_mask |= BIT(primary->id);
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -14945,7 +16434,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(plane);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(plane->id);
+ crtc->plane_ids_mask |= BIT(plane->id);
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
@@ -14953,47 +16442,53 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(cursor);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(cursor->id);
+ crtc->plane_ids_mask |= BIT(cursor->id);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
&primary->base, &cursor->base,
- &intel_crtc_funcs,
- "pipe %c", pipe_name(pipe));
+ funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- intel_crtc->pipe = pipe;
-
- /* initialize shared scalers */
- intel_crtc_init_scalers(intel_crtc, crtc_state);
-
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
if (INTEL_GEN(dev_priv) < 9) {
enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
}
- drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
- intel_color_init(intel_crtc);
+ intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
fail:
- /*
- * drm_mode_config_cleanup() will free up any
- * crtcs/planes already initialized.
- */
- kfree(crtc_state);
- kfree(intel_crtc);
+ intel_crtc_free(crtc);
return ret;
}
@@ -15015,21 +16510,32 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct intel_encoder *encoder)
+static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_encoder *source_encoder;
- int index_mask = 0;
- int entry = 0;
+ u32 possible_clones = 0;
for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
- index_mask |= (1 << entry);
+ possible_clones |= drm_encoder_mask(&source_encoder->base);
+ }
+
+ return possible_clones;
+}
+
+static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc;
+ u32 possible_crtcs = 0;
- entry++;
+ for_each_intel_crtc(dev, crtc) {
+ if (encoder->pipe_mask & BIT(crtc->pipe))
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
}
- return index_mask;
+ return possible_crtcs;
}
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
@@ -15111,15 +16617,26 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
- if (IS_ELKHARTLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ intel_ddi_init(dev_priv, PORT_G);
+ intel_ddi_init(dev_priv, PORT_H);
+ intel_ddi_init(dev_priv, PORT_I);
+ icl_dsi_init(dev_priv);
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
icl_dsi_init(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (IS_GEN(dev_priv, 11)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -15321,9 +16838,10 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_psr_init(dev_priv);
for_each_intel_encoder(&dev_priv->drm, encoder) {
- encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_crtcs =
+ intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
- intel_encoder_clones(encoder);
+ intel_encoder_possible_clones(encoder);
}
intel_init_pch_refclk(dev_priv);
@@ -15334,15 +16852,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
-
- i915_gem_object_lock(obj);
- WARN_ON(!obj->framebuffer_references--);
- i915_gem_object_unlock(obj);
-
- i915_gem_object_put(obj);
+ intel_frontbuffer_put(intel_fb->frontbuffer);
kfree(intel_fb);
}
@@ -15370,7 +16882,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
i915_gem_object_flush_if_display(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
return 0;
}
@@ -15392,8 +16904,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ret = -EINVAL;
int i;
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer)
+ return -ENOMEM;
+
i915_gem_object_lock(obj);
- obj->framebuffer_references++;
tiling = i915_gem_object_get_tiling(obj);
stride = i915_gem_object_get_stride(obj);
i915_gem_object_unlock(obj);
@@ -15460,8 +16975,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0)
+ if (mode_cmd->offsets[0] != 0) {
+ DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
goto err;
+ }
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -15474,26 +16992,23 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
stride_alignment = intel_fb_stride_alignment(fb, i);
-
- /*
- * Display WA #0531: skl,bxt,kbl,glk
- *
- * Render decompression and plane width > 3840
- * combined with horizontal panning requires the
- * plane stride to be a multiple of 4. We'll just
- * require the entire fb to accommodate that to avoid
- * potential runtime errors at plane configuration time.
- */
- if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
- is_ccs_modifier(fb->modifier))
- stride_alignment *= 4;
-
if (fb->pitches[i] & (stride_alignment - 1)) {
DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
goto err;
}
+ if (is_gen12_ccs_plane(fb, i)) {
+ int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
+
+ if (fb->pitches[i] != ccs_aux_stride) {
+ DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
+ goto err;
+ }
+ }
+
fb->obj[i] = &obj->base;
}
@@ -15510,9 +17025,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
return 0;
err:
- i915_gem_object_lock(obj);
- obj->framebuffer_references--;
- i915_gem_object_unlock(obj);
+ intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
}
@@ -15530,8 +17043,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -15584,8 +17096,14 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ /* Transcoder timing limits */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ hdisplay_max = 16384;
+ vdisplay_max = 8192;
+ htotal_max = 16384;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
@@ -15614,6 +17132,56 @@ intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 5)
+ return MODE_V_ILLEGAL;
+ } else {
+ if (mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 3)
+ return MODE_V_ILLEGAL;
+ }
+
+ return MODE_OK;
+}
+
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode)
+{
+ int plane_width_max, plane_height_max;
+
+ /*
+ * intel_mode_valid() should be
+ * sufficient on older platforms.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ return MODE_OK;
+
+ /*
+ * Most people will probably want a fullscreen
+ * plane so let's not advertise modes that are
+ * too big for that.
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ plane_width_max = 5120;
+ plane_height_max = 4320;
+ } else {
+ plane_width_max = 5120;
+ plane_height_max = 4096;
+ }
+
+ if (mode->hdisplay > plane_width_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > plane_height_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
@@ -15638,29 +17206,28 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_init_cdclk_hooks(dev_priv);
if (INTEL_GEN(dev_priv) >= 9) {
- dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- skylake_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- haswell_crtc_compute_clock;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
+ skl_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ dev_priv->display.crtc_enable = hsw_crtc_enable;
+ dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
- haswell_crtc_compute_clock;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
+ hsw_crtc_compute_clock;
+ dev_priv->display.crtc_enable = hsw_crtc_enable;
+ dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_pipe_config = ilk_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
- ironlake_crtc_compute_clock;
- dev_priv->display.crtc_enable = ironlake_crtc_enable;
- dev_priv->display.crtc_disable = ironlake_crtc_disable;
+ ilk_crtc_compute_clock;
+ dev_priv->display.crtc_enable = ilk_crtc_enable;
+ dev_priv->display.crtc_disable = ilk_crtc_disable;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -15706,58 +17273,26 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
} else if (IS_GEN(dev_priv, 6)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- dev_priv->display.fdi_link_train = hsw_fdi_link_train;
}
if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.update_crtcs = skl_update_crtcs;
+ dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
else
- dev_priv->display.update_crtcs = intel_update_crtcs;
-}
+ dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
-static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return VLV_VGACNTRL;
- else if (INTEL_GEN(dev_priv) >= 5)
- return CPU_VGACNTRL;
- else
- return VGACNTRL;
}
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_i915_private *dev_priv)
+void intel_modeset_init_hw(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
- /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(SR01, VGA_SR_INDEX);
- sr1 = inb(VGA_SR_DATA);
- outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
-}
-
-void intel_modeset_init_hw(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
- dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
+ intel_update_cdclk(i915);
+ intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
+ i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
/*
@@ -15775,8 +17310,8 @@ static void sanitize_watermarks(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
int i;
@@ -15831,13 +17366,11 @@ retry:
}
/* Write calculated watermark values back */
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
-
- cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, cs);
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ dev_priv->display.optimize_watermarks(intel_state, crtc);
- to_intel_crtc_state(crtc->state)->wm = cs->wm;
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
put_state:
@@ -15867,8 +17400,7 @@ static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
int ret = 0;
state = drm_atomic_state_alloc(dev);
@@ -15880,15 +17412,17 @@ static int intel_initial_commit(struct drm_device *dev)
retry:
state->acquire_ctx = &ctx;
- drm_for_each_crtc(crtc, dev) {
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
+
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out;
}
- if (crtc_state->active) {
- ret = drm_atomic_add_affected_planes(state, crtc);
+ if (crtc_state->hw.active) {
+ ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@@ -15898,7 +17432,7 @@ retry:
* having a proper LUT loaded. Remove once we
* have readout for pipe gamma enable.
*/
- crtc_state->color_mgmt_changed = true;
+ crtc_state->uapi.color_mgmt_changed = true;
}
}
@@ -15919,117 +17453,111 @@ out:
return ret;
}
-int intel_modeset_init(struct drm_device *dev)
+static void intel_mode_config_init(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- enum pipe pipe;
- struct intel_crtc *crtc;
- int ret;
-
- dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ struct drm_mode_config *mode_config = &i915->drm.mode_config;
- drm_mode_config_init(dev);
+ drm_mode_config_init(&i915->drm);
- ret = intel_bw_init(dev_priv);
- if (ret)
- return ret;
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
-
- dev->mode_config.allow_fb_modifiers = true;
-
- dev->mode_config.funcs = &intel_mode_funcs;
-
- init_llist_head(&dev_priv->atomic_helper.free_list);
- INIT_WORK(&dev_priv->atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
+ mode_config->min_width = 0;
+ mode_config->min_height = 0;
- intel_init_quirks(dev_priv);
+ mode_config->preferred_depth = 24;
+ mode_config->prefer_shadow = 1;
- intel_fbc_init(dev_priv);
-
- intel_init_pm(dev_priv);
-
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
+ mode_config->allow_fb_modifiers = true;
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
- bios_lvds_use_ssc ? "en" : "dis",
- dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
- }
- }
+ mode_config->funcs = &intel_mode_funcs;
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (INTEL_GEN(dev_priv) >= 7) {
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
+ if (INTEL_GEN(i915) >= 7) {
+ mode_config->max_width = 16384;
+ mode_config->max_height = 16384;
+ } else if (INTEL_GEN(i915) >= 4) {
+ mode_config->max_width = 8192;
+ mode_config->max_height = 8192;
+ } else if (IS_GEN(i915, 3)) {
+ mode_config->max_width = 4096;
+ mode_config->max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ mode_config->max_width = 2048;
+ mode_config->max_height = 2048;
}
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
- dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN(dev_priv, 2)) {
- dev->mode_config.cursor_width = 64;
- dev->mode_config.cursor_height = 64;
+ if (IS_I845G(i915) || IS_I865G(i915)) {
+ mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ mode_config->cursor_height = 1023;
+ } else if (IS_GEN(i915, 2)) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
} else {
- dev->mode_config.cursor_width = 256;
- dev->mode_config.cursor_height = 256;
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
}
+}
- dev->mode_config.fb_base = ggtt->gmadr.start;
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev_priv)->num_pipes,
- INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
+ i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
- for_each_pipe(dev_priv, pipe) {
- ret = intel_crtc_init(dev_priv, pipe);
- if (ret) {
- drm_mode_config_cleanup(dev);
- return ret;
+ intel_mode_config_init(i915);
+
+ ret = intel_bw_init(i915);
+ if (ret)
+ return ret;
+
+ init_llist_head(&i915->atomic_helper.free_list);
+ INIT_WORK(&i915->atomic_helper.free_work,
+ intel_atomic_helper_free_state_worker);
+
+ intel_init_quirks(i915);
+
+ intel_fbc_init(i915);
+
+ intel_init_pm(i915);
+
+ intel_panel_sanitize_ssc(i915);
+
+ intel_gmbus_setup(i915);
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
}
}
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(dev_priv);
+ intel_update_fdi_pll_freq(i915);
- intel_update_czclk(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_update_czclk(i915);
+ intel_modeset_init_hw(i915);
- intel_hdcp_component_init(dev_priv);
+ intel_hdcp_component_init(i915);
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev_priv);
+ if (i915->max_cdclk_freq == 0)
+ intel_update_max_cdclk(i915);
/* Just disable it once at startup */
- i915_disable_vga(dev_priv);
- intel_setup_outputs(dev_priv);
+ intel_vga_disable(i915);
+ intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
@@ -16048,8 +17576,7 @@ int intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display.get_initial_plane_config(crtc,
- &plane_config);
+ i915->display.get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -16063,7 +17590,7 @@ int intel_modeset_init(struct drm_device *dev)
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(i915))
sanitize_watermarks(dev);
/*
@@ -16228,31 +17755,76 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
-static void intel_sanitize_crtc(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
+static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- /* Clear any frame start delays used for debugging left by the BIOS */
- if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
+ u32 val;
+
+ if (transcoder_is_dsi(cpu_transcoder))
+ return;
+
+ val = I915_READ(reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ } else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
+ u32 val;
- I915_WRITE(reg,
- I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ val = I915_READ(reg);
+ val &= ~PIPECONF_FRAME_START_DELAY_MASK;
+ val |= PIPECONF_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
}
- if (crtc_state->base.active) {
+ if (!crtc_state->has_pch_encoder)
+ return;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ } else {
+ enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
+ i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ }
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
struct intel_plane *plane;
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ intel_sanitize_frame_start_delay(crtc_state);
+
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible &&
+ if (plane_state->uapi.visible &&
plane->base.type != DRM_PLANE_TYPE_PRIMARY)
intel_plane_disable_noatomic(crtc, plane);
}
@@ -16269,10 +17841,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
- intel_crtc_disable_noatomic(&crtc->base, ctx);
+ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
+ intel_crtc_disable_noatomic(crtc, ctx);
- if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
+ if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -16303,7 +17875,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
@@ -16316,7 +17888,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* road.
*/
return IS_GEN(dev_priv, 6) &&
- crtc_state->base.active &&
+ crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
}
@@ -16333,7 +17905,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* encoder is active and trying to read from a pipe) and the
* pipe itself being active. */
bool has_active_crtc = crtc_state &&
- crtc_state->base.active;
+ crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
@@ -16388,39 +17960,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
icl_sanitize_encoder_pll_mapping(encoder);
}
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
-{
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev_priv);
- }
-}
-
-void i915_redisable_vga(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- i915_redisable_vga_power_on(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
@@ -16464,28 +18003,28 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_connector_list_iter conn_iter;
int i;
- dev_priv->active_crtcs = 0;
+ dev_priv->active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
- memset(crtc_state, 0, sizeof(*crtc_state));
- __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ intel_crtc_state_reset(crtc_state, crtc);
- crtc_state->base.active = crtc_state->base.enable =
+ crtc_state->hw.active = crtc_state->hw.enable =
dev_priv->display.get_pipe_config(crtc, crtc_state);
- crtc->base.enabled = crtc_state->base.enable;
- crtc->active = crtc_state->base.active;
+ crtc->base.enabled = crtc_state->hw.enable;
+ crtc->active = crtc_state->hw.active;
- if (crtc_state->base.active)
- dev_priv->active_crtcs |= 1 << crtc->pipe;
+ if (crtc_state->hw.active)
+ dev_priv->active_pipes |= BIT(crtc->pipe);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->base.active));
+ enableddisabled(crtc_state->hw.active));
}
readout_plane_state(dev_priv);
@@ -16495,12 +18034,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
&pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->base.active &&
+ if (crtc_state->hw.active &&
crtc_state->shared_dpll == pll)
pll->state.crtc_mask |= 1 << crtc->pipe;
}
@@ -16534,24 +18080,28 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
connector->base.dpms = DRM_MODE_DPMS_ON;
encoder = connector->encoder;
connector->base.encoder = &encoder->base;
- if (encoder->base.crtc &&
- encoder->base.crtc->state->active) {
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->hw.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
- encoder->base.crtc->state->connector_mask |=
+ crtc_state->uapi.connector_mask |=
drm_connector_mask(&connector->base);
- encoder->base.crtc->state->encoder_mask |=
+ crtc_state->uapi.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
-
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
@@ -16570,13 +18120,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
- if (crtc_state->base.active) {
- intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
- crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
- crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
- intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+ if (crtc_state->hw.active) {
+ struct drm_display_mode *mode = &crtc_state->hw.mode;
+
+ intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
+ crtc_state);
+
+ *mode = crtc_state->hw.adjusted_mode;
+ mode->hdisplay = crtc_state->pipe_src_w;
+ mode->vdisplay = crtc_state->pipe_src_h;
/*
* The initial mode needs to be set in order to keep
@@ -16587,25 +18139,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
- crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
+ mode->private_flags = I915_MODE_FLAG_INHERITED;
intel_crtc_compute_pixel_rate(crtc_state);
- if (dev_priv->display.modeset_calc_cdclk) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
- min_cdclk = 0;
- }
+ intel_crtc_update_active_timings(crtc_state);
- drm_calc_timestamping_constants(&crtc->base,
- &crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc_state);
+ intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -16614,11 +18156,37 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* FIXME don't have the fb yet, so can't
* use intel_plane_data_rate() :(
*/
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
+ if (plane_state->uapi.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide ||
+ INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
+
+ if (crtc_state->hw.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (WARN_ON(min_cdclk < 0))
+ min_cdclk = 0;
}
+ dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
intel_bw_crtc_update(bw_state, crtc_state);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
@@ -16650,8 +18218,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
- /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ /*
+ * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
+ * Also known as Wa_14010480278.
+ */
+ if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
DARBF_GATING_DIS);
@@ -16732,7 +18303,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
@@ -16744,6 +18314,17 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
+
+ /* Sanitize the TypeC port mode upfront, encoders depend on this */
+ for_each_intel_encoder(dev, encoder) {
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ /* We need to sanitize only the MST primary port. */
+ if (encoder->type != INTEL_OUTPUT_DP_MST &&
+ intel_phy_is_tc(dev_priv, phy))
+ intel_tc_port_sanitize(enc_to_dig_port(encoder));
+ }
+
get_encoder_power_domains(dev_priv);
if (HAS_PCH_IBX(dev_priv))
@@ -16754,11 +18335,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
* waits, so we need vblank interrupts restored beforehand.
*/
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
drm_crtc_vblank_reset(&crtc->base);
- if (crtc_state->base.active)
+ if (crtc_state->hw.active)
intel_crtc_vblank_on(crtc_state);
}
@@ -16768,7 +18350,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
}
@@ -16801,17 +18385,16 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
}
for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
u64 put_domains;
- crtc_state = to_intel_crtc_state(crtc->base.state);
- put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
+ put_domains = modeset_get_crtc_power_domains(crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
@@ -16848,13 +18431,13 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-static void intel_hpd_poll_fini(struct drm_device *dev)
+static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
@@ -16866,78 +18449,58 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_modeset_cleanup(struct drm_device *dev)
+void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- flush_workqueue(dev_priv->modeset_wq);
+ flush_workqueue(i915->flip_wq);
+ flush_workqueue(i915->modeset_wq);
- flush_work(&dev_priv->atomic_helper.free_work);
- WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+ flush_work(&i915->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
/*
* Interrupts and polling as the first thing to avoid creating havoc.
 * Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
- intel_irq_uninstall(dev_priv);
+ intel_irq_uninstall(i915);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(dev);
+ intel_hpd_poll_fini(i915);
+
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
/* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(dev_priv);
+ intel_fbdev_fini(i915);
intel_unregister_dsm_handler();
- intel_fbc_global_disable(dev_priv);
+ intel_fbc_global_disable(i915);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- intel_hdcp_component_fini(dev_priv);
+ intel_hdcp_component_fini(i915);
- drm_mode_config_cleanup(dev);
+ drm_mode_config_cleanup(&i915->drm);
- intel_overlay_cleanup(dev_priv);
+ intel_overlay_cleanup(i915);
- intel_gmbus_teardown(dev_priv);
+ intel_gmbus_teardown(i915);
- destroy_workqueue(dev_priv->modeset_wq);
+ intel_bw_cleanup(i915);
- intel_fbc_cleanup_cfb(dev_priv);
-}
-
-/*
- * set vga decode state - true == enable VGA decode
- */
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
-{
- unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
- return -EIO;
- }
+ destroy_workqueue(i915->flip_wq);
+ destroy_workqueue(i915->modeset_wq);
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
- return 0;
-
- if (state)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
- return -EIO;
- }
-
- return 0;
+ intel_fbc_cleanup_cfb(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -16982,7 +18545,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } transcoder[4];
+ } transcoder[5];
};
struct intel_display_error_state *
@@ -16993,13 +18556,14 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
TRANSCODER_A,
TRANSCODER_B,
TRANSCODER_C,
+ TRANSCODER_D,
TRANSCODER_EDP,
};
int i;
BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -17078,7 +18642,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index ee6b8194a459..028aab728514 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2017 Intel Corporation
+ * Copyright © 2006-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,8 +28,33 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>
+enum link_m_n_set;
+struct dpll;
+struct drm_connector;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_file;
+struct drm_format_info;
+struct drm_framebuffer;
+struct drm_i915_error_state_buf;
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+struct drm_plane_state;
+struct i915_ggtt_view;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+struct intel_load_detect_pipe;
+struct intel_plane;
struct intel_plane_state;
+struct intel_remapped_info;
+struct intel_rotation_info;
+struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -45,6 +70,8 @@ enum i915_gpio {
GPIOK,
GPIOL,
GPIOM,
+ GPION,
+ GPIOO,
};
/*
@@ -58,6 +85,7 @@ enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
+ PIPE_D,
_PIPE_EDP,
I915_MAX_PIPES = _PIPE_EDP
@@ -66,6 +94,7 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
+ INVALID_TRANSCODER = -1,
/*
* The following transcoders have a 1:1 transcoder -> pipe mapping,
* keep their values fixed: the code assumes that TRANSCODER_A=0, the
@@ -75,6 +104,7 @@ enum transcoder {
TRANSCODER_A = PIPE_A,
TRANSCODER_B = PIPE_B,
TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
/*
* The following transcoders can map to any pipe, their enum value
@@ -98,6 +128,8 @@ static inline const char *transcoder_name(enum transcoder transcoder)
return "B";
case TRANSCODER_C:
return "C";
+ case TRANSCODER_D:
+ return "D";
case TRANSCODER_EDP:
return "EDP";
case TRANSCODER_DSI_A:
@@ -154,6 +186,24 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
@@ -173,6 +223,12 @@ static inline const char *port_identifier(enum port port)
return "Port E";
case PORT_F:
return "Port F";
+ case PORT_G:
+ return "Port G";
+ case PORT_H:
+ return "Port H";
+ case PORT_I:
+ return "Port I";
default:
return "<invalid>";
}
@@ -185,14 +241,15 @@ enum tc_port {
PORT_TC2,
PORT_TC3,
PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
I915_MAX_TC_PORTS
};
-enum tc_port_type {
- TC_PORT_UNKNOWN = 0,
- TC_PORT_TYPEC,
- TC_PORT_TBT,
+enum tc_port_mode {
+ TC_PORT_TBT_ALT,
+ TC_PORT_DP_ALT,
TC_PORT_LEGACY,
};
@@ -216,6 +273,7 @@ enum aux_ch {
AUX_CH_D,
AUX_CH_E, /* ICL+ */
AUX_CH_F,
+ AUX_CH_G,
};
#define aux_ch_name(a) ((a) + 'A')
@@ -229,11 +287,35 @@ struct intel_link_m_n {
u32 link_n;
};
+enum phy {
+ PHY_NONE = -1,
+
+ PHY_A = 0,
+ PHY_B,
+ PHY_C,
+ PHY_D,
+ PHY_E,
+ PHY_F,
+ PHY_G,
+ PHY_H,
+ PHY_I,
+
+ I915_MAX_PHYS
+};
+
+#define phy_name(a) ((a) + 'A')
+
+enum phy_fia {
+ FIA1,
+ FIA2,
+ FIA3,
+};
+
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
for_each_if((__mask) & BIT(__p))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -250,10 +332,17 @@ struct intel_link_m_n {
(__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+#define for_each_port(__port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for_each_port(__port) \
for_each_if((__ports_mask) & BIT(__port))
+#define for_each_phy_masked(__phy, __phys_mask) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if((__phys_mask) & BIT(__phy))
+
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
@@ -267,7 +356,7 @@ struct intel_link_m_n {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- drm_plane_mask(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -291,6 +380,13 @@ struct intel_link_m_n {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_encoder_mask(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head) \
+ for_each_if((encoder_mask) & \
+ drm_encoder_mask(&intel_encoder->base))
+
#define for_each_intel_dp(dev, intel_encoder) \
for_each_intel_encoder(dev, intel_encoder) \
for_each_if(intel_encoder_is_dp(intel_encoder))
@@ -348,14 +444,210 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(crtc)
+#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
+#define intel_atomic_crtc_state_for_each_plane_state( \
+ plane, plane_state, \
+ crtc_state) \
+ for_each_intel_plane_mask(((crtc_state)->uapi.state->dev), (plane), \
+ ((crtc_state)->uapi.plane_mask)) \
+ for_each_if ((plane_state = \
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->uapi.state, &plane->base))))
+
+#define for_each_new_intel_connector_in_state(__state, connector, new_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->base.connectors[__i].ptr && \
+ ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
+ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n);
+ bool constant_n, bool fec_enable);
bool is_ccs_modifier(u64 modifier);
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode);
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+
+void intel_plane_destroy(struct drm_plane *plane);
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
+void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq);
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state, int plane);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+int intel_display_suspend(struct drm_device *dev);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
+
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport,
+ unsigned int expected_mask);
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);
+
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+ enum link_m_n_set m_n);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
+enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_state *pipe_config);
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int plane);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_display_error_state *error);
+
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ uint64_t modifier);
+
+/* modesetting */
+void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init(struct drm_i915_private *i915);
+void intel_modeset_driver_remove(struct drm_i915_private *i915);
+void intel_display_resume(struct drm_device *dev);
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+
+/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state);
+#define assert_pipe_enabled(d, t) assert_pipe(d, t, true)
+#define assert_pipe_disabled(d, t) assert_pipe(d, t, false)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ if (!WARN(i915_modparams.verbose_state_checks, format)) \
+ DRM_ERROR(format); \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(x) \
+ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2d1939db108f..21561acfa3ac 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,8 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/vgaarb.h>
-
#include "display/intel_crt.h"
#include "display/intel_dp.h"
@@ -13,10 +11,13 @@
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
+#include "intel_tc.h"
+#include "intel_vga.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -33,22 +34,28 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PIPE_B";
case POWER_DOMAIN_PIPE_C:
return "PIPE_C";
+ case POWER_DOMAIN_PIPE_D:
+ return "PIPE_D";
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
return "PIPE_A_PANEL_FITTER";
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
return "PIPE_B_PANEL_FITTER";
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
+ return "PIPE_D_PANEL_FITTER";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
return "TRANSCODER_B";
case POWER_DOMAIN_TRANSCODER_C:
return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_D:
+ return "TRANSCODER_D";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
- case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
- return "TRANSCODER_EDP_VDSC";
+ case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
+ return "TRANSCODER_VDSC_PW2";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -65,6 +72,12 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
return "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_G_LANES:
+ return "PORT_DDI_G_LANES";
+ case POWER_DOMAIN_PORT_DDI_H_LANES:
+ return "PORT_DDI_H_LANES";
+ case POWER_DOMAIN_PORT_DDI_I_LANES:
+ return "PORT_DDI_I_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -77,6 +90,12 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
return "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DDI_G_IO:
+ return "PORT_DDI_G_IO";
+ case POWER_DOMAIN_PORT_DDI_H_IO:
+ return "PORT_DDI_H_IO";
+ case POWER_DOMAIN_PORT_DDI_I_IO:
+ return "PORT_DDI_I_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -99,16 +118,28 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
+ case POWER_DOMAIN_AUX_G:
+ return "AUX_G";
+ case POWER_DOMAIN_AUX_H:
+ return "AUX_H";
+ case POWER_DOMAIN_AUX_I:
+ return "AUX_I";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
- case POWER_DOMAIN_AUX_TBT1:
- return "AUX_TBT1";
- case POWER_DOMAIN_AUX_TBT2:
- return "AUX_TBT2";
- case POWER_DOMAIN_AUX_TBT3:
- return "AUX_TBT3";
- case POWER_DOMAIN_AUX_TBT4:
- return "AUX_TBT4";
+ case POWER_DOMAIN_AUX_C_TBT:
+ return "AUX_C_TBT";
+ case POWER_DOMAIN_AUX_D_TBT:
+ return "AUX_D_TBT";
+ case POWER_DOMAIN_AUX_E_TBT:
+ return "AUX_E_TBT";
+ case POWER_DOMAIN_AUX_F_TBT:
+ return "AUX_F_TBT";
+ case POWER_DOMAIN_AUX_G_TBT:
+ return "AUX_G_TBT";
+ case POWER_DOMAIN_AUX_H_TBT:
+ return "AUX_H_TBT";
+ case POWER_DOMAIN_AUX_I_TBT:
+ return "AUX_I_TBT";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -117,6 +148,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "MODESET";
case POWER_DOMAIN_GT_IRQ:
return "GT_IRQ";
+ case POWER_DOMAIN_DPLL_DC_OFF:
+ return "DPLL_DC_OFF";
default:
MISSING_CASE(domain);
return "?";
@@ -233,23 +266,8 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 irq_pipe_mask, bool has_vga)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- if (has_vga) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- }
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
if (irq_pipe_mask)
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
@@ -269,11 +287,14 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore,
- regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- 1));
+ if (intel_de_wait_for_set(dev_priv, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
+ DRM_DEBUG_KMS("%s power well enable timeout\n",
+ power_well->desc->name);
+
+ /* An AUX timeout is expected if the TBT DP tunnel is down. */
+ WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ }
}
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
@@ -324,9 +345,8 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg),
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -388,7 +408,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
@@ -396,21 +416,24 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
+ WARN_ON(!IS_ICELAKE(dev_priv));
+
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+ if (INTEL_GEN(dev_priv) < 12) {
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+ }
hsw_wait_for_power_well_enable(dev_priv, power_well);
/* Display WA #1178: icl */
- if (IS_ICELAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, port)) {
+ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
@@ -423,11 +446,13 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+ WARN_ON(!IS_ICELAKE(dev_priv));
+
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -441,24 +466,119 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+
+ return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
+
+static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int refs = hweight64(power_well->desc->domains &
+ async_put_domains_mask(&dev_priv->power_domains));
+
+ WARN_ON(refs > power_well->count);
+
+ return refs;
+}
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ /* Bypass the check if all references are released asynchronously */
+ if (power_well_async_ref_count(dev_priv, power_well) ==
+ power_well->count)
+ return;
+
+ aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (!intel_phy_is_tc(dev_priv, phy))
+ continue;
+
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+ if (WARN_ON(!dig_port))
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ if (WARN_ON(!dig_port))
+ return;
+
+ WARN_ON(!intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+#endif
+
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
- enum aux_ch aux_ch;
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
u32 val;
- aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
+ icl_tc_port_assert_ref_held(dev_priv, power_well);
+
val = I915_READ(DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
+ if (power_well->desc->hsw.is_tc_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
+
+ if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ DRM_WARN("Timeout waiting TC uC health\n");
+ }
+}
+
+static void
+icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ icl_tc_port_assert_ref_held(dev_priv, power_well);
+
+ hsw_power_well_disable(dev_priv, power_well);
}
/*
@@ -570,7 +690,11 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 11)
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (IS_GEN(dev_priv, 11))
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
@@ -580,7 +704,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -640,7 +764,53 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
dev_priv->csr.dc_state = val & mask;
}
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+static u32
+sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 target_dc_state)
+{
+ u32 states[] = {
+ DC_STATE_EN_UPTO_DC6,
+ DC_STATE_EN_UPTO_DC5,
+ DC_STATE_EN_DC3CO,
+ DC_STATE_DISABLE,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ if (target_dc_state != states[i])
+ continue;
+
+ if (dev_priv->csr.allowed_dc_mask & target_dc_state)
+ break;
+
+ target_dc_state = states[i + 1];
+ }
+
+ return target_dc_state;
+}
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ DRM_DEBUG_KMS("Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling DC3CO\n");
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ I915_WRITE(DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /*
+ * Delay of 200us for DC3CO exit time, per Bspec 49196.
+ */
+ usleep_range(200, 210);
+}
+
+static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
@@ -655,7 +825,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
@@ -695,6 +865,51 @@ lookup_power_well(struct drm_i915_private *dev_priv,
return &dev_priv->power_domains.power_wells[0];
}
+/**
+ * intel_display_power_set_target_dc_state - Set target dc state.
+ * @dev_priv: i915 device
+ * @state: state which needs to be set as target_dc_state.
+ *
+ * This function sets the "DC off" power well target_dc_state;
+ * based upon this target_dc_state, the "DC off" power well will
+ * enable the desired DC state.
+ */
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ struct i915_power_well *power_well;
+ bool dc_off_enabled;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+
+ if (WARN_ON(!power_well))
+ goto unlock;
+
+ state = sanitize_target_dc_state(dev_priv, state);
+
+ if (state == dev_priv->csr.target_dc_state)
+ goto unlock;
+
+ dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
+ power_well);
+ /*
+ * If DC off power well is disabled, need to enable and disable the
+ * DC off power well to effect target DC state.
+ */
+ if (!dc_off_enabled)
+ power_well->desc->ops->enable(dev_priv, power_well);
+
+ dev_priv->csr.target_dc_state = state;
+
+ if (!dc_off_enabled)
+ power_well->desc->ops->disable(dev_priv, power_well);
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+}
+
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
@@ -709,7 +924,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@@ -733,7 +948,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -807,7 +1022,8 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+ return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
@@ -819,11 +1035,15 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
"Unexpected DBuf power power state (0x%08x)\n", tmp);
}
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = {};
+ if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
@@ -844,16 +1064,29 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
intel_combo_phy_init(dev_priv);
}
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ gen9_disable_dc_states(dev_priv);
+}
+
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (!dev_priv->csr.dmc_payload)
return;
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+ switch (dev_priv->csr.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+ break;
+ case DC_STATE_EN_UPTO_DC5:
gen9_enable_dc5(dev_priv);
+ break;
+ }
}
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
@@ -1059,7 +1292,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(dev_priv);
+ intel_vga_redisable_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1071,7 +1304,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
intel_power_sequencer_reset(dev_priv);
@@ -1232,11 +1465,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- phy_status_mask,
- phy_status,
- 10))
+ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
phy_status, dev_priv->chv_phy_control);
@@ -1267,11 +1497,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy),
- PHY_POWERGOOD(phy),
- 1))
+ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
vlv_dpio_get(dev_priv);
@@ -1437,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
@@ -2332,24 +2559,19 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2359,7 +2581,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
* - KVMR (HW control)
@@ -2368,6 +2590,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
ICL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define ICL_DDI_IO_A_POWER_DOMAINS ( \
@@ -2388,22 +2611,120 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
+#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
+
+#define TGL_PW_5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_D) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_4_POWER_DOMAINS ( \
+ TGL_PW_5_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_3_POWER_DOMAINS ( \
+ TGL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_2_POWER_DOMAINS ( \
+ TGL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ TGL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
+#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
+#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
+
+#define TGL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define TGL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define TGL_AUX_C_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G))
+#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H))
+#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I))
+#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
+#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
+#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
+#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -2715,7 +3036,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -2797,7 +3118,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -2857,7 +3178,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
.name = "DC off",
.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3026,7 +3347,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
.name = "DC off",
.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3113,7 +3434,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = icl_tc_phy_aux_power_well_enable,
- .disable = hsw_power_well_disable,
+ .disable = icl_tc_phy_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
@@ -3154,7 +3475,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3261,8 +3582,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .name = "AUX C TC1",
+ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3272,8 +3593,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX D",
- .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .name = "AUX D TC2",
+ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3283,8 +3604,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX E",
- .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .name = "AUX E TC3",
+ .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3294,8 +3615,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX F",
- .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .name = "AUX F TC4",
+ .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3305,8 +3626,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX C TBT1",
+ .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3316,8 +3637,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX D TBT2",
+ .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3327,8 +3648,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX E TBT3",
+ .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3338,8 +3659,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX F TBT4",
+ .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3362,6 +3683,480 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
+static const struct i915_power_well_desc ehl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_DC_OFF,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_DC_OFF,
+ },
+ {
+ .name = "power well 2",
+ .domains = TGL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = TGL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ }
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ }
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ }
+ },
+ {
+ .name = "DDI D TC1 IO",
+ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+ },
+ },
+ {
+ .name = "DDI E TC2 IO",
+ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+ },
+ },
+ {
+ .name = "DDI F TC3 IO",
+ .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
+ },
+ },
+ {
+ .name = "DDI G TC4 IO",
+ .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
+ },
+ },
+ {
+ .name = "DDI H TC5 IO",
+ .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
+ },
+ },
+ {
+ .name = "DDI I TC6 IO",
+ .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = TGL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = TGL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = TGL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX D TC1",
+ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX E TC2",
+ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX F TC3",
+ .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX G TC4",
+ .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX H TC5",
+ .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX I TC6",
+ .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX D TBT1",
+ .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX E TBT2",
+ .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX F TBT3",
+ .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX G TBT4",
+ .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX H TBT5",
+ .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX I TBT6",
+ .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = TGL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ }
+ },
+ {
+ .name = "power well 5",
+ .domains = TGL_PW_5_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_D),
+ },
+ },
+};
+
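
The ehl_power_wells[] and tgl_power_wells[] tables above are pure data; they are handed to set_power_wells() from intel_power_domains_init() later in this patch. As a rough sketch of what that registration step amounts to (illustrative only: the helper name, the kcalloc-based allocation and the exact field layout are assumptions, not the driver's actual implementation):

static int example_set_power_wells(struct i915_power_domains *power_domains,
				   const struct i915_power_well_desc *descs,
				   int count)
{
	int i;

	/* one runtime power well per descriptor entry */
	power_domains->power_well_count = count;
	power_domains->power_wells =
		kcalloc(count, sizeof(*power_domains->power_wells), GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < count; i++)
		power_domains->power_wells[i].desc = &descs[i];

	return 0;
}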
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -3379,14 +4174,17 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (INTEL_GEN(dev_priv) >= 11) {
- max_dc = 2;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ max_dc = 4;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN(dev_priv, 11)) {
+ max_dc = 2;
+ mask = DC_STATE_EN_DC9;
} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
@@ -3405,7 +4203,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = enable_dc;
} else if (enable_dc == -1) {
requested_dc = max_dc;
- } else if (enable_dc > max_dc && enable_dc <= 2) {
+ } else if (enable_dc > max_dc && enable_dc <= 4) {
DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
@@ -3414,10 +4212,20 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = max_dc;
}
- if (requested_dc > 1)
+ switch (requested_dc) {
+ case 4:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
+ break;
+ case 3:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
+ break;
+ case 2:
mask |= DC_STATE_EN_UPTO_DC6;
- if (requested_dc > 0)
+ break;
+ case 1:
mask |= DC_STATE_EN_UPTO_DC5;
+ break;
+ }
DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
@@ -3478,6 +4286,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->csr.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+ dev_priv->csr.target_dc_state =
+ sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
mutex_init(&power_domains->lock);
@@ -3489,7 +4300,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_GEN(dev_priv, 11)) {
+ if (IS_GEN(dev_priv, 12)) {
+ err = set_power_wells(power_domains, tgl_power_wells);
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
+ err = set_power_wells(power_domains, ehl_power_wells);
+ } else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3773,8 +4588,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@@ -3829,8 +4643,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -3872,7 +4685,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* For more, read "Display Sequences for Package C8" on the hardware
* documentation.
*/
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -3888,7 +4701,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
hsw_disable_lcpll(dev_priv, true, true);
}
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -3963,7 +4776,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -3988,8 +4801,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
@@ -4020,12 +4832,12 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -4085,7 +4897,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -4111,8 +4923,58 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_combo_phy_uninit(dev_priv);
}
-void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+struct buddy_page_mask {
+ u32 page_mask;
+ u8 type;
+ u8 num_channels;
+};
+
+static const struct buddy_page_mask tgl_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ {}
+};
+
+static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
+ {}
+};
+
+static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+{
+ enum intel_dram_type type = dev_priv->dram_info.type;
+ u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct buddy_page_mask *table;
+ int i;
+
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ /* Wa_1409767108: tgl */
+ table = wa_1409767108_buddy_page_masks;
+ else
+ table = tgl_buddy_page_masks;
+
+ for (i = 0; table[i].page_mask != 0; i++)
+ if (table[i].num_channels == num_channels &&
+ table[i].type == type)
+ break;
+
+ if (table[i].page_mask == 0) {
+ DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
+ I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+ I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ } else {
+ I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
+ I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+ }
+}
+
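
A concrete reading of the lookup above, assuming a production TGL part with two LPDDR4 channels (values taken straight from the tables in this hunk):

/*
 * 2ch LPDDR4, non-A0:  page_mask = 0x1C -> BW_BUDDY1/2_PAGE_MASK
 * 2ch LPDDR4, TGL A0:  page_mask = 0x3   (Wa_1409767108 table)
 * no table match:      BW_BUDDY1/2_CTL   = BW_BUDDY_DISABLE
 */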
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
@@ -4143,16 +5005,20 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
+ /* 7. Program arbiter BW_BUDDY registers */
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_bw_buddy_init(dev_priv);
+
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -4337,7 +5203,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
*
* It will return with power domains disabled (to be enabled later by
* intel_power_domains_enable()) and must be paired with
- * intel_power_domains_fini_hw().
+ * intel_power_domains_driver_remove().
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
@@ -4345,6 +5211,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
+ /* Must happen before power domain init on VLV/CHV */
+ intel_update_rawclk(i915);
+
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
@@ -4389,7 +5258,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
}
/**
- * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * intel_power_domains_driver_remove - deinitialize hw power domain state
* @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
@@ -4399,7 +5268,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->power_domains.wakeref);
@@ -4623,3 +5492,58 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
}
#endif
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
+ bxt_enable_dc9(i915);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ hsw_enable_pc8(i915);
+}
+
+void intel_display_power_resume_early(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+ gen9_sanitize_dc_state(i915);
+ bxt_disable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
+
+void intel_display_power_suspend(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11) {
+ icl_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_enable_pc8(i915);
+ }
+}
+
+void intel_display_power_resume(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11) {
+ bxt_disable_dc9(i915);
+ icl_display_core_init(i915, true);
+ if (i915->csr.dmc_payload) {
+ if (i915->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(i915);
+ else if (i915->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(i915);
+ }
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_disable_dc9(i915);
+ bxt_display_core_init(i915, true);
+ if (i915->csr.dmc_payload &&
+ (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ gen9_enable_dc5(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index ff57b0a7fe59..2608a65af7fa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -18,14 +18,18 @@ enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_D,
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_D_PANEL_FITTER,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_EDP_VDSC,
+ /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -34,12 +38,18 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_G_LANES,
+ POWER_DOMAIN_PORT_DDI_H_LANES,
+ POWER_DOMAIN_PORT_DDI_I_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DDI_G_IO,
+ POWER_DOMAIN_PORT_DDI_H_IO,
+ POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -51,19 +61,48 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_G,
+ POWER_DOMAIN_AUX_H,
+ POWER_DOMAIN_AUX_I,
POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_TBT1,
- POWER_DOMAIN_AUX_TBT2,
- POWER_DOMAIN_AUX_TBT3,
- POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_C_TBT,
+ POWER_DOMAIN_AUX_D_TBT,
+ POWER_DOMAIN_AUX_E_TBT,
+ POWER_DOMAIN_AUX_F_TBT,
+ POWER_DOMAIN_AUX_G_TBT,
+ POWER_DOMAIN_AUX_H_TBT,
+ POWER_DOMAIN_AUX_I_TBT,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_DPLL_DC_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
};
+/*
+ * i915_power_well_id:
+ *
+ * IDs used to look up power wells. Power wells accessed directly bypassing
+ * the power domains framework must be assigned a unique ID. The rest of the
+ * power wells must be assigned DISP_PW_ID_NONE.
+ */
+enum i915_power_well_id {
+ DISP_PW_ID_NONE,
+
+ VLV_DISP_PW_DISP2D,
+ BXT_DISP_PW_DPIO_CMN_A,
+ VLV_DISP_PW_DPIO_CMN_BC,
+ GLK_DISP_PW_DPIO_CMN_C,
+ CHV_DISP_PW_DPIO_CMN_D,
+ HSW_DISP_PW_GLOBAL,
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_1,
+ SKL_DISP_PW_2,
+ SKL_DISP_DC_OFF,
+};
+
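
The unique IDs above let a handful of wells (e.g. SKL_DISP_PW_1 or SKL_DISP_DC_OFF) be fetched directly, outside the usual domain refcounting. A minimal sketch of such a lookup, assuming the for_each_power_well() iterator defined later in this header; the helper name here is illustrative and the driver's own lookup routine may differ:

static struct i915_power_well *
example_lookup_power_well(struct drm_i915_private *dev_priv,
			  enum i915_power_well_id id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == id)
			return power_well;

	return NULL;
}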
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
@@ -204,27 +243,22 @@ struct i915_power_domains {
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
enum i915_drm_suspend_mode);
void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915);
+void intel_display_power_resume_early(struct drm_i915_private *i915);
+void intel_display_power_suspend(struct drm_i915_private *i915);
+void intel_display_power_resume(struct drm_i915_private *i915);
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 1d58f7ec5d84..888ea8a170d1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -22,8 +22,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef __INTEL_DRV_H__
-#define __INTEL_DRV_H__
+
+#ifndef __INTEL_DISPLAY_TYPES_H__
+#define __INTEL_DISPLAY_TYPES_H__
#include <linux/async.h>
#include <linux/i2c.h>
@@ -67,15 +68,30 @@ enum intel_output_type {
INTEL_OUTPUT_DP_MST = 11,
};
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+/* "Broadcast RGB" property */
+enum intel_broadcast_rgb {
+ INTEL_BROADCAST_RGB_AUTO,
+ INTEL_BROADCAST_RGB_FULL,
+ INTEL_BROADCAST_RGB_LIMITED,
+};
+
struct intel_framebuffer {
struct drm_framebuffer base;
+ struct intel_frontbuffer *frontbuffer;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
struct {
unsigned int x, y;
- } normal[2];
- /* for each plane in the rotated GTT view */
+ } normal[4];
+ /* for each plane in the rotated GTT view for no-CCS formats */
struct {
unsigned int x, y;
unsigned int pitch; /* pixels */
@@ -101,20 +117,31 @@ struct intel_fbdev {
struct mutex hpd_lock;
};
+enum intel_hotplug_state {
+ INTEL_HOTPLUG_UNCHANGED,
+ INTEL_HOTPLUG_CHANGED,
+ INTEL_HOTPLUG_RETRY,
+};
+
struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
enum port port;
- unsigned int cloneable;
- bool (*hotplug)(struct intel_encoder *encoder,
- struct intel_connector *connector);
+ u16 cloneable;
+ u8 pipe_mask;
+ enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
+ void (*update_prepare)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
void (*pre_pll_enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -124,6 +151,9 @@ struct intel_encoder {
void (*enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*update_complete)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
void (*disable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -158,7 +188,6 @@ struct intel_encoder {
* device interrupts are disabled.
*/
void (*suspend)(struct intel_encoder *);
- int crtc_mask;
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -359,6 +388,13 @@ struct intel_hdcp {
wait_queue_head_t cp_irq_queue;
atomic_t cp_irq_count;
int cp_irq_count_cached;
+
+ /*
+ * HDCP register access for gen12+ needs the associated transcoder.
+ * The transcoder attached to the connector can change across a modeset,
+ * hence the transcoder is cached here.
+ */
+ enum transcoder cpu_transcoder;
};
struct intel_connector {
@@ -452,9 +488,9 @@ struct intel_atomic_state {
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTC's
*/
- unsigned int active_pipe_changes;
+ u8 active_pipe_changes;
- unsigned int active_crtcs;
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -470,6 +506,14 @@ struct intel_atomic_state {
bool rps_interactive;
+ /*
+ * Set when any of the following global state changes:
+ * active_pipes
+ * min_cdclk[]
+ * min_voltage_level[]
+ * cdclk.*
+ */
+ bool global_state_changed;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -479,7 +523,24 @@ struct intel_atomic_state {
};
struct intel_plane_state {
- struct drm_plane_state base;
+ struct drm_plane_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state;
+ * during initial hw readout, they need to be copied from uapi.
+ */
+ struct {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ u16 alpha;
+ uint16_t pixel_blend_mode;
+ unsigned int rotation;
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
+ } hw;
+
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long flags;
@@ -494,7 +555,7 @@ struct intel_plane_state {
*/
u32 stride;
int x, y;
- } color_plane[2];
+ } color_plane[4];
/* plane control register */
u32 ctl;
@@ -502,6 +563,9 @@ struct intel_plane_state {
/* plane color control register */
u32 color_ctl;
+ /* chroma upsampler control register */
+ u32 cus_ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -523,24 +587,24 @@ struct intel_plane_state {
int scaler_id;
/*
- * linked_plane:
+ * planar_linked_plane:
*
* ICL planar formats require 2 planes that are updated as pairs.
* This member is used to make sure the other plane is also updated
* when required, and for update_slave() to find the correct
* plane_state to pass as argument.
*/
- struct intel_plane *linked_plane;
+ struct intel_plane *planar_linked_plane;
/*
- * slave:
+ * planar_slave:
 * If set, this plane is not updated directly; the linked plane's state
 * is used to update it during atomic commit with the update_slave() callback.
*
* It's also used by the watermark code to ignore wm calculations on
* this plane. They're calculated by the linked plane's wm code.
*/
- u32 slave;
+ u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
};
@@ -713,7 +777,33 @@ enum intel_output_format {
};
struct intel_crtc_state {
- struct drm_crtc_state base;
+ /*
+ * uapi (drm) state. This is the software state shown to userspace.
+ * In particular, the following members are used for bookkeeping:
+ * - crtc
+ * - state
+ * - *_changed
+ * - event
+ * - commit
+ * - mode_blob
+ */
+ struct drm_crtc_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state:
+ * - enable
+ * - active
+ * - mode / adjusted_mode
+ * - color property blobs.
+ *
+ * During initial hw readout, they need to be copied to uapi.
+ */
+ struct {
+ bool active, enable;
+ struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
+ struct drm_display_mode mode, adjusted_mode;
+ } hw;
/**
* quirks - bitfield with hw state readout quirks
@@ -730,8 +820,8 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
- bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
+ bool preload_luts;
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -812,6 +902,15 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
+ /*
+ * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
+ * setting shared_dpll and dpll_hw_state to one of these reserved ones.
+ */
+ struct icl_port_dpll {
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state hw_state;
+ } icl_port_dplls[ICL_PORT_DPLL_COUNT];
+
/* DSI PLL registers */
struct {
u32 ctrl, div;
@@ -826,10 +925,11 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
+ u32 dc3co_exitline;
/*
 * Frequency the dpll for the port should run at. Differs from the
- * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;
@@ -887,6 +987,8 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
+ int min_cdclk[I915_MAX_PLANES];
+
u32 data_rate[I915_MAX_PLANES];
/* Gamma mode programmed on the pipe */
@@ -941,11 +1043,20 @@ struct intel_crtc_state {
bool dsc_split;
u16 compressed_bpp;
u8 slice_count;
- } dsc_params;
- struct drm_dsc_config dp_dsc_cfg;
+ struct drm_dsc_config config;
+ } dsc;
/* Forward Error correction State */
bool fec_enable;
+
+ /* Pointer to master transcoder in case of tiled displays */
+ enum transcoder master_transcoder;
+
+ /* Bitmask to indicate slaves attached */
+ u8 sync_mode_slaves_mask;
+
+ /* Only valid on TGL+ */
+ enum transcoder mst_master_transcoder;
};
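
With the base drm states renamed to uapi and a separate hw block added to intel_plane_state and intel_crtc_state, the atomic code now has an explicit copy step between the two representations. A minimal sketch for the crtc members listed in the hw comment above (illustrative only; a real helper also has to take references when copying the property blobs):

static void example_crtc_copy_uapi_to_hw(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	/* degamma_lut/gamma_lut/ctm would need drm_property_blob_get() */
}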
struct intel_crtc {
@@ -988,6 +1099,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
+
+ /* per pipe DSB related info */
+ struct intel_dsb dsb;
};
struct intel_plane {
@@ -1015,14 +1129,13 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
- void (*update_slave)(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
};
struct intel_watermark_params {
@@ -1046,12 +1159,12 @@ struct cxsr_latency {
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
@@ -1138,6 +1251,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
+ u32 aux_busy_last_status;
u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1173,6 +1287,15 @@ struct intel_dp {
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
+
+ /*
+ * DP_TP_* registers may live in either the port or the transcoder register space.
+ */
+ struct {
+ i915_reg_t dp_tp_ctl;
+ i915_reg_t dp_tp_status;
+ } regs;
+
/* connector directly attached - won't be use for modeset in mst world */
struct intel_connector *attached_connector;
@@ -1224,8 +1347,14 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ struct mutex tc_lock; /* protects the TypeC port mode */
+ intel_wakeref_t tc_lock_wakeref;
+ int tc_link_refcount;
bool tc_legacy_port:1;
- enum tc_port_type tc_type;
+ char tc_port_name[8];
+ enum tc_port_mode tc_mode;
+ enum phy_fia tc_phy_fia;
+ u8 tc_phy_fia_idx;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1309,9 +1438,9 @@ struct intel_load_detect_pipe {
};
static inline struct intel_encoder *
-intel_attached_encoder(struct drm_connector *connector)
+intel_attached_encoder(struct intel_connector *connector)
{
- return to_intel_connector(connector)->encoder;
+ return connector->encoder;
}
static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
@@ -1328,12 +1457,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
}
static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+enc_to_dig_port(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_encoder *intel_encoder = encoder;
if (intel_encoder_is_dig_port(intel_encoder))
- return container_of(encoder, struct intel_digital_port,
+ return container_of(&encoder->base, struct intel_digital_port,
base.base);
else
return NULL;
@@ -1342,16 +1471,17 @@ enc_to_dig_port(struct drm_encoder *encoder)
static inline struct intel_digital_port *
conn_to_dig_port(struct intel_connector *connector)
{
- return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+ return enc_to_dig_port(intel_attached_encoder(connector));
}
static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct drm_encoder *encoder)
+enc_to_mst(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+ return container_of(&encoder->base, struct intel_dp_mst_encoder,
+ base.base);
}
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
}
@@ -1364,14 +1494,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
return true;
case INTEL_OUTPUT_DDI:
/* Skip pure HDMI/DVI DDI encoders */
- return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+ return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg);
default:
return false;
}
}
static inline struct intel_lspcon *
-enc_to_intel_lspcon(struct drm_encoder *encoder)
+enc_to_intel_lspcon(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->lspcon;
}
@@ -1445,42 +1575,25 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
&crtc->base));
}
+static inline struct intel_digital_connector_state *
+intel_atomic_get_new_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_new_connector_state(&state->base,
+ &connector->base));
+}
+
+static inline struct intel_digital_connector_state *
+intel_atomic_get_old_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_old_connector_state(&state->base,
+ &connector->base));
+}
+
/* intel_display.c */
-void intel_plane_destroy(struct drm_plane *plane);
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg);
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-unsigned int intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int plane);
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state, int plane);
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
-void intel_encoder_destroy(struct drm_encoder *encoder);
-struct drm_display_mode *
-intel_encoder_current_mode(struct intel_encoder *encoder);
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
- enum port port);
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
enum intel_output_type type)
@@ -1501,7 +1614,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -1509,108 +1622,9 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
intel_wait_for_vblank(dev_priv, pipe);
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
-
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
- unsigned int expected_mask);
-int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
-
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
-bool intel_fuzzy_clock_check(int clock1, int clock2);
-
-/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
-#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_prepare_reset(struct drm_i915_private *dev_priv);
-void intel_finish_reset(struct drm_i915_private *dev_priv);
-void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
- enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock);
-int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-
-bool intel_crtc_active(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
-enum intel_display_power_domain intel_port_to_power_domain(enum port port);
-enum intel_display_power_domain
-intel_aux_power_domain(struct intel_digital_port *dig_port);
-void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_state *pipe_config);
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
-
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format);
-
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
return i915_ggtt_offset(state->vma);
}
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int plane);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
-
-#endif /* __INTEL_DRV_H__ */
+#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index d0fc34826771..c7424e2a04a3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -44,15 +44,16 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -62,22 +63,18 @@
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_tc.h"
#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14
-/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
-#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
-#define DP_DSC_MIN_SUPPORTED_BPC 8
-#define DP_DSC_MAX_SUPPORTED_BPC 10
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
-#define DP_DSC_FEC_OVERHEAD_FACTOR 976
+/* DP DSC FEC Overhead factor = 1/(0.972261) */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -149,9 +146,9 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
- return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ return enc_to_intel_dp(intel_attached_encoder(connector));
}
static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -211,47 +208,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
-static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- intel_wakeref_t wakeref;
- u32 lane_info;
-
- if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
- return 4;
-
- lane_info = 0;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
- switch (lane_info) {
- default:
- MISSING_CASE(lane_info);
- /* fall through */
- case 1:
- case 2:
- case 4:
- case 8:
- return 1;
- case 3:
- case 12:
- return 2;
- case 15:
- return 4;
- }
-}
-
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
- int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
+ int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
return min3(source_max, sink_max, fia_max);
}
@@ -330,9 +293,9 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_port_is_combophy(dev_priv, port) &&
+ if (intel_phy_is_combo(dev_priv, phy) &&
!IS_ELKHARTLAKE(dev_priv) &&
!intel_dp_is_edp(intel_dp))
return 540000;
@@ -526,11 +489,132 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
+{
+ return div_u64(mul_u32_u32(mode_clock, 1000000U),
+ DP_DSC_FEC_OVERHEAD_FACTOR);
+}
+
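
As a quick sanity check of the overhead factor defined earlier in this file, intel_dp_mode_to_fec_clock() scales the mode clock by 1/0.972261, i.e. roughly +2.85%:

/*
 * mode_clock = 148500 kHz (typical 1080p60 dotclock)
 * FEC clock  = 148500 * 1000000 / 972261 ~= 152736 kHz
 */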
+static int
+small_joiner_ram_size_bits(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ return 7680 * 8;
+ else
+ return 6144 * 8;
+}
+
+static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay)
+{
+ u32 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+ /*
+ * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
+ * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP)
+ * for SST -> TimeSlotsPerMTP is 1,
+ * for MST -> TimeSlotsPerMTP has to be calculated
+ */
+ bits_per_pixel = (link_clock * lane_count * 8) /
+ intel_dp_mode_to_fec_clock(mode_clock);
+ DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
+ mode_hdisplay;
+ DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+
+ /*
+ * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
+ return 0;
+ }
+
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+
+ /*
+ * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+ * fractional part is 0
+ */
+ return bits_per_pixel << 4;
+}
+
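
To see how the two limits above interact, take a hypothetical HBR2 x4 link (link_clock = 540000, lane_count = 4) driving a 3840-wide mode with a roughly 533250 kHz clock; the numbers are illustrative:

/*
 * link-bw bpp      = 540000 * 4 * 8 / intel_dp_mode_to_fec_clock(533250)
 *                  ~= 17280000 / 548463 ~= 31
 * small-joiner bpp = small_joiner_ram_size_bits() / 3840
 *                  = 61440 / 3840 = 16   (gen11+)
 * bits_per_pixel   = min(31, 16) = 16, then clamped to the nearest valid
 *                    VESA bpp and returned in U6.4 format (<< 4)
 */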
+static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay)
+{
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = min_t(u8, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ if (valid_dsc_slicecount[i] >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ false))
+ break;
+ if (min_slice_count <= valid_dsc_slicecount[i])
+ return valid_dsc_slicecount[i];
+ }
+
+ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ return 0;
+}
+
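
Continuing with the same illustrative mode clock of 533250 kHz for the slice-count helper above:

/*
 * 533250 <= DP_DSC_PEAK_PIXEL_RATE, so
 * min_slice_count = DIV_ROUND_UP(533250, DP_DSC_MAX_ENC_THROUGHPUT_0)
 *                 = DIV_ROUND_UP(533250, 340000) = 2
 * This is then combined with the DIV_ROUND_UP(mode_hdisplay, max_slice_width)
 * bound and matched against valid_dsc_slicecount[], subject to the sink's
 * maximum slice count.
 */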
+static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ /*
+ * Older platforms don't like hdisplay==4096 with DP.
+ *
+ * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
+ * and frame counter increment), but we don't get vblank interrupts,
+ * and the pipe underruns immediately. The link also doesn't seem
+ * to get trained properly.
+ *
+ * On CHV the vblank interrupts don't seem to disappear but
+ * otherwise the symptoms are similar.
+ *
+ * TODO: confirm the behaviour on HSW+
+ */
+ return hdisplay == 4096 && !HAS_DDI(dev_priv);
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -561,6 +645,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
+ if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
/*
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
@@ -575,7 +662,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
true);
} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(max_link_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
max_lanes,
target_clock,
mode->hdisplay) >> 4;
@@ -596,7 +684,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
@@ -673,12 +761,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port)))
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -744,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* Pick one that's not used by other ports.
*/
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -796,9 +886,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
+ DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->base.port));
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -906,13 +997,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
- port_name(port), pipe_name(intel_dp->pps_pipe));
+ DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -937,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
*/
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -1085,18 +1179,20 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
u32 status;
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
+ msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout!\n");
+ DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
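/*
 * A minimal, self-contained sketch of the bounded AUX-busy wait used above:
 * poll a status word until the BUSY bit clears or the timeout elapses, and
 * report the last observed status on failure. read_status(), the BUSY bit
 * value and the millisecond loop are stand-ins, not the driver's real
 * register accessors or wait_event_timeout() machinery.
 */
#include <stdint.h>
#include <stdio.h>

#define AUX_CTL_SEND_BUSY (1u << 31)

static uint32_t read_status(void)
{
        static int polls;
        /* pretend the hardware deasserts BUSY after a few polls */
        return (++polls < 3) ? AUX_CTL_SEND_BUSY : 0;
}

static uint32_t wait_aux_done(unsigned int timeout_ms)
{
        uint32_t status = 0;
        unsigned int elapsed;

        for (elapsed = 0; elapsed <= timeout_ms; elapsed++) {
                status = read_status();
                if (!(status & AUX_CTL_SEND_BUSY))
                        return status;  /* done: BUSY cleared */
                /* a real implementation would sleep ~1 ms here */
        }

        fprintf(stderr, "AUX did not complete within %ums (status 0x%08x)\n",
                timeout_ms, (unsigned int)status);
        return status;
}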
@@ -1197,6 +1293,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(intel_dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1209,7 +1308,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
- if (intel_dig_port->tc_type == TC_PORT_TBT)
+ if (intel_phy_is_tc(i915, phy) &&
+ intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -1225,6 +1325,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct drm_i915_private *i915 =
to_i915(intel_dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain =
@@ -1240,6 +1342,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+ if (is_tc_port)
+ intel_tc_port_lock(intel_dig_port);
+
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1270,13 +1375,12 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- static u32 last_status = -1;
const u32 status = intel_uncore_read(uncore, ch_ctl);
- if (status != last_status) {
+ if (status != intel_dp->aux_busy_last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
status);
- last_status = status;
+ intel_dp->aux_busy_last_status = status;
}
ret = -EBUSY;
@@ -1392,6 +1496,9 @@ out:
pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+ if (is_tc_port)
+ intel_tc_port_unlock(intel_dig_port);
+
return ret;
}
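/*
 * Minimal sketch of the locking pattern added above: a Type-C port's AUX
 * transfer is bracketed by a per-port lock so PHY ownership cannot change
 * mid-transfer, while non-TC ports skip the lock entirely. struct port,
 * port->tc_lock and do_transfer() are stand-ins, not i915 APIs.
 */
#include <pthread.h>
#include <stdbool.h>

struct port {
        bool is_tc;                     /* Type-C port needs the PHY lock */
        pthread_mutex_t tc_lock;
};

static int do_transfer(struct port *p)
{
        (void)p;                        /* the actual AUX write/read */
        return 0;
}

static int aux_transfer(struct port *p)
{
        int ret;

        if (p->is_tc)
                pthread_mutex_lock(&p->tc_lock);

        ret = do_transfer(p);           /* power/PPS handling omitted */

        if (p->is_tc)
                pthread_mutex_unlock(&p->tc_lock);

        return ret;
}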
@@ -1565,6 +1672,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_CTL(aux_ch);
default:
MISSING_CASE(aux_ch);
@@ -1585,6 +1693,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_DATA(aux_ch, index);
default:
MISSING_CASE(aux_ch);
@@ -1705,7 +1814,7 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
char str[128]; /* FIXME: too big for stack? */
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
snprintf_int_array(str, sizeof(str),
@@ -1763,8 +1872,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 11 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ /* On TGL, FEC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
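/*
 * Sketch of the platform rule encoded above: FEC is available on every
 * pipe from gen 12 (TGL) onwards, and on gen 11 (ICL) on every transcoder
 * except transcoder A. Plain ints and a trimmed enum stand in for the
 * i915 platform checks.
 */
#include <stdbool.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C };

static bool source_supports_fec(int gen, enum transcoder cpu_transcoder)
{
        if (gen >= 12)
                return true;                            /* TGL+: all pipes */
        if (gen == 11)
                return cpu_transcoder != TRANSCODER_A;  /* ICL: not on A */
        return false;                                   /* older platforms */
}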
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
@@ -1774,22 +1889,15 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
drm_dp_sink_supports_fec(intel_dp->fec_capable);
}
-static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- return INTEL_GEN(dev_priv) >= 10 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
-}
-
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
- if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
return false;
- return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ return intel_dsc_source_support(encoder, crtc_state) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
@@ -1874,13 +1982,15 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int bpp, clock, lane_count;
int mode_rate, link_clock, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
+
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
+ output_bpp);
for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
for (lane_count = limits->min_lane_count;
@@ -1919,6 +2029,63 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
return 0;
}
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ u8 line_buf_depth;
+ int ret;
+
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8. Eventually add
+ * logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
+}
+
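/*
 * Sketch of two negotiation steps from intel_dp_dsc_compute_params() above:
 * pick the largest slice height (8, 4, 2) that evenly divides the picture
 * height, and cap the DSC minor version at what the driver supports while
 * honouring what the sink reports in its DPCD. Plain ints stand in for the
 * DPCD fields and drm_dsc_config members.
 */
static int pick_slice_height(int pic_height)
{
        if (pic_height % 8 == 0)
                return 8;       /* preferred: works for current panels */
        if (pic_height % 4 == 0)
                return 4;
        return 2;
}

static int negotiate_minor_version(int driver_max_minor, int sink_minor)
{
        /* never program a minor version beyond either side's support */
        return driver_max_minor < sink_minor ? driver_max_minor : sink_minor;
}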
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -1926,7 +2093,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -1937,11 +2104,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
- conn_state->max_requested_bpc);
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (INTEL_GEN(dev_priv) >= 12)
+ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10,
+ conn_state->max_requested_bpc);
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
- if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+
+ /* Min Input BPC for ICL+ is 8 */
+ if (pipe_bpp < 8 * 3) {
DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
return -EINVAL;
}
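/*
 * Sketch of the bpc clamping above: the DSC input bpc is the smaller of
 * what the platform encoder accepts (12 on gen 12+, 10 on gen 11) and what
 * userspace requested, and anything that ends up below 8 bpc is rejected.
 * The sink-side cap applied by intel_dp_dsc_compute_bpp() is ignored here.
 */
#include <stdbool.h>

static int dsc_input_bpc(int gen, int max_requested_bpc, bool *ok)
{
        int platform_max = (gen >= 12) ? 12 : 10;
        int bpc = max_requested_bpc < platform_max ? max_requested_bpc
                                                   : platform_max;

        *ok = (bpc >= 8);       /* min DSC input bpc on ICL+ is 8 */
        return bpc;
}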
@@ -1956,10 +2129,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
if (intel_dp_is_edp(intel_dp)) {
- pipe_config->dsc_params.compressed_bpp =
+ pipe_config->dsc.compressed_bpp =
min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count =
+ pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
} else {
@@ -1967,7 +2140,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
u8 dsc_dp_slice_count;
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
@@ -1979,10 +2153,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
- pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ pipe_config->dsc.compressed_bpp = min_t(u16,
dsc_max_output_bpp >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
@@ -1990,29 +2164,29 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* then we need to use 2 VDSC instances.
*/
if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
- if (pipe_config->dsc_params.slice_count > 1) {
- pipe_config->dsc_params.dsc_split = true;
+ if (pipe_config->dsc.slice_count > 1) {
+ pipe_config->dsc.dsc_split = true;
} else {
DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
}
- ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
+ ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
return ret;
}
- pipe_config->dsc_params.compression_enable = true;
+ pipe_config->dsc.compression_enable = true;
DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
"Compressed Bpp = %d Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
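/*
 * Sketch of the engine-split rule above: the VDSC engine processes one
 * pixel per clock, so when the mode's pixel rate exceeds the maximum CD
 * clock one engine cannot keep up and the stream must be split across two
 * engines, which in turn needs at least two slices. Plain ints replace the
 * crtc state and dev_priv fields.
 */
#include <stdbool.h>

/* returns true if a workable engine layout exists */
static bool plan_vdsc_engines(int pixel_clock_khz, int max_cdclk_khz,
                              int slice_count, bool *use_two_engines)
{
        *use_two_engines = false;

        if (pixel_clock_khz <= max_cdclk_khz)
                return true;            /* one engine keeps up */

        if (slice_count > 1) {
                *use_two_engines = true;
                return true;            /* split the slices across engines */
        }

        return false;                   /* cannot split a single slice */
}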
@@ -2030,8 +2204,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
int ret;
@@ -2086,15 +2260,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- if (pipe_config->dsc_params.compression_enable) {
+ if (pipe_config->dsc.compression_enable) {
DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
DRM_DEBUG_KMS("DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc_params.compressed_bpp),
+ pipe_config->dsc.compressed_bpp),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2118,8 +2292,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
if (!drm_mode_is_420_only(info, adjusted_mode) ||
@@ -2147,7 +2321,17 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
@@ -2164,17 +2348,28 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
+static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (IS_G4X(dev_priv))
+ return false;
+ if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
+ return false;
+
+ return true;
+}
+
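/*
 * Sketch of the audio rule factored out above: G4X DP ports carry no audio
 * at all, and before gen 12 port A does not either. The enum is a trimmed
 * stand-in for the driver's enum port.
 */
#include <stdbool.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

static bool dp_port_has_audio(bool is_g4x, int gen, enum port port)
{
        if (is_g4x)
                return false;
        if (gen < 12 && port == PORT_A)
                return false;
        return true;
}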
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
enum port port = encoder->port;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2186,6 +2381,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
@@ -2196,7 +2392,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return ret;
pipe_config->has_drrs = false;
- if (IS_G4X(dev_priv) || port == PORT_A)
+ if (!intel_dp_port_has_audio(dev_priv, port))
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio;
@@ -2231,6 +2427,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
+ if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ return -EINVAL;
+
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
@@ -2238,8 +2437,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (pipe_config->dsc_params.compression_enable)
- output_bpp = pipe_config->dsc_params.compressed_bpp;
+ if (pipe_config->dsc.compression_enable)
+ output_bpp = pipe_config->dsc.compressed_bpp;
else
output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
@@ -2248,7 +2447,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- constant_n);
+ constant_n, pipe_config->fec_enable);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2258,7 +2457,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2,
- constant_n);
+ constant_n, pipe_config->fec_enable);
}
if (!HAS_DDI(dev_priv))
@@ -2283,16 +2482,19 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
pipe_config->lane_count,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
/*
* There are four kinds of DP registers:
*
@@ -2307,7 +2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
*
* CPT PCH is quite different, having many bits moved
* to the TRANS_DP_CTL register instead. That
- * configuration happens (oddly) in ironlake_pch_enable
+ * configuration happens (oddly) in ilk_pch_enable
*/
/* Preserve the BIOS-computed detected bit. This is
@@ -2393,9 +2595,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(&dev_priv->uncore,
- pp_stat_reg, mask, value,
- 5000))
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
@@ -2452,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
* is locked
*/
-static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
@@ -2495,13 +2696,14 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
pp_stat_reg = _pp_stat_reg(intel_dp);
@@ -2515,8 +2717,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2541,8 +2744,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_pps_lock(intel_dp, wakeref)
vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -2560,10 +2764,11 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2623,8 +2828,9 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
intel_dp->want_panel_vdd = false;
@@ -2645,18 +2851,20 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
if (WARN(edp_have_panel_power(intel_dp),
- "eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port)))
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@@ -2705,13 +2913,13 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dig_port->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dig_port->base.port));
+ WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
@@ -2760,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2772,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -2796,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2810,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -2828,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
- struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
is_enabled = false;
with_pps_lock(intel_dp, wakeref)
- is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@@ -2854,8 +3062,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
- "DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->base.port),
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -2871,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
@@ -2911,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
udelay(200);
}
-static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
@@ -2951,7 +3159,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
{
int ret;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
@@ -3050,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -3071,10 +3279,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
if (encoder->type == INTEL_OUTPUT_EDP)
pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
@@ -3109,7 +3317,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -3126,7 +3334,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->port_clock = 270000;
}
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
@@ -3155,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp->link_trained = false;
@@ -3189,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
/*
@@ -3202,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_off(intel_dp, old_crtc_state);
+ ilk_edp_pll_off(intel_dp, old_crtc_state);
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
@@ -3243,7 +3451,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(DP_TP_CTL(port));
+ u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3269,7 +3477,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(DP_TP_CTL(port), temp);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3340,8 +3548,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -3400,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
intel_dp_prepare(encoder, pipe_config);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_on(intel_dp, pipe_config);
+ ilk_edp_pll_on(intel_dp, pipe_config);
}
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -3433,8 +3641,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
@@ -3449,18 +3658,19 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->pps_mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active (e)DP port %c\n",
- pipe_name(pipe), port_name(port));
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(port));
+ DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3471,8 +3681,8 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -3502,8 +3712,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
+ DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -3967,25 +4178,23 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
/*
- * On PORT_A we can have only eDP in SST mode. There the only reason
- * we need to set idle transmission mode is to work around a HW issue
- * where we enable the pipe while not in idle link-training mode.
+ * Until TGL on PORT_A we can have only eDP in SST mode. There the only
+ * reason we need to set idle transmission mode is to work around a HW
+ * issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
- DP_TP_STATUS_IDLE_DONE,
- DP_TP_STATUS_IDLE_DONE,
- 1))
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@@ -3994,8 +4203,8 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
u32 DP = intel_dp->DP;
@@ -4169,10 +4378,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
- dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
- DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-
/*
* Read the eDP display control registers.
*
@@ -4244,8 +4449,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
- /* Don't clobber cached eDP rates. */
+ /*
+ * Don't clobber cached eDP rates. Also skip re-reading
+ * the OUI/ID since we know it won't change.
+ */
if (!intel_dp_is_edp(intel_dp)) {
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
+
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -4254,7 +4465,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* Some eDP panels do not set a valid value for sink count, which is why
* we don't bother reading it here or in intel_edp_init_dpcd().
*/
- if (!intel_dp_is_edp(intel_dp)) {
+ if (!intel_dp_is_edp(intel_dp) &&
+ !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -4323,9 +4535,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
- port_name(encoder->port), yesno(intel_dp->can_mst),
- yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4345,94 +4558,36 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay)
+bool
+intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- u16 bits_per_pixel, max_bpp_small_joiner_ram;
- int i;
-
- /*
- * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
- * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
- * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
- * for MST -> TimeSlotsPerMTP has to be calculated
- */
- bits_per_pixel = (link_clock * lane_count * 8 *
- DP_DSC_FEC_OVERHEAD_FACTOR) /
- mode_clock;
-
- /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
- mode_hdisplay;
-
- /*
- * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
- * check, output bpp from small joiner RAM check)
- */
- bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
-
- /* Error out if the max bpp is less than smallest allowed valid bpp */
- if (bits_per_pixel < valid_dsc_bpp[0]) {
- DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
- return 0;
- }
-
- /* Find the nearest match in the array of known BPPs from VESA */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
- if (bits_per_pixel < valid_dsc_bpp[i + 1])
- break;
- }
- bits_per_pixel = valid_dsc_bpp[i];
-
/*
- * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
- * fractional part is 0
+ * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], in order to
+ * send YCbCr 420 or HDR BT.2020 signals we should use a DP VSC SDP.
*/
- return bits_per_pixel << 4;
-}
-
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
- int mode_clock,
- int mode_hdisplay)
-{
- u8 min_slice_count, i;
- int max_slice_width;
-
- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_0);
- else
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_1);
-
- max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
- if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
- max_slice_width);
- return 0;
- }
- /* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
- DIV_ROUND_UP(mode_hdisplay,
- max_slice_width));
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return true;
- /* Find the closest match to the valid slice count values */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
- if (valid_dsc_slicecount[i] >
- drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
- false))
- break;
- if (min_slice_count <= valid_dsc_slicecount[i])
- return valid_dsc_slicecount[i];
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ return true;
+ default:
+ break;
}
- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
- return 0;
+ return false;
}
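/*
 * Sketch of the decision above: a VSC SDP is needed either when the pipe
 * outputs YCbCr 4:2:0 (the MSA cannot indicate that format) or when the
 * connector state asks for a colorimetry that only the VSC SDP can carry.
 * The enums are trimmed stand-ins for the DRM/driver ones.
 */
#include <stdbool.h>

enum output_format { FORMAT_RGB, FORMAT_YCBCR444, FORMAT_YCBCR420 };
enum colorimetry { COLOR_DEFAULT, COLOR_SYCC_601, COLOR_OPYCC_601,
                   COLOR_BT2020_RGB, COLOR_BT2020_YCC, COLOR_BT2020_CYCC };

static bool needs_vsc_sdp(enum output_format fmt, enum colorimetry c)
{
        if (fmt == FORMAT_YCBCR420)
                return true;

        switch (c) {
        case COLOR_SYCC_601:
        case COLOR_OPYCC_601:
        case COLOR_BT2020_RGB:
        case COLOR_BT2020_YCC:
        case COLOR_BT2020_CYCC:
                return true;
        default:
                return false;
        }
}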
static void
-intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct dp_sdp vsc_sdp = {};
@@ -4453,13 +4608,55 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
*/
vsc_sdp.sdp_header.HB3 = 0x13;
- /*
- * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
- * DB16[3:0] DP 1.4a spec, Table 2-120
- */
- vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
- /* RGB->YCBCR color conversion uses the BT.709 color space. */
- vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ /* RGB: DB16[7:4] = 0h */
+ break;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc_sdp.db[16] |= 0x1;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc_sdp.db[16] |= 0x2;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc_sdp.db[16] |= 0x3;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc_sdp.db[16] |= 0x4;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc_sdp.db[16] |= 0x5;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc_sdp.db[16] |= 0x6;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc_sdp.db[16] |= 0x7;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
+ break;
+ default:
+ /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[3:0] = 0h */
+
+ /* RGB->YCBCR color conversion uses the BT.709 color space. */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ break;
+ }
/*
* For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
@@ -4511,13 +4708,106 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
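/*
 * Sketch of how VSC SDP data byte 16 is packed in the function above
 * (DP 1.4a, Table 2-120): the pixel-encoding code occupies bits 7:4 and
 * the colorimetry code bits 3:0. The example values mirror the switch
 * statements in the diff; everything else is illustrative.
 */
#include <stdint.h>

static uint8_t pack_vsc_db16(uint8_t pixel_encoding, uint8_t colorimetry)
{
        /* pixel encoding: 0h RGB, 1h YCbCr 444, 3h YCbCr 420, ... */
        /* colorimetry:    1h BT.709, 6h BT.2020 RGB/cYCC, 7h BT.2020 YCC, ... */
        return (uint8_t)((pixel_encoding & 0xf) << 4 | (colorimetry & 0xf));
}

/* e.g. YCbCr 4:2:0 with BT.709 gives 0x31, matching db[16] in the diff */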
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void
+intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct dp_sdp infoframe_sdp = {};
+ struct hdmi_drm_infoframe drm_infoframe = {};
+ const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
+ unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
+ ssize_t len;
+ int ret;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
+ if (ret) {
+ DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ return;
+ }
+
+ len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+ if (len < 0) {
+ DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ return;
+ }
+
+ if (len != infoframe_size) {
+ DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ return;
+ }
+
+ /*
+ * Set up the infoframe SDP packet for HDR static metadata.
+ * Prepare the infoframe SDP header as per DP 1.4a spec,
+ * Table 2-100 and Table 2-101.
+ */
+
+ /* Packet ID, 00h for non-Audio INFOFRAME */
+ infoframe_sdp.sdp_header.HB0 = 0;
+ /*
+ * Packet Type 80h + Non-audio INFOFRAME Type value
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+ */
+ infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+ /*
+ * Least Significant Eight Bits of (Data Byte Count – 1)
+ * infoframe_size - 1,
+ */
+ infoframe_sdp.sdp_header.HB2 = 0x1D;
+ /* INFOFRAME SDP Version Number */
+ infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ infoframe_sdp.db[0] = drm_infoframe.version;
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ infoframe_sdp.db[1] = drm_infoframe.length;
+ /*
+ * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
+ * HDMI_INFOFRAME_HEADER_SIZE
+ */
+ BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ /*
+ * The size of the DP infoframe SDP packet for HDR static metadata consists of:
+ * - DP SDP Header(struct dp_sdp_header): 4 bytes
+ * - Two Data Blocks: 2 bytes
+ * CTA Header Byte2 (INFOFRAME Version Number)
+ * CTA Header Byte3 (Length of INFOFRAME)
+ * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
+ *
+ * Prior to GEN11, the GMP register size is identical to the DP HDR static
+ * metadata infoframe size. On GEN11+ the register is larger; write_infoframe
+ * will pad the rest of the size.
+ */
+ intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ &infoframe_sdp,
+ sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+}
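/*
 * Sketch of the SDP layout built above for HDR static metadata: a 4-byte
 * SDP header (HB0..HB3) followed by the two CTA header bytes (infoframe
 * version and length) and the 26-byte HDR metadata payload. The struct is
 * a stand-alone illustration, not the driver's struct dp_sdp, and the
 * payload is assumed to be already packed by the caller.
 */
#include <stdint.h>
#include <string.h>

#define HDR_DRM_INFOFRAME_SIZE 26       /* CTA-861-G static metadata payload */

struct hdr_sdp {
        uint8_t hb0;    /* packet ID, 0 for non-audio infoframes */
        uint8_t hb1;    /* 0x87: packet type 0x80 + DRM infoframe type 0x07 */
        uint8_t hb2;    /* data byte count - 1 = 0x1d */
        uint8_t hb3;    /* infoframe SDP version, 0x13 << 2 */
        uint8_t db[2 + HDR_DRM_INFOFRAME_SIZE]; /* version, length, payload */
};

static void fill_hdr_sdp(struct hdr_sdp *sdp, uint8_t version,
                         const uint8_t payload[HDR_DRM_INFOFRAME_SIZE])
{
        memset(sdp, 0, sizeof(*sdp));
        sdp->hb0 = 0x00;
        sdp->hb1 = 0x87;
        sdp->hb2 = 0x1d;
        sdp->hb3 = 0x13 << 2;
        sdp->db[0] = version;                   /* CTA header byte 2 */
        sdp->db[1] = HDR_DRM_INFOFRAME_SIZE;    /* CTA header byte 3 */
        memcpy(&sdp->db[2], payload, HDR_DRM_INFOFRAME_SIZE);
}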
+
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+}
+
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ if (!conn_state->hdr_output_metadata)
return;
- intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
+ intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
+ crtc_state,
+ conn_state);
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -4613,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
return DP_TEST_ACK;
}
@@ -4657,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
}
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
return test_result;
}
@@ -4806,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
@@ -4837,7 +5127,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (conn_state->commit &&
@@ -4879,14 +5169,16 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
* retrain the link to get a picture. That's in case no
* userspace component reacted to intermittent HPD dip.
*/
-static bool intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
struct drm_modeset_acquire_ctx ctx;
- bool changed;
+ enum intel_hotplug_state state;
int ret;
- changed = intel_encoder_hotplug(encoder, connector);
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4905,7 +5197,14 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- return changed;
+ /*
+ * Keeping it consistent with intel_ddi_hotplug() and
+ * intel_hdmi_hotplug().
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
}
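/*
 * Sketch of the hotplug outcome handling above: if the generic hotplug
 * processing saw no change but the event came from a real IRQ rather than
 * a poll, the connector is flagged for a retry so a link that glitched
 * around the IRQ can be re-probed later. The enum mirrors
 * enum intel_hotplug_state only in spirit.
 */
#include <stdbool.h>

enum hotplug_state { HOTPLUG_UNCHANGED, HOTPLUG_CHANGED, HOTPLUG_RETRY };

static enum hotplug_state
dp_hotplug(enum hotplug_state base_result, bool irq_received)
{
        /* link retraining under the modeset locks is omitted here */
        if (base_result == HOTPLUG_UNCHANGED && irq_received)
                return HOTPLUG_RETRY;

        return base_result;
}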
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
@@ -5225,212 +5524,25 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
-static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port)
-{
- enum port port = intel_dig_port->base.port;
-
- return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
-}
-
-static const char *tc_type_name(enum tc_port_type type)
-{
- static const char * const names[] = {
- [TC_PORT_UNKNOWN] = "unknown",
- [TC_PORT_LEGACY] = "legacy",
- [TC_PORT_TYPEC] = "typec",
- [TC_PORT_TBT] = "tbt",
- };
-
- if (WARN_ON(type >= ARRAY_SIZE(names)))
- type = TC_PORT_UNKNOWN;
-
- return names[type];
-}
-
-static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port,
- bool is_legacy, bool is_typec, bool is_tbt)
+static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
+ enum phy phy)
{
- enum port port = intel_dig_port->base.port;
- enum tc_port_type old_type = intel_dig_port->tc_type;
-
- WARN_ON(is_legacy + is_typec + is_tbt != 1);
-
- if (is_legacy)
- intel_dig_port->tc_type = TC_PORT_LEGACY;
- else if (is_typec)
- intel_dig_port->tc_type = TC_PORT_TYPEC;
- else if (is_tbt)
- intel_dig_port->tc_type = TC_PORT_TBT;
- else
- return;
+ if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
+ return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
- /* Types are not supposed to be changed at runtime. */
- WARN_ON(old_type != TC_PORT_UNKNOWN &&
- old_type != intel_dig_port->tc_type);
-
- if (old_type != intel_dig_port->tc_type)
- DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
- tc_type_name(intel_dig_port->tc_type));
+ return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
-/*
- * This function implements the first part of the Connect Flow described by our
- * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
- * lanes, EDID, etc) is done as needed in the typical places.
- *
- * Unlike the other ports, type-C ports are not available to use as soon as we
- * get a hotplug. The type-C PHYs can be shared between multiple controllers:
- * display, USB, etc. As a result, handshaking through FIA is required around
- * connect and disconnect to cleanly transfer ownership with the controller and
- * set the type-C power state.
- *
- * We could opt to only do the connect flow when we actually try to use the AUX
- * channels or do a modeset, then immediately run the disconnect flow after
- * usage, but there are some implications on this for a dynamic environment:
- * things may go away or change behind our backs. So for now our driver is
- * always trying to acquire ownership of the controller as soon as it gets an
- * interrupt (or polls state and sees a port is connected) and only gives it
- * back when it sees a disconnect. Implementation of a more fine-grained model
- * will require a lot of coordination with user space and thorough testing for
- * the extra possible cases.
- */
-static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
-{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val;
-
- if (dig_port->tc_type != TC_PORT_LEGACY &&
- dig_port->tc_type != TC_PORT_TYPEC)
- return true;
-
- val = I915_READ(PORT_TX_DFLEXDPPMS);
- if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
- DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
- WARN_ON(dig_port->tc_legacy_port);
- return false;
- }
-
- /*
- * This function may be called many times in a row without an HPD event
- * in between, so try to avoid the write when we can.
- */
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
- }
-
- /*
- * Now we have to re-check the live state, in case the port recently
- * became disconnected. Not necessary for legacy mode.
- */
- if (dig_port->tc_type == TC_PORT_TYPEC &&
- !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
- DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
- icl_tc_phy_disconnect(dev_priv, dig_port);
- return false;
- }
-
- return true;
-}
-
-/*
- * See the comment at the connect function. This implements the Disconnect
- * Flow.
- */
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
-{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-
- if (dig_port->tc_type == TC_PORT_UNKNOWN)
- return;
-
- /*
- * TBT disconnection flow is read the live status, what was done in
- * caller.
- */
- if (dig_port->tc_type == TC_PORT_TYPEC ||
- dig_port->tc_type == TC_PORT_LEGACY) {
- u32 val;
-
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
- }
-
- DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
- port_name(dig_port->base.port),
- tc_type_name(dig_port->tc_type));
-
- dig_port->tc_type = TC_PORT_UNKNOWN;
-}
-
-/*
- * The type-C ports are different because even when they are connected, they may
- * not be available/usable by the graphics driver: see the comment on
- * icl_tc_phy_connect(). So in our driver instead of adding the additional
- * concept of "usable" and make everything check for "connected and usable" we
- * define a port as "connected" when it is not only connected, but also when it
- * is usable by the rest of the driver. That maintains the old assumption that
- * connected ports are usable, and avoids exposing to the users objects they
- * can't really use.
- */
-static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port)
-{
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- bool is_legacy, is_typec, is_tbt;
- u32 dpsp;
-
- /*
- * Complain if we got a legacy port HPD, but VBT didn't mark the port as
- * legacy. Treat the port as legacy from now on.
- */
- if (!intel_dig_port->tc_legacy_port &&
- I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
- DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
- port_name(port));
- intel_dig_port->tc_legacy_port = true;
- }
- is_legacy = intel_dig_port->tc_legacy_port;
-
- /*
- * The spec says we shouldn't be using the ISR bits for detecting
- * between TC and TBT. We should use DFLEXDPSP.
- */
- dpsp = I915_READ(PORT_TX_DFLEXDPSP);
- is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
- is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
-
- if (!is_legacy && !is_typec && !is_tbt) {
- icl_tc_phy_disconnect(dev_priv, intel_dig_port);
-
- return false;
- }
-
- icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
- is_tbt);
-
- if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
- return false;
-
- return true;
-}
-
-static bool icl_digital_port_connected(struct intel_encoder *encoder)
+static bool icp_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (intel_port_is_combophy(dev_priv, encoder->port))
- return icl_combo_port_connected(dev_priv, dig_port);
- else if (intel_port_is_tc(dev_priv, encoder->port))
- return icl_tc_port_connected(dev_priv, dig_port);
+ if (intel_phy_is_combo(dev_priv, phy))
+ return intel_combo_phy_connected(dev_priv, phy);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return intel_tc_port_connected(dig_port);
else
MISSING_CASE(encoder->hpd_pin);
@@ -5459,9 +5571,9 @@ static bool __intel_digital_port_connected(struct intel_encoder *encoder)
return g4x_digital_port_connected(encoder);
}
- if (INTEL_GEN(dev_priv) >= 11)
- return icl_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ return icp_digital_port_connected(encoder);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
@@ -5539,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector,
bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
@@ -5588,9 +5700,6 @@ intel_dp_detect(struct drm_connector *connector,
if (INTEL_GEN(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
- drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
- drm_dp_is_branch(intel_dp->dpcd));
-
intel_dp_configure_mst(intel_dp);
if (intel_dp->is_mst) {
@@ -5634,13 +5743,19 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
return status;
}
static void
intel_dp_force(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
@@ -5675,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (intel_dp_is_edp(intel_attached_dp(connector)) &&
+ if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -5693,8 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = connector->dev;
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
int ret;
ret = intel_connector_register(connector);
@@ -5709,15 +5823,14 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&intel_dp->aux, connector);
return ret;
}
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
@@ -5726,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
@@ -5755,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
- kfree(enc_to_dig_port(encoder));
+ kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -5791,7 +5904,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
.address = DP_AUX_HDCP_AKSV,
@@ -6016,47 +6129,49 @@ struct hdcp2_dp_errata_stream_type {
u8 stream_type;
} __packed;
-static struct hdcp2_dp_msg_data {
+struct hdcp2_dp_msg_data {
u8 msg_id;
u32 offset;
bool msg_detectable;
u32 timeout;
u32 timeout2; /* Added for non_paired situation */
- } hdcp2_msg_data[] = {
- {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
- {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
- false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
- {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
- false, 0, 0},
- {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
- false, 0, 0},
- {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
- true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
- {HDCP_2_2_AKE_SEND_PAIRING_INFO,
- DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
- HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
- {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
- {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
- false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
- {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_SEND_RECVID_LIST,
- DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
- {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_STREAM_MANAGE,
- DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
- false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+ { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
- {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
- DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
- 0, 0},
- };
+ { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0 },
+};
static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -6110,7 +6225,7 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
- struct hdcp2_dp_msg_data *hdcp2_msg_data)
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -6149,13 +6264,13 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
return ret;
}
-static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id)
- return &hdcp2_msg_data[i];
+ for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+ if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+ return &hdcp2_dp_msg_data[i];
return NULL;
}
@@ -6169,7 +6284,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
- struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -6233,7 +6348,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
- struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
@@ -6399,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
intel_wakeref_t wakeref;
@@ -6466,13 +6581,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->base.port),
+ DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -6539,6 +6656,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
+ intel_attach_colorspace_property(connector);
+
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -6569,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
intel_pps_get_registers(intel_dp, &regs);
- pp_ctl = ironlake_get_pp_control(intel_dp);
+ pp_ctl = ilk_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
@@ -6739,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
- u32 pp = ironlake_get_pp_control(intel_dp);
+ u32 pp = ilk_get_pp_control(intel_dp);
WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
@@ -6835,10 +6959,8 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
- struct intel_encoder *encoder;
- struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
@@ -6851,9 +6973,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- dig_port = dp_to_dig_port(intel_dp);
- encoder = &dig_port->base;
-
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
return;
@@ -6874,7 +6993,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (!crtc_state->base.active) {
+ if (!crtc_state->hw.active) {
DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
return;
}
@@ -7333,6 +7452,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -7340,8 +7460,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_modeset_retry_work_fn);
if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7359,7 +7480,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_port_is_tc(dev_priv, port));
+ WARN_ON(intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -7382,9 +7503,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on port %c\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- port_name(port));
+ DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7408,11 +7529,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
- (port == PORT_B || port == PORT_C ||
- port == PORT_D || port == PORT_F))
- intel_dp_mst_encoder_init(intel_dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(intel_dig_port,
+ intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
@@ -7503,11 +7621,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
@@ -7542,7 +7660,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (!intel_dp->can_mst)
continue;
@@ -7563,12 +7681,13 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
+ true);
if (ret) {
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
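(Editorial sketch, not part of the patch.) The hunks above convert call sites so that enc_to_intel_dp() and enc_to_dig_port() take a struct intel_encoder * rather than a drm_encoder's base pointer. A minimal example of the resulting call pattern, using only helpers visible in this diff; the wrapper name below is hypothetical:

	/* Hypothetical helper: go from a drm_encoder to its intel_dp. */
	static struct intel_dp *example_encoder_to_dp(struct drm_encoder *encoder)
	{
		/* to_intel_encoder() recovers the wrapping intel_encoder ... */
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		/* ... which the reworked enc_to_intel_dp() accepts directly. */
		return enc_to_intel_dp(intel_encoder);
	}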
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index da70b1a41c83..3da166054788 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -13,6 +13,7 @@
#include "i915_reg.h"
enum pipe;
+enum port;
struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
@@ -102,22 +103,26 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
}
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+
#endif /* __INTEL_DP_H__ */
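(Editorial sketch, not part of the patch.) For reference, the intel_dp_unused_lane_mask() inline retained in this header returns the complement of the lanes in use, restricted to the four-lane field; a quick worked example:

	/*
	 * ~((1 << lane_count) - 1) & 0xf:
	 *   lane_count == 1  ->  0xe  (lanes 1-3 unused)
	 *   lane_count == 2  ->  0xc  (lanes 2-3 unused)
	 *   lane_count == 4  ->  0x0  (all lanes in use)
	 */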
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7ded95a334db..7c653f8c307f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,8 +22,8 @@
*
*/
+#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
@@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
*/
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 read_val[2] = { 0x0 };
u16 level = 0;
@@ -82,7 +82,7 @@ static void
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
u8 pn, pn_min, pn_max;
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
{
- set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
+ set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct intel_panel *panel = &connector->panel;
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
@@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -264,8 +265,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
+ struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
- if (!i915_modparams.enable_dpcd_backlight)
+ if (i915_modparams.enable_dpcd_backlight == 0 ||
+ (i915_modparams.enable_dpcd_backlight == -1 &&
+ dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
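(Editorial sketch, not part of the patch.) The reworked check above treats i915.enable_dpcd_backlight as a tri-state rather than a plain boolean; only the two values tested in the hunk are certain, any other value simply falls through to the capability probe:

	/*
	 *  0 -> never use AUX/DPCD backlight control (returns -ENODEV)
	 * -1 -> use it only when the VBT reports
	 *       INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE
	 * any other value -> the modparam check passes and
	 *       intel_dp_aux_display_control_capable() still decides
	 */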
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9b1fccea966b..2a1130dd1ad0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,9 +21,9 @@
* IN THE SOFTWARE.
*/
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-#include "intel_drv.h"
static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..cba68c5a80fa 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -32,23 +32,23 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
@@ -61,10 +61,11 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = bpp;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- crtc_state->pipe_bpp);
+ crtc_state->pipe_bpp,
+ false);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- port, crtc_state->pbn);
+ port, crtc_state->pbn, 0);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -81,25 +82,71 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
- constant_n);
+ constant_n, crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;
return 0;
}
+/*
+ * Iterate over all connectors and return the smallest transcoder in the MST
+ * stream
+ */
+static enum transcoder
+intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ enum pipe ret = I915_MAX_PIPES;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return INVALID_TRANSCODER;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state->uapi.active)
+ continue;
+
+ /*
+ * Using crtc->pipe because crtc_state->cpu_transcoder is
+ * computed, so other CRTCs could have a non-computed
+ * cpu_transcoder.
+ */
+ if (crtc->pipe < ret)
+ ret = crtc->pipe;
+ }
+
+ if (ret == I915_MAX_PIPES)
+ return INVALID_TRANSCODER;
+
+ /* Simple cast works because TGL doesn't have an eDP transcoder */
+ return (enum transcoder)ret;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
void *port = connector->port;
struct link_config_limits limits;
int ret;
@@ -128,7 +175,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
limits.min_bpp = intel_dp_min_bpp(pipe_config);
- limits.max_bpp = pipe_config->pipe_bpp;
+ /*
+ * FIXME: If all the streams can't fit into the link with
+ * their current pipe_bpp we should reduce pipe_bpp across
+ * the board until things start to fit. Until then we
+ * limit to <= 8bpc since that's what was hardcoded for all
+ * MST streams previously. This hack should be removed once
+ * we have the proper retry logic in place.
+ */
+ limits.max_bpp = min(pipe_config->pipe_bpp, 24);
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
@@ -146,25 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+
+ return 0;
+}
+
+/*
+ * If one of the connectors in an MST stream needs a modeset, mark all CRTCs
+ * that share the same MST stream as mode changed;
+ * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care of
+ * doing a fastset when possible.
+ */
+static int
+intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter connector_list_iter;
+ struct intel_connector *connector_iter;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, &connector->base))
+ return 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
+ struct intel_digital_connector_state *conn_iter_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ if (connector_iter->mst_port != connector->mst_port ||
+ connector_iter == connector)
+ continue;
+
+ conn_iter_state = intel_atomic_get_digital_connector_state(state,
+ connector_iter);
+ if (IS_ERR(conn_iter_state)) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return PTR_ERR(conn_iter_state);
+ }
+
+ if (!conn_iter_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_iter_state->base.crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state)) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return PTR_ERR(crtc_state);
+ }
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return ret;
+ }
+ crtc_state->uapi.mode_changed = true;
+ }
+ drm_connector_list_iter_end(&connector_list_iter);
+
return 0;
}
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
+ drm_atomic_get_new_connector_state(&state->base, connector);
struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
+ drm_atomic_get_old_connector_state(&state->base, connector);
struct intel_connector *intel_connector =
to_intel_connector(connector);
struct drm_crtc *new_crtc = new_conn_state->crtc;
- struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
- ret = intel_digital_connector_atomic_check(connector, state);
+ ret = intel_digital_connector_atomic_check(connector, &state->base);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
if (ret)
return ret;
@@ -175,16 +296,18 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
* connector
*/
if (new_crtc) {
- crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, intel_crtc);
if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(crtc_state) ||
- crtc_state->enable)
+ !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
+ crtc_state->uapi.enable)
return 0;
}
- mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
- ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+ mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
+ ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
intel_connector->port);
return ret;
@@ -194,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
@@ -207,7 +330,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_ERROR("failed to update payload %d\n", ret);
+ DRM_DEBUG_KMS("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
@@ -218,36 +341,65 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ bool last_mst_stream;
+ u32 val;
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_dp->active_mst_links--;
+ last_mst_stream = intel_dp->active_mst_links == 0;
+ WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
- /* this can fail */
- drm_dp_check_act_status(&intel_dp->mst_mgr);
- /* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+ val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT, 1))
+ DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+
/*
* Power down mst path before disabling the port, otherwise we end
* up getting interrupts from the sink upon detecting link loss.
*/
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
false);
+ /*
+ * From TGL spec: "If multi-stream slave transcoder: Configure
+ * Transcoder Clock Select to direct no clock to the transcoder"
+ *
+ * From older GENs spec: "Configure Transcoder Clock Select to direct
+ * no clock to the transcoder"
+ */
+ if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_dp->active_mst_links--;
intel_mst->connector = NULL;
- if (intel_dp->active_mst_links == 0) {
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ if (last_mst_stream)
intel_dig_port->base.post_disable(&intel_dig_port->base,
old_crtc_state, NULL);
- }
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
@@ -256,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -265,48 +417,37 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
pipe_config, NULL);
}
-static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
-
- if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
- old_crtc_state,
- old_conn_state);
-}
-
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
u32 temp;
+ bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
*/
connector->encoder = encoder;
intel_mst->connector = connector;
+ first_mst_stream = intel_dp->active_mst_links == 0;
+ WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_dp->active_mst_links == 0)
+ if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
- if (intel_dp->active_mst_links == 0)
+ if (first_mst_stream)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
@@ -318,31 +459,37 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(DP_TP_STATUS(port));
- I915_WRITE(DP_TP_STATUS(port), temp);
+ temp = I915_READ(intel_dp->regs.dp_tp_status);
+ I915_WRITE(intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
- intel_ddi_enable_pipe_clock(pipe_config);
+ /*
+ * Before Gen 12 this is not done as part of
+ * intel_dig_port->base.pre_enable() and should be done here. For
+ * Gen 12+ the step in which this should be done is different for the
+ * first MST stream, so it's done on the DDI for the first stream and
+ * here for the following ones.
+ */
+ if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
+ intel_ddi_enable_pipe_clock(pipe_config);
+
+ intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_wait_for_register(&dev_priv->uncore,
- DP_TP_STATUS(port),
- DP_TP_STATUS_ACT_SENT,
- DP_TP_STATUS_ACT_SENT,
- 1))
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -355,7 +502,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
if (intel_mst->connector)
return true;
@@ -365,7 +512,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
intel_ddi_get_config(&intel_dig_port->base, pipe_config);
@@ -388,20 +535,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
- intel_connector->port);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -421,6 +555,7 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -448,7 +583,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -461,16 +596,31 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
+static int
+intel_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
+ intel_connector->port);
+}
+
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
+ .detect_ctx = intel_dp_mst_detect,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
drm_encoder_cleanup(encoder);
kfree(intel_mst);
@@ -539,7 +689,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ /*
+ * Reuse the prop from the SST connector because we're
+ * not allowed to create new props after device registration.
+ */
+ connector->max_bpc_property =
+ intel_dp->attached_connector->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
return connector;
@@ -602,14 +760,21 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
+ /*
+ * This is wrong, but broken userspace uses the intersection
+ * of possible_crtcs of all the encoders of a given connector
+ * to figure out which crtcs can drive said connector. What
+ * should be used instead is the union of possible_crtcs.
+ * To keep such userspace functioning we must misconfigure
+ * this to make sure the intersection is not empty :(
+ */
+ intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
- intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
@@ -632,23 +797,39 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
}
int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+{
+ return intel_dig_port->dp.active_mst_links;
+}
+
+int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ enum port port = intel_dig_port->base.port;
int ret;
- intel_dp->can_mst = true;
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (INTEL_GEN(i915) < 12 && port == PORT_A)
+ return 0;
+
+ if (INTEL_GEN(i915) < 11 && port == PORT_E)
+ return 0;
+
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
- if (ret) {
- intel_dp->can_mst = false;
+ if (ret)
return ret;
- }
+
+ intel_dp->can_mst = true;
+
return 0;
}
@@ -663,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
}
+
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
+}
+
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+}
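(Editorial sketch, not part of the patch.) The two helpers just added expose the master-transcoder bookkeeping to other callers. A minimal usage example modeled on the WARN_ON checks earlier in this file; the function name below is hypothetical:

	/* Hypothetical caller: enforce the Gen12+ MST ordering rule. */
	static void example_assert_mst_order(struct drm_i915_private *i915,
					     const struct intel_crtc_state *crtc_state,
					     bool first_or_last_mst_stream)
	{
		/* On Gen12+ the first/last stream must own the master transcoder. */
		WARN_ON(INTEL_GEN(i915) >= 12 && first_or_last_mst_stream &&
			!intel_dp_mst_is_master_trans(crtc_state));
	}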
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 1470c6e0514b..854724f68f09 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,9 +6,15 @@
#ifndef __INTEL_DP_MST_H__
#define __INTEL_DP_MST_H__
+#include <linux/types.h>
+
struct intel_digital_port;
+struct intel_crtc_state;
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7ccf7f3974db..6fb1f7a7364e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -23,8 +23,8 @@
#include "display/intel_dp.h"
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_sideband.h"
/**
@@ -345,10 +345,8 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
+ if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
+ GRC_DONE, 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
@@ -644,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
@@ -740,8 +738,8 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -783,9 +781,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
@@ -863,10 +861,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
int data, i, stagger;
@@ -942,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dport->release_cl2_override) {
@@ -955,7 +953,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
u32 val;
vlv_dpio_get(dev_priv);
@@ -991,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
@@ -1016,9 +1014,9 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
@@ -1045,10 +1043,10 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -1075,9 +1073,9 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2d4e7b9a7b9d..c75e34d87111 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,9 +21,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll_mgr.h"
-#include "intel_drv.h"
/**
* DOC: Display PLLs
@@ -36,9 +36,10 @@
* This file provides an abstraction over display PLLs. The function
* intel_shared_dpll_init() initializes the PLLs for the given platform. The
* users of a PLL are tracked and that tracking is integrated with the atomic
- * modest interface. During an atomic operation, a PLL can be requested for a
- * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
- * a previously used PLL can be released with intel_release_shared_dpll().
+ * modeset interface. During an atomic operation, required PLLs can be reserved
+ * for a given CRTC and encoder configuration by calling
+ * intel_reserve_shared_dplls() and previously reserved PLLs can be released
+ * with intel_release_shared_dplls().
* Changes to the users are first staged in the atomic state, and then made
* effective by calling intel_shared_dpll_swap_state() during the atomic
* commit phase.
@@ -135,7 +136,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
*/
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -162,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
*/
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -207,7 +208,7 @@ out:
*/
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -243,19 +244,21 @@ out:
}
static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
- enum intel_dpll_id range_min,
- enum intel_dpll_id range_max)
+intel_find_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *pll_state,
+ unsigned long dpll_mask)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
enum intel_dpll_id i;
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- for (i = range_min; i <= range_max; i++) {
+ WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+
+ for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
@@ -265,9 +268,9 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
continue;
}
- if (memcmp(&crtc_state->dpll_hw_state,
+ if (memcmp(pll_state,
&shared_dpll[i].hw_state,
- sizeof(crtc_state->dpll_hw_state)) == 0) {
+ sizeof(*pll_state)) == 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
@@ -289,26 +292,51 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
}
static void
-intel_reference_shared_dpll(struct intel_shared_dpll *pll,
- struct intel_crtc_state *crtc_state)
+intel_reference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
{
struct intel_shared_dpll_state *shared_dpll;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const enum intel_dpll_id id = pll->info->id;
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
if (shared_dpll[id].crtc_mask == 0)
- shared_dpll[id].hw_state =
- crtc_state->dpll_hw_state;
+ shared_dpll[id].hw_state = *pll_state;
- crtc_state->shared_dpll = pll;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
pipe_name(crtc->pipe));
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
+static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll)
+{
+ struct intel_shared_dpll_state *shared_dpll;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+static void intel_put_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->shared_dpll = NULL;
+
+ if (!old_crtc_state->shared_dpll)
+ return;
+
+ intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+}
+
/**
* intel_shared_dpll_swap_state - make atomic DPLL configuration effective
* @state: atomic state
@@ -320,25 +348,20 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
* i.e. it also puts the current state into @state, even though there is no
* need for that at this moment.
*/
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_state *shared_dpll;
- struct intel_shared_dpll *pll;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
enum intel_dpll_id i;
- if (!to_intel_atomic_state(state)->dpll_set)
+ if (!state->dpll_set)
return;
- shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll_state tmp;
-
- pll = &dev_priv->shared_dplls[i];
+ struct intel_shared_dpll *pll =
+ &dev_priv->shared_dplls[i];
- tmp = pll->state;
- pll->state = shared_dpll[i];
- shared_dpll[i] = tmp;
+ swap(pll->state, shared_dpll[i]);
}
}
@@ -421,11 +444,12 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
-static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool ibx_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -439,18 +463,22 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(crtc_state,
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B);
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
- return NULL;
+ return false;
/* reference the pll */
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -498,16 +526,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
val = I915_READ(WRPLL_CTL(id));
I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL(id));
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
}
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ enum intel_dpll_id id = pll->info->id;
u32 val;
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
}
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -767,8 +810,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *
+hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
u32 val;
unsigned int p, n2, r2;
@@ -781,8 +828,10 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_find_shared_dpll(crtc_state,
- DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
if (!pll)
return NULL;
@@ -793,7 +842,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -821,38 +870,44 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
-static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool hsw_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(crtc_state);
+ pll = hsw_ddi_hdmi_get_dpll(state, crtc);
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
pll = hsw_ddi_dp_get_dpll(crtc_state);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return NULL;
+ return false;
crtc_state->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
- pll = intel_find_shared_dpll(crtc_state,
- DPLL_ID_SPLL, DPLL_ID_SPLL);
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
} else {
- return NULL;
+ return false;
}
if (!pll)
- return NULL;
+ return false;
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -962,11 +1017,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL_STATUS,
- DPLL_LOCK(id),
- DPLL_LOCK(id),
- 5))
+ if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
DRM_ERROR("DPLL %d not locked\n", id);
}
@@ -1385,10 +1436,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
bool bret;
@@ -1396,32 +1449,37 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
- return NULL;
+ return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
- return NULL;
+ return false;
}
} else {
- return NULL;
+ return false;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(crtc_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL0);
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL0));
else
- pll = intel_find_shared_dpll(crtc_state,
- DPLL_ID_SKL_DPLL1,
- DPLL_ID_SKL_DPLL3);
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
- return NULL;
+ return false;
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1693,7 +1751,7 @@ static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct dpll best_clock;
/* Calculate HDMI div */
@@ -1827,22 +1885,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool bxt_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
!bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
- return NULL;
+ return false;
if (intel_crtc_has_dp_encoder(crtc_state) &&
!bxt_ddi_dp_set_dpll_hw_state(crtc_state))
- return NULL;
+ return false;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -1851,9 +1910,12 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1884,9 +1946,14 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder);
-
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
};
@@ -1899,7 +1966,8 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
- .get_dpll = ibx_get_dpll,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
};
@@ -1915,7 +1983,8 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
- .get_dpll = hsw_get_dpll,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = hsw_dump_hw_state,
};
@@ -1929,7 +1998,8 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
- .get_dpll = skl_get_dpll,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = skl_dump_hw_state,
};
@@ -1942,7 +2012,8 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
- .get_dpll = bxt_get_dpll,
+ .get_dplls = bxt_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -1958,11 +2029,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_POWER_STATE,
- PLL_POWER_STATE,
- 5))
+ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
+ PLL_POWER_STATE, 5))
DRM_ERROR("PLL %d Power not enabled\n", id);
/*
@@ -1999,11 +2067,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_LOCK,
- PLL_LOCK,
- 5))
+ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
DRM_ERROR("PLL %d not locked\n", id);
/*
@@ -2047,11 +2111,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_LOCK,
- 0,
- 5))
+ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
DRM_ERROR("PLL %d locked\n", id);
/*
@@ -2069,11 +2129,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_POWER_STATE,
- 0,
- 5))
+ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
+ PLL_POWER_STATE, 5))
DRM_ERROR("PLL %d Power not disabled\n", id);
}
@@ -2217,7 +2274,7 @@ static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
u32 ref_clock;
u32 dco_min = 7998000;
@@ -2332,10 +2389,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool cnl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
bool bret;
@@ -2343,31 +2402,36 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
- return NULL;
+ return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
- return NULL;
+ return false;
}
} else {
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
- return NULL;
+ return false;
}
- pll = intel_find_shared_dpll(crtc_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL2);
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1) |
+ BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
- return NULL;
+ return false;
}
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2394,7 +2458,8 @@ static const struct dpll_info cnl_plls[] = {
static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
- .get_dpll = cnl_get_dpll,
+ .get_dplls = cnl_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2473,10 +2538,22 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
+static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x54, .dco_fraction = 0x3000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x43, .dco_fraction = 0x4000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
dev_priv->cdclk.hw.ref == 24000 ?
icl_dp_combo_pll_24MHz_values :
@@ -2498,22 +2575,50 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = tgl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = tgl_tbt_pll_24MHz_values;
+ break;
+ }
+ } else {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = icl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = icl_tbt_pll_24MHz_values;
+ break;
+ }
+ }
- *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
return true;
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
- if (intel_port_is_tc(dev_priv, encoder->port))
+ if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
+ encoder->port)))
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
@@ -2530,14 +2635,17 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params.kdiv) |
- DPLL_CFGCR1_PDIV(pll_params.pdiv) |
- DPLL_CFGCR1_CENTRAL_FREQ_8400;
+ DPLL_CFGCR1_PDIV(pll_params.pdiv);
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ if (INTEL_GEN(dev_priv) >= 12)
+ cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ else
+ cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ memset(pll_state, 0, sizeof(*pll_state));
+
+ pll_state->cfgcr0 = cfgcr0;
+ pll_state->cfgcr1 = cfgcr1;
return true;
}
@@ -2555,7 +2663,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state)
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
{
u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
@@ -2577,8 +2686,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
continue;
if (div2 >= 2) {
+ /*
+ * Note: a_divratio does not match the TGL BSpec
+ * algorithm, but it matches the hardcoded values
+ * and works on HW, at least for DP alt-mode.
+ */
a_divratio = is_dp ? 10 : 5;
- tlinedrv = 2;
+ tlinedrv = is_dkl ? 1 : 2;
} else {
a_divratio = 5;
tlinedrv = 0;
@@ -2627,10 +2741,10 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation, that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int refclk_khz = dev_priv->cdclk.hw.ref;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2641,11 +2755,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ bool is_dkl = INTEL_GEN(dev_priv) >= 12;
memset(pll_state, 0, sizeof(*pll_state));
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state)) {
+ pll_state, is_dkl)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2653,8 +2768,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
if (m2div_int > 255) {
- m1div = 4;
- m2div_int = dco_khz / (refclk_khz * m1div);
+ if (!is_dkl) {
+ m1div = 4;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ }
+
if (m2div_int > 255) {
DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
clock);
@@ -2734,121 +2852,277 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
}
ssc_steplog = 4;
- pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
- MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
- MG_PLL_DIV0_FBDIV_INT(m2div_int);
-
- pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
- MG_PLL_DIV1_DITHER_DIV_2 |
- MG_PLL_DIV1_NDIVRATIO(1) |
- MG_PLL_DIV1_FBPREDIV(m1div);
-
- pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
- MG_PLL_LF_AFCCNTSEL_512 |
- MG_PLL_LF_GAINCTRL(1) |
- MG_PLL_LF_INT_COEFF(int_coeff) |
- MG_PLL_LF_PROP_COEFF(prop_coeff);
-
- pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
- MG_PLL_FRAC_LOCK_DCODITHEREN |
- MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
- if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
-
- pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
- MG_PLL_SSC_TYPE(2) |
- MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
- MG_PLL_SSC_STEPNUM(ssc_steplog) |
- MG_PLL_SSC_FLLEN |
- MG_PLL_SSC_STEPSIZE(ssc_stepsize);
-
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
-
- if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ /* write pll_state calculations */
+ if (is_dkl) {
+ pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
+ DKL_PLL_DIV0_FBPREDIV(m1div) |
+ DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
+
+ pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
+ DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
+ (use_ssc ? DKL_PLL_SSC_EN : 0);
+
+ pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
+ DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
+
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
- }
+ pll_state->mg_pll_div0 =
+ (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+ MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+ MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 =
+ MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+ MG_PLL_DIV1_DITHER_DIV_2 |
+ MG_PLL_DIV1_NDIVRATIO(1) |
+ MG_PLL_DIV1_FBPREDIV(m1div);
+
+ pll_state->mg_pll_lf =
+ MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+ MG_PLL_LF_AFCCNTSEL_512 |
+ MG_PLL_LF_GAINCTRL(1) |
+ MG_PLL_LF_INT_COEFF(int_coeff) |
+ MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+ pll_state->mg_pll_frac_lock =
+ MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+ MG_PLL_FRAC_LOCK_DCODITHEREN |
+ MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+ if (use_ssc || m2div_rem > 0)
+ pll_state->mg_pll_frac_lock |=
+ MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+ pll_state->mg_pll_ssc =
+ (use_ssc ? MG_PLL_SSC_EN : 0) |
+ MG_PLL_SSC_TYPE(2) |
+ MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+ MG_PLL_SSC_STEPNUM(ssc_steplog) |
+ MG_PLL_SSC_FLLEN |
+ MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias =
+ MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask =
+ MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
+ }
- pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ pll_state->mg_pll_tdc_coldst_bias &=
+ pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ }
return true;
}
-static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+/**
+ * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
+ * @crtc_state: state for the CRTC to select the DPLL for
+ * @port_dpll_id: the active @port_dpll_id to select
+ *
+ * Select the given @port_dpll_id instance from the DPLLs reserved for the
+ * CRTC.
+ */
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_digital_port *intel_dig_port;
- struct intel_shared_dpll *pll;
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[port_dpll_id];
+
+ crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->dpll_hw_state = port_dpll->hw_state;
+}
+
+static void icl_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_digital_port *primary_port;
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+
+ primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
+ enc_to_mst(encoder)->primary :
+ enc_to_dig_port(encoder);
+
+ if (primary_port &&
+ (primary_port->tc_mode == TC_PORT_DP_ALT ||
+ primary_port->tc_mode == TC_PORT_LEGACY))
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+}
+
+static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
- enum intel_dpll_id min, max;
- bool ret;
+ unsigned long dpll_mask;
- if (intel_port_is_combophy(dev_priv, port)) {
- min = DPLL_ID_ICL_DPLL0;
- max = DPLL_ID_ICL_DPLL1;
- ret = icl_calc_dpll_state(crtc_state, encoder);
- } else if (intel_port_is_tc(dev_priv, port)) {
- if (encoder->type == INTEL_OUTPUT_DP_MST) {
- struct intel_dp_mst_encoder *mst_encoder;
+ if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
- mst_encoder = enc_to_mst(&encoder->base);
- intel_dig_port = mst_encoder->primary;
- } else {
- intel_dig_port = enc_to_dig_port(&encoder->base);
- }
+ return false;
+ }
- if (intel_dig_port->tc_type == TC_PORT_TBT) {
- min = DPLL_ID_ICL_TBTPLL;
- max = min;
- ret = icl_calc_dpll_state(crtc_state, encoder);
- } else {
- enum tc_port tc_port;
+ if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ else
+ dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
+
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_mask);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
- tc_port = intel_port_to_tc(dev_priv, port);
- min = icl_tc_port_to_pll_id(tc_port);
- max = min;
- ret = icl_calc_mg_pll_state(crtc_state);
- }
- } else {
- MISSING_CASE(port);
- return NULL;
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
+
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return true;
+}
+
+static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll;
+ enum intel_dpll_id dpll_id;
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+ return false;
}
- if (!ret) {
- DRM_DEBUG_KMS("Could not calculate PLL state.\n");
- return NULL;
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(DPLL_ID_ICL_TBTPLL));
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+ return false;
}
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
- pll = intel_find_shared_dpll(crtc_state, min, max);
- if (!pll) {
- DRM_DEBUG_KMS("No PLL selected\n");
- return NULL;
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+ goto err_unreference_tbt_pll;
}
- intel_reference_shared_dpll(pll, crtc_state);
+ dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ encoder->port));
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(dpll_id));
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No MG PHY PLL found\n");
+ goto err_unreference_tbt_pll;
+ }
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
- return pll;
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return true;
+
+err_unreference_tbt_pll:
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+
+ return false;
+}
+
+static bool icl_get_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_get_combo_phy_dpll(state, crtc, encoder);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_get_tc_phy_dplls(state, crtc, encoder);
+
+ MISSING_CASE(phy);
+
+ return false;
+}
+
+static void icl_put_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum icl_port_dpll_id id;
+
+ new_crtc_state->shared_dpll = NULL;
+
+ for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
+ const struct icl_port_dpll *old_port_dpll =
+ &old_crtc_state->icl_port_dplls[id];
+ struct icl_port_dpll *new_port_dpll =
+ &new_crtc_state->icl_port_dplls[id];
+
+ new_port_dpll->pll = NULL;
+
+ if (!old_port_dpll->pll)
+ continue;
+
+ intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ }
}
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2913,6 +3187,78 @@ out:
return ret;
}
+static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ /*
+ * All registers read here have the same HIP_INDEX_REG even though
+ * they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+
+ hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+
+ hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+
+ hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state,
@@ -2932,8 +3278,18 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+ } else {
+ if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+ } else {
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ }
+ }
ret = true;
out:
@@ -2945,8 +3301,14 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- return icl_pll_get_hw_state(dev_priv, pll, hw_state,
- CNL_DPLL_ENABLE(pll->info->id));
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+ }
+
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2961,10 +3323,24 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
+ i915_reg_t cfgcr0_reg, cfgcr1_reg;
- I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
- I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
- POSTING_READ(ICL_DPLL_CFGCR1(id));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ cfgcr0_reg = TGL_DPLL_CFGCR0(id);
+ cfgcr1_reg = TGL_DPLL_CFGCR1(id);
+ } else {
+ if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(4);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(4);
+ } else {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(id);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(id);
+ }
+ }
+
+ I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
+ I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
+ POSTING_READ(cfgcr1_reg);
}
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3017,6 +3393,75 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
+static void dkl_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+ u32 val;
+
+ /*
+ * All registers programmed here have the same HIP_INDEX_REG even
+ * though they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+ val |= hw_state->mg_pll_div0;
+ I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+ val |= hw_state->mg_pll_div1;
+ I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+
+ val = I915_READ(DKL_PLL_SSC(tc_port));
+ val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+ val |= hw_state->mg_pll_ssc;
+ I915_WRITE(DKL_PLL_SSC(tc_port), val);
+
+ val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+
+ val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+
+ POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+}
+
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
@@ -3031,8 +3476,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
- PLL_POWER_STATE, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
}
@@ -3047,8 +3491,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 600us. */
- if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
- PLL_LOCK, PLL_LOCK, 1))
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
DRM_ERROR("PLL %d not locked\n", pll->info->id);
}
@@ -3057,6 +3500,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
{
i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+
+ /*
+ * We need to disable DC states when this DPLL is enabled.
+ * This can be done by taking a reference on DPLL4 power
+ * domain.
+ */
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
icl_pll_power_enable(dev_priv, pll, enable_reg);
icl_dpll_write(dev_priv, pll);
@@ -3098,7 +3554,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
icl_pll_power_enable(dev_priv, pll, enable_reg);
- icl_mg_pll_write(dev_priv, pll);
+ if (INTEL_GEN(dev_priv) >= 12)
+ dkl_pll_write(dev_priv, pll);
+ else
+ icl_mg_pll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3130,8 +3589,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
- if (intel_wait_for_register(&dev_priv->uncore,
- enable_reg, PLL_LOCK, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -3144,15 +3602,26 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- enable_reg, PLL_POWER_STATE, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
}
static void combo_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+ icl_pll_disable(dev_priv, pll, enable_reg);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
+ pll->wakeref);
+ return;
+ }
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
}
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
@@ -3223,19 +3692,50 @@ static const struct dpll_info icl_plls[] = {
static const struct intel_dpll_mgr icl_pll_mgr = {
.dpll_info = icl_plls,
- .get_dpll = icl_get_dpll,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
static const struct dpll_info ehl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
{ },
};
static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
- .get_dpll = icl_get_dpll,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = dkl_pll_get_hw_state,
+};
+
+static const struct dpll_info tgl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
+ { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr tgl_pll_mgr = {
+ .dpll_info = tgl_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3252,7 +3752,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ELKHARTLAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 12)
+ dpll_mgr = &tgl_pll_mgr;
+ else if (IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
@@ -3287,50 +3789,87 @@ void intel_shared_dpll_init(struct drm_device *dev)
}
/**
- * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc_state: atomic state for the crtc
+ * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to reserve DPLLs for
* @encoder: encoder
*
- * Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc_state to the returned pll is registered in the
- * atomic state. That configuration is made effective by calling
- * intel_shared_dpll_swap_state(). The reference should be released by calling
- * intel_release_shared_dpll().
+ * This function reserves all required DPLLs for the given CRTC and encoder
+ * combination in the current atomic commit @state and the new @crtc atomic
+ * state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * The reserved DPLLs should be released by calling
+ * intel_release_shared_dplls().
*
* Returns:
- * A shared DPLL to be used by @crtc_state and @encoder.
+ * True if all required DPLLs were successfully reserved.
*/
-struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
if (WARN_ON(!dpll_mgr))
- return NULL;
+ return false;
- return dpll_mgr->get_dpll(crtc_state, encoder);
+ return dpll_mgr->get_dplls(state, crtc, encoder);
}
/**
- * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
- * @dpll: dpll in use by @crtc
- * @crtc: crtc
+ * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
* @state: atomic state
+ * @crtc: crtc from which the DPLLs are to be released
+ *
+ * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * from the current atomic commit @state and the old @crtc atomic state.
*
- * This function releases the reference from @crtc to @dpll from the
- * atomic @state. The new configuration is made effective by calling
- * intel_shared_dpll_swap_state().
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
*/
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
- struct intel_crtc *crtc,
- struct drm_atomic_state *state)
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_shared_dpll_state *shared_dpll_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ /*
+ * FIXME: this function is called for every platform having a
+ * compute_clock hook, even though the platform doesn't yet support
+ * the shared DPLL framework and intel_reserve_shared_dplls() is not
+ * called on those.
+ */
+ if (!dpll_mgr)
+ return;
+
+ dpll_mgr->put_dplls(state, crtc);
+}
+
+/**
+ * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * @state: atomic state
+ * @crtc: the CRTC for which to update the active DPLL
+ * @encoder: encoder determining the type of port DPLL
+ *
+ * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
+ * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * DPLL selected will be based on the current mode of the encoder's port.
+ */
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ if (WARN_ON(!dpll_mgr))
+ return;
- shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
- shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+ dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index d0570414f3d1..2a104c64291d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include "intel_display.h"
+#include "intel_wakeref.h"
/*FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({ \
@@ -36,9 +37,9 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
-struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -110,35 +111,59 @@ enum intel_dpll_id {
/**
- * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
+ * @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0
*/
DPLL_ID_ICL_DPLL0 = 0,
/**
- * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
+ * @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
- * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+ * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4
+ */
+ DPLL_ID_EHL_DPLL4 = 2,
+ /**
+ * @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL
*/
DPLL_ID_ICL_TBTPLL = 2,
/**
- * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
+ * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C),
+ * TGL TC PLL 1 port 1 (TC1)
*/
DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
+ * TGL TC PLL 1 port 2 (TC2)
*/
DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
+ * TGL TC PLL 1 port 3 (TC3)
*/
DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
+ * TGL TC PLL 1 port 4 (TC4)
*/
DPLL_ID_ICL_MGPLL4 = 6,
+ /**
+ * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
+ */
+ DPLL_ID_TGL_MGPLL5 = 7,
+ /**
+ * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
+ */
+ DPLL_ID_TGL_MGPLL6 = 8,
+};
+
+#define I915_NUM_PLLS 9
+
+enum icl_port_dpll_id {
+ ICL_PORT_DPLL_DEFAULT,
+ ICL_PORT_DPLL_MG_PHY,
+
+ ICL_PORT_DPLL_COUNT,
};
-#define I915_NUM_PLLS 7
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -195,7 +220,7 @@ struct intel_dpll_hw_state {
* future state which would be applied by an atomic mode set (stored in
* a struct &intel_atomic_state).
*
- * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ * See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
*/
struct intel_shared_dpll_state {
/**
@@ -312,6 +337,12 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
+
+ /**
+ * @wakeref: On some platforms a device-level runtime pm reference may
+ * need to be grabbed to disable DC states while this DPLL is enabled.
+ */
+ intel_wakeref_t wakeref;
};
#define SKL_DPLL0 0
@@ -331,15 +362,20 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
- struct intel_encoder *encoder);
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
- struct intel_crtc *crtc,
- struct drm_atomic_state *state);
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id);
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
new file mode 100644
index 000000000000..ada006a690df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+#define DSB_BUF_SIZE (2 * PAGE_SIZE)
+
+/**
+ * DOC: DSB
+ *
+ * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
+ * which can be offloaded to the DSB HW in the Display Controller. The DSB HW
+ * is a DMA engine that can be programmed to download the DSB from memory.
+ * It allows the driver to batch-submit display HW programming. This helps to
+ * reduce loading time and CPU activity, thereby making the context switch
+ * faster. DSB support has been added from the Gen12 Intel graphics based
+ * platforms.
+ *
+ * DSBs can access only the pipe, plane, and transcoder Data Island Packet
+ * registers.
+ *
+ * The DSB HW supports only register writes (both indexed and direct MMIO
+ * writes). Register reads are not possible with the DSB HW engine.
+ */
+
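A rough usage sketch of this API: the helper name and the LUT-style payload below are made up for illustration, intel_dsb_indexed_reg_write()'s signature is taken from its kernel-doc further down, and the actual trigger/commit step is left out since it is not part of the hunks shown here.

	/* Sketch only: batch a run of writes to one auto-increment register. */
	static void example_write_lut_via_dsb(struct intel_crtc *crtc,
					      i915_reg_t reg,
					      const u32 *data, int n)
	{
		struct intel_dsb *dsb = intel_dsb_get(crtc);
		int i;

		/*
		 * If buffer allocation failed above, dsb->cmd_buf is NULL and
		 * the writes below are expected to fall back to plain MMIO.
		 */
		for (i = 0; i < n; i++)
			intel_dsb_indexed_reg_write(dsb, reg, data[i]);

		/* A commit/trigger call would go here before dropping the handle. */

		intel_dsb_put(dsb);
	}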
+/* DSB opcodes. */
+#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_BYTE_EN 0xF
+#define DSB_BYTE_EN_SHIFT 20
+#define DSB_REG_VALUE_MASK 0xfffff
+
+static inline bool is_dsb_busy(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+}
+
+static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl |= DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl &= ~DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+/**
+ * intel_dsb_get() - Allocate DSB context and return a DSB instance.
+ * @crtc: intel_crtc structure to get pipe info.
+ *
+ * This function provides a handle to a DSB instance for further DSB
+ * operations.
+ *
+ * Returns: address of the intel_dsb instance requested.
+ * On failure the same DSB instance is returned, but without a command buffer.
+ */
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_dsb *dsb = &crtc->dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return dsb;
+
+ if (dsb->refcount++ != 0)
+ return dsb;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Gem object creation failed\n");
+ goto out;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ DRM_ERROR("Vma creation failed\n");
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ DRM_ERROR("Command buffer creation failed\n");
+ goto out;
+ }
+
+ dsb->id = DSB1;
+ dsb->vma = vma;
+ dsb->cmd_buf = buf;
+
+out:
+ /*
+ * On error dsb->cmd_buf will continue to be NULL, making the writes
+ * pass-through. Leave the dangling ref to be removed later by the
+ * corresponding intel_dsb_put(): the important error message will
+ * already be logged above.
+ */
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return dsb;
+}
+
+/**
+ * intel_dsb_put() - Destroy the DSB context.
+ * @dsb: intel_dsb structure.
+ *
+ * This function destroys the DSB context allocated by intel_dsb_get(),
+ * unpinning and releasing the VMA object associated with it.
+ */
+
+void intel_dsb_put(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!HAS_DSB(i915))
+ return;
+
+ if (WARN_ON(dsb->refcount == 0))
+ return;
+
+ if (--dsb->refcount == 0) {
+ i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ dsb->cmd_buf = NULL;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ }
+}
+
+/**
+ * intel_dsb_indexed_reg_write() - Write to the DSB context for an
+ * auto-increment register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function queues a register-value pair into the DSB command buffer
+ * for an auto-increment register. If no command buffer is available the
+ * value is written directly via MMIO; on command buffer overflow a warning
+ * is emitted and the write is dropped.
+ */
+
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+ u32 reg_val;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ /*
+ * For example, the buffer will look like below for 3 dwords written to
+ * an auto-increment register:
+ * +--------------------------------------------------------+
+ * | size = 3 | offset &| value1 | value2 | value3 | zero |
+ * | | opcode | | | | |
+ * +--------------------------------------------------------+
+ * + + + + + + +
+ * 0 4 8 12 16 20 24
+ * Byte
+ *
+ * As every instruction is 8-byte aligned, the index of a DSB instruction
+ * always starts at an even number in the u32 array. If an odd number of
+ * dwords is written, a zero is appended at the end for padding.
+ */
+ reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ if (reg_val != i915_mmio_reg_offset(reg)) {
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+
+ dsb->ins_start_offset = dsb->free_pos;
+
+ /* Update the size. */
+ buf[dsb->free_pos++] = 1;
+
+ /* Update the opcode and reg. */
+ buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
+ DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+
+ /* Update the value. */
+ buf[dsb->free_pos++] = val;
+ } else {
+ /* Update the new value. */
+ buf[dsb->free_pos++] = val;
+
+ /* Update the size. */
+ buf[dsb->ins_start_offset]++;
+ }
+
+ /* If the number of data words is odd, the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+}
+
+/**
+ * intel_dsb_reg_write() - Write to the DSB context for a normal
+ * register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function queues a register-value pair into the DSB command buffer.
+ * If no command buffer is available the value is written directly via
+ * MMIO; on command buffer overflow a warning is emitted and the write is
+ * dropped.
+ */
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ dsb->ins_start_offset = dsb->free_pos;
+ buf[dsb->free_pos++] = val;
+ buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg);
+}
+
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: intel_dsb structure.
+ *
+ * This function triggers execution of the queued workload on the DSB
+ * hardware and resets the DSB context afterwards, including on error.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (!dsb->free_pos)
+ return;
+
+ if (!intel_dsb_enable_engine(dsb))
+ goto reset;
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+
+ tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
+ if (tail > dsb->free_pos * 4)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ (tail - dsb->free_pos * 4));
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ if (wait_for(!is_dsb_busy(dsb), 1)) {
+ DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ goto reset;
+ }
+
+reset:
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ intel_dsb_disable_engine(dsb);
+}
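For orientation, here is a minimal usage sketch of the helpers added above (illustrative only, not part of the patch; EXAMPLE_REG and EXAMPLE_AUTOINC_REG are hypothetical placeholders, not real i915 registers):

#include "intel_display_types.h"
#include "intel_dsb.h"

/* Illustrative sketch: batch register writes through the DSB helpers. */
static void example_dsb_batch(struct intel_crtc *crtc)
{
	/*
	 * Grab (or refcount) the per-CRTC DSB; without DSB HW or on
	 * allocation failure the writes below silently become MMIO writes.
	 */
	struct intel_dsb *dsb = intel_dsb_get(crtc);

	/*
	 * Queue a direct MMIO write and an auto-increment write.
	 * EXAMPLE_REG / EXAMPLE_AUTOINC_REG are hypothetical registers.
	 */
	intel_dsb_reg_write(dsb, EXAMPLE_REG, 0x1);
	intel_dsb_indexed_reg_write(dsb, EXAMPLE_AUTOINC_REG, 0x2);

	/* Kick the DSB DMA engine and wait for the workload to finish. */
	intel_dsb_commit(dsb);

	/* Drop the reference; the command buffer is released at zero. */
	intel_dsb_put(dsb);
}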
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
new file mode 100644
index 000000000000..395ef9ce558e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_H
+#define _INTEL_DSB_H
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct intel_crtc;
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ long refcount;
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+ * free_pos points to the first free entry position and helps in
+ * calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+ * ins_start_offset stores the start offset of the current DSB
+ * instruction and helps in identifying a batch of writes to an
+ * auto-increment register.
+ */
+ u32 ins_start_offset;
+};
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc);
+void intel_dsb_put(struct intel_dsb *dsb);
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val);
+void intel_dsb_commit(struct intel_dsb *dsb);
+
+#endif
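As a worked example of the indexed-write layout documented in intel_dsb.c above, three intel_dsb_indexed_reg_write() calls for the same auto-increment register REG with values v1, v2 and v3 would leave the command buffer looking like this (illustrative sketch, not part of the patch):

/* Illustrative only: resulting dwords for three indexed writes. */
buf[0] = 3;                                  /* dword count of the instruction */
buf[1] = (DSB_OPCODE_INDEXED_WRITE <<
	  DSB_OPCODE_SHIFT) |
	 i915_mmio_reg_offset(REG);          /* opcode | register offset */
buf[2] = v1;
buf[3] = v2;
buf[4] = v3;
buf[5] = 0;                                  /* pad to keep 8-byte alignment */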
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5fec02aceaed..a2a937109a5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector)
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
}
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 6d20434636cd..19f78a4022d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -26,7 +26,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
-#include "intel_drv.h"
+
+#include "intel_display_types.h"
#define INTEL_DSI_VIDEO_MODE 0
#define INTEL_DSI_COMMAND_MODE 1
@@ -44,13 +45,17 @@ struct intel_dsi {
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
intel_wakeref_t io_wakeref[I915_MAX_PORTS];
- /* GPIO Desc for CRC based Panel control */
+ /* GPIO Desc for panel and backlight control */
struct gpio_desc *gpio_panel;
+ struct gpio_desc *gpio_backlight;
struct intel_connector *attached_connector;
- /* bit mask of ports being driven */
- u16 ports;
+ /* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */
+ union {
+ u16 ports; /* VLV DSI */
+ u16 phys; /* ICL DSI */
+ };
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -64,6 +69,9 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
+ /* i2c bus associated with the slave device */
+ int i2c_bus_num;
+
/*
* video mode pixel format
*
@@ -132,11 +140,14 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}
-#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_port(__port, __ports_mask) \
+ for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_phy(__phy, __phys_mask) \
+ for_each_phy_masked(__phy, __phys_mask)
-static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dsi, base.base);
+ return container_of(&encoder->base, struct intel_dsi, base.base);
}
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
@@ -151,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
{
- return enc_to_intel_dsi(&encoder->base)->ports;
+ return enc_to_intel_dsi(encoder)->ports;
}
/* icl_dsi.c */
@@ -196,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 8c33262cb0b2..c87838843d0b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -27,7 +27,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
@@ -46,7 +46,7 @@
static u32 dcs_get_backlight(struct intel_connector *connector)
{
struct intel_encoder *encoder = connector->encoder;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi_device;
u8 data = 0;
enum port port;
@@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
u8 data = level;
enum port port;
@@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e5b178660408..04f953ba8f00 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -25,7 +25,10 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <asm/intel-mid.h>
@@ -38,7 +41,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
@@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
+struct i2c_adapter_lookup {
+ u16 slave_addr;
+ struct intel_dsi *intel_dsi;
+ acpi_handle dev_handle;
+};
+
#define CHV_GPIO_IDX_START_N 0
#define CHV_GPIO_IDX_START_E 73
#define CHV_GPIO_IDX_START_SW 100
@@ -375,11 +384,112 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
+#ifdef CONFIG_ACPI
+static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
+{
+ struct i2c_adapter_lookup *lookup = data;
+ struct intel_dsi *intel_dsi = lookup->intel_dsi;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_adapter *adapter;
+ acpi_handle adapter_handle;
+ acpi_status status;
+
+ if (!i2c_acpi_get_i2c_resource(ares, &sb))
+ return 1;
+
+ if (lookup->slave_addr != sb->slave_address)
+ return 1;
+
+ status = acpi_get_handle(lookup->dev_handle,
+ sb->resource_source.string_ptr,
+ &adapter_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+ if (adapter)
+ intel_dsi->i2c_bus_num = adapter->nr;
+
+ return 1;
+}
+
+static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct acpi_device *acpi_dev;
+ struct list_head resource_list;
+ struct i2c_adapter_lookup lookup;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (acpi_dev) {
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.slave_addr = slave_addr;
+ lookup.intel_dsi = intel_dsi;
+ lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(acpi_dev, &resource_list,
+ i2c_adapter_lookup,
+ &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+ }
+}
+#else
+static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+}
+#endif
+
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping I2C element execution\n");
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct i2c_adapter *adapter;
+ struct i2c_msg msg;
+ int ret;
+ u8 vbt_i2c_bus_num = *(data + 2);
+ u16 slave_addr = *(u16 *)(data + 3);
+ u8 reg_offset = *(data + 5);
+ u8 payload_size = *(data + 6);
+ u8 *payload_data;
+
+ if (intel_dsi->i2c_bus_num < 0) {
+ intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
+ i2c_acpi_find_adapter(intel_dsi, slave_addr);
+ }
+
+ adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
+ if (!adapter) {
+ DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+ goto err_bus;
+ }
- return data + *(data + 6) + 7;
+ payload_data = kzalloc(payload_size + 1, GFP_KERNEL);
+ if (!payload_data)
+ goto err_alloc;
+
+ payload_data[0] = reg_offset;
+ memcpy(&payload_data[1], (data + 7), payload_size);
+
+ msg.addr = slave_addr;
+ msg.flags = 0;
+ msg.len = payload_size + 1;
+ msg.buf = payload_data;
+
+ ret = i2c_transfer(adapter, &msg, 1);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
+
+ kfree(payload_data);
+err_alloc:
+ i2c_put_adapter(adapter);
+err_bus:
+ return data + payload_size + 7;
}
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
@@ -453,8 +563,8 @@ static const char *sequence_name(enum mipi_seq seq_id)
return "(unknown)";
}
-void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
- enum mipi_seq seq_id)
+static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
@@ -519,6 +629,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
+{
+ if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1);
+
+ intel_dsi_vbt_exec(intel_dsi, seq_id);
+
+ if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+}
+
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
@@ -664,6 +790,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
+ intel_dsi->i2c_bus_num = -1;
+
/* a regular driver would get the device in probe */
for_each_dsi_port(port, intel_dsi->ports) {
mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
@@ -671,3 +799,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
return true;
}
+
+/*
+ * On some BYT/CHT devices some sequences are incomplete and we need to
+ * manually control some GPIOs. We need to add a GPIO lookup table before we
+ * can get these GPIOs. If the GOP did not initialize the panel (HDMI
+ * inserted) we may also need to change the pinmux for the SoC's PWM0 pin
+ * from GPIO to PWM.
+ */
+static struct gpiod_lookup_table pmic_panel_gpio_table = {
+ /* Intel GFX is consumer */
+ .dev_id = "0000:00:02.0",
+ .table = {
+ /* Panel EN/DISABLE */
+ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table soc_panel_gpio_table = {
+ .dev_id = "0000:00:02.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
+ PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00",
+ "pwm0_grp", "pwm"),
+};
+
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ bool want_backlight_gpio = false;
+ bool want_panel_gpio = false;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC) {
+ gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ want_panel_gpio = true;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ gpiod_add_lookup_table(&soc_panel_gpio_table);
+ want_panel_gpio = true;
+ want_backlight_gpio = true;
+
+ /* Ensure PWM0 pin is muxed as PWM instead of GPIO */
+ ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
+ ARRAY_SIZE(soc_pwm_pinctrl_map));
+ if (ret)
+ DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ if (IS_ERR(pinctrl))
+ DRM_ERROR("Failed to set pinmux to PWM\n");
+ }
+
+ if (want_panel_gpio) {
+ intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ if (IS_ERR(intel_dsi->gpio_panel)) {
+ DRM_ERROR("Failed to own gpio for panel control\n");
+ intel_dsi->gpio_panel = NULL;
+ }
+ }
+
+ if (want_backlight_gpio) {
+ intel_dsi->gpio_backlight =
+ gpiod_get(dev->dev, "backlight", flags);
+ if (IS_ERR(intel_dsi->gpio_backlight)) {
+ DRM_ERROR("Failed to own gpio for backlight control\n");
+ intel_dsi->gpio_backlight = NULL;
+ }
+ }
+}
+
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+
+ if (intel_dsi->gpio_panel) {
+ gpiod_put(intel_dsi->gpio_panel);
+ intel_dsi->gpio_panel = NULL;
+ }
+
+ if (intel_dsi->gpio_backlight) {
+ gpiod_put(intel_dsi->gpio_backlight);
+ intel_dsi->gpio_backlight = NULL;
+ }
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC)
+ gpiod_remove_lookup_table(&pmic_panel_gpio_table);
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
+ gpiod_remove_lookup_table(&soc_panel_gpio_table);
+ }
+}
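For reference, the element layout that mipi_exec_i2c() above assumes can be summarised as the following hypothetical packed view, derived purely from the pointer arithmetic in that function (the first two bytes are not interpreted there):

/* Hypothetical view of the I2C sequence element consumed by
 * mipi_exec_i2c(); offsets match the pointer arithmetic above. */
struct vbt_i2c_element_view {
	u8  unused[2];       /* data + 0..1, not read by mipi_exec_i2c() */
	u8  vbt_i2c_bus_num; /* data + 2 */
	u16 slave_addr;      /* data + 3..4, unaligned */
	u8  reg_offset;      /* data + 5 */
	u8  payload_size;    /* data + 6 */
	u8  payload[];       /* data + 7 .. data + 7 + payload_size - 1 */
} __packed;               /* total consumed: payload_size + 7 bytes */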
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 22666d28f4aa..86a337c9d85d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_gmbus.h"
@@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
return container_of(encoder, struct intel_dvo, base);
}
-static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
{
return enc_to_dvo(intel_attached_encoder(connector));
}
@@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -178,9 +178,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder,
@@ -207,8 +207,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &pipe_config->base.mode,
- &pipe_config->base.adjusted_mode);
+ &pipe_config->hw.mode,
+ &pipe_config->hw.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -220,7 +220,7 @@ static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -253,7 +253,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
const struct drm_display_mode *fixed_mode =
intel_dvo->attached_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
/*
* If we have timings from the BIOS for the panel, put them in
@@ -277,10 +277,10 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 dvo_val;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
@@ -311,7 +311,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
@@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index d36cada2cc7d..a1048ece541e 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,7 +41,7 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -50,11 +50,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
return HAS_FBC(dev_priv);
}
-static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
-{
- return INTEL_GEN(dev_priv) <= 3;
-}
-
/*
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -73,7 +68,7 @@ static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
-static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
int *width, int *height)
{
if (width)
@@ -83,7 +78,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
}
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
- struct intel_fbc_state_cache *cache)
+ const struct intel_fbc_state_cache *cache)
{
int lines;
@@ -110,9 +105,8 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_wait_for_register(&dev_priv->uncore,
- FBC_STATUS, FBC_STAT_COMPRESSING, 0,
- 10)) {
+ if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
@@ -144,8 +138,10 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
u32 fbc_ctl2;
/* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
+ if (params->fence_id >= 0)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
@@ -157,7 +153,8 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= params->vma->fence->id;
+ if (params->fence_id >= 0)
+ fbc_ctl |= params->fence_id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -177,8 +174,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->flags & PLANE_HAS_FENCE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
+ if (params->fence_id >= 0) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -235,14 +232,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->flags & PLANE_HAS_FENCE) {
+ if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN(dev_priv, 5))
- dpfc_ctl |= params->vma->fence->id;
+ dpfc_ctl |= params->fence_id;
if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
- params->vma->fence->id);
+ params->fence_id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -254,8 +251,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
- I915_WRITE(ILK_FBC_RT_BASE,
- i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -286,13 +281,12 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
- if (i915_gem_object_get_tiling(params->vma->obj) !=
- I915_TILING_X)
+ if (params->gen9_wa_cfb_stride)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
I915_WRITE(CHICKEN_MISC_4, val);
@@ -318,11 +312,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->flags & PLANE_HAS_FENCE) {
+ if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
- params->vma->fence->id);
+ params->fence_id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -344,8 +338,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
- if (IS_GEN(dev_priv, 11))
- /* Wa_1409120013:icl,ehl */
+ if (INTEL_GEN(dev_priv) >= 11)
+ /* Wa_1409120013:icl,ehl,tgl */
I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -368,6 +362,7 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
fbc->active = true;
+ fbc->activated = true;
if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
@@ -420,29 +415,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
fbc->no_fbc_reason = reason;
}
-static bool multiple_pipes_ok(struct intel_crtc *crtc,
- struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- enum pipe pipe = crtc->pipe;
-
- /* Don't even bother tracking anything we don't need. */
- if (!no_fbc_on_multiple_pipes(dev_priv))
- return true;
-
- if (plane_state->base.visible)
- fbc->visible_pipes_mask |= (1 << pipe);
- else
- fbc->visible_pipes_mask &= ~(1 << pipe);
-
- return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
-}
-
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
- int size,
- int fb_cpp)
+ unsigned int size,
+ unsigned int fb_cpp)
{
int compression_threshold = 1;
int ret;
@@ -488,18 +464,15 @@ again:
}
}
-static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+ unsigned int size, unsigned int fb_cpp)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
- int size, fb_cpp, ret;
+ int ret;
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
- size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
- fb_cpp = fbc->state_cache.fb.format->cpp[0];
-
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
@@ -657,46 +630,55 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
- cache->vma = NULL;
- cache->flags = 0;
+ cache->plane.visible = plane_state->uapi.visible;
+ if (!cache->plane.visible)
+ return;
- cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+ cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
- cache->plane.rotation = plane_state->base.rotation;
+ cache->plane.rotation = plane_state->hw.rotation;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
- cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
- cache->plane.visible = plane_state->base.visible;
+ cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
- cache->plane.y = plane_state->base.src.y1 >> 16;
+ cache->plane.y = plane_state->uapi.src.y1 >> 16;
- cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
-
- if (!cache->plane.visible)
- return;
+ cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
- cache->vma = plane_state->vma;
- cache->flags = plane_state->flags;
- if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
- cache->flags &= ~PLANE_HAS_FENCE;
+ WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
+
+ if (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->vma->fence)
+ cache->fence_id = plane_state->vma->fence->id;
+ else
+ cache->fence_id = -1;
+}
+
+static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ fbc->compressed_fb.size * fbc->threshold;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -705,6 +687,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!cache->plane.visible) {
+ fbc->no_fbc_reason = "primary plane not visible";
+ return false;
+ }
+
/* We don't need to use a state cache here since this information is
* global for all CRTC.
*/
@@ -713,11 +700,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!cache->vma) {
- fbc->no_fbc_reason = "primary plane not visible";
- return false;
- }
-
if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
fbc->no_fbc_reason = "incompatible mode";
return false;
@@ -741,7 +723,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (!(cache->flags & PLANE_HAS_FENCE)) {
+ if (cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -784,8 +766,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
- if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
- fbc->compressed_fb.size * fbc->threshold) {
+ if (intel_fbc_cfb_size_changed(dev_priv)) {
fbc->no_fbc_reason = "CFB requirements changed";
return false;
}
@@ -795,7 +776,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 10) &&
+ if (INTEL_GEN(dev_priv) >= 9 &&
(fbc->state_cache.plane.adjusted_y & 3)) {
fbc->no_fbc_reason = "plane Y offset is misaligned";
return false;
@@ -838,8 +819,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
- params->vma = cache->vma;
- params->flags = cache->flags;
+ params->fence_id = cache->fence_id;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
@@ -850,39 +830,88 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
- params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
- 32 * fbc->threshold) * 8;
+ params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;
+
+ params->plane_visible = cache->plane.visible;
+}
+
+static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ const struct intel_fbc_reg_params *params = &fbc->params;
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ return false;
+
+ if (!params->plane_visible)
+ return false;
+
+ if (!intel_fbc_can_activate(crtc))
+ return false;
+
+ if (params->fb.format != cache->fb.format)
+ return false;
+
+ if (params->fb.stride != cache->fb.stride)
+ return false;
+
+ if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
+ return false;
+
+ if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
+ return false;
+
+ return true;
}
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
const char *reason = "update pending";
+ bool need_vblank_wait = false;
if (!fbc_supported(dev_priv))
- return;
+ return need_vblank_wait;
mutex_lock(&fbc->lock);
- if (!multiple_pipes_ok(crtc, plane_state)) {
- reason = "more than one pipe active";
- goto deactivate;
- }
-
- if (!fbc->enabled || fbc->crtc != crtc)
+ if (fbc->crtc != crtc)
goto unlock;
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
fbc->flip_pending = true;
-deactivate:
- intel_fbc_deactivate(dev_priv, reason);
+ if (!intel_fbc_can_flip_nuke(crtc_state)) {
+ intel_fbc_deactivate(dev_priv, reason);
+
+ /*
+ * Display WA #1198: glk+
+ * Need an extra vblank wait between FBC disable and most plane
+ * updates. Bspec says this is only needed for plane disable, but
+ * that is not true. Touching most plane registers will cause the
+ * corruption to appear. Also SKL/derivatives do not seem to be
+ * affected.
+ *
+ * TODO: could optimize this a bit by sampling the frame
+ * counter when we disable FBC (if it was already done earlier)
+ * and skipping the extra vblank wait before the plane update
+ * if at least one frame has already passed.
+ */
+ if (fbc->activated &&
+ (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ need_vblank_wait = true;
+ fbc->activated = false;
+ }
unlock:
mutex_unlock(&fbc->lock);
+
+ return need_vblank_wait;
}
/**
@@ -898,14 +927,13 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc = fbc->crtc;
WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->enabled);
+ WARN_ON(!fbc->crtc);
WARN_ON(fbc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
- fbc->enabled = false;
fbc->crtc = NULL;
}
@@ -916,11 +944,10 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
WARN_ON(!mutex_is_locked(&fbc->lock));
- if (!fbc->enabled || fbc->crtc != crtc)
+ if (fbc->crtc != crtc)
return;
fbc->flip_pending = false;
- WARN_ON(fbc->active);
if (!i915_modparams.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
@@ -934,10 +961,9 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
if (!intel_fbc_can_activate(crtc))
return;
- if (!fbc->busy_bits) {
- intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+ if (!fbc->busy_bits)
intel_fbc_hw_activate(dev_priv);
- } else
+ else
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
@@ -956,7 +982,7 @@ void intel_fbc_post_update(struct intel_crtc *crtc)
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
- if (fbc->enabled)
+ if (fbc->crtc)
return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
else
return fbc->possible_framebuffer_bits;
@@ -978,7 +1004,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
- if (fbc->enabled && fbc->busy_bits)
+ if (fbc->crtc && fbc->busy_bits)
intel_fbc_deactivate(dev_priv, "frontbuffer write");
mutex_unlock(&fbc->lock);
@@ -999,7 +1025,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
goto out;
- if (!fbc->busy_bits && fbc->enabled &&
+ if (!fbc->busy_bits && fbc->crtc &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
@@ -1048,12 +1074,12 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
* to pipe or plane A. */
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
if (!plane->has_fbc)
continue;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
continue;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -1082,42 +1108,53 @@ out:
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fbc_supported(dev_priv))
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled) {
- WARN_ON(fbc->crtc == NULL);
- if (fbc->crtc == crtc) {
- WARN_ON(!crtc_state->enable_fbc);
- WARN_ON(fbc->active);
- }
- goto out;
- }
+ if (fbc->crtc) {
+ if (fbc->crtc != crtc ||
+ !intel_fbc_cfb_size_changed(dev_priv))
+ goto out;
- if (!crtc_state->enable_fbc)
- goto out;
+ __intel_fbc_disable(dev_priv);
+ }
WARN_ON(fbc->active);
- WARN_ON(fbc->crtc != NULL);
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
- if (intel_fbc_alloc_cfb(crtc)) {
+
+ /* FIXME crtc_state->enable_fbc lies :( */
+ if (!cache->plane.visible)
+ goto out;
+
+ if (intel_fbc_alloc_cfb(dev_priv,
+ intel_fbc_calculate_cfb_size(dev_priv, cache),
+ fb->format->cpp[0])) {
+ cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
+ if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+ fb->modifier != I915_FORMAT_MOD_X_TILED)
+ cache->gen9_wa_cfb_stride =
+ DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+ else
+ cache->gen9_wa_cfb_stride = 0;
+
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
- fbc->enabled = true;
fbc->crtc = crtc;
out:
mutex_unlock(&fbc->lock);
@@ -1157,7 +1194,7 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled) {
+ if (fbc->crtc) {
WARN_ON(fbc->crtc->active);
__intel_fbc_disable(dev_priv);
}
@@ -1173,7 +1210,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
mutex_lock(&fbc->lock);
/* Maybe we were scheduled twice. */
- if (fbc->underrun_detected || !fbc->enabled)
+ if (fbc->underrun_detected || !fbc->crtc)
goto out;
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
@@ -1245,28 +1282,6 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
schedule_work(&fbc->underrun_work);
}
-/**
- * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
- * @dev_priv: i915 device instance
- *
- * The FBC code needs to track CRTC visibility since the older platforms can't
- * have FBC enabled while multiple pipes are used. This function does the
- * initial setup at driver load to make sure FBC is matching the real hardware.
- */
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- /* Don't even bother tracking anything if we don't need. */
- if (!no_fbc_on_multiple_pipes(dev_priv))
- return;
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(crtc) &&
- crtc->base.primary->state->visible)
- dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
-}
-
/*
* The DDX driver changes its behavior depending on the value it reads from
* i915.enable_fbc, so sanitize it by translating the default value into either
@@ -1284,10 +1299,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
- if (IS_GEMINILAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
@@ -1318,9 +1329,11 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
- fbc->enabled = false;
fbc->active = false;
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 50272eda8d43..c8a5e5098687 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,15 +19,14 @@ struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
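Since intel_fbc_pre_update() now reports whether an extra vblank wait is required (Display WA #1198, see intel_fbc.c above), a caller in the commit path would be expected to act on the return value roughly as follows (hedged sketch; intel_wait_for_vblank() is assumed to be the existing display helper and the real call site is not part of these hunks):

/* Illustrative only: honour the need-vblank-wait return value. */
if (intel_fbc_pre_update(crtc, new_crtc_state, new_plane_state))
	intel_wait_for_vblank(dev_priv, crtc->pipe);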
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1edd44ee32b2..1e98e432c9fa 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,17 +43,18 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
- unsigned int origin =
- ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
+ return ifbdev->fb->frontbuffer;
+}
- intel_fb_obj_invalidate(obj, origin);
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+ intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}
static int intel_fbdev_set_par(struct fb_info *info)
@@ -99,7 +100,7 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
return ret;
}
-static struct fb_ops intelfb_ops = {
+static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
- int size, ret;
+ int size;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -140,31 +141,23 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- obj = NULL;
+ obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = PTR_ERR(obj);
- goto err;
+ return PTR_ERR(obj);
}
fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
ifbdev->fb = to_intel_framebuffer(fb);
-
return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err:
- return ret;
}
static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
- struct drm_framebuffer *fb;
intel_wakeref_t wakeref;
struct fb_info *info;
struct i915_vma *vma;
@@ -212,7 +204,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
sizes->fb_height = intel_fb->base.height;
}
- mutex_lock(&dev->struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
@@ -226,8 +217,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
- fb = &ifbdev->fb->base;
- intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
@@ -236,15 +226,17 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- ifbdev->helper.fb = fb;
+ ifbdev->helper.fb = &ifbdev->fb->base;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
info->fix.smem_len = vma->node.size;
vaddr = i915_vma_pin_iomap(vma);
@@ -262,18 +254,18 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb_obj(fb)->stolen && !prealloc)
+ if (vma->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
- fb->width, fb->height, i915_ggtt_offset(vma));
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -281,7 +273,6 @@ out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -298,11 +289,8 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
- if (ifbdev->vma) {
- mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ if (ifbdev->vma)
intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
- mutex_unlock(&ifbdev->helper.dev->struct_mutex);
- }
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -451,7 +439,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv)))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 8545ad32bb50..6c83b350525d 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -26,7 +26,8 @@
*/
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
@@ -125,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 bit = (pipe == PIPE_A) ?
@@ -138,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, bit);
}
-static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
+static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -156,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable,
+ bool old)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
@@ -179,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -263,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (HAS_GMCH(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
- ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ ilk_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN(dev_priv, 7))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (INTEL_GEN(dev_priv) >= 8)
- broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+ bdw_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
}
@@ -426,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv))
i9xx_check_fifo_underruns(crtc);
else if (IS_GEN(dev_priv, 7))
- ivybridge_check_fifo_underruns(crtc);
+ ivb_check_fifo_underruns(crtc);
}
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 44273c10cea5..6cb02c912acc 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -30,11 +30,11 @@
* Many features require us to track changes to the currently active
* frontbuffer, especially rendering targeted at the frontbuffer.
*
- * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
- * frontbuffer slots through i915_gem_track_fb(). The function in this file are
- * then called when the contents of the frontbuffer are invalidated, when
- * frontbuffer rendering has stopped again to flush out all the changes and when
- * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * To be able to do so we track frontbuffers using a bitmask for all possible
+ * frontbuffer slots through intel_frontbuffer_track(). The functions in this
+ * file are then called when the contents of the frontbuffer are invalidated,
+ * when frontbuffer rendering has stopped again to flush out all the changes
+ * and when the frontbuffer is exchanged with a flip. Subsystems interested in
* frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
* into the relevant places and filter for the frontbuffer slots that they are
 * interested in.
@@ -58,33 +58,14 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (origin == ORIGIN_CS) {
- spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
- }
-
- might_sleep();
- intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
- intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
- intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin)
+static void frontbuffer_flush(struct drm_i915_private *i915,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
 /* Delay flushing when rings are still busy. */
- spin_lock(&dev_priv->fb_tracking.lock);
- frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+ spin_unlock(&i915->fb_tracking.lock);
if (!frontbuffer_bits)
return;
might_sleep();
- intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
- intel_psr_flush(dev_priv, frontbuffer_bits, origin);
- intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (origin == ORIGIN_CS) {
- spin_lock(&dev_priv->fb_tracking.lock);
- /* Filter out new bits since rendering started. */
- frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
- }
-
- if (frontbuffer_bits)
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+ intel_edp_drrs_flush(i915, frontbuffer_bits);
+ intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_fbc_flush(i915, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&i915->fb_tracking.lock);
+ i915->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= i915->fb_tracking.flip_bits;
+ i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
if (frontbuffer_bits)
- intel_frontbuffer_flush(dev_priv,
- frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -187,13 +149,165 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->fb_tracking.lock);
+ i915->fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+ }
+
+ might_sleep();
+ intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+ intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= i915->fb_tracking.busy_bits;
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+ }
+
+ if (frontbuffer_bits)
+ frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ kref_get(&front->ref);
+ return 0;
+}
+
+__i915_active_call
+static void frontbuffer_retire(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ intel_frontbuffer_flush(front, ORIGIN_CS);
+ intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+ __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct i915_vma *vma;
+
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ spin_unlock(&obj->vma.lock);
+
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
+ spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+
+ i915_gem_object_put(obj);
+ kfree_rcu(front, rcu);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front)
+ return front;
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ front->obj = obj;
+ kref_init(&front->ref);
+ atomic_set(&front->bits, 0);
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ i915_active_may_sleep(frontbuffer_retire));
+
+ spin_lock(&i915->fb_tracking.lock);
+ if (rcu_access_pointer(obj->frontbuffer)) {
+ kfree(front);
+ front = rcu_dereference_protected(obj->frontbuffer, true);
+ kref_get(&front->ref);
+ } else {
+ i915_gem_object_get(obj);
+ rcu_assign_pointer(obj->frontbuffer, front);
+ }
+ spin_unlock(&i915->fb_tracking.lock);
+
+ return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+ kref_put_lock(&front->ref,
+ frontbuffer_release,
+ &to_i915(front->obj->base.dev)->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits)
+{
+ /*
+ * Control of individual bits within the mask is guarded by
+ * the owning plane->mutex, i.e. we can never see concurrent
+ * manipulation of individual bits. But since the bitfield as a whole
+ * is updated using RMW, we need to use atomics in order to update
+ * the bits.
+ */
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+ BITS_PER_TYPE(atomic_t));
+
+ if (old) {
+ WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+ atomic_andnot(frontbuffer_bits, &old->bits);
+ }
+
+ if (new) {
+ WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+ atomic_or(frontbuffer_bits, &new->bits);
+ }
}
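
Illustrative aside (not part of the diff): the reference-counted intel_frontbuffer introduced above replaces the old per-object bitmask, so a caller obtains a frontbuffer, moves its tracking bits with intel_frontbuffer_track(), and drops its reference once the buffer can no longer be scanned out. A minimal sketch under that assumption follows; "example_plane" and the helper name are invented for the example and are not i915 code.

/* Sketch only: example_plane is a made-up structure holding one reference. */
struct example_plane {
        struct intel_frontbuffer *front; /* held while the buffer is on screen */
};

static int example_swap_frontbuffer(struct example_plane *plane,
                                    struct drm_i915_gem_object *new_obj,
                                    unsigned int frontbuffer_bits)
{
        struct intel_frontbuffer *new_front = NULL;

        if (new_obj) {
                /* Grabs a reference to the frontbuffer (and its object). */
                new_front = intel_frontbuffer_get(new_obj);
                if (!new_front)
                        return -ENOMEM;
        }

        /* Clear the slots on the outgoing buffer, set them on the new one. */
        intel_frontbuffer_track(plane->front, new_front, frontbuffer_bits);

        /* The old buffer is no longer scanned out through this plane. */
        if (plane->front)
                intel_frontbuffer_put(plane->front);
        plane->front = new_front;

        return 0;
}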
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 5727320c8084..6d41f5394425 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -24,10 +24,13 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
-#include "gem/i915_gem_object.h"
+#include <linux/atomic.h>
+#include <linux/kref.h>
+
+#include "gem/i915_gem_object_types.h"
+#include "i915_active.h"
struct drm_i915_private;
-struct drm_i915_gem_object;
enum fb_op_origin {
ORIGIN_GTT,
@@ -37,23 +40,60 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+struct intel_frontbuffer {
+ struct kref ref;
+ atomic_t bits;
+ struct i915_active write;
+ struct drm_i915_gem_object *obj;
+ struct rcu_head rcu;
+};
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+ struct intel_frontbuffer *front;
+
+ if (likely(!rcu_access_pointer(obj->frontbuffer)))
+ return NULL;
+
+ rcu_read_lock();
+ do {
+ front = rcu_dereference(obj->frontbuffer);
+ if (!front)
+ break;
+
+ if (unlikely(!kref_get_unless_zero(&front->ref)))
+ continue;
+
+ if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+ break;
+
+ intel_frontbuffer_put(front);
+ } while (1);
+ rcu_read_unlock();
+
+ return front;
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
+ * intel_frontbuffer_invalidate - invalidate frontbuffer object
+ * @front: frontbuffer to invalidate
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
@@ -62,37 +102,51 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!front)
+ return false;
+
+ frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return false;
- __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ __intel_fb_invalidate(front, origin, frontbuffer_bits);
return true;
}
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+
/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
+ * intel_frontbuffer_flush - flush frontbuffer object
+ * @front: frontbuffer to flush
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again.
*/
-static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!front)
+ return;
+
+ frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return;
- __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
+ __intel_fb_flush(front, origin, frontbuffer_bits);
}
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits);
+
#endif /* __INTEL_FRONTBUFFER_H__ */
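
Illustrative aside (not part of the diff): the inline intel_frontbuffer_invalidate()/intel_frontbuffer_flush() helpers above are meant to bracket direct access to a potential scanout buffer. A minimal sketch, assuming a caller that only holds the GEM object; the function name is invented, and ORIGIN_DIRTYFB is one of the fb_op_origin values from this header.

/* Sketch only: bracket CPU dirtying of a possible frontbuffer. */
static void example_cpu_dirty(struct drm_i915_gem_object *obj)
{
        /* NULL (with no locking) if obj has never been a frontbuffer. */
        struct intel_frontbuffer *front = __intel_frontbuffer_get(obj);

        if (!front)
                return;

        intel_frontbuffer_invalidate(front, ORIGIN_DIRTYFB);
        /* ... writes to the buffer contents happen here ... */
        intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);

        intel_frontbuffer_put(front); /* drop the reference taken above */
}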
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 4f6a9bd5af47..3d4d19ac1d14 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -35,7 +35,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_gmbus.h"
struct gmbus_pin {
@@ -82,25 +82,20 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
[GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
[GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
-};
-
-static const struct gmbus_pin gmbus_pins_mcc[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
- [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
+ [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION },
+ [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO },
};
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_MCC(dev_priv))
- return &gmbus_pins_mcc[pin];
- else if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -119,9 +114,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_MCC(dev_priv))
- size = ARRAY_SIZE(gmbus_pins_mcc);
- else if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
@@ -843,7 +836,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index d989085b8d22..b96212b85425 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -11,6 +11,28 @@
struct drm_i915_private;
struct i2c_adapter;
+#define GMBUS_PIN_DISABLED 0
+#define GMBUS_PIN_SSC 1
+#define GMBUS_PIN_VGADDC 2
+#define GMBUS_PIN_PANEL 3
+#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
+#define GMBUS_PIN_DPC 4 /* HDMIC */
+#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PIN_DPD 6 /* HDMID */
+#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
+#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
+#define GMBUS_PIN_2_BXT 2
+#define GMBUS_PIN_3_BXT 3
+#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_9_TC1_ICP 9
+#define GMBUS_PIN_10_TC2_ICP 10
+#define GMBUS_PIN_11_TC3_ICP 11
+#define GMBUS_PIN_12_TC4_ICP 12
+#define GMBUS_PIN_13_TC5_TGP 13
+#define GMBUS_PIN_14_TC6_TGP 14
+
+#define GMBUS_NUM_PINS 15 /* including 0 */
+
int intel_gmbus_setup(struct drm_i915_private *dev_priv);
void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 27bd7276a82d..0fdbd39f6641 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1,9 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2017 Google, Inc.
+ * Copyright (C) 2017-2019, Intel Corporation.
*
* Authors:
* Sean Paul <seanpaul@chromium.org>
+ * Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/component.h>
@@ -14,9 +16,11 @@
#include <drm/i915_component.h>
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
+#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -104,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
-static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(PORT_HDCP_STATUS(port));
- return reg & HDCP_STATUS_ENC;
+ return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ HDCP_STATUS_ENC;
}
-static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(HDCP2_STATUS_DDI(port));
- return reg & LINK_ENCRYPTION_STATUS;
+ return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS;
}
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
@@ -244,8 +244,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
- if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
- HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
@@ -253,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
}
static
-u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- enum port port = intel_dig_port->base.port;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return HDCP_TRANSA_REP_PRESENT |
+ HDCP_TRANSA_SHA1_M0;
+ case TRANSCODER_B:
+ return HDCP_TRANSB_REP_PRESENT |
+ HDCP_TRANSB_SHA1_M0;
+ case TRANSCODER_C:
+ return HDCP_TRANSC_REP_PRESENT |
+ HDCP_TRANSC_SHA1_M0;
+ case TRANSCODER_D:
+ return HDCP_TRANSD_REP_PRESENT |
+ HDCP_TRANSD_SHA1_M0;
+ default:
+ DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ return -EINVAL;
+ }
+ }
+
switch (port) {
case PORT_A:
return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
@@ -268,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- break;
+ DRM_ERROR("Unknown port %d\n", port);
+ return -EINVAL;
}
- DRM_ERROR("Unknown port %d\n", port);
- return -EINVAL;
}
static
-int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
@@ -306,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
sha_idx = 0;
sha_text = 0;
sha_leftovers = 0;
- rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+ rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
@@ -475,9 +496,8 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
- HDCP_SHA1_COMPLETE,
- HDCP_SHA1_COMPLETE, 1)) {
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
@@ -523,12 +543,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* authentication.
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
- if (num_downstream == 0)
+ if (num_downstream == 0) {
+ DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
return -EINVAL;
+ }
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
- if (!ksv_fifo)
+ if (!ksv_fifo) {
+ DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
return -ENOMEM;
+ }
ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
if (ret)
@@ -545,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 * V prime at least twice.
*/
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+ ret = intel_hdcp_validate_v_prime(connector, shim,
ksv_fifo, num_downstream,
bstatus);
if (!ret)
@@ -573,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
@@ -612,19 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+ I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
- HDCP_STATUS_AN_READY,
- HDCP_STATUS_AN_READY, 1)) {
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
- an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+ an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -642,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return -EPERM;
}
- I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
- I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+ I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
+ I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(intel_dig_port));
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
@@ -687,24 +716,27 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
- HDCP_STATUS_ENC, HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ HDCP_STATUS_ENC,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -727,16 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_wait_for_register(&dev_priv->uncore,
- PORT_HDCP_STATUS(port), ~0, 0,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ if (intel_de_wait_for_clear(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@@ -807,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* Check_link valid only when HDCP1.4 is enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -818,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -866,7 +902,6 @@ static void intel_hdcp_prop_work(struct work_struct *work)
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
- struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
@@ -876,10 +911,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
* those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state = connector->base.state;
- state->content_protection = hdcp->value;
- }
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ drm_hdcp_update_content_protection(&connector->base,
+ hdcp->value);
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -888,7 +922,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
/* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
}
static int
@@ -1207,8 +1241,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
+ DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
+ }
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
@@ -1492,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
-
+ WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
@@ -1505,17 +1542,20 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
}
- if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) |
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
+ port)) |
CTL_LINK_ENCRYPTION_REQ);
}
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
- LINK_ENCRYPTION_STATUS,
- LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_set(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
+ LINK_ENCRYPTION_STATUS,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
return ret;
}
@@ -1526,15 +1566,20 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+ WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
+ ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
- LINK_ENCRYPTION_STATUS, 0x0,
+ ret = intel_de_wait_for_clear(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
+ LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
DRM_DEBUG_KMS("Disable Encryption Timedout");
@@ -1632,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -1643,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS_DDI(port)));
+ I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1749,14 +1797,56 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
-static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+static inline
+enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (enum mei_fw_ddi)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
+static inline
+enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A ... TRANSCODER_D:
+ return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ default: /* eDP and DSI transcoders are not HDCP-capable */
+ return MEI_INVALID_TRANSCODER;
+ }
+}
+
+static inline int initialize_hdcp_port_data(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
- data->port = connector->encoder->port;
+ if (INTEL_GEN(dev_priv) < 12)
+ data->fw_ddi =
+ intel_get_mei_fw_ddi_index(connector->encoder->port);
+ else
+ /*
+ * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
+ * with zero (INVALID PORT index).
+ */
+ data->fw_ddi = MEI_DDI_INVALID_PORT;
+
+ /*
+ * The associated transcoder is set and modified at modeset, so fw_tc is
+ * initialized here to zero (invalid transcoder index). On platforms before
+ * Gen12 it stays at this value.
+ */
+ data->fw_tc = MEI_INVALID_TRANSCODER;
+
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
- data->protocol = (u8)hdcp->shim->protocol;
+ data->protocol = (u8)shim->protocol;
data->k = 1;
if (!data->streams)
@@ -1780,7 +1870,7 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
return false;
return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv));
+ IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
@@ -1806,12 +1896,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector)
+static void intel_hdcp2_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector);
+ ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
DRM_DEBUG_KMS("Mei hdcp data init failed\n");
return;
@@ -1830,24 +1921,31 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- ret = drm_connector_attach_content_protection_property(&connector->base);
- if (ret)
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, shim);
+
+ ret =
+ drm_connector_attach_content_protection_property(&connector->base,
+ hdcp->hdcp2_supported);
+ if (ret) {
+ hdcp->hdcp2_supported = false;
+ kfree(hdcp->port_data.streams);
return ret;
+ }
hdcp->shim = shim;
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-
- if (is_hdcp2_supported(dev_priv))
- intel_hdcp2_init(connector);
init_waitqueue_head(&hdcp->cp_irq_queue);
return 0;
}
-int intel_hdcp_enable(struct intel_connector *connector)
+int intel_hdcp_enable(struct intel_connector *connector,
+ enum transcoder cpu_transcoder, u8 content_type)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
int ret = -EINVAL;
@@ -1857,6 +1955,12 @@ int intel_hdcp_enable(struct intel_connector *connector)
mutex_lock(&hdcp->mutex);
WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ hdcp->content_type = content_type;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ hdcp->cpu_transcoder = cpu_transcoder;
+ hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ }
/*
 * Considering that HDCP2.2 is more secure than HDCP1.4, if the setup
@@ -1868,8 +1972,12 @@ int intel_hdcp_enable(struct intel_connector *connector)
check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
}
- /* When HDCP2.2 fails, HDCP1.4 will be attempted */
- if (ret && intel_hdcp_capable(connector)) {
+ /*
+ * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
+ * be attempted.
+ */
+ if (ret && intel_hdcp_capable(connector) &&
+ hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = _intel_hdcp_enable(connector);
}
@@ -1951,12 +2059,15 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/*
* Nothing to do if the state didn't change, or HDCP was activated since
- * the last commit
+ * the last commit, and the HDCP content type did not change either.
*/
if (old_cp == new_cp ||
(old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
- return;
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
+ if (old_state->hdcp_content_type ==
+ new_state->hdcp_content_type)
+ return;
+ }
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index be8da85c866a..f3c3272e712a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -15,13 +15,16 @@ struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_hdcp_shim;
+enum port;
+enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_enable(struct intel_connector *connector,
+ enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
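
Illustrative aside (not part of the diff): with the new prototype, callers pass the transcoder chosen at modeset time and the requested content type into intel_hdcp_enable(). A minimal sketch of an encoder-side caller, assuming the usual i915 types; the helper name and call site are invented, while crtc_state->cpu_transcoder, conn_state->content_protection and conn_state->hdcp_content_type are existing fields.

/* Sketch only: not a call site added by this patch. */
static void example_enable_hdcp(struct intel_connector *connector,
                                const struct intel_crtc_state *crtc_state,
                                const struct drm_connector_state *conn_state)
{
        if (conn_state->content_protection ==
            DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
                return;

        intel_hdcp_enable(connector,
                          crtc_state->cpu_transcoder,
                          (u8)conn_state->hdcp_content_type);
}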
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 0ebec69bbbfc..93ac0f296852 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -45,17 +45,17 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
-#include "intel_sdvo.h"
#include "intel_panel.h"
+#include "intel_sdvo.h"
#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
@@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
"HDMI transcoder function enabled, expecting disabled\n");
}
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
- container_of(encoder, struct intel_digital_port, base.base);
+ container_of(&encoder->base, struct intel_digital_port,
+ base.base);
return &intel_dig_port->hdmi;
}
-static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
{
- return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+ return enc_to_intel_hdmi(intel_attached_encoder(connector));
}
static u32 g4x_infoframe_index(unsigned int type)
@@ -189,13 +190,19 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static int hsw_dip_data_size(unsigned int type)
+static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+ unsigned int type)
{
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ if (INTEL_GEN(dev_priv) >= 11)
+ return VIDEO_DIP_GMP_DATA_SIZE;
+ else
+ return VIDEO_DIP_DATA_SIZE;
default:
return VIDEO_DIP_DATA_SIZE;
}
@@ -279,7 +286,7 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -315,7 +322,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -334,7 +341,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
@@ -356,7 +363,7 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -395,7 +402,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -414,7 +421,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -432,7 +439,7 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -468,7 +475,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -487,7 +494,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -514,7 +521,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
int i;
u32 val = I915_READ(ctl_reg);
- data_size = hsw_dip_data_size(type);
+ data_size = hsw_dip_data_size(dev_priv, type);
+
+ WARN_ON(len > data_size);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -594,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val, ret = 0;
int i;
@@ -638,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
const union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -667,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
int ret;
@@ -700,7 +709,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
{
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
int ret;
@@ -724,11 +733,20 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
drm_hdmi_avi_infoframe_content_type(frame, conn_state);
@@ -787,7 +805,7 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
- &crtc_state->base.adjusted_mode);
+ &crtc_state->hw.adjusted_mode);
if (WARN_ON(ret))
return false;
@@ -838,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
@@ -948,7 +966,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
if ((crtc_state->infoframes.enable &
@@ -973,7 +991,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
if ((crtc_state->infoframes.enable &
@@ -1010,7 +1028,7 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc_state->pipe_bpp,
- &crtc_state->base.adjusted_mode))
+ &crtc_state->hw.adjusted_mode))
crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
}
@@ -1020,8 +1038,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1079,8 +1097,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1128,8 +1146,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1491,7 +1509,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv =
intel_dig_port->base.base.dev->dev_private;
+ struct intel_connector *connector =
+ intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
u32 reg;
@@ -1502,41 +1523,31 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return false;
}
return true;
}
-static struct hdcp2_hdmi_msg_data {
+struct hdcp2_hdmi_msg_timeout {
u8 msg_id;
- u32 timeout;
- u32 timeout2;
- } hdcp2_msg_data[] = {
- {HDCP_2_2_AKE_INIT, 0, 0},
- {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
- {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
- {HDCP_2_2_AKE_STORED_KM, 0, 0},
- {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
- {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
- 0},
- {HDCP_2_2_LC_INIT, 0, 0},
- {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
- {HDCP_2_2_SKE_SEND_EKS, 0, 0},
- {HDCP_2_2_REP_SEND_RECVID_LIST,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
- {HDCP_2_2_REP_SEND_ACK, 0, 0},
- {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
- {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
- 0},
- };
+ u16 timeout;
+};
+
+static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, },
+};
static
int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -1552,12 +1563,17 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id &&
- (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
- return hdcp2_msg_data[i].timeout;
- else if (hdcp2_msg_data[i].msg_id == msg_id)
- return hdcp2_msg_data[i].timeout2;
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) {
+ if (is_paired)
+ return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS;
+ else
+ return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) {
+ if (hdcp2_msg_timeout[i].msg_id == msg_id)
+ return hdcp2_msg_timeout[i].timeout;
+ }
return -EINVAL;
}
@@ -1721,9 +1737,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 hdmi_val;
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -1759,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -1778,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -1814,7 +1830,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
@@ -1824,7 +1840,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
pipe_config->lane_count = 4;
@@ -1845,7 +1861,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
WARN_ON(!pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
@@ -1859,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1881,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1931,8 +1947,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -1992,10 +2008,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -2145,7 +2161,7 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
@@ -2185,20 +2201,22 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
true, force_dvi);
}
+ if (status != MODE_OK)
+ return status;
- return status;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
int bpc)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
- struct drm_atomic_state *state = crtc_state->base.state;
+ to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int i;
if (HAS_GMCH(dev_priv))
@@ -2223,7 +2241,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
for_each_new_connector_in_state(state, connector, connector_state, i) {
const struct drm_display_info *info = &connector->display_info;
- if (connector_state->crtc != crtc_state->base.crtc)
+ if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -2262,22 +2280,15 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_10bpc,
- int *clock_8bpc)
+ struct intel_crtc_state *config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
if (!connector->ycbcr_420_allowed) {
DRM_ERROR("Platform doesn't support YCBCR420 output\n");
return false;
}
- /* YCBCR420 TMDS rate requirement is half the pixel clock */
- config->port_clock /= 2;
- *clock_12bpc /= 2;
- *clock_10bpc /= 2;
- *clock_8bpc /= 2;
config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
@@ -2292,22 +2303,117 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return true;
}
+static int intel_hdmi_port_clock(int clock, int bpc)
+{
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
+ return clock * bpc / 8;
+}
+
+static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int clock, bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int bpc;
+
+ for (bpc = 12; bpc >= 10; bpc -= 2) {
+ if (hdmi_deep_color_possible(crtc_state, bpc) &&
+ hdmi_port_clock_valid(intel_hdmi,
+ intel_hdmi_port_clock(clock, bpc),
+ true, force_dvi) == MODE_OK)
+ return bpc;
+ }
+
+ return 8;
+}
+
+static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int bpc, clock = adjusted_mode->crtc_clock;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ clock *= 2;
+
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ clock /= 2;
+
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
+ clock, force_dvi);
+
+ crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
+
+ /*
+ * pipe_bpp could already be below 8bpc due to
+ * FDI bandwidth constraints. We shouldn't bump it
+ * back up to 8bpc in that case.
+ */
+ if (crtc_state->pipe_bpp > bpc * 3)
+ crtc_state->pipe_bpp = bpc * 3;
+
+ DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
+
+ if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
+ false, force_dvi) != MODE_OK) {
+ DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ return crtc_state->has_hdmi_sink &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
- int clock_10bpc = clock_8bpc * 5 / 4;
- int clock_12bpc = clock_8bpc * 3 / 2;
- int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -2318,33 +2424,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /* See CEA-861-E - 5.1 Default Encoding Parameters */
- pipe_config->limited_color_range =
- pipe_config->has_hdmi_sink &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- clock_8bpc *= 2;
- clock_10bpc *= 2;
- clock_12bpc *= 2;
- }
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
- if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_10bpc,
- &clock_8bpc)) {
+ if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return -EINVAL;
}
}
+ pipe_config->limited_color_range =
+ intel_hdmi_limited_color_range(pipe_config, conn_state);
+
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
@@ -2356,46 +2448,13 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- /*
- * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
- * to check that the higher clock still fits within limits.
- */
- if (hdmi_deep_color_possible(pipe_config, 12) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
- desired_bpp = 12*3;
-
- /* Need to adjust the port link by 1.5x for 12bpc. */
- pipe_config->port_clock = clock_12bpc;
- } else if (hdmi_deep_color_possible(pipe_config, 10) &&
- hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
- desired_bpp = 10 * 3;
-
- /* Need to adjust the port link by 1.25x for 10bpc. */
- pipe_config->port_clock = clock_10bpc;
- } else {
- DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
- desired_bpp = 8*3;
-
- pipe_config->port_clock = clock_8bpc;
- }
-
- if (!pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
- pipe_config->pipe_bpp = desired_bpp;
- }
-
- if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
- false, force_dvi) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
- return -EINVAL;
- }
+ ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ if (ret)
+ return ret;
- /* Set user selected PAR to incoming mode's member */
- adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
pipe_config->lane_count = 4;
@@ -2438,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
static void
intel_hdmi_unset_edid(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
@@ -2454,7 +2513,7 @@ static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -2501,7 +2560,7 @@ static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_wakeref_t wakeref;
struct edid *edid;
bool connected = false;
@@ -2542,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
@@ -2566,6 +2625,12 @@ out:
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
return status;
}
@@ -2599,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
intel_hdmi_prepare(encoder, pipe_config);
@@ -2612,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2682,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2708,7 +2773,7 @@ static struct i2c_adapter *
intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
}
@@ -2752,8 +2817,9 @@ intel_hdmi_connector_register(struct drm_connector *connector)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- if (intel_attached_hdmi(connector)->cec_notifier)
- cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
+ struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
+
+ cec_notifier_conn_unregister(n);
intel_connector_destroy(connector);
}
@@ -2808,7 +2874,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
drm_object_attach_property(&connector->base,
@@ -2842,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool scrambling)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
struct i2c_adapter *adapter =
@@ -2930,51 +2995,34 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
- u8 ddc_pin;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
- switch (port) {
- case PORT_A:
- ddc_pin = GMBUS_PIN_1_BXT;
- break;
- case PORT_B:
- ddc_pin = GMBUS_PIN_2_BXT;
- break;
- case PORT_C:
- ddc_pin = GMBUS_PIN_9_TC1_ICP;
- break;
- case PORT_D:
- ddc_pin = GMBUS_PIN_10_TC2_ICP;
- break;
- case PORT_E:
- ddc_pin = GMBUS_PIN_11_TC3_ICP;
- break;
- case PORT_F:
- ddc_pin = GMBUS_PIN_12_TC4_ICP;
- break;
- default:
- MISSING_CASE(port);
- ddc_pin = GMBUS_PIN_2_BXT;
- break;
- }
- return ddc_pin;
+ if (intel_phy_is_combo(dev_priv, phy))
+ return GMBUS_PIN_1_BXT + port;
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+
+ WARN(1, "Unknown port:%c\n", port_name(port));
+ return GMBUS_PIN_2_BXT;
}
static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
u8 ddc_pin;
- switch (port) {
- case PORT_A:
+ switch (phy) {
+ case PHY_A:
ddc_pin = GMBUS_PIN_1_BXT;
break;
- case PORT_B:
+ case PHY_B:
ddc_pin = GMBUS_PIN_2_BXT;
break;
- case PORT_C:
+ case PHY_C:
ddc_pin = GMBUS_PIN_9_TC1_ICP;
break;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
ddc_pin = GMBUS_PIN_1_BXT;
break;
}
@@ -3019,7 +3067,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_ICP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3084,18 +3132,29 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i2c_adapter *ddc;
enum port port = intel_encoder->port;
+ struct cec_connector_info conn_info;
+
+ DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
- DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
- port_name(port));
+ if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+ return;
if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
- drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+ ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ drm_connector_init_with_ddc(dev, connector,
+ &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ ddc);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
connector->interlace_allowed = 1;
@@ -3105,10 +3164,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
-
- if (WARN_ON(port == PORT_A))
- return;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
if (HAS_DDI(dev_priv))
@@ -3137,12 +3192,41 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
- intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
- port_identifier(port));
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ intel_hdmi->cec_notifier =
+ cec_notifier_conn_register(dev->dev, port_identifier(port),
+ &conn_info);
if (!intel_hdmi->cec_notifier)
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
+static enum intel_hotplug_state
+intel_hdmi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector, bool irq_received)
+{
+ enum intel_hotplug_state state;
+
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
+
+ /*
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required lengths of the DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this, schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
@@ -3166,7 +3250,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
- intel_encoder->hotplug = intel_encoder_hotplug;
+ intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
@@ -3202,11 +3286,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
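
The intel_hdmi_compute_clock()/intel_hdmi_compute_bpc() rework above reduces deep-colour handling to one formula: port clock = pixel clock * bpc / 8 (1.25x for 10bpc, 1.5x for 12bpc), with the pixel clock halved first for YCbCr 4:2:0 and doubled for double-clocked modes. A minimal, standalone sketch of that arithmetic follows; it is illustrative only (a userspace program, not driver code), and the example pixel clocks are merely common mode clocks, not values taken from the patch.

#include <stdio.h>

/* Illustrative only: mirrors the clock * bpc / 8 scaling of intel_hdmi_port_clock(). */
static int hdmi_port_clock(int clock_khz, int bpc)
{
	/* 1.0x for 8bpc, 1.25x for 10bpc, 1.5x for 12bpc */
	return clock_khz * bpc / 8;
}

int main(void)
{
	/* Example pixel clocks in kHz (roughly 1080p60 and 4k60); values are illustrative. */
	const int modes[] = { 148500, 594000 };
	const int bpcs[] = { 8, 10, 12 };

	for (unsigned int m = 0; m < sizeof(modes) / sizeof(modes[0]); m++) {
		int clock = modes[m];

		/* YCbCr 4:2:0 would halve 'clock' here; DBLCLK modes would double it. */
		for (unsigned int b = 0; b < sizeof(bpcs) / sizeof(bpcs[0]); b++)
			printf("pixel clock %d kHz @ %2d bpc -> port clock %d kHz\n",
			       clock, bpcs[b], hdmi_port_clock(clock, bpcs[b]));
	}

	return 0;
}
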
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 106c2e0bc3c9..d3659d0b408b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -23,12 +23,13 @@ struct intel_crtc_state;
struct intel_hdmi;
struct drm_connector_state;
union hdmi_infoframe;
+enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
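
The icl_port_to_ddc_pin() change earlier in this series replaces the per-port switch with arithmetic: ports on combo PHYs get consecutive GMBUS pins starting at GMBUS_PIN_1_BXT, Type-C ports get consecutive pins starting at GMBUS_PIN_9_TC1_ICP. A standalone sketch of that mapping, assuming the ICL port layout implied by the removed switch (A/B on combo PHYs, C-F on Type-C); the numeric pin values are illustrative placeholders, not taken from i915_reg.h, and the real helper asks intel_phy_is_combo()/intel_phy_is_tc() instead of hard-coding the split.

#include <stdio.h>

/* Illustrative placeholder values; the real GMBUS_PIN_* defines live in i915_reg.h. */
enum { GMBUS_PIN_1_BXT = 1, GMBUS_PIN_9_TC1_ICP = 9 };

/* port: 0 = A, 1 = B, ... Assumes A/B are combo PHYs and C-F are Type-C, as on ICL. */
static int icl_ddc_pin(int port)
{
	if (port <= 1)					/* combo PHYs A, B */
		return GMBUS_PIN_1_BXT + port;
	return GMBUS_PIN_9_TC1_ICP + (port - 2);	/* Type-C C..F -> TC1..TC4 */
}

int main(void)
{
	for (int port = 0; port <= 5; port++)
		printf("port %c -> ddc pin %d\n", 'A' + port, icl_ddc_pin(port));
	return 0;
}
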
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index ea3de4acc850..99d3a3c7989e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -26,7 +26,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hotplug.h"
/**
@@ -104,6 +104,12 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
if (IS_CNL_WITH_PORT_F(dev_priv))
return HPD_PORT_E;
return HPD_PORT_F;
+ case PORT_G:
+ return HPD_PORT_G;
+ case PORT_H:
+ return HPD_PORT_H;
+ case PORT_I:
+ return HPD_PORT_I;
default:
MISSING_CASE(port);
return HPD_NONE;
@@ -112,6 +118,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
+#define HPD_RETRY_DELAY 1000
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
@@ -266,8 +273,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+enum intel_hotplug_state
+intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -279,7 +288,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_helper_probe_detect(&connector->base, NULL, false);
if (old_status == connector->base.status)
- return false;
+ return INTEL_HOTPLUG_UNCHANGED;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.base.id,
@@ -287,13 +296,13 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->base.status));
- return true;
+ return INTEL_HOTPLUG_CHANGED;
}
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
return intel_encoder_is_dig_port(encoder) &&
- enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+ enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
static void i915_digport_work_func(struct work_struct *work)
@@ -326,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work)
if (!long_hpd && !short_hpd)
continue;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
@@ -339,7 +348,7 @@ static void i915_digport_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- schedule_work(&dev_priv->hotplug.hotplug_work);
+ queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
}
@@ -349,14 +358,16 @@ static void i915_digport_work_func(struct work_struct *work)
static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+ container_of(work, struct drm_i915_private,
+ hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- bool changed = false;
+ u32 changed = 0, retry = 0;
u32 hpd_event_bits;
+ u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -365,6 +376,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->hotplug.retry_bits;
+ dev_priv->hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -373,16 +386,29 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ u32 hpd_bit;
+
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ hpd_bit = BIT(intel_encoder->hpd_pin);
+ if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
- changed |= intel_encoder->hotplug(intel_encoder,
- intel_connector);
+ switch (intel_encoder->hotplug(intel_encoder,
+ intel_connector,
+ hpd_event_bits & hpd_bit)) {
+ case INTEL_HOTPLUG_UNCHANGED:
+ break;
+ case INTEL_HOTPLUG_CHANGED:
+ changed |= hpd_bit;
+ break;
+ case INTEL_HOTPLUG_RETRY:
+ retry |= hpd_bit;
+ break;
+ }
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -390,6 +416,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (changed)
drm_kms_helper_hotplug_event(dev);
+
+ /* Remove shared HPD pins that have changed */
+ retry &= ~changed;
+ if (retry) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.retry_bits |= retry;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
+ msecs_to_jiffies(HPD_RETRY_DELAY));
+ }
}
@@ -444,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
queue_dig = true;
@@ -516,7 +554,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
if (queue_hp)
- schedule_work(&dev_priv->hotplug.hotplug_work);
+ queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
/**
@@ -636,7 +674,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
- INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+ INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
+ i915_hotplug_work_func);
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
@@ -650,11 +689,12 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->hotplug.long_port_mask = 0;
dev_priv->hotplug.short_port_mask = 0;
dev_priv->hotplug.event_bits = 0;
+ dev_priv->hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
- cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
cancel_work_sync(&dev_priv->hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
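
The hotplug rework above converts hotplug_work into delayed work and adds per-pin retry bits: an encoder returning INTEL_HOTPLUG_RETRY gets re-probed after HPD_RETRY_DELAY, unless another encoder sharing the same pin already reported a change (retry &= ~changed). A small standalone sketch of that bit bookkeeping; the pin numbers and probe results are made up for illustration.

#include <stdio.h>

#define BIT(n) (1u << (n))

enum hotplug_state { HOTPLUG_UNCHANGED, HOTPLUG_CHANGED, HOTPLUG_RETRY };

int main(void)
{
	/* Pretend three connectors sit on pins 1, 1 and 2 (made-up numbers). */
	const int pin[] = { 1, 1, 2 };
	const enum hotplug_state result[] = {
		HOTPLUG_CHANGED,	/* first connector on pin 1 changed */
		HOTPLUG_RETRY,		/* second connector on pin 1 wants a retry */
		HOTPLUG_RETRY,		/* connector on pin 2 wants a retry */
	};
	unsigned int changed = 0, retry = 0;

	for (unsigned int i = 0; i < sizeof(pin) / sizeof(pin[0]); i++) {
		switch (result[i]) {
		case HOTPLUG_CHANGED:
			changed |= BIT(pin[i]);
			break;
		case HOTPLUG_RETRY:
			retry |= BIT(pin[i]);
			break;
		case HOTPLUG_UNCHANGED:
			break;
		}
	}

	/* Shared pins that already reported a change don't need another detect cycle. */
	retry &= ~changed;

	printf("changed mask 0x%x, retry mask 0x%x\n", changed, retry);
	if (retry)
		printf("would mod_delayed_work() the hotplug work after the retry delay\n");
	return 0;
}
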
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 805f897dbb7a..087b5f57b321 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -13,10 +13,12 @@
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
+enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector);
+enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index b19800b58442..0b67f7887cd0 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
+ pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7028d0cf3bb1..d807c5648c87 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,8 +27,8 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
+#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_drv.h"
#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
@@ -189,7 +189,7 @@ void lspcon_ycbcr420_config(struct drm_connector *connector,
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (drm_mode_is_420_only(info, adjusted_mode) &&
connector->ycbcr_420_allowed) {
@@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
const void *frame, ssize_t len)
{
bool ret;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
/* LSPCON only needs AVI IF */
if (type != HDMI_INFOFRAME_TYPE_AVI)
@@ -472,10 +472,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
ssize_t ret;
union hdmi_infoframe frame;
u8 buf[VIDEO_DIP_DATA_SIZE];
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &dig_port->lspcon;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (!lspcon->active) {
DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* FIXME actually read this from the hw */
- return enc_to_intel_lspcon(&encoder->base)->active;
+ return enc_to_intel_lspcon(encoder)->active;
}
void lspcon_resume(struct intel_lspcon *lspcon)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index efefed62a7f8..10696bb99dcf 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -42,7 +42,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
@@ -135,7 +135,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
@@ -148,7 +148,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -230,9 +230,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
u32 temp;
if (HAS_PCH_SPLIT(dev_priv)) {
@@ -318,8 +318,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(&dev_priv->uncore,
- PP_STATUS(0), PP_ON, PP_ON, 5000))
+ if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -333,8 +332,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
- if (intel_wait_for_register(&dev_priv->uncore,
- PP_STATUS(0), PP_ON, 0, 1000))
+ if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -394,8 +392,8 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
@@ -901,12 +899,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev_priv))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN(dev_priv, 4))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ if (INTEL_GEN(dev_priv) < 4)
+ intel_encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->crtc_mask = (1 << 1);
+ intel_encoder->pipe_mask = ~0;
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 824881271351..e59b4992ba1b 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -35,7 +35,7 @@
#include "display/intel_panel.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_opregion.h"
#define OPREGION_HEADER_OFFSET 0
@@ -941,6 +941,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ /*
+ * Indicate we handle monitor hotplug events ourselves so we do
+ * not need ACPI notifications for them. Disabling these avoids
+ * triggering the AML code doing the notification, which may be
+ * broken as Windows also seems to disable these.
+ */
+ opregion->acpi->chpd = 1;
}
if (mboxes & MBOX_SWSCI) {
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 21339b7f6a3e..e40c3a0e2cd7 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -30,10 +30,11 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
@@ -175,6 +176,7 @@ struct overlay_registers {
struct intel_overlay {
struct drm_i915_private *i915;
+ struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
@@ -190,7 +192,8 @@ struct intel_overlay {
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
- struct i915_active_request last_flip;
+ struct i915_active last_flip;
+ void (*flip_complete)(struct intel_overlay *ovl);
};
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -216,32 +219,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
{
- GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex));
- i915_active_request_set_retire_fn(&overlay->last_flip, retire,
- &overlay->i915->drm.struct_mutex);
- __i915_active_request_set(&overlay->last_flip, rq);
- i915_request_add(rq);
-}
+ struct i915_request *rq;
+ int err;
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct i915_request *rq,
- i915_active_retire_fn retire)
-{
- intel_overlay_submit_request(overlay, rq, retire);
- return i915_active_request_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
-}
+ overlay->flip_complete = fn;
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
- struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
+ rq = i915_request_create(overlay->context);
+ if (IS_ERR(rq))
+ return rq;
- return i915_request_create(engine->kernel_context);
+ err = i915_active_add_request(&overlay->last_flip, rq);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
}
/* overlay needs to be disabled in OCMD reg */
@@ -253,7 +249,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(overlay->active);
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -274,19 +270,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
- return intel_overlay_do_wait_request(overlay, rq, NULL);
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
+ struct intel_frontbuffer *from = NULL, *to = NULL;
WARN_ON(overlay->old_vma);
- i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
- vma ? vma->obj : NULL,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ if (overlay->vma)
+ from = intel_frontbuffer_get(overlay->vma->obj);
+ if (vma)
+ to = intel_frontbuffer_get(vma->obj);
+
+ intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ if (to)
+ intel_frontbuffer_put(to);
+ if (from)
+ intel_frontbuffer_put(from);
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -318,7 +325,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -333,8 +340,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
-
- intel_overlay_submit_request(overlay, rq, NULL);
+ i915_request_add(rq);
return 0;
}
@@ -355,20 +361,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}
static void
-intel_overlay_release_old_vid_tail(struct i915_active_request *active,
- struct i915_request *rq)
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
-
intel_overlay_release_old_vma(overlay);
}
-static void intel_overlay_off_tail(struct i915_active_request *active,
- struct i915_request *rq)
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
struct drm_i915_private *dev_priv = overlay->i915;
intel_overlay_release_old_vma(overlay);
@@ -381,6 +380,16 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
i830_overlay_clock_gating(dev_priv, true);
}
+static void
+intel_overlay_last_flip_retire(struct i915_active *active)
+{
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ if (overlay->flip_complete)
+ overlay->flip_complete(overlay);
+}
+
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
@@ -395,7 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, intel_overlay_off_tail);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -418,17 +427,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
+ i915_request_add(rq);
- return intel_overlay_do_wait_request(overlay, rq,
- intel_overlay_off_tail);
+ return i915_active_wait(&overlay->last_flip);
}
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- return i915_active_request_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
+ return i915_active_wait(&overlay->last_flip);
}
/* Wait for pending overlay flip and release old frame.
@@ -438,43 +446,38 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
+ struct i915_request *rq;
u32 *cs;
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Only wait if there is actually an old frame to release to
+ /*
+ * Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
if (!overlay->old_vma)
return 0;
- if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
- /* synchronous slowpath */
- struct i915_request *rq;
+ if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
- rq = alloc_request(overlay);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
- ret = intel_overlay_do_wait_request(overlay, rq,
- intel_overlay_release_old_vid_tail);
- if (ret)
- return ret;
- } else
- intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+ i915_request_add(rq);
- return 0;
+ return i915_active_wait(&overlay->last_flip);
}
void intel_overlay_reset(struct drm_i915_private *dev_priv)
@@ -674,8 +677,8 @@ static void update_colorkey(struct intel_overlay *overlay,
if (overlay->color_key_enabled)
flags |= DST_KEY_ENABLE;
- if (state->base.visible)
- format = state->base.fb->format->format;
+ if (state->uapi.visible)
+ format = state->hw.fb->format->format;
switch (format) {
case DRM_FORMAT_C8:
@@ -756,7 +759,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
@@ -765,19 +767,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- i915_gem_object_lock(new_bo);
vma = i915_gem_object_pin_to_display_plane(new_bo,
0, NULL, PIN_MAPPABLE);
- i915_gem_object_unlock(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
}
- intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
-
- ret = i915_vma_put_fence(vma);
- if (ret)
- goto out_unpin;
+ i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
@@ -861,7 +857,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
@@ -1077,11 +1072,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
-
ret = intel_overlay_switch_off(overlay);
-
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1097,7 +1088,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
if (i915_gem_object_is_tiled(new_bo)) {
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
@@ -1161,14 +1151,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
return 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
@@ -1242,7 +1230,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
@@ -1299,7 +1286,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1312,15 +1298,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
struct i915_vma *vma;
int err;
- mutex_lock(&i915->drm.struct_mutex);
-
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1341,29 +1323,33 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
+ struct intel_engine_cs *engine;
int ret;
if (!HAS_OVERLAY(dev_priv))
return;
+ engine = dev_priv->engine[RCS0];
+ if (!engine || !engine->kernel_context)
+ return;
+
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
overlay->i915 = dev_priv;
+ overlay->context = engine->kernel_context;
+ GEM_BUG_ON(!overlay->context);
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
@@ -1371,7 +1357,8 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- INIT_ACTIVE_REQUEST(&overlay->last_flip);
+ i915_active_init(&overlay->last_flip,
+ NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
@@ -1405,6 +1392,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
WARN_ON(overlay->active);
i915_gem_object_put(overlay->reg_bo);
+ i915_active_fini(&overlay->last_flip);
kfree(overlay);
}
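
The overlay conversion above replaces per-request retire callbacks with a single i915_active whose retire hook dispatches through overlay->flip_complete, set when the request is allocated. A minimal userspace analogue of that pattern (plain function pointers, no i915 internals), just to show the control flow:

#include <stdio.h>

struct overlay {
	const char *name;
	/* Set by alloc_request() before the request is emitted, consumed on retire. */
	void (*flip_complete)(struct overlay *ovl);
};

/* Stands in for intel_overlay_off_tail(): tail work to run once the flip retires. */
static void off_tail(struct overlay *ovl)
{
	printf("%s: overlay off, releasing old frame\n", ovl->name);
}

/* Stands in for intel_overlay_last_flip_retire(): runs when the last flip retires. */
static void last_flip_retire(struct overlay *ovl)
{
	if (ovl->flip_complete)
		ovl->flip_complete(ovl);
}

int main(void)
{
	struct overlay ovl = { .name = "overlay" };

	/* alloc_request(overlay, NULL): nothing extra to do on retire. */
	ovl.flip_complete = NULL;
	last_flip_retire(&ovl);

	/* alloc_request(overlay, intel_overlay_off_tail): tail work runs on retire. */
	ovl.flip_complete = off_tail;
	last_flip_retire(&ovl);

	return 0;
}
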
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 39d742094065..7b3ec6eb3382 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -35,8 +35,8 @@
#include <linux/pwm.h>
#include "intel_connector.h"
+#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
@@ -178,7 +178,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int x = 0, y = 0, width = 0, height = 0;
/* Native modes don't need fitting */
@@ -300,7 +300,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -321,7 +321,7 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -380,7 +380,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
@@ -1047,7 +1047,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -1077,7 +1077,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
@@ -1189,7 +1189,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
@@ -1840,13 +1840,22 @@ static int pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
+ const char *desc;
int retval;
- /* Get the PWM chip for backlight control */
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+ /* Get the right PWM chip for DSI backlight according to VBT */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ desc = "SoC";
+ }
+
if (IS_ERR(panel->backlight.pwm)) {
- DRM_ERROR("Failed to own the pwm chip\n");
+ DRM_ERROR("Failed to get the %s PWM chip\n", desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1873,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
CRC_PMIC_PWM_PERIOD_NS);
panel->backlight.enabled = panel->backlight.level != 0;
+ DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
return 0;
}
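The backlight hunk above now picks the PWM consumer name from the VBT instead of the old fixed "pwm_backlight" lookup. A small sketch of that selection, assuming only what the hunk shows (the two consumer names, and a PMIC-vs-SoC flag standing in for the PPS_BLC_PMIC test); the error handling mirrors the diff.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

/*
 * Pick the PWM chip named in the VBT: PMIC-driven or SoC-driven backlight.
 * "pmic_backlight" is a hypothetical flag standing in for the
 * dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC test in the hunk above.
 */
static struct pwm_device *get_backlight_pwm(struct device *dev,
                                            bool pmic_backlight,
                                            const char **desc)
{
        const char *con_id;

        if (pmic_backlight) {
                con_id = "pwm_pmic_backlight";
                *desc = "PMIC";
        } else {
                con_id = "pwm_soc_backlight";
                *desc = "SoC";
        }

        /* pwm_get() returns an ERR_PTR() on failure, not NULL. */
        return pwm_get(dev, con_id);
}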
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 1e2c4307d05a..520408e83681 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,7 +30,7 @@
#include <linux/seq_file.h>
#include "intel_atomic.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
@@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
switch (dig_port->base.port) {
case PORT_B:
*source = INTEL_PIPE_CRC_SOURCE_DP_B;
@@ -309,13 +309,13 @@ retry:
goto put_state;
}
- pipe_config->base.mode_changed = pipe_config->has_psr;
+ pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
if (IS_HASWELL(dev_priv) &&
- pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
- pipe_config->base.mode_changed = true;
+ pipe_config->uapi.mode_changed = true;
ret = drm_atomic_commit(state);
@@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
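Throughout these display hunks, crtc_state->base.* is split into crtc_state->uapi.* (the state userspace requested) and crtc_state->hw.* (the state the hardware is actually programmed with). A purely illustrative C sketch of that shape; the field names below are invented and the real intel_crtc_state carries far more than this.

#include <stdbool.h>

/* Illustrative stand-ins only; not the real drm/i915 definitions. */
struct example_mode {
        int hdisplay, vdisplay, vrefresh;
};

struct example_crtc_state {
        struct {
                bool active;                    /* what userspace asked for   */
                bool mode_changed;
                struct example_mode mode;
        } uapi;
        struct {
                bool active;                    /* what the HW is programmed with */
                struct example_mode adjusted_mode;
        } hw;
};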
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 69d908e6a050..89c9cf5f38d2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,7 +26,8 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_atomic.h"
+#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -76,7 +77,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc_params.compression_enable &&
+ WARN_ON(crtc_state->dsc.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -88,48 +89,35 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
-static int edp_psr_shift(enum transcoder cpu_transcoder)
+static void psr_irq_control(struct drm_i915_private *dev_priv)
{
- switch (cpu_transcoder) {
- case TRANSCODER_A:
- return EDP_PSR_TRANSCODER_A_SHIFT;
- case TRANSCODER_B:
- return EDP_PSR_TRANSCODER_B_SHIFT;
- case TRANSCODER_C:
- return EDP_PSR_TRANSCODER_C_SHIFT;
- default:
- MISSING_CASE(cpu_transcoder);
- /* fallthrough */
- case TRANSCODER_EDP:
- return EDP_PSR_TRANSCODER_EDP_SHIFT;
- }
-}
+ enum transcoder trans_shift;
+ u32 mask, val;
+ i915_reg_t imr_reg;
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
-{
- u32 debug_mask, mask;
- enum transcoder cpu_transcoder;
- u32 transcoders = BIT(TRANSCODER_EDP);
-
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
-
- debug_mask = 0;
- mask = 0;
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
-
- mask |= EDP_PSR_ERROR(shift);
- debug_mask |= EDP_PSR_POST_EXIT(shift) |
- EDP_PSR_PRE_ENTRY(shift);
+ /*
+	 * gen12+ has one instance of these registers per transcoder, all
+	 * using the same bit definitions: handle it as TRANSCODER_EDP to
+	 * force a 0 shift in the bit definitions.
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
}
- if (debug & I915_PSR_DEBUG_IRQ)
- mask |= debug_mask;
+ mask = EDP_PSR_ERROR(trans_shift);
+ if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ mask |= EDP_PSR_POST_EXIT(trans_shift) |
+ EDP_PSR_PRE_ENTRY(trans_shift);
- I915_WRITE(EDP_PSR_IMR, ~mask);
+ /* Warning: it is masking/setting reserved bits too */
+ val = I915_READ(imr_reg);
+ val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val |= ~mask;
+ I915_WRITE(imr_reg, val);
}
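psr_irq_control() above boils down to a read-modify-write of the per-transcoder field in the IMR: clear the transcoder's field, then OR in ~mask so everything except the wanted bits stays masked (1 = masked), which is also why the comment warns about reserved bits. A stand-alone sketch of that shape, with a bit layout invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Invented layout: 4 bits per transcoder; bit0=error, bit1=post-exit, bit2=pre-entry. */
#define PSR_FIELD_MASK(shift)   (0xfu << (4 * (shift)))
#define PSR_ERROR(shift)        (0x1u << (4 * (shift)))
#define PSR_POST_EXIT(shift)    (0x2u << (4 * (shift)))
#define PSR_PRE_ENTRY(shift)    (0x4u << (4 * (shift)))

static uint32_t psr_imr_update(uint32_t imr, unsigned int shift, int debug_irq)
{
        uint32_t mask = PSR_ERROR(shift);       /* always unmask errors */

        if (debug_irq)
                mask |= PSR_POST_EXIT(shift) | PSR_PRE_ENTRY(shift);

        imr &= ~PSR_FIELD_MASK(shift);  /* clear this transcoder's field       */
        imr |= ~mask;                   /* mask everything we don't care about */
        return imr;
}

int main(void)
{
        printf("IMR = 0x%08x\n", psr_imr_update(0, 1, 1)); /* 0xfffff f8f pattern */
        return 0;
}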
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -171,60 +159,58 @@ static void psr_event_print(u32 val, bool psr2_enabled)
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
- u32 transcoders = BIT(TRANSCODER_EDP);
- enum transcoder cpu_transcoder;
+ enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder trans_shift;
+ i915_reg_t imr_reg;
ktime_t time_ns = ktime_get();
- u32 mask = 0;
-
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
+ }
- if (psr_iir & EDP_PSR_ERROR(shift)) {
- DRM_WARN("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ dev_priv->psr.last_entry_attempt = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
- dev_priv->psr.irq_aux_error = true;
+ if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ dev_priv->psr.last_exit = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
- /*
- * If this interruption is not masked it will keep
- * interrupting so fast that it prevents the scheduled
- * work to run.
- * Also after a PSR error, we don't want to arm PSR
- * again so we don't care about unmask the interruption
- * or unset irq_aux_error.
- */
- mask |= EDP_PSR_ERROR(shift);
- }
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = dev_priv->psr.psr2_enabled;
- if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ psr_event_print(val, psr2_enabled);
}
+ }
- if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
- dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ u32 val;
- if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
- psr_event_print(val, psr2_enabled);
- }
- }
- }
+ dev_priv->psr.irq_aux_error = true;
- if (mask) {
- mask |= I915_READ(EDP_PSR_IMR);
- I915_WRITE(EDP_PSR_IMR, mask);
+ /*
+ * If this interrupt is not masked it will keep firing so fast
+ * that it prevents the scheduled work from running.
+ * Also, after a PSR error we don't want to arm PSR again, so we
+ * don't care about unmasking the interrupt or clearing
+ * irq_aux_error.
+ */
+ val = I915_READ(imr_reg);
+ val |= EDP_PSR_ERROR(trans_shift);
+ I915_WRITE(imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
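The error branch above records irq_aux_error and masks EDP_PSR_ERROR in the IMR before scheduling the work item, because an unmasked error would re-fire so fast that the worker never gets to run. A generic stand-alone sketch of that mask-then-defer pattern (names invented, not the i915 API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct psr_irq_state {
        uint32_t imr;           /* interrupt mask register shadow, 1 = masked */
        bool irq_aux_error;     /* consumed later by the deferred PSR worker  */
};

/* Handle one PSR error interrupt: record it, then mask it so it cannot storm. */
static void handle_psr_error(struct psr_irq_state *st, uint32_t error_bit)
{
        st->irq_aux_error = true;       /* PSR won't be re-armed after an error */
        st->imr |= error_bit;           /* keep the source masked from now on   */
        /* ...the real code schedules dev_priv->psr.work at this point...       */
}

int main(void)
{
        struct psr_irq_state st = { .imr = 0, .irq_aux_error = false };

        handle_psr_error(&st, 1u << 2);
        printf("imr=0x%x aux_error=%d\n", st.imr, st.irq_aux_error);
        return 0;
}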
@@ -283,6 +269,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ if (dev_priv->psr.dp) {
+ DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ return;
+ }
+
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@@ -305,7 +296,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- WARN_ON(dev_priv->psr.dp);
dev_priv->psr.dp = intel_dp;
if (INTEL_GEN(dev_priv) >= 9 &&
@@ -390,7 +380,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
+ I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -401,7 +391,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -412,7 +402,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
/* Enable ALPM at sink for psr2 */
if (dev_priv->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
if (dev_priv->psr.link_standby)
@@ -491,8 +483,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
- I915_WRITE(EDP_PSR_CTL, val);
+ val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -528,38 +521,128 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL and BSpec recommends
* keeping this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL, 0);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
+
+static bool
+transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
+{
+ if (INTEL_GEN(dev_priv) < 9)
+ return false;
+ else if (INTEL_GEN(dev_priv) >= 12)
+ return trans == TRANSCODER_A;
+ else
+ return trans == TRANSCODER_EDP;
+}
+
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+{
+ if (!cstate || !cstate->hw.active)
+ return 0;
+
+ return DIV_ROUND_UP(1000 * 1000,
+ drm_mode_vrefresh(&cstate->hw.adjusted_mode));
+}
+
+static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+ u32 idle_frames)
+{
+ u32 val;
+
+ idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val &= ~EDP_PSR2_IDLE_FRAME_MASK;
+ val |= idle_frames;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
+
+static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ psr2_program_idle_frames(dev_priv, 0);
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ int idle_frames;
+
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ /*
+ * Restore the PSR2 idle frame count; use 6 as the minimum to cover all
+ * known cases, including the off-by-one issue that the HW has in some
+ * cases.
+ */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ psr2_program_idle_frames(dev_priv, idle_frames);
+}
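The restore path above clamps the idle-frame count to at least 6 and to at least sink_sync_latency + 1; for example a VBT value of 2 with a sink latency of 4 yields max(6, 2) = 6 and then max(6, 5) = 6. A tiny stand-alone version of that clamp:

#include <stdio.h>

static int restore_idle_frames(int vbt_idle_frames, int sink_sync_latency)
{
        int idle_frames = vbt_idle_frames;

        if (idle_frames < 6)                    /* floor of 6 frames          */
                idle_frames = 6;
        if (idle_frames < sink_sync_latency + 1)
                idle_frames = sink_sync_latency + 1;
        return idle_frames;
}

int main(void)
{
        printf("%d\n", restore_idle_frames(2, 4));      /* 6 */
        printf("%d\n", restore_idle_frames(9, 8));      /* 9 */
        return 0;
}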
+
+static void tgl_dc5_idle_thread(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.idle_work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+ /* If delayed work is pending, it is not idle */
+ if (delayed_work_pending(&dev_priv->psr.idle_work))
+ goto unlock;
+
+ DRM_DEBUG_KMS("DC5/6 idle thread\n");
+ tgl_psr2_disable_dc3co(dev_priv);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->psr.dc3co_enabled)
+ return;
- I915_WRITE(EDP_PSR2_CTL, val);
+ cancel_delayed_work(&dev_priv->psr.idle_work);
+ /* Before PSR2 exit, disallow DC3CO */
+ tgl_psr2_disable_dc3co(dev_priv);
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
- int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
- int psr_max_h = 0, psr_max_v = 0;
+ int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
if (!dev_priv->psr.sink_psr2_support)
return false;
+ if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
+ DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
+ return false;
+ }
+
/*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
* over PSR2.
*/
- if (crtc_state->dsc_params.compression_enable) {
+ if (crtc_state->dsc.compression_enable) {
DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ psr_max_h = 5120;
+ psr_max_v = 3200;
+ max_bpp = 30;
+ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
+ max_bpp = 24;
} else if (IS_GEN(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
+ max_bpp = 24;
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
@@ -569,6 +652,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->pipe_bpp > max_bpp) {
+ DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
+ return false;
+ }
+
/*
* HW sends SU blocks of size four scan lines, which means the starting
* X coordinate and Y granularity requirements will always be met. We
@@ -595,7 +684,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int psr_setup_time;
if (!CAN_PSR(dev_priv))
@@ -606,10 +695,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
/*
* HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms with DDI implementation of PSR have different
- * PSR registers per transcoder and we only implement transcoder EDP
- * ones. Since by Display design transcoder EDP is tied to port A
- * we can safely escape based on the port A.
+ * BDW+ platforms have an instance of the PSR registers per transcoder,
+ * but for now the driver only supports one PSR instance, so let's keep
+ * it hardcoded to PORT_A.
*/
if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
@@ -648,9 +736,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -663,25 +752,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
-static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- static const i915_reg_t regs[] = {
- [TRANSCODER_A] = CHICKEN_TRANS_A,
- [TRANSCODER_B] = CHICKEN_TRANS_B,
- [TRANSCODER_C] = CHICKEN_TRANS_C,
- [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
- };
-
- WARN_ON(INTEL_GEN(dev_priv) < 9);
-
- if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
- !regs[cpu_transcoder].reg))
- cpu_transcoder = TRANSCODER_A;
-
- return regs[cpu_transcoder];
-}
-
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -697,8 +767,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
- i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
- cpu_transcoder);
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = I915_READ(reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
@@ -720,19 +789,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG, mask);
+ I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+
+ psr_irq_control(dev_priv);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
+ u32 val;
WARN_ON(dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
+ dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling PSR in this situation causes the screen to freeze the first
+ * time the PSR HW tries to activate, so let's keep PSR disabled to
+ * avoid any rendering problems.
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val &= EDP_PSR_ERROR(0);
+ } else {
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ }
+ if (val) {
+ dev_priv->psr.sink_not_reliable = true;
+ DRM_DEBUG_KMS("PSR interrupt error set, not enabling PSR\n");
+ return;
+ }
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -782,20 +878,28 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
u32 val;
if (!dev_priv->psr.active) {
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR2_ENABLE);
+ }
+
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR_ENABLE);
+
return;
}
if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+ val &= ~EDP_PSR2_ENABLE;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ val &= ~EDP_PSR_ENABLE;
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -817,21 +921,24 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(dev_priv);
if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
+ psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS;
+ psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(&dev_priv->uncore,
- psr_status, psr_status_mask, 0, 2000))
+ if (intel_de_wait_for_clear(dev_priv, psr_status,
+ psr_status_mask, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ if (dev_priv->psr.psr2_enabled)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
+
dev_priv->psr.enabled = false;
}
@@ -859,6 +966,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
+ cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -946,7 +1054,7 @@ unlock:
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
@@ -963,7 +1071,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -979,16 +1088,16 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return false;
if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
+ reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS;
+ reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
+ err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -1002,7 +1111,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int err;
state = drm_atomic_state_alloc(dev);
@@ -1013,21 +1122,18 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
state->acquire_ctx = &ctx;
retry:
- drm_for_each_crtc(crtc, dev) {
- struct drm_crtc_state *crtc_state;
- struct intel_crtc_state *intel_crtc_state;
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto error;
}
- intel_crtc_state = to_intel_crtc_state(crtc_state);
-
- if (crtc_state->active && intel_crtc_state->has_psr) {
+ if (crtc_state->hw.active && crtc_state->has_psr) {
/* Mark mode as changed to trigger a pipe->update() */
- crtc_state->mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
break;
}
}
@@ -1067,7 +1173,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+ /*
+ * Do it right away if it's already enabled, otherwise it will be done
+ * when enabling the source.
+ */
+ if (dev_priv->psr.enabled)
+ psr_irq_control(dev_priv);
mutex_unlock(&dev_priv->psr.lock);
@@ -1159,6 +1271,44 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->psr.lock);
}
+/*
+ * Once we rely completely on PSR2 S/W tracking, intel_psr_flush() will
+ * also invalidate and flush PSR for the ORIGIN_FLIP event, so
+ * tgl_dc3co_flush() will need to be changed accordingly.
+ */
+static void
+tgl_dc3co_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin)
+{
+ u32 delay;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.dc3co_enabled)
+ goto unlock;
+
+ if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ goto unlock;
+
+ /*
+ * Modify the delay of the delayed work at every frontbuffer flush flip
+ * event; when the delayed work finally runs, the display has been idle.
+ */
+ if (!(frontbuffer_bits &
+ INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ goto unlock;
+
+ tgl_psr2_enable_dc3co(dev_priv);
+ /* DC5/DC6 required idle frames = 6 */
+ delay = 6 * dev_priv->psr.dc3co_exit_delay;
+ mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
+ usecs_to_jiffies(delay));
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
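tgl_dc3co_flush() above re-arms the idle work with a delay worth 6 idle frames (delay = 6 * dc3co_exit_delay, where dc3co_exit_delay is one frame time in microseconds); at 60 Hz that is 6 * 16667 = 100002 us, so DC3CO is entered immediately and DC5/6 is only restored after roughly 100 ms without flips. A stand-alone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int frame_us = 16667;          /* one frame at 60 Hz, see above  */
        unsigned int delay_us = 6 * frame_us;   /* DC5/DC6 requires 6 idle frames */

        printf("re-arm idle work in %u us (~%u ms)\n", delay_us, delay_us / 1000);
        return 0;
}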
+
/**
* intel_psr_flush - Flush PSR
* @dev_priv: i915 device
@@ -1178,8 +1328,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
return;
+ }
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -1208,53 +1360,111 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
- u32 val;
-
if (!HAS_PSR(dev_priv))
return;
- dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
- HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
-
if (!dev_priv->psr.sink_support)
return;
+ if (IS_HASWELL(dev_priv))
+ /*
+ * HSW doesn't have its PSR registers in the same space as the
+ * transcoder, so set this to a value that, when subtracted from
+ * a register in transcoder space, yields the right offset for HSW.
+ */
+ dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
+
if (i915_modparams.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
- /*
- * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
- * will still keep the error set even after the reset done in the
- * irq_preinstall and irq_uninstall hooks.
- * And enabling in this situation cause the screen to freeze in the
- * first time that PSR HW tries to activate so lets keep PSR disabled
- * to avoid any rendering problems.
- */
- val = I915_READ(EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
- if (val) {
- DRM_DEBUG_KMS("PSR interruption error set\n");
- dev_priv->psr.sink_not_reliable = true;
- }
-
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else
- /* For new platforms let's respect VBT back again */
+ else if (INTEL_GEN(dev_priv) < 12)
+ /* For newer platforms before TGL, respect the VBT again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
mutex_init(&dev_priv->psr.lock);
}
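The hsw_psr_mmio_adjust logic above amounts to: any PSR register looked up in transcoder-EDP space gets (_SRD_CTL_EDP - _HSW_EDP_PSR_BASE) subtracted on HSW, mapping it back into the legacy HSW PSR block at the same relative offset. A sketch of that arithmetic with made-up offsets (illustrative only, not the real register addresses):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up offsets, chosen only to demonstrate the subtraction. */
        uint32_t srd_ctl_edp      = 0x6f860;    /* hypothetical transcoder-EDP SRD_CTL */
        uint32_t hsw_edp_psr_base = 0x64800;    /* hypothetical legacy HSW PSR base    */
        uint32_t adjust = srd_ctl_edp - hsw_edp_psr_base;

        /* Any register in the transcoder-EDP block keeps its relative offset. */
        uint32_t trans_reg = srd_ctl_edp + 0x10;
        printf("0x%x -> 0x%x\n", trans_reg, trans_reg - adjust);       /* 0x64810 */
        return 0;
}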
-void intel_psr_short_pulse(struct intel_dp *intel_dp)
+static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
+ u8 *status, u8 *error_status)
+{
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ if (ret != 1)
+ return ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ if (ret != 1)
+ return ret;
+
+ *status = *status & DP_PSR_SINK_STATE_MASK;
+
+ return 0;
+}
+
+static void psr_alpm_check(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ int r;
+
+ if (!psr->psr2_enabled)
+ return;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ DRM_ERROR("Error reading ALPM status\n");
+ return;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+
+ /* Clearing error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ }
+}
+
+static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
+ if (r != 1) {
+ DRM_ERROR("Error reading DP_PSR_ESI\n");
+ return;
+ }
+
+ if (val & DP_PSR_CAPS_CHANGE) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+
+ /* Clearing it */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
+ }
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
@@ -1267,38 +1477,34 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (!psr->enabled || psr->dp != intel_dp)
goto exit;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
- DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
+ DRM_ERROR("Error reading PSR status or error status\n");
goto exit;
}
- if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
- DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
- DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
- goto exit;
- }
-
- if (val & DP_PSR_RFB_STORAGE_ERROR)
+ if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ if (error_status & DP_PSR_RFB_STORAGE_ERROR)
DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
- if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
- if (val & DP_PSR_LINK_CRC_ERROR)
- DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+ if (error_status & DP_PSR_LINK_CRC_ERROR)
+ DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
- if (val & ~errors)
+ if (error_status & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
- val & ~errors);
- if (val & errors) {
- intel_psr_disable_locked(intel_dp);
- psr->sink_not_reliable = true;
- }
+ error_status & ~errors);
/* clear status register */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
+
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+
exit:
mutex_unlock(&psr->lock);
}
@@ -1317,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
return ret;
}
+
+void intel_psr_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *intel_connector;
+ struct intel_digital_port *dig_port;
+ struct drm_crtc_state *crtc_state;
+
+ if (!CAN_PSR(dev_priv) || !new_state->crtc ||
+ dev_priv->psr.initially_probed)
+ return;
+
+ intel_connector = to_intel_connector(connector);
+ dig_port = enc_to_dig_port(intel_connector->encoder);
+ if (dev_priv->psr.dp != &dig_port->dp)
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ crtc_state->mode_changed = true;
+ dev_priv->psr.initially_probed = true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index dc818826f36d..c58a1d438808 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -8,6 +8,8 @@
#include "intel_frontbuffer.h"
+struct drm_connector;
+struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_dp;
@@ -30,11 +32,13 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
bool intel_psr_enabled(struct intel_dp *intel_dp);
+void intel_psr_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 0b749c28541f..399b1542509f 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_quirks.h"
/*
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index ceda03e5a3d4..e8819fd21e03 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,7 +39,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
@@ -180,7 +180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
return container_of(encoder, struct intel_sdvo, base);
}
-static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector)
{
return to_sdvo(intel_attached_encoder(connector));
}
@@ -274,130 +274,145 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
return false;
}
-#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }
+
/** Mapping of command numbers to names, for debug output */
-static const struct _sdvo_cmd_name {
+static const struct {
u8 cmd;
const char *name;
} __attribute__ ((packed)) sdvo_cmd_names[] = {
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+ SDVO_CMD_NAME_ENTRY(RESET),
+ SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS),
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SET_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER),
/* HDMI op code */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA),
};
+#undef SDVO_CMD_NAME_ENTRY
+
+static const char *sdvo_cmd_name(u8 cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd)
+ return sdvo_cmd_names[i].name;
+ }
+
+ return NULL;
+}
+
#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
+ const char *cmd_name;
int i, pos = 0;
#define BUF_LEN 256
char buffer[BUF_LEN];
@@ -412,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
for (; i < 8; i++) {
BUF_PRINT(" ");
}
- for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
- if (cmd == sdvo_cmd_names[i].cmd) {
- BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
- break;
- }
- }
- if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+
+ cmd_name = sdvo_cmd_name(cmd);
+ if (cmd_name)
+ BUF_PRINT("(%s)", cmd_name);
+ else
BUF_PRINT("(%02X)", cmd);
- }
BUG_ON(pos >= BUF_LEN - 1);
#undef BUF_PRINT
#undef BUF_LEN
@@ -429,15 +441,23 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
}
static const char * const cmd_status_names[] = {
- "Power on",
- "Success",
- "Not supported",
- "Invalid arg",
- "Pending",
- "Target not specified",
- "Scaling not supported"
+ [SDVO_CMD_STATUS_POWER_ON] = "Power on",
+ [SDVO_CMD_STATUS_SUCCESS] = "Success",
+ [SDVO_CMD_STATUS_NOTSUPP] = "Not supported",
+ [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg",
+ [SDVO_CMD_STATUS_PENDING] = "Pending",
+ [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified",
+ [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported",
};
+static const char *sdvo_cmd_status(u8 status)
+{
+ if (status < ARRAY_SIZE(cmd_status_names))
+ return cmd_status_names[status];
+ else
+ return NULL;
+}
+
static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
@@ -516,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
+ const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
@@ -562,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
#define BUF_PRINT(args...) \
pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- BUF_PRINT("(%s)", cmd_status_names[status]);
+ cmd_status = sdvo_cmd_status(status);
+ if (cmd_status)
+ BUF_PRINT("(%s)", cmd_status);
else
BUF_PRINT("(??? %d)", status);
@@ -929,6 +951,20 @@ static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
&audio_state, 1);
}
+static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo,
+ u8 *hbuf_size)
+{
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! However zero means zero. */
+ if (*hbuf_size)
+ (*hbuf_size)++;
+
+ return true;
+}
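The helper above decodes the SDVO "0-based" buffer size: a raw value of N means N + 1 bytes, except that a raw 0 really is 0. For example raw 0x00 -> 0 bytes, raw 0x03 -> 4 bytes, raw 0x10 -> 17 bytes. A stand-alone version of the decode:

#include <stdio.h>

/* Raw SDVO HBUF size is 0-based, but a raw 0 still means "no buffer". */
static unsigned int decode_hbuf_size(unsigned char raw)
{
        return raw ? raw + 1u : 0u;
}

int main(void)
{
        printf("%u %u %u\n", decode_hbuf_size(0x00),
               decode_hbuf_size(0x03), decode_hbuf_size(0x10));  /* 0 4 17 */
        return 0;
}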
+
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@@ -972,14 +1008,10 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
set_buf_index, 2))
return false;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
- &hbuf_size, 1))
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- /* Buffer size is 0 based, hooray! */
- hbuf_size++;
-
- DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
if (hbuf_size < length)
@@ -1030,14 +1062,10 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
- &hbuf_size, 1))
- return -ENXIO;
-
- /* Buffer size is 0 based, hooray! */
- hbuf_size++;
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
+ return -ENXIO;
- DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
hbuf_size = min_t(unsigned int, length, hbuf_size);
@@ -1059,7 +1087,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
{
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int ret;
if (!crtc_state->has_hdmi_sink)
@@ -1248,8 +1276,8 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
to_intel_sdvo_connector_state(conn_state);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct drm_display_mode *mode = &pipe_config->base.mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->hw.mode;
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
@@ -1321,9 +1349,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (IS_TV(intel_sdvo_connector))
i9xx_adjust_sdvo_tv_clock(pipe_config);
- /* Set user selected PAR to incoming mode's member */
- if (intel_sdvo_connector->is_hdmi)
- adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
pipe_config, conn_state)) {
@@ -1401,13 +1429,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
const struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
- const struct drm_display_mode *mode = &crtc_state->base.mode;
+ const struct drm_display_mode *mode = &crtc_state->hw.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1523,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
u16 active_outputs = 0;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@@ -1601,7 +1629,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1621,7 +1649,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1673,7 +1701,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
u8 *eld = connector->eld;
@@ -1695,7 +1723,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
if (old_crtc_state->has_audio)
@@ -1757,7 +1785,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 temp;
bool input1, input2;
int i;
@@ -1795,7 +1823,7 @@ static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1893,12 +1921,14 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
&intel_sdvo->hotplug_active, 2);
}
-static bool intel_sdvo_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_sdvo_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
intel_sdvo_enable_hotplug(encoder);
- return intel_encoder_hotplug(encoder, connector);
+ return intel_encoder_hotplug(encoder, connector, irq_received);
}
static bool
@@ -1911,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
static struct edid *
intel_sdvo_get_edid(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
return drm_get_edid(connector, &sdvo->ddc);
}
@@ -1929,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
enum drm_connector_status status;
@@ -1998,7 +2028,7 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
u16 response;
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
@@ -2145,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
u32 reply = 0, format_map = 0;
@@ -2185,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
@@ -2349,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
static int
intel_sdvo_connector_register(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
int ret;
ret = intel_connector_register(connector);
@@ -2364,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector)
static void
intel_sdvo_connector_unregister(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
sysfs_remove_link(&connector->kdev->kobj,
sdvo->ddc.dev.kobj.name);
@@ -2624,7 +2654,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
intel_attach_broadcast_rgb_property(&connector->base.base);
}
intel_attach_aspect_ratio_property(&connector->base.base);
- connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
@@ -2891,7 +2920,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_sdvo->base.pipe_mask = ~0;
return true;
}
@@ -2903,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
- if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+ if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
drm_connector_unregister(connector);
intel_connector_destroy(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index c9e05bcdd141..a66f224aa17d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
enum pipe;
+enum port;
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 004b52027ae8..fca77ec1e0dd 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -40,26 +40,14 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_atomic_plane.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-bool is_planar_yuv_format(u32 pixelformat)
-{
- switch (pixelformat) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -93,9 +81,9 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -132,7 +120,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
- trace_i915_pipe_update_start(crtc);
+ trace_intel_pipe_update_start(crtc);
for (;;) {
/*
@@ -185,7 +173,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
- trace_i915_pipe_update_vblank_evaded(crtc);
+ trace_intel_pipe_update_vblank_evaded(crtc);
return;
irq_disable:
@@ -202,27 +190,28 @@ irq_disable:
*/
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+ trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
/* We're still in the vblank-evade critical section, this can't race.
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (new_crtc_state->base.event) {
+ if (new_crtc_state->uapi.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
+ drm_crtc_arm_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
spin_unlock(&crtc->base.dev->event_lock);
- new_crtc_state->base.event = NULL;
+ new_crtc_state->uapi.event = NULL;
}
local_irq_enable();
@@ -251,9 +240,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride, max_stride;
/*
@@ -263,7 +252,7 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
* kick in due the plane being invisible.
*/
if (intel_plane_can_remap(plane_state) &&
- !plane_state->base.visible)
+ !plane_state->uapi.visible)
return 0;
/* FIXME other color planes? */
@@ -283,10 +272,10 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_rect *src = &plane_state->base.src;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
- bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -299,10 +288,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- src->x1 = src_x << 16;
- src->x2 = (src_x + src_w) << 16;
- src->y1 = src_y << 16;
- src->y2 = (src_y + src_h) << 16;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
if (!fb->format->is_yuv)
return 0;
@@ -330,6 +317,61 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
+}
+
+static void
+skl_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 9;
+ *den = 8;
+ }
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int num, den;
+
+ skl_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock on glk+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ den *= 2;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
+ mul_u32_u32(den, dst_w * dst_h));
+}
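/*
 * Illustrative sketch (not part of the patch): the minimum cdclk that
 * skl_plane_min_cdclk() derives above is just pixel_rate scaled by the
 * num/den ratio and by the downscale factor (src area / dst area).
 * Plain 64-bit C math stands in for the kernel's mul_u32_u32() /
 * DIV64_U64_ROUND_UP() helpers; the numbers in main() are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_min_cdclk(uint32_t pixel_rate,
				 uint32_t num, uint32_t den,
				 uint32_t src_w, uint32_t src_h,
				 uint32_t dst_w, uint32_t dst_h)
{
	/* Downscaling limits the maximum pixel rate */
	if (dst_w > src_w)
		dst_w = src_w;
	if (dst_h > src_h)
		dst_h = src_h;

	uint64_t n = (uint64_t)pixel_rate * num * src_w * src_h;
	uint64_t d = (uint64_t)den * dst_w * dst_h;

	return (uint32_t)((n + d - 1) / d);	/* round up */
}

int main(void)
{
	/* e.g. 64bpp fb on glk+ (10/8 ratio, 2 px/clock -> den 16), 2x2 downscale */
	printf("min cdclk: %u kHz\n",
	       sketch_min_cdclk(300000, 10, 16, 3840, 2160, 1920, 1080));
	return 0;
}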
+
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -354,27 +396,28 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
&crtc_state->scaler_state.scalers[scaler_id];
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u16 y_hphase, uv_rgb_hphase;
u16 y_vphase, uv_rgb_vphase;
int hscale, vscale;
- hscale = drm_rect_calc_hscale(&plane_state->base.src,
- &plane_state->base.dst,
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
- vscale = drm_rect_calc_vscale(&plane_state->base.src,
- &plane_state->base.dst,
+ vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -441,9 +484,21 @@ icl_program_input_csc(struct intel_plane *plane,
*/
[DRM_COLOR_YCBCR_BT709] = {
0x7C98, 0x7800, 0x0,
- 0x9EF8, 0x7800, 0xABF8,
+ 0x9EF8, 0x7800, 0xAC00,
0x0, 0x7800, 0x7ED8,
},
+ /*
+ * BT.2020 full range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.000, 0.000, 1.474,
+ * 1.000, -0.1645, -0.5713,
+ * 1.000, 1.8814, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7BC8, 0x7800, 0x0,
+ 0x8928, 0x7800, 0xAA88,
+ 0x0, 0x7800, 0x7F10,
+ },
};
/* Matrix for Limited Range to Full Range Conversion */
@@ -451,34 +506,46 @@ icl_program_input_csc(struct intel_plane *plane,
/*
* BT.601 Limited range YCbCr -> full range RGB
* The matrix required is :
- * [1.164384, 0.000, 1.596370,
- * 1.138393, -0.382500, -0.794598,
- * 1.138393, 1.971696, 0.0000]
+ * [1.164384, 0.000, 1.596027,
+ * 1.164384, -0.39175, -0.812813,
+ * 1.164384, 2.017232, 0.0000]
*/
[DRM_COLOR_YCBCR_BT601] = {
0x7CC8, 0x7950, 0x0,
- 0x8CB8, 0x7918, 0x9C40,
- 0x0, 0x7918, 0x7FC8,
+ 0x8D00, 0x7950, 0x9C88,
+ 0x0, 0x7950, 0x6810,
},
/*
* BT.709 Limited range YCbCr -> full range RGB
* The matrix required is :
- * [1.164, 0.000, 1.833671,
- * 1.138393, -0.213249, -0.532909,
- * 1.138393, 2.112402, 0.0000]
+ * [1.164384, 0.000, 1.792741,
+ * 1.164384, -0.213249, -0.532909,
+ * 1.164384, 2.112402, 0.0000]
*/
[DRM_COLOR_YCBCR_BT709] = {
- 0x7EA8, 0x7950, 0x0,
- 0x8888, 0x7918, 0xADA8,
- 0x0, 0x7918, 0x6870,
+ 0x7E58, 0x7950, 0x0,
+ 0x8888, 0x7950, 0xADA8,
+ 0x0, 0x7950, 0x6870,
+ },
+ /*
+ * BT.2020 Limited range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.164, 0.000, 1.678,
+ * 1.164, -0.1873, -0.6504,
+ * 1.164, 2.1417, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7D70, 0x7950, 0x0,
+ 0x8A68, 0x7950, 0xAC00,
+ 0x0, 0x7950, 0x6890,
},
};
const u16 *csc;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->base.color_encoding];
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = input_csc_matrix[plane_state->hw.color_encoding];
else
- csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+ csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
GOFF(csc[1]));
@@ -492,8 +559,11 @@ icl_program_input_csc(struct intel_plane *plane,
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+ else
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
@@ -505,7 +575,7 @@ static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
- int color_plane, bool slave, u32 plane_ctl)
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -513,19 +583,21 @@ skl_program_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->color_plane[color_plane].offset;
u32 stride = skl_plane_stride(plane_state, color_plane);
- u32 aux_stride = skl_plane_stride(plane_state, 1);
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = intel_main_to_aux_plane(fb, color_plane);
+ u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
+ u32 aux_stride = skl_plane_stride(plane_state, aux_plane);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
u32 x = plane_state->color_plane[color_plane].x;
u32 y = plane_state->color_plane[color_plane].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
- struct intel_plane *linked = plane_state->linked_plane;
- const struct drm_framebuffer *fb = plane_state->base.fb;
- u8 alpha = plane_state->base.alpha >> 8;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0;
unsigned long irqflags;
u32 keymsk, keymax;
+ u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@@ -554,29 +626,13 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
- (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- u32 cus_ctl = 0;
-
- if (linked) {
- /* Enable and use MPEG-2 chroma siting */
- cus_ctl = PLANE_CUS_ENABLE |
- PLANE_CUS_HPHASE_0 |
- PLANE_CUS_VPHASE_SIGN_NEGATIVE |
- PLANE_CUS_VPHASE_0_25;
-
- if (linked->id == PLANE_SPRITE5)
- cus_ctl |= PLANE_CUS_PLANE_7;
- else if (linked->id == PLANE_SPRITE4)
- cus_ctl |= PLANE_CUS_PLANE_6;
- else
- MISSING_CASE(linked->id);
- }
+ if (INTEL_GEN(dev_priv) < 12)
+ aux_dist |= aux_stride;
+ I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
- }
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
@@ -606,7 +662,7 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- if (!slave && plane_state->scaler_id >= 0)
+ if (plane_state->scaler_id >= 0)
skl_program_scaler(plane, crtc_state, plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -619,24 +675,12 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
- if (plane_state->linked_plane) {
- /* Program the UV plane */
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on planar master */
color_plane = 1;
- }
- skl_program_plane(plane, crtc_state, plane_state,
- color_plane, false, plane_state->ctl);
-}
-
-static void
-icl_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- skl_program_plane(plane, crtc_state, plane_state, 0, true,
- plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+ skl_program_plane(plane, crtc_state, plane_state, color_plane);
}
-
static void
skl_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
@@ -683,12 +727,22 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void i9xx_plane_linear_gamma(u16 gamma[8])
+{
+ /* The points are not evenly spaced. */
+ static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
+ int i;
+
+ for (i = 0; i < 8; i++)
+ gamma[i] = (in[i] << 8) / 32;
+}
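/*
 * Illustrative sketch (not part of the patch): for the unevenly spaced
 * input points 0,1,2,4,8,16,24,32 used by i9xx_plane_linear_gamma(),
 * the "linear" LUT is simply in*256/32, i.e. 0, 8, 16, 32, 64, 128,
 * 192, 256 (with 256 representing 1.0).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
	uint16_t gamma[8];
	int i;

	for (i = 0; i < 8; i++) {
		gamma[i] = (in[i] << 8) / 32;
		printf("point %2u -> %u\n", in[i], gamma[i]);
	}
	return 0;
}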
+
static void
chv_update_csc(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
/*
* |r| | c0 c1 c2 | |cr|
@@ -714,7 +768,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
0, 4096, 7601,
},
};
- const s16 *csc = csc_matrix[plane_state->base.color_encoding];
+ const s16 *csc = csc_matrix[plane_state->hw.color_encoding];
/* Seems RGB data bypasses the CSC always */
if (!fb->format->is_yuv)
@@ -745,15 +799,15 @@ chv_update_csc(const struct intel_plane_state *plane_state)
static void
vlv_update_clrc(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
int contrast, brightness, sh_scale, sh_sin, sh_cos;
if (fb->format->is_yuv &&
- plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+ plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
/*
* Expand limited range to full range:
* Contrast is applied first and is used to expand Y range.
@@ -781,6 +835,85 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
+static void
+vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * VLV bspec only considers cases where all three planes are
+ * enabled, and cases where the primary and one sprite are enabled.
+ * Let's assume the case with just two sprites enabled also
+ * maps to the latter case.
+ */
+ if (hweight8(active_planes) == 3) {
+ switch (cpp) {
+ case 8:
+ *num = 11;
+ *den = 8;
+ break;
+ case 4:
+ *num = 18;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ vlv_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
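/*
 * Illustrative sketch (not part of the patch): the vlv/ivb/hsw
 * min-cdclk paths all reduce to pixel_rate scaled by the bspec-derived
 * num/den ratio, rounded up. The example values assume the VLV
 * three-plane, 64bpp case (11/8) with a made-up pixel rate.
 */
#include <stdio.h>

static unsigned int sketch_ratio_min_cdclk(unsigned int pixel_rate,
					   unsigned int num, unsigned int den)
{
	return (pixel_rate * num + den - 1) / den;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* 148500 kHz pixel rate, three planes active, cpp == 8 -> 11/8 */
	printf("min cdclk: %u kHz\n", sketch_ratio_min_cdclk(148500, 11, 8));
	return 0;
}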
+
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -794,8 +927,8 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
@@ -814,6 +947,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_VYUY:
sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
break;
+ case DRM_FORMAT_C8:
+ sprctl |= SP_FORMAT_8BPP;
+ break;
case DRM_FORMAT_RGB565:
sprctl |= SP_FORMAT_BGR565;
break;
@@ -829,6 +965,12 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ABGR2101010:
sprctl |= SP_FORMAT_RGBA1010102;
break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SP_FORMAT_BGRX1010102;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ sprctl |= SP_FORMAT_BGRA1010102;
+ break;
case DRM_FORMAT_XBGR8888:
sprctl |= SP_FORMAT_RGBX8888;
break;
@@ -840,7 +982,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SP_YUV_FORMAT_BT709;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -858,6 +1000,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
+static void vlv_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ u16 gamma[8];
+ int i;
+
+ /* Seems RGB data bypasses the gamma always */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 |
+ gamma[i] << 8 |
+ gamma[i]);
+}
+
static void
vlv_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -869,10 +1036,10 @@ vlv_update_plane(struct intel_plane *plane,
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -916,6 +1083,7 @@ vlv_update_plane(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
+ vlv_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -961,6 +1129,164 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ switch (cpp) {
+ case 8:
+ *num = 12;
+ *den = 8;
+ break;
+ case 4:
+ *num = 19;
+ *den = 16;
+ break;
+ case 2:
+ *num = 33;
+ *den = 32;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+}
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, dst_w, pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+
+ if (src_w != dst_w)
+ ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
+ else
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w),
+ den * dst_w);
+}
+
+static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int num, den;
+
+ hsw_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -974,13 +1300,23 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
+static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ return fb->format->cpp[0] == 8 &&
+ (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
@@ -996,6 +1332,18 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
break;
@@ -1013,10 +1361,13 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (!ivb_need_sprite_gamma(plane_state))
+ sprctl |= SPRITE_INT_GAMMA_DISABLE;
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1033,6 +1384,62 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
+static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
+ u16 gamma[18])
+{
+ int scale, i;
+
+ /*
+ * WaFP16GammaEnabling:ivb,hsw
+ * "Workaround : When using the 64-bit format, the sprite output
+ * on each color channel has one quarter amplitude. It can be
+ * brought up to full amplitude by using sprite internal gamma
+ * correction, pipe gamma correction, or pipe color space
+ * conversion to multiply the sprite output by four."
+ */
+ scale = 4;
+
+ for (i = 0; i < 16; i++)
+ gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1);
+
+ gamma[i] = min((scale * i << 10) / 16, 1 << 10);
+ i++;
+
+ gamma[i] = 3 << 10;
+ i++;
+}
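/*
 * Illustrative sketch (not part of the patch): the WaFP16GammaEnabling
 * LUT built by ivb_sprite_linear_gamma() ramps with a 4x slope (to
 * undo the quarter-amplitude FP16 sprite output) and saturates at 1.0:
 * 0, 256, 512, 768, then 1023 for the remaining base entries, 1024 for
 * the 16th point and 3072 (3.0 in 1.10 fixed point) for the last one.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t gamma[18];
	int scale = 4, i;

	for (i = 0; i < 16; i++) {
		unsigned int v = (scale * i << 10) / 16;

		gamma[i] = v < 1023 ? v : 1023;
	}
	gamma[i++] = 1024;	/* min((4 * 16 << 10) / 16, 1 << 10) */
	gamma[i++] = 3 << 10;	/* 3.0 */

	for (i = 0; i < 18; i++)
		printf("gamma[%2d] = %u\n", i, gamma[i]);
	return 0;
}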
+
+static void ivb_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u16 gamma[18];
+ int i;
+
+ if (!ivb_need_sprite_gamma(plane_state))
+ return;
+
+ ivb_sprite_linear_gamma(plane_state, gamma);
+
+ /* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ I915_WRITE_FW(SPRGAMC(pipe, i),
+ gamma[i] << 20 |
+ gamma[i] << 10 |
+ gamma[i]);
+
+ I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
+ I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
+ I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+ i++;
+
+ I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
+ I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
+ I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+ i++;
+}
+
static void
ivb_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1043,14 +1450,14 @@ ivb_update_plane(struct intel_plane *plane,
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 sprctl, sprscale = 0;
unsigned long irqflags;
@@ -1099,6 +1506,8 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ ivb_update_gamma(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1144,6 +1553,53 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int hscale, pixel_rate;
+ unsigned int limit, decimate;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+ if (hscale < 0x10000)
+ return pixel_rate;
+
+ /* Decimation steps at 2x,4x,8x,16x */
+ decimate = ilog2(hscale >> 16);
+ hscale >>= decimate;
+
+ /* Starting limit is 90% of cdclk */
+ limit = 9;
+
+ /* -10% per decimation step */
+ limit -= decimate;
+
+ /* -10% for RGB */
+ if (fb->format->cpp[0] >= 4)
+ limit--;
+
+ /*
+ * We should also do -10% if sprite scaling is enabled
+ * on the other pipe, but we can't really check for that,
+ * so we ignore it.
+ */
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale),
+ limit << 16);
+}
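/*
 * Illustrative sketch (not part of the patch): the g4x scaling limit
 * is expressed in tenths of cdclk; each 2x decimation step and an RGB
 * format each cost 10%. Plain C stands in for the kernel's ilog2() and
 * mul_u32_u32() helpers, and the numbers in main() are made up.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int sketch_g4x_min_cdclk(unsigned int pixel_rate,
					 unsigned int hscale /* 16.16 */,
					 int is_rgb)
{
	unsigned int limit = 9, decimate = 0;

	if (hscale < 0x10000)
		return pixel_rate;

	/* ilog2(hscale >> 16): decimation steps at 2x,4x,8x,16x */
	while ((hscale >> 16) >> (decimate + 1))
		decimate++;
	hscale >>= decimate;

	limit -= decimate;	/* -10% per decimation step */
	if (is_rgb)
		limit--;	/* -10% for RGB */

	uint64_t n = (uint64_t)pixel_rate * 10 * hscale;
	uint64_t d = (uint64_t)limit << 16;

	return (unsigned int)((n + d - 1) / d);
}

int main(void)
{
	/* 3x horizontal downscale of an RGB framebuffer at 148500 kHz */
	printf("min cdclk: %u kHz\n",
	       sketch_g4x_min_cdclk(148500, 3 << 16, 1));
	return 0;
}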
+
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -1169,9 +1625,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 dvscntr;
@@ -1187,6 +1643,18 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
break;
@@ -1204,10 +1672,10 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
dvscntr |= DVS_YUV_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1224,6 +1692,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return dvscntr;
}
+static void g4x_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[8];
+ int i;
+
+ /* Seems RGB data bypasses the gamma always */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 |
+ gamma[i] << 8 |
+ gamma[i]);
+}
+
+static void ilk_sprite_linear_gamma(u16 gamma[17])
+{
+ int i;
+
+ for (i = 0; i < 17; i++)
+ gamma[i] = (i << 10) / 16;
+}
+
+static void ilk_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[17];
+ int i;
+
+ /* Seems RGB data bypasses the gamma always */
+ if (!fb->format->is_yuv)
+ return;
+
+ ilk_sprite_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 |
+ gamma[i] << 10 |
+ gamma[i]);
+
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ i++;
+}
+
static void
g4x_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1234,14 +1762,14 @@ g4x_update_plane(struct intel_plane *plane,
u32 dvssurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 dvscntr, dvsscale = 0;
unsigned long irqflags;
@@ -1283,6 +1811,11 @@ g4x_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ if (IS_G4X(dev_priv))
+ g4x_update_gamma(plane_state);
+ else
+ ilk_update_gamma(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1335,6 +1868,11 @@ static bool intel_fb_scalable(const struct drm_framebuffer *fb)
switch (fb->format->format) {
case DRM_FORMAT_C8:
return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return INTEL_GEN(to_i915(fb->dev)) >= 11;
default:
return true;
}
@@ -1344,12 +1882,13 @@ static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- const struct drm_rect *src = &plane_state->base.src;
- const struct drm_rect *dst = &plane_state->base.dst;
- int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
+ int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
+ unsigned int stride = plane_state->color_plane[0].stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
@@ -1358,7 +1897,6 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
crtc_h = drm_rect_height(dst);
src_x = src->x1 >> 16;
- src_y = src->y1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_h = drm_rect_height(src) >> 16;
@@ -1392,9 +1930,9 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- if (width_bytes > 4096 || fb->pitches[0] > 4096) {
+ if (stride > 4096) {
DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
- fb->pitches[0], 4096);
+ stride, 4096);
return -EINVAL;
}
@@ -1405,13 +1943,13 @@ static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (intel_fb_scalable(plane_state->base.fb)) {
+ if (intel_fb_scalable(plane_state->hw.fb)) {
if (INTEL_GEN(dev_priv) < 7) {
min_scale = 1;
max_scale = 16 << 16;
@@ -1421,8 +1959,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
}
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
min_scale, max_scale,
true, true);
if (ret)
@@ -1432,7 +1970,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -1453,9 +1991,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- unsigned int rotation = plane_state->base.rotation;
+ unsigned int rotation = plane_state->hw.rotation;
/* CHV ignores the mirror bit when the rotate bit is set :( */
if (IS_CHERRYVIEW(dev_priv) &&
@@ -1478,8 +2016,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
@@ -1490,7 +2028,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -1505,10 +2043,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
struct drm_format_name_buf format_name;
if (!fb)
@@ -1563,12 +2101,14 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->base.enable &&
- crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ if (crtc_state->hw.enable &&
+ crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
(fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
@@ -1580,9 +2120,9 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int crtc_x = plane_state->base.dst.x1;
- int crtc_w = drm_rect_width(&plane_state->base.dst);
+ to_i915(plane_state->uapi.plane->dev);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int pipe_src_w = crtc_state->pipe_src_w;
/*
@@ -1608,12 +2148,13 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
/* Display WA #1106 */
- if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -1623,12 +2164,28 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
return 0;
}
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
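/*
 * Illustrative sketch (not part of the patch): the limits returned by
 * skl_plane_max_scale() are 16.16 fixed-point downscale factors, i.e.
 * just under 3x (0x30000 - 1) for glk+ or non-planar formats and just
 * under 2x (0x20000 - 1) for planar YUV on pre-glk.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int max_default = 0x30000 - 1;
	const unsigned int max_pre_glk_planar = 0x20000 - 1;

	printf("max downscale (glk+/non-planar): %.5fx\n",
	       max_default / 65536.0);
	printf("max downscale (pre-glk planar YUV): %.5fx\n",
	       max_pre_glk_planar / 65536.0);
	return 0;
}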
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
@@ -1640,11 +2197,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state, fb->format->format);
+ max_scale = skl_plane_max_scale(dev_priv, fb);
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
min_scale, max_scale,
true, true);
if (ret)
@@ -1654,7 +2211,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
@@ -1670,8 +2227,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
return ret;
/* HW only has 8 bits pixel precision, disable plane if invisible */
- if (!(plane_state->base.alpha >> 8))
- plane_state->base.visible = false;
+ if (!(plane_state->hw.alpha >> 8))
+ plane_state->uapi.visible = false;
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
@@ -1679,6 +2236,15 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
plane_state);
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ icl_is_hdr_plane(dev_priv, plane->id))
+ /* Enable and use MPEG-2 chroma siting */
+ plane_state->cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+ else
+ plane_state->cus_ctl = 0;
+
return 0;
}
@@ -1690,7 +2256,7 @@ static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -1815,8 +2381,12 @@ static const u64 i9xx_plane_format_modifiers[] = {
};
static const u32 snb_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -1824,11 +2394,12 @@ static const u32 snb_plane_formats[] = {
};
static const u32 vlv_plane_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
@@ -1837,7 +2408,7 @@ static const u32 vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u32 skl_plane_formats[] = {
+static const u32 chv_pipe_b_sprite_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1846,13 +2417,15 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
-static const u32 icl_plane_formats[] = {
+static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1861,19 +2434,15 @@ static const u32 icl_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_hdr_plane_formats[] = {
+static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1884,21 +2453,14 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_ARGB16161616F,
- DRM_FORMAT_ABGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
+ DRM_FORMAT_NV12,
};
-static const u32 skl_planar_formats[] = {
+static const u32 glk_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1907,14 +2469,19 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
};
-static const u32 glk_planar_formats[] = {
+static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1923,17 +2490,21 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_planar_formats[] = {
+static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1942,6 +2513,8 @@ static const u32 icl_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -1958,7 +2531,7 @@ static const u32 icl_planar_formats[] = {
DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_hdr_planar_formats[] = {
+static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1967,6 +2540,8 @@ static const u32 icl_hdr_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ARGB16161616F,
@@ -2005,6 +2580,23 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -2045,6 +2637,10 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2070,6 +2666,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
}
switch (format) {
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
@@ -2077,6 +2674,8 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2121,6 +2720,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2153,6 +2754,75 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
+static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+{
+ return plane_id < PLANE_SPRITE4;
+}
+
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (!gen12_plane_supports_mc_ccs(plane->id))
+ return false;
+ /* fall through */
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (is_ccs_modifier(modifier))
+ return true;
+ /* fall through */
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
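To make the fall-through chains above concrete, here is a minimal sketch of the results a few (format, modifier) queries would produce; the wrapper name and the WARN_ON-style self-test form are hypothetical and only call gen12_plane_format_mod_supported() as defined in this hunk, assuming is_ccs_modifier() covers the gen12 CCS modifiers and the plane supports MC CCS:

static void example_gen12_format_mod_checks(struct drm_plane *plane)
{
	/* Render compression (RC CCS) is limited to the 8888 RGB formats. */
	WARN_ON(!gen12_plane_format_mod_supported(plane, DRM_FORMAT_XRGB8888,
						  I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS));
	WARN_ON(gen12_plane_format_mod_supported(plane, DRM_FORMAT_RGB565,
						 I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS));

	/* Media compression (MC CCS) additionally covers the YUV formats,
	 * on planes where gen12_plane_supports_mc_ccs() is true. */
	WARN_ON(!gen12_plane_format_mod_supported(plane, DRM_FORMAT_NV12,
						  I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS));

	/* Plain Y-tiling works for everything in the gen12 format lists. */
	WARN_ON(!gen12_plane_format_mod_supported(plane, DRM_FORMAT_RGB565,
						  I915_FORMAT_MOD_Y_TILED));
}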
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -2189,6 +2859,15 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2201,9 +2880,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return plane_id <= PLANE_SPRITE3;
-
/* Display WA #0870: skl, bxt */
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
@@ -2217,6 +2893,56 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
return true;
}
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(skl_planar_formats);
+ return skl_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(glk_planar_formats);
+ return glk_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ return icl_hdr_plane_formats;
+ } else if (icl_is_nv12_y_plane(plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+ return icl_sdr_y_plane_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+ return icl_sdr_uv_plane_formats;
+ }
+}
+
+static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+{
+ if (gen12_plane_supports_mc_ccs(plane_id))
+ return gen12_plane_format_modifiers_mc_ccs;
+ else
+ return gen12_plane_format_modifiers_rc_ccs;
+}
+
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2238,6 +2964,7 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
+ const struct drm_plane_funcs *plane_funcs;
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
@@ -2267,39 +2994,29 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = skl_disable_plane;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
- if (icl_is_nv12_y_plane(plane_id))
- plane->update_slave = icl_update_slave;
+ plane->min_cdclk = skl_plane_min_cdclk;
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- formats = icl_hdr_planar_formats;
- num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
- } else if (INTEL_GEN(dev_priv) >= 11) {
- formats = icl_planar_formats;
- num_formats = ARRAY_SIZE(icl_planar_formats);
- } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
- formats = glk_planar_formats;
- num_formats = ARRAY_SIZE(glk_planar_formats);
- } else {
- formats = skl_planar_formats;
- num_formats = ARRAY_SIZE(skl_planar_formats);
- }
- } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
- formats = icl_hdr_plane_formats;
- num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
- } else if (INTEL_GEN(dev_priv) >= 11) {
- formats = icl_plane_formats;
- num_formats = ARRAY_SIZE(icl_plane_formats);
- } else {
- formats = skl_plane_formats;
- num_formats = ARRAY_SIZE(skl_plane_formats);
- }
+ if (INTEL_GEN(dev_priv) >= 11)
+ formats = icl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ formats = glk_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else
+ formats = skl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ modifiers = gen12_get_plane_modifiers(plane_id);
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
+ plane_funcs = &skl_plane_funcs;
+ }
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
@@ -2309,7 +3026,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
possible_crtcs = BIT(pipe);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, &skl_plane_funcs,
+ possible_crtcs, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -2342,6 +3059,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -2363,7 +3082,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -2379,9 +3098,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->min_cdclk = vlv_plane_min_cdclk;
- formats = vlv_plane_formats;
- num_formats = ARRAY_SIZE(vlv_plane_formats);
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ formats = chv_pipe_b_sprite_formats;
+ num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
+ } else {
+ formats = vlv_plane_formats;
+ num_formats = ARRAY_SIZE(vlv_plane_formats);
+ }
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
@@ -2392,6 +3117,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else
+ plane->min_cdclk = ivb_sprite_min_cdclk;
+
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
@@ -2403,6 +3133,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN(dev_priv, 6)) {
@@ -2453,6 +3184,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ zpos = sprite + 1;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 500f6bffb139..5eeaa92420d1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-#include "i915_drv.h"
#include "intel_display.h"
struct drm_device;
@@ -18,7 +17,6 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
-bool is_planar_yuv_format(u32 pixelformat);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -49,11 +47,13 @@ static inline u8 icl_hdr_plane_mask(void)
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
-static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_hdr_plane_mask() & BIT(plane_id);
-}
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
new file mode 100644
index 000000000000..7773169b7331
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_dp_mst.h"
+#include "intel_tc.h"
+
+static const char *tc_port_mode_name(enum tc_port_mode mode)
+{
+ static const char * const names[] = {
+ [TC_PORT_TBT_ALT] = "tbt-alt",
+ [TC_PORT_DP_ALT] = "dp-alt",
+ [TC_PORT_LEGACY] = "legacy",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(names)))
+ mode = TC_PORT_TBT_ALT;
+
+ return names[mode];
+}
+
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915,
+ struct intel_digital_port *dig_port)
+{
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+ u32 modular_fia;
+
+ if (INTEL_INFO(i915)->display.has_modular_fia) {
+ modular_fia = intel_uncore_read(&i915->uncore,
+ PORT_TX_DFLEXDPSP(FIA1));
+ modular_fia &= MODULAR_FIA_MASK;
+ } else {
+ modular_fia = 0;
+ }
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In SOCs that have more

+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (modular_fia) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
+}
+
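A minimal sketch of the mapping computed above, assuming a SoC whose TC ports are split two-per-FIA when Modular FIA is present; the example_* helper names are illustrative only:

/* Illustrative only: mirrors the tc_port / 2 and tc_port % 2 split above. */
static inline int example_tc_port_to_fia(int tc_port, bool modular_fia)
{
	return modular_fia ? tc_port / 2 : FIA1;
}

static inline int example_tc_port_to_fia_idx(int tc_port, bool modular_fia)
{
	return modular_fia ? tc_port % 2 : tc_port;
}

So with Modular FIA, TC ports 0 and 1 share the first FIA (indices 0 and 1) and ports 2 and 3 share the next one; without it, everything sits on FIA1 indexed directly by the TC port number.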
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 lane_mask;
+
+ lane_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ WARN_ON(lane_mask == 0xffffffff);
+
+ lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
+ return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 pin_mask;
+
+ pin_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+
+ WARN_ON(pin_mask == 0xffffffff);
+
+ return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
+ DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+ u32 lane_mask;
+
+ if (dig_port->tc_mode != TC_PORT_DP_ALT)
+ return 4;
+
+ lane_mask = 0;
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_tc_port_get_lane_mask(dig_port);
+
+ switch (lane_mask) {
+ default:
+ MISSING_CASE(lane_mask);
+ /* fall-through */
+ case 0x1:
+ case 0x2:
+ case 0x4:
+ case 0x8:
+ return 1;
+ case 0x3:
+ case 0xc:
+ return 2;
+ case 0xf:
+ return 4;
+ }
+}
+
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
+
+ switch (required_lanes) {
+ case 1:
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
+ break;
+ case 2:
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
+ break;
+ default:
+ MISSING_CASE(required_lanes);
+ }
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
+}
+
+static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
+ u32 live_status_mask)
+{
+ u32 valid_hpd_mask;
+
+ if (dig_port->tc_legacy_port)
+ valid_hpd_mask = BIT(TC_PORT_LEGACY);
+ else
+ valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
+ BIT(TC_PORT_TBT_ALT);
+
+ if (!(live_status_mask & ~valid_hpd_mask))
+ return;
+
+ /* If live status mismatches the VBT flag, trust the live status. */
+ DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+ dig_port->tc_port_name, live_status_mask);
+
+ dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
+}
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 mask = 0;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
+ return mask;
+ }
+
+ if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
+ mask |= BIT(TC_PORT_TBT_ALT);
+ if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
+ mask |= BIT(TC_PORT_DP_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!WARN_ON(hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
+}
+
+static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+ dig_port->tc_port_name,
+ enableddisabled(enable));
+
+ return false;
+ }
+
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+ if (!enable)
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
+
+ if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
+ DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
+ dig_port->tc_port_name);
+
+ return true;
+}
+
+static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
+ dig_port->tc_port_name);
+ return true;
+ }
+
+ return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer ownership with the controller and
+ * set the type-C power state.
+ */
+static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ int max_lanes;
+
+ if (!icl_tc_phy_status_complete(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
+ goto out_set_tbt_alt_mode;
+ }
+
+ if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
+ !WARN_ON(dig_port->tc_legacy_port))
+ goto out_set_tbt_alt_mode;
+
+ max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
+ if (dig_port->tc_legacy_port) {
+ WARN_ON(max_lanes != 4);
+ dig_port->tc_mode = TC_PORT_LEGACY;
+
+ return;
+ }
+
+ /*
+ * Now we have to re-check the live state, in case the port recently
+ * became disconnected. Not necessary for legacy mode.
+ */
+ if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
+ DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
+ goto out_set_safe_mode;
+ }
+
+ if (max_lanes < required_lanes) {
+ DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
+ goto out_set_safe_mode;
+ }
+
+ dig_port->tc_mode = TC_PORT_DP_ALT;
+
+ return;
+
+out_set_safe_mode:
+ icl_tc_phy_set_safe_mode(dig_port, true);
+out_set_tbt_alt_mode:
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->tc_mode) {
+ case TC_PORT_LEGACY:
+ /* Nothing to do, we never disconnect from legacy mode */
+ break;
+ case TC_PORT_DP_ALT:
+ icl_tc_phy_set_safe_mode(dig_port, true);
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+ break;
+ case TC_PORT_TBT_ALT:
+ /* Nothing to do, we stay in TBT-alt mode */
+ break;
+ default:
+ MISSING_CASE(dig_port->tc_mode);
+ }
+}
+
+static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+{
+ if (!icl_tc_phy_status_complete(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
+ return dig_port->tc_mode == TC_PORT_TBT_ALT;
+ }
+
+ if (icl_tc_phy_is_in_safe_mode(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
+ dig_port->tc_port_name);
+
+ return false;
+ }
+
+ return dig_port->tc_mode == TC_PORT_DP_ALT ||
+ dig_port->tc_mode == TC_PORT_LEGACY;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
+ enum tc_port_mode mode;
+
+ if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
+ return TC_PORT_TBT_ALT;
+
+ mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
+ if (live_status_mask) {
+ enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+
+ if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
+ mode = live_mode;
+ }
+
+ return mode;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return icl_tc_phy_status_complete(dig_port) &&
+ dig_port->tc_legacy_port ? TC_PORT_LEGACY :
+ TC_PORT_TBT_ALT;
+}
+
+static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port_mode old_tc_mode = dig_port->tc_mode;
+
+ intel_display_power_flush_work(i915);
+ WARN_ON(intel_display_power_is_enabled(i915,
+ intel_aux_power_domain(dig_port)));
+
+ icl_tc_phy_disconnect(dig_port);
+ icl_tc_phy_connect(dig_port, required_lanes);
+
+ DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
+}
+
+static void
+intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
+ int refcount)
+{
+ WARN_ON(dig_port->tc_link_refcount);
+ dig_port->tc_link_refcount = refcount;
+}
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+{
+ struct intel_encoder *encoder = &dig_port->base;
+ int active_links = 0;
+
+ mutex_lock(&dig_port->tc_lock);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (dig_port->dp.is_mst)
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ else if (encoder->base.crtc)
+ active_links = to_intel_crtc(encoder->base.crtc)->active;
+
+ if (active_links) {
+ if (!icl_tc_phy_is_connected(dig_port))
+ DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+ intel_tc_port_link_init_refcount(dig_port, active_links);
+
+ goto out;
+ }
+
+ if (dig_port->tc_legacy_port)
+ icl_tc_phy_connect(dig_port, 1);
+
+out:
+ DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver instead of adding the additional
+ * concept of "usable" and make everything check for "connected and usable" we
+ * define a port as "connected" when it is not only connected, but also when it
+ * is usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+{
+ bool is_connected;
+
+ intel_tc_port_lock(dig_port);
+ is_connected = tc_port_live_status_mask(dig_port) &
+ BIT(dig_port->tc_mode);
+ intel_tc_port_unlock(dig_port);
+
+ return is_connected;
+}
+
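Given the "connected implies usable" policy explained in the comment above, a connector detect path only needs the single query below. This is a sketch with a hypothetical caller, not code from the patch:

static enum drm_connector_status
example_tc_detect(struct intel_digital_port *dig_port)
{
	/* "Connected and usable" is folded into one check by design. */
	return intel_tc_port_connected(dig_port) ?
		connector_status_connected :
		connector_status_disconnected;
}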
+static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
+
+ mutex_lock(&dig_port->tc_lock);
+
+ if (!dig_port->tc_link_refcount &&
+ intel_tc_port_needs_reset(dig_port))
+ intel_tc_port_reset_mode(dig_port, required_lanes);
+
+ WARN_ON(dig_port->tc_lock_wakeref);
+ dig_port->tc_lock_wakeref = wakeref;
+}
+
+void intel_tc_port_lock(struct intel_digital_port *dig_port)
+{
+ __intel_tc_port_lock(dig_port, 1);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
+
+ mutex_unlock(&dig_port->tc_lock);
+
+ intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
+ wakeref);
+}
+
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
+{
+ return mutex_is_locked(&dig_port->tc_lock) ||
+ dig_port->tc_link_refcount;
+}
+
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ __intel_tc_port_lock(dig_port, required_lanes);
+ dig_port->tc_link_refcount++;
+ intel_tc_port_unlock(dig_port);
+}
+
+void intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ mutex_lock(&dig_port->tc_lock);
+ dig_port->tc_link_refcount--;
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ if (WARN_ON(tc_port == PORT_TC_NONE))
+ return;
+
+ snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
+ "%c/TC#%d", port_name(port), tc_port + 1);
+
+ mutex_init(&dig_port->tc_lock);
+ dig_port->tc_legacy_port = is_legacy;
+ dig_port->tc_link_refcount = 0;
+ tc_port_load_fia_params(i915, dig_port);
+}
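A rough sketch of how the lock and link-refcount API above is intended to be used by an encoder (the example_* wrappers are placeholders, not part of this patch): take the lock around short PHY/FIA register accesses, and hold a link reference for as long as an output is enabled so the port mode cannot be reset underneath it.

static void example_query_phy(struct intel_digital_port *dig_port)
{
	intel_tc_port_lock(dig_port);
	/* ... read lane/pin assignments, DFLEX registers, etc. ... */
	intel_tc_port_unlock(dig_port);
}

static void example_enable_output(struct intel_digital_port *dig_port, int lanes)
{
	/* Pins the current TC mode; the reset in __intel_tc_port_lock() is
	 * skipped while the refcount is non-zero. */
	intel_tc_port_get_link(dig_port, lanes);
	/* ... program and enable the DDI/DP link ... */
}

static void example_disable_output(struct intel_digital_port *dig_port)
{
	/* ... disable the link ... */
	intel_tc_port_put_link(dig_port);	/* mode resets allowed again */
}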
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
new file mode 100644
index 000000000000..463f1b3c836f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TC_H__
+#define __INTEL_TC_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct intel_digital_port;
+
+bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes);
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
+void intel_tc_port_lock(struct intel_digital_port *dig_port);
+void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes);
+void intel_tc_port_put_link(struct intel_digital_port *dig_port);
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+
+#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 0a95df6c6a57..c75e0ceecee6 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -37,7 +37,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
@@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
return container_of(encoder, struct intel_tv, base);
}
-static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
{
return enc_to_tv(intel_attached_encoder(connector));
}
@@ -924,7 +924,7 @@ intel_enable_tv(struct intel_encoder *encoder,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(dev_priv,
- to_intel_crtc(pipe_config->base.crtc)->pipe);
+ to_intel_crtc(pipe_config->uapi.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
@@ -961,11 +961,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
- < 1000)
- return MODE_OK;
+ if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
+ return MODE_CLOCK_RANGE;
- return MODE_CLOCK_RANGE;
+ return MODE_OK;
}
static int
@@ -1086,7 +1085,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
struct drm_display_mode mode = {};
u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
struct tv_mode tv_mode = {};
@@ -1189,7 +1188,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
int hdisplay = adjusted_mode->crtc_hdisplay;
int vdisplay = adjusted_mode->crtc_vdisplay;
@@ -1418,7 +1417,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
@@ -1528,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* Filter ctl must be set before TV_WIN_SIZE */
tv_filter_ctl = TV_AUTO_SCALE;
@@ -1663,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i;
@@ -1690,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
@@ -1702,7 +1701,7 @@ intel_tv_detect(struct drm_connector *connector,
struct intel_load_detect_pipe tmp;
int ret;
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret < 0)
return ret;
@@ -1948,9 +1947,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
intel_encoder->cloneable = 0;
- intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 5ddbe71ab423..4d0c23b29248 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -114,6 +114,8 @@ enum bdb_block_id {
BDB_LVDS_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
+ BDB_COMPRESSION_PARAMETERS = 56,
+ BDB_GENERIC_DTD = 58,
BDB_SKIP = 254, /* VBIOS private block, ignore */
};
@@ -291,6 +293,8 @@ struct bdb_general_features {
#define DVO_PORT_HDMIE 12 /* 193 */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
+#define DVO_PORT_DPG 15
+#define DVO_PORT_HDMIG 16
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
@@ -310,13 +314,13 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_F,
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
+ TGL_DDC_BUS_DDI_C,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
ICL_DDC_BUS_PORT_4,
- MCC_DDC_BUS_DDI_A = 0x1,
- MCC_DDC_BUS_DDI_B,
- MCC_DDC_BUS_DDI_C = 0x4,
+ TGL_DDC_BUS_PORT_5,
+ TGL_DDC_BUS_PORT_6,
};
#define DP_AUX_A 0x40
@@ -325,6 +329,7 @@ enum vbt_gmbus_ddi {
#define DP_AUX_D 0x30
#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
+#define DP_AUX_G 0x70
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
@@ -364,7 +369,7 @@ struct child_device_config {
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
- u8 compression_method:1; /* 198 */
+ u8 compression_method_cps:1; /* 198 */
u8 ganged_edp:1; /* 202 */
u8 reserved0:4;
u8 compression_structure_index:4; /* 198 */
@@ -789,6 +794,35 @@ struct bdb_lfp_backlight_data {
} __packed;
/*
+ * Block 44 - LFP Power Conservation Features Block
+ */
+
+struct als_data_entry {
+ u16 backlight_adjust;
+ u16 lux;
+} __packed;
+
+struct agressiveness_profile_entry {
+ u8 dpst_agressiveness : 4;
+ u8 lace_agressiveness : 4;
+} __packed;
+
+struct bdb_lfp_power {
+ u8 lfp_feature_bits;
+ struct als_data_entry als[5];
+ u8 lace_aggressiveness_profile;
+ u16 dpst;
+ u16 psr;
+ u16 drrs;
+ u16 lace_support;
+ u16 adt;
+ u16 dmrrs;
+ u16 adb;
+ u16 lace_enabled_status;
+ struct agressiveness_profile_entry aggressivenes[16];
+} __packed;
+
+/*
* Block 52 - MIPI Configuration Block
*/
@@ -808,4 +842,85 @@ struct bdb_mipi_sequence {
u8 data[0]; /* up to 6 variable length blocks */
} __packed;
+/*
+ * Block 56 - Compression Parameters
+ */
+
+#define VBT_RC_BUFFER_BLOCK_SIZE_1KB 0
+#define VBT_RC_BUFFER_BLOCK_SIZE_4KB 1
+#define VBT_RC_BUFFER_BLOCK_SIZE_16KB 2
+#define VBT_RC_BUFFER_BLOCK_SIZE_64KB 3
+
+#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value) ((vbt_value) + 8) /* bits */
+#define VBT_DSC_MAX_BPP(vbt_value) (6 + (vbt_value) * 2)
+
+struct dsc_compression_parameters_entry {
+ u8 version_major:4;
+ u8 version_minor:4;
+
+ u8 rc_buffer_block_size:2;
+ u8 reserved1:6;
+
+ /*
+ * Buffer size in bytes:
+ *
+ * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes
+ */
+ u8 rc_buffer_size;
+ u32 slices_per_line;
+
+ u8 line_buffer_depth:4;
+ u8 reserved2:4;
+
+ /* Flag Bits 1 */
+ u8 block_prediction_enable:1;
+ u8 reserved3:7;
+
+ u8 max_bpp; /* mapping */
+
+ /* Color depth capabilities */
+ u8 reserved4:1;
+ u8 support_8bpc:1;
+ u8 support_10bpc:1;
+ u8 support_12bpc:1;
+ u8 reserved5:4;
+
+ u16 slice_height;
+} __packed;
+
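As a worked example of the buffer-size formula in the comment above (illustrative helper, not used by the driver): with rc_buffer_block_size = VBT_RC_BUFFER_BLOCK_SIZE_4KB and rc_buffer_size = 3, the buffer is 4^1 * 1024 * (3 + 1) = 16384 bytes.

/* Illustrative decode of the two fields; 4^n is written as 1 << (2 * n). */
static inline u32 example_dsc_rc_buffer_bytes(u8 rc_buffer_block_size,
					      u8 rc_buffer_size)
{
	return (1u << (2 * rc_buffer_block_size)) * 1024 *
		(rc_buffer_size + 1);
}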
+struct bdb_compression_parameters {
+ u16 entry_size;
+ struct dsc_compression_parameters_entry data[16];
+} __packed;
+
+/*
+ * Block 58 - Generic DTD Block
+ */
+
+struct generic_dtd_entry {
+ u32 pixel_clock;
+ u16 hactive;
+ u16 hblank;
+ u16 hfront_porch;
+ u16 hsync;
+ u16 vactive;
+ u16 vblank;
+ u16 vfront_porch;
+ u16 vsync;
+ u16 width_mm;
+ u16 height_mm;
+
+ /* Flags */
+ u8 rsvd_flags:6;
+ u8 vsync_positive_polarity:1;
+ u8 hsync_positive_polarity:1;
+
+ u8 rsvd[3];
+} __packed;
+
+struct bdb_generic_dtd {
+ u16 gdtd_size;
+ struct generic_dtd_entry dtd[]; /* up to 24 DTDs */
+} __packed;
+
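The generic DTD entry maps onto drm_display_mode timings in the usual way (sync start = active + front porch, total = active + blank). The sketch below is illustrative only: the helper name is made up, and it assumes pixel_clock is stored in the kHz units that drm_display_mode::clock expects.

static void example_generic_dtd_to_mode(const struct generic_dtd_entry *dtd,
					struct drm_display_mode *mode)
{
	mode->hdisplay = dtd->hactive;
	mode->hsync_start = dtd->hactive + dtd->hfront_porch;
	mode->hsync_end = mode->hsync_start + dtd->hsync;
	mode->htotal = dtd->hactive + dtd->hblank;

	mode->vdisplay = dtd->vactive;
	mode->vsync_start = dtd->vactive + dtd->vfront_porch;
	mode->vsync_end = mode->vsync_start + dtd->vsync;
	mode->vtotal = dtd->vactive + dtd->vblank;

	mode->width_mm = dtd->width_mm;
	mode->height_mm = dtd->height_mm;

	/* Assumption: the VBT stores the pixel clock in kHz. */
	mode->clock = dtd->pixel_clock;

	mode->flags = dtd->hsync_positive_polarity ?
		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
	mode->flags |= dtd->vsync_positive_polarity ?
		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
}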
#endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..9e6aaa302e40 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,7 +9,8 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
+#include "intel_dsi.h"
#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
@@ -30,10 +31,8 @@ enum COLUMN_INDEX_BPC {
MAX_COLUMN_INDEX
};
-#define DSC_SUPPORTED_VERSION_MIN 1
-
/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */
-static u16 rc_buf_thresh[] = {
+static const u16 rc_buf_thresh[] = {
896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
7744, 7872, 8000, 8064
};
@@ -53,7 +52,7 @@ struct rc_parameters {
* Selected Rate Control Related Parameter Recommended Values
* from DSC_v1.11 spec & C Model release: DSC_model_20161212
*/
-static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = {
{
/* 6BPP/8BPC */
{ 768, 15, 6144, 3, 13, 11, 11, {
@@ -319,63 +318,84 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
}
}
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config)
+static const struct rc_parameters *get_rc_params(u16 compressed_bpp,
+ u8 bits_per_component)
+{
+ int row_index, column_index;
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ if (row_index < 0)
+ return NULL;
+
+ column_index = get_column_index_for_rc_params(bits_per_component);
+ if (column_index < 0)
+ return NULL;
+
+ return &rc_parameters[row_index][column_index];
+}
+
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
- u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+
+ if (!INTEL_INFO(i915)->display.has_dsc)
+ return false;
+
+ /* On TGL, DSC is supported on all Pipes */
+ if (INTEL_GEN(i915) >= 12)
+ return true;
+
+ if (INTEL_GEN(i915) >= 10 &&
+ (pipe != PIPE_A ||
+ (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)))
+ return true;
+
+ return false;
+}
+
+static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (INTEL_GEN(i915) >= 12)
+ return true;
+
+ if (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)
+ return false;
+
+ /* There's no pipe A DSC engine on ICL */
+ WARN_ON(crtc->pipe == PIPE_A);
+
+ return true;
+}
+
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
+ u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ const struct rc_parameters *rc_params;
u8 i = 0;
- int row_index = 0;
- int column_index = 0;
- u8 line_buf_depth = 0;
- vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
- pipe_config->dsc_params.slice_count);
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8.
- * Eventually add logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
-
- /* Values filled from DSC Sink DPCD */
- vdsc_cfg->dsc_version_major =
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
- vdsc_cfg->dsc_version_minor =
- min(DSC_SUPPORTED_VERSION_MIN,
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
-
- vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
- DP_DSC_RGB;
-
- line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
- if (!line_buf_depth) {
- DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
- return -EINVAL;
- }
- if (vdsc_cfg->dsc_version_minor == 2)
- vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
- else
- vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+ pipe_config->dsc.slice_count);
/* Gen 11 does not support YCbCr */
vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
- vdsc_cfg->block_pred_enable =
- intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
- DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
/* Gen 11 only supports integral values of bpp */
vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
@@ -399,39 +419,29 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->rc_buf_thresh[13] = 0x7D;
}
- row_index = get_row_index_for_rc_params(compressed_bpp);
- column_index =
- get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
-
- if (row_index < 0 || column_index < 0)
+ rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component);
+ if (!rc_params)
return -EINVAL;
- vdsc_cfg->first_line_bpg_offset =
- rc_params[row_index][column_index].first_line_bpg_offset;
- vdsc_cfg->initial_xmit_delay =
- rc_params[row_index][column_index].initial_xmit_delay;
- vdsc_cfg->initial_offset =
- rc_params[row_index][column_index].initial_offset;
- vdsc_cfg->flatness_min_qp =
- rc_params[row_index][column_index].flatness_min_qp;
- vdsc_cfg->flatness_max_qp =
- rc_params[row_index][column_index].flatness_max_qp;
- vdsc_cfg->rc_quant_incr_limit0 =
- rc_params[row_index][column_index].rc_quant_incr_limit0;
- vdsc_cfg->rc_quant_incr_limit1 =
- rc_params[row_index][column_index].rc_quant_incr_limit1;
+ vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
+ vdsc_cfg->initial_offset = rc_params->initial_offset;
+ vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp;
+ vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
vdsc_cfg->rc_range_params[i].range_min_qp =
- rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ rc_params->rc_range_params[i].range_min_qp;
vdsc_cfg->rc_range_params[i].range_max_qp =
- rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ rc_params->rc_range_params[i].range_max_qp;
/*
 * Range BPG Offset uses 2's complement and is only 6 bits. So
* mask it to get only 6 bits.
*/
vdsc_cfg->rc_range_params[i].range_bpg_offset =
- rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ rc_params->rc_range_params[i].range_bpg_offset &
DSC_RANGE_BPG_OFFSET_MASK;
}
@@ -453,39 +463,46 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
- return drm_dsc_compute_rc_parameters(vdsc_cfg);
+ return 0;
}
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
{
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
- * On ICL VDSC/joining for eDP transcoder uses a separate power well PW2
- * This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain.
- * For any other transcoder, VDSC/joining uses the power well associated
- * with the pipe/transcoder in use. Hence another reference on the
- * transcoder power domain will suffice.
+ * VDSC/joining uses a separate power well, PW2, and requires
+ * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
+ *
+ * - ICL eDP/DSI transcoder
+ * - TGL pipe A
+ *
+ * For any other pipe, VDSC/joining uses the power well associated with
+ * the pipe in use. Hence another reference on the pipe power domain
+ * will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (cpu_transcoder == TRANSCODER_EDP)
- return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+ else if (is_pipe_dsc(crtc_state))
+ return POWER_DOMAIN_PIPE(pipe);
else
- return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
}
-static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_dsc_pps_configure(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum pipe pipe = crtc->pipe;
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
- u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
int i = 0;
/* Populate PICTURE_PARAMETER_SET_0 registers */
@@ -502,17 +519,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
DRM_INFO("PPS0 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
pps_val);
}
@@ -521,17 +538,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
DRM_INFO("PPS1 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
pps_val);
}
@@ -541,17 +558,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
DRM_INFO("PPS2 = 0x%08x\n", pps_val);
- if (encoder->type == INTEL_OUTPUT_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
pps_val);
}
@@ -561,17 +578,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
DRM_INFO("PPS3 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
pps_val);
}
@@ -581,17 +598,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
DRM_INFO("PPS4 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
pps_val);
}
@@ -601,17 +618,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
DRM_INFO("PPS5 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
pps_val);
}
@@ -623,17 +640,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
DRM_INFO("PPS6 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
pps_val);
}
@@ -643,17 +660,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
DRM_INFO("PPS7 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
pps_val);
}
@@ -663,17 +680,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
DRM_INFO("PPS8 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
pps_val);
}
@@ -683,17 +700,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
DRM_INFO("PPS9 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
pps_val);
}
@@ -705,17 +722,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
DRM_INFO("PPS10 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
pps_val);
}
@@ -728,17 +745,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
DRM_INFO("PPS16 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
pps_val);
}
@@ -752,12 +769,12 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
@@ -776,7 +793,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_buf_thresh_dword[2]);
I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
@@ -801,7 +818,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
@@ -818,7 +835,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
@@ -853,7 +870,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
@@ -874,12 +891,79 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
}
}
-static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_dsc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 dss_ctl1, dss_ctl2, val;
+
+ if (!intel_dsc_source_support(encoder, crtc_state))
+ return;
+
+ power_domain = intel_dsc_power_domain(crtc_state);
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return;
+
+ if (!is_pipe_dsc(crtc_state)) {
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ } else {
+ dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
+ dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+ }
+
+ crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ if (!crtc_state->dsc.compression_enable)
+ goto out;
+
+ crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
+ (dss_ctl1 & JOINER_ENABLE);
+
+ /* FIXME: add more state readout as needed */
+
+ /* PPS1 */
+ if (!is_pipe_dsc(crtc_state))
+ val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+ else
+ val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ vdsc_cfg->bits_per_pixel = val;
+ crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+}
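
/*
 * A minimal sketch (not part of the patch) of the fixed-point conversion
 * behind the ">> 4" above: the DSC bits_per_pixel field carries four
 * fractional bits (units of 1/16 bpp), so shifting right by four yields
 * the integer compressed bpp. The helper name is hypothetical.
 */
static inline int example_dsc_bpp_int(u16 bits_per_pixel)
{
	/* Drop the four fractional bits of the U6.4 value. */
	return bits_per_pixel >> 4;
}
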
+
+static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_picture_parameter_set pps;
+ enum port port;
+
+ drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_picture_parameter_set(dsi, &pps);
+ mipi_dsi_compression_mode(dsi, true);
+ }
+}
+
+static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
@@ -896,25 +980,28 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1_val = 0;
u32 dss_ctl2_val = 0;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
/* Enable Power wells for VDSC/joining */
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
- intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+ intel_dsc_pps_configure(encoder, crtc_state);
- intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+ if (encoder->type == INTEL_OUTPUT_DSI)
+ intel_dsc_dsi_pps_write(encoder, crtc_state);
+ else
+ intel_dsc_dp_pps_write(encoder, crtc_state);
- if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
} else {
@@ -922,7 +1009,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
@@ -932,16 +1019,16 @@ void intel_dsc_enable(struct intel_encoder *encoder,
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
- if (!old_crtc_state->dsc_params.compression_enable)
+ if (!old_crtc_state->dsc.compression_enable)
return;
- if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(old_crtc_state)) {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 90d3f6017fcb..e56a3254c214 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -6,15 +6,20 @@
#ifndef __INTEL_VDSC_H__
#define __INTEL_VDSC_H__
+#include <linux/types.h>
+
struct intel_encoder;
struct intel_crtc_state;
-struct intel_dp;
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config);
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_dsc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
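
/*
 * A minimal usage sketch (assumption, not taken from this patch): the new
 * intel_dsc_get_config() readout would be called from an encoder's
 * ->get_config() path so that crtc_state->dsc mirrors the hardware state;
 * the surrounding hook below is hypothetical.
 */
static void example_encoder_get_config(struct intel_encoder *encoder,
				       struct intel_crtc_state *pipe_config)
{
	/* ... read out link and timing state as usual ... */

	/* Fill crtc_state->dsc from the DSS_CTL and PPS registers. */
	intel_dsc_get_config(encoder, pipe_config);
}
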
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
new file mode 100644
index 000000000000..2ff7293986d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_vga.h"
+
+static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return VLV_VGACNTRL;
+ else if (INTEL_GEN(i915) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ u8 sr1;
+
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(SR01, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1 << 5, VGA_SR_DATA);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ intel_vga_disable(dev_priv);
+ }
+}
+
+void intel_vga_redisable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ /*
+ * This function can be called either from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
+ if (!wakeref)
+ return;
+
+ intel_vga_redisable_power_on(i915);
+
+ intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
+}
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we make sure to touch the VGA MSR register,
+ * so that vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
+
+static int
+intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
+ DRM_ERROR("failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
+ DRM_ERROR("failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static unsigned int
+intel_vga_set_decode(void *cookie, bool enable_decode)
+{
+ struct drm_i915_private *i915 = cookie;
+
+ intel_vga_set_state(i915, enable_decode);
+
+ if (enable_decode)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int intel_vga_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ int ret;
+
+ /*
+ * If we have more than one VGA card, we need to arbitrate access to the
+ * common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and vga_client_register()
+ * fails with -ENODEV.
+ */
+ ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+void intel_vga_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_client_register(pdev, NULL, NULL, NULL);
+}
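
/*
 * A minimal ordering sketch for the helpers added above, assuming a
 * typical driver load path; "example_display_init"/"_fini" and the exact
 * call sites are hypothetical.
 */
static int example_display_init(struct drm_i915_private *i915)
{
	int ret;

	ret = intel_vga_register(i915);	/* join VGA arbitration, if any */
	if (ret)
		return ret;

	intel_vga_disable(i915);	/* we never use the VGA plane */

	return 0;
}

static void example_display_fini(struct drm_i915_private *i915)
{
	intel_vga_unregister(i915);	/* drop our arbiter client */
}
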
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
new file mode 100644
index 000000000000..ba5b55b917f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_H__
+#define __INTEL_VGA_H__
+
+struct drm_i915_private;
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915);
+void intel_vga_disable(struct drm_i915_private *i915);
+void intel_vga_redisable(struct drm_i915_private *i915);
+void intel_vga_redisable_power_on(struct drm_i915_private *i915);
+int intel_vga_register(struct drm_i915_private *i915);
+void intel_vga_unregister(struct drm_i915_private *i915);
+
+#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index e272d826210a..daf4fc3dab6f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -23,7 +23,6 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
-#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
@@ -34,7 +33,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
@@ -84,9 +83,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port), mask, mask,
- 100))
+ if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
@@ -154,10 +152,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port),
- data_mask, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ data_mask, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
@@ -168,10 +164,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port),
- ctrl_mask, 0,
- 50)) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ ctrl_mask, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -180,10 +174,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_INTR_STAT(port),
- data_mask, data_mask,
- 50))
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ data_mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -240,9 +232,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_INTR_STAT(port), mask, mask,
- 100))
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -270,9 +260,9 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
DRM_DEBUG_KMS("\n");
@@ -328,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
bool cold_boot = false;
@@ -359,11 +349,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_MIPIIO_PORT_POWERED,
- GLK_MIPIIO_PORT_POWERED,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPIO port is powergated\n");
}
@@ -379,17 +366,14 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY,
- GLK_PHY_STATUS_PORT_READY,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -413,11 +397,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_ULPS_NOT_ACTIVE,
- 0,
- 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE, 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -440,21 +421,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_DATA_LANE_STOP_STATE,
- GLK_DATA_LANE_STOP_STATE,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE, 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_MIPI_PORT_CTRL(port),
- AFE_LATCHOUT,
- AFE_LATCHOUT,
- 20))
+ if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT, 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -462,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -489,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -540,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -554,17 +529,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_MIPIIO_PORT_POWERED, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
}
}
@@ -572,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -583,9 +556,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
@@ -606,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -633,9 +605,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
- intel_wait_for_register(&dev_priv->uncore,
- port_ctrl, AFE_LATCHOUT, 0,
- 30))
+ intel_de_wait_for_clear(dev_priv, port_ctrl,
+ AFE_LATCHOUT, 30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
@@ -652,8 +623,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
@@ -709,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
@@ -773,11 +744,11 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
@@ -821,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (!IS_GEMINILAKE(dev_priv))
intel_dsi_prepare(encoder, pipe_config);
- /* Power on, try both CRC pmic gpio and VBT */
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
@@ -878,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -910,16 +878,22 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
+ if (IS_GEN9_LP(dev_priv)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ skl_scaler_disable(old_crtc_state);
+ }
+
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
@@ -967,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
/* Assert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- /* Power off, try both CRC pmic gpio and VBT */
intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
/*
* FIXME As we do with eDP, just make a note of the time here
@@ -984,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
bool active = false;
@@ -1060,10 +1031,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
@@ -1073,7 +1044,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
crtc_hblank_start_sw, crtc_hblank_end_sw;
/* FIXME: hw readout should not depend on SW state */
- adjusted_mode_sw = &crtc->config->base.adjusted_mode;
+ adjusted_mode_sw = &crtc->config->hw.adjusted_mode;
/*
* At least one port is active, as encoder->get_config is called only if
@@ -1232,7 +1203,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
}
if (pclk) {
- pipe_config->base.adjusted_mode.crtc_clock = pclk;
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
}
@@ -1256,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
@@ -1343,9 +1314,9 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
@@ -1534,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -1561,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
- /* dispose of the gpios */
- if (intel_dsi->gpio_panel)
- gpiod_put(intel_dsi->gpio_panel);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ intel_dsi_vbt_gpio_cleanup(intel_dsi);
intel_encoder_destroy(encoder);
}
@@ -1644,7 +1612,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector)
return intel_dsi_get_panel_orientation(connector);
}
-static void intel_dsi_add_properties(struct intel_connector *connector)
+static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1847,6 +1815,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_connector *connector;
struct drm_display_mode *current_mode, *fixed_mode;
enum port port;
+ enum pipe pipe;
DRM_DEBUG_KMS("\n");
@@ -1898,11 +1867,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* port C. BXT isn't limited like this.
*/
if (IS_GEN9_LP(dev_priv))
- intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ intel_encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->crtc_mask = BIT(PIPE_A);
+ intel_encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->crtc_mask = BIT(PIPE_B);
+ intel_encoder->pipe_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
@@ -1945,20 +1914,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
vlv_dphy_param_init(intel_dsi);
- /*
- * In case of BYT with CRC PMIC, we need to use GPIO for
- * Panel control.
- */
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
- intel_dsi->gpio_panel =
- gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
-
- if (IS_ERR(intel_dsi->gpio_panel)) {
- DRM_ERROR("Failed to own gpio for panel control\n");
- intel_dsi->gpio_panel = NULL;
- }
- }
+ intel_dsi_vbt_gpio_init(intel_dsi,
+ intel_dsi_get_hw_state(intel_encoder, &pipe));
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -1983,7 +1940,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
- intel_dsi_add_properties(intel_connector);
+ vlv_dsi_add_properties(intel_connector);
return;
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index f016a776a39e..6b89e67b120f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
@@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int ret;
u32 dsi_clk;
@@ -246,11 +246,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED,
- 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
@@ -258,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
@@ -324,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
u32 pclk;
u32 dsi_clk;
u32 dsi_ratio;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -344,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
temp = I915_READ(MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
@@ -458,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -506,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -530,11 +527,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED,
- BXT_DSI_PLL_LOCKED,
- 1)) {
+ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
deleted file mode 100644
index 07e7b8b840ea..000000000000
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-include $(src)/Makefile.header-test # Extra header tests
diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test
deleted file mode 100644
index 61e06cbb4b32..000000000000
--- a/drivers/gpu/drm/i915/gem/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6ad93a09968c..25235ef630c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -9,16 +9,16 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
return 0;
/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- struct reservation_object_list *list;
+ struct dma_resv_list *list;
unsigned int seq;
int err;
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled_rcu(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
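
/*
 * A hedged sketch of the uAPI encoding that __busy_read_flag() and
 * __busy_write_id() above produce; the helper names and the decode side
 * shown here are illustrative, not part of the patch. Readers contribute
 * one bit per engine class in the upper 16 bits, while the single writer
 * is reported as (class + 1) in the lower 16 bits.
 */
static inline bool example_busy_read_by(u32 busy, u16 klass)
{
	return busy & (0x10000u << klass);	/* any reader of this class? */
}

static inline int example_busy_writer_class(u32 busy)
{
	/* Zero in the low word means there is no active writer. */
	return (busy & 0xffff) ? (int)(busy & 0xffff) - 1 : -1;
}
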
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 5295285d5843..34be4c0ee7c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,87 +8,65 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
-
-static DEFINE_SPINLOCK(clflush_lock);
+#include "i915_sw_fence_work.h"
+#include "i915_trace.h"
struct clflush {
- struct dma_fence dma; /* Must be first for dma_fence_free() */
- struct i915_sw_fence wait;
- struct work_struct work;
+ struct dma_fence_work base;
struct drm_i915_gem_object *obj;
};
-static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
-{
- return DRIVER_NAME;
-}
-
-static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
-{
- return "clflush";
-}
-
-static void i915_clflush_release(struct dma_fence *fence)
-{
- struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
-
- i915_sw_fence_fini(&clflush->wait);
-
- BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
- dma_fence_free(&clflush->dma);
-}
-
-static const struct dma_fence_ops i915_clflush_ops = {
- .get_driver_name = i915_clflush_get_driver_name,
- .get_timeline_name = i915_clflush_get_timeline_name,
- .release = i915_clflush_release,
-};
-
-static void __i915_do_clflush(struct drm_i915_gem_object *obj)
+static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}
-static void i915_clflush_work(struct work_struct *work)
+static int clflush_work(struct dma_fence_work *base)
{
- struct clflush *clflush = container_of(work, typeof(*clflush), work);
+ struct clflush *clflush = container_of(base, typeof(*clflush), base);
struct drm_i915_gem_object *obj = clflush->obj;
+ int err;
- if (i915_gem_object_pin_pages(obj)) {
- DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
- goto out;
- }
-
- __i915_do_clflush(obj);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+ __do_clflush(obj);
i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
+ return 0;
+}
+
+static void clflush_release(struct dma_fence_work *base)
+{
+ struct clflush *clflush = container_of(base, typeof(*clflush), base);
- dma_fence_signal(&clflush->dma);
- dma_fence_put(&clflush->dma);
+ i915_gem_object_put(clflush->obj);
}
-static int __i915_sw_fence_call
-i915_clflush_notify(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify state)
+static const struct dma_fence_work_ops clflush_ops = {
+ .name = "clflush",
+ .work = clflush_work,
+ .release = clflush_release,
+};
+
+static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
- struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
+ struct clflush *clflush;
- switch (state) {
- case FENCE_COMPLETE:
- schedule_work(&clflush->work);
- break;
+ GEM_BUG_ON(!obj->cache_dirty);
- case FENCE_FREE:
- dma_fence_put(&clflush->dma);
- break;
- }
+ clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ if (!clflush)
+ return NULL;
- return NOTIFY_DONE;
+ dma_fence_work_init(&clflush->base, &clflush_ops);
+ clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
+
+ return clflush;
}
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
@@ -126,33 +104,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC))
- clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ clflush = clflush_work_create(obj);
if (clflush) {
- GEM_BUG_ON(!obj->cache_dirty);
-
- dma_fence_init(&clflush->dma,
- &i915_clflush_ops,
- &clflush_lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
- i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
-
- clflush->obj = i915_gem_object_get(obj);
- INIT_WORK(&clflush->work, i915_clflush_work);
-
- dma_fence_get(&clflush->dma);
-
- i915_sw_fence_await_reservation(&clflush->wait,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ i915_sw_fence_await_reservation(&clflush->base.chain,
+ obj->base.resv, NULL, true,
+ I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
-
- reservation_object_add_excl_fence(obj->base.resv,
- &clflush->dma);
-
- i915_sw_fence_commit(&clflush->wait);
+ dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_fence_work_commit(&clflush->base);
} else if (obj->mm.pages) {
- __i915_do_clflush(obj);
+ __do_clflush(obj);
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
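
/*
 * A hedged sketch of the dma_fence_work pattern this conversion moves to,
 * with the API usage inferred only from the calls visible in this patch;
 * the "example_op" names are hypothetical.
 */
struct example_op {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

static int example_op_work(struct dma_fence_work *base)
{
	struct example_op *op = container_of(base, typeof(*op), base);

	/*
	 * Runs from a worker once every awaited fence has signaled; a
	 * nonzero return is expected to mark the fence with an error, as
	 * suggested by clflush_work() returning the pin error above. The
	 * real payload (e.g. touching op->obj's pages) is elided here.
	 */
	return 0;
}

static void example_op_release(struct dma_fence_work *base)
{
	struct example_op *op = container_of(base, typeof(*op), base);

	i915_gem_object_put(op->obj);	/* break the obj <-> op cycle */
}

static const struct dma_fence_work_ops example_op_ops = {
	.name = "example",
	.work = example_op_work,
	.release = example_op_release,
};

static void example_op_queue(struct example_op *op,
			     struct drm_i915_gem_object *obj)
{
	dma_fence_work_init(&op->base, &example_op_ops);
	op->obj = i915_gem_object_get(obj);

	/* Order after existing users of the object, like clflush above. */
	i915_sw_fence_await_reservation(&op->base.chain, obj->base.resv,
					NULL, true, I915_FENCE_TIMEOUT,
					I915_FENCE_GFP);

	/* Publish our fence as the new exclusive fence, then submit. */
	dma_resv_add_excl_fence(obj->base.resv, &op->base.dma);
	dma_fence_work_commit(&op->base);
}
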
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 1fdab0767a47..81366aa4812b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -2,10 +2,13 @@
/*
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_client_blt.h"
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
-#include "intel_drv.h"
struct i915_sleeve {
struct i915_vma *vma;
@@ -72,7 +75,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
vma->ops = &proxy_vma_ops;
sleeve->vma = vma;
- sleeve->obj = i915_gem_object_get(obj);
sleeve->pages = pages;
sleeve->page_sizes = *page_sizes;
@@ -85,7 +87,6 @@ err_free:
static void destroy_sleeve(struct i915_sleeve *sleeve)
{
- i915_gem_object_put(sleeve->obj);
kfree(sleeve);
}
@@ -154,32 +155,37 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->gem_context->i915;
- struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
+ struct i915_vma *batch;
int err = w->dma.error;
if (unlikely(err))
goto out_signal;
if (obj->cache_dirty) {
- obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(w->sleeve->pages);
obj->cache_dirty = false;
}
+ obj->read_domains = I915_GEM_GPU_DOMAINS;
+ obj->write_domain = 0;
- /* XXX: we need to kill this */
- mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
- goto out_unlock;
+ goto out_signal;
+
+ batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
- rq = i915_request_create(w->ce);
+ rq = intel_context_create_request(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
/* There's no way the fence has signalled */
@@ -187,20 +193,28 @@ static void clear_pages_worker(struct work_struct *work)
clear_pages_dma_fence_cb))
GEM_BUG_ON(1);
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
if (w->ce->engine->emit_init_breadcrumb) {
err = w->ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
goto out_request;
}
- /* XXX: more feverish nightmares await */
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ /*
+ * w->dma is already exported via (vma|obj)->resv we need only
+ * keep track of the GPU activity within this vma/request, and
+ * propagate the signal from the request to w->dma.
+ */
+ err = __i915_vma_move_to_active(vma, rq);
if (err)
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, w->value);
+ err = w->ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err)) {
i915_request_skip(rq, err);
@@ -208,10 +222,10 @@ out_request:
}
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
@@ -248,14 +262,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
struct i915_page_sizes *page_sizes,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
- sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
if (IS_ERR(sleeve))
return PTR_ERR(sleeve);
@@ -273,11 +284,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
- dma_fence_init(&work->dma,
- &clear_pages_work_ops,
- &fence_lock,
- i915->mm.unordered_timeline,
- 0);
+ dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
i915_gem_object_lock(obj);
@@ -288,7 +295,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
- reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0f2c22a3bcb6..a2e57e62af30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,7 +69,13 @@
#include <drm/i915_drm.h>
+#include "gt/gen6_ppgtt.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -158,7 +164,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
if (!engine)
return ERR_PTR(-EINVAL);
- idx = engine->id;
+ idx = engine->legacy_idx;
} else {
idx = ci->engine_instance;
}
@@ -166,93 +172,71 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
-static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
+static struct i915_address_space *
+context_get_vm_rcu(struct i915_gem_context *ctx)
{
- unsigned int max;
+ GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
- lockdep_assert_held(&i915->contexts.mutex);
+ do {
+ struct i915_address_space *vm;
- if (INTEL_GEN(i915) >= 11)
- max = GEN11_MAX_CONTEXT_HW_ID;
- else if (USES_GUC_SUBMISSION(i915))
/*
- * When using GuC in proxy submission, GuC consumes the
- * highest bit in the context id to indicate proxy submission.
+ * We do not allow downgrading from full-ppgtt [to a shared
+ * global gtt], so ctx->vm cannot become NULL.
*/
- max = MAX_GUC_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
-
- return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
-}
-
-static int steal_hw_id(struct drm_i915_private *i915)
-{
- struct i915_gem_context *ctx, *cn;
- LIST_HEAD(pinned);
- int id = -ENOSPC;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- list_for_each_entry_safe(ctx, cn,
- &i915->contexts.hw_id_list, hw_id_link) {
- if (atomic_read(&ctx->hw_id_pin_count)) {
- list_move_tail(&ctx->hw_id_link, &pinned);
+ vm = rcu_dereference(ctx->vm);
+ if (!kref_get_unless_zero(&vm->ref))
continue;
- }
- GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
- list_del_init(&ctx->hw_id_link);
- id = ctx->hw_id;
- break;
- }
+ /*
+ * This ppgtt may have been reallocated between
+ * the read and the kref, and reassigned to a third
+ * context. In order to avoid inadvertent sharing
+ * of this ppgtt with that third context (and not
+ * src), we have to confirm that we have the same
+ * ppgtt after passing through the strong memory
+ * barrier implied by a successful
+ * kref_get_unless_zero().
+ *
+ * Once we have acquired the current ppgtt of ctx,
+ * we no longer care if it is released from ctx, as
+ * it cannot be reallocated elsewhere.
+ */
- /*
- * Remember how far we got up on the last repossesion scan, so the
- * list is kept in a "least recently scanned" order.
- */
- list_splice_tail(&pinned, &i915->contexts.hw_id_list);
- return id;
+ if (vm == rcu_access_pointer(ctx->vm))
+ return rcu_pointer_handoff(vm);
+
+ i915_vm_put(vm);
+ } while (1);
}
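
/*
 * A hedged, generic restatement of the lookup pattern used above; the
 * "foo" type and helpers are hypothetical. The idea: dereference the
 * RCU-protected slot, try to take a strong reference, then re-check that
 * the slot still points at the same object, since it may have been freed
 * and reallocated between the read and the kref acquisition.
 */
struct foo {
	struct kref ref;
	struct rcu_head rcu;
};

static void foo_release(struct kref *ref)
{
	/* Defer the free until concurrent RCU readers are done. */
	kfree_rcu(container_of(ref, struct foo, ref), rcu);
}

static struct foo *foo_get_rcu(struct foo __rcu **slot)
{
	struct foo *f;

	rcu_read_lock();
	do {
		f = rcu_dereference(*slot);
		if (!f)
			break;			/* nothing published */

		if (!kref_get_unless_zero(&f->ref))
			continue;		/* dying object; reload */

		if (f == rcu_access_pointer(*slot)) {
			f = rcu_pointer_handoff(f);
			break;			/* still current; keep it */
		}

		/* The slot was recycled under us: drop the ref and retry. */
		kref_put(&f->ref, foo_release);
	} while (1);
	rcu_read_unlock();

	return f;
}
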
-static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
+static void intel_context_set_gem(struct intel_context *ce,
+ struct i915_gem_context *ctx)
{
- int ret;
+ GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
+ RCU_INIT_POINTER(ce->gem_context, ctx);
- lockdep_assert_held(&i915->contexts.mutex);
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ ce->ring = __intel_context_ring_size(SZ_16K);
- /*
- * We prefer to steal/stall ourselves and our users over that of the
- * entire system. That may be a little unfair to our users, and
- * even hurt high priority clients. The choice is whether to oomkill
- * something else, or steal a context id.
- */
- ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (unlikely(ret < 0)) {
- ret = steal_hw_id(i915);
- if (ret < 0) /* once again for the correct errno code */
- ret = new_hw_id(i915, GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
+ if (rcu_access_pointer(ctx->vm)) {
+ struct i915_address_space *vm;
- *out = ret;
- return 0;
-}
+ rcu_read_lock();
+ vm = context_get_vm_rcu(ctx); /* hmm */
+ rcu_read_unlock();
-static void release_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
+ i915_vm_put(ce->vm);
+ ce->vm = vm;
+ }
- if (list_empty(&ctx->hw_id_link))
- return;
+ GEM_BUG_ON(ce->timeline);
+ if (ctx->timeline)
+ ce->timeline = intel_timeline_get(ctx->timeline);
- mutex_lock(&i915->contexts.mutex);
- if (!list_empty(&ctx->hw_id_link)) {
- ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
- list_del_init(&ctx->hw_id_link);
- }
- mutex_unlock(&i915->contexts.mutex);
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+ intel_engine_has_semaphores(ce->engine))
+ __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
@@ -261,6 +245,7 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
if (!e->engines[count])
continue;
+ RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -278,6 +263,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
+ const struct intel_gt *gt = &ctx->i915->gt;
struct intel_engine_cs *engine;
struct i915_gem_engines *e;
enum intel_engine_id id;
@@ -287,104 +273,254 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return ERR_PTR(-ENOMEM);
init_rcu_head(&e->rcu);
- for_each_engine(engine, ctx->i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
- ce = intel_context_create(ctx, engine);
+ if (engine->legacy_idx == INVALID_ENGINE)
+ continue;
+
+ GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+ GEM_BUG_ON(e->engines[engine->legacy_idx]);
+
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
- __free_engines(e, id);
+ __free_engines(e, e->num_engines + 1);
return ERR_CAST(ce);
}
- e->engines[id] = ce;
+ intel_context_set_gem(ce, ctx);
+
+ e->engines[engine->legacy_idx] = ce;
+ e->num_engines = max(e->num_engines, engine->legacy_idx);
}
- e->num_engines = id;
+ e->num_engines++;
return e;
}
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- release_hw_id(ctx);
- if (ctx->vm)
- i915_vm_put(ctx->vm);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
- i915_timeline_put(ctx->timeline);
+ intel_timeline_put(ctx->timeline);
- kfree(ctx->name);
put_pid(ctx->pid);
-
- list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
{
- struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ llist_for_each_entry_safe(ctx, cn, list, free_link)
i915_gem_context_free(ctx);
}
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
{
- struct i915_gem_context *ctx;
- struct llist_node *freed;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- freed = llist_del_first(&i915->contexts.free_list);
- if (!freed)
- return;
-
- ctx = container_of(freed, typeof(*ctx), free_link);
- i915_gem_context_free(ctx);
+ contexts_free_all(llist_del_all(&gc->free_list));
}
static void contexts_free_worker(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), contexts.free_work);
+ struct i915_gem_contexts *gc =
+ container_of(work, typeof(*gc), free_work);
- mutex_lock(&i915->drm.struct_mutex);
- contexts_free(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ contexts_flush_free(gc);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &i915->contexts.free_list))
- queue_work(i915->wq, &i915->contexts.free_work);
+ if (llist_add(&ctx->free_link, &gc->free_list))
+ schedule_work(&gc->free_work);
}
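Final frees are now batched on a lockless llist and drained from a plain work item (schedule_work()) rather than the driver workqueue; llist_add() returns true only when the list was previously empty, so the worker is queued once per batch. The same deferral pattern in standalone form, with invented names:

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct widget {
	struct llist_node free_link;
	/* ... payload ... */
};

static struct llist_head widget_free_list;
static struct work_struct widget_free_work;

static void widget_free_worker(struct work_struct *work)
{
	struct widget *w, *next;

	/* Atomically take the whole batch, then free outside any locks. */
	llist_for_each_entry_safe(w, next,
				  llist_del_all(&widget_free_list), free_link)
		kfree(w);
}

static void widget_defer_free(struct widget *w)
{
	/* True only for the first entry, so the work is queued once. */
	if (llist_add(&w->free_link, &widget_free_list))
		schedule_work(&widget_free_work);
}

static void widget_free_init(void)
{
	init_llist_head(&widget_free_list);
	INIT_WORK(&widget_free_work, widget_free_worker);
}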
-static void context_close(struct i915_gem_context *ctx)
+static inline struct i915_gem_engines *
+__context_engines_static(const struct i915_gem_context *ctx)
{
- mutex_lock(&ctx->mutex);
+ return rcu_dereference_protected(ctx->engines, true);
+}
- i915_gem_context_set_closed(ctx);
- ctx->file_priv = ERR_PTR(-EBADF);
+static bool __reset_engine(struct intel_engine_cs *engine)
+{
+ struct intel_gt *gt = engine->gt;
+ bool success = false;
+
+ if (!intel_has_reset_engine(gt))
+ return false;
+
+ if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ success = intel_engine_reset(engine, NULL) == 0;
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
+
+ return success;
+}
+
+static void __reset_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "context closure in %s", ctx->name);
+}
+
+static bool __cancel_engine(struct intel_engine_cs *engine)
+{
+ /*
+ * Send a "high priority pulse" down the engine to cause the
+ * current request to be momentarily preempted. (If it fails to
+ * be preempted, it will be reset.) As we have marked our context
+ * as banned, any incomplete request, including any currently running,
+ * will be skipped following the preemption.
+ *
+ * If there is no hangchecking (one of the reasons why we try to
+ * cancel the context) and no forced preemption, there may be no
+ * means by which we reset the GPU and evict the persistent hog.
+ * Ergo if we are unable to inject a preemptive pulse that can
+ * kill the banned context, we fall back to doing a local reset
+ * instead.
+ */
+ if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+ !intel_engine_pulse(engine))
+ return true;
+
+ /* If we are unable to send a pulse, try resetting this engine. */
+ return __reset_engine(engine);
+}
+
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
/*
- * This context will never again be assinged to HW, so we can
- * reuse its ID for the next context.
+ * Serialise with __i915_request_submit() so that it either sees
+ * the banned flag, or we know the request is already in flight.
*/
- release_hw_id(ctx);
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
+ }
+
+ engine = NULL;
+ if (i915_request_is_active(rq) && !rq->fence.error)
+ engine = rq->engine;
+
+ spin_unlock_irq(&locked->active.lock);
+
+ return engine;
+}
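__active_engine() needs the engine lock for a request that may be migrating between engines (e.g. behind a virtual engine), so it locks the last-seen engine and retries until the pointer is stable under that lock. The same chase-the-lock idiom in isolation (invented names; the driver variant takes the lock with interrupts disabled):

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct runner {
	spinlock_t lock;
};

struct task_item {
	struct runner *runner;	/* only changes under runner->lock */
};

/* Lock whichever runner the task currently belongs to; the caller must
 * unlock the returned runner when done. */
static struct runner *lock_current_runner(struct task_item *t)
{
	struct runner *locked, *now;

	locked = READ_ONCE(t->runner);
	spin_lock(&locked->lock);
	while (unlikely(locked != (now = READ_ONCE(t->runner)))) {
		/* The task moved before we got the lock; follow it. */
		spin_unlock(&locked->lock);
		spin_lock(&now->lock);
		locked = now;
	}

	return locked;
}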
+
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct i915_request *rq;
+
+ if (!ce->timeline)
+ return NULL;
+
+ mutex_lock(&ce->timeline->mutex);
+ list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ /* Check with the backend if the request is inflight */
+ engine = __active_engine(rq);
+ if (engine)
+ break;
+ }
+ mutex_unlock(&ce->timeline->mutex);
+
+ return engine;
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ /*
+ * Map the user's engine back to the actual engines; one virtual
+ * engine will be mapped to multiple engines, and using ctx->engine[]
+ * the same engine may have multiple instances in the user's map.
+ * However, we only care about pending requests, so only include
+ * engines on which there are incomplete requests.
+ */
+ for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ struct intel_engine_cs *engine;
+
+ if (intel_context_set_banned(ce))
+ continue;
+
+ /*
+ * Check the current active state of this context; if we
+ * are currently executing on the GPU we need to evict
+ * ourselves. On the other hand, if we haven't yet been
+ * submitted to the GPU or if everything is complete,
+ * we have nothing to do.
+ */
+ engine = active_engine(ce);
+
+ /* First attempt to gracefully cancel the context */
+ if (engine && !__cancel_engine(engine))
+ /*
+ * If we are unable to send a preemptive pulse to bump
+ * the context from the GPU, we have to resort to a full
+ * reset. We hope the collateral damage is worth it.
+ */
+ __reset_context(ctx, engine);
+ }
+}
+
+static void set_closed_name(struct i915_gem_context *ctx)
+{
+ char *s;
+
+ /* Replace '[]' with '<>' to indicate closed in debug prints */
+
+ s = strrchr(ctx->name, '[');
+ if (!s)
+ return;
+
+ *s = '<';
+
+ s = strchr(s + 1, ']');
+ if (s)
+ *s = '>';
+}
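set_closed_name() only rewrites the bracketed pid suffix, so a context created as "Xorg[1234]" shows up as "Xorg<1234>" in later debug messages. A throwaway userspace demo of the same transformation (not driver code):

#include <stdio.h>
#include <string.h>

static void close_name(char *name)
{
	char *s = strrchr(name, '[');

	if (!s)
		return;
	*s = '<';
	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

int main(void)
{
	char name[] = "Xorg[1234]";

	close_name(name);
	printf("%s\n", name);	/* Xorg<1234> */
	return 0;
}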
+
+static void context_close(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ i915_gem_context_set_closed(ctx);
+ set_closed_name(ctx);
+
+ mutex_lock(&ctx->mutex);
+
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ i915_vm_close(vm);
+
+ ctx->file_priv = ERR_PTR(-EBADF);
/*
* The LUT uses the VMA as a backpointer to unref the object,
@@ -394,31 +530,45 @@ static void context_close(struct i915_gem_context *ctx)
lut_close(ctx);
mutex_unlock(&ctx->mutex);
+
+ /*
+ * If the user has disabled hangchecking, we cannot be sure that
+ * the batches will ever complete after the context is closed,
+ * keeping the context and all resources pinned forever. So in this
+ * case we opt to forcibly kill off all remaining requests on
+ * context close.
+ */
+ if (!i915_gem_context_is_persistent(ctx) ||
+ !i915_modparams.enable_hangcheck)
+ kill_context(ctx);
+
i915_gem_context_put(ctx);
}
-static u32 default_desc_template(const struct drm_i915_private *i915,
- const struct i915_address_space *vm)
+static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
- u32 address_mode;
- u32 desc;
-
- desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (i915_gem_context_is_persistent(ctx) == state)
+ return 0;
- address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (vm && i915_vm_is_4lvl(vm))
- address_mode = INTEL_LEGACY_64B_CONTEXT;
- desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ if (state) {
+ /*
+ * Only contexts that are short-lived [that will expire or be
+ * reset] are allowed to survive past termination. We require
+ * hangcheck to ensure that the persistent requests are healthy.
+ */
+ if (!i915_modparams.enable_hangcheck)
+ return -EINVAL;
- if (IS_GEN(i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
+ i915_gem_context_set_persistence(ctx);
+ } else {
+ /* To cancel a context we use "preempt-to-idle" */
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ return -ENODEV;
- /* TODO: WaDisableLiteRestore when we start using semaphore
- * signalling between Command Streamers
- * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
- */
+ i915_gem_context_clear_persistence(ctx);
+ }
- return desc;
+ return 0;
}
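With persistence now a per-context property, userspace toggles it through the existing context-param ioctl; per the checks above, setting it requires hangcheck (-EINVAL otherwise) and clearing it requires engine preemption (-ENODEV otherwise). A hedged userspace sketch, assuming libdrm's drmIoctl() and the I915_CONTEXT_PARAM_PERSISTENCE value introduced alongside this patch (header paths depend on the libdrm setup):

#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns 0 on success; on failure drmIoctl() returns -1 and errno is
 * EINVAL (hangcheck disabled) or ENODEV (no preemption support). */
static int set_context_persistence(int fd, __u32 ctx_id, int enable)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_PERSISTENCE;
	p.value = enable;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}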
static struct i915_gem_context *
@@ -434,7 +584,6 @@ __create_context(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &i915->contexts.list);
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@@ -448,7 +597,6 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -457,14 +605,15 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
-
- ctx->ring_size = 4 * PAGE_SIZE;
- ctx->desc_template =
- default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
+ __context_set_persistence(ctx, true /* cgroup hook? */);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+ spin_lock(&i915->gem.contexts.lock);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ spin_unlock(&i915->gem.contexts.lock);
+
return ctx;
err_free:
@@ -472,13 +621,34 @@ err_free:
return ERR_PTR(err);
}
+static void
+context_apply_all(struct i915_gem_context *ctx,
+ void (*fn)(struct intel_context *ce, void *data),
+ void *data)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ fn(ce, data);
+ i915_gem_context_unlock_engines(ctx);
+}
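context_apply_all() is the common helper this patch uses to walk every engine context under the engines lock and apply a callback; __apply_ppgtt(), __apply_timeline() and __apply_priority() below are all built on it. A hypothetical further user would follow the same shape; __apply_ringsize and the SZ_32K value here are illustrative only, not part of this patch:

static void __apply_ringsize(struct intel_context *ce, void *sz)
{
	/* Mirrors the CONTEXT_ALLOC_BIT pattern in intel_context_set_gem():
	 * before the context backing store is allocated, ce->ring encodes
	 * the requested ring size. */
	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
		ce->ring = sz;
}

static void context_set_ringsize(struct i915_gem_context *ctx)
{
	context_apply_all(ctx, __apply_ringsize,
			  __intel_context_ring_size(SZ_32K));
}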
+
+static void __apply_ppgtt(struct intel_context *ce, void *vm)
+{
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(vm);
+}
+
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = ctx->vm;
+ struct i915_address_space *old = i915_gem_context_vm(ctx);
+
+ GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- ctx->vm = i915_vm_get(vm);
- ctx->desc_template = default_desc_template(ctx->i915, vm);
+ rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
+ context_apply_all(ctx, __apply_ppgtt, vm);
return old;
}
@@ -486,36 +656,57 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
static void __assign_ppgtt(struct i915_gem_context *ctx,
struct i915_address_space *vm)
{
- if (vm == ctx->vm)
+ if (vm == rcu_access_pointer(ctx->vm))
return;
vm = __set_ppgtt(ctx, vm);
if (vm)
- i915_vm_put(vm);
+ i915_vm_close(vm);
+}
+
+static void __set_timeline(struct intel_timeline **dst,
+ struct intel_timeline *src)
+{
+ struct intel_timeline *old = *dst;
+
+ *dst = src ? intel_timeline_get(src) : NULL;
+
+ if (old)
+ intel_timeline_put(old);
+}
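__set_timeline() is the usual refcounted-pointer swap: take the new reference before dropping the old one, so it is safe even if *dst and src are the same object. The idiom in generic form (invented struct thing and helpers):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct thing {
	struct kref ref;
};

static void thing_release(struct kref *ref)
{
	kfree(container_of(ref, struct thing, ref));
}

static struct thing *thing_get(struct thing *t)
{
	kref_get(&t->ref);
	return t;
}

static void thing_put(struct thing *t)
{
	kref_put(&t->ref, thing_release);
}

static void set_thing(struct thing **dst, struct thing *src)
{
	struct thing *old = *dst;

	*dst = src ? thing_get(src) : NULL;	/* get new first */

	if (old)
		thing_put(old);			/* then drop the old */
}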
+
+static void __apply_timeline(struct intel_context *ce, void *timeline)
+{
+ __set_timeline(&ce->timeline, timeline);
+}
+
+static void __assign_timeline(struct i915_gem_context *ctx,
+ struct intel_timeline *timeline)
+{
+ __set_timeline(&ctx->timeline, timeline);
+ context_apply_all(ctx, __apply_timeline, timeline);
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
- !HAS_EXECLISTS(dev_priv))
+ !HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the most stale context */
- contexts_free_first(dev_priv);
+ /* Reap the stale contexts */
+ contexts_flush_free(&i915->gem.contexts);
- ctx = __create_context(dev_priv);
+ ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
- if (HAS_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -523,20 +714,24 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
- timeline = i915_timeline_create(dev_priv, NULL);
+ timeline = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
}
- ctx->timeline = timeline;
+ __assign_timeline(ctx, timeline);
+ intel_timeline_put(timeline);
}
trace_i915_context_create(ctx);
@@ -544,171 +739,26 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ctx;
}
-/**
- * i915_gem_context_create_gvt - create a GVT GEM context
- * @dev: drm device *
- *
- * This function is used to create a GVT specific GEM context.
- *
- * Returns:
- * pointer to i915_gem_context on success, error pointer if failed
- *
- */
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev)
-{
- struct i915_gem_context *ctx;
- int ret;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return ERR_PTR(-ENODEV);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- ctx = i915_gem_create_context(to_i915(dev), 0);
- if (IS_ERR(ctx))
- goto out;
-
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret) {
- context_close(ctx);
- ctx = ERR_PTR(ret);
- goto out;
- }
-
- ctx->file_priv = ERR_PTR(-EBADF);
- i915_gem_context_set_closed(ctx); /* not user accessible */
- i915_gem_context_clear_bannable(ctx);
- i915_gem_context_set_force_single_submission(ctx);
- if (!USES_GUC_SUBMISSION(to_i915(dev)))
- ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
-
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-out:
- mutex_unlock(&dev->struct_mutex);
- return ctx;
-}
-
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
- struct i915_gem_context *ctx;
-
- /* Keep the context ref so that we can free it immediately ourselves */
- ctx = i915_gem_context_get(fetch_and_zero(ctxp));
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
- context_close(ctx);
- i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
- struct i915_gem_context *ctx;
- int err;
-
- ctx = i915_gem_create_context(i915, 0);
- if (IS_ERR(ctx))
- return ctx;
-
- err = i915_gem_context_pin_hw_id(ctx);
- if (err) {
- destroy_kernel_context(&ctx);
- return ERR_PTR(err);
- }
-
- i915_gem_context_clear_bannable(ctx);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
- ctx->ring_size = PAGE_SIZE;
-
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
- return ctx;
-}
-
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
{
- mutex_init(&i915->contexts.mutex);
- INIT_LIST_HEAD(&i915->contexts.list);
+ spin_lock_init(&gc->lock);
+ INIT_LIST_HEAD(&gc->list);
- /* Using the simple ida interface, the max is limited by sizeof(int) */
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&i915->contexts.hw_ida);
- INIT_LIST_HEAD(&i915->contexts.hw_id_list);
-
- INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
- init_llist_head(&i915->contexts.free_list);
+ INIT_WORK(&gc->free_work, contexts_free_worker);
+ init_llist_head(&gc->free_list);
}
-static bool needs_preempt_context(struct drm_i915_private *i915)
+void i915_gem_init__contexts(struct drm_i915_private *i915)
{
- return HAS_EXECLISTS(i915);
-}
-
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
-{
- struct i915_gem_context *ctx;
-
- /* Reassure ourselves we are only called once */
- GEM_BUG_ON(dev_priv->kernel_context);
- GEM_BUG_ON(dev_priv->preempt_context);
-
- intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
- init_contexts(dev_priv);
-
- /* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
- if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default global context\n");
- return PTR_ERR(ctx);
- }
- /*
- * For easy recognisablity, we want the kernel context to be 0 and then
- * all user contexts will have non-zero hw_id. Kernel contexts are
- * permanently pinned, so that we never suffer a stall and can
- * use them from any allocation context (e.g. for evicting other
- * contexts and from inside the shrinker).
- */
- GEM_BUG_ON(ctx->hw_id);
- GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
- dev_priv->kernel_context = ctx;
-
- /* highest priority; preempting task */
- if (needs_preempt_context(dev_priv)) {
- ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
- if (!IS_ERR(ctx))
- dev_priv->preempt_context = ctx;
- else
- DRM_ERROR("Failed to create preempt context; disabling preemption\n");
- }
-
+ init_contexts(&i915->gem.contexts);
DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
- return 0;
}
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (i915->preempt_context)
- destroy_kernel_context(&i915->preempt_context);
- destroy_kernel_context(&i915->kernel_context);
-
- /* Must free all deferred contexts (via flush_workqueue) first */
- GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
- ida_destroy(&i915->contexts.hw_ida);
-}
-
-static int context_idr_cleanup(int id, void *p, void *data)
-{
- context_close(p);
- return 0;
+ flush_work(&i915->gem.contexts.free_work);
}
static int vm_idr_cleanup(int id, void *p, void *data)
@@ -718,33 +768,29 @@ static int vm_idr_cleanup(int id, void *p, void *data)
}
static int gem_context_register(struct i915_gem_context *ctx,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ u32 *id)
{
+ struct i915_address_space *vm;
int ret;
ctx->file_priv = fpriv;
- if (ctx->vm)
- ctx->vm->file = fpriv;
+
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->file, fpriv); /* XXX */
+ mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
- current->comm, pid_nr(ctx->pid));
- if (!ctx->name) {
- ret = -ENOMEM;
- goto err_pid;
- }
+ snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
+ current->comm, pid_nr(ctx->pid));
/* And finally expose ourselves to userspace via the idr */
- mutex_lock(&fpriv->context_idr_lock);
- ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->context_idr_lock);
- if (ret >= 0)
- goto out;
+ ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ put_pid(fetch_and_zero(&ctx->pid));
- kfree(fetch_and_zero(&ctx->name));
-err_pid:
- put_pid(fetch_and_zero(&ctx->pid));
-out:
return ret;
}
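Per-file context handles now come from an XArray in allocating mode instead of an IDR guarded by context_idr_lock: xa_init_flags(..., XA_FLAGS_ALLOC) at open, xa_alloc() here, xa_erase() on destroy and xa_for_each() on close. The lookup-table pattern in a standalone sketch (invented names):

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct table {
	struct xarray handles;	/* id -> object, ids handed out by the xarray */
};

static void table_init(struct table *t)
{
	xa_init_flags(&t->handles, XA_FLAGS_ALLOC);
}

static int table_add(struct table *t, void *obj, u32 *id)
{
	/* xa_limit_32b: any free id in [0, UINT_MAX]; locking is internal */
	return xa_alloc(&t->handles, id, obj, xa_limit_32b, GFP_KERNEL);
}

static void *table_remove(struct table *t, u32 id)
{
	return xa_erase(&t->handles, id);
}

static void table_fini(struct table *t)
{
	unsigned long idx;
	void *obj;

	xa_for_each(&t->handles, idx, obj)
		kfree(obj);	/* stand-in for the object's real release */
	xa_destroy(&t->handles);
}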
@@ -754,51 +800,51 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
int err;
+ u32 id;
- mutex_init(&file_priv->context_idr_lock);
- mutex_init(&file_priv->vm_idr_lock);
+ xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
- idr_init(&file_priv->context_idr);
+ mutex_init(&file_priv->vm_idr_lock);
idr_init_base(&file_priv->vm_idr, 1);
- mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
- mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
}
- err = gem_context_register(ctx, file_priv);
+ err = gem_context_register(ctx, file_priv, &id);
if (err < 0)
goto err_ctx;
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
- GEM_BUG_ON(err > 0);
-
+ GEM_BUG_ON(id);
return 0;
err_ctx:
context_close(ctx);
err:
idr_destroy(&file_priv->vm_idr);
- idr_destroy(&file_priv->context_idr);
+ xa_destroy(&file_priv->context_xa);
mutex_destroy(&file_priv->vm_idr_lock);
- mutex_destroy(&file_priv->context_idr_lock);
return err;
}
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_gem_context *ctx;
+ unsigned long idx;
- idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
- idr_destroy(&file_priv->context_idr);
- mutex_destroy(&file_priv->context_idr_lock);
+ xa_for_each(&file_priv->context_xa, idx, ctx)
+ context_close(ctx);
+ xa_destroy(&file_priv->context_xa);
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
mutex_destroy(&file_priv->vm_idr_lock);
+
+ contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -816,7 +862,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -891,6 +937,7 @@ struct context_barrier_task {
void *data;
};
+__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -910,21 +957,23 @@ static int context_barrier_task(struct i915_gem_context *ctx,
void (*task)(void *data),
void *data)
{
- struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
struct intel_context *ce;
int err = 0;
- lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!task);
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
- i915_active_init(i915, &cb->base, cb_retire);
- i915_active_acquire(&cb->base);
+ i915_active_init(&cb->base, NULL, cb_retire);
+ err = i915_active_acquire(&cb->base);
+ if (err) {
+ kfree(cb);
+ return err;
+ }
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
@@ -951,7 +1000,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
- err = i915_active_ref(&cb->base, rq->fence.context, rq);
+ err = i915_active_add_request(&cb->base, rq);
i915_request_add(rq);
if (err)
@@ -974,16 +1023,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_address_space *vm;
int ret;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
- /* XXX rcu acquire? */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- vm = i915_vm_get(ctx->vm);
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ rcu_read_lock();
+ vm = context_get_vm_rcu(ctx);
+ rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
@@ -994,7 +1039,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret < 0)
goto err_unlock;
- i915_vm_get(vm);
+ i915_vm_open(vm);
args->size = 0;
args->value = ret;
@@ -1014,12 +1059,12 @@ static void set_ppgtt_barrier(void *data)
if (INTEL_GEN(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_vm_put(old);
+ i915_vm_close(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_address_space *vm = rq->gem_context->vm;
+ struct i915_address_space *vm = rq->context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
@@ -1044,12 +1089,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ int err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
@@ -1060,9 +1111,6 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
- } else {
- /* ppGTT is not part of the legacy context image */
- gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
}
return 0;
@@ -1070,10 +1118,20 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ return true;
+
if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
- return !ce->state;
- else
- return !atomic_read(&ce->pin_count);
+ return false;
+
+ if (!atomic_read(&ce->pin_count))
+ return true;
+
+ /* ppGTT is not part of the legacy context image */
+ if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
+ return true;
+
+ return false;
}
static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1086,34 +1144,34 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (args->size)
return -EINVAL;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
if (upper_32_bits(args->value))
return -ENOENT;
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
+ rcu_read_lock();
vm = idr_find(&file_priv->vm_idr, args->value);
- if (vm)
- i915_vm_get(vm);
- mutex_unlock(&file_priv->vm_idr_lock);
+ if (vm && !kref_get_unless_zero(&vm->ref))
+ vm = NULL;
+ rcu_read_unlock();
if (!vm)
return -ENOENT;
- err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ err = mutex_lock_interruptible(&ctx->mutex);
if (err)
goto out;
- if (vm == ctx->vm)
+ if (i915_gem_context_is_closed(ctx)) {
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
- mutex_lock(&ctx->mutex);
lut_close(ctx);
- mutex_unlock(&ctx->mutex);
old = __set_ppgtt(ctx, vm);
@@ -1128,14 +1186,12 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- ctx->vm = old;
- ctx->desc_template = default_desc_template(ctx->i915, old);
- i915_vm_put(vm);
+ i915_vm_close(__set_ppgtt(ctx, old));
+ i915_vm_close(old);
}
unlock:
- mutex_unlock(&ctx->i915->drm.struct_mutex);
-
+ mutex_unlock(&ctx->mutex);
out:
i915_vm_put(vm);
return err;
@@ -1154,7 +1210,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) +
LRC_STATE_PN * PAGE_SIZE +
- (CTX_R_PWR_CLK_STATE + 1) * 4;
+ CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
@@ -1180,44 +1236,32 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
* image, or into the registers directory, does not stick). Pristine
* and idle contexts will be configured on pinning.
*/
- if (!intel_context_is_pinned(ce))
+ if (!intel_context_pin_if_active(ce))
return 0;
- rq = i915_request_create(ce->engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- /* Queue this switch after all other activity by this context. */
- ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
- if (ret)
- goto out_add;
-
- /*
- * Guarantee context image and the timeline remains pinned until the
- * modifying request is retired by setting the ce activity tracker.
- *
- * But we only need to take one pin on the account of it. Or in other
- * words transfer the pinned ce object to tracked active request.
- */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
- ret = i915_active_ref(&ce->active, rq->fence.context, rq);
- if (ret)
- goto out_add;
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
-out_add:
i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
return ret;
}
static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
- GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
ret = intel_context_lock_pinned(ce);
if (ret)
@@ -1237,23 +1281,6 @@ unlock:
}
static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
- struct drm_i915_private *i915 = ce->gem_context->i915;
- int ret;
-
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ret = __intel_context_reconfigure_sseu(ce, sseu);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
-}
-
-static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context)
@@ -1484,12 +1511,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
}
}
- ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+ ce = intel_execlists_create_virtual(siblings, n);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_siblings;
}
+ intel_context_set_gem(ce, set->ctx);
+
if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
intel_context_put(ce);
err = -EEXIST;
@@ -1636,6 +1665,7 @@ set_engines(struct i915_gem_context *ctx,
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
+ struct intel_context *ce;
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
__free_engines(set.engines, n);
@@ -1658,11 +1688,15 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
- set.engines->engines[n] = intel_context_create(ctx, engine);
- if (!set.engines->engines[n]) {
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
__free_engines(set.engines, n);
- return -ENOMEM;
+ return PTR_ERR(ce);
}
+
+ intel_context_set_gem(ce, ctx);
+
+ set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
@@ -1683,7 +1717,7 @@ replace:
i915_gem_context_set_user_engines(ctx);
else
i915_gem_context_clear_user_engines(ctx);
- rcu_swap_protected(ctx->engines, set.engines, 1);
+ set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
call_rcu(&set.engines->rcu, free_engines_rcu);
@@ -1776,7 +1810,7 @@ get_engines(struct i915_gem_context *ctx,
if (e->engines[n]) {
ci.engine_class = e->engines[n]->engine->uabi_class;
- ci.engine_instance = e->engines[n]->engine->instance;
+ ci.engine_instance = e->engines[n]->engine->uabi_instance;
}
if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -1792,6 +1826,54 @@ err_free:
return err;
}
+static int
+set_persistence(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ if (args->size)
+ return -EINVAL;
+
+ return __context_set_persistence(ctx, args->value);
+}
+
+static void __apply_priority(struct intel_context *ce, void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+
+ if (!intel_engine_has_semaphores(ce->engine))
+ return;
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
+ intel_context_set_use_semaphores(ce);
+ else
+ intel_context_clear_use_semaphores(ce);
+}
+
+static int set_priority(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ s64 priority = args->value;
+
+ if (args->size)
+ return -EINVAL;
+
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ return -ENODEV;
+
+ if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+ priority < I915_CONTEXT_MIN_USER_PRIORITY)
+ return -EINVAL;
+
+ if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+ !capable(CAP_SYS_NICE))
+ return -EPERM;
+
+ ctx->sched.priority = I915_USER_PRIORITY(priority);
+ context_apply_all(ctx, __apply_priority, ctx);
+
+ return 0;
+}
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1838,23 +1920,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
break;
case I915_CONTEXT_PARAM_PRIORITY:
- {
- s64 priority = args->value;
-
- if (args->size)
- ret = -EINVAL;
- else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
- ret = -ENODEV;
- else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
- priority < I915_CONTEXT_MIN_USER_PRIORITY)
- ret = -EINVAL;
- else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
- !capable(CAP_SYS_NICE))
- ret = -EPERM;
- else
- ctx->sched.priority =
- I915_USER_PRIORITY(priority);
- }
+ ret = set_priority(ctx, args);
break;
case I915_CONTEXT_PARAM_SSEU:
@@ -1869,6 +1935,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ ret = set_persistence(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1930,20 +2000,23 @@ static int clone_engines(struct i915_gem_context *dst,
*/
if (intel_engine_is_virtual(engine))
clone->engines[n] =
- intel_execlists_clone_virtual(dst, engine);
+ intel_execlists_clone_virtual(engine);
else
- clone->engines[n] = intel_context_create(dst, engine);
+ clone->engines[n] = intel_context_create(engine);
if (IS_ERR_OR_NULL(clone->engines[n])) {
__free_engines(clone, n);
goto err_unlock;
}
+
+ intel_context_set_gem(clone->engines[n], dst);
}
clone->num_engines = n;
user_engines = i915_gem_context_user_engines(src);
i915_gem_context_unlock_engines(src);
- free_engines(dst->engines);
+ /* Serialised by constructor */
+ free_engines(__context_engines_static(dst));
RCU_INIT_POINTER(dst->engines, clone);
if (user_engines)
i915_gem_context_set_user_engines(dst);
@@ -1978,7 +2051,8 @@ static int clone_sseu(struct i915_gem_context *dst,
unsigned long n;
int err;
- clone = dst->engines; /* no locking required; sole access */
+ /* no locking required; sole access during construction */
+ clone = __context_engines_static(dst);
if (e->num_engines != clone->num_engines) {
err = -EINVAL;
goto unlock;
@@ -2011,13 +2085,8 @@ unlock:
static int clone_timeline(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- if (src->timeline) {
- GEM_BUG_ON(src->timeline == dst->timeline);
-
- if (dst->timeline)
- i915_timeline_put(dst->timeline);
- dst->timeline = i915_timeline_get(src->timeline);
- }
+ if (src->timeline)
+ __assign_timeline(dst, src->timeline);
return 0;
}
@@ -2026,44 +2095,24 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
struct i915_address_space *vm;
+ int err = 0;
- rcu_read_lock();
- do {
- vm = READ_ONCE(src->vm);
- if (!vm)
- break;
-
- if (!kref_get_unless_zero(&vm->ref))
- continue;
-
- /*
- * This ppgtt may have be reallocated between
- * the read and the kref, and reassigned to a third
- * context. In order to avoid inadvertent sharing
- * of this ppgtt with that third context (and not
- * src), we have to confirm that we have the same
- * ppgtt after passing through the strong memory
- * barrier implied by a successful
- * kref_get_unless_zero().
- *
- * Once we have acquired the current ppgtt of src,
- * we no longer care if it is released from src, as
- * it cannot be reallocated elsewhere.
- */
-
- if (vm == READ_ONCE(src->vm))
- break;
+ if (!rcu_access_pointer(src->vm))
+ return 0;
- i915_vm_put(vm);
- } while (1);
+ rcu_read_lock();
+ vm = context_get_vm_rcu(src);
rcu_read_unlock();
- if (vm) {
+ if (!mutex_lock_interruptible(&dst->mutex)) {
__assign_ppgtt(dst, vm);
- i915_vm_put(vm);
+ mutex_unlock(&dst->mutex);
+ } else {
+ err = -EINTR;
}
- return 0;
+ i915_vm_put(vm);
+ return err;
}
static int create_clone(struct i915_user_extension __user *ext, void *data)
@@ -2134,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_context_create_ext *args = data;
struct create_ext ext_data;
int ret;
+ u32 id;
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return -ENODEV;
@@ -2141,24 +2191,18 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
- ret = i915_terminally_wedged(i915);
+ ret = intel_gt_terminally_wedged(&i915->gt);
if (ret)
return ret;
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm,
- pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ current->comm, task_pid_nr(current));
return -EIO;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ext_data.ctx = i915_gem_create_context(i915, args->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ext_data.ctx))
return PTR_ERR(ext_data.ctx);
@@ -2171,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_ctx;
}
- ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+ ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
if (ret < 0)
goto err_ctx;
- args->ctx_id = ret;
+ args->ctx_id = id;
DRM_DEBUG("HW context %d created\n", args->ctx_id);
return 0;
@@ -2198,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (!args->ctx_id)
return -ENOENT;
- if (mutex_lock_interruptible(&file_priv->context_idr_lock))
- return -EINTR;
-
- ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
- mutex_unlock(&file_priv->context_idr_lock);
+ ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
if (!ctx)
return -ENOENT;
@@ -2285,12 +2325,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- if (ctx->vm)
- args->value = ctx->vm->total;
- else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+ rcu_read_lock();
+ if (rcu_access_pointer(ctx->vm))
+ args->value = rcu_dereference(ctx->vm)->total;
else
args->value = to_i915(dev)->ggtt.vm.total;
+ rcu_read_unlock();
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2325,6 +2365,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = get_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ args->size = 0;
+ args->value = i915_gem_context_is_persistent(ctx);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2356,7 +2401,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_gem_context *ctx;
int ret;
@@ -2378,7 +2423,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
*/
if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ args->reset_count = i915_reset_count(&i915->gpu_error);
else
args->reset_count = 0;
@@ -2391,33 +2436,6 @@ out:
return ret;
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
- int err = 0;
-
- mutex_lock(&i915->contexts.mutex);
-
- GEM_BUG_ON(i915_gem_context_is_closed(ctx));
-
- if (list_empty(&ctx->hw_id_link)) {
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
-
- err = assign_hw_id(i915, &ctx->hw_id);
- if (err)
- goto out_unlock;
-
- list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
- }
-
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
- atomic_inc(&ctx->hw_id_pin_count);
-
-out_unlock:
- mutex_unlock(&i915->contexts.mutex);
- return err;
-}
-
/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 9691dd062f72..3ae61a355d87 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -11,6 +11,7 @@
#include "gt/intel_context.h"
+#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@@ -74,24 +75,19 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c
clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
-static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
+static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
- return test_bit(CONTEXT_BANNED, &ctx->flags);
+ return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
-static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
+static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
- set_bit(CONTEXT_BANNED, &ctx->flags);
+ set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
-static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
+static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
- return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
-{
- __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
+ clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
static inline bool
@@ -112,37 +108,15 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
-static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
- if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
- return 0;
-
- return __i915_gem_context_pin_hw_id(ctx);
-}
-
-static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
-{
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
- atomic_dec(&ctx->hw_id_pin_count);
-}
-
-static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
-{
- return !ctx->file_priv;
-}
-
/* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+void i915_gem_init__contexts(struct drm_i915_private *i915);
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
void i915_gem_context_release(struct kref *ctx_ref);
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev);
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -160,9 +134,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
@@ -175,6 +146,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline struct i915_address_space *
+i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+ return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
+}
+
+static inline struct i915_address_space *
+i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (!vm)
+ vm = &ctx->i915->ggtt.vm;
+ vm = i915_vm_get(vm);
+ rcu_read_unlock();
+
+ return vm;
+}
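These inline accessors split the two access modes: i915_gem_context_vm() for callers already holding ctx->mutex, and i915_gem_context_get_vm_rcu() for lockless callers, which also covers the aliasing/GGTT case by never returning NULL. A typical lockless caller would look like this (illustrative helper, not part of the patch):

/* Sketch: read a property of the context's address space without
 * holding ctx->mutex. */
static u64 context_gtt_size(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;
	u64 total;

	vm = i915_gem_context_get_vm_rcu(ctx); /* falls back to the GGTT */
	total = vm->total;
	i915_vm_put(vm);

	return total;
}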
+
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
@@ -198,12 +190,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
}
static inline struct intel_context *
-i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
-{
- return i915_gem_context_engines(ctx)->engines[idx];
-}
-
-static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
struct intel_context *ce = ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cc513410eeef..017ca803ab47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -26,7 +26,7 @@ struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_address_space;
-struct i915_timeline;
+struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
@@ -77,7 +77,7 @@ struct i915_gem_context {
struct i915_gem_engines __rcu *engines;
struct mutex engines_mutex; /* guards writes to engines */
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
/**
* @vm: unique address space (GTT)
@@ -88,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_address_space *vm;
+ struct i915_address_space __rcu *vm;
/**
* @pid: process id of creator
@@ -100,15 +100,6 @@ struct i915_gem_context {
*/
struct pid *pid;
- /**
- * @name: arbitrary name
- *
- * A name is constructed for the context from the creator's process
- * name, pid and user handle in order to uniquely identify the
- * context in messages.
- */
- const char *name;
-
/** link: place with &drm_i915_private.context_list */
struct list_head link;
struct llist_node free_link;
@@ -137,43 +128,19 @@ struct i915_gem_context {
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
+#define UCONTEXT_PERSISTENCE 4
/**
* @flags: small set of booleans
*/
unsigned long flags;
-#define CONTEXT_BANNED 0
-#define CONTEXT_CLOSED 1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
-#define CONTEXT_USER_ENGINES 3
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unqiue
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
+#define CONTEXT_CLOSED 0
+#define CONTEXT_USER_ENGINES 1
struct mutex mutex;
struct i915_sched_attr sched;
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
- /** desc_template: invariant fields for the HW context descriptor */
- u32 desc_template;
-
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**
@@ -197,6 +164,15 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
+
+ /**
+ * @name: arbitrary name, used for user debug
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
+ */
+ char name[TASK_COMM_LEN + 8];
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index cbf1701d3acc..372b57ca0efc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_drv.h"
#include "i915_gem_object.h"
@@ -93,40 +93,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct page *page;
-
- if (page_num >= obj->base.size >> PAGE_SHIFT)
- return NULL;
-
- if (!i915_gem_object_has_struct_page(obj))
- return NULL;
-
- if (i915_gem_object_pin_pages(obj))
- return NULL;
-
- /* Synchronisation is left to the caller (via .begin_cpu_access()) */
- page = i915_gem_object_get_page(obj, page_num);
- if (IS_ERR(page))
- goto err_unpin;
-
- return kmap(page);
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-
- kunmap(virt_to_page(addr));
- i915_gem_object_unpin_pages(obj);
-}
-
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -195,8 +161,6 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map = i915_gem_dmabuf_kmap,
- .unmap = i915_gem_dmabuf_kunmap,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
@@ -204,8 +168,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -222,7 +185,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -257,6 +220,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ static struct lock_class_key lock_class;
struct dma_buf_attachment *attach;
struct drm_i915_gem_object *obj;
int ret;
@@ -288,7 +252,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
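Passing an explicit, per-call-site lock_class_key into i915_gem_object_init() gives dmabuf-imported objects their own lockdep class, so lock nesting can be annotated separately from other object types. The underlying lockdep idiom, stripped to a minimal form (invented names):

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct obj {
	struct mutex lock;
};

static void obj_init(struct obj *o)
{
	/* One static key per call site/object type gives lockdep a distinct
	 * class for these mutexes; mutex_init() does the same thing
	 * implicitly for the class of its own call site. */
	static struct lock_class_key key;

	__mutex_init(&o->lock, "obj->lock", &key);
}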
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 2e3ce2a69653..0cc40e77bbd2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -12,6 +12,8 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_vma.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
@@ -27,7 +29,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
- if (!READ_ONCE(obj->pin_global))
+ if (!i915_gem_object_is_framebuffer(obj))
return;
i915_gem_object_lock(obj);
@@ -148,9 +150,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
+ struct i915_vma *vma;
+
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
+
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ i915_vma_set_ggtt_write(vma);
+ spin_unlock(&obj->vma.lock);
}
i915_gem_object_unpin_pages(obj);
@@ -175,112 +185,34 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct i915_vma *vma;
int ret;
- assert_object_held(obj);
-
if (obj->cache_level == cache_level)
return 0;
- /* Inspect the list of currently bound VMA and unbind any that would
- * be invalid given the new cache-level. This is principally to
- * catch the issue of the CS prefetch crossing page boundaries and
- * reading an invalid PTE on older architectures.
- */
-restart:
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_pinned(vma)) {
- DRM_DEBUG("can not change the cache level of pinned objects\n");
- return -EBUSY;
- }
-
- if (!i915_vma_is_closed(vma) &&
- i915_gem_valid_gtt_space(vma, cache_level))
- continue;
-
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
- /* As unbinding may affect other elements in the
- * obj->vma_list (due to side-effects from retiring
- * an active vma), play safe and restart the iterator.
- */
- goto restart;
- }
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
- /* We can reuse the existing drm_mm nodes but need to change the
- * cache-level on the PTE. We could simply unbind them all and
- * rebind with the correct cache-level on next use. However since
- * we already have a valid slot, dma mapping, pages etc, we may as
- * rewrite the PTE in the belief that doing so tramples upon less
- * state and so involves less work.
- */
- if (atomic_read(&obj->bind_count)) {
- /* Before we change the PTE, the GPU must not be accessing it.
- * If we wait upon the object, we know that all the bound
- * VMA are no longer active.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (!HAS_LLC(to_i915(obj->base.dev)) &&
- cache_level != I915_CACHE_NONE) {
- /* Access to snoopable pages through the GTT is
- * incoherent and on some machines causes a hard
- * lockup. Relinquish the CPU mmaping to force
- * userspace to refault in the pages and we can
- * then double check if the GTT mapping is still
- * valid for that pointer access.
- */
- i915_gem_object_release_mmap(obj);
-
- /* As we no longer need a fence for GTT access,
- * we can relinquish it now (and so prevent having
- * to steal a fence from someone else on the next
- * fence request). Note GPU activity would have
- * dropped the fence as all snoopable access is
- * supposed to be linear.
- */
- for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
- }
- } else {
- /* We either have incoherent backing store and
- * so no GTT access or the architecture is fully
- * coherent. In such cases, existing GTT mmaps
- * ignore the cache bit in the PTE and we can
- * rewrite it without confusing the GPU or having
- * to force userspace to fault back in its mmaps.
- */
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
- if (ret)
- return ret;
- }
+ /* Always invalidate stale cachelines */
+ if (obj->cache_level != cache_level) {
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+ obj->cache_dirty = true;
}
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true; /* Always invalidate stale cachelines */
+ i915_gem_object_unlock(obj);
- return 0;
+ /* The cache-level will be applied when each vma is rebound. */
+ return i915_gem_object_unbind(obj,
+ I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_BARRIER);
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -361,25 +293,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->cache_level == level)
- goto out;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto out;
-
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- goto out;
-
- ret = i915_gem_object_lock_interruptible(obj);
- if (ret == 0) {
- ret = i915_gem_object_set_cache_level(obj, level);
- i915_gem_object_unlock(obj);
- }
- mutex_unlock(&i915->drm.struct_mutex);
+ ret = i915_gem_object_set_cache_level(obj, level);
out:
i915_gem_object_put(obj);
@@ -398,17 +312,16 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
unsigned int flags)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
- assert_object_held(obj);
-
- /* Mark the global pin early so that we account for the
- * display coherency whilst setting up the cache domains.
- */
- obj->pin_global++;
+ /* Frame buffer must be in LMEM (no migration yet) */
+ if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
+ return ERR_PTR(-EINVAL);
- /* The display engine is not coherent with the LLC cache on gen6. As
+ /*
+ * The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
* chipsets.
@@ -418,14 +331,13 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(to_i915(obj->base.dev)) ?
+ HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err_unpin_global;
- }
+ if (ret)
+ return ERR_PTR(ret);
- /* As the user may map the buffer once pinned in the display plane
+ /*
+ * As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. However,
* it may simply be too big to fit into mappable, in which case
@@ -442,21 +354,12 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
if (IS_ERR(vma))
- goto err_unpin_global;
+ return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- __i915_gem_object_flush_for_display(obj);
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
-
- return vma;
+ i915_gem_object_flush_if_display(obj);
-err_unpin_global:
- obj->pin_global--;
return vma;
}
@@ -466,14 +369,19 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ if (!atomic_read(&obj->bind_count))
+ return;
mutex_lock(&i915->ggtt.vm.mutex);
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (!drm_mm_node_allocated(&vma->node))
continue;
+ GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
+ spin_unlock(&obj->vma.lock);
mutex_unlock(&i915->ggtt.vm.mutex);
if (i915_gem_object_is_shrinkable(obj)) {
@@ -481,7 +389,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (obj->mm.madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED &&
+ !atomic_read(&obj->mm.shrink_pin))
list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
@@ -495,12 +404,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
assert_object_held(obj);
- if (WARN_ON(obj->pin_global == 0))
- return;
-
- if (--obj->pin_global == 0)
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(obj);
@@ -551,13 +454,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +557,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unlock(obj);
- if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- fb_write_origin(obj, write_domain));
+ if (write_domain)
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -783,7 +678,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 41dab9ea33cd..60c984e10c4a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,7 +5,7 @@
*/
#include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,13 +16,17 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
-#include "i915_gem_ioctls.h"
+#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_ioctls.h"
+#include "i915_sw_fence_work.h"
#include "i915_trace.h"
-#include "intel_drv.h"
enum {
FORCE_CPU_RELOC = 1,
@@ -222,10 +226,10 @@ struct i915_execbuffer {
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
- struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
@@ -274,28 +278,11 @@ struct i915_execbuffer {
#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-/*
- * Used to convert any address to canonical form.
- * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
- * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
- * addresses to be in a canonical form:
- * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
- * canonical form [63:48] == [47]."
- */
-#define GEN8_HIGH_ADDRESS_BIT 47
-static inline u64 gen8_canonical_addr(u64 address)
-{
- return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
-}
-
-static inline u64 gen8_noncanonical_addr(u64 address)
-{
- return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
-}
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+ return intel_engine_requires_cmd_parser(eb->engine) ||
+ (intel_engine_using_cmd_parser(eb->engine) &&
+ eb->args->batch_len);
}
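
The gen8_canonical_addr()/gen8_noncanonical_addr() helpers removed above are plain sign extension of bit 47 into the upper bits, and the corresponding mask back down. A standalone restatement of the arithmetic, with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative restatement of the removed helpers: sign-extend bit 47 into
 * bits [63:48] (what sign_extend64(address, 47) does), and mask back down. */
static uint64_t canonical_addr(uint64_t address)
{
        return (uint64_t)((int64_t)(address << 16) >> 16);
}

static uint64_t noncanonical_addr(uint64_t address)
{
        return address & ((1ull << 48) - 1);    /* keep bits [47:0] */
}

int main(void)
{
        uint64_t a = 0x0000900000001000ull;     /* bit 47 set */

        printf("%016llx\n", (unsigned long long)canonical_addr(a));
        /* ffff900000001000: bits [63:48] now copy bit 47 */
        printf("%016llx\n", (unsigned long long)noncanonical_addr(canonical_addr(a)));
        /* 0000900000001000: round-trips back */
        return 0;
}
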
static int eb_create(struct i915_execbuffer *eb)
@@ -696,7 +683,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
- err = i915_gem_evict_vm(eb->vm);
+ mutex_lock(&eb->context->vm->mutex);
+ err = i915_gem_evict_vm(eb->context->vm);
+ mutex_unlock(&eb->context->vm->mutex);
if (err)
return err;
break;
@@ -724,12 +713,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm) {
- eb->vm = ctx->vm;
+ if (rcu_access_pointer(ctx->vm))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
- } else {
- eb->vm = &eb->i915->ggtt.vm;
- }
eb->context_flags = 0;
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
@@ -738,63 +723,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
- struct i915_request *rq;
-
- /*
- * Completely unscientific finger-in-the-air estimates for suitable
- * maximum user request size (to avoid blocking) and then backoff.
- */
- if (intel_ring_update_space(ring) >= PAGE_SIZE)
- return NULL;
-
- /*
- * Find a request that after waiting upon, there will be at least half
- * the ring available. The hysteresis allows us to compete for the
- * shared ring and should mean that we sleep less often prior to
- * claiming our resources, but not so long that the ring completely
- * drains before we can submit our next request.
- */
- list_for_each_entry(rq, &ring->request_list, ring_link) {
- if (__intel_ring_space(rq->postfix,
- ring->emit, ring->size) > ring->size / 2)
- break;
- }
- if (&rq->ring_link == &ring->request_list)
- return NULL; /* weird, we will check again later for real */
-
- return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
- struct i915_request *rq;
- int ret = 0;
-
- /*
- * Apply a light amount of backpressure to prevent excessive hogs
- * from blocking waiting for space whilst holding struct_mutex and
- * keeping all of their resources pinned.
- */
-
- rq = __eb_wait_for_ring(eb->context->ring);
- if (rq) {
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0)
- ret = -EINTR;
-
- i915_request_put(rq);
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- }
-
- return ret;
-}
-
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -802,9 +730,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
- if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
- return -EIO;
-
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
@@ -831,7 +756,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_vma;
}
- vma = i915_vma_instance(obj, eb->vm, NULL);
+ vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -962,7 +887,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
- cache->node.allocated = false;
+ cache->node.flags = 0;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -994,7 +919,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
- i915_gem_chipset_flush(cache->rq->i915);
+ intel_gt_chipset_flush(cache->rq->engine->gt);
i915_request_add(cache->rq);
cache->rq = NULL;
@@ -1018,15 +943,18 @@ static void reloc_cache_reset(struct reloc_cache *cache)
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
- wmb();
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
@@ -1077,11 +1005,15 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
void *vaddr;
if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int err;
+ if (i915_gem_object_is_tiled(obj))
+ return ERR_PTR(-EINVAL);
+
if (use_cpu_reloc(cache, obj))
return NULL;
@@ -1093,32 +1025,27 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
- err = i915_vma_put_fence(vma);
- if (err) {
- i915_vma_unpin(vma);
- return ERR_PTR(err);
- }
-
cache->node.start = vma->node.start;
cache->node.mm = (void *)vma;
}
}
offset = cache->node.start;
- if (cache->node.allocated) {
- wmb();
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1201,25 +1128,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct drm_i915_gem_object *obj;
+ struct intel_engine_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- cmd = i915_gem_object_pin_map(obj,
+ cmd = i915_gem_object_pin_map(pool->obj,
cache->has_llc ?
I915_MAP_FORCE_WB :
I915_MAP_FORCE_WC);
- i915_gem_object_unpin_pages(obj);
- if (IS_ERR(cmd))
- return PTR_ERR(cmd);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_pool;
+ }
- batch = i915_vma_instance(obj, vma->vm, NULL);
+ batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1235,6 +1163,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin;
}
+ err = intel_engine_pool_mark_active(pool, rq);
+ if (err)
+ goto err_request;
+
err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1246,8 +1178,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto skip_request;
i915_vma_lock(batch);
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- err = i915_vma_move_to_active(batch, rq, 0);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1260,7 +1193,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq_size = 0;
/* Return with batch mapping (cmd) still pinned */
- return 0;
+ goto out_pool;
skip_request:
i915_request_skip(rq, err);
@@ -1269,7 +1202,9 @@ err_request:
err_unpin:
i915_vma_unpin(batch);
err_unmap:
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(pool->obj);
+out_pool:
+ intel_engine_pool_put(pool);
return err;
}
@@ -1286,10 +1221,6 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
- /* If we need to copy for the cmdparser, we will stall anyway */
- if (eb_use_cmdparser(eb))
- return ERR_PTR(-EWOULDBLOCK);
-
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
@@ -1317,7 +1248,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true))) {
+ !dma_resv_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1442,7 +1373,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
- PIN_GLOBAL);
+ PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
return err;
@@ -1952,7 +1883,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */
- i915_gem_chipset_flush(eb->i915);
+ intel_gt_chipset_flush(eb->engine->gt);
return 0;
err_skip:
@@ -1960,15 +1891,15 @@ err_skip:
return err;
}
-static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
- return false;
+ return -EINVAL;
/* Kernel clipping was a DRI1 misfeature */
if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
if (exec->num_cliprects || exec->cliprects_ptr)
- return false;
+ return -EINVAL;
}
if (exec->DR4 == 0xffffffff) {
@@ -1976,12 +1907,12 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
exec->DR4 = 0;
}
if (exec->DR1 || exec->DR4)
- return false;
+ return -EINVAL;
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
@@ -2009,51 +1940,227 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
return 0;
}
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned int flags)
{
- struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
int err;
- shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
- PAGE_ALIGN(eb->batch_len));
- if (IS_ERR(shadow_batch_obj))
- return ERR_CAST(shadow_batch_obj);
-
- err = intel_engine_cmd_parser(eb->engine,
- eb->batch->obj,
- shadow_batch_obj,
- eb->batch_start_offset,
- eb->batch_len,
- is_master);
- if (err) {
- if (err == -EACCES) /* unhandled chained batch */
- vma = NULL;
- else
- vma = ERR_PTR(err);
- goto out;
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return vma;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ return ERR_PTR(err);
+
+ return vma;
+}
+
+struct eb_parse_work {
+ struct dma_fence_work base;
+ struct intel_engine_cs *engine;
+ struct i915_vma *batch;
+ struct i915_vma *shadow;
+ struct i915_vma *trampoline;
+ unsigned int batch_offset;
+ unsigned int batch_length;
+};
+
+static int __eb_parse(struct dma_fence_work *work)
+{
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ return intel_engine_cmd_parser(pw->engine,
+ pw->batch,
+ pw->batch_offset,
+ pw->batch_length,
+ pw->shadow,
+ pw->trampoline);
+}
+
+static void __eb_parse_release(struct dma_fence_work *work)
+{
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ if (pw->trampoline)
+ i915_active_release(&pw->trampoline->active);
+ i915_active_release(&pw->shadow->active);
+ i915_active_release(&pw->batch->active);
+}
+
+static const struct dma_fence_work_ops eb_parse_ops = {
+ .name = "eb_parse",
+ .work = __eb_parse,
+ .release = __eb_parse_release,
+};
+
+static int eb_parse_pipeline(struct i915_execbuffer *eb,
+ struct i915_vma *shadow,
+ struct i915_vma *trampoline)
+{
+ struct eb_parse_work *pw;
+ int err;
+
+ pw = kzalloc(sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&eb->batch->active);
+ if (err)
+ goto err_free;
+
+ err = i915_active_acquire(&shadow->active);
+ if (err)
+ goto err_batch;
+
+ if (trampoline) {
+ err = i915_active_acquire(&trampoline->active);
+ if (err)
+ goto err_shadow;
}
- vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
- goto out;
+ dma_fence_work_init(&pw->base, &eb_parse_ops);
+
+ pw->engine = eb->engine;
+ pw->batch = eb->batch;
+ pw->batch_offset = eb->batch_start_offset;
+ pw->batch_length = eb->batch_len;
+ pw->shadow = shadow;
+ pw->trampoline = trampoline;
+
+ err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
+ if (err)
+ goto err_trampoline;
+
+ err = dma_resv_reserve_shared(pw->batch->resv, 1);
+ if (err)
+ goto err_batch_unlock;
+
+ /* Wait for all writes (and relocs) into the batch to complete */
+ err = i915_sw_fence_await_reservation(&pw->base.chain,
+ pw->batch->resv, NULL, false,
+ 0, I915_FENCE_GFP);
+ if (err < 0)
+ goto err_batch_unlock;
+
+ /* Keep the batch alive and unwritten as we parse */
+ dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
+
+ dma_resv_unlock(pw->batch->resv);
+
+ /* Force execution to wait for completion of the parser */
+ dma_resv_lock(shadow->resv, NULL);
+ dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
+ dma_resv_unlock(shadow->resv);
+
+ dma_fence_work_commit(&pw->base);
+ return 0;
+
+err_batch_unlock:
+ dma_resv_unlock(pw->batch->resv);
+err_trampoline:
+ if (trampoline)
+ i915_active_release(&trampoline->active);
+err_shadow:
+ i915_active_release(&shadow->active);
+err_batch:
+ i915_active_release(&eb->batch->active);
+err_free:
+ kfree(pw);
+ return err;
+}
+
+static int eb_parse(struct i915_execbuffer *eb)
+{
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *shadow, *trampoline;
+ unsigned int len;
+ int err;
+
+ if (!eb_use_cmdparser(eb))
+ return 0;
+
+ len = eb->batch_len;
+ if (!CMDPARSER_USES_GGTT(eb->i915)) {
+ /*
+ * ppGTT backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering
+ */
+ if (!eb->context->vm->has_read_only) {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return -EINVAL;
+ }
+ } else {
+ len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
+ }
+
+ pool = intel_engine_get_pool(eb->engine, len);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
+ goto err;
+ }
+ i915_gem_object_set_readonly(shadow->obj);
+
+ trampoline = NULL;
+ if (CMDPARSER_USES_GGTT(eb->i915)) {
+ trampoline = shadow;
+
+ shadow = shadow_batch_pin(pool->obj,
+ &eb->engine->gt->ggtt->vm,
+ PIN_GLOBAL);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
+ shadow = trampoline;
+ goto err_shadow;
+ }
+
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+ }
+
+ err = eb_parse_pipeline(eb, shadow, trampoline);
+ if (err)
+ goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(vma);
+ eb->vma[eb->buffer_count] = i915_vma_get(shadow);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- vma->exec_flags = &eb->flags[eb->buffer_count];
+ shadow->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
-out:
- i915_gem_object_unpin_pages(shadow_batch_obj);
- return vma;
+ eb->trampoline = trampoline;
+ eb->batch_start_offset = 0;
+ eb->batch = shadow;
+
+ shadow->private = pool;
+ return 0;
+
+err_trampoline:
+ if (trampoline)
+ i915_vma_unpin(trampoline);
+err_shadow:
+ i915_vma_unpin(shadow);
+err:
+ intel_engine_pool_put(pool);
+ return err;
}
static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
- rq->file_priv = file->driver_priv;
- list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ rq->file_priv = file_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ list_add_tail(&rq->client_link, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
static int eb_submit(struct i915_execbuffer *eb)
@@ -2090,9 +2197,28 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
+ if (eb->trampoline) {
+ GEM_BUG_ON(eb->batch_start_offset);
+ err = eb->engine->emit_bb_start(eb->request,
+ eb->trampoline->node.start +
+ eb->batch_len,
+ 0, 0);
+ if (err)
+ return err;
+ }
+
+ if (intel_context_nopreempt(eb->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
+
return 0;
}
+static int num_vcs_engines(const struct drm_i915_private *i915)
+{
+ return hweight64(INTEL_INFO(i915)->engine_mask &
+ GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+}
+
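
num_vcs_engines() above is a straight population count over the VCS slice of the engine mask; the random BSD selection further down then picks an index below that count. A standalone illustration in which the VCS0 bit position and I915_MAX_VCS value are assumptions of this sketch, not taken from the real headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values live in the i915 engine enums. */
#define SKETCH_VCS0      2
#define SKETCH_MAX_VCS   4

/* Mirror of hweight64(mask & GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0)). */
static int sketch_num_vcs_engines(uint64_t engine_mask)
{
        uint64_t vcs = ((1ull << SKETCH_MAX_VCS) - 1) << SKETCH_VCS0;

        return __builtin_popcountll(engine_mask & vcs);
}

int main(void)
{
        /* RCS0 | BCS0 | VCS0 | VCS1 with the assumed bit layout above. */
        printf("%d\n", sketch_num_vcs_engines(0xfull));    /* prints 2 */
        return 0;
}
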
/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
@@ -2105,8 +2231,8 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
- file_priv->bsd_engine = atomic_fetch_xor(1,
- &dev_priv->mm.bsd_engine_dispatch_index);
+ file_priv->bsd_engine =
+ get_random_int() % num_vcs_engines(dev_priv);
return file_priv->bsd_engine;
}
@@ -2119,18 +2245,57 @@ static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_VEBOX] = VECS0
};
-static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_throttle(struct intel_context *ce)
+{
+ struct intel_ring *ring = ce->ring;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq;
+
+ /*
+ * Completely unscientific finger-in-the-air estimates for suitable
+ * maximum user request size (to avoid blocking) and then backoff.
+ */
+ if (intel_ring_update_space(ring) >= PAGE_SIZE)
+ return NULL;
+
+ /*
+ * Find a request that after waiting upon, there will be at least half
+ * the ring available. The hysteresis allows us to compete for the
+ * shared ring and should mean that we sleep less often prior to
+ * claiming our resources, but not so long that the ring completely
+ * drains before we can submit our next request.
+ */
+ list_for_each_entry(rq, &tl->requests, link) {
+ if (rq->ring != ring)
+ continue;
+
+ if (__intel_ring_space(rq->postfix,
+ ring->emit, ring->size) > ring->size / 2)
+ break;
+ }
+ if (&rq->link == &tl->requests)
+ return NULL; /* weird, we will check again later for real */
+
+ return i915_request_get(rq);
+}
+
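
eb_throttle() reuses the familiar circular-ring space heuristic: find the oldest request whose completion leaves at least half the ring free and wait on that. A compact standalone sketch of the space accounting (the real __intel_ring_space() also reserves a small amount of headroom, omitted here):

#include <assert.h>

/* Free bytes between producer 'tail' and consumer 'head' in a power-of-two
 * ring, keeping one byte unused so head == tail means "empty". */
static unsigned int ring_space(unsigned int head, unsigned int tail,
                               unsigned int size)
{
        return (head - tail - 1) & (size - 1);
}

int main(void)
{
        assert(ring_space(0, 0, 4096) == 4095);       /* empty ring */
        assert(ring_space(2048, 0, 4096) == 2047);    /* half free */
        assert(ring_space(16, 4080, 4096) == 31);     /* nearly full, wrapped */
        return 0;
}
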
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
+ struct intel_timeline *tl;
+ struct i915_request *rq;
int err;
/*
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- err = i915_terminally_wedged(eb->i915);
+ err = intel_gt_terminally_wedged(ce->engine->gt);
if (err)
return err;
+ if (unlikely(intel_context_is_banned(ce)))
+ return -EIO;
+
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
@@ -2140,14 +2305,60 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
if (err)
return err;
+ /*
+ * Take a local wakeref for preparing to dispatch the execbuf as
+ * we expect to access the hardware fairly frequently in the
+ * process, and require the engine to be kept awake between accesses.
+ * Upon dispatch, we acquire another prolonged wakeref that we hold
+ * until the timeline is idle, which in turn releases the wakeref
+ * taken on the engine, and the parent device.
+ */
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_unpin;
+ }
+
+ intel_context_enter(ce);
+ rq = eb_throttle(ce);
+
+ intel_context_timeline_unlock(tl);
+
+ if (rq) {
+ if (i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ i915_request_put(rq);
+ err = -EINTR;
+ goto err_exit;
+ }
+
+ i915_request_put(rq);
+ }
+
eb->engine = ce->engine;
eb->context = ce;
return 0;
+
+err_exit:
+ mutex_lock(&tl->mutex);
+ intel_context_exit(ce);
+ intel_context_timeline_unlock(tl);
+err_unpin:
+ intel_context_unpin(ce);
+ return err;
}
-static void eb_unpin_context(struct i915_execbuffer *eb)
+static void eb_unpin_engine(struct i915_execbuffer *eb)
{
- intel_context_unpin(eb->context);
+ struct intel_context *ce = eb->context;
+ struct intel_timeline *tl = ce->timeline;
+
+ mutex_lock(&tl->mutex);
+ intel_context_exit(ce);
+ mutex_unlock(&tl->mutex);
+
+ intel_context_unpin(ce);
}
static unsigned int
@@ -2165,7 +2376,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
return -1;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
+ if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2192,9 +2403,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
static int
-eb_select_engine(struct i915_execbuffer *eb,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+eb_pin_engine(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
{
struct intel_context *ce;
unsigned int idx;
@@ -2209,7 +2420,7 @@ eb_select_engine(struct i915_execbuffer *eb,
if (IS_ERR(ce))
return PTR_ERR(ce);
- err = eb_pin_context(eb, ce);
+ err = __eb_pin_engine(eb, ce);
intel_context_put(ce);
return err;
@@ -2351,6 +2562,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
struct drm_syncobj **fences)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
@@ -2362,7 +2574,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
- eb.i915 = to_i915(dev);
+ eb.i915 = i915;
eb.file = file;
eb.args = args;
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2379,11 +2591,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.buffer_count = args->buffer_count;
eb.batch_start_offset = args->batch_start_offset;
eb.batch_len = args->batch_len;
+ eb.trampoline = NULL;
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
+ if (INTEL_GEN(i915) >= 11)
+ return -ENODEV;
+
+ /* Return -EPERM to trigger fallback code on old binaries. */
+ if (!HAS_SECURE_BATCHES(i915))
+ return -EPERM;
+
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return -EPERM;
eb.batch_flags |= I915_DISPATCH_SECURE;
}
@@ -2427,25 +2647,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
- /*
- * Take a local wakeref for preparing to dispatch the execbuf as
- * we expect to access the hardware fairly frequently in the
- * process. Upon first dispatch, we acquire another prolonged
- * wakeref that we hold until the GPU has been idle for at least
- * 100ms.
- */
- intel_gt_pm_get(eb.i915);
+ err = eb_pin_engine(&eb, file, args);
+ if (unlikely(err))
+ goto err_context;
err = i915_mutex_lock_interruptible(dev);
if (err)
- goto err_rpm;
-
- err = eb_select_engine(&eb, file, args);
- if (unlikely(err))
- goto err_unlock;
-
- err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
- if (unlikely(err))
goto err_engine;
err = eb_relocate(&eb);
@@ -2473,34 +2680,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (eb_use_cmdparser(&eb)) {
- struct i915_vma *vma;
-
- vma = eb_parse(&eb, drm_is_current_master(file));
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_vma;
- }
-
- if (vma) {
- /*
- * Batch parsed and accepted:
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in
- * the dispatch_execbuffer implementations. We
- * specifically don't want that set on batches the
- * command parser has accepted.
- */
- eb.batch_flags |= I915_DISPATCH_SECURE;
- eb.batch_start_offset = 0;
- eb.batch = vma;
- }
- }
-
if (eb.batch_len == 0)
eb.batch_len = eb.batch->size - eb.batch_start_offset;
+ err = eb_parse(&eb);
+ if (err)
+ goto err_vma;
+
/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
@@ -2572,11 +2758,14 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* to explicitly hold another reference here.
*/
eb.request->batch = eb.batch;
+ if (eb.batch->private)
+ intel_engine_pool_mark_active(eb.batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
add_to_client(eb.request, file);
+ i915_request_get(eb.request);
i915_request_add(eb.request);
if (fences)
@@ -2592,19 +2781,22 @@ err_request:
fput(out_fence->file);
}
}
+ i915_request_put(eb.request);
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
i915_vma_unpin(eb.batch);
+ if (eb.batch->private)
+ intel_engine_pool_put(eb.batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
-err_engine:
- eb_unpin_context(&eb);
-err_unlock:
+ if (eb.trampoline)
+ i915_vma_unpin(eb.trampoline);
mutex_unlock(&dev->struct_mutex);
-err_rpm:
- intel_gt_pm_put(eb.i915);
+err_engine:
+ eb_unpin_engine(&eb);
+err_context:
i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
@@ -2670,8 +2862,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- if (!i915_gem_check_execbuffer(&exec2))
- return -EINVAL;
+ err = i915_gem_check_execbuffer(&exec2);
+ if (err)
+ return err;
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
@@ -2748,8 +2941,9 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (!i915_gem_check_execbuffer(args))
- return -EINVAL;
+ err = i915_gem_check_execbuffer(args);
+ if (err)
+ return err;
/* Allocate an extra slot for use by the command parser */
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index cf0439e6be83..2f6100ec2608 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
i915_sw_fence_init(&stub->chain, stub_notify);
dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
+ 0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
obj->base.resv, NULL,
@@ -78,7 +77,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
I915_FENCE_GFP) < 0)
goto err;
- reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
return &stub->dma;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..9cfb0e41ff06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ create_st:
goto err;
}
- /* Mark the pages as dontneed whilst they are still pinned. As soon
- * as they are unpinned they are allowed to be reaped by the shrinker,
- * and the caller is expected to repopulate - the contents of this
- * object are only valid whilst active and pinned.
- */
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
internal_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -172,6 +164,7 @@ struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
phys_addr_t size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -186,7 +179,16 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
+
+ /*
+ * Mark the object as volatile, such that the pages are marked as
+ * dontneed whilst they are still pinned. As soon as they are unpinned
+ * they are allowed to be reaped by the shrinker, and the caller is
+ * expected to repopulate - the contents of this object are only valid
+ * whilst active and pinned.
+ */
+ i915_gem_object_set_volatile(obj);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index ddc7f2a52b3e..87d8b27f426d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -28,8 +28,8 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index 000000000000..70543c83df06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ size, flags);
+}
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index 000000000000..fc3f15580fe3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags);
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 39a661927d8e..0b6a442108de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -4,15 +4,22 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/anon_inodes.h>
#include <linux/mman.h>
+#include <linux/pfn_t.h>
#include <linux/sizes.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
+
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_gem_mman.h"
+#include "i915_trace.h"
+#include "i915_user_extensions.h"
#include "i915_vma.h"
-#include "intel_drv.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
@@ -99,9 +106,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
up_write(&mm->mmap_sem);
if (IS_ERR_VALUE(addr))
goto err;
-
- /* This may race, but that's ok, it only gets set */
- WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put(obj);
@@ -144,6 +148,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 3 - Remove implicit set-domain(GTT) and synchronisation on initial
* pagefault; swapin remains transparent.
*
+ * 4 - Support multiple fault handlers per object depending on object's
+ * backing storage (a.k.a. MMAP_OFFSET).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -171,7 +178,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 3;
+ return 4;
}
static inline struct i915_ggtt_view
@@ -197,29 +204,80 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
return view;
}
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
- * from userspace. The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room. So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+ switch (err) {
+ default:
+ WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+ /* fallthrough */
+ case -EIO: /* shmemfs failure from swap device */
+ case -EFAULT: /* purged object */
+ case -ENODEV: /* bad object, how did you get here! */
+ case -ENXIO: /* unable to access backing store (on device) */
+ return VM_FAULT_SIGBUS;
+
+ case -ENOSPC: /* shmemfs allocation failure */
+ case -ENOMEM: /* our allocation failure */
+ return VM_FAULT_OOM;
+
+ case 0:
+ case -EAGAIN:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ }
+}
+
+static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+{
+ struct vm_area_struct *area = vmf->vma;
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ resource_size_t iomap;
+ int err;
+
+ /* Sanity check that we allow writing into this object */
+ if (unlikely(i915_gem_object_is_readonly(obj) &&
+ area->vm_flags & VM_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ iomap = -1;
+ if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+ iomap = obj->mm.region->iomap.base;
+ iomap -= obj->mm.region->region.start;
+ }
+
+ /* PTEs are revoked in obj->ops->put_pages() */
+ err = remap_io_sg(area,
+ area->vm_start, area->vm_end - area->vm_start,
+ obj->mm.pages->sgl, iomap);
+
+ if (area->vm_flags & VM_WRITE) {
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+
+out:
+ return i915_error_to_vmf_fault(err);
+}
+
+static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -246,34 +304,22 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
wakeref = intel_runtime_pm_get(rpm);
- srcu = i915_reset_trylock(i915);
- if (srcu < 0) {
- ret = srcu;
- goto err_rpm;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
+ ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
- ret = -EFAULT;
- goto err_unlock;
- }
+ goto err_rpm;
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
- flags = PIN_MAPPABLE;
+ flags = PIN_MAPPABLE | PIN_NOSEARCH;
if (view.type == I915_GGTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
@@ -281,18 +327,26 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
- obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- if (IS_ERR(vma) && !view.type) {
+ if (IS_ERR(vma)) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
}
+
+ /* The entire mappable GGTT is pinned? Unexpected! */
+ GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err_unlock;
+ goto err_reset;
+ }
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
@@ -308,101 +362,67 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_fence;
- /* Mark as being mmapped into userspace for later revocation */
assert_rpm_wakelock_held(rpm);
+
+ /* Mark as being mmapped into userspace for later revocation */
+ mutex_lock(&i915->ggtt.vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
- if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ mutex_unlock(&i915->ggtt.vm.mutex);
+
+ /* Track the mmo associated with the fenced vma */
+ vma->mmo = mmo;
+
+ if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
- GEM_BUG_ON(!obj->userfault_count);
- i915_vma_set_ggtt_write(vma);
+ if (write) {
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ i915_vma_set_ggtt_write(vma);
+ obj->mm.dirty = true;
+ }
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
err_reset:
- i915_reset_unlock(i915, srcu);
+ intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
err:
- switch (ret) {
- case -EIO:
- /*
- * We eat errors when the gpu is terminally wedged to avoid
- * userspace unduly crashing (gl has no provisions for mmaps to
- * fail). But any other -EIO isn't ours (e.g. swap in failure)
- * and so needs to be reported.
- */
- if (!i915_terminally_wedged(i915))
- return VM_FAULT_SIGBUS;
- /* else, fall through */
- case -EAGAIN:
- /*
- * EAGAIN means the gpu is hung and we'll wait for the error
- * handler to reset everything when re-faulting in
- * i915_mutex_lock_interruptible.
- */
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -ENOSPC:
- case -EFAULT:
- return VM_FAULT_SIGBUS;
- default:
- WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
- return VM_FAULT_SIGBUS;
- }
+ return i915_error_to_vmf_fault(ret);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
GEM_BUG_ON(!obj->userfault_count);
- obj->userfault_count = 0;
- list_del(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
-
for_each_ggtt_vma(vma, obj)
- i915_vma_unset_userfault(vma);
+ i915_vma_revoke_mmap(vma);
+
+ GEM_BUG_ON(obj->userfault_count);
}
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
+/*
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
* mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
+ * fixup by vm_fault_gtt().
*/
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
- /* Serialisation between user GTT access and our code depends upon
+ /*
+ * Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*
@@ -410,15 +430,16 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
* requirement that operations to the GGTT be made holding the RPM
* wakeref.
*/
- lockdep_assert_held(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ mutex_lock(&i915->ggtt.vm.mutex);
if (!obj->userfault_count)
goto out;
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
- /* Ensure that the CPU's PTE are revoked and there are not outstanding
+ /*
+ * Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied above by changing the PTE above *should* be
* sufficient, an extra barrier here just provides us with a bit
@@ -428,59 +449,217 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
+ mutex_unlock(&i915->ggtt.vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmap_offset *mmo, *mn;
+
+ spin_lock(&obj->mmo.lock);
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets, offset) {
+ /*
+ * vma_node_unmap for GTT mmaps handled already in
+ * __i915_gem_object_release_mmap_gtt
+ */
+ if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+ continue;
+
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_node_unmap(&mmo->vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ spin_lock(&obj->mmo.lock);
+ }
+ spin_unlock(&obj->mmo.lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_release_mmap_gtt(obj);
+ i915_gem_object_release_mmap_offset(obj);
+}
+
+static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type)
+{
+ struct rb_node *rb;
+
+ spin_lock(&obj->mmo.lock);
+ rb = obj->mmo.offsets.rb_node;
+ while (rb) {
+ struct i915_mmap_offset *mmo =
+ rb_entry(rb, typeof(*mmo), offset);
+
+ if (mmo->mmap_type == mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ return mmo;
+ }
+
+ if (mmo->mmap_type < mmap_type)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+ spin_unlock(&obj->mmo.lock);
+
+ return NULL;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+ struct rb_node *rb, **p;
+
+ spin_lock(&obj->mmo.lock);
+ rb = NULL;
+ p = &obj->mmo.offsets.rb_node;
+ while (*p) {
+ struct i915_mmap_offset *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), offset);
+
+ if (pos->mmap_type == mmo->mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ return pos;
+ }
+
+ if (pos->mmap_type < mmo->mmap_type)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&mmo->offset, rb, p);
+ rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+}
+
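
lookup_mmo()/insert_mmo() above keep one mmap offset node per mapping type in a per-object rbtree, and insert_mmo() resolves a racing insert by discarding the freshly allocated node in favour of the one already published. A userspace sketch of that optimistic-insert pattern, with a linked list standing in for the rbtree (all names here are illustrative, not i915 API):

#include <pthread.h>
#include <stdlib.h>

struct node { int type; struct node *next; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate outside the lock, re-check for a racing insert under the lock,
 * and free the loser, mirroring lookup_mmo() + insert_mmo(). */
static struct node *get_node(int type)
{
        struct node *n, *pos;

        n = calloc(1, sizeof(*n));
        if (!n)
                return NULL;
        n->type = type;

        pthread_mutex_lock(&lock);
        for (pos = head; pos; pos = pos->next) {
                if (pos->type == type) {      /* lost the race: reuse winner */
                        pthread_mutex_unlock(&lock);
                        free(n);
                        return pos;
                }
        }
        n->next = head;                       /* won the race: publish */
        head = n;
        pthread_mutex_unlock(&lock);
        return n;
}
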
+static struct i915_mmap_offset *
+mmap_offset_attach(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type,
+ struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_mmap_offset *mmo;
int err;
- err = drm_gem_create_mmap_offset(&obj->base);
+ mmo = lookup_mmo(obj, mmap_type);
+ if (mmo)
+ goto out;
+
+ mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
+ if (!mmo)
+ return ERR_PTR(-ENOMEM);
+
+ mmo->obj = obj;
+ mmo->mmap_type = mmap_type;
+ drm_vma_node_reset(&mmo->vma_node);
+
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (likely(!err))
- return 0;
+ goto insert;
/* Attempt to reap some mmap space from dead objects */
- do {
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- break;
+ err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ goto err;
- i915_gem_drain_freed_objects(i915);
- err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
- break;
+ i915_gem_drain_freed_objects(i915);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
+ if (err)
+ goto err;
- } while (flush_delayed_work(&i915->gem.retire_work));
+insert:
+ mmo = insert_mmo(obj, mmo);
+ GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
+out:
+ if (file)
+ drm_vma_node_allow(&mmo->vma_node, file);
+ return mmo;
- return err;
+err:
+ kfree(mmo);
+ return ERR_PTR(err);
}
-int
-i915_gem_mmap_gtt(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- u64 *offset)
+static int
+__assign_mmap_offset(struct drm_file *file,
+ u32 handle,
+ enum i915_mmap_type mmap_type,
+ u64 *offset)
{
struct drm_i915_gem_object *obj;
- int ret;
+ struct i915_mmap_offset *mmo;
+ int err;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
- ret = create_mmap_offset(obj);
- if (ret == 0)
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+ if (mmap_type == I915_MMAP_TYPE_GTT &&
+ i915_gem_object_never_bind_ggtt(obj)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (mmap_type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ mmo = mmap_offset_attach(obj, mmap_type, file);
+ if (IS_ERR(mmo)) {
+ err = PTR_ERR(mmo);
+ goto out;
+ }
+ *offset = drm_vma_node_offset_addr(&mmo->vma_node);
+ err = 0;
+out:
i915_gem_object_put(obj);
- return ret;
+ return err;
+}
+
+int
+i915_gem_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle,
+ u64 *offset)
+{
+ enum i915_mmap_type mmap_type;
+
+ if (boot_cpu_has(X86_FEATURE_PAT))
+ mmap_type = I915_MMAP_TYPE_WC;
+ else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+ return -ENODEV;
+ else
+ mmap_type = I915_MMAP_TYPE_GTT;
+
+ return __assign_mmap_offset(file, handle, mmap_type, offset);
}
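
i915_gem_dumb_mmap_offset() serves the generic dumb-buffer mapping path (reached via DRM_IOCTL_MODE_MAP_DUMB) and now prefers a WC mapping whenever PAT is available, falling back to the GTT aperture otherwise. The choice is invisible to userspace; a hedged sketch of the caller side, assuming the standard DRM uapi headers:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Map a dumb buffer: ask the kernel for its fake mmap offset, then mmap the
 * DRM fd at that offset. Whether this ends up write-combined or going through
 * the GTT aperture is decided by i915_gem_dumb_mmap_offset() above. */
static void *map_dumb_buffer(int drm_fd, uint32_t handle, size_t size)
{
        struct drm_mode_map_dumb arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &arg))
                return MAP_FAILED;

        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    drm_fd, arg.offset);
}
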
/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
@@ -495,12 +674,220 @@ i915_gem_mmap_gtt(struct drm_file *file,
* userspace.
*/
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_mmap_offset *args = data;
+ enum i915_mmap_type type;
+ int err;
+
+ /*
+ * Historically we failed to check args.pad and args.offset
+ * and so we cannot use those fields for user input and we cannot
+ * add -EINVAL for them as the ABI is fixed, i.e. old userspace
+ * may be feeding in garbage in those fields.
+ *
+ * if (args->pad) return -EINVAL; is verboten!
+ */
+
+ err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ NULL, 0, NULL);
+ if (err)
+ return err;
+
+ switch (args->flags) {
+ case I915_MMAP_OFFSET_GTT:
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_GTT;
+ break;
+
+ case I915_MMAP_OFFSET_WC:
+ if (!boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_WC;
+ break;
+
+ case I915_MMAP_OFFSET_WB:
+ type = I915_MMAP_TYPE_WB;
+ break;
+
+ case I915_MMAP_OFFSET_UC:
+ if (!boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_UC;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __assign_mmap_offset(file, args->handle, type, &args->offset);
+}
+
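For reference, a minimal userspace sketch of driving the new ABI (assuming DRM_IOCTL_I915_GEM_MMAP_OFFSET and struct drm_i915_gem_mmap_offset from the matching uapi header; error handling and feature detection elided):

/* Hedged example: obtain a WC fake offset for a GEM handle and mmap it. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *bo_map_wc(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = I915_MMAP_OFFSET_WC; /* or _GTT, _WB, _UC */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	/* The fake offset is only valid for mmap() on this DRM fd. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, arg.offset);
}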
+static void vm_open(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_get(obj);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct vm_ops_gtt = {
+ .fault = vm_fault_gtt,
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static const struct vm_operations_struct vm_ops_cpu = {
+ .fault = vm_fault_cpu,
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static int singleton_release(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = file->private_data;
+
+ cmpxchg(&i915->gem.mmap_singleton, file, NULL);
+ drm_dev_put(&i915->drm);
+
+ return 0;
+}
+
+static const struct file_operations singleton_fops = {
+ .owner = THIS_MODULE,
+ .release = singleton_release,
+};
+
+static struct file *mmap_singleton(struct drm_i915_private *i915)
+{
+ struct file *file;
+
+ rcu_read_lock();
+ file = i915->gem.mmap_singleton;
+ if (file && !get_file_rcu(file))
+ file = NULL;
+ rcu_read_unlock();
+ if (file)
+ return file;
+
+ file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
+ if (IS_ERR(file))
+ return file;
+
+ /* Everyone shares a single global address space */
+ file->f_mapping = i915->drm.anon_inode->i_mapping;
+
+ smp_store_mb(i915->gem.mmap_singleton, file);
+ drm_dev_get(&i915->drm);
+
+ return file;
+}
+
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_i915_gem_mmap_gtt *args = data;
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_gem_object *obj = NULL;
+ struct i915_mmap_offset *mmo = NULL;
+ struct file *anon;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ rcu_read_lock();
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (node && drm_vma_node_is_allowed(node, priv)) {
+ /*
+ * Skip 0-refcnted objects as they are in the process of being
+ * destroyed and will be invalid when the vma manager lock
+ * is released.
+ */
+ mmo = container_of(node, struct i915_mmap_offset, vma_node);
+ obj = i915_gem_object_get_rcu(mmo->obj);
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ rcu_read_unlock();
+ if (!obj)
+ return node ? -EACCES : -EINVAL;
+
+ if (i915_gem_object_is_readonly(obj)) {
+ if (vma->vm_flags & VM_WRITE) {
+ i915_gem_object_put(obj);
+ return -EINVAL;
+ }
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ anon = mmap_singleton(to_i915(dev));
+ if (IS_ERR(anon)) {
+ i915_gem_object_put(obj);
+ return PTR_ERR(anon);
+ }
+
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = mmo;
+
+ /*
+ * We keep the ref on mmo->obj, not vm_file, but we require
+ * vma->vm_file->f_mapping, see vma_link(), for later revocation.
+ * Our userspace is accustomed to having per-file resource cleanup
+ * (i.e. contexts, objects and requests) on their close(fd), which
+ * requires avoiding extraneous references to their filp, hence
+ * we prefer to use an anonymous file for their mmaps.
+ */
+ fput(vma->vm_file);
+ vma->vm_file = anon;
+
+ switch (mmo->mmap_type) {
+ case I915_MMAP_TYPE_WC:
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_WB:
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_UC:
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_GTT:
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_gtt;
+ break;
+ }
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+ return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
new file mode 100644
index 000000000000..862e01b7cb69
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_MMAN_H__
+#define __I915_GEM_MMAN_H__
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_gem_object;
+struct file;
+struct i915_mmap_offset;
+struct mutex;
+
+int i915_gem_mmap_gtt_version(void);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index be6caccce0c5..35985218bd85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -22,13 +22,17 @@
*
*/
-#include "display/intel_frontbuffer.h"
+#include <linux/sched/mm.h>
+#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_trace.h"
static struct i915_global_object {
struct i915_global base;
@@ -45,35 +49,26 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
return kmem_cache_free(global.slab_objects, obj);
}
-static void
-frontbuffer_retire(struct i915_active_request *active,
- struct i915_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, typeof(*obj), frontbuffer_write);
-
- intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops)
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key)
{
- mutex_init(&obj->mm.lock);
+ __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
+ INIT_LIST_HEAD(&obj->mm.link);
+
INIT_LIST_HEAD(&obj->lut_list);
- INIT_LIST_HEAD(&obj->batch_pool_link);
+
+ spin_lock_init(&obj->mmo.lock);
+ obj->mmo.offsets = RB_ROOT;
init_rcu_head(&obj->rcu);
obj->ops = ops;
- obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- i915_active_request_init(&obj->frontbuffer_write,
- NULL, frontbuffer_retire);
-
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
@@ -105,6 +100,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
LIST_HEAD(close);
@@ -120,6 +116,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
i915_gem_object_unlock(obj);
+ spin_lock(&obj->mmo.lock);
+ rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
+ drm_vma_node_revoke(&mmo->vma_node, file);
+ spin_unlock(&obj->mmo.lock);
+
list_for_each_entry_safe(lut, ln, &close, obj_link) {
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
@@ -146,6 +147,19 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
}
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ dma_resv_fini(&obj->base._resv);
+ i915_gem_object_free(obj);
+
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
@@ -154,125 +168,106 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
+ struct i915_mmap_offset *mmo, *mn;
trace_i915_gem_object_destroy(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
- GEM_BUG_ON(i915_gem_object_is_active(obj));
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
+ if (!list_empty(&obj->vma.list)) {
+ struct i915_vma *vma;
+
+ /*
+ * Note that the vma keeps an object reference while
+ * it is active, so it *should* not sleep while we
+ * destroy it. Our debug code errs, insisting it *might*.
+ * For the moment, play along.
+ */
+ spin_lock(&obj->vma.lock);
+ while ((vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
+ GEM_BUG_ON(vma->obj != obj);
+ spin_unlock(&obj->vma.lock);
+
+ __i915_vma_put(vma);
+
+ spin_lock(&obj->vma.lock);
+ }
+ spin_unlock(&obj->vma.lock);
}
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
- /*
- * This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- if (i915_gem_object_has_pages(obj) &&
- i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
+ i915_gem_object_release_mmap(obj);
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_del_init(&obj->mm.link);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
}
-
- mutex_unlock(&i915->drm.struct_mutex);
+ obj->mmo.offsets = RB_ROOT;
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
- GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
- if (obj->ops->release)
- obj->ops->release(obj);
-
atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
+ bitmap_free(obj->bit_17);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
- drm_gem_object_release(&obj->base);
-
- bitmap_free(obj->bit_17);
- i915_gem_object_free(obj);
+ drm_gem_free_mmap_offset(&obj->base);
- GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
- atomic_dec(&i915->mm.free_count);
+ if (obj->ops->release)
+ obj->ops->release(obj);
- cond_resched();
+ /* But keep the pointer alive for RCU-protected lookups */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
- struct llist_node *freed;
-
- /* Free the oldest, most stale object to keep the free_list short */
- freed = NULL;
- if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
- /* Only one consumer of llist_del_first() allowed */
- spin_lock(&i915->mm.free_lock);
- freed = llist_del_first(&i915->mm.free_list);
- spin_unlock(&i915->mm.free_lock);
- }
- if (unlikely(freed)) {
- freed->next = NULL;
+ struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+ if (unlikely(freed))
__i915_gem_free_objects(i915, freed);
- }
}
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, struct drm_i915_private, mm.free_work);
- struct llist_node *freed;
-
- /*
- * All file-owned VMA should have been released by this point through
- * i915_gem_close_object(), or earlier by i915_gem_context_close().
- * However, the object may also be bound into the global GTT (e.g.
- * older GPUs without per-process support, or for direct access through
- * the GTT either for the user or for scanout). Those VMA still need to
- * unbound now.
- */
-
- spin_lock(&i915->mm.free_lock);
- while ((freed = llist_del_all(&i915->mm.free_list))) {
- spin_unlock(&i915->mm.free_lock);
- __i915_gem_free_objects(i915, freed);
- if (need_resched())
- return;
-
- spin_lock(&i915->mm.free_lock);
- }
- spin_unlock(&i915->mm.free_lock);
+ i915_gem_flush_free_objects(i915);
}
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_i915_gem_object *obj =
- container_of(head, typeof(*obj), rcu);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
/*
- * We reuse obj->rcu for the freed list, so we had better not treat
- * it like a rcu_head from this point forwards. And we expect all
- * objects to be freed via this path.
+ * Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
*/
- destroy_rcu_head(&obj->rcu);
+ atomic_inc(&i915->mm.free_count);
+
+ /*
+ * This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
+ */
+ i915_gem_object_make_unshrinkable(obj);
/*
* Since we require blocking on struct_mutex to unbind the freed
@@ -288,27 +283,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
queue_work(i915->wq, &i915->mm.free_work);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- /*
- * Before we free the object, make sure any pure RCU-only
- * read-side critical sections are complete, e.g.
- * i915_gem_busy_ioctl(). For the corresponding synchronized
- * lookup see i915_gem_object_lookup_rcu().
- */
- atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
@@ -319,7 +293,6 @@ void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
assert_object_held(obj);
@@ -329,17 +302,14 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
- i915_gem_flush_ggtt_writes(dev_priv);
-
- intel_fb_obj_flush(obj,
- fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
-
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
- if (vma->iomap)
- continue;
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
+ spin_unlock(&obj->vma.lock);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
@@ -359,6 +329,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
obj->write_domain = 0;
}
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front) {
+ intel_frontbuffer_flush(front, origin);
+ intel_frontbuffer_put(front);
+ }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front) {
+ intel_frontbuffer_invalidate(front, origin);
+ intel_frontbuffer_put(front);
+ }
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfebd5706f16..9c86f2dea947 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -13,9 +13,10 @@
#include <drm/i915_drm.h>
+#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
-
#include "i915_gem_gtt.h"
+#include "i915_vma_types.h"
void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -23,12 +24,14 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops);
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key);
struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
- const void *data, size_t size);
+ const void *data, resource_size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
@@ -67,21 +70,29 @@ i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
}
static inline struct drm_i915_gem_object *
+i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
+{
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+
+ return obj;
+}
+
+static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
struct drm_i915_gem_object *obj;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
+ obj = i915_gem_object_get_rcu(obj);
rcu_read_unlock();
return obj;
}
__deprecated
-extern struct drm_gem_object *
+struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
@@ -99,22 +110,27 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
__drm_gem_object_put(&obj->base);
}
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
- reservation_object_lock(obj->base.resv, NULL);
+ dma_resv_lock(obj->base.resv, NULL);
+}
+
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
- return reservation_object_lock_interruptible(obj->base.resv, NULL);
+ return dma_resv_lock_interruptible(obj->base.resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
- reservation_object_unlock(obj->base.resv);
+ dma_resv_unlock(obj->base.resv);
}
struct dma_fence *
@@ -125,49 +141,74 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
- obj->base.vma_node.readonly = true;
+ obj->flags |= I915_BO_READONLY;
}
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
- return obj->base.vma_node.readonly;
+ return obj->flags & I915_BO_READONLY;
+}
+
+static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+ obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
+static inline bool
+i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
+ unsigned long flags)
+{
+ return obj->ops->flags & flags;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
static inline bool
-i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}
static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
- return READ_ONCE(obj->active_count);
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
- return READ_ONCE(obj->framebuffer_references);
+ return READ_ONCE(obj->frontbuffer);
}
static inline unsigned int
@@ -239,10 +280,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+ I915_MM_NORMAL = 0,
+ /*
+ * Only used by struct_mutex, when called "recursively" from
+ * direct-reclaim-esque paths. Safe because there is only ever one
+ * struct_mutex in the entire system.
+ */
+ I915_MM_SHRINKER = 1,
+ /*
+ * Used for obj->mm.lock when allocating pages. Safe because the object
+ * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+ * it. As soon as the object has pages, obj->mm.lock nests within
+ * fs_reclaim.
+ */
+ I915_MM_GET_PAGES = 1,
+};
+
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- might_lock(&obj->mm.lock);
+ might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;
@@ -285,13 +343,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj);
}
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
@@ -344,9 +396,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
@@ -373,7 +422,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
@@ -400,6 +449,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
+
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->cache_dirty)
@@ -408,7 +461,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
- return obj->pin_global; /* currently in use by HW, keep flushed */
+ /* Currently in use by HW (display engine)? Keep flushed. */
+ return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -425,6 +479,26 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
+
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+ __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+ __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index cb42e3a312e2..70809d8897cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -3,59 +3,137 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_object_blt.h"
-
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
-#include "intel_drv.h"
+#include "i915_gem_object_blt.h"
-int intel_emit_vma_fill_blt(struct i915_request *rq,
- struct i915_vma *vma,
- u32 value)
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 value)
{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (INTEL_GEN(rq->i915) >= 8) {
- *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
- *cs++ = 0;
- *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
- *cs++ = value;
- *cs++ = MI_NOOP;
- } else {
- *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
- *cs++ = 0;
- *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = vma->node.start;
- *cs++ = value;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
+ struct drm_i915_private *i915 = ce->vm->i915;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *batch;
+ u64 offset;
+ u64 count;
+ u64 rem;
+ u32 size;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+ intel_engine_pm_get(ce->engine);
+
+ count = div_u64(round_up(vma->size, block_size), block_size);
+ size = (1 + 8 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ pool = intel_engine_get_pool(ce->engine, size);
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+ goto out_pm;
+ }
+
+ cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_put;
+ }
+
+ rem = vma->size;
+ offset = vma->node.start;
+
+ do {
+ u32 size = min_t(u64, rem, block_size);
+
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = value;
+ } else {
+ *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = offset;
+ *cmd++ = value;
+ }
+
+ /* Allow ourselves to be preempted in between blocks. */
+ *cmd++ = MI_ARB_CHECK;
+
+ offset += size;
+ rem -= size;
+ } while (rem);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(ce->vm->gt);
+
+ i915_gem_object_unpin_map(pool->obj);
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
}
- intel_ring_advance(rq, cs);
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
+ batch->private = pool;
+ return batch;
- return 0;
+out_put:
+ intel_engine_pool_put(pool);
+out_pm:
+ intel_engine_pm_put(ce->engine);
+ return ERR_PTR(err);
+}
+
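A worked example of the batch sizing above, for a 64 MiB fill:

	count = 64 MiB / 8 MiB (block_size)        = 8 blocks
	size  = (1 + 8 * 8) * sizeof(u32) = 260 B  -> rounded up to one 4 KiB page

Each block budgets 8 dwords: the 7-dword gen8+ XY_COLOR_BLT (6 dwords pre-gen8) plus the MI_ARB_CHECK that makes the fill preemptible between blocks, while the leading 1 covers the closing MI_BATCH_BUFFER_END. The copy helper below budgets 11 dwords per block by the same accounting (10-dword blit plus MI_ARB_CHECK).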
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (unlikely(err))
+ return err;
+
+ return intel_engine_pool_mark_active(vma->private, rq);
+}
+
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
+{
+ i915_vma_unpin(vma);
+ intel_engine_pool_put(vma->private);
+ intel_engine_pm_put(ce->engine);
}
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
+ struct i915_vma *batch;
struct i915_vma *vma;
int err;
- /* XXX: ce->vm please */
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -69,12 +147,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
}
- rq = i915_request_create(ce);
+ batch = intel_emit_vma_fill_blt(ce, vma, value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
err = i915_request_await_object(rq, obj, true);
if (unlikely(err))
goto out_request;
@@ -86,22 +174,229 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, value);
+ err = ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err))
i915_request_skip(rq, err);
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(ce, batch);
out_unpin:
i915_vma_unpin(vma);
return err;
}
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_vma *src,
+ struct i915_vma *dst)
+{
+ struct drm_i915_private *i915 = ce->vm->i915;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *batch;
+ u64 src_offset, dst_offset;
+ u64 count, rem;
+ u32 size, *cmd;
+ int err;
+
+ GEM_BUG_ON(src->size != dst->size);
+
+ GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+ intel_engine_pm_get(ce->engine);
+
+ count = div_u64(round_up(dst->size, block_size), block_size);
+ size = (1 + 11 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ pool = intel_engine_get_pool(ce->engine, size);
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+ goto out_pm;
+ }
+
+ cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_put;
+ }
+
+ rem = src->size;
+ src_offset = src->node.start;
+ dst_offset = dst->node.start;
+
+ do {
+ size = min_t(u64, rem, block_size);
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (INTEL_GEN(i915) >= 9) {
+ *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+ *cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(dst_offset);
+ *cmd++ = upper_32_bits(dst_offset);
+ *cmd++ = 0;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = lower_32_bits(src_offset);
+ *cmd++ = upper_32_bits(src_offset);
+ } else if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(dst_offset);
+ *cmd++ = upper_32_bits(dst_offset);
+ *cmd++ = 0;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = lower_32_bits(src_offset);
+ *cmd++ = upper_32_bits(src_offset);
+ } else {
+ *cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+ *cmd++ = dst_offset;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = src_offset;
+ }
+
+ /* Allow ourselves to be preempted in between blocks. */
+ *cmd++ = MI_ARB_CHECK;
+
+ src_offset += size;
+ dst_offset += size;
+ rem -= size;
+ } while (rem);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(ce->vm->gt);
+
+ i915_gem_object_unpin_map(pool->obj);
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
+ batch->private = pool;
+ return batch;
+
+out_put:
+ intel_engine_pool_put(pool);
+out_pm:
+ intel_engine_pm_put(ce->engine);
+ return ERR_PTR(err);
+}
+
+static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+
+ return i915_request_await_object(rq, obj, write);
+}
+
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct intel_context *ce)
+{
+ struct drm_gem_object *objs[] = { &src->base, &dst->base };
+ struct i915_address_space *vm = ce->vm;
+ struct i915_vma *vma[2], *batch;
+ struct ww_acquire_ctx acquire;
+ struct i915_request *rq;
+ int err, i;
+
+ vma[0] = i915_vma_instance(src, vm, NULL);
+ if (IS_ERR(vma[0]))
+ return PTR_ERR(vma[0]);
+
+ err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
+ if (unlikely(err))
+ return err;
+
+ vma[1] = i915_vma_instance(dst, vm, NULL);
+ if (IS_ERR(vma[1]))
+ goto out_unpin_src;
+
+ err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_unpin_src;
+
+ batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin_dst;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
+ err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+ if (unlikely(err))
+ goto out_request;
+
+ for (i = 0; i < ARRAY_SIZE(vma); i++) {
+ err = move_to_gpu(vma[i], rq, i);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vma); i++) {
+ unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
+
+ err = i915_vma_move_to_active(vma[i], rq, flags);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+out_unlock:
+ drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+out_request:
+ if (unlikely(err))
+ i915_request_skip(rq, err);
+
+ i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(ce, batch);
+out_unpin_dst:
+ i915_vma_unpin(vma[1]);
+out_unpin_src:
+ i915_vma_unpin(vma[0]);
+ return err;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_object_blt.c"
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index 7ec7de6ac0c0..243a43a87824 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -8,17 +8,30 @@
#include <linux/types.h>
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_vma.h"
+
struct drm_i915_gem_object;
-struct intel_context;
-struct i915_request;
-struct i915_vma;
-int intel_emit_vma_fill_blt(struct i915_request *rq,
- struct i915_vma *vma,
- u32 value);
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 value);
+
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_vma *src,
+ struct i915_vma *dst);
+
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value);
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct intel_context *ce);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 18bf4f8d6d80..f64ad77e6b1e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -8,11 +8,13 @@
#define __I915_GEM_OBJECT_TYPES_H__
#include <drm/drm_gem.h>
+#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
+struct intel_frontbuffer;
/*
* struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -29,9 +31,11 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -59,6 +63,21 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *obj);
};
+enum i915_mmap_type {
+ I915_MMAP_TYPE_GTT = 0,
+ I915_MMAP_TYPE_WC,
+ I915_MMAP_TYPE_WB,
+ I915_MMAP_TYPE_UC,
+};
+
+struct i915_mmap_offset {
+ struct drm_vma_offset_node vma_node;
+ struct drm_i915_gem_object *obj;
+ enum i915_mmap_type mmap_type;
+
+ struct rb_node offset;
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -114,9 +133,19 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
- struct list_head batch_pool_link;
+ struct {
+ spinlock_t lock; /* Protects access to mmo offsets */
+ struct rb_root offsets;
+ } mmo;
+
I915_SELFTEST_DECLARE(struct list_head st_link);
+ unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+#define I915_BO_READONLY BIT(2)
+
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
@@ -142,9 +171,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
- struct i915_active_request frontbuffer_write;
+ struct intel_frontbuffer __rcu *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
@@ -154,18 +181,34 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- unsigned int active_count;
- /** Count of how many global VMA are currently pinned for use by HW */
- unsigned int pin_global;
struct {
- struct mutex lock; /* protects the pages and their use */
+ /*
+ * Protects the pages and their use. Do not use directly, but
+ * instead go through the pin/unpin interfaces.
+ */
+ struct mutex lock;
atomic_t pages_pin_count;
+ atomic_t shrink_pin;
+
+ /**
+ * Memory region for this object.
+ */
+ struct intel_memory_region *region;
+ /**
+ * List of memory region blocks allocated for this object.
+ */
+ struct list_head blocks;
+ /**
+ * Element within memory_region->objects or region->purgeable
+ * if the object is marked as DONTNEED. Access is protected by
+ * region->obj_lock.
+ */
+ struct list_head region_link;
struct sg_table *pages;
void *mapping;
- /* TODO: whack some of this into the error state */
struct i915_page_sizes {
/**
* The sg mask of the pages sg_table. i.e the mask of
@@ -226,9 +269,6 @@ struct drm_i915_gem_object {
bool quirked:1;
} mm;
- /** References from framebuffers, locks out tiling changes. */
- unsigned int framebuffer_references;
-
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 65eb430cedba..54aca5c9101e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,8 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -18,6 +20,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
+
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
obj->write_domain = 0;
@@ -71,6 +76,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
list = &i915->mm.shrink_list;
list_add_tail(&obj->mm.link, list);
+ atomic_set(&obj->mm.shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
@@ -101,7 +107,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return err;
@@ -150,37 +156,30 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_WILLNEED;
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- list_del(&obj->mm.link);
- i915->mm.shrink_count--;
- i915->mm.shrink_memory -= obj->base.size;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
+ i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
+ unmap_object(obj, page_mask_bits(obj->mm.mapping));
obj->mm.mapping = NULL;
}
@@ -190,8 +189,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
int err;
@@ -202,12 +200,14 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(atomic_read(&obj->bind_count));
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
+ mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
err = -EBUSY;
goto unlock;
}
+ i915_gem_object_release_mmap_offset(obj);
+
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
@@ -234,36 +234,44 @@ unlock:
return err;
}
+static inline pte_t iomap_pte(resource_size_t base,
+ dma_addr_t offset,
+ pgprot_t prot)
+{
+ return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
+}
+
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
- unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
- struct sgt_iter sgt_iter;
- struct page *page;
- struct page *stack_pages[32];
- struct page **pages = stack_pages;
- unsigned long i = 0;
+ pte_t *stack[32], **mem;
+ struct vm_struct *area;
pgprot_t pgprot;
- void *addr;
+
+ if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+ return NULL;
/* A single page can always be kmapped */
- if (n_pages == 1 && type == I915_MAP_WB)
+ if (n_pte == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
- if (n_pages > ARRAY_SIZE(stack_pages)) {
+ mem = stack;
+ if (n_pte > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
- pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
+ mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return NULL;
}
- for_each_sgt_page(page, sgt_iter, sgt)
- pages[i++] = page;
-
- /* Check that we have the expected number of pages */
- GEM_BUG_ON(i != n_pages);
+ area = alloc_vm_area(obj->base.size, mem);
+ if (!area) {
+ if (mem != stack)
+ kvfree(mem);
+ return NULL;
+ }
switch (type) {
default:
@@ -276,12 +284,31 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
break;
}
- addr = vmap(pages, n_pages, 0, pgprot);
- if (pages != stack_pages)
- kvfree(pages);
+ if (i915_gem_object_has_struct_page(obj)) {
+ struct sgt_iter iter;
+ struct page *page;
+ pte_t **ptes = mem;
+
+ for_each_sgt_page(page, iter, sgt)
+ **ptes++ = mk_pte(page, pgprot);
+ } else {
+ resource_size_t iomap;
+ struct sgt_iter iter;
+ pte_t **ptes = mem;
+ dma_addr_t addr;
- return addr;
+ iomap = obj->mm.region->iomap.base;
+ iomap -= obj->mm.region->region.start;
+
+ for_each_sgt_daddr(addr, iter, sgt)
+ **ptes++ = iomap_pte(iomap, addr, pgprot);
+ }
+
+ if (mem != stack)
+ kvfree(mem);
+
+ return area->addr;
}
/* get, pin, and map the pages of the object into kernel space */
@@ -289,14 +316,16 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
+ unsigned int flags;
bool pinned;
void *ptr;
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+ if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return ERR_PTR(err);
@@ -325,10 +354,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
+ unmap_object(obj, ptr);
ptr = obj->mm.mapping = NULL;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 2deac933cf59..b1b7c1b3038a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -13,8 +13,10 @@
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_gem_region.h"
#include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -60,7 +62,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
@@ -132,16 +134,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
drm_pci_free(obj->base.dev, obj->phys_handle);
}
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+static void phys_release(struct drm_i915_gem_object *obj)
{
- i915_gem_object_unpin_pages(obj);
+ fput(obj->base.filp);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
- .release = i915_gem_object_release_phys,
+
+ .release = phys_release,
};
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
@@ -158,11 +160,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops != &i915_gem_shmem_ops)
return -EINVAL;
- err = i915_gem_object_unbind(obj);
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
if (err)
return err;
- mutex_lock(&obj->mm.lock);
+ mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (obj->mm.madv != I915_MADV_WILLNEED) {
err = -EFAULT;
@@ -190,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages))
+ if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+ i915_gem_object_release_memory_region(obj);
+ }
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 914b5d4112bb..c8264eb036bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -5,149 +5,19 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
-#include "i915_globals.h"
-
-static void call_idle_barriers(struct intel_engine_cs *engine)
-{
- struct llist_node *node, *next;
-
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- struct i915_active_request *active =
- container_of((struct list_head *)node,
- typeof(*active), link);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, NULL);
- }
-}
-
-static void i915_gem_park(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id) {
- call_idle_barriers(engine); /* cleanup after wedging */
- i915_gem_batch_pool_fini(&engine->batch_pool);
- }
-
- i915_timelines_park(i915);
- i915_vma_parked(i915);
-
- i915_globals_park();
-}
-
-static void idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.idle_work);
- bool park;
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- mutex_lock(&i915->drm.struct_mutex);
-
- intel_wakeref_lock(&i915->gt.wakeref);
- park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
- intel_wakeref_unlock(&i915->gt.wakeref);
- if (park)
- i915_gem_park(i915);
- else
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-static void retire_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.retire_work.work);
-
- /* Come back later if the device is busy... */
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- i915_retire_requests(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
-
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static int pm_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct drm_i915_private *i915 =
- container_of(nb, typeof(*i915), gem.pm_notifier);
-
- switch (action) {
- case INTEL_GT_UNPARK:
- i915_globals_unpark();
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
- break;
-
- case INTEL_GT_PARK:
- queue_work(i915->wq, &i915->gem.idle_work);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
-{
- bool result = !i915_terminally_wedged(i915);
-
- do {
- if (i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- /* XXX hide warning from gem_eio */
- if (i915_modparams.reset) {
- dev_err(i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /*
- * Forcibly cancel outstanding work and leave
- * the gpu quiet.
- */
- i915_gem_set_wedged(i915);
- result = false;
- }
- } while (i915_retire_requests(i915) && result);
-
- GEM_BUG_ON(i915->gt.awake);
- return result;
-}
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915)
-{
- return switch_to_kernel_context_sync(i915);
-}
void i915_gem_suspend(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
-
/*
* We have to flush all the executing contexts to main memory so
* that they can saved in the hibernation image. To ensure the last
@@ -157,22 +27,9 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- switch_to_kernel_context_sync(i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- /*
- * Assert that we successfully flushed all the work and
- * reset the GPU back to its idle, low power state.
- */
- GEM_BUG_ON(i915->gt.awake);
- flush_work(&i915->gem.idle_work);
-
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ intel_gt_suspend_prepare(&i915->gt);
i915_gem_drain_freed_objects(i915);
-
- intel_uc_suspend(i915);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -212,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
+ intel_gt_suspend_late(&i915->gt);
+
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
LIST_HEAD(keep);
@@ -236,60 +95,16 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- intel_uc_sanitize(i915);
- i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
-
- WARN_ON(i915->gt.awake);
-
- mutex_lock(&i915->drm.struct_mutex);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
-
- if (i915_gem_init_hw(i915))
- goto err_wedged;
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- if (intel_gt_resume(i915))
- goto err_wedged;
-
- intel_uc_resume(i915);
-
- /* Always reload a context for powersaving. */
- if (!i915_gem_load_power_context(i915))
- goto err_wedged;
-
-out_unlock:
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- mutex_unlock(&i915->drm.struct_mutex);
- return;
-
-err_wedged:
- if (!i915_reset_failed(i915)) {
- dev_err(i915->drm.dev,
- "Failed to re-initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(i915);
- }
- goto out_unlock;
-}
-
-void i915_gem_init__pm(struct drm_i915_private *i915)
-{
- INIT_WORK(&i915->gem.idle_work, idle_work_handler);
- INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
-
- i915->gem.pm_notifier.notifier_call = pm_notifier;
- blocking_notifier_chain_register(&i915->gt.pm_notifications,
- &i915->gem.pm_notifier);
+ intel_gt_resume(&i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 6f7d5d11ac3b..26b78dbdc225 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -12,9 +12,6 @@
struct drm_i915_private;
struct work_struct;
-void i915_gem_init__pm(struct drm_i915_private *i915);
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915);
void i915_gem_resume(struct drm_i915_private *i915);
void i915_gem_idle_work_handler(struct work_struct *work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index 000000000000..1515384d7e0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+ obj->mm.dirty = false;
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+ struct list_head *blocks = &obj->mm.blocks;
+ resource_size_t size = obj->base.size;
+ resource_size_t prev_end;
+ struct i915_buddy_block *block;
+ unsigned int flags;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ int ret;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ flags |= I915_ALLOC_CONTIGUOUS;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+ if (ret)
+ goto err_free_sg;
+
+ GEM_BUG_ON(list_empty(blocks));
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ prev_end = (resource_size_t)-1;
+
+ list_for_each_entry(block, blocks, link) {
+ u64 block_size, offset;
+
+ block_size = min_t(u64, size,
+ i915_buddy_block_size(&mem->mm, block));
+ offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(overflows_type(block_size, sg->length));
+
+ if (offset != prev_end ||
+ add_overflows_t(typeof(sg->length), sg->length, block_size)) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = block_size;
+
+ sg->length = block_size;
+
+ st->nents++;
+ } else {
+ sg->length += block_size;
+ sg_dma_len(sg) += block_size;
+ }
+
+ prev_end = offset + block_size;
+ }
+
+ sg_page_sizes |= sg->length;
+ sg_mark_end(sg);
+ i915_sg_trim(st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_free_sg:
+ sg_free_table(st);
+ kfree(st);
+ return ret;
+}
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&obj->mm.blocks);
+ obj->mm.region = intel_memory_region_get(mem);
+
+ obj->flags |= flags;
+ if (obj->base.size <= mem->min_page_size)
+ obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
+
+ mutex_lock(&mem->objects.lock);
+
+ if (obj->flags & I915_BO_ALLOC_VOLATILE)
+ list_add(&obj->mm.region_link, &mem->objects.purgeable);
+ else
+ list_add(&obj->mm.region_link, &mem->objects.list);
+
+ mutex_unlock(&mem->objects.lock);
+}
+
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+
+ mutex_lock(&mem->objects.lock);
+ list_del(&obj->mm.region_link);
+ mutex_unlock(&mem->objects.lock);
+
+ intel_memory_region_put(mem);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * NB: Our use of resource_size_t for the size stems from using struct
+ * resource for the mem->region. We might need to revisit this in the
+ * future.
+ */
+
+ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
+ if (!mem)
+ return ERR_PTR(-ENODEV);
+
+ size = round_up(size, mem->min_page_size);
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ */
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = mem->ops->create_object(mem, size, flags);
+ if (!IS_ERR(obj))
+ trace_i915_gem_object_create(obj);
+
+ return obj;
+}
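
The block walk in i915_gem_object_get_pages_buddy() above coalesces physically adjacent buddy blocks into a single scatterlist entry: a new entry is only opened when the next block does not start where the previous one ended, or when extending the current entry would overflow sg->length. A minimal user-space sketch of the same run-coalescing idea, with the overflow check omitted (struct block, struct run, coalesce() and the sample data are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

struct block { uint64_t offset, size; };	/* analogous to an i915_buddy_block  */
struct run   { uint64_t start, len;  };		/* analogous to one scatterlist entry */

/* Merge blocks into runs; a new run starts only on a discontinuity. */
static unsigned int coalesce(const struct block *b, unsigned int nblocks,
			     struct run *out)
{
	uint64_t prev_end = UINT64_MAX;		/* same sentinel role as (resource_size_t)-1 */
	unsigned int nruns = 0;
	unsigned int i;

	for (i = 0; i < nblocks; i++) {
		if (b[i].offset != prev_end) {		/* discontiguous: open a new run */
			out[nruns].start = b[i].offset;
			out[nruns].len = b[i].size;
			nruns++;
		} else {				/* contiguous: extend current run */
			out[nruns - 1].len += b[i].size;
		}
		prev_end = b[i].offset + b[i].size;
	}
	return nruns;
}

int main(void)
{
	const struct block blocks[] = {
		{ 0x0000, 0x1000 }, { 0x1000, 0x1000 },	/* adjacent: merge into one run */
		{ 0x4000, 0x2000 },			/* gap: second run */
	};
	struct run runs[3];
	unsigned int i, n = coalesce(blocks, 3, runs);

	for (i = 0; i < n; i++)
		printf("run %u: start=0x%llx len=0x%llx\n", i,
		       (unsigned long long)runs[i].start,
		       (unsigned long long)runs[i].len);
	return 0;
}

Run as-is, this prints two runs: the first two blocks merge into a single 8KiB run, while the third block starts a new run after the gap.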
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
new file mode 100644
index 000000000000..f2ff6f8bff74
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_REGION_H__
+#define __I915_GEM_REGION_H__
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+struct sg_table;
+
+int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
+void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19d9ecdb2894..a2a980d9d241 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,9 +7,12 @@
#include <linux/pagevec.h>
#include <linux/swap.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_trace.h"
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
@@ -25,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -51,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (page_count > totalram_pages())
+ if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -414,6 +418,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
+static void shmem_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_release_memory_region(obj);
+
+ fput(obj->base.filp);
+}
+
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,11 +435,13 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.writeback = shmem_writeback,
.pwrite = shmem_pwrite,
+
+ .release = shmem_release,
};
-static int create_shmem(struct drm_i915_private *i915,
- struct drm_gem_object *obj,
- size_t size)
+static int __create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ resource_size_t size)
{
unsigned long flags = VM_NORESERVE;
struct file *filp;
@@ -447,31 +460,24 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
}
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+static struct drm_i915_gem_object *
+create_shmem(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- /* There is a prevalence of the assumption that we fit the object's
- * page count inside a 32bit _signed_ variable. Let's document this and
- * catch if we ever need to fix it. In the meantime, if you do spot
- * such a local variable, please consider fixing!
- */
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
- ret = create_shmem(i915, &obj->base, size);
+ ret = __create_shmem(i915, &obj->base, size);
if (ret)
goto fail;
@@ -486,7 +492,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -510,7 +516,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
i915_gem_object_set_cache_coherency(obj, cache_level);
- trace_i915_gem_object_create(obj);
+ i915_gem_object_init_memory_region(obj, mem, 0);
return obj;
@@ -519,14 +525,22 @@ fail:
return ERR_PTR(ret);
}
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ size, 0);
+}
+
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size)
+ const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct file *file;
- size_t offset;
+ resource_size_t offset;
int err;
obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
@@ -569,3 +583,37 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
+
+static int init_shmem(struct intel_memory_region *mem)
+{
+ int err;
+
+ err = i915_gemfs_init(mem->i915);
+ if (err) {
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
+ err);
+ }
+
+ intel_memory_region_set_name(mem, "system");
+
+ return 0; /* Don't error, we can simply fallback to the kernel mnt */
+}
+
+static void release_shmem(struct intel_memory_region *mem)
+{
+ i915_gemfs_fini(mem->i915);
+}
+
+static const struct intel_memory_region_ops shmem_region_ops = {
+ .init = init_shmem,
+ .release = release_shmem,
+ .create_object = create_shmem,
+};
+
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915, 0,
+ totalram_pages() << PAGE_SHIFT,
+ PAGE_SIZE, 0,
+ &shmem_region_ops);
+}
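
init_shmem(), release_shmem() and shmem_region_ops above illustrate the backend pattern this series introduces: a const intel_memory_region_ops table with init/release/create_object hooks, registered through intel_memory_region_create() together with the region's base, size and minimum page size. A sketch of how another backend could be wired up with the same signatures as shown above (the "fake" names, the SZ_256M size and the reuse of create_shmem() are purely illustrative):

static int init_fake(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "fake");
	return 0;				/* nothing to probe in this sketch */
}

static void release_fake(struct intel_memory_region *mem)
{
}

static const struct intel_memory_region_ops fake_region_ops = {
	.init = init_fake,
	.release = release_fake,
	.create_object = create_shmem,		/* reuse the shmem object constructor */
};

/* Register a 256M region starting at 0 with a 4K minimum page size. */
struct intel_memory_region *i915_gem_fake_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915, 0, SZ_256M, PAGE_SIZE, 0,
					  &fake_region_ops);
}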
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3a926a8755c6..f7e4b39c734f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -16,40 +16,6 @@
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915,
- unsigned int flags,
- bool *unlock)
-{
- struct mutex *m = &i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(m)) {
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
-
- case MUTEX_TRYLOCK_FAILED:
- *unlock = false;
- if (flags & I915_SHRINK_ACTIVE &&
- mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
- *unlock = true;
- return *unlock;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
- }
-
- BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
- if (!unlock)
- return;
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -61,7 +27,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (!i915_gem_object_is_shrinkable(obj))
return false;
- /* Only report true if by unbinding the object and putting its pages
+ /*
+ * Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
@@ -72,26 +39,26 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
- /* If any vma are "permanently" pinned, it will prevent us from
- * reclaiming the obj->mm.pages. We only allow scanout objects to claim
- * a permanent pin, along with a few others like the context objects.
- * To simplify the scan, and to avoid walking the list of vma under the
- * object, we just check the count of its permanently pinned.
- */
- if (READ_ONCE(obj->pin_global))
- return false;
-
- /* We can only return physical pages to the system if we can either
+ /*
+ * We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
+ unsigned long shrink)
{
- if (i915_gem_object_unbind(obj) == 0)
- __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ unsigned long flags;
+
+ flags = 0;
+ if (shrink & I915_SHRINK_ACTIVE)
+ flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+
+ if (i915_gem_object_unbind(obj, flags) == 0)
+ __i915_gem_object_put_pages(obj);
+
return !i915_gem_object_has_pages(obj);
}
@@ -154,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- bool unlock;
-
- if (!shrinker_lock(i915, shrink, &unlock))
- return 0;
/*
* When shrinking the active list, we should also consider active
@@ -169,7 +132,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
trace_i915_gem_shrink(i915, target, shrink);
- i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
@@ -230,8 +192,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
- (i915_gem_object_is_active(obj) ||
- i915_gem_object_is_framebuffer(obj)))
+ i915_gem_object_is_framebuffer(obj))
continue;
if (!(shrink & I915_SHRINK_BOUND) &&
@@ -246,10 +207,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- if (unsafe_drop_pages(obj)) {
+ if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
- mutex_lock_nested(&obj->mm.lock,
- I915_MM_SHRINKER);
+ mutex_lock(&obj->mm.lock);
if (!i915_gem_object_has_pages(obj)) {
try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
@@ -269,10 +229,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- i915_retire_requests(i915);
-
- shrinker_unlock(i915, unlock);
-
if (nr_scanned)
*nr_scanned += scanned;
return count;
@@ -342,19 +298,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
- bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, 0, &unlock))
- return SHRINK_STOP;
-
freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_WRITEBACK);
+ I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@@ -369,8 +320,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
- shrinker_unlock(i915, unlock);
-
return sc->nr_scanned ? freed : SHRINK_STOP;
}
@@ -387,6 +336,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -422,16 +372,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
- bool unlock;
-
- if (!shrinker_lock(i915, 0, &unlock))
- return NOTIFY_DONE;
-
- /* Force everything onto the inactive lists */
- if (i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT))
- goto out;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -448,27 +388,16 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!vma->iomap || i915_vma_is_active(vma))
continue;
- mutex_unlock(&i915->ggtt.vm.mutex);
- if (i915_vma_unbind(vma) == 0)
+ if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
- mutex_lock(&i915->ggtt.vm.mutex);
}
mutex_unlock(&i915->ggtt.vm.mutex);
-out:
- shrinker_unlock(i915, unlock);
-
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
-/**
- * i915_gem_shrinker_register - Register the i915 shrinker
- * @i915: i915 device
- *
- * This function registers and sets up the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_register(struct drm_i915_private *i915)
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
@@ -483,13 +412,7 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915)
WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
-/**
- * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
- * @i915: i915 device
- *
- * This function unregisters the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
@@ -512,24 +435,75 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_acquire(GFP_KERNEL);
+ mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
+ mutex_release(&mutex->dep_map, _RET_IP_);
+
+ fs_reclaim_release(GFP_KERNEL);
+
+ if (unlock)
+ mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
+}
+
+#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
/*
- * As we invariably rely on the struct_mutex within the shrinker,
- * but have a complicated recursion dance, taint all the mutexes used
- * within the shrinker with the struct_mutex. For completeness, we
- * taint with all subclass of struct_mutex, even though we should
- * only need tainting by I915_MM_NORMAL to catch possible ABBA
- * deadlocks from using struct_mutex inside @mutex.
+ * We can only be called while the pages are pinned or when
+ * the pages are released. If pinned, we should only be called
+ * from a single caller under controlled conditions; and on release
+	 * only one caller may release us. Neither of the two may cross.
*/
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_SHRINKER, 0, _RET_IP_);
+ if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+ return;
- mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
- mutex_release(&mutex->dep_map, 0, _RET_IP_);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+ !list_empty(&obj->mm.link)) {
+ list_del_init(&obj->mm.link);
+ i915->mm.shrink_count--;
+ i915->mm.shrink_memory -= obj->base.size;
+ }
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+}
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
+ struct list_head *head)
+{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
- fs_reclaim_release(GFP_KERNEL);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ if (!i915_gem_object_is_shrinkable(obj))
+ return;
- if (unlock)
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+ if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+ return;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+
+ list_add_tail(&obj->mm.link, head);
+ i915->mm.shrink_count++;
+ i915->mm.shrink_memory += obj->base.size;
+
+ }
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+}
+
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.shrink_list);
+}
+
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.purge_list);
}
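
i915_gem_object_make_unshrinkable() and __i915_gem_object_make_shrinkable() above implement a counted pin on the shrinker lists: the lock-free fast path adjusts mm.shrink_pin with atomic_add_unless(), so i915->mm.obj_lock is only taken for the 0<->1 transitions that actually move the object on or off a list. A small user-space analogue of that fast-path/slow-path split using C11 atomics, with a pthread mutex standing in for the spinlock (struct pin_state and the helper names are illustrative, not driver code):

#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pin_state {
	atomic_int shrink_pin;			/* number of "unshrinkable" holders */
	pthread_mutex_t lock;			/* stands in for i915->mm.obj_lock  */
	bool on_shrink_list;
};

/* Increment unless the counter is zero; mirrors atomic_add_unless(v, 1, 0). */
static bool inc_unless_zero(atomic_int *v)
{
	int cur = atomic_load(v);
	while (cur != 0)
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;
	return false;
}

/* Decrement unless the counter is one; caller must hold at least one pin. */
static bool dec_unless_one(atomic_int *v)
{
	int cur = atomic_load(v);
	while (cur != 1)
		if (atomic_compare_exchange_weak(v, &cur, cur - 1))
			return true;
	return false;
}

static void make_unshrinkable(struct pin_state *s)
{
	if (inc_unless_zero(&s->shrink_pin))
		return;				/* fast path: already pinned */

	pthread_mutex_lock(&s->lock);		/* slow path: 0 -> 1 transition */
	if (atomic_fetch_add(&s->shrink_pin, 1) == 0 && s->on_shrink_list) {
		s->on_shrink_list = false;
		printf("removed from shrink list\n");
	}
	pthread_mutex_unlock(&s->lock);
}

static void make_shrinkable(struct pin_state *s)
{
	if (dec_unless_one(&s->shrink_pin))
		return;				/* fast path: still pinned by others */

	pthread_mutex_lock(&s->lock);		/* slow path: 1 -> 0 transition */
	if (atomic_fetch_sub(&s->shrink_pin, 1) == 1 && !s->on_shrink_list) {
		s->on_shrink_list = true;
		printf("added to shrink list\n");
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct pin_state s = { .lock = PTHREAD_MUTEX_INITIALIZER,
			       .on_shrink_list = true };

	make_unshrinkable(&s);	/* 0 -> 1: takes the lock, delists */
	make_unshrinkable(&s);	/* 1 -> 2: lock-free fast path     */
	make_shrinkable(&s);	/* 2 -> 1: lock-free, stays off the list */
	make_shrinkable(&s);	/* 1 -> 0: takes the lock, relisted */
	return 0;
}

The shape is the same as kref_get_unless_zero()-style counting: shared list state is only touched under the lock when the counter actually crosses zero.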
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
new file mode 100644
index 000000000000..b397d7785789
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_SHRINKER_H__
+#define __I915_GEM_SHRINKER_H__
+
+#include <linux/bits.h>
+
+struct drm_i915_private;
+struct mutex;
+
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
+ unsigned long target,
+ unsigned long *nr_scanned,
+ unsigned flags);
+#define I915_SHRINK_UNBOUND BIT(0)
+#define I915_SHRINK_BOUND BIT(1)
+#define I915_SHRINK_ACTIVE BIT(2)
+#define I915_SHRINK_VMAPS BIT(3)
+#define I915_SHRINK_WRITEBACK BIT(4)
+
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex);
+
+#endif /* __I915_GEM_SHRINKER_H__ */
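
The flag bits above select what i915_gem_shrink() may touch: whether bound and/or unbound objects are considered, whether active objects may be unbound, whether vmaps may be reclaimed, and whether shrunken objects are written back to swap. A short usage sketch modelled on the OOM-notifier call elsewhere in this patch (the wrapper function is illustrative):

/* Illustrative only: reclaim as much as possible, as the OOM path does. */
static unsigned long reclaim_everything(struct drm_i915_private *i915)
{
	return i915_gem_shrink(i915, -1UL, NULL,
			       I915_SHRINK_ACTIVE |
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_WRITEBACK);
}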
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index de1fab2058ec..451f3078d60d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,7 +10,9 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_gem_stolen.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -24,48 +26,49 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start, u64 end)
{
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return -ENODEV;
/* WaSkipStolenMemoryFirstPage:bdw+ */
- if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+ if (INTEL_GEN(i915) >= 8 && start < 4096)
start = 4096;
- mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
size, alignment, 0,
start, end, DRM_MM_INSERT_BEST);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_unlock(&i915->mm.stolen_lock);
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+ return i915_gem_stolen_insert_node_in_range(i915, node, size,
alignment, 0, U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node)
{
- mutex_lock(&dev_priv->mm.stolen_lock);
+ mutex_lock(&i915->mm.stolen_lock);
drm_mm_remove_node(node);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_unlock(&i915->mm.stolen_lock);
}
-static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+static int i915_adjust_stolen(struct drm_i915_private *i915,
struct resource *dsm)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct resource *r;
if (dsm->start == 0 || dsm->end <= dsm->start)
@@ -77,14 +80,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
*/
/* Make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_GEN(dev_priv) <= 4 &&
- !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
+ if (INTEL_GEN(i915) <= 4 &&
+ !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
struct resource stolen[2] = {*dsm, *dsm};
struct resource ggtt_res;
resource_size_t ggtt_start;
- ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN(dev_priv, 4))
+ ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
+ if (IS_GEN(i915, 4))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -118,7 +121,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+ r = devm_request_mem_region(i915->drm.dev, dsm->start,
resource_size(dsm),
"Graphics Stolen Memory");
if (r == NULL) {
@@ -131,14 +134,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* reservation starting from 1 instead of 0.
* There's also BIOS with off-by-one on the other end.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+ r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
resource_size(dsm) - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN(dev_priv, 3)) {
+ if (!r && !IS_GEN(i915, 3)) {
DRM_ERROR("conflict detected with stolen region: %pR\n",
dsm);
@@ -149,25 +152,27 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return;
- drm_mm_takedown(&dev_priv->mm.stolen);
+ drm_mm_takedown(&i915->mm.stolen);
}
-static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
- CTG_STOLEN_RESERVED :
- ELK_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore,
+ IS_GM45(i915) ?
+ CTG_STOLEN_RESERVED :
+ ELK_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
- IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+ IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
@@ -176,7 +181,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+ WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
@@ -188,11 +193,12 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -220,12 +226,13 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -248,11 +255,12 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
*base = stolen_top - *size;
}
-static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -274,11 +282,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void chv_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -306,12 +315,13 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -326,10 +336,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);
+ u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
@@ -354,75 +365,84 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
- mutex_init(&dev_priv->mm.stolen_lock);
+ mutex_init(&i915->mm.stolen_lock);
- if (intel_vgpu_active(dev_priv)) {
- DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+ if (intel_vgpu_active(i915)) {
+ dev_notice(i915->drm.dev,
+ "%s, disabling use of stolen memory\n",
+ "iGVT-g active");
return 0;
}
- if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
- DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
+ dev_notice(i915->drm.dev,
+ "%s, disabling use of stolen memory\n",
+ "DMAR active");
return 0;
}
if (resource_size(&intel_graphics_stolen_res) == 0)
return 0;
- dev_priv->dsm = intel_graphics_stolen_res;
+ i915->dsm = intel_graphics_stolen_res;
- if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
+ if (i915_adjust_stolen(i915, &i915->dsm))
return 0;
- GEM_BUG_ON(dev_priv->dsm.start == 0);
- GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+ GEM_BUG_ON(i915->dsm.start == 0);
+ GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);
- stolen_top = dev_priv->dsm.end + 1;
+ stolen_top = i915->dsm.end + 1;
reserved_base = stolen_top;
reserved_size = 0;
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(i915)) {
case 2:
case 3:
break;
case 4:
- if (!IS_G4X(dev_priv))
+ if (!IS_G4X(i915))
break;
/* fall through */
case 5:
- g4x_get_stolen_reserved(dev_priv,
+ g4x_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 6:
- gen6_get_stolen_reserved(dev_priv,
+ gen6_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 7:
- if (IS_VALLEYVIEW(dev_priv))
- vlv_get_stolen_reserved(dev_priv,
+ if (IS_VALLEYVIEW(i915))
+ vlv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
- gen7_get_stolen_reserved(dev_priv,
+ gen7_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 8:
case 9:
case 10:
- if (IS_LP(dev_priv))
- chv_get_stolen_reserved(dev_priv,
+ if (IS_LP(i915))
+ chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
- bdw_get_stolen_reserved(dev_priv,
+ bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
- case 11:
default:
- icl_get_stolen_reserved(dev_priv, &reserved_base,
+ MISSING_CASE(INTEL_GEN(i915));
+ /* fall-through */
+ case 11:
+ case 12:
+ icl_get_stolen_reserved(i915, uncore,
+ &reserved_base,
&reserved_size);
break;
}
@@ -439,12 +459,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_size = 0;
}
- dev_priv->dsm_reserved =
- (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+ i915->dsm_reserved =
+ (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+ if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
- &dev_priv->dsm_reserved, &dev_priv->dsm);
+ &i915->dsm_reserved, &i915->dsm);
return 0;
}
@@ -453,14 +473,14 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&dev_priv->dsm) >> 10,
- ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
+ (u64)resource_size(&i915->dsm) >> 10,
+ ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
- dev_priv->stolen_usable_size =
- resource_size(&dev_priv->dsm) - reserved_total;
+ i915->stolen_usable_size =
+ resource_size(&i915->dsm) - reserved_total;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
+ drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
return 0;
}
@@ -469,11 +489,11 @@ static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
resource_size_t offset, resource_size_t size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -493,7 +513,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
sg_dma_len(sg) = size;
return st;
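
The comment above relies on stolen memory being one contiguous physical range with no struct page backing, so only the DMA address and length of a single scatterlist entry are ever consumed. A stripped-down sketch of that wrapping, mirroring i915_pages_create_for_stolen() (the function name and parameters are illustrative):

/*
 * Sketch: wrap a contiguous physical range [base, base + size) in a
 * single-entry sg_table. Error handling is trimmed to the essentials.
 */
static struct sg_table *wrap_contiguous_range(dma_addr_t base,
					      resource_size_t size)
{
	struct sg_table *st;
	struct scatterlist *sg;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {	/* one entry is enough */
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;		/* no struct pages; length only      */
	sg_dma_address(sg) = base;	/* device view: contiguous range     */
	sg_dma_len(sg) = size;

	return st;
}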
@@ -524,14 +544,14 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
- __i915_gem_object_unpin_pages(obj);
+ i915_gem_object_release_memory_region(obj);
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
}
@@ -542,83 +562,133 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_mm_node *stolen)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
+ int err = -ENOMEM;
obj = i915_gem_object_alloc();
- if (obj == NULL)
- return NULL;
+ if (!obj)
+ goto err;
- drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+ drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- if (i915_gem_object_pin_pages(obj))
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
goto cleanup;
+ i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
cleanup:
i915_gem_object_free(obj);
- return NULL;
+err:
+ return ERR_PTR(err);
}
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
+ if (!drm_mm_initialized(&i915->mm.stolen))
+ return ERR_PTR(-ENODEV);
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+ ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
if (ret) {
- kfree(stolen);
- return NULL;
+ obj = ERR_PTR(ret);
+ goto err_free;
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj)
- return obj;
+ obj = __i915_gem_object_create_stolen(mem, stolen);
+ if (IS_ERR(obj))
+ goto err_remove;
+
+ return obj;
- i915_gem_stolen_remove_node(dev_priv, stolen);
+err_remove:
+ i915_gem_stolen_remove_node(i915, stolen);
+err_free:
kfree(stolen);
- return NULL;
+ return obj;
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
+ size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+ intel_memory_region_set_name(mem, "stolen");
+
+ /*
+ * Initialise stolen early so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+ i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+ .init = init_stolen,
+ .release = release_stolen,
+ .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ PAGE_SIZE, 0,
+ &i915_region_stolen_ops);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
resource_size_t gtt_offset,
resource_size_t size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ if (!drm_mm_initialized(&i915->mm.stolen))
+ return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
&stolen_offset, &gtt_offset, &size);
@@ -627,29 +697,29 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
stolen->start = stolen_offset;
stolen->size = size;
- mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+ mutex_unlock(&i915->mm.stolen_lock);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
- return NULL;
+ return ERR_PTR(ret);
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj == NULL) {
+ obj = __i915_gem_object_create_stolen(mem, stolen);
+ if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
- return NULL;
+ return obj;
}
/* Some objects just need physical mem from stolen space */
@@ -671,22 +741,26 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
+ mutex_unlock(&ggtt->vm.mutex);
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->pages);
vma->pages = obj->mm.pages;
- vma->flags |= I915_VMA_GLOBAL_BIND;
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+
+ set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
__i915_vma_set_map_and_fenceable(vma);
- mutex_lock(&ggtt->vm.mutex);
- list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+ list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
@@ -698,5 +772,5 @@ err_pages:
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
- return NULL;
+ return ERR_PTR(ret);
}
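
A recurring change in the stolen hunks above is the move from returning NULL on failure to returning ERR_PTR() codes (-ENODEV, -EINVAL, -ENOMEM, ...), so callers can tell why an allocation failed. A caller-side sketch of the new convention (the wrapper function and the SZ_1M size are illustrative):

/* Illustrative caller: propagate the specific error instead of guessing. */
static int use_some_stolen_memory(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_stolen(i915, SZ_1M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* e.g. -ENODEV if stolen is unavailable */

	/* ... use obj ... */

	i915_gem_object_put(obj);
	return 0;
}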
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
new file mode 100644
index 000000000000..c1040627fbf3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_STOLEN_H__
+#define __I915_GEM_STOLEN_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_mm_node;
+struct drm_i915_gem_object;
+
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment);
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment, u64 start,
+ u64 end);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node);
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size);
+
+#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index adb3074d9ce2..540ef0551789 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -41,7 +41,7 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
long ret;
/* ABI: return -EIO if already wedged */
- ret = i915_terminally_wedged(to_i915(dev));
+ ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
if (ret)
return ret;
@@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- if (target) {
+ if (target && xchg(&target->file_priv, NULL))
list_del(&target->client_link);
- target->file_priv = NULL;
- }
target = request;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ca0c2f451742..6c7825a2dc2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
/**
@@ -181,22 +182,25 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma;
- int ret;
+ int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
+ mutex_lock(&ggtt->vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&ggtt->vm.mutex);
- return 0;
+ return ret;
}
int
@@ -212,7 +216,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
- lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
@@ -233,16 +236,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
- err = i915_gem_object_fence_prepare(obj, tiling, stride);
- if (err)
- return err;
-
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ return err;
+ }
+
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@@ -313,10 +318,14 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -340,9 +349,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
else
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -364,12 +373,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
}
}
- err = mutex_lock_interruptible(&dev->struct_mutex);
- if (err)
- goto err;
-
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
- mutex_unlock(&dev->struct_mutex);
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
@@ -402,6 +406,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {
@@ -415,10 +422,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 2caa594322bc..580319b7bf1a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -12,11 +12,10 @@
#include <drm/i915_drm.h>
+#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
struct i915_mm_struct {
struct mm_struct *mm;
@@ -93,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
- struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
@@ -130,32 +128,14 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
- if (!unlock) {
- unlock = &mn->mm->i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(unlock)) {
- default:
- case MUTEX_TRYLOCK_FAILED:
- if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
- i915_gem_object_put(obj);
- return -EINTR;
- }
- /* fall through */
- case MUTEX_TRYLOCK_SUCCESS:
- break;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- unlock = ERR_PTR(-EEXIST);
- break;
- }
- }
-
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj,
+ I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_BARRIER);
if (ret == 0)
- ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ ret = __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
if (ret)
- goto unlock;
+ return ret;
spin_lock(&mn->lock);
@@ -168,10 +148,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
-unlock:
- if (!IS_ERR_OR_NULL(unlock))
- mutex_unlock(unlock);
-
return ret;
}
@@ -427,7 +403,7 @@ struct get_pages_work {
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+ struct page **pvec, unsigned long num_pages)
{
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
@@ -473,9 +449,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- const int npages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+ unsigned long pinned;
struct page **pvec;
- int pinned, ret;
+ int ret;
ret = -ENOMEM;
pinned = 0;
@@ -484,31 +461,36 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
+ int locked = 0;
if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
if (mmget_not_zero(mm)) {
- down_read(&mm->mmap_sem);
while (pinned < npages) {
+ if (!locked) {
+ down_read(&mm->mmap_sem);
+ locked = 1;
+ }
ret = get_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL, NULL);
+ pvec + pinned, NULL, &locked);
if (ret < 0)
break;
pinned += ret;
}
- up_read(&mm->mmap_sem);
+ if (locked)
+ up_read(&mm->mmap_sem);
mmput(mm);
}
}
- mutex_lock(&obj->mm.lock);
+ mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (obj->userptr.work == &work->work) {
struct sg_table *pages = ERR_PTR(ret);
@@ -578,7 +560,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
@@ -662,8 +644,16 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
+ /*
+ * We always mark objects as dirty when they are used by the GPU,
+ * just in case. However, if we set the vma as being read-only we know
+ * that the object will never have been written to.
+ */
+ if (i915_gem_object_is_readonly(obj))
+ obj->mm.dirty = false;
+
for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
+ if (obj->mm.dirty && trylock_page(page)) {
/*
* As this may not be anonymous memory (e.g. shmem)
* but exist on a real mapping, we have to lock
@@ -671,8 +661,20 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
* the page reference is not sufficient to
* prevent the inode from being truncated.
* Play safe and take the lock.
+ *
+ * However...!
+ *
+ * The mmu-notifier can be invalidated for a
+	 * migrate_page, that is already holding the lock
+	 * on the page. Such a try_to_unmap() will result
+	 * in us calling put_pages() and so recursively trying
+ * to lock the page. We avoid that deadlock with
+ * a trylock_page() and in exchange we risk missing
+ * some page dirtying.
*/
- set_page_dirty_lock(page);
+ set_page_dirty(page);
+ unlock_page(page);
+ }
mark_page_accessed(page);
put_page(page);
@@ -702,6 +704,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_NO_GGTT |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
@@ -749,6 +752,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
@@ -776,14 +780,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- struct i915_address_space *vm;
-
/*
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- vm = dev_priv->kernel_context->vm;
- if (!vm || !vm->has_read_only)
+ if (!dev_priv->gt.vm->has_read_only)
return -ENODEV;
}
@@ -792,7 +793,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
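/*
 * An illustrative sketch (hypothetical helper, not part of the series) of
 * the dirty write-back pattern the new userptr put_pages comments describe:
 * pages are only dirtied under trylock_page() so a caller that already
 * holds the page lock (e.g. an mmu-notifier invalidation issued from page
 * migration) cannot deadlock; a failed trylock merely risks losing a dirty
 * bit. The helper name and the assumption that the array was filled by
 * get_user_pages_remote() are illustrative only.
 */
static void userptr_writeback_pages_sketch(struct page **pages,
					   unsigned long npages,
					   bool dirty)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];

		/* Dirty only if we can take the page lock without blocking */
		if (dirty && trylock_page(page)) {
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
		put_page(page);
	}
}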
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 26ec6579b7cd..8af55cd3e690 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -31,11 +31,10 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
}
static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int flags,
long timeout)
{
- unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
bool prune_fences = false;
@@ -44,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -73,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (excl && timeout >= 0)
@@ -83,15 +82,12 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
/*
* Opportunistically prune the fences iff we know they have *all* been
- * signaled and that the reservation object has not been changed (i.e.
- * no new fences have been added).
+ * signaled.
*/
- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
- if (reservation_object_trylock(resv)) {
- if (!__read_seqcount_retry(&resv->seq, seq))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
+ if (prune_fences && dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
}
return timeout;
@@ -144,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -156,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
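/*
 * An illustrative sketch (hypothetical helper, not part of the series) of
 * the opportunistic fence pruning that the wait path above now performs
 * with the dma_resv API: only when the trylock succeeds and every fence
 * has already signalled are the fences replaced by an empty exclusive slot.
 */
static void prune_signaled_fences_sketch(struct dma_resv *resv)
{
	if (!dma_resv_trylock(resv))
		return;

	/* Check all fences (shared and exclusive) before pruning */
	if (dma_resv_test_signaled_rcu(resv, true))
		dma_resv_add_excl_fence(resv, NULL);

	dma_resv_unlock(resv);
}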
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 099f3397aada..5e6e8c91ab38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -20,31 +20,18 @@ int i915_gemfs_init(struct drm_i915_private *i915)
if (!type)
return -ENODEV;
- gemfs = kern_mount(type);
- if (IS_ERR(gemfs))
- return PTR_ERR(gemfs);
-
/*
- * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
- * likely 2M. Note that within_size may overallocate huge-pages, if say
- * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
- * memory pressure shmem should split any huge-pages which can be
- * shrunk.
+ * By creating our own shmemfs mountpoint, we can pass in
+ * mount flags that better match our usecase.
+ *
+ * One example, although it is probably better with a per-file
+ * control, is selecting huge page allocations ("huge=within_size").
+ * Currently unused due to bandwidth issues (slow reads) on Broadwell+.
*/
- if (has_transparent_hugepage()) {
- struct super_block *sb = gemfs->mnt_sb;
- /* FIXME: Disabled until we get W/A for read BW issue. */
- char options[] = "huge=never";
- int flags = 0;
- int err;
-
- err = sb->s_op->remount_fs(sb, &flags, options);
- if (err) {
- kern_unmount(gemfs);
- return err;
- }
- }
+ gemfs = kern_mount(type);
+ if (IS_ERR(gemfs))
+ return PTR_ERR(gemfs);
i915->mm.gemfs = gemfs;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 3c5d17b2b670..fa16f2c3f3ac 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -12,10 +12,14 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
unsigned long nreal = obj->scratch / PAGE_SIZE;
- struct scatterlist *sg;
+ struct sgt_iter sgt_iter;
+ struct page *page;
- for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
- __free_page(sg_page(sg));
+ for_each_sgt_page(page, sgt_iter, pages) {
+ __free_page(page);
+ if (!--nreal)
+ break;
+ }
sg_free_table(pages);
kfree(pages);
@@ -70,7 +74,6 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
err:
huge_free_pages(obj, pages);
-
return -ENOMEM;
#undef GFP
}
@@ -96,6 +99,7 @@ huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
dma_addr_t dma_size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -111,7 +115,7 @@ huge_gem_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops);
+ i915_gem_object_init(obj, &huge_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
index 549c1394bcdc..b8cf31b7bf14 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
@@ -7,6 +7,12 @@
#ifndef __HUGE_GEM_OBJECT_H
#define __HUGE_GEM_OBJECT_H
+#include <linux/types.h>
+
+#include "gem/i915_gem_object_types.h"
+
+struct drm_i915_private;
+
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b74729b6f353..9311250d7d6f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -8,13 +8,18 @@
#include "i915_selftest.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
#include "igt_gem_utils.h"
#include "mock_context.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
+#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
@@ -111,8 +116,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
- obj->mm.madv = I915_MADV_DONTNEED;
-
GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -133,7 +136,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
huge_pages_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -148,6 +150,7 @@ huge_pages_object(struct drm_i915_private *i915,
u64 size,
unsigned int page_mask)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -164,7 +167,9 @@ huge_pages_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops);
+ i915_gem_object_init(obj, &huge_page_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -225,8 +230,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -259,8 +262,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
sg_dma_address(sg) = page_size;
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
@@ -279,7 +280,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
{
fake_free_huge_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -297,6 +297,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = {
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -315,9 +316,11 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
if (single)
- i915_gem_object_init(obj, &fake_ops_single);
+ i915_gem_object_init(obj, &fake_ops_single, &lock_class);
else
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -331,7 +334,12 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
- int err = 0;
+ int err;
+
+ /* We have to wait for the async bind to complete before our asserts */
+ err = i915_vma_sync(vma);
+ if (err)
+ return err;
if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
@@ -445,6 +453,88 @@ out_device:
return err;
}
+static int igt_mock_memory_region_huge_pages(void *arg)
+{
+ const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
+ struct i915_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int bit;
+ int err = 0;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("%s failed to create memory region\n", __func__);
+ return PTR_ERR(mem);
+ }
+
+ for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ unsigned int page_size = BIT(bit);
+ resource_size_t phys;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+ obj = i915_gem_object_create_region(mem, page_size,
+ flags[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_region;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ phys = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(phys, page_size)) {
+ pr_err("%s addr misaligned(%pa) page_size=%u\n",
+ __func__, &phys, page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+ __func__, vma->page_sizes.gtt,
+ page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_put_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_region;
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_region:
+ intel_memory_region_put(mem);
+ return err;
+}
+
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
@@ -560,7 +650,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -588,7 +678,7 @@ static void close_object_list(struct list_head *objects,
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -858,7 +948,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -877,129 +967,25 @@ out_object_put:
return err;
}
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- const int gen = INTEL_GEN(i915);
- unsigned int count = vma->size >> PAGE_SHIFT;
- struct drm_i915_gem_object *obj;
- struct i915_vma *batch;
- unsigned int size;
- u32 *cmd;
- int n;
- int err;
-
- size = (1 + 4 * count) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = val;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = val;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = val;
- }
-
- offset += PAGE_SIZE;
- }
-
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
-
- i915_gem_object_unpin_map(obj);
-
- batch = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return batch;
-
-err:
- i915_gem_object_put(obj);
-
- return ERR_PTR(err);
-}
-
-static int gpu_write(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 dword,
- u32 value)
+static int gpu_write(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dw,
+ u32 val)
{
- struct i915_request *rq;
- struct i915_vma *batch;
int err;
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
- batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto err_request;
-
- i915_vma_lock(vma);
- err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err)
- goto err_request;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
-err_request:
+ i915_gem_object_lock(vma->obj);
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ i915_gem_object_unlock(vma->obj);
if (err)
- i915_request_skip(rq, err);
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ return err;
- return err;
+ return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
+ vma->size >> PAGE_SHIFT, val);
}
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int
+__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
unsigned int needs_flush;
unsigned long n;
@@ -1031,19 +1017,54 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static int __igt_write_huge(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
+ int err;
+
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
+ err = -EINVAL;
+ break;
+ }
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
+ }
+
+ i915_gem_object_unpin_map(obj);
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ if (i915_gem_object_has_struct_page(obj))
+ return __cpu_check_shmem(obj, dword, val);
+ else
+ return __cpu_check_vmap(obj, dword, val);
+}
+
+static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1057,7 +1078,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
* The ggtt may have some pages reserved so
* refrain from erroring out.
*/
- if (err == -ENOSPC && i915_is_ggtt(vm))
+ if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
goto out_vma_close;
@@ -1067,7 +1088,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
if (err)
goto out_vma_unpin;
- err = gpu_write(vma, ctx, engine, dword, val);
+ err = gpu_write(ce, vma, dword, val);
if (err) {
pr_err("gpu-write failed at offset=%llx\n", offset);
goto out_vma_unpin;
@@ -1082,22 +1103,20 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
- i915_vma_destroy(vma);
-
+ __i915_vma_put(vma);
return err;
}
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
- static struct intel_engine_cs *engines[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
- unsigned int id;
+ unsigned int count;
u64 max;
u64 num;
u64 size;
@@ -1111,19 +1130,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
- max = div_u64((vm->total - size), max_page_size);
-
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
+ count = 0;
+ max = U64_MAX;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- }
- engines[n++] = engine;
- }
+ max = min(max, ce->vm->total);
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
if (!n)
return 0;
@@ -1132,23 +1150,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
* randomized order, let's also make feeding to the same engine a few
* times in succession a possibility by enlarging the permutation array.
*/
- order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+ order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64(max - size, max_page_size);
+
/*
* Try various offsets in an ascending/descending fashion until we
* timeout -- we want to avoid issues hidden by effectively always using
* offset = 0.
*/
i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
for_each_prime_number_from(num, 0, max) {
u64 offset_low = num * max_page_size;
u64 offset_high = (max - num) * max_page_size;
u32 dword = offset_in_page(num) / 4;
+ struct intel_context *ce;
- engine = engines[order[i] % n];
- i = (i + 1) % (n * I915_NUM_ENGINES);
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
/*
* In order to utilize 64K pages we need to both pad the vma
@@ -1160,22 +1185,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
offset_low = round_down(offset_low,
I915_GTT_PAGE_SIZE_2M);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ err = __igt_write_huge(ce, obj, size, offset_low,
dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ err = __igt_write_huge(ce, obj, size, offset_high,
dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
- "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high,
+ "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+ __func__, ce->engine->name, offset_low, offset_high,
max_page_size))
break;
}
+ i915_gem_context_unlock_engines(ctx);
kfree(order);
@@ -1267,7 +1293,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1283,131 +1309,235 @@ out_device:
return err;
}
-static int igt_ppgtt_internal_huge(void *arg)
+typedef struct drm_i915_gem_object *
+(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static struct drm_i915_gem_object *
+igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("%s missing THP support, skipping\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return i915_gem_object_create_shmem(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_internal(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return huge_pages_object(i915, size, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_lmem(i915, size, flags);
+}
+
+static u32 igt_random_size(struct rnd_state *prng,
+ u32 min_page_size,
+ u32 max_page_size)
+{
+ u64 mask;
+ u32 size;
+
+ GEM_BUG_ON(!is_power_of_2(min_page_size));
+ GEM_BUG_ON(!is_power_of_2(max_page_size));
+ GEM_BUG_ON(min_page_size < PAGE_SIZE);
+ GEM_BUG_ON(min_page_size > max_page_size);
+
+ mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
+ size = prandom_u32_state(prng) & mask;
+ if (size < min_page_size)
+ size |= min_page_size;
+
+ return size;
+}
+
+static int igt_ppgtt_smoke_huge(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_64K,
- SZ_128K,
- SZ_256K,
- SZ_512K,
- SZ_1M,
- SZ_2M,
+ I915_RND_STATE(prng);
+ struct {
+ igt_create_fn fn;
+ u32 min;
+ u32 max;
+ } backends[] = {
+ { igt_create_internal, SZ_64K, SZ_2M, },
+ { igt_create_shmem, SZ_64K, SZ_32M, },
+ { igt_create_local, SZ_64K, SZ_1G, },
};
- int i;
int err;
+ int i;
/*
- * Sanity check that the HW uses huge pages correctly through internal
- * -- ensure that our writes land in the right place.
+ * Sanity check that the HW uses huge pages correctly through our
+ * various backends -- ensure that our writes land in the right place.
*/
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ u32 min = backends[i].min;
+ u32 max = backends[i].max;
+ u32 size = max;
+try_again:
+ size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ obj = backends[i].fn(i915, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -E2BIG) {
+ size >>= 1;
+ goto try_again;
+ } else if (err == -ENODEV) {
+ err = 0;
+ continue;
+ }
+
+ return err;
+ }
err = i915_gem_object_pin_pages(obj);
- if (err)
+ if (err) {
+ if (err == -ENXIO || err == -E2BIG) {
+ i915_gem_object_put(obj);
+ size >>= 1;
+ goto try_again;
+ }
goto out_put;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
- pr_info("internal unable to allocate huge-page(s) with size=%u\n",
- size);
+ if (obj->mm.page_sizes.phys < min) {
+ pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
+ __func__, size, i);
+ err = -ENOMEM;
goto out_unpin;
}
err = igt_write_huge(ctx, obj);
if (err) {
- pr_err("internal write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ pr_err("%s write-huge failed with size=%u, i=%d\n",
+ __func__, size, i);
}
-
+out_unpin:
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
+out_put:
i915_gem_object_put(obj);
- }
- return 0;
+ if (err == -ENOMEM || err == -ENXIO)
+ err = 0;
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+ if (err)
+ break;
- return err;
-}
+ cond_resched();
+ }
-static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
-{
- return i915->mm.gemfs && has_transparent_hugepage();
+ return err;
}
-static int igt_ppgtt_gemfs_huge(void *arg)
+static int igt_ppgtt_sanity_check(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_2M,
- SZ_4M,
- SZ_8M,
- SZ_16M,
- SZ_32M,
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct {
+ igt_create_fn fn;
+ unsigned int flags;
+ } backends[] = {
+ { igt_create_system, 0, },
+ { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
- int i;
+ struct {
+ u32 size;
+ u32 pages;
+ } combos[] = {
+ { SZ_64K, SZ_64K },
+ { SZ_2M, SZ_2M },
+ { SZ_2M, SZ_64K },
+ { SZ_2M - SZ_64K, SZ_64K },
+ { SZ_2M - SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_2M | SZ_4K },
+ { SZ_2M + SZ_64K, SZ_2M | SZ_64K },
+ };
+ int i, j;
int err;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
/*
- * Sanity check that the HW uses huge pages correctly through gemfs --
- * ensure that our writes land in the right place.
+ * Sanity check that the HW behaves with a limited set of combinations.
+ * We already have a bunch of randomised testing, which should give us
+ * a decent amount of variation between runs; however, we should keep
+ * this to limit the chances of introducing a temporary regression by
+ * testing the most obvious cases that might make something blow up.
*/
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- return 0;
- }
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ for (j = 0; j < ARRAY_SIZE(combos); ++j) {
+ struct drm_i915_gem_object *obj;
+ u32 size = combos[j].size;
+ u32 pages = combos[j].pages;
+
+ obj = backends[i].fn(i915, size, backends[i].flags);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -ENODEV) {
+ pr_info("Device lacks local memory, skipping\n");
+ err = 0;
+ break;
+ }
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ return err;
+ }
- obj = i915_gem_object_create_shmem(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
+ GEM_BUG_ON(pages > obj->base.size);
+ pages = pages & supported;
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
- pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
- size);
- goto out_unpin;
- }
+ if (pages)
+ obj->mm.page_sizes.sg = pages;
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("gemfs write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ err = igt_write_huge(ctx, obj);
+
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+ i915_gem_object_put(obj);
+
+ if (err) {
+ pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
+ __func__, size, pages, i, j);
+ goto out;
+ }
}
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
+ cond_resched();
}
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+out:
+ if (err == -ENOMEM)
+ err = 0;
return err;
}
@@ -1417,12 +1547,15 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int n;
int first, last;
- int err;
+ int err = 0;
/*
* Make sure there's no funny business when doing a PIN_UPDATE -- in the
@@ -1432,9 +1565,10 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!vm || !i915_vm_is_4lvl(vm)) {
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
- return 0;
+ goto out_vm;
}
first = ilog2(I915_GTT_PAGE_SIZE_64K);
@@ -1487,7 +1621,7 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
}
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
@@ -1518,11 +1652,24 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
+ n = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(ctx);
if (err)
goto out_unpin;
- err = cpu_check(obj, 0, 0xdeadbeaf);
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
out_unpin:
i915_vma_unpin(vma);
@@ -1530,6 +1677,8 @@ out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1539,7 +1688,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1589,6 +1738,7 @@ out_put:
out_restore:
i915->mm.gemfs = gemfs;
+ i915_vm_put(vm);
return err;
}
@@ -1596,11 +1746,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
- int err;
+ unsigned int n;
+ int err = 0;
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
@@ -1609,12 +1762,14 @@ static int igt_shrink_thp(void *arg)
if (!igt_can_allocate_thp(i915)) {
pr_info("missing THP support, skipping\n");
- return 0;
+ goto out_vm;
}
obj = i915_gem_object_create_shmem(i915, SZ_2M);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@@ -1635,11 +1790,20 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
+ n = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
+ if (err)
+ goto out_close;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@@ -1662,7 +1826,11 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_close;
- err = cpu_check(obj, 0, 0xdeadbeaf);
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+ if (err)
+ break;
+ }
out_unpin:
i915_vma_unpin(vma);
@@ -1670,6 +1838,8 @@ out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1678,6 +1848,7 @@ int i915_gem_huge_page_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
@@ -1694,8 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1720,58 +1890,52 @@ out_close:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
drm_dev_put(&dev_priv->drm);
-
return err;
}
-int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_shrink_thp),
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_exhaust_huge),
- SUBTEST(igt_ppgtt_gemfs_huge),
- SUBTEST(igt_ppgtt_internal_huge),
+ SUBTEST(igt_ppgtt_smoke_huge),
+ SUBTEST(igt_ppgtt_sanity_check),
};
- struct drm_file *file;
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
+ struct i915_address_space *vm;
+ struct file *file;
int err;
- if (!HAS_PPGTT(dev_priv)) {
+ if (!HAS_PPGTT(i915)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- file = mock_file(dev_priv);
+ file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- ctx = live_context(dev_priv, file);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- if (ctx->vm)
- ctx->vm->scrub_64K = true;
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->scrub_64K, true);
+ mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
-out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- mock_file_free(dev_priv, file);
-
+out_file:
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index f3a5eb807c1c..b972be165e85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,14 +5,17 @@
#include "i915_selftest.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_client_fill(void *arg)
+static int __igt_client_fill(struct intel_engine_cs *engine)
{
- struct intel_context *ce = arg;
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -21,16 +24,21 @@ static int igt_client_fill(void *arg)
prandom_seed_state(&prng, i915_selftest.random_seed);
+ intel_engine_pm_get(engine);
do {
- u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;
sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
- pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+ pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
- obj = i915_gem_object_create_internal(i915, sz);
+ obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -52,7 +60,8 @@ static int igt_client_fill(void *arg)
* values after we do the set_to_cpu_domain and pick it up as a
* test failure.
*/
- memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(obj) / sizeof(u32));
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -63,24 +72,13 @@ static int igt_client_fill(void *arg)
if (err)
goto err_unpin;
- /*
- * XXX: For now do the wait without the object resv lock to
- * ensure we don't deadlock.
- */
- err = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_unpin;
-
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
goto err_unpin;
- for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -100,28 +98,46 @@ err_unpin:
err_put:
i915_gem_object_put(obj);
err_flush:
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
if (err == -ENOMEM)
err = 0;
+ intel_engine_pm_put(engine);
return err;
}
+static int igt_client_fill(void *arg)
+{
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ int err;
+
+ engine = intel_engine_lookup_user(arg,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ err = __igt_client_fill(engine);
+ if (err == -ENOMEM)
+ err = 0;
+ if (err)
+ return err;
+ } while (1);
+}
+
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_client_fill),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
- return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 8f22d3f18422..3f6079e1dfb6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -6,12 +6,20 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
+
#include "i915_selftest.h"
#include "selftests/i915_random.h"
-static int cpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+struct context {
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+};
+
+static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
@@ -19,11 +27,11 @@ static int cpu_set(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_write(obj, &needs_clflush);
+ err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -36,14 +44,12 @@ static int cpu_set(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int cpu_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
@@ -51,11 +57,11 @@ static int cpu_get(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_read(obj, &needs_clflush);
+ err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -65,136 +71,137 @@ static int cpu_get(struct drm_i915_gem_object *obj,
*v = *cpu;
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int gtt_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int gtt_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int wc_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int wc_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
*v = map[offset / sizeof(*map)];
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int gpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
+ rq = intel_engine_create_kernel_request(ctx->engine);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -207,12 +214,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return PTR_ERR(cs);
}
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(ctx->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(ctx->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
@@ -226,7 +233,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
intel_ring_advance(rq, cs);
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_vma_unpin(vma);
@@ -235,29 +244,34 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return err;
}
-static bool always_valid(struct drm_i915_private *i915)
+static bool always_valid(struct context *ctx)
{
return true;
}
-static bool needs_fence_registers(struct drm_i915_private *i915)
+static bool needs_fence_registers(struct context *ctx)
{
- return !i915_terminally_wedged(i915);
+ struct intel_gt *gt = ctx->engine->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return false;
+
+ return gt->ggtt->num_fences;
}
-static bool needs_mi_store_dword(struct drm_i915_private *i915)
+static bool needs_mi_store_dword(struct context *ctx)
{
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(ctx->engine->gt))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS0]);
+ return intel_engine_can_store_dword(ctx->engine);
}
static const struct igt_coherency_mode {
const char *name;
- int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
- int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
- bool (*valid)(struct drm_i915_private *i915);
+ int (*set)(struct context *ctx, unsigned long offset, u32 v);
+ int (*get)(struct context *ctx, unsigned long offset, u32 *v);
+ bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
{ "gtt", gtt_set, gtt_get, needs_fence_registers },
@@ -266,19 +280,37 @@ static const struct igt_coherency_mode {
{ },
};
+static struct intel_engine_cs *
+random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ for_each_uabi_engine(engine, i915)
+ if (count-- == 0)
+ return engine;
+
+ return NULL;
+}
+
static int igt_gem_coherency(void *arg)
{
const unsigned int ncachelines = PAGE_SIZE/64;
- I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
+ I915_RND_STATE(prng);
+ struct context ctx;
int err = 0;
- /* We repeatedly write, overwrite and read from a sequence of
+ /*
+ * We repeatedly write, overwrite and read from a sequence of
* cachelines in order to try and detect incoherency (unflushed writes
* from either the CPU or GPU). Each setter/getter uses our cache
* domain API which should prevent incoherency.
@@ -292,34 +324,40 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ ctx.engine = random_engine(i915, &prng);
+ if (!ctx.engine) {
+ err = -ENODEV;
+ goto out_free;
+ }
+ pr_info("%s: using %s\n", __func__, ctx.engine->name);
+ intel_engine_pm_get(ctx.engine);
+
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
- if (!over->valid(i915))
+ if (!over->valid(&ctx))
continue;
for (write = igt_coherency_mode; write->name; write++) {
if (!write->set)
continue;
- if (!write->valid(i915))
+ if (!write->valid(&ctx))
continue;
for (read = igt_coherency_mode; read->name; read++) {
if (!read->get)
continue;
- if (!read->valid(i915))
+ if (!read->valid(&ctx))
continue;
for_each_prime_number_from(count, 1, ncachelines) {
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto unlock;
+ ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(ctx.obj)) {
+ err = PTR_ERR(ctx.obj);
+ goto out_pm;
}
i915_random_reorder(offsets, ncachelines, &prng);
@@ -327,7 +365,7 @@ static int igt_gem_coherency(void *arg)
values[n] = prandom_u32_state(&prng);
for (n = 0; n < count; n++) {
- err = over->set(obj, offsets[n], ~values[n]);
+ err = over->set(&ctx, offsets[n], ~values[n]);
if (err) {
pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
n, count, over->name, err);
@@ -336,7 +374,7 @@ static int igt_gem_coherency(void *arg)
}
for (n = 0; n < count; n++) {
- err = write->set(obj, offsets[n], values[n]);
+ err = write->set(&ctx, offsets[n], values[n]);
if (err) {
pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
n, count, write->name, err);
@@ -347,7 +385,7 @@ static int igt_gem_coherency(void *arg)
for (n = 0; n < count; n++) {
u32 found;
- err = read->get(obj, offsets[n], &found);
+ err = read->get(&ctx, offsets[n], &found);
if (err) {
pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
n, count, read->name, err);
@@ -365,20 +403,20 @@ static int igt_gem_coherency(void *arg)
}
}
- i915_gem_object_put(obj);
+ i915_gem_object_put(ctx.obj);
}
}
}
}
-unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_pm:
+ intel_engine_pm_put(ctx.engine);
+out_free:
kfree(offsets);
return err;
put_object:
- i915_gem_object_put(obj);
- goto unlock;
+ i915_gem_object_put(ctx.obj);
+ goto out_pm;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index eaa2b16574c7..7fc46861a54d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -7,6 +7,9 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -24,16 +27,20 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
+{
+ /* single threaded, private ctx */
+ return rcu_dereference_protected(ctx->vm, true);
+}
+
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- struct drm_file *file;
+ struct file *file;
unsigned long n;
int err = -ENODEV;
@@ -52,43 +59,49 @@ static int live_nop_switch(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_file;
}
for (n = 0; n < nctx; n++) {
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
- goto out_unlock;
+ goto out_file;
}
}
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq = NULL;
unsigned long end_time, prime;
ktime_t times[2] = {};
times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) {
- rq = igt_request_alloc(ctx[n], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_unlock;
+ struct i915_request *this;
+
+ this = igt_request_alloc(ctx[n], engine);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out_file;
}
- i915_request_add(rq);
+ if (rq) {
+ i915_request_await_dma_fence(this, &rq->fence);
+ i915_request_put(rq);
+ }
+ rq = i915_request_get(this);
+ i915_request_add(this);
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
+ i915_request_put(rq);
err = -EIO;
- goto out_unlock;
+ goto out_file;
}
+ i915_request_put(rq);
times[1] = ktime_get_raw();
@@ -97,17 +110,25 @@ static int live_nop_switch(void *arg)
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
times[1] = ktime_get_raw();
+ rq = NULL;
for (n = 0; n < prime; n++) {
- rq = igt_request_alloc(ctx[n % nctx], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_unlock;
+ struct i915_request *this;
+
+ this = igt_request_alloc(ctx[n % nctx], engine);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out_file;
+ }
+
+ if (rq) { /* Force submission order */
+ i915_request_await_dma_fence(this, &rq->fence);
+ i915_request_put(rq);
}
/*
@@ -124,14 +145,18 @@ static int live_nop_switch(void *arg)
* for latency.
*/
- i915_request_add(rq);
+ rq = i915_request_get(this);
+ i915_request_add(this);
}
+ GEM_BUG_ON(!rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
+ i915_request_put(rq);
break;
}
+ i915_request_put(rq);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 2)
@@ -143,7 +168,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ goto out_file;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -151,75 +176,237 @@ static int live_nop_switch(void *arg)
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
+out_file:
+ fput(file);
return err;
}
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
+struct parallel_switch {
+ struct task_struct *tsk;
+ struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
{
- struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(vma->vm->i915);
- unsigned long n, size;
- u32 *cmd;
- int err;
+ struct parallel_switch *arg = data;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
- size = (4 * count + 1) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(vma->vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
+ count = 0;
+ do {
+ struct i915_request *rq = NULL;
+ int err, n;
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
+ err = 0;
+ for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ }
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+ struct parallel_switch *arg = data;
+ struct i915_request *rq = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int n;
+
+ count = 0;
+ do {
+ for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+ int err = 0;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return err;
+ }
+ }
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(rq);
+
+ pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_switch1,
+ __live_parallel_switchN,
+ NULL,
+ };
+ struct parallel_switch *data = NULL;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ int (* const *fn)(void *arg);
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct file *file;
+ int n, m, count;
+ int err = 0;
+
+ /*
+ * Check we can process switches on all engines simultaneously.
+ */
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
}
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
+ engines = i915_gem_context_lock_engines(ctx);
+ count = engines->num_engines;
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = value;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = value;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = value;
+ data = kcalloc(count, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ i915_gem_context_unlock_engines(ctx);
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ m = 0; /* Use the first context as our template for the engines */
+ for_each_gem_engine(ce, engines, it) {
+ err = intel_context_pin(ce);
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out;
}
- offset += PAGE_SIZE;
+ data[m++].ce[0] = intel_context_get(ce);
}
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ i915_gem_context_unlock_engines(ctx);
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
+ /* Clone the same set of engines into the other contexts */
+ for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ for (m = 0; m < count; m++) {
+ if (!data[m].ce[0])
+ continue;
+
+ ce = intel_context_create(data[m].ce[0]->engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ data[m].ce[n] = ce;
+ }
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ int n;
- return vma;
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
+ for (n = 0; n < count; n++) {
+ if (!data[n].ce[0])
+ continue;
+
+ data[n].tsk = kthread_run(*fn, &data[n],
+ "igt/parallel:%s",
+ data[n].ce[0]->engine->name);
+ if (IS_ERR(data[n].tsk)) {
+ err = PTR_ERR(data[n].tsk);
+ break;
+ }
+ get_task_struct(data[n].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (n = 0; n < count; n++) {
+ int status;
+
+ if (IS_ERR_OR_NULL(data[n].tsk))
+ continue;
+
+ status = kthread_stop(data[n].tsk);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(data[n].tsk);
+ data[n].tsk = NULL;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+out:
+ for (n = 0; n < count; n++) {
+ for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
+ if (!data[n].ce[m])
+ continue;
+
+ intel_context_unpin(data[n].ce[m]);
+ intel_context_put(data[n].ce[m]);
+ }
+ }
+ kfree(data);
+out_file:
+ fput(file);
+ return err;
}
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
@@ -232,100 +419,39 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
-static int gpu_fill(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_fill(struct intel_context *ce,
+ struct drm_i915_gem_object *obj,
unsigned int dw)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
- struct i915_request *rq;
struct i915_vma *vma;
- struct i915_vma *batch;
- unsigned int flags;
int err;
- GEM_BUG_ON(obj->base.size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(obj->base.size > ce->vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
if (err)
return err;
- /* Within the GTT the huge objects maps every page onto
+ /*
+ * Within the GTT the huge object maps every page onto
* its 1024 real pages (using phys_pfn = dma_pfn % 1024).
* We set the nth dword within the page using the nth
* mapping via the GTT - this should exercise the GTT mapping
* whilst checking that each context provides a unique view
* into the object.
*/
- batch = gpu_fill_dw(vma,
- (dw * real_page_count(obj)) << PAGE_SHIFT |
- (dw * sizeof(u32)),
- real_page_count(obj),
- dw);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_vma;
- }
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto skip_request;
-
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err)
- goto skip_request;
-
- i915_request_add(rq);
-
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ err = igt_gpu_fill_dw(ce, vma,
+ (dw * real_page_count(obj)) << PAGE_SHIFT |
+ (dw * sizeof(u32)),
+ real_page_count(obj),
+ dw);
i915_vma_unpin(vma);
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
-err_vma:
- i915_vma_unpin(vma);
return err;
}
@@ -404,17 +530,17 @@ out_unmap:
return err;
}
-static int file_add_object(struct drm_file *file,
- struct drm_i915_gem_object *obj)
+static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
int err;
GEM_BUG_ON(obj->base.handle_count);
/* tie the object to the drm_file for easy reaping */
- err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+ err = idr_alloc(&to_drm_file(file)->object_idr,
+ &obj->base, 1, 0, GFP_KERNEL);
if (err < 0)
- return err;
+ return err;
i915_gem_object_get(obj);
obj->base.handle_count++;
@@ -422,19 +548,21 @@ static int file_add_object(struct drm_file *file,
}
static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
- struct drm_file *file,
+create_test_object(struct i915_address_space *vm,
+ struct file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
+ /* Keep in GEM's good graces */
+ intel_gt_retire_requests(vm->gt);
+
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
- obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+ obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
if (IS_ERR(obj))
return obj;
@@ -462,11 +590,49 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
return npages / DW_PER_PAGE;
}
+static void throttle_release(struct i915_request **q, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (IS_ERR_OR_NULL(q[i]))
+ continue;
+
+ i915_request_put(fetch_and_zero(&q[i]));
+ }
+}
+
+static int throttle(struct intel_context *ce,
+ struct i915_request **q, int count)
+{
+ int i;
+
+ if (!IS_ERR_OR_NULL(q[0])) {
+ if (i915_request_wait(q[0],
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ return -EINTR;
+
+ i915_request_put(q[0]);
+ }
+
+ for (i = 0; i < count - 1; i++)
+ q[i] = q[i + 1];
+
+ q[i] = intel_context_create_request(ce);
+ if (IS_ERR(q[i]))
+ return PTR_ERR(q[i]);
+
+ i915_request_get(q[i]);
+ i915_request_add(q[i]);
+
+ return 0;
+}
+
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -478,13 +644,14 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
+ struct i915_request *tq[5] = {};
struct igt_live_test t;
- struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
+ struct file *file;
if (!intel_engine_can_store_dword(engine))
continue;
@@ -496,41 +663,53 @@ static int igt_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
+ struct intel_context *ce;
- ctx = live_context(i915, file);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
}
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -540,6 +719,9 @@ static int igt_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+
+ intel_context_put(ce);
+ kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -557,14 +739,16 @@ static int igt_ctx_exec(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
+ fput(file);
if (err)
return err;
+
+ i915_gem_drain_freed_objects(i915);
}
return 0;
@@ -573,11 +757,11 @@ out_unlock:
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct i915_request *tq[5] = {};
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct igt_live_test t;
- struct drm_file *file;
+ struct file *file;
int err = 0;
/*
@@ -592,24 +776,22 @@ static int igt_shared_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
- goto out_unlock;
+ goto out_file;
}
if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
- goto out_unlock;
+ goto out_file;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
@@ -623,7 +805,7 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
+ struct intel_context *ce;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -631,25 +813,39 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
- __assign_ppgtt(ctx, parent->vm);
+ mutex_lock(&ctx->mutex);
+ __assign_ppgtt(ctx, ctx_vm(parent));
+ mutex_unlock(&ctx->mutex);
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(parent, file, &objects);
+ obj = create_test_object(ctx_vm(parent),
+ file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
}
- err = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_test;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
@@ -662,6 +858,7 @@ static int igt_shared_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+ intel_context_put(ce);
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -678,14 +875,15 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
+
+ i915_gem_drain_freed_objects(i915);
}
out_test:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- mock_file_free(i915, file);
+out_file:
+ fput(file);
return err;
}
@@ -717,6 +915,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -746,7 +946,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -779,21 +979,22 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_request;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
*rq_out = i915_request_get(rq);
@@ -807,8 +1008,7 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
err_vma:
i915_vma_unpin(vma);
@@ -820,8 +1020,7 @@ err_vma:
#define TEST_RESET BIT(2)
static int
-__sseu_prepare(struct drm_i915_private *i915,
- const char *name,
+__sseu_prepare(const char *name,
unsigned int flags,
struct intel_context *ce,
struct igt_spinner **spin)
@@ -837,14 +1036,11 @@ __sseu_prepare(struct drm_i915_private *i915,
if (!*spin)
return -ENOMEM;
- ret = igt_spinner_init(*spin, i915);
+ ret = igt_spinner_init(*spin, ce->engine->gt);
if (ret)
goto err_free;
- rq = igt_spinner_create_request(*spin,
- ce->gem_context,
- ce->engine,
- MI_NOOP);
+ rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err_fini;
@@ -870,8 +1066,7 @@ err_free:
}
static int
-__read_slice_count(struct drm_i915_private *i915,
- struct intel_context *ce,
+__read_slice_count(struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct igt_spinner *spin,
u32 *rpcs)
@@ -900,7 +1095,7 @@ __read_slice_count(struct drm_i915_private *i915,
return ret;
}
- if (INTEL_GEN(i915) >= 11) {
+ if (INTEL_GEN(ce->engine->i915) >= 11) {
s_mask = GEN11_RPCS_S_CNT_MASK;
s_shift = GEN11_RPCS_S_CNT_SHIFT;
} else {
@@ -943,8 +1138,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
}
static int
-__sseu_finish(struct drm_i915_private *i915,
- const char *name,
+__sseu_finish(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -956,19 +1150,18 @@ __sseu_finish(struct drm_i915_private *i915,
int ret = 0;
if (flags & TEST_RESET) {
- ret = i915_reset_engine(ce->engine, "sseu");
+ ret = intel_engine_reset(ce->engine, "sseu");
if (ret)
goto out;
}
- ret = __read_slice_count(i915, ce, obj,
+ ret = __read_slice_count(ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret)
goto out;
- ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
- NULL, &rpcs);
+ ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
out:
@@ -976,11 +1169,11 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
+ ret = igt_flush_test(ce->engine->i915);
if (ret)
return ret;
- ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
+ ret = __read_slice_count(ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!");
}
@@ -989,8 +1182,7 @@ out:
}
static int
-__sseu_test(struct drm_i915_private *i915,
- const char *name,
+__sseu_test(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -999,15 +1191,17 @@ __sseu_test(struct drm_i915_private *i915,
struct igt_spinner *spin = NULL;
int ret;
- ret = __sseu_prepare(i915, name, flags, ce, &spin);
+ intel_engine_pm_get(ce->engine);
+
+ ret = __sseu_prepare(name, flags, ce, &spin);
if (ret)
- return ret;
+ goto out_pm;
- ret = __intel_context_reconfigure_sseu(ce, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
- ret = __sseu_finish(i915, name, flags, ce, obj,
+ ret = __sseu_finish(name, flags, ce, obj,
hweight32(sseu.slice_mask), spin);
out_spin:
@@ -1016,6 +1210,8 @@ out_spin:
igt_spinner_fini(spin);
kfree(spin);
}
+out_pm:
+ intel_engine_pm_put(ce->engine);
return ret;
}
@@ -1024,53 +1220,15 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
const char *name,
unsigned int flags)
{
- struct intel_engine_cs *engine = i915->engine[RCS0];
- struct intel_sseu default_sseu = engine->sseu;
struct drm_i915_gem_object *obj;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
- struct intel_sseu pg_sseu;
- intel_wakeref_t wakeref;
- struct drm_file *file;
- int ret;
-
- if (INTEL_GEN(i915) < 9)
- return 0;
-
- if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
- return 0;
+ int inst = 0;
+ int ret = 0;
- if (hweight32(default_sseu.slice_mask) < 2)
+ if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
return 0;
- /*
- * Gen11 VME friendly power-gated configuration with half enabled
- * sub-slices.
- */
- pg_sseu = default_sseu;
- pg_sseu.slice_mask = 1;
- pg_sseu.subslice_mask =
- ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
-
- pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(default_sseu.slice_mask),
- hweight32(pg_sseu.slice_mask));
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
if (flags & TEST_RESET)
- igt_global_reset_lock(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
- i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
+ igt_global_reset_lock(&i915->gt);
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1078,56 +1236,79 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_unlock;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ do {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ struct intel_sseu pg_sseu;
- ce = i915_gem_context_get_engine(ctx, RCS0);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto out_rpm;
- }
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_RENDER,
+ inst++);
+ if (!engine)
+ break;
- ret = intel_context_pin(ce);
- if (ret)
- goto out_context;
+ if (hweight32(engine->sseu.slice_mask) < 2)
+ continue;
- /* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
- if (ret)
- goto out_fail;
+ /*
+ * Gen11 VME friendly power-gated configuration with
+ * half enabled sub-slices.
+ */
+ pg_sseu = engine->sseu;
+ pg_sseu.slice_mask = 1;
+ pg_sseu.subslice_mask =
+ ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
+
+ pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
+ engine->name, name, flags,
+ hweight32(engine->sseu.slice_mask),
+ hweight32(pg_sseu.slice_mask));
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_put;
+ }
- /* Then set a power-gated configuration. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ ret = intel_context_pin(ce);
+ if (ret)
+ goto out_ce;
- /* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
- if (ret)
- goto out_fail;
+ /* First set the default mask. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
- /* One last power-gated configuration for the road. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /* Then set a power-gated configuration. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Back to defaults. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* One last power-gated configuration for the road. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
-out_fail:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+out_unpin:
+ intel_context_unpin(ce);
+out_ce:
+ intel_context_put(ce);
+ } while (!ret);
+
+ if (igt_flush_test(i915))
ret = -EIO;
- intel_context_unpin(ce);
-out_context:
- intel_context_put(ce);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+out_put:
i915_gem_object_put(obj);
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
if (flags & TEST_RESET)
- igt_global_reset_unlock(i915);
-
- mock_file_free(i915, file);
+ igt_global_reset_unlock(&i915->gt);
if (ret)
pr_err("%s: Failed with %d!\n", name, ret);
@@ -1161,15 +1342,18 @@ static int igt_ctx_sseu(void *arg)
static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
+ unsigned long idx, ndwords, dw, num_engines;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_request *tq[5] = {};
+ struct i915_gem_engines_iter it;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
- unsigned long idx, ndwords, dw;
+ struct intel_context *ce;
struct igt_live_test t;
- struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
+ struct file *file;
int err = -ENODEV;
/*
@@ -1182,56 +1366,63 @@ static int igt_ctx_readonly(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
+ vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
- goto out_unlock;
+ goto out_file;
}
+ num_engines = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ if (intel_engine_can_store_dword(ce->engine))
+ num_engines++;
+ i915_gem_context_unlock_engines(ctx);
+
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- unsigned int id;
-
- for_each_engine(engine, i915, id) {
- intel_wakeref_t wakeref;
-
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (prandom_u32_state(&prng) & 1)
i915_gem_object_set_readonly(obj);
}
- err = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ ce->engine->name,
+ yesno(!!ctx_vm(ctx)),
+ err);
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -1240,9 +1431,10 @@ static int igt_ctx_readonly(void *arg)
}
ndwords++;
}
+ i915_gem_context_unlock_engines(ctx);
}
- pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_engines);
+ pr_info("Submitted %lu dwords (across %lu engines)\n",
+ ndwords, num_engines);
dw = 0;
idx = 0;
@@ -1262,19 +1454,19 @@ static int igt_ctx_readonly(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
+ fput(file);
return err;
}
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+static int check_scratch(struct i915_address_space *vm, u64 offset)
{
struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->vm->mm,
+ __drm_mm_interval_first(&vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@@ -1292,6 +1484,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cmd;
@@ -1306,7 +1499,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out;
}
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
@@ -1322,17 +1515,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto out_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1347,26 +1543,27 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
if (err)
goto skip_request;
i915_vma_unpin(vma);
- i915_vma_close(vma);
- i915_vma_put(vma);
i915_request_add(rq);
- return 0;
-
+ goto out_vm;
skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
-err:
+out_vm:
+ i915_vm_put(vm);
+out:
i915_gem_object_put(obj);
return err;
}
@@ -1377,6 +1574,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
@@ -1393,7 +1591,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1419,17 +1617,20 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto out_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1444,7 +1645,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1458,27 +1661,27 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
- goto err;
+ goto out_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out_vm;
}
*value = cmd[result / sizeof(*cmd)];
i915_gem_object_unpin_map(obj);
- i915_gem_object_put(obj);
-
- return 0;
+ goto out_vm;
skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
-err:
+out_vm:
+ i915_vm_put(vm);
+out:
i915_gem_object_put(obj);
return err;
}
@@ -1487,13 +1690,11 @@ static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
+ unsigned long num_engines, count;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- struct drm_file *file;
I915_RND_STATE(prng);
- unsigned long count;
- unsigned int id;
+ struct file *file;
u64 vm_total;
int err;
@@ -1509,36 +1710,33 @@ static int igt_vm_isolation(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
- goto out_unlock;
+ goto out_file;
}
ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
- goto out_unlock;
+ goto out_file;
}
/* We can only test vm isolation, if the vm are distinct */
- if (ctx_a->vm == ctx_b->vm)
- goto out_unlock;
+ if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
+ goto out_file;
- vm_total = ctx_a->vm->total;
- GEM_BUG_ON(ctx_b->vm->total != vm_total);
+ vm_total = ctx_vm(ctx_a)->total;
+ GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ num_engines = 0;
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
unsigned long this = 0;
@@ -1551,7 +1749,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= -sizeof(u32);
+ offset = round_down(offset, alignof_dword);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1560,7 +1758,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
- goto out_rpm;
+ goto out_file;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1569,42 +1767,24 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_rpm;
+ goto out_file;
}
this++;
}
count += this;
+ num_engines++;
}
- pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_engines);
+ pr_info("Checked %lu scratch offsets across %lu engines\n",
+ count, num_engines);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-out_unlock:
+out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
- mock_file_free(i915, file);
+ fput(file);
return err;
}
-static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (engines == ALL_ENGINES)
- return "all";
-
- for_each_engine_masked(engine, i915, engines, tmp)
- return engine->name;
-
- return "none";
-}
-
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
return !ce->state;
@@ -1632,13 +1812,9 @@ static int mock_context_barrier(void *arg)
* a request; useful for retiring old state after loading new.
*/
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = mock_context(i915, "mock");
- if (!ctx) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!ctx)
+ return -ENOMEM;
counter = 0;
err = context_barrier_task(ctx, 0,
@@ -1711,8 +1887,6 @@ static int mock_context_barrier(void *arg)
out:
mock_context_close(ctx);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
#undef pr_fmt
#define pr_fmt(x) x
@@ -1736,10 +1910,11 @@ int i915_gem_context_mock_selftests(void)
return err;
}
-int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
+ SUBTEST(live_parallel_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
@@ -1747,8 +1922,8 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, dev_priv);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e3a64edef918..2a52b92586b9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -20,7 +20,7 @@ static int igt_dmabuf_export(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
@@ -44,7 +44,7 @@ static int igt_dmabuf_import_self(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -219,7 +219,7 @@ static int igt_dmabuf_export_vmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -254,106 +254,6 @@ err_obj:
return err;
}
-static int igt_dmabuf_export_kmap(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
- i915_gem_object_put(obj);
- if (IS_ERR(dmabuf)) {
- err = PTR_ERR(dmabuf);
- pr_err("i915_gem_prime_export failed with err=%d\n", err);
- return err;
- }
-
- ptr = dma_buf_kmap(dmabuf, 0);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
-
- if (memchr_inv(ptr, 0, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 0, ptr);
- pr_err("Exported page[0] not initialiased to zero!\n");
- err = -EINVAL;
- goto err;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_kunmap(dmabuf, 0, ptr);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
- goto err;
- }
- memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- ptr = dma_buf_kmap(dmabuf, 1);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
-
- if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 1, ptr);
- pr_err("Exported page[1] not set to 0xaa!\n");
- err = -EINVAL;
- goto err;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_kunmap(dmabuf, 1, ptr);
-
- ptr = dma_buf_kmap(dmabuf, 0);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
- if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 0, ptr);
- pr_err("Exported page[0] did not retain 0xc5!\n");
- err = -EINVAL;
- goto err;
- }
- dma_buf_kunmap(dmabuf, 0, ptr);
-
- ptr = dma_buf_kmap(dmabuf, 2);
- if (ptr) {
- pr_err("Erroneously kmapped beyond the end of the object!\n");
- dma_buf_kunmap(dmabuf, 2, ptr);
- err = -EINVAL;
- goto err;
- }
-
- ptr = dma_buf_kmap(dmabuf, -1);
- if (ptr) {
- pr_err("Erroneously kmapped before the start of the object!\n");
- dma_buf_kunmap(dmabuf, -1, ptr);
- err = -EINVAL;
- goto err;
- }
-
- err = 0;
-err:
- dma_buf_put(dmabuf);
- return err;
-}
-
int i915_gem_dmabuf_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -362,7 +262,6 @@ int i915_gem_dmabuf_mock_selftests(void)
SUBTEST(igt_dmabuf_import),
SUBTEST(igt_dmabuf_import_ownership),
SUBTEST(igt_dmabuf_export_vmap),
- SUBTEST(igt_dmabuf_export_kmap),
};
struct drm_i915_private *i915;
int err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5c81f4b4813a..ef7c74cff28a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,10 +6,15 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
+#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/igt_mmap.h"
struct tile {
unsigned int width;
@@ -75,18 +80,103 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
static int check_partial_mapping(struct drm_i915_gem_object *obj,
const struct tile *tile,
- unsigned long end_time)
+ struct rnd_state *prng)
{
- const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
+ u32 __iomem *io;
+ struct page *p;
+ unsigned int n;
+ u64 offset;
+ u32 *cpu;
int err;
- if (igt_timeout(end_time,
- "%s: timed out before tiling=%d stride=%d\n",
- __func__, tile->tiling, tile->stride))
- return -EINTR;
+ err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
+ if (err) {
+ pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
+ tile->tiling, tile->stride, err);
+ return err;
+ }
+
+ GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
+ GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n", err);
+ return err;
+ }
+
+ page = i915_prandom_u32_max_state(npages, prng);
+ view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
+ }
+
+ n = page - view.partial.offset;
+ GEM_BUG_ON(n >= view.partial.size);
+
+ io = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(io)) {
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
+ err = PTR_ERR(io);
+ goto out;
+ }
+
+ iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
+ i915_vma_unpin_iomap(vma);
+
+ offset = tiled_offset(tile, page << PAGE_SHIFT);
+ if (offset >= obj->base.size)
+ goto out;
+
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+
+ p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ cpu = kmap(p) + offset_in_page(offset);
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ if (*cpu != (u32)page) {
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ page, n,
+ view.partial.offset,
+ view.partial.size,
+ vma->size >> PAGE_SHIFT,
+ tile->tiling ? tile_row_pages(obj) : 0,
+ vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
+ offset >> PAGE_SHIFT,
+ (unsigned int)offset_in_page(offset),
+ offset,
+ (u32)page, *cpu);
+ err = -EINVAL;
+ }
+ *cpu = 0;
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ kunmap(p);
+
+out:
+ __i915_vma_put(vma);
+ return err;
+}
+
+static int check_partial_mappings(struct drm_i915_gem_object *obj,
+ const struct tile *tile,
+ unsigned long end_time)
+{
+ const unsigned int nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_vma *vma;
+ unsigned long page;
+ int err;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
if (err) {
@@ -143,7 +233,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
continue;
- i915_gem_flush_ggtt_writes(to_i915(obj->base.dev));
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -168,12 +258,43 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (err)
return err;
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
+
+ if (igt_timeout(end_time,
+ "%s: timed out after tiling=%d stride=%d\n",
+ __func__, tile->tiling, tile->stride))
+ return -EINTR;
}
return 0;
}
+static unsigned int
+setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) <= 2) {
+ tile->height = 16;
+ tile->width = 128;
+ tile->size = 11;
+ } else if (tile->tiling == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(i915)) {
+ tile->height = 32;
+ tile->width = 128;
+ tile->size = 12;
+ } else {
+ tile->height = 8;
+ tile->width = 512;
+ tile->size = 12;
+ }
+
+ if (INTEL_GEN(i915) < 4)
+ return 8192 / tile->width;
+ else if (INTEL_GEN(i915) < 7)
+ return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
+ else
+ return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
+}
+
static int igt_partial_tiling(void *arg)
{
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
@@ -183,6 +304,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
/* We want to check the page mapping and fencing of a large object
* mmapped through the GTT. The object we create is larger than can
* possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -204,7 +328,6 @@ static int igt_partial_tiling(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
@@ -218,7 +341,7 @@ static int igt_partial_tiling(void *arg)
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
tile.tiling = I915_TILING_NONE;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err && err != -EINTR)
goto out_unlock;
}
@@ -240,10 +363,10 @@ static int igt_partial_tiling(void *arg)
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
- tile.swizzle = i915->mm.bit_6_swizzle_x;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->mm.bit_6_swizzle_y;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
break;
}
@@ -252,31 +375,11 @@ static int igt_partial_tiling(void *arg)
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
- if (INTEL_GEN(i915) <= 2) {
- tile.height = 16;
- tile.width = 128;
- tile.size = 11;
- } else if (tile.tiling == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(i915)) {
- tile.height = 32;
- tile.width = 128;
- tile.size = 12;
- } else {
- tile.height = 8;
- tile.width = 512;
- tile.size = 12;
- }
-
- if (INTEL_GEN(i915) < 4)
- max_pitch = 8192 / tile.width;
- else if (INTEL_GEN(i915) < 7)
- max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
- else
- max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
+ max_pitch = setup_tile_size(&tile, i915);
for (pitch = max_pitch; pitch; pitch >>= 1) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -284,7 +387,7 @@ static int igt_partial_tiling(void *arg)
if (pitch > 2 && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -293,7 +396,7 @@ static int igt_partial_tiling(void *arg)
if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -304,7 +407,7 @@ static int igt_partial_tiling(void *arg)
if (INTEL_GEN(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -317,82 +420,188 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return err;
}
-static int make_obj_busy(struct drm_i915_gem_object *obj)
+static int igt_smoke_tiling(void *arg)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_request *rq;
- struct i915_vma *vma;
+ const unsigned int nreal = 1 << 12; /* largest tile row x2 */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ IGT_TIMEOUT(end);
int err;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
+ /*
+ * igt_partial_tiling() does an exhaustive check of partial tiling
+ * chunking, but will undoubtedly run out of time. Here, we do a
+ * randomised search and hope over many runs of 1s with different
+ * seeds we will do a thorough check.
+ *
+ * Remember to look at the st_seed if we see a flip-flop in BAT!
+ */
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
- if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ return 0;
+
+ obj = huge_gem_object(i915,
+ nreal << PAGE_SHIFT,
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- i915_request_add(rq);
+ count = 0;
+ do {
+ struct tile tile;
- i915_vma_unpin(vma);
- i915_gem_object_put(obj); /* leave it only alive via its active ref */
+ tile.tiling =
+ i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
+ switch (tile.tiling) {
+ case I915_TILING_NONE:
+ tile.height = 1;
+ tile.width = 1;
+ tile.size = 0;
+ tile.stride = 0;
+ tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
+ break;
+
+ case I915_TILING_X:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ break;
+ }
+
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
+ tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+ continue;
+
+ if (tile.tiling != I915_TILING_NONE) {
+ unsigned int max_pitch = setup_tile_size(&tile, i915);
+
+ tile.stride =
+ i915_prandom_u32_max_state(max_pitch, &prng);
+ tile.stride = (1 + tile.stride) * tile.width;
+ if (INTEL_GEN(i915) < 4)
+ tile.stride = rounddown_pow_of_two(tile.stride);
+ }
+
+ err = check_partial_mapping(obj, &tile, &prng);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end, NULL));
+ pr_info("%s: Completed %lu trials\n", __func__, count);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
return err;
}
+static int make_obj_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_engine_cs *engine;
+
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq,
+ EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ i915_request_add(rq);
+ i915_vma_unpin(vma);
+ if (err)
+ return err;
+ }
+
+ i915_gem_object_put(obj); /* leave it only alive via its active ref */
+ return 0;
+}
+
static bool assert_mmap_offset(struct drm_i915_private *i915,
unsigned long size,
int expected)
{
struct drm_i915_gem_object *obj;
- int err;
+ struct i915_mmap_offset *mmo;
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = create_mmap_offset(obj);
+ mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);
- return err == expected;
+ return PTR_ERR_OR_ZERO(mmo) == expected;
}
static void disable_retire_worker(struct drm_i915_private *i915)
{
- i915_gem_shrinker_unregister(i915);
-
- intel_gt_pm_get(i915);
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
+ i915_gem_driver_unregister__shrinker(i915);
+ intel_gt_pm_get(&i915->gt);
+ cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
- intel_gt_pm_put(i915);
+ igt_flush_test(i915);
+ intel_gt_pm_put(&i915->gt);
+ i915_gem_driver_register__shrinker(i915);
+}
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+static void mmap_offset_lock(struct drm_i915_private *i915)
+ __acquires(&i915->drm.vma_offset_manager->vm_lock)
+{
+ write_lock(&i915->drm.vma_offset_manager->vm_lock);
+}
- i915_gem_shrinker_register(i915);
+static void mmap_offset_unlock(struct drm_i915_private *i915)
+ __releases(&i915->drm.vma_offset_manager->vm_lock)
+{
+ write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}
static int igt_mmap_offset_exhaustion(void *arg)
@@ -400,26 +609,50 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
struct drm_i915_gem_object *obj;
- struct drm_mm_node resv, *hole;
- u64 hole_start, hole_end;
- int loop, err;
+ struct drm_mm_node *hole, *next;
+ struct i915_mmap_offset *mmo;
+ int loop, err = 0;
/* Disable background reaper */
disable_retire_worker(i915);
GEM_BUG_ON(!i915->gt.awake);
+ intel_gt_retire_requests(&i915->gt);
+ i915_gem_drain_freed_objects(i915);
/* Trim the device mmap space to only a page */
- memset(&resv, 0, sizeof(resv));
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- resv.start = hole_start;
- resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
- err = drm_mm_reserve_node(mm, &resv);
+ mmap_offset_lock(i915);
+ loop = 1; /* PAGE_SIZE units */
+ list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
+ struct drm_mm_node *resv;
+
+ resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
+ if (!resv) {
+ err = -ENOMEM;
+ goto out_park;
+ }
+
+ resv->start = drm_mm_hole_node_start(hole) + loop;
+ resv->size = hole->hole_size - loop;
+ resv->color = -1ul;
+ loop = 0;
+
+ if (!resv->size) {
+ kfree(resv);
+ continue;
+ }
+
+ pr_debug("Reserving hole [%llx + %llx]\n",
+ resv->start, resv->size);
+
+ err = drm_mm_reserve_node(mm, resv);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
+ kfree(resv);
goto out_park;
}
- break;
}
+ GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
+ mmap_offset_unlock(i915);
/* Just fits! */
if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
@@ -442,9 +675,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = create_mmap_offset(obj);
- if (err) {
+ mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+ if (IS_ERR(mmo)) {
pr_err("Unable to insert object into reclaimed hole\n");
+ err = PTR_ERR(mmo);
goto err_obj;
}
@@ -458,7 +692,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -467,27 +701,24 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = make_obj_busy(obj);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
}
-
- /* NB we rely on the _active_ reference to access obj now */
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = create_mmap_offset(obj);
- if (err) {
- pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
- loop, err);
- goto out;
- }
}
out:
- drm_mm_remove_node(&resv);
+ mmap_offset_lock(i915);
out_park:
+ drm_mm_for_each_node_safe(hole, next, mm) {
+ if (hole->color != -1ul)
+ continue;
+
+ drm_mm_remove_node(hole);
+ kfree(hole);
+ }
+ mmap_offset_unlock(i915);
restore_retire_worker(i915);
return err;
err_obj:
@@ -495,11 +726,515 @@ err_obj:
goto out;
}
+static int gtt_set(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ void __iomem *map;
+ int err = 0;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ intel_gt_pm_get(vma->vm->gt);
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ memset_io(map, POISON_INUSE, obj->base.size);
+ i915_vma_unpin_iomap(vma);
+
+out:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
+}
+
+static int gtt_check(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ void __iomem *map;
+ int err = 0;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ intel_gt_pm_get(vma->vm->gt);
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
+ pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ }
+ i915_vma_unpin_iomap(vma);
+
+out:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
+}
+
+static int wc_set(struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ memset(vaddr, POISON_INUSE, obj->base.size);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int wc_check(struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+ int err = 0;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
+ pr_err("%s: Write via mmap did not land in backing store (WC)\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ }
+ i915_gem_object_unpin_map(obj);
+
+ return err;
+}
+
+static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
+{
+ if (type == I915_MMAP_TYPE_GTT &&
+ !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+ return false;
+
+ if (type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM))
+ return false;
+
+ return true;
+}
+
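+/* Replicate a byte value into every byte of a u32, e.g. expand32(0x5a) == 0x5a5a5a5a */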
+#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
+static int __igt_mmap(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ struct vm_area_struct *area;
+ unsigned long addr;
+ int err, i;
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ err = wc_set(obj);
+ if (err == -ENXIO)
+ err = gtt_set(obj);
+ if (err)
+ return err;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
+
+ area = find_vma(current->mm, addr);
+ if (!area) {
+ pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ if (area->vm_private_data != mmo) {
+ pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
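+ /* Read back the POISON_INUSE pattern through the mmap, then overwrite it with POISON_FREE for the backing-store checks below */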
+ for (i = 0; i < obj->base.size / sizeof(u32); i++) {
+ u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
+ u32 x;
+
+ if (get_user(x, ux)) {
+ pr_err("%s: Unable to read from mmap, offset:%zd\n",
+ obj->mm.region->name, i * sizeof(x));
+ err = -EFAULT;
+ goto out_unmap;
+ }
+
+ if (x != expand32(POISON_INUSE)) {
+ pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+ obj->mm.region->name,
+ i * sizeof(x), x, expand32(POISON_INUSE));
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ x = expand32(POISON_FREE);
+ if (put_user(x, ux)) {
+ pr_err("%s: Unable to write to mmap, offset:%zd\n",
+ obj->mm.region->name, i * sizeof(x));
+ err = -EFAULT;
+ goto out_unmap;
+ }
+ }
+
+ if (type == I915_MMAP_TYPE_GTT)
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = wc_check(obj);
+ if (err == -ENXIO)
+ err = gtt_check(obj);
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ unsigned long sizes[] = {
+ PAGE_SIZE,
+ mr->min_page_size,
+ SZ_4M,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, sizes[i], 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int __igt_mmap_gpu(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct intel_engine_cs *engine;
+ struct i915_mmap_offset *mmo;
+ unsigned long addr;
+ u32 __user *ux;
+ u32 bbe;
+ int err;
+
+ /*
+ * Verify that the mmap access into the backing store aligns with
+ * that of the GPU, i.e. that mmap is indeed writing into the same
+ * page as being read by the GPU.
+ */
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ err = wc_set(obj);
+ if (err == -ENXIO)
+ err = gtt_set(obj);
+ if (err)
+ return err;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ ux = u64_to_user_ptr((u64)addr);
+ bbe = MI_BATCH_BUFFER_END;
+ if (put_user(bbe, ux)) {
+ pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
+ err = -EFAULT;
+ goto out_unmap;
+ }
+
+ if (type == I915_MMAP_TYPE_GTT)
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unmap;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_unmap;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s(%s, %s): Failed to execute batch\n",
+ __func__, engine->name, obj->mm.region->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out_unpin:
+ i915_vma_unpin(vma);
+ if (err)
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
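+/* Per-PTE callbacks for apply_to_page_range(): verify the CPU mmap is either fully populated or fully revoked */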
+static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
+{
+ if (!pte_present(*pte) || pte_none(*pte)) {
+ pr_err("missing PTE:%lx\n",
+ (addr - (unsigned long)data) >> PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
+{
+ if (pte_present(*pte) && !pte_none(*pte)) {
+ pr_err("present PTE:%lx; expected to be revoked\n",
+ (addr - (unsigned long)data) >> PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_present(unsigned long addr, unsigned long len)
+{
+ return apply_to_page_range(current->mm, addr, len,
+ check_present_pte, (void *)addr);
+}
+
+static int check_absent(unsigned long addr, unsigned long len)
+{
+ return apply_to_page_range(current->mm, addr, len,
+ check_absent_pte, (void *)addr);
+}
+
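+/* Fault in the whole mapping by touching the first byte of every page, plus the final byte */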
+static int prefault_range(u64 start, u64 len)
+{
+ const char __user *addr, *end;
+ char __maybe_unused c;
+ int err;
+
+ addr = u64_to_user_ptr(start);
+ end = addr + len;
+
+ for (; addr < end; addr += PAGE_SIZE) {
+ err = __get_user(c, addr);
+ if (err)
+ return err;
+ }
+
+ return __get_user(c, end - 1);
+}
+
+static int __igt_mmap_revoke(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ unsigned long addr;
+ int err;
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ err = prefault_range(addr, obj->base.size);
+ if (err)
+ goto out_unmap;
+
+ GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
+ !atomic_read(&obj->bind_count));
+
+ err = check_present(addr, obj->base.size);
+ if (err) {
+ pr_err("%s: was not present\n", obj->mm.region->name);
+ goto out_unmap;
+ }
+
+ /*
+ * After unbinding the object from the GGTT, its address may be reused
+ * for other objects. Ergo we have to revoke the previous mmap PTE
+ * access as it no longer points to the same object.
+ */
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (err) {
+ pr_err("Failed to unbind object!\n");
+ goto out_unmap;
+ }
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+
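+ /* For non-GTT mmaps the PTEs reference the backing pages directly, so also drop the pages to trigger revocation */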
+ if (type != I915_MMAP_TYPE_GTT) {
+ __i915_gem_object_put_pages(obj);
+ if (i915_gem_object_has_pages(obj)) {
+ pr_err("Failed to put-pages object!\n");
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+ err = check_absent(addr, obj->base.size);
+ if (err) {
+ pr_err("%s: was not absent\n", obj->mm.region->name);
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap_revoke(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
+ SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
+ SUBTEST(igt_mmap),
+ SUBTEST(igt_mmap_revoke),
+ SUBTEST(igt_mmap_gpu),
};
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e23d8c9e9298..62077fe46715 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -3,34 +3,254 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/sort.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_engine_user.h"
+
#include "i915_selftest.h"
+#include "gem/i915_gem_context.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
#include "selftests/mock_drm.h"
+#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_fill_blt(void *arg)
+static int wrap_ktime_compare(const void *A, const void *B)
{
- struct intel_context *ce = arg;
- struct drm_i915_private *i915 = ce->gem_context->i915;
- struct drm_i915_gem_object *obj;
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static int __perf_fill_blt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+ int err;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ intel_engine_pm_get(engine);
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_fill_blt(obj, ce, 0);
+ if (err)
+ break;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ break;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+ intel_engine_pm_put(engine);
+ if (err)
+ return err;
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
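+ /* Report a 1:2:1 weighted average of the three middle samples; the weights sum to 4, hence 4 * size in the numerator (ns -> MiB/s) */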
+ pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
+ engine->name,
+ obj->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * obj->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
+static int perf_fill_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __perf_fill_blt(obj);
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __perf_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst)
+{
+ struct drm_i915_private *i915 = to_i915(src->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+ int err = 0;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ intel_engine_pm_get(engine);
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ if (err)
+ break;
+
+ err = i915_gem_object_wait(dst,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ break;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+ intel_engine_pm_put(engine);
+ if (err)
+ return err;
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
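+ /* Same 1:2:1 weighted-average throughput estimate as the fill test */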
+ pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
+ engine->name,
+ src->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * src->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
+static int perf_copy_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *src, *dst;
+ int err;
+
+ src = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+ dst = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_src;
+ }
+
+ err = __perf_copy_blt(src, dst);
+
+ i915_gem_object_put(dst);
+err_src:
+ i915_gem_object_put(src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct igt_thread_arg {
+ struct drm_i915_private *i915;
+ struct i915_gem_context *ctx;
+ struct file *file;
struct rnd_state prng;
+ unsigned int n_cpus;
+};
+
+static int igt_fill_blt_thread(void *arg)
+{
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ ctx = thread->ctx;
+ if (!ctx) {
+ ctx = live_context(i915, thread->file);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
+ }
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
- u32 sz = prandom_u32_state(&prng) % SZ_32M;
- u32 val = prandom_u32_state(&prng);
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ /*
+ * If we have a tiny shared address space, like for the GGTT
+ * then we can't be too greedy.
+ */
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
- pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+ pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
- obj = i915_gem_object_create_internal(i915, sz);
+ obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -46,14 +266,13 @@ static int igt_fill_blt(void *arg)
* Make sure the potentially async clflush does its job, if
* required.
*/
- memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(obj) / sizeof(u32));
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -63,7 +282,7 @@ static int igt_fill_blt(void *arg)
if (err)
goto err_unpin;
- for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -83,28 +302,261 @@ err_unpin:
err_put:
i915_gem_object_put(obj);
err_flush:
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
+ if (err == -ENOMEM)
+ err = 0;
+
+ intel_context_put(ce);
+ return err;
+}
+
+static int igt_copy_blt_thread(void *arg)
+{
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
+ struct drm_i915_gem_object *src, *dst;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ unsigned int prio;
+ IGT_TIMEOUT(end);
+ int err;
+
+ ctx = thread->ctx;
+ if (!ctx) {
+ ctx = live_context(i915, thread->file);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
+ }
+
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+
+ do {
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
+ u32 i;
+
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
+ sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
+
+ pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
+
+ src = huge_gem_object(i915, phys_sz, sz);
+ if (IS_ERR(src)) {
+ err = PTR_ERR(src);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put_src;
+ }
+
+ memset32(vaddr, val,
+ huge_gem_object_phys_size(src) / sizeof(u32));
+
+ i915_gem_object_unpin_map(src);
+
+ if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ src->cache_dirty = true;
+
+ dst = huge_gem_object(i915, phys_sz, sz);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_put_src;
+ }
+
+ vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put_dst;
+ }
+
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(dst) / sizeof(u32));
+
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ dst->cache_dirty = true;
+
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(dst);
+ err = i915_gem_object_set_to_cpu_domain(dst, false);
+ i915_gem_object_unlock(dst);
+ if (err)
+ goto err_unpin;
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(dst);
+
+ i915_gem_object_put(src);
+ i915_gem_object_put(dst);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(dst);
+err_put_dst:
+ i915_gem_object_put(dst);
+err_put_src:
+ i915_gem_object_put(src);
+err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+ return err;
+}
+
+static int igt_threaded_blt(struct drm_i915_private *i915,
+ int (*blt_fn)(void *arg),
+ unsigned int flags)
+#define SINGLE_CTX BIT(0)
+{
+ struct igt_thread_arg *thread;
+ struct task_struct **tsk;
+ unsigned int n_cpus, i;
+ I915_RND_STATE(prng);
+ int err = 0;
+
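+ /* Oversubscribe: one worker thread more than the number of online CPUs */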
+ n_cpus = num_online_cpus() + 1;
+
+ tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL);
+ if (!tsk)
+ return 0;
+
+ thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
+ if (!thread)
+ goto out_tsk;
+
+ thread[0].file = mock_file(i915);
+ if (IS_ERR(thread[0].file)) {
+ err = PTR_ERR(thread[0].file);
+ goto out_thread;
+ }
+
+ if (flags & SINGLE_CTX) {
+ thread[0].ctx = live_context(i915, thread[0].file);
+ if (IS_ERR(thread[0].ctx)) {
+ err = PTR_ERR(thread[0].ctx);
+ goto out_file;
+ }
+ }
+
+ for (i = 0; i < n_cpus; ++i) {
+ thread[i].i915 = i915;
+ thread[i].file = thread[0].file;
+ thread[i].ctx = thread[0].ctx;
+ thread[i].n_cpus = n_cpus;
+ thread[i].prng =
+ I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
+
+ tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ break;
+ }
+
+ get_task_struct(tsk[i]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (i = 0; i < n_cpus; ++i) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[i]))
+ continue;
+
+ status = kthread_stop(tsk[i]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[i]);
+ }
+
+out_file:
+ fput(thread[0].file);
+out_thread:
+ kfree(thread);
+out_tsk:
+ kfree(tsk);
return err;
}
+static int igt_fill_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
+}
+
+static int igt_fill_blt_ctx0(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
+}
+
+static int igt_copy_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
+}
+
+static int igt_copy_blt_ctx0(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
+}
+
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_fill_blt),
+ SUBTEST(igt_fill_blt_ctx0),
+ SUBTEST(igt_copy_blt),
+ SUBTEST(igt_copy_blt_ctx0),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
- return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+ return i915_live_subtests(tests, i915);
+}
+
+int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_fill_blt),
+ SUBTEST(perf_copy_blt),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 94a15e3f6db8..34932871b3a5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index b232e6d2cd92..6718da20f35d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,9 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
+#include "i915_vma.h"
+#include "i915_drv.h"
#include "i915_request.h"
@@ -23,7 +26,7 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ce = i915_gem_context_get_engine(ctx, engine->id);
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
if (IS_ERR(ce))
return ERR_CAST(ce);
@@ -32,3 +35,134 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
return rq;
}
+
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned long n, size;
+ u32 *cmd;
+ int err;
+
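+ /* Worst case is 4 dwords per store (gen8+ addressing) plus one dword for MI_BATCH_BUFFER_END */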
+ size = (4 * count + 1) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vma->vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? MI_USE_GGTT : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+ offset += PAGE_SIZE;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ intel_gt_chipset_flush(vma->vm->gt);
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ unsigned int flags;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ batch = igt_emit_store_dw(vma, offset, count, val);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ flags = 0;
+ if (INTEL_GEN(ce->vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto skip_request;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto skip_request;
+
+ i915_request_add(rq);
+
+ i915_vma_unpin_and_release(&batch, 0);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 0f17251cf75d..4221cf84d175 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -7,11 +7,26 @@
#ifndef __IGT_GEM_UTILS_H__
#define __IGT_GEM_UTILS_H__
+#include <linux/types.h>
+
struct i915_request;
struct i915_gem_context;
+struct i915_vma;
+
+struct intel_context;
struct intel_engine_cs;
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val);
+
#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index be8974ccff24..384143aa7776 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -5,6 +5,7 @@
*/
#include "mock_context.h"
+#include "selftests/mock_drm.h"
#include "selftests/mock_gtt.h"
struct i915_gem_context *
@@ -13,7 +14,6 @@ mock_context(struct drm_i915_private *i915,
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ i915_gem_context_set_persistence(ctx);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e))
@@ -30,32 +32,26 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
mutex_init(&ctx->mutex);
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret < 0)
- goto err_engines;
-
if (name) {
struct i915_ppgtt *ppgtt;
- ctx->name = kstrdup(name, GFP_KERNEL);
- if (!ctx->name)
- goto err_put;
+ strncpy(ctx->name, name, sizeof(ctx->name));
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
goto err_put;
+ mutex_lock(&ctx->mutex);
__set_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
return ctx;
-err_engines:
- free_engines(rcu_access_pointer(ctx->engines));
err_free:
kfree(ctx);
return NULL;
@@ -73,22 +69,21 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
- init_contexts(i915);
+ init_contexts(&i915->gem.contexts);
}
struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file)
+live_context(struct drm_i915_private *i915, struct file *file)
{
struct i915_gem_context *ctx;
int err;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
+ u32 id;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
- err = gem_context_register(ctx, file->driver_priv);
+ err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
if (err < 0)
goto err_ctx;
@@ -102,7 +97,16 @@ err_ctx:
struct i915_gem_context *
kernel_context(struct drm_i915_private *i915)
{
- return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+ struct i915_gem_context *ctx;
+
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_persistence(ctx);
+
+ return ctx;
}
void kernel_context_close(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 0b926653914f..fb83d2f09212 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -7,6 +7,9 @@
#ifndef __MOCK_CONTEXT_H
#define __MOCK_CONTEXT_H
+struct file;
+struct drm_i915_private;
+
void mock_init_contexts(struct drm_i915_private *i915);
struct i915_gem_context *
@@ -16,7 +19,7 @@ mock_context(struct drm_i915_private *i915,
void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
+live_context(struct drm_i915_private *i915, struct file *file);
struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b9e059d4328a..9272bef57092 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -76,20 +76,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
-static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kunmap(mock->pages[page_num]);
-}
-
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
return -ENODEV;
@@ -99,8 +85,6 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.map_dma_buf = mock_map_dma_buf,
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
- .map = mock_dmabuf_kmap,
- .unmap = mock_dmabuf_kunmap,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
index f0f8bbd82dfc..22818bbb139d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
@@ -14,7 +14,7 @@ struct mock_dmabuf {
struct page *pages[];
};
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+static inline struct mock_dmabuf *to_mock(struct dma_buf *buf)
{
return buf->priv;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
index 370360b4a148..688511afa883 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
@@ -7,6 +7,8 @@
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
+#include "gem/i915_gem_object_types.h"
+
struct mock_object {
struct drm_i915_gem_object base;
};
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
deleted file mode 100644
index 1c75b5c9790c..000000000000
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Extra header tests
-include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test
deleted file mode 100644
index 61e06cbb4b32..000000000000
--- a/drivers/gpu/drm/i915/gt/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
new file mode 100644
index 000000000000..6a5e9ab20b94
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "i915_drv.h" /* for_each_engine! */
+#include "intel_engine.h"
+
+static int engines_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct drm_printer p;
+
+ p = drm_seq_file_printer(m);
+ for_each_engine(engine, gt, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(engines);
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "engines", &engines_fops },
+ };
+
+ debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h
new file mode 100644
index 000000000000..f69257eaa1cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_ENGINES_H
+#define DEBUGFS_ENGINES_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_ENGINES_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
new file mode 100644
index 000000000000..75255aaacaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+
+void debugfs_gt_register(struct intel_gt *gt)
+{
+ struct dentry *root;
+
+ if (!gt->i915->drm.primary->debugfs_root)
+ return;
+
+ root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ debugfs_engines_register(gt, root);
+ debugfs_gt_pm_register(gt, root);
+}
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+ struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count)
+{
+ while (count--) {
+ if (!files->eval || files->eval(gt))
+ debugfs_create_file(files->name,
+ 0444, root, gt,
+ files->fops);
+
+ files++;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
new file mode 100644
index 000000000000..4ea0f06cda8f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_H
+#define DEBUGFS_GT_H
+
+#include <linux/file.h>
+
+struct intel_gt;
+
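+/* Expand a __name ## _show() seq_file callback into the matching __name ## _open() and __name ## _fops definitions */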
+#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \
+ static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+void debugfs_gt_register(struct intel_gt *gt);
+
+struct debugfs_gt_file {
+ const char *name;
+ const struct file_operations *fops;
+ bool (*eval)(const struct intel_gt *gt);
+};
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+ struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count);
+
+#endif /* DEBUGFS_GT_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
new file mode 100644
index 000000000000..059c9e5c002e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/seq_file.h>
+
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+#include "intel_runtime_pm.h"
+#include "intel_sideband.h"
+#include "intel_uncore.h"
+
+static int fw_domains_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
+
+ seq_printf(m, "user.bypass_count = %u\n",
+ uncore->user_forcewake_count);
+
+ for_each_fw_domain(fw_domain, uncore, tmp)
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(fw_domain->id),
+ READ_ONCE(fw_domain->wake_count));
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+
+static void print_rc6_res(struct seq_file *m,
+ const char *title,
+ const i915_reg_t reg)
+{
+ struct intel_gt *gt = m->private;
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(gt->uncore, reg),
+ intel_rc6_residency_us(&gt->rc6, reg));
+}
+
+static int vlv_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rcctl1, pw_status;
+
+ pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Render Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+ print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
+
+ return fw_domains_show(m, NULL);
+}
+
+static int gen6_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 gt_core_status, rcctl1, rc6vids = 0;
+ u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+
+ gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
+
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ if (INTEL_GEN(i915) >= 9) {
+ gen9_powergate_enable =
+ intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ gen9_powergate_status =
+ intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
+ }
+
+ if (INTEL_GEN(i915) <= 7)
+ sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+
+ seq_printf(m, "RC1e Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ if (INTEL_GEN(i915) >= 9) {
+ seq_printf(m, "Render Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+ seq_printf(m, "Media Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+ }
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_puts(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_puts(m, "Core Power Down\n");
+ else
+ seq_puts(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_puts(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_puts(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_puts(m, "RC7\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ if (INTEL_GEN(i915) >= 9) {
+ seq_printf(m, "Render Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ }
+
+ /* Not exactly sure what this is */
+ print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+ GEN6_GT_GFX_RC6_LOCKED);
+ print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+ print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+ print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
+
+ if (INTEL_GEN(i915) <= 7) {
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ }
+
+ return fw_domains_show(m, NULL);
+}
+
+static int ilk_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+ rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+ crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
+
+ seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno(rgvmodectl & MEMMODE_SWMODE_EN));
+ seq_printf(m, "Gated voltage change: %s\n",
+ yesno(rgvmodectl & MEMMODE_RCLK_GATE));
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ yesno(!(rstdbyctl & RCX_SW_EXIT)));
+ seq_puts(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ seq_puts(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ seq_puts(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ seq_puts(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ seq_puts(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ seq_puts(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ seq_puts(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int drpc_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ intel_wakeref_t wakeref;
+ int err = -ENODEV;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_drpc(m);
+ else if (INTEL_GEN(i915) >= 6)
+ err = gen6_drpc(m);
+ else
+ err = ilk_drpc(m);
+ }
+
+ return err;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ if (IS_GEN(i915, 5)) {
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ u32 rpmodectl, freq_sts;
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+
+ vlv_punit_get(i915);
+ freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+
+ seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
+
+ seq_printf(m, "actual GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
+
+ seq_printf(m, "current GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+
+ seq_printf(m, "max GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ seq_printf(m, "min GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else if (INTEL_GEN(i915) >= 6) {
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ u32 rp_state_cap;
+ u32 rpmodectl, rpinclimit, rpdeclimit;
+ u32 rpstat, cagf, reqf;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
+ u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
+ int max_freq;
+
+ rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
+ if (IS_GEN9_LP(i915)) {
+ rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
+ } else {
+ rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
+ }
+
+ /* RPSTAT1 is in the GT power well */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ if (INTEL_GEN(i915) >= 9) {
+ reqf >>= 23;
+ } else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
+ reqf = intel_gpu_freq(rps, reqf);
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ cagf = intel_rps_read_actual_frequency(rps);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ if (INTEL_GEN(i915) >= 11) {
+ pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent to the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(i915) >= 8) {
+ pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
+ } else {
+ pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
+ }
+ pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(i915) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
+ seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+ rps->pm_intrmsk_mbz);
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+ seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
+ seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+ rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dus)\n",
+ rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dus)\n",
+ rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
+
+ seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+ rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+ rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+ rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
+
+ max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
+ rp_state_cap >> 16) & 0xff;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+
+ max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
+ rp_state_cap >> 0) & 0xff;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+ seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ seq_printf(m, "Current freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+ seq_printf(m, "Actual freq: %d MHz\n", cagf);
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+ seq_printf(m, "Min freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+ seq_printf(m, "Boost freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->boost_freq));
+ seq_printf(m, "Max freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else {
+ seq_puts(m, "no P-state info available\n");
+ }
+
+ seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+ seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
+
+static int llc_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ const bool edram = INTEL_GEN(i915) > 8;
+ struct intel_rps *rps = &gt->rps;
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq, ia_freq;
+
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ i915->edram_size_mb);
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+ seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return 0;
+}
+
+static bool llc_eval(const struct intel_gt *gt)
+{
+ return HAS_LLC(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
+
+static const char *rps_power_to_str(unsigned int power)
+{
+ static const char * const strings[] = {
+ [LOW_POWER] = "low power",
+ [BETWEEN] = "mixed",
+ [HIGH_POWER] = "high power",
+ };
+
+ if (power >= ARRAY_SIZE(strings) || !strings[power])
+ return "unknown";
+
+ return strings[power];
+}
+
+static int rps_boost_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_rps *rps = &gt->rps;
+
+ seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_rps_read_actual_frequency(rps));
+ seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
+
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+
+ if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rpup, rpupei;
+ u32 rpdown, rpdownei;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+ rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+ rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+ rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+ rps_power_to_str(rps->power.mode));
+ seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
+ rpup && rpupei ? 100 * rpup / rpupei : 0,
+ rps->power.up_threshold);
+ seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
+ rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
+ rps->power.down_threshold);
+ } else {
+ seq_puts(m, "\nRPS Autotuning inactive\n");
+ }
+
+ return 0;
+}
+
+static bool rps_eval(const struct intel_gt *gt)
+{
+ return HAS_RPS(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "drpc", &drpc_fops, NULL },
+ { "frequency", &frequency_fops, NULL },
+ { "forcewake", &fw_domains_fops, NULL },
+ { "llc", &llc_fops, llc_eval },
+ { "rps_boost", &rps_boost_fops, rps_eval },
+ };
+
+ debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
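The llc_show() table above asks pcode for its min-frequency table and unpacks each reply word: the low byte is the effective CPU frequency and the next byte the effective ring frequency, both in 100 MHz units, while the GPU frequency itself is rescaled by GEN9_FREQ_SCALER on gen9+. A minimal standalone sketch of that unpacking, assuming the same byte layout as the debugfs code shown here (the packing and the 100 MHz scaling are read off the diff, not taken from pcode documentation):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical pcode reply word, mirroring the decode in llc_show():
 * bits 7:0 carry the effective CPU frequency and bits 15:8 the effective
 * ring frequency, both in 100 MHz units.
 */
static void decode_min_freq_reply(uint32_t ia_freq)
{
	unsigned int cpu_mhz  = ((ia_freq >> 0) & 0xff) * 100;
	unsigned int ring_mhz = ((ia_freq >> 8) & 0xff) * 100;

	printf("CPU %u MHz, ring %u MHz\n", cpu_mhz, ring_mhz);
}

int main(void)
{
	decode_min_freq_reply(0x0c12); /* -> CPU 1800 MHz, ring 1200 MHz */
	return 0;
}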
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
new file mode 100644
index 000000000000..4cf5f5c9da7d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_PM_H
+#define DEBUGFS_GT_PM_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
new file mode 100644
index 000000000000..f4fec7eb4064
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen6_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+
+/* Write the pde at index @pde so that it points at the page table @pt */
+static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
+{
+ /* Caller needs to make sure the write completes if necessary */
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
+}
+
+void gen7_ppgtt_enable(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 ecochk;
+
+ intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ if (IS_HASWELL(i915)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
+
+ for_each_engine(engine, gt, id) {
+ /* GFX_MODE is per-ring on gen7+ */
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ }
+}
+
+void gen6_ppgtt_enable(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_rmw(uncore,
+ GAC_ECO_BITS,
+ 0,
+ ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
+
+ intel_uncore_rmw(uncore,
+ GAB_CTL,
+ 0,
+ GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ intel_uncore_rmw(uncore,
+ GAM_ECOCHK,
+ 0,
+ ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+ intel_uncore_write(uncore,
+ GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+}
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ const gen6_pte_t scratch_pte = vm->scratch[0].encode;
+ unsigned int pde = first_entry / GEN6_PTES;
+ unsigned int pte = first_entry % GEN6_PTES;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+
+ while (num_entries) {
+ struct i915_page_table * const pt =
+ i915_pt_entry(ppgtt->base.pd, pde++);
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > atomic_read(&pt->used));
+ if (!atomic_sub_return(count, &pt->used))
+ ppgtt->scan_for_unused_pt = true;
+
+ /*
+ * Note that the hw doesn't support removing PDE on the fly
+ * (they are cached inside the context with no means to
+ * invalidate the cache), so we can only reset the PTE
+ * entries back to scratch.
+ */
+
+ vaddr = kmap_atomic_px(pt);
+ memset32(vaddr + pte, scratch_pte, count);
+ kunmap_atomic(vaddr);
+
+ pte = 0;
+ }
+}
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pd = ppgtt->pd;
+ unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
+ unsigned int act_pt = first_entry / GEN6_PTES;
+ unsigned int act_pte = first_entry % GEN6_PTES;
+ const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+ struct sgt_dma iter = sgt_dma(vma);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
+
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ do {
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
+ vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
+
+ iter.dma += I915_GTT_PAGE_SIZE;
+ if (iter.dma == iter.max) {
+ iter.sg = __sg_next(iter.sg);
+ if (!iter.sg)
+ break;
+
+ iter.dma = sg_dma_address(iter.sg);
+ iter.max = iter.dma + iter.sg->length;
+ }
+
+ if (++act_pte == GEN6_PTES) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ act_pte = 0;
+ }
+ } while (1);
+ kunmap_atomic(vaddr);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ start = round_down(start, SZ_64K);
+ end = round_up(end, SZ_64K) - start;
+
+ mutex_lock(&ppgtt->flush);
+
+ gen6_for_each_pde(pt, pd, start, end, pde)
+ gen6_write_pde(ppgtt, pde, pt);
+
+ mb();
+ ioread32(ppgtt->pd_addr + pde - 1);
+ gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
+ mb();
+
+ mutex_unlock(&ppgtt->flush);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt, *alloc = NULL;
+ intel_wakeref_t wakeref;
+ u64 from = start;
+ unsigned int pde;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ spin_lock(&pd->lock);
+ gen6_for_each_pde(pt, pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
+ if (px_base(pt) == px_base(&vm->scratch[1])) {
+ spin_unlock(&pd->lock);
+
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ fill32_px(pt, vm->scratch[0].encode);
+
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == &vm->scratch[1]) {
+ pd->entry[pde] = pt;
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
+ }
+ }
+
+ atomic_add(count, &pt->used);
+ }
+ spin_unlock(&pd->lock);
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
+ gen6_flush_pd(ppgtt, from, start);
+
+ goto out;
+
+unwind_out:
+ gen6_ppgtt_clear_range(vm, from, start - from);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return ret;
+}
+
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ int ret;
+
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ if (ret)
+ return ret;
+
+ vm->scratch[0].encode =
+ vm->pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
+ cleanup_scratch_page(vm);
+ return -ENOMEM;
+ }
+
+ fill32_px(&vm->scratch[1], vm->scratch[0].encode);
+ memset_p(pd->entry, &vm->scratch[1], I915_PDES);
+
+ return 0;
+}
+
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
+ struct i915_page_table *pt;
+ u32 pde;
+
+ gen6_for_all_pdes(pt, pd, pde)
+ if (px_base(pt) != scratch)
+ free_px(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+
+ __i915_vma_put(ppgtt->vma);
+
+ gen6_ppgtt_free_pd(ppgtt);
+ free_scratch(vm);
+
+ mutex_destroy(&ppgtt->flush);
+ mutex_destroy(&ppgtt->pin_mutex);
+ kfree(ppgtt->base.pd);
+}
+
+static int pd_vma_set_pages(struct i915_vma *vma)
+{
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ vma->pages = NULL;
+}
+
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
+
+ px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
+
+ gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
+ return 0;
+}
+
+static void pd_vma_unbind(struct i915_vma *vma)
+{
+ struct gen6_ppgtt *ppgtt = vma->private;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all no longer used page tables */
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+ if (px_base(pt) == scratch || atomic_read(&pt->used))
+ continue;
+
+ free_px(&ppgtt->base.vm, pt);
+ pd->entry[pde] = scratch;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
+}
+
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
+
+static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
+{
+ struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
+
+ vma = i915_vma_alloc();
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ i915_active_init(&vma->active, NULL, NULL);
+
+ kref_init(&vma->ref);
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(&ggtt->vm);
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
+
+ vma->size = size;
+ vma->fence_size = size;
+ atomic_set(&vma->flags, I915_VMA_GGTT);
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
+
+ INIT_LIST_HEAD(&vma->obj_link);
+ INIT_LIST_HEAD(&vma->closed_link);
+
+ return vma;
+}
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+
+ /*
+ * Work around the limited maximum vma->pin_count and the aliasing_ppgtt
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
+ return 0;
+
+ if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+ return -EINTR;
+
+ /*
+ * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ err = 0;
+ if (!atomic_read(&ppgtt->pin_count))
+ err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+ mutex_unlock(&ppgtt->pin_mutex);
+
+ return err;
+}
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
+}
+
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ if (!atomic_read(&ppgtt->pin_count))
+ return;
+
+ i915_vma_unpin(ppgtt->vma);
+ atomic_set(&ppgtt->pin_count, 0);
+}
+
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ggtt * const ggtt = gt->ggtt;
+ struct gen6_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&ppgtt->flush);
+ mutex_init(&ppgtt->pin_mutex);
+
+ ppgtt_init(&ppgtt->base, gt);
+ ppgtt->base.vm.top = 1;
+
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
+ if (!ppgtt->base.pd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_pd;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ free_scratch(&ppgtt->base.vm);
+err_pd:
+ kfree(ppgtt->base.pd);
+err_free:
+ mutex_destroy(&ppgtt->pin_mutex);
+ kfree(ppgtt);
+ return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
new file mode 100644
index 000000000000..72e481806c96
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_PPGTT_H__
+#define __GEN6_PPGTT_H__
+
+#include "intel_gtt.h"
+
+struct gen6_ppgtt {
+ struct i915_ppgtt base;
+
+ struct mutex flush;
+ struct i915_vma *vma;
+ gen6_pte_t __iomem *pd_addr;
+
+ atomic_t pin_count;
+ struct mutex pin_mutex;
+
+ bool scan_for_unused_pt;
+};
+
+static inline u32 gen6_pte_index(u32 addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pte_count(u32 addr, u32 length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pde_index(u32 addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
+
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
+
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible by the PDE size, the
+ * macro rounds down and up as needed. Start=0 and length=2G iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
+ */
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+ for (iter = gen6_pde_index(start); \
+ length > 0 && iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ temp = min(temp - start, length); \
+ start += temp, length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter) \
+ for (iter = 0; \
+ iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ++iter)
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
+void gen6_ppgtt_enable(struct intel_gt *gt);
+void gen7_ppgtt_enable(struct intel_gt *gt);
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
+
+#endif
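gen6_for_each_pde() above clamps each step to whatever is left of the range or to the next PDE boundary, whichever is smaller. The loop below reproduces that arithmetic in plain C; the 4 MiB-per-PDE shift and the 512-entry directory are assumptions of the sketch (they mirror the kernel's GEN6_PDE_SHIFT and I915_PDES for a 2 GiB gen6 address space), and nothing here touches real page tables:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative constants: each PDE maps 1 << 22 bytes (4 MiB) and the page
 * directory holds 512 entries, i.e. a 2 GiB address space.  They stand in
 * for GEN6_PDE_SHIFT/I915_PDES and are assumptions of this sketch.
 */
#define PDE_SHIFT	22
#define NUM_PDES	512

/* Same stepping as gen6_for_each_pde(): advance to the next PDE boundary
 * or by the remaining length, whichever comes first. */
static void walk_pdes(uint64_t start, uint64_t length)
{
	unsigned int pde = start >> PDE_SHIFT;

	while (length && pde < NUM_PDES) {
		uint64_t boundary = (uint64_t)(pde + 1) << PDE_SHIFT;
		uint64_t step = boundary - start;

		if (step > length)
			step = length;

		printf("pde %u: [%#llx, %#llx)\n", pde,
		       (unsigned long long)start,
		       (unsigned long long)(start + step));

		start += step;
		length -= step;
		pde++;
	}
}

int main(void)
{
	walk_pdes(0x003ff000, 0x3000); /* crosses one 4 MiB boundary: pde 0 and pde 1 */
	return 0;
}

For a 2 GiB range starting at 0 this visits all 512 PDEs, which is what the "Start=0 and length=2G" note in the macro's comment describes.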
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
index 11c8e7b3dd7c..11c8e7b3dd7c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
index 655180646152..655180646152 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
new file mode 100644
index 000000000000..4d1de2d97d5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen8_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
+{
+ u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (level != I915_CACHE_NONE)
+ pde |= PPAT_CACHED_PDE;
+ else
+ pde |= PPAT_UNCACHED;
+
+ return pde;
+}
+
+static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+{
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
+ enum vgt_g2v_type msg;
+ int i;
+
+ if (create)
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
+ else
+ atomic_dec(px_used(ppgtt->pd));
+
+ mutex_lock(&i915->vgpu.lock);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+ msg = create ?
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].lo),
+ lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].hi),
+ upper_32_bits(daddr));
+ }
+
+ msg = create ?
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
+ }
+
+ /* g2v_notify atomically (via hv trap) consumes the message packet. */
+ intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
+
+ mutex_unlock(&i915->vgpu.lock);
+}
+
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
+
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
+static inline unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
+{
+ const int shift = gen8_pd_shift(lvl);
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ end += ~mask >> gen8_pd_shift(1);
+
+ *idx = i915_pde_index(start, shift);
+ if ((start ^ end) & mask)
+ return GEN8_PDES - *idx;
+ else
+ return i915_pde_index(end, shift) - *idx;
+}
+
+static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+{
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ return (start ^ end) & mask && (start & ~mask) == 0;
+}
+
+static inline unsigned int gen8_pt_count(u64 start, u64 end)
+{
+ GEM_BUG_ON(start >= end);
+ if ((start ^ end) >> gen8_pd_shift(1))
+ return GEN8_PDES - (start & (GEN8_PDES - 1));
+ else
+ return end - start;
+}
+
+static inline unsigned int
+gen8_pd_top_count(const struct i915_address_space *vm)
+{
+ unsigned int shift = __gen8_pte_shift(vm->top);
+ return (vm->total + (1ull << shift) - 1) >> shift;
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (vm->top == 2)
+ return ppgtt->pd;
+ else
+ return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+ return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
+{
+ if (lvl) {
+ void **pde = pd->entry;
+
+ do {
+ if (!*pde)
+ continue;
+
+ __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+ } while (pde++, --count);
+ }
+
+ free_px(vm, pd);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
+
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ free_scratch(vm);
+}
+
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 start, const u64 end, int lvl)
+{
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ unsigned int idx, len;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
+
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+ gen8_pd_contains(start, end, lvl)) {
+ DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+ __func__, vm, lvl + 1, idx, start, end);
+ clear_pd_entry(pd, idx, scratch);
+ __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+ start += (u64)I915_PDES << gen8_pd_shift(lvl);
+ continue;
+ }
+
+ if (lvl) {
+ start = __gen8_ppgtt_clear(vm, as_pd(pt),
+ start, end, lvl);
+ } else {
+ unsigned int count;
+ u64 *vaddr;
+
+ count = gen8_pt_count(start, end);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
+ __func__, vm, lvl, start, end,
+ gen8_pd_index(start, 0), count,
+ atomic_read(&pt->used));
+ GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+
+ vaddr = kmap_atomic_px(pt);
+ memset64(vaddr + gen8_pd_index(start, 0),
+ vm->scratch[0].encode,
+ count);
+ kunmap_atomic(vaddr);
+
+ atomic_sub(count, &pt->used);
+ start += count;
+ }
+
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ } while (idx++, --len);
+
+ return start;
+}
+
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ start, start + length, vm->top);
+}
+
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
+{
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ struct i915_page_table *alloc = NULL;
+ unsigned int idx, len;
+ int ret = 0;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, *start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+ spin_lock(&pd->lock);
+ GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+ __func__, vm, lvl + 1, idx);
+
+ pt = fetch_and_zero(&alloc);
+ if (lvl) {
+ if (!pt) {
+ pt = &alloc_pd(vm)->pt;
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ fill_px(pt, vm->scratch[lvl].encode);
+ } else {
+ if (!pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ if (intel_vgpu_active(vm->i915) ||
+ gen8_pt_count(*start, end) < I915_PDES)
+ fill_px(pt, vm->scratch[lvl].encode);
+ }
+
+ spin_lock(&pd->lock);
+ if (likely(!pd->entry[idx]))
+ set_pd_entry(pd, idx, pt);
+ else
+ alloc = pt, pt = pd->entry[idx];
+ }
+
+ if (lvl) {
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+ start, end, lvl);
+ if (unlikely(ret)) {
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ goto out;
+ }
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ GEM_BUG_ON(!atomic_read(&pt->used));
+ } else {
+ unsigned int count = gen8_pt_count(*start, end);
+
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
+ __func__, vm, lvl, *start, end,
+ gen8_pd_index(*start, 0), count,
+ atomic_read(&pt->used));
+
+ atomic_add(count, &pt->used);
+ /* All other pdes may be simultaneously removed */
+ GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
+ *start += count;
+ }
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ return ret;
+}
+
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ u64 from;
+ int err;
+
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+ from = start;
+
+ err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
+ if (unlikely(err && from != start))
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ from, start, vm->top);
+
+ return err;
+}
+
+static __always_inline u64
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
+ struct sgt_dma *iter,
+ u64 idx,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_page_directory *pd;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ gen8_pte_t *vaddr;
+
+ pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ do {
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+
+ iter->dma += I915_GTT_PAGE_SIZE;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg) {
+ idx = 0;
+ break;
+ }
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + iter->sg->length;
+ }
+
+ if (gen8_pd_index(++idx, 0) == 0) {
+ if (gen8_pd_index(idx, 1) == 0) {
+ /* Limited by sg length for 3lvl */
+ if (gen8_pd_index(idx, 2) == 0)
+ break;
+
+ pd = pdp->entry[gen8_pd_index(idx, 2)];
+ }
+
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ }
+ } while (1);
+ kunmap_atomic(vaddr);
+
+ return idx;
+}
+
+static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ u64 start = vma->node.start;
+ dma_addr_t rem = iter->sg->length;
+
+ GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vma->vm, start);
+ struct i915_page_directory * const pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
+ gen8_pte_t encode = pte_encode;
+ unsigned int maybe_64K = -1;
+ unsigned int page_size;
+ gen8_pte_t *vaddr;
+ u16 index;
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
+ encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ vaddr = kmap_atomic_px(pd);
+ } else {
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
+
+ index = __gen8_pte_index(start, 0);
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+ maybe_64K = __gen8_pte_index(start, 1);
+
+ vaddr = kmap_atomic_px(pt);
+ }
+
+ do {
+ GEM_BUG_ON(iter->sg->length < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = iter->sg->length;
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K != -1 && index < I915_PDES &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+ maybe_64K = -1;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < I915_PDES);
+
+ kunmap_atomic(vaddr);
+
+ /*
+ * Is it safe to mark the 2M block as 64K? -- Either we have
+ * filled the whole page-table with 64K entries, or filled part of
+ * it and have reached the end of the sg table and we have
+ * enough padding.
+ */
+ if (maybe_64K != -1 &&
+ (index == I915_PDES ||
+ (i915_vm_has_scratch_64K(vma->vm) &&
+ !iter->sg && IS_ALIGNED(vma->node.start +
+ vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = kmap_atomic_px(pd);
+ vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+ kunmap_atomic(vaddr);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+
+ /*
+ * We write all 4K page entries, even when using 64K
+ * pages. In order to verify that the HW isn't cheating
+ * by using the 4K PTE instead of the 64K PTE, we want
+ * to remove all the surplus entries. If the HW skipped
+ * the 64K PTE, it will read/write into the scratch page
+ * instead - which we detect as missing results during
+ * selftests.
+ */
+ if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+ u16 i;
+
+ encode = vma->vm->scratch[0].encode;
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+
+ for (i = 1; i < index; i += 16)
+ memset64(vaddr + i, encode, 15);
+
+ kunmap_atomic(vaddr);
+ }
+ }
+
+ vma->page_sizes.gtt |= page_size;
+ } while (iter->sg);
+}
+
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+ struct sgt_dma iter = sgt_dma(vma);
+
+ if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ } else {
+ u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+
+ idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+ cache_level, flags);
+ } while (idx);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ }
+}
+
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+ int ret;
+ int i;
+
+ /*
+ * If everybody agrees not to write into the scratch page,
+ * we can reuse it for all vms, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+ struct i915_address_space *clone = vm->gt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_order = clone->scratch_order;
+ memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
+ px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
+ return 0;
+ }
+
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ if (ret)
+ return ret;
+
+ vm->scratch[0].encode =
+ gen8_pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_LLC, vm->has_read_only);
+
+ for (i = 1; i <= vm->top; i++) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ goto free_scratch;
+
+ fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
+ vm->scratch[i].encode =
+ gen8_pde_encode(px_dma(&vm->scratch[i]),
+ I915_CACHE_LLC);
+ }
+
+ return 0;
+
+free_scratch:
+ free_scratch(vm);
+ return -ENOMEM;
+}
+
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+{
+ struct i915_address_space *vm = &ppgtt->vm;
+ struct i915_page_directory *pd = ppgtt->pd;
+ unsigned int idx;
+
+ GEM_BUG_ON(vm->top != 2);
+ GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
+
+ for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+ struct i915_page_directory *pde;
+
+ pde = alloc_pd(vm);
+ if (IS_ERR(pde))
+ return PTR_ERR(pde);
+
+ fill_px(pde, vm->scratch[1].encode);
+ set_pd_entry(pd, idx, pde);
+ atomic_inc(px_used(pde)); /* keep pinned */
+ }
+ wmb();
+
+ return 0;
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+ const unsigned int count = gen8_pd_top_count(vm);
+ struct i915_page_directory *pd;
+
+ GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+
+ pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ atomic_inc(px_used(pd)); /* mark as pinned */
+ return pd;
+}
+
+/*
+ * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ *
+ */
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ppgtt_init(ppgtt, gt);
+ ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
+ */
+ ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+
+ /*
+ * There are only a few exceptions for gen >= 6: chv and bxt.
+ * And we are not sure about the latter, so play safe for now.
+ */
+ if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
+ ppgtt->vm.pt_kmap_wc = true;
+
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
+
+ ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(ppgtt->pd)) {
+ err = PTR_ERR(ppgtt->pd);
+ goto err_free_scratch;
+ }
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_free_pd;
+ }
+
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
+ if (intel_vgpu_active(gt->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, true);
+
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+
+ return ppgtt;
+
+err_free_pd:
+ __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+ gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
+err_free_scratch:
+ free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
+}
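The gen8 code above slices a canonical address into 9-bit indices per level above a 12-bit page offset (gen8_pd_shift()/__gen8_pte_index()), and the comment before gen8_ppgtt_create() sums the 3-level case to 4 GiB. A standalone sketch of that index arithmetic, with the 4 KiB page and 512-entries-per-level figures taken from GEN8_PAGE_SIZE/GEN8_PDES in the diff; the sample address and helper names are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* 4 KiB pages (12 offset bits) and 512 entries per level (9 index bits),
 * mirroring GEN8_PAGE_SIZE and GEN8_PDES above. */
#define PTE_SHIFT	12
#define LVL_BITS	9
#define LVL_MASK	((1u << LVL_BITS) - 1)

/* Same arithmetic as __gen8_pte_index(addr, lvl). */
static unsigned int lvl_index(uint64_t addr, int lvl)
{
	return (addr >> (PTE_SHIFT + lvl * LVL_BITS)) & LVL_MASK;
}

int main(void)
{
	uint64_t addr = 0x00007f1234567000ull; /* arbitrary 48-bit example */
	int lvl;

	for (lvl = 3; lvl >= 0; lvl--)	/* pml4 -> pdp -> pd -> pt */
		printf("lvl %d index: %u\n", lvl, lvl_index(addr, lvl));

	/* 3-level (legacy 32b) span from the gen8_ppgtt_create() comment:
	 * 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes. */
	printf("3lvl span: %llu bytes\n", 4ull * 512 * 512 * 4096);
	return 0;
}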
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
new file mode 100644
index 000000000000..76a08b9c1f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN8_PPGTT_H__
+#define __GEN8_PPGTT_H__
+
+struct intel_gt;
+
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
index 95288a34c15d..95288a34c15d 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
index 7d3ac02f0177..7d3ac02f0177 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index c092bdf5f0bf..0ba524a414c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -27,6 +27,9 @@
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
static void irq_enable(struct intel_engine_cs *engine)
{
@@ -34,9 +37,9 @@ static void irq_enable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->gt->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -45,35 +48,38 @@ static void irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->gt->irq_lock);
engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->gt->irq_lock);
}
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
- irq_disable(container_of(b,
- struct intel_engine_cs,
- breadcrumbs));
+ irq_disable(engine);
b->irq_armed = false;
+ intel_gt_pm_put_async(engine->gt);
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
if (!b->irq_armed)
return;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed)
__intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
static inline bool __request_completed(const struct i915_request *rq)
@@ -112,23 +118,30 @@ __dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
}
static void
-__dma_fence_signal__notify(struct dma_fence *fence)
+__dma_fence_signal__notify(struct dma_fence *fence,
+ const struct list_head *list)
{
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
- lockdep_assert_irqs_disabled();
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
cur->func(fence, cur);
}
- INIT_LIST_HEAD(&fence->cb_list);
}
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
+ intel_engine_add_retire(engine, tl);
+}
+
+static void signal_irq_work(struct irq_work *work)
+{
+ struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
@@ -175,8 +188,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
if (!list_is_first(pos, &ce->signals)) {
/* Advance the list to the first incomplete request */
__list_del_many(&ce->signals, pos);
- if (&ce->signals == pos) /* now empty */
+ if (&ce->signals == pos) { /* now empty */
list_del_init(&ce->signal_link);
+ add_retire(b, ce->timeline);
+ }
}
}
@@ -185,62 +200,29 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
-
- __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ struct list_head cb_list;
spin_lock(&rq->lock);
- __dma_fence_signal__notify(&rq->fence);
+ list_replace(&rq->fence.cb_list, &cb_list);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ __dma_fence_signal__notify(&rq->fence, &cb_list);
spin_unlock(&rq->lock);
i915_request_put(rq);
}
}
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
-{
- local_irq_disable();
- intel_engine_breadcrumbs_irq(engine);
- local_irq_enable();
-}
-
-static void signal_irq_work(struct irq_work *work)
-{
- struct intel_engine_cs *engine =
- container_of(work, typeof(*engine), breadcrumbs.irq_work);
-
- intel_engine_breadcrumbs_irq(engine);
-}
-
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock_irq(&b->irq_lock);
- if (!b->irq_enabled++)
- irq_enable(engine);
- GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
- spin_unlock_irq(&b->irq_lock);
-}
-
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock_irq(&b->irq_lock);
- GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
- if (!--b->irq_enabled)
- irq_disable(engine);
- spin_unlock_irq(&b->irq_lock);
-}
-
-static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
- return;
+ return true;
+
+ if (!intel_gt_pm_get_if_awake(engine->gt))
+ return false;
/*
* The breadcrumb irq will be disarmed on the interrupt after the
@@ -260,6 +242,8 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
if (!b->irq_enabled++)
irq_enable(engine);
+
+ return true;
}
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
@@ -294,23 +278,23 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
struct list_head *pos;
spin_lock(&b->irq_lock);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
- __intel_breadcrumbs_arm_irq(b);
+ if (!__intel_breadcrumbs_arm_irq(b))
+ goto unlock;
/*
* We keep the seqno in retirement order, so we can break
- * inside intel_engine_breadcrumbs_irq as soon as we've passed
- * the last completed request (or seen a request that hasn't
- * event started). We could iterate the timeline->requests list,
+ * inside intel_engine_signal_breadcrumbs as soon as we've
+ * passed the last completed request (or seen a request that
+ * hasn't even started). We could walk the timeline->requests,
* but keeping a separate signalers_list has the advantage of
* hopefully being much smaller than the full list and so
* provides faster iteration and detection when there are no
@@ -333,6 +317,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+unlock:
spin_unlock(&b->irq_lock);
}
@@ -344,7 +329,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
/*
* We must wait for b->irq_lock so that we know the interrupt handler
@@ -354,7 +338,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
*/
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
list_del(&rq->signal_link);
if (list_empty(&ce->signals))
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 23120901c55f..57e8a051ddc2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -13,6 +13,7 @@
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_ring.h"
static struct i915_global_context {
struct i915_global base;
@@ -30,8 +31,7 @@ void intel_context_free(struct intel_context *ce)
}
struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -39,164 +39,266 @@ intel_context_create(struct i915_gem_context *ctx,
if (!ce)
return ERR_PTR(-ENOMEM);
- intel_context_init(ce, ctx, engine);
+ intel_context_init(ce, engine);
return ce;
}
-int __intel_context_do_pin(struct intel_context *ce)
+int intel_context_alloc_state(struct intel_context *ce)
{
- int err;
+ int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
return -EINTR;
- if (likely(!atomic_read(&ce->pin_count))) {
- intel_wakeref_t wakeref;
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ err = ce->ops->alloc(ce);
+ if (unlikely(err))
+ goto unlock;
+
+ set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ }
+
+unlock:
+ mutex_unlock(&ce->pin_mutex);
+ return err;
+}
+
+static int intel_context_active_acquire(struct intel_context *ce)
+{
+ int err;
+
+ __i915_active_acquire(&ce->active);
+
+ if (intel_context_is_barrier(ce))
+ return 0;
+
+ /* Preallocate tracking nodes */
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err)
+ i915_active_release(&ce->active);
- err = 0;
- with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
- err = ce->ops->pin(ce);
+ return err;
+}
+
+static void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
+}
+
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ int err;
+
+ if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ err = intel_context_alloc_state(ce);
if (err)
- goto err;
+ return err;
+ }
- i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex)) {
+ err = -EINTR;
+ goto out_release;
+ }
+
+ if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
+ err = intel_context_active_acquire(ce);
+ if (unlikely(err))
+ goto out_unlock;
+
+ err = ce->ops->pin(ce);
+ if (unlikely(err))
+ goto err_active;
+
+ CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
+ atomic_inc(&ce->pin_count);
}
- atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ goto out_unlock;
+err_active:
+ intel_context_active_release(ce);
+out_unlock:
mutex_unlock(&ce->pin_mutex);
- return 0;
-
-err:
- mutex_unlock(&ce->pin_mutex);
+out_release:
+ i915_active_release(&ce->active);
return err;
}
void intel_context_unpin(struct intel_context *ce)
{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ if (!atomic_dec_and_test(&ce->pin_count))
return;
- /* We may be called from inside intel_context_pin() to evict another */
- intel_context_get(ce);
- mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
- if (likely(atomic_dec_and_test(&ce->pin_count))) {
- ce->ops->unpin(ce);
-
- i915_gem_context_put(ce->gem_context);
- intel_context_active_release(ce);
- }
+ CE_TRACE(ce, "unpin\n");
+ ce->ops->unpin(ce);
- mutex_unlock(&ce->pin_mutex);
+ /*
+ * Once released, we may asynchronously drop the active reference.
+ * As that may be the only reference keeping the context alive,
+ * take an extra now so that it is not freed before we finish
+ * dereferencing it.
+ */
+ intel_context_get(ce);
+ intel_context_active_release(ce);
intel_context_put(ce);
}
-static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
+static int __context_pin_state(struct i915_vma *vma)
{
+ unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
int err;
- err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
+ err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
if (err)
return err;
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
/*
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
*/
- vma->obj->pin_global++;
+ i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;
return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
- vma->obj->pin_global--;
+ i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
-static void intel_context_retire(struct i915_active *active)
+static int __ring_active(struct intel_ring *ring)
{
- struct intel_context *ce = container_of(active, typeof(*ce), active);
+ int err;
- if (ce->state)
- __context_unpin_state(ce->state);
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ return err;
- intel_ring_unpin(ce->ring);
- intel_context_put(ce);
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_active;
+
+ return 0;
+
+err_active:
+ i915_active_release(&ring->vma->active);
+ return err;
}
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void __ring_retire(struct intel_ring *ring)
{
- GEM_BUG_ON(!engine->cops);
+ intel_ring_unpin(ring);
+ i915_active_release(&ring->vma->active);
+}
- kref_init(&ce->ref);
+__i915_active_call
+static void __intel_context_retire(struct i915_active *active)
+{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
- ce->gem_context = ctx;
- ce->engine = engine;
- ce->ops = engine->cops;
- ce->sseu = engine->sseu;
+ CE_TRACE(ce, "retire\n");
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
+ set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ if (ce->state)
+ __context_unpin_state(ce->state);
- mutex_init(&ce->pin_mutex);
+ intel_timeline_unpin(ce->timeline);
+ __ring_retire(ce->ring);
- i915_active_init(ctx->i915, &ce->active, intel_context_retire);
+ intel_context_put(ce);
}
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
+static int __intel_context_active(struct i915_active *active)
{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
int err;
- if (!i915_active_acquire(&ce->active))
- return 0;
+ CE_TRACE(ce, "active\n");
intel_context_get(ce);
- err = intel_ring_pin(ce->ring);
+ err = __ring_active(ce->ring);
if (err)
goto err_put;
+ err = intel_timeline_pin(ce->timeline);
+ if (err)
+ goto err_ring;
+
if (!ce->state)
return 0;
- err = __context_pin_state(ce->state, flags);
+ err = __context_pin_state(ce->state);
if (err)
- goto err_ring;
-
- /* Preallocate tracking nodes */
- if (!i915_gem_context_is_kernel(ce->gem_context)) {
- err = i915_active_acquire_preallocate_barrier(&ce->active,
- ce->engine);
- if (err)
- goto err_state;
- }
+ goto err_timeline;
return 0;
-err_state:
- __context_unpin_state(ce->state);
+err_timeline:
+ intel_timeline_unpin(ce->timeline);
err_ring:
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
err_put:
intel_context_put(ce);
- i915_active_cancel(&ce->active);
return err;
}
-void intel_context_active_release(struct intel_context *ce)
+void
+intel_context_init(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- /* Nodes preallocated in intel_context_active() */
- i915_active_acquire_barrier(&ce->active);
- i915_active_release(&ce->active);
+ GEM_BUG_ON(!engine->cops);
+ GEM_BUG_ON(!engine->gt->vm);
+
+ kref_init(&ce->ref);
+
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->sseu = engine->sseu;
+ ce->ring = __intel_context_ring_size(SZ_4K);
+
+ ce->vm = i915_vm_get(engine->gt->vm);
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ i915_active_init(&ce->active,
+ __intel_context_active, __intel_context_retire);
+}
+
+void intel_context_fini(struct intel_context *ce)
+{
+ if (ce->timeline)
+ intel_timeline_put(ce->timeline);
+ i915_vm_put(ce->vm);
+
+ mutex_destroy(&ce->pin_mutex);
+ i915_active_fini(&ce->active);
}
static void i915_global_context_shrink(void)
@@ -227,13 +329,42 @@ int __init i915_global_context_init(void)
void intel_context_enter_engine(struct intel_context *ce)
{
intel_engine_pm_get(ce->engine);
+ intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
+ intel_timeline_exit(ce->timeline);
intel_engine_pm_put(ce->engine);
}
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ /* Only suitable for use in remotely modifying this context */
+ GEM_BUG_ON(rq->context == ce);
+
+ if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
+ /* Queue this switch after current activity by this context. */
+ err = i915_active_fence_set(&tl->last_request, rq);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Guarantee that the context image and the timeline remain pinned until the
+ * modifying request is retired by setting the ce activity tracker.
+ *
+ * But we only need to take one pin on account of it. In other words,
+ * transfer the pinned ce object to the tracked active request.
+ */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ return i915_active_add_request(&ce->active, rq);
+}
+
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
struct i915_request *rq;
@@ -248,3 +379,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
return rq;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
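__intel_context_do_pin() above relies on an "increment unless zero" fast path so that already-pinned contexts never take pin_mutex, and only the first pin pays for ce->ops->pin() under the lock. A userspace sketch of the same idiom, using C11 atomics and a pthread mutex in place of the kernel primitives; the struct, the first_pin() hook and all naming are hypothetical stand-ins:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a pinnable object (e.g. a context). */
struct pinnable {
	atomic_int pin_count;
	pthread_mutex_t lock;
};

/* Placeholder for the expensive first-pin work (ce->ops->pin() above). */
static void first_pin(struct pinnable *p)
{
	(void)p;
	printf("first pin: doing the real setup\n");
}

/* Increment-unless-zero, the userspace analogue of atomic_add_unless(). */
static bool inc_not_zero(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur != 0)
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;
	return false;
}

static void pin(struct pinnable *p)
{
	if (inc_not_zero(&p->pin_count))
		return;			/* fast path: already pinned */

	pthread_mutex_lock(&p->lock);
	if (atomic_load(&p->pin_count) == 0)
		first_pin(p);		/* only the first pin pays for setup */
	atomic_fetch_add(&p->pin_count, 1);
	pthread_mutex_unlock(&p->lock);
}

static void unpin(struct pinnable *p)
{
	if (atomic_fetch_sub(&p->pin_count, 1) == 1)
		printf("last unpin: teardown would go here\n");
}

int main(void)
{
	struct pinnable p = { .lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_init(&p.pin_count, 0);
	pin(&p);	/* slow path */
	pin(&p);	/* fast path, no lock taken */
	unpin(&p);
	unpin(&p);	/* count drops to zero */
	return 0;
}

As in the kernel version, callers are expected to hold their own reference across pin/unpin so the fast path cannot race with the object being torn down.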
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a47275bc4f01..30bd248827d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -7,18 +7,31 @@
#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
+#include <linux/bitops.h>
#include <linux/lockdep.h>
+#include <linux/types.h>
+#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_ring_types.h"
+#include "intel_timeline_types.h"
+
+#define CE_TRACE(ce, fmt, ...) do { \
+ const struct intel_context *ce__ = (ce); \
+ ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \
+ ce__->timeline->fence_context, \
+ ##__VA_ARGS__); \
+} while (0)
void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
+void intel_context_fini(struct intel_context *ce);
struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
+
+int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
@@ -65,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
int __intel_context_do_pin(struct intel_context *ce);
+static inline bool intel_context_pin_if_active(struct intel_context *ce)
+{
+ return atomic_inc_not_zero(&ce->pin_count);
+}
+
static inline int intel_context_pin(struct intel_context *ce)
{
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ if (likely(intel_context_pin_if_active(ce)))
return 0;
return __intel_context_do_pin(ce);
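
The new intel_context_pin_if_active() helper can also be used on its own to peek at a context only while it already holds a pin, without triggering any allocation or pinning work. A hedged sketch (hypothetical helper):

static bool context_has_state(struct intel_context *ce)
{
	bool ret = false;

	if (intel_context_pin_if_active(ce)) {	/* no-op unless already pinned */
		ret = ce->state != NULL;
		intel_context_unpin(ce);
	}

	return ret;
}
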
@@ -86,25 +104,25 @@ void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
if (!ce->active_count++)
ce->ops->enter(ce);
}
static inline void intel_context_mark_active(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
++ce->active_count;
}
static inline void intel_context_exit(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
GEM_BUG_ON(!ce->active_count);
if (!--ce->active_count)
ce->ops->exit(ce);
}
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
-void intel_context_active_release(struct intel_context *ce);
-
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
@@ -116,19 +134,94 @@ static inline void intel_context_put(struct intel_context *ce)
kref_put(&ce->ref, ce->ops->destroy);
}
-static inline int __must_check
+static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
- __acquires(&ce->ring->timeline->mutex)
+ __acquires(&ce->timeline->mutex)
{
- return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ err = mutex_lock_interruptible(&tl->mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ return tl;
}
-static inline void intel_context_timeline_unlock(struct intel_context *ce)
- __releases(&ce->ring->timeline->mutex)
+static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
+ __releases(&tl->mutex)
{
- mutex_unlock(&ce->ring->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
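
Since intel_context_timeline_lock() now hands back the timeline it locked, the unlock side no longer needs the context. A minimal sketch of the new pattern (hypothetical caller):

static int with_timeline(struct intel_context *ce)
{
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	/* ... emit requests ordered on tl ... */

	intel_context_timeline_unlock(tl);
	return 0;
}
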
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq);
+
struct i915_request *intel_context_create_request(struct intel_context *ce);
+static inline struct intel_ring *__intel_context_ring_size(u64 sz)
+{
+ return u64_to_ptr(struct intel_ring, sz);
+}
+
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
+static inline bool intel_context_use_semaphores(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_set_use_semaphores(struct intel_context *ce)
+{
+ set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
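
The flag helpers above are thin wrappers around test/set/clear_bit() on ce->flags; intel_context_set_banned() is a test-and-set, so only the first caller observes the transition. A sketch (hypothetical helper, not part of the patch):

static void ban_context(struct intel_context *ce)
{
	if (intel_context_set_banned(ce))
		return;		/* already banned, nothing more to do */

	/* first to ban: cancel outstanding work for this context here */
}
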
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 08049ee91cee..ca1420fb8b53 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -13,15 +13,20 @@
#include <linux/types.h>
#include "i915_active_types.h"
+#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#define CONTEXT_REDZONE POISON_INUSE
+
struct i915_gem_context;
struct i915_vma;
struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ int (*alloc)(struct intel_context *ce);
+
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
@@ -35,20 +40,35 @@ struct intel_context_ops {
struct intel_context {
struct kref ref;
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
+#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+
+ struct i915_address_space *vm;
+ struct i915_gem_context __rcu *gem_context;
struct list_head signal_link;
struct list_head signals;
struct i915_vma *state;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
+
+ unsigned long flags;
+#define CONTEXT_BARRIER_BIT 0
+#define CONTEXT_ALLOC_BIT 1
+#define CONTEXT_VALID_BIT 2
+#define CONTEXT_USE_SEMAPHORES 3
+#define CONTEXT_BANNED 4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
+#define CONTEXT_NOPREEMPT 6
u32 *lrc_reg_state;
u64 lrc_desc;
+ u32 tag; /* cookie passed to HW to track this context on submission */
- unsigned int active_count; /* notionally protected by timeline->mutex */
+ unsigned int active_count; /* protected by timeline->mutex */
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 2f1c6871ee95..5df003061e44 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -9,17 +9,17 @@
#include <linux/random.h>
#include <linux/seqlock.h>
-#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
+struct intel_gt;
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -29,6 +29,13 @@ struct drm_printer;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+#define ENGINE_TRACE(e, fmt, ...) do { \
+ const struct intel_engine_cs *e__ __maybe_unused = (e); \
+ GEM_TRACE("%s %s: " fmt, \
+ dev_name(e__->i915->drm.dev), e__->name, \
+ ##__VA_ARGS__); \
+} while (0)
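
ENGINE_TRACE() (like CE_TRACE() in intel_context.h) prefixes a GEM_TRACE() with the device and engine name, so call sites only supply the message and arguments. A usage sketch (hypothetical helper):

static void note_stop_result(struct intel_engine_cs *engine, int err)
{
	ENGINE_TRACE(engine, "stop_cs returned %d\n", err);
}
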
+
/*
* The register defines to be used with the following macros need to accept a
* base param, e.g:
@@ -51,7 +58,7 @@ struct drm_printer;
#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
-#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
@@ -90,106 +97,36 @@ struct drm_printer;
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-enum intel_engine_hangcheck_action {
- ENGINE_IDLE = 0,
- ENGINE_WAIT,
- ENGINE_ACTIVE_SEQNO,
- ENGINE_ACTIVE_HEAD,
- ENGINE_ACTIVE_SUBUNITS,
- ENGINE_WAIT_KICK,
- ENGINE_DEAD,
-};
-
-static inline const char *
-hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
-{
- switch (a) {
- case ENGINE_IDLE:
- return "idle";
- case ENGINE_WAIT:
- return "wait";
- case ENGINE_ACTIVE_SEQNO:
- return "active seqno";
- case ENGINE_ACTIVE_HEAD:
- return "active head";
- case ENGINE_ACTIVE_SUBUNITS:
- return "active subunits";
- case ENGINE_WAIT_KICK:
- return "wait kick";
- case ENGINE_DEAD:
- return "dead";
- }
- return "unknown";
-}
-
-void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
-
-static inline void
-execlists_set_active(struct intel_engine_execlists *execlists,
- unsigned int bit)
+static inline unsigned int
+execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
- __set_bit(bit, (unsigned long *)&execlists->active);
+ return execlists->port_mask + 1;
}
-static inline bool
-execlists_set_active_once(struct intel_engine_execlists *execlists,
- unsigned int bit)
+static inline struct i915_request *
+execlists_active(const struct intel_engine_execlists *execlists)
{
- return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
+ return *READ_ONCE(execlists->active);
}
static inline void
-execlists_clear_active(struct intel_engine_execlists *execlists,
- unsigned int bit)
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
- __clear_bit(bit, (unsigned long *)&execlists->active);
+ local_bh_disable(); /* prevent local softirq and lock recursion */
+ tasklet_lock(&execlists->tasklet);
}
static inline void
-execlists_clear_all_active(struct intel_engine_execlists *execlists)
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
- execlists->active = 0;
+ tasklet_unlock(&execlists->tasklet);
+ local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
}
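
Inspecting the execlists inflight/pending arrays requires stopping the submission tasklet; that is what execlists_active_lock_bh()/unlock_bh() provide. A minimal sketch of a reader (hypothetical helper):

static bool engine_has_inflight_request(struct intel_engine_execlists *el)
{
	struct i915_request *rq;

	execlists_active_lock_bh(el);	/* disable softirq + take tasklet lock */
	rq = execlists_active(el);	/* first request in the inflight array */
	execlists_active_unlock_bh(el);

	return rq != NULL;
}
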
-static inline bool
-execlists_is_active(const struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- return test_bit(bit, (unsigned long *)&execlists->active);
-}
-
-void execlists_user_begin(struct intel_engine_execlists *execlists,
- const struct execlist_port *port);
-void execlists_user_end(struct intel_engine_execlists *execlists);
-
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-static inline unsigned int
-execlists_num_ports(const struct intel_engine_execlists * const execlists)
-{
- return execlists->port_mask + 1;
-}
-
-static inline struct execlist_port *
-execlists_port_complete(struct intel_engine_execlists * const execlists,
- struct execlist_port * const port)
-{
- const unsigned int m = execlists->port_mask;
-
- GEM_BUG_ON(port_index(port, execlists) != 0);
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-
- memmove(port, port + 1, m * sizeof(struct execlist_port));
- memset(port + m, 0, sizeof(struct execlist_port));
-
- return port;
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -243,134 +180,19 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct i915_timeline *timeline,
- int size);
-int intel_ring_pin(struct intel_ring *ring);
-void intel_ring_reset(struct intel_ring *ring, u32 tail);
-unsigned int intel_ring_update_space(struct intel_ring *ring);
-void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct kref *ref);
-
-static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
-{
- kref_get(&ring->ref);
- return ring;
-}
-
-static inline void intel_ring_put(struct intel_ring *ring)
-{
- kref_put(&ring->ref, intel_ring_free);
-}
-
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-
-u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-
-static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
-{
- /* Dummy function.
- *
- * This serves as a placeholder in the code so that the reader
- * can compare against the preceding intel_ring_begin() and
- * check that the number of dwords emitted matches the space
- * reserved for the command packet (i.e. the value passed to
- * intel_ring_begin()).
- */
- GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
-}
-
-static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
-{
- return pos & (ring->size - 1);
-}
-
-static inline bool
-intel_ring_offset_valid(const struct intel_ring *ring,
- unsigned int pos)
-{
- if (pos & -ring->size) /* must be strictly within the ring */
- return false;
-
- if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
- return false;
-
- return true;
-}
+int intel_engines_init_mmio(struct intel_gt *gt);
+int intel_engines_init(struct intel_gt *gt);
-static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
-{
- /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - rq->ring->vaddr;
- GEM_BUG_ON(offset > rq->ring->size);
- return intel_ring_wrap(rq->ring, offset);
-}
-
-static inline void
-assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
-{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
- /*
- * "Ring Buffer Use"
- * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
- * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
- * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- *
- * We use ring->head as the last known location of the actual RING_HEAD,
- * it may have advanced but in the worst case it is equally the same
- * as ring->head and so we should never program RING_TAIL to advance
- * into the same cacheline as ring->head.
- */
-#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
-#undef cacheline
-}
-
-static inline unsigned int
-intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
-{
- /* Whilst writes to the tail are strictly order, there is no
- * serialisation between readers and the writers. The tail may be
- * read by i915_request_retire() just as it is being updated
- * by execlists, as although the breadcrumb is complete, the context
- * switch hasn't been seen.
- */
- assert_ring_tail_valid(ring, tail);
- ring->tail = tail;
- return tail;
-}
-
-static inline unsigned int
-__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
-{
- /*
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- */
- GEM_BUG_ON(!is_power_of_2(size));
- return (head - tail - CACHELINE_BYTES) & (size - 1);
-}
-
-int intel_engines_init_mmio(struct drm_i915_private *i915);
-int intel_engines_setup(struct drm_i915_private *i915);
-int intel_engines_init(struct drm_i915_private *i915);
-void intel_engines_cleanup(struct drm_i915_private *i915);
+void intel_engines_release(struct intel_gt *gt);
+void intel_engines_free(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_ring_submission_setup(struct intel_engine_cs *engine);
-int intel_ring_submission_init(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
@@ -380,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone);
void intel_engine_init_execlists(struct intel_engine_cs *engine);
@@ -388,20 +210,14 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
-intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
+intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
irq_work_queue(&engine->breadcrumbs.irq_work);
}
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
-
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -456,19 +272,19 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
return cs;
}
-static inline void intel_engine_reset(struct intel_engine_cs *engine,
- bool stalled)
+static inline void __intel_engine_reset(struct intel_engine_cs *engine,
+ bool stalled)
{
- if (engine->reset.reset)
- engine->reset.reset(engine, stalled);
+ if (engine->reset.rewind)
+ engine->reset.rewind(engine, stalled);
engine->serial++; /* contexts lost */
}
+bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
-void intel_engines_reset_default_submission(struct drm_i915_private *i915);
-unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
+void intel_engines_reset_default_submission(struct intel_gt *gt);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
@@ -477,64 +293,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
-
-static inline void intel_engine_context_in(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
- GEM_BUG_ON(engine->stats.active == 0);
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static inline void intel_engine_context_out(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- ktime_t last;
-
- if (engine->stats.active && --engine->stats.active == 0) {
- /*
- * Decrement the active context count and in case GPU
- * is now idle add up to the running total.
- */
- last = ktime_sub(ktime_get(), engine->stats.start);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- } else if (engine->stats.active == 0) {
- /*
- * After turning on engine stats, context out might be
- * the first event in which case we account from the
- * time stats gathering was turned on.
- */
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- }
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);
@@ -543,7 +301,7 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
-u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -571,4 +329,22 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+static inline bool
+intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return false;
+
+ return intel_engine_has_preemption(engine);
+}
+
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return false;
+
+ return intel_engine_has_semaphores(engine);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
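
Both predicates above fold to a compile-time false when the corresponding CONFIG_DRM_I915_* value is zero, thanks to IS_ACTIVE(). A sketch of a hypothetical caller combining the static capability with the per-engine engine->props.timeslice_duration_ms knob set up in intel_engine_cs.c further down:

static bool want_timeslice(const struct intel_engine_cs *engine)
{
	if (!intel_engine_has_timeslices(engine))
		return false;

	return engine->props.timeslice_duration_ms != 0;
}
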
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f25632c9b292..06ff7695fa29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -28,11 +28,16 @@
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
-#include "intel_context.h"
+#include "intel_engine_pool.h"
+#include "intel_engine_user.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_reset.h"
+#include "intel_ring.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -51,30 +56,6 @@
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
-struct engine_class_info {
- const char *name;
- u8 uabi_class;
-};
-
-static const struct engine_class_info intel_engine_classes[] = {
- [RENDER_CLASS] = {
- .name = "rcs",
- .uabi_class = I915_ENGINE_CLASS_RENDER,
- },
- [COPY_ENGINE_CLASS] = {
- .name = "bcs",
- .uabi_class = I915_ENGINE_CLASS_COPY,
- },
- [VIDEO_DECODE_CLASS] = {
- .name = "vcs",
- .uabi_class = I915_ENGINE_CLASS_VIDEO,
- },
- [VIDEO_ENHANCEMENT_CLASS] = {
- .name = "vecs",
- .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
- },
-};
-
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
@@ -160,7 +141,7 @@ static const struct engine_info intel_engines[] = {
/**
* intel_engine_context_size() - return the size of the context for an engine
- * @dev_priv: i915 device private
+ * @gt: the gt
* @class: engine class
*
* Each engine class may require a different amount of space for a context
@@ -172,18 +153,20 @@ static const struct engine_info intel_engines[] = {
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
+ struct intel_uncore *uncore = gt->uncore;
u32 cxt_size;
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
switch (class) {
case RENDER_CLASS:
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(gt->i915)) {
default:
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(gt->i915));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
+ case 12:
case 11:
return GEN11_LR_CONTEXT_RENDER_SIZE;
case 10:
@@ -193,14 +176,14 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case 8:
return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
- if (IS_HASWELL(dev_priv))
+ if (IS_HASWELL(gt->i915))
return HSW_CXT_TOTAL_SIZE;
- cxt_size = I915_READ(GEN7_CXT_SIZE);
+ cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 6:
- cxt_size = I915_READ(CXT_SIZE);
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE);
return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 5:
@@ -215,9 +198,9 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
* minimum allocation anyway so it should all come
* out in the wash.
*/
- cxt_size = I915_READ(CXT_SIZE) + 1;
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(dev_priv),
+ INTEL_GEN(gt->i915),
cxt_size * 64,
cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
@@ -234,7 +217,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
- if (INTEL_GEN(dev_priv) < 8)
+ if (INTEL_GEN(gt->i915) < 8)
return 0;
return GEN8_LR_CONTEXT_OTHER_SIZE;
}
@@ -255,11 +238,16 @@ static u32 __engine_mmio_base(struct drm_i915_private *i915,
return bases[i].base;
}
-static void __sprint_engine_name(char *name, const struct engine_info *info)
+static void __sprint_engine_name(struct intel_engine_cs *engine)
{
- WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
- intel_engine_classes[info->class].name,
- info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
+ /*
+ * Before we know what the uABI name for this engine will be,
+ * we still would like to keep track of this engine in the debug logs.
+ * We throw in a ' here as a reminder that this isn't its final name.
+ */
+ GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
+ intel_engine_class_repr(engine->class),
+ engine->instance) >= sizeof(engine->name));
}
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
@@ -283,28 +271,26 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u);
}
-static int
-intel_engine_setup(struct drm_i915_private *dev_priv,
- enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct intel_engine_cs *engine;
- GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
-
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+ if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+ return -EINVAL;
+
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
- if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
return -EINVAL;
- GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
@@ -312,33 +298,37 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
+ engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
- engine->i915 = dev_priv;
- engine->uncore = &dev_priv->uncore;
- __sprint_engine_name(engine->name, info);
+ engine->i915 = gt->i915;
+ engine->gt = gt;
+ engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+
engine->class = info->class;
engine->instance = info->instance;
-
- /*
- * To be overridden by the backend on setup. However to facilitate
- * cleanup on error during setup, we always provide the destroy vfunc.
- */
- engine->destroy = (typeof(engine->destroy))kfree;
-
- engine->uabi_class = intel_engine_classes[info->class].uabi_class;
-
- engine->context_size = intel_engine_context_size(dev_priv,
- engine->class);
+ __sprint_engine_name(engine);
+
+ engine->props.heartbeat_interval_ms =
+ CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.preempt_timeout_ms =
+ CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+ engine->props.stop_timeout_ms =
+ CONFIG_DRM_I915_STOP_TIMEOUT;
+ engine->props.timeslice_duration_ms =
+ CONFIG_DRM_I915_TIMESLICE_DURATION;
+
+ engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
+ DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ ewma__engine_latency_init(&engine->latency);
seqlock_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -346,8 +336,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Scrub mmio state on takeover */
intel_engine_sanitize_mmio(engine);
- dev_priv->engine_class[info->class][info->instance] = engine;
- dev_priv->engine[id] = engine;
+ gt->engine_class[info->class][info->instance] = engine;
+ gt->engine[id] = engine;
+
+ gt->i915->engine[id] = engine;
+
return 0;
}
@@ -381,38 +374,58 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
}
}
-static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
+static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
__setup_engine_capabilities(engine);
}
/**
- * intel_engines_cleanup() - free the resources allocated for Command Streamers
- * @i915: the i915 devic
+ * intel_engines_release() - free the resources allocated for Command Streamers
+ * @gt: pointer to struct intel_gt
*/
-void intel_engines_cleanup(struct drm_i915_private *i915)
+void intel_engines_release(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Decouple the backend; but keep the layout for late GPU resets */
+ for_each_engine(engine, gt, id) {
+ if (!engine->release)
+ continue;
+
+ engine->release(engine);
+ engine->release = NULL;
+
+ memset(&engine->reset, 0, sizeof(engine->reset));
+
+ gt->i915->engine[id] = NULL;
+ }
+}
+
+void intel_engines_free(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
- engine->destroy(engine);
- i915->engine[id] = NULL;
+ for_each_engine(engine, gt, id) {
+ kfree(engine);
+ gt->engine[id] = NULL;
}
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
- * @i915: the i915 device
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_mmio(struct drm_i915_private *i915)
+int intel_engines_init_mmio(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_device_info *device_info = mkwrite_device_info(i915);
const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
@@ -423,14 +436,14 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(i915, i);
+ err = intel_engine_setup(gt, i);
if (err)
goto cleanup;
@@ -445,61 +458,19 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
if (WARN_ON(mask != engine_mask))
device_info->engine_mask = mask;
- /* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
- err = -ENODEV;
- goto cleanup;
- }
-
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- i915_check_and_clear_faults(i915);
-
- intel_setup_engine_capabilities(i915);
-
- return 0;
-
-cleanup:
- intel_engines_cleanup(i915);
- return err;
-}
-
-/**
- * intel_engines_init() - init the Engine Command Streamers
- * @i915: i915 device private
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_engines_init(struct drm_i915_private *i915)
-{
- int (*init)(struct intel_engine_cs *engine);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- if (HAS_EXECLISTS(i915))
- init = intel_execlists_submission_init;
- else
- init = intel_ring_submission_init;
+ intel_gt_check_and_clear_faults(gt);
- for_each_engine(engine, i915, id) {
- err = init(engine);
- if (err)
- goto cleanup;
- }
+ intel_setup_engine_capabilities(gt);
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_free(gt);
return err;
}
-static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
-{
- i915_gem_batch_pool_init(&engine->batch_pool, engine);
-}
-
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -508,6 +479,10 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+ memset(execlists->pending, 0, sizeof(execlists->pending));
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
+
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
}
@@ -536,7 +511,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
unsigned int flags;
flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
+ if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
@@ -577,7 +552,7 @@ static int init_status_page(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -607,7 +582,7 @@ err:
return ret;
}
-static int intel_engine_setup_common(struct intel_engine_cs *engine)
+static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
@@ -620,104 +595,26 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
- intel_engine_init_hangcheck(engine);
- intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
+
+ intel_engine_pool_init(&engine->pool);
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
- return 0;
-}
-
-/**
- * intel_engines_setup- setup engine state not requiring hw access
- * @i915: Device to setup.
- *
- * Initializes engine structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-int intel_engines_setup(struct drm_i915_private *i915)
-{
- int (*setup)(struct intel_engine_cs *engine);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- if (HAS_EXECLISTS(i915))
- setup = intel_execlists_submission_setup;
- else
- setup = intel_ring_submission_setup;
-
- for_each_engine(engine, i915, id) {
- err = intel_engine_setup_common(engine);
- if (err)
- goto cleanup;
-
- err = setup(engine);
- if (err)
- goto cleanup;
-
- /* We expect the backend to take control over its state */
- GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
-
- GEM_BUG_ON(!engine->cops);
- }
+ intel_engine_init_workarounds(engine);
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_ctx_wa(engine);
return 0;
-
-cleanup:
- intel_engines_cleanup(i915);
- return err;
-}
-
-void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
-{
- static const struct {
- u8 engine;
- u8 sched;
- } map[] = {
-#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
- MAP(PREEMPTION, PREEMPTION),
- MAP(SEMAPHORES, SEMAPHORES),
-#undef MAP
- };
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 enabled, disabled;
-
- enabled = 0;
- disabled = 0;
- for_each_engine(engine, i915, id) { /* all engines must agree! */
- int i;
-
- if (engine->schedule)
- enabled |= (I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY);
- else
- disabled |= (I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY);
-
- for (i = 0; i < ARRAY_SIZE(map); i++) {
- if (engine->flags & BIT(map[i].engine))
- enabled |= BIT(map[i].sched);
- else
- disabled |= BIT(map[i].sched);
- }
- }
-
- i915->caps.scheduler = enabled & ~disabled;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
- i915->caps.scheduler = 0;
}
struct measure_breadcrumb {
struct i915_request rq;
- struct i915_timeline timeline;
+ struct intel_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
@@ -727,19 +624,19 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
struct measure_breadcrumb *frame;
int dw = -ENOMEM;
- GEM_BUG_ON(!engine->i915->gt.scratch);
+ GEM_BUG_ON(!engine->gt->scratch);
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame)
return -ENOMEM;
- if (i915_timeline_init(engine->i915,
- &frame->timeline,
- engine->status_page.vma))
+ if (intel_timeline_init(&frame->timeline,
+ engine->gt,
+ engine->status_page.vma))
goto out_frame;
- INIT_LIST_HEAD(&frame->ring.request_list);
- frame->ring.timeline = &frame->timeline;
+ mutex_lock(&frame->timeline.mutex);
+
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@@ -748,48 +645,33 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- frame->rq.timeline = &frame->timeline;
+ rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
- dw = i915_timeline_pin(&frame->timeline);
+ dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
+ spin_lock_irq(&engine->active.lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+ spin_unlock_irq(&engine->active.lock);
+
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
- i915_timeline_unpin(&frame->timeline);
+ intel_timeline_unpin(&frame->timeline);
out_timeline:
- i915_timeline_fini(&frame->timeline);
+ mutex_unlock(&frame->timeline.mutex);
+ intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
return dw;
}
-static int pin_context(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context **out)
-{
- struct intel_context *ce;
- int err;
-
- ce = i915_gem_context_get_engine(ctx, engine->id);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- err = intel_context_pin(ce);
- intel_context_put(ce);
- if (err)
- return err;
-
- *out = ce;
- return 0;
-}
-
void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
INIT_LIST_HEAD(&engine->active.requests);
+ INIT_LIST_HEAD(&engine->active.hold);
spin_lock_init(&engine->active.lock);
lockdep_set_subclass(&engine->active.lock, subclass);
@@ -807,6 +689,36 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
#endif
}
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+ static struct lock_class_key kernel;
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+
+ err = intel_context_pin(ce); /* perma-pin so it is always available */
+ if (err) {
+ intel_context_put(ce);
+ return ERR_PTR(err);
+ }
+
+ /*
+ * Give our perma-pinned kernel timelines a separate lockdep class,
+ * so that we can use them from within the normal user timelines
+ * should we need to inject GPU operations during their request
+ * construction.
+ */
+ lockdep_set_class(&ce->timeline->mutex, &kernel);
+
+ return ce;
+}
+
/**
* intel_engines_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -818,47 +730,65 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
*
* Returns zero on success or an error code on failure.
*/
-int intel_engine_init_common(struct intel_engine_cs *engine)
+static int engine_init_common(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
- /* We may need to do things with the shrinker which
+ engine->set_default_submission(engine);
+
+ ret = measure_breadcrumb_dw(engine);
+ if (ret < 0)
+ return ret;
+
+ engine->emit_fini_breadcrumb_dw = ret;
+
+ /*
+ * We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
- ret = pin_context(i915->kernel_context, engine,
- &engine->kernel_context);
- if (ret)
- return ret;
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
- /*
- * Similarly the preempt context must always be available so that
- * we can interrupt the engine at any time. However, as preemption
- * is optional, we allow it to fail.
- */
- if (i915->preempt_context)
- pin_context(i915->preempt_context, engine,
- &engine->preempt_context);
+ engine->kernel_context = ce;
- ret = measure_breadcrumb_dw(engine);
- if (ret < 0)
- goto err_unpin;
+ return 0;
+}
- engine->emit_fini_breadcrumb_dw = ret;
+int intel_engines_init(struct intel_gt *gt)
+{
+ int (*setup)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
- engine->set_default_submission(engine);
+ if (HAS_EXECLISTS(gt->i915))
+ setup = intel_execlists_submission_setup;
+ else
+ setup = intel_ring_submission_setup;
- return 0;
+ for_each_engine(engine, gt, id) {
+ err = engine_setup_common(engine);
+ if (err)
+ return err;
-err_unpin:
- if (engine->preempt_context)
- intel_context_unpin(engine->preempt_context);
- intel_context_unpin(engine->kernel_context);
- return ret;
+ err = setup(engine);
+ if (err)
+ return err;
+
+ err = engine_init_common(engine);
+ if (err)
+ return err;
+
+ intel_engine_add_user(engine);
+ }
+
+ return 0;
}
/**
@@ -871,19 +801,22 @@ err_unpin:
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!list_empty(&engine->active.requests));
+ tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
cleanup_status_page(engine);
+ intel_engine_fini_retire(engine);
+ intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
- i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->preempt_context)
- intel_context_unpin(engine->preempt_context);
- intel_context_unpin(engine->kernel_context);
+ if (engine->kernel_context) {
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+ }
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@@ -919,6 +852,21 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+static unsigned long stop_timeout(const struct intel_engine_cs *engine)
+{
+ if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
+ return 0;
+
+ /*
+ * If we are doing a normal GPU reset, we can take our time and allow
+ * the engine to quiesce. We've stopped submission to the engine, and
+ * if we wait long enough an innocent context should complete and
+ * leave the engine idle. So they should not be caught unaware by
+ * the forthcoming GPU reset (which usually follows the stop_cs)!
+ */
+ return READ_ONCE(engine->props.stop_timeout_ms);
+}
+
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
@@ -929,16 +877,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
err = 0;
if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
- 1000, 0,
+ 1000, stop_timeout(engine),
NULL)) {
- GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
err = -ETIMEDOUT;
}
@@ -950,7 +898,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
@@ -966,57 +914,23 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- unsigned int slice = fls(sseu->slice_mask) - 1;
- unsigned int subslice;
- u32 mcr_s_ss_select;
-
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = fls(sseu->subslice_mask[slice]);
- GEM_BUG_ON(!subslice);
- subslice--;
-
- if (IS_GEN(dev_priv, 10))
- mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
- GEN8_MCR_SUBSLICE(subslice);
- else if (INTEL_GEN(dev_priv) >= 11)
- mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
- GEN11_MCR_SUBSLICE(subslice);
- else
- mcr_s_ss_select = 0;
-
- return mcr_s_ss_select;
-}
-
static u32
-read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
- i915_reg_t reg)
+read_subslice_reg(const struct intel_engine_cs *engine,
+ int slice, int subslice, i915_reg_t reg)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
- u32 mcr_slice_subslice_mask;
- u32 mcr_slice_subslice_select;
- u32 default_mcr_s_ss_select;
- u32 mcr;
- u32 ret;
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
enum forcewake_domains fw_domains;
if (INTEL_GEN(i915) >= 11) {
- mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
- GEN11_MCR_SUBSLICE_MASK;
- mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
- GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
} else {
- mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
- GEN8_MCR_SUBSLICE_MASK;
- mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
- GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
}
- default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915);
-
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
@@ -1026,33 +940,31 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+ old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
- WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
- default_mcr_s_ss_select);
-
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= mcr_slice_subslice_select;
+ mcr &= ~mcr_mask;
+ mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- ret = intel_uncore_read_fw(uncore, reg);
+ val = intel_uncore_read_fw(uncore, reg);
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= default_mcr_s_ss_select;
+ mcr &= ~mcr_mask;
+ mcr |= old_mcr & mcr_mask;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
- return ret;
+ return val;
}
/* NB: please notice the memset */
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -1070,7 +982,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(i915, slice, subslice) {
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
@@ -1113,16 +1025,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- intel_wakeref_t wakeref;
bool idle = true;
if (I915_SELFTEST_ONLY(!engine->mmio_base))
return true;
- /* If the whole device is asleep, the engine must be idle */
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
+ if (!intel_engine_pm_get_if_awake(engine))
return true;
/* First check that no commands are left in the ring */
@@ -1131,15 +1039,34 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(dev_priv) > 2 &&
+ if (INTEL_GEN(engine->i915) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_engine_pm_put(engine);
return idle;
}
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (__tasklet_is_scheduled(t)) {
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
+ }
+
+ /* Otherwise flush the tasklet if it was running on another cpu */
+ tasklet_unlock_wait(t);
+}
+
/**
* intel_engine_is_idle() - Report if the engine has finished process all work
* @engine: the intel_engine_cs
@@ -1150,31 +1077,19 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
/* More white lies, if wedged, hw state is inconsistent */
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
return true;
- if (!intel_wakeref_active(&engine->wakeref))
+ if (!intel_engine_pm_is_awake(engine))
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active)) {
- struct tasklet_struct *t = &engine->execlists.tasklet;
+ if (execlists_active(&engine->execlists)) {
+ synchronize_hardirq(engine->i915->drm.pdev->irq);
- synchronize_hardirq(engine->i915->drm.irq);
+ intel_engine_flush_submission(engine);
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->func(t->data);
- tasklet_unlock(t);
- }
- local_bh_enable();
-
- /* Otherwise flush the tasklet if it was on another cpu */
- tasklet_unlock_wait(t);
-
- if (READ_ONCE(engine->execlists.active))
+ if (execlists_active(&engine->execlists))
return false;
}
@@ -1186,7 +1101,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
-bool intel_engines_are_idle(struct drm_i915_private *i915)
+bool intel_engines_are_idle(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1195,14 +1110,14 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return true;
/* Already parked (and passed an idleness test); must still be idle */
- if (!READ_ONCE(i915->gt.awake))
+ if (!READ_ONCE(gt->awake))
return true;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1210,12 +1125,12 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
return true;
}
-void intel_engines_reset_default_submission(struct drm_i915_private *i915)
+void intel_engines_reset_default_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
engine->set_default_submission(engine);
}
@@ -1227,6 +1142,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 4:
+ return !IS_I965G(engine->i915); /* who knows! */
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
@@ -1234,20 +1151,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int which;
-
- which = 0;
- for_each_engine(engine, i915, id)
- if (engine->default_state)
- which |= BIT(engine->uabi_class);
-
- return which;
-}
-
static int print_sched_attr(struct drm_i915_private *i915,
const struct i915_sched_attr *attr,
char *buf, int x, int len)
@@ -1316,16 +1219,46 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per-se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static const char *repr_timer(const struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return "inactive";
+
+ if (timer_pending(t))
+ return "active";
+
+ return "expired";
+}
+
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct drm_i915_private *dev_priv = engine->i915;
- const struct intel_engine_execlists * const execlists =
- &engine->execlists;
- unsigned long flags;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
- if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
@@ -1372,25 +1305,28 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
if (HAS_EXECLISTS(dev_priv)) {
+ struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
const u8 num_entries = execlists->csb_size;
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
- ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
- ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
- num_entries);
+ drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
+ yesno(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)),
+ enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+ repr_timer(&engine->execlists.preempt),
+ repr_timer(&engine->execlists.timer));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
- read, write,
- yesno(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)),
- enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ read, write, num_entries);
+
if (read >= num_entries)
read = 0;
if (write >= num_entries)
@@ -1403,29 +1339,47 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
- spin_lock_irqsave(&engine->active.lock, flags);
- for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
- struct i915_request *rq;
- unsigned int count;
+ execlists_active_lock_bh(execlists);
+ rcu_read_lock();
+ for (port = execlists->active; (rq = *port); port++) {
char hdr[80];
-
- rq = port_unpack(&execlists->port[idx], &count);
- if (!rq) {
- drm_printf(m, "\t\tELSP[%d] idle\n", idx);
- } else if (!i915_request_signaled(rq)) {
- snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
- idx, count,
- i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
- hwsp_seqno(rq));
- print_request(m, rq, hdr);
- } else {
- print_request(m, rq, "\t\tELSP[%d] rq: ");
+ int len;
+
+ len = snprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ",
+ (int)(port - execlists->active));
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
+ len += snprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq));
+
+ if (tl)
+ intel_timeline_put(tl);
}
+ snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ print_request(m, rq, hdr);
+ }
+ for (port = execlists->pending; (rq = *port); port++) {
+ struct intel_timeline *tl = get_timeline(rq);
+ char hdr[80];
+
+ snprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
+ (int)(port - execlists->pending),
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq));
+ print_request(m, rq, hdr);
+
+ if (tl)
+ intel_timeline_put(tl);
}
- drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+ execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1469,6 +1423,17 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1486,12 +1451,21 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
- drm_printf(m, "\tHangcheck: %d ms ago\n",
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+ drm_printf(m, "\tBarriers?: %s\n",
+ yesno(!llist_empty(&engine->barrier_tasks)));
+ drm_printf(m, "\tLatency: %luus\n",
+ ewma__engine_latency_read(&engine->latency));
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ drm_printf(m, "\tHeartbeat: %d ms ago\n",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ rcu_read_unlock();
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
@@ -1501,6 +1475,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
+ struct intel_timeline *tl = get_timeline(rq);
+
print_request(m, rq, "\t\tactive ");
drm_printf(m, "\t\tring->start: 0x%08x\n",
@@ -1513,17 +1489,28 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
- drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
- rq->timeline->hwsp_offset);
+
+ if (tl) {
+ drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
+ tl->hwsp_offset);
+ intel_timeline_put(tl);
+ }
print_request_ring(m, rq);
+
+ if (rq->context->lrc_reg_state) {
+ drm_printf(m, "Logical Ring Context:\n");
+ hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
+ }
}
+ drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
spin_unlock_irqrestore(&engine->active.lock, flags);
- wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
+ drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
+ wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
@@ -1538,29 +1525,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static u8 user_class_map[] = {
- [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
- [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
- [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
- [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
-};
-
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
-{
- if (class >= ARRAY_SIZE(user_class_map))
- return NULL;
-
- class = user_class_map[class];
-
- GEM_BUG_ON(class > MAX_ENGINE_CLASS);
-
- if (instance > MAX_ENGINE_INSTANCE)
- return NULL;
-
- return i915->engine_class[class][instance];
-}
-
/**
* intel_enable_engine_stats() - Enable engine busy tracking on engine
* @engine: engine to enable stats collection
@@ -1578,8 +1542,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- spin_lock_irqsave(&engine->active.lock, flags);
- write_seqlock(&engine->stats.lock);
+ execlists_active_lock_bh(execlists);
+ write_seqlock_irqsave(&engine->stats.lock, flags);
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
@@ -1587,15 +1551,19 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
if (engine->stats.enabled++ == 0) {
- const struct execlist_port *port = execlists->port;
- unsigned int num_ports = execlists_num_ports(execlists);
+ struct i915_request * const *port;
+ struct i915_request *rq;
engine->stats.enabled_at = ktime_get();
/* XXX submission method oblivious? */
- while (num_ports-- && port_isset(port)) {
+ for (port = execlists->active; (rq = *port); port++)
engine->stats.active++;
- port++;
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ /* Exclude any contexts already counted in active */
+ if (!intel_context_inflight_count(rq->context))
+ engine->stats.active++;
}
if (engine->stats.active)
@@ -1603,8 +1571,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
unlock:
- write_sequnlock(&engine->stats.lock);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ execlists_active_unlock_bh(execlists);
return err;
}
@@ -1708,5 +1676,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "mock_engine.c"
+#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
new file mode 100644
index 000000000000..6c6fd185457c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -0,0 +1,242 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_request.h"
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
+
+/*
+ * While the engine is active, we send a periodic pulse along the engine
+ * to check on its health and to flush any idle-barriers. If that request
+ * is stuck, and we fail to preempt it, we declare the engine hung and
+ * issue a reset -- in the hope that it restores progress.
+ */
+
+static bool next_heartbeat(struct intel_engine_cs *engine)
+{
+ long delay;
+
+ delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+ if (!delay)
+ return false;
+
+ delay = msecs_to_jiffies_timeout(delay);
+ if (delay >= HZ)
+ delay = round_jiffies_up_relative(delay);
+ schedule_delayed_work(&engine->heartbeat.work, delay);
+
+ return true;
+}
+
+static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
+ i915_request_add_active_barriers(rq);
+}
+
+static void show_heartbeat(const struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct drm_printer p = drm_debug_printer("heartbeat");
+
+ intel_engine_dump(engine, &p,
+ "%s heartbeat {prio:%d} not ticking\n",
+ engine->name,
+ rq->sched.attr.priority);
+}
+
+static void heartbeat(struct work_struct *wrk)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_engine_cs *engine =
+ container_of(wrk, typeof(*engine), heartbeat.work.work);
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+
+ rq = engine->heartbeat.systole;
+ if (rq && i915_request_completed(rq)) {
+ i915_request_put(rq);
+ engine->heartbeat.systole = NULL;
+ }
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
+ if (intel_gt_is_wedged(engine->gt))
+ goto out;
+
+ if (engine->heartbeat.systole) {
+ if (engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ /*
+ * Gradually raise the priority of the heartbeat to
+ * give high priority work [which presumably desires
+ * low latency and no jitter] the chance to naturally
+ * complete before being preempted.
+ */
+ attr.priority = I915_PRIORITY_MASK;
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority = I915_PRIORITY_BARRIER;
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable();
+ } else {
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ show_heartbeat(rq, engine);
+
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "stopped heartbeat on %s",
+ engine->name);
+ }
+ goto out;
+ }
+
+ if (engine->wakeref_serial == engine->serial)
+ goto out;
+
+ mutex_lock(&ce->timeline->mutex);
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq))
+ goto unlock;
+
+ idle_pulse(engine, rq);
+ if (i915_modparams.enable_hangcheck)
+ engine->heartbeat.systole = i915_request_get(rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out:
+ if (!next_heartbeat(engine))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ intel_engine_pm_put(engine);
+}
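
The priority escalation inside heartbeat() amounts to a three-rung ladder. The helper below is a simplified restatement for illustration only; it mirrors the intent of the code above using the same i915 priority macros, but is not part of the patch:

/*
 * Each heartbeat tick that finds the previous pulse still outstanding raises
 * the pulse priority one rung; only when the barrier-priority pulse also
 * fails to run do we declare the engine hung and reset it.
 */
static int escalate_heartbeat_prio(int prev_prio)
{
	if (prev_prio < I915_PRIORITY_MASK)
		return I915_PRIORITY_MASK;
	if (prev_prio < (I915_PRIORITY_MASK |
			 I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT)))
		return I915_PRIORITY_MASK |
		       I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
	return I915_PRIORITY_BARRIER;
}
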
+
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ return;
+
+ next_heartbeat(engine);
+}
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+{
+ if (cancel_delayed_work(&engine->heartbeat.work))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+}
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+{
+ INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+}
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ int err;
+
+ /* Send one last pulse to clean up persistent hogs before disabling */
+ if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
+ err = intel_engine_pulse(engine);
+ if (err)
+ return err;
+ }
+
+ WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+
+ if (intel_engine_pm_get_if_awake(engine)) {
+ if (delay)
+ intel_engine_unpark_heartbeat(engine);
+ else
+ intel_engine_park_heartbeat(engine);
+ intel_engine_pm_put(engine);
+ }
+
+ return 0;
+}
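
A hypothetical caller-side sketch of the interface above (interval values are examples only): passing 0 disables the heartbeat and, when CONFIG_DRM_I915_PREEMPT_TIMEOUT is active, a final pulse is sent first to flush out persistent hogs.

/* Illustrative only: disable, then later restore, the heartbeat. */
static int pause_heartbeat(struct intel_engine_cs *engine)
{
	return intel_engine_set_heartbeat(engine, 0);
}

static int resume_heartbeat(struct intel_engine_cs *engine)
{
	return intel_engine_set_heartbeat(engine, 2500 /* ms, example value */);
}
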
+
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (!intel_engine_has_preemption(engine))
+ return -ENODEV;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ if (mutex_lock_interruptible(&ce->timeline->mutex))
+ goto out_rpm;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+
+ __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+ idle_pulse(engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ if (llist_empty(&engine->barrier_tasks))
+ return 0;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_rpm;
+ }
+
+ idle_pulse(engine, rq);
+ i915_request_add(rq);
+
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_heartbeat.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
new file mode 100644
index 000000000000..a7b8c0f9e005
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_HEARTBEAT_H
+#define INTEL_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay);
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ae5b6baf6dff..ea90ab3e396e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -6,19 +6,26 @@
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_ring.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
+ struct intel_context *ce;
void *map;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
- intel_gt_pm_get(engine->i915);
+ intel_gt_pm_get(engine->gt);
/* Pin the default state for fast resets from atomic context. */
map = NULL;
@@ -28,45 +35,124 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (!IS_ERR_OR_NULL(map))
engine->pinned_default_state = map;
+ /* Discard stale context state from across idling */
+ ce = engine->kernel_context;
+ if (ce) {
+ GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
+
+ /* First poison the image to verify we never fully trust it */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ int type = i915_coherent_map_type(engine->i915);
+
+ map = i915_gem_object_pin_map(obj, type);
+ if (!IS_ERR(map)) {
+ memset(map, CONTEXT_REDZONE, obj->base.size);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+ }
+ }
+
+ ce->ops->reset(ce);
+ }
+
if (engine->unpark)
engine->unpark(engine);
- intel_engine_init_hangcheck(engine);
+ intel_engine_unpark_heartbeat(engine);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+
+ return flags;
+}
+
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
+{
+ mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
+ local_irq_restore(flags);
+}
+
+#else
+
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+{
return 0;
}
-void intel_engine_pm_get(struct intel_engine_cs *engine)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
- intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
}
-void intel_engine_park(struct intel_engine_cs *engine)
+#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
+
+static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
+ struct i915_request *rq = to_request(fence);
+
+ ewma__engine_latency_add(&rq->engine->latency,
+ ktime_us_delta(rq->fence.timestamp,
+ rq->duration.emitted));
+}
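
duration() feeds engine->latency, the exponentially weighted moving average declared in intel_engine_types.h below via DECLARE_EWMA(_engine_latency, 6, 4): 6 fractional bits of precision, each new sample weighted roughly 1/4 (avg ~= 3/4 * avg + 1/4 * sample). A standalone sketch of that helper family, with hypothetical names:

#include <linux/average.h>
#include <linux/printk.h>

/* Illustrative only: 6 fractional bits, new samples weighted 1/4. */
DECLARE_EWMA(pulse_us, 6, 4)

static struct ewma_pulse_us pulse_avg;

static void pulse_avg_setup(void)
{
	ewma_pulse_us_init(&pulse_avg);
}

static void pulse_avg_sample(unsigned long sample_us)
{
	ewma_pulse_us_add(&pulse_avg, sample_us);
	pr_debug("smoothed pulse latency: ~%luus\n",
		 ewma_pulse_us_read(&pulse_avg));
}
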
+
+static void
+__queue_and_release_pm(struct i915_request *rq,
+ struct intel_timeline *tl,
+ struct intel_engine_cs *engine)
+{
+ struct intel_gt_timelines *timelines = &engine->gt->timelines;
+
+ ENGINE_TRACE(engine, "\n");
+
/*
- * We are committed now to parking this engine, make sure there
- * will be no more interrupts arriving later and the engine
- * is truly idle.
+ * We have to serialise all potential retirement paths with our
+ * submission, as we don't want to underflow either the
+ * engine->wakeref.counter or our timeline->active_count.
+ *
+ * Equally, we cannot allow a new submission to start until
+ * after we finish queueing, nor could we allow that submitter
+ * to retire us before we are ready!
*/
- if (wait_for(intel_engine_is_idle(engine), 10)) {
- struct drm_printer p = drm_debug_printer(__func__);
+ spin_lock(&timelines->lock);
- dev_err(engine->i915->drm.dev,
- "%s is not idle before parking\n",
- engine->name);
- intel_engine_dump(engine, &p, NULL);
- }
+ /* Let intel_gt_retire_requests() retire us (acquired under lock) */
+ if (!atomic_fetch_inc(&tl->active_count))
+ list_add_tail(&tl->link, &timelines->active_list);
+
+ /* Hand the request over to HW (and so to engine_retire()) */
+ __i915_request_queue(rq, NULL);
+
+ /* Let new submissions commence (and maybe retire this timeline) */
+ __intel_wakeref_defer_park(&engine->wakeref);
+
+ spin_unlock(&timelines->lock);
}
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
+ struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
+ unsigned long flags;
+ bool result = true;
- /* Already inside the kernel context, safe to power down. */
- if (engine->wakeref_serial == engine->serial)
+ /* GPU is pointing to the void, as good as in the kernel context. */
+ if (intel_gt_is_wedged(engine->gt))
return true;
- /* GPU is pointing to the void, as good as in the kernel context. */
- if (i915_reset_failed(engine->i915))
+ GEM_BUG_ON(!intel_context_is_barrier(ce));
+
+ /* Already inside the kernel context, safe to power down. */
+ if (engine->wakeref_serial == engine->serial)
return true;
/*
@@ -80,19 +166,70 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* This should hold true as we can only park the engine after
* retiring the last request, thus all rings should be empty and
* all timelines idle.
+ *
+ * For unlocking, there are 2 other parties and the GPU who have a
+ * stake here.
+ *
+ * A new gpu user will be waiting on the engine-pm to start their
+ * engine_unpark. New waiters are predicated on engine->wakeref.count
+ * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
+ * engine->wakeref.
+ *
+ * The other party is intel_gt_retire_requests(), which is walking the
+ * list of active timelines looking for completions. Meanwhile as soon
+ * as we call __i915_request_queue(), the GPU may complete our request.
+ * Ergo, if we put ourselves on the timelines.active_list
+ * (see intel_timeline_enter()) before we increment the
+ * engine->wakeref.count, we may see the request completion and retire
+ * it, causing an underflow of the engine->wakeref.
*/
- rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
+ flags = __timeline_mark_lock(ce);
+ GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
+
+ rq = __i915_request_create(ce, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
- return true;
+ goto out_unlock;
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
+ i915_request_add_active_barriers(rq);
- i915_request_add_barriers(rq);
- __i915_request_commit(rq);
+ /* Install ourselves as a preemption barrier */
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
+ /*
+ * Use an interrupt for precise measurement of duration,
+ * otherwise we rely on someone else retiring all the requests
+ * which may delay the signaling (i.e. we will likely wait
+ * until the background request retirement running every
+ * second or two).
+ */
+ BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
+ dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
+ rq->duration.emitted = ktime_get();
+ }
+
+ /* Expose ourselves to the world */
+ __queue_and_release_pm(rq, ce->timeline, engine);
- return false;
+ result = false;
+out_unlock:
+ __timeline_mark_unlock(ce, flags);
+ return result;
+}
+
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct dma_fence_cb *cb =
+ container_of((struct list_head *)node,
+ typeof(*cb), node);
+
+ cb->func(ERR_PTR(-EAGAIN), cb);
+ }
}
static int __engine_park(struct intel_wakeref *wf)
@@ -112,9 +249,13 @@ static int __engine_park(struct intel_wakeref *wf)
if (!switch_to_kernel_context(engine))
return -EBUSY;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
+
+ call_idle_barriers(engine); /* cleanup after wedging */
+ intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
+ intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
@@ -129,16 +270,24 @@ static int __engine_park(struct intel_wakeref *wf)
engine->execlists.no_priolist = false;
- intel_gt_pm_put(engine->i915);
+ /* While gt calls i915_vma_parked(), we have to break the lock cycle */
+ intel_gt_pm_put_async(engine->gt);
return 0;
}
-void intel_engine_pm_put(struct intel_engine_cs *engine)
-{
- intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
-}
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __engine_unpark,
+ .put = __engine_park,
+};
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref);
+ struct intel_runtime_pm *rpm = engine->uncore->rpm;
+
+ intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+ intel_engine_init_heartbeat(engine);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index a11c893f64c6..e52c2b0cb245 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,21 +7,60 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
-struct drm_i915_private;
+static inline bool
+intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
+{
+ return intel_wakeref_is_active(&engine->wakeref);
+}
-void intel_engine_pm_get(struct intel_engine_cs *engine);
-void intel_engine_pm_put(struct intel_engine_cs *engine);
+static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(&engine->wakeref);
+}
-static inline bool
-intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
return intel_wakeref_get_if_active(&engine->wakeref);
}
-void intel_engine_park(struct intel_engine_cs *engine);
+static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put_async(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
+{
+ intel_wakeref_unlock_wait(&engine->wakeref);
+}
+
+static inline struct i915_request *
+intel_engine_create_kernel_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /*
+ * The engine->kernel_context is special as it is used inside
+ * the engine-pm barrier (see __engine_park()), circumventing
+ * the usual mutexes and relying on the engine-pm barrier
+ * instead. So whenever we use the engine->kernel_context
+ * outside of the barrier, we must manually handle the
+ * engine wakeref to serialise with the use inside.
+ */
+ intel_engine_pm_get(engine);
+ rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
+
+ return rq;
+}
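
A hypothetical usage sketch for the helper above; the wakeref juggling is hidden inside it, so a caller simply creates and adds the request as usual:

/* Illustrative only: emit a kernel request outside the engine-pm barrier. */
static int emit_kernel_nop(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... emit commands into rq here ... */

	i915_request_add(rq);
	return 0;
}
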
void intel_engine_init__pm(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
new file mode 100644
index 000000000000..397186818305
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -0,0 +1,190 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+
+static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+{
+ return container_of(pool, struct intel_engine_cs, pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+{
+ int n;
+
+ /*
+ * Compute a power-of-two bucket, but throw everything greater than
+ * 16KiB into the same bucket: i.e. the buckets hold objects of
+ * (1 page, 2 pages, 4 pages, 8+ pages).
+ */
+ n = fls(sz >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+
+ return &pool->cache_list[n];
+}
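
A quick worked example of the bucket selection above, assuming 4KiB pages (illustration only, not new code in the patch):

/*
 * sz =  4K: sz >> PAGE_SHIFT = 1,  fls(1)  - 1 = 0 -> bucket 0 (1 page)
 * sz =  8K: sz >> PAGE_SHIFT = 2,  fls(2)  - 1 = 1 -> bucket 1 (2 pages)
 * sz = 16K: sz >> PAGE_SHIFT = 4,  fls(4)  - 1 = 2 -> bucket 2 (4 pages)
 * sz = 64K: sz >> PAGE_SHIFT = 16, fls(16) - 1 = 4 -> clamped to bucket 3 (8+ pages)
 */
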
+
+static void node_free(struct intel_engine_pool_node *node)
+{
+ i915_gem_object_put(node->obj);
+ i915_active_fini(&node->active);
+ kfree(node);
+}
+
+static int pool_active(struct i915_active *ref)
+{
+ struct intel_engine_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct dma_resv *resv = node->obj->base.resv;
+ int err;
+
+ if (dma_resv_trylock(resv)) {
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+
+ err = i915_gem_object_pin_pages(node->obj);
+ if (err)
+ return err;
+
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+
+ return 0;
+}
+
+__i915_active_call
+static void pool_retire(struct i915_active *ref)
+{
+ struct intel_engine_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct intel_engine_pool *pool = node->pool;
+ struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+ unsigned long flags;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+ i915_gem_object_unpin_pages(node->obj);
+
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add(&node->link, list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct intel_engine_pool_node *
+node_create(struct intel_engine_pool *pool, size_t sz)
+{
+ struct intel_engine_cs *engine = to_engine(pool);
+ struct intel_engine_pool_node *node;
+ struct drm_i915_gem_object *obj;
+
+ node = kmalloc(sizeof(*node),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->pool = pool;
+ i915_active_init(&node->active, pool_active, pool_retire);
+
+ obj = i915_gem_object_create_internal(engine->i915, sz);
+ if (IS_ERR(obj)) {
+ i915_active_fini(&node->active);
+ kfree(node);
+ return ERR_CAST(obj);
+ }
+
+ i915_gem_object_set_readonly(obj);
+
+ node->obj = obj;
+ return node;
+}
+
+static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+{
+ if (intel_engine_is_virtual(engine))
+ engine = intel_virtual_engine_get_sibling(engine, 0);
+
+ GEM_BUG_ON(!engine);
+ return &engine->pool;
+}
+
+struct intel_engine_pool_node *
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
+{
+ struct intel_engine_pool *pool = lookup_pool(engine);
+ struct intel_engine_pool_node *node;
+ struct list_head *list;
+ unsigned long flags;
+ int ret;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+ size = PAGE_ALIGN(size);
+ list = bucket_for_size(pool, size);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry(node, list, link) {
+ if (node->obj->base.size < size)
+ continue;
+ list_del(&node->link);
+ break;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (&node->link == list) {
+ node = node_create(pool, size);
+ if (IS_ERR(node))
+ return node;
+ }
+
+ ret = i915_active_acquire(&node->active);
+ if (ret) {
+ node_free(node);
+ return ERR_PTR(ret);
+ }
+
+ return node;
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool)
+{
+ int n;
+
+ spin_lock_init(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+void intel_engine_pool_park(struct intel_engine_pool *pool)
+{
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+ struct intel_engine_pool_node *node, *nn;
+
+ list_for_each_entry_safe(node, nn, list, link)
+ node_free(node);
+
+ INIT_LIST_HEAD(list);
+ }
+}
+
+void intel_engine_pool_fini(struct intel_engine_pool *pool)
+{
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
new file mode 100644
index 000000000000..1bd89cadc3b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_H
+#define INTEL_ENGINE_POOL_H
+
+#include "intel_engine_pool_types.h"
+#include "i915_active.h"
+#include "i915_request.h"
+
+struct intel_engine_pool_node *
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
+
+static inline int
+intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
+ struct i915_request *rq)
+{
+ return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_engine_pool_put(struct intel_engine_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool);
+void intel_engine_pool_park(struct intel_engine_pool *pool);
+void intel_engine_pool_fini(struct intel_engine_pool *pool);
+
+#endif /* INTEL_ENGINE_POOL_H */
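
Putting the pool API together, a hypothetical consumer might look like the sketch below: grab a suitably sized scratch object, tie its lifetime to a request, then drop the acquire reference (the function name and the SZ_8K size are illustrative):

/* Illustrative only: borrow a pooled scratch buffer for the duration of rq. */
static int use_scratch(struct i915_request *rq)
{
	struct intel_engine_pool_node *node;
	int err;

	node = intel_engine_get_pool(rq->engine, SZ_8K);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = intel_engine_pool_mark_active(node, rq);
	if (err == 0) {
		/* ... use node->obj as a temporary buffer for rq ... */
	}

	intel_engine_pool_put(node);
	return err;
}
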
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
new file mode 100644
index 000000000000..e31ee361b76f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_TYPES_H
+#define INTEL_ENGINE_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_engine_pool {
+ spinlock_t lock;
+ struct list_head cache_list[4];
+};
+
+struct intel_engine_pool_node {
+ struct i915_active active;
+ struct drm_i915_gem_object *obj;
+ struct list_head link;
+ struct intel_engine_pool *pool;
+};
+
+#endif /* INTEL_ENGINE_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 43e975a26016..92be41a6903c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -7,23 +7,47 @@
#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__
+#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/rbtree.h>
+#include <linux/timer.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "i915_gem.h"
-#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "i915_timeline_types.h"
+#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
+#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
+/* Legacy HW Engine ID */
+
+#define RCS0_HW 0
+#define VCS0_HW 1
+#define BCS0_HW 2
+#define VECS0_HW 3
+#define VCS1_HW 4
+#define VCS2_HW 6
+#define VCS3_HW 7
+#define VECS1_HW 12
+
+/* Gen11+ HW Engine class + instance */
+#define RENDER_CLASS 0
+#define VIDEO_DECODE_CLASS 1
+#define VIDEO_ENHANCEMENT_CLASS 2
+#define COPY_ENGINE_CLASS 3
+#define OTHER_CLASS 4
+#define MAX_ENGINE_CLASS 4
+#define MAX_ENGINE_INSTANCE 3
+
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
@@ -35,6 +59,8 @@ struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
+struct intel_gt;
+struct intel_ring;
struct intel_uncore;
typedef u8 intel_engine_mask_t;
@@ -53,44 +79,6 @@ struct intel_instdone {
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 last_ring;
- u32 last_head;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct kref ref;
- struct i915_vma *vma;
- void *vaddr;
-
- struct i915_timeline *timeline;
- struct list_head request_list;
- struct list_head active_link;
-
- /*
- * As we have two types of rings, one global to the engine used
- * by ringbuffer submission and those that are exclusive to a
- * context used by execlists, we have to play safe and allow
- * atomic updates to the pin_count. However, the actual pinning
- * of the context is either done during initialisation for
- * ringbuffer submission or serialised as part of the context
- * pinning for execlists, and so we do not need a mutex ourselves
- * to serialise intel_ring_pin/intel_ring_unpin.
- */
- atomic_t pin_count;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
/*
* we use a single page to load ctx workarounds so all of these
* values are referred in terms of dwords
@@ -129,8 +117,12 @@ enum intel_engine_id {
VECS1,
#define _VECS(n) (VECS0 + (n))
I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
+/* A simple estimator for the round-trip latency of an engine */
+DECLARE_EWMA(_engine_latency, 6, 4)
+
struct st_preempt_hang {
struct completion completion;
unsigned int count;
@@ -150,6 +142,16 @@ struct intel_engine_execlists {
struct tasklet_struct tasklet;
/**
+ * @timer: kick the current context if its timeslice expires
+ */
+ struct timer_list timer;
+
+ /**
+ * @preempt: reset the current context if it fails to give way
+ */
+ struct timer_list preempt;
+
+ /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
@@ -172,51 +174,28 @@ struct intel_engine_execlists {
*/
u32 __iomem *ctrl_reg;
+#define EXECLIST_MAX_PORTS 2
/**
- * @port: execlist port states
+ * @active: the currently known context executing on HW
+ */
+ struct i915_request * const *active;
+ /**
+ * @inflight: the set of contexts submitted and acknowledged by HW
*
- * For each hardware ELSP (ExecList Submission Port) we keep
- * track of the last request and the number of times we submitted
- * that port to hw. We then count the number of times the hw reports
- * a context completion or preemption. As only one context can
- * be active on hw, we limit resubmission of context to port[0]. This
- * is called Lite Restore, of the context.
+ * The set of inflight contexts is managed by reading CS events
+ * from the HW. On a context-switch event (not preemption), we
+ * know the HW has transitioned from port0 to port1, and we
+ * advance our inflight/active tracking accordingly.
*/
- struct execlist_port {
- /**
- * @request_count: combined request and submission count
- */
- struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
- /**
- * @context_id: context ID for port
- */
- GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
- } port[EXECLIST_MAX_PORTS];
-
+ struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
/**
- * @active: is the HW active? We consider the HW as active after
- * submitting any context for execution and until we have seen the
- * last context completion event. After that, we do not expect any
- * more events until we submit, and so can park the HW.
+ * @pending: the next set of contexts submitted to ELSP
*
- * As we have a small number of different sources from which we feed
- * the HW, we track the state of each inside a single bitfield.
+ * We store the array of contexts that we submit to HW (via ELSP) and
+ * promote them to the inflight array once HW has signaled the
+ * preemption or idle-to-active event.
*/
- unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
+ struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
/**
* @port_mask: number of execlist ports - 1
@@ -224,6 +203,16 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @switch_priority_hint: Second context priority.
+ *
+ * We submit multiple contexts to the HW simultaneously and would
+ * like to occasionally switch between them to emulate timeslicing.
+ * To know when timeslicing is suitable, we track the priority of
+ * the context submitted second.
+ */
+ int switch_priority_hint;
+
+ /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -258,11 +247,6 @@ struct intel_engine_execlists {
u32 *csb_status;
/**
- * @preempt_complete_status: expected CSB upon completing preemption
- */
- u32 preempt_complete_status;
-
- /**
* @csb_size: context status buffer FIFO size
*/
u8 csb_size;
@@ -279,39 +263,52 @@ struct intel_engine_execlists {
struct intel_engine_cs {
struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct intel_uncore *uncore;
char name[INTEL_ENGINE_CS_MAX_NAME];
enum intel_engine_id id;
+ enum intel_engine_id legacy_idx;
+
unsigned int hw_id;
unsigned int guc_id;
- intel_engine_mask_t mask;
- u8 uabi_class;
+ intel_engine_mask_t mask;
u8 class;
u8 instance;
+
+ u16 uabi_class;
+ u16 uabi_instance;
+
+ u32 uabi_capabilities;
u32 context_size;
u32 mmio_base;
- u32 uabi_capabilities;
+ unsigned int context_tag;
+#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
- struct intel_sseu sseu;
+ struct rb_node uabi_node;
- struct intel_ring *buffer;
+ struct intel_sseu sseu;
struct {
spinlock_t lock;
struct list_head requests;
+ struct list_head hold; /* ready requests, but on hold */
} active;
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
- struct intel_context *preempt_context; /* pinned; optional */
intel_engine_mask_t saturated; /* submitting semaphores too late? */
+ struct {
+ struct delayed_work work;
+ struct i915_request *systole;
+ } heartbeat;
+
unsigned long serial;
unsigned long wakeref_serial;
@@ -319,6 +316,18 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
+ struct {
+ struct intel_ring *ring;
+ struct intel_timeline *timeline;
+ } legacy;
+
+ /*
+ * We track the average duration of the idle pulse on parking the
+ * engine to keep an estimate of how fast the engine is
+ * under ideal conditions.
+ */
+ struct ewma__engine_latency latency;
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -375,7 +384,7 @@ struct intel_engine_cs {
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
- struct i915_gem_batch_pool batch_pool;
+ struct intel_engine_pool pool;
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
@@ -392,7 +401,10 @@ struct intel_engine_cs {
struct {
void (*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine, bool stalled);
+
+ void (*rewind)(struct intel_engine_cs *engine, bool stalled);
+ void (*cancel)(struct intel_engine_cs *engine);
+
void (*finish)(struct intel_engine_cs *engine);
} reset;
@@ -404,7 +416,6 @@ struct intel_engine_cs {
const struct intel_context_ops *cops;
int (*request_alloc)(struct i915_request *rq);
- int (*init_context)(struct i915_request *rq);
int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
@@ -443,29 +454,29 @@ struct intel_engine_cs {
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
- /*
- * Cancel all requests on the hardware, or queued for execution.
- * This should only cancel the ready requests that have been
- * submitted to the engine (via the engine->submit_request callback).
- * This is called when marking the device as wedged.
- */
- void (*cancel_requests)(struct intel_engine_cs *engine);
-
- void (*destroy)(struct intel_engine_cs *engine);
+ void (*release)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
+ /*
+ * Keep track of completed timelines on this engine for early
+ * retirement with the goal of quickly enabling powersaving as
+ * soon as the engine is idle.
+ */
+ struct intel_timeline *retire;
+ struct work_struct retire_work;
+
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
- struct intel_engine_hangcheck hangcheck;
-
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
/*
@@ -523,12 +534,25 @@ struct intel_engine_cs {
*/
ktime_t total;
} stats;
+
+ struct {
+ unsigned long heartbeat_interval_ms;
+ unsigned long preempt_timeout_ms;
+ unsigned long stop_timeout_ms;
+ unsigned long timeslice_duration_ms;
+ } props;
};
static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
static inline bool
@@ -561,20 +585,24 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+static inline bool
+intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
+{
+ return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
+}
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+#define instdone_has_slice(dev_priv___, sseu___, slice___) \
+ ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
+ (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
+ intel_sseu_has_subslice(sseu__, 0, subslice__))
+#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
+ for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
+ (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
+ (slice_) += ((subslice_) == 0)) \
+ for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
+ (instdone_has_subslice(dev_priv_, sseu_, slice_, \
+ subslice_)))
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
new file mode 100644
index 000000000000..9e7f12bef828
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -0,0 +1,299 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/llist.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_user.h"
+#include "intel_gt.h"
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ struct rb_node *p = i915->uabi_engines.rb_node;
+
+ while (p) {
+ struct intel_engine_cs *it =
+ rb_entry(p, typeof(*it), uabi_node);
+
+ if (class < it->uabi_class)
+ p = p->rb_left;
+ else if (class > it->uabi_class ||
+ instance > it->uabi_instance)
+ p = p->rb_right;
+ else if (instance < it->uabi_instance)
+ p = p->rb_left;
+ else
+ return it;
+ }
+
+ return NULL;
+}
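
A hypothetical lookup from the uAPI side, using the tree walk above (the class constant comes from the i915 uapi header; instance 0 is just an example):

/* Illustrative only: resolve { class, instance } as userspace names them. */
static struct intel_engine_cs *lookup_vcs0(struct drm_i915_private *i915)
{
	return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 0);
}
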
+
+void intel_engine_add_user(struct intel_engine_cs *engine)
+{
+ llist_add((struct llist_node *)&engine->uabi_node,
+ (struct llist_head *)&engine->i915->uabi_engines);
+}
+
+static const u8 uabi_classes[] = {
+ [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
+ [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
+ [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
+ [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
+};
+
+static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
+{
+ const struct intel_engine_cs *a =
+ container_of((struct rb_node *)A, typeof(*a), uabi_node);
+ const struct intel_engine_cs *b =
+ container_of((struct rb_node *)B, typeof(*b), uabi_node);
+
+ if (uabi_classes[a->class] < uabi_classes[b->class])
+ return -1;
+ if (uabi_classes[a->class] > uabi_classes[b->class])
+ return 1;
+
+ if (a->instance < b->instance)
+ return -1;
+ if (a->instance > b->instance)
+ return 1;
+
+ return 0;
+}
+
+static struct llist_node *get_engines(struct drm_i915_private *i915)
+{
+ return llist_del_all((struct llist_head *)&i915->uabi_engines);
+}
+
+static void sort_engines(struct drm_i915_private *i915,
+ struct list_head *engines)
+{
+ struct llist_node *pos, *next;
+
+ llist_for_each_safe(pos, next, get_engines(i915)) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)pos, typeof(*engine),
+ uabi_node);
+ list_add((struct list_head *)&engine->uabi_node, engines);
+ }
+ list_sort(NULL, engines, engine_cmp);
+}
+
+static void set_scheduler_caps(struct drm_i915_private *i915)
+{
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(HAS_PREEMPTION, PREEMPTION),
+ MAP(HAS_SEMAPHORES, SEMAPHORES),
+ MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_uabi_engine(engine, i915) { /* all engines must agree! */
+ int i;
+
+ if (engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
+}
+
+const char *intel_engine_class_repr(u8 class)
+{
+ static const char * const uabi_names[] = {
+ [RENDER_CLASS] = "rcs",
+ [COPY_ENGINE_CLASS] = "bcs",
+ [VIDEO_DECODE_CLASS] = "vcs",
+ [VIDEO_ENHANCEMENT_CLASS] = "vecs",
+ };
+
+ if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
+ return "xxx";
+
+ return uabi_names[class];
+}
+
+struct legacy_ring {
+ struct intel_gt *gt;
+ u8 class;
+ u8 instance;
+};
+
+static int legacy_ring_idx(const struct legacy_ring *ring)
+{
+ static const struct {
+ u8 base, max;
+ } map[] = {
+ [RENDER_CLASS] = { RCS0, 1 },
+ [COPY_ENGINE_CLASS] = { BCS0, 1 },
+ [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
+ [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
+ };
+
+ if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
+ return INVALID_ENGINE;
+
+ if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
+ return INVALID_ENGINE;
+
+ return map[ring->class].base + ring->instance;
+}
+
+static void add_legacy_ring(struct legacy_ring *ring,
+ struct intel_engine_cs *engine)
+{
+ if (engine->gt != ring->gt || engine->class != ring->class) {
+ ring->gt = engine->gt;
+ ring->class = engine->class;
+ ring->instance = 0;
+ }
+
+ engine->legacy_idx = legacy_ring_idx(ring);
+ if (engine->legacy_idx != INVALID_ENGINE)
+ ring->instance++;
+}
+
+void intel_engines_driver_register(struct drm_i915_private *i915)
+{
+ struct legacy_ring ring = {};
+ u8 uabi_instances[4] = {};
+ struct list_head *it, *next;
+ struct rb_node **p, *prev;
+ LIST_HEAD(engines);
+
+ sort_engines(i915, &engines);
+
+ prev = NULL;
+ p = &i915->uabi_engines.rb_node;
+ list_for_each_safe(it, next, &engines) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)it, typeof(*engine),
+ uabi_node);
+ char old[sizeof(engine->name)];
+
+ if (intel_gt_has_init_error(engine->gt))
+ continue; /* ignore incomplete engines */
+
+ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
+ engine->uabi_class = uabi_classes[engine->class];
+
+ GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
+ engine->uabi_instance = uabi_instances[engine->uabi_class]++;
+
+ /* Replace the internal name with the final user facing name */
+ memcpy(old, engine->name, sizeof(engine->name));
+ scnprintf(engine->name, sizeof(engine->name), "%s%u",
+ intel_engine_class_repr(engine->class),
+ engine->uabi_instance);
+ DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);
+
+ rb_link_node(&engine->uabi_node, prev, p);
+ rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
+
+ GEM_BUG_ON(intel_engine_lookup_user(i915,
+ engine->uabi_class,
+ engine->uabi_instance) != engine);
+
+ /* Fix up the mapping to match default execbuf::user_map[] */
+ add_legacy_ring(&ring, engine);
+
+ prev = &engine->uabi_node;
+ p = &prev->rb_right;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ struct intel_engine_cs *engine;
+ unsigned int isolation;
+ int class, inst;
+ int errors = 0;
+
+ for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
+ for (inst = 0; inst < uabi_instances[class]; inst++) {
+ engine = intel_engine_lookup_user(i915,
+ class, inst);
+ if (!engine) {
+ pr_err("UABI engine not found for { class:%d, instance:%d }\n",
+ class, inst);
+ errors++;
+ continue;
+ }
+
+ if (engine->uabi_class != class ||
+ engine->uabi_instance != inst) {
+ pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
+ engine->name,
+ engine->uabi_class,
+ engine->uabi_instance,
+ class, inst);
+ errors++;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * Make sure that classes with multiple engine instances all
+ * share the same basic configuration.
+ */
+ isolation = intel_engines_has_context_isolation(i915);
+ for_each_uabi_engine(engine, i915) {
+ unsigned int bit = BIT(engine->uabi_class);
+ unsigned int expected = engine->default_state ? bit : 0;
+
+ if ((isolation & bit) != expected) {
+ pr_err("mismatching default context state for class %d on engine %s\n",
+ engine->uabi_class, engine->name);
+ errors++;
+ }
+ }
+
+ if (WARN(errors, "Invalid UABI engine mapping found"))
+ i915->uabi_engines = RB_ROOT;
+ }
+
+ set_scheduler_caps(i915);
+}
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int which;
+
+ which = 0;
+ for_each_uabi_engine(engine, i915)
+ if (engine->default_state)
+ which |= BIT(engine->uabi_class);
+
+ return which;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
new file mode 100644
index 000000000000..f845ea1cbfaa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_USER_H
+#define INTEL_ENGINE_USER_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
+
+void intel_engine_add_user(struct intel_engine_cs *engine);
+void intel_engines_driver_register(struct drm_i915_private *i915);
+
+const char *intel_engine_class_repr(u8 class);
+
+#endif /* INTEL_ENGINE_USER_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
new file mode 100644
index 000000000000..531d501be01f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -0,0 +1,1486 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/stop_machine.h>
+
+#include <asm/set_memory.h>
+#include <asm/smp.h>
+
+#include "intel_gt.h"
+#include "i915_drv.h"
+#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
+
+#include "intel_gtt.h"
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+ if (i915_node_color_differs(node, color))
+ *start += I915_GTT_PAGE_SIZE;
+
+ /*
+ * Also leave a space between the unallocated reserved node after the
+ * GTT and any objects within the GTT, i.e. we use the color adjustment
+ * to insert a guard page to prevent prefetches crossing over the
+ * GTT boundary.
+ */
+ node = list_next_entry(node, node_list);
+ if (node->color != color)
+ *end -= I915_GTT_PAGE_SIZE;
+}
+
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ ggtt->vm.is_ggtt = true;
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+
+ if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
+
+ i915_ggtt_init_fences(ggtt);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @i915: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ stash_init(&i915->mm.wc_stash);
+
+ /*
+ * Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
+ */
+ ret = ggtt_init_hw(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+}
+
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ /*
+ * Don't bother messing with faults pre GEN6 as we have little
+ * documentation supporting that it's a good idea.
+ */
+ if (INTEL_GEN(i915) < 6)
+ return;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_suspend_mappings(&i915->ggtt);
+}
+
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
+ spin_unlock_irq(&uncore->lock);
+}
+
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
+ */
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ gen8_ggtt_invalidate(ggtt);
+
+ if (INTEL_GEN(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
+ gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ gen8_set_pte(gtt_entries++, pte_encode | addr);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
+ unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ for_each_sgt_daddr(addr, iter, vma->pages)
+ iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0].encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+ struct i915_address_space *vm;
+ u64 start;
+ u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+ struct clear_range *arg = _arg;
+
+ gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+ u64 start,
+ u64 length)
+{
+ struct clear_range arg = { vm, start, length };
+
+ stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
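+
+/*
+ * Note on the __BKL variants above: each one wraps the plain gen8 GGTT
+ * update plus the GAM flush in bxt_vtd_ggtt_wa() inside stop_machine(),
+ * so the PTE writes complete with all other CPUs held off. This is a
+ * heavyweight but simple way of serialising GGTT updates against
+ * concurrent aperture access; see the VT-d checks in gen8_gmch_probe()
+ * for when these callbacks are installed.
+ */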
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0].encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ flags);
+}
+
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static int ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 pte_flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+
+ return 0;
+}
+
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+ u64 size;
+ int ret;
+
+ if (!USES_GUC(ggtt->vm.i915))
+ return 0;
+
+ GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+ size = ggtt->vm.total - GUC_GGTT_TOP;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+ PIN_NOEVICT);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+
+ return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
+
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+ ggtt_release_guc_top(ggtt);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
+{
+ /*
+ * Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
+ unsigned long hole_start, hole_end;
+ struct drm_mm_node *entry;
+ int ret;
+
+ /*
+ * GuC requires all resources that we're sharing with it to be placed in
+ * non-WOPCM memory. If GuC is not present or not in use we still need a
+ * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+ * why.
+ */
+ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+ intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
+
+ ret = intel_vgt_balloon(ggtt);
+ if (ret)
+ return ret;
+
+ mutex_init(&ggtt->error_mutex);
+ if (ggtt->mappable_end) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ PAGE_SIZE, 0,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The upper portion of the GuC address space has a sizeable hole
+ * (several MB) that is inaccessible by GuC. Reserve this range within
+ * GGTT as it can comfortably hold GuC/HuC firmware images.
+ */
+ ret = ggtt_reserve_guc_top(ggtt);
+ if (ret)
+ goto err;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
+ }
+
+ /* And finally clear the reserved guard page */
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
+
+ return 0;
+
+err:
+ cleanup_init_ggtt(ggtt);
+ return ret;
+}
+
+static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+ int ret;
+
+ /* Currently applicable only to VLV */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(vma->obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ if (flags & I915_VMA_LOCAL_BIND) {
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+
+ if (flags & I915_VMA_ALLOC) {
+ ret = alias->vm.allocate_va_range(&alias->vm,
+ vma->node.start,
+ vma->size);
+ if (ret)
+ return ret;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ }
+
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+ __i915_vma_flags(vma)));
+ alias->vm.insert_entries(&alias->vm, vma,
+ cache_level, pte_flags);
+ }
+
+ if (flags & I915_VMA_GLOBAL_BIND)
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ return 0;
+}
+
+static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+{
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ struct i915_address_space *vm = vma->vm;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
+
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+ struct i915_address_space *vm =
+ &i915_vm_to_ggtt(vma->vm)->alias->vm;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
+}
+
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = i915_ppgtt_create(ggtt->vm.gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
+ err = -ENODEV;
+ goto err_ppgtt;
+ }
+
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
+
+ ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
+
+ return 0;
+
+err_ppgtt:
+ i915_vm_put(&ppgtt->vm);
+ return err;
+}
+
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = fetch_and_zero(&ggtt->alias);
+ if (!ppgtt)
+ return;
+
+ i915_vm_put(&ppgtt->vm);
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = init_ggtt(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+ ret = init_aliasing_ppgtt(&i915->ggtt);
+ if (ret)
+ cleanup_init_ggtt(&i915->ggtt);
+ }
+
+ return 0;
+}
+
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma, *vn;
+
+ atomic_set(&ggtt->vm.open, 0);
+
+ rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
+ flush_workqueue(ggtt->vm.i915->wq);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+ WARN_ON(__i915_vma_unbind(vma));
+
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+
+ ggtt_release_guc_top(ggtt);
+ intel_vgt_deballoon(ggtt);
+
+ ggtt->vm.cleanup(&ggtt->vm);
+
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
+
+ arch_phys_wc_del(ggtt->mtrr);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+ struct pagevec *pvec;
+
+ fini_aliasing_ppgtt(&i915->ggtt);
+
+ ggtt_cleanup_hw(&i915->ggtt);
+
+ pvec = &i915->mm.wc_stash.pvec;
+ if (pvec->nr) {
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ __pagevec_release(pvec);
+ }
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
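+
+/*
+ * Worked example for gen6_get_total_gtt_size(): a GGMS field of 2 yields
+ * 2 MiB of PTE space; with 4-byte gen6 PTEs each mapping a 4 KiB page,
+ * that corresponds to a 2 GiB GGTT (see the vm.total calculation in
+ * gen6_gmch_probe()).
+ */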
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
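+
+/*
+ * Worked example for gen8_get_total_gtt_size(): a GGMS field of 3 yields
+ * 1 << 3 = 8 MiB of PTE space; with 8-byte gen8 PTEs each mapping a
+ * 4 KiB page, that corresponds to a 4 GiB GGTT (see the vm.total
+ * calculation in gen8_gmch_probe()).
+ */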
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ phys_addr_t phys_addr;
+ int ret;
+
+ /* For Modern GENs the PTEs and register space are split in the BAR */
+ phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+
+ /*
+ * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ DRM_ERROR("Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ ggtt->vm.scratch[0].encode =
+ ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, 0);
+
+ return 0;
+}
+
+int ggtt_set_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ GEM_BUG_ON(vma->pages);
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ cleanup_scratch_page(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int size;
+ u16 snb_gmch_ctl;
+ int err;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ if (!IS_DGFX(i915)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+ if (intel_ggtt_update_needs_vtd_wa(i915) ||
+ IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ ggtt->vm.pte_encode = gen8_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int size;
+ u16 snb_gmch_ctl;
+ int err;
+
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
+ DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (INTEL_GEN(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static void i915_gmch_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->do_idle_maps = needs_idle_maps(i915);
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ if (unlikely(ggtt->do_idle_maps))
+ dev_notice(i915->drm.dev,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ int ret;
+
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = i915;
+ ggtt->vm.dma = &i915->drm.pdev->dev;
+
+ if (INTEL_GEN(i915) <= 5)
+ ret = i915_gmch_probe(ggtt);
+ else if (INTEL_GEN(i915) < 8)
+ ret = gen6_gmch_probe(ggtt);
+ else
+ ret = gen8_gmch_probe(ggtt);
+ if (ret)
+ return ret;
+
+ if ((ggtt->vm.total - 1) >> 32) {
+ DRM_ERROR("We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
+ }
+
+ if (ggtt->mappable_end > ggtt->vm.total) {
+ DRM_ERROR("mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
+ }
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
+ */
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ if (ret)
+ return ret;
+
+ if (intel_vtd_active())
+ dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+
+ return 0;
+}
+
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
+{
+ GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+
+ ggtt->invalidate = guc_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+{
+ /* XXX Temporary pardon for error unload */
+ if (ggtt->invalidate == gen8_ggtt_invalidate)
+ return;
+
+ /* We should only be called after i915_ggtt_enable_guc() */
+ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma;
+ bool flush = false;
+ int open;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ /* First fill our portion of the GTT with scratch pages */
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
+
+ /* clflush objects bound into the GGTT and rebind them. */
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ continue;
+
+ clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_GLOBAL, NULL));
+ if (obj) { /* only used during resume => exclusive access */
+ flush |= fetch_and_zero(&obj->write_domain);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ }
+ }
+
+ atomic_set(&ggtt->vm.open, open);
+ ggtt->invalidate(ggtt);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ if (flush)
+ wbinvd_on_all_cpus();
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ ggtt_restore_mappings(ggtt);
+
+ if (INTEL_GEN(i915) >= 8)
+ setup_private_pat(ggtt->vm.gt->uncore);
+}
+
+static struct scatterlist *
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+
+ for (column = 0; column < width; column++) {
+ src_idx = stride * (height - 1) + column + offset;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need are DMA addresses.
+ */
+ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
+ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= stride;
+ }
+ }
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_rotation_info_size(rot_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
+ rot_info->plane[i].width, rot_info->plane[i].height,
+ rot_info->plane[i].stride, st, sg);
+ }
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need are DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += stride - width;
+ }
+
+ return sg;
+}
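+
+/*
+ * The remap_pages() helper above walks the view row by row: within a row
+ * it coalesces DMA-contiguous runs of the source object (capped at the
+ * remaining row width) into single sg entries, and between rows it skips
+ * stride - width pages, so the resulting sg table describes the remapped
+ * surface linearly.
+ */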
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ sg = remap_pages(obj, rem_info->plane[i].offset,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].stride, st, sg);
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static noinline struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->partial.size;
+ unsigned int offset;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ sg = st->sgl;
+ st->nents = 0;
+ do {
+ unsigned int len;
+
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
+ st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
+ return st;
+ }
+
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ /*
+ * The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
+ switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ /* fall through */
+ case I915_GGTT_VIEW_NORMAL:
+ vma->pages = vma->obj->mm.pages;
+ return 0;
+
+ case I915_GGTT_VIEW_ROTATED:
+ vma->pages =
+ intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_REMAPPED:
+ vma->pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ break;
+ }
+
+ ret = 0;
+ if (IS_ERR(vma->pages)) {
+ ret = PTR_ERR(vma->pages);
+ vma->pages = NULL;
+ DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index eec31e36aca7..51b8718513bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -7,6 +7,15 @@
#ifndef _INTEL_GPU_COMMANDS_H_
#define _INTEL_GPU_COMMANDS_H_
+#include <linux/bitops.h>
+
+/*
+ * Target address alignments required for GPU access e.g.
+ * MI_STORE_DWORD_IMM.
+ */
+#define alignof_dword 4
+#define alignof_qword 8
+
/*
* Instruction field definitions used by the command parser
*/
@@ -105,6 +114,7 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@@ -112,6 +122,8 @@
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
+#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
+#define MI_SEMAPHORE_TOKEN_SHIFT 5
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
@@ -125,7 +137,10 @@
 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
+#define MI_LRI_CS_MMIO (1<<19)
#define MI_LRI_FORCE_POSTED (1<<12)
+#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -140,6 +155,7 @@
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -149,7 +165,8 @@
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_BATCH_RESOURCE_STREAMER (1<<10)
+#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
+#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only */
/*
* 3D instructions used by the kernel
@@ -179,11 +196,12 @@
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
+#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
+#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
+#define XY_MONO_SRC_COPY_IMM_BLT (2 << 29 | 0x71 << 22 | 5)
#define BLT_WRITE_A (2<<20)
#define BLT_WRITE_RGB (1<<20)
#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
@@ -200,6 +218,8 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) /* gen11+ */
+#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28) /* gen11+ */
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
@@ -207,6 +227,7 @@
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
+#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -214,7 +235,9 @@
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
@@ -225,6 +248,29 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
+#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
+/* Opcodes for MI_MATH_INSTR */
+#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
+#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
+#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
+#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1)
+#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1)
+#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
+#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
+#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
+#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
+#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
+#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
+#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
+/* Registers used as operands in MI_MATH_INSTR */
+#define MI_MATH_REG(x) (x)
+#define MI_MATH_REG_SRCA 0x20
+#define MI_MATH_REG_SRCB 0x21
+#define MI_MATH_REG_ACCU 0x31
+#define MI_MATH_REG_ZF 0x32
+#define MI_MATH_REG_CF 0x33
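+
+/*
+ * Illustrative MI_MATH sequence (a sketch, not taken from any specific
+ * user): add GPR0 and GPR1 on the command streamer ALU and write the
+ * result back to GPR0. The header advertises four ALU instructions,
+ * each packed with the macros above:
+ *
+ *	*cs++ = MI_MATH(4);
+ *	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
+ *	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
+ *	*cs++ = MI_MATH_ADD;
+ *	*cs++ = MI_MATH_STORE(MI_MATH_REG(0), MI_MATH_REG_ACCU);
+ */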
+
/*
* Commands used only by the command parser
*/
@@ -241,7 +287,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
@@ -276,4 +321,31 @@
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+/*
+ * Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline u64 gen8_canonical_addr(u64 address)
+{
+ return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
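+
+/*
+ * For example, gen8_canonical_addr(0x0000800000000000ull) returns
+ * 0xffff800000000000ull: bit 47 is set, so it is replicated into bits
+ * [63:48]. Addresses with bit 47 clear are returned unchanged.
+ */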
+
+static inline u64 gen8_noncanonical_addr(u64 address)
+{
+ return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
+}
+
+static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
+{
+ *cs++ = MI_BATCH_BUFFER_START | flags;
+ *cs++ = addr;
+
+ return cs;
+}
+
#endif /* _INTEL_GPU_COMMANDS_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
new file mode 100644
index 000000000000..da2b6e2ae692
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "debugfs_gt.h"
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_mocs.h"
+#include "intel_rc6.h"
+#include "intel_renderstate.h"
+#include "intel_rps.h"
+#include "intel_uncore.h"
+#include "intel_pm.h"
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+{
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+
+ spin_lock_init(&gt->irq_lock);
+
+ INIT_LIST_HEAD(&gt->closed_vma);
+ spin_lock_init(&gt->closed_lock);
+
+ intel_gt_init_reset(gt);
+ intel_gt_init_requests(gt);
+ intel_gt_init_timelines(gt);
+ intel_gt_pm_init_early(gt);
+
+ intel_rps_init_early(&gt->rps);
+ intel_uc_init_early(&gt->uc);
+}
+
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+{
+ gt->ggtt = ggtt;
+}
+
+static void init_unused_ring(struct intel_gt *gt, u32 base)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
+}
+
+static void init_unused_rings(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
+ }
+}
+
+int intel_gt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ gt->last_init_time = ktime_get();
+
+ /* Double layer security blanket, see i915_gem_init() */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
+
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(gt);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(gt, "init");
+
+ intel_gt_init_swizzling(gt);
+
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (ie. head != tail) after resume which
+ * will prevent c3 entry. Make sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(gt);
+
+ ret = i915_ppgtt_init_hw(gt);
+ if (ret) {
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
+ goto out;
+ }
+
+ /* We can't enable contexts until all firmware is loaded */
+ ret = intel_uc_init_hw(&gt->uc);
+ if (ret) {
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ goto out;
+ }
+
+ intel_mocs_init(gt);
+
+out:
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ return ret;
+}
+
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+void
+intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 eir;
+
+ if (!IS_GEN(i915, 2))
+ clear_register(uncore, PGTBL_ER);
+
+ if (INTEL_GEN(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ else
+ clear_register(uncore, IPEIR_I965);
+
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
+ }
+
+ if (INTEL_GEN(i915) >= 12) {
+ rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine_masked(engine, gt, engine_mask, id)
+ gen8_clear_engine_error_register(engine);
+ }
+}
+
+static void gen6_check_faults(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 fault;
+
+ for_each_engine(engine, gt, id) {
+ fault = GEN6_RING_FAULT_REG_READ(engine);
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+ }
+}
+
+static void gen8_check_faults(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
+ u32 fault;
+
+ if (INTEL_GEN(gt->i915) >= 12) {
+ fault_reg = GEN12_RING_FAULT_REG;
+ fault_data0_reg = GEN12_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN12_FAULT_TLB_DATA1;
+ } else {
+ fault_reg = GEN8_RING_FAULT_REG;
+ fault_data0_reg = GEN8_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN8_FAULT_TLB_DATA1;
+ }
+
+ fault = intel_uncore_read(uncore, fault_reg);
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
+ fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
+
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+}
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(i915) >= 8)
+ gen8_check_faults(gt);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_check_faults(gt);
+ else
+ return;
+
+ intel_gt_clear_error_registers(gt, ALL_ENGINES);
+}
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ /*
+ * No actual flushing is required for the GTT write domain for reads
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ *
+ * We also have to wait a bit for the writes to land from the GTT.
+ * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+ * timing. This issue has only been observed when switching quickly
+ * between GTT writes and CPU reads from inside the kernel on recent hw,
+ * and it appears to only affect discrete GTT blocks (i.e. on LLC
+ * system agents we cannot reproduce this behaviour, until Cannonlake
+ * that was!).
+ */
+
+ wmb();
+
+ if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
+ return;
+
+ intel_gt_chipset_flush(gt);
+
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_posting_read_fw(uncore,
+ RING_HEAD(RENDER_RING_BASE));
+ spin_unlock_irqrestore(&uncore->lock, flags);
+ }
+}
+
+void intel_gt_chipset_flush(struct intel_gt *gt)
+{
+ wmb();
+ if (INTEL_GEN(gt->i915) < 6)
+ intel_gtt_chipset_flush();
+}
+
+void intel_gt_driver_register(struct intel_gt *gt)
+{
+ intel_rps_driver_register(&gt->rps);
+
+ debugfs_gt_register(gt);
+}
+
+static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ gt->scratch = i915_vma_make_unshrinkable(vma);
+
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+ i915_vma_unpin_and_release(&gt->scratch, 0);
+}
+
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+ if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+ return &i915_ppgtt_create(gt)->vm;
+ else
+ return i915_vm_get(&gt->ggtt->vm);
+}
+
+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+ struct intel_timeline *tl;
+
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ intel_context_timeline_unlock(tl);
+ return 0;
+}
+
+static int __engines_record_defaults(struct intel_gt *gt)
+{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * As we reset the gpu during very early sanitisation, the current
+ * register state on the GPU should reflect its default values.
+ * We load a context onto the hw (with restore-inhibit), then switch
+ * over to a second context to save that default register state. We
+ * can then prime every new context with that state so they all start
+ * from the same default HW values.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_renderstate so;
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+
+ err = intel_renderstate_init(&so, engine);
+ if (err)
+ goto out;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ err = intel_renderstate_emit(&so, rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
+ requests[id] = i915_request_get(rq);
+ i915_request_add(rq);
+ intel_renderstate_fini(&so);
+ if (err)
+ goto out;
+ }
+
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct i915_vma *state;
+ void *vaddr;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
+ state = rq->context->state;
+ if (!state)
+ continue;
+
+ /* Serialise with retirement on another CPU */
+ GEM_BUG_ON(!i915_request_completed(rq));
+ err = __intel_context_flush_retire(rq->context);
+ if (err)
+ goto out;
+
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->context));
+
+ /*
+ * As we will hold a reference to the logical state, it will
+ * not be torn down with the context, and importantly the
+ * object will hold onto its vma (making it possible for a
+ * stray GTT write to corrupt our defaults). Unmap the vma
+ * from the GTT to prevent such accidents and reclaim the
+ * space.
+ */
+ err = i915_vma_unbind(state);
+ if (err)
+ goto out;
+
+ i915_gem_object_lock(state->obj);
+ err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ i915_gem_object_unlock(state->obj);
+ if (err)
+ goto out;
+
+ i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
+
+ /* Check we can acquire the image of the context state */
+ vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ rq->engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_unpin_map(state->obj);
+ }
+
+out:
+ /*
+ * If we have to abandon now, we expect the engines to be idle
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
+ */
+ if (err)
+ intel_gt_set_wedged(gt);
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ ce = rq->context;
+ i915_request_put(rq);
+ intel_context_put(ce);
+ }
+ return err;
+}
+
+static int __engines_verify_workarounds(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_verify_workarounds(engine, "load"))
+ err = -EIO;
+ }
+
+ return err;
+}
+
+static void __intel_gt_disable(struct intel_gt *gt)
+{
+ intel_gt_set_wedged_on_init(gt);
+
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+}
+
+int intel_gt_init(struct intel_gt *gt)
+{
+ int err;
+
+ err = i915_inject_probe_error(gt->i915, -ENODEV);
+ if (err)
+ return err;
+
+ /*
+ * This is just a security blanket to placate dragons.
+ * On some systems, we very sporadically observe that the first TLBs
+ * used by the CS may be stale, despite us poking the TLB reset. If
+ * we hold the forcewake during initialisation these problems
+ * just magically go away.
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ if (err)
+ goto out_fw;
+
+ intel_gt_pm_init(gt);
+
+ gt->vm = kernel_vm(gt);
+ if (!gt->vm) {
+ err = -ENOMEM;
+ goto err_pm;
+ }
+
+ err = intel_engines_init(gt);
+ if (err)
+ goto err_engines;
+
+ intel_uc_init(&gt->uc);
+
+ err = intel_gt_resume(gt);
+ if (err)
+ goto err_uc_init;
+
+ err = __engines_record_defaults(gt);
+ if (err)
+ goto err_gt;
+
+ err = __engines_verify_workarounds(gt);
+ if (err)
+ goto err_gt;
+
+ err = i915_inject_probe_error(gt->i915, -EIO);
+ if (err)
+ goto err_gt;
+
+ goto out_fw;
+err_gt:
+ __intel_gt_disable(gt);
+ intel_uc_fini_hw(&gt->uc);
+err_uc_init:
+ intel_uc_fini(&gt->uc);
+err_engines:
+ intel_engines_release(gt);
+ i915_vm_put(fetch_and_zero(&gt->vm));
+err_pm:
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+out_fw:
+ if (err)
+ intel_gt_set_wedged_on_init(gt);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ return err;
+}
+
+void intel_gt_driver_remove(struct intel_gt *gt)
+{
+ __intel_gt_disable(gt);
+
+ intel_uc_fini_hw(&gt->uc);
+ intel_uc_fini(&gt->uc);
+
+ intel_engines_release(gt);
+}
+
+void intel_gt_driver_unregister(struct intel_gt *gt)
+{
+ intel_rps_driver_unregister(&gt->rps);
+}
+
+void intel_gt_driver_release(struct intel_gt *gt)
+{
+ struct i915_address_space *vm;
+
+ vm = fetch_and_zero(&gt->vm);
+ if (vm) /* FIXME being called twice on error paths :( */
+ i915_vm_put(vm);
+
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+}
+
+void intel_gt_driver_late_release(struct intel_gt *gt)
+{
+ intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
+ intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ intel_engines_free(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
new file mode 100644
index 000000000000..1dac441cb8f4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT__
+#define __INTEL_GT__
+
+#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_reset.h"
+
+struct drm_i915_private;
+
+#define GT_TRACE(gt, fmt, ...) do { \
+ const struct intel_gt *gt__ __maybe_unused = (gt); \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ ##__VA_ARGS__); \
+} while (0)
+
+static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
+{
+ return container_of(uc, struct intel_gt, uc);
+}
+
+static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
+{
+ return container_of(guc, struct intel_gt, uc.guc);
+}
+
+static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
+{
+ return container_of(huc, struct intel_gt, uc.huc);
+}
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int __must_check intel_gt_init_hw(struct intel_gt *gt);
+int intel_gt_init(struct intel_gt *gt);
+void intel_gt_driver_register(struct intel_gt *gt);
+
+void intel_gt_driver_unregister(struct intel_gt *gt);
+void intel_gt_driver_remove(struct intel_gt *gt);
+void intel_gt_driver_release(struct intel_gt *gt);
+
+void intel_gt_driver_late_release(struct intel_gt *gt);
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt);
+void intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask);
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
+void intel_gt_chipset_flush(struct intel_gt *gt);
+
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
+ enum intel_gt_scratch_field field)
+{
+ return i915_ggtt_offset(gt->scratch) + field;
+}
+
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
+{
+ return __intel_reset_failed(&gt->reset);
+}
+
+static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+{
+ return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
+#endif /* __INTEL_GT__ */
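
For reference, a minimal usage sketch (not part of this patch) of the probe/remove ordering implied by the entry points declared above. The example_probe_gt()/example_remove_gt() wrappers are hypothetical and assume this header is included:

static int example_probe_gt(struct intel_gt *gt)
{
	int err;

	err = intel_gt_init(gt);	/* engines, uc, recorded defaults */
	if (err)
		return err;

	intel_gt_driver_register(gt);	/* expose to the rest of the driver last */
	return 0;
}

static void example_remove_gt(struct intel_gt *gt)
{
	intel_gt_driver_unregister(gt);		/* stop new users first */
	intel_gt_driver_remove(gt);		/* disable HW, fini uc, release engines */
	intel_gt_driver_release(gt);		/* drop the kernel vm, pm, scratch */
	intel_gt_driver_late_release(gt);	/* final frees */
}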
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
new file mode 100644
index 000000000000..f796bdf1ed30
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -0,0 +1,456 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/sched/clock.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_uncore.h"
+#include "intel_rps.h"
+
+static void guc_irq_handler(struct intel_guc *guc, u16 iir)
+{
+ if (iir & GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(guc);
+}
+
+static void
+cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
+{
+ bool tasklet = false;
+
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
+
+ if (iir & GT_RENDER_USER_INTERRUPT) {
+ intel_engine_signal_breadcrumbs(engine);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
+ }
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static u32
+gen11_gt_engine_identity(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 timeout_ts;
+ u32 ident;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+ } while (!(ident & GEN11_INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+ DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+ GEN11_INTR_DATA_VALID);
+
+ return ident;
+}
+
+static void
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+ const u16 iir)
+{
+ if (instance == OTHER_GUC_INSTANCE)
+ return guc_irq_handler(&gt->uc.guc, iir);
+
+ if (instance == OTHER_GTPM_INSTANCE)
+ return gen11_rps_irq_handler(&gt->rps, iir);
+
+ WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+ instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+ const u8 instance, const u16 iir)
+{
+ struct intel_engine_cs *engine;
+
+ if (instance <= MAX_ENGINE_INSTANCE)
+ engine = gt->engine_class[class][instance];
+ else
+ engine = NULL;
+
+ if (likely(engine))
+ return cs_irq_handler(engine, iir);
+
+ WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+ class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
+{
+ const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+ const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+ const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+ if (unlikely(!intr))
+ return;
+
+ if (class <= COPY_ENGINE_CLASS)
+ return gen11_engine_irq_handler(gt, class, instance, intr);
+
+ if (class == OTHER_CLASS)
+ return gen11_other_irq_handler(gt, instance, intr);
+
+ WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+ class, instance, intr);
+}
+
+static void
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ unsigned long intr_dw;
+ unsigned int bit;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
+
+ gen11_gt_identity_handler(gt, ident);
+ }
+
+	/* Clear must come after the shared IIR has been serviced for the engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
+
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
+{
+ unsigned int bank;
+
+ spin_lock(&gt->irq_lock);
+
+ for (bank = 0; bank < 2; bank++) {
+ if (master_ctl & GEN11_GT_DW_IRQ(bank))
+ gen11_gt_bank_handler(gt, bank);
+ }
+
+ spin_unlock(&gt->irq_lock);
+}
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 dw;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+ if (dw & BIT(bit)) {
+ /*
+ * According to the BSpec, DW_IIR bits cannot be cleared without
+ * first servicing the Selector & Shared IIR registers.
+ */
+ gen11_gt_engine_identity(gt, bank, bit);
+
+ /*
+ * We locked GT INT DW by reading it. If we want to (try
+ * to) recover from this successfully, we need to clear
+ * our bit, otherwise we are locking the register for
+ * everybody.
+ */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+ return true;
+ }
+
+ return false;
+}
+
+void gen11_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* Disable RCS, BCS, VCS and VECS class engines. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
+
+	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen11_gt_irq_postinstall(struct intel_gt *gt)
+{
+ const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 dmask = irqs << 16 | irqs;
+ const u32 smask = irqs << 16;
+
+ BUILD_BUG_ON(irqs & 0xffff0000);
+
+ /* Enable RCS, BCS, VCS and VECS class interrupts. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
+
+ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
+
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+
+ /* Same thing for GuC interrupts */
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+}
+
+static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (!HAS_L3_DPF(gt->i915))
+ return;
+
+ spin_lock(&gt->irq_lock);
+ gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
+ spin_unlock(&gt->irq_lock);
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ gt->i915->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ gt->i915->l3_parity.which_slice |= 1 << 0;
+
+ schedule_work(&gt->i915->l3_parity.error_work);
+}
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
+
+ if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+ GT_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
+
+ if (gt_iir & GT_PARITY_ERROR(gt->i915))
+ gen7_parity_error_irq_handler(gt, gt_iir);
+}
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+ void __iomem * const regs = gt->uncore->regs;
+
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(gt_iir[0]))
+ raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(gt_iir[1]))
+ raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(gt_iir[2]))
+ raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(gt_iir[3]))
+ raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
+ }
+}
+
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+ gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+ gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
+ guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+ }
+}
+
+void gen8_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+}
+
+void gen8_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* These are interrupts we'll toggle with the ring mask register */
+ u32 gt_interrupts[] = {
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
+ 0,
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
+
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+	 * is enabled/disabled. Same will be the case for GuC interrupts.
+ */
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+}
+
+static void gen5_gt_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
+
+ gt->gt_imr &= ~interrupt_mask;
+ gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
+ intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
+}
+
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, mask);
+ intel_uncore_posting_read_fw(gt->uncore, GTIMR);
+}
+
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, 0);
+}
+
+void gen5_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
+ if (INTEL_GEN(gt->i915) >= 6)
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
+}
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 pm_irqs = 0;
+ u32 gt_irqs = 0;
+
+ gt->gt_imr = ~0;
+ if (HAS_L3_DPF(gt->i915)) {
+ /* L3 parity interrupt is always unmasked. */
+ gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
+ gt_irqs |= GT_PARITY_ERROR(gt->i915);
+ }
+
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN(gt->i915, 5))
+ gt_irqs |= ILK_BSD_USER_INTERRUPT;
+ else
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+
+ GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+
+ if (INTEL_GEN(gt->i915) >= 6) {
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
+ if (HAS_ENGINE(gt->i915, VECS0)) {
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
+
+ gt->pm_imr = 0xffffffff;
+ GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
new file mode 100644
index 000000000000..8f37593712c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_IRQ_H
+#define INTEL_GT_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+ GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
+ GEN8_GT_VCS1_IRQ | \
+ GEN8_GT_VECS_IRQ | \
+ GEN8_GT_PM_IRQ | \
+ GEN8_GT_GUC_IRQ)
+
+void gen11_gt_irq_reset(struct intel_gt *gt);
+void gen11_gt_irq_postinstall(struct intel_gt *gt);
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl);
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank,
+ const unsigned int bit);
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt);
+void gen5_gt_irq_reset(struct intel_gt *gt);
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask);
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_reset(struct intel_gt *gt);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_postinstall(struct intel_gt *gt);
+
+#endif /* INTEL_GT_IRQ_H */
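
As a usage sketch (hypothetical top-half, assuming this header is included and that the caller has already disabled the master interrupt), the gen8 helpers above are intended to be used as an ack-then-handle pair:

static void example_gen8_gt_top_half(struct intel_gt *gt, u32 master_ctl)
{
	u32 gt_iir[4] = {};

	/* Latch and clear the per-bank GT IIRs while the master IRQ is off */
	gen8_gt_irq_ack(gt, master_ctl, gt_iir);

	/* ... re-enable the master interrupt here ... */

	/* Dispatch the latched bits to the engine/PM/GuC handlers */
	gen8_gt_irq_handler(gt, master_ctl, gt_iir);
}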
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 9f8f7f54191f..d1c2f034296a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -4,23 +4,48 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/suspend.h>
+
#include "i915_drv.h"
+#include "i915_globals.h"
+#include "i915_params.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_llc.h"
#include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_wakeref.h"
-static void pm_notify(struct drm_i915_private *i915, int state)
+static void user_forcewake(struct intel_gt *gt, bool suspend)
{
- blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
+ int count = atomic_read(&gt->user_wakeref);
+
+ /* Inside suspend/resume so single threaded, no races to worry about. */
+ if (likely(!count))
+ return;
+
+ intel_gt_pm_get(gt);
+ if (suspend) {
+ GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+ atomic_sub(count, &gt->wakeref.count);
+ } else {
+ atomic_add(count, &gt->wakeref.count);
+ }
+ intel_gt_pm_put(gt);
}
-static int intel_gt_unpark(struct intel_wakeref *wf)
+static int __gt_unpark(struct intel_wakeref *wf)
{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ struct drm_i915_private *i915 = gt->i915;
+
+ GT_TRACE(gt, "\n");
- GEM_TRACE("\n");
+ i915_globals_unpark();
/*
* It seems that the DMC likes to transition between the DC states a lot
@@ -33,97 +58,132 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
- GEM_BUG_ON(!i915->gt.awake);
-
- intel_enable_gt_powersave(i915);
-
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
+ gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!gt->awake);
+ intel_rc6_unpark(&gt->rc6);
+ intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
- i915_queue_hangcheck(i915);
-
- pm_notify(i915, INTEL_GT_UNPARK);
+ intel_gt_unpark_requests(gt);
return 0;
}
-void intel_gt_pm_get(struct drm_i915_private *i915)
+static int __gt_park(struct intel_wakeref *wf)
{
- intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
-}
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
+ struct drm_i915_private *i915 = gt->i915;
-static int intel_gt_park(struct intel_wakeref *wf)
-{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
- intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+ GT_TRACE(gt, "\n");
- GEM_TRACE("\n");
-
- pm_notify(i915, INTEL_GT_PARK);
+ intel_gt_park_requests(gt);
+ i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
+ intel_rps_park(&gt->rps);
+ intel_rc6_park(&gt->rc6);
+
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
+ /* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+
+ i915_globals_park();
return 0;
}
-void intel_gt_pm_put(struct drm_i915_private *i915)
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __gt_unpark,
+ .put = __gt_park,
+};
+
+void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+ intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
}
-void intel_gt_pm_init(struct drm_i915_private *i915)
+void intel_gt_pm_init(struct intel_gt *gt)
{
- intel_wakeref_init(&i915->gt.wakeref);
- BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
+ /*
+ * Enabling power-management should be "self-healing". If we cannot
+ * enable a feature, simply leave it disabled with a notice to the
+ * user.
+ */
+ intel_rc6_init(&gt->rc6);
+ intel_rps_init(&gt->rps);
}
-static bool reset_engines(struct drm_i915_private *i915)
+static bool reset_engines(struct intel_gt *gt)
{
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
- return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+ return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}
-/**
- * intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
- * @force: ignore a failed reset and sanitize engine state anyway
- *
- * Anytime we reset the GPU, either with an explicit GPU reset or through a
- * PCI power cycle, the GPU loses state and we must reset our state tracking
- * to match. Note that calling intel_gt_sanitize() if the GPU has not
- * been reset results in much confusion!
- */
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+static void gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "force:%s", yesno(force));
- if (!reset_engines(i915) && !force)
- return;
+ /* Use a raw wakeref to avoid calling intel_display_power_get early */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, false);
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (intel_gt_is_wedged(gt))
+ intel_gt_unset_wedged(gt);
+
+ intel_uc_sanitize(&gt->uc);
+
+ for_each_engine(engine, gt, id)
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
+
+ intel_uc_reset_prepare(&gt->uc);
+
+ if (reset_engines(gt) || force) {
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, false);
+ }
+
+ for_each_engine(engine, gt, id)
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_gt_pm_fini(struct intel_gt *gt)
+{
+ intel_rc6_fini(&gt->rc6);
}
-int intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err = 0;
+ int err;
+
+ err = intel_gt_has_init_error(gt);
+ if (err)
+ return err;
+
+ GT_TRACE(gt, "\n");
/*
* After resume, we may need to poke into the pinned kernel
@@ -131,32 +191,143 @@ int intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id) {
- struct intel_context *ce;
+ intel_gt_pm_get(gt);
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_rc6_sanitize(&gt->rc6);
+ gt_sanitize(gt, true);
+ if (intel_gt_is_wedged(gt)) {
+ err = -EIO;
+ goto out_fw;
+ }
- intel_engine_pm_get(engine);
+ /* Only when the HW is re-initialised, can we replay the requests */
+ err = intel_gt_init_hw(gt);
+ if (err) {
+ dev_err(gt->i915->drm.dev,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ goto err_wedged;
+ }
- ce = engine->kernel_context;
- if (ce)
- ce->ops->reset(ce);
+ intel_rps_enable(&gt->rps);
+ intel_llc_enable(&gt->llc);
- ce = engine->preempt_context;
- if (ce)
- ce->ops->reset(ce);
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
- dev_err(i915->drm.dev,
+ dev_err(gt->i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
- break;
+ goto err_wedged;
}
}
- intel_gt_pm_put(i915);
+ intel_rc6_enable(&gt->rc6);
+
+ intel_uc_resume(&gt->uc);
+
+ user_forcewake(gt, false);
+
+out_fw:
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_pm_put(gt);
return err;
+
+err_wedged:
+ intel_gt_set_wedged(gt);
+ goto out_fw;
+}
+
+static void wait_for_suspend(struct intel_gt *gt)
+{
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ intel_gt_retire_requests(gt);
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+}
+
+void intel_gt_suspend_prepare(struct intel_gt *gt)
+{
+ user_forcewake(gt, true);
+ wait_for_suspend(gt);
+
+ intel_uc_suspend(&gt->uc);
+}
+
+static suspend_state_t pm_suspend_target(void)
+{
+#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
+ return pm_suspend_target_state;
+#else
+ return PM_SUSPEND_TO_IDLE;
+#endif
+}
+
+void intel_gt_suspend_late(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+	/* We expect to be idle already, but also want to be independent */
+ wait_for_suspend(gt);
+
+ if (is_mock_gt(gt))
+ return;
+
+ GEM_BUG_ON(gt->awake);
+
+ /*
+ * On disabling the device, we want to turn off HW access to memory
+ * that we no longer own.
+ *
+ * However, not all suspend-states disable the device. S0 (s2idle)
+ * is effectively runtime-suspend, the device is left powered on
+ * but needs to be put into a low power state. We need to keep
+	 * power management enabled, but we also retain system state and so
+ * it remains safe to keep on using our allocated memory.
+ */
+ if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+ return;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ intel_rps_disable(&gt->rps);
+ intel_rc6_disable(&gt->rc6);
+ intel_llc_disable(&gt->llc);
+ }
+
+ gt_sanitize(gt, false);
+
+ GT_TRACE(gt, "\n");
+}
+
+void intel_gt_runtime_suspend(struct intel_gt *gt)
+{
+ intel_uc_runtime_suspend(&gt->uc);
+
+ GT_TRACE(gt, "\n");
}
+
+int intel_gt_runtime_resume(struct intel_gt *gt)
+{
+ GT_TRACE(gt, "\n");
+ intel_gt_init_swizzling(gt);
+
+ return intel_uc_runtime_resume(&gt->uc);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_gt_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 53f342b20181..60f0e2fbe55c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -9,19 +9,58 @@
#include <linux/types.h>
-struct drm_i915_private;
+#include "intel_gt_types.h"
+#include "intel_wakeref.h"
-enum {
- INTEL_GT_UNPARK,
- INTEL_GT_PARK,
-};
+static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
+{
+ return intel_wakeref_is_active(&gt->wakeref);
+}
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+static inline void intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_wakeref_get(&gt->wakeref);
+}
-void intel_gt_pm_init(struct drm_i915_private *i915);
+static inline void __intel_gt_pm_get(struct intel_gt *gt)
+{
+ __intel_wakeref_get(&gt->wakeref);
+}
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-int intel_gt_resume(struct drm_i915_private *i915);
+static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+{
+ return intel_wakeref_get_if_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_put(struct intel_gt *gt)
+{
+ intel_wakeref_put(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+{
+ intel_wakeref_put_async(&gt->wakeref);
+}
+
+static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
+{
+ return intel_wakeref_wait_for_idle(&gt->wakeref);
+}
+
+void intel_gt_pm_init_early(struct intel_gt *gt);
+void intel_gt_pm_init(struct intel_gt *gt);
+void intel_gt_pm_fini(struct intel_gt *gt);
+
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
+int intel_gt_resume(struct intel_gt *gt);
+
+void intel_gt_runtime_suspend(struct intel_gt *gt);
+int intel_gt_runtime_resume(struct intel_gt *gt);
+
+static inline bool is_mock_gt(const struct intel_gt *gt)
+{
+ return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+}
#endif /* INTEL_GT_PM_H */
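
A short sketch of the intended wakeref usage (the example_poke_hw() helper is hypothetical and assumes this header): opportunistic hardware accesses use intel_gt_pm_get_if_awake(), unconditional ones pair intel_gt_pm_get() with intel_gt_pm_put():

static void example_poke_hw(struct intel_gt *gt)
{
	/* Only touch the HW if the GT is already unparked */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	/* ... registers may be accessed here; the GT cannot park ... */

	intel_gt_pm_put(gt);
}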
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
new file mode 100644
index 000000000000..babe866126d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+
+static void write_pm_imr(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_imr;
+ i915_reg_t reg;
+
+ if (INTEL_GEN(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ mask <<= 16; /* pm is in upper half */
+ } else if (INTEL_GEN(i915) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+static void gen6_gt_pm_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ u32 new_val;
+
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ new_val = gt->pm_imr;
+ new_val &= ~interrupt_mask;
+ new_val |= ~enabled_irq_mask & interrupt_mask;
+
+ if (new_val != gt->pm_imr) {
+ gt->pm_imr = new_val;
+ write_pm_imr(gt);
+ }
+}
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, mask);
+}
+
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, 0);
+}
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_posting_read(uncore, reg);
+}
+
+static void write_pm_ier(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_ier;
+ i915_reg_t reg;
+
+ if (INTEL_GEN(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ mask <<= 16; /* pm is in upper half */
+ } else if (INTEL_GEN(i915) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ gt->pm_ier |= enable_mask;
+ write_pm_ier(gt);
+ gen6_gt_pm_unmask_irq(gt, enable_mask);
+}
+
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ gt->pm_ier &= ~disable_mask;
+ gen6_gt_pm_mask_irq(gt, disable_mask);
+ write_pm_ier(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
new file mode 100644
index 000000000000..b29816a04809
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_IRQ_H
+#define INTEL_GT_PM_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask);
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask);
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask);
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask);
+
+#endif /* INTEL_GT_PM_IRQ_H */
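
The PM IRQ helpers above expect gt->irq_lock to be held (see the lockdep assertions in intel_gt_pm_irq.c). A hypothetical example_enable_pm_events() caller, assuming this header and intel_gt_types.h, would look like:

static void example_enable_pm_events(struct intel_gt *gt, u32 events)
{
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, events);	/* drop any stale pending bits */
	gen6_gt_pm_enable_irq(gt, events);	/* set IER and unmask IMR */
	spin_unlock_irq(&gt->irq_lock);
}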
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
new file mode 100644
index 000000000000..7ef1d37970f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -0,0 +1,225 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/workqueue.h>
+
+#include "i915_drv.h" /* for_each_engine() */
+#include "i915_request.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_timeline.h"
+
+static bool retire_requests(struct intel_timeline *tl)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (!i915_request_retire(rq))
+ return false;
+
+ /* And check nothing new was submitted */
+ return !i915_active_fence_isset(&tl->last_request);
+}
+
+static bool flush_submission(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool active = false;
+
+ if (!intel_gt_pm_is_awake(gt))
+ return false;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_flush_submission(engine);
+ active |= flush_work(&engine->retire_work);
+ active |= flush_work(&engine->wakeref.work);
+ }
+
+ return active;
+}
+
+static void engine_retire(struct work_struct *work)
+{
+ struct intel_engine_cs *engine =
+ container_of(work, typeof(*engine), retire_work);
+ struct intel_timeline *tl = xchg(&engine->retire, NULL);
+
+ do {
+ struct intel_timeline *next = xchg(&tl->retire, NULL);
+
+ /*
+ * Our goal here is to retire _idle_ timelines as soon as
+ * possible (as they are idle, we do not expect userspace
+ * to be cleaning up anytime soon).
+ *
+ * If the timeline is currently locked, either it is being
+ * retired elsewhere or about to be!
+ */
+ if (mutex_trylock(&tl->mutex)) {
+ retire_requests(tl);
+ mutex_unlock(&tl->mutex);
+ }
+ intel_timeline_put(tl);
+
+ GEM_BUG_ON(!next);
+ tl = ptr_mask_bits(next, 1);
+ } while (tl);
+}
+
+static bool add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl)
+{
+#define STUB ((struct intel_timeline *)1)
+ struct intel_timeline *first;
+
+ /*
+ * We open-code a llist here to include the additional tag [BIT(0)]
+ * so that we know when the timeline is already on a
+ * retirement queue: either this engine or another.
+ */
+
+ if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
+ return false;
+
+ intel_timeline_get(tl);
+ first = READ_ONCE(engine->retire);
+ do
+ tl->retire = ptr_pack_bits(first, 1, 1);
+ while (!try_cmpxchg(&engine->retire, &first, tl));
+
+ return !first;
+}
+
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl)
+{
+ if (add_retire(engine, tl))
+ schedule_work(&engine->retire_work);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine)
+{
+ INIT_WORK(&engine->retire_work, engine_retire);
+}
+
+void intel_engine_fini_retire(struct intel_engine_cs *engine)
+{
+ flush_work(&engine->retire_work);
+ GEM_BUG_ON(engine->retire);
+}
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ unsigned long active_count = 0;
+ bool interruptible;
+ LIST_HEAD(free);
+
+ interruptible = true;
+ if (unlikely(timeout < 0))
+ timeout = -timeout, interruptible = false;
+
+ flush_submission(gt); /* kick the ksoftirqd tasklets */
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++; /* report busy to caller, try again? */
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ if (timeout > 0) {
+ struct dma_fence *fence;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ timeout = dma_fence_wait_timeout(fence,
+ interruptible,
+ timeout);
+ dma_fence_put(fence);
+ }
+ }
+
+ if (!retire_requests(tl) || flush_submission(gt))
+ active_count++;
+
+ spin_lock(&timelines->lock);
+
+ /* Resume iteration after dropping lock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ mutex_unlock(&tl->mutex);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+
+ return active_count ? timeout : 0;
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+ /* If the device is asleep, we have no requests outstanding */
+ if (!intel_gt_pm_is_awake(gt))
+ return 0;
+
+ while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return timeout;
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), requests.retire_work.work);
+
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_gt_retire_requests(gt);
+}
+
+void intel_gt_init_requests(struct intel_gt *gt)
+{
+ INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
+}
+
+void intel_gt_park_requests(struct intel_gt *gt)
+{
+ cancel_delayed_work(&gt->requests.retire_work);
+}
+
+void intel_gt_unpark_requests(struct intel_gt *gt)
+{
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_fini_requests(struct intel_gt *gt)
+{
+ /* Wait until the work is marked as finished before unloading! */
+ cancel_delayed_work_sync(&gt->requests.retire_work);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
new file mode 100644
index 000000000000..dbac53baf1cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_REQUESTS_H
+#define INTEL_GT_REQUESTS_H
+
+struct intel_engine_cs;
+struct intel_gt;
+struct intel_timeline;
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+static inline void intel_gt_retire_requests(struct intel_gt *gt)
+{
+ intel_gt_retire_requests_timeout(gt, 0);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine);
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl);
+void intel_engine_fini_retire(struct intel_engine_cs *engine);
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
+void intel_gt_init_requests(struct intel_gt *gt);
+void intel_gt_park_requests(struct intel_gt *gt);
+void intel_gt_unpark_requests(struct intel_gt *gt);
+void intel_gt_fini_requests(struct intel_gt *gt);
+
+#endif /* INTEL_GT_REQUESTS_H */
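
For illustration (the example_quiesce_gt() helper is hypothetical and assumes this header): intel_gt_retire_requests() reaps only already-completed requests, while intel_gt_wait_for_idle() loops on the timeout variant until the GT is idle; passing a negative timeout to intel_gt_retire_requests_timeout() requests an uninterruptible wait.

static int example_quiesce_gt(struct intel_gt *gt)
{
	/* Reap whatever has already completed, without blocking */
	intel_gt_retire_requests(gt);

	/* Then wait interruptibly, up to HZ jiffies, for the remainder */
	return intel_gt_wait_for_idle(gt, HZ);
}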
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
new file mode 100644
index 000000000000..96890dd12b5f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_TYPES__
+#define __INTEL_GT_TYPES__
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "uc/intel_uc.h"
+
+#include "i915_vma.h"
+#include "intel_engine_types.h"
+#include "intel_llc_types.h"
+#include "intel_reset_types.h"
+#include "intel_rc6_types.h"
+#include "intel_rps_types.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct i915_ggtt;
+struct intel_engine_cs;
+struct intel_uncore;
+
+struct intel_gt {
+ struct drm_i915_private *i915;
+ struct intel_uncore *uncore;
+ struct i915_ggtt *ggtt;
+
+ struct intel_uc uc;
+
+ struct intel_gt_timelines {
+ spinlock_t lock; /* protects active_list */
+ struct list_head active_list;
+
+ /* Pack multiple timelines' seqnos into the same page */
+ spinlock_t hwsp_lock;
+ struct list_head hwsp_free_list;
+ } timelines;
+
+ struct intel_gt_requests {
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+ } requests;
+
+ struct intel_wakeref wakeref;
+ atomic_t user_wakeref;
+
+ struct list_head closed_vma;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
+
+ struct intel_reset reset;
+
+ /**
+ * Is the GPU currently considered idle, or busy executing
+ * userspace requests? Whilst idle, we allow runtime power
+ * management to power down the hardware and display clocks.
+ * In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ intel_wakeref_t awake;
+
+ struct intel_llc llc;
+ struct intel_rc6 rc6;
+ struct intel_rps rps;
+
+ ktime_t last_init_time;
+
+ struct i915_vma *scratch;
+
+ spinlock_t irq_lock;
+ u32 gt_imr;
+ u32 pm_ier;
+ u32 pm_imr;
+
+ u32 pm_guc_events;
+
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
+
+ /*
+ * Default address space (either GGTT or ppGTT depending on arch).
+ *
+ * Reserved for exclusive use by the kernel.
+ */
+ struct i915_address_space *vm;
+};
+
+enum intel_gt_scratch_field {
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+
+ /* 6 * 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
+
+ /* 4 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
+};
+
+#endif /* __INTEL_GT_TYPES__ */
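
The scratch fields above are byte offsets into the single per-GT scratch vma and are consumed through intel_gt_scratch_offset() from intel_gt.h; a hypothetical example_render_flush_addr() helper shows the pattern:

static u32 example_render_flush_addr(const struct intel_gt *gt)
{
	/* GGTT address of the 8-byte render-flush scratch slot */
	return intel_gt_scratch_offset(gt, INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
}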
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
new file mode 100644
index 000000000000..16acdc5d6734
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h> /* fault-inject.h is not standalone! */
+
+#include <linux/fault-inject.h>
+
+#include "i915_trace.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ unsigned int nr;
+
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
+static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
+{
+ struct pagevec stack;
+ struct page *page;
+
+ if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
+ i915_gem_shrink_all(vm->i915);
+
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
+
+ if (!vm->pt_kmap_wc)
+ return alloc_page(gfp);
+
+ /* Look in our global stash of WC pages... */
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
+
+ /*
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
+ *
+ * We have to be careful as page allocation may trigger the shrinker
+ * (via direct reclaim) which will fill up the WC stash underneath us.
+ * So we add our WB pages into a temporary pvec on the stack and merge
+ * them into the WC stash after all the allocations are complete.
+ */
+ pagevec_init(&stack);
+ do {
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ break;
+
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
+
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
+
+ /* Merge spare WC pages to the global stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
+ }
+
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
+}
+
+static void vm_free_pages_release(struct i915_address_space *vm,
+ bool immediate)
+{
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+
+ lockdep_assert_held(&vm->free_pages.lock);
+ GEM_BUG_ON(!pagevec_count(pvec));
+
+ if (vm->pt_kmap_wc) {
+ /*
+ * When we use WC, first fill up the global stash and then
+ * only if full immediately free the overflow.
+ */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
+
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash for
+ * others to use it as we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+
+ pvec = &stack;
+ set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
+ }
+
+ __pagevec_release(pvec);
+}
+
+static void vm_free_page(struct i915_address_space *vm, struct page *page)
+{
+ /*
+ * On !llc, we need to change the pages back to WB. We only do so
+ * in bulk, so we rarely need to change the page attributes here,
+ * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+ * To make detection of the possible sleep more likely, use an
+ * unconditional might_sleep() for everybody.
+ */
+ might_sleep();
+ spin_lock(&vm->free_pages.lock);
+ while (!pagevec_space(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, false);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+ pagevec_add(&vm->free_pages.pvec, page);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+void __i915_vm_close(struct i915_address_space *vm)
+{
+ struct i915_vma *vma, *vn;
+
+ mutex_lock(&vm->mutex);
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ /* Keep the obj (and hence the vma) alive as _we_ destroy it */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ __i915_vma_put(vma);
+
+ i915_gem_object_put(obj);
+ }
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+ struct i915_address_space *vm =
+ container_of(work, struct i915_address_space, rcu.work);
+
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
+
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
+
+ queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass)
+{
+ kref_init(&vm->ref);
+ INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ atomic_set(&vm->open, 1);
+
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ lockdep_set_subclass(&vm->mutex, subclass);
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->bound_list);
+}
+
+void clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
+static int __setup_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p,
+ gfp_t gfp)
+{
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
+ if (unlikely(!p->page))
+ return -ENOMEM;
+
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
+ if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
+ vm_free_page(vm, p->page);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
+}
+
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+ dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ vm_free_page(vm, p->page);
+}
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
+{
+ kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
+}
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
+{
+ unsigned long size;
+
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch (read-only) between all vm, we create one 64k scratch page
+ * for all.
+ */
+ size = I915_GTT_PAGE_SIZE_4K;
+ if (i915_vm_is_4lvl(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ size = I915_GTT_PAGE_SIZE_64K;
+ gfp |= __GFP_NOWARN;
+ }
+ gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+ do {
+ unsigned int order = get_order(size);
+ struct page *page;
+ dma_addr_t addr;
+
+ page = alloc_pages(gfp, order);
+ if (unlikely(!page))
+ goto skip;
+
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
+ if (unlikely(dma_mapping_error(vm->dma, addr)))
+ goto free_page;
+
+ if (unlikely(!IS_ALIGNED(addr, size)))
+ goto unmap_page;
+
+ vm->scratch[0].base.page = page;
+ vm->scratch[0].base.daddr = addr;
+ vm->scratch_order = order;
+ return 0;
+
+unmap_page:
+ dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+ __free_pages(page, order);
+skip:
+ if (size == I915_GTT_PAGE_SIZE_4K)
+ return -ENOMEM;
+
+ size = I915_GTT_PAGE_SIZE_4K;
+ gfp &= ~__GFP_NOWARN;
+ } while (1);
+}
+
+void cleanup_scratch_page(struct i915_address_space *vm)
+{
+ struct i915_page_dma *p = px_base(&vm->scratch[0]);
+ unsigned int order = vm->scratch_order;
+
+ dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(p->page, order);
+}
+
+void free_scratch(struct i915_address_space *vm)
+{
+ int i;
+
+ if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
+ return;
+
+ for (i = 1; i <= vm->top; i++) {
+ if (!px_dma(&vm->scratch[i]))
+ break;
+ cleanup_page_dma(vm, px_base(&vm->scratch[i]));
+ }
+
+ cleanup_scratch_page(vm);
+}
+
+void gtt_write_workarounds(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ /*
+	 * This function is for GTT-related workarounds. It is called on
+	 * driver load and after a GPU reset, so workarounds can be placed
+	 * here even if they get overwritten by a GPU reset.
+ */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
+ if (IS_BROADWELL(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_GEN9_LP(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+ * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced from BDW+, although technically they
+ * only *work* from gen9+. For pre-BDW we instead have the option for
+ * 32K pages, but we don't currently have any support for it in our
+ * driver.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(i915) <= 10)
+ intel_uncore_rmw(uncore,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ 0,
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+ if (IS_GEN_RANGE(i915, 8, 11)) {
+ bool can_use_gtt_cache = true;
+
+ /*
+ * According to the BSpec if we use 2M/1G pages then we also
+ * need to disable the GTT cache. At least on BDW we can see
+ * visual corruption when using 2M pages, and not disabling the
+ * GTT cache.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+ can_use_gtt_cache = false;
+
+ /* WaGttCachingOffByDefault */
+ intel_uncore_write(uncore,
+ HSW_GTT_CACHE_EN,
+ can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+ WARN_ON_ONCE(can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
+ }
+}
+
+u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ /* TGL doesn't support LLC or AGE settings */
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+}
+
+/*
+ * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases.
+ */
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
+ */
+
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+void setup_private_pat(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ if (INTEL_GEN(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (INTEL_GEN(i915) >= 10)
+ cnl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
+ else
+ bdw_setup_private_ppat(uncore);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_gtt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
new file mode 100644
index 000000000000..7da7681c20b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
+ */
+
+#ifndef __INTEL_GTT_H__
+#define __INTEL_GTT_H__
+
+#include <linux/io-mapping.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/pagevec.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
+
+#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_selftest.h"
+#include "i915_vma_types.h"
+
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
+
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+typedef u32 gen6_pte_t;
+typedef u64 gen8_pte_t;
+
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
+
+#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+
+/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_VALID REG_BIT(0)
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
+#define GEN6_PDE_VALID REG_BIT(0)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
+#define BYT_PTE_WRITEABLE REG_BIT(1)
+
+/*
+ * Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED (0)
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
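A worked value for the cacheability encoding above (illustration only):

	/*
	 * HSW_CACHEABILITY_CONTROL(0xb) == HSW_WB_ELLC_LLC_AGE0:
	 *   (0xb & 0x7) << 1        = 0x006  -> PTE bits 3:1
	 *   (0xb & 0x8) << (11 - 3) = 0x800  -> PTE bit 11
	 *   total                   = 0x806
	 */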
+
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference as compared to normal x86 3 level page table is the PDPEs are
+ * programmed via register.
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
+ * PML4E | PDPE | PDE | PTE | offset
+ */
+#define GEN8_3LVL_PDPES 4
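For illustration, each level of the 48b layout above indexes a 512-entry table (matching I915_PDES), i.e. 9 bits per level. A hypothetical helper splitting a graphics virtual address would look roughly like this (a sketch for this description, not driver code):

	static inline void gen8_gva_split_example(u64 gva)
	{
		u32 pml4e = (gva >> 39) & 0x1ff;	/* bits 47:39 */
		u32 pdpe  = (gva >> 30) & 0x1ff;	/* bits 38:30 */
		u32 pde   = (gva >> 21) & 0x1ff;	/* bits 29:21 */
		u32 pte   = (gva >> 12) & 0x1ff;	/* bits 20:12 */
		u32 off   = gva & 0xfff;		/* bits 11:0  */

		(void)pml4e; (void)pdpe; (void)pde; (void)pte; (void)off;
	}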
+
+#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE 0 /* WB LLC */
+#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP REG_BIT(6)
+#define GEN8_PPAT_AGE(x) ((x)<<4)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M BIT(7)
+
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
+
+struct i915_page_dma {
+ struct page *page;
+ union {
+ dma_addr_t daddr;
+
+ /*
+ * For gen6/gen7 only. This is the offset in the GGTT
+ * where the page directory entries for PPGTT begin
+ */
+ u32 ggtt_offset;
+ };
+};
+
+struct i915_page_scratch {
+ struct i915_page_dma base;
+ u64 encode;
+};
+
+struct i915_page_table {
+ struct i915_page_dma base;
+ atomic_t used;
+};
+
+struct i915_page_directory {
+ struct i915_page_table pt;
+ spinlock_t lock;
+ void *entry[512];
+};
+
+#define __px_choose_expr(x, type, expr, other) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), type) || \
+ __builtin_types_compatible_p(typeof(x), const type), \
+ ({ type __x = (type)(x); expr; }), \
+ other)
+
+#define px_base(px) \
+ __px_choose_expr(px, struct i915_page_dma *, __x, \
+ __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_table *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
+ (void)0))))
+#define px_dma(px) (px_base(px)->daddr)
+
+#define px_pt(px) \
+ __px_choose_expr(px, struct i915_page_table *, __x, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+ (void)0))
+#define px_used(px) (&px_pt(px)->used)
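A small illustration of what these wrappers buy: via __builtin_choose_expr the same macro accepts any of the page-table structs and resolves to the embedded i915_page_dma at compile time (the locals below are hypothetical):

	struct i915_page_table pt;
	struct i915_page_directory pd;

	dma_addr_t a = px_dma(&pt);	/* expands to pt.base.daddr */
	dma_addr_t b = px_dma(&pd);	/* expands to pd.pt.base.daddr */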
+
+enum i915_cache_level;
+
+struct drm_i915_file_private;
+struct drm_i915_gem_object;
+struct i915_vma;
+struct intel_gt;
+
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
+
+void stash_init(struct pagestash *stash);
+
+struct i915_address_space {
+ struct kref ref;
+ struct rcu_work rcu;
+
+ struct drm_mm mm;
+ struct intel_gt *gt;
+ struct drm_i915_private *i915;
+ struct device *dma;
+ /*
+ * Every address space belongs to a struct file - except for the global
+ * GTT that is owned by the driver (and so @file is set to NULL). In
+ * principle, no information should leak from one context to another
+ * (or between files/processes etc) unless explicitly shared by the
+ * owner. Tracking the owner is important in order to free up per-file
+ * objects along with the file, to aid resource tracking, and to
+ * assign blame.
+ */
+ struct drm_i915_file_private *file;
+ u64 total; /* size addr space maps (ex. 2GB for ggtt) */
+ u64 reserved; /* size addr space reserved */
+
+ unsigned int bind_async_flags;
+
+ /*
+ * Each active user context has its own address space (in full-ppgtt).
+ * Since the vm may be shared between multiple contexts, we count how
+ * many contexts keep us "open". Once open hits zero, we are closed
+ * and do not allow any new attachments, and proceed to shutdown our
+ * vma and page directories.
+ */
+ atomic_t open;
+
+ struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
+
+ struct i915_page_scratch scratch[4];
+ unsigned int scratch_order;
+ unsigned int top;
+
+ /**
+ * List of vma currently bound.
+ */
+ struct list_head bound_list;
+
+ struct pagestash free_pages;
+
+ /* Global GTT */
+ bool is_ggtt:1;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
+
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
+#define PTE_READ_ONLY BIT(0)
+
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*clear_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*cleanup)(struct i915_address_space *vm);
+
+ struct i915_vma_ops vma_ops;
+
+ I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+ I915_SELFTEST_DECLARE(bool scrub_64K);
+};
+
+/*
+ * The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations, GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_ggtt {
+ struct i915_address_space vm;
+
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+ void (*invalidate)(struct i915_ggtt *ggtt);
+
+ /** PPGTT used to alias the global GTT (aliasing-PPGTT mode) */
+ struct i915_ppgtt *alias;
+
+ bool do_idle_maps;
+
+ int mtrr;
+
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
+ u32 pin_bias;
+
+ unsigned int num_fences;
+ struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct list_head fence_list;
+
+ /**
+ * List of all objects in gtt_space currently mmapped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /* Manual runtime pm autosuspend delay for user GGTT mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
+ struct mutex error_mutex;
+ struct drm_mm_node error_capture;
+ struct drm_mm_node uc_fw;
+};
+
+struct i915_ppgtt {
+ struct i915_address_space vm;
+
+ struct i915_page_directory *pd;
+};
+
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+
+static inline bool
+i915_vm_is_4lvl(const struct i915_address_space *vm)
+{
+ return (vm->total - 1) >> 32;
+}
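In other words, an address space is treated as 4-level as soon as it spans more than 4 GiB. Two illustrative values:

	/*
	 * vm->total == 1ULL << 32 (4 GiB)   -> (total - 1) >> 32 == 0 -> 3-level
	 * vm->total == 1ULL << 48 (256 TiB) -> (total - 1) >> 32 != 0 -> 4-level
	 */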
+
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, vm);
+}
+
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ppgtt, vm);
+}
+
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
+{
+ kref_get(&vm->ref);
+ return vm;
+}
+
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
+{
+ kref_put(&vm->ref, i915_vm_release);
+}
+
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ atomic_inc(&vm->open);
+ return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+ if (atomic_add_unless(&vm->open, 1, 0))
+ return i915_vm_get(vm);
+
+ return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ if (atomic_dec_and_test(&vm->open))
+ __i915_vm_close(vm);
+
+ i915_vm_put(vm);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass);
+void i915_address_space_fini(struct i915_address_space *vm);
+
+static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
+{
+ const u32 mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/*
+ * Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
+{
+ const u64 mask = ~((1ULL << pde_shift) - 1);
+ u64 end;
+
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
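A worked example of the clamping behaviour (values chosen purely for illustration): with pde_shift == GEN6_PDE_SHIFT (22), NUM_PTE is 1024, so for addr = 0x3ff000 and length = 0x2000 the range crosses a PDE boundary and only the single PTE up to that boundary is counted:

	/*
	 * i915_pte_index(0x3ff000, 22) == 1023
	 * end == 0x401000 falls in the next PDE, so the function returns
	 * 1024 - 1023 == 1.
	 */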
+
+static inline u32 i915_pde_index(u64 addr, u32 shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+ const unsigned short n)
+{
+ return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+ const unsigned short n)
+{
+ return pdp->entry[n];
+}
+
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
+{
+ struct i915_page_dma *pt = ppgtt->pd->entry[n];
+
+ return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
+
+int i915_ggtt_probe_hw(struct drm_i915_private *i915);
+int i915_ggtt_init_hw(struct drm_i915_private *i915);
+int i915_ggtt_enable_hw(struct drm_i915_private *i915);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *i915);
+void i915_ggtt_driver_release(struct drm_i915_private *i915);
+
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt);
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
+
+u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags);
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);
+
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do { \
+ u64 v__ = lower_32_bits(v); \
+ fill_px((px), v__ << 32 | v__); \
+} while (0)
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
+void cleanup_scratch_page(struct i915_address_space *vm);
+void free_scratch(struct i915_address_space *vm);
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm);
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
+struct i915_page_directory *__alloc_pd(size_t sz);
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);
+
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level));
+
+#define set_pd_entry(pd, idx, to) \
+ __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch);
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch);
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+
+int ggtt_set_pages(struct i915_vma *vma);
+int ppgtt_set_pages(struct i915_vma *vma);
+void clear_pages(struct i915_vma *vma);
+
+void gtt_write_workarounds(struct intel_gt *gt);
+
+void setup_private_pat(struct intel_uncore *uncore);
+
+static inline struct sgt_dma {
+ struct scatterlist *sg;
+ dma_addr_t dma, max;
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+
+ return (struct sgt_dma){ sg, addr, addr + sg->length };
+}
+
+#endif
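The sgt_dma() helper above only snapshots the first scatterlist element; the PTE insertion loops then advance it by hand. A minimal sketch of that pattern, assuming a plain 4K stride (illustration only, not a function from this patch):

	struct sgt_dma it = sgt_dma(vma);

	do {
		/* ... emit a PTE for it.dma here ... */
		it.dma += I915_GTT_PAGE_SIZE;
		if (it.dma >= it.max) {
			it.sg = sg_next(it.sg);
			if (!it.sg)
				break;
			it.dma = sg_dma_address(it.sg);
			it.max = it.dma + it.sg->length;
		}
	} while (1);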
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
deleted file mode 100644
index 6bcfa6456c45..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "intel_reset.h"
-#include "i915_drv.h"
-
-struct hangcheck {
- u64 acthd;
- u32 ring;
- u32 head;
- enum intel_engine_hangcheck_action action;
- unsigned long action_timestamp;
- int deadlock;
- struct intel_instdone instdone;
- bool wedged:1;
- bool stalled:1;
-};
-
-static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
-{
- u32 tmp = current_instdone | *old_instdone;
- bool unchanged;
-
- unchanged = tmp == *old_instdone;
- *old_instdone |= tmp;
-
- return unchanged;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_instdone instdone;
- struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
- bool stuck;
- int slice;
- int subslice;
-
- if (engine->id != RCS0)
- return true;
-
- intel_engine_get_instdone(engine, &instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = instdone_unchanged(instdone.instdone,
- &accu_instdone->instdone);
- stuck &= instdone_unchanged(instdone.slice_common,
- &accu_instdone->slice_common);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
- stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
- &accu_instdone->sampler[slice][subslice]);
- stuck &= instdone_unchanged(instdone.row[slice][subslice],
- &accu_instdone->row[slice][subslice]);
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return ENGINE_ACTIVE_HEAD;
- }
-
- if (!subunits_stuck(engine))
- return ENGINE_ACTIVE_SUBUNITS;
-
- return ENGINE_DEAD;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != ENGINE_DEAD)
- return ha;
-
- if (IS_GEN(dev_priv, 2))
- return ENGINE_DEAD;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = ENGINE_READ(engine, RING_CTL);
- if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, engine->mask, 0,
- "stuck wait on %s", engine->name);
- ENGINE_WRITE(engine, RING_CTL, tmp);
- return ENGINE_WAIT_KICK;
- }
-
- return ENGINE_DEAD;
-}
-
-static void hangcheck_load_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- hc->acthd = intel_engine_get_active_head(engine);
- hc->ring = ENGINE_READ(engine, RING_START);
- hc->head = ENGINE_READ(engine, RING_HEAD);
-}
-
-static void hangcheck_store_sample(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.last_ring = hc->ring;
- engine->hangcheck.last_head = hc->head;
-}
-
-static enum intel_engine_hangcheck_action
-hangcheck_get_action(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- if (intel_engine_is_idle(engine))
- return ENGINE_IDLE;
-
- if (engine->hangcheck.last_ring != hc->ring)
- return ENGINE_ACTIVE_SEQNO;
-
- if (engine->hangcheck.last_head != hc->head)
- return ENGINE_ACTIVE_SEQNO;
-
- return engine_stuck(engine, hc->acthd);
-}
-
-static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
-
- hc->action = hangcheck_get_action(engine, hc);
-
- /* We always increment the progress
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, make it as a progress as the seqno
- * advancement might ensure and if not, it
- * will catch the hanging engine.
- */
-
- switch (hc->action) {
- case ENGINE_IDLE:
- case ENGINE_ACTIVE_SEQNO:
- /* Clear head and subunit states on seqno movement */
- hc->acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- /* Intentional fall through */
- case ENGINE_WAIT_KICK:
- case ENGINE_WAIT:
- engine->hangcheck.action_timestamp = jiffies;
- break;
-
- case ENGINE_ACTIVE_HEAD:
- case ENGINE_ACTIVE_SUBUNITS:
- /*
- * Seqno stuck with still active engine gets leeway,
- * in hopes that it is just a long shader.
- */
- timeout = I915_SEQNO_DEAD_TIMEOUT;
- break;
-
- case ENGINE_DEAD:
- break;
-
- default:
- MISSING_CASE(hc->action);
- }
-
- hc->stalled = time_after(jiffies,
- engine->hangcheck.action_timestamp + timeout);
- hc->wedged = time_after(jiffies,
- engine->hangcheck.action_timestamp +
- I915_ENGINE_WEDGED_TIMEOUT);
-}
-
-static void hangcheck_declare_hang(struct drm_i915_private *i915,
- intel_engine_mask_t hung,
- intel_engine_mask_t stuck)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
- char msg[80];
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "no progress" : "hang");
- for_each_engine_masked(engine, i915, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
- intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
-
- if (!i915_modparams.enable_hangcheck)
- return;
-
- if (!READ_ONCE(dev_priv->gt.awake))
- return;
-
- if (i915_terminally_wedged(dev_priv))
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
-
- for_each_engine(engine, dev_priv, id) {
- struct hangcheck hc;
-
- intel_engine_signal_breadcrumbs(engine);
-
- hangcheck_load_sample(engine, &hc);
- hangcheck_accumulate_sample(engine, &hc);
- hangcheck_store_sample(engine, &hc);
-
- if (hc.stalled) {
- hung |= engine->mask;
- if (hc.action != ENGINE_DEAD)
- stuck |= engine->mask;
- }
-
- if (hc.wedged)
- wedged |= engine->mask;
- }
-
- if (GEM_SHOW_DEBUG() && (hung | stuck)) {
- struct drm_printer p = drm_debug_printer("hangcheck");
-
- for_each_engine(engine, dev_priv, id) {
- if (intel_engine_is_idle(engine))
- continue;
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
- }
-
- if (wedged) {
- dev_err(dev_priv->drm.dev,
- "GPU recovery timed out,"
- " cancelling all in-flight rendering.\n");
- GEM_TRACE_DUMP();
- i915_gem_set_wedged(dev_priv);
- }
-
- if (hung)
- hangcheck_declare_hang(dev_priv, hung, stuck);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- /* Reset timer in case GPU hangs without another request being added */
- i915_queue_hangcheck(dev_priv);
-}
-
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
- engine->hangcheck.action_timestamp = jiffies;
-}
-
-void intel_hangcheck_init(struct drm_i915_private *i915)
-{
- INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_hangcheck.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
new file mode 100644
index 000000000000..ceb785b75c25
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -0,0 +1,161 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/cpufreq.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_sideband.h"
+
+struct ia_constants {
+ unsigned int min_gpu_freq;
+ unsigned int max_gpu_freq;
+
+ unsigned int min_ring_freq;
+ unsigned int max_ia_freq;
+};
+
+static struct intel_gt *llc_to_gt(struct intel_llc *llc)
+{
+ return container_of(llc, struct intel_gt, llc);
+}
+
+static unsigned int cpu_max_MHz(void)
+{
+ struct cpufreq_policy *policy;
+ unsigned int max_khz;
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_khz = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to the measured freq if none is found; the PCU will
+ * ensure we don't go over.
+ */
+ max_khz = tsc_khz;
+ }
+
+ return max_khz / 1000;
+}
+
+static bool get_ia_constants(struct intel_llc *llc,
+ struct ia_constants *consts)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ if (rps->max_freq <= rps->min_freq)
+ return false;
+
+ consts->max_ia_freq = cpu_max_MHz();
+
+ consts->min_ring_freq =
+ intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
+
+ consts->min_gpu_freq = rps->min_freq;
+ consts->max_gpu_freq = rps->max_freq;
+ if (INTEL_GEN(i915) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ consts->min_gpu_freq /= GEN9_FREQ_SCALER;
+ consts->max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ return true;
+}
+
+static void calc_ia_freq(struct intel_llc *llc,
+ unsigned int gpu_freq,
+ const struct ia_constants *consts,
+ unsigned int *out_ia_freq,
+ unsigned int *out_ring_freq)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ const int diff = consts->max_gpu_freq - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (INTEL_GEN(i915) >= 9) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (INTEL_GEN(i915) >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(consts->min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(i915)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
+ ring_freq = max(consts->min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ const int min_freq = 15;
+ const int scale = 180;
+
+ /*
+ * On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = consts->max_ia_freq - diff * scale / 2;
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
+
+ *out_ia_freq = ia_freq;
+ *out_ring_freq = ring_freq;
+}
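A concrete walk through the legacy branch with made-up numbers: if consts->max_ia_freq is 3000 MHz and consts->max_gpu_freq is 22 (50 MHz units, i.e. 1100 MHz), then for gpu_freq = 16:

	/*
	 * diff    = 22 - 16 = 6
	 * ia_freq = 3000 - 6 * 180 / 2 = 2460
	 * DIV_ROUND_CLOSEST(2460, 100) = 25   (100 MHz units for pcode)
	 * For gpu_freq < 15 (under 750 MHz) the 800 MHz floor gives 8 instead.
	 */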
+
+static void gen6_update_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ unsigned int gpu_freq;
+
+ if (!get_ia_constants(llc, &consts))
+ return;
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = consts.max_gpu_freq;
+ gpu_freq >= consts.min_gpu_freq;
+ gpu_freq--) {
+ unsigned int ia_freq, ring_freq;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+ sandybridge_pcode_write(i915,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+void intel_llc_enable(struct intel_llc *llc)
+{
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ gen6_update_ring_freq(llc);
+}
+
+void intel_llc_disable(struct intel_llc *llc)
+{
+ /* Currently there is no HW configuration to be done to disable. */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_llc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
new file mode 100644
index 000000000000..ef09a890d2b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_H
+#define INTEL_LLC_H
+
+struct intel_llc;
+
+void intel_llc_enable(struct intel_llc *llc);
+void intel_llc_disable(struct intel_llc *llc);
+
+#endif /* INTEL_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
new file mode 100644
index 000000000000..ecad4687b930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_TYPES_H
+#define INTEL_LLC_TYPES_H
+
+struct intel_llc {
+};
+
+#endif /* INTEL_LLC_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 82b7ace62d97..a13a8c4b65ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,15 +133,19 @@
*/
#include <linux/interrupt.h>
-#include "gem/i915_gem_context.h"
-
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
+#include "i915_perf.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -161,6 +165,15 @@
#define GEN8_CTX_STATUS_COMPLETED_MASK \
(GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
@@ -214,12 +227,65 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
return container_of(engine, struct virtual_engine, base);
}
-static int execlists_context_deferred_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
+static int __execlists_context_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
static void execlists_init_reg_state(u32 *reg_state,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring);
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+static void mark_eio(struct i915_request *rq)
+{
+ if (i915_request_completed(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ struct i915_request *active = rq;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ active = rq;
+ }
+ rcu_read_unlock();
+
+ return active;
+}
+
+static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static inline void
+ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
@@ -236,6 +302,17 @@ static int effective_prio(const struct i915_request *rq)
int prio = rq_prio(rq);
/*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ /*
* On unwinding the active request, we give it a priority bump
* if it has completed waiting on any semaphore. If we know that
* the request has already started, we can prevent an unwanted
@@ -245,6 +322,7 @@ static int effective_prio(const struct i915_request *rq)
prio |= I915_PRIORITY_NOSEMAPHORE;
/* Restrict mere WAIT boosts from triggering preemption */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
return prio | __NO_PREEMPTION;
}
@@ -271,10 +349,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
{
int last_prio;
- if (!engine->preempt_context)
- return false;
-
- if (i915_request_completed(rq))
+ if (!intel_engine_has_semaphores(engine))
return false;
/*
@@ -288,10 +363,15 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* However, the priority hint is a mere hint that we may need to
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
*/
- last_prio = effective_prio(rq);
- if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
- last_prio))
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
return false;
/*
@@ -338,9 +418,6 @@ __maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
- const struct intel_engine_execlists *execlists =
- &prev->engine->execlists;
-
/*
* Without preemption, the prev may refer to the still active element
* which we refuse to let go.
@@ -348,7 +425,7 @@ assert_priority_queue(const struct i915_request *prev,
* Even with preemption, there are times when we think it is better not
* to preempt and leave an ostensibly lower priority request in flight.
*/
- if (port_request(execlists->port) == prev)
+ if (i915_request_is_active(prev))
return true;
return rq_prio(prev) >= rq_prio(next);
@@ -383,48 +460,496 @@ assert_priority_queue(const struct i915_request *prev,
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
- desc = ctx->desc_template; /* bits 0-11 */
- GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
-
- desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
- /* bits 12-31 */
- GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(engine->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+ desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
/*
* The following 32bits are copied into the OA reports (dword 2).
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
if (INTEL_GEN(engine->i915) >= 11) {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
- /* bits 37-47 */
-
desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
/* bits 48-53 */
- /* TODO: decide what to do with SW counter (bits 55-60) */
-
desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
/* bits 61-63 */
- } else {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
return desc;
}
-static void unwind_wa_tail(struct i915_request *rq)
+static inline unsigned int dword_in_page(void *addr)
+{
+ return offset_in_page(addr) / sizeof(u32);
+}
+
+static void set_offsets(u32 *regs,
+ const u8 *data,
+ const struct intel_engine_cs *engine,
+ bool clear)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+ (((x) >> 2) & 0x7f)
+#define END(x) 0, (x)
{
- rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
- assert_ring_tail_valid(rq->ring, rq->tail);
+ const u32 base = engine->mmio_base;
+
+ while (*data) {
+ u8 count, flags;
+
+ if (*data & BIT(7)) { /* skip */
+ count = *data++ & ~BIT(7);
+ if (clear)
+ memset32(regs, MI_NOOP, count);
+ regs += count;
+ continue;
+ }
+
+ count = *data & 0x3f;
+ flags = *data >> 6;
+ data++;
+
+ *regs = MI_LOAD_REGISTER_IMM(count);
+ if (flags & POSTED)
+ *regs |= MI_LRI_FORCE_POSTED;
+ if (INTEL_GEN(engine->i915) >= 11)
+ *regs |= MI_LRI_CS_MMIO;
+ regs++;
+
+ GEM_BUG_ON(!count);
+ do {
+ u32 offset = 0;
+ u8 v;
+
+ do {
+ v = *data++;
+ offset <<= 7;
+ offset |= v & ~BIT(7);
+ } while (v & BIT(7));
+
+ regs[0] = base + (offset << 2);
+ if (clear)
+ regs[1] = 0;
+ regs += 2;
+ } while (--count);
+ }
+
+ if (clear) {
+ u8 count = *++data;
+
+ /* Clear past the tail for HW access */
+ GEM_BUG_ON(dword_in_page(regs) > count);
+ memset32(regs, MI_NOOP, count - dword_in_page(regs));
+
+ /* Close the batch; used mainly by live_lrc_layout() */
+ *regs = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(engine->i915) >= 10)
+ *regs |= BIT(0);
+ }
+}
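To see how the packed tables below decode, take the opening entries of gen8_xcs_offsets as a worked example (byte values derived from the macros above, shown only for illustration):

	/*
	 * NOP(1)       -> 0x81        : skip one dword of the context image
	 * LRI(11, 0)   -> 0x0b        : emit MI_LOAD_REGISTER_IMM(11), not posted
	 * REG16(0x244) -> 0x81, 0x11  : offset = (1 << 7) | 0x11 = 0x91,
	 *                               register = base + (0x91 << 2) = base + 0x244
	 * REG(0x034)   -> 0x0d        : register = base + (0x0d << 2) = base + 0x034
	 */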
+
+static const u8 gen8_xcs_offsets[] = {
+ NOP(1),
+ LRI(11, 0),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+
+ NOP(9),
+ LRI(9, 0),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(2, 0),
+ REG16(0x200),
+ REG(0x028),
+
+ END(80)
+};
+
+static const u8 gen9_xcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, POSTED),
+ REG16(0x200),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+
+ END(176)
+};
+
+static const u8 gen12_xcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END(80)
+};
+
+static const u8 gen8_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(80)
+};
+
+static const u8 gen9_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x34),
+ REG(0x30),
+ REG(0x38),
+ REG(0x3c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0xc8),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x28),
+ REG(0x9c),
+ REG(0xc0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x68),
+
+ END(176)
+};
+
+static const u8 gen11_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(1, POSTED),
+ REG(0x1b0),
+
+ NOP(10),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(80)
+};
+
+static const u8 gen12_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(80)
+};
+
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
+
+static const u8 *reg_offsets(const struct intel_engine_cs *engine)
+{
+ /*
+ * The gen12+ lists only have the registers we program in the basic
+ * default state. We rely on the context image using relative
+ * addressing to automatically fix up the register state between the
+ * physical engines of a virtual engine.
+ */
+ GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+ !intel_engine_has_relative_mmio(engine));
+
+ if (engine->class == RENDER_CLASS) {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return gen11_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_rcs_offsets;
+ else
+ return gen8_rcs_offsets;
+ } else {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_xcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_xcs_offsets;
+ else
+ return gen8_xcs_offsets;
+ }
}
static struct i915_request *
@@ -439,15 +964,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->active.requests,
sched.link) {
- struct intel_engine_cs *owner;
-
if (i915_request_completed(rq))
- break;
+ continue; /* XXX */
__i915_request_unsubmit(rq);
- unwind_wa_tail(rq);
-
- GEM_BUG_ON(rq->hw_context->inflight);
/*
* Push the request back into the queue for later resubmission.
@@ -456,8 +976,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* engine so that it can be moved across onto another physical
* engine as load dictates.
*/
- owner = rq->hw_context->engine;
- if (likely(owner == engine)) {
+ if (likely(rq->execution_mask == engine->mask)) {
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
@@ -466,8 +985,26 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
active = rq;
} else {
+ struct intel_engine_cs *owner = rq->context->engine;
+
+ /*
+ * Decouple the virtual breadcrumb before moving it
+ * back to the virtual engine -- we don't want the
+ * request to complete in the background and try
+ * and cancel the breadcrumb on the virtual engine
+ * (instead of the old engine where it is linked)!
+ */
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &rq->fence.flags)) {
+ spin_lock_nested(&rq->lock,
+ SINGLE_DEPTH_NESTING);
+ i915_request_cancel_breadcrumb(rq);
+ spin_unlock(&rq->lock);
+ }
rq->engine = owner;
owner->submit_request(rq);
active = NULL;
@@ -500,32 +1037,223 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
-inline void
-execlists_user_begin(struct intel_engine_execlists *execlists,
- const struct execlist_port *port)
+static void intel_engine_context_in(struct intel_engine_cs *engine)
{
- execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ if (engine->stats.active++ == 0)
+ engine->stats.start = ktime_get();
+ GEM_BUG_ON(engine->stats.active == 0);
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
-inline void
-execlists_user_end(struct intel_engine_execlists *execlists)
+static void intel_engine_context_out(struct intel_engine_cs *engine)
{
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ ktime_t last;
+
+ if (engine->stats.active && --engine->stats.active == 0) {
+ /*
+ * Decrement the active context count and in case GPU
+ * is now idle add up to the running total.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.start);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ } else if (engine->stats.active == 0) {
+ /*
+ * After turning on engine stats, context out might be
+ * the first event in which case we account from the
+ * time stats gathering was turned on.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ }
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
-static inline void
-execlists_context_schedule_in(struct i915_request *rq)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
+
+static void
+execlists_check_context(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
- GEM_BUG_ON(rq->hw_context->inflight);
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
+
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
+ }
+
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
+ }
+
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
+
+ WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
+}
+static void restore_default_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state)
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+
+ execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e. other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (i915_request_completed(rq))
+ head = rq->tail;
+ else
+ head = active_request(ce->timeline, rq)->head;
+ ce->ring->head = intel_ring_wrap(ce->ring, head);
+ intel_ring_update_space(ce->ring);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ restore_default_state(ce, engine);
+ __execlists_update_reg_state(ce, engine);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+}
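
reset_active() rewinds the ring to either the tail of the completed request or the head of the first incomplete one, and relies on the ring size being a power of two so the wrap is a single mask (this is what intel_ring_wrap() does). A small sketch of that wrap, under the same power-of-two assumption:

#include <assert.h>
#include <stdint.h>

/* Fold an arbitrary offset back into a power-of-two sized ring. */
static uint32_t ring_wrap(uint32_t ring_size, uint32_t offset)
{
	assert((ring_size & (ring_size - 1)) == 0);     /* power of two */
	return offset & (ring_size - 1);
}

/* Restart point: after a completed request, or at the start of the
 * first incomplete one. */
static uint32_t restart_head(uint32_t ring_size, int completed,
			     uint32_t rq_tail, uint32_t incomplete_head)
{
	return ring_wrap(ring_size, completed ? rq_tail : incomplete_head);
}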
+
+static inline struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->context;
+
+ intel_context_get(ce);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ reset_active(rq, engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ execlists_check_context(ce, engine);
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ ce->lrc_desc |= (u64)ce->tag << 32;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ ce->lrc_desc &= ~GENMASK_ULL(47, 37);
+ ce->lrc_desc |=
+ (u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
+ GEN11_SW_CTX_ID_SHIFT;
+ BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
+ __intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(rq->engine);
- rq->hw_context->inflight = rq->engine;
+ intel_engine_context_in(engine);
+
+ return engine;
}
-static void kick_siblings(struct i915_request *rq)
+static inline struct i915_request *
+execlists_schedule_in(struct i915_request *rq, int idx)
{
- struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine);
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *old;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = READ_ONCE(ce->inflight);
+ do {
+ if (!old) {
+ WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
+ break;
+ }
+ } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+ return i915_request_get(rq);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct i915_request *next = READ_ONCE(ve->request);
if (next && next->execution_mask & ~rq->execution_mask)
@@ -533,32 +1261,85 @@ static void kick_siblings(struct i915_request *rq)
}
static inline void
-execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
+__execlists_schedule_out(struct i915_request *rq,
+ struct intel_engine_cs * const engine)
{
- rq->hw_context->inflight = NULL;
- intel_engine_context_out(rq->engine);
- execlists_context_status_change(rq, status);
- trace_i915_request_out(rq);
+ struct intel_context * const ce = rq->context;
/*
- * If this is part of a virtual engine, its next request may have
- * been blocked waiting for access to the active context. We have
- * to kick all the siblings again in case we need to switch (e.g.
- * the next request is not runnable on this engine). Hopefully,
- * we will already have submitted the next request before the
- * tasklet runs and do not need to rebuild each virtual tree
- * and kick everyone again.
+ * NB: process_csb() is not called under the engine->active.lock, so
+ * schedule_out can race with schedule_in, meaning that we should
+ * refrain from doing non-trivial work here.
*/
- if (rq->engine != rq->hw_context->engine)
- kick_siblings(rq);
+
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ i915_request_completed(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ intel_gt_pm_put_async(engine->gt);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ intel_context_put(ce);
+}
+
+static inline void
+execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *cur, *old;
+
+ trace_i915_request_out(rq);
+
+ old = READ_ONCE(ce->inflight);
+ do
+ cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
+ while (!try_cmpxchg(&ce->inflight, &old, cur));
+ if (!cur)
+ __execlists_schedule_out(rq, old);
+
+ i915_request_put(rq);
}
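
schedule_in/out keep ce->inflight as an engine pointer whose low bits count how many ELSP ports still reference the context; only the port that drops the count to zero performs the real context-out work. A sketch of that pointer-tagging trick, assuming the pointee is at least 4-byte aligned so the two low bits are free (ptr_inc/ptr_dec/ptr_unmask_bits are the driver's equivalents):

#include <stdint.h>

#define TAG_BITS 2
#define TAG_MASK ((uintptr_t)((1u << TAG_BITS) - 1))

/* Pack a small use-count into the low bits of an aligned pointer. */
static void *ptr_pack(void *ptr, unsigned long count)
{
	return (void *)((uintptr_t)ptr | (count & TAG_MASK));
}

static void *ptr_base(void *tagged)             /* strip the count */
{
	return (void *)((uintptr_t)tagged & ~TAG_MASK);
}

static unsigned long ptr_count(void *tagged)    /* read the count */
{
	return (uintptr_t)tagged & TAG_MASK;
}

static void *ptr_inc(void *tagged) { return (void *)((uintptr_t)tagged + 1); }
static void *ptr_dec(void *tagged) { return (void *)((uintptr_t)tagged - 1); }

In the driver the increment/decrement is done with try_cmpxchg(), so concurrent schedule_out calls against the same context resolve without taking a lock.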
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
+ u64 desc = ce->lrc_desc;
+ u32 tail;
- ce->lrc_reg_state[CTX_RING_TAIL + 1] =
- intel_ring_set_tail(rq->ring, rq->tail);
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * just in case we submit an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ */
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
/*
* Make sure the context image is complete before we submit it to HW.
@@ -569,14 +1350,11 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
- *
- * Furthermore, Braswell, at least, wants a full mb to be sure that
- * the writes are coherent in memory (visible to the GPU) prior to
- * execution, and not just visible to other CPUs (as is the result of
- * wmb).
*/
- mb();
- return ce->lrc_desc;
+ wmb();
+
+ ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+ return desc;
}
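
The rework above means execlists_update_context() now detects when the tail it is about to program equals the tail already in the context image and forces a full context restore instead of relying on a lite-restore (WaIdleLiteRestore). The core of that decision, reduced to its arithmetic with simplified types and an illustrative force-restore bit:

#include <stdint.h>

#define FORCE_RESTORE (1ull << 2)       /* illustrative; CTX_DESC_FORCE_RESTORE in i915 */

/* Return the descriptor to write to ELSP, forcing a restore when the
 * RING_TAIL in the context image would not change. */
static uint64_t update_context(uint64_t desc, uint32_t *reg_tail, uint32_t new_tail)
{
	if (*reg_tail == new_tail)
		desc |= FORCE_RESTORE;
	*reg_tail = new_tail;
	return desc;
}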
static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
@@ -590,12 +1368,110 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
}
}
+static __maybe_unused void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ if (!ports[0])
+ return;
+
+ ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
+ ports[0]->fence.context,
+ ports[0]->fence.seqno,
+ i915_request_completed(ports[0]) ? "!" :
+ i915_request_started(ports[0]) ? "*" :
+ "",
+ ports[1] ? ports[1]->fence.context : 0,
+ ports[1] ? ports[1]->fence.seqno : 0);
+}
+
+static __maybe_unused bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("Nothing pending for promotion!\n");
+ return false;
+ }
+
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
+ execlists_num_ports(execlists));
+ return false;
+ }
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ce = rq->context;
+
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
+ continue;
+
+ if (i915_request_completed(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
+ }
+
+ return ce;
+}
+
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
unsigned int n;
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
/*
* We can skip acquiring intel_runtime_pm_get() here as it was taken
* on our behalf by the request (see i915_gem_mark_busy()) and it will
@@ -604,7 +1480,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
- GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
/*
* ELSQ note: the submit queue is not cleared after being submitted
@@ -613,44 +1489,22 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* of elsq entries, keep this in mind before changing the loop below.
*/
for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq;
- unsigned int count;
- u64 desc;
-
- rq = port_unpack(&port[n], &count);
- if (rq) {
- GEM_BUG_ON(count > !n);
- if (!count++)
- execlists_context_schedule_in(rq);
- port_set(&port[n], port_pack(rq, count));
- desc = execlists_update_context(rq);
- GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-
- GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
- engine->name, n,
- port[n].context_id, count,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq),
- rq_prio(rq));
- } else {
- GEM_BUG_ON(!n);
- desc = 0;
- }
+ struct i915_request *rq = execlists->pending[n];
- write_desc(execlists, desc, n);
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
}
/* we need to manually load the submit queue */
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ce->gem_context));
+ intel_context_force_single_submission(ce));
}
static bool can_merge_ctx(const struct intel_context *prev,
@@ -668,110 +1522,35 @@ static bool can_merge_ctx(const struct intel_context *prev,
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
+ GEM_BUG_ON(prev == next);
GEM_BUG_ON(!assert_priority_queue(prev, next));
- if (!can_merge_ctx(prev->hw_context, next->hw_context))
- return false;
-
- return true;
-}
-
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
- GEM_BUG_ON(rq == port_request(port));
-
- if (port_isset(port))
- i915_request_put(port_request(port));
-
- port_set(port, port_pack(i915_request_get(rq), port_count(port)));
-}
-
-static void inject_preempt_context(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce = engine->preempt_context;
- unsigned int n;
-
- GEM_BUG_ON(execlists->preempt_complete_status !=
- upper_32_bits(ce->lrc_desc));
-
/*
- * Switch to our empty preempt context so
- * the state of the GPU is known (idle).
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
*/
- GEM_TRACE("%s\n", engine->name);
- for (n = execlists_num_ports(execlists); --n; )
- write_desc(execlists, 0, n);
-
- write_desc(execlists, ce->lrc_desc, n);
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
-
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
-}
+ if (i915_request_completed(next))
+ return true;
-static void complete_preempt_context(struct intel_engine_execlists *execlists)
-{
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+ if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
+ return false;
- if (inject_preempt_hang(execlists))
- return;
+ if (!can_merge_ctx(prev->context, next->context))
+ return false;
- execlists_cancel_port_requests(execlists);
- __unwind_incomplete_requests(container_of(execlists,
- struct intel_engine_cs,
- execlists));
+ return true;
}
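
The flag test added above uses XOR to detect disagreement: (prev ^ next) & mask is non-zero exactly when the two requests differ in any bit covered by the mask, so a NOPREEMPT or SENTINEL request never shares an ELSP submission with an ordinary one. The same check in isolation, with illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>

#define FLAG_NOPREEMPT (1u << 0)        /* illustrative, not the i915 bit values */
#define FLAG_SENTINEL  (1u << 1)

/* Two requests may only be merged if they agree on both special flags. */
static bool flags_compatible(uint32_t prev_flags, uint32_t next_flags)
{
	return !((prev_flags ^ next_flags) & (FLAG_NOPREEMPT | FLAG_SENTINEL));
}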
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
- u32 base = engine->mmio_base;
-
- /* Must match execlists_init_reg_state()! */
-
- regs[CTX_CONTEXT_CONTROL] =
- i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
- regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
- regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
- regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
- regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));
-
- regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
- regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
- regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
- regs[CTX_SECOND_BB_HEAD_U] =
- i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
- regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
- regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));
-
- regs[CTX_CTX_TIMESTAMP] =
- i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
- regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
- regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
- regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
- regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
- regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
- regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
- regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
- regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
-
- if (engine->class == RENDER_CLASS) {
- regs[CTX_RCS_INDIRECT_CTX] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
- regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
- regs[CTX_BB_PER_CTX_PTR] =
- i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));
-
- regs[CTX_R_PWR_CLK_STATE] =
- i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
- }
+ set_offsets(regs, reg_offsets(engine), engine, false);
}
static bool virtual_matches(const struct virtual_engine *ve,
@@ -792,7 +1571,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
* we reuse the register offsets). This is a very small
* hysteresis on the greedy selection algorithm.
*/
- inflight = READ_ONCE(ve->context.inflight);
+ inflight = intel_context_inflight(&ve->context);
if (inflight && inflight != engine)
return false;
@@ -810,18 +1589,171 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
if (!list_empty(&ve->context.signal_link)) {
list_move_tail(&ve->context.signal_link,
&engine->breadcrumbs.signalers);
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
spin_unlock(&old->breadcrumbs.irq_lock);
}
+static struct i915_request *
+last_active(const struct intel_engine_execlists *execlists)
+{
+ struct i915_request * const *last = READ_ONCE(execlists->active);
+
+ while (*last && i915_request_completed(*last))
+ last++;
+
+ return *last;
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_started(w) &&
+ !i915_request_completed(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
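
defer_request() walks the waiter graph iteratively rather than recursing: the interrupted request goes to the back of its priority list, and every ready waiter on the same engine is queued onto a local list so the whole dependent chain ends up behind it in submission order. A generic sketch of that breadth-first move, using hypothetical array-based nodes in place of the scheduler lists:

#include <stddef.h>

/* Hypothetical, simplified request node; the driver walks
 * i915_sched_node lists instead of arrays. */
struct node {
	struct node **waiters;
	size_t nr_waiters;
	int deferred;
};

#define MAX_DEFER 64

static void defer_chain(struct node *rq, void (*move_to_tail)(struct node *))
{
	struct node *queue[MAX_DEFER];
	size_t head = 0, tail = 0;

	queue[tail++] = rq;
	rq->deferred = 1;

	while (head < tail) {
		struct node *n = queue[head++];
		size_t i;

		move_to_tail(n);        /* list_move_tail() in the driver */

		for (i = 0; i < n->nr_waiters && tail < MAX_DEFER; i++) {
			struct node *w = n->waiters[i];

			if (!w->deferred) {     /* visit each waiter once */
				w->deferred = 1;
				queue[tail++] = w;
			}
		}
	}
}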
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ int hint;
+
+ if (!intel_engine_has_timeslices(engine))
+ return false;
+
+ if (list_is_last(&rq->sched.link, &engine->active.requests))
+ return false;
+
+ hint = max(rq_prio(list_next_entry(rq, sched.link)),
+ engine->execlists.queue_priority_hint);
+
+ return hint >= effective_prio(rq);
+}
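
need_timeslice() only arms the timer when a competitor of at least the running request's effective priority is waiting, taking the higher of the next request on this engine and the queue's priority hint. The same decision in isolation, assuming plain integer priorities:

#include <stdbool.h>

static bool want_timeslice(bool has_timeslices, bool is_last_request,
			   int next_prio, int queue_hint, int current_prio)
{
	int hint;

	if (!has_timeslices)
		return false;
	if (is_last_request)    /* nothing else queued behind us */
		return false;

	hint = next_prio > queue_hint ? next_prio : queue_hint;
	return hint >= current_prio;    /* an equal-or-higher rival is waiting */
}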
+
+static int
+switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ if (list_is_last(&rq->sched.link, &engine->active.requests))
+ return INT_MIN;
+
+ return rq_prio(list_next_entry(rq, sched.link));
+}
+
+static inline unsigned long
+timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static unsigned long
+active_timeslice(const struct intel_engine_cs *engine)
+{
+ const struct i915_request *rq = *engine->execlists.active;
+
+ if (!rq || i915_request_completed(rq))
+ return 0;
+
+ if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ return 0;
+
+ return timeslice(engine);
+}
+
+static void set_timeslice(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = last_active(&engine->execlists);
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(intel_context_is_banned(rq->context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine));
+}
+
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct i915_request *last = port_request(port);
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -867,65 +1799,100 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
break;
}
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ */
+ last = last_active(execlists);
if (last) {
- /*
- * Don't resubmit or switch until all outstanding
- * preemptions (lite-restore) are seen. Then we
- * know the next preemption status we see corresponds
- * to this ELSP update.
- */
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(!port_count(&port[0]));
+ if (need_preempt(engine, last, rb)) {
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
- /*
- * If we write to ELSP a second time before the HW has had
- * a chance to respond to the previous write, we can confuse
- * the HW and hit "undefined behaviour". After writing to ELSP,
- * we must then wait until we see a context-switch event from
- * the HW to indicate that it has had a chance to respond.
- */
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- return;
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
- if (need_preempt(engine, last, rb)) {
- inject_preempt_context(engine);
- return;
- }
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
- /*
- * In theory, we could coalesce more requests onto
- * the second port (the first port is active, with
- * no preemptions pending). However, that means we
- * then have to deal with the possible lite-restore
- * of the second port (as we submit the ELSP, there
- * may be a context-switch) but also we may complete
- * the resubmission before the context-switch. Ergo,
- * coalescing onto the second port will cause a
- * preemption event, but we cannot predict whether
- * that will affect port[0] or port[1].
- *
- * If the second port is already active, we can wait
- * until the next context-switch before contemplating
- * new requests. The GPU will be busy and we should be
- * able to resubmit the new ELSP before it idles,
- * avoiding pipeline bubbles (momentary pauses where
- * the driver is unable to keep up the supply of new
- * work). However, we have to double check that the
- * priorities of the ports haven't been switch.
- */
- if (port_count(&port[1]))
- return;
+ /*
+ * If we need to return to the preempted context, we
+ * need to skip the lite-restore and force it to
+ * reload the RING_TAIL. Otherwise, the HW has a
+ * tendency to ignore us rewinding the TAIL to the
+ * end of an earlier request.
+ */
+ last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ last = NULL;
+ } else if (need_timeslice(engine, last) &&
+ timer_expired(&engine->execlists.timer)) {
+ ENGINE_TRACE(engine,
+ "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+
+ ring_set_paused(engine, 1);
+ defer_active(engine);
- /*
- * WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent
- * ring:HEAD == rq:TAIL as we resubmit the
- * request. See gen8_emit_fini_breadcrumb() for
- * where we prepare the padding after the
- * end of the request.
- */
- last->tail = last->wa_tail;
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (!list_is_last(&last->sched.link,
+ &engine->active.requests)) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ if (!execlists->timer.expires &&
+ need_timeslice(engine, last))
+ set_timer_ms(&execlists->timer,
+ timeslice(engine));
+
+ return;
+ }
+ }
}
while (rb) { /* XXX virtual is always taking precedence */
@@ -946,7 +1913,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(rq != ve->request);
GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->hw_context != &ve->context);
+ GEM_BUG_ON(rq->context != &ve->context);
if (rq_prio(rq) >= queue_prio(execlists)) {
if (!virtual_matches(ve, rq, engine)) {
@@ -957,17 +1924,17 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this rq for another engine */
+ return; /* leave this for another */
}
- GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
ve->request = NULL;
ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -982,7 +1949,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
unsigned int n;
GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- virtual_update_register_offsets(regs, engine);
+
+ if (!intel_engine_has_relative_mmio(engine))
+ virtual_update_register_offsets(regs,
+ engine);
if (!list_empty(&ve->context.signals))
virtual_xfer_breadcrumbs(ve, engine);
@@ -1005,10 +1975,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(ve->siblings[0] != engine);
}
- __i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
- submit = true;
- last = rq;
+ if (__i915_request_submit(rq)) {
+ submit = true;
+ last = rq;
+ }
+ i915_request_put(rq);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (!submit) {
+ spin_unlock(&ve->base.active.lock);
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
}
spin_unlock(&ve->base.active.lock);
@@ -1021,6 +2005,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
+ bool merge = true;
+
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -1046,7 +2032,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* same LRCA, i.e. we must submit 2 different
* contexts if we submit 2 ELSP.
*/
- if (last->hw_context == rq->hw_context)
+ if (last->context == rq->context)
+ goto done;
+
+ if (i915_request_has_sentinel(last))
goto done;
/*
@@ -1056,23 +2045,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context))
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
goto done;
-
- if (submit)
- port_assign(port, last);
- port++;
-
- GEM_BUG_ON(port_isset(port));
+ merge = false;
}
- __i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ port++;
+ last = NULL;
+ }
- last = rq;
- submit = true;
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->context,
+ rq->context));
+
+ submit = true;
+ last = rq;
+ }
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -1099,52 +2092,47 @@ done:
execlists->queue_priority_hint = queue_prio(execlists);
if (submit) {
- port_assign(port, last);
- execlists_submit_ports(engine);
- }
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ execlists->switch_priority_hint =
+ switch_prio(engine, *execlists->pending);
- /* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
- !port_isset(execlists->port));
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(execlists->active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
- /* Re-evaluate the executing context setup after each preemptive kick */
- if (last)
- execlists_user_begin(execlists, execlists->port);
+ goto skip_submit;
+ }
+ clear_ports(port + 1, last_port - port);
- /* If the engine is now idle, so should be the flag; and vice versa. */
- GEM_BUG_ON(execlists_is_active(&engine->execlists,
- EXECLISTS_ACTIVE_USER) ==
- !port_isset(engine->execlists.port));
+ execlists_submit_ports(engine);
+ set_preempt_timeout(engine);
+ } else {
+skip_submit:
+ ring_set_paused(engine, 0);
+ }
}
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
{
- struct execlist_port *port = execlists->port;
- unsigned int num_ports = execlists_num_ports(execlists);
+ struct i915_request * const *port;
- while (num_ports-- && port_isset(port)) {
- struct i915_request *rq = port_request(port);
+ for (port = execlists->pending; *port; port++)
+ execlists_schedule_out(*port);
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
- GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
- rq->engine->name,
- (unsigned int)(port - execlists->port),
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ execlists_schedule_out(*port);
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
- GEM_BUG_ON(!execlists->active);
- execlists_context_schedule_out(rq,
- i915_request_completed(rq) ?
- INTEL_CONTEXT_SCHEDULE_OUT :
- INTEL_CONTEXT_SCHEDULE_PREEMPTED);
-
- i915_request_put(rq);
-
- memset(port, 0, sizeof(*port));
- port++;
- }
-
- execlists_clear_all_active(execlists);
+ WRITE_ONCE(execlists->active, execlists->inflight);
}
static inline void
@@ -1160,16 +2148,83 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static inline bool
+gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+ u32 lower_dw = csb[0];
+ u32 upper_dw = csb[1];
+ bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw);
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
+ bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!ctx_to_valid);
+ return true;
+ }
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
+ return false;
+}
+
+static inline bool
+gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+ return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
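
Given the Gen12 layout documented above, the two status dwords decode with plain shifts and masks. A sketch of extracting the fields the driver cares about (mask values derived from the bit positions in the comment, not copied from i915_reg.h):

#include <stdbool.h>
#include <stdint.h>

struct gen12_csb {
	bool switched_to_new_queue;     /* bit 0 */
	uint32_t to_ctx_id;             /* bits 15-25 */
	uint32_t to_ctx_counter;        /* bits 26-31 */
	uint32_t switch_detail;         /* bits 32-35 */
	uint32_t away_ctx_id;           /* bits 47-57 */
};

static struct gen12_csb decode_gen12_csb(uint32_t lo, uint32_t hi)
{
	struct gen12_csb s;

	s.switched_to_new_queue = lo & 0x1;
	s.to_ctx_id      = (lo >> 15) & 0x7ff;  /* 11 bits */
	s.to_ctx_counter = (lo >> 26) & 0x3f;   /* 6 bits */
	s.switch_detail  = hi & 0xf;            /* bits 32-35 of the qword */
	s.away_ctx_id    = (hi >> 15) & 0x7ff;  /* bits 47-57 */
	return s;
}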
+
static void process_csb(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
const u32 * const buf = execlists->csb_status;
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- lockdep_assert_held(&engine->active.lock);
- GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that only happens inside the reset paths, and so is
+ * serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1183,7 +2238,7 @@ static void process_csb(struct intel_engine_cs *engine)
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
@@ -1198,9 +2253,7 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
- struct i915_request *rq;
- unsigned int status;
- unsigned int count;
+ bool promote;
if (++head == num_entries)
head = 0;
@@ -1223,68 +2276,41 @@ static void process_csb(struct intel_engine_cs *engine)
* status notifier.
*/
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
- engine->name, head,
- buf[2 * head + 0], buf[2 * head + 1],
- execlists->active);
-
- status = buf[2 * head];
- if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
- GEN8_CTX_STATUS_PREEMPTED))
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
-
- if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
- continue;
-
- /* We should never get a COMPLETED | IDLE_ACTIVE! */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
-
- if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == execlists->preempt_complete_status) {
- GEM_TRACE("%s preempt-idle\n", engine->name);
- complete_preempt_context(execlists);
- continue;
- }
-
- if (status & GEN8_CTX_STATUS_PREEMPTED &&
- execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT))
- continue;
-
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
-
- rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
- engine->name,
- port->context_id, count,
- rq ? rq->fence.context : 0,
- rq ? rq->fence.seqno : 0,
- rq ? hwsp_seqno(rq) : 0,
- rq ? rq_prio(rq) : 0);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, buf[2 * head + 0], buf[2 * head + 1]);
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+ if (INTEL_GEN(engine->i915) >= 12)
+ promote = gen12_csb_parse(execlists, buf + 2 * head);
+ else
+ promote = gen8_csb_parse(execlists, buf + 2 * head);
+ if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+
+ if (!inject_preempt_hang(execlists))
+ ring_set_paused(engine, 0);
+
+ /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ execlists_schedule_out(*old++);
+
+ /* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ WRITE_ONCE(execlists->active,
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending)));
+
+ WRITE_ONCE(execlists->pending[0], NULL);
+ } else {
+ GEM_BUG_ON(!*execlists->active);
- GEM_BUG_ON(count == 0);
- if (--count == 0) {
- /*
- * On the final event corresponding to the
- * submission of this context, we expect either
- * an element-switch event or a completion
- * event (and on completion, the active-idle
- * marker). No more preemptions, lite-restore
- * or otherwise.
- */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+ /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
/*
* We rely on the hardware being strongly
@@ -1292,26 +2318,17 @@ static void process_csb(struct intel_engine_cs *engine)
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
- GEM_BUG_ON(!i915_request_completed(rq));
-
- execlists_context_schedule_out(rq,
- INTEL_CONTEXT_SCHEDULE_OUT);
- i915_request_put(rq);
+ GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
+ !reset_in_progress(execlists));
+ execlists_schedule_out(*execlists->active++);
- GEM_TRACE("%s completed ctx=%d\n",
- engine->name, port->context_id);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port))
- execlists_user_begin(execlists, port);
- else
- execlists_user_end(execlists);
- } else {
- port_set(port, port_pack(rq, count));
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
}
} while (head != tail);
execlists->csb_head = head;
+ set_timeslice(engine);
/*
* Gen11 has proven to fail wrt global observation point between
@@ -1330,10 +2347,356 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
-
- process_csb(engine);
- if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
+ if (!engine->execlists.pending[0]) {
+ rcu_read_lock(); /* protect peeking at execlists->active */
execlists_dequeue(engine);
+ rcu_read_unlock();
+ }
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ RQ_TRACE(rq, "on hold\n");
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (i915_request_completed(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ if (i915_request_completed(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ if (rq->engine != engine) { /* preempted virtual engine */
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+
+ /*
+ * intel_context_inflight() is only protected by virtue
+ * of process_csb() being called only by the tasklet (or
+ * directly from inside reset while the tasklet is suspended).
+ * Assert that neither of those are allowed to run while we
+ * poke at the request queues.
+ */
+ GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+
+ /*
+ * An unsubmitted request along a virtual engine will
+ * remain on the active (this) engine until we are able
+ * to process the context switch away (and so mark the
+ * context as no longer in flight). That cannot have happened
+ * yet, otherwise we would not be hanging!
+ */
+ spin_lock(&ve->base.active.lock);
+ GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
+ GEM_BUG_ON(ve->request != rq);
+ ve->request = NULL;
+ spin_unlock(&ve->base.active.lock);
+ i915_request_put(rq);
+
+ rq->engine = engine;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ if (i915_request_on_hold(s))
+ return true;
+ }
+
+ return false;
+}
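
hold_request() encodes the invariant that a request may only leave the hold list once none of its same-engine ancestors remain held, otherwise it would overtake them. The rule in isolation, with a hypothetical simplified request type:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, flattened view of a request's signalers. */
struct req {
	int engine_id;
	bool on_hold;
	struct req **signalers;
	size_t nr_signalers;
};

/* A request must stay on hold while any same-engine ancestor is held. */
static bool must_stay_on_hold(const struct req *rq)
{
	size_t i;

	for (i = 0; i < rq->nr_signalers; i++) {
		const struct req *s = rq->signalers[i];

		if (s->engine_id == rq->engine_id && s->on_hold)
			return true;
	}
	return false;
}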
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ RQ_TRACE(rq, "hold release\n");
+
+ /* Also release any children on this engine that are ready */
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static bool execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return true;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return true;
+
+ cap->rq = execlists_active(&engine->execlists);
+ GEM_BUG_ON(!cap->rq);
+
+ rcu_read_lock();
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ rcu_read_unlock();
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as guilty did
+ * in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return true;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+ return false;
+}
+
+static noinline void preempt_reset(struct intel_engine_cs *engine)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (i915_modparams.reset < 3)
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ if (execlists_capture(engine))
+ intel_engine_reset(engine, "preemption time out");
+ else
+ ring_set_paused(engine, 0);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return READ_ONCE(engine->execlists.pending[0]);
}
/*
@@ -1343,24 +2706,48 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- unsigned long flags;
+ bool timeout = preempt_timeout(engine);
+
+ process_csb(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
+ unsigned long flags;
- GEM_TRACE("%s awake?=%d, active=%x\n",
- engine->name,
- !!intel_wakeref_active(&engine->wakeref),
- engine->execlists.active);
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __execlists_submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ /* Recheck after serialising with direct-submission */
+ if (timeout && preempt_timeout(engine))
+ preempt_reset(engine);
+ }
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
}
static void queue_request(struct intel_engine_cs *engine,
- struct i915_sched_node *node,
- int prio)
+ struct i915_request *rq)
{
- GEM_BUG_ON(!list_empty(&node->link));
- list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1376,12 +2763,23 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
tasklet_hi_schedule(&execlists->tasklet);
}
-static void submit_queue(struct intel_engine_cs *engine, int prio)
+static void submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- if (prio > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = prio;
- __submit_queue_imm(engine);
- }
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ __submit_queue_imm(engine);
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
}
static void execlists_submit_request(struct i915_request *request)
@@ -1392,12 +2790,17 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
- queue_request(engine, &request->sched, rq_prio(request));
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
- submit_queue(engine, rq_prio(request));
+ submit_queue(engine, request);
+ }
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -1405,9 +2808,7 @@ static void execlists_submit_request(struct i915_request *request)
static void __execlists_context_fini(struct intel_context *ce)
{
intel_ring_put(ce->ring);
-
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void execlists_context_destroy(struct kref *kref)
@@ -1420,18 +2821,46 @@ static void execlists_context_destroy(struct kref *kref)
if (ce->state)
__execlists_context_fini(ce);
+ intel_context_fini(ce);
intel_context_free(ce);
}
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += engine->context_size;
+
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
+}
+
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += engine->context_size;
+
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+ dev_err_once(engine->i915->drm.dev,
+ "%s context redzone overwritten!\n",
+ engine->name);
+}
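
set_redzone()/check_redzone() poison the page immediately after the context image and complain if anything scribbles past the end. A userspace sketch of the same guard, with memchr_inv() open-coded and an illustrative poison byte:

#include <stdio.h>
#include <string.h>

#define REDZONE_BYTE 0xdf       /* illustrative stand-in for CONTEXT_REDZONE */

static void set_redzone(unsigned char *buf, size_t payload, size_t guard)
{
	memset(buf + payload, REDZONE_BYTE, guard);
}

/* Return 0 if the guard is intact, -1 if it was overwritten. */
static int check_redzone(const unsigned char *buf, size_t payload, size_t guard)
{
	size_t i;

	for (i = 0; i < guard; i++) {   /* what memchr_inv() does in the kernel */
		if (buf[payload + i] != REDZONE_BYTE) {
			fprintf(stderr, "redzone overwritten at +%zu\n", i);
			return -1;
		}
	}
	return 0;
}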
+
static void execlists_context_unpin(struct intel_context *ce)
{
- i915_gem_context_unpin_hw_id(ce->gem_context);
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
+ ce->engine);
+
i915_gem_object_unpin_map(ce->state->obj);
}
static void
-__execlists_update_reg_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -1439,14 +2868,17 @@ __execlists_update_reg_state(struct intel_context *ce,
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
- regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
- regs[CTX_RING_HEAD + 1] = ring->head;
- regs[CTX_RING_TAIL + 1] = ring->tail;
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_HEAD] = ring->head;
+ regs[CTX_RING_TAIL] = ring->tail;
/* RPCS */
- if (engine->class == RENDER_CLASS)
- regs[CTX_R_PWR_CLK_STATE + 1] =
+ if (engine->class == RENDER_CLASS) {
+ regs[CTX_R_PWR_CLK_STATE] =
intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+
+ i915_oa_init_reg_state(ce, engine);
+ }
}
static int
@@ -1454,46 +2886,21 @@ __execlists_context_pin(struct intel_context *ce,
struct intel_engine_cs *engine)
{
void *vaddr;
- int ret;
-
- GEM_BUG_ON(!ce->gem_context->vm);
- ret = execlists_context_deferred_alloc(ce, engine);
- if (ret)
- goto err;
GEM_BUG_ON(!ce->state);
-
- ret = intel_context_active_acquire(ce,
- engine->i915->ggtt.pin_bias |
- PIN_OFFSET_BIAS |
- PIN_HIGH);
- if (ret)
- goto err;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto unpin_active;
- }
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
- ret = i915_gem_context_pin_hw_id(ce->gem_context);
- if (ret)
- goto unpin_map;
-
- ce->lrc_desc = lrc_descriptor(ce, engine);
+ ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
return 0;
-
-unpin_map:
- i915_gem_object_unpin_map(ce->state->obj);
-unpin_active:
- intel_context_active_release(ce);
-err:
- return ret;
}
static int execlists_context_pin(struct intel_context *ce)
@@ -1501,8 +2908,16 @@ static int execlists_context_pin(struct intel_context *ce)
return __execlists_context_pin(ce, ce->engine);
}
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return __execlists_context_alloc(ce, ce->engine);
+}
+
static void execlists_context_reset(struct intel_context *ce)
{
+ CE_TRACE(ce, "reset\n");
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* Because we emit WA_TAIL_DWORDS there may be a disparity
* between our bookkeeping in ce->ring->head and ce->ring->tail and
@@ -1519,11 +2934,19 @@ static void execlists_context_reset(struct intel_context *ce)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- intel_ring_reset(ce->ring, 0);
+ intel_ring_reset(ce->ring, ce->ring->emit);
+
+ /* Scrub away the garbage */
+ execlists_init_reg_state(ce->lrc_reg_state,
+ ce, ce->engine, ce->ring, true);
__execlists_update_reg_state(ce, ce->engine);
+
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
static const struct intel_context_ops execlists_context_ops = {
+ .alloc = execlists_context_alloc,
+
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
@@ -1538,7 +2961,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1554,7 +2977,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = MI_NOOP;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_timeline(rq)->hwsp_offset;
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
@@ -1566,66 +2989,11 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
return 0;
}
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt =
- i915_vm_to_ppgtt(rq->gem_context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- /* Be doubly sure the LRI have landed before proceeding */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Re-invalidate the TLB for luck */
- return engine->emit_flush(rq, EMIT_INVALIDATE);
-}
-
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1643,10 +3011,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->gem_context->vm))
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- else
- ret = emit_pdps(request);
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
@@ -1676,7 +3041,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
/* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_scratch_offset(engine->i915) + 256;
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1690,7 +3056,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_scratch_offset(engine->i915) + 256;
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
return batch;
@@ -1724,11 +3091,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* Actual scratch location is at 128 bytes offset */
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_scratch_offset(engine->i915) +
- 2 * CACHELINE_BYTES);
+ LRC_PPHWSP_SCRATCH_ADDR);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1794,6 +3160,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
@@ -1874,7 +3248,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1914,6 +3288,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return 0;
switch (INTEL_GEN(engine->i915)) {
+ case 12:
case 11:
return 0;
case 10:
@@ -1970,30 +3345,33 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
static void enable_execlists(struct intel_engine_cs *engine)
{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (INTEL_GEN(engine->i915) >= 11)
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
- ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
- ENGINE_WRITE(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ engine->context_tag = 0;
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
bool unexpected = false;
- if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) {
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -2026,8 +3404,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
- GEM_TRACE("%s: depth<-%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
/*
* Prevent request submission to the hardware until we have
@@ -2041,34 +3419,32 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
- intel_engine_stop_cs(engine);
-
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->active.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static bool lrc_regs_ok(const struct i915_request *rq)
-{
- const struct intel_ring *ring = rq->ring;
- const u32 *regs = rq->hw_context->lrc_reg_state;
-
- /* Quick spot check for the common signs of context corruption */
-
- if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
- (RING_CTL_SIZE(ring->size) | RING_VALID))
- return false;
-
- if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
- return false;
- return true;
+ /*
+ * We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Even a gpu as modern as kbl can suffer
+ * from a system hang if a batchbuffer is progressing when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus we assume it is best to stop the engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ intel_engine_stop_cs(engine);
}
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+static void reset_csb_pointers(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
const unsigned int reset_value = execlists->csb_size - 1;
+ ring_set_paused(engine, 0);
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -2082,27 +3458,35 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
WRITE_ONCE(*execlists->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
invalidate_csb_entries(&execlists->csb_status[0],
&execlists->csb_status[reset_value]);
}
-static struct i915_request *active_request(struct i915_request *rq)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- const struct list_head * const list = &rq->engine->active.requests;
- const struct intel_context * const context = rq->hw_context;
- struct i915_request *active = NULL;
-
- list_for_each_entry_from_reverse(rq, list, sched.link) {
- if (i915_request_completed(rq))
- break;
+ int x;
- if (rq->hw_context != context)
- break;
-
- active = rq;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
+}
- return active;
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ __reset_stop_ring(regs, engine);
}
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2110,38 +3494,42 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
struct i915_request *rq;
- u32 *regs;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
process_csb(engine); /* drain preemption events */
/* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(&engine->execlists);
+ reset_csb_pointers(engine);
/*
* Save the currently executing context, even if we completed
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
- if (!port_isset(execlists->port))
- goto out_clear;
+ rq = execlists_active(execlists);
+ if (!rq)
+ goto unwind;
- rq = port_request(execlists->port);
- ce = rq->hw_context;
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- /*
- * Catch up with any missed context-switch interrupts.
- *
- * Ideally we would just read the remaining CSB entries now that we
- * know the gpu is idle. However, the CSB registers are sometimes^W
- * often trashed across a GPU reset! Instead we have to rely on
- * guessing the missed context-switch events by looking at what
- * requests were completed.
- */
- execlists_cancel_port_requests(execlists);
+ ce = rq->context;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- rq = active_request(rq);
- if (!rq)
+ if (i915_request_completed(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
+ }
+
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ rq = active_request(ce->timeline, rq);
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(ce->ring->head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -2155,7 +3543,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* Otherwise, if we have not started yet, the request should replay
* perfectly and we do not need to flag the result as being erroneous.
*/
- if (!i915_request_started(rq) && lrc_regs_ok(rq))
+ if (!i915_request_started(rq))
goto out_replay;
/*
@@ -2169,8 +3557,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- i915_reset_request(rq, stalled);
- if (!stalled && lrc_regs_ok(rq))
+ __i915_request_reset(rq, stalled);
+ if (!stalled)
goto out_replay;
/*
@@ -2181,33 +3569,28 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = ce->lrc_reg_state;
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ restore_default_state(ce, engine);
out_replay:
- /* Rerun the request; its payload has been neutered (if guilty). */
- ce->ring->head =
- rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
+ __execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
+unwind:
/* Push back any incomplete requests for replay after the reset. */
+ cancel_port_requests(execlists);
__unwind_incomplete_requests(engine);
-
-out_clear:
- execlists_clear_all_active(execlists);
}
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
spin_lock_irqsave(&engine->active.lock, flags);
@@ -2221,14 +3604,14 @@ static void nop_submission_tasklet(unsigned long data)
/* The driver is wedged; don't process any more events. */
}
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
@@ -2249,12 +3632,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ mark_eio(rq);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2262,16 +3641,18 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- list_del_init(&rq->sched.link);
+ mark_eio(rq);
__i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
i915_priolist_free(p);
}
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ mark_eio(rq);
+
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =
@@ -2281,13 +3662,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.active.lock);
- if (ve->request) {
- ve->request->engine = engine;
- __i915_request_submit(ve->request);
- dma_fence_set_error(&ve->request->fence, -EIO);
- i915_request_mark_complete(ve->request);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
}
spin_unlock(&ve->base.active.lock);
}
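
The mark_eio() helper used above is introduced elsewhere in this patch; judging from the open-coded sequence it replaces, it presumably amounts to the following sketch (illustrative only):

static void mark_eio(struct i915_request *rq)
{
	/* Matches the removed open-coded cancellation above */
	if (!i915_request_signaled(rq))
		dma_fence_set_error(&rq->fence, -EIO);
	i915_request_mark_complete(rq);
}
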
@@ -2296,7 +3679,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.func = nop_submission_tasklet;
@@ -2320,13 +3702,13 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
if (__tasklet_enable(&execlists->tasklet))
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
+static int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
{
u32 *cs;
@@ -2360,7 +3742,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
return 0;
}
-static int gen9_emit_bb_start(struct i915_request *rq,
+static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
@@ -2421,7 +3803,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
}
*cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
intel_ring_advance(request, cs);
@@ -2432,9 +3814,6 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- u32 scratch_addr =
- i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2456,7 +3835,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
/*
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
@@ -2489,7 +3868,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (dc_flush_wa)
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
@@ -2499,6 +3878,150 @@ static int gen8_emit_flush_render(struct i915_request *request,
return 0;
}
+static int gen11_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static int gen12_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(request, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ *cs++ = preparser_disable(false);
+ intel_ring_advance(request, cs);
+
+ /*
+ * Wa_1604544889:tgl
+ */
+ if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ flags = 0;
+ flags |= PIPE_CONTROL_CS_STALL;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags,
+ LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+ }
+
+ return 0;
+}
+
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -2514,15 +4037,28 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
return cs;
}
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- request->timeline->hwsp_offset,
- 0);
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+ return cs;
+}
+
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
+ u32 *cs)
+{
*cs++ = MI_USER_INTERRUPT;
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = emit_preempt_busywait(request, cs);
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2530,22 +4066,92 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ 0);
+
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
+}
+
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
+}
+
+static u32 *
+gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE);
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL,
- 0);
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+{
*cs++ = MI_USER_INTERRUPT;
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = gen12_emit_preempt_busywait(request, cs);
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
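
A minimal sketch of the advice in the note above for gen12+ self-modifying code: bracket the memory writes with the pre-parser disable bit carried in MI_ARB_CHECK (the same encoding preparser_disable() uses). The helper name and write payload are illustrative, not part of the patch.

static u32 *emit_self_modifying_write(u32 *cs, u32 gtt_offset, u32 value)
{
	*cs++ = MI_ARB_CHECK | BIT(8) | 1;	/* pre-parser off */

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = gtt_offset;			/* lower address */
	*cs++ = 0;				/* upper address */
	*cs++ = value;

	*cs++ = MI_ARB_CHECK | BIT(8) | 0;	/* pre-parser back on */

	return cs;
}
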
@@ -2553,57 +4159,85 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
-static int gen8_init_rcs_context(struct i915_request *rq)
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
- int ret;
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ 0);
- ret = intel_engine_emit_ctx_wa(rq);
- if (ret)
- return ret;
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
- ret = intel_rcs_context_init_mocs(rq);
- /*
- * Failing to program the MOCS is non-fatal. The system will not
- * run at peak performance. So generate an error and carry on.
- */
- if (ret)
- DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+static u32 *
+gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_HDC_PIPELINE_FLUSH);
- return i915_gem_render_state_emit(rq);
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
}
static void execlists_park(struct intel_engine_cs *engine)
{
- intel_engine_park(engine);
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
- engine->cancel_requests = execlists_cancel_requests;
engine->schedule = i915_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
- engine->reset.reset = execlists_reset;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
engine->reset.finish = execlists_reset_finish;
engine->park = execlists_park;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915))
+ if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (engine->preempt_context &&
- HAS_LOGICAL_RING_PREEMPTION(engine->i915))
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ }
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
}
-static void execlists_destroy(struct intel_engine_cs *engine)
+static void execlists_release(struct intel_engine_cs *engine)
{
+ execlists_shutdown(engine);
+
intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx(engine);
- kfree(engine);
}
static void
@@ -2611,19 +4245,16 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
- engine->destroy = execlists_destroy;
engine->resume = execlists_resume;
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.reset = execlists_reset;
- engine->reset.finish = execlists_reset_finish;
-
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
engine->set_default_submission = intel_execlists_set_default_submission;
@@ -2638,10 +4269,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
- if (IS_GEN(engine->i915, 8))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen9_emit_bb_start;
}
static inline void
@@ -2665,40 +4292,41 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+static void rcs_submission_override(struct intel_engine_cs *engine)
{
- /* Intentionally left blank. */
- engine->buffer = NULL;
-
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
-
- logical_ring_default_vfuncs(engine);
- logical_ring_default_irqs(engine);
-
- if (engine->class == RENDER_CLASS) {
- engine->init_context = gen8_init_rcs_context;
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
engine->emit_flush = gen8_emit_flush_render;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
}
-
- return 0;
}
-int intel_execlists_submission_init(struct intel_engine_cs *engine)
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
- int ret;
- ret = intel_engine_init_common(engine);
- if (ret)
- return ret;
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
- intel_engine_init_workarounds(engine);
- intel_engine_init_whitelist(engine);
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
if (intel_init_workaround_bb(engine))
/*
@@ -2718,11 +4346,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
i915_mmio_reg_offset(RING_ELSP(base));
}
- execlists->preempt_complete_status = ~0u;
- if (engine->preempt_context)
- execlists->preempt_complete_status =
- upper_32_bits(engine->preempt_context->lrc_desc);
-
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2734,12 +4357,15 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
else
execlists->csb_size = GEN11_CSB_ENTRIES;
- reset_csb_pointers(execlists);
+ reset_csb_pointers(engine);
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = execlists_release;
return 0;
}
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
+static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
@@ -2747,6 +4373,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
default:
MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
+ case 12:
+ indirect_ctx_offset =
+ GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
case 11:
indirect_ctx_offset =
GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -2768,86 +4398,53 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
return indirect_ctx_offset;
}
-static void execlists_init_reg_state(u32 *regs,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
- bool rcs = engine->class == RENDER_CLASS;
- u32 base = engine->mmio_base;
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
- MI_LRI_FORCE_POSTED;
+static void init_common_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool inhibit)
+{
+ u32 ctl;
+
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ if (INTEL_GEN(engine->i915) < 11)
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
+
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+}
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(engine->i915) < 11) {
- regs[CTX_CONTEXT_CONTROL + 1] |=
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
- }
- CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
- CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
- RING_CTL_SIZE(ring->size) | RING_VALID);
- CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
- CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
- if (rcs) {
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
- RING_INDIRECT_CTX_OFFSET(base), 0);
- if (wa_ctx->indirect_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- regs[CTX_RCS_INDIRECT_CTX + 1] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset) |
- (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
- regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
- intel_lr_indirect_ctx_offset(engine) << 6;
- }
+static void init_wa_bb_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ u32 pos_bb_per_ctx)
+{
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
- CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
- if (wa_ctx->per_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- regs[CTX_BB_PER_CTX_PTR + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
+ regs[pos_bb_per_ctx] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+ if (wa_ctx->indirect_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
- /* PDP values well be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+ regs[pos_bb_per_ctx + 2] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
+ regs[pos_bb_per_ctx + 4] =
+ intel_lr_indirect_ctx_offset(engine) << 6;
+ }
+}
+
+static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
+{
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
@@ -2860,17 +4457,43 @@ static void execlists_init_reg_state(u32 *regs,
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
+}
- if (rcs) {
- regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
+}
- i915_oa_init_reg_state(engine, ce, regs);
- }
+static void execlists_init_reg_state(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool inhibit)
+{
+ /*
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
+ */
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
- regs[CTX_END] |= BIT(0);
+ init_common_reg_state(regs, engine, ring, inhibit);
+ init_ppgtt_reg_state(regs, vm_alias(ce->vm));
+
+ init_wa_bb_reg_state(regs, engine,
+ INTEL_GEN(engine->i915) >= 12 ?
+ GEN12_CTX_BB_PER_CTX_PTR :
+ CTX_BB_PER_CTX_PTR);
+
+ __reset_stop_ring(regs, engine);
}
static int
@@ -2879,8 +4502,8 @@ populate_lr_context(struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
+ bool inhibit = true;
void *vaddr;
- u32 *regs;
int ret;
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
@@ -2890,13 +4513,9 @@ populate_lr_context(struct intel_context *ce,
return ret;
}
+ set_redzone(vaddr, engine);
+
if (engine->default_state) {
- /*
- * We only want to copy over the template context state;
- * skipping over the headers reserved for GuC communication,
- * leaving those as zero.
- */
- const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
void *defaults;
defaults = i915_gem_object_pin_map(engine->default_state,
@@ -2906,81 +4525,62 @@ populate_lr_context(struct intel_context *ce,
goto err_unpin_ctx;
}
- memcpy(vaddr + start, defaults + start, engine->context_size);
+ memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
}
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ce, engine, ring);
- if (!engine->default_state)
- regs[CTX_CONTEXT_CONTROL + 1] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ce->gem_context == engine->i915->preempt_context &&
- INTEL_GEN(engine->i915) < 11)
- regs[CTX_CONTEXT_CONTROL + 1] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+ execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+ ce, engine, ring, inhibit);
ret = 0;
err_unpin_ctx:
- __i915_gem_object_flush_map(ctx_obj,
- LRC_HEADER_PAGES * PAGE_SIZE,
- engine->context_size);
+ __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
-static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
-{
- if (ctx->timeline)
- return i915_timeline_get(ctx->timeline);
- else
- return i915_timeline_create(ctx->i915, NULL);
-}
-
-static int execlists_context_deferred_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int __execlists_context_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
+ struct intel_ring *ring;
struct i915_vma *vma;
u32 context_size;
- struct intel_ring *ring;
- struct i915_timeline *timeline;
int ret;
- if (ce->state)
- return 0;
-
+ GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
- /*
- * Before the actual start of the context image, we insert a few pages
- * for our own use and for sharing with the GuC.
- */
- context_size += LRC_HEADER_PAGES * PAGE_SIZE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
- timeline = get_timeline(ce->gem_context);
- if (IS_ERR(timeline)) {
- ret = PTR_ERR(timeline);
- goto error_deref_obj;
+ if (!ce->timeline) {
+ struct intel_timeline *tl;
+
+ tl = intel_timeline_create(engine->gt, NULL);
+ if (IS_ERR(tl)) {
+ ret = PTR_ERR(tl);
+ goto error_deref_obj;
+ }
+
+ ce->timeline = tl;
}
- ring = intel_engine_create_ring(engine,
- timeline,
- ce->gem_context->ring_size);
- i915_timeline_put(timeline);
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
@@ -3022,22 +4622,24 @@ static void virtual_context_destroy(struct kref *kref)
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
struct rb_node *node = &ve->nodes[sibling->id].rb;
+ unsigned long flags;
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irq(&sibling->active.lock);
+ spin_lock_irqsave(&sibling->active.lock, flags);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irq(&sibling->active.lock);
+ spin_unlock_irqrestore(&sibling->active.lock, flags);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
if (ve->context.state)
__execlists_context_fini(&ve->context);
+ intel_context_fini(&ve->context);
kfree(ve->bonds);
kfree(ve);
@@ -3065,8 +4667,16 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
return;
swap(ve->siblings[swp], ve->siblings[0]);
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- ve->siblings[0]);
+ if (!intel_engine_has_relative_mmio(ve->siblings[0]))
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ ve->siblings[0]);
+}
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return __execlists_context_alloc(ce, ve->siblings[0]);
}
static int virtual_context_pin(struct intel_context *ce)
@@ -3090,6 +4700,8 @@ static void virtual_context_enter(struct intel_context *ce)
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
}
static void virtual_context_exit(struct intel_context *ce)
@@ -3097,11 +4709,15 @@ static void virtual_context_exit(struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
unsigned int n;
+ intel_timeline_exit(ce->timeline);
+
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_put(ve->siblings[n]);
}
static const struct intel_context_ops virtual_context_ops = {
+ .alloc = virtual_context_alloc,
+
.pin = virtual_context_pin,
.unpin = execlists_context_unpin,
@@ -3128,10 +4744,9 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = ve->siblings[0]->mask;
}
- GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
- ve->base.name,
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
return mask;
}
@@ -3219,23 +4834,40 @@ submit_engine:
static void virtual_submit_request(struct i915_request *rq)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ struct i915_request *old;
+ unsigned long flags;
- GEM_TRACE("%s: rq=%llx:%lld\n",
- ve->base.name,
- rq->fence.context,
- rq->fence.seqno);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
- GEM_BUG_ON(ve->request);
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ spin_lock_irqsave(&ve->base.active.lock, flags);
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- WRITE_ONCE(ve->request, rq);
+ old = ve->request;
+ if (old) { /* background completion event from preempt-to-busy */
+ GEM_BUG_ON(!i915_request_completed(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
- list_move_tail(&rq->sched.link, virtual_queue(ve));
+ if (i915_request_completed(rq)) {
+ __i915_request_submit(rq);
- tasklet_schedule(&ve->base.execlists.tasklet);
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ ve->request = NULL;
+ } else {
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_schedule(&ve->base.execlists.tasklet);
+ }
+
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
}
static struct ve_bond *
@@ -3256,23 +4888,26 @@ static void
virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ intel_engine_mask_t allowed, exec;
struct ve_bond *bond;
+ allowed = ~to_request(signal)->engine->mask;
+
bond = virtual_find_bond(ve, to_request(signal)->engine);
- if (bond) {
- intel_engine_mask_t old, new, cmp;
+ if (bond)
+ allowed &= bond->sibling_mask;
- cmp = READ_ONCE(rq->execution_mask);
- do {
- old = cmp;
- new = cmp & bond->sibling_mask;
- } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
- }
+ /* Restrict the bonded request to run on only the available engines */
+ exec = READ_ONCE(rq->execution_mask);
+ while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ ;
+
+ /* Prevent the master from being re-run on the bonded engines */
+ to_request(signal)->execution_mask &= ~allowed;
}
struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count)
{
struct virtual_engine *ve;
@@ -3283,18 +4918,21 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
return ERR_PTR(-EINVAL);
if (count == 1)
- return intel_context_create(ctx, siblings[0]);
+ return intel_context_create(siblings[0]);
ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
if (!ve)
return ERR_PTR(-ENOMEM);
- ve->base.i915 = ctx->i915;
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
+
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
@@ -3314,7 +4952,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
-
+ intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
ve->base.cops = &virtual_context_ops;
@@ -3330,7 +4968,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
virtual_submission_tasklet,
(unsigned long)ve);
- intel_context_init(&ve->context, ctx, &ve->base);
+ intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
@@ -3391,8 +5029,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
ve->base.emit_fini_breadcrumb_dw =
sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
}
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
return &ve->context;
err_put:
@@ -3401,14 +5043,12 @@ err_put:
}
struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
{
struct virtual_engine *se = to_virtual_engine(src);
struct intel_context *dst;
- dst = intel_execlists_create_virtual(ctx,
- se->siblings,
+ dst = intel_execlists_create_virtual(se->siblings,
se->num_siblings);
if (IS_ERR(dst))
return dst;
@@ -3466,6 +5106,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
return 0;
}
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+
+ if (sibling >= ve->num_siblings)
+ return NULL;
+
+ return ve->siblings[sibling];
+}
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -3554,6 +5206,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
u32 head,
bool scrub)
{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
@@ -3562,16 +5216,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- if (scrub) {
- u32 *regs = ce->lrc_reg_state;
-
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
- }
+ if (scrub)
+ restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head = head;
@@ -3580,6 +5226,13 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
__execlists_update_reg_state(ce, engine);
}
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ intel_execlists_set_default_submission;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2bba82bcc16..dfbc214e14f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -43,6 +43,7 @@ struct intel_engine_cs;
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
@@ -66,6 +67,12 @@ struct intel_engine_cs;
#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
@@ -76,33 +83,17 @@ enum {
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
-
-/*
- * We allocate a header at the start of the context image for our own
- * use, therefore the actual location of the logical state is offset
- * from the start of the VMA. The layout is
- *
- * | [guc] | [hwsp] [logical state] |
- * |<- our header ->|<- context image ->|
- *
- */
-/* The first page is used for sharing data with the GuC */
-#define LRC_GUCSHR_PN (0)
-#define LRC_GUCSHR_SZ (1)
/* At the start of the context image is its per-process HWS page */
-#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
-/* Finally we have the logical state for the context */
+/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
-/*
- * Currently we include the PPHWSP in __intel_engine_context_size() so
- * the size of the header is synonymous with the start of the PPHWSP.
- */
-#define LRC_HEADER_PAGES LRC_PPHWSP_PN
+/* Space within PPHWSP reserved to be used as scratch */
+#define LRC_PPHWSP_SCRATCH 0x34
+#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
@@ -119,16 +110,21 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int max);
struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count);
struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 6bf34738b4e5..08a3be65f700 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,60 +9,47 @@
#include <linux/types.h>
-/* GEN8+ Reg State Context */
-#define CTX_LRI_HEADER_0 0x01
-#define CTX_CONTEXT_CONTROL 0x02
-#define CTX_RING_HEAD 0x04
-#define CTX_RING_TAIL 0x06
-#define CTX_RING_BUFFER_START 0x08
-#define CTX_RING_BUFFER_CONTROL 0x0a
-#define CTX_BB_HEAD_U 0x0c
-#define CTX_BB_HEAD_L 0x0e
-#define CTX_BB_STATE 0x10
-#define CTX_SECOND_BB_HEAD_U 0x12
-#define CTX_SECOND_BB_HEAD_L 0x14
-#define CTX_SECOND_BB_STATE 0x16
-#define CTX_BB_PER_CTX_PTR 0x18
-#define CTX_RCS_INDIRECT_CTX 0x1a
-#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
-#define CTX_LRI_HEADER_1 0x21
-#define CTX_CTX_TIMESTAMP 0x22
-#define CTX_PDP3_UDW 0x24
-#define CTX_PDP3_LDW 0x26
-#define CTX_PDP2_UDW 0x28
-#define CTX_PDP2_LDW 0x2a
-#define CTX_PDP1_UDW 0x2c
-#define CTX_PDP1_LDW 0x2e
-#define CTX_PDP0_UDW 0x30
-#define CTX_PDP0_LDW 0x32
-#define CTX_LRI_HEADER_2 0x41
-#define CTX_R_PWR_CLK_STATE 0x42
-#define CTX_END 0x44
-
-#define CTX_REG(reg_state, pos, reg, val) do { \
- u32 *reg_state__ = (reg_state); \
- const u32 pos__ = (pos); \
- (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \
- (reg_state__)[(pos__) + 1] = (val); \
-} while (0)
+/* GEN8 to GEN11 Reg State Context */
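+/*
+ * Note: the offsets below point at the value dword of each register within
+ * the context image, i.e. the raw LRI position plus one to step over the
+ * register address, so helpers such as ASSIGN_CTX_PDP() can write the value
+ * slot directly.
+ */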
+#define CTX_CONTEXT_CONTROL (0x02 + 1)
+#define CTX_RING_HEAD (0x04 + 1)
+#define CTX_RING_TAIL (0x06 + 1)
+#define CTX_RING_START (0x08 + 1)
+#define CTX_RING_CTL (0x0a + 1)
+#define CTX_BB_STATE (0x10 + 1)
+#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_PDP3_UDW (0x24 + 1)
+#define CTX_PDP3_LDW (0x26 + 1)
+#define CTX_PDP2_UDW (0x28 + 1)
+#define CTX_PDP2_LDW (0x2a + 1)
+#define CTX_PDP1_UDW (0x2c + 1)
+#define CTX_PDP1_LDW (0x2e + 1)
+#define CTX_PDP0_UDW (0x30 + 1)
+#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_R_PWR_CLK_STATE (0x42 + 1)
+
+#define GEN9_CTX_RING_MI_MODE 0x54
+
+/* GEN12+ Reg State Context */
+#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
- (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \
} while (0)
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = px_dma(ppgtt->pd); \
- (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A
+#define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0xD
#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 1f9db50b1869..eeef90b55c64 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,8 +23,10 @@
#include "i915_drv.h"
#include "intel_engine.h"
+#include "intel_gt.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
+#include "intel_ring.h"
/* structures required */
struct drm_i915_mocs_entry {
@@ -61,6 +63,10 @@ struct drm_i915_mocs_table {
#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
+/*
+ * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
+ * the same as LE_UC
+ */
#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
#define LE_1_UC _LE_CACHEABILITY(1)
#define LE_2_WT _LE_CACHEABILITY(2)
@@ -99,8 +105,9 @@ struct drm_i915_mocs_table {
* of bspec.
*
* Entries not part of the following tables are undefined as far as
- * userspace is concerned and shouldn't be relied upon. For the time
- * being they will be initialized to PTE.
+ * userspace is concerned and shouldn't be relied upon. For Gen < 12
+ * they will be initialized to PTE. From Gen12 onwards there is no setting
+ * for PTE and they will be initialized to an invalid value.
*
* The last two entries are reserved by the hardware. For ICL+ they
* should be initialized according to bspec and never used, for older
@@ -120,7 +127,7 @@ struct drm_i915_mocs_table {
LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
L3_3_WB)
-static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+static const struct drm_i915_mocs_entry skl_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
@@ -136,14 +143,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
};
#define GEN11_MOCS_ENTRIES \
- /* Base - Uncached (Deprecated) */ \
- MOCS_ENTRY(I915_MOCS_UNCACHED, \
- LE_1_UC | LE_TC_1_LLC, \
- L3_1_UC), \
- /* Base - L3 + LeCC:PAT (Deprecated) */ \
- MOCS_ENTRY(I915_MOCS_PTE, \
- LE_0_PAGETABLE | LE_TC_1_LLC, \
- L3_3_WB), \
+ /* Entries 0 and 1 are defined per-platform */ \
/* Base - L3 + LLC */ \
MOCS_ENTRY(2, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
@@ -200,14 +200,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
- /* Bypass LLC - Uncached (EHL+) */ \
- MOCS_ENTRY(16, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_1_UC), \
- /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
- MOCS_ENTRY(17, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -241,79 +233,92 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC)
-static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
+ /* Base - Error (Reserved for Non-Use) */
+ MOCS_ENTRY(0, 0x0, 0x0),
+ /* Base - Reserved */
+ MOCS_ENTRY(1, 0x0, 0x0),
+
+ GEN11_MOCS_ENTRIES,
+
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+};
+
+static const struct drm_i915_mocs_entry icl_mocs_table[] = {
+ /* Base - Uncached (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_UNCACHED,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* Base - L3 + LeCC:PAT (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_PTE,
+ LE_0_PAGETABLE | LE_TC_1_LLC,
+ L3_3_WB),
+
GEN11_MOCS_ENTRIES
};
-/**
- * get_mocs_settings()
- * @dev_priv: i915 device.
- * @table: Output table that will be made to point at appropriate
- * MOCS values for the device.
- *
- * This function will return the values of the MOCS table that needs to
- * be programmed for the platform. It will return the values that need
- * to be programmed and if they need to be programmed.
- *
- * Return: true if there are applicable MOCS settings for the device.
- */
-static bool get_mocs_settings(struct drm_i915_private *dev_priv,
+static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
- bool result = false;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- table->size = ARRAY_SIZE(icelake_mocs_table);
- table->table = icelake_mocs_table;
+ if (INTEL_GEN(i915) >= 12) {
+ table->size = ARRAY_SIZE(tgl_mocs_table);
+ table->table = tgl_mocs_table;
+ table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ } else if (IS_GEN(i915, 11)) {
+ table->size = ARRAY_SIZE(icl_mocs_table);
+ table->table = icl_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
- result = true;
- } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- table->size = ARRAY_SIZE(skylake_mocs_table);
+ } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
+ table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- table->table = skylake_mocs_table;
- result = true;
- } else if (IS_GEN9_LP(dev_priv)) {
+ table->table = skl_mocs_table;
+ } else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
- result = true;
} else {
- WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
+ WARN_ONCE(INTEL_GEN(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
+ return false;
}
+ if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
+ return false;
+
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
- if (IS_GEN(dev_priv, 9)) {
+ if (IS_GEN(i915, 9)) {
int i;
for (i = 0; i < table->size; i++)
- if (WARN_ON(table->table[i].l3cc_value &
- (L3_ESC(1) | L3_SCC(0x7))))
+ if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
return false;
}
- return result;
-}
-
-static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
-{
- switch (engine_id) {
- case RCS0:
- return GEN9_GFX_MOCS(index);
- case VCS0:
- return GEN9_MFX0_MOCS(index);
- case BCS0:
- return GEN9_BLT_MOCS(index);
- case VECS0:
- return GEN9_VEBOX_MOCS(index);
- case VCS1:
- return GEN9_MFX1_MOCS(index);
- case VCS2:
- return GEN11_MFX2_MOCS(index);
- default:
- MISSING_CASE(engine_id);
- return INVALID_MMIO_REG;
- }
+ return true;
}
/*
@@ -323,90 +328,47 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
unsigned int index)
{
- if (table->table[index].used)
+ if (index < table->size && table->table[index].used)
return table->table[index].control_value;
return table->table[I915_MOCS_PTE].control_value;
}
-/**
- * intel_mocs_init_engine() - emit the mocs control table
- * @engine: The engine for whom to emit the registers.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- */
-void intel_mocs_init_engine(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_mocs_table table;
- unsigned int index;
- u32 unused_value;
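+/*
+ * Walk all n_entries MOCS control values; get_entry_control() falls back to
+ * the I915_MOCS_PTE entry for undefined or out-of-range indices, so every
+ * register up to n_entries gets programmed.
+ */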
+#define for_each_mocs(mocs, t, i) \
+ for (i = 0; \
+ i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
+ i++)
- if (!get_mocs_settings(dev_priv, &table))
- return;
-
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].control_value;
-
- for (index = 0; index < table.size; index++) {
- u32 value = get_entry_control(&table, index);
-
- I915_WRITE(mocs_register(engine->id, index), value);
- }
+static void __init_mocs_table(struct intel_uncore *uncore,
+ const struct drm_i915_mocs_table *table,
+ u32 addr)
+{
+ unsigned int i;
+ u32 mocs;
- /* All remaining entries are also unused */
- for (; index < table.n_entries; index++)
- I915_WRITE(mocs_register(engine->id, index), unused_value);
+ for_each_mocs(mocs, table, i)
+ intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
}
-/**
- * emit_mocs_control_table() - emit the mocs control table
- * @rq: Request to set up the MOCS table for.
- * @table: The values to program into the control regs.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- *
- * Return: 0 on success, otherwise the error status.
- */
-static int emit_mocs_control_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
+static u32 mocs_offset(const struct intel_engine_cs *engine)
{
- enum intel_engine_id engine = rq->engine->id;
- unsigned int index;
- u32 unused_value;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].control_value;
-
- cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries);
-
- for (index = 0; index < table->size; index++) {
- u32 value = get_entry_control(table, index);
-
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = value;
- }
-
- /* All remaining entries are also unused */
- for (; index < table->n_entries; index++) {
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = unused_value;
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ static const u32 offset[] = {
+ [RCS0] = __GEN9_RCS0_MOCS0,
+ [VCS0] = __GEN9_VCS0_MOCS0,
+ [VCS1] = __GEN9_VCS1_MOCS0,
+ [VECS0] = __GEN9_VECS0_MOCS0,
+ [BCS0] = __GEN9_BCS0_MOCS0,
+ [VCS2] = __GEN11_VCS2_MOCS0,
+ };
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
+ return offset[engine->id];
+}
- return 0;
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
+{
+ __init_mocs_table(engine->uncore, table, mocs_offset(engine));
}
/*
@@ -416,159 +378,81 @@ static int emit_mocs_control_table(struct i915_request *rq,
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
unsigned int index)
{
- if (table->table[index].used)
+ if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
- u16 low,
- u16 high)
+static inline u32 l3cc_combine(u16 low, u16 high)
{
- return low | high << 16;
+ return low | (u32)high << 16;
}
-/**
- * emit_mocs_l3cc_table() - emit the mocs control table
- * @rq: Request to set up the MOCS table for.
- * @table: The values to program into the control regs.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address. This register set is
- * programmed in pairs.
- *
- * Return: 0 on success, otherwise the error status.
- */
-static int emit_mocs_l3cc_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
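+/*
+ * Each GEN9_LNCFCMOCS register packs two 16-bit l3cc entries, entry 2*i in
+ * the low half and entry 2*i + 1 in the high half, so iterate over
+ * (n_entries + 1) / 2 combined values.
+ */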
+#define for_each_l3cc(l3cc, t, i) \
+ for (i = 0; \
+ i < ((t)->n_entries + 1) / 2 ? \
+ (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
+ get_entry_l3cc((t), 2 * i + 1))), 1 : \
+ 0; \
+ i++)
+
+static void init_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- u16 unused_value;
+ struct intel_uncore *uncore = engine->uncore;
unsigned int i;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
+ u32 l3cc;
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].l3cc_value;
-
- cs = intel_ring_begin(rq, 2 + table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2);
-
- for (i = 0; i < table->size / 2; i++) {
- u16 low = get_entry_l3cc(table, 2 * i);
- u16 high = get_entry_l3cc(table, 2 * i + 1);
+ for_each_l3cc(l3cc, table, i)
+ intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), l3cc);
+}
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, high);
- }
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_mocs_table table;
- /* Odd table size - 1 left over */
- if (table->size & 0x01) {
- u16 low = get_entry_l3cc(table, 2 * i);
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, unused_value);
- i++;
- }
+ if (!get_mocs_settings(engine->i915, &table))
+ return;
- /* All remaining entries are also unused */
- for (; i < table->n_entries / 2; i++) {
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, unused_value, unused_value);
- }
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ init_mocs_table(engine, &table);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ if (engine->class == RENDER_CLASS)
+ init_l3cc_table(engine, &table);
+}
- return 0;
+static u32 global_mocs_offset(void)
+{
+ return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
-/**
- * intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev_priv: i915 device private
- *
- * This function simply programs the mocs registers for the given table
- * starting at the given address. This register set is programmed in pairs.
- *
- * These registers may get programmed more than once, it is simpler to
- * re-program 32 registers than maintain the state of when they were programmed.
- * We are always reprogramming with the same values and this only on context
- * start.
- *
- * Return: Nothing.
- */
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
+static void init_global_mocs(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
- unsigned int i;
- u16 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+ /*
+ * LLC and eDRAM control values are not applicable to dgfx
+ */
+ if (IS_DGFX(gt->i915))
return;
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].l3cc_value;
-
- for (i = 0; i < table.size / 2; i++) {
- u16 low = get_entry_l3cc(&table, 2 * i);
- u16 high = get_entry_l3cc(&table, 2 * i + 1);
-
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, high));
- }
-
- /* Odd table size - 1 left over */
- if (table.size & 0x01) {
- u16 low = get_entry_l3cc(&table, 2 * i);
-
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, unused_value));
- i++;
- }
+ if (!get_mocs_settings(gt->i915, &table))
+ return;
- /* All remaining entries are also unused */
- for (; i < table.n_entries / 2; i++)
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, unused_value, unused_value));
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
}
-/**
- * intel_rcs_context_init_mocs() - program the MOCS register.
- * @rq: Request to set up the MOCS tables for.
- *
- * This function will emit a batch buffer with the values required for
- * programming the MOCS register values for all the currently supported
- * rings.
- *
- * These registers are partially stored in the RCS context, so they are
- * emitted at the same time so that when a context is created these registers
- * are set up. These registers have to be emitted into the start of the
- * context as setting the ELSP will re-init some of these registers back
- * to the hw values.
- *
- * Return: 0 on success, otherwise the error status.
- */
-int intel_rcs_context_init_mocs(struct i915_request *rq)
+void intel_mocs_init(struct intel_gt *gt)
{
- struct drm_i915_mocs_table t;
- int ret;
-
- if (get_mocs_settings(rq->i915, &t)) {
- /* Program the RCS control registers */
- ret = emit_mocs_control_table(rq, &t);
- if (ret)
- return ret;
-
- /* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(rq, &t);
- if (ret)
- return ret;
- }
-
- return 0;
+ if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
+ init_global_mocs(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 0913704a1af2..83371f3e6ba1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,12 +49,10 @@
* context handling keep the MOCS in step.
*/
-struct drm_i915_private;
-struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
-int intel_rcs_context_init_mocs(struct i915_request *rq);
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
+void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
new file mode 100644
index 000000000000..f86f7e68ce5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include "i915_trace.h"
+#include "intel_gtt.h"
+#include "gen6_ppgtt.h"
+#include "gen8_ppgtt.h"
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm)
+{
+ struct i915_page_table *pt;
+
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pt))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, &pt->base))) {
+ kfree(pt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&pt->used, 0);
+ return pt;
+}
+
+struct i915_page_directory *__alloc_pd(size_t sz)
+{
+ struct i915_page_directory *pd;
+
+ pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd))
+ return NULL;
+
+ spin_lock_init(&pd->lock);
+ return pd;
+}
+
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+{
+ struct i915_page_directory *pd;
+
+ pd = __alloc_pd(sizeof(*pd));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+{
+ cleanup_page_dma(vm, pd);
+ kfree(pd);
+}
+
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+ const unsigned short idx,
+ const u64 encoded_entry)
+{
+ u64 * const vaddr = kmap_atomic(pdma->page);
+
+ vaddr[idx] = encoded_entry;
+ kunmap_atomic(vaddr);
+}
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+ /* Each thread pre-pins the pd, and we may have a thread per pde. */
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+
+ atomic_inc(px_used(pd));
+ pd->entry[idx] = to;
+ write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+}
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch)
+{
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
+
+ write_dma_entry(px_base(pd), idx, scratch->encode);
+ pd->entry[idx] = NULL;
+ atomic_dec(px_used(pd));
+}
+
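+/*
+ * Drop a reference on a page table without taking the lock while the count
+ * stays above one; only the final 1 -> 0 transition takes pd->lock so that
+ * clearing the parent entry is serialized against concurrent users.
+ */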
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch)
+{
+ bool free = false;
+
+ if (atomic_add_unless(&pt->used, -1, 1))
+ return false;
+
+ spin_lock(&pd->lock);
+ if (atomic_dec_and_test(&pt->used)) {
+ clear_pd_entry(pd, idx, scratch);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+
+ return free;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ gtt_write_workarounds(gt);
+
+ if (IS_GEN(i915, 6))
+ gen6_ppgtt_enable(gt);
+ else if (IS_GEN(i915, 7))
+ gen7_ppgtt_enable(gt);
+
+ return 0;
+}
+
+static struct i915_ppgtt *
+__ppgtt_create(struct intel_gt *gt)
+{
+ if (INTEL_GEN(gt->i915) < 8)
+ return gen6_ppgtt_create(gt);
+ else
+ return gen8_ppgtt_create(gt);
+}
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = __ppgtt_create(gt);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
+
+ trace_i915_ppgtt_create(&ppgtt->vm);
+
+ return ppgtt;
+}
+
+static int ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+ int err;
+
+ if (flags & I915_VMA_ALLOC) {
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ }
+
+ /* Applicable to VLV, and gen8+ */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(vma->obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ wmb();
+
+ return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+int ppgtt_set_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pages);
+
+ vma->pages = vma->obj->mm.pages;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ ppgtt->vm.gt = gt;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
new file mode 100644
index 000000000000..9e303c29d6e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -0,0 +1,776 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: RC6
+ *
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing as little as 0V while in this stage.
+ * This stage is entered automatically when the GPU is idle and RC6 support
+ * is enabled; as soon as a new workload arises, the GPU wakes up
+ * automatically as well.
+ *
+ * Intel GPUs provide different RC6 modes, which differ from one another in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up from.
+ */
+
+static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
+{
+ return container_of(rc6, struct intel_gt, rc6);
+}
+
+static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->uncore;
+}
+
+static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->i915;
+}
+
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static void gen11_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it under 10us for Icelake, similar to
+ * Broadwell+. To be conservative, we want to factor in a context
+ * switch on top (due to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
+
+ /* 3a: Enable RC6 */
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1);
+
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+}
+
+static void gen9_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6_mode;
+
+ /* 2b: Program RC6 thresholds.*/
+ if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ } else {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big cores) and around 40us for Broxton (and other low-power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* WaRsUseTimeoutMode:cnl (pre-prod) */
+ if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+ else
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode;
+
+ /*
+ * WaRsDisableCoarsePowerGating:skl,cnl
+ * - Render/Media PG need to be disabled with RC6.
+ */
+ if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+}
+
+static void gen8_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+
+ /* 3: Enable RC6 */
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE;
+}
+
+static void gen6_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6vids, rc6_mask;
+ int ret;
+
+ set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC1e_THRESHOLD, 1000);
+ if (IS_IVYBRIDGE(i915))
+ set(uncore, GEN6_RC6_THRESHOLD, 125000);
+ else
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6p_THRESHOLD, 150000);
+ set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* We don't use those on Haswell */
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ rc6->ctl_enable =
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE;
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+ if (IS_GEN(i915, 6) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN(i915, 6) &&
+ (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+}
+
+/* Check that the pcbr address is not empty. */
+static int chv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
+
+ pctx_paddr = (paddr & ~4095);
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+ }
+
+ return 0;
+}
+
+static int vlv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_gem_object *pctx;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ resource_size_t pcbr_offset;
+
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pctx = i915_gem_object_create_stolen_for_preallocated(i915,
+ pcbr_offset,
+ I915_GTT_OFFSET_NONE,
+ pctx_size);
+ if (IS_ERR(pctx))
+ return PTR_ERR(pctx);
+
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+ * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(i915, pctx_size);
+ if (IS_ERR(pctx)) {
+ DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ return PTR_ERR(pctx);
+ }
+
+ GEM_BUG_ON(range_overflows_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+
+out:
+ rc6->pctx = pctx;
+ return 0;
+}
+
+static void chv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2a: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /* TO threshold set to 500 us (0x186 * 1.28 us) */
+ set(uncore, GEN6_RC6_THRESHOLD, 0x186);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* 3: Enable RC6 */
+ rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
+}
+
+static void vlv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 0x557);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ rc6->ctl_enable =
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
+}
+
+static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ u32 rc6_ctx_base, rc_ctl, rc_sw_target;
+ bool enable_rc6 = true;
+
+ rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
+ rc_sw_target &= RC_SW_TARGET_STATE_MASK;
+ rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
+ DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
+
+ if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+ DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ /*
+ * The exact context size is not known for BXT, so assume a page size
+ * for this check.
+ */
+ rc6_ctx_base =
+ intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+ if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ enable_rc6 = false;
+ }
+
+ if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
+ DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
+ DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
+ DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
+ DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ return enable_rc6;
+}
+
+static bool rc6_supported(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!HAS_RC6(i915))
+ return false;
+
+ if (intel_vgpu_active(i915))
+ return false;
+
+ if (is_mock_gt(rc6_to_gt(rc6)))
+ return false;
+
+ if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
+ dev_notice(i915->drm.dev,
+ "RC6 and powersaving disabled by BIOS\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void rpm_get(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(rc6->wakeref);
+ pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = true;
+}
+
+static void rpm_put(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(!rc6->wakeref);
+ pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = false;
+}
+
+static bool pctx_corrupted(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return false;
+
+ if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
+ return false;
+
+ dev_notice(i915->drm.dev,
+ "RC6 context corruption, disabling runtime power management\n");
+ return true;
+}
+
+static void __intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (INTEL_GEN(i915) >= 9)
+ set(uncore, GEN9_PG_ENABLE, 0);
+ set(uncore, GEN6_RC_CONTROL, 0);
+ set(uncore, GEN6_RC_STATE, 0);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+void intel_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ int err;
+
+ /* Disable runtime-pm until we can save the GPU state with rc6 pctx */
+ rpm_get(rc6);
+
+ if (!rc6_supported(rc6))
+ return;
+
+ if (IS_CHERRYVIEW(i915))
+ err = chv_rc6_init(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ err = vlv_rc6_init(rc6);
+ else
+ err = 0;
+
+ /* Sanitize rc6, ensure it is disabled before we are ready. */
+ __intel_rc6_disable(rc6);
+
+ rc6->supported = err == 0;
+}
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6)
+{
+ if (rc6->enabled) { /* unbalanced suspend/resume */
+ rpm_get(rc6);
+ rc6->enabled = false;
+ }
+
+ if (rc6->supported)
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->supported)
+ return;
+
+ GEM_BUG_ON(rc6->enabled);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rc6_enable(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 11)
+ gen11_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 9)
+ gen9_rc6_enable(rc6);
+ else if (IS_BROADWELL(i915))
+ gen8_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rc6_enable(rc6);
+
+ rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ rc6->ctl_enable = 0;
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ if (unlikely(pctx_corrupted(rc6)))
+ return;
+
+ /* rc6 is ready, runtime-pm is go! */
+ rpm_put(rc6);
+ rc6->enabled = true;
+}
+
+void intel_rc6_unpark(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ /* Restore HW timers for automatic RC6 entry while busy */
+ set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
+}
+
+void intel_rc6_park(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ if (unlikely(pctx_corrupted(rc6))) {
+ intel_rc6_disable(rc6);
+ return;
+ }
+
+ if (!rc6->manual)
+ return;
+
+ /* Turn off the HW timers and go directly to rc6 */
+ set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
+ set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+}
+
+void intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ if (!rc6->enabled)
+ return;
+
+ rpm_get(rc6);
+ rc6->enabled = false;
+
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_fini(struct intel_rc6 *rc6)
+{
+ struct drm_i915_gem_object *pctx;
+
+ intel_rc6_disable(rc6);
+
+ pctx = fetch_and_zero(&rc6->pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
+
+ if (rc6->wakeref)
+ rpm_put(rc6);
+}
+
+static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
+{
+ u32 lower, upper, tmp;
+ int loop = 2;
+
+ /*
+ * The registers accessed do not need forcewake. We borrow the
+ * uncore lock to prevent concurrent access to the range-selection register.
+ */
+ lockdep_assert_held(&uncore->lock);
+
+ /*
+ * vlv and chv residency counters are 40 bits in width.
+ * With a control bit, we can choose between the upper and lower
+ * 32-bit windows into this counter.
+ *
+ * Although we always use the counter in high-range mode elsewhere,
+ * userspace may attempt to read the value before rc6 is initialised,
+ * before we have set the default VLV_COUNTER_CONTROL value. So always
+ * set the high bit to be safe.
+ */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ do {
+ tmp = upper;
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ lower = intel_uncore_read_fw(uncore, reg);
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ } while (upper != tmp && --loop);
+
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
+ * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+ * now.
+ */
+
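+	/*
+	 * The lower window exposes bits [31:0] and the upper window bits
+	 * [39:8] of the 40-bit counter; the loop above ensures both reads
+	 * are consistent, so OR-ing them reconstructs the full value.
+	 */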
+ return lower | (u64)upper << 8;
+}
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
+ u32 mul, div;
+
+ if (!rc6->supported)
+ return 0;
+
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other, so we can use the relative address, compared to the smallest
+ * one, as the index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ mul = 1000000;
+ div = i915->czclk_freq;
+ overflow_hw = BIT_ULL(40);
+ time_hw = vlv_residency_raw(uncore, reg);
+ } else {
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(i915)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
+
+ overflow_hw = BIT_ULL(32);
+ time_hw = intel_uncore_read_fw(uncore, reg);
+ }
+
+ /*
+ * Counter wrap handling.
+ *
+ * This relies on a sufficient frequency of queries; otherwise the
+ * counters can still wrap.
+ */
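+	/*
+	 * Example with a 32-bit counter: prev_hw = 0xfffffff0 and a new
+	 * read of 0x10 gives a delta of 0x10 + (BIT_ULL(32) - 0xfffffff0),
+	 * i.e. 0x20 hardware units.
+	 */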
+ prev_hw = rc6->prev_hw_residency[i];
+ rc6->prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += rc6->cur_residency[i];
+ rc6->cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+
+ return mul_u64_u32_div(time_hw, mul, div);
+}
+
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rc6.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
new file mode 100644
index 000000000000..9f0f23fca8af
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_H
+#define INTEL_RC6_H
+
+#include "i915_reg.h"
+
+struct intel_engine_cs;
+struct intel_rc6;
+
+void intel_rc6_init(struct intel_rc6 *rc6);
+void intel_rc6_fini(struct intel_rc6 *rc6);
+
+void intel_rc6_unpark(struct intel_rc6 *rc6);
+void intel_rc6_park(struct intel_rc6 *rc6);
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6);
+void intel_rc6_enable(struct intel_rc6 *rc6);
+void intel_rc6_disable(struct intel_rc6 *rc6);
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
+
+#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
new file mode 100644
index 000000000000..bfbb623f7a4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_TYPES_H
+#define INTEL_RC6_TYPES_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_rc6 {
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
+
+ u32 ctl_enable;
+
+ struct drm_i915_gem_object *pctx;
+
+ bool supported : 1;
+ bool enabled : 1;
+ bool manual : 1;
+ bool wakeref : 1;
+};
+
+#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 4ee032072d4f..5954ecc3207f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -26,23 +26,13 @@
*/
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
-
-struct intel_render_state {
- const struct intel_renderstate_rodata *rodata;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 batch_offset;
- u32 batch_size;
- u32 aux_offset;
- u32 aux_size;
-};
+#include "intel_ring.h"
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
- if (engine->id != RCS0)
+ if (engine->class != RENDER_CLASS)
return NULL;
switch (INTEL_GEN(engine->i915)) {
@@ -75,7 +65,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
(batch)[(i)++] = (val); \
} while(0)
-static int render_state_setup(struct intel_render_state *so,
+static int render_state_setup(struct intel_renderstate *so,
struct drm_i915_private *i915)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -84,11 +74,11 @@ static int render_state_setup(struct intel_render_state *so,
u32 *d;
int ret;
- ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
+ ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
if (ret)
return ret;
- d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
+ d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -166,7 +156,7 @@ static int render_state_setup(struct intel_render_state *so,
ret = 0;
out:
- i915_gem_object_finish_access(so->obj);
+ i915_gem_object_finish_access(so->vma->obj);
return ret;
err:
@@ -177,59 +167,84 @@ err:
#undef OUT_BATCH
-int i915_gem_render_state_emit(struct i915_request *rq)
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine = rq->engine;
- struct intel_render_state so = {}; /* keep the compiler happy */
+ struct drm_i915_gem_object *obj;
int err;
- so.rodata = render_state_get_rodata(engine);
- if (!so.rodata)
+ memset(so, 0, sizeof(*so));
+
+ so->rodata = render_state_get_rodata(engine);
+ if (!so->rodata)
return 0;
- if (so.rodata->batch_items * 4 > PAGE_SIZE)
+ if (so->rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
- so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(so.obj))
- return PTR_ERR(so.obj);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(so.vma)) {
- err = PTR_ERR(so.vma);
+ so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(so->vma)) {
+ err = PTR_ERR(so->vma);
goto err_obj;
}
- err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_vma;
- err = render_state_setup(&so, rq->i915);
+ err = render_state_setup(so, engine->i915);
if (err)
goto err_unpin;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(so->vma);
+err_vma:
+ i915_vma_close(so->vma);
+err_obj:
+ i915_gem_object_put(obj);
+ so->vma = NULL;
+ return err;
+}
+
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int err;
+
+ if (!so->vma)
+ return 0;
+
err = engine->emit_bb_start(rq,
- so.batch_offset, so.batch_size,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (err)
- goto err_unpin;
+ return err;
- if (so.aux_size > 8) {
+ if (so->aux_size > 8) {
err = engine->emit_bb_start(rq,
- so.aux_offset, so.aux_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (err)
- goto err_unpin;
+ return err;
}
- i915_vma_lock(so.vma);
- err = i915_vma_move_to_active(so.vma, rq, 0);
- i915_vma_unlock(so.vma);
-err_unpin:
- i915_vma_unpin(so.vma);
-err_vma:
- i915_vma_close(so.vma);
-err_obj:
- i915_gem_object_put(so.obj);
+ i915_vma_lock(so->vma);
+ err = i915_request_await_object(rq, so->vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(so->vma, rq, 0);
+ i915_vma_unlock(so->vma);
+
return err;
}
+
+void intel_renderstate_fini(struct intel_renderstate *so)
+{
+ i915_vma_unpin_and_release(&so->vma, 0);
+}
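
The renderstate rework above replaces the single-shot i915_gem_render_state_emit() with an init/emit/fini lifecycle that the caller owns. A minimal sketch of a caller of the new API, assuming a hypothetical setup_null_state() wrapper that is not part of this patch:

	/* Hypothetical caller, sketching the new three-step lifecycle. */
	static int setup_null_state(struct intel_engine_cs *engine,
				    struct i915_request *rq)
	{
		struct intel_renderstate so;
		int err;

		/* Pins the null-state batch in the GGTT; no-op without rodata. */
		err = intel_renderstate_init(&so, engine);
		if (err)
			return err;

		/* Emits the batch (and aux table, if any) into the request. */
		err = intel_renderstate_emit(&so, rq);

		/* Unpins and releases the vma whether or not emit succeeded. */
		intel_renderstate_fini(&so);
		return err;
	}
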
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 08f6fea05a2c..5700be69a05a 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -21,11 +21,15 @@
* DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_RENDERSTATE_H
-#define _INTEL_RENDERSTATE_H
+#ifndef _INTEL_RENDERSTATE_H_
+#define _INTEL_RENDERSTATE_H_
#include <linux/types.h>
+struct i915_request;
+struct intel_engine_cs;
+struct i915_vma;
+
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
@@ -44,4 +48,19 @@ extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
-#endif /* INTEL_RENDERSTATE_H */
+struct intel_renderstate {
+ const struct intel_renderstate_rodata *rodata;
+ struct i915_vma *vma;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
+};
+
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_engine_cs *engine);
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq);
+void intel_renderstate_fini(struct intel_renderstate *so);
+
+#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3f907701ef4d..beee0cf89bce 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -7,6 +7,7 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
+#include "display/intel_display_types.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -15,26 +16,18 @@
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"
-#include "intel_guc.h"
+#include "uc/intel_guc.h"
+#include "uc/intel_guc_submission.h"
#define RESET_MAX_RETRIES 3
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
intel_uncore_rmw_fw(uncore, reg, 0, set);
@@ -48,28 +41,29 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *hung_ctx = rq->gem_context;
-
- lockdep_assert_held(&engine->active.lock);
+ struct intel_context *hung_ctx = rq->context;
if (!i915_request_is_active(rq))
return;
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->gem_context == hung_ctx)
+ if (rq->context == hung_ctx)
i915_request_skip(rq, -EIO);
}
-static void client_mark_guilty(struct drm_i915_file_private *file_priv,
- const struct i915_gem_context *ctx)
+static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
- unsigned int score;
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
unsigned long prev_hang;
+ unsigned int score;
+
+ if (IS_ERR_OR_NULL(file_priv))
+ return;
- if (i915_gem_context_is_banned(ctx))
+ score = 0;
+ if (banned)
score = I915_CLIENT_SCORE_CONTEXT_BAN;
- else
- score = 0;
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
@@ -84,17 +78,38 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
}
}
-static bool context_mark_guilty(struct i915_gem_context *ctx)
+static bool mark_guilty(struct i915_request *rq)
{
+ struct i915_gem_context *ctx;
unsigned long prev_hang;
bool banned;
int i;
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return false;
+
+ if (i915_gem_context_is_closed(ctx)) {
+ intel_context_set_banned(rq->context);
+ banned = true;
+ goto out;
+ }
+
atomic_inc(&ctx->guilty_count);
/* Cool contexts are too cool to be banned! (Used for reset testing.) */
- if (!i915_gem_context_is_bannable(ctx))
- return false;
+ if (!i915_gem_context_is_bannable(ctx)) {
+ banned = false;
+ goto out;
+ }
+
+ dev_notice(ctx->i915->drm.dev,
+ "%s context reset due to GPU hang\n",
+ ctx->name);
/* Record the timestamp for the last N hangs */
prev_hang = ctx->hang_timestamp[0];
@@ -109,81 +124,43 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
if (banned) {
DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
ctx->name, atomic_read(&ctx->guilty_count));
- i915_gem_context_set_banned(ctx);
+ intel_context_set_banned(rq->context);
}
- if (!IS_ERR_OR_NULL(ctx->file_priv))
- client_mark_guilty(ctx->file_priv, ctx);
+ client_mark_guilty(ctx, banned);
+out:
+ i915_gem_context_put(ctx);
return banned;
}
-static void context_mark_innocent(struct i915_gem_context *ctx)
+static void mark_innocent(struct i915_request *rq)
{
- atomic_inc(&ctx->active_count);
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ atomic_inc(&ctx->active_count);
+ rcu_read_unlock();
}
-void i915_reset_request(struct i915_request *rq, bool guilty)
+void __i915_request_reset(struct i915_request *rq, bool guilty)
{
- GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
- rq->engine->name,
- rq->fence.context,
- rq->fence.seqno,
- yesno(guilty));
+ RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
- lockdep_assert_held(&rq->engine->active.lock);
GEM_BUG_ON(i915_request_completed(rq));
+ rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_skip(rq, -EIO);
- if (context_mark_guilty(rq->gem_context))
+ if (mark_guilty(rq))
engine_skip_context(rq);
} else {
dma_fence_set_error(&rq->fence, -EAGAIN);
- context_mark_innocent(rq->gem_context);
+ mark_innocent(rq);
}
-}
-
-static void gen3_stop_engine(struct intel_engine_cs *engine)
-{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
-
- GEM_TRACE("%s\n", engine->name);
-
- if (intel_engine_stop_cs(engine))
- GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
-
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- GEM_TRACE("%s: ring head [%x] not parked\n",
- engine->name,
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
-}
-
-static void i915_stop_engines(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (INTEL_GEN(i915) < 3)
- return;
-
- for_each_engine_masked(engine, i915, engine_mask, tmp)
- gen3_stop_engine(engine);
+ rcu_read_unlock();
}
static bool i915_in_reset(struct pci_dev *pdev)
@@ -194,11 +171,11 @@ static bool i915_in_reset(struct pci_dev *pdev)
return gdrst & GRDOM_RESET_STATUS;
}
-static int i915_do_reset(struct drm_i915_private *i915,
+static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
@@ -223,22 +200,22 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_i915_private *i915,
+static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
-static int g4x_do_reset(struct drm_i915_private *i915,
+static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct intel_uncore *uncore = &i915->uncore;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct intel_uncore *uncore = gt->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
@@ -270,11 +247,10 @@ out:
return ret;
}
-static int ironlake_do_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned int retry)
+static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
+ unsigned int retry)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
int ret;
intel_uncore_write_fw(uncore, ILK_GDSR,
@@ -306,10 +282,9 @@ out:
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *i915,
- u32 hw_domain_mask)
+static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
int err;
/*
@@ -331,18 +306,18 @@ static int gen6_hw_domain_reset(struct drm_i915_private *i915,
return err;
}
-static int gen6_reset_engines(struct drm_i915_private *i915,
+static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN6_GRDOM_RENDER,
[BCS0] = GEN6_GRDOM_BLT,
[VCS0] = GEN6_GRDOM_MEDIA,
[VCS1] = GEN8_GRDOM_MEDIA2,
[VECS0] = GEN6_GRDOM_VECS,
};
+ struct intel_engine_cs *engine;
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
@@ -351,16 +326,16 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
}
- return gen6_hw_domain_reset(i915, hw_mask);
+ return gen6_hw_domain_reset(gt, hw_mask);
}
-static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
+static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
@@ -369,6 +344,7 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
i915_reg_t sfc_usage;
u32 sfc_usage_bit;
u32 sfc_reset_bit;
+ int ret;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
@@ -403,27 +379,33 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
}
/*
- * Tell the engine that a software reset is going to happen. The engine
- * will then try to force lock the SFC (if currently locked, it will
- * remain so until we tell the engine it is safe to unlock; if currently
- * unlocked, it will ignore this and all new lock requests). If SFC
- * ends up being locked to the engine we want to reset, we have to reset
- * it as well (we will unlock it once the reset sequence is completed).
+ * If the engine is using a SFC, tell the engine that a software reset
+ * is going to happen. The engine will then try to force lock the SFC.
+ * If SFC ends up being locked to the engine we want to reset, we have
+ * to reset it as well (we will unlock it once the reset sequence is
+ * completed).
*/
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+ return 0;
+
rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(uncore,
- sfc_forced_lock_ack,
- sfc_forced_lock_ack_bit,
- sfc_forced_lock_ack_bit,
- 1000, 0, NULL)) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ ret = __intel_wait_for_register_fw(uncore,
+ sfc_forced_lock_ack,
+ sfc_forced_lock_ack_bit,
+ sfc_forced_lock_ack_bit,
+ 1000, 0, NULL);
+
+ /* Was the SFC released while we were trying to lock it? */
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
return 0;
- }
- if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
- return sfc_reset_bit;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ return ret;
+ }
+ *hw_mask |= sfc_reset_bit;
return 0;
}
@@ -455,11 +437,11 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
-static int gen11_reset_engines(struct drm_i915_private *i915,
+static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
[VCS0] = GEN11_GRDOM_MEDIA,
@@ -478,17 +460,26 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(engine);
+ ret = gen11_lock_sfc(engine, &hw_mask);
+ if (ret)
+ goto sfc_unlock;
}
}
- ret = gen6_hw_domain_reset(i915, hw_mask);
+ ret = gen6_hw_domain_reset(gt, hw_mask);
+sfc_unlock:
+ /*
+ * We unlock the SFC based on the lock status and not the result of
+ * gen11_lock_sfc to make sure that we clean properly if something
+ * wrong happened during the lock (e.g. lock acquired after timeout
+ * expiration).
+ */
if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
@@ -538,7 +529,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
-static int gen8_reset_engines(struct drm_i915_private *i915,
+static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -547,7 +538,7 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
intel_engine_mask_t tmp;
int ret;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
@@ -563,34 +554,45 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
* We rather take context corruption instead of
* failed reset with a wedged driver/gpu. And
* active bb execution case should be covered by
- * i915_stop_engines we have before the reset.
+ * stop_engines() we have before the reset.
*/
}
- if (INTEL_GEN(i915) >= 11)
- ret = gen11_reset_engines(i915, engine_mask, retry);
+ if (INTEL_GEN(gt->i915) >= 11)
+ ret = gen11_reset_engines(gt, engine_mask, retry);
else
- ret = gen6_reset_engines(i915, engine_mask, retry);
+ ret = gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
-typedef int (*reset_func)(struct drm_i915_private *,
+static int mock_reset(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ unsigned int retry)
+{
+ return 0;
+}
+
+typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
-static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
- if (INTEL_GEN(i915) >= 8)
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (is_mock_gt(gt))
+ return mock_reset;
+ else if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
return gen6_reset_engines;
else if (INTEL_GEN(i915) >= 5)
- return ironlake_do_reset;
+ return ilk_do_reset;
else if (IS_G4X(i915))
return g4x_do_reset;
else if (IS_G33(i915) || IS_PINEVIEW(i915))
@@ -601,15 +603,14 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
int ret = -ETIMEDOUT;
int retry;
- reset = intel_get_gpu_reset(i915);
+ reset = intel_get_gpu_reset(gt);
if (!reset)
return -ENODEV;
@@ -617,59 +618,45 @@ int intel_gpu_reset(struct drm_i915_private *i915,
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also as modern gpu as kbl can suffer
- * from system hang if batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- if (retry)
- i915_stop_engines(i915, engine_mask);
-
- GEM_TRACE("engine_mask=%x\n", engine_mask);
+ GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
preempt_disable();
- ret = reset(i915, engine_mask, retry);
+ ret = reset(gt, engine_mask, retry);
preempt_enable();
}
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915)
+bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!i915_modparams.reset)
return NULL;
- return intel_get_gpu_reset(i915);
+ return intel_get_gpu_reset(gt);
}
-bool intel_has_reset_engine(struct drm_i915_private *i915)
+bool intel_has_reset_engine(const struct intel_gt *gt)
{
- return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+ if (i915_modparams.reset < 2)
+ return false;
+
+ return INTEL_INFO(gt->i915)->has_reset_engine;
}
-int intel_reset_guc(struct drm_i915_private *i915)
+int intel_reset_guc(struct intel_gt *gt)
{
u32 guc_domain =
- INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
int ret;
- GEM_BUG_ON(!HAS_GUC(i915));
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(i915, guc_domain);
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ ret = gen6_hw_domain_reset(gt, guc_domain);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -688,59 +675,64 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
- engine->reset.prepare(engine);
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
}
-static void revoke_mmaps(struct drm_i915_private *i915)
+static void revoke_mmaps(struct intel_gt *gt)
{
int i;
- for (i = 0; i < i915->ggtt.num_fences; i++) {
+ for (i = 0; i < gt->ggtt->num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
- vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
+ vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
- GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
- node = &vma->obj->base.vma_node;
+ GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
+
+ if (!vma->mmo)
+ continue;
+
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
- unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+
+ unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
}
}
-static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
}
- intel_uc_reset_prepare(i915);
+ intel_uc_reset_prepare(&gt->uc);
return awake;
}
-static void gt_revoke(struct drm_i915_private *i915)
+static void gt_revoke(struct intel_gt *gt)
{
- revoke_mmaps(i915);
+ revoke_mmaps(gt);
}
-static int gt_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask)
+static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -750,33 +742,33 @@ static int gt_reset(struct drm_i915_private *i915,
* Everything depends on having the GTT running, so we need to start
* there.
*/
- err = i915_ggtt_enable_hw(i915);
+ err = i915_ggtt_enable_hw(gt->i915);
if (err)
return err;
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, stalled_mask & engine->mask);
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(gt->ggtt);
return err;
}
static void reset_finish_engine(struct intel_engine_cs *engine)
{
- engine->reset.finish(engine);
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915,
- intel_engine_mask_t awake)
+static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
@@ -788,8 +780,7 @@ static void nop_submit_request(struct i915_request *request)
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
- engine->name, request->fence.context, request->fence.seqno);
+ RQ_TRACE(request, "-EIO\n");
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
@@ -797,44 +788,40 @@ static void nop_submit_request(struct i915_request *request)
i915_request_mark_complete(request);
spin_unlock_irqrestore(&engine->active.lock, flags);
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
-static void __i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __intel_gt_set_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_engine_mask_t awake;
enum intel_engine_id id;
- if (test_bit(I915_WEDGED, &error->flags))
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
- if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
+ if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
struct drm_printer p = drm_debug_printer(__func__);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
- GEM_TRACE("start\n");
+ GT_TRACE(gt, "start\n");
/*
* First, stop submission to hw, but do not yet complete requests by
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
/* Even if the GPU reset fails, it should still stop the engines */
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_gpu_reset(i915, ALL_ENGINES);
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
@@ -842,40 +829,42 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* in nop_submit_request.
*/
synchronize_rcu_expedited();
- set_bit(I915_WEDGED, &error->flags);
+ set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
- for_each_engine(engine, i915, id)
- engine->cancel_requests(engine);
+ for_each_engine(engine, gt, id)
+ if (engine->reset.cancel)
+ engine->reset.cancel(engine);
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
- GEM_TRACE("end\n");
+ GT_TRACE(gt, "end\n");
}
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+void intel_gt_set_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
intel_wakeref_t wakeref;
- mutex_lock(&error->wedge_mutex);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- __i915_gem_set_wedged(i915);
- mutex_unlock(&error->wedge_mutex);
+ mutex_lock(&gt->reset.mutex);
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ __intel_gt_set_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
}
-static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- struct i915_timeline *tl;
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl;
+ bool ok;
- if (!test_bit(I915_WEDGED, &error->flags))
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
- if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
+ /* Never fully initialised, recovery impossible */
+ if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
return false;
- GEM_TRACE("start\n");
+ GT_TRACE(gt, "start\n");
/*
* Before unwedging, make sure that all pending operations
@@ -887,14 +876,16 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
*
* No more can be submitted until we reset the wedged bit.
*/
- mutex_lock(&i915->gt.timelines.mutex);
- list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
- struct i915_request *rq;
+ spin_lock(&timelines->lock);
+ list_for_each_entry(tl, &timelines->active_list, link) {
+ struct dma_fence *fence;
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
continue;
+ spin_unlock(&timelines->lock);
+
/*
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
@@ -902,12 +893,27 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
* (I915_FENCE_TIMEOUT) so this wait should not be unbounded
* in the worst case.
*/
- dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
+
+ /* Restart iteration after dropping the lock */
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&i915->gt.timelines.mutex);
+ spin_unlock(&timelines->lock);
- intel_gt_sanitize(i915, false);
+ /* We must reset pending GPU events before restoring our submission */
+ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ if (!ok) {
+ /*
+ * Warn CI about the unrecoverable wedged condition.
+ * Time for a reboot.
+ */
+ add_taint_for_CI(TAINT_WARN);
+ return false;
+ }
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -918,53 +924,51 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
* the nop_submit_request on reset, we can do this from normal
* context and do not require stop_machine().
*/
- intel_engines_reset_default_submission(i915);
+ intel_engines_reset_default_submission(gt);
- GEM_TRACE("end\n");
+ GT_TRACE(gt, "end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
- clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+ clear_bit(I915_WEDGED, &gt->reset.flags);
return true;
}
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+bool intel_gt_unset_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
bool result;
- mutex_lock(&error->wedge_mutex);
- result = __i915_gem_unset_wedged(i915);
- mutex_unlock(&error->wedge_mutex);
+ mutex_lock(&gt->reset.mutex);
+ result = __intel_gt_unset_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
return result;
}
-static int do_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask)
+static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- gt_revoke(i915);
+ gt_revoke(gt);
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
}
if (err)
return err;
- return gt_reset(i915, stalled_mask);
+ return gt_reset(gt, stalled_mask);
}
-static int resume(struct drm_i915_private *i915)
+static int resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
ret = engine->resume(engine);
if (ret)
return ret;
@@ -974,8 +978,8 @@ static int resume(struct drm_i915_private *i915)
}
/**
- * i915_reset - reset chip after a hang
- * @i915: #drm_i915_private to reset
+ * intel_gt_reset - reset chip after a hang
+ * @gt: #intel_gt to reset
* @stalled_mask: mask of the stalled engines with the guilty requests
* @reason: user error message for why we are resetting
*
@@ -990,50 +994,50 @@ static int resume(struct drm_i915_private *i915)
* - re-init interrupt state
* - re-init display
*/
-void i915_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask,
- const char *reason)
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason)
{
- struct i915_gpu_error *error = &i915->gpu_error;
intel_engine_mask_t awake;
int ret;
- GEM_TRACE("flags=%lx\n", error->flags);
+ GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
might_sleep();
- GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
- mutex_lock(&error->wedge_mutex);
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+ mutex_lock(&gt->reset.mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!__i915_gem_unset_wedged(i915))
+ if (!__intel_gt_unset_wedged(gt))
goto unlock;
if (reason)
- dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
- error->reset_count++;
+ dev_notice(gt->i915->drm.dev,
+ "Resetting chip for %s\n", reason);
+ atomic_inc(&gt->i915->gpu_error.reset_count);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
- if (!intel_has_gpu_reset(i915)) {
+ if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
- dev_err(i915->drm.dev, "GPU reset not supported\n");
+ dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_disable_interrupts(i915);
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(gt->i915);
- if (do_reset(i915, stalled_mask)) {
- dev_err(i915->drm.dev, "Failed to reset chip\n");
+ if (do_reset(gt, stalled_mask)) {
+ dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_enable_interrupts(i915);
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(gt->i915);
- intel_overlay_reset(i915);
+ intel_overlay_reset(gt->i915);
/*
* Next we need to restore the context, but we don't use those
@@ -1043,23 +1047,21 @@ void i915_reset(struct drm_i915_private *i915,
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(i915);
+ ret = intel_gt_init_hw(gt);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
goto taint;
}
- ret = resume(i915);
+ ret = resume(gt);
if (ret)
goto taint;
- i915_queue_hangcheck(i915);
-
finish:
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
unlock:
- mutex_unlock(&error->wedge_mutex);
+ mutex_unlock(&gt->reset.mutex);
return;
taint:
@@ -1077,18 +1079,17 @@ taint:
*/
add_taint_for_CI(TAINT_WARN);
error:
- __i915_gem_set_wedged(i915);
+ __intel_gt_set_wedged(gt);
goto finish;
}
-static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
- struct intel_engine_cs *engine)
+static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
- return intel_gpu_reset(i915, engine->mask);
+ return __intel_gt_reset(engine->gt, engine->mask);
}
/**
- * i915_reset_engine - reset GPU engine to recover from a hang
+ * intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
* @msg: reason for GPU reset; or NULL for no dev_notice()
*
@@ -1100,13 +1101,14 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
* - reset engine (which will force the engine to idle)
* - re-init/configure engine
*/
-int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
- struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct intel_gt *gt = engine->gt;
+ bool uses_guc = intel_engine_in_guc_submission_mode(engine);
int ret;
- GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
- GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (!intel_engine_pm_get_if_awake(engine))
return 0;
@@ -1116,16 +1118,16 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
if (msg)
dev_notice(engine->i915->drm.dev,
"Resetting %s for %s\n", engine->name, msg);
- error->reset_engine_count[engine->id]++;
+ atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!engine->i915->guc.execbuf_client)
- ret = intel_gt_reset_engine(engine->i915, engine);
+ if (!uses_guc)
+ ret = intel_gt_reset_engine(engine);
else
- ret = intel_guc_reset_engine(&engine->i915->guc, engine);
+ ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->i915->guc.execbuf_client ? "GuC " : "",
+ uses_guc ? "GuC " : "",
engine->name, ret);
goto out;
}
@@ -1135,7 +1137,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* active request and can drop it, adjust head to skip the offending
* request to resume executing remaining requests in the queue.
*/
- intel_engine_reset(engine, true);
+ __intel_engine_reset(engine, true);
/*
* The engine and its registers (and workarounds in case of render)
@@ -1147,20 +1149,19 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
- intel_engine_pm_put(engine);
+ intel_engine_pm_put_async(engine);
return ret;
}
-static void i915_reset_device(struct drm_i915_private *i915,
- u32 engine_mask,
- const char *reason)
+static void intel_gt_reset_global(struct intel_gt *gt,
+ u32 engine_mask,
+ const char *reason)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- struct kobject *kobj = &i915->drm.primary->kdev->kobj;
+ struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- struct i915_wedge_me w;
+ struct intel_wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
@@ -1168,137 +1169,24 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
- i915_wedge_on_timeout(&w, i915, 5 * HZ) {
- intel_prepare_reset(i915);
+ intel_wedge_on_timeout(&w, gt, 5 * HZ) {
+ intel_prepare_reset(gt->i915);
/* Flush everyone using a resource about to be clobbered */
- synchronize_srcu_expedited(&error->reset_backoff_srcu);
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
- i915_reset(i915, engine_mask, reason);
+ intel_gt_reset(gt, engine_mask, reason);
- intel_finish_reset(i915);
+ intel_finish_reset(gt->i915);
}
- if (!test_bit(I915_WEDGED, &error->flags))
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
- intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
-static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
-{
- GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
- GEN6_RING_FAULT_REG_POSTING_READ(engine);
-}
-
-static void clear_error_registers(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 eir;
-
- if (!IS_GEN(i915, 2))
- clear_register(uncore, PGTBL_ER);
-
- if (INTEL_GEN(i915) < 4)
- clear_register(uncore, IPEIR(RENDER_RING_BASE));
- else
- clear_register(uncore, IPEIR_I965);
-
- clear_register(uncore, EIR);
- eir = intel_uncore_read(uncore, EIR);
- if (eir) {
- /*
- * some errors might have become stuck,
- * mask them.
- */
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- rmw_set(uncore, EMR, eir);
- intel_uncore_write(uncore, GEN2_IIR,
- I915_MASTER_ERROR_INTERRUPT);
- }
-
- if (INTEL_GEN(i915) >= 8) {
- rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
- intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(i915) >= 6) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine_masked(engine, i915, engine_mask, id)
- gen8_clear_engine_error_register(engine);
- }
-}
-
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 fault;
-
- for_each_engine(engine, dev_priv, id) {
- fault = GEN6_RING_FAULT_REG_READ(engine);
- if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
- }
-}
-
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
-{
- u32 fault = I915_READ(GEN8_RING_FAULT_REG);
-
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
-}
-
-void i915_check_and_clear_faults(struct drm_i915_private *i915)
-{
- /* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (INTEL_GEN(i915) >= 8)
- gen8_check_faults(i915);
- else if (INTEL_GEN(i915) >= 6)
- gen6_check_faults(i915);
- else
- return;
-
- clear_error_registers(i915, ALL_ENGINES);
-}
-
/**
- * i915_handle_error - handle a gpu error
- * @i915: i915 device private
+ * intel_gt_handle_error - handle a gpu error
+ * @gt: the intel_gt
* @engine_mask: mask representing engines that are hung
* @flags: control flags
* @fmt: Error message format string
@@ -1309,12 +1197,11 @@ void i915_check_and_clear_faults(struct drm_i915_private *i915)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned long flags,
- const char *fmt, ...)
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...)
{
- struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
intel_engine_mask_t tmp;
@@ -1338,33 +1225,31 @@ void i915_handle_error(struct drm_i915_private *i915,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- engine_mask &= INTEL_INFO(i915)->engine_mask;
+ engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(i915, engine_mask, msg);
- clear_error_registers(i915, engine_mask);
+ i915_capture_error_state(gt->i915);
+ intel_gt_clear_error_registers(gt, engine_mask);
}
/*
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &error->flags))
+ &gt->reset.flags))
continue;
- if (i915_reset_engine(engine, msg) == 0)
+ if (intel_engine_reset(engine, msg) == 0)
engine_mask &= ~engine->mask;
- clear_bit(I915_RESET_ENGINE + engine->id,
- &error->flags);
- wake_up_bit(&error->flags,
- I915_RESET_ENGINE + engine->id);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
}
}
@@ -1372,9 +1257,9 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
- wait_event(error->reset_queue,
- !test_bit(I915_RESET_BACKOFF, &error->flags));
+ if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
goto out; /* piggy-back on the other reset */
}
@@ -1382,115 +1267,127 @@ void i915_handle_error(struct drm_i915_private *i915,
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
- for_each_engine(engine, i915, tmp) {
+ for_each_engine(engine, gt, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &error->flags))
- wait_on_bit(&error->flags,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
- i915_reset_device(i915, engine_mask, msg);
-
- for_each_engine(engine, i915, tmp) {
- clear_bit(I915_RESET_ENGINE + engine->id,
- &error->flags);
- }
+ intel_gt_reset_global(gt, engine_mask, msg);
- clear_bit(I915_RESET_BACKOFF, &error->flags);
- wake_up_all(&error->reset_queue);
+ for_each_engine(engine, gt, tmp)
+ clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
+ smp_mb__after_atomic();
+ wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
-int i915_reset_trylock(struct drm_i915_private *i915)
+int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- int srcu;
-
- might_lock(&error->reset_backoff_srcu);
+ might_lock(&gt->reset.backoff_srcu);
might_sleep();
rcu_read_lock();
- while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+ while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
rcu_read_unlock();
- if (wait_event_interruptible(error->reset_queue,
+ if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
- &error->flags)))
+ &gt->reset.flags)))
return -EINTR;
rcu_read_lock();
}
- srcu = srcu_read_lock(&error->reset_backoff_srcu);
+ *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
rcu_read_unlock();
- return srcu;
+ return 0;
}
-void i915_reset_unlock(struct drm_i915_private *i915, int tag)
-__releases(&i915->gpu_error.reset_backoff_srcu)
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
+__releases(&gt->reset.backoff_srcu)
{
- struct i915_gpu_error *error = &i915->gpu_error;
-
- srcu_read_unlock(&error->reset_backoff_srcu, tag);
+ srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
-int i915_terminally_wedged(struct drm_i915_private *i915)
+int intel_gt_terminally_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
-
might_sleep();
- if (!__i915_wedged(error))
+ if (!intel_gt_is_wedged(gt))
return 0;
- /* Reset still in progress? Maybe we will recover? */
- if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+ if (intel_gt_has_init_error(gt))
return -EIO;
- /* XXX intel_reset_finish() still takes struct_mutex!!! */
- if (mutex_is_locked(&i915->drm.struct_mutex))
- return -EAGAIN;
-
- if (wait_event_interruptible(error->reset_queue,
+ /* Reset still in progress? Maybe we will recover? */
+ if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
- &error->flags)))
+ &gt->reset.flags)))
return -EINTR;
- return __i915_wedged(error) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
+}
+
+void intel_gt_set_wedged_on_init(struct intel_gt *gt)
+{
+ BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
+ I915_WEDGED_ON_INIT);
+ intel_gt_set_wedged(gt);
+ set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
+void intel_gt_init_reset(struct intel_gt *gt)
+{
+ init_waitqueue_head(&gt->reset.queue);
+ mutex_init(&gt->reset.mutex);
+ init_srcu_struct(&gt->reset.backoff_srcu);
+
+ /* no GPU until we are ready! */
+ __set_bit(I915_WEDGED, &gt->reset.flags);
+}
+
+void intel_gt_fini_reset(struct intel_gt *gt)
+{
+ cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
-static void i915_wedge_me(struct work_struct *work)
+static void intel_wedge_me(struct work_struct *work)
{
- struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
+ struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- dev_err(w->i915->drm.dev,
+ dev_err(w->gt->i915->drm.dev,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
- i915_gem_set_wedged(w->i915);
+ intel_gt_set_wedged(w->gt);
}
-void __i915_init_wedge(struct i915_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name)
{
- w->i915 = i915;
+ w->gt = gt;
w->name = name;
- INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
+ INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
schedule_delayed_work(&w->work, timeout);
}
-void __i915_fini_wedge(struct i915_wedge_me *w)
+void __intel_fini_wedge(struct intel_wedge_me *w)
{
cancel_delayed_work_sync(&w->work);
destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
+ w->gt = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
+#include "selftest_hangcheck.c"
#endif
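
One interface change in the reset code above is easy to miss: intel_gt_reset_trylock() no longer returns the SRCU tag directly; it returns 0 or -EINTR and stores the tag through an out-parameter, which the caller later hands back to intel_gt_reset_unlock(). A minimal caller sketch, with touch_reset_domain() being a hypothetical name:

	static int touch_reset_domain(struct intel_gt *gt)
	{
		int srcu, err;

		/* Blocks while a full GPU reset is pending (I915_RESET_BACKOFF). */
		err = intel_gt_reset_trylock(gt, &srcu);
		if (err)
			return err; /* -EINTR: interrupted while waiting */

		/* ... safely touch state that a concurrent reset would clobber ... */

		intel_gt_reset_unlock(gt, srcu);
		return 0;
	}
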
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 580ebdb59eca..8e8d5f761166 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -11,58 +11,75 @@
#include <linux/types.h>
#include <linux/srcu.h>
-#include "gt/intel_engine_types.h"
+#include "intel_engine_types.h"
+#include "intel_reset_types.h"
-struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
struct intel_guc;
+void intel_gt_init_reset(struct intel_gt *gt);
+void intel_gt_fini_reset(struct intel_gt *gt);
+
__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned long flags,
- const char *fmt, ...);
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
-void i915_check_and_clear_faults(struct drm_i915_private *i915);
-
-void i915_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask,
- const char *reason);
-int i915_reset_engine(struct intel_engine_cs *engine,
- const char *reason);
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason);
+int intel_engine_reset(struct intel_engine_cs *engine,
+ const char *reason);
-void i915_reset_request(struct i915_request *rq, bool guilty);
+void __i915_request_reset(struct i915_request *rq, bool guilty);
-int __must_check i915_reset_trylock(struct drm_i915_private *i915);
-void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
-int i915_terminally_wedged(struct drm_i915_private *i915);
+void intel_gt_set_wedged(struct intel_gt *gt);
+bool intel_gt_unset_wedged(struct intel_gt *gt);
+int intel_gt_terminally_wedged(struct intel_gt *gt);
-bool intel_has_gpu_reset(struct drm_i915_private *i915);
-bool intel_has_reset_engine(struct drm_i915_private *i915);
+/*
+ * There's no unset_wedged_on_init paired with this one.
+ * Once we're wedged on init, there's no going back.
+ */
+void intel_gt_set_wedged_on_init(struct intel_gt *gt);
-int intel_gpu_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask);
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
-int intel_reset_guc(struct drm_i915_private *i915);
+int intel_reset_guc(struct intel_gt *gt);
-struct i915_wedge_me {
+struct intel_wedge_me {
struct delayed_work work;
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
const char *name;
};
-void __i915_init_wedge(struct i915_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name);
-void __i915_fini_wedge(struct i915_wedge_me *w);
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name);
+void __intel_fini_wedge(struct intel_wedge_me *w);
+
+#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
+ for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
+ (W)->gt; \
+ __intel_fini_wedge((W)))
+
+static inline bool __intel_reset_failed(const struct intel_reset *reset)
+{
+ GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
+ !test_bit(I915_WEDGED, &reset->flags) : false);
+
+ return unlikely(test_bit(I915_WEDGED, &reset->flags));
+}
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __i915_fini_wedge((W)))
+bool intel_has_gpu_reset(const struct intel_gt *gt);
+bool intel_has_reset_engine(const struct intel_gt *gt);
#endif /* I915_RESET_H */
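
To illustrate the intel_wedge_on_timeout() helper declared above: it arms a delayed work for the duration of the block and wedges the GT if the block overruns, as intel_gt_reset_global() does in the .c change. A hedged sketch with a hypothetical recover_with_watchdog() wrapper:

	static void recover_with_watchdog(struct intel_gt *gt)
	{
		struct intel_wedge_me w;

		intel_wedge_on_timeout(&w, gt, 5 * HZ) {
			/* ... recovery work; if it takes longer than 5s,
			 * intel_wedge_me() fires from its delayed work and
			 * wedges the GT so the system can make progress ...
			 */
		}
	}
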
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
new file mode 100644
index 000000000000..f43bc3a0fe4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_RESET_TYPES_H_
+#define __INTEL_RESET_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/srcu.h>
+
+struct intel_reset {
+ /**
+ * flags: Control various stages of the GPU reset
+ *
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
+ *
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts in the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
+ * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+ * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+ * i915_request_alloc(), this bit is checked and the sequence
+ * aborted (with -EIO reported to userspace) if set.
+ *
+ * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
+ * longer use the GPU - similar to #I915_WEDGED bit. The difference in
+ * in the way we're handling "forced" unwedged (e.g. through debugfs),
+ * which is not allowed in case we failed to initialize.
+ */
+ unsigned long flags;
+#define I915_RESET_BACKOFF 0
+#define I915_RESET_MODESET 1
+#define I915_RESET_ENGINE 2
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2)
+#define I915_WEDGED (BITS_PER_LONG - 1)
+
+ struct mutex mutex; /* serialises wedging/unwedging */
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for dev_priv->mm.wedged to settle.
+ */
+ wait_queue_head_t queue;
+
+ struct srcu_struct backoff_srcu;
+};
+
+#endif /* __INTEL_RESET_TYPES_H_ */
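
For the flag layout documented above: per-engine bits grow upwards from I915_RESET_ENGINE while the terminal I915_WEDGED/I915_WEDGED_ON_INIT states sit at the top of the word. A small consumer sketch (example_reset_pending() is illustrative, not part of this patch):

	static bool example_reset_pending(const struct intel_gt *gt,
					  const struct intel_engine_cs *engine)
	{
		const unsigned long *flags = &gt->reset.flags;

		/* Either this engine is mid-reset, or the whole GT is wedged. */
		return test_bit(I915_RESET_ENGINE + engine->id, flags) ||
		       test_bit(I915_WEDGED, flags);
	}
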
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
new file mode 100644
index 000000000000..374b28f13ca0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -0,0 +1,318 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_engine.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
+
+unsigned int intel_ring_update_space(struct intel_ring *ring)
+{
+ unsigned int space;
+
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
+}
+
+int intel_ring_pin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+ unsigned int flags;
+ void *addr;
+ int ret;
+
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
+
+ flags = PIN_GLOBAL;
+
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ if (vma->obj->stolen)
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+
+ ret = i915_vma_pin(vma, 0, 0, flags);
+ if (unlikely(ret))
+ goto err_unpin;
+
+ if (i915_vma_is_map_and_fenceable(vma))
+ addr = (void __force *)i915_vma_pin_iomap(vma);
+ else
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_ring;
+ }
+
+ i915_vma_make_unshrinkable(vma);
+
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->emit);
+
+ ring->vaddr = addr;
+ return 0;
+
+err_ring:
+ i915_vma_unpin(vma);
+err_unpin:
+ atomic_dec(&ring->pin_count);
+ return ret;
+}
+
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ tail = intel_ring_wrap(ring, tail);
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
+void intel_ring_unpin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
+
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma))
+ i915_vma_unpin_iomap(vma);
+ else
+ i915_gem_object_unpin_map(vma->obj);
+
+ i915_vma_make_purgeable(vma);
+ i915_vma_unpin(vma);
+}
+
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = ERR_PTR(-ENODEV);
+ if (i915_ggtt_has_aperture(ggtt))
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ring->ref);
+ ring->size = size;
+
+ /*
+ * Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = size;
+ if (IS_I830(i915) || IS_I845G(i915))
+ ring->effective_size -= 2 * CACHELINE_BYTES;
+
+ intel_ring_update_space(ring);
+
+ vma = create_ring_vma(engine->gt->ggtt, size);
+ if (IS_ERR(vma)) {
+ kfree(ring);
+ return ERR_CAST(vma);
+ }
+ ring->vma = vma;
+
+ return ring;
+}
+
+void intel_ring_free(struct kref *ref)
+{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
+
+ i915_vma_put(ring->vma);
+ kfree(ring);
+}
+
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
+{
+ struct i915_request *target;
+ long timeout;
+
+ if (intel_ring_update_space(ring) >= bytes)
+ return 0;
+
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
+ break;
+ }
+
+ if (GEM_WARN_ON(&target->link == &tl->requests))
+ return -ENOSPC;
+
+ timeout = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
+
+ i915_request_retire_upto(target);
+
+ intel_ring_update_space(ring);
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
+{
+ struct intel_ring *ring = rq->ring;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
+ u32 *cs;
+
+ /* Packets must be qword aligned. */
+ GEM_BUG_ON(num_dwords & 1);
+
+ total_bytes = bytes + rq->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
+
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
+ need_wrap = remain_actual | 1;
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = rq->reserved_space + remain_actual;
+ }
+ }
+
+ if (unlikely(total_bytes > ring->space)) {
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_request_alloc() and i915_request_add().
+ */
+ GEM_BUG_ON(!rq->reserved_space);
+
+ ret = wait_for_space(ring,
+ i915_request_timeline(rq),
+ total_bytes);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(need_wrap)) {
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+ GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
+
+ /* Fill the tail with MI_NOOP */
+ memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
+ ring->space -= need_wrap;
+ ring->emit = 0;
+ }
+
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
+ GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ ring->emit += bytes;
+ ring->space -= bytes;
+
+ return cs;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct i915_request *rq)
+{
+ int num_dwords;
+ void *cs;
+
+ num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
+ if (num_dwords == 0)
+ return 0;
+
+ num_dwords = CACHELINE_DWORDS - num_dwords;
+ GEM_BUG_ON(num_dwords & 1);
+
+ cs = intel_ring_begin(rq, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
+ intel_ring_advance(rq, cs + num_dwords);
+
+ GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
+ return 0;
+}
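The wrap handling in intel_ring_begin() above folds two pieces of state into need_wrap: the number of tail bytes to pad with MI_NOOP and a low-bit flag saying that a wrap is required at all. A standalone sketch of that bookkeeping, assuming a power-of-two ring and the same effective_size convention (illustrative only, not the driver code):

#include <assert.h>
#include <stdio.h>

/*
 * Decide whether an emit of 'bytes' (plus 'reserved') fits before the
 * effective end of the ring, mirroring the need_wrap logic above.
 * Returns the number of pad bytes to fill with NOOPs, or 0 if no wrap.
 */
static unsigned int ring_need_wrap(unsigned int emit,
				   unsigned int size,
				   unsigned int effective_size,
				   unsigned int bytes,
				   unsigned int reserved)
{
	const unsigned int remain_usable = effective_size - emit;
	const unsigned int remain_actual = size - emit;

	assert(bytes + reserved <= effective_size);

	if (bytes + reserved <= remain_usable)
		return 0;		/* everything fits before the end */

	if (bytes > remain_usable)
		return remain_actual;	/* pad to the end, then wrap */

	return 0;			/* request fits; only the reserve wraps */
}

int main(void)
{
	/* 16 KiB ring, tail 128 bytes from the end, 256-byte request. */
	printf("pad bytes: %u\n",
	       ring_need_wrap(16384 - 128, 16384, 16384, 256, 0));
	return 0;
}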
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
new file mode 100644
index 000000000000..ea2839d9e044
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -0,0 +1,131 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_H
+#define INTEL_RING_H
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "i915_request.h"
+#include "intel_ring_types.h"
+
+struct intel_engine_cs;
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
+int intel_ring_cacheline_align(struct i915_request *rq);
+
+unsigned int intel_ring_update_space(struct intel_ring *ring);
+
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
+
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
+{
+ /* Dummy function.
+ *
+ * This serves as a placeholder in the code so that the reader
+ * can compare against the preceding intel_ring_begin() and
+ * check that the number of dwords emitted matches the space
+ * reserved for the command packet (i.e. the value passed to
+ * intel_ring_begin()).
+ */
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
+}
+
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
+
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
+{
+ /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
+ /*
+ * "Ring Buffer Use"
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
+ * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
+ * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ *
+ * We use ring->head as the last known location of the actual RING_HEAD;
+ * it may have advanced since, but in the worst case it is equal to
+ * ring->head, and so we should never program RING_TAIL to advance
+ * into the same cacheline as ring->head.
+ */
+#define cacheline(a) round_down(a, CACHELINE_BYTES)
+ GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
+ tail < ring->head);
+#undef cacheline
+}
+
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
+
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
+#endif /* INTEL_RING_H */
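The cacheline subtraction in __intel_ring_space() above is what enforces the head/tail separation rule quoted in assert_ring_tail_valid(): the ring is never reported as completely full, so RING_TAIL can never be programmed into the cacheline that RING_HEAD occupies. A worked sketch with concrete numbers, using the same power-of-two mask trick (not driver code):

#include <stdio.h>

#define CACHELINE_BYTES 64

/* Free bytes between tail and head, always leaving one cacheline spare. */
static unsigned int ring_space(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

int main(void)
{
	const unsigned int size = 4096;

	/* Empty ring: head == tail, but one cacheline is held back. */
	printf("empty:   %u\n", ring_space(0, 0, size));	/* 4032 */

	/* Tail one cacheline behind head: effectively full, space reads 0. */
	printf("full:    %u\n", ring_space(128, 64, size));	/* 0 */

	/* Wrapped case: head below tail, unsigned wraparound plus mask. */
	printf("wrapped: %u\n", ring_space(256, 3840, size));	/* 448 */
	return 0;
}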
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 12010e798868..bc44fe8e5ffa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -33,11 +33,15 @@
#include "gem/i915_gem_context.h"
+#include "gen6_ppgtt.h"
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -45,16 +49,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-unsigned int intel_ring_update_space(struct intel_ring *ring)
-{
- unsigned int space;
-
- space = __intel_ring_space(ring->head, ring->emit, ring->size);
-
- ring->space = space;
- return space;
-}
-
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
@@ -75,7 +69,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = cmd;
while (num_store_dw--) {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = 0;
}
*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -148,7 +143,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -156,7 +153,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -208,7 +207,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -241,7 +242,9 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
int ret;
@@ -299,7 +302,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
/* Finally we can flush and with it emit the breadcrumb */
@@ -309,7 +314,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -342,7 +348,9 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
/*
@@ -355,6 +363,12 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
*/
flags |= PIPE_CONTROL_CS_STALL;
+ /*
+ * CS_STALL suggests at least a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
@@ -373,13 +387,6 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
- /*
- * TLB invalidate requires a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
@@ -410,7 +417,7 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -424,8 +431,8 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -444,10 +451,11 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
@@ -489,14 +497,13 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
- struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = lower_32_bits(phys);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (INTEL_GEN(engine->i915) >= 4)
addr |= (phys >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
+ intel_uncore_write(engine->uncore, HWS_PGA, addr);
}
static struct page *status_page(struct intel_engine_cs *engine)
@@ -515,14 +522,13 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
- struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t hwsp;
/*
* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN(dev_priv, 7)) {
+ if (IS_GEN(engine->i915, 7)) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
@@ -544,14 +550,14 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN(dev_priv, 6)) {
+ } else if (IS_GEN(engine->i915, 6)) {
hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- I915_WRITE(hwsp, offset);
- POSTING_READ(hwsp);
+ intel_uncore_write(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
@@ -623,14 +629,15 @@ static bool stop_ring(struct intel_engine_cs *engine)
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ring *ring = engine->buffer;
+ struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
- GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
- engine->name, ring->head, ring->tail);
+ ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+ ring->head, ring->tail);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ /* WaClearRingBufHeadRegAtInit:ctg,elk */
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
@@ -662,19 +669,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
- ENGINE_READ(engine, RING_HEAD);
+ ENGINE_POSTING_READ(engine, RING_HEAD);
- /* Initialize the ring. This must happen _after_ we've cleared the ring
+ /*
+ * Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
- * register values. */
+ * register values.
+ */
ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (ENGINE_READ(engine, RING_HEAD))
- DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
- engine->name, ENGINE_READ(engine, RING_HEAD));
-
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
@@ -716,7 +720,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
}
/* Papering over lost _interrupts_ immediately following the restart */
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
out:
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
@@ -725,10 +729,47 @@ out:
static void reset_prepare(struct intel_engine_cs *engine)
{
- intel_engine_stop_cs(engine);
+ struct intel_uncore *uncore = engine->uncore;
+ const u32 base = engine->mmio_base;
+
+ /*
+ * We stop engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Even a gpu as modern as kbl can suffer a
+ * system hang if a batchbuffer is progressing when the reset is
+ * issued, regardless of the READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ENGINE_TRACE(engine, "\n");
+
+ if (intel_engine_stop_cs(engine))
+ ENGINE_TRACE(engine, "timed out on STOP_RING\n");
+
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
+
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+
+ /* Check acts as a post */
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ ENGINE_TRACE(engine, "ring head [%x] not parked\n",
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
-static void reset_ring(struct intel_engine_cs *engine, bool stalled)
+static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
struct i915_request *pos, *rq;
unsigned long flags;
@@ -781,14 +822,14 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
* If the request was innocent, we try to replay the request
* with the restored context.
*/
- i915_reset_request(rq, stalled);
+ __i915_request_reset(rq, stalled);
- GEM_BUG_ON(rq->ring != engine->buffer);
+ GEM_BUG_ON(rq->ring != engine->legacy.ring);
head = rq->head;
} else {
- head = engine->buffer->tail;
+ head = engine->legacy.ring->tail;
}
- engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+ engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -797,24 +838,10 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int intel_rcs_ctx_init(struct i915_request *rq)
-{
- int ret;
-
- ret = intel_engine_emit_ctx_wa(rq);
- if (ret != 0)
- return ret;
-
- ret = i915_gem_render_state_emit(rq);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int rcs_resume(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
@@ -826,13 +853,14 @@ static int rcs_resume(struct intel_engine_cs *engine)
* they are already accustomed to from before contexts were
* enabled.
*/
- if (IS_GEN(dev_priv, 4))
- I915_WRITE(ECOSKPD,
+ if (IS_GEN(i915, 4))
+ intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN_RANGE(dev_priv, 4, 6))
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+ if (IS_GEN_RANGE(i915, 4, 6))
+ intel_uncore_write(uncore, MI_MODE,
+ _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -840,38 +868,40 @@ static int rcs_resume(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+ if (IS_GEN_RANGE(i915, 6, 7))
+ intel_uncore_write(uncore, MI_MODE,
+ _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(GFX_MODE,
+ if (IS_GEN(i915, 6))
+ intel_uncore_write(uncore, GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN(dev_priv, 7))
- I915_WRITE(GFX_MODE_GEN7,
+ if (IS_GEN(i915, 7))
+ intel_uncore_write(uncore, GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_GEN(i915, 6)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
* policy is not supported."
*/
- I915_WRITE(CACHE_MODE_0,
+ intel_uncore_write(uncore, CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (IS_GEN_RANGE(i915, 6, 7))
+ intel_uncore_write(uncore, INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return xcs_resume(engine);
}
-static void cancel_requests(struct intel_engine_cs *engine)
+static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
@@ -894,6 +924,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
i915_request_submit(request);
+ wmb(); /* paranoid flush writes out of the WCB before mmio */
ENGINE_WRITE(request->engine, RING_TAIL,
intel_ring_set_tail(request->ring, request->tail));
@@ -901,8 +932,8 @@ static void i9xx_submit_request(struct i915_request *request)
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -924,8 +955,8 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -948,13 +979,13 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
- gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
- gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1015,14 +1046,14 @@ gen6_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
- gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1033,14 +1064,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
+ gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
- gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
+ gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}
static int
@@ -1071,9 +1102,11 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+ u32 *cs, cs_offset =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
- GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
+ GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1100,7 +1133,7 @@ i830_emit_bb_start(struct i915_request *rq,
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
*cs++ = cs_offset;
@@ -1146,179 +1179,9 @@ i915_emit_bb_start(struct i915_request *rq,
return 0;
}
-int intel_ring_pin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
- unsigned int flags;
- void *addr;
- int ret;
-
- if (atomic_fetch_inc(&ring->pin_count))
- return 0;
-
- ret = i915_timeline_pin(ring->timeline);
- if (ret)
- goto err_unpin;
-
- flags = PIN_GLOBAL;
-
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- if (vma->obj->stolen)
- flags |= PIN_MAPPABLE;
- else
- flags |= PIN_HIGH;
-
- ret = i915_vma_pin(vma, 0, 0, flags);
- if (unlikely(ret))
- goto err_timeline;
-
- if (i915_vma_is_map_and_fenceable(vma))
- addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto err_ring;
- }
-
- vma->obj->pin_global++;
-
- GEM_BUG_ON(ring->vaddr);
- ring->vaddr = addr;
-
- return 0;
-
-err_ring:
- i915_vma_unpin(vma);
-err_timeline:
- i915_timeline_unpin(ring->timeline);
-err_unpin:
- atomic_dec(&ring->pin_count);
- return ret;
-}
-
-void intel_ring_reset(struct intel_ring *ring, u32 tail)
-{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
- ring->tail = tail;
- ring->head = tail;
- ring->emit = tail;
- intel_ring_update_space(ring);
-}
-
-void intel_ring_unpin(struct intel_ring *ring)
-{
- if (!atomic_dec_and_test(&ring->pin_count))
- return;
-
- /* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->tail);
-
- GEM_BUG_ON(!ring->vma);
- if (i915_vma_is_map_and_fenceable(ring->vma))
- i915_vma_unpin_iomap(ring->vma);
- else
- i915_gem_object_unpin_map(ring->vma->obj);
-
- GEM_BUG_ON(!ring->vaddr);
- ring->vaddr = NULL;
-
- ring->vma->obj->pin_global--;
- i915_vma_unpin(ring->vma);
-
- i915_timeline_unpin(ring->timeline);
-}
-
-static struct i915_vma *
-intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
-{
- struct i915_address_space *vm = &dev_priv->ggtt.vm;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
-
- obj = i915_gem_object_create_stolen(dev_priv, size);
- if (!obj)
- obj = i915_gem_object_create_internal(dev_priv, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- /*
- * Mark ring buffers as read-only from GPU side (so no stray overwrites)
- * if supported by the platform's GGTT.
- */
- if (vm->has_read_only)
- i915_gem_object_set_readonly(obj);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
-}
-
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct i915_timeline *timeline,
- int size)
-{
- struct intel_ring *ring;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!is_power_of_2(size));
- GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&ring->ref);
- INIT_LIST_HEAD(&ring->request_list);
- ring->timeline = i915_timeline_get(timeline);
-
- ring->size = size;
- /* Workaround an erratum on the i830 which causes a hang if
- * the TAIL pointer points to within the last 2 cachelines
- * of the buffer.
- */
- ring->effective_size = size;
- if (IS_I830(engine->i915) || IS_I845G(engine->i915))
- ring->effective_size -= 2 * CACHELINE_BYTES;
-
- intel_ring_update_space(ring);
-
- vma = intel_ring_create_vma(engine->i915, size);
- if (IS_ERR(vma)) {
- kfree(ring);
- return ERR_CAST(vma);
- }
- ring->vma = vma;
-
- return ring;
-}
-
-void intel_ring_free(struct kref *ref)
-{
- struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
-
- i915_vma_close(ring->vma);
- i915_vma_put(ring->vma);
-
- i915_timeline_put(ring->timeline);
- kfree(ring);
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void ring_context_destroy(struct kref *ref)
@@ -1330,33 +1193,45 @@ static void ring_context_destroy(struct kref *ref)
if (ce->state)
__ring_context_fini(ce);
+ intel_context_fini(ce);
intel_context_free(ce);
}
-static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+static struct i915_address_space *vm_alias(struct intel_context *ce)
+{
+ struct i915_address_space *vm;
+
+ vm = ce->vm;
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static int __context_pin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
int err = 0;
- vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ vm = vm_alias(ce);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
return err;
}
-static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
- vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ vm = vm_alias(ce);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
- __context_unpin_ppgtt(ce->gem_context);
+ __context_unpin_ppgtt(ce);
}
static struct i915_vma *
@@ -1412,7 +1287,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_unpin_map(obj);
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1427,16 +1302,17 @@ err_obj:
return ERR_PTR(err);
}
-static int ring_context_pin(struct intel_context *ce)
+static int ring_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- int err;
/* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
+ GEM_BUG_ON(!engine->legacy.ring);
+ ce->ring = engine->legacy.ring;
+ ce->timeline = intel_timeline_get(engine->legacy.timeline);
- if (!ce->state && engine->context_size) {
+ GEM_BUG_ON(ce->state);
+ if (engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
@@ -1444,29 +1320,26 @@ static int ring_context_pin(struct intel_context *ce)
return PTR_ERR(vma);
ce->state = vma;
+ if (engine->default_state)
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
}
- err = intel_context_active_acquire(ce, PIN_HIGH);
- if (err)
- return err;
-
- err = __context_pin_ppgtt(ce->gem_context);
- if (err)
- goto err_active;
-
return 0;
+}
-err_active:
- intel_context_active_release(ce);
- return err;
+static int ring_context_pin(struct intel_context *ce)
+{
+ return __context_pin_ppgtt(ce);
}
static void ring_context_reset(struct intel_context *ce)
{
- intel_ring_reset(ce->ring, 0);
+ intel_ring_reset(ce->ring, ce->ring->emit);
}
static const struct intel_context_ops ring_context_ops = {
+ .alloc = ring_context_alloc,
+
.pin = ring_context_pin,
.unpin = ring_context_unpin,
@@ -1477,45 +1350,38 @@ static const struct intel_context_ops ring_context_ops = {
.destroy = ring_context_destroy,
};
-static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
+static int load_pd_dir(struct i915_request *rq,
+ const struct i915_ppgtt *ppgtt,
+ u32 valid)
{
const struct intel_engine_cs * const engine = rq->engine;
u32 *cs;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
- *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = valid;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = ppgtt->pd->base.ggtt_offset << 10;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int flush_pd_dir(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- u32 *cs;
+ *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Stall until the page table load is complete */
+ /* Stall until the page table load is complete? */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = i915_scratch_offset(rq->i915);
- *cs++ = MI_NOOP;
+ *cs++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
- return 0;
+
+ return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
static inline int mi_set_context(struct i915_request *rq, u32 flags)
@@ -1524,19 +1390,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
const int num_engines =
- IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+ IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
- flags |= MI_MM_SPACE_GTT;
- if (IS_HASWELL(i915))
- /* These flags are for resource streamer on HSW+ */
- flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
- else
- /* We need to save the extended state for powersaving modes */
- flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
@@ -1560,7 +1418,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *signaller;
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1601,7 +1459,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1614,7 +1472,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
i915_reg_t last_reg = {}; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1627,7 +1485,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1640,7 +1499,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
return 0;
}
-static int remap_l3(struct i915_request *rq, int slice)
+static int remap_l3_slice(struct i915_request *rq, int slice)
{
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
@@ -1668,120 +1527,97 @@ static int remap_l3(struct i915_request *rq, int slice)
return 0;
}
-static int switch_context(struct i915_request *rq)
+static int remap_l3(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *ctx = rq->gem_context;
- struct i915_address_space *vm =
- ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm;
- unsigned int unwind_mm = 0;
- u32 hw_flags = 0;
- int ret, i;
+ struct i915_gem_context *ctx = i915_request_gem_context(rq);
+ int i, err;
- GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+ if (!ctx || !ctx->remap_slice)
+ return 0;
- if (vm) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- int loops;
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(ctx->remap_slice & BIT(i)))
+ continue;
- /*
- * Baytail takes a little more convincing that it really needs
- * to reload the PD between contexts. It is not just a little
- * longer, as adding more stalls after the load_pd_dir (i.e.
- * adding a long loop around flush_pd_dir) is not as effective
- * as reloading the PD umpteen times. 32 is derived from
- * experimentation (gem_exec_parallel/fds) and has no good
- * explanation.
- */
- loops = 1;
- if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
- loops = 32;
-
- do {
- ret = load_pd_dir(rq, ppgtt);
- if (ret)
- goto err;
- } while (--loops);
-
- if (ppgtt->pd_dirty_engines & engine->mask) {
- unwind_mm = engine->mask;
- ppgtt->pd_dirty_engines &= ~unwind_mm;
- hw_flags = MI_FORCE_RESTORE;
- }
+ err = remap_l3_slice(rq, i);
+ if (err)
+ return err;
}
- if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS0);
+ ctx->remap_slice = 0;
+ return 0;
+}
- /*
- * The kernel context(s) is treated as pure scratch and is not
- * expected to retain any state (as we sacrifice it during
- * suspend and on resume it may be corrupted). This is ok,
- * as nothing actually executes using the kernel context; it
- * is purely used for flushing user contexts.
- */
- if (i915_gem_context_is_kernel(ctx))
- hw_flags = MI_RESTORE_INHIBIT;
+static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
+{
+ int ret;
- ret = mi_set_context(rq, hw_flags);
- if (ret)
- goto err_mm;
- }
+ if (!vm)
+ return 0;
- if (vm) {
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- goto err_mm;
+ ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
- ret = flush_pd_dir(rq);
- if (ret)
- goto err_mm;
+ /*
+ * Not only do we need a full barrier (post-sync write) after
+ * invalidating the TLBs, but we need to wait a little bit
+ * longer. Whether this is merely delaying us, or the
+ * subsequent flush is a key part of serialising with the
+ * post-sync op, this extra pass appears vital before a
+ * mm switch!
+ */
+ ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
+ if (ret)
+ return ret;
- /*
- * Not only do we need a full barrier (post-sync write) after
- * invalidating the TLBs, but we need to wait a little bit
- * longer. Whether this is merely delaying us, or the
- * subsequent flush is a key part of serialising with the
- * post-sync op, this extra pass appears vital before a
- * mm switch!
- */
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- goto err_mm;
+ return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+}
- ret = engine->emit_flush(rq, EMIT_FLUSH);
- if (ret)
- goto err_mm;
- }
+static int switch_context(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ int ret;
- if (ctx->remap_slice) {
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(ctx->remap_slice & BIT(i)))
- continue;
+ GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- ret = remap_l3(rq, i);
- if (ret)
- goto err_mm;
- }
+ ret = switch_mm(rq, vm_alias(ce));
+ if (ret)
+ return ret;
+
+ if (ce->state) {
+ u32 flags;
+
+ GEM_BUG_ON(rq->engine->id != RCS0);
- ctx->remap_slice = 0;
+ /* For resource streamer on HSW+ and power context elsewhere */
+ BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+ BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+ flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+ if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
+ flags |= MI_RESTORE_EXT_STATE_EN;
+ else
+ flags |= MI_RESTORE_INHIBIT;
+
+ ret = mi_set_context(rq, flags);
+ if (ret)
+ return ret;
}
- return 0;
+ ret = remap_l3(rq);
+ if (ret)
+ return ret;
-err_mm:
- if (unwind_mm)
- i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
-err:
- return ret;
+ return 0;
}
static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
- GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+ GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1803,140 +1639,6 @@ static int ring_request_alloc(struct i915_request *request)
return 0;
}
-static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
- struct i915_request *target;
- long timeout;
-
- if (intel_ring_update_space(ring) >= bytes)
- return 0;
-
- GEM_BUG_ON(list_empty(&ring->request_list));
- list_for_each_entry(target, &ring->request_list, ring_link) {
- /* Would completion of this request free enough space? */
- if (bytes <= __intel_ring_space(target->postfix,
- ring->emit, ring->size))
- break;
- }
-
- if (WARN_ON(&target->ring_link == &ring->request_list))
- return -ENOSPC;
-
- timeout = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (timeout < 0)
- return timeout;
-
- i915_request_retire_upto(target);
-
- intel_ring_update_space(ring);
- GEM_BUG_ON(ring->space < bytes);
- return 0;
-}
-
-u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
-{
- struct intel_ring *ring = rq->ring;
- const unsigned int remain_usable = ring->effective_size - ring->emit;
- const unsigned int bytes = num_dwords * sizeof(u32);
- unsigned int need_wrap = 0;
- unsigned int total_bytes;
- u32 *cs;
-
- /* Packets must be qword aligned. */
- GEM_BUG_ON(num_dwords & 1);
-
- total_bytes = bytes + rq->reserved_space;
- GEM_BUG_ON(total_bytes > ring->effective_size);
-
- if (unlikely(total_bytes > remain_usable)) {
- const int remain_actual = ring->size - ring->emit;
-
- if (bytes > remain_usable) {
- /*
- * Not enough space for the basic request. So need to
- * flush out the remainder and then wait for
- * base + reserved.
- */
- total_bytes += remain_actual;
- need_wrap = remain_actual | 1;
- } else {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So we don't need an immediate
- * wrap and only need to effectively wait for the
- * reserved size from the start of ringbuffer.
- */
- total_bytes = rq->reserved_space + remain_actual;
- }
- }
-
- if (unlikely(total_bytes > ring->space)) {
- int ret;
-
- /*
- * Space is reserved in the ringbuffer for finalising the
- * request, as that cannot be allowed to fail. During request
- * finalisation, reserved_space is set to 0 to stop the
- * overallocation and the assumption is that then we never need
- * to wait (which has the risk of failing with EINTR).
- *
- * See also i915_request_alloc() and i915_request_add().
- */
- GEM_BUG_ON(!rq->reserved_space);
-
- ret = wait_for_space(ring, total_bytes);
- if (unlikely(ret))
- return ERR_PTR(ret);
- }
-
- if (unlikely(need_wrap)) {
- need_wrap &= ~1;
- GEM_BUG_ON(need_wrap > ring->space);
- GEM_BUG_ON(ring->emit + need_wrap > ring->size);
- GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
-
- /* Fill the tail with MI_NOOP */
- memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
- ring->space -= need_wrap;
- ring->emit = 0;
- }
-
- GEM_BUG_ON(ring->emit > ring->size - bytes);
- GEM_BUG_ON(ring->space < bytes);
- cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
- ring->emit += bytes;
- ring->space -= bytes;
-
- return cs;
-}
-
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
static void gen6_bsd_submit_request(struct i915_request *request)
{
struct intel_uncore *uncore = request->engine->uncore;
@@ -2070,7 +1772,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
- engine->cancel_requests = cancel_requests;
engine->park = NULL;
engine->unpark = NULL;
@@ -2082,7 +1783,7 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = gen6_bsd_submit_request;
}
-static void ring_destroy(struct intel_engine_cs *engine)
+static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -2091,10 +1792,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
- intel_ring_unpin(engine->buffer);
- intel_ring_put(engine->buffer);
+ intel_ring_unpin(engine->legacy.ring);
+ intel_ring_put(engine->legacy.ring);
- kfree(engine);
+ intel_timeline_unpin(engine->legacy.timeline);
+ intel_timeline_put(engine->legacy.timeline);
}
static void setup_irq(struct intel_engine_cs *engine)
@@ -2125,11 +1827,10 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
- engine->destroy = ring_destroy;
-
engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
- engine->reset.reset = reset_ring;
+ engine->reset.rewind = reset_rewind;
+ engine->reset.cancel = reset_cancel;
engine->reset.finish = reset_finish;
engine->cops = &ring_context_ops;
@@ -2166,11 +1867,9 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(i915) >= 7) {
- engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
} else if (IS_GEN(i915, 6)) {
- engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
} else if (IS_GEN(i915, 5)) {
@@ -2242,6 +1941,10 @@ static void setup_vecs(struct intel_engine_cs *engine)
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
+ struct intel_timeline *timeline;
+ struct intel_ring *ring;
+ int err;
+
setup_common(engine);
switch (engine->class) {
@@ -2262,48 +1965,44 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
return -ENODEV;
}
- return 0;
-}
-
-int intel_ring_submission_init(struct intel_engine_cs *engine)
-{
- struct i915_timeline *timeline;
- struct intel_ring *ring;
- int err;
-
- timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
+ timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
- i915_timeline_put(timeline);
+ err = intel_timeline_pin(timeline);
+ if (err)
+ goto err_timeline;
+
+ ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err;
+ goto err_timeline_unpin;
}
err = intel_ring_pin(ring);
if (err)
goto err_ring;
- GEM_BUG_ON(engine->buffer);
- engine->buffer = ring;
+ GEM_BUG_ON(engine->legacy.ring);
+ engine->legacy.ring = ring;
+ engine->legacy.timeline = timeline;
- err = intel_engine_init_common(engine);
- if (err)
- goto err_unpin;
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
- GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = ring_release;
return 0;
-err_unpin:
- intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
+err_timeline_unpin:
+ intel_timeline_unpin(timeline);
+err_timeline:
+ intel_timeline_put(timeline);
err:
intel_engine_cleanup_common(engine);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
new file mode 100644
index 000000000000..d9f17f38e0cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_TYPES_H
+#define INTEL_RING_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+/*
+ * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+
+struct i915_vma;
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+#endif /* INTEL_RING_TYPES_H */
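The atomic_t pin_count in struct intel_ring above is what lets intel_ring_pin()/intel_ring_unpin() avoid a dedicated mutex: only the 0->1 transition maps the ring and only the 1->0 transition unmaps it. A minimal sketch of that first-pin/last-unpin pattern using C11 atomics in place of the kernel's atomic_fetch_inc()/atomic_dec_and_test() (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pin_count;

static void ring_pin(void)
{
	/* Only the first pinner (old value 0) does the expensive mapping. */
	if (atomic_fetch_add(&pin_count, 1) == 0)
		printf("map ring into the GGTT\n");
}

static void ring_unpin(void)
{
	/* Only the last unpinner (old value 1) tears the mapping down. */
	if (atomic_fetch_sub(&pin_count, 1) == 1)
		printf("unmap ring\n");
}

int main(void)
{
	ring_pin();	/* maps */
	ring_pin();	/* nested pin, no extra work */
	ring_unpin();	/* still pinned */
	ring_unpin();	/* unmaps */
	return 0;
}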
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
new file mode 100644
index 000000000000..d2a3d935d186
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -0,0 +1,1903 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_rps.h"
+#include "intel_sideband.h"
+#include "../../../platform/x86/intel_ips.h"
+
+/*
+ * Lock protecting IPS related data structures
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+static struct intel_gt *rps_to_gt(struct intel_rps *rps)
+{
+ return container_of(rps, struct intel_gt, rps);
+}
+
+static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->i915;
+}
+
+static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->uncore;
+}
+
+static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
+{
+ return mask & ~rps->pm_intrmsk_mbz;
+}
+
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
+{
+ u32 mask = 0;
+
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+ if (val > rps->min_freq_softlimit)
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ if (val < rps->max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+ mask &= rps->pm_events;
+
+ return rps_pm_sanitize_mask(rps, ~mask);
+}
+
+static void rps_reset_ei(struct intel_rps *rps)
+{
+ memset(&rps->ei, 0, sizeof(rps->ei));
+}
+
+static void rps_enable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps_reset_ei(rps);
+
+ if (IS_VALLEYVIEW(gt->i915))
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_enable_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+
+ set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
+}
+
+static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+{
+ gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
+}
+
+static void gen11_rps_reset_interrupts(struct intel_rps *rps)
+{
+ while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
+ ;
+}
+
+static void rps_reset_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (INTEL_GEN(gt->i915) >= 11)
+ gen11_rps_reset_interrupts(rps);
+ else
+ gen6_rps_reset_interrupts(rps);
+
+ rps->pm_iir = 0;
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void rps_disable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps->pm_events = 0;
+
+ set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_synchronize_irq(gt->i915);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+
+ rps_reset_interrupts(rps);
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static void gen5_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fmax, fmin, fstart;
+ u32 rgvmodectl;
+ int c_m, i;
+
+ if (i915->fsb_freq <= 3200)
+ c_m = 0;
+ else if (i915->fsb_freq <= 4800)
+ c_m = 1;
+ else
+ c_m = 2;
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
+ rps->ips.m = cparams[i].m;
+ rps->ips.c = cparams[i].c;
+ break;
+ }
+ }
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
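+ /*
+ * The bins encode delays, so the largest delay (fmax) is the minimum
+ * frequency and the smallest delay (fmin) the maximum.
+ */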
+ rps->min_freq = fmax;
+ rps->max_freq = fmin;
+
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+}
+
+static unsigned long
+__ips_chipset_val(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ unsigned long now = jiffies_to_msecs(jiffies), dt;
+ unsigned long result;
+ u64 total, delta;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ /*
+ * Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ dt = now - ips->last_time1;
+ if (dt <= 10)
+ return ips->chipset_power;
+
+ /* FIXME: handle per-counter overflow */
+ total = intel_uncore_read(uncore, DMIEC);
+ total += intel_uncore_read(uncore, DDREC);
+ total += intel_uncore_read(uncore, CSIEC);
+
+ delta = total - ips->last_count1;
+
+ result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
+
+ ips->last_count1 = total;
+ ips->last_time1 = now;
+
+ ips->chipset_power = result;
+
+ return result;
+}
+
+static unsigned long ips_mch_val(struct intel_uncore *uncore)
+{
+ unsigned int m, x, b;
+ u32 tsfs;
+
+ tsfs = intel_uncore_read(uncore, TSFS);
+ x = intel_uncore_read8(uncore, TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+ m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
+
+ return m * x / 127 - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
+{
+ const int vd = _pxvid_to_vd(pxvid);
+
+ if (INTEL_INFO(i915)->is_mobile)
+ return max(vd - 1125, 0);
+
+ return vd;
+}
+
+static void __gen5_ips_update(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ u64 now, delta, dt;
+ u32 count;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ now = ktime_get_raw_ns();
+ dt = now - ips->last_time2;
+ do_div(dt, NSEC_PER_MSEC);
+
+ /* Don't divide by 0 */
+ if (dt <= 10)
+ return;
+
+ count = intel_uncore_read(uncore, GFXEC);
+ delta = count - ips->last_count2;
+
+ ips->last_count2 = count;
+ ips->last_time2 = now;
+
+ /* More magic constants... */
+ ips->gfx_power = div_u64(delta * 1181, dt * 10);
+}
+
+static void gen5_rps_update(struct intel_rps *rps)
+{
+ spin_lock_irq(&mchdev_lock);
+ __gen5_ips_update(&rps->ips);
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ /* Invert the frequency bin into an ips delay */
+ val = rps->max_freq - val;
+ val = rps->min_freq + val;
+
+ rgvswctl =
+ (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) |
+ MEMCTL_SFCAVM;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ return div * 133333 / (pre << post);
+}
+
+static unsigned int init_emon(struct intel_uncore *uncore)
+{
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ intel_uncore_write(uncore, ECR, 0);
+ intel_uncore_posting_read(uncore, ECR);
+
+ /* Program energy weights for various events */
+ intel_uncore_write(uncore, SDEW, 0x15040d00);
+ intel_uncore_write(uncore, CSIEW0, 0x007f0000);
+ intel_uncore_write(uncore, CSIEW1, 0x1e220004);
+ intel_uncore_write(uncore, CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ intel_uncore_write(uncore, PEW(i), 0);
+ for (i = 0; i < 3; i++)
+ intel_uncore_write(uncore, DEW(i), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
+ unsigned int freq = intel_pxfreq(pxvidfreq);
+ unsigned int vid =
+ (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+ unsigned int val;
+
+ val = vid * vid * freq / 1000 * 255;
+ val /= 127 * 127 * 900;
+
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ intel_uncore_write(uncore, PXW(i),
+ pxw[i * 4 + 0] << 24 |
+ pxw[i * 4 + 1] << 16 |
+ pxw[i * 4 + 2] << 8 |
+ pxw[i * 4 + 3] << 0);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ intel_uncore_write(uncore, OGW0, 0);
+ intel_uncore_write(uncore, OGW1, 0);
+ intel_uncore_write(uncore, EG0, 0x00007f00);
+ intel_uncore_write(uncore, EG1, 0x0000000e);
+ intel_uncore_write(uncore, EG2, 0x000e0000);
+ intel_uncore_write(uncore, EG3, 0x68000300);
+ intel_uncore_write(uncore, EG4, 0x42000000);
+ intel_uncore_write(uncore, EG5, 0x00140031);
+ intel_uncore_write(uncore, EG6, 0);
+ intel_uncore_write(uncore, EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ intel_uncore_write(uncore, PXWL(i), 0);
+
+ /* Enable PMON + select events */
+ intel_uncore_write(uncore, ECR, 0x80000019);
+
+ return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
+}
+
+static bool gen5_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fstart, vstart;
+ u32 rgvmodectl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Enable temp reporting */
+ intel_uncore_write16(uncore, PMMISC,
+ intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1,
+ intel_uncore_read16(uncore, TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
+
+ intel_uncore_write(uncore, MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ mdelay(1);
+
+ gen5_rps_set(rps, rps->cur_freq);
+
+ rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
+ rps->ips.last_time1 = jiffies_to_msecs(jiffies);
+
+ rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+ rps->ips.last_time2 = ktime_get_raw_ns();
+
+ spin_unlock_irq(&mchdev_lock);
+
+ rps->ips.corr = init_emon(uncore);
+
+ return true;
+}
+
+static void gen5_rps_disable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ intel_uncore_write(uncore, MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ intel_uncore_write(uncore, DEIER,
+ intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIMR,
+ intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ gen5_rps_set(rps, rps->idle_freq);
+ mdelay(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
+ mdelay(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static u32 rps_limits(struct intel_rps *rps, u8 val)
+{
+ u32 limits;
+
+ /*
+ * Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+ limits = rps->max_freq_softlimit << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 14;
+ } else {
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
+ }
+
+ return limits;
+}
+
+static void rps_set_power(struct intel_rps *rps, int new_power)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
+
+ lockdep_assert_held(&rps->power.mutex);
+
+ if (new_power == rps->power.mode)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ ei_up = 16000;
+ threshold_up = 95;
+
+ /* Downclock if less than 85% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 85;
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ ei_up = 13000;
+ threshold_up = 90;
+
+ /* Downclock if less than 75% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 75;
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ ei_up = 10000;
+ threshold_up = 85;
+
+ /* Downclock if less than 60% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 60;
+ break;
+ }
+
+ /*
+ * Once byt can survive dynamic sw freq adjustments without a system
+ * hang, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(i915))
+ goto skip_hw_write;
+
+ set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+ set(uncore, GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
+
+ set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+ set(uncore, GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
+
+ set(uncore, GEN6_RP_CONTROL,
+ (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
+{
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(rps, new_power);
+ mutex_unlock(&rps->power.mutex);
+}
+
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
+{
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && rps->active)
+ rps_set_power(rps, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
+
+static int gen6_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 swreq;
+
+ if (INTEL_GEN(i915) >= 9)
+ swreq = GEN9_FREQUENCY(val);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ swreq = HSW_FREQUENCY(val);
+ else
+ swreq = (GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ set(uncore, GEN6_RPNSWREQ, swreq);
+
+ return 0;
+}
+
+static int vlv_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ vlv_punit_get(i915);
+ err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(i915);
+
+ return err;
+}
+
+static int rps_set(struct intel_rps *rps, u8 val, bool update)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ if (INTEL_GEN(i915) < 6)
+ return 0;
+
+ if (val == rps->last_freq)
+ return 0;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_rps_set(rps, val);
+ else
+ err = gen6_rps_set(rps, val);
+ if (err)
+ return err;
+
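+ /* update == false (e.g. when parking) skips re-tuning the power thresholds */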
+ if (update)
+ gen6_rps_set_thresholds(rps, val);
+ rps->last_freq = val;
+
+ return 0;
+}
+
+void intel_rps_unpark(struct intel_rps *rps)
+{
+ u8 freq;
+
+ if (!rps->enabled)
+ return;
+
+ /*
+ * Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ mutex_lock(&rps->lock);
+ rps->active = true;
+ freq = max(rps->cur_freq, rps->efficient_freq);
+ freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
+ intel_rps_set(rps, freq);
+ rps->last_adj = 0;
+ mutex_unlock(&rps->lock);
+
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_enable_interrupts(rps);
+
+ if (IS_GEN(rps_to_i915(rps), 5))
+ gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (!rps->enabled)
+ return;
+
+ if (INTEL_GEN(i915) >= 6)
+ rps_disable_interrupts(rps);
+
+ rps->active = false;
+ if (rps->last_freq <= rps->idle_freq)
+ return;
+
+ /*
+ * The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver are now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq, false);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+ struct intel_rps *rps = &rq->engine->gt->rps;
+ unsigned long flags;
+
+ if (i915_request_signaled(rq) || !rps->active)
+ return;
+
+ /* Serializes with i915_request_retire() */
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
+ set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+
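+ /* Only the first waiter, while below the boost freq, kicks the worker */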
+ if (!atomic_fetch_inc(&rps->num_waiters) &&
+ READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
+
+ atomic_inc(&rps->boosts);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
+
+ if (rps->active) {
+ err = rps_set(rps, val, true);
+ if (err)
+ return err;
+
+ /*
+ * Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ set(uncore,
+ GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
+
+ set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
+ }
+ }
+
+ rps->cur_freq = val;
+ return 0;
+}
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* All of these values are in units of 50MHz */
+
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
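+ /* Note that RP0 and RPn occupy opposite ends of RP_STATE_CAP on BXT */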
+ if (IS_GEN9_LP(i915)) {
+ u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 16) & 0xff;
+ }
+
+ /* hw_max = RP0 until we check for overclocking */
+ rps->max_freq = rps->rp0_freq;
+
+ rps->efficient_freq = rps->rp1_freq;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ u32 ddcc_status = 0;
+
+ if (sandybridge_pcode_read(i915,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status, NULL) == 0)
+ rps->efficient_freq =
+ clamp_t(u8,
+ (ddcc_status >> 8) & 0xff,
+ rps->min_freq,
+ rps->max_freq);
+ }
+
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ * the natural hardware unit for SKL.
+ */
+ rps->rp0_freq *= GEN9_FREQ_SCALER;
+ rps->rp1_freq *= GEN9_FREQ_SCALER;
+ rps->min_freq *= GEN9_FREQ_SCALER;
+ rps->max_freq *= GEN9_FREQ_SCALER;
+ rps->efficient_freq *= GEN9_FREQ_SCALER;
+ }
+}
+
+static bool rps_reset(struct intel_rps *rps)
+{
+ /* force a reset */
+ rps->power.mode = -1;
+ rps->last_freq = -1;
+
+ if (rps_set(rps, rps->min_freq, true)) {
+ DRM_ERROR("Failed to reset RPS to initial values\n");
+ return false;
+ }
+
+ rps->cur_freq = rps->min_freq;
+ return true;
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static bool gen9_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Program defaults and thresholds for RPS */
+ if (IS_GEN(i915, 9))
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(rps->rp1_freq));
+
+ /* 1 second timeout */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(i915, 1000000));
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+ return rps_reset(rps);
+}
+
+static bool gen8_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(rps->rp1_freq));
+
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ 100000000 / 128); /* 1 second timeout */
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static bool gen6_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Power down if completely idle for over 50ms */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static int chv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
+ break;
+ case 12:
+ /* (2 * 6) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+ /* Setting (2 * 8) Min RP0 for any other combination */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
+ break;
+ }
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static int chv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
+
+ return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+}
+
+static int chv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static u32 chv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static bool chv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ /* 1: Program defaults and thresholds for RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 2: Enable RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ /* Setting Fixed Bias */
+ vlv_punit_get(i915);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static int vlv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp1;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
+ rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int vlv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp0;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min_t(u32, rp0, 0xea);
+
+ return rp0;
+}
+
+static int vlv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rpe;
+
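+ /* RPe is split across two fuses: low 5 bits in FUSE_LO, the rest in FUSE_HI */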
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+ return rpe;
+}
+
+static int vlv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ /*
+ * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
+ * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
+ * a BYT-M B0 the above register contains 0xbf. Moreover when setting
+ * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
+ * to make sure it matches what Punit accepts.
+ */
+ return max_t(u32, val, 0xc0);
+}
+
+static bool vlv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ vlv_punit_get(i915);
+
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static unsigned long __ips_gfx_val(struct intel_ips *ips)
+{
+ struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
+
+ state1 = ext_v;
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ t = ips_mch_val(uncore);
+ if (t > 80)
+ corr = t * 2349 + 135940;
+ else if (t >= 50)
+ corr = t * 964 + 29317;
+ else /* < 50 */
+ corr = t * 301 + 1004;
+
+ corr = corr * 150142 * state1 / 10000 - 78642;
+ corr /= 100000;
+ corr2 = corr * ips->corr;
+
+ state2 = corr2 * state1 / 10000;
+ state2 /= 100; /* convert to mW */
+
+ __gen5_ips_update(ips);
+
+ return ips->gfx_power + state2;
+}
+
+void intel_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (IS_CHERRYVIEW(i915))
+ rps->enabled = chv_rps_enable(rps);
+ else if (IS_VALLEYVIEW(i915))
+ rps->enabled = vlv_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 9)
+ rps->enabled = gen9_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 8)
+ rps->enabled = gen8_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ rps->enabled = gen6_rps_enable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ rps->enabled = gen5_rps_enable(rps);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (!rps->enabled)
+ return;
+
+ WARN_ON(rps->max_freq < rps->min_freq);
+ WARN_ON(rps->idle_freq > rps->max_freq);
+
+ WARN_ON(rps->efficient_freq < rps->min_freq);
+ WARN_ON(rps->efficient_freq > rps->max_freq);
+}
+
+static void gen6_rps_disable(struct intel_rps *rps)
+{
+ set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+}
+
+void intel_rps_disable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->enabled = false;
+
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_disable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_disable(rps);
+}
+
+static int byt_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
+}
+
+static int byt_freq_opcode(struct intel_rps *rps, int val)
+{
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
+}
+
+static int chv_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
+}
+
+static int chv_freq_opcode(struct intel_rps *rps, int val)
+{
+ /* CHV needs even values */
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
+}
+
+int intel_gpu_freq(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+ GEN9_FREQ_SCALER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_gpu_freq(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_gpu_freq(rps, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
+}
+
+int intel_freq_opcode(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+ GT_FREQUENCY_MULTIPLIER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_freq_opcode(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_freq_opcode(rps, val);
+ else
+ return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+}
+
+static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->gpll_ref_freq =
+ vlv_get_cck_clock(i915, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ i915->czclk_freq);
+
+ DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+}
+
+static void vlv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = vlv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = vlv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = vlv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = vlv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+}
+
+static void chv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = chv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = chv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = chv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = chv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+ rps->min_freq) & 1,
+ "Odd GPU freq values\n");
+}
+
+static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
+{
+ ei->ktime = ktime_get_raw();
+ ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
+ ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
+}
+
+static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ const struct intel_rps_ei *prev = &rps->ei;
+ struct intel_rps_ei now;
+ u32 events = 0;
+
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+ return 0;
+
+ vlv_c0_read(uncore, &now);
+
+ if (prev->ktime) {
+ u64 time, c0;
+ u32 render, media;
+
+ time = ktime_us_delta(now.ktime, prev->ktime);
+
+ time *= rps_to_i915(rps)->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ render = now.render_c0 - prev->render_c0;
+ media = now.media_c0 - prev->media_c0;
+ c0 = max(render, media);
+ c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+ if (c0 > time * rps->power.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * rps->power.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
+ }
+
+ rps->ei = now;
+ return events;
+}
+
+static void rps_work(struct work_struct *work)
+{
+ struct intel_rps *rps = container_of(work, typeof(*rps), work);
+ struct intel_gt *gt = rps_to_gt(rps);
+ bool client_boost = false;
+ int new_freq, adj, min, max;
+ u32 pm_iir = 0;
+
+ spin_lock_irq(&gt->irq_lock);
+ pm_iir = fetch_and_zero(&rps->pm_iir);
+ client_boost = atomic_read(&rps->num_waiters);
+ spin_unlock_irq(&gt->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ goto out;
+
+ mutex_lock(&rps->lock);
+
+ pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
+
+ adj = rps->last_adj;
+ new_freq = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
+ if (client_boost)
+ max = rps->max_freq;
+ if (client_boost && new_freq < rps->boost_freq) {
+ new_freq = rps->boost_freq;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
+
+ if (new_freq >= rps->max_freq_softlimit)
+ adj = 0;
+ } else if (client_boost) {
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (rps->cur_freq > rps->efficient_freq)
+ new_freq = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_freq = rps->min_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
+
+ if (new_freq <= rps->min_freq_softlimit)
+ adj = 0;
+ } else { /* unknown event */
+ adj = 0;
+ }
+
+ rps->last_adj = adj;
+
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ new_freq += adj;
+ new_freq = clamp_t(int, new_freq, min, max);
+
+ if (intel_rps_set(rps, new_freq)) {
+ DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+ rps->last_adj = 0;
+ }
+
+ mutex_unlock(&rps->lock);
+
+out:
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ const u32 events = rps->pm_events & pm_iir;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_gt_pm_mask_irq(gt, events);
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ if (pm_iir & rps->pm_events) {
+ spin_lock(&gt->irq_lock);
+ gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
+ rps->pm_iir |= pm_iir & rps->pm_events;
+ schedule_work(&rps->work);
+ spin_unlock(&gt->irq_lock);
+ }
+
+ if (INTEL_GEN(gt->i915) >= 8)
+ return;
+
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+}
+
+void gen5_rps_irq_handler(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_freq;
+
+ spin_lock(&mchdev_lock);
+
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
+
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ new_freq = rps->cur_freq;
+ if (busy_up > max_avg)
+ new_freq++;
+ else if (busy_down < min_avg)
+ new_freq--;
+ new_freq = clamp(new_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ rps->cur_freq = new_freq;
+
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_rps_init_early(struct intel_rps *rps)
+{
+ mutex_init(&rps->lock);
+ mutex_init(&rps->power.mutex);
+
+ INIT_WORK(&rps->work, rps_work);
+
+ atomic_set(&rps->num_waiters, 0);
+}
+
+void intel_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rps_init(rps);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rps_init(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rps_init(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_init(rps);
+
+ /* Derive initial user preferences/limits from the hardware limits */
+ rps->max_freq_softlimit = rps->max_freq;
+ rps->min_freq_softlimit = rps->min_freq;
+
+ /* After setting max-softlimit, find the overclock max freq */
+ if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+ u32 params = 0;
+
+ sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
+ &params, NULL);
+ if (params & BIT(31)) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
+ rps->max_freq = params & 0xff;
+ }
+ }
+
+ /* Finally allow us to boost to max by default */
+ rps->boost_freq = rps->max_freq;
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+
+ rps->pm_intrmsk_mbz = 0;
+
+ /*
+ * SNB, IVB and HSW can hard hang, and VLV/CHV may hard hang, on a
+ * looping batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_GEN(i915) <= 7)
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
+
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 cagf;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ cagf = (rpstat >> 8) & 0xff;
+ else if (INTEL_GEN(i915) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+ return cagf;
+}
+
+static u32 read_cagf(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 freq;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_punit_get(i915);
+ freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+ } else {
+ freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+ }
+
+ return intel_rps_get_cagf(rps, freq);
+}
+
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+{
+ struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
+
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_gpu_freq(rps, read_cagf(rps));
+
+ return freq;
+}
+
+/* External interface for intel_ips.ko */
+
+static struct drm_i915_private __rcu *ips_mchdev;
+
+/**
+ * ips_ping_for_i915_load - notify intel_ips that i915 has loaded
+ *
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_rps_driver_register(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ /*
+ * We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values.
+ */
+ if (IS_GEN(gt->i915, 5)) {
+ GEM_BUG_ON(ips_mchdev);
+ rcu_assign_pointer(ips_mchdev, gt->i915);
+ ips_ping_for_i915_load();
+ }
+}
+
+void intel_rps_driver_unregister(struct intel_rps *rps)
+{
+ if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
+ rcu_assign_pointer(ips_mchdev, NULL);
+}
+
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = rcu_dereference(ips_mchdev);
+ if (!kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_ips *ips = &i915->gt.rps.ips;
+
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __ips_chipset_val(ips);
+ graphics_val = __ips_gfx_val(ips);
+ spin_unlock_irq(&mchdev_lock);
+ }
+
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit < rps->max_freq)
+ rps->max_freq_softlimit++;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit > rps->min_freq)
+ rps->max_freq_softlimit--;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *i915;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ rps->max_freq_softlimit = rps->min_freq;
+ ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
new file mode 100644
index 000000000000..dfa98194f3b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+
+struct i915_request;
+
+void intel_rps_init_early(struct intel_rps *rps);
+void intel_rps_init(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+#endif /* INTEL_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
new file mode 100644
index 000000000000..c2e279154bd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ u64 last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c, m;
+};
+
+struct intel_rps_ei {
+ ktime_t ktime;
+ u32 render_c0;
+ u32 media_c0;
+};
+
+struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
+ /*
+ * work and pm_iir are protected by gt->irq_lock
+ */
+ struct work_struct work;
+ bool enabled;
+ bool active;
+ u32 pm_iir;
+
+ /* PM interrupt bits that should never be masked */
+ u32 pm_intrmsk_mbz;
+ u32 pm_events;
+
+ /* Frequencies are stored in potentially platform dependent multiples.
+ * In other words, *_freq needs to be multiplied by X to be interesting.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 last_freq; /* Last SWREQ frequency */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 boost_freq; /* Frequency to request when wait boosting */
+ u8 idle_freq; /* Frequency to request when we are idle */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+ u8 rp1_freq; /* "less than" RP0 power/frequency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
+
+ int last_adj;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
+
+ atomic_t num_waiters;
+ atomic_t boosts;
+
+ /* manual wa residency calculations */
+ struct intel_rps_ei ei;
+ struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index a0756f006f5f..74f793423231 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -8,6 +8,19 @@
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice)
+{
+ sseu->max_slices = max_slices;
+ sseu->max_subslices = max_subslices;
+ sseu->max_eus_per_subslice = max_eus_per_subslice;
+
+ sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE);
+ sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE);
+}
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
@@ -19,10 +32,32 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
return total;
}
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+{
+ int i, offset = slice * sseu->ss_stride;
+ u32 mask = 0;
+
+ GEM_BUG_ON(slice >= sseu->max_slices);
+
+ for (i = 0; i < sseu->ss_stride; i++)
+ mask |= (u32)sseu->subslice_mask[offset + i] <<
+ i * BITS_PER_BYTE;
+
+ return mask;
+}
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride);
+}
+
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
{
- return hweight8(sseu->subslice_mask[slice]);
+ return hweight32(intel_sseu_get_subslices(sseu, slice));
}
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
@@ -49,7 +84,7 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
* cases which disable slices for functional, apart for performance
* reasons. So in this case we select a known stable subset.
*/
- if (!i915->perf.oa.exclusive_stream) {
+ if (!i915->perf.exclusive_stream) {
ctx_sseu = *req_sseu;
} else {
ctx_sseu = intel_sseu_from_device_info(sseu);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index b50d0401a4e2..d1d225204f09 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -10,15 +10,21 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include "i915_gem.h"
+
struct drm_i915_private;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
+#define GEN_MAX_EUS (16) /* TGL upper bound */
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES];
+ u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -33,11 +39,8 @@ struct sseu_dev_info {
u8 max_subslices;
u8 max_eus_per_subslice;
- /* We don't have more than 8 eus per subslice at the moment and as we
- * store eus enabled using bits, no need to multiply by eus per
- * subslice.
- */
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+ u8 ss_stride;
+ u8 eu_stride;
};
/*
@@ -63,12 +66,34 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
return value;
}
+static inline bool
+intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ u8 mask;
+ int ss_idx = subslice / BITS_PER_BYTE;
+
+ GEM_BUG_ON(ss_idx >= sseu->ss_stride);
+
+ mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
+
+ return mask & BIT(subslice % BITS_PER_BYTE);
+}
+
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice);
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask);
+
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index c311ce9c6f9d..87716529cd2f 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -8,34 +8,27 @@
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "i915_timeline.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
-struct i915_timeline_hwsp {
- struct i915_gt_timelines *gt;
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+
+struct intel_timeline_hwsp {
+ struct intel_gt *gt;
+ struct intel_gt_timelines *gt_timelines;
struct list_head free_link;
struct i915_vma *vma;
u64 free_bitmap;
};
-struct i915_timeline_cacheline {
- struct i915_active active;
- struct i915_timeline_hwsp *hwsp;
- void *vaddr;
-#define CACHELINE_BITS 6
-#define CACHELINE_FREE CACHELINE_BITS
-};
-
-static inline struct drm_i915_private *
-hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
-{
- return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
-}
-
-static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
+static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -45,7 +38,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
i915_gem_object_put(obj);
@@ -53,11 +46,10 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
}
static struct i915_vma *
-hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
+hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
- struct drm_i915_private *i915 = timeline->i915;
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline_hwsp *hwsp;
+ struct intel_gt_timelines *gt = &timeline->gt->timelines;
+ struct intel_timeline_hwsp *hwsp;
BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
@@ -75,16 +67,17 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp)
return ERR_PTR(-ENOMEM);
- vma = __hwsp_alloc(i915);
+ vma = __hwsp_alloc(timeline->gt);
if (IS_ERR(vma)) {
kfree(hwsp);
return vma;
}
vma->private = hwsp;
+ hwsp->gt = timeline->gt;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
- hwsp->gt = gt;
+ hwsp->gt_timelines = gt;
spin_lock_irq(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -102,9 +95,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
-static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
+static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
- struct i915_gt_timelines *gt = hwsp->gt;
+ struct intel_gt_timelines *gt = hwsp->gt_timelines;
unsigned long flags;
spin_lock_irqsave(&gt->hwsp_lock, flags);
@@ -126,7 +119,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
-static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -135,12 +128,13 @@ static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
i915_active_fini(&cl->active);
- kfree(cl);
+ kfree_rcu(cl, rcu);
}
+__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
- struct i915_timeline_cacheline *cl =
+ struct intel_timeline_cacheline *cl =
container_of(active, typeof(*cl), active);
i915_vma_unpin(cl->hwsp->vma);
@@ -148,10 +142,19 @@ static void __cacheline_retire(struct i915_active *active)
__idle_cacheline_free(cl);
}
-static struct i915_timeline_cacheline *
-cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+static int __cacheline_active(struct i915_active *active)
{
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl =
+ container_of(active, typeof(*cl), active);
+
+ __i915_vma_pin(cl->hwsp->vma);
+ return 0;
+}
+
+static struct intel_timeline_cacheline *
+cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+ struct intel_timeline_cacheline *cl;
void *vaddr;
GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
@@ -170,24 +173,24 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
- i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+ i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
return cl;
}
-static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl)
{
- if (cl && i915_active_acquire(&cl->active))
- __i915_vma_pin(cl->hwsp->vma);
+ if (cl)
+ i915_active_acquire(&cl->active);
}
-static void cacheline_release(struct i915_timeline_cacheline *cl)
+static void cacheline_release(struct intel_timeline_cacheline *cl)
{
if (cl)
i915_active_release(&cl->active);
}
-static void cacheline_free(struct i915_timeline_cacheline *cl)
+static void cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
@@ -196,29 +199,22 @@ static void cacheline_free(struct i915_timeline_cacheline *cl)
__idle_cacheline_free(cl);
}
-int i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *timeline,
- struct i915_vma *hwsp)
+int intel_timeline_init(struct intel_timeline *timeline,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp)
{
void *vaddr;
- /*
- * Ideally we want a set of engines on a single leaf as we expect
- * to mostly be tracking synchronisation between engines. It is not
- * a huge issue if this is not the case, but we may want to mitigate
- * any page crossing penalties if they become an issue.
- *
- * Called during early_init before we know how many engines there are.
- */
- BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+ kref_init(&timeline->kref);
+ atomic_set(&timeline->pin_count, 0);
+
+ timeline->gt = gt;
- timeline->i915 = i915;
- timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL;
if (!hwsp) {
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
@@ -253,7 +249,7 @@ int i915_timeline_init(struct drm_i915_private *i915,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request);
+ INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -261,72 +257,22 @@ int i915_timeline_init(struct drm_i915_private *i915,
return 0;
}
-void i915_timelines_init(struct drm_i915_private *i915)
+void intel_gt_init_timelines(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
-
- mutex_init(&gt->mutex);
- INIT_LIST_HEAD(&gt->active_list);
+ struct intel_gt_timelines *timelines = &gt->timelines;
- spin_lock_init(&gt->hwsp_lock);
- INIT_LIST_HEAD(&gt->hwsp_free_list);
+ spin_lock_init(&timelines->lock);
+ INIT_LIST_HEAD(&timelines->active_list);
- /* via i915_gem_wait_for_idle() */
- i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+ spin_lock_init(&timelines->hwsp_lock);
+ INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-static void timeline_add_to_active(struct i915_timeline *tl)
+void intel_timeline_fini(struct intel_timeline *timeline)
{
- struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
-
- mutex_lock(&gt->mutex);
- list_add(&tl->link, &gt->active_list);
- mutex_unlock(&gt->mutex);
-}
-
-static void timeline_remove_from_active(struct i915_timeline *tl)
-{
- struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
-
- mutex_lock(&gt->mutex);
- list_del(&tl->link);
- mutex_unlock(&gt->mutex);
-}
-
-/**
- * i915_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_timelines_park(struct drm_i915_private *i915)
-{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *timeline;
-
- mutex_lock(&gt->mutex);
- list_for_each_entry(timeline, &gt->active_list, link) {
- /*
- * All known fences are completed so we can scrap
- * the current sync point tracking and start afresh,
- * any attempt to wait upon a previous sync point
- * will be skipped as the fence was signaled.
- */
- i915_syncmap_free(&timeline->sync);
- }
- mutex_unlock(&gt->mutex);
-}
-
-void i915_timeline_fini(struct i915_timeline *timeline)
-{
- GEM_BUG_ON(timeline->pin_count);
+ GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
-
- i915_syncmap_free(&timeline->sync);
+ GEM_BUG_ON(timeline->retire);
if (timeline->hwsp_cacheline)
cacheline_free(timeline->hwsp_cacheline);
@@ -336,73 +282,125 @@ void i915_timeline_fini(struct i915_timeline *timeline)
i915_vma_put(timeline->hwsp_ggtt);
}
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
- struct i915_vma *global_hwsp)
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
{
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
int err;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = i915_timeline_init(i915, timeline, global_hwsp);
+ err = intel_timeline_init(timeline, gt, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);
}
- kref_init(&timeline->kref);
-
return timeline;
}
-int i915_timeline_pin(struct i915_timeline *tl)
+int intel_timeline_pin(struct intel_timeline *tl)
{
int err;
- if (tl->pin_count++)
+ if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- GEM_BUG_ON(!tl->pin_count);
err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto unpin;
+ return err;
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
- timeline_add_to_active(tl);
+ if (atomic_fetch_inc(&tl->pin_count)) {
+ cacheline_release(tl->hwsp_cacheline);
+ __i915_vma_unpin(tl->hwsp_ggtt);
+ }
return 0;
+}
-unpin:
- tl->pin_count = 0;
- return err;
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ /*
+ * Pretend we are serialised by the timeline->mutex.
+ *
+ * While generally true, there are a few exceptions to the rule
+ * for the engine->kernel_context being used to manage power
+ * transitions. As the engine_park may be called from under any
+ * timeline, it uses the power mutex as a global serialisation
+ * lock to prevent any other request entering its timeline.
+ *
+ * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
+ *
+ * However, intel_gt_retire_request() does not know which engine
+ * it is retiring along and so cannot partake in the engine-pm
+ * barrier, and there we use the tl->active_count as a means to
+ * pin the timeline in the active_list while the locks are dropped.
+ * Ergo, as that is outside of the engine-pm barrier, we need to
+ * use atomic to manipulate tl->active_count.
+ */
+ lockdep_assert_held(&tl->mutex);
+
+ if (atomic_add_unless(&tl->active_count, 1, 0))
+ return;
+
+ spin_lock(&timelines->lock);
+ if (!atomic_fetch_inc(&tl->active_count))
+ list_add_tail(&tl->link, &timelines->active_list);
+ spin_unlock(&timelines->lock);
}
-static u32 timeline_advance(struct i915_timeline *tl)
+void intel_timeline_exit(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ /* See intel_timeline_enter() */
+ lockdep_assert_held(&tl->mutex);
+
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ if (atomic_add_unless(&tl->active_count, -1, 1))
+ return;
+
+ spin_lock(&timelines->lock);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+ spin_unlock(&timelines->lock);
+
+ /*
+ * Since this timeline is idle, all barriers upon which we were waiting
+ * must also be complete and so we can discard the last used barriers
+ * without loss of information.
+ */
+ i915_syncmap_free(&tl->sync);
+}
+
+static u32 timeline_advance(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
-static void timeline_rollback(struct i915_timeline *tl)
+static void timeline_rollback(struct intel_timeline *tl)
{
tl->seqno -= 1 + tl->has_initial_breadcrumb;
}
static noinline int
-__i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno)
+__intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
{
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
unsigned int cacheline;
struct i915_vma *vma;
void *vaddr;
@@ -452,8 +450,7 @@ __i915_timeline_get_seqno(struct i915_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active,
- tl->fence_context, rq);
+ err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
if (err)
goto err_cacheline;
@@ -488,92 +485,89 @@ err_rollback:
return err;
}
-int i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno)
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
{
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
if (unlikely(!*seqno && tl->hwsp_cacheline))
- return __i915_timeline_get_seqno(tl, rq, seqno);
+ return __intel_timeline_get_seqno(tl, rq, seqno);
return 0;
}
-static int cacheline_ref(struct i915_timeline_cacheline *cl,
+static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
- return i915_active_ref(&cl->active, rq->fence.context, rq);
+ return i915_active_add_request(&cl->active, rq);
}
-int i915_timeline_read_hwsp(struct i915_request *from,
- struct i915_request *to,
- u32 *hwsp)
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
{
- struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
- struct i915_timeline *tl = from->timeline;
+ struct intel_timeline_cacheline *cl;
int err;
- GEM_BUG_ON(to->timeline == tl);
-
- mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
- err = i915_request_completed(from);
- if (!err)
- err = cacheline_ref(cl, to);
- if (!err) {
- if (likely(cl == tl->hwsp_cacheline)) {
- *hwsp = tl->hwsp_offset;
- } else { /* across a seqno wrap, recover the original offset */
- *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
- ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
- CACHELINE_BYTES;
- }
- }
- mutex_unlock(&tl->mutex);
+ GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));
+
+ rcu_read_lock();
+ cl = rcu_dereference(from->hwsp_cacheline);
+ if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
+ goto unlock; /* seqno wrapped and completed! */
+ if (unlikely(i915_request_completed(from)))
+ goto release;
+ rcu_read_unlock();
+
+ err = cacheline_ref(cl, to);
+ if (err)
+ goto out;
+ *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+ ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
+
+out:
+ i915_active_release(&cl->active);
return err;
+
+release:
+ i915_active_release(&cl->active);
+unlock:
+ rcu_read_unlock();
+ return 1;
}
-void i915_timeline_unpin(struct i915_timeline *tl)
+void intel_timeline_unpin(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
- if (--tl->pin_count)
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ if (!atomic_dec_and_test(&tl->pin_count))
return;
- timeline_remove_from_active(tl);
cacheline_release(tl->hwsp_cacheline);
- /*
- * Since this timeline is idle, all bariers upon which we were waiting
- * must also be complete and so we can discard the last used barriers
- * without loss of information.
- */
- i915_syncmap_free(&tl->sync);
-
__i915_vma_unpin(tl->hwsp_ggtt);
}
-void __i915_timeline_free(struct kref *kref)
+void __intel_timeline_free(struct kref *kref)
{
- struct i915_timeline *timeline =
+ struct intel_timeline *timeline =
container_of(kref, typeof(*timeline), kref);
- i915_timeline_fini(timeline);
- kfree(timeline);
+ intel_timeline_fini(timeline);
+ kfree_rcu(timeline, rcu);
}
-void i915_timelines_fini(struct drm_i915_private *i915)
+void intel_gt_fini_timelines(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
-
- GEM_BUG_ON(!list_empty(&gt->active_list));
- GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));
+ struct intel_gt_timelines *timelines = &gt->timelines;
- mutex_destroy(&gt->mutex);
+ GEM_BUG_ON(!list_empty(&timelines->active_list));
+ GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_timeline.c"
+#include "gt/selftests/mock_timeline.c"
+#include "gt/selftest_timeline.c"
#endif
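
The rewritten intel_timeline_pin() above replaces the locked pin_count++ with a lockless two-phase scheme: a fast path that only bumps the count if it is already non-zero, an unlocked slow path that performs the expensive vma pin, and a publishing increment that drops the duplicate work if another thread raced in first. Below is a minimal, hypothetical userspace sketch of that idiom using C11 atomics; obj_pin(), pin_backing() and the other names are invented for illustration and are not i915 code.

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int pin_count;		/* 0 == unpinned */
	atomic_int backing_pins;	/* stands in for the i915_vma pin count */
};

static void pin_backing(struct obj *o)   { atomic_fetch_add(&o->backing_pins, 1); }
static void unpin_backing(struct obj *o) { atomic_fetch_sub(&o->backing_pins, 1); }

static int obj_pin(struct obj *o)
{
	int old = atomic_load(&o->pin_count);

	/* Fast path: take another reference only if already pinned (add-unless-zero). */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->pin_count, &old, old + 1))
			return 0;
	}

	/* Slow path: expensive setup done without holding any lock. */
	pin_backing(o);

	/* Publish; if another slow path won the race, release our duplicate. */
	if (atomic_fetch_add(&o->pin_count, 1) != 0)
		unpin_backing(o);

	return 0;
}

static void obj_unpin(struct obj *o)
{
	/* The last unpin releases the backing resource. */
	if (atomic_fetch_sub(&o->pin_count, 1) == 1)
		unpin_backing(o);
}

int main(void)
{
	struct obj o = { 0 };

	obj_pin(&o);
	obj_pin(&o);
	obj_unpin(&o);
	obj_unpin(&o);
	printf("pin_count=%d backing_pins=%d\n",
	       atomic_load(&o.pin_count), atomic_load(&o.backing_pins));
	return 0;
}

Because nothing is published until the setup has succeeded, the error path no longer needs the old "unpin:" label that reset pin_count to zero.
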
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
new file mode 100644
index 000000000000..f5b7eade3809
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
+
+#include <linux/lockdep.h>
+
+#include "i915_active.h"
+#include "i915_syncmap.h"
+#include "gt/intel_timeline_types.h"
+
+int intel_timeline_init(struct intel_timeline *tl,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp);
+void intel_timeline_fini(struct intel_timeline *tl);
+
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+
+static inline struct intel_timeline *
+intel_timeline_get(struct intel_timeline *timeline)
+{
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
+void __intel_timeline_free(struct kref *kref);
+static inline void intel_timeline_put(struct intel_timeline *timeline)
+{
+ kref_put(&timeline->kref, __intel_timeline_free);
+}
+
+static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_set(&tl->sync, context, seqno);
+}
+
+static inline int intel_timeline_sync_set(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+}
+
+static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_is_later(&tl->sync, context, seqno);
+}
+
+static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+}
+
+int intel_timeline_pin(struct intel_timeline *tl);
+void intel_timeline_enter(struct intel_timeline *tl);
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
+void intel_timeline_exit(struct intel_timeline *tl);
+void intel_timeline_unpin(struct intel_timeline *tl);
+
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
+void intel_gt_init_timelines(struct intel_gt *gt);
+void intel_gt_fini_timelines(struct intel_gt *gt);
+
+#endif
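
The __intel_timeline_sync_set()/__intel_timeline_sync_is_later() helpers above record, per fence context, the most recent seqno already awaited so that older sync points can be skipped without emitting another await. As a rough illustration only, the sketch below models the same bookkeeping with a flat table and a wrap-tolerant seqno comparison; the real driver uses the compressed i915_syncmap radix tree, and the sync_table/sync_set names here are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CONTEXTS 64	/* illustrative bound, not an i915 limit */

struct sync_table {
	uint64_t context[MAX_CONTEXTS];
	uint32_t seqno[MAX_CONTEXTS];
	unsigned int count;
};

/* Seqno comparison that tolerates wraparound, as dma_fence seqnos do. */
static bool seqno_is_later(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

static bool sync_is_later(struct sync_table *t, uint64_t ctx, uint32_t seqno)
{
	for (unsigned int i = 0; i < t->count; i++)
		if (t->context[i] == ctx)
			return seqno_is_later(t->seqno[i], seqno);
	return false;
}

static void sync_set(struct sync_table *t, uint64_t ctx, uint32_t seqno)
{
	for (unsigned int i = 0; i < t->count; i++) {
		if (t->context[i] == ctx) {
			if (seqno_is_later(seqno, t->seqno[i]))
				t->seqno[i] = seqno;
			return;
		}
	}
	if (t->count < MAX_CONTEXTS) {
		t->context[t->count] = ctx;
		t->seqno[t->count] = seqno;
		t->count++;
	}
}

int main(void)
{
	struct sync_table t;

	memset(&t, 0, sizeof(t));
	sync_set(&t, 0x1234, 10);

	/* An await on seqno 8 of the same context is already satisfied. */
	printf("skip await on (0x1234, 8)?  %s\n",
	       sync_is_later(&t, 0x1234, 8) ? "yes" : "no");
	printf("skip await on (0x1234, 12)? %s\n",
	       sync_is_later(&t, 0x1234, 12) ? "yes" : "no");
	return 0;
}
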
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
new file mode 100644
index 000000000000..02181c5020db
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -0,0 +1,100 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_TIMELINE_TYPES_H__
+#define __I915_TIMELINE_TYPES_H__
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct i915_vma;
+struct i915_syncmap;
+struct intel_gt;
+struct intel_timeline_hwsp;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 seqno;
+
+ struct mutex mutex; /* protects the flow of requests */
+
+ /*
+ * pin_count and active_count track essentially the same thing:
+ * How many requests are in flight or may be under construction.
+ *
+ * We need two distinct counters so that we can assign different
+ * lifetimes to the events for different use-cases. For example,
+ * we want to permanently keep the timeline pinned for the kernel
+ * context so that we can issue requests at any time without having
+ * to acquire space in the GGTT. However, we want to keep tracking
+ * the activity (to be able to detect when we become idle) along that
+ * permanently pinned timeline and so end up requiring two counters.
+ *
+ * Note that the active_count is protected by the intel_timeline.mutex,
+ * but the pin_count is protected by a combination of serialisation
+ * from the intel_context caller plus internal atomicity.
+ */
+ atomic_t pin_count;
+ atomic_t active_count;
+
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
+ struct intel_timeline_cacheline *hwsp_cacheline;
+
+ bool has_initial_breadcrumb;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /*
+ * Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_active_fence_get(), or manage the RCU
+ * protection themselves (cf the i915_active_fence API).
+ */
+ struct i915_active_fence last_request;
+
+ /** A chain of completed timelines ready for early retirement. */
+ struct intel_timeline *retire;
+
+ /**
+ * We track the most recent seqno that we wait on in every context so
+ * that we only have to emit a new await and dependency on a more
+ * recent sync point. As the contexts may be executed out-of-order, we
+ * have to track each individually and cannot rely on an absolute
+ * global_seqno. When we know that all tracked fences are completed
+ * (i.e. when the driver is idle), we know that the syncmap is
+ * redundant and we can discard it without loss of generality.
+ */
+ struct i915_syncmap *sync;
+
+ struct list_head link;
+ struct intel_gt *gt;
+
+ struct kref kref;
+ struct rcu_head rcu;
+};
+
+struct intel_timeline_cacheline {
+ struct i915_active active;
+
+ struct intel_timeline_hwsp *hwsp;
+ void *vaddr;
+
+ struct rcu_head rcu;
+};
+
+#endif /* __I915_TIMELINE_TYPES_H__ */
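
intel_timeline_cacheline.vaddr above is not a plain pointer: intel_timeline.c packs the cacheline index (and the CACHELINE_FREE flag) into its low CACHELINE_BITS via page_pack_bits()/ptr_set_bit(), relying on the mapping being at least CACHELINE_BYTES aligned. A hypothetical, self-contained userspace model of that packing is sketched below; it is not the kernel's implementation of those macros.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE_BITS	6
#define CACHELINE_BYTES	(1u << CACHELINE_BITS)

static void *pack_ptr_bits(void *ptr, unsigned int bits)
{
	uintptr_t v = (uintptr_t)ptr;

	/* Alignment guarantees the low CACHELINE_BITS are free for reuse. */
	assert((v & (CACHELINE_BYTES - 1)) == 0);
	assert(bits < CACHELINE_BYTES);
	return (void *)(v | bits);
}

static void *unpack_ptr(void *packed)
{
	return (void *)((uintptr_t)packed & ~(uintptr_t)(CACHELINE_BYTES - 1));
}

static unsigned int unpack_bits(void *packed)
{
	return (unsigned int)((uintptr_t)packed & (CACHELINE_BYTES - 1));
}

int main(void)
{
	void *page = aligned_alloc(4096, 4096);	/* stands in for the HWSP map */
	void *packed;

	if (!page)
		return 1;

	packed = pack_ptr_bits(page, 37);	/* cacheline index 37 */
	printf("pointer recovered: %d, cacheline index: %u\n",
	       unpack_ptr(packed) == page, unpack_bits(packed));

	free(page);
	return 0;
}
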
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 98dfb086320f..4e292d4bf7b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,9 @@
#include "i915_drv.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/**
@@ -49,9 +52,10 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_init_start(struct i915_wa_list *wal, const char *name)
+static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
wal->name = name;
+ wal->engine_name = engine_name;
}
#define WA_LIST_CHUNK (1 << 4)
@@ -73,8 +77,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
if (!wal->count)
return;
- DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
- wal->wa_count, wal->name);
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
+ wal->wa_count, wal->name, wal->engine_name);
}
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
@@ -143,21 +147,27 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
}
-static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val, u32 read_mask)
{
struct i915_wa wa = {
.reg = reg,
.mask = mask,
.val = val,
- .read = mask,
+ .read = read_mask,
};
_wa_add(wal, &wa);
}
static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
+{
+ wa_add(wal, reg, mask, val, mask);
+}
+
+static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
@@ -175,19 +185,6 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
-static void
-ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
-{
- struct i915_wa wa = {
- .reg = reg,
- .mask = mask,
- .val = val,
- /* Bonkers HW, skip verifying */
- };
-
- _wa_add(wal, &wa);
-}
-
#define WA_SET_BIT_MASKED(addr, mask) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
@@ -257,7 +254,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaDisableDopClockGating:bdw
*
- * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+ * Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
@@ -308,11 +305,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@@ -536,12 +528,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
GEN8_ERRDETBCTRL);
- /* WaDisableBankHangMode:icl */
- wa_write(wal,
- GEN8_L3CNTLREG,
- intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
- GEN8_ERRDETBCTRL);
-
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
@@ -586,6 +572,29 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}
+static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ u32 val;
+
+ /* Wa_1409142259:tgl */
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+
+ /* Wa_1604555607:tgl */
+ val = intel_uncore_read(engine->uncore, FF_MODE2);
+ val &= ~FF_MODE2_TDS_TIMER_MASK;
+ val |= FF_MODE2_TDS_TIMER_128;
+ /*
+ * FIXME: the FF_MODE2 register is not readable until TGL B0. We can
+ * enable verification of the WA from later steppings, which make
+ * FF_MODE2 readable.
+ */
+ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
+ IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
+ FF_MODE2_TDS_TIMER_MASK);
+}
+
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
struct i915_wa_list *wal,
@@ -596,9 +605,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
return;
- wa_init_start(wal, name);
+ wa_init_start(wal, name, engine->name);
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 11))
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
@@ -766,7 +777,10 @@ static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- u32 mcr_slice_subslice_mask;
+ unsigned int slice, subslice;
+ u32 l3_en, mcr, mcr_mask;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 10);
/*
* WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
@@ -774,42 +788,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* the case, we might need to program MCR select to a valid L3Bank
* by default, to make sure we correctly read certain registers
* later on (in the range 0xB100 - 0xB3FF).
- * This might be incompatible with
- * WaProgramMgsrForCorrectSliceSpecificMmioReads.
- * Fortunately, this should not happen in production hardware, so
- * we only assert that this is the case (instead of implementing
- * something more complex that requires checking the range of every
- * MMIO read).
- */
- if (INTEL_GEN(i915) >= 10 &&
- is_power_of_2(sseu->slice_mask)) {
- /*
- * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
- * enabled subslice, no need to redirect MCR packet
- */
- u32 slice = fls(sseu->slice_mask);
- u32 fuse3 =
- intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
- u8 ss_mask = sseu->subslice_mask[slice];
-
- u8 enabled_mask = (ss_mask | ss_mask >>
- GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
- u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
-
- /*
- * Production silicon should have matched L3Bank and
- * subslice enabled
- */
- WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
- }
-
- if (INTEL_GEN(i915) >= 11)
- mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
- GEN11_MCR_SUBSLICE_MASK;
- else
- mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
- GEN8_MCR_SUBSLICE_MASK;
- /*
+ *
* WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
* Before any MMIO read into slice/subslice specific registers, MCR
* packet control register needs to be programmed to point to any
@@ -819,11 +798,50 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* are consistent across s/ss in almost all cases. In the rare
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
+ *
+ * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
+ * to which subslice, or to which L3 bank, the respective mmio reads
+ * will go, we have to find a common index which works for both
+ * accesses.
+ *
+ * The case where we cannot find a common index fortunately should not
+ * happen on production hardware, so we only emit a warning instead of
+ * implementing something more complex that would require checking the
+ * range of every MMIO read.
*/
- wa_write_masked_or(wal,
- GEN8_MCR_SELECTOR,
- mcr_slice_subslice_mask,
- intel_calculate_mcr_s_ss_select(i915));
+
+ if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
+ u32 l3_fuse =
+ intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
+ GEN10_L3BANK_MASK;
+
+ DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+ l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
+ } else {
+ l3_en = ~0;
+ }
+
+ slice = fls(sseu->slice_mask) - 1;
+ subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
+ if (!subslice) {
+ DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+ intel_sseu_get_subslices(sseu, slice), l3_en);
+ subslice = fls(l3_en);
+ WARN_ON(!subslice);
+ }
+ subslice--;
+
+ if (INTEL_GEN(i915) >= 11) {
+ mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ } else {
+ mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ }
+
+ DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+
+ wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
@@ -897,12 +915,35 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+
+ /* Wa_1607087056:icl */
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+}
+
+static void
+tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ /* Wa_1409420604:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
+
+ /* Wa_1409180338:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 11))
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
@@ -926,7 +967,7 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
- wa_init_start(wal, "GT");
+ wa_init_start(wal, "GT", "global");
gt_init_workarounds(i915, wal);
wa_init_finish(wal);
}
@@ -990,9 +1031,9 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
spin_unlock_irqrestore(&uncore->lock, flags);
}
-void intel_gt_apply_workarounds(struct drm_i915_private *i915)
+void intel_gt_apply_workarounds(struct intel_gt *gt)
{
- wa_list_apply(&i915->uncore, &i915->gt_wa_list);
+ wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}
static bool wa_list_verify(struct intel_uncore *uncore,
@@ -1011,10 +1052,23 @@ static bool wa_list_verify(struct intel_uncore *uncore,
return ok;
}
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
- const char *from)
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
- return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
+ return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
+}
+
+static inline bool is_nonpriv_flags_valid(u32 flags)
+{
+ /* Check only valid flag bits are set */
+ if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
+ return false;
+
+ /* NB: Only 3 out of 4 enum values are valid for access field */
+ if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
+ return false;
+
+ return true;
}
static void
@@ -1027,6 +1081,9 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
+ if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
+ return;
+
wa.reg.reg |= flags;
_wa_add(wal, &wa);
}
@@ -1034,7 +1091,7 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
- whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
+ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
static void gen9_whitelist_build(struct i915_wa_list *w)
@@ -1047,6 +1104,9 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
whitelist_reg(w, GEN8_HDC_CHICKEN1);
+
+ /* WaSendPushConstantsFromMMIO:skl,bxt */
+ whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}
static void skl_whitelist_build(struct intel_engine_cs *engine)
@@ -1115,7 +1175,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
- RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
}
@@ -1155,22 +1215,46 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
- RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
/* hucStatusRegOffset */
whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucUKernelHdrInfoRegOffset */
whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+ break;
+
+ default:
break;
+ }
+}
+
+static void tgl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ *
+ * This covers 4 registers which are next to one another :
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+ break;
default:
break;
}
@@ -1181,9 +1265,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- wa_init_start(w, "whitelist");
+ wa_init_start(w, "whitelist", engine->name);
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_whitelist_build(engine);
+ else if (IS_GEN(i915, 11))
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
@@ -1233,6 +1319,34 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1607138336:tgl */
+ wa_write_or(wal,
+ GEN9_CTX_PREEMPT_REG,
+ GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
+
+ /* Wa_1607030317:tgl */
+ /* Wa_1607186500:tgl */
+ /* Wa_1607297627:tgl */
+ wa_masked_en(wal,
+ GEN6_RC_SLEEP_PSMI_CONTROL,
+ GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+
+ /*
+ * Wa_1606679103:tgl
+ * (see also Wa_1606682166:icl)
+ */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
if (IS_GEN(i915, 11)) {
/* This is not an Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -1240,10 +1354,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
- ignore_wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN8_LQSC_FLUSH_COHERENT_LINES,
- GEN8_LQSC_FLUSH_COHERENT_LINES);
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
@@ -1270,10 +1383,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
- ignore_wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN11_LQSC_CLEAN_EVICT_DISABLE,
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
@@ -1292,6 +1404,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1409178092:icl */
+ wa_write_masked_or(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
@@ -1360,7 +1478,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
- if (engine->id == RCS0)
+ if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
@@ -1370,10 +1488,10 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ if (INTEL_GEN(engine->i915) < 8)
return;
- wa_init_start(wal, engine->name);
+ wa_init_start(wal, "engine", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
@@ -1416,26 +1534,50 @@ err_obj:
return ERR_PTR(err);
}
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
+ return true;
+
+ return false;
+}
+
static int
wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = rq->i915;
+ unsigned int i, count = 0;
const struct i915_wa *wa;
- unsigned int i;
u32 srm, *cs;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(i915) >= 8)
srm++;
- cs = intel_ring_begin(rq, 4 * wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
+ count++;
+ }
+
+ cs = intel_ring_begin(rq, 4 * count);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 offset = i915_mmio_reg_offset(wa->reg);
+
+ if (mcr_range(i915, offset))
+ continue;
+
*cs++ = srm;
- *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = offset;
*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
*cs++ = 0;
}
@@ -1458,11 +1600,13 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+ vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_engine_pm_get(ce->engine);
rq = intel_context_create_request(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_vma;
@@ -1472,25 +1616,32 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (err)
goto err_vma;
+ i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
- goto err_vma;
+ goto err_rq;
}
results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(results)) {
err = PTR_ERR(results);
- goto err_vma;
+ goto err_rq;
}
err = 0;
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
+ continue;
+
if (!wa_verify(wa, results[i], wal->name, from))
err = -ENXIO;
+ }
i915_gem_object_unpin_map(vma->obj);
+err_rq:
+ i915_request_put(rq);
err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
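
engine_wa_list_verify()/wa_list_srm() above skip any workaround register in the MCR-steered 0xb000-0xb4ff window, first when sizing and emitting the MI_STORE_REGISTER_MEM commands and again when checking the read-back values, while keeping each result slot at its original list index so the two passes stay aligned. The standalone sketch below (invented wa/mcr_range names, not driver code) shows that two-pass filtering shape.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wa { uint32_t offset; uint32_t expected; };

static bool mcr_range(uint32_t offset)
{
	return offset >= 0xb000 && offset <= 0xb4ff;	/* steered registers */
}

int main(void)
{
	const struct wa list[] = {
		{ 0x7004, 0x1 }, { 0xb118, 0x2 }, { 0xe4f0, 0x3 },
	};
	uint32_t results[3] = { 0x1, 0xdead, 0x3 };	/* pretend HW readback */
	unsigned int count = 0;

	/* Pass 1: size the command buffer from the verifiable entries only. */
	for (unsigned int i = 0; i < 3; i++)
		if (!mcr_range(list[i].offset))
			count++;
	printf("emitting %u SRM commands\n", count);

	/* Pass 2: check results, skipping the same entries by the same test. */
	for (unsigned int i = 0; i < 3; i++) {
		if (mcr_range(list[i].offset))
			continue;
		printf("0x%04x %s\n", (unsigned int)list[i].offset,
		       results[i] == list[i].expected ? "ok" : "MISMATCH");
	}
	return 0;
}
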
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 3761a6ee58bb..8c9c769c2204 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
@@ -25,9 +26,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
void intel_gt_init_workarounds(struct drm_i915_private *i915);
-void intel_gt_apply_workarounds(struct drm_i915_private *i915);
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
- const char *from);
+void intel_gt_apply_workarounds(struct intel_gt *gt);
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
void intel_engine_init_whitelist(struct intel_engine_cs *engine);
void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
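
Most of the wa_masked_en()/WA_SET_BIT_MASKED() helpers used throughout intel_workarounds.c target "masked" registers, where the upper 16 bits of the written value select which of the lower 16 bits actually change, so individual workaround bits can be flipped without a read-modify-write. The toy model below illustrates that behaviour; it is inferred from the _MASKED_BIT_ENABLE()-style macros rather than taken from hardware documentation, and masked_reg_write() is an invented stand-in for the register logic.

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	((((uint32_t)(a)) << 16) | (uint32_t)(a))
#define MASKED_BIT_DISABLE(a)	(((uint32_t)(a)) << 16)

/* Apply a write to a masked register: only the masked bits change. */
static uint16_t masked_reg_write(uint16_t current, uint32_t wr)
{
	uint16_t mask = wr >> 16;
	uint16_t val = wr & 0xffff;

	return (current & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x0000;

	reg = masked_reg_write(reg, MASKED_BIT_ENABLE(0x0004));
	printf("after enable  bit2: 0x%04x\n", (unsigned int)reg);	/* 0x0004 */

	reg = masked_reg_write(reg, MASKED_BIT_ENABLE(0x0100));
	printf("after enable  bit8: 0x%04x\n", (unsigned int)reg);	/* 0x0104 */

	reg = masked_reg_write(reg, MASKED_BIT_DISABLE(0x0004));
	printf("after disable bit2: 0x%04x\n", (unsigned int)reg);	/* 0x0100 */

	return 0;
}
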
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index 42ac1fb99572..e27ab1b710b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -20,6 +20,7 @@ struct i915_wa {
struct i915_wa_list {
const char *name;
+ const char *engine_name;
struct i915_wa *list;
unsigned int count;
unsigned int wa_count;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 486c6953dcb1..f2806381733f 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -23,62 +23,59 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
-struct mock_ring {
- struct intel_ring base;
- struct i915_timeline timeline;
-};
-
-static void mock_timeline_pin(struct i915_timeline *tl)
+static void mock_timeline_pin(struct intel_timeline *tl)
{
- tl->pin_count++;
+ atomic_inc(&tl->pin_count);
}
-static void mock_timeline_unpin(struct i915_timeline *tl)
+static void mock_timeline_unpin(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
- tl->pin_count--;
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ atomic_dec(&tl->pin_count);
}
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE / 2;
- struct mock_ring *ring;
+ struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
+
+ ring->vma = i915_vma_alloc();
+ if (!ring->vma) {
kfree(ring);
return NULL;
}
+ i915_active_init(&ring->vma->active, NULL, NULL);
- kref_init(&ring->base.ref);
- ring->base.size = sz;
- ring->base.effective_size = sz;
- ring->base.vaddr = (void *)(ring + 1);
- ring->base.timeline = &ring->timeline;
- atomic_set(&ring->base.pin_count, 1);
-
- INIT_LIST_HEAD(&ring->base.request_list);
- intel_ring_update_space(&ring->base);
+ intel_ring_update_space(ring);
- return &ring->base;
+ return ring;
}
-static void mock_ring_free(struct intel_ring *base)
+static void mock_ring_free(struct intel_ring *ring)
{
- struct mock_ring *ring = container_of(base, typeof(*ring), base);
+ i915_active_fini(&ring->vma->active);
+ i915_vma_free(ring->vma);
- i915_timeline_fini(&ring->timeline);
kfree(ring);
}
@@ -95,7 +92,7 @@ static void advance(struct i915_request *request)
i915_request_mark_complete(request);
GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->engine);
+ intel_engine_signal_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
@@ -130,7 +127,6 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
- mock_timeline_unpin(ce->ring->timeline);
}
static void mock_context_destroy(struct kref *ref)
@@ -139,37 +135,52 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->ring)
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
mock_ring_free(ce->ring);
+ mock_timeline_unpin(ce->timeline);
+ }
+ intel_context_fini(ce);
intel_context_free(ce);
}
-static int mock_context_pin(struct intel_context *ce)
+static int mock_context_alloc(struct intel_context *ce)
{
- int ret;
-
- if (!ce->ring) {
- ce->ring = mock_ring(ce->engine);
- if (!ce->ring)
- return -ENOMEM;
+ ce->ring = mock_ring(ce->engine);
+ if (!ce->ring)
+ return -ENOMEM;
+
+ GEM_BUG_ON(ce->timeline);
+ ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+ if (IS_ERR(ce->timeline)) {
+ kfree(ce->engine);
+ return PTR_ERR(ce->timeline);
}
- ret = intel_context_active_acquire(ce, PIN_HIGH);
- if (ret)
- return ret;
+ mock_timeline_pin(ce->timeline);
- mock_timeline_pin(ce->ring->timeline);
return 0;
}
+static int mock_context_pin(struct intel_context *ce)
+{
+ return 0;
+}
+
+static void mock_context_reset(struct intel_context *ce)
+{
+}
+
static const struct intel_context_ops mock_context_ops = {
+ .alloc = mock_context_alloc,
+
.pin = mock_context_pin,
.unpin = mock_context_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
+ .reset = mock_context_reset,
.destroy = mock_context_destroy,
};
@@ -216,16 +227,12 @@ static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}
-static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
GEM_BUG_ON(stalled);
}
-static void mock_reset_finish(struct intel_engine_cs *engine)
-{
-}
-
-static void mock_cancel_requests(struct intel_engine_cs *engine)
+static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
@@ -243,6 +250,24 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_engine_release(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+
+ GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+
+ intel_engine_fini_retire(engine);
+ intel_engine_fini_breadcrumbs(engine);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -250,6 +275,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
+ GEM_BUG_ON(!i915->gt.uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -257,9 +283,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
+ engine->base.gt = &i915->gt;
+ engine->base.uncore = i915->gt.uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
+ engine->base.legacy_idx = INVALID_ENGINE;
+ engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
engine->base.cops = &mock_context_ops;
@@ -269,38 +299,41 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.submit_request = mock_submit_request;
engine->base.reset.prepare = mock_reset_prepare;
- engine->base.reset.reset = mock_reset;
+ engine->base.reset.rewind = mock_reset_rewind;
+ engine->base.reset.cancel = mock_reset_cancel;
engine->base.reset.finish = mock_reset_finish;
- engine->base.cancel_requests = mock_cancel_requests;
+
+ engine->base.release = mock_engine_release;
+
+ i915->gt.engine[id] = &engine->base;
+ i915->gt.engine_class[0][id] = &engine->base;
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
+ intel_engine_add_user(&engine->base);
+
return &engine->base;
}
int mock_engine_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
- int err;
+ struct intel_context *ce;
intel_engine_init_active(engine, ENGINE_MOCK);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
+ intel_engine_pool_init(&engine->pool);
- engine->kernel_context =
- i915_gem_context_get_engine(i915->kernel_context, engine->id);
- if (IS_ERR(engine->kernel_context))
- goto err_breadcrumbs;
-
- err = intel_context_pin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- if (err)
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
goto err_breadcrumbs;
+ engine->kernel_context = ce;
return 0;
err_breadcrumbs:
@@ -325,17 +358,3 @@ void mock_engine_flush(struct intel_engine_cs *engine)
void mock_engine_reset(struct intel_engine_cs *engine)
{
}
-
-void mock_engine_free(struct intel_engine_cs *engine)
-{
- struct mock_engine *mock =
- container_of(engine, typeof(*mock), base);
-
- GEM_BUG_ON(timer_pending(&mock->hw_delay));
-
- intel_context_unpin(engine->kernel_context);
-
- intel_engine_fini_breadcrumbs(engine);
-
- kfree(engine);
-}
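
mock_ring() above allocates the struct intel_ring and its backing storage in a single kzalloc(sizeof(*ring) + sz) and points vaddr at (ring + 1). The short userspace sketch below shows the same trailing-payload allocation trick with invented names; it is illustrative only, not the mock engine code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring {
	size_t size;
	void *vaddr;	/* points just past the struct */
};

static struct ring *ring_create(size_t sz)
{
	struct ring *ring = calloc(1, sizeof(*ring) + sz);

	if (!ring)
		return NULL;

	ring->size = sz;
	ring->vaddr = ring + 1;		/* payload lives in the same block */
	return ring;
}

int main(void)
{
	struct ring *ring = ring_create(2048);

	if (!ring)
		return 1;

	memset(ring->vaddr, 0, ring->size);
	printf("struct at %p, payload at %p\n", (void *)ring, ring->vaddr);
	free(ring);			/* one free releases both */
	return 0;
}
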
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
new file mode 100644
index 000000000000..e874dfaa5316
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -0,0 +1,443 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+
+static int request_sync(struct i915_request *rq)
+{
+ struct intel_timeline *tl = i915_request_timeline(rq);
+ long timeout;
+ int err = 0;
+
+ intel_timeline_get(tl);
+ i915_request_get(rq);
+
+ /* Opencode i915_request_add() so we can keep the timeline locked. */
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, NULL);
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0)
+ err = timeout;
+ else
+ i915_request_retire_upto(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+ mutex_unlock(&tl->mutex);
+
+ i915_request_put(rq);
+ intel_timeline_put(tl);
+
+ return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err = 0;
+
+ mutex_lock(&tl->mutex);
+ do {
+ struct i915_request *rq;
+ long timeout;
+
+ if (list_empty(&tl->requests))
+ break;
+
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
+ i915_request_get(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0)
+ err = timeout;
+ else
+ i915_request_retire_upto(rq);
+
+ i915_request_put(rq);
+ } while (!err);
+ mutex_unlock(&tl->mutex);
+
+ return err;
+}
+
+static int __live_context_size(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ void *vaddr;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ intel_context_unpin(ce);
+ goto err;
+ }
+
+ /*
+ * Note that execlists also applies a redzone which it checks on
+ * context unpin when debugging. We are using the same location
+ * and same poison value so that our checks overlap. Despite the
+ * redundancy, we want to keep this little selftest so that we
+ * get coverage of any and all submission backends, and we can
+ * always extend this test to ensure we trick the HW into a
+ * compromising position with respect to the various sections that need
+ * to be written into the context state.
+ *
+ * TLDR; this overlaps with the execlists redzone.
+ */
+ vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
+ memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ intel_context_unpin(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ /* Force the context switch */
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
+ pr_err("%s context overwrote trailing red-zone!", engine->name);
+ err = -EINVAL;
+ }
+
+err_unpin:
+ i915_gem_object_unpin_map(ce->state->obj);
+err:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_context_size(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that our context sizes are correct by seeing if the
+ * HW tries to write past the end of one.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct {
+ struct drm_i915_gem_object *state;
+ void *pinned;
+ } saved;
+
+ if (!engine->context_size)
+ continue;
+
+ intel_engine_pm_get(engine);
+
+ /*
+ * Hide the old default state -- we lie about the context size
+ * and get confused when the default state is smaller than
+ * expected. For our do-nothing request, inheriting the
+ * active state is sufficient; we are only checking that we
+ * don't use more than we planned.
+ */
+ saved.state = fetch_and_zero(&engine->default_state);
+ saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+
+ /* Overlaps with the execlists redzone */
+ engine->context_size += I915_GTT_PAGE_SIZE;
+
+ err = __live_context_size(engine);
+
+ engine->context_size -= I915_GTT_PAGE_SIZE;
+
+ engine->pinned_default_state = saved.pinned;
+ engine->default_state = saved.state;
+
+ intel_engine_pm_put(engine);
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_active_context(struct intel_engine_cs *engine)
+{
+ unsigned long saved_heartbeat;
+ struct intel_context *ce;
+ int pass;
+ int err;
+
+ /*
+ * We keep active contexts alive until after a subsequent context
+ * switch as the final write from the context-save will be after
+ * we retire the final request. We track when we unpin the context,
+ * under the presumption that the final pin is from the last request,
+ * and instead of immediately unpinning the context, we add a task
+ * to unpin the context from the next idle-barrier.
+ *
+ * This test makes sure that the context is kept alive until a
+ * subsequent idle-barrier (emitted when the engine wakeref hits 0
+ * with no more outstanding requests).
+ */
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ for (pass = 0; pass <= 2; pass++) {
+ struct i915_request *rq;
+
+ intel_engine_pm_get(engine);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_engine;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto out_engine;
+
+ /* Context will be kept active until after an idle-barrier. */
+ if (i915_active_is_idle(&ce->active)) {
+ pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ goto out_engine;
+ }
+
+ if (!intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is asleep before idle-barrier\n",
+ engine->name);
+ err = -EINVAL;
+ goto out_engine;
+ }
+
+out_engine:
+ intel_engine_pm_put(engine);
+ if (err)
+ goto err;
+ }
+
+ /* Now make sure our idle-barriers are flushed */
+ err = intel_engine_flush_barriers(engine);
+ if (err)
+ goto err;
+
+ /* Wait for the barrier and in the process wait for engine to park */
+ err = context_sync(engine->kernel_context);
+ if (err)
+ goto err;
+
+ if (!i915_active_is_idle(&ce->active)) {
+ pr_err("context is still active!");
+ err = -EINVAL;
+ }
+
+ intel_engine_pm_flush(engine);
+
+ if (intel_engine_pm_is_awake(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s is still awake:%d after idle-barriers\n",
+ engine->name,
+ atomic_read(&engine->wakeref.count));
+ GEM_TRACE_DUMP();
+
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_active_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_active_context(engine);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(remote);
+ if (err)
+ return err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
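+ /*
+ * Hook the request into the remote context's active tracker, just as
+ * a remote update of its context image would.
+ */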
+ err = intel_context_prepare_remote_request(remote, rq);
+ if (err) {
+ i915_request_add(rq);
+ goto unpin;
+ }
+
+ err = request_sync(rq);
+
+unpin:
+ intel_context_unpin(remote);
+ return err;
+}
+
+static int __live_remote_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *local, *remote;
+ unsigned long saved_heartbeat;
+ int pass;
+ int err;
+
+ /*
+ * Check that our idle barriers do not interfere with normal
+ * activity tracking. In particular, check that operating
+ * on the context image remotely (intel_context_prepare_remote_request),
+ * which inserts foreign fences into intel_context.active, does not
+ * clobber the idle-barrier.
+ */
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ remote = intel_context_create(engine);
+ if (IS_ERR(remote))
+ return PTR_ERR(remote);
+
+ local = intel_context_create(engine);
+ if (IS_ERR(local)) {
+ err = PTR_ERR(local);
+ goto err_remote;
+ }
+
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+ intel_engine_pm_get(engine);
+
+ for (pass = 0; pass <= 2; pass++) {
+ err = __remote_sync(local, remote);
+ if (err)
+ break;
+
+ err = __remote_sync(engine->kernel_context, remote);
+ if (err)
+ break;
+
+ if (i915_active_is_idle(&remote->active)) {
+ pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_engine_pm_put(engine);
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
+
+ intel_context_put(local);
+err_remote:
+ intel_context_put(remote);
+ return err;
+}
+
+static int live_remote_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_remote_context(engine);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_context_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_context_size),
+ SUBTEST(live_active_context),
+ SUBTEST(live_remote_context),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
new file mode 100644
index 000000000000..f65b118e261d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+
+int intel_engine_live_selftests(struct drm_i915_private *i915)
+{
+ static int (* const tests[])(struct intel_gt *) = {
+ live_engine_pm_selftests,
+ NULL,
+ };
+ struct intel_gt *gt = &i915->gt;
+ typeof(*tests) *fn;
+
+ for (fn = tests; *fn; fn++) {
+ int err;
+
+ err = (*fn)(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h
new file mode 100644
index 000000000000..ab32d09ec5a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_H
+#define SELFTEST_ENGINE_H
+
+struct intel_gt;
+
+int live_engine_pm_selftests(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index cfaa6b296835..f88e445a1cae 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -4,7 +4,365 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include <linux/sort.h>
+
+#include "intel_gt_pm.h"
+#include "intel_rps.h"
+
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
+
+#define COUNT 5
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ return *a - *b;
+}
+
+static void perf_begin(struct intel_gt *gt)
+{
+ intel_gt_pm_get(gt);
+
+ /* Boost gpufreq to max [waitboost] and keep it fixed */
+ atomic_inc(&gt->rps.num_waiters);
+ schedule_work(&gt->rps.work);
+ flush_work(&gt->rps.work);
+}
+
+static int perf_end(struct intel_gt *gt)
+{
+ atomic_dec(&gt->rps.num_waiters);
+ intel_gt_pm_put(gt);
+
+ return igt_flush_test(gt->i915);
+}
+
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 cmd;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
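+ /*
+ * Copy the engine's CS_TIMESTAMP register into a spare dword of the
+ * request's HWSP; gen8+ takes a 64b address, hence the extra length
+ * dword added by cmd++ below.
+ */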
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static struct i915_vma *create_empty_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ cs[0] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+ sort(a, COUNT, sizeof(*a), cmp_u32, NULL);
+
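+ /* Weighted average of the three middle samples, counting the median twice, to reject outliers. */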
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ return sum >> 2;
+}
+
+static int perf_mi_bb_start(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *batch;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ batch = create_empty_batch(ce);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(batch);
+ if (err) {
+ intel_engine_pm_put(engine);
+ i915_vma_put(batch);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ if (err)
+ break;
+
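+ /* Cycles between the CS_TIMESTAMP samples written either side of the empty batch. */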
+ cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
+ }
+ i915_vma_put(batch);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: MI_BB_START cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+static struct i915_vma *create_nop_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
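+ /* MI_NOOP is 0, so the zeroed buffer is a long run of no-ops ending in BB_END. */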
+ memset(cs, 0, SZ_64K);
+ cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int perf_mi_noop(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *base, *nop;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ base = create_empty_batch(ce);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(base);
+ if (err) {
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ nop = create_nop_batch(ce);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(nop);
+ if (err) {
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ base->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ nop->node.start,
+ nop->node.size,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 4);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ if (err)
+ break;
+
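+ /* Subtract the empty-batch baseline, leaving just the cost of the ~16k MI_NOOPs. */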
+ cycles[i] =
+ (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
+ (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
+ }
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: 16K MI_NOOP cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_mi_bb_start),
+ SUBTEST(perf_mi_noop),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
static int intel_mmio_bases_check(void *arg)
{
@@ -12,19 +370,18 @@ static int intel_mmio_bases_check(void *arg)
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
const struct engine_info *info = &intel_engines[i];
- char name[INTEL_ENGINE_CS_MAX_NAME];
u8 prev = U8_MAX;
- __sprint_engine_name(name, info);
-
for (j = 0; j < MAX_MMIO_BASES; j++) {
u8 gen = info->mmio_bases[j].gen;
u32 base = info->mmio_bases[j].base;
if (gen >= prev) {
- pr_err("%s: %s: mmio base for gen %x "
- "is before the one for gen %x\n",
- __func__, name, prev, gen);
+ pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ prev, gen);
return -EINVAL;
}
@@ -32,17 +389,22 @@ static int intel_mmio_bases_check(void *arg)
break;
if (!base) {
- pr_err("%s: %s: invalid mmio base (%x) "
- "for gen %x at entry %u\n",
- __func__, name, base, gen, j);
+ pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ base, gen, j);
return -EINVAL;
}
prev = gen;
}
- pr_info("%s: min gen supported for %s = %d\n",
- __func__, name, prev);
+ pr_debug("%s: min gen supported for %s%d is %d\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->instance,
+ prev);
}
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
new file mode 100644
index 000000000000..43d4d589749f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -0,0 +1,374 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "i915_drv.h"
+
+#include "intel_gt_requests.h"
+#include "i915_selftest.h"
+
+static int timeline_sync(struct intel_timeline *tl)
+{
+ struct dma_fence *fence;
+ long timeout;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
+ return 0;
+
+ timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
+ dma_fence_put(fence);
+ if (timeout < 0)
+ return timeout;
+
+ return 0;
+}
+
+static int engine_sync_barrier(struct intel_engine_cs *engine)
+{
+ return timeline_sync(engine->kernel_context->timeline);
+}
+
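+/*
+ * A pulse is a bare i915_active with a kref, giving us something to watch
+ * retire once the engine's idle barriers have been flushed.
+ */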
+struct pulse {
+ struct i915_active active;
+ struct kref kref;
+};
+
+static int pulse_active(struct i915_active *active)
+{
+ kref_get(&container_of(active, struct pulse, active)->kref);
+ return 0;
+}
+
+static void pulse_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct pulse, kref));
+}
+
+static void pulse_put(struct pulse *p)
+{
+ kref_put(&p->kref, pulse_free);
+}
+
+static void pulse_retire(struct i915_active *active)
+{
+ pulse_put(container_of(active, struct pulse, active));
+}
+
+static struct pulse *pulse_create(void)
+{
+ struct pulse *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ kref_init(&p->kref);
+ i915_active_init(&p->active, pulse_active, pulse_retire);
+
+ return p;
+}
+
+static void pulse_unlock_wait(struct pulse *p)
+{
+ i915_active_unlock_wait(&p->active);
+}
+
+static int __live_idle_pulse(struct intel_engine_cs *engine,
+ int (*fn)(struct intel_engine_cs *cs))
+{
+ struct pulse *p;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ p = pulse_create();
+ if (!p)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&p->active);
+ if (err)
+ goto out;
+
+ err = i915_active_acquire_preallocate_barrier(&p->active, engine);
+ if (err) {
+ i915_active_release(&p->active);
+ goto out;
+ }
+
+ i915_active_acquire_barrier(&p->active);
+ i915_active_release(&p->active);
+
+ GEM_BUG_ON(i915_active_is_idle(&p->active));
+ GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
+
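+ /* The pulse's idle barrier is now queued on the engine; fn() must flush it. */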
+ err = fn(engine);
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+
+ if (engine_sync_barrier(engine)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: no heartbeat pulse?\n", engine->name);
+ intel_engine_dump(engine, &m, "%s", engine->name);
+
+ err = -ETIME;
+ goto out;
+ }
+
+ GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
+
+ pulse_unlock_wait(p); /* synchronize with the retirement callback */
+
+ if (!i915_active_is_idle(&p->active)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
+ i915_active_print(&p->active, &m);
+
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ pulse_put(p);
+ return err;
+}
+
+static int live_idle_flush(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_flush_barriers);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int live_idle_pulse(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that heartbeat pulses flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_pulse);
+ intel_engine_pm_put(engine);
+ if (err && err != -ENODEV)
+ break;
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ const u32 *a = _a, *b = _b;
+
+ return *a - *b;
+}
+
+static int __live_heartbeat_fast(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u32 times[5];
+ int err;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ intel_engine_pm_get(engine);
+
+ err = intel_engine_set_heartbeat(engine, 1);
+ if (err)
+ goto err_pm;
+
+ for (i = 0; i < ARRAY_SIZE(times); i++) {
+ /* Manufacture a tick */
+ do {
+ while (READ_ONCE(engine->heartbeat.systole))
+ flush_delayed_work(&engine->heartbeat.work);
+
+ engine->serial++; /* quick, pretend we are not idle! */
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat did not start\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ } while (!rq);
+
+ t0 = ktime_get();
+ while (rq == READ_ONCE(engine->heartbeat.systole))
+ yield(); /* work is on the local cpu! */
+ t1 = ktime_get();
+
+ i915_request_put(rq);
+ times[i] = ktime_us_delta(t1, t0);
+ }
+
+ sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
+
+ pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ times[0],
+ times[ARRAY_SIZE(times) - 1]);
+
+ /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
+ if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ jiffies_to_usecs(6));
+ err = -EINVAL;
+ }
+
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_heartbeat_fast(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the heartbeat ticks at the desired rate. */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_heartbeat_fast(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_heartbeat_off(struct intel_engine_cs *engine)
+{
+ int err;
+
+ intel_engine_pm_get(engine);
+
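+ /* Pretend there was work so the heartbeat worker arms itself. */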
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat not running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ err = intel_engine_set_heartbeat(engine, 0);
+ if (err)
+ goto err_pm;
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat still running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+ if (READ_ONCE(engine->heartbeat.systole)) {
+ pr_err("%s: heartbeat still allocated\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+err_beat:
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+static int live_heartbeat_off(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can turn off heartbeat and not interrupt VIP */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = __live_heartbeat_off(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_idle_flush),
+ SUBTEST(live_idle_pulse),
+ SUBTEST(live_heartbeat_fast),
+ SUBTEST(live_heartbeat_off),
+ };
+ int saved_hangcheck;
+ int err;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ saved_hangcheck = i915_modparams.enable_hangcheck;
+ i915_modparams.enable_hangcheck = INT_MAX;
+
+ err = intel_gt_live_subtests(tests, &i915->gt);
+
+ i915_modparams.enable_hangcheck = saved_hangcheck;
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
new file mode 100644
index 000000000000..cbf6b0735272
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+#include "selftests/igt_atomic.h"
+
+static int live_engine_pm(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check we can call intel_engine_pm_put from any context. No
+ * failures are reported directly, but if we mess up, lockdep should
+ * tell us.
+ */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("Unable to flush GT pm before test\n");
+ return -EBUSY;
+ }
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt, id) {
+ const typeof(*igt_atomic_phases) *p;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ /*
+ * Acquisition is always synchronous, except if we
+ * know that the engine is already awake, in which
+ * case we should use intel_engine_pm_get_if_awake()
+ * to atomically grab the wakeref.
+ *
+ * In practice,
+ * intel_engine_pm_get();
+ * intel_engine_pm_put();
+ * occurs in one thread, while simultaneously
+ * intel_engine_pm_get_if_awake();
+ * intel_engine_pm_put();
+ * occurs from atomic context in another.
+ */
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+ intel_engine_pm_get(engine);
+
+ p->critical_section_begin();
+ if (!intel_engine_pm_get_if_awake(engine))
+ pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
+ engine->name, p->name);
+ else
+ intel_engine_pm_put_async(engine);
+ intel_engine_pm_put_async(engine);
+ p->critical_section_end();
+
+ intel_engine_pm_flush(engine);
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is still awake after flushing pm\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ /* gt wakeref is async (deferred to workqueue) */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("GT failed to idle\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int live_engine_pm_selftests(struct intel_gt *gt)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_pm),
+ };
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
new file mode 100644
index 000000000000..09ff8e4f88af
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -0,0 +1,79 @@
+
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftest_llc.h"
+#include "selftest_rc6.h"
+
+static int live_gt_resume(void *arg)
+{
+ struct intel_gt *gt = arg;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ /* Do several suspend/resume cycles to check we don't explode! */
+ do {
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ if (gt->rc6.enabled) {
+ pr_err("rc6 still enabled after suspend!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = intel_gt_resume(gt);
+ if (err)
+ break;
+
+ if (gt->rc6.supported && !gt->rc6.enabled) {
+ pr_err("rc6 not enabled upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = st_llc_verify(&gt->llc);
+ if (err) {
+ pr_err("llc state not restored upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_rc6_manual),
+ SUBTEST(live_gt_resume),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
+
+int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ /*
+ * These tests may leave the system in an undesirable state.
+ * They are intended to be run last in CI and the system
+ * rebooted afterwards.
+ */
+ SUBTEST(live_rc6_ctx_wa),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 1ee4c923044f..3e5e6c86e843 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,13 +25,15 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
+
+#include "intel_gt.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
-#include "selftests/igt_wedge_me.h"
#include "selftests/igt_atomic.h"
#include "selftests/mock_drm.h"
@@ -42,7 +44,7 @@
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
struct hang {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
@@ -50,27 +52,27 @@ struct hang {
u32 *batch;
};
-static int hang_init(struct hang *h, struct drm_i915_private *i915)
+static int hang_init(struct hang *h, struct intel_gt *gt)
{
void *vaddr;
int err;
memset(h, 0, sizeof(*h));
- h->i915 = i915;
+ h->gt = gt;
- h->ctx = kernel_context(i915);
+ h->ctx = kernel_context(gt->i915);
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
- h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
goto err_ctx;
}
- h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->obj)) {
err = PTR_ERR(h->obj);
goto err_hws;
@@ -85,7 +87,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map(h->obj,
- i915_coherent_map_type(i915));
+ i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -118,7 +120,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
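+ /* Serialise the request against prior users of the object before tracking it as active. */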
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -127,47 +132,52 @@ static int move_to_active(struct i915_vma *vma,
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
+ struct intel_gt *gt = h->gt;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
+ struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
+ void *vaddr;
u32 *batch;
int err;
- if (i915_gem_object_is_active(h->obj)) {
- struct drm_i915_gem_object *obj;
- void *vaddr;
-
- obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ i915_vm_put(vm);
+ return ERR_CAST(obj);
+ }
- vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(h->i915));
- if (IS_ERR(vaddr)) {
- i915_gem_object_put(obj);
- return ERR_CAST(vaddr);
- }
+ vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ i915_vm_put(vm);
+ return ERR_CAST(vaddr);
+ }
- i915_gem_object_unpin_map(h->obj);
- i915_gem_object_put(h->obj);
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
- h->obj = obj;
- h->batch = vaddr;
- }
+ h->obj = obj;
+ h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_vm_put(vm);
return ERR_CAST(vma);
+ }
hws = i915_vma_instance(h->hws, vm, NULL);
- if (IS_ERR(hws))
+ if (IS_ERR(hws)) {
+ i915_vm_put(vm);
return ERR_CAST(hws);
+ }
err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return ERR_PTR(err);
+ }
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
@@ -188,7 +198,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
goto cancel_rq;
batch = h->batch;
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(gt->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
@@ -202,7 +212,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (INTEL_GEN(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -215,7 +225,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -242,7 +252,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
- i915_gem_chipset_flush(h->i915);
+ intel_gt_chipset_flush(engine->gt);
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
@@ -251,7 +261,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -265,6 +275,7 @@ unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
+ i915_vm_put(vm);
return err ? ERR_PTR(err) : rq;
}
@@ -276,7 +287,7 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(h->i915);
+ intel_gt_chipset_flush(h->gt);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
@@ -286,7 +297,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- igt_flush_test(h->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -299,9 +310,27 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
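+/*
+ * Park the heartbeat so its background requests and hang detection cannot
+ * interfere while the test deliberately wedges the engine.
+ */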
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int igt_hang_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -310,13 +339,12 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
- for_each_engine(engine, i915, id) {
- struct igt_wedge_me w;
+ for_each_engine(engine, gt, id) {
+ struct intel_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -333,15 +361,15 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_request_add(rq);
timeout = 0;
- igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
timeout = -EIO;
i915_request_put(rq);
@@ -356,8 +384,6 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -368,43 +394,33 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
static int igt_reset_nop(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
unsigned int reset_count, count;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- struct drm_file *file;
IGT_TIMEOUT(end_time);
int err = 0;
/* Check that we can reset during non-user portions of requests */
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
-
- i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
int i;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -412,83 +428,65 @@ static int igt_reset_nop(void *arg)
i915_request_add(rq);
}
+
+ intel_context_put(ce);
}
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_lock(i915);
- i915_reset(i915, ALL_ENGINES, NULL);
- igt_global_reset_unlock(i915);
- if (i915_reset_failed(i915)) {
+ igt_global_reset_lock(gt);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(gt);
+
+ if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
}
- if (i915_reset_count(&i915->gpu_error) !=
- reset_count + ++count) {
+ if (i915_reset_count(global) != reset_count + ++count) {
pr_err("Full GPU reset not recorded!\n");
err = -EINVAL;
break;
}
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
-out:
- mock_file_free(i915, file);
- if (i915_reset_failed(i915))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
static int igt_reset_nop_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- struct drm_file *file;
- int err = 0;
/* Check that we can engine-reset during non-user portions */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
-
- i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- for_each_engine(engine, i915, id) {
- unsigned int reset_count, reset_engine_count;
- unsigned int count;
+ for_each_engine(engine, gt, id) {
+ unsigned int reset_count, reset_engine_count, count;
+ struct intel_context *ce;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
+ int err;
- reset_count = i915_reset_count(&i915->gpu_error);
- reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
- engine);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ engine_heartbeat_disable(engine, &heartbeat);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -499,11 +497,10 @@ static int igt_reset_nop_engine(void *arg)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -511,21 +508,19 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
- if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ if (i915_reset_engine_count(global, engine) !=
reset_engine_count + ++count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
@@ -533,31 +528,24 @@ static int igt_reset_nop_engine(void *arg)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ engine_heartbeat_enable(engine, heartbeat);
- if (err)
- break;
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
- err = igt_flush_test(i915, 0);
+ intel_context_put(ce);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
if (err)
- break;
+ return err;
}
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-out:
- mock_file_free(i915, file);
- if (i915_reset_failed(i915))
- err = -EIO;
- return err;
+ return 0;
}
-static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
+static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -565,19 +553,18 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
/* Check that we can issue an engine reset on an idle engine (no-op) */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (active) {
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
return err;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -590,30 +577,26 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- reset_count = i915_reset_count(&i915->gpu_error);
- reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
- engine);
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
- intel_engine_pm_get(engine);
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ engine_heartbeat_disable(engine, &heartbeat);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
struct i915_request *rq;
- mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
@@ -628,19 +611,19 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
i915_request_put(rq);
}
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
- if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ if (i915_reset_engine_count(global, engine) !=
++reset_engine_count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
@@ -648,25 +631,22 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- intel_engine_pm_put(engine);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
- if (active) {
- mutex_lock(&i915->drm.struct_mutex);
+ if (active)
hang_fini(&h);
- mutex_unlock(&i915->drm.struct_mutex);
- }
return err;
}
@@ -707,7 +687,7 @@ static int active_request_put(struct i915_request *rq)
rq->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(rq->i915);
+ intel_gt_set_wedged(rq->engine->gt);
err = -EIO;
}
@@ -722,47 +702,42 @@ static int active_engine(void *data)
struct active_engine *arg = data;
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq[8] = {};
- struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
- struct drm_file *file;
- unsigned long count = 0;
+ struct intel_context *ce[ARRAY_SIZE(rq)];
+ unsigned long count;
int err = 0;
- file = mock_file(engine->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- for (count = 0; count < ARRAY_SIZE(ctx); count++) {
- mutex_lock(&engine->i915->drm.struct_mutex);
- ctx[count] = live_context(engine->i915, file);
- mutex_unlock(&engine->i915->drm.struct_mutex);
- if (IS_ERR(ctx[count])) {
- err = PTR_ERR(ctx[count]);
+ for (count = 0; count < ARRAY_SIZE(ce); count++) {
+ ce[count] = intel_context_create(engine);
+ if (IS_ERR(ce[count])) {
+ err = PTR_ERR(ce[count]);
while (--count)
- i915_gem_context_put(ctx[count]);
- goto err_file;
+ intel_context_put(ce[count]);
+ return err;
}
}
+ count = 0;
while (!kthread_should_stop()) {
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
- mutex_lock(&engine->i915->drm.struct_mutex);
- new = igt_request_alloc(ctx[idx], engine);
+ new = intel_context_create_request(ce[idx]);
if (IS_ERR(new)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
- if (arg->flags & TEST_PRIORITY)
- ctx[idx]->sched.priority =
- i915_prandom_u32_max_state(512, &prng);
-
rq[idx] = i915_request_get(new);
i915_request_add(new);
- mutex_unlock(&engine->i915->drm.struct_mutex);
+
+ if (engine->schedule && arg->flags & TEST_PRIORITY) {
+ struct i915_sched_attr attr = {
+ .priority =
+ i915_prandom_u32_max_state(512, &prng),
+ };
+ engine->schedule(rq[idx], &attr);
+ }
err = active_request_put(old);
if (err)
@@ -777,17 +752,18 @@ static int active_engine(void *data)
/* Keep the first error */
if (!err)
err = err__;
+
+ intel_context_put(ce[count]);
}
-err_file:
- mock_file_free(engine->i915, file);
return err;
}
-static int __igt_reset_engines(struct drm_i915_private *i915,
+static int __igt_reset_engines(struct intel_gt *gt,
const char *test_name,
unsigned int flags)
{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
struct hang h;
@@ -797,13 +773,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
* with any other engine.
*/
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
return err;
@@ -811,10 +785,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
h.ctx->sched.priority = 1024;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct active_engine threads[I915_NUM_ENGINES] = {};
- unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (flags & TEST_ACTIVE &&
@@ -829,12 +804,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
memset(threads, 0, sizeof(threads));
- for_each_engine(other, i915, tmp) {
+ for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
threads[tmp].resets =
- i915_reset_engine_count(&i915->gpu_error,
- other);
+ i915_reset_engine_count(global, other);
if (!(flags & TEST_OTHERS))
continue;
@@ -856,26 +830,25 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
get_task_struct(tsk);
}
- intel_engine_pm_get(engine);
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ yield(); /* start all threads before we begin */
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
@@ -888,7 +861,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
}
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
engine->name, test_name, err);
@@ -900,7 +873,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (rq) {
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to complete request after reset\n",
@@ -910,7 +883,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
i915_request_put(rq);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
break;
}
@@ -920,7 +893,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to idle after reset\n",
@@ -932,12 +905,13 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- intel_engine_pm_put(engine);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ engine_heartbeat_enable(engine, heartbeat);
+
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
- reported = i915_reset_engine_count(&i915->gpu_error, engine);
+ reported = i915_reset_engine_count(global, engine);
reported -= threads[engine->id].resets;
if (reported != count) {
pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
@@ -947,7 +921,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
unwind:
- for_each_engine(other, i915, tmp) {
+ for_each_engine(other, gt, tmp) {
int ret;
if (!threads[tmp].task)
@@ -962,22 +936,21 @@ unwind:
}
put_task_struct(threads[tmp].task);
- if (other != engine &&
+ if (other->uabi_class != engine->uabi_class &&
threads[tmp].resets !=
- i915_reset_engine_count(&i915->gpu_error, other)) {
+ i915_reset_engine_count(global, other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
other->name,
- i915_reset_engine_count(&i915->gpu_error,
- other) -
+ i915_reset_engine_count(global, other) -
threads[tmp].resets);
if (!err)
err = -EINVAL;
}
}
- if (global != i915_reset_count(&i915->gpu_error)) {
+ if (device != i915_reset_count(global)) {
pr_err("Global reset (count=%ld)!\n",
- i915_reset_count(&i915->gpu_error) - global);
+ i915_reset_count(global) - device);
if (!err)
err = -EINVAL;
}
@@ -985,21 +958,16 @@ unwind:
if (err)
break;
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
- if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
+ if (flags & TEST_ACTIVE)
hang_fini(&h);
- mutex_unlock(&i915->drm.struct_mutex);
- }
return err;
}
@@ -1024,13 +992,13 @@ static int igt_reset_engines(void *arg)
},
{ }
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
typeof(*phases) *p;
int err;
for (p = phases; p->name; p++) {
if (p->flags & TEST_PRIORITY) {
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
continue;
}
@@ -1042,38 +1010,38 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915,
- intel_engine_mask_t mask)
+static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
- u32 count = i915_reset_count(&i915->gpu_error);
+ u32 count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, mask, NULL);
+ intel_gt_reset(gt, mask, NULL);
return count;
}
static int igt_reset_wait(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS0]))
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS0]);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -1083,19 +1051,19 @@ static int igt_reset_wait(void *arg)
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out_rq;
}
- reset_count = fake_hangcheck(i915, ALL_ENGINES);
+ reset_count = fake_hangcheck(gt, ALL_ENGINES);
timeout = i915_request_wait(rq, 0, 10);
if (timeout < 0) {
@@ -1105,7 +1073,7 @@ static int igt_reset_wait(void *arg)
goto out_rq;
}
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
goto out_rq;
@@ -1116,10 +1084,9 @@ out_rq:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1134,15 +1101,14 @@ static int evict_vma(void *data)
{
struct evict_vma *arg = data;
struct i915_address_space *vm = arg->vma->vm;
- struct drm_i915_private *i915 = vm->i915;
struct drm_mm_node evict = arg->vma->node;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&vm->mutex);
err = i915_gem_evict_for_node(vm, &evict, 0);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&vm->mutex);
return err;
}
@@ -1150,57 +1116,62 @@ static int evict_vma(void *data)
static int evict_fence(void *data)
{
struct evict_vma *arg = data;
- struct drm_i915_private *i915 = arg->vma->vm->i915;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
-
/* Mark the fence register as dirty to force the mmio update. */
err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
if (err) {
pr_err("Invalid Y-tiling settings; err:%d\n", err);
- goto out_unlock;
+ return err;
+ }
+
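+ /* Fences live in the mappable aperture, so bind the vma there before claiming one. */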
+ err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
+ if (err) {
+ pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
+ return err;
}
err = i915_vma_pin_fence(arg->vma);
+ i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
i915_vma_unpin_fence(arg->vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return 0;
}
-static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_address_space *vm,
int (*fn)(void *),
unsigned int flags)
{
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+ unsigned int pin_flags;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS0]))
+ if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+ return 0;
+
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
- obj = i915_gem_object_create_internal(i915, SZ_1M);
+ obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fini;
@@ -1220,16 +1191,18 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS0]);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
}
- err = i915_vma_pin(arg.vma, 0, 0,
- i915_vma_is_ggtt(arg.vma) ?
- PIN_GLOBAL | PIN_MAPPABLE :
- PIN_USER);
+ pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
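+ /* Only require the mappable aperture when a fence is actually needed. */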
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ pin_flags |= PIN_MAPPABLE;
+
+ err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
@@ -1246,7 +1219,10 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
i915_vma_lock(arg.vma);
- err = i915_vma_move_to_active(arg.vma, rq, flags);
+ err = i915_request_await_object(rq, arg.vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(arg.vma, rq, flags);
i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE)
@@ -1258,16 +1234,14 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
if (err)
goto out_rq;
- mutex_unlock(&i915->drm.struct_mutex);
-
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto out_reset;
}
@@ -1284,41 +1258,37 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
wait_for_completion(&arg.completion);
if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("igt/evict_vma kthread did not wait\n");
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto out_reset;
}
out_reset:
- igt_global_reset_lock(i915);
- fake_hangcheck(rq->i915, rq->engine->mask);
- igt_global_reset_unlock(i915);
+ igt_global_reset_lock(gt);
+ fake_hangcheck(gt, rq->engine->mask);
+ igt_global_reset_unlock(gt);
if (tsk) {
- struct igt_wedge_me w;
+ struct intel_wedge_me w;
/* The reset, even indirectly, should take less than 10ms. */
- igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
err = kthread_stop(tsk);
put_task_struct(tsk);
}
- mutex_lock(&i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1326,56 +1296,48 @@ unlock:
static int igt_reset_evict_ggtt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
evict_vma, EXEC_OBJECT_WRITE);
}
static int igt_reset_evict_ppgtt(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct intel_gt *gt = arg;
+ struct i915_ppgtt *ppgtt;
int err;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ /* aliasing == global gtt locking, covered above */
+ if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
+ return 0;
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
+ ppgtt = i915_ppgtt_create(gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- err = 0;
- if (ctx->vm) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(i915, ctx->vm,
- evict_vma, EXEC_OBJECT_WRITE);
+ err = __igt_reset_evict_vma(gt, &ppgtt->vm,
+ evict_vma, EXEC_OBJECT_WRITE);
+ i915_vm_put(&ppgtt->vm);
-out:
- mock_file_free(i915, file);
return err;
}
static int igt_reset_evict_fence(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}
-static int wait_for_others(struct drm_i915_private *i915,
+static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *exclude)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine == exclude)
continue;
@@ -1388,7 +1350,8 @@ static int wait_for_others(struct drm_i915_private *i915,
static int igt_reset_queue(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -1396,14 +1359,13 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -1444,7 +1406,7 @@ static int igt_reset_queue(void *arg)
* (hangcheck), or we focus on resetting just one
* engine and so avoid repeatedly resetting innocents.
*/
- err = wait_for_others(i915, engine);
+ err = wait_for_others(gt, engine);
if (err) {
pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
__func__, engine->name);
@@ -1452,12 +1414,12 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto fini;
}
if (!wait_until_running(&h, prev)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
@@ -1468,13 +1430,13 @@ static int igt_reset_queue(void *arg)
i915_request_put(rq);
i915_request_put(prev);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto fini;
}
- reset_count = fake_hangcheck(i915, BIT(id));
+ reset_count = fake_hangcheck(gt, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1494,7 +1456,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
i915_request_put(rq);
i915_request_put(prev);
@@ -1509,11 +1471,11 @@ static int igt_reset_queue(void *arg)
pr_info("%s: Completed %d resets\n", engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_request_put(prev);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -1521,10 +1483,9 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1532,26 +1493,25 @@ unlock:
static int igt_handle_error(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct hang h;
struct i915_request *rq;
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
int err;
/* Check that we can issue a global GPU and engine reset */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
- goto err_unlock;
+ return err;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
@@ -1563,28 +1523,24 @@ static int igt_handle_error(void *arg)
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_request;
}
- mutex_unlock(&i915->drm.struct_mutex);
-
/* Temporarily disable error capture */
- error = xchg(&i915->gpu_error.first_error, (void *)-1);
+ error = xchg(&global->first_error, (void *)-1);
- i915_handle_error(i915, engine->mask, 0, NULL);
+ intel_gt_handle_error(gt, engine->mask, 0, NULL);
- xchg(&i915->gpu_error.first_error, error);
-
- mutex_lock(&i915->drm.struct_mutex);
+ xchg(&global->first_error, error);
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
@@ -1596,8 +1552,6 @@ err_request:
i915_request_put(rq);
err_fini:
hang_fini(&h);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1611,10 +1565,10 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable_nosync(t);
+ tasklet_disable(t);
p->critical_section_begin();
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
p->critical_section_end();
tasklet_enable(t);
@@ -1629,7 +1583,6 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
const struct igt_atomic_section *p)
{
- struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
struct hang h;
int err;
@@ -1638,7 +1591,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
if (err)
return err;
- err = hang_init(&h, i915);
+ err = hang_init(&h, engine->gt);
if (err)
return err;
@@ -1657,16 +1610,16 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
rq->fence.seqno, hws_seqno(&h, rq));
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(engine->gt);
err = -EIO;
}
if (err == 0) {
- struct igt_wedge_me w;
+ struct intel_wedge_me w;
- igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/)
+ intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
}
@@ -1678,30 +1631,29 @@ out:
static int igt_reset_engines_atomic(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the engine resets are usable from atomic context */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- igt_global_reset_lock(i915);
- mutex_lock(&i915->drm.struct_mutex);
+ igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
@@ -1710,11 +1662,9 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
-
+ igt_force_reset(gt);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -1736,28 +1686,21 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
+ struct intel_gt *gt = &i915->gt;
intel_wakeref_t wakeref;
- bool saved_hangcheck;
int err;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
- drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
-
- err = i915_subtests(tests, i915);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = intel_gt_live_subtests(tests, gt);
- i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
new file mode 100644
index 000000000000..fd3770e48ac7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_pm.h" /* intel_gpu_freq() */
+#include "selftest_llc.h"
+#include "intel_rps.h"
+
+static int gen6_verify_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ intel_wakeref_t wakeref;
+ unsigned int gpu_freq;
+ int err = 0;
+
+ wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
+
+ if (!get_ia_constants(llc, &consts)) {
+ err = -ENODEV;
+ goto out_rpm;
+ }
+
+ for (gpu_freq = consts.min_gpu_freq;
+ gpu_freq <= consts.max_gpu_freq;
+ gpu_freq++) {
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ unsigned int ia_freq, ring_freq, found;
+ u32 val;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+
+ val = gpu_freq;
+ if (sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &val, NULL)) {
+ pr_err("Failed to read freq table[%d], range [%d, %d]\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
+ err = -ENXIO;
+ break;
+ }
+
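+ /* The pcode reply packs the IA freq in bits 7:0 and the ring freq in bits 15:8 */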
+ found = (val >> 0) & 0xff;
+ if (found != ia_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ia_freq);
+ err = -EINVAL;
+ break;
+ }
+
+ found = (val >> 8) & 0xff;
+ if (found != ring_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ring_freq);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref);
+ return err;
+}
+
+int st_llc_verify(struct intel_llc *llc)
+{
+ int err = 0;
+
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ err = gen6_verify_ring_freq(llc);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
new file mode 100644
index 000000000000..873f896e72f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_LLC_H
+#define SELFTEST_LLC_H
+
+struct intel_llc;
+
+int st_llc_verify(struct intel_llc *llc);
+
+#endif /* SELFTEST_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 401e8b539297..65718ca2326e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -19,75 +20,776 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
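+/*
+ * Selftests that deliberately keep an engine busy must park the heartbeat
+ * first, or the background heartbeat pulse may preempt or reset the
+ * spinner; these helpers zero the interval and restore it afterwards.
+ */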
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
- intel_wakeref_t wakeref;
- int err = -ENOMEM;
+ int err = 0;
- if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin, i915))
- goto err_unlock;
-
- ctx = kernel_context(i915);
- if (!ctx)
- goto err_spin;
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
struct i915_request *rq;
- rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx;
+ goto out_ctx;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
igt_spinner_end(&spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
}
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
err = 0;
-err_ctx:
- kernel_context_close(ctx);
-err_spin:
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ unsigned long saved;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ engine_heartbeat_disable(engine, &saved);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Set up the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
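+ /* Move ce[1]'s RING_HEAD/TAIL into the poisoned half of its ring */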
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ __execlists_update_reg_state(ce[1], engine);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ tasklet_kill(&engine->execlists.tasklet); /* flush submission */
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ engine_heartbeat_enable(engine, saved);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing; now remove it and reset */
+
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
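+ /* Flush any pending submission by hand so rq is the active ELSP[0] context */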
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But it is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ engine_heartbeat_enable(engine, heartbeat);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
igt_spinner_fini(&spin);
-err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
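+ /*
+ * Wait on our own slot until it is signalled, then release the
+ * previous request in the chain by writing to slot idx - 1.
+ */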
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto out_ce;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ce:
+ intel_context_put(ce);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx, int prio)
+{
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
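+ /* Signal the last semaphore in the chain, starting the release cascade */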
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
+
+ return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ for_each_engine(engine, outer->gt, id) {
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_put(rq);
+ }
+ }
+
+ err = release_queue(outer, vma, n, INT_MAX);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head, 0,
+ 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
+ count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+ int count;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_prime_number_from(count, 1, 16) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ engine_heartbeat_disable(engine, &saved);
+ err = slice_semaphore_queue(engine, vma, count);
+ engine_heartbeat_enable(engine, saved);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ if (i915_request_is_active(rq))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
+}
+
+static long timeslice_threshold(const struct intel_engine_cs *engine)
+{
+ return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that even if ELSP[0] and ELSP[1] are both filled and
+ * timeslicing between them is disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+ unsigned long saved;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine_heartbeat_disable(engine, &saved);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_heartbeat;
+ }
+ engine->schedule(rq, &attr);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto err_rq;
+ }
+ err = wait_for_submit(engine, nop, HZ / 2);
+ i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, matching the semaphore's priority */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err)
+ goto err_rq;
+
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.timer.expires) &&
+ !i915_request_completed(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ memset(vaddr, 0xff, PAGE_SIZE);
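+ /* Force all semaphores to complete so the stuck requests can drain */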
+ err = -EINVAL;
+ }
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+err_rq:
+ i915_request_put(rq);
+err_heartbeat:
+ engine_heartbeat_enable(engine, saved);
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
return err;
}
static int live_busywait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *map;
@@ -96,22 +798,19 @@ static int live_busywait_preempt(void *arg)
* preempt the busywaits used to synchronise between rings.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
- goto err_unlock;
+ return -ENOMEM;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ctx_lo;
@@ -123,7 +822,7 @@ static int live_busywait_preempt(void *arg)
goto err_obj;
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_map;
@@ -133,15 +832,18 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
u32 *cs;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
if (!intel_engine_can_store_dword(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_vma;
}
@@ -184,15 +886,19 @@ static int live_busywait_preempt(void *arg)
*cs++ = 0;
intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
i915_request_add(lo);
if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
err = -ETIMEDOUT;
goto err_vma;
}
/* Low priority request should be busywaiting now */
if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
@@ -202,6 +908,7 @@ static int live_busywait_preempt(void *arg)
hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -209,6 +916,7 @@ static int live_busywait_preempt(void *arg)
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -221,7 +929,7 @@ static int live_busywait_preempt(void *arg)
i915_request_add(hi);
if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
engine->name);
@@ -229,11 +937,13 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ i915_request_put(lo);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
if (igt_live_test_end(&t)) {
err = -EIO;
@@ -252,65 +962,74 @@ err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
-err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
static int live_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
pr_err("Logical preemption supported, but not exposed\n");
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, i915))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -320,13 +1039,13 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -337,7 +1056,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -360,58 +1079,53 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_late_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, i915))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- for_each_engine(engine, i915, id) {
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -423,8 +1137,8 @@ static int live_late_preempt(void *arg)
goto err_wedged;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -464,16 +1178,12 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -483,14 +1193,13 @@ struct preempt_client {
struct i915_gem_context *ctx;
};
-static int preempt_client_init(struct drm_i915_private *i915,
- struct preempt_client *c)
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
{
- c->ctx = kernel_context(i915);
+ c->ctx = kernel_context(gt->i915);
if (!c->ctx)
return -ENOMEM;
- if (igt_spinner_init(&c->spin, i915))
+ if (igt_spinner_init(&c->spin, gt))
goto err_ctx;
return 0;
@@ -506,16 +1215,435 @@ static void preempt_client_fini(struct preempt_client *c)
kernel_context_close(c->ctx);
}
+static int live_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be under observation and does not want to be interrupted.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
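+ /* Ban the context, then send a heartbeat pulse to preempt and cancel it */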
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[1]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[2]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
};
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -525,49 +1653,58 @@ static int live_suppress_self_preempt(void *arg)
* completion event.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0; /* presume black box */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ if (intel_vgpu_active(gt->i915))
+ return 0; /* GVT forces single port & request submission */
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
int depth;
if (!intel_engine_has_preemption(engine))
continue;
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ intel_engine_pm_get(engine);
engine->execlists.preempt_hang.count = 0;
- rq_a = igt_spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
for (depth = 0; depth < 8; depth++) {
- rq_b = igt_spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_b);
@@ -578,6 +1715,7 @@ static int live_suppress_self_preempt(void *arg)
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -587,14 +1725,17 @@ static int live_suppress_self_preempt(void *arg)
igt_spinner_end(&a.spin);
if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
engine->execlists.preempt_hang.count,
depth);
+ intel_engine_pm_put(engine);
err = -EINVAL;
goto err_client_b;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -603,17 +1744,12 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
@@ -632,9 +1768,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
if (!rq)
return NULL;
- INIT_LIST_HEAD(&rq->active_list);
rq->engine = engine;
+ spin_lock_init(&rq->lock);
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+ rq->fence.lock = &rq->lock;
+ rq->fence.ops = &i915_fence_ops;
+
i915_sched_node_init(&rq->sched);
/* mark this request as permanently incomplete */
@@ -646,6 +1786,10 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
i915_sw_fence_init(&rq->submit, dummy_notify);
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+ spin_lock_init(&rq->lock);
+ rq->fence.lock = &rq->lock;
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+
return rq;
}
@@ -665,11 +1809,11 @@ static void dummy_request_free(struct i915_request *dummy)
static int live_suppress_wait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct preempt_client client[4];
+ struct i915_request *rq[ARRAY_SIZE(client)] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
int i;
@@ -679,22 +1823,19 @@ static int live_suppress_wait_preempt(void *arg)
* not needlessly generate preempt-to-idle cycles.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
- goto err_unlock;
- if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
+ return -ENOMEM;
+ if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
goto err_client_0;
- if (preempt_client_init(i915, &client[2])) /* head of queue */
+ if (preempt_client_init(gt, &client[2])) /* head of queue */
goto err_client_1;
- if (preempt_client_init(i915, &client[3])) /* bystander */
+ if (preempt_client_init(gt, &client[3])) /* bystander */
goto err_client_2;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
int depth;
if (!intel_engine_has_preemption(engine))
@@ -704,7 +1845,6 @@ static int live_suppress_wait_preempt(void *arg)
continue;
for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
- struct i915_request *rq[ARRAY_SIZE(client)];
struct i915_request *dummy;
engine->execlists.preempt_hang.count = 0;
@@ -714,18 +1854,22 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- rq[i] = igt_spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq[i])) {
- err = PTR_ERR(rq[i]);
+ struct i915_request *this;
+
+ this = spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto err_wedged;
}
/* Disable NEWCLIENT promotion */
- __i915_active_request_set(&rq[i]->timeline->last_request,
- dummy);
- i915_request_add(rq[i]);
+ __i915_active_fence_set(&i915_request_timeline(this)->last_request,
+ &dummy->fence);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
}
dummy_request_free(dummy);
@@ -746,10 +1890,13 @@ static int live_suppress_wait_preempt(void *arg)
goto err_wedged;
}
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ rq[i] = NULL;
+ }
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
@@ -772,28 +1919,24 @@ err_client_1:
preempt_client_fini(&client[1]);
err_client_0:
preempt_client_fini(&client[0]);
-err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
- i915_gem_set_wedged(i915);
+ i915_request_put(rq[i]);
+ }
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client hi, lo;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -802,19 +1945,16 @@ static int live_chain_preempt(void *arg)
* the previously submitted spinner in B.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &hi))
- goto err_unlock;
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
- if (preempt_client_init(i915, &lo))
+ if (preempt_client_init(gt, &lo))
goto err_client_hi;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
@@ -825,11 +1965,13 @@ static int live_chain_preempt(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = igt_spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
ring_size = rq->wa_tail - rq->head;
@@ -842,27 +1984,29 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
for_each_prime_number_from(count, 1, ring_size) {
- rq = igt_spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (!igt_wait_for_spinner(&hi.spin, rq))
goto err_wedged;
- rq = igt_spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -877,36 +2021,46 @@ static int live_chain_preempt(void *arg)
rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
engine->schedule(rq, &attr);
igt_spinner_end(&hi.spin);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to preempt over chain of %d\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
}
if (igt_live_test_end(&t)) {
@@ -920,66 +2074,252 @@ err_client_lo:
preempt_client_fini(&lo);
err_client_hi:
preempt_client_fini(&hi);
-err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_lo;
}
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
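+ /*
+ * Poll the first dword of this batch (currently the nonzero
+ * MI_ARB_ON_OFF above) until the next, higher priority batch
+ * (or the CPU, for the final member of the gang) writes zero
+ * into it.
+ */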
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = vma;
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
+ rq->client_link.next = &(*prev)->client_link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+
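+ /*
+ * Keep extending the gang until every user priority level is
+ * covered, stopping if the internal priority encoding would
+ * overflow or we run out of time.
+ */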
+ if (prio <= I915_PRIORITY_MAX)
+ continue;
+
+ if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
+ break;
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ } while (1);
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * Such that the last spinner is the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n =
+ list_next_entry(rq, client_link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_preempt_hang(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, i915))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -989,13 +2329,13 @@ static int live_preempt_hang(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -1011,28 +2351,28 @@ static int live_preempt_hang(void *arg)
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- i915_reset_engine(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ intel_engine_reset(engine, NULL);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -1047,10 +2387,105 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
return err;
}
@@ -1065,7 +2500,7 @@ static int random_priority(struct rnd_state *rnd)
}
struct preempt_smoke {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
@@ -1089,7 +2524,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
int err = 0;
if (batch) {
- vma = i915_vma_instance(batch, ctx->vm, NULL);
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1108,11 +2547,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
if (vma) {
i915_vma_lock(vma);
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
+ err = i915_request_await_object(rq, vma->obj, false);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
i915_vma_unlock(vma);
}
@@ -1136,11 +2577,9 @@ static int smoke_crescendo_thread(void *arg)
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
- mutex_lock(&smoke->i915->drm.struct_mutex);
err = smoke_submit(smoke,
ctx, count % I915_PRIORITY_MAX,
smoke->batch);
- mutex_unlock(&smoke->i915->drm.struct_mutex);
if (err)
return err;
@@ -1161,9 +2600,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
unsigned long count;
int err = 0;
- mutex_unlock(&smoke->i915->drm.struct_mutex);
-
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
if (!(flags & BATCH))
@@ -1179,8 +2616,10 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
get_task_struct(tsk[id]);
}
+ yield(); /* start all threads before we kthread_stop() */
+
count = 0;
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
int status;
if (IS_ERR_OR_NULL(tsk[id]))
@@ -1195,11 +2634,9 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
put_task_struct(tsk[id]);
}
- mutex_lock(&smoke->i915->drm.struct_mutex);
-
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -1211,7 +2648,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count = 0;
do {
- for_each_engine(smoke->engine, smoke->i915, id) {
+ for_each_engine(smoke->engine, smoke->gt, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
@@ -1227,25 +2664,24 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
static int live_preempt_smoke(void *arg)
{
struct preempt_smoke smoke = {
- .i915 = arg,
+ .gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
- intel_wakeref_t wakeref;
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
return 0;
smoke.contexts = kmalloc_array(smoke.ncontext,
@@ -1254,13 +2690,11 @@ static int live_preempt_smoke(void *arg)
if (!smoke.contexts)
return -ENOMEM;
- mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
-
- smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err = PTR_ERR(smoke.batch);
- goto err_unlock;
+ goto err_free;
}
cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
@@ -1274,13 +2708,13 @@ static int live_preempt_smoke(void *arg)
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.i915);
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
if (!smoke.contexts[n])
goto err_ctx;
}
@@ -1307,15 +2741,13 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
-err_unlock:
- intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
- mutex_unlock(&smoke.i915->drm.struct_mutex);
+err_free:
kfree(smoke.contexts);
return err;
}
-static int nop_virtual_engine(struct drm_i915_private *i915,
+static int nop_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling,
unsigned int nctx,
@@ -1323,28 +2755,18 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
#define CHAIN BIT(0)
{
IGT_TIMEOUT(end_time);
- struct i915_request *request[16];
- struct i915_gem_context *ctx[16];
+ struct i915_request *request[16] = {};
struct intel_context *ve[16];
unsigned long n, prime, nc;
struct igt_live_test t;
ktime_t times[2] = {};
int err;
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(i915);
- if (!ctx[n]) {
- err = -ENOMEM;
- nctx = n;
- goto out;
- }
-
- ve[n] = intel_execlists_create_virtual(ctx[n],
- siblings, nsibling);
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve[n])) {
- kernel_context_close(ctx[n]);
err = PTR_ERR(ve[n]);
nctx = n;
goto out;
@@ -1353,13 +2775,12 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
err = intel_context_pin(ve[n]);
if (err) {
intel_context_put(ve[n]);
- kernel_context_close(ctx[n]);
nctx = n;
goto out;
}
}
- err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
if (err)
goto out;
@@ -1369,27 +2790,35 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
if (flags & CHAIN) {
for (nc = 0; nc < nctx; nc++) {
for (n = 0; n < prime; n++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
} else {
for (n = 0; n < prime; n++) {
for (nc = 0; nc < nctx; nc++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
}
@@ -1406,7 +2835,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
break;
}
}
@@ -1415,6 +2844,11 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
if (prime == 1)
times[0] = times[1];
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
if (__igt_timeout(end_time, NULL))
break;
}
@@ -1428,37 +2862,35 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
intel_context_unpin(ve[nc]);
intel_context_put(ve[nc]);
- kernel_context_close(ctx[nc]);
}
return err;
}
static int live_virtual_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int class, inst;
- int err = -ENODEV;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id) {
- err = nop_virtual_engine(i915, &engine, 1, 1, 0);
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
if (err) {
pr_err("Failed to wrap engine %s: err=%d\n",
engine->name, err);
- goto out_unlock;
+ return err;
}
}
@@ -1467,37 +2899,34 @@ static int live_virtual_engine(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
continue;
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(i915, siblings, nsibling,
+ err = nop_virtual_engine(gt, siblings, nsibling,
n, 0);
if (err)
- goto out_unlock;
+ return err;
}
- err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
if (err)
- goto out_unlock;
+ return err;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
-static int mask_virtual_engine(struct drm_i915_private *i915,
+static int mask_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct igt_live_test t;
unsigned int n;
@@ -1508,11 +2937,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(i915);
- if (!ctx)
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
@@ -1522,7 +2947,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
if (err)
goto out_put;
- err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
if (err)
goto out_unpin;
@@ -1553,7 +2978,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out;
}
@@ -1568,11 +2993,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
}
err = igt_live_test_end(&t);
- if (err)
- goto out;
-
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
@@ -1583,46 +3005,183 @@ out_unpin:
out_put:
intel_context_put(ve);
out_close:
- kernel_context_close(ctx);
return err;
}
static int live_virtual_mask(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
break;
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
- err = mask_virtual_engine(i915, siblings, nsibling);
+ err = mask_virtual_engine(gt, siblings, nsibling);
if (err)
- goto out_unlock;
+ return err;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ scratch = create_scratch(siblings[0]->gt);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
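+ /*
+ * Save whatever the previous request (possibly run on a
+ * different sibling) left in GPR[n], then seed GPR[n + 1] for
+ * the next request; the readback below checks the chain of
+ * values followed the context from engine to engine.
+ */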
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
return err;
}
-static int bond_virtual_engine(struct drm_i915_private *i915,
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * are preserved.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ /* As we use CS_GPR we cannot run before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings,
unsigned int nsibling,
@@ -1630,21 +3189,59 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
#define BOND_SCHEDULE BIT(0)
{
struct intel_engine_cs *master;
- struct i915_gem_context *ctx;
struct i915_request *rq[16];
enum intel_engine_id id;
+ struct igt_spinner spin;
unsigned long n;
int err;
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it gets requested that perhaps the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed
+ * and hence checking later it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(i915);
- if (!ctx)
+ if (igt_spinner_init(&spin, gt))
return -ENOMEM;
err = 0;
rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, i915, id) {
+ for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
if (master->class == class)
@@ -1652,7 +3249,9 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_request_alloc(ctx, master);
+ rq[0] = igt_spinner_create_request(&spin,
+ master->kernel_context,
+ MI_NOOP);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -1665,16 +3264,21 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
&fence,
GFP_KERNEL);
}
+
i915_request_add(rq[0]);
if (err < 0)
goto out;
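+
+ /*
+ * Without BOND_SCHEDULE the master is not gated by the onstack
+ * fence, so make sure its spinner is executing before we submit
+ * the bonded secondaries.
+ */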
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
for (n = 0; n < nsibling; n++) {
struct intel_context *ve;
- ve = intel_execlists_create_virtual(ctx,
- siblings,
- nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
onstack_fence_fini(&fence);
@@ -1716,6 +3320,8 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
}
}
onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
pr_err("Master request did not execute (on %s)!\n",
@@ -1749,10 +3355,10 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
out:
for (n = 0; !IS_ERR(rq[n]); n++)
i915_request_put(rq[n]);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
- kernel_context_close(ctx);
+ igt_spinner_fini(&spin);
return err;
}
@@ -1766,70 +3372,725 @@ static int live_virtual_bond(void *arg)
{ "schedule", BOND_SCHEDULE },
{ },
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
const struct phase *p;
int nsibling;
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
break;
GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
for (p = phases; p->name; p++) {
- err = bond_virtual_engine(i915,
+ err = bond_virtual_engine(gt,
class, siblings, nsibling,
p->flags);
if (err) {
pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
__func__, p->name, class, nsibling, err);
- goto out_unlock;
+ return err;
}
}
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ unsigned long *heartbeat;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
+ if (!heartbeat)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
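+ /*
+ * With the tasklet disabled, run it by hand so that our spinner
+ * becomes the active ELSP[0] request before we fake the failed
+ * preemption.
+ */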
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != ve->engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_enable(siblings[n], heartbeat[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+out_free:
+ kfree(heartbeat);
return err;
}
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
};
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
+
+static void hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
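+ /* Collapse runs of identical rows into a single '*', as hexdump(1) does */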
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+static int live_lrc_layout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 *lrc;
+ int err;
+
+ /*
+ * Check the registers offsets we use to create the initial reg state
+ * match the layout saved by HW.
+ */
+
+ lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!lrc)
+ return -ENOMEM;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ u32 *hw;
+ int dw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
+ engine->kernel_context,
+ engine,
+ engine->kernel_context->ring,
+ true);
+
+ dw = 0;
+ do {
+ u32 lri = hw[dw];
+
+ if (lri == 0) {
+ dw++;
+ continue;
+ }
+
+ if (lrc[dw] == 0) {
+ pr_debug("%s: skipped instruction %x at dword %d\n",
+ engine->name, lri, dw);
+ dw++;
+ continue;
+ }
+
+ if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ pr_err("%s: Expected LRI command at dword %d, found %08x\n",
+ engine->name, dw, lri);
+ err = -EINVAL;
+ break;
+ }
+
+ if (lrc[dw] != lri) {
+ pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
+ engine->name, dw, lri, lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
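+ /*
+ * The LRI length field (bits 6:0) encodes 2 * num_regs - 1, so
+ * adding one gives the number of offset/value dwords to walk.
+ */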
+ lri &= 0x7f;
+ lri++;
+ dw++;
+
+ while (lri) {
+ if (hw[dw] != lrc[dw]) {
+ pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
+ engine->name, dw, hw[dw], lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Skip over the actual register value as we
+ * expect that to differ.
+ */
+ dw += 2;
+ lri -= 2;
+ }
+ } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ if (err) {
+ pr_info("%s: HW register image:\n", engine->name);
+ hexdump(hw, PAGE_SIZE);
+
+ pr_info("%s: SW register image:\n", engine->name);
+ hexdump(lrc, PAGE_SIZE);
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ if (err)
+ break;
+ }
+
+ kfree(lrc);
+ return err;
+}
+
+static int find_offset(const u32 *lri, u32 offset)
+{
+ int i;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ if (lri[i] == offset)
+ return i;
+
+ return -1;
+}
+
+static int live_lrc_fixed(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the assumed register offsets match the actual locations in
+ * the context image.
+ */
+
+ for_each_engine(engine, gt, id) {
+ const struct {
+ u32 reg;
+ u32 offset;
+ const char *name;
+ } tbl[] = {
+ {
+ i915_mmio_reg_offset(RING_START(engine->mmio_base)),
+ CTX_RING_START - 1,
+ "RING_START"
+ },
+ {
+ i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
+ CTX_RING_CTL - 1,
+ "RING_CTL"
+ },
+ {
+ i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
+ CTX_RING_HEAD - 1,
+ "RING_HEAD"
+ },
+ {
+ i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
+ CTX_RING_TAIL - 1,
+ "RING_TAIL"
+ },
+ {
+ i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
+ lrc_ring_mi_mode(engine),
+ "RING_MI_MODE"
+ },
+ {
+ i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
+ CTX_BB_STATE - 1,
+ "BB_STATE"
+ },
+ { },
+ }, *t;
+ u32 *hw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ for (t = tbl; t->name; t++) {
+ int dw = find_offset(hw, t->reg);
+
+ if (dw != t->offset) {
+ pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
+ engine->name,
+ t->name,
+ t->reg,
+ dw,
+ t->offset);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+
+ return err;
+}
+
+static int __live_lrc_state(struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ enum {
+ RING_START_IDX = 0,
+ RING_TAIL_IDX,
+ MAX_IDX
+ };
+ u32 expected[MAX_IDX];
+ u32 *cs;
+ int err;
+ int n;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ cs = intel_ring_begin(rq, 4 * MAX_IDX);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_unpin;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
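+ /*
+ * Once submission has been flushed, the HW RING_TAIL should have
+ * been updated to the ring's software tail sampled below; the SRM
+ * above reads it back for comparison.
+ */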
+ intel_engine_flush_submission(engine);
+ expected[RING_TAIL_IDX] = ce->ring->tail;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < MAX_IDX; n++) {
+ if (cs[n] != expected[n]) {
+ pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
+ engine->name, n, cs[n], expected[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_state(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the live register state matches what we expect for this
+ * intel_context.
+ */
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ for_each_engine(engine, gt, id) {
+ err = __live_lrc_state(engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int gpr_make_dirty(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int n;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int __live_gpr_clear(struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
+ return 0; /* GPR only on rcs0 for gen8 */
+
+ err = gpr_make_dirty(engine);
+ if (err)
+ return err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_put;
+ }
+
+ cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_put;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n]) {
+ pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
+ engine->name,
+ n / 2, n & 1 ? "udw" : "ldw",
+ cs[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_gpr_clear(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that GPR registers are cleared in new contexts as we need
+ * to avoid leaking any information from previous contexts.
+ */
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ for_each_engine(engine, gt, id) {
+ err = __live_gpr_clear(engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+int intel_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_lrc_layout),
+ SUBTEST(live_lrc_fixed),
+ SUBTEST(live_lrc_state),
+ SUBTEST(live_gpr_clear),
+ };
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..de1f83100fb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,419 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch))
+ return PTR_ERR(arg->scratch);
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_regs(struct i915_request *rq,
+ u32 addr, unsigned int count,
+ uint32_t *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < count; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = *offset;
+ *cs++ = 0;
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ uint32_t *offset)
+{
+ u32 addr;
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ addr = global_mocs_offset();
+ else
+ addr = mocs_offset(rq->engine);
+
+ return read_regs(rq, addr, table->n_entries, offset);
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ uint32_t *offset)
+{
+ u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+
+ return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ uint32_t **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_mocs(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ uint32_t **vaddr)
+{
+ /* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
+ u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ unsigned int i;
+ u32 expect;
+
+ for_each_l3cc(expect, table, i) {
+ if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ reg += 4;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ u32 offset;
+ u32 *vaddr;
+ int err;
+
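+ /* Poison the scratch so a short or missing readback cannot pass by accident */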
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ /* Read the mocs tables back using SRM */
+ offset = i915_ggtt_offset(vma);
+ if (!err)
+ err = read_mocs_table(rq, &arg->table, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, &arg->table, &offset);
+ offset -= i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset > PAGE_SIZE);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ /* Compare the results against the expected tables */
+ vaddr = arg->vaddr;
+ if (!err)
+ err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(arg->vaddr + offset != vaddr);
+ return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Basic check that the system is configured with the expected mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = check_mocs_engine(&mocs, engine->kernel_context);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Every new context should see the same mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce)
+{
+ int err;
+
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ /* Check that the mocs setup is retained over per-engine and global resets */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ intel_engine_pm_get(engine);
+ err = __live_mocs_reset(&mocs, ce);
+ intel_engine_pm_put(engine);
+
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_kernel),
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
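The live subtest table above is presumably picked up by the driver's selftest harness through its usual registration macro; a sketch of the expected one-line entry, with the file path being an assumption:

/* drivers/gpu/drm/i915/selftests/i915_live_selftests.h (assumed) */
selftest(mocs, intel_mocs_live_selftests)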
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
new file mode 100644
index 000000000000..8cc55a0e9e06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -0,0 +1,203 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
+#include "selftest_rc6.h"
+
+#include "selftests/i915_random.h"
+
+int live_rc6_manual(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ intel_wakeref_t wakeref;
+ u64 res[2];
+ int err = 0;
+
+ /*
+ * Our claim is that we can "encourage" the GPU to enter rc6 at will.
+ * Let's try it!
+ */
+
+ if (!rc6->enabled)
+ return 0;
+
+ /* bsw/byt use a PCU and decouple RC6 from our manual control */
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ /* Force RC6 off for starters */
+ __intel_rc6_disable(rc6);
+ msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+
+ res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ msleep(250);
+ res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
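+ /* Residency sampled 250ms apart must not advance while RC6 is forced off; >> 10 roughly converts ns to us for the message */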
+ if ((res[1] - res[0]) >> 10) {
+ pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+ (res[1] - res[0]) >> 10);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Manually enter RC6 */
+ intel_rc6_park(rc6);
+
+ res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ msleep(100);
+ res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+
+ if (res[1] == res[0]) {
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+ err = -EINVAL;
+ }
+
+ /* Restore what should have been the original state! */
+ intel_rc6_unpark(rc6);
+
+out_unlock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return err;
+}
+
+static const u32 *__live_rc6_ctx(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ const u32 *result;
+ u32 cmd;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return ERR_CAST(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return cs;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
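+ /* On gen8+ the SRM takes a 64-bit GGTT address, so the command length grows by one dword */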
+ if (INTEL_GEN(rq->i915) >= 8)
+ cmd++;
+
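+ /* The register value lands 8 bytes into the HWSP cacheline, i.e. at hwsp_seqno + 2 dwords as returned below */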
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
+ *cs++ = ce->timeline->hwsp_offset + 8;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ result = rq->hwsp_seqno + 2;
+ i915_request_add(rq);
+
+ return result;
+}
+
+static struct intel_engine_cs **
+randomised_engines(struct intel_gt *gt,
+ struct rnd_state *prng,
+ unsigned int *count)
+{
+ struct intel_engine_cs *engine, **engines;
+ enum intel_engine_id id;
+ int n;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ n++;
+ if (!n)
+ return NULL;
+
+ engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return NULL;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ engines[n++] = engine;
+
+ i915_prandom_shuffle(engines, sizeof(*engines), n, prng);
+
+ *count = n;
+ return engines;
+}
+
+int live_rc6_ctx_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs **engines;
+ unsigned int n, count;
+ I915_RND_STATE(prng);
+ int err = 0;
+
+ /* A read of CTX_INFO upsets rc6. Poke the bear! */
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ engines = randomised_engines(gt, &prng, &count);
+ if (!engines)
+ return 0;
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *engine = engines[n];
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ struct intel_context *ce;
+ unsigned int resets =
+ i915_reset_engine_count(&gt->i915->gpu_error,
+ engine);
+ const u32 *res;
+
+ /* Use a sacrificial context */
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ intel_engine_pm_get(engine);
+ res = __live_rc6_ctx(ce);
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ goto out;
+ }
+
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ pr_debug("%s: CTX_INFO=%0x\n",
+ engine->name, READ_ONCE(*res));
+
+ if (resets !=
+ i915_reset_engine_count(&gt->i915->gpu_error,
+ engine)) {
+ pr_err("%s: GPU reset required\n",
+ engine->name);
+ add_taint_for_CI(TAINT_WARN);
+ err = -EIO;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(engines);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
new file mode 100644
index 000000000000..762fd442d7b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_RC6_H
+#define SELFTEST_RC6_H
+
+int live_rc6_ctx_wa(void *arg);
+int live_rc6_manual(void *arg);
+
+#endif /* SELFTEST_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index b5c590c9ccba..6ad6aca315f6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,26 +9,29 @@
static int igt_global_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
unsigned int reset_count;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that we can issue a global GPU reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, ALL_ENGINES, NULL);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
}
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
@@ -36,64 +39,123 @@ static int igt_global_reset(void *arg)
static int igt_wedged_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
intel_wakeref_t wakeref;
/* Check that we can recover a wedged device with a GPU reset */
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
- GEM_BUG_ON(!i915_reset_failed(i915));
- i915_reset(i915, ALL_ENGINES, NULL);
+ GEM_BUG_ON(!intel_gt_is_wedged(gt));
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
- return i915_reset_failed(i915) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
}
static int igt_atomic_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the resets are usable from atomic context */
- igt_global_reset_lock(i915);
- mutex_lock(&i915->drm.struct_mutex);
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
intel_engine_mask_t awake;
- GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ GEM_TRACE("__intel_gt_reset under %s\n", p->name);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
p->critical_section_begin();
- reset_prepare(i915);
- err = intel_gpu_reset(i915, ALL_ENGINES);
+
+ err = __intel_gt_reset(gt, ALL_ENGINES);
+
p->critical_section_end();
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
if (err) {
- pr_err("intel_gpu_reset failed under %s\n", p->name);
+ pr_err("__intel_gt_reset failed under %s\n", p->name);
break;
}
}
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
+static int igt_atomic_engine_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ const typeof(*igt_atomic_phases) *p;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(gt))
+ goto out_unlock;
+
+ for_each_engine(engine, gt, id) {
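+ /* Disable the submission tasklet so it cannot run concurrently with the resets issued inside the atomic sections below */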
+ tasklet_disable(&engine->execlists.tasklet);
+ intel_engine_pm_get(engine);
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ GEM_TRACE("intel_engine_reset(%s) under %s\n",
+ engine->name, p->name);
+
+ p->critical_section_begin();
+ err = intel_engine_reset(engine, NULL);
+ p->critical_section_end();
+
+ if (err) {
+ pr_err("intel_engine_reset(%s) failed under %s\n",
+ engine->name, p->name);
+ break;
+ }
+ }
+
+ intel_engine_pm_put(engine);
+ tasklet_enable(&engine->execlists.tasklet);
+ if (err)
+ break;
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(gt);
+
+out_unlock:
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
return err;
}
@@ -104,18 +166,15 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_atomic_reset),
+ SUBTEST(igt_atomic_engine_reset),
};
- intel_wakeref_t wakeref;
- int err = 0;
+ struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- return err;
+ return intel_gt_live_subtests(tests, gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 76d3977f1d4b..e2d78cc22fb4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,16 +6,19 @@
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
-#include "i915_random.h"
-#include "i915_selftest.h"
+#include "../selftests/i915_random.h"
+#include "../i915_selftest.h"
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "mock_timeline.h"
+#include "../selftests/igt_flush_test.h"
+#include "../selftests/mock_gem_device.h"
+#include "selftests/mock_timeline.h"
-static struct page *hwsp_page(struct i915_timeline *tl)
+static struct page *hwsp_page(struct intel_timeline *tl)
{
struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
@@ -23,7 +26,7 @@ static struct page *hwsp_page(struct i915_timeline *tl)
return sg_page(obj->mm.pages->sgl);
}
-static unsigned long hwsp_cacheline(struct i915_timeline *tl)
+static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
unsigned long address = (unsigned long)page_address(hwsp_page(tl));
@@ -33,9 +36,9 @@ static unsigned long hwsp_cacheline(struct i915_timeline *tl)
#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
struct mock_hwsp_freelist {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct radix_tree_root cachelines;
- struct i915_timeline **history;
+ struct intel_timeline **history;
unsigned long count, max;
struct rnd_state prng;
};
@@ -46,12 +49,12 @@ enum {
static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
unsigned int idx,
- struct i915_timeline *tl)
+ struct intel_timeline *tl)
{
tl = xchg(&state->history[idx], tl);
if (tl) {
radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
}
@@ -59,14 +62,14 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned int count,
unsigned int flags)
{
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
unsigned int idx;
while (count--) {
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, NULL);
+ tl = intel_timeline_create(state->gt, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -77,7 +80,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
cacheline);
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
return err;
}
@@ -104,6 +107,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
static int mock_hwsp_freelist(void *arg)
{
struct mock_hwsp_freelist state;
+ struct drm_i915_private *i915;
const struct {
const char *name;
unsigned int flags;
@@ -115,12 +119,14 @@ static int mock_hwsp_freelist(void *arg)
unsigned int na;
int err = 0;
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
- state.i915 = mock_gem_device();
- if (!state.i915)
- return -ENOMEM;
+ state.gt = &i915->gt;
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
@@ -135,7 +141,6 @@ static int mock_hwsp_freelist(void *arg)
goto err_put;
}
- mutex_lock(&state.i915->drm.struct_mutex);
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
@@ -148,10 +153,9 @@ static int mock_hwsp_freelist(void *arg)
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
kfree(state.history);
err_put:
- drm_dev_put(&state.i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -162,21 +166,21 @@ struct __igt_sync {
bool set;
};
-static int __igt_sync(struct i915_timeline *tl,
+static int __igt_sync(struct intel_timeline *tl,
u64 ctx,
const struct __igt_sync *p,
const char *name)
{
int ret;
- if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
name, p->name, ctx, p->seqno, yesno(p->expected));
return -EINVAL;
}
if (p->set) {
- ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
+ ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
if (ret)
return ret;
}
@@ -204,7 +208,7 @@ static int igt_sync(void *arg)
{ "unwrap", UINT_MAX, true, false },
{},
}, *p;
- struct i915_timeline tl;
+ struct intel_timeline tl;
int order, offset;
int ret = -ENODEV;
@@ -248,7 +252,7 @@ static unsigned int random_engine(struct rnd_state *rnd)
static int bench_sync(void *arg)
{
struct rnd_state prng;
- struct i915_timeline tl;
+ struct intel_timeline tl;
unsigned long end_time, count;
u64 prng32_1M;
ktime_t kt;
@@ -286,7 +290,7 @@ static int bench_sync(void *arg)
do {
u64 id = i915_prandom_u64_state(&prng);
- __i915_timeline_sync_set(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
@@ -301,7 +305,7 @@ static int bench_sync(void *arg)
while (end_time--) {
u64 id = i915_prandom_u64_state(&prng);
- if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+ if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
mock_timeline_fini(&tl);
pr_err("Lookup of %llu failed\n", id);
return -EINVAL;
@@ -322,7 +326,7 @@ static int bench_sync(void *arg)
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
- __i915_timeline_sync_set(&tl, count++, 0);
+ __intel_timeline_sync_set(&tl, count++, 0);
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -332,7 +336,7 @@ static int bench_sync(void *arg)
end_time = count;
kt = ktime_get();
while (end_time--) {
- if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
+ if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
pr_err("Lookup of %lu failed\n", end_time);
mock_timeline_fini(&tl);
return -EINVAL;
@@ -356,8 +360,8 @@ static int bench_sync(void *arg)
u32 id = random_engine(&prng);
u32 seqno = prandom_u32_state(&prng);
- if (!__i915_timeline_sync_is_later(&tl, id, seqno))
- __i915_timeline_sync_set(&tl, id, seqno);
+ if (!__intel_timeline_sync_is_later(&tl, id, seqno))
+ __intel_timeline_sync_set(&tl, id, seqno);
count++;
} while (!time_after(jiffies, end_time));
@@ -385,8 +389,8 @@ static int bench_sync(void *arg)
*/
u64 id = (u64)(count & mask) << order;
- __i915_timeline_sync_is_later(&tl, id, 0);
- __i915_timeline_sync_set(&tl, id, 0);
+ __intel_timeline_sync_is_later(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
@@ -401,7 +405,7 @@ static int bench_sync(void *arg)
return 0;
}
-int i915_timeline_mock_selftests(void)
+int intel_timeline_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(mock_hwsp_freelist),
@@ -443,49 +447,51 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
}
static struct i915_request *
-tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
+tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
-
- err = i915_timeline_pin(tl);
+ err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
goto out;
}
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
goto out_unpin;
+ i915_request_get(rq);
+
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
- if (err)
+ if (err) {
+ i915_request_put(rq);
rq = ERR_PTR(err);
+ }
out_unpin:
- i915_timeline_unpin(tl);
+ intel_timeline_unpin(tl);
out:
if (IS_ERR(rq))
pr_err("Failed to write to timeline!\n");
return rq;
}
-static struct i915_timeline *
-checked_i915_timeline_create(struct drm_i915_private *i915)
+static struct intel_timeline *
+checked_intel_timeline_create(struct intel_gt *gt)
{
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
- tl = i915_timeline_create(i915, NULL);
+ tl = intel_timeline_create(gt, NULL);
if (IS_ERR(tl))
return tl;
if (*tl->hwsp_seqno != tl->seqno) {
pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
*tl->hwsp_seqno, tl->seqno);
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
return ERR_PTR(-EINVAL);
}
@@ -495,11 +501,10 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
+ struct intel_gt *gt = arg;
+ struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -514,55 +519,54 @@ static int live_hwsp_engine(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
for (n = 0; n < NUM_TIMELINES; n++) {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
}
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
+ struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
n, *tl->hwsp_seqno);
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
@@ -570,11 +574,10 @@ out:
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
+ struct intel_gt *gt = arg;
+ struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -590,66 +593,62 @@ static int live_hwsp_alternate(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
- for_each_engine(engine, i915, id) {
- struct i915_timeline *tl;
+ for_each_engine(engine, gt, id) {
+ struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
+ intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
+ intel_engine_pm_get(engine);
rq = tl_write(tl, engine, count);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
+ struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
n, *tl->hwsp_seqno);
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
static int live_hwsp_wrap(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = 0;
/*
@@ -657,22 +656,18 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ tl = intel_timeline_create(gt, NULL);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
- tl = i915_timeline_create(i915, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
- err = i915_timeline_pin(tl);
+ err = intel_timeline_pin(tl);
if (err)
goto out_free;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
const u32 *hwsp_seqno[2];
struct i915_request *rq;
u32 seqno[2];
@@ -680,7 +675,7 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
@@ -688,7 +683,9 @@ static int live_hwsp_wrap(void *arg)
tl->seqno = -4u;
- err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
@@ -703,7 +700,9 @@ static int live_hwsp_wrap(void *arg)
}
hwsp_seqno[0] = tl->hwsp_seqno;
- err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
@@ -738,29 +737,24 @@ static int live_hwsp_wrap(void *arg)
goto out;
}
- i915_retire_requests(i915); /* recycle HWSP */
+ intel_gt_retire_requests(gt); /* recycle HWSP */
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
- i915_timeline_unpin(tl);
+ intel_timeline_unpin(tl);
out_free:
- i915_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
+ intel_timeline_put(tl);
return err;
}
static int live_hwsp_recycle(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count;
int err = 0;
@@ -770,38 +764,38 @@ static int live_hwsp_recycle(void *arg)
* want to confuse ourselves or the GPU.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
do {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
- i915_timeline_put(tl);
+ i915_request_put(rq);
+ intel_timeline_put(tl);
err = -EIO;
- goto out;
+ break;
}
if (*tl->hwsp_seqno != count) {
@@ -810,26 +804,23 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
- i915_timeline_put(tl);
+ i915_request_put(rq);
+ intel_timeline_put(tl);
count++;
if (err)
- goto out;
-
- i915_timelines_park(i915); /* Encourage recycling! */
+ break;
} while (!__igt_timeout(end_time, NULL));
- }
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
return err;
}
-int i915_timeline_live_selftests(struct drm_i915_private *i915)
+int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_hwsp_recycle),
@@ -838,8 +829,8 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_wrap),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 44becd9538be..ac1921854cbf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -5,13 +5,14 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
-#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -24,53 +25,70 @@ static const struct wo_register {
{ INTEL_GEMINILAKE, 0x731c }
};
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
struct i915_wa_list gt_wa_list;
struct {
- char name[REF_NAME_MAX];
struct i915_wa_list wa_list;
struct i915_wa_list ctx_wa_list;
} engine[I915_NUM_ENGINES];
};
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+ i915_request_put(rq);
+
+ return err;
+}
+
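+/*
+ * A minimal usage sketch for the two helpers above; emit_test_commands() is a
+ * hypothetical stand-in for whatever commands a caller writes into the ring.
+ * request_add_sync() queues the request and waits up to HZ / 5, turning a
+ * timeout into -EIO:
+ *
+ * rq = intel_context_create_request(ce);
+ * if (IS_ERR(rq))
+ * return PTR_ERR(rq);
+ *
+ * err = emit_test_commands(rq);
+ * err = request_add_sync(rq, err);
+ * if (err)
+ * return err;
+ */
+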
static void
-reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
memset(lists, 0, sizeof(*lists));
- wa_init_start(&lists->gt_wa_list, "GT_REF");
- gt_init_workarounds(i915, &lists->gt_wa_list);
+ wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
+ gt_init_workarounds(gt->i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
- char *name = lists->engine[id].name;
-
- snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
- wa_init_start(wal, name);
+ wa_init_start(wal, "REF", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
- snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
-
__intel_engine_init_ctx_wa(engine,
&lists->engine[id].ctx_wa_list,
- name);
+ "CTX_REF");
}
}
static void
-reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
intel_wa_list_free(&lists->engine[id].wa_list);
intel_wa_list_free(&lists->gt_wa_list);
@@ -102,7 +120,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -119,7 +137,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto err_req;
@@ -184,7 +204,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
- struct igt_wedge_me wedge;
+ struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
@@ -195,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
i915_gem_object_lock(results);
- igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+ intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
- if (i915_terminally_wedged(ctx->i915))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
if (err)
goto out_put;
@@ -231,35 +251,29 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, engine->mask, "live_workarounds");
+ intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, "live_workarounds");
+ return intel_engine_reset(engine, "live_workarounds");
}
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
struct igt_spinner *spin)
{
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct i915_request *rq;
- intel_wakeref_t wakeref;
int err = 0;
- ctx = kernel_context(engine->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
-
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
- kernel_context_close(ctx);
+ rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ intel_context_put(ce);
if (IS_ERR(rq)) {
spin = NULL;
@@ -267,13 +281,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
goto err;
}
- i915_request_add(rq);
-
- if (spin && !igt_wait_for_spinner(spin, rq)) {
- pr_err("Spinner failed to start\n");
- err = -ETIMEDOUT;
- }
-
+ err = request_add_spin(rq, spin);
err:
if (err && spin)
igt_spinner_end(spin);
@@ -286,79 +294,82 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
- engine->whitelist.count, name);
-
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
+ pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, engine->name, name);
ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ err = igt_spinner_init(&spin, engine->gt);
+ if (err)
+ goto out_ctx;
+
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
- goto out;
+ goto out_spin;
}
err = switch_to_scratch_context(engine, &spin);
if (err)
- goto out;
+ goto out_spin;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
- goto out;
+ goto out_spin;
}
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
- goto out;
+ goto out_spin;
}
+ tmp = kernel_context(i915);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto out_spin;
+ }
kernel_context_close(ctx);
-
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ctx = tmp;
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
- goto out;
+ goto out_spin;
}
-out:
+out_spin:
+ igt_spinner_fini(&spin);
+out_ctx:
kernel_context_close(ctx);
return err;
}
-static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+static struct i915_vma *create_batch(struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -393,6 +404,10 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
int i;
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_WR)
+ return true;
+
for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
if (wo_registers[i].platform == platform &&
wo_registers[i].reg == reg)
@@ -404,7 +419,8 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
static bool ro_register(u32 reg)
{
- if (reg & RING_FORCE_TO_NONPRIV_RD)
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
return true;
return false;
@@ -425,8 +441,7 @@ static int whitelist_writable_count(struct intel_engine_cs *engine)
return count;
}
-static int check_dirty_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_dirty_whitelist(struct intel_context *ce)
{
const u32 values[] = {
0x00000000,
@@ -454,16 +469,17 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
0xffff00ff,
0xffffffff,
};
+ struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
+ scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
- batch = create_batch(ctx);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_scratch;
@@ -476,16 +492,19 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
u32 srm, lrm, rsvd;
u32 expect;
int idx;
+ bool ro_reg;
if (wo_register(engine, reg))
continue;
- if (ro_register(reg))
- continue;
+ ro_reg = ro_register(reg);
+
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
lrm++, srm++;
pr_debug("%s: Writing garbage to %x\n",
@@ -542,9 +561,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
- i915_gem_chipset_flush(ctx->i915);
+ intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -556,6 +575,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
@@ -563,15 +590,11 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
err_request:
- i915_request_add(rq);
- if (err)
- goto out_batch;
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = request_add_sync(rq, err);
+ if (err) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
- i915_gem_set_wedged(ctx->i915);
- err = -EIO;
+ intel_gt_set_wedged(engine->gt);
goto out_batch;
}
@@ -582,24 +605,35 @@ err_request:
}
GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
- rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
- if (!rsvd) {
- pr_err("%s: Unable to write to whitelisted register %x\n",
- engine->name, reg);
- err = -EINVAL;
- goto out_unpin;
+ if (!ro_reg) {
+ /* detect write masking */
+ rsvd = results[ARRAY_SIZE(values)];
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
}
expect = results[0];
idx = 1;
for (v = 0; v < ARRAY_SIZE(values); v++) {
- expect = reg_write(expect, values[v], rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, values[v], rsvd);
+
if (results[idx] != expect)
err++;
idx++;
}
for (v = 0; v < ARRAY_SIZE(values); v++) {
- expect = reg_write(expect, ~values[v], rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, ~values[v], rsvd);
+
if (results[idx] != expect)
err++;
idx++;
@@ -608,15 +642,22 @@ err_request:
pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
engine->name, err, reg);
- pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
- engine->name, reg, results[0], rsvd);
+ if (ro_reg)
+ pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
+ engine->name, reg, results[0]);
+ else
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
expect = results[0];
idx = 1;
for (v = 0; v < ARRAY_SIZE(values); v++) {
u32 w = values[v];
- expect = reg_write(expect, w, rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
pr_info("Wrote %08x, read %08x, expect %08x\n",
w, results[idx], expect);
idx++;
@@ -624,7 +665,10 @@ err_request:
for (v = 0; v < ARRAY_SIZE(values); v++) {
u32 w = ~values[v];
- expect = reg_write(expect, w, rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
pr_info("Wrote %08x, read %08x, expect %08x\n",
w, results[idx], expect);
idx++;
@@ -638,7 +682,7 @@ out_unpin:
break;
}
- if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(engine->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -649,84 +693,68 @@ out_scratch:
static int live_dirty_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- struct drm_file *file;
- int err = 0;
/* Can the user write to the whitelisted registers? */
- if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- mutex_unlock(&i915->drm.struct_mutex);
- file = mock_file(i915);
- mutex_lock(&i915->drm.struct_mutex);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- goto out_rpm;
- }
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
- }
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ int err;
- for_each_engine(engine, i915, id) {
if (engine->whitelist.count == 0)
continue;
- err = check_dirty_whitelist(ctx, engine);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = check_dirty_whitelist(ce);
+ intel_context_put(ce);
if (err)
- goto out_file;
+ return err;
}
-out_file:
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
- mutex_lock(&i915->drm.struct_mutex);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- return err;
+ return 0;
}
static int live_reset_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
+ igt_global_reset_lock(gt);
- if (!engine || engine->whitelist.count == 0)
- return 0;
-
- igt_global_reset_lock(i915);
+ for_each_engine(engine, gt, id) {
+ if (engine->whitelist.count == 0)
+ continue;
- if (intel_has_reset_engine(i915)) {
- err = check_whitelist_across_reset(engine,
- do_engine_reset,
- "engine");
- if (err)
- goto out;
- }
+ if (intel_has_reset_engine(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_engine_reset,
+ "engine");
+ if (err)
+ goto out;
+ }
- if (intel_has_gpu_reset(i915)) {
- err = check_whitelist_across_reset(engine,
- do_device_reset,
- "device");
- if (err)
- goto out;
+ if (intel_has_gpu_reset(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_device_reset,
+ "device");
+ if (err)
+ goto out;
+ }
}
out:
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -742,6 +770,14 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ i915_vma_lock(results);
+ err = i915_request_await_object(rq, results->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(results);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -756,8 +792,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
u64 offset = results->node.start + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
- /* Clear RD only and WR only flags */
- reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
*cs++ = srm;
*cs++ = reg;
@@ -767,23 +803,21 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
intel_ring_advance(rq, cs);
err_req:
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
-
- return err;
+ return request_add_sync(rq, err);
}
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- batch = create_batch(ctx);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ batch = create_batch(vm);
+ i915_vm_put(vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
@@ -800,13 +834,16 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
if (ro_register(reg))
continue;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
*cs++ = reg;
*cs++ = 0xffffffff;
}
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(batch->obj);
- i915_gem_chipset_flush(ctx->i915);
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -820,13 +857,19 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
/* Perform the writes from an unprivileged "user" batch */
err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
err_request:
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
+ err = request_add_sync(rq, err);
err_unpin:
i915_gem_object_unpin_map(batch->obj);
@@ -927,7 +970,8 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
for (i = 0; i < engine->whitelist.count; i++) {
const struct i915_wa *wa = &engine->whitelist.list[i];
- if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+ if (i915_mmio_reg_offset(wa->reg) &
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
continue;
if (!fn(engine, a[i], b[i], wa->reg))
@@ -942,7 +986,7 @@ err_a:
static int live_isolated_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct {
struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
@@ -956,40 +1000,46 @@ static int live_isolated_whitelist(void *arg)
* invisible to a second context.
*/
- if (!intel_engines_has_context_isolation(i915))
- return 0;
-
- if (!i915->kernel_context->vm)
+ if (!intel_engines_has_context_isolation(gt->i915))
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_address_space *vm;
struct i915_gem_context *c;
- c = kernel_context(i915);
+ c = kernel_context(gt->i915);
if (IS_ERR(c)) {
err = PTR_ERR(c);
goto err;
}
- client[i].scratch[0] = create_scratch(c->vm, 1024);
+ vm = i915_gem_context_get_vm_rcu(c);
+
+ client[i].scratch[0] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(c->vm, 1024);
+ client[i].scratch[1] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
client[i].ctx = c;
+ i915_vm_put(vm);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
+ if (!engine->kernel_context->vm)
+ continue;
+
if (!whitelist_writable_count(engine))
continue;
@@ -1043,7 +1093,7 @@ err:
kernel_context_close(client[i].ctx);
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
@@ -1060,7 +1110,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
enum intel_engine_id id = ce->engine->id;
ok &= engine_wa_list_verify(ce,
@@ -1071,7 +1121,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
&lists->engine[id].ctx_wa_list,
str) == 0;
}
- i915_gem_context_unlock_engines(ctx);
return ok;
}
@@ -1079,39 +1128,42 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
static int
live_gpu_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ i915_gem_context_lock_engines(ctx);
+
pr_info("Verifying after GPU reset...\n");
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok)
goto out;
- i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset");
out:
+ i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
return ok ? 0 : -ESRCH;
}
@@ -1119,29 +1171,30 @@ out:
static int
live_engine_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
+ struct intel_gt *gt = arg;
+ struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct igt_spinner spin;
- enum intel_engine_id id;
struct i915_request *rq;
intel_wakeref_t wakeref;
struct wa_lists lists;
int ret = 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ struct intel_engine_cs *engine = ce->engine;
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
@@ -1152,7 +1205,7 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_reset_engine(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after idle reset");
if (!ok) {
@@ -1160,27 +1213,25 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- ret = igt_spinner_init(&spin, i915);
+ ret = igt_spinner_init(&spin, engine->gt);
if (ret)
goto err;
- rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
igt_spinner_fini(&spin);
goto err;
}
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
pr_err("Spinner failed to start\n");
igt_spinner_fini(&spin);
- ret = -ETIMEDOUT;
goto err;
}
- i915_reset_engine(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
@@ -1191,14 +1242,14 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
}
-
err:
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ i915_gem_context_unlock_engines(ctx);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
kernel_context_close(ctx);
- igt_flush_test(i915, I915_WAIT_LOCKED);
+ igt_flush_test(gt->i915);
return ret;
}
@@ -1212,14 +1263,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_gpu_reset_workarounds),
SUBTEST(live_engine_reset_workarounds),
};
- int err;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 65b52be23d42..aeb1d1f616e8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -4,18 +4,18 @@
* Copyright © 2017-2018 Intel Corporation
*/
-#include "../i915_timeline.h"
+#include "../intel_timeline.h"
#include "mock_timeline.h"
-void mock_timeline_init(struct i915_timeline *timeline, u64 context)
+void mock_timeline_init(struct intel_timeline *timeline, u64 context)
{
- timeline->i915 = NULL;
+ timeline->gt = NULL;
timeline->fence_context = context;
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request);
+ INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -23,7 +23,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
INIT_LIST_HEAD(&timeline->link);
}
-void mock_timeline_fini(struct i915_timeline *timeline)
+void mock_timeline_fini(struct intel_timeline *timeline)
{
i915_syncmap_free(&timeline->sync);
}
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
new file mode 100644
index 000000000000..d2bcc3df6183
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#ifndef __MOCK_TIMELINE__
+#define __MOCK_TIMELINE__
+
+#include <linux/types.h>
+
+struct intel_timeline;
+
+void mock_timeline_init(struct intel_timeline *timeline, u64 context);
+void mock_timeline_fini(struct intel_timeline *timeline);
+
+#endif /* !__MOCK_TIMELINE__ */
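The new header above only declares the two mock helpers. A minimal usage sketch follows, not part of the patch; it assumes the caller embeds the struct intel_timeline itself (mock_timeline_init() only fills in a structure it is handed) and mirrors the includes used by mock_timeline.c.

#include "../intel_timeline.h"	/* struct definition, as in mock_timeline.c */
#include "mock_timeline.h"

static int mock_timeline_smoke(void)
{
	struct intel_timeline tl;

	/* 1 is an arbitrary fence context id for this sketch */
	mock_timeline_init(&tl, 1);

	/* ... exercise tl.requests / tl.sync here ... */

	mock_timeline_fini(&tl);
	return 0;
}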
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index c40a6efdd33a..5d00a3b2d914 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -1,44 +1,48 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
-static void gen8_guc_raise_irq(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
-}
+/**
+ * DOC: GuC
+ *
+ * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
+ * designed to offload some of the functionality usually performed by the host
+ * driver; currently the main operations it can take care of are:
+ *
+ * - Authentication of the HuC, which is required to fully enable HuC usage.
+ * - Low latency graphics context scheduling (a.k.a. GuC submission).
+ * - GT Power management.
+ *
+ * The enable_guc module parameter can be used to select which of those
+ * operations to enable within GuC. Note that not all the operations are
+ * supported on all gen9+ platforms.
+ *
+ * Enabling the GuC is not mandatory and therefore the firmware is only loaded
+ * if at least one of the operations is selected. However, not loading the GuC
+ * might result in the loss of some features that do require the GuC (currently
+ * just the HuC, but more are expected to land in the future).
+ */
-static void gen11_guc_raise_irq(struct intel_guc *guc)
+void intel_guc_notify(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
+ /*
+ * On Gen11+, the value written to the register is passed as a payload
+ * to the FW. However, the FW currently treats all values the same way
+ * (H2G interrupt), so we can just write the value that the HW expects
+ * on older gens.
+ */
+ intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
@@ -52,11 +56,11 @@ static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
void intel_guc_init_send_regs(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
enum forcewake_domains fw_domains = 0;
unsigned int i;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(gt->i915) >= 11) {
guc->send_regs.base =
i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
@@ -67,206 +71,122 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
}
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
+ fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
guc->send_regs.fw_domains = fw_domains;
}
-void intel_guc_init_early(struct intel_guc *guc)
+static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- intel_guc_fw_init_early(guc);
- intel_guc_ct_init_early(&guc->ct);
- intel_guc_log_init_early(&guc->log);
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- mutex_init(&guc->send_mutex);
- spin_lock_init(&guc->irq_lock);
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
- if (INTEL_GEN(i915) >= 11) {
- guc->notify = gen11_guc_raise_irq;
- guc->interrupts.reset = gen11_reset_guc_interrupts;
- guc->interrupts.enable = gen11_enable_guc_interrupts;
- guc->interrupts.disable = gen11_disable_guc_interrupts;
- } else {
- guc->notify = gen8_guc_raise_irq;
- guc->interrupts.reset = gen9_reset_guc_interrupts;
- guc->interrupts.enable = gen9_enable_guc_interrupts;
- guc->interrupts.disable = gen9_disable_guc_interrupts;
- }
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(&gt->irq_lock);
}
-static int guc_init_wq(struct intel_guc *guc)
+static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.relay.flush_wq =
- alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.relay.flush_wq) {
- DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
- return -ENOMEM;
- }
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- /*
- * Even though both sending GuC action, and adding a new workitem to
- * GuC workqueue are serialized (each with its own locking), since
- * we're using mutliple engines, it's possible that we're going to
- * issue a preempt request with two (or more - each for different
- * engine) workitems in GuC queue. In this situation, GuC may submit
- * all of them, which will make us very confused.
- * Our preemption contexts may even already be complete - before we
- * even had the chance to sent the preempt action to GuC!. Rather
- * than introducing yet another lock, we can just use ordered workqueue
- * to make sure we're always sending a single preemption request with a
- * single workitem.
- */
- if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
- USES_GUC_SUBMISSION(dev_priv)) {
- guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
- WQ_HIGHPRI);
- if (!guc->preempt_wq) {
- destroy_workqueue(guc->log.relay.flush_wq);
- DRM_ERROR("Couldn't allocate workqueue for GuC "
- "preemption\n");
- return -ENOMEM;
- }
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
+ guc->interrupts.enabled = true;
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
}
-
- return 0;
+ spin_unlock_irq(&gt->irq_lock);
}
-static void guc_fini_wq(struct intel_guc *guc)
+static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
- struct workqueue_struct *wq;
-
- wq = fetch_and_zero(&guc->preempt_wq);
- if (wq)
- destroy_workqueue(wq);
-
- wq = fetch_and_zero(&guc->log.relay.flush_wq);
- if (wq)
- destroy_workqueue(wq);
-}
+ struct intel_gt *gt = guc_to_gt(guc);
-int intel_guc_init_misc(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_i915(guc);
- int ret;
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- ret = guc_init_wq(guc);
- if (ret)
- return ret;
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
- intel_uc_fw_fetch(i915, &guc->fw);
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
- return 0;
-}
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
-void intel_guc_fini_misc(struct intel_guc *guc)
-{
- intel_uc_fw_cleanup_fetch(&guc->fw);
- guc_fini_wq(guc);
+ gen9_reset_guc_interrupts(guc);
}
-static int guc_shared_data_create(struct intel_guc *guc)
+static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
+ struct intel_gt *gt = guc_to_gt(guc);
- guc->shared_data = vma;
- guc->shared_data_vaddr = vaddr;
-
- return 0;
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(&gt->irq_lock);
}
-static void guc_shared_data_destroy(struct intel_guc *guc)
+static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
- i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_MASK, ~events);
+ guc->interrupts.enabled = true;
+ }
+ spin_unlock_irq(&gt->irq_lock);
}
-int intel_guc_init(struct intel_guc *guc)
+static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- ret = intel_uc_fw_init(&guc->fw);
- if (ret)
- goto err_fetch;
-
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_fw;
- GEM_BUG_ON(!guc->shared_data);
-
- ret = intel_guc_log_create(&guc->log);
- if (ret)
- goto err_shared;
-
- ret = intel_guc_ads_create(guc);
- if (ret)
- goto err_log;
- GEM_BUG_ON(!guc->ads_vma);
+ struct intel_gt *gt = guc_to_gt(guc);
- ret = intel_guc_ct_init(&guc->ct);
- if (ret)
- goto err_ads;
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- return 0;
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
-err_ads:
- intel_guc_ads_destroy(guc);
-err_log:
- intel_guc_log_destroy(&guc->log);
-err_shared:
- guc_shared_data_destroy(guc);
-err_fw:
- intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- return ret;
+ gen11_reset_guc_interrupts(guc);
}
-void intel_guc_fini(struct intel_guc *guc)
+void intel_guc_init_early(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
- i915_ggtt_disable_guc(dev_priv);
-
- intel_guc_ct_fini(&guc->ct);
+ intel_guc_fw_init_early(guc);
+ intel_guc_ct_init_early(&guc->ct);
+ intel_guc_log_init_early(&guc->log);
+ intel_guc_submission_init_early(guc);
- intel_guc_ads_destroy(guc);
- intel_guc_log_destroy(&guc->log);
- guc_shared_data_destroy(guc);
- intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
+ mutex_init(&guc->send_mutex);
+ spin_lock_init(&guc->irq_lock);
+ if (INTEL_GEN(i915) >= 11) {
+ guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
+ guc->interrupts.reset = gen11_reset_guc_interrupts;
+ guc->interrupts.enable = gen11_enable_guc_interrupts;
+ guc->interrupts.disable = gen11_disable_guc_interrupts;
+ } else {
+ guc->notify_reg = GUC_SEND_INTERRUPT;
+ guc->interrupts.reset = gen9_reset_guc_interrupts;
+ guc->interrupts.enable = gen9_enable_guc_interrupts;
+ guc->interrupts.disable = gen9_disable_guc_interrupts;
+ }
}
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
@@ -287,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!USES_GUC_SUBMISSION(guc_to_i915(guc)))
+ if (!intel_guc_is_submission_supported(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -297,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+ if (intel_guc_is_submission_supported(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -364,13 +284,12 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/
-void intel_guc_init_params(struct intel_guc *guc)
+static void guc_init_params(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 params[GUC_CTL_MAX_DWORDS];
+ u32 *params = guc->params;
int i;
- memset(params, 0, sizeof(params));
+ BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
@@ -380,32 +299,107 @@ void intel_guc_init_params(struct intel_guc *guc)
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+void intel_guc_write_params(struct intel_guc *guc)
+{
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
+ int i;
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
- I915_WRITE(SOFT_SCRATCH(0), 0);
+ intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+ intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size)
+int intel_guc_init(struct intel_guc *guc)
{
- WARN(1, "Unexpected send: action=%#x\n", *action);
- return -ENODEV;
+ struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = intel_uc_fw_init(&guc->fw);
+ if (ret)
+ goto err_fetch;
+
+ ret = intel_guc_log_create(&guc->log);
+ if (ret)
+ goto err_fw;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_log;
+ GEM_BUG_ON(!guc->ads_vma);
+
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+
+ if (intel_guc_is_submission_supported(guc)) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
+ ret = intel_guc_submission_init(guc);
+ if (ret)
+ goto err_ct;
+ }
+
+ /* now that everything is perma-pinned, initialize the parameters */
+ guc_init_params(guc);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(gt->ggtt);
+
+ return 0;
+
+err_ct:
+ intel_guc_ct_fini(&guc->ct);
+err_ads:
+ intel_guc_ads_destroy(guc);
+err_log:
+ intel_guc_log_destroy(&guc->log);
+err_fw:
+ intel_uc_fw_fini(&guc->fw);
+err_fetch:
+ intel_uc_fw_cleanup_fetch(&guc->fw);
+ DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+ return ret;
}
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
+void intel_guc_fini(struct intel_guc *guc)
{
- WARN(1, "Unexpected event: no suitable handler\n");
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (!intel_uc_fw_is_available(&guc->fw))
+ return;
+
+ i915_ggtt_disable_guc(gt->ggtt);
+
+ if (intel_guc_is_submission_supported(guc))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_ct_fini(&guc->ct);
+
+ intel_guc_ads_destroy(guc);
+ intel_guc_log_destroy(&guc->log);
+ intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
+
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -414,8 +408,7 @@ void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 status;
int i;
int ret;
@@ -464,7 +457,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
int count = min(response_buf_size, guc->send_regs.count - 1);
for (i = 0; i < count; i++)
- response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
+ response_buf[i] = intel_uncore_read(uncore,
+ guc_send_reg(guc, i + 1));
}
/* Use data from the GuC response as our return value */
@@ -497,7 +491,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
u32 action[2];
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
@@ -538,7 +532,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
*/
int intel_guc_suspend(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
int ret;
u32 status;
u32 action[] = {
@@ -547,6 +541,13 @@ int intel_guc_suspend(struct intel_guc *guc)
};
/*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to suspend the GuC.
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
+ /*
* The ENTER_S_STATE action queues the save/restore operation in GuC FW
* and then returns, so waiting on the H2G is not enough to guarantee
* GuC is done. When all the processing is done, GuC writes
@@ -556,13 +557,14 @@ int intel_guc_suspend(struct intel_guc *guc)
* in progress so we need to take care of that ourselves as well.
*/
- I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+ intel_uncore_write(uncore, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK);
ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret)
return ret;
- ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
@@ -586,19 +588,9 @@ int intel_guc_suspend(struct intel_guc *guc)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine)
{
- u32 data[7];
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
- data[1] = engine->guc_id;
- data[2] = 0;
- data[3] = 0;
- data[4] = 0;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
+ /* XXX: to be implemented with submission interface rework */
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return -ENODEV;
}
/**
@@ -612,13 +604,27 @@ int intel_guc_resume(struct intel_guc *guc)
GUC_POWER_D0,
};
+ /*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to resume the GuC but we do need to enable the
+ * GuC communication on resume (above).
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
- * DOC: GuC Address Space
+ * DOC: GuC Memory Management
*
- * The layout of GuC address space is shown below:
+ * GuC can't allocate any memory for its own usage, so all the allocations must
+ * be handled by the host driver. GuC accesses the memory via the GGTT, with the
+ * exception of the top and bottom parts of the 4GB address space, which are
+ * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
+ * or other parts of the HW. The driver must take care not to place objects that
+ * the GuC is going to access in these reserved ranges. The layout of the GuC
+ * address space is shown below:
*
* ::
*
@@ -658,17 +664,17 @@ int intel_guc_resume(struct intel_guc *guc)
*/
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_shmem(gt->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
@@ -679,9 +685,43 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
}
- return vma;
+ return i915_vma_make_unshrinkable(vma);
err:
i915_gem_object_put(obj);
return vma;
}
+
+/**
+ * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ * @out_vma: return variable for the allocated vma pointer
+ * @out_vaddr: return variable for the obj mapping
+ *
+ * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
+ * object with I915_MAP_WB.
+ *
+ * Return: 0 if successful, a negative errno code otherwise.
+ */
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ *out_vma = vma;
+ *out_vaddr = vaddr;
+
+ return 0;
+}
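intel_guc_allocate_and_map_vma() above bundles the allocation and the write-back mapping into one call. The following is an editorial sketch of a typical caller, following the pattern intel_guc_ads_create()/intel_guc_ads_destroy() adopt later in this patch; the my_vma/my_vaddr parameters are hypothetical and only stand in for wherever a real caller stores the results.

static int guc_object_create_sketch(struct intel_guc *guc,
				    struct i915_vma **my_vma, void **my_vaddr)
{
	int ret;

	/* one call allocates the GGTT-visible object and maps it write-back */
	ret = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, my_vma, my_vaddr);
	if (ret)
		return ret;

	/* ... fill *my_vaddr with whatever the GuC expects to read ... */

	return 0;
}

static void guc_object_destroy_sketch(struct i915_vma **my_vma)
{
	/* I915_VMA_RELEASE_MAP drops the kernel mapping taken above */
	i915_vma_unpin_and_release(my_vma, I915_VMA_RELEASE_MAP);
}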
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 08c906abdfa2..910d49590068 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_H_
@@ -35,51 +16,46 @@
#include "i915_utils.h"
#include "i915_vma.h"
-struct guc_preempt_work {
- struct work_struct work;
- struct intel_engine_cs *engine;
-};
+struct __guc_ads_blob;
/*
* Top level structure of GuC. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
- * ExecList submission.
+ * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList
+ * submission.
*/
struct intel_guc {
struct intel_uc_fw fw;
struct intel_guc_log log;
struct intel_guc_ct ct;
- /* Log snapshot if GuC errors during load */
- struct drm_i915_gem_object *load_err_log;
-
/* intel_guc_recv interrupt related state */
spinlock_t irq_lock;
unsigned int msg_enabled_mask;
struct {
bool enabled;
- void (*reset)(struct drm_i915_private *i915);
- void (*enable)(struct drm_i915_private *i915);
- void (*disable)(struct drm_i915_private *i915);
+ void (*reset)(struct intel_guc *guc);
+ void (*enable)(struct intel_guc *guc);
+ void (*disable)(struct intel_guc *guc);
} interrupts;
+ bool submission_supported;
+
struct i915_vma *ads_vma;
+ struct __guc_ads_blob *ads_blob;
+
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct ida stage_ids;
- struct i915_vma *shared_data;
- void *shared_data_vaddr;
- struct intel_guc_client *execbuf_client;
- struct intel_guc_client *preempt_client;
+ struct i915_vma *workqueue;
+ void *workqueue_vaddr;
+ spinlock_t wq_lock;
- struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
- struct workqueue_struct *preempt_wq;
+ struct i915_vma *proc_desc;
+ void *proc_desc_vaddr;
- DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
- /* Cyclic counter mod pagesize */
- u32 db_cacheline;
+ /* Control params for fw initialization */
+ u32 params[GUC_CTL_MAX_DWORDS];
/* GuC's FW specific registers used in MMIO send */
struct {
@@ -88,41 +64,33 @@ struct intel_guc {
enum forcewake_domains fw_domains;
} send_regs;
- /* To serialize the intel_guc_send actions */
- struct mutex send_mutex;
-
- /* GuC's FW specific send function */
- int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
- u32 *response_buf, u32 response_buf_size);
+ /* register used to send interrupts to the GuC FW */
+ i915_reg_t notify_reg;
- /* GuC's FW specific event handler function */
- void (*handler)(struct intel_guc *guc);
+ /* Store msg (e.g. log flush) that we see while CTBs are disabled */
+ u32 mmio_msg;
- /* GuC's FW specific notify function */
- void (*notify)(struct intel_guc *guc);
+ /* To serialize the intel_guc_send actions */
+ struct mutex send_mutex;
};
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
- return guc->send(guc, action, len, NULL, 0);
+ return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
}
static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- return guc->send(guc, action, len, response_buf, response_buf_size);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
- guc->notify(guc);
+ return intel_guc_ct_send(&guc->ct, action, len,
+ response_buf, response_buf_size);
}
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
- guc->handler(guc);
+ intel_guc_ct_event_handler(&guc->ct);
}
/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
@@ -154,17 +122,12 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
-void intel_guc_init_params(struct intel_guc *guc);
-int intel_guc_init_misc(struct intel_guc *guc);
+void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
-void intel_guc_fini_misc(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size);
+void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -172,18 +135,37 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr);
-static inline bool intel_guc_is_loaded(struct intel_guc *guc)
+static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
- return intel_uc_fw_is_loaded(&guc->fw);
+ return intel_uc_fw_is_supported(&guc->fw);
+}
+
+static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_enabled(&guc->fw);
+}
+
+static inline bool intel_guc_is_running(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_running(&guc->fw);
}
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
intel_uc_fw_sanitize(&guc->fw);
+ guc->mmio_msg = 0;
+
return 0;
}
+static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
+{
+ return guc->submission_supported;
+}
+
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
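With the guc->send/notify function pointers removed, intel_guc_send() above is now a thin wrapper over intel_guc_ct_send(). Below is an editorial sketch of issuing a simple H2G action through it, modelled on intel_guc_sample_forcewake() in intel_guc.c above; the second dword is a firmware-defined payload and is left at zero here purely for illustration.

static int guc_send_action_sketch(struct intel_guc *guc)
{
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	action[1] = 0;	/* fw-defined payload; zero only for this sketch */

	/* routed to intel_guc_ct_send(); WARNs and fails if CT is not enabled */
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}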
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ecb69fc94218..101728006ae9 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -83,18 +65,14 @@ struct __guc_ads_blob {
u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
} __packed;
-static int __guc_ads_init(struct intel_guc *guc)
+static void __guc_ads_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct __guc_ads_blob *blob;
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
u8 engine_class;
- blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
@@ -115,7 +93,8 @@ static int __guc_ads_init(struct intel_guc *guc)
*/
blob->ads.golden_context_lrca[engine_class] = 0;
blob->ads.eng_state_size[engine_class] =
- intel_engine_context_size(dev_priv, engine_class) -
+ intel_engine_context_size(guc_to_gt(guc),
+ engine_class) -
skipped_size;
}
@@ -144,9 +123,7 @@ static int __guc_ads_init(struct intel_guc *guc)
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
blob->ads.clients_info = base + ptr_offset(blob, clients_info);
- i915_gem_object_unpin_map(guc->ads_vma->obj);
-
- return 0;
+ i915_gem_object_flush_map(guc->ads_vma->obj);
}
/**
@@ -159,31 +136,24 @@ static int __guc_ads_init(struct intel_guc *guc)
int intel_guc_ads_create(struct intel_guc *guc)
{
const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
- struct i915_vma *vma;
int ret;
GEM_BUG_ON(guc->ads_vma);
- vma = intel_guc_allocate_vma(guc, size);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- guc->ads_vma = vma;
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
+ (void **)&guc->ads_blob);
- ret = __guc_ads_init(guc);
if (ret)
- goto err_vma;
+ return ret;
- return 0;
+ __guc_ads_init(guc);
-err_vma:
- i915_vma_unpin_and_release(&guc->ads_vma, 0);
- return ret;
+ return 0;
}
void intel_guc_ads_destroy(struct intel_guc *guc)
{
- i915_vma_unpin_and_release(&guc->ads_vma, 0);
+ i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
new file mode 100644
index 000000000000..b00d3ae1113a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_ADS_H_
+#define _INTEL_GUC_ADS_H_
+
+struct intel_guc;
+
+int intel_guc_ads_create(struct intel_guc *guc);
+void intel_guc_ads_destroy(struct intel_guc *guc);
+void intel_guc_ads_reset(struct intel_guc *guc);
+
+#endif
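The header above exposes the ADS lifecycle. The sketch below shows the create/destroy ordering as driven from intel_guc_init()/intel_guc_fini() in this patch; it is not part of the patch, and intel_guc_ads_reset() is declared above but implemented outside this excerpt, so it is only mentioned in a comment.

static int guc_ads_lifecycle_sketch(struct intel_guc *guc)
{
	int ret;

	/* allocates the blob, maps it and fills it via __guc_ads_init() */
	ret = intel_guc_ads_create(guc);
	if (ret)
		return ret;

	/*
	 * ... firmware load and run: the GuC reads the blob through the GGTT
	 * offsets programmed by __guc_ads_init(); intel_guc_ads_reset() would
	 * re-run that initialisation after a GPU reset ...
	 */

	/* unpins the vma and, with I915_VMA_RELEASE_MAP, drops the mapping */
	intel_guc_ads_destroy(guc);
	return 0;
}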
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 3921809f812b..c6f971a049f9 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -1,24 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2016-2019 Intel Corporation
*/
#include "i915_drv.h"
@@ -55,13 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
- /* we're using static channel owners */
- ct->host_channel.owner = CTB_OWNER_HOST;
-
- spin_lock_init(&ct->lock);
- INIT_LIST_HEAD(&ct->pending_requests);
- INIT_LIST_HEAD(&ct->incoming_requests);
- INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
+ spin_lock_init(&ct->requests.lock);
+ INIT_LIST_HEAD(&ct->requests.pending);
+ INIT_LIST_HEAD(&ct->requests.incoming);
+ INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -82,14 +61,13 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
- u32 cmds_addr, u32 size, u32 owner)
+ u32 cmds_addr, u32 size)
{
- CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
- desc, cmds_addr, size, owner);
+ CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
- desc->owner = owner;
+ desc->owner = CTB_OWNER_HOST;
}
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
@@ -122,12 +100,11 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
- u32 owner,
u32 type)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
- owner,
+ CTB_OWNER_HOST,
type
};
int err;
@@ -135,20 +112,27 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
/* Can't use generic send(), CT deregistration must go over MMIO */
err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (err)
- DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
- guc_ct_buffer_type_to_str(type), owner, err);
+ DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
-static int ctch_init(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_init - Init buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for buffer-based communication.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
{
- struct i915_vma *vma;
+ struct intel_guc *guc = ct_to_guc(ct);
void *blob;
int err;
int i;
- GEM_BUG_ON(ctch->vma);
+ GEM_BUG_ON(ct->vma);
/* We allocate 1 page to hold both descriptors and both buffers.
* ___________.....................
@@ -172,71 +156,65 @@ static int ctch_init(struct intel_guc *guc,
* other code will need updating as well.
*/
- /* allocate vma */
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_out;
+ err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
+ if (err) {
+ DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+ return err;
}
- ctch->vma = vma;
- /* map first page */
- blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(blob)) {
- err = PTR_ERR(blob);
- goto err_vma;
- }
CT_DEBUG_DRIVER("CT: vma base=%#x\n",
- intel_guc_ggtt_offset(guc, ctch->vma));
+ intel_guc_ggtt_offset(guc, ct->vma));
/* store pointers to desc and cmds */
- for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
- GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
- ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
+ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
+ GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+ ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
+ ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
}
return 0;
-
-err_vma:
- i915_vma_unpin_and_release(&ctch->vma, 0);
-err_out:
- CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
- ctch->owner, err);
- return err;
}
-static void ctch_fini(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_fini - Fini buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for buffer-based communication.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
- GEM_BUG_ON(ctch->enabled);
+ GEM_BUG_ON(ct->enabled);
- i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
+ i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}
-static int ctch_enable(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
+ struct intel_guc *guc = ct_to_guc(ct);
u32 base;
int err;
int i;
- GEM_BUG_ON(!ctch->vma);
-
- GEM_BUG_ON(ctch->enabled);
+ GEM_BUG_ON(ct->enabled);
/* vma should be already allocated and map'ed */
- base = intel_guc_ggtt_offset(guc, ctch->vma);
+ GEM_BUG_ON(!ct->vma);
+ base = intel_guc_ggtt_offset(guc, ct->vma);
/* (re)initialize descriptors
* cmds buffers are in the second half of the blob page
*/
- for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
+ guc_ct_buffer_desc_init(ct->ctbs[i].desc,
base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
- PAGE_SIZE/4,
- ctch->owner);
+ PAGE_SIZE/4);
}
/* register buffers, starting with RECV buffer
@@ -254,38 +232,42 @@ static int ctch_enable(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
- ctch->enabled = true;
+ ct->enabled = true;
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
- ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
+ DRM_ERROR("CT: can't open channel; err=%d\n", err);
return err;
}
-static void ctch_disable(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
- GEM_BUG_ON(!ctch->enabled);
+ struct intel_guc *guc = ct_to_guc(ct);
- ctch->enabled = false;
+ GEM_BUG_ON(!ct->enabled);
- guc_action_deregister_ct_buffer(guc,
- ctch->owner,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
- guc_action_deregister_ct_buffer(guc,
- ctch->owner,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ct->enabled = false;
+
+ if (intel_guc_is_running(guc)) {
+ guc_action_deregister_ct_buffer(guc,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ guc_action_deregister_ct_buffer(guc,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ }
}
-static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
+static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
- return ++ctch->next_fence;
+ return ++ct->requests.next_fence;
}
/**
@@ -458,35 +440,34 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
return err;
}
-static int ctch_send(struct intel_guc_ct *ct,
- struct intel_guc_ct_channel *ctch,
- const u32 *action,
- u32 len,
- u32 *response_buf,
- u32 response_buf_size,
- u32 *status)
+static int ct_send(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len,
+ u32 *response_buf,
+ u32 response_buf_size,
+ u32 *status)
{
- struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
struct ct_request request;
unsigned long flags;
u32 fence;
int err;
- GEM_BUG_ON(!ctch->enabled);
+ GEM_BUG_ON(!ct->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
- fence = ctch_get_next_fence(ctch);
+ fence = ct_get_next_fence(ct);
request.fence = fence;
request.status = 0;
request.response_len = response_buf_size;
request.response_buf = response_buf;
- spin_lock_irqsave(&ct->lock, flags);
- list_add_tail(&request.link, &ct->pending_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request.link, &ct->requests.pending);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
err = ctb_write(ctb, action, len, fence, !!response_buf);
if (unlikely(err))
@@ -519,9 +500,9 @@ static int ctch_send(struct intel_guc_ct *ct,
}
unlink:
- spin_lock_irqsave(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
list_del(&request.link);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
return err;
}
@@ -529,18 +510,21 @@ unlink:
/*
* Command Transport (CT) buffer based GuC send function.
*/
-static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size)
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
{
- struct intel_guc_ct *ct = &guc->ct;
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ struct intel_guc *guc = ct_to_guc(ct);
u32 status = ~0; /* undefined */
int ret;
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
+ }
+
mutex_lock(&guc->send_mutex);
- ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
- &status);
+ ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
action[0], ret, status);
@@ -671,8 +655,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
- spin_lock(&ct->lock);
- list_for_each_entry(req, &ct->pending_requests, link) {
+ spin_lock(&ct->requests.lock);
+ list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
CT_DEBUG_DRIVER("CT: request %u awaits response\n",
req->fence);
@@ -690,7 +674,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
found = true;
break;
}
- spin_unlock(&ct->lock);
+ spin_unlock(&ct->requests.lock);
if (!found)
DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
@@ -728,13 +712,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
u32 *payload;
bool done;
- spin_lock_irqsave(&ct->lock, flags);
- request = list_first_entry_or_null(&ct->incoming_requests,
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ request = list_first_entry_or_null(&ct->requests.incoming,
struct ct_incoming_request, link);
if (request)
list_del(&request->link);
- done = !!list_empty(&ct->incoming_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ done = !!list_empty(&ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
if (!request)
return true;
@@ -752,12 +736,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
static void ct_incoming_request_worker_func(struct work_struct *w)
{
- struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+ struct intel_guc_ct *ct =
+ container_of(w, struct intel_guc_ct, requests.worker);
bool done;
done = ct_process_incoming_requests(ct);
if (!done)
- queue_work(system_unbound_wq, &ct->worker);
+ queue_work(system_unbound_wq, &ct->requests.worker);
}
/**
@@ -795,23 +780,28 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
}
memcpy(request->msg, msg, 4 * msglen);
- spin_lock_irqsave(&ct->lock, flags);
- list_add_tail(&request->link, &ct->incoming_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request->link, &ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
- queue_work(system_unbound_wq, &ct->worker);
+ queue_work(system_unbound_wq, &ct->requests.worker);
return 0;
}
-static void ct_process_host_channel(struct intel_guc_ct *ct)
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
- if (!ctch->enabled)
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
+ }
do {
err = ctb_read(ctb, msg);
@@ -830,100 +820,3 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
}
}
-/*
- * When we're communicating with the GuC over CT, GuC uses events
- * to notify us about new messages being posted on the RECV buffer.
- */
-static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
-{
- struct intel_guc_ct *ct = &guc->ct;
-
- ct_process_host_channel(ct);
-}
-
-/**
- * intel_guc_ct_init - Init CT communication
- * @ct: pointer to CT struct
- *
- * Allocate memory required for communication via
- * the CT channel.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_init(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- int err;
-
- err = ctch_init(guc, ctch);
- if (unlikely(err)) {
- DRM_ERROR("CT: can't open channel %d; err=%d\n",
- ctch->owner, err);
- return err;
- }
-
- GEM_BUG_ON(!ctch->vma);
- return 0;
-}
-
-/**
- * intel_guc_ct_fini - Fini CT communication
- * @ct: pointer to CT struct
- *
- * Deallocate memory required for communication via
- * the CT channel.
- */
-void intel_guc_ct_fini(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- ctch_fini(guc, ctch);
-}
-
-/**
- * intel_guc_ct_enable - Enable buffer based command transport.
- * @ct: pointer to CT struct
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_enable(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- int err;
-
- if (ctch->enabled)
- return 0;
-
- err = ctch_enable(guc, ctch);
- if (unlikely(err))
- return err;
-
- /* Switch into cmd transport buffer based send() */
- guc->send = intel_guc_send_ct;
- guc->handler = intel_guc_to_host_event_handler_ct;
- DRM_INFO("CT: %s\n", enableddisabled(true));
- return 0;
-}
-
-/**
- * intel_guc_ct_disable - Disable buffer based command transport.
- * @ct: pointer to CT struct
- */
-void intel_guc_ct_disable(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- if (!ctch->enabled)
- return;
-
- ctch_disable(guc, ctch);
-
- /* Disable send */
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
- DRM_INFO("CT: %s\n", enableddisabled(false));
-}
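ct_send() above assigns a fence, queues each request on ct->requests.pending and lets ct_handle_response() copy any reply into the caller's buffer. The sketch below shows a caller that expects response data back; it is not part of the patch, and the opcode and payload are placeholders, since real values come from the GuC ABI headers.

static int guc_ct_query_sketch(struct intel_guc_ct *ct)
{
	u32 action[2] = { 0 /* INTEL_GUC_ACTION_xxx */, 0 /* payload */ };
	u32 response[8];
	int ret;

	/*
	 * Blocks until the matching response arrives (or times out); up to
	 * ARRAY_SIZE(response) dwords of reply data are copied back.
	 */
	ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action),
				response, ARRAY_SIZE(response));
	if (ret < 0)
		return ret;

	/* a positive return carries data from the GuC response (see ct_send) */
	return 0;
}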
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
new file mode 100644
index 000000000000..3e7fe237cfa5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CT_H_
+#define _INTEL_GUC_CT_H_
+
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "intel_guc_fwif.h"
+
+struct i915_vma;
+struct intel_guc;
+
+/**
+ * DOC: Command Transport (CT).
+ *
+ * Buffer based command transport is a replacement for the MMIO based mechanism.
+ * It can be used to perform both host-to-GuC and GuC-to-host communication.
+ */
+
+/** Represents a single command transport buffer.
+ *
+ * A single command transport buffer consists of two parts, the header
+ * record (command transport buffer descriptor) and the actual buffer which
+ * holds the commands.
+ *
+ * @desc: pointer to the buffer descriptor
+ * @cmds: pointer to the commands buffer
+ */
+struct intel_guc_ct_buffer {
+ struct guc_ct_buffer_desc *desc;
+ u32 *cmds;
+};
+
+/** Top-level structure for Command Transport related data
+ *
+ * Includes a pair of CT buffers for bi-directional communication and tracking
+ * for the H2G and G2H requests sent and received through the buffers.
+ */
+struct intel_guc_ct {
+ struct i915_vma *vma;
+ bool enabled;
+
+ /* buffers for sending(0) and receiving(1) commands */
+ struct intel_guc_ct_buffer ctbs[2];
+
+ struct {
+ u32 next_fence; /* fence to be used with next request to send */
+
+ spinlock_t lock; /* protects pending requests list */
+ struct list_head pending; /* requests waiting for response */
+
+ struct list_head incoming; /* incoming requests */
+ struct work_struct worker; /* handler for incoming requests */
+ } requests;
+};
+
+void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+int intel_guc_ct_init(struct intel_guc_ct *ct);
+void intel_guc_ct_fini(struct intel_guc_ct *ct);
+int intel_guc_ct_enable(struct intel_guc_ct *ct);
+void intel_guc_ct_disable(struct intel_guc_ct *ct);
+
+static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
+{
+ return ct->enabled;
+}
+
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
+
+#endif /* _INTEL_GUC_CT_H_ */
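The structure above pairs one VMA holding both CTBs with a requests sub-struct for fence and worker tracking. Below is an editorial sketch of the lifecycle implied by these declarations, flattened into one function purely for illustration; in the driver this sequencing lives in the intel_uc layer.

static int guc_ct_lifecycle_sketch(struct intel_guc_ct *ct)
{
	u32 action[1] = { 0 };	/* placeholder opcode, for illustration only */
	int ret;

	ret = intel_guc_ct_init(ct);	/* allocate the shared CTB page */
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(ct);	/* register both buffers with the GuC */
	if (ret)
		goto out_fini;

	if (intel_guc_ct_enabled(ct))
		ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action), NULL, 0);

	intel_guc_ct_disable(ct);	/* deregisters only if the GuC is still running */
out_fini:
	intel_guc_ct_fini(ct);		/* release the CTB page */
	return ret;
}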
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
new file mode 100644
index 000000000000..3a1c47d600ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ *
+ * Authors:
+ * Vinit Azad <vinit.azad@intel.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ * Dave Gordon <david.s.gordon@intel.com>
+ * Alex Dai <yu.dai@intel.com>
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
+ *
+ * On platforms with GuC, this selects the firmware for uploading
+ */
+void intel_guc_fw_init_early(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
+ INTEL_INFO(i915)->platform, INTEL_REVID(i915));
+}
+
+static void guc_prepare_xfer(struct intel_uncore *uncore)
+{
+ u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_MIA_CACHING |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING;
+
+ /* Must program this register before loading the ucode with DMA */
+ intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
+
+ if (IS_GEN9_LP(uncore->i915))
+ intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ else
+ intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+ if (IS_GEN(uncore->i915, 9)) {
+ /* DOP Clock Gating Enable for GuC clocks */
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
+
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
+ intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
+ }
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ size_t copied;
+ int i;
+
+ copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
+ GEM_BUG_ON(copied < sizeof(rsa));
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
+{
+ u32 val = intel_uncore_read(uncore, GUC_STATUS);
+ u32 uk_val = val & GS_UKERNEL_MASK;
+
+ *status = val;
+ return (uk_val == GS_UKERNEL_READY) ||
+ ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+static int guc_wait_ucode(struct intel_uncore *uncore)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Wait for the GuC to start up.
+ * NB: Docs recommend not using the interrupt for completion.
+ * Measurements indicate this should take no more than 20ms, so a
+ * timeout here indicates that the GuC has failed and is unusable.
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
+ */
+ ret = wait_for(guc_ready(uncore, &status), 100);
+ DRM_DEBUG_DRIVER("GuC status %#x\n", status);
+
+ if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+ DRM_ERROR("GuC firmware signature verification failed\n");
+ ret = -ENOEXEC;
+ }
+
+ if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
+ DRM_ERROR("GuC firmware exception. EIP: %#x\n",
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+ ret = -ENXIO;
+ }
+
+ return ret;
+}
+
+/**
+ * intel_guc_fw_upload() - load GuC uCode to device
+ * @guc: intel_guc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_fw_upload(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ guc_prepare_xfer(uncore);
+
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded via MMIO.
+ */
+ guc_xfer_rsa(&guc->fw, uncore);
+
+ /*
+ * Current uCode expects the code to be loaded at 8k; locations below
+ * this are used for the stack.
+ */
+ ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
+ if (ret)
+ goto out;
+
+ ret = guc_wait_ucode(uncore);
+ if (ret)
+ goto out;
+
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ return 0;
+
+out:
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
new file mode 100644
index 000000000000..b5ab639d7259
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_FW_H_
+#define _INTEL_GUC_FW_H_
+
+struct intel_guc;
+
+void intel_guc_fw_init_early(struct intel_guc *guc);
+int intel_guc_fw_upload(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index f55f3bc8524d..a6b733c146c9 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -1,28 +1,15 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
@@ -39,17 +26,11 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
-/*
- * XXX: Beware that Gen9 firmware 32.x uses wrong definition for
- * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now
- * as we are not enabling GuC submission mode where this will be used
- */
#define GUC_MAX_ENGINE_CLASSES 5
-#define GUC_MAX_INSTANCES_PER_CLASS 4
+#define GUC_MAX_INSTANCES_PER_CLASS 16
#define GUC_DOORBELL_INVALID 256
-#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
/* Work queue item header definitions */
@@ -122,76 +103,6 @@
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
-/**
- * DOC: GuC Firmware Layout
- *
- * The GuC firmware layout looks like this:
- *
- * +-------------------------------+
- * | uc_css_header |
- * | |
- * | contains major/minor version |
- * +-------------------------------+
- * | uCode |
- * +-------------------------------+
- * | RSA signature |
- * +-------------------------------+
- * | modulus key |
- * +-------------------------------+
- * | exponent val |
- * +-------------------------------+
- *
- * The firmware may or may not have modulus key and exponent data. The header,
- * uCode and RSA signature are must-have components that will be used by driver.
- * Length of each components, which is all in dwords, can be found in header.
- * In the case that modulus and exponent are not present in fw, a.k.a truncated
- * image, the length value still appears in header.
- *
- * Driver will do some basic fw size validation based on the following rules:
- *
- * 1. Header, uCode and RSA are must-have components.
- * 2. All firmware components, if they present, are in the sequence illustrated
- * in the layout table above.
- * 3. Length info of each component can be found in header, in dwords.
- * 4. Modulus and exponent key are not required by driver. They may not appear
- * in fw. So driver will load a truncated firmware in this case.
- *
- * HuC firmware layout is same as GuC firmware.
- * Only HuC version information is saved in a different way.
- */
-
-struct uc_css_header {
- u32 module_type;
- /* header_size includes all non-uCode bits, including css_header, rsa
- * key, modulus key and exponent data. */
- u32 header_size_dw;
- u32 header_version;
- u32 module_id;
- u32 module_vendor;
- u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
- u32 size_dw; /* uCode plus header_size_dw */
- u32 key_size_dw;
- u32 modulus_size_dw;
- u32 exponent_size_dw;
- u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
- char username[8];
- char buildnumber[12];
- u32 sw_version;
-#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
-#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
-#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
- u32 reserved[14];
- u32 header_info;
-} __packed;
-
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
@@ -636,6 +547,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
@@ -644,7 +556,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
- INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e3b83ecb90b5..caed0d57e704 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -1,31 +1,14 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#include <linux/debugfs.h>
-#include "intel_guc_log.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
+#include "i915_memcpy.h"
+#include "intel_guc_log.h"
static void guc_log_capture_logs(struct intel_guc_log *log);
@@ -209,7 +192,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev,
+ dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
"GuC log buffer overflow\n");
}
@@ -243,7 +226,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (WARN_ON(!intel_guc_log_relay_enabled(log)))
+ if (WARN_ON(!intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
@@ -378,17 +361,19 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
INIT_WORK(&log->relay.flush_work, capture_logs_work);
+ log->relay.started = false;
}
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
lockdep_assert_held(&log->relay.lock);
+ GEM_BUG_ON(!log->vma);
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = log->vma->size;
@@ -429,7 +414,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@@ -442,6 +427,29 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
guc_action_flush_log_complete(guc);
}
+static u32 __get_default_log_level(struct intel_guc_log *log)
+{
+ /* A negative value means "use platform/config default" */
+ if (i915_modparams.guc_log_level < 0) {
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
+ }
+
+ if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "guc_log_level", i915_modparams.guc_log_level,
+ "verbosity too high");
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
+ }
+
+ GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
+ GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
+ return i915_modparams.guc_log_level;
+}
+
int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
@@ -481,7 +489,11 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->vma = vma;
- log->level = i915_modparams.guc_log_level;
+ log->level = __get_default_log_level(log);
+ DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
+ log->level, enableddisabled(log->level),
+ yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
@@ -498,7 +510,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -535,7 +547,7 @@ out_unlock:
return ret;
}
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
+bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
return log->relay.buf_addr;
}
@@ -544,9 +556,12 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
{
int ret;
+ if (!log->vma)
+ return -ENODEV;
+
mutex_lock(&log->relay.lock);
- if (intel_guc_log_relay_enabled(log)) {
+ if (intel_guc_log_relay_created(log)) {
ret = -EEXIST;
goto out_unlock;
}
@@ -571,6 +586,21 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_unlock(&log->relay.lock);
+ return 0;
+
+out_relay:
+ guc_log_relay_destroy(log);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+
+ return ret;
+}
+
+int intel_guc_log_relay_start(struct intel_guc_log *log)
+{
+ if (log->relay.started)
+ return -EEXIST;
+
guc_log_enable_flush_events(log);
/*
@@ -578,49 +608,61 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
* the flush notification. This means that we need to unconditionally
* flush on relay enabling, since GuC only notifies us once.
*/
- queue_work(log->relay.flush_wq, &log->relay.flush_work);
-
- return 0;
+ queue_work(system_highpri_wq, &log->relay.flush_work);
-out_relay:
- guc_log_relay_destroy(log);
-out_unlock:
- mutex_unlock(&log->relay.lock);
+ log->relay.started = true;
- return ret;
+ return 0;
}
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
+ if (!log->relay.started)
+ return;
+
/*
* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
}
-void intel_guc_log_relay_close(struct intel_guc_log *log)
+/*
+ * Stops the relay log. Called from intel_guc_log_relay_close(), so no
+ * possibility of race with start/flush since relay_write cannot race
+ * relay_close.
+ */
+static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (!log->relay.started)
+ return;
guc_log_disable_flush_events(log);
- synchronize_irq(i915->drm.irq);
+ intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
+ log->relay.started = false;
+}
+
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+ guc_log_relay_stop(log);
+
mutex_lock(&log->relay.lock);
- GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
+ GEM_BUG_ON(!intel_guc_log_relay_created(log));
guc_log_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
@@ -628,5 +670,5 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
- queue_work(log->relay.flush_wq, &log->relay.flush_work);
+ queue_work(system_highpri_wq, &log->relay.flush_work);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 7bc763f10c03..c252c022c5fc 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_LOG_H_
@@ -66,7 +47,7 @@ struct intel_guc_log {
struct i915_vma *vma;
struct {
void *buf_addr;
- struct workqueue_struct *flush_wq;
+ bool started;
struct work_struct flush_work;
struct rchan *channel;
struct mutex lock;
@@ -85,8 +66,9 @@ int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
+bool intel_guc_log_relay_created(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
+int intel_guc_log_relay_start(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
void intel_guc_log_relay_close(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index a214f8b71929..1949346e714e 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -1,29 +1,16 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_GUC_REG_H_
#define _INTEL_GUC_REG_H_
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
/* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000)
@@ -37,6 +24,7 @@
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
@@ -95,6 +83,9 @@
#define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0)
+#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8)
+#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0)
+
#define GUC_ARAT_C6DIS _MMIO(0xA178)
#define GUC_SHIM_CONTROL _MMIO(0xc064)
@@ -135,21 +126,21 @@ struct guc_doorbell_info {
#define GUC_PM_P24C_IER _MMIO(0xC55C)
/* GuC Interrupt Vector */
-#define GEN11_GUC_INTR_GUC2HOST (1 << 15)
-#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14)
-#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13)
-#define GEN11_GUC_INTR_SEM_SIG (1 << 12)
-#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11)
-#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10)
-#define GEN11_GUC_INTR_DMA_DONE (1 << 9)
-#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8)
-#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7)
-#define GEN11_GUC_INTR_SW_INT_6 (1 << 6)
-#define GEN11_GUC_INTR_SW_INT_5 (1 << 5)
-#define GEN11_GUC_INTR_SW_INT_4 (1 << 4)
-#define GEN11_GUC_INTR_SW_INT_3 (1 << 3)
-#define GEN11_GUC_INTR_SW_INT_2 (1 << 2)
-#define GEN11_GUC_INTR_SW_INT_1 (1 << 1)
-#define GEN11_GUC_INTR_SW_INT_0 (1 << 0)
+#define GUC_INTR_GUC2HOST BIT(15)
+#define GUC_INTR_EXEC_ERROR BIT(14)
+#define GUC_INTR_DISPLAY_EVENT BIT(13)
+#define GUC_INTR_SEM_SIG BIT(12)
+#define GUC_INTR_IOMMU2GUC BIT(11)
+#define GUC_INTR_DOORBELL_RANG BIT(10)
+#define GUC_INTR_DMA_DONE BIT(9)
+#define GUC_INTR_FATAL_ERROR BIT(8)
+#define GUC_INTR_NOTIF_ERROR BIT(7)
+#define GUC_INTR_SW_INT_6 BIT(6)
+#define GUC_INTR_SW_INT_5 BIT(5)
+#define GUC_INTR_SW_INT_4 BIT(4)
+#define GUC_INTR_SW_INT_3 BIT(3)
+#define GUC_INTR_SW_INT_2 BIT(2)
+#define GUC_INTR_SW_INT_1 BIT(1)
+#define GUC_INTR_SW_INT_0 BIT(0)
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
new file mode 100644
index 000000000000..9e42324fdecd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include <linux/circ_buf.h>
+
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
+
+#include "intel_guc_submission.h"
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+/**
+ * DOC: GuC-based command submission
+ *
+ * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
+ * firmware is moving to an updated submission interface and we plan to
+ * turn submission back on when that lands. The below documentation (and related
+ * code) matches the old submission model and will be updated as part of the
+ * upgrade to the new flow.
+ *
+ * GuC stage descriptor:
+ * During initialization, the driver allocates a static pool of 1024 such
+ * descriptors, and shares them with the GuC. Currently, we only use one
+ * descriptor. This stage descriptor lets the GuC know about the workqueue and
+ * process descriptor. Theoretically, it also lets the GuC know about our HW
+ * contexts (context ID, etc...), but we actually employ a kind of submission
+ * where the GuC uses the LRCA sent via the work item instead. This is called
+ * a "proxy" submission.
+ *
+ * The Scratch registers:
+ * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
+ * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
+ * triggers an interrupt on the GuC via another register write (0xC4C8).
+ * Firmware writes a success/fail code back to the action register after it
+ * processes the request. The kernel driver polls waiting for this update and
+ * then proceeds.
+ *
+ * Work Items:
+ * There are several types of work items that the host may place into a
+ * workqueue, each with its own requirements and limitations. Currently only
+ * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
+ * represents an in-order queue. The kernel driver packs the ring tail pointer
+ * and an ELSP context descriptor dword into a Work Item.
+ * See guc_add_request()
+ *
+ */
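As a worked illustration of the scratch-register protocol described above, the
sequence reduces to "write action plus payload, ring the doorbell, poll for the
response". The sketch below is not the driver's real MMIO send path (that lives
in intel_guc.c); it assumes the register and message-type names defined in
intel_guc_reg.h and intel_guc_fwif.h and drops almost all error handling.

static int guc_send_mmio_sketch(struct intel_uncore *uncore,
				const u32 *action, u32 len)
{
	u32 status;
	int i;

	/* Action code goes into SOFT_SCRATCH(0), payload into the next regs */
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(i), action[i]);

	/* Ring the doorbell so the firmware notices the new request */
	intel_uncore_write(uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);

	/* Poll SOFT_SCRATCH(0) until the firmware writes back a response */
	return __intel_wait_for_register(uncore, SOFT_SCRATCH(0),
					 INTEL_GUC_MSG_TYPE_MASK,
					 INTEL_GUC_MSG_TYPE_RESPONSE <<
					 INTEL_GUC_MSG_TYPE_SHIFT,
					 10, 10, &status);
}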
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
+{
+ struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
+
+ return &base[id];
+}
+
+static int guc_workqueue_create(struct intel_guc *guc)
+{
+ return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
+ &guc->workqueue_vaddr);
+}
+
+static void guc_workqueue_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
+}
+
+/*
+ * Initialise the process descriptor shared with the GuC firmware.
+ */
+static int guc_proc_desc_create(struct intel_guc *guc)
+{
+ const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
+
+ return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
+ &guc->proc_desc_vaddr);
+}
+
+static void guc_proc_desc_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
+}
+
+static void guc_proc_desc_init(struct intel_guc *guc)
+{
+ struct guc_process_desc *desc;
+
+ desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
+
+ /*
+ * XXX: pDoorbell and WQVBaseAddress are pointers in process address
+ * space for ring3 clients (set them as in mmap_ioctl) or kernel
+ * space for kernel clients (map on demand instead? May make debug
+ * easier to have it mapped).
+ */
+ desc->wq_base_addr = 0;
+ desc->db_base_addr = 0;
+
+ desc->wq_size_bytes = GUC_WQ_SIZE;
+ desc->wq_status = WQ_STATUS_ACTIVE;
+ desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
+}
+
+static void guc_proc_desc_fini(struct intel_guc *guc)
+{
+ memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
+}
+
+static int guc_stage_desc_pool_create(struct intel_guc *guc)
+{
+ u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS);
+
+ return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
+ &guc->stage_desc_pool_vaddr);
+}
+
+static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
+}
+
+/*
+ * Initialise/clear the stage descriptor shared with the GuC firmware.
+ *
+ * This descriptor tells the GuC where (in GGTT space) to find the important
+ * data structures related to work submission (process descriptor, write queue,
+ * etc).
+ */
+static void guc_stage_desc_init(struct intel_guc *guc)
+{
+ struct guc_stage_desc *desc;
+
+ /* we only use 1 stage desc, so hardcode it to 0 */
+ desc = __get_stage_desc(guc, 0);
+ memset(desc, 0, sizeof(*desc));
+
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
+ GUC_STAGE_DESC_ATTR_KERNEL;
+
+ desc->stage_id = 0;
+ desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
+
+ desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
+ desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
+ desc->wq_size = GUC_WQ_SIZE;
+}
+
+static void guc_stage_desc_fini(struct intel_guc *guc)
+{
+ struct guc_stage_desc *desc;
+
+ desc = __get_stage_desc(guc, 0);
+ memset(desc, 0, sizeof(*desc));
+}
+
+/* Construct a Work Item and append it to the GuC's Work Queue */
+static void guc_wq_item_append(struct intel_guc *guc,
+ u32 target_engine, u32 context_desc,
+ u32 ring_tail, u32 fence_id)
+{
+ /* wqi_len is in DWords, and does not include the one-word header */
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ const u32 wqi_len = wqi_size / sizeof(u32) - 1;
+ struct guc_process_desc *desc = guc->proc_desc_vaddr;
+ struct guc_wq_item *wqi;
+ u32 wq_off;
+
+ lockdep_assert_held(&guc->wq_lock);
+
+ /* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages, so
+ * a wqi structure can neither straddle a page boundary nor wrap around to
+ * the beginning. This simplifies the implementation below.
+ *
+ * XXX: if that ever stops being the case, we need to save the data to a
+ * temporary wqi and copy it into the workqueue buffer dword by dword.
+ */
+ BUILD_BUG_ON(wqi_size != 16);
+
+ /* We expect the WQ to be active if we're appending items to it */
+ GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
+
+ /* Free space is guaranteed. */
+ wq_off = READ_ONCE(desc->tail);
+ GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
+ GUC_WQ_SIZE) < wqi_size);
+ GEM_BUG_ON(wq_off & (wqi_size - 1));
+
+ wqi = guc->workqueue_vaddr + wq_off;
+
+ /* Now fill in the 4-word work queue item */
+ wqi->header = WQ_TYPE_INORDER |
+ (wqi_len << WQ_LEN_SHIFT) |
+ (target_engine << WQ_TARGET_SHIFT) |
+ WQ_NO_WCFLUSH_WAIT;
+ wqi->context_desc = context_desc;
+ wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+ wqi->fence_id = fence_id;
+
+ /* Make the update visible to GuC */
+ WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
+}
+
+static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
+ u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
+
+ guc_wq_item_append(guc, engine->guc_id, ctx_desc,
+ ring_tail, rq->fence.seqno);
+}
+
+/*
+ * When we're doing submissions using the regular execlists backend, writing to
+ * ELSP from the CPU side is enough to make sure that writes to ringbuffer pages
+ * pinned in the mappable aperture portion of the GGTT are visible to the
+ * command streamer. Writes done by the GuC on our behalf do not guarantee such
+ * ordering, so to ensure the flush we issue a posting read.
+ */
+static void flush_ggtt_writes(struct i915_vma *vma)
+{
+ if (i915_vma_is_map_and_fenceable(vma))
+ intel_uncore_posting_read_fw(vma->vm->gt->uncore,
+ GUC_STATUS);
+}
+
+static void guc_submit(struct intel_engine_cs *engine,
+ struct i915_request **out,
+ struct i915_request **end)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+
+ spin_lock(&guc->wq_lock);
+
+ do {
+ struct i915_request *rq = *out++;
+
+ flush_ggtt_writes(rq->ring->vma);
+ guc_add_request(guc, rq);
+ } while (out != end);
+
+ spin_unlock(&guc->wq_lock);
+}
+
+static inline int rq_prio(const struct i915_request *rq)
+{
+ return rq->sched.attr.priority | __NO_PREEMPTION;
+}
+
+static struct i915_request *schedule_in(struct i915_request *rq, int idx)
+{
+ trace_i915_request_in(rq, idx);
+
+ /*
+ * Currently we are not tracking the rq->context being inflight
+ * (ce->inflight = rq->engine). It is only used by the execlists
+ * backend at the moment, a similar counting strategy would be
+ * required if we generalise the inflight tracking.
+ */
+
+ __intel_gt_pm_get(rq->engine->gt);
+ return i915_request_get(rq);
+}
+
+static void schedule_out(struct i915_request *rq)
+{
+ trace_i915_request_out(rq);
+
+ intel_gt_pm_put_async(rq->engine->gt);
+ i915_request_put(rq);
+}
+
+static void __guc_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request **first = execlists->inflight;
+ struct i915_request ** const last_port = first + execlists->port_mask;
+ struct i915_request *last = first[0];
+ struct i915_request **port;
+ bool submit = false;
+ struct rb_node *rb;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ if (last) {
+ if (*++first)
+ return;
+
+ last = NULL;
+ }
+
+ /*
+ * We write directly into the execlists->inflight queue and don't use
+ * the execlists->pending queue, as we don't have a distinct switch
+ * event.
+ */
+ port = first;
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ if (last && rq->context != last->context) {
+ if (port == last_port)
+ goto done;
+
+ *port = schedule_in(last,
+ port - execlists->inflight);
+ port++;
+ }
+
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ submit = true;
+ last = rq;
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+done:
+ execlists->queue_priority_hint =
+ rb ? to_priolist(rb)->priority : INT_MIN;
+ if (submit) {
+ *port = schedule_in(last, port - execlists->inflight);
+ *++port = NULL;
+ guc_submit(engine, first, port);
+ }
+ execlists->active = execlists->inflight;
+}
+
+static void guc_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request **port, *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ for (port = execlists->inflight; (rq = *port); port++) {
+ if (!i915_request_completed(rq))
+ break;
+
+ schedule_out(rq);
+ }
+ if (port != execlists->inflight) {
+ int idx = port - execlists->inflight;
+ int rem = ARRAY_SIZE(execlists->inflight) - idx;
+ memmove(execlists->inflight, port, rem * sizeof(*port));
+ }
+
+ __guc_dequeue(engine);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void guc_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+}
+
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
+{
+ struct i915_request * const *port, *rq;
+
+ /* Note we are only using the inflight and not the pending queue */
+
+ for (port = execlists->active; (rq = *port); port++)
+ schedule_out(rq);
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
+}
+
+static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ cancel_port_requests(execlists);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rq = execlists_unwind_incomplete_requests(execlists);
+ if (!rq)
+ goto out_unlock;
+
+ if (!i915_request_started(rq))
+ stalled = false;
+
+ __i915_request_reset(rq, stalled);
+ intel_lr_context_reset(engine, rq->context, rq->head, stalled);
+
+out_unlock:
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void guc_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ /* Cancel the requests on the HW and clear the ELSP tracker. */
+ cancel_port_requests(execlists);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void guc_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
+
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
+}
+
+/*
+ * Everything below here is concerned with setup & teardown, and is
+ * therefore not part of the somewhat time-critical batch-submission
+ * path of guc_submit() above.
+ */
+
+/*
+ * Set up the memory resources to be shared with the GuC (via the GGTT)
+ * at firmware loading time.
+ */
+int intel_guc_submission_init(struct intel_guc *guc)
+{
+ int ret;
+
+ if (guc->stage_desc_pool)
+ return 0;
+
+ ret = guc_stage_desc_pool_create(guc);
+ if (ret)
+ return ret;
+ /*
+ * Keep static analysers happy, let them know that we allocated the
+ * vma after testing that it didn't exist earlier.
+ */
+ GEM_BUG_ON(!guc->stage_desc_pool);
+
+ ret = guc_workqueue_create(guc);
+ if (ret)
+ goto err_pool;
+
+ ret = guc_proc_desc_create(guc);
+ if (ret)
+ goto err_workqueue;
+
+ spin_lock_init(&guc->wq_lock);
+
+ return 0;
+
+err_workqueue:
+ guc_workqueue_destroy(guc);
+err_pool:
+ guc_stage_desc_pool_destroy(guc);
+ return ret;
+}
+
+void intel_guc_submission_fini(struct intel_guc *guc)
+{
+ if (guc->stage_desc_pool) {
+ guc_proc_desc_destroy(guc);
+ guc_workqueue_destroy(guc);
+ guc_stage_desc_pool_destroy(guc);
+ }
+}
+
+static void guc_interrupts_capture(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+ u32 dmask = irqs << 16 | irqs;
+
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
+
+ /* Don't handle the ctx switch interrupt in GuC submission mode */
+ intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
+ intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
+}
+
+static void guc_interrupts_release(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+ u32 dmask = irqs << 16 | irqs;
+
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
+
+ /* Handle ctx switch interrupts again */
+ intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
+ intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
+}
+
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ /*
+ * We inherit a bunch of functions from execlists that we'd like
+ * to keep using:
+ *
+ * engine->submit_request = execlists_submit_request;
+ * engine->cancel_requests = execlists_cancel_requests;
+ * engine->schedule = execlists_schedule;
+ *
+ * But we need to override the actual submission backend in order
+ * to talk to the GuC.
+ */
+ intel_execlists_set_default_submission(engine);
+
+ engine->execlists.tasklet.func = guc_submission_tasklet;
+
+ /* do not use execlists park/unpark */
+ engine->park = engine->unpark = NULL;
+
+ engine->reset.prepare = guc_reset_prepare;
+ engine->reset.rewind = guc_reset_rewind;
+ engine->reset.cancel = guc_reset_cancel;
+ engine->reset.finish = guc_reset_finish;
+
+ engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+
+ /*
+ * For the breadcrumb irq to work we need the interrupts to stay
+ * enabled. However, on all platforms on which we'll have support for
+ * GuC submission we don't allow disabling the interrupts at runtime, so
+ * we're always safe with the current flow.
+ */
+ GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
+}
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We're using GuC work items for submitting work through GuC. Since
+ * we're coalescing multiple requests from a single context into a
+ * single work item prior to assigning it to execlist_port, we can
+ * never have more work items than the total number of ports (for all
+ * engines). The GuC firmware controls the HEAD of the work queue, and
+ * it is guaranteed to remove the work item from the queue before our
+ * request is completed.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
+ sizeof(struct guc_wq_item) *
+ I915_NUM_ENGINES > GUC_WQ_SIZE);
+
+ guc_proc_desc_init(guc);
+ guc_stage_desc_init(guc);
+
+ /* Take over from manual control of ELSP (execlists) */
+ guc_interrupts_capture(gt);
+
+ for_each_engine(engine, gt, id) {
+ engine->set_default_submission = guc_set_default_submission;
+ engine->set_default_submission(engine);
+ }
+}
+
+void intel_guc_submission_disable(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ GEM_BUG_ON(gt->awake); /* GT should be parked first */
+
+ /* Note: By the time we're here, GuC may have already been reset */
+
+ guc_interrupts_release(gt);
+
+ guc_stage_desc_fini(guc);
+ guc_proc_desc_fini(guc);
+}
+
+static bool __guc_submission_support(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+
+ if (!intel_guc_is_supported(guc))
+ return false;
+
+ return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+void intel_guc_submission_init_early(struct intel_guc *guc)
+{
+ guc->submission_supported = __guc_submission_support(guc);
+}
+
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission == guc_set_default_submission;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
new file mode 100644
index 000000000000..e402a2932592
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_SUBMISSION_H_
+#define _INTEL_GUC_SUBMISSION_H_
+
+#include <linux/types.h>
+
+struct intel_guc;
+struct intel_engine_cs;
+
+void intel_guc_submission_init_early(struct intel_guc *guc);
+int intel_guc_submission_init(struct intel_guc *guc);
+void intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_disable(struct intel_guc *guc);
+void intel_guc_submission_fini(struct intel_guc *guc);
+int intel_guc_preempt_work_create(struct intel_guc *guc);
+void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
new file mode 100644
index 000000000000..32a069841c14
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include "gt/intel_gt.h"
+#include "intel_huc.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: HuC
+ *
+ * The HuC is a dedicated microcontroller for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can directly use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * The kernel driver is only responsible for loading the HuC firmware and
+ * triggering its security authentication, which is performed by the GuC. For
+ * The GuC to correctly perform the authentication, the HuC binary must be
+ * loaded before the GuC one. Loading the HuC is optional; however, not using
+ * the HuC might negatively impact power usage and/or performance of media
+ * workloads, depending on the use-cases.
+ *
+ * See https://github.com/intel/media-driver for the latest details on HuC
+ * functionality.
+ */
+
+/**
+ * DOC: HuC Memory Management
+ *
+ * Similarly to the GuC, the HuC can't do any memory allocations on its own,
+ * with the difference being that the allocations for HuC usage are handled by
+ * the userspace driver instead of the kernel one. The HuC accesses the memory
+ * via the PPGTT belonging to the context loaded on the VCS executing the
+ * HuC-specific commands.
+ */
+
+void intel_huc_init_early(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
+
+ intel_huc_fw_init_early(huc);
+
+ if (INTEL_GEN(i915) >= 11) {
+ huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
+ huc->status.mask = HUC_LOAD_SUCCESSFUL;
+ huc->status.value = HUC_LOAD_SUCCESSFUL;
+ } else {
+ huc->status.reg = HUC_STATUS2;
+ huc->status.mask = HUC_FW_VERIFIED;
+ huc->status.value = HUC_FW_VERIFIED;
+ }
+}
+
+static int intel_huc_rsa_data_create(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
+ struct i915_vma *vma;
+ size_t copied;
+ void *vaddr;
+ int err;
+
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ /*
+ * HuC firmware will sit above GUC_GGTT_TOP and will not map
+ * through GTT. Unfortunately, this means GuC cannot perform
+ * the HuC auth, as the RSA offset now falls within the GuC
+ * inaccessible range. We resort to perma-pinning an additional
+ * vma within the accessible range that only contains the rsa
+ * signature. The GuC can use this extra pinning to perform
+ * the authentication since its GGTT offset will be GuC
+ * accessible.
+ */
+ GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE);
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
+ GEM_BUG_ON(copied < huc->fw.rsa_size);
+
+ i915_gem_object_unpin_map(vma->obj);
+
+ huc->rsa_data = vma;
+
+ return 0;
+}
+
+static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
+{
+ i915_vma_unpin_and_release(&huc->rsa_data, 0);
+}
+
+int intel_huc_init(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
+ int err;
+
+ err = intel_uc_fw_init(&huc->fw);
+ if (err)
+ goto out;
+
+ /*
+ * The HuC firmware image is outside the GuC-accessible range. Copy the
+ * RSA signature out of the image into a perma-pinned region set aside
+ * for it.
+ */
+ err = intel_huc_rsa_data_create(huc);
+ if (err)
+ goto out_fini;
+
+ return 0;
+
+out_fini:
+ intel_uc_fw_fini(&huc->fw);
+out:
+ intel_uc_fw_cleanup_fetch(&huc->fw);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+ return err;
+}
+
+void intel_huc_fini(struct intel_huc *huc)
+{
+ if (!intel_uc_fw_is_available(&huc->fw))
+ return;
+
+ intel_huc_rsa_data_destroy(huc);
+ intel_uc_fw_fini(&huc->fw);
+}
+
+/**
+ * intel_huc_auth() - Authenticate HuC uCode
+ * @huc: intel_huc structure
+ *
+ * Called after HuC and GuC firmware loading during intel_uc_init_hw().
+ *
+ * This function invokes the GuC action to authenticate the HuC firmware,
+ * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
+ * waits for up to 50ms for firmware verification ACK.
+ */
+int intel_huc_auth(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret;
+
+ GEM_BUG_ON(intel_huc_is_authenticated(huc));
+
+ if (!intel_uc_fw_is_loaded(&huc->fw))
+ return -ENOEXEC;
+
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (ret)
+ goto fail;
+
+ ret = intel_guc_auth_huc(guc,
+ intel_guc_ggtt_offset(guc, huc->rsa_data));
+ if (ret) {
+ DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ goto fail;
+ }
+
+ /* Check authentication status, it should be done by now */
+ ret = __intel_wait_for_register(gt->uncore,
+ huc->status.reg,
+ huc->status.mask,
+ huc->status.value,
+ 2, 50, NULL);
+ if (ret) {
+ DRM_ERROR("HuC: Firmware not verified %d\n", ret);
+ goto fail;
+ }
+
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ return 0;
+
+fail:
+ i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL);
+ return ret;
+}
+
+/**
+ * intel_huc_check_status() - check HuC status
+ * @huc: intel_huc structure
+ *
+ * This function reads status register to verify if HuC
+ * firmware was successfully loaded.
+ *
+ * Returns: 1 if HuC firmware is loaded and verified,
+ * 0 if HuC firmware is not loaded, and -ENODEV if HuC
+ * is not present on this platform.
+ */
+int intel_huc_check_status(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+ u32 status = 0;
+
+ if (!intel_huc_is_supported(huc))
+ return -ENODEV;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
+
+ return (status & huc->status.mask) == huc->status.value;
+}
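The value computed here is also what userspace sees through the
I915_PARAM_HUC_STATUS getparam. A minimal query, built against libdrm, could
look like the sketch below; the render-node path is only an example and error
handling is kept to the bare minimum.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HUC_STATUS,
		.value = &value,
	};
	int fd = open("/dev/dri/renderD128", O_RDWR); /* example node */

	if (fd < 0)
		return 1;

	/* 1: loaded and verified, 0: not loaded, ioctl error if unsupported */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		perror("I915_PARAM_HUC_STATUS");
	else
		printf("HuC firmware verified: %d\n", value);

	close(fd);
	return 0;
}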
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
new file mode 100644
index 000000000000..644c059fe01d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_HUC_H_
+#define _INTEL_HUC_H_
+
+#include "i915_reg.h"
+#include "intel_uc_fw.h"
+#include "intel_huc_fw.h"
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
+ struct i915_vma *rsa_data;
+
+ struct {
+ i915_reg_t reg;
+ u32 mask;
+ u32 value;
+ } status;
+};
+
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init(struct intel_huc *huc);
+void intel_huc_fini(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
+int intel_huc_check_status(struct intel_huc *huc);
+
+static inline int intel_huc_sanitize(struct intel_huc *huc)
+{
+ intel_uc_fw_sanitize(&huc->fw);
+ return 0;
+}
+
+static inline bool intel_huc_is_supported(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_supported(&huc->fw);
+}
+
+static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_enabled(&huc->fw);
+}
+
+static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_running(&huc->fw);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
new file mode 100644
index 000000000000..eee193bf2cc4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_huc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_huc_fw_init_early() - initializes HuC firmware struct
+ * @huc: intel_huc struct
+ *
+ * On platforms that have a HuC, this selects the firmware to be uploaded.
+ */
+void intel_huc_fw_init_early(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_uc *uc = &gt->uc;
+ struct drm_i915_private *i915 = gt->i915;
+
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
+ intel_uc_uses_guc(uc),
+ INTEL_INFO(i915)->platform, INTEL_REVID(i915));
+}
+
+/**
+ * intel_huc_fw_upload() - load HuC uCode to device
+ * @huc: intel_huc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset. Note that HuC must be loaded before GuC.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_fw_upload(struct intel_huc *huc)
+{
+ /* HW doesn't look at destination address for HuC, so set it to 0 */
+ return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
+}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index 8a00a0ebddc5..b791269ce923 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_HUC_FW_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
new file mode 100644
index 000000000000..64934a876a50
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_reset.h"
+#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
+#include "intel_uc.h"
+
+#include "i915_drv.h"
+
+static const struct intel_uc_ops uc_ops_off;
+static const struct intel_uc_ops uc_ops_on;
+
+/* Reset GuC providing us with fresh state for both GuC and HuC.
+ */
+static int __intel_uc_reset_hw(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ int ret;
+ u32 guc_status;
+
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (ret)
+ return ret;
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
+
+ return ret;
+}
+
+static void __confirm_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
+ "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_uses_guc(uc)),
+ yesno(intel_uc_uses_guc_submission(uc)),
+ yesno(intel_uc_uses_huc(uc)));
+
+ if (i915_modparams.enable_guc == -1)
+ return;
+
+ if (i915_modparams.enable_guc == 0) {
+ GEM_BUG_ON(intel_uc_uses_guc(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_huc(uc));
+ return;
+ }
+
+ if (!intel_uc_supports_guc(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "GuC is not supported!");
+
+ if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
+ !intel_uc_supports_huc(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "HuC is not supported!");
+
+ if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
+ !intel_uc_supports_guc_submission(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "GuC submission is N/A");
+
+ if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "undocumented flag");
+}
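For reference, the enable_guc bitmask validated above decodes as sketched
below. The helper is only an illustration (not part of the driver) built on the
ENABLE_GUC_SUBMISSION and ENABLE_GUC_LOAD_HUC bits checked in __confirm_options().

/*
 * Illustrative decode of i915.enable_guc as handled by __confirm_options():
 *   -1 : use the per-platform default
 *    0 : GuC and HuC both left disabled
 *    1 : ENABLE_GUC_SUBMISSION (currently reported as N/A, see above)
 *    2 : ENABLE_GUC_LOAD_HUC - load/authenticate HuC (GuC is still loaded,
 *        since it performs the authentication)
 *    3 : both bits
 * Any other bit triggers the "undocumented flag" message.
 */
static void decode_enable_guc(int enable_guc)
{
	if (enable_guc < 0) {
		pr_info("enable_guc: platform default\n");
		return;
	}

	pr_info("GuC submission %s, HuC load %s\n",
		enable_guc & ENABLE_GUC_SUBMISSION ? "requested" : "disabled",
		enable_guc & ENABLE_GUC_LOAD_HUC ? "requested" : "disabled");
}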
+
+void intel_uc_init_early(struct intel_uc *uc)
+{
+ intel_guc_init_early(&uc->guc);
+ intel_huc_init_early(&uc->huc);
+
+ __confirm_options(uc);
+
+ if (intel_uc_uses_guc(uc))
+ uc->ops = &uc_ops_on;
+ else
+ uc->ops = &uc_ops_off;
+}
+
+void intel_uc_driver_late_release(struct intel_uc *uc)
+{
+}
+
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ * @uc: the intel_uc structure
+ *
+ * Setup minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct intel_uc *uc)
+{
+ intel_guc_init_send_regs(&uc->guc);
+}
+
+static void __uc_capture_load_err_log(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (guc->log.vma && !uc->load_err_log)
+ uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+}
+
+static void __uc_free_load_err_log(struct intel_uc *uc)
+{
+ struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
+
+ if (log)
+ i915_gem_object_put(log);
+}
+
+static inline bool guc_communication_enabled(struct intel_guc *guc)
+{
+ return intel_guc_ct_enabled(&guc->ct);
+}
+
+/*
+ * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
+ * register using the same bits used in the CT message payload. Since our
+ * communication channel with guc is turned off at this point, we can save the
+ * message and handle it after we turn it back on.
+ */
+static void guc_clear_mmio_msg(struct intel_guc *guc)
+{
+ intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
+}
+
+static void guc_get_mmio_msg(struct intel_guc *guc)
+{
+ u32 val;
+
+ spin_lock_irq(&guc->irq_lock);
+
+ val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
+ guc->mmio_msg |= val & guc->msg_enabled_mask;
+
+ /*
+ * clear all events, including the ones we're not currently servicing,
+ * to make sure we don't try to process a stale message if we enable
+ * handling of more events later.
+ */
+ guc_clear_mmio_msg(guc);
+
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static void guc_handle_mmio_msg(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ /* we need communication to be enabled to reply to GuC */
+ GEM_BUG_ON(!guc_communication_enabled(guc));
+
+ if (!guc->mmio_msg)
+ return;
+
+ spin_lock_irq(&i915->irq_lock);
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ spin_unlock_irq(&i915->irq_lock);
+
+ guc->mmio_msg = 0;
+}
+
+static void guc_reset_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.reset(guc);
+}
+
+static void guc_enable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.enable(guc);
+}
+
+static void guc_disable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.disable(guc);
+}
+
+static int guc_enable_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int ret;
+
+ GEM_BUG_ON(guc_communication_enabled(guc));
+
+ ret = i915_inject_probe_error(i915, -ENXIO);
+ if (ret)
+ return ret;
+
+ ret = intel_guc_ct_enable(&guc->ct);
+ if (ret)
+ return ret;
+
+ /* check for mmio messages received before/during the CT enable */
+ guc_get_mmio_msg(guc);
+ guc_handle_mmio_msg(guc);
+
+ guc_enable_interrupts(guc);
+
+ /* check for CT messages received before we enabled interrupts */
+ spin_lock_irq(&i915->irq_lock);
+ intel_guc_ct_event_handler(&guc->ct);
+ spin_unlock_irq(&i915->irq_lock);
+
+ DRM_INFO("GuC communication enabled\n");
+
+ return 0;
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ /*
+ * Events generated during or after CT disable are logged by the GuC
+ * via mmio. Make sure the register is clear before disabling CT since
+ * all events we cared about have already been processed via CT.
+ */
+ guc_clear_mmio_msg(guc);
+
+ guc_disable_interrupts(guc);
+
+ intel_guc_ct_disable(&guc->ct);
+
+ /*
+ * Check for messages received during/after the CT disable. We do not
+ * expect any messages to have arrived via CT between the interrupt
+ * disable and the CT disable because GuC should've been idle until we
+ * triggered the CT disable protocol.
+ */
+ guc_get_mmio_msg(guc);
+
+ DRM_INFO("GuC communication disabled\n");
+}
+
+static void __uc_fetch_firmwares(struct intel_uc *uc)
+{
+ int err;
+
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
+
+ err = intel_uc_fw_fetch(&uc->guc.fw);
+ if (err)
+ return;
+
+ if (intel_uc_uses_huc(uc))
+ intel_uc_fw_fetch(&uc->huc.fw);
+}
+
+static void __uc_cleanup_firmwares(struct intel_uc *uc)
+{
+ intel_uc_fw_cleanup_fetch(&uc->huc.fw);
+ intel_uc_fw_cleanup_fetch(&uc->guc.fw);
+}
+
+static void __uc_init(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret;
+
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
+
+ /* XXX: GuC submission is unavailable for now */
+ GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+
+ ret = intel_guc_init(guc);
+ if (ret) {
+ intel_uc_fw_cleanup_fetch(&huc->fw);
+ return;
+ }
+
+ if (intel_uc_uses_huc(uc))
+ intel_huc_init(huc);
+}
+
+static void __uc_fini(struct intel_uc *uc)
+{
+ intel_huc_fini(&uc->huc);
+ intel_guc_fini(&uc->guc);
+
+ __uc_free_load_err_log(uc);
+}
+
+static int __uc_sanitize(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+
+ intel_huc_sanitize(huc);
+ intel_guc_sanitize(guc);
+
+ return __intel_uc_reset_hw(uc);
+}
+
+/* Initialize and verify the uC regs related to uC positioning in WOPCM */
+static int uc_init_wopcm(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+ u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
+ u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
+ u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
+ u32 mask;
+ int err;
+
+ if (unlikely(!base || !size)) {
+ i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
+ return -E2BIG;
+ }
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
+ GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
+ GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
+ GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
+
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
+ err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
+ size | GUC_WOPCM_SIZE_LOCKED);
+ if (err)
+ goto err_out;
+
+ mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+ err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
+ base | huc_agent, mask,
+ base | huc_agent |
+ GUC_WOPCM_OFFSET_VALID);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ i915_mmio_reg_offset(GUC_WOPCM_SIZE),
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
+
+ return err;
+}
+
+static bool uc_is_wopcm_locked(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+
+ return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
+ (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
+}
+
+static int __uc_check_hw(struct intel_uc *uc)
+{
+ if (!intel_uc_supports_guc(uc))
+ return 0;
+
+ /*
+ * We can silently continue without GuC only if it was never enabled
+ * before on this system after reboot, otherwise we risk GPU hangs.
+ * To check whether GuC was loaded before, we look at the WOPCM registers.
+ */
+ if (uc_is_wopcm_locked(uc))
+ return -EIO;
+
+ return 0;
+}
+
+static int __uc_init_hw(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret, attempts;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
+
+ if (!intel_uc_fw_is_available(&guc->fw)) {
+ ret = __uc_check_hw(uc) ||
+ intel_uc_fw_is_overridden(&guc->fw) ||
+ intel_uc_supports_guc_submission(uc) ?
+ intel_uc_fw_status_to_error(guc->fw.status) : 0;
+ goto err_out;
+ }
+
+ ret = uc_init_wopcm(uc);
+ if (ret)
+ goto err_out;
+
+ guc_reset_interrupts(guc);
+
+ /* WaEnableuKernelHeaderValidFix:skl */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
+ if (IS_GEN(i915, 9))
+ attempts = 3;
+ else
+ attempts = 1;
+
+ while (attempts--) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ ret = __uc_sanitize(uc);
+ if (ret)
+ goto err_out;
+
+ intel_huc_fw_upload(huc);
+ intel_guc_ads_reset(guc);
+ intel_guc_write_params(guc);
+ ret = intel_guc_fw_upload(guc);
+ if (ret == 0)
+ break;
+
+ DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", ret, attempts);
+ }
+
+ /* Did we succeed or run out of retries? */
+ if (ret)
+ goto err_log_capture;
+
+ ret = guc_enable_communication(guc);
+ if (ret)
+ goto err_log_capture;
+
+ intel_huc_auth(huc);
+
+ ret = intel_guc_sample_forcewake(guc);
+ if (ret)
+ goto err_communication;
+
+ if (intel_uc_supports_guc_submission(uc))
+ intel_guc_submission_enable(guc);
+
+ dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
+ guc->fw.major_ver_found, guc->fw.minor_ver_found,
+ "submission",
+ enableddisabled(intel_uc_supports_guc_submission(uc)));
+
+ if (intel_uc_uses_huc(uc)) {
+ dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ huc->fw.path,
+ huc->fw.major_ver_found, huc->fw.minor_ver_found,
+ "authenticated",
+ yesno(intel_huc_is_authenticated(huc)));
+ }
+
+ return 0;
+
+ /*
+ * We've failed to load the firmware :(
+ */
+err_communication:
+ guc_disable_communication(guc);
+err_log_capture:
+ __uc_capture_load_err_log(uc);
+err_out:
+ __uc_sanitize(uc);
+
+ if (!ret) {
+ dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+ /* We want to run without GuC submission */
+ return 0;
+ }
+
+ i915_probe_error(i915, "GuC initialization failed %d\n", ret);
+
+ /* We want to keep KMS alive */
+ return -EIO;
+}
+
+static void __uc_fini_hw(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ if (intel_uc_supports_guc_submission(uc))
+ intel_guc_submission_disable(guc);
+
+ if (guc_communication_enabled(guc))
+ guc_disable_communication(guc);
+
+ __uc_sanitize(uc);
+}
+
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @uc: the intel_uc structure
+ *
+ * Prepare for a full GPU reset.
+ */
+void intel_uc_reset_prepare(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ guc_disable_communication(guc);
+ __uc_sanitize(uc);
+}
+
+void intel_uc_runtime_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ int err;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ err = intel_guc_suspend(guc);
+ if (err)
+ DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+
+ guc_disable_communication(guc);
+}
+
+void intel_uc_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
+ intel_uc_runtime_suspend(uc);
+}
+
+static int __uc_resume(struct intel_uc *uc, bool enable_communication)
+{
+ struct intel_guc *guc = &uc->guc;
+ int err;
+
+ if (!intel_guc_is_running(guc))
+ return 0;
+
+ /* Make sure we enable communication if and only if it's disabled */
+ GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+
+ if (enable_communication)
+ guc_enable_communication(guc);
+
+ err = intel_guc_resume(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_uc_resume(struct intel_uc *uc)
+{
+ /*
+ * When coming out of S3/S4 we sanitize and re-init the HW, so
+ * communication is already re-enabled at this point.
+ */
+ return __uc_resume(uc, false);
+}
+
+int intel_uc_runtime_resume(struct intel_uc *uc)
+{
+ /*
+ * During runtime resume we don't sanitize, so we need to re-init
+ * communication as well.
+ */
+ return __uc_resume(uc, true);
+}
+
+static const struct intel_uc_ops uc_ops_off = {
+ .init_hw = __uc_check_hw,
+};
+
+static const struct intel_uc_ops uc_ops_on = {
+ .sanitize = __uc_sanitize,
+
+ .init_fw = __uc_fetch_firmwares,
+ .fini_fw = __uc_cleanup_firmwares,
+
+ .init = __uc_init,
+ .fini = __uc_fini,
+
+ .init_hw = __uc_init_hw,
+ .fini_hw = __uc_fini_hw,
+};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
new file mode 100644
index 000000000000..49c913524686
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_H_
+#define _INTEL_UC_H_
+
+#include "intel_guc.h"
+#include "intel_huc.h"
+#include "i915_params.h"
+
+struct intel_uc;
+
+struct intel_uc_ops {
+ int (*sanitize)(struct intel_uc *uc);
+ void (*init_fw)(struct intel_uc *uc);
+ void (*fini_fw)(struct intel_uc *uc);
+ void (*init)(struct intel_uc *uc);
+ void (*fini)(struct intel_uc *uc);
+ int (*init_hw)(struct intel_uc *uc);
+ void (*fini_hw)(struct intel_uc *uc);
+};
+
+struct intel_uc {
+ struct intel_uc_ops const *ops;
+ struct intel_guc guc;
+ struct intel_huc huc;
+
+ /* Snapshot of GuC log from last failed load */
+ struct drm_i915_gem_object *load_err_log;
+};
+
+void intel_uc_init_early(struct intel_uc *uc);
+void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_init_mmio(struct intel_uc *uc);
+void intel_uc_reset_prepare(struct intel_uc *uc);
+void intel_uc_suspend(struct intel_uc *uc);
+void intel_uc_runtime_suspend(struct intel_uc *uc);
+int intel_uc_resume(struct intel_uc *uc);
+int intel_uc_runtime_resume(struct intel_uc *uc);
+
+static inline bool intel_uc_supports_guc(struct intel_uc *uc)
+{
+ return intel_guc_is_supported(&uc->guc);
+}
+
+static inline bool intel_uc_uses_guc(struct intel_uc *uc)
+{
+ return intel_guc_is_enabled(&uc->guc);
+}
+
+static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
+{
+ return intel_guc_is_submission_supported(&uc->guc);
+}
+
+static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
+{
+ return intel_guc_is_submission_supported(&uc->guc);
+}
+
+static inline bool intel_uc_supports_huc(struct intel_uc *uc)
+{
+ return intel_uc_supports_guc(uc);
+}
+
+static inline bool intel_uc_uses_huc(struct intel_uc *uc)
+{
+ return intel_huc_is_enabled(&uc->huc);
+}
+
+#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
+static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
+{ \
+ if (uc->ops->_OPS) \
+ return uc->ops->_OPS(uc); \
+ return _RET; \
+}
+intel_uc_ops_function(sanitize, sanitize, int, 0);
+intel_uc_ops_function(fetch_firmwares, init_fw, void, );
+intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
+intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(fini, fini, void, );
+intel_uc_ops_function(init_hw, init_hw, int, 0);
+intel_uc_ops_function(fini_hw, fini_hw, void, );
+#undef intel_uc_ops_function
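+
+/*
+ * For illustration, the init_hw instantiation above expands to roughly the
+ * following wrapper; all names come from the definitions in this header:
+ *
+ * static inline int intel_uc_init_hw(struct intel_uc *uc)
+ * {
+ *         if (uc->ops->init_hw)
+ *                 return uc->ops->init_hw(uc);
+ *         return 0;
+ * }
+ */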
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
new file mode 100644
index 000000000000..8ee0a0c7f447
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <drm/drm_print.h>
+
+#include "intel_uc_fw.h"
+#include "intel_uc_fw_abi.h"
+#include "i915_drv.h"
+
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ return container_of(uc_fw, struct intel_gt, uc.guc.fw);
+
+ GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+ return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+}
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+ DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->path : intel_uc_fw_status_repr(status));
+}
+#endif
+
+/*
+ * List of required GuC and HuC binaries per-platform.
+ * Must be ordered based on platform + revid, from newer to older.
+ *
+ * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
+ * between 33.0 and 35.2 are only related to new additions to support new Gen12
+ * features.
+ */
+#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
+ fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0))
+
+#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) "." \
+ __stringify(minor_) "." \
+ __stringify(patch_) ".bin"
+
+#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
+
+#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
+ __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
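+
+/*
+ * For example, MAKE_GUC_FW_PATH(skl, 33, 0, 0) expands to the
+ * request_firmware() path "i915/skl_guc_33.0.0.bin"; the HuC variant differs
+ * only in the "_huc_" infix and in treating the last field as a build number.
+ */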
+
+/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
+ MODULE_FIRMWARE(guc_); \
+ MODULE_FIRMWARE(huc_);
+
+INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)
+
+/* The below structs and macros are used to iterate across the list of blobs */
+struct __packed uc_fw_blob {
+ u8 major;
+ u8 minor;
+ const char *path;
+};
+
+#define UC_FW_BLOB(major_, minor_, path_) \
+ { .major = major_, .minor = minor_, .path = path_ }
+
+#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB(major_, minor_, \
+ MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
+ UC_FW_BLOB(major_, minor_, \
+ MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
+
+struct __packed uc_fw_platform_requirement {
+ enum intel_platform p;
+ u8 rev; /* first platform rev using this FW */
+ const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
+};
+
+#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
+{ \
+ .p = INTEL_##platform_, \
+ .rev = revid_, \
+ .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
+ .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
+},
+
+static void
+__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
+{
+ static const struct uc_fw_platform_requirement fw_blobs[] = {
+ INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
+ if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
+ const struct uc_fw_blob *blob =
+ &fw_blobs[i].blobs[uc_fw->type];
+ uc_fw->path = blob->path;
+ uc_fw->major_ver_wanted = blob->major;
+ uc_fw->minor_ver_wanted = blob->minor;
+ break;
+ }
+ }
+
+ /* make sure the list is ordered as expected */
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
+ for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
+ if (fw_blobs[i].p < fw_blobs[i - 1].p)
+ continue;
+
+ if (fw_blobs[i].p == fw_blobs[i - 1].p &&
+ fw_blobs[i].rev < fw_blobs[i - 1].rev)
+ continue;
+
+ pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
+ intel_platform_name(fw_blobs[i - 1].p),
+ fw_blobs[i - 1].rev,
+ intel_platform_name(fw_blobs[i].p),
+ fw_blobs[i].rev);
+
+ uc_fw->path = NULL;
+ }
+ }
+
+ /* We don't want to enable GuC/HuC on pre-Gen11 by default */
+ if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
+ uc_fw->path = NULL;
+}
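+
+/*
+ * For example, because the table is scanned from newest to oldest, a
+ * COFFEELAKE part with revid >= 5 picks the cml blobs above, while earlier
+ * revisions fall through to the kbl entry.
+ */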
+
+static const char *__override_guc_firmware_path(void)
+{
+ if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ return i915_modparams.guc_firmware_path;
+ return "";
+}
+
+static const char *__override_huc_firmware_path(void)
+{
+ if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC)
+ return i915_modparams.huc_firmware_path;
+ return "";
+}
+
+static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
+{
+ const char *path = NULL;
+
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ path = __override_guc_firmware_path();
+ break;
+ case INTEL_UC_FW_TYPE_HUC:
+ path = __override_huc_firmware_path();
+ break;
+ }
+
+ if (unlikely(path)) {
+ uc_fw->path = path;
+ uc_fw->user_overridden = true;
+ }
+}
+
+/**
+ * intel_uc_fw_init_early - initialize the uC object and select the firmware
+ * @uc_fw: uC firmware
+ * @type: type of uC
+ * @supported: is uC support possible
+ * @platform: platform identifier
+ * @rev: hardware revision
+ *
+ * Initialize the state of our uC object and relevant tracking and select the
+ * firmware to fetch and load.
+ */
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type, bool supported,
+ enum intel_platform platform, u8 rev)
+{
+ /*
+ * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
+ * made before we've looked at the HW caps to see if we have uc support
+ */
+ BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
+ GEM_BUG_ON(uc_fw->status);
+ GEM_BUG_ON(uc_fw->path);
+
+ uc_fw->type = type;
+
+ if (supported) {
+ __uc_fw_auto_select(uc_fw, platform, rev);
+ __uc_fw_user_override(uc_fw);
+ }
+
+ intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+ INTEL_UC_FIRMWARE_SELECTED :
+ INTEL_UC_FIRMWARE_DISABLED :
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+}
+
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
+{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ bool user = e == -EINVAL;
+
+ if (i915_inject_probe_error(i915, e)) {
+ /* non-existing blob */
+ uc_fw->path = "<invalid>";
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_probe_error(i915, e)) {
+ /* require next major version */
+ uc_fw->major_ver_wanted += 1;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_probe_error(i915, e)) {
+ /* require next minor version */
+ uc_fw->minor_ver_wanted += 1;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->major_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
+ /* require prev major version */
+ uc_fw->major_ver_wanted -= 1;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->minor_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
+ /* require prev minor version - hey, this should work! */
+ uc_fw->minor_ver_wanted -= 1;
+ uc_fw->user_overridden = user;
+ } else if (user && i915_inject_probe_error(i915, e)) {
+ /* officially unsupported platform */
+ uc_fw->major_ver_wanted = 0;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = true;
+ }
+}
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct device *dev = i915->drm.dev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
+
+ err = i915_inject_probe_error(i915, -ENXIO);
+ if (err)
+ return err;
+
+ __force_fw_fetch_failures(uc_fw, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, -ESTALE);
+
+ err = request_firmware(&fw, uc_fw->path, dev);
+ if (err)
+ goto fail;
+
+ /* Check the size of the blob before examining buffer contents */
+ if (unlikely(fw->size < sizeof(struct uc_css_header))) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, sizeof(struct uc_css_header));
+ err = -ENODATA;
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Check integrity of size values inside CSS header */
+ size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
+ css->exponent_size_dw) * sizeof(u32);
+ if (unlikely(size != sizeof(struct uc_css_header))) {
+ dev_warn(dev,
+ "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, sizeof(struct uc_css_header));
+ err = -EPROTO;
+ goto fail;
+ }
+
+ /* The uCode size must be calculated from the other sizes */
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
+ dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
+ err = -EPROTO;
+ goto fail;
+ }
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At minimum the blob must have header, uCode and RSA; check the size of all three */
+ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (unlikely(fw->size < size)) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, size);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /* Sanity check that the fw is not larger than the whole WOPCM memory */
+ size = __intel_uc_fw_get_upload_size(uc_fw);
+ if (unlikely(size >= i915->wopcm.size)) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ size, (size_t)i915->wopcm.size);
+ err = -E2BIG;
+ goto fail;
+ }
+
+ /* Get version numbers from the CSS header */
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
+
+ if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
+ }
+
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+
+ release_firmware(fw);
+ return 0;
+
+fail:
+ intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
+ INTEL_UC_FIRMWARE_MISSING :
+ INTEL_UC_FIRMWARE_ERROR);
+
+ dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ return err;
+}
+
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
+{
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ struct drm_mm_node *node = &ggtt->uc_fw;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
+ GEM_BUG_ON(upper_32_bits(node->start));
+ GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+ return lower_32_bits(node->start);
+}
+
+static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ struct i915_vma dummy = {
+ .node.start = uc_fw_ggtt_offset(uc_fw),
+ .node.size = obj->base.size,
+ .pages = obj->mm.pages,
+ .vm = &ggtt->vm,
+ };
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+
+ /* uc_fw->obj cache domains were not controlled across suspend */
+ drm_clflush_sg(dummy.pages);
+
+ ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+}
+
+static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ u64 start = uc_fw_ggtt_offset(uc_fw);
+
+ ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
+}
+
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct intel_uncore *uncore = gt->uncore;
+ u64 offset;
+ int ret;
+
+ ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
+ if (ret)
+ return ret;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /* Set the source address for the uCode */
+ offset = uc_fw_ggtt_offset(uc_fw);
+ GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
+
+ /* Set the DMA destination */
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /*
+ * Set the transfer size. The header plus uCode will be copied to WOPCM
+ * via DMA, excluding any other components
+ */
+ intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
+ sizeof(struct uc_css_header) + uc_fw->ucode_size);
+
+ /* Start the DMA */
+ intel_uncore_write_fw(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ if (ret)
+ dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
+
+ /* Disable the bits once DMA is over */
+ intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ * @uc_fw: uC firmware
+ * @dst_offset: destination offset
+ * @dma_flags: flags for DMA ctrl
+ *
+ * Loads uC firmware and updates internal flags.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ int err;
+
+ /* make sure the status was cleared the last time we reset the uc */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ err = i915_inject_probe_error(gt->i915, -ENOEXEC);
+ if (err)
+ return err;
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+
+ /* Call custom loader */
+ uc_fw_bind_ggtt(uc_fw);
+ err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
+ uc_fw_unbind_ggtt(uc_fw);
+ if (err)
+ goto fail;
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+ return 0;
+
+fail:
+ i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ return err;
+}
+
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
+{
+ int err;
+
+ /* this should happen before the load! */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+
+ err = i915_gem_object_pin_pages(uc_fw->obj);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ }
+
+ return err;
+}
+
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ intel_uc_fw_cleanup_fetch(uc_fw);
+}
+
+/**
+ * intel_uc_fw_cleanup_fetch - cleanup uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return;
+
+ i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
+}
+
+/**
+ * intel_uc_fw_copy_rsa - copy fw RSA to buffer
+ *
+ * @uc_fw: uC firmware
+ * @dst: dst buffer
+ * @max_len: max number of bytes to copy
+ *
+ * Return: number of copied bytes.
+ */
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
+{
+ struct sg_table *pages = uc_fw->obj->mm.pages;
+ u32 size = min_t(u32, uc_fw->rsa_size, max_len);
+ u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+
+ GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
+
+ return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+}
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+ drm_printf(p, "%s firmware: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ drm_printf(p, "\tstatus: %s\n",
+ intel_uc_fw_status_repr(uc_fw->status));
+ drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
+ drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
new file mode 100644
index 000000000000..1f30543d0d2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+#include <linux/types.h>
+#include "intel_uc_fw_abi.h"
+#include "intel_device_info.h"
+#include "i915_gem.h"
+
+struct drm_printer;
+struct drm_i915_private;
+struct intel_gt;
+
+/* Home of GuC, HuC and DMC firmwares */
+#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
+
+/*
+ * +------------+---------------------------------------------------+
+ * | PHASE | FIRMWARE STATUS TRANSITIONS |
+ * +============+===================================================+
+ * | | UNINITIALIZED |
+ * +------------+- / | \ -+
+ * | | DISABLED <--/ | \--> NOT_SUPPORTED |
+ * | init_early | V |
+ * | | SELECTED |
+ * +------------+- / | \ -+
+ * | | MISSING <--/ | \--> ERROR |
+ * | fetch | | |
+ * | | /------> AVAILABLE <---<-----------\ |
+ * +------------+- \ / \ \ \ -+
+ * | | FAIL <--< \--> TRANSFERRED \ |
+ * | upload | \ / \ / |
+ * | | \---------/ \--> RUNNING |
+ * +------------+---------------------------------------------------+
+ */
+
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */
+ INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */
+ INTEL_UC_FIRMWARE_DISABLED, /* disabled */
+ INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */
+ INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
+ INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
+ INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
+ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
+ INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC = 0,
+ INTEL_UC_FW_TYPE_HUC
+};
+#define INTEL_UC_FW_NUM_TYPES 2
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the uC.
+ */
+struct intel_uc_fw {
+ enum intel_uc_fw_type type;
+ union {
+ const enum intel_uc_fw_status status;
+ enum intel_uc_fw_status __status; /* no accidental overwrites */
+ };
+ const char *path;
+ bool user_overridden;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * The firmware build process generates a version header file with the
+ * major and minor versions defined. These versions are built into the CSS
+ * header of the firmware. The i915 kernel driver sets the minimal firmware
+ * version required per platform.
+ */
+ u16 major_ver_wanted;
+ u16 minor_ver_wanted;
+ u16 major_ver_found;
+ u16 minor_ver_found;
+
+ u32 rsa_size;
+ u32 ucode_size;
+};
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status);
+#else
+static inline void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+}
+#endif
+
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return "N/A";
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return "UNINITIALIZED";
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return "DISABLED";
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return "SELECTED";
+ case INTEL_UC_FIRMWARE_MISSING:
+ return "MISSING";
+ case INTEL_UC_FIRMWARE_ERROR:
+ return "ERROR";
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ return "TRANSFERRED";
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return "RUNNING";
+ }
+ return "<invalid>";
+}
+
+static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return -ENODEV;
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return -EACCES;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EPERM;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOENT;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ case INTEL_UC_FIRMWARE_FAIL:
+ return -EIO;
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return -ESTALE;
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
+static inline enum intel_uc_fw_status
+__intel_uc_fw_status(struct intel_uc_fw *uc_fw)
+{
+ /* shouldn't call this before checking hw/blob availability */
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return uc_fw->status;
+}
+
+static inline bool intel_uc_fw_is_supported(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+static inline bool intel_uc_fw_is_enabled(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) > INTEL_UC_FIRMWARE_DISABLED;
+}
+
+static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
+}
+
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
+}
+
+static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
+}
+
+static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->user_overridden;
+}
+
+static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
+{
+ if (intel_uc_fw_is_loaded(uc_fw))
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+}
+
+static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size;
+}
+
+/**
+ * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: Upload firmware size, or zero on firmware fetch failure.
+ */
+static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return 0;
+
+ return __intel_uc_fw_get_upload_size(uc_fw);
+}
+
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type, bool supported,
+ enum intel_platform platform, u8 rev);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
new file mode 100644
index 000000000000..029214cdedd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_ABI_H
+#define _INTEL_UC_FW_ABI_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/**
+ * DOC: Firmware Layout
+ *
+ * The GuC/HuC firmware layout looks like this::
+ *
+ * +======================================================================+
+ * | Firmware blob |
+ * +===============+===============+============+============+============+
+ * | CSS header | uCode | RSA key | modulus | exponent |
+ * +===============+===============+============+============+============+
+ * <-header size-> <---header size continued ----------->
+ * <--- size ----------------------------------------------------------->
+ * <-key size->
+ * <-mod size->
+ * <-exp size->
+ *
+ * The firmware may or may not have modulus key and exponent data. The header,
+ * uCode and RSA signature are must-have components that will be used by the
+ * driver. The length of each component, in dwords, can be found in the header.
+ * When the modulus and exponent are not present in the fw, a.k.a. a truncated
+ * image, the length values still appear in the header.
+ *
+ * The driver does some basic fw size validation based on the following rules:
+ *
+ * 1. Header, uCode and RSA are must-have components.
+ * 2. All firmware components, if present, are in the sequence illustrated in
+ * the layout table above.
+ * 3. Length info for each component can be found in the header, in dwords.
+ * 4. Modulus and exponent keys are not required by the driver. They may be
+ * absent from the fw, in which case the driver will load a truncated firmware.
+ */
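+
+/*
+ * A purely illustrative sizing example (made-up numbers): with
+ * header_size_dw = 161, key_size_dw = 64, modulus_size_dw = 64 and
+ * exponent_size_dw = 1, the CSS header itself is (161 - 64 - 64 - 1) * 4 =
+ * 128 bytes, matching sizeof(struct uc_css_header) below; with size_dw =
+ * 5161 the uCode payload is (5161 - 161) * 4 = 20000 bytes.
+ */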
+
+struct uc_css_header {
+ u32 module_type;
+ /*
+ * header_size includes all non-uCode bits, including css_header, rsa
+ * key, modulus key and exponent data.
+ */
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_DATE_MIN (0xFF << 8)
+#define CSS_DATE_SEC (0xFFFF << 16)
+ char username[8];
+ char buildnumber[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+ u32 reserved[14];
+ u32 header_info;
+} __packed;
+static_assert(sizeof(struct uc_css_header) == 128);
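+
+/*
+ * For illustration, a sw_version of 0x00210003 decodes through the masks
+ * above as major 0x21 (33), minor 0x00 and patch 0x03, i.e. firmware 33.0.3.
+ */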
+
+#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index c3d19d88da40..771420453f82 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
return ret;
}
@@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
}
/**
@@ -172,14 +172,14 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
@@ -195,10 +195,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(dev_priv);
+ reg = i915_reserve_fence(&dev_priv->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
@@ -207,7 +207,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
@@ -220,7 +220,7 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b09dc315e2da..21a176cd8acc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -35,7 +35,9 @@
*/
#include <linux/slab.h>
+
#include "i915_drv.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
@@ -374,21 +376,37 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+#define DWORD_FIELD(dword, end, start) \
+ FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
+
+#define OP_LENGTH_BIAS 2
+#define CMD_LEN(value) (value + OP_LENGTH_BIAS)
+
+static int gvt_check_valid_cmd_length(int len, int valid_len)
+{
+ if (valid_len != len) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ len, valid_len);
+ return -EFAULT;
+ }
+ return 0;
+}
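+
+/*
+ * CMD_LEN() maps a command's encoded length field to its total size in
+ * dwords by adding the bias, e.g. CMD_LEN(1) is 3 dwords; the handlers below
+ * compare such a total against cmd_length(s) via gvt_check_valid_cmd_length().
+ */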
+
struct cmd_info {
const char *name;
u32 opcode;
-#define F_LEN_MASK (1U<<0)
+#define F_LEN_MASK 3U
#define F_LEN_CONST 1U
#define F_LEN_VAR 0U
+/* value is const although LEN may be variable */
+#define F_LEN_VAR_FIXED (1<<1)
/*
* command has its own ip advance logic
* e.g. MI_BATCH_START, MI_BATCH_END
*/
-#define F_IP_ADVANCE_CUSTOM (1<<1)
-
-#define F_POST_HANDLE (1<<2)
+#define F_IP_ADVANCE_CUSTOM (1<<2)
u32 flag;
#define R_RCS BIT(RCS0)
@@ -418,9 +436,12 @@ struct cmd_info {
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
- u8 len;
+ u32 len;
parser_cmd_handler handler;
+
+ /* valid length in DWord */
+ u32 valid_len;
};
struct cmd_entry {
@@ -944,6 +965,18 @@ static int cmd_handler_lri(struct parser_exec_state *s)
int i, ret = 0;
int cmd_len = cmd_length(s);
struct intel_gvt *gvt = s->vgpu->gvt;
+ u32 valid_len = CMD_LEN(1);
+
+ /*
+ * Official Intel docs are somewhat sloppy; check the definition of
+ * MI_LOAD_REGISTER_IMM.
+ */
+ #define MAX_VALID_LEN 127
+ if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ cmd_len, valid_len);
+ return -EFAULT;
+ }
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
@@ -1375,6 +1408,15 @@ static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
int ret;
int i;
int len = cmd_length(s);
+ u32 valid_len = CMD_LEN(1);
+
+ /* Flip Type == Stereo 3D Flip */
+ if (DWORD_FIELD(2, 1, 0) == 2)
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
ret = decode_mi_display_flip(s, &info);
if (ret) {
@@ -1494,12 +1536,21 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
int op_size = (cmd_length(s) - 3) * sizeof(u32);
int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
unsigned long gma, gma_low, gma_high;
+ u32 valid_len = CMD_LEN(2);
int ret = 0;
/* check ppggt */
if (!(cmd_val(s, 0) & (1 << 22)))
return 0;
+ /* check if QWORD */
+ if (DWORD_FIELD(0, 21, 21))
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
+
gma = cmd_val(s, 2) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
@@ -1542,11 +1593,20 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
sizeof(u32);
unsigned long gma, gma_high;
+ u32 valid_len = CMD_LEN(1);
int ret = 0;
if (!(cmd_val(s, 0) & (1 << 22)))
return ret;
+ /* check inline data */
+ if (cmd_val(s, 0) & BIT(18))
+ valid_len = CMD_LEN(9);
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
+
gma = cmd_val(s, 1) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
gma_high = cmd_val(s, 2) & GENMASK(15, 0);
@@ -1584,6 +1644,16 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
bool index_mode = false;
int ret = 0;
u32 hws_pga, val;
+ u32 valid_len = CMD_LEN(2);
+
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret) {
+ /* Check again for Qword */
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ ++valid_len);
+ return ret;
+ }
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1661,7 +1731,9 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
+static int find_bb_size(struct parser_exec_state *s,
+ unsigned long *bb_size,
+ unsigned long *bb_end_cmd_offset)
{
unsigned long gma = 0;
const struct cmd_info *info;
@@ -1673,6 +1745,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
*bb_size = 0;
+ *bb_end_cmd_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1708,6 +1781,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
/* chained batch buffer */
bb_end = true;
}
+
+ if (bb_end)
+ *bb_end_cmd_offset = *bb_size;
+
cmd_len = get_cmd_length(info, cmd) << 2;
*bb_size += cmd_len;
gma += cmd_len;
@@ -1716,12 +1793,36 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
return 0;
}
+static int audit_bb_end(struct parser_exec_state *s, void *va)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 cmd = *(u32 *)va;
+ const struct cmd_info *info;
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+ cmd, get_opcode(cmd, s->ring_id),
+ (s->buf_addr_type == PPGTT_BUFFER) ?
+ "ppgtt" : "ggtt", s->ring_id, s->workload);
+ return -EBADRQC;
+ }
+
+ if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
+ ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
+ (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
+ return 0;
+
+ return -EBADRQC;
+}
+
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
unsigned long bb_size;
+ unsigned long bb_end_cmd_offset;
int ret = 0;
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
@@ -1732,7 +1833,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
- ret = find_bb_size(s, &bb_size);
+ ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
if (ret)
return ret;
@@ -1788,6 +1889,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
goto err_unmap;
}
+ ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
+ if (ret)
+ goto err_unmap;
+
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
@@ -1912,21 +2017,24 @@ static const struct cmd_info cmd_info[] = {
{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
- {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
- {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
- 0, 8, NULL},
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
- {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
+ D_ALL, 0, 8, NULL, CMD_LEN(0)},
- {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, 0, 8, NULL},
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
+ NULL, CMD_LEN(0)},
- {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
+ 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -1940,8 +2048,9 @@ static const struct cmd_info cmd_info[] = {
{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
cmd_handler_mi_update_gtt},
- {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_srm, CMD_LEN(2)},
{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
cmd_handler_mi_flush_dw},
@@ -1949,26 +2058,30 @@ static const struct cmd_info cmd_info[] = {
{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
10, cmd_handler_mi_clflush},
- {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
+ cmd_handler_mi_report_perf_count, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_lrm, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
- D_ALL, 0, 8, cmd_handler_lrr},
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
+ cmd_handler_lrr, CMD_LEN(1)},
- {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
- D_ALL, 0, 8, NULL},
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
+ 8, NULL, CMD_LEN(2)},
- {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
- ADDR_FIX_1(2), 8, NULL},
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, NULL},
- {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
- 8, cmd_handler_mi_op_2e},
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
8, cmd_handler_mi_op_2f},
@@ -1978,8 +2091,8 @@ static const struct cmd_info cmd_info[] = {
cmd_handler_mi_batch_buffer_start},
{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
- F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
- cmd_handler_mi_conditional_batch_buffer_end},
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
R_RCS | R_BCS, D_ALL, 0, 2, NULL},
@@ -2569,6 +2682,13 @@ static int cmd_parser_exec(struct parser_exec_state *s)
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
+ if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ info->valid_len);
+ if (ret)
+ return ret;
+ }
+
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 286703643002..ab25d151932a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -38,6 +38,10 @@
#define GVT_CMD_HASH_BITS 7
+struct intel_gvt;
+struct intel_shadow_wa_ctx;
+struct intel_vgpu_workload;
+
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2fb7b73b260d..285f6011a537 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -189,36 +189,19 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
* @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
*/
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
- struct dentry *ent;
char name[16] = "";
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- if (!vgpu->debugfs)
- return -ENOMEM;
-
- ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
- &vgpu->active);
- if (!ent)
- return -ENOMEM;
-
- ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
- vgpu, &vgpu_mmio_diff_fops);
- if (!ent)
- return -ENOMEM;
- ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
- vgpu, &vgpu_scan_nonprivbb_fops);
- if (!ent)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
+ debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
+ &vgpu_mmio_diff_fops);
+ debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+ &vgpu_scan_nonprivbb_fops);
}
/**
@@ -234,27 +217,15 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
/**
* intel_gvt_debugfs_init - register gvt debugfs root entry
* @gvt: GVT device
- *
- * Returns:
- * zero on success, negative if failed.
*/
-int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
struct drm_minor *minor = gvt->dev_priv->drm.primary;
- struct dentry *ent;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
- if (!gvt->debugfs_root) {
- gvt_err("Cannot create debugfs dir\n");
- return -ENOMEM;
- }
- ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
- &gvt->mmio.num_tracked_mmio);
- if (!ent)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+ &gvt->mmio.num_tracked_mmio);
}
/**
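Both debugfs registration paths can return void because debugfs failures are deliberately non-fatal: the create helpers tolerate an error-pointer parent and callers are not expected to check their result. A minimal sketch of the same pattern, using a hypothetical my_dev structure that is not part of this patch:

	#include <linux/debugfs.h>

	struct my_dev {
		struct dentry *debugfs_root;
		bool active;
		unsigned long counter;
	};

	/* debugfs errors are ignored on purpose; if the directory could not
	 * be created, the child create calls degrade gracefully. */
	static void my_dev_debugfs_init(struct my_dev *d)
	{
		d->debugfs_root = debugfs_create_dir("my_dev", NULL);
		debugfs_create_bool("active", 0444, d->debugfs_root, &d->active);
		debugfs_create_ulong("counter", 0444, d->debugfs_root,
				     &d->counter);
	}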
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index a87f33e6a23c..b59b34046e1e 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -35,6 +35,11 @@
#ifndef _GVT_DISPLAY_H_
#define _GVT_DISPLAY_H_
+#include <linux/types.h>
+
+struct intel_gvt;
+struct intel_vgpu;
+
#define SBI_REG_MAX 20
#define DPCD_SIZE 0x700
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 41c8ebc60c63..2477a1e5a166 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -36,13 +36,32 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+ unsigned long size,
+ dma_addr_t dma_addr)
+{
+ int ret = 0;
+
+ if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
+{
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct intel_vgpu *vgpu;
struct sg_table *st;
struct scatterlist *sg;
- int i, ret;
+ int i, j, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
if (WARN_ON(!fb_info))
return -ENODEV;
+ vgpu = fb_info->obj->vgpu;
+ if (WARN_ON(!vgpu))
+ return -ENODEV;
+
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (unlikely(!st))
return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
+ dma_addr_t dma_addr =
+ GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+ if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
sg->offset = 0;
sg->length = PAGE_SIZE;
- sg_dma_address(sg) =
- GEN8_DECODE_PTE(readq(&gtt_entries[i]));
sg_dma_len(sg) = PAGE_SIZE;
+ sg_dma_address(sg) = dma_addr;
}
__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+ if (ret) {
+ dma_addr_t dma_addr;
+
+ for_each_sg(st->sgl, sg, i, j) {
+ dma_addr = sg_dma_address(sg);
+ if (dma_addr)
+ vgpu_unpin_dma_address(vgpu, dma_addr);
+ }
+ sg_free_table(st);
+ kfree(st);
+ }
+
+ return ret;
- return 0;
}
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct scatterlist *sg;
+
+ if (obj->base.dma_buf) {
+ struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+ struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+ struct intel_vgpu *vgpu = obj->vgpu;
+ int i;
+
+ for_each_sg(pages->sgl, sg, fb_info->size, i)
+ vgpu_unpin_dma_address(vgpu,
+ sg_dma_address(sg));
+ }
+
sg_free_table(pages);
kfree(pages);
}
@@ -152,6 +207,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct intel_vgpu_fb_info *info)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
@@ -161,7 +217,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
- i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+ i915_gem_object_set_readonly(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
@@ -491,15 +548,13 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
obj->gvt_info = dmabuf_obj->info;
- dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
if (IS_ERR(dmabuf)) {
gvt_vgpu_err("export dma-buf failed\n");
ret = PTR_ERR(dmabuf);
goto out_free_gem;
}
- i915_gem_object_put(obj);
-
ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
if (ret < 0) {
gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +579,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
file_count(dmabuf->file),
kref_read(&obj->base.refcount));
+ i915_gem_object_put(obj);
+
return dmabuf_fd;
out_free_dmabuf:
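vgpu_gem_get_pages() now pins every guest page behind the GGTT entries it translates and, if any pin fails, unwinds only the entries populated so far (the second index limits for_each_sg() to the first i entries). A condensed sketch of that all-or-nothing pattern, with hypothetical pin()/unpin() callbacks standing in for the hypervisor hooks:

	#include <linux/scatterlist.h>

	/* Sketch: pin each mapped page, or release exactly the ones pinned
	 * so far and report the failure. */
	static int pin_all_or_none(struct sg_table *st, unsigned int nents,
				   int (*pin)(dma_addr_t addr),
				   void (*unpin)(dma_addr_t addr))
	{
		struct scatterlist *sg;
		unsigned int i, j;
		int ret = 0;

		for_each_sg(st->sgl, sg, nents, i) {
			ret = pin(sg_dma_address(sg));
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		/* Only the first i entries were pinned; walk just those. */
		for_each_sg(st->sgl, sg, i, j)
			unpin(sg_dma_address(sg));
		return ret;
	}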
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index f6dfc8b795ec..dfe0cbc6aad8 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -35,6 +35,10 @@
#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_
+#include <linux/types.h>
+
+struct intel_vgpu;
+
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f21b8fb5b37e..d6e7a1189bad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5ccc2c695848..5c0c1fd30c83 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -35,6 +35,8 @@
#ifndef _GVT_EXECLIST_H_
#define _GVT_EXECLIST_H_
+#include <linux/types.h>
+
struct execlist_ctx_descriptor_format {
union {
u32 ldw;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 60c155085029..67b6ede9e707 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -36,6 +36,8 @@
#ifndef _GVT_FB_DECODER_H_
#define _GVT_FB_DECODER_H_
+#include <linux/types.h>
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 049775e8e350..b0c1fda32977 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -146,7 +146,7 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
- kfree(gvt->firmware.mmio);
+ vfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
@@ -229,7 +229,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->cfg_space = mem;
- mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ mem = vmalloc(info->mmio_size);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
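The saved MMIO image is on the order of megabytes, too large to reliably obtain as physically contiguous memory from kmalloc(), so the snapshot moves to vmalloc() and the matching free above to vfree(). A small sketch of keeping the two allocators paired correctly (struct fw_snapshot and the field split are illustrative, not from the patch):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	struct fw_snapshot {
		void *cfg_space;	/* small, fixed size: kmalloc */
		void *mmio;		/* multi-MiB, no DMA: vmalloc */
	};

	static void fw_snapshot_free(struct fw_snapshot *s)
	{
		kfree(s->cfg_space);	/* kmalloc() pairs with kfree() */
		vfree(s->mmio);		/* vmalloc() pairs with vfree() */
	}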
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4b04af569c05..4a4828074cb7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1282,7 +1282,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
return -EINVAL;
default:
GEM_BUG_ON(1);
- };
+ }
/* direct shadow */
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
if (mm->type == INTEL_GVT_MM_PPGTT) {
list_del(&mm->ppgtt_mm.list);
+
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_del(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 42d0394f0de2..88789316807d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -205,17 +205,18 @@ struct intel_vgpu_gtt {
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};
-extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
-extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
-extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
-extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+void intel_gvt_clean_gtt(struct intel_gvt *gvt);
-extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry);
+struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level,
+ void *root_entry);
struct intel_vgpu_oos_page {
struct intel_vgpu_ppgtt_spt *spt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 43f4242062dd..8f37eefa0a02 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -375,9 +375,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
}
gvt->idle_vgpu = vgpu;
- ret = intel_gvt_debugfs_init(gvt);
- if (ret)
- gvt_err("debugfs registration failed, go on.\n");
+ intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7a1fe44d45af..b47c6acaf9c0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -334,6 +334,10 @@ struct intel_gvt {
struct {
struct engine_mmio *mmio;
int ctx_mmio_count[I915_NUM_ENGINES];
+ u32 *tlb_mmio_offset_list;
+ u32 tlb_mmio_offset_list_cnt;
+ u32 *mocs_mmio_offset_list;
+ u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
struct dentry *debugfs_root;
@@ -682,9 +686,9 @@ static inline void intel_gvt_mmio_set_in_ctx(
gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 25f78196b964..6d28d72e6c7e 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
engine_mask |= BIT(VCS1);
}
+ if (data & GEN9_GRDOM_GUC) {
+ gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+ vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+ }
engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
@@ -460,6 +464,7 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static i915_reg_t force_nonpriv_white_list[] = {
GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+ PS_INVOCATION_COUNT,//_MMIO(0x2348)
GEN8_CS_CHICKEN1,//_MMIO(0x2580)
_MMIO(0x2690),
_MMIO(0x2694),
@@ -508,7 +513,7 @@ static inline bool in_whitelist(unsigned int reg)
static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- u32 reg_nonpriv = *(u32 *)p_data;
+ u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
u32 ring_base;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -528,7 +533,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
bytes);
} else
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
- vgpu->id, reg_nonpriv, offset);
+ vgpu->id, *(u32 *)p_data, offset);
return 0;
}
@@ -819,13 +824,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum intel_gvt_event_type event;
- if (reg == _DPA_AUX_CH_CTL)
+ if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
event = AUX_CHANNEL_A;
- else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ else if (reg == _PCH_DPB_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
event = AUX_CHANNEL_B;
- else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ else if (reg == _PCH_DPC_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
event = AUX_CHANNEL_C;
- else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ else if (reg == _PCH_DPD_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
WARN_ON(true);
@@ -1632,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
+static int guc_status_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ /* keep MIA_IN_RESET before clearing */
+ read_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -2668,10 +2686,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
return 0;
}
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
@@ -2796,7 +2816,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+ MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
@@ -2872,11 +2892,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
@@ -3360,20 +3380,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
if (IS_BROADWELL(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_COFFEELAKE(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_BROXTON(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
@@ -3417,6 +3437,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
}
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ /* pvinfo data doesn't come from hw mmio */
+ if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+ continue;
+
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
i915_mmio_reg_offset(block->offset) + j,
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 4862fb12778e..b17c4a1599cd 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,6 +33,10 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+#include <linux/types.h>
+
+struct device;
+
enum hypervisor_type {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
@@ -62,6 +66,8 @@ struct intel_gvt_mpt {
unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+ int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 951681813230..11accd3e1023 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -672,7 +672,7 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
hrtimer_cancel(&irq->vblank_timer.timer);
}
-#define VBLNAK_TIMER_PERIOD 16000000
+#define VBLANK_TIMER_PERIOD 16000000
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
@@ -704,7 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->timer.function = vblank_timer_fn;
- vblank_timer->period = VBLNAK_TIMER_PERIOD;
+ vblank_timer->period = VBLANK_TIMER_PERIOD;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 5313fb1b33e1..fcd663811d37 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,6 +32,8 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
+#include <linux/types.h>
+
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
RCS_DEBUG,
@@ -135,6 +137,7 @@ enum intel_gvt_event_type {
struct intel_gvt_irq;
struct intel_gvt;
+struct intel_vgpu;
typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 23aa3e50cbf8..3259a1fa69e1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1306,7 +1306,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned int i;
int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
- size_t size;
int nr_areas = 1;
int cap_type_id;
@@ -1349,9 +1348,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
VFIO_REGION_INFO_FLAG_WRITE;
info.size = gvt_aperture_sz(vgpu->gvt);
- size = sizeof(*sparse) +
- (nr_areas * sizeof(*sparse->areas));
- sparse = kzalloc(size, GFP_KERNEL);
+ sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+ GFP_KERNEL);
if (!sparse)
return -ENOMEM;
@@ -1416,9 +1414,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (cap_type_id) {
case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
ret = vfio_info_add_capability(&caps,
- &sparse->header, sizeof(*sparse) +
- (sparse->nr_areas *
- sizeof(*sparse->areas)));
+ &sparse->header,
+ struct_size(sparse, areas,
+ sparse->nr_areas));
if (ret) {
kfree(sparse);
return ret;
@@ -1566,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "\n");
}
-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%u\n",
- vgpu->submission.shadow[0]->gem_context->hw_id);
- }
- return sprintf(buf, "\n");
-}
-
static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
&dev_attr_vgpu_id.attr,
- &dev_attr_hw_id.attr,
NULL
};
@@ -1798,9 +1779,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
&vgpu->vdev.nr_cache_entries);
- if (!info->debugfs_cache_entries)
- gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
-
return 0;
}
@@ -1938,6 +1916,28 @@ err_unlock:
return ret;
}
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct gvt_dma *entry;
+ int ret = 0;
+
+ if (!handle_valid(handle))
+ return -ENODEV;
+
+ info = (struct kvmgt_guest_info *)handle;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ if (entry)
+ kref_get(&entry->ref);
+ else
+ ret = -ENOMEM;
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+ return ret;
+}
+
static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2049,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+ .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
.set_opregion = kvmgt_set_opregion,
.set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
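struct_size() replaces the open-coded sizeof(*sparse) + nr_areas * sizeof(*sparse->areas): it computes the size of a structure ending in a flexible array and saturates on overflow, so an attacker-influenced count cannot wrap into a short allocation. Minimal sketch of the idiom with a hypothetical structure:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct area {
		u64 offset;
		u64 size;
	};

	struct area_report {
		u32 nr_areas;
		struct area areas[];	/* flexible array member */
	};

	static struct area_report *area_report_alloc(u32 nr_areas)
	{
		/* struct_size() saturates to SIZE_MAX on overflow, so the
		 * allocation fails rather than coming back undersized. */
		struct area_report *r =
			kzalloc(struct_size(r, areas, nr_areas), GFP_KERNEL);

		if (r)
			r->nr_areas = nr_areas;
		return r;
	}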
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 5874f1cb4306..2e68f4b02c94 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -36,6 +36,8 @@
#ifndef _GVT_MMIO_H_
#define _GVT_MMIO_H_
+#include <linux/types.h>
+
struct intel_gvt;
struct intel_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2998999e8568..aaf15916d29a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
@@ -148,19 +149,27 @@ static struct {
u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;
+static u32 gen9_mocs_mmio_offset_list[] = {
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
+};
+
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
+ struct intel_gvt *gvt = dev_priv->gvt;
i915_reg_t offset;
- u32 regs[] = {
- [RCS0] = 0xc800,
- [VCS0] = 0xc900,
- [VCS1] = 0xca00,
- [BCS0] = 0xcc00,
- [VECS0] = 0xcb00,
- };
+ u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
+ u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
int ring_id, i;
- for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ /* Platform doesn't have mocs mmios. */
+ if (!regs)
+ return;
+
+ for (ring_id = 0; ring_id < cnt; ring_id++) {
if (!HAS_ENGINE(dev_priv, ring_id))
continue;
offset.reg = regs[ring_id];
@@ -327,22 +336,28 @@ out:
return ret;
}
+static u32 gen8_tlb_mmio_offset_list[] = {
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
+};
+
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
+ u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
+ u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
enum forcewake_domains fw;
i915_reg_t reg;
- u32 regs[] = {
- [RCS0] = 0x4260,
- [VCS0] = 0x4264,
- [VCS1] = 0x4268,
- [BCS0] = 0x426c,
- [VECS0] = 0x4270,
- };
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (!regs)
+ return;
+
+ if (WARN_ON(ring_id >= cnt))
return;
if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
@@ -565,10 +580,17 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9)
+ if (INTEL_GEN(gvt->dev_priv) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
- else
+ gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+ gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
+ gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
+ } else {
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+ }
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 0f9440128123..9ad224df9c68 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -255,6 +255,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
}
/**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
+ * @vgpu: a vGPU
+ * @dma_addr: guest dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
+{
+ return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
+/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
index fa607a71c3c0..f6eb7135583c 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.h
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -25,6 +25,9 @@
#ifndef _GVT_PAGE_TRACK_H_
#define _GVT_PAGE_TRACK_H_
+#include <linux/types.h>
+
+struct intel_vgpu;
struct intel_vgpu_page_track;
typedef int (*gvt_page_track_handler_t)(
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index 7b59e3e88b8b..3dacdad5f529 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -34,6 +34,9 @@
#ifndef __GVT_SCHED_POLICY__
#define __GVT_SCHED_POLICY__
+struct intel_gvt;
+struct intel_vgpu;
+
struct intel_gvt_sched_policy_ops {
int (*init)(struct intel_gvt *gvt);
void (*clean)(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 75baff657e43..685d1e04a5ff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,11 +35,12 @@
#include <linux/kthread.h>
-#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
+#include "i915_gem_gtt.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
@@ -58,7 +59,7 @@ static void set_context_pdp_root_pointer(
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -84,8 +85,8 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -129,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
I915_GTT_PAGE_SIZE);
@@ -204,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return 0;
}
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
{
- return i915_gem_context_force_single_submission(req->gem_context);
+ return intel_context_force_single_submission(rq->context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -291,9 +292,6 @@ shadow_context_descriptor_update(struct intel_context *ce,
* Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
- desc &= U64_MAX << 12;
- desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
-
desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
desc |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -309,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+ if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -365,10 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
}
static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+ struct intel_context *ce)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -388,11 +386,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (workload->req)
return 0;
@@ -418,10 +413,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&vgpu->vgpu_lock);
if (workload->shadow)
return 0;
@@ -532,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
struct i915_request *rq = workload->req;
struct execlist_ring_context *shadow_ring_context =
- (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+ (struct execlist_ring_context *)rq->context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -571,10 +565,18 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 ring_base;
+
+ ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
struct intel_vgpu_shadow_bb *bb, *pos;
if (list_empty(&workload->shadow_bb))
@@ -583,8 +585,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb = list_first_entry(&workload->shadow_bb,
struct intel_vgpu_shadow_bb, list);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
@@ -602,8 +602,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_del(&bb->list);
kfree(bb);
}
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -627,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -678,7 +676,6 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -687,7 +684,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ring_id, workload);
mutex_lock(&vgpu->vgpu_lock);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
@@ -722,7 +718,6 @@ out:
err_req:
if (ret)
workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -789,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
@@ -837,7 +832,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
I915_GTT_PAGE_SIZE);
@@ -880,7 +875,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t tmp;
/* free the unsubmited workloads in the queues. */
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
@@ -1019,6 +1014,13 @@ static int workload_thread(void *priv)
if (need_force_wake)
intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
+ /*
+ * Update the vReg of the vGPU which submitted this
+ * workload. The vGPU may use these registers for checking
+ * the context state. The value comes from GPU commands
+ * in this workload.
+ */
+ update_vreg_in_ctx(workload);
ret = dispatch_workload(workload);
@@ -1157,7 +1159,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id)
intel_context_unpin(s->shadow[id]);
@@ -1215,30 +1217,41 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
+ struct i915_ppgtt *ppgtt;
enum intel_engine_id i;
int ret;
- ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ppgtt = i915_ppgtt_create(&i915->gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+ i915_context_ppgtt_root_save(s, ppgtt);
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ for_each_engine(engine, i915, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL);
- ce = i915_gem_context_get_engine(ctx, i);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_shadow_ctx;
}
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+ intel_context_set_single_submission(ce);
+
+ if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ const unsigned int ring_size = 512 * SZ_4K;
+
+ ce->ring = __intel_context_ring_size(ring_size);
+ }
+
ret = intel_context_pin(ce);
intel_context_put(ce);
if (ret)
@@ -1264,18 +1277,19 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
- i915_gem_context_put(ctx);
+ i915_vm_put(&ppgtt->vm);
return 0;
out_shadow_ctx:
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ i915_context_ppgtt_root_restore(s, ppgtt);
+ for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i]))
break;
intel_context_unpin(s->shadow[i]);
+ intel_context_put(s->shadow[i]);
}
- i915_gem_context_put(ctx);
+ i915_vm_put(&ppgtt->vm);
return ret;
}
@@ -1424,9 +1438,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
-#define get_last_workload(q) \
- (list_empty(q) ? NULL : container_of(q->prev, \
- struct intel_vgpu_workload, list))
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
@@ -1446,7 +1457,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct list_head *q = workload_q_head(vgpu, ring_id);
- struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
@@ -1472,15 +1483,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
- if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
- gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
- /*
- * cannot use guest context head pointer here,
- * as it might not be updated at this time
- */
- head = last_workload->rb_tail;
+ list_for_each_entry_reverse(last_workload, q, list) {
+
+ if (same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n",
+ ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ break;
+ }
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
@@ -1564,9 +1580,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
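Rather than only checking the newest queued workload, intel_vgpu_create_workload() now walks the whole per-ring queue in reverse and reuses the ring tail of the most recent workload that shares the context descriptor, because the guest's head pointer may not have been updated yet. Reduced sketch of that lookup (struct wl is a hypothetical stand-in for intel_vgpu_workload):

	#include <linux/list.h>
	#include <linux/types.h>

	struct wl {
		struct list_head list;
		u32 context_id;
		unsigned long rb_tail;
	};

	/* Return the tail of the newest queued workload on @q that uses
	 * @context_id; fall back to the (possibly stale) guest head. */
	static unsigned long pick_ring_head(struct list_head *q, u32 context_id,
					    unsigned long guest_head)
	{
		struct wl *w;

		list_for_each_entry_reverse(w, q, list)
			if (w->context_id == context_id)
				return w->rb_tail;

		return guest_head;
	}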
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 44ce3c2b9ac1..85bd9bf4f6ee 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = true;
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -420,9 +420,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_submission;
- ret = intel_gvt_debugfs_add_vgpu(vgpu);
- if (ret)
- goto out_clean_sched_policy;
+ intel_gvt_debugfs_add_vgpu(vgpu);
ret = intel_gvt_hypervisor_set_opregion(vgpu);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 293e5bcc4b6c..b0a499753526 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,14 +4,16 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/debugobjects.h>
+
+#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
-#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
-
/*
* Active refs memory management
*
@@ -25,55 +27,200 @@ static struct i915_global_active {
} global;
struct active_node {
- struct i915_active_request base;
+ struct i915_active_fence base;
struct i915_active *ref;
struct rb_node node;
u64 timeline;
};
+static inline struct active_node *
+node_from_active(struct i915_active_fence *active)
+{
+ return container_of(active, struct active_node, base);
+}
+
+#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
+
+static inline bool is_barrier(const struct i915_active_fence *active)
+{
+ return IS_ERR(rcu_access_pointer(active->fence));
+}
+
+static inline struct llist_node *barrier_to_ll(struct active_node *node)
+{
+ GEM_BUG_ON(!is_barrier(&node->base));
+ return (struct llist_node *)&node->base.cb.node;
+}
+
+static inline struct intel_engine_cs *
+__barrier_to_engine(struct active_node *node)
+{
+ return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
+}
+
+static inline struct intel_engine_cs *
+barrier_to_engine(struct active_node *node)
+{
+ GEM_BUG_ON(!is_barrier(&node->base));
+ return __barrier_to_engine(node);
+}
+
+static inline struct active_node *barrier_from_ll(struct llist_node *x)
+{
+ return container_of((struct list_head *)x,
+ struct active_node, base.cb.node);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
+
+static void *active_debug_hint(void *addr)
+{
+ struct i915_active *ref = addr;
+
+ return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
+}
+
+static struct debug_obj_descr active_debug_desc = {
+ .name = "i915_active",
+ .debug_hint = active_debug_hint,
+};
+
+static void debug_active_init(struct i915_active *ref)
+{
+ debug_object_init(ref, &active_debug_desc);
+}
+
+static void debug_active_activate(struct i915_active *ref)
+{
+ lockdep_assert_held(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* before the first inc */
+ debug_object_activate(ref, &active_debug_desc);
+}
+
+static void debug_active_deactivate(struct i915_active *ref)
+{
+ lockdep_assert_held(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* after the last dec */
+ debug_object_deactivate(ref, &active_debug_desc);
+}
+
+static void debug_active_fini(struct i915_active *ref)
+{
+ debug_object_free(ref, &active_debug_desc);
+}
+
+static void debug_active_assert(struct i915_active *ref)
+{
+ debug_object_assert_init(ref, &active_debug_desc);
+}
+
+#else
+
+static inline void debug_active_init(struct i915_active *ref) { }
+static inline void debug_active_activate(struct i915_active *ref) { }
+static inline void debug_active_deactivate(struct i915_active *ref) { }
+static inline void debug_active_fini(struct i915_active *ref) { }
+static inline void debug_active_assert(struct i915_active *ref) { }
+
+#endif
+
static void
-__active_park(struct i915_active *ref)
+__active_retire(struct i915_active *ref)
{
struct active_node *it, *n;
+ struct rb_root root;
+ unsigned long flags;
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- GEM_BUG_ON(i915_active_request_isset(&it->base));
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /* return the unused nodes to our slabcache -- flushing the allocator */
+ if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
+ return;
+
+ GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
+ debug_active_deactivate(ref);
+
+ root = ref->tree;
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
+
+ /* After the final retire, the entire struct may be freed */
+ if (ref->retire)
+ ref->retire(ref);
+
+ /* ... except if you wait on it, you must manage your own references! */
+ wake_up_var(ref);
+
+ rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+ GEM_BUG_ON(i915_active_fence_isset(&it->base));
kmem_cache_free(global.slab_cache, it);
}
- ref->tree = RB_ROOT;
}
static void
-__active_retire(struct i915_active *ref)
+active_work(struct work_struct *wrk)
{
- GEM_BUG_ON(!ref->count);
- if (--ref->count)
+ struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
return;
- /* return the unused nodes to our slabcache */
- __active_park(ref);
+ __active_retire(ref);
+}
+
+static void
+active_retire(struct i915_active *ref)
+{
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
+ queue_work(system_unbound_wq, &ref->work);
+ return;
+ }
+
+ __active_retire(ref);
+}
+
+static inline struct dma_fence **
+__active_fence_slot(struct i915_active_fence *active)
+{
+ return (struct dma_fence ** __force)&active->fence;
+}
- ref->retire(ref);
+static inline bool
+active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_active_fence *active =
+ container_of(cb, typeof(*active), cb);
+
+ return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}
static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- __active_retire(container_of(base, struct active_node, base)->ref);
+ if (active_fence_cb(fence, cb))
+ active_retire(container_of(cb, struct active_node, base.cb)->ref);
}
static void
-last_retire(struct i915_active_request *base, struct i915_request *rq)
+excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- __active_retire(container_of(base, struct i915_active, last));
+ if (active_fence_cb(fence, cb))
+ active_retire(container_of(cb, struct i915_active, excl.cb));
}
-static struct i915_active_request *
-active_instance(struct i915_active *ref, u64 idx)
+static struct i915_active_fence *
+active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
- struct active_node *node;
+ struct active_node *node, *prealloc;
struct rb_node **p, *parent;
- struct i915_request *old;
+ u64 idx = tl->fence_context;
/*
* We track the most recently used timeline to skip a rbtree search
@@ -81,20 +228,18 @@ active_instance(struct i915_active *ref, u64 idx)
* at all. We can reuse the last slot if it is empty, that is
* after the previous activity has been retired, or if it matches the
* current timeline.
- *
- * Note that we allow the timeline to be active simultaneously in
- * the rbtree and the last cache. We do this to avoid having
- * to search and replace the rbtree element for a new timeline, with
- * the cost being that we must be aware that the ref may be retired
- * twice for the same timeline (as the older rbtree element will be
- * retired before the new request added to last).
*/
- old = i915_active_request_raw(&ref->last, BKL(ref));
- if (!old || old->fence.context == idx)
- goto out;
+ node = READ_ONCE(ref->cache);
+ if (node && node->timeline == idx)
+ return &node->base;
- /* Move the currently active fence into the rbtree */
- idx = old->fence.context;
+ /* Preallocate a replacement, just in case */
+ prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (!prealloc)
+ return NULL;
+
+ spin_lock_irq(&ref->tree_lock);
+ GEM_BUG_ON(i915_active_is_idle(ref));
parent = NULL;
p = &ref->tree.rb_node;
@@ -102,8 +247,10 @@ active_instance(struct i915_active *ref, u64 idx)
parent = *p;
node = rb_entry(parent, struct active_node, node);
- if (node->timeline == idx)
- goto replace;
+ if (node->timeline == idx) {
+ kmem_cache_free(global.slab_cache, prealloc);
+ goto out;
+ }
if (node->timeline < idx)
p = &parent->rb_right;
@@ -111,202 +258,428 @@ active_instance(struct i915_active *ref, u64 idx)
p = &parent->rb_left;
}
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-
- /* kmalloc may retire the ref->last (thanks shrinker)! */
- if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
- kmem_cache_free(global.slab_cache, node);
- goto out;
- }
-
- if (unlikely(!node))
- return ERR_PTR(-ENOMEM);
-
- i915_active_request_init(&node->base, NULL, node_retire);
+ node = prealloc;
+ __i915_active_fence_init(&node->base, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
-replace:
+out:
+ ref->cache = node;
+ spin_unlock_irq(&ref->tree_lock);
+
+ BUILD_BUG_ON(offsetof(typeof(*node), base));
+ return &node->base;
+}
+
+void __i915_active_init(struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ struct lock_class_key *mkey,
+ struct lock_class_key *wkey)
+{
+ unsigned long bits;
+
+ debug_active_init(ref);
+
+ ref->flags = 0;
+ ref->active = active;
+ ref->retire = ptr_unpack_bits(retire, &bits, 2);
+ if (bits & I915_ACTIVE_MAY_SLEEP)
+ ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+
+ spin_lock_init(&ref->tree_lock);
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+
+ init_llist_head(&ref->preallocated_barriers);
+ atomic_set(&ref->count, 0);
+ __mutex_init(&ref->mutex, "i915_active", mkey);
+ __i915_active_fence_init(&ref->excl, NULL, excl_retire);
+ INIT_WORK(&ref->work, active_work);
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
+#endif
+}
+
+static bool ____active_del_barrier(struct i915_active *ref,
+ struct active_node *node,
+ struct intel_engine_cs *engine)
+
+{
+ struct llist_node *head = NULL, *tail = NULL;
+ struct llist_node *pos, *next;
+
+ GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
+
/*
- * Overwrite the previous active slot in the rbtree with last,
- * leaving last zeroed. If the previous slot is still active,
- * we must be careful as we now only expect to receive one retire
- * callback not two, and so much undo the active counting for the
- * overwritten slot.
+ * Rebuild the llist excluding our node. We may perform this
+ * outside of the kernel_context timeline mutex and so someone
+ * else may be manipulating the engine->barrier_tasks, in
+ * which case either we or they will be upset :)
+ *
+ * A second __active_del_barrier() will report failure to claim
+ * the active_node and the caller will just shrug and know not to
+ * claim ownership of its node.
+ *
+ * A concurrent i915_request_add_active_barriers() will miss adding
+ * any of the tasks, but we will try again on the next -- and since
+ * we are actively using the barrier, we know that there will be
+ * at least another opportunity when we idle.
*/
- if (i915_active_request_isset(&node->base)) {
- /* Retire ourselves from the old rq->active_list */
- __list_del_entry(&node->base.link);
- ref->count--;
- GEM_BUG_ON(!ref->count);
+ llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
+ if (node == barrier_from_ll(pos)) {
+ node = NULL;
+ continue;
+ }
+
+ pos->next = head;
+ head = pos;
+ if (!tail)
+ tail = pos;
}
- GEM_BUG_ON(list_empty(&ref->last.link));
- list_replace_init(&ref->last.link, &node->base.link);
- node->base.request = fetch_and_zero(&ref->last.request);
+ if (head)
+ llist_add_batch(head, tail, &engine->barrier_tasks);
-out:
- return &ref->last;
+ return !node;
}
-void i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
- void (*retire)(struct i915_active *ref))
+static bool
+__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
- ref->i915 = i915;
- ref->retire = retire;
- ref->tree = RB_ROOT;
- i915_active_request_init(&ref->last, NULL, last_retire);
- init_llist_head(&ref->barriers);
- ref->count = 0;
+ return ____active_del_barrier(ref, node, barrier_to_engine(node));
}
int i915_active_ref(struct i915_active *ref,
- u64 timeline,
- struct i915_request *rq)
+ struct intel_timeline *tl,
+ struct dma_fence *fence)
{
- struct i915_active_request *active;
- int err = 0;
+ struct i915_active_fence *active;
+ int err;
+
+ lockdep_assert_held(&tl->mutex);
/* Prevent reaping in case we malloc/wait while building the tree */
- i915_active_acquire(ref);
+ err = i915_active_acquire(ref);
+ if (err)
+ return err;
- active = active_instance(ref, timeline);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
+ active = active_instance(ref, tl);
+ if (!active) {
+ err = -ENOMEM;
goto out;
}
- if (!i915_active_request_isset(active))
- ref->count++;
- __i915_active_request_set(active, rq);
+ if (is_barrier(active)) { /* proto-node used by our idle barrier */
+ /*
+ * This request is on the kernel_context timeline, and so
+ * we can use it to substitute for the pending idle-barrier
+ * request that we want to emit on the kernel_context.
+ */
+ __active_del_barrier(ref, node_from_active(active));
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
+ }
+ if (!__i915_active_fence_set(active, fence))
+ atomic_inc(&ref->count);
- GEM_BUG_ON(!ref->count);
out:
i915_active_release(ref);
return err;
}
-bool i915_active_acquire(struct i915_active *ref)
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
- lockdep_assert_held(BKL(ref));
- return !ref->count++;
+ /* We expect the caller to manage the exclusive timeline ordering */
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ if (!__i915_active_fence_set(&ref->excl, f))
+ atomic_inc(&ref->count);
}
-void i915_active_release(struct i915_active *ref)
+bool i915_active_acquire_if_busy(struct i915_active *ref)
{
- lockdep_assert_held(BKL(ref));
- __active_retire(ref);
+ debug_active_assert(ref);
+ return atomic_add_unless(&ref->count, 1, 0);
}
-int i915_active_wait(struct i915_active *ref)
+int i915_active_acquire(struct i915_active *ref)
{
- struct active_node *it, *n;
- int ret = 0;
+ int err;
- if (i915_active_acquire(ref))
- goto out_release;
+ if (i915_active_acquire_if_busy(ref))
+ return 0;
- ret = i915_active_request_retire(&ref->last, BKL(ref));
- if (ret)
- goto out_release;
+ err = mutex_lock_interruptible(&ref->mutex);
+ if (err)
+ return err;
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- ret = i915_active_request_retire(&it->base, BKL(ref));
- if (ret)
- break;
+ if (likely(!i915_active_acquire_if_busy(ref))) {
+ if (ref->active)
+ err = ref->active(ref);
+ if (!err) {
+ spin_lock_irq(&ref->tree_lock); /* __active_retire() */
+ debug_active_activate(ref);
+ atomic_inc(&ref->count);
+ spin_unlock_irq(&ref->tree_lock);
+ }
}
-out_release:
- i915_active_release(ref);
- return ret;
+ mutex_unlock(&ref->mutex);
+
+ return err;
+}
+
+void i915_active_release(struct i915_active *ref)
+{
+ debug_active_assert(ref);
+ active_retire(ref);
}
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active)
+static void enable_signaling(struct i915_active_fence *active)
{
- struct i915_request *barrier =
- i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
+ struct dma_fence *fence;
- return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
+ fence = i915_active_fence_get(active);
+ if (!fence)
+ return;
+
+ dma_fence_enable_sw_signaling(fence);
+ dma_fence_put(fence);
}
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+int i915_active_wait(struct i915_active *ref)
{
struct active_node *it, *n;
int err = 0;
- /* await allocates and so we need to avoid hitting the shrinker */
- if (i915_active_acquire(ref))
- goto out; /* was idle */
+ might_sleep();
- err = i915_request_await_active_request(rq, &ref->last);
- if (err)
- goto out;
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+ /* Flush lazy signals */
+ enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- err = i915_request_await_active_request(rq, &it->base);
- if (err)
- goto out;
+ if (is_barrier(&it->base)) /* unconnected idle barrier */
+ continue;
+
+ enable_signaling(&it->base);
}
+ /* Any fence added after the wait begins will not be auto-signaled */
-out:
i915_active_release(ref);
+ if (err)
+ return err;
+
+ if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
+ return -EINTR;
+
+ flush_work(&ref->work);
+ return 0;
+}
+
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+{
+ int err = 0;
+
+ if (rcu_access_pointer(ref->excl.fence)) {
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&ref->excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+ }
+
+ /* In the future we may choose to await on all fences */
+
return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
- GEM_BUG_ON(i915_active_request_isset(&ref->last));
+ debug_active_fini(ref);
+ GEM_BUG_ON(atomic_read(&ref->count));
+ GEM_BUG_ON(work_pending(&ref->work));
GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
- GEM_BUG_ON(ref->count);
+ mutex_destroy(&ref->mutex);
}
#endif
+static inline bool is_idle_barrier(struct active_node *node, u64 idx)
+{
+ return node->timeline == idx && !i915_active_fence_isset(&node->base);
+}
+
+static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
+{
+ struct rb_node *prev, *p;
+
+ if (RB_EMPTY_ROOT(&ref->tree))
+ return NULL;
+
+ spin_lock_irq(&ref->tree_lock);
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /*
+ * Try to reuse any existing barrier nodes already allocated for this
+ * i915_active, due to overlapping active phases there is likely a
+ * node kept alive (as we reuse before parking). We prefer to reuse
+ * completely idle barriers (less hassle in manipulating the llists),
+ * but otherwise any will do.
+ */
+ if (ref->cache && is_idle_barrier(ref->cache, idx)) {
+ p = &ref->cache->node;
+ goto match;
+ }
+
+ prev = NULL;
+ p = ref->tree.rb_node;
+ while (p) {
+ struct active_node *node =
+ rb_entry(p, struct active_node, node);
+
+ if (is_idle_barrier(node, idx))
+ goto match;
+
+ prev = p;
+ if (node->timeline < idx)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+
+ /*
+ * No quick match, but we did find the leftmost rb_node for the
+ * kernel_context. Walk the rb_tree in-order to see if there were
+ * any idle-barriers on this timeline that we missed, or just use
+ * the first pending barrier.
+ */
+ for (p = prev; p; p = rb_next(p)) {
+ struct active_node *node =
+ rb_entry(p, struct active_node, node);
+ struct intel_engine_cs *engine;
+
+ if (node->timeline > idx)
+ break;
+
+ if (node->timeline < idx)
+ continue;
+
+ if (is_idle_barrier(node, idx))
+ goto match;
+
+ /*
+ * The list of pending barriers is protected by the
+ * kernel_context timeline, which notably we do not hold
+ * here. i915_request_add_active_barriers() may consume
+ * the barrier before we claim it, so we have to check
+ * for success.
+ */
+ engine = __barrier_to_engine(node);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (is_barrier(&node->base) &&
+ ____active_del_barrier(ref, node, engine))
+ goto match;
+ }
+
+ spin_unlock_irq(&ref->tree_lock);
+
+ return NULL;
+
+match:
+ rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
+ if (p == &ref->cache->node)
+ ref->cache = NULL;
+ spin_unlock_irq(&ref->tree_lock);
+
+ return rb_entry(p, struct active_node, node);
+}
+
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
- struct llist_node *pos, *next;
- unsigned long tmp;
+ intel_engine_mask_t tmp, mask = engine->mask;
+ struct llist_node *first = NULL, *last = NULL;
+ struct intel_gt *gt = engine->gt;
int err;
- GEM_BUG_ON(!engine->mask);
- for_each_engine_masked(engine, i915, engine->mask, tmp) {
- struct intel_context *kctx = engine->kernel_context;
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /* Wait until the previous preallocation is completed */
+ while (!llist_empty(&ref->preallocated_barriers))
+ cond_resched();
+
+ /*
+ * Preallocate a node for each physical engine supporting the target
+ * engine (remember virtual engines have more than one sibling).
+ * We can then use the preallocated nodes in
+ * i915_active_acquire_barrier()
+ */
+ for_each_engine_masked(engine, gt, mask, tmp) {
+ u64 idx = engine->kernel_context->timeline->fence_context;
+ struct llist_node *prev = first;
struct active_node *node;
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
- if (unlikely(!node)) {
- err = -ENOMEM;
- goto unwind;
+ node = reuse_idle_barrier(ref, idx);
+ if (!node) {
+ node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ RCU_INIT_POINTER(node->base.fence, NULL);
+ node->base.cb.func = node_retire;
+ node->timeline = idx;
+ node->ref = ref;
}
- i915_active_request_init(&node->base,
- (void *)engine, node_retire);
- node->timeline = kctx->ring->timeline->fence_context;
- node->ref = ref;
- ref->count++;
+ if (!i915_active_fence_isset(&node->base)) {
+ /*
+ * Mark this as being *our* unconnected proto-node.
+ *
+ * Since this node is not in any list, and we have
+ * decoupled it from the rbtree, we can reuse the
+ * request to indicate this is an idle-barrier node
+ * and then we can use the rb_node and list pointers
+ * for our tracking of the pending barrier.
+ */
+ RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
+ node->base.cb.node.prev = (void *)engine;
+ atomic_inc(&ref->count);
+ }
+ GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
+ GEM_BUG_ON(barrier_to_engine(node) != engine);
+ first = barrier_to_ll(node);
+ first->next = prev;
+ if (!last)
+ last = first;
intel_engine_pm_get(engine);
- llist_add((struct llist_node *)&node->base.link,
- &ref->barriers);
}
+ GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+ llist_add_batch(first, last, &ref->preallocated_barriers);
+
return 0;
unwind:
- llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
- struct active_node *node;
+ while (first) {
+ struct active_node *node = barrier_from_ll(first);
- node = container_of((struct list_head *)pos,
- typeof(*node), base.link);
- engine = (void *)rcu_access_pointer(node->base.request);
+ first = first->next;
+
+ atomic_dec(&ref->count);
+ intel_engine_pm_put(barrier_to_engine(node));
- intel_engine_pm_put(engine);
kmem_cache_free(global.slab_cache, node);
}
return err;
@@ -315,68 +688,160 @@ unwind:
void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
+ unsigned long flags;
- i915_active_acquire(ref);
+ GEM_BUG_ON(i915_active_is_idle(ref));
- llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
- struct intel_engine_cs *engine;
- struct active_node *node;
+ /*
+ * Transfer the list of preallocated barriers into the
+ * i915_active rbtree, but only as proto-nodes. They will be
+ * populated by i915_request_add_active_barriers() to point to the
+ * request that will eventually release them.
+ */
+ llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ struct active_node *node = barrier_from_ll(pos);
+ struct intel_engine_cs *engine = barrier_to_engine(node);
struct rb_node **p, *parent;
- node = container_of((struct list_head *)pos,
- typeof(*node), base.link);
-
- engine = (void *)rcu_access_pointer(node->base.request);
- RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
-
+ spin_lock_irqsave_nested(&ref->tree_lock, flags,
+ SINGLE_DEPTH_NESTING);
parent = NULL;
p = &ref->tree.rb_node;
while (*p) {
+ struct active_node *it;
+
parent = *p;
- if (rb_entry(parent,
- struct active_node,
- node)->timeline < node->timeline)
+
+ it = rb_entry(parent, struct active_node, node);
+ if (it->timeline < node->timeline)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
- llist_add((struct llist_node *)&node->base.link,
- &engine->barrier_tasks);
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+ llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- i915_active_release(ref);
}
-void i915_request_add_barriers(struct i915_request *rq)
+static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
+{
+ return __active_fence_slot(&barrier_from_ll(node)->base);
+}
+
+void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
+ unsigned long flags;
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
- list_add_tail((struct list_head *)node, &rq->active_list);
+ GEM_BUG_ON(!intel_context_is_barrier(rq->context));
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
+ GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
+
+ node = llist_del_all(&engine->barrier_tasks);
+ if (!node)
+ return;
+ /*
+ * Attach the list of proto-fences to the in-flight request such
+ * that the parent i915_active will be released when this request
+ * is retired.
+ */
+ spin_lock_irqsave(&rq->lock, flags);
+ llist_for_each_safe(node, next, node) {
+ /* serialise with reuse_idle_barrier */
+ smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
+ list_add_tail((struct list_head *)node, &rq->fence.cb_list);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
}
-int i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq)
+/*
+ * __i915_active_fence_set: Update the last active fence along its timeline
+ * @active: the active tracker
+ * @fence: the new fence (under construction)
+ *
+ * Records the new @fence as the last active fence along its timeline in
+ * this active tracker, moving the tracking callbacks from the previous
+ * fence onto this one. Returns the previous fence (if not already completed),
+ * which the caller must ensure is executed before the new fence. To ensure
+ * that the order of fences within the timeline of the i915_active_fence is
+ * understood, it should be locked by the caller.
+ */
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence)
{
- int err;
+ struct dma_fence *prev;
+ unsigned long flags;
- /* Must maintain ordering wrt previous active requests */
- err = i915_request_await_active_request(rq, active);
- if (err)
- return err;
+ if (fence == rcu_access_pointer(active->fence))
+ return fence;
- __i915_active_request_set(active, rq);
- return 0;
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+
+ /*
+ * Consider that we have two threads arriving (A and B), with
+ * C already resident as the active->fence.
+ *
+ * A does the xchg first, and so it sees C or NULL depending
+ * on the timing of the interrupt handler. If it is NULL, the
+ * previous fence must have been signaled and we know that
+ * we are first on the timeline. If it is still present,
+ * we acquire the lock on that fence and serialise with the interrupt
+ * handler, in the process removing it from any future interrupt
+ * callback. A will then wait on C before executing (if present).
+ *
+ * As B is second, it sees A as the previous fence and so waits for
+ * it to complete its transition and takes over the occupancy for
+ * itself -- remembering that it needs to wait on A before executing.
+ *
+ * Note the strong ordering of the timeline also provides consistent
+ * nesting rules for the fence->lock; the inner lock is always the
+ * older lock.
+ */
+ spin_lock_irqsave(fence->lock, flags);
+ prev = xchg(__active_fence_slot(active), fence);
+ if (prev) {
+ GEM_BUG_ON(prev == fence);
+ spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ __list_del_entry(&active->cb.node);
+ spin_unlock(prev->lock); /* serialise with prev->cb_list */
+ }
+ GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
+ list_add_tail(&active->cb.node, &fence->cb_list);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return prev;
+}
+
+int i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq)
+{
+ struct dma_fence *fence;
+ int err = 0;
+
+ /* Must maintain timeline ordering wrt previous active requests */
+ rcu_read_lock();
+ fence = __i915_active_fence_set(active, &rq->fence);
+ if (fence) /* but the previous fence may not belong to that timeline! */
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ return err;
}
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request)
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- /* Space left intentionally blank */
+ active_fence_cb(fence, cb);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index c14eebf6d074..51e1e854ca55 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -12,6 +12,10 @@
#include "i915_active_types.h"
#include "i915_request.h"
+struct i915_request;
+struct intel_engine_cs;
+struct intel_timeline;
+
/*
 * We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
@@ -28,319 +32,95 @@
* write access so that we can perform concurrent read operations between
* the CPU and GPU engines, as well as waiting for all rendering to
* complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_active_request to track the most recent (in
+ * object then embeds a #i915_active_fence to track the most recent (in
* retirement order) request relevant for the desired mode of access.
- * The #i915_active_request is updated with i915_active_request_set() to
+ * The #i915_active_fence is updated with i915_active_fence_set() to
* track the most recent fence request, typically this is done as part of
* i915_vma_move_to_active().
*
- * When the #i915_active_request completes (is retired), it will
+ * When the #i915_active_fence completes (is retired), it will
* signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_active_request.request == NULL). The owner
+ * itself as idle (i915_active_fence.fence == NULL). The owner
* can then perform any action, such as delayed freeing of an active
* resource including itself.
*/
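
As a rough illustration of the tracking model described above (a sketch, not taken from this series): an object embeds an i915_active_fence, records the request that last touched it, and can later ask whether that request is still in flight. struct foo and its helpers are hypothetical.

struct foo {
	struct i915_active_fence last_write;
};

static void foo_init(struct foo *foo)
{
	INIT_ACTIVE_FENCE(&foo->last_write);	/* starts out idle */
}

static int foo_mark_written(struct foo *foo, struct i915_request *rq)
{
	/* orders @rq after the previously tracked request, if any */
	return i915_active_fence_set(&foo->last_write, rq);
}

static bool foo_maybe_busy(const struct foo *foo)
{
	/* retirement is lazy, so this may report stale information */
	return i915_active_fence_isset(&foo->last_write);
}
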
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request);
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
/**
- * i915_active_request_init - prepares the activity tracker for use
+ * __i915_active_fence_init - prepares the activity tracker for use
* @active - the active tracker
- * @rq - initial request to track, can be NULL
+ * @fence - initial fence to track, can be NULL
 * @func - a callback for when the tracker is retired (becomes idle),
* can be NULL
*
- * i915_active_request_init() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
+ * __i915_active_fence_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active fence
+ * associated with it. When the last fence becomes idle, when it is retired
* after completion, the optional callback @func is invoked.
*/
static inline void
-i915_active_request_init(struct i915_active_request *active,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+__i915_active_fence_init(struct i915_active_fence *active,
+ void *fence,
+ dma_fence_func_t fn)
{
- RCU_INIT_POINTER(active->request, rq);
- INIT_LIST_HEAD(&active->link);
- active->retire = retire ?: i915_active_retire_noop;
+ RCU_INIT_POINTER(active->fence, fence);
+ active->cb.func = fn ?: i915_active_noop;
}
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
-
-/**
- * i915_active_request_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * __i915_active_request_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-__i915_active_request_set(struct i915_active_request *active,
- struct i915_request *request)
-{
- list_move(&active->link, &request->active_list);
- rcu_assign_pointer(active->request, request);
-}
+#define INIT_ACTIVE_FENCE(A) \
+ __i915_active_fence_init((A), NULL, NULL)
-int __must_check
-i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq);
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence);
/**
- * i915_active_request_set_retire_fn - updates the retirement callback
+ * i915_active_fence_set - updates the tracker to watch the current fence
* @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
+ * @rq - the request to watch
*
- * i915_active_request_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
+ * i915_active_fence_set() watches the given @rq for completion. While
+ * that @rq is busy, the @active reports busy. When that @rq is signaled
+ * (or else retired) the @active tracker is updated to report idle.
*/
-static inline void
-i915_active_request_set_retire_fn(struct i915_active_request *active,
- i915_active_retire_fn fn,
- struct mutex *mutex)
-{
- lockdep_assert_held(mutex);
- active->retire = fn ?: i915_active_retire_noop;
-}
-
-/**
- * i915_active_request_raw - return the active request
- * @active - the active tracker
- *
- * i915_active_request_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_raw(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return rcu_dereference_protected(active->request,
- lockdep_is_held(mutex));
-}
-
-/**
- * i915_active_request_peek - report the active request being monitored
- * @active - the active tracker
- *
- * i915_active_request_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_peek(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- struct i915_request *request;
-
- request = i915_active_request_raw(active, mutex);
- if (!request || i915_request_completed(request))
- return NULL;
-
- return request;
-}
-
-/**
- * i915_active_request_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_active_request_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_get(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return i915_request_get(i915_active_request_peek(active, mutex));
-}
-
-/**
- * __i915_active_request_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_active_request_get() returns a reference to the active request,
- * or NULL if the active tracker is idle. The caller must hold the RCU read
- * lock, but the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_active_request_get_rcu(const struct i915_active_request *active)
-{
- /*
- * Performing a lockless retrieval of the active request is super
- * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
- * slab of request objects will not be freed whilst we hold the
- * RCU read lock. It does not guarantee that the request itself
- * will not be freed and then *reused*. Viz,
- *
- * Thread A Thread B
- *
- * rq = active.request
- * retire(rq) -> free(rq);
- * (rq is now first on the slab freelist)
- * active.request = NULL
- *
- * rq = new submission on a new object
- * ref(rq)
- *
- * To prevent the request from being reused whilst the caller
- * uses it, we take a reference like normal. Whilst acquiring
- * the reference we check that it is not in a destroyed state
- * (refcnt == 0). That prevents the request being reallocated
- * whilst the caller holds on to it. To check that the request
- * was not reallocated as we acquired the reference we have to
- * check that our request remains the active request across
- * the lookup, in the same manner as a seqlock. The visibility
- * of the pointer versus the reference counting is controlled
- * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
- *
- * In the middle of all that, we inspect whether the request is
- * complete. Retiring is lazy so the request may be completed long
- * before the active tracker is updated. Querying whether the
- * request is complete is far cheaper (as it involves no locked
- * instructions setting cachelines to exclusive) than acquiring
- * the reference, so we do it first. The RCU read lock ensures the
- * pointer dereference is valid, but does not ensure that the
- * seqno nor HWS is the right one! However, if the request was
- * reallocated, that means the active tracker's request was complete.
- * If the new request is also complete, then both are and we can
- * just report the active tracker is idle. If the new request is
- * incomplete, then we acquire a reference on it and check that
- * it remained the active request.
- *
- * It is then imperative that we do not zero the request on
- * reallocation, so that we can chase the dangling pointers!
- * See i915_request_alloc().
- */
- do {
- struct i915_request *request;
-
- request = rcu_dereference(active->request);
- if (!request || i915_request_completed(request))
- return NULL;
-
- /*
- * An especially silly compiler could decide to recompute the
- * result of i915_request_completed, more specifically
- * re-emit the load for request->fence.seqno. A race would catch
- * a later seqno value, which could flip the result from true to
- * false. Which means part of the instructions below might not
- * be executed, while later on instructions are executed. Due to
- * barriers within the refcounting the inconsistency can't reach
- * past the call to i915_request_get_rcu, but not executing
- * that while still executing i915_request_put() creates
- * havoc enough. Prevent this with a compiler barrier.
- */
- barrier();
-
- request = i915_request_get_rcu(request);
-
- /*
- * What stops the following rcu_access_pointer() from occurring
- * before the above i915_request_get_rcu()? If we were
- * to read the value before pausing to get the reference to
- * the request, we may not notice a change in the active
- * tracker.
- *
- * The rcu_access_pointer() is a mere compiler barrier, which
- * means both the CPU and compiler are free to perform the
- * memory read without constraint. The compiler only has to
- * ensure that any operations after the rcu_access_pointer()
- * occur afterwards in program order. This means the read may
- * be performed earlier by an out-of-order CPU, or adventurous
- * compiler.
- *
- * The atomic operation at the heart of
- * i915_request_get_rcu(), see dma_fence_get_rcu(), is
- * atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_request_get_rcu()
- * returns the request (and so with the reference counted
- * incremented) then the following read for rcu_access_pointer()
- * must occur after the atomic operation and so confirm
- * that this request is the one currently being tracked.
- *
- * The corresponding write barrier is part of
- * rcu_assign_pointer().
- */
- if (!request || request == rcu_access_pointer(active->request))
- return rcu_pointer_handoff(request);
-
- i915_request_put(request);
- } while (1);
-}
-
+int __must_check
+i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq);
/**
- * i915_active_request_get_unlocked - return a reference to the active request
+ * i915_active_fence_get - return a reference to the active fence
* @active - the active tracker
*
- * i915_active_request_get_unlocked() returns a reference to the active request,
+ * i915_active_fence_get() returns a reference to the active fence,
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_request_put().
+ * The reference should be freed with dma_fence_put().
*/
-static inline struct i915_request *
-i915_active_request_get_unlocked(const struct i915_active_request *active)
+static inline struct dma_fence *
+i915_active_fence_get(struct i915_active_fence *active)
{
- struct i915_request *request;
+ struct dma_fence *fence;
rcu_read_lock();
- request = __i915_active_request_get_rcu(active);
+ fence = dma_fence_get_rcu_safe(&active->fence);
rcu_read_unlock();
- return request;
+ return fence;
}
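
A hedged usage sketch (not from the patch): take a reference to whatever fence is currently tracked and wait on it outside the RCU section; foo_flush() is a hypothetical helper.

static int foo_flush(struct i915_active_fence *active)
{
	struct dma_fence *fence;
	int err = 0;

	fence = i915_active_fence_get(active);	/* NULL if already idle */
	if (fence) {
		err = dma_fence_wait(fence, true); /* 0, or -ERESTARTSYS */
		dma_fence_put(fence);
	}

	return err;
}
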
/**
- * i915_active_request_isset - report whether the active tracker is assigned
+ * i915_active_fence_isset - report whether the active tracker is assigned
* @active - the active tracker
*
- * i915_active_request_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
+ * i915_active_fence_isset() returns true if the active tracker is currently
+ * assigned to a fence. Due to the lazy retiring, that fence may be idle
* and this may report stale information.
*/
static inline bool
-i915_active_request_isset(const struct i915_active_request *active)
-{
- return rcu_access_pointer(active->request);
-}
-
-/**
- * i915_active_request_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_active_request_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_active_request_retire(struct i915_active_request *active,
- struct mutex *mutex)
+i915_active_fence_isset(const struct i915_active_fence *active)
{
- struct i915_request *request;
- long ret;
-
- request = i915_active_request_raw(active, mutex);
- if (!request)
- return 0;
-
- ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0)
- return ret;
-
- list_del_init(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, request);
-
- return 0;
+ return rcu_access_pointer(active->fence);
}
/*
@@ -369,35 +149,55 @@ i915_active_request_retire(struct i915_active_request *active,
* synchronisation.
*/
-void i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
- void (*retire)(struct i915_active *ref));
+void __i915_active_init(struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ struct lock_class_key *mkey,
+ struct lock_class_key *wkey);
-int i915_active_ref(struct i915_active *ref,
- u64 timeline,
- struct i915_request *rq);
+/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
+#define i915_active_init(ref, active, retire) do { \
+ static struct lock_class_key __mkey; \
+ static struct lock_class_key __wkey; \
+ \
+ __i915_active_init(ref, active, retire, &__mkey, &__wkey); \
+} while (0)
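
Purely as an illustration of what the per-class lock keys buy (a sketch, not part of the patch): every distinct embedder of an i915_active gets its own lockdep class, plus optional active/retire callbacks bracketing the busy period. struct bar and its callbacks are hypothetical; i915_active_add_request() assumes the request's timeline mutex is held, as it is during request construction.

struct bar {
	struct i915_active active;
};

static int bar_active(struct i915_active *ref)
{
	/* first user: pin/acquire whatever the tracker protects */
	return 0;
}

__i915_active_call
static void bar_retire(struct i915_active *ref)
{
	/* last user gone and every tracked fence signaled */
	struct bar *bar = container_of(ref, typeof(*bar), active);

	(void)bar; /* e.g. unpin, or free the containing object */
}

static void bar_init(struct bar *bar)
{
	i915_active_init(&bar->active, bar_active, bar_retire);
}

static int bar_track(struct bar *bar, struct i915_request *rq)
{
	/* caller holds i915_request_timeline(rq)->mutex */
	return i915_active_add_request(&bar->active, rq);
}

static void bar_fini(struct bar *bar)
{
	i915_active_wait(&bar->active);	/* ignores -EINTR for brevity */
	i915_active_fini(&bar->active);
}
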
-int i915_active_wait(struct i915_active *ref);
+int i915_active_ref(struct i915_active *ref,
+ struct intel_timeline *tl,
+ struct dma_fence *fence);
-int i915_request_await_active(struct i915_request *rq,
- struct i915_active *ref);
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active);
+static inline int
+i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+{
+ return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
+}
-bool i915_active_acquire(struct i915_active *ref);
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
-static inline void i915_active_cancel(struct i915_active *ref)
+static inline bool i915_active_has_exclusive(struct i915_active *ref)
{
- GEM_BUG_ON(ref->count != 1);
- ref->count = 0;
+ return rcu_access_pointer(ref->excl.fence);
}
+int i915_active_wait(struct i915_active *ref);
+
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+
+int i915_active_acquire(struct i915_active *ref);
+bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
+static inline void __i915_active_acquire(struct i915_active *ref)
+{
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ atomic_inc(&ref->count);
+}
+
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
- return !ref->count;
+ return !atomic_read(&ref->count);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -409,6 +209,9 @@ static inline void i915_active_fini(struct i915_active *ref) { }
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
-void i915_request_add_barriers(struct i915_request *rq);
+void i915_request_add_active_barriers(struct i915_request *rq);
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+void i915_active_unlock_wait(struct i915_active *ref);
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index c025991b9233..6360c3e4b765 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -7,33 +7,48 @@
#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_
+#include <linux/atomic.h>
+#include <linux/dma-fence.h>
#include <linux/llist.h>
+#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
+#include "i915_utils.h"
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
- struct i915_request *);
-
-struct i915_active_request {
- struct i915_request __rcu *request;
- struct list_head link;
- i915_active_retire_fn retire;
+struct i915_active_fence {
+ struct dma_fence __rcu *fence;
+ struct dma_fence_cb cb;
};
+struct active_node;
+
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
struct i915_active {
- struct drm_i915_private *i915;
+ atomic_t count;
+ struct mutex mutex;
+ spinlock_t tree_lock;
+ struct active_node *cache;
struct rb_root tree;
- struct i915_active_request last;
- unsigned int count;
+ /* Preallocated "exclusive" node */
+ struct i915_active_fence excl;
+
+ unsigned long flags;
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
+
+ int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
- struct llist_head barriers;
+ struct work_struct work;
+
+ struct llist_head preallocated_barriers;
};
#endif /* _I915_ACTIVE_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
new file mode 100644
index 000000000000..66883af64ca1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kmemleak.h>
+#include <linux/slab.h>
+
+#include "i915_buddy.h"
+
+#include "i915_gem.h"
+#include "i915_globals.h"
+#include "i915_utils.h"
+
+static struct i915_global_block {
+ struct i915_global base;
+ struct kmem_cache *slab_blocks;
+} global;
+
+static void i915_global_buddy_shrink(void)
+{
+ kmem_cache_shrink(global.slab_blocks);
+}
+
+static void i915_global_buddy_exit(void)
+{
+ kmem_cache_destroy(global.slab_blocks);
+}
+
+static struct i915_global_block global = { {
+ .shrink = i915_global_buddy_shrink,
+ .exit = i915_global_buddy_exit,
+} };
+
+int __init i915_global_buddy_init(void)
+{
+ global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_blocks)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
+
+static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
+ unsigned int order,
+ u64 offset)
+{
+ struct i915_buddy_block *block;
+
+ block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ block->header = offset;
+ block->header |= order;
+ block->parent = parent;
+
+ return block;
+}
+
+static void i915_block_free(struct i915_buddy_block *block)
+{
+ kmem_cache_free(global.slab_blocks, block);
+}
+
+static void mark_allocated(struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_ALLOCATED;
+
+ list_del(&block->link);
+}
+
+static void mark_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_FREE;
+
+ list_add(&block->link,
+ &mm->free_list[i915_buddy_block_order(block)]);
+}
+
+static void mark_split(struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_SPLIT;
+
+ list_del(&block->link);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
+{
+ unsigned int i;
+ u64 offset;
+
+ if (size < chunk_size)
+ return -EINVAL;
+
+ if (chunk_size < PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(chunk_size))
+ return -EINVAL;
+
+ size = round_down(size, chunk_size);
+
+ mm->size = size;
+ mm->chunk_size = chunk_size;
+ mm->max_order = ilog2(size) - ilog2(chunk_size);
+
+ GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
+
+ mm->free_list = kmalloc_array(mm->max_order + 1,
+ sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!mm->free_list)
+ return -ENOMEM;
+
+ for (i = 0; i <= mm->max_order; ++i)
+ INIT_LIST_HEAD(&mm->free_list[i]);
+
+ mm->n_roots = hweight64(size);
+
+ mm->roots = kmalloc_array(mm->n_roots,
+ sizeof(struct i915_buddy_block *),
+ GFP_KERNEL);
+ if (!mm->roots)
+ goto out_free_list;
+
+ offset = 0;
+ i = 0;
+
+ /*
+ * Split into power-of-two blocks, in case we are given a size that is
+ * not itself a power-of-two.
+ */
+ do {
+ struct i915_buddy_block *root;
+ unsigned int order;
+ u64 root_size;
+
+ root_size = rounddown_pow_of_two(size);
+ order = ilog2(root_size) - ilog2(chunk_size);
+
+ root = i915_block_alloc(NULL, order, offset);
+ if (!root)
+ goto out_free_roots;
+
+ mark_free(mm, root);
+
+ GEM_BUG_ON(i > mm->max_order);
+ GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
+
+ mm->roots[i] = root;
+
+ offset += root_size;
+ size -= root_size;
+ i++;
+ } while (size);
+
+ return 0;
+
+out_free_roots:
+ while (i--)
+ i915_block_free(mm->roots[i]);
+ kfree(mm->roots);
+out_free_list:
+ kfree(mm->free_list);
+ return -ENOMEM;
+}
+
+void i915_buddy_fini(struct i915_buddy_mm *mm)
+{
+ int i;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
+ i915_block_free(mm->roots[i]);
+ }
+
+ kfree(mm->roots);
+ kfree(mm->free_list);
+}
+
+static int split_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ unsigned int block_order = i915_buddy_block_order(block) - 1;
+ u64 offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+ GEM_BUG_ON(!i915_buddy_block_order(block));
+
+ block->left = i915_block_alloc(block, block_order, offset);
+ if (!block->left)
+ return -ENOMEM;
+
+ block->right = i915_block_alloc(block, block_order,
+ offset + (mm->chunk_size << block_order));
+ if (!block->right) {
+ i915_block_free(block->left);
+ return -ENOMEM;
+ }
+
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
+
+ mark_split(block);
+
+ return 0;
+}
+
+static struct i915_buddy_block *
+get_buddy(struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *parent;
+
+ parent = block->parent;
+ if (!parent)
+ return NULL;
+
+ if (parent->left == block)
+ return parent->right;
+
+ return parent->left;
+}
+
+static void __i915_buddy_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *parent;
+
+ while ((parent = block->parent)) {
+ struct i915_buddy_block *buddy;
+
+ buddy = get_buddy(block);
+
+ if (!i915_buddy_block_is_free(buddy))
+ break;
+
+ list_del(&buddy->link);
+
+ i915_block_free(block);
+ i915_block_free(buddy);
+
+ block = parent;
+ }
+
+ mark_free(mm, block);
+}
+
+void i915_buddy_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
+ __i915_buddy_free(mm, block);
+}
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
+{
+ struct i915_buddy_block *block, *on;
+
+ list_for_each_entry_safe(block, on, objects, link) {
+ i915_buddy_free(mm, block);
+ cond_resched();
+ }
+ INIT_LIST_HEAD(objects);
+}
+
+/*
+ * Allocate power-of-two block. The order value here translates to:
+ *
+ * 0 = 2^0 * mm->chunk_size
+ * 1 = 2^1 * mm->chunk_size
+ * 2 = 2^2 * mm->chunk_size
+ * ...
+ */
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
+{
+ struct i915_buddy_block *block = NULL;
+ unsigned int i;
+ int err;
+
+ for (i = order; i <= mm->max_order; ++i) {
+ block = list_first_entry_or_null(&mm->free_list[i],
+ struct i915_buddy_block,
+ link);
+ if (block)
+ break;
+ }
+
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+
+ while (i != order) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto out_free;
+
+ /* Go low */
+ block = block->left;
+ i--;
+ }
+
+ mark_allocated(block);
+ kmemleak_update_trace(block);
+ return block;
+
+out_free:
+ __i915_buddy_free(mm, block);
+ return ERR_PTR(err);
+}
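
A hedged example of the order arithmetic in the comment above (not from the patch): for a power-of-two size the order is ilog2(size) - ilog2(chunk_size), so a 2M block from a 4K-chunk mm is order 9. buddy_alloc_size() is hypothetical.

/* Sketch: @size is assumed to be a power of two, >= mm->chunk_size. */
static struct i915_buddy_block *
buddy_alloc_size(struct i915_buddy_mm *mm, u64 size)
{
	unsigned int order = ilog2(size) - ilog2(mm->chunk_size);
	struct i915_buddy_block *block;

	block = i915_buddy_alloc(mm, order);
	if (!IS_ERR(block))
		GEM_BUG_ON(i915_buddy_block_size(mm, block) != size);

	return block;
}
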
+
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+/*
+ * Allocate range. Note that it's safe to chain together multiple alloc_ranges
+ * with the same blocks list.
+ *
+ * Intended for pre-allocating portions of the address space, for example to
+ * reserve a block for the initial framebuffer or similar, hence the expectation
+ * here is that i915_buddy_alloc() is still the main vehicle for
+ * allocations, so if that's not the case then the drm_mm range allocator is
+ * probably a much better fit, and so you should probably go use that instead.
+ */
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 start, u64 size)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *buddy;
+ LIST_HEAD(allocated);
+ LIST_HEAD(dfs);
+ u64 end;
+ int err;
+ int i;
+
+ if (size < mm->chunk_size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size | start, mm->chunk_size))
+ return -EINVAL;
+
+ if (range_overflows(start, size, mm->size))
+ return -EINVAL;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ end = start + size - 1;
+
+ do {
+ u64 block_start;
+ u64 block_end;
+
+ block = list_first_entry_or_null(&dfs,
+ struct i915_buddy_block,
+ tmp_link);
+ if (!block)
+ break;
+
+ list_del(&block->tmp_link);
+
+ block_start = i915_buddy_block_offset(block);
+ block_end = block_start + i915_buddy_block_size(mm, block) - 1;
+
+ if (!overlaps(start, end, block_start, block_end))
+ continue;
+
+ if (i915_buddy_block_is_allocated(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ if (contains(start, end, block_start, block_end)) {
+ if (!i915_buddy_block_is_free(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ mark_allocated(block);
+ list_add_tail(&block->link, &allocated);
+ continue;
+ }
+
+ if (!i915_buddy_block_is_split(block)) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+ }
+
+ list_add(&block->right->tmp_link, &dfs);
+ list_add(&block->left->tmp_link, &dfs);
+ } while (1);
+
+ list_splice_tail(&allocated, blocks);
+ return 0;
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = get_buddy(block);
+ if (buddy &&
+ (i915_buddy_block_is_free(block) &&
+ i915_buddy_block_is_free(buddy)))
+ __i915_buddy_free(mm, block);
+
+err_free:
+ i915_buddy_free_list(mm, &allocated);
+ return err;
+}
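
A rough usage sketch (placement values are made up, and <linux/sizes.h> is assumed for the SZ_* constants): pre-reserve a fixed range, e.g. for an inherited framebuffer, and hand everything back later with i915_buddy_free_list().

static int reserve_fw_framebuffer(struct i915_buddy_mm *mm,
				  struct list_head *reserved)
{
	/* hypothetical: 8M of carved-out space starting at 16M */
	return i915_buddy_alloc_range(mm, reserved, SZ_16M, SZ_8M);
}

Several such reservations can share one blocks list, matching the chaining note above, and are released together via i915_buddy_free_list(mm, reserved).
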
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_buddy.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
new file mode 100644
index 000000000000..ed41f3507cdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_BUDDY_H__
+#define __I915_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+struct i915_buddy_block {
+#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define I915_BUDDY_ALLOCATED (1 << 10)
+#define I915_BUDDY_FREE (2 << 10)
+#define I915_BUDDY_SPLIT (3 << 10)
+#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(9, 0)
+ u64 header;
+
+ struct i915_buddy_block *left;
+ struct i915_buddy_block *right;
+ struct i915_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through i915_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * i915_buddy_free* ownership is given back to the mm.
+ */
+ struct list_head link;
+ struct list_head tmp_link;
+};
+
+#define I915_BUDDY_MAX_ORDER I915_BUDDY_HEADER_ORDER
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * i915_buddy_alloc* and i915_buddy_free* should suffice.
+ */
+struct i915_buddy_mm {
+ /* Maintain a free list for each order. */
+ struct list_head *free_list;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free, in which case they will
+ * also exist on the respective free list.
+ */
+ struct i915_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least PAGE_SIZE */
+ u64 chunk_size;
+ u64 size;
+};
+
+static inline u64
+i915_buddy_block_offset(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+i915_buddy_block_order(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+i915_buddy_block_state(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+i915_buddy_block_is_allocated(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
+}
+
+static inline bool
+i915_buddy_block_is_free(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_FREE;
+}
+
+static inline bool
+i915_buddy_block_is_split(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
+}
+
+static inline u64
+i915_buddy_block_size(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ return mm->chunk_size << i915_buddy_block_order(block);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
+
+void i915_buddy_fini(struct i915_buddy_mm *mm);
+
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
+
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 start, u64 size);
+
+void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
+
+#endif
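
Tying the interface together, a minimal hypothetical wrapper following the locking note above (a single mutex around alloc/free suffices); struct region is not part of the patch.

struct region {
	struct i915_buddy_mm mm;
	struct mutex lock;
};

static int region_init(struct region *r, u64 size)
{
	mutex_init(&r->lock);
	return i915_buddy_init(&r->mm, size, PAGE_SIZE);
}

static struct i915_buddy_block *
region_alloc(struct region *r, unsigned int order)
{
	struct i915_buddy_block *block;

	mutex_lock(&r->lock);
	block = i915_buddy_alloc(&r->mm, order);
	mutex_unlock(&r->lock);

	return block;
}

static void region_free(struct region *r, struct i915_buddy_block *block)
{
	mutex_lock(&r->lock);
	i915_buddy_free(&r->mm, block);
	mutex_unlock(&r->lock);
}

static void region_fini(struct region *r)
{
	/* all blocks must have been freed back by now */
	i915_buddy_fini(&r->mm);
	mutex_destroy(&r->lock);
}
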
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a28bcd2d7c09..a0e437aa65b7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -28,6 +28,7 @@
#include "gt/intel_engine.h"
#include "i915_drv.h"
+#include "i915_memcpy.h"
/**
* DOC: batch buffer command parser
@@ -52,13 +53,11 @@
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access.
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
@@ -83,9 +82,9 @@
* in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
*/
/*
@@ -103,8 +102,6 @@ struct drm_i915_cmd_descriptor {
* CMD_DESC_REJECT: The command is never allowed
* CMD_DESC_REGISTER: The command should be checked against the
* register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
*/
u32 flags;
#define CMD_DESC_FIXED (1<<0)
@@ -112,7 +109,6 @@ struct drm_i915_cmd_descriptor {
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
/*
* The command's unique identification bits and the bitmask to get them.
@@ -193,7 +189,7 @@ struct drm_i915_cmd_table {
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
- .cmd = { (op), ~0u << (opm) }, \
+ .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
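
For reference (a sketch, not part of the patch; field names follow the existing struct drm_i915_cmd_descriptor): the pair built by .cmd = { value, mask } is what the parser compares each command header against, i.e. the opcode bits at and above bit @opm must match exactly.

static bool desc_matches(const struct drm_i915_cmd_descriptor *desc,
			 u32 cmd_header)
{
	/* masked compare: only the opcode field selected by the mask counts */
	return (cmd_header & desc->cmd.mask) == desc->cmd.value;
}
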
@@ -208,14 +204,13 @@ struct drm_i915_cmd_table {
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
- CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@@ -240,12 +235,12 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
/*
* MI_BATCH_BUFFER_START requires some special handling. It's not
* really a 'skip' action but it doesn't seem like it's worth adding
- * a new action. See i915_parse_cmds().
+ * a new action. See intel_engine_cmd_parser().
*/
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
@@ -312,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -329,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -373,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -411,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
}}, ),
};
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
@@ -445,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ * 1) Those that touch registers
+ * 2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions are standard 0xFF length mask, and
+ * none allow access to non-general registers, so in fact no BLT cmds are
+ * included in the table at all.
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
+ CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_BB_START_OPERAND_MASK,
+ .expected = MI_BB_START_OPERAND_EXPECT,
+ }}, ),
+};
+
static const struct drm_i915_cmd_descriptor noop_desc =
CMD(MI_NOOP, SMI, F, 1, S);
@@ -462,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
#undef R
#undef W
#undef B
-#undef M
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+ { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
/*
* Register whitelists, sorted by increasing register offset.
*/
@@ -611,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+ REG32(BCS_SWCTRL),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG64_IDX(BCS_GPR, 0),
+ REG64_IDX(BCS_GPR, 1),
+ REG64_IDX(BCS_GPR, 2),
+ REG64_IDX(BCS_GPR, 3),
+ REG64_IDX(BCS_GPR, 4),
+ REG64_IDX(BCS_GPR, 5),
+ REG64_IDX(BCS_GPR, 6),
+ REG64_IDX(BCS_GPR, 7),
+ REG64_IDX(BCS_GPR, 8),
+ REG64_IDX(BCS_GPR, 9),
+ REG64_IDX(BCS_GPR, 10),
+ REG64_IDX(BCS_GPR, 11),
+ REG64_IDX(BCS_GPR, 12),
+ REG64_IDX(BCS_GPR, 13),
+ REG64_IDX(BCS_GPR, 14),
+ REG64_IDX(BCS_GPR, 15),
};
#undef REG64
@@ -630,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
struct drm_i915_reg_table {
const struct drm_i915_reg_descriptor *regs;
int num_regs;
- bool master;
};
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+ { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -669,7 +731,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
return 0xFF;
}
- DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -692,7 +754,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
return 0xFF;
}
- DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -705,7 +767,18 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
else if (client == INSTR_BC_CLIENT)
return 0xFF;
- DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
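
The length-mask helpers above only classify the command header: the client field in the top bits selects which engine-specific decoder applies, and the returned mask is later combined with LENGTH_BIAS in the parser loop to get the command's dword count. A minimal, self-contained sketch of that decode (the shift, header and bias values are hard-coded for illustration and are not taken from the driver headers):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CLIENT_SHIFT 29   /* client field assumed in bits 31:29 */
#define EXAMPLE_LENGTH_BIAS  2    /* bias added to the encoded length   */

/* Apply a per-engine length mask to a variable-length command header. */
static uint32_t cmd_dword_count(uint32_t header, uint32_t length_mask)
{
	return (header & length_mask) + EXAMPLE_LENGTH_BIAS;
}

int main(void)
{
	uint32_t header = 0x54000004;                     /* hypothetical BLT command */
	uint32_t client = header >> EXAMPLE_CLIENT_SHIFT; /* 2: blitter client */
	uint32_t mask   = 0xFF;                           /* what the gen9 helper returns */

	printf("client=%u, command is %u dwords long\n",
	       client, cmd_dword_count(header, mask));   /* client=2, 6 dwords */
	return 0;
}
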
@@ -866,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN(engine->i915, 7))
+ if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+ engine->class == COPY_ENGINE_CLASS))
return;
switch (engine->class) {
case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_render_ring_cmds;
+ cmd_tables = hsw_render_ring_cmd_table;
cmd_table_count =
- ARRAY_SIZE(hsw_render_ring_cmds);
+ ARRAY_SIZE(hsw_render_ring_cmd_table);
} else {
- cmd_tables = gen7_render_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ cmd_tables = gen7_render_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
}
if (IS_HASWELL(engine->i915)) {
@@ -887,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->reg_tables = ivb_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
-
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VIDEO_DECODE_CLASS:
- cmd_tables = gen7_video_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ cmd_tables = gen7_video_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case COPY_ENGINE_CLASS:
- if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_blt_ring_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ if (IS_GEN(engine->i915, 9)) {
+ cmd_tables = gen9_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+ engine->get_cmd_length_mask =
+ gen9_blt_get_cmd_length_mask;
+
+ /* BCS Engine unsafe without parser */
+ engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+ } else if (IS_HASWELL(engine->i915)) {
+ cmd_tables = hsw_blt_ring_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
} else {
- cmd_tables = gen7_blt_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ cmd_tables = gen7_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_HASWELL(engine->i915)) {
+ if (IS_GEN(engine->i915, 9)) {
+ engine->reg_tables = gen9_blt_reg_tables;
+ engine->reg_table_count =
+ ARRAY_SIZE(gen9_blt_reg_tables);
+ } else if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
-
- engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VIDEO_ENHANCEMENT_CLASS:
- cmd_tables = hsw_vebox_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ cmd_tables = hsw_vebox_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -942,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+ engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}
/**
@@ -954,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!intel_engine_needs_cmd_parser(engine))
+ if (!intel_engine_using_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -1028,119 +1112,98 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
}
static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
{
const struct drm_i915_reg_table *table = engine->reg_tables;
+ const struct drm_i915_reg_descriptor *reg = NULL;
int count = engine->reg_table_count;
- for (; count > 0; ++table, --count) {
- if (!table->master || is_master) {
- const struct drm_i915_reg_descriptor *reg;
+ for (; !reg && (count > 0); ++table, --count)
+ reg = __find_reg(table->regs, table->num_regs, addr);
- reg = __find_reg(table->regs, table->num_regs, addr);
- if (reg != NULL)
- return reg;
- }
- }
-
- return NULL;
+ return reg;
}
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool *needs_clflush_after)
+ u32 offset, u32 length)
{
- unsigned int src_needs_clflush;
- unsigned int dst_needs_clflush;
+ bool needs_clflush;
void *dst, *src;
int ret;
- ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
- if (ret)
- return ERR_PTR(ret);
-
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
- i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
return dst;
- ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ ret = i915_gem_object_pin_pages(src_obj);
if (ret) {
i915_gem_object_unpin_map(dst_obj);
return ERR_PTR(ret);
}
+ needs_clflush =
+ !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+
src = ERR_PTR(-ENODEV);
- if (src_needs_clflush &&
- i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
+ if (needs_clflush && i915_has_memcpy_from_wc()) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
- i915_memcpy_from_wc(dst,
- src + batch_start_offset,
- ALIGN(batch_len, 16));
+ i915_unaligned_memcpy_from_wc(dst,
+ src + offset,
+ length);
i915_gem_object_unpin_map(src_obj);
}
}
if (IS_ERR(src)) {
void *ptr;
- int offset, n;
-
- offset = offset_in_page(batch_start_offset);
+ int x, n;
- /* We can avoid clflushing partial cachelines before the write
+ /*
+ * We can avoid clflushing partial cachelines before the write
* if we only ever write full cache-lines. Since we know that
* both the source and destination are in multiples of
* PAGE_SIZE, we can simply round up to the next cacheline.
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
- if (dst_needs_clflush & CLFLUSH_BEFORE)
- batch_len = roundup(batch_len,
- boot_cpu_data.x86_clflush_size);
+ if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ length = round_up(length,
+ boot_cpu_data.x86_clflush_size);
ptr = dst;
- for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
- int len = min_t(int, batch_len, PAGE_SIZE - offset);
+ x = offset_in_page(offset);
+ for (n = offset >> PAGE_SHIFT; length; n++) {
+ int len = min_t(int, length, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
- if (src_needs_clflush)
- drm_clflush_virt_range(src + offset, len);
- memcpy(ptr, src + offset, len);
+ if (needs_clflush)
+ drm_clflush_virt_range(src + x, len);
+ memcpy(ptr, src + x, len);
kunmap_atomic(src);
ptr += len;
- batch_len -= len;
- offset = 0;
+ length -= len;
+ x = 0;
}
}
- i915_gem_object_finish_access(src_obj);
+ i915_gem_object_unpin_pages(src_obj);
/* dst_obj is returned with vmap pinned */
- *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-
return dst;
}
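
The rounding in the kmap fallback above is what lets the copy skip a pre-write clflush: by growing the copy length to a whole number of cachelines, only full lines are ever written. A standalone sketch of that round-up, assuming a 64-byte clflush granularity (the driver reads the real value from boot_cpu_data.x86_clflush_size at runtime):

#include <stdint.h>
#include <stdio.h>

/* round_up() for a power-of-two alignment, as used for the copy length. */
static uint32_t round_up_pow2(uint32_t x, uint32_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint32_t clflush_size = 64;   /* assumption for the example */

	printf("%u -> %u\n", 100u, round_up_pow2(100, clflush_size));   /* 128  */
	printf("%u -> %u\n", 4096u, round_up_pow2(4096, clflush_size)); /* 4096 */
	return 0;
}
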
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd, u32 length,
- const bool is_master)
+ const u32 *cmd, u32 length)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
if (desc->flags & CMD_DESC_REJECT) {
- DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
- return false;
- }
-
- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
- *cmd);
+ DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
return false;
}
@@ -1157,11 +1220,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(engine, is_master, reg_addr);
+ find_reg(engine, reg_addr);
if (!reg) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
- reg_addr, *cmd, engine->name);
+ DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
+ reg_addr, *cmd, engine->name);
return false;
}
@@ -1171,22 +1234,22 @@ static bool check_cmd(const struct intel_engine_cs *engine,
*/
if (reg->mask) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
+ reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
+ reg_addr);
return false;
}
}
@@ -1213,8 +1276,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
}
if (desc->bits[i].offset >= length) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
- *cmd, engine->name);
+ DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+ *cmd, engine->name);
return false;
}
@@ -1222,11 +1285,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
- *cmd,
- desc->bits[i].mask,
- desc->bits[i].expected,
- dword, engine->name);
+ DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, engine->name);
return false;
}
}
@@ -1235,16 +1298,98 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
+static int check_bbstart(u32 *cmd, u32 offset, u32 length,
+ u32 batch_length,
+ u64 batch_addr,
+ u64 shadow_addr,
+ const unsigned long *jump_whitelist)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (!jump_whitelist) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64 *)(cmd + 1);
+ jump_offset = jump_target - batch_addr;
+
+ /*
+ * Any underflow of jump_target is guaranteed to be outside the range
+ * of a u32, so >= test catches both too large and too small
+ */
+ if (jump_offset >= batch_length) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_length is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
+ *(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;
+
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (IS_ERR(jump_whitelist))
+ return PTR_ERR(jump_whitelist);
+
+ if (!test_bit(target_cmd_index, jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
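
The out-of-batch test in check_bbstart() leans on unsigned wrap-around: a jump_target below batch_addr underflows to a very large u64, so the single `jump_offset >= batch_length` compare rejects targets on either side of the batch. A small userspace sketch of the same check (names and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 if the BB_START target lands inside
 * [batch_addr, batch_addr + batch_length), mirroring the range test above.
 */
static int validate_jump(uint64_t jump_target, uint64_t batch_addr,
			 uint32_t batch_length)
{
	/* An underflow wraps to a value far above any u32 batch_length,
	 * so one unsigned compare rejects targets before and after the batch.
	 */
	uint64_t jump_offset = jump_target - batch_addr;

	return jump_offset >= batch_length ? -1 : 0;
}

int main(void)
{
	uint64_t base = 0x100000;

	printf("%d\n", validate_jump(base + 0x40, base, 0x1000));   /*  0: inside */
	printf("%d\n", validate_jump(base - 0x40, base, 0x1000));   /* -1: before */
	printf("%d\n", validate_jump(base + 0x2000, base, 0x1000)); /* -1: after  */
	return 0;
}
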
+
+static unsigned long *alloc_whitelist(u32 batch_length)
+{
+ unsigned long *jmp;
+
+ /*
+ * We expect batch_length to be less than 256KiB for known users,
+ * i.e. we need at most an 8KiB bitmap allocation which should be
+ * reasonably cheap due to kmalloc caches.
+ */
+
+ /* Prefer to report transient allocation failure rather than hit oom */
+ jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!jmp)
+ return ERR_PTR(-ENOMEM);
+
+ return jmp;
+}
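
The sizing argument in the comment works out as one whitelist bit per batch dword: a 256KiB batch has 256KiB / 4 = 65536 command slots, which needs 65536 / 8 = 8KiB of bitmap. A rough illustration of that arithmetic (calloc() stands in for the kernel's bitmap_zalloc()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint32_t batch_length = 256 * 1024;       /* worst case from the comment   */
	size_t nbits  = (batch_length + 3) / 4;   /* one bit per u32 command slot  */
	size_t nbytes = (nbits + 7) / 8;          /* bitmap storage                */

	unsigned char *bitmap = calloc(nbytes, 1); /* stand-in for bitmap_zalloc() */
	if (!bitmap)
		return 1;

	printf("%u byte batch -> %zu bits -> %zu bytes of whitelist\n",
	       batch_length, nbits, nbytes);       /* 262144 -> 65536 -> 8192 */
	free(bitmap);
	return 0;
}
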
+
#define LENGTH_BIAS 2
+static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
+
/**
- * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
- * @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
- * @batch_start_offset: byte offset in the batch at which execution starts
- * @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @batch: the batch buffer in question
+ * @batch_offset: byte offset in the batch at which execution starts
+ * @batch_length: length of the commands in batch_obj
+ * @shadow: validated copy of the batch buffer in question
+ * @trampoline: whether to emit a conditional trampoline at the end of the batch
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1253,90 +1398,146 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* if the batch appears legal but should use hardware parsing
*/
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool is_master)
+ struct i915_vma *batch,
+ u32 batch_offset,
+ u32 batch_length,
+ struct i915_vma *shadow,
+ bool trampoline)
{
- u32 *cmd, *batch_end;
+ u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- bool needs_clflush_after = false;
+ unsigned long *jump_whitelist;
+ u64 batch_addr, shadow_addr;
int ret = 0;
- cmd = copy_batch(shadow_batch_obj, batch_obj,
- batch_start_offset, batch_len,
- &needs_clflush_after);
+ GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
+ GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
+ GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
+ batch->size));
+ GEM_BUG_ON(!batch_length);
+
+ cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
if (IS_ERR(cmd)) {
- DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+ DRM_DEBUG("CMD: Failed to copy batch\n");
return PTR_ERR(cmd);
}
+ jump_whitelist = NULL;
+ if (!trampoline)
+ /* Defer failure until attempted use */
+ jump_whitelist = alloc_whitelist(batch_length);
+
+ shadow_addr = gen8_canonical_addr(shadow->node.start);
+ batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_len / sizeof(*batch_end));
+ batch_end = cmd + batch_length / sizeof(*batch_end);
do {
u32 length;
- if (*cmd == MI_BATCH_BUFFER_END) {
- if (needs_clflush_after) {
- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
- drm_clflush_virt_range(ptr,
- (void *)(cmd + 1) - ptr);
- }
+ if (*cmd == MI_BATCH_BUFFER_END)
break;
- }
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
- DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
- *cmd);
+ DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
ret = -EINVAL;
break;
}
- /*
- * If the batch buffer contains a chained batch, return an
- * error that tells the caller to abort and dispatch the
- * workload as a non-secure batch.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- break;
- }
-
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
- length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+ length = (*cmd & desc->length.mask) + LENGTH_BIAS;
if ((batch_end - cmd) < length) {
- DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
- *cmd,
- length,
- batch_end - cmd);
+ DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+ *cmd,
+ length,
+ batch_end - cmd);
ret = -EINVAL;
break;
}
- if (!check_cmd(engine, desc, cmd, length, is_master)) {
+ if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES;
break;
}
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(cmd, offset, length, batch_length,
+ batch_addr, shadow_addr,
+ jump_whitelist);
+ break;
+ }
+
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ __set_bit(offset, jump_whitelist);
+
cmd += length;
+ offset += length;
if (cmd >= batch_end) {
- DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
break;
}
} while (1);
- i915_gem_object_unpin_map(shadow_batch_obj);
+ if (trampoline) {
+ /*
+ * With the trampoline, the shadow is executed twice.
+ *
+ * 1 - starting at offset 0, in privileged mode
+ * 2 - starting at offset batch_len, as non-privileged
+ *
+ * Only if the batch is valid and safe to execute, do we
+ * allow the first privileged execution to proceed. If not,
+ * we terminate the first batch and use the second batchbuffer
+ * entry to chain to the original unsafe non-privileged batch,
+ * leaving it to the HW to validate.
+ */
+ *batch_end = MI_BATCH_BUFFER_END;
+
+ if (ret) {
+ /* Batch unsafe to execute with privileges, cancel! */
+ cmd = page_mask_bits(shadow->obj->mm.mapping);
+ *cmd = MI_BATCH_BUFFER_END;
+
+ /* If batch is unsafe but valid, jump to the original */
+ if (ret == -EACCES) {
+ unsigned int flags;
+
+ flags = MI_BATCH_NON_SECURE_I965;
+ if (IS_HASWELL(engine->i915))
+ flags = MI_BATCH_NON_SECURE_HSW;
+
+ GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+ __gen6_emit_bb_start(batch_end,
+ batch_addr,
+ flags);
+
+ ret = 0; /* allow execution */
+ }
+ }
+
+ if (shadow_needs_clflush(shadow->obj))
+ drm_clflush_virt_range(batch_end, 8);
+ }
+
+ if (shadow_needs_clflush(shadow->obj)) {
+ void *ptr = page_mask_bits(shadow->obj->mm.mapping);
+
+ drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+ }
+
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ kfree(jump_whitelist);
+ i915_gem_object_unpin_map(shadow->obj);
return ret;
}
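
To restate the trampoline comment as data flow: copy_batch() fills the front of the shadow, the dword after the copied commands always gets a batch-end so the privileged first pass terminates, and only when parsing fails with -EACCES is the front dword turned into a batch-end and the trailing slot turned into a chained jump back to the original, unprivileged batch. A schematic sketch of those two mutations (the opcode values are placeholders, not the real MI encodings):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder opcodes for illustration only; the driver emits the real
 * MI_BATCH_BUFFER_END / MI_BATCH_BUFFER_START encodings.
 */
#define FAKE_BB_END   0xDEAD0000u
#define FAKE_BB_START 0xBEEF0000u

int main(void)
{
	uint32_t shadow[16];
	size_t batch_dwords = 8;	/* commands copied in by copy_batch()     */
	int parse_ret = -EACCES;	/* pretend the batch was valid but unsafe */

	memset(shadow, 0, sizeof(shadow));

	/* The privileged first pass always terminates right after the copy. */
	shadow[batch_dwords] = FAKE_BB_END;

	if (parse_ret) {
		/* Cancel the privileged pass at its very first dword. */
		shadow[0] = FAKE_BB_END;

		/* Unsafe but well-formed: let the second, non-privileged pass
		 * chain back to the original batch instead.
		 */
		if (parse_ret == -EACCES)
			shadow[batch_dwords] = FAKE_BB_START;
	}

	printf("shadow[0]=0x%08x shadow[%zu]=0x%08x\n",
	       shadow[0], batch_dwords, shadow[batch_dwords]);
	return 0;
}
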
@@ -1352,12 +1553,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv, id) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ for_each_uabi_engine(engine, dev_priv) {
+ if (intel_engine_using_cmd_parser(engine)) {
active = true;
break;
}
@@ -1382,6 +1582,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* the parser enabled.
* 9. Don't whitelist or handle oacontrol specially, as ownership
* for oacontrol state is moving to i915-perf.
+ * 10. Support for Gen9 BCS Parsing
*/
- return 9;
+ return 10;
}
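
Version 10 is what userspace sees through the existing I915_PARAM_CMD_PARSER_VERSION getparam. A minimal query from userspace, assuming the libdrm uapi headers are on the include path and a render node exists at /dev/dri/renderD128 (both assumptions for the example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/i915_drm.h>   /* struct drm_i915_getparam, DRM_IOCTL_I915_GETPARAM */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("cmd parser version: %d\n", version);   /* 10 with this patch */
	close(fd);
	return 0;
}
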
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 62cf34db9280..d5a9b8a964c2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
@@ -39,13 +40,17 @@
#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
+#include "i915_trace.h"
#include "intel_csr.h"
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -56,17 +61,14 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
- seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
- intel_device_info_dump_flags(info, &p);
- intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
- intel_driver_caps_print(&dev_priv->caps, &p);
+ intel_device_info_print_static(INTEL_INFO(i915), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915_modparams, &p);
@@ -75,16 +77,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_active_flag(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_active(obj) ? '*' : ' ';
-}
-
-static char get_pin_flag(struct drm_i915_gem_object *obj)
-{
- return obj->pin_global ? 'p' : ' ';
-}
-
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (i915_gem_object_get_tiling(obj)) {
@@ -97,7 +89,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->userfault_count ? 'g' : ' ';
+ return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -141,13 +133,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
- unsigned int frontbuffer_bits;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_active_flag(obj),
- get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
@@ -216,9 +205,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
}
if (vma->fence)
- seq_printf(m, " , fence: %d%s",
- vma->fence->id,
- i915_active_request_isset(&vma->last_fence) ? "*" : "");
+ seq_printf(m, " , fence: %d", vma->fence->id);
seq_puts(m, ")");
spin_lock(&obj->vma.lock);
@@ -228,23 +215,18 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+ if (i915_gem_object_is_framebuffer(obj))
+ seq_printf(m, " (fb)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
-
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
- if (frontbuffer_bits)
- seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
u64 total, unbound;
- u64 global, shared;
u64 active, inactive;
u64 closed;
};
@@ -255,101 +237,113 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ return 0;
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
stats->unbound += obj->base.size;
- if (obj->base.name || obj->base.dma_buf)
- stats->shared += obj->base.size;
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_ggtt(vma)) {
- stats->global += vma->node.size;
- } else {
- if (vma->vm != stats->vm)
+ spin_lock(&obj->vma.lock);
+ if (!stats->vm) {
+ for_each_ggtt_vma(vma, obj) {
+ if (!drm_mm_node_allocated(&vma->node))
continue;
- }
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
+ }
+ } else {
+ struct rb_node *p = obj->vma.tree.rb_node;
+
+ while (p) {
+ long cmp;
+
+ vma = rb_entry(p, typeof(*vma), obj_node);
+ cmp = i915_vma_compare(vma, stats->vm, NULL);
+ if (cmp == 0) {
+ if (drm_mm_node_allocated(&vma->node)) {
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
+
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
+ }
+ break;
+ }
+ if (cmp < 0)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
}
+ spin_unlock(&obj->vma.lock);
+ i915_gem_object_put(obj);
return 0;
}
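
When a specific vm is requested, per_file_stats() now descends obj->vma.tree with i915_vma_compare() instead of walking every vma of the object. Stripped of the i915 types, that loop is an ordinary binary-search descent; a generic array-based equivalent (purely illustrative, not the driver's data structures):

#include <stdio.h>

/* Generic stand-in for the rb-tree descent: compare the wanted key against
 * the current node and step left or right until a match or a leaf.
 */
static int find(const int *keys, int n, int wanted)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (wanted == keys[mid])
			return mid;          /* match: accumulate this vma's stats */
		if (wanted < keys[mid])
			hi = mid - 1;
		else
			lo = mid + 1;
	}
	return -1;                           /* no vma bound into this vm */
}

int main(void)
{
	int keys[] = { 3, 7, 11, 19, 23 };

	printf("%d %d\n", find(keys, 5, 11), find(keys, 5, 4));  /* 2 -1 */
	return 0;
}
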
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.global, \
- stats.shared, \
stats.unbound, \
stats.closed); \
} while (0)
-static void print_batch_pool_stats(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- struct file_stats stats = {};
- enum intel_engine_id id;
- int j;
-
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link)
- per_file_stats(0, obj, &stats);
- }
- }
-
- print_file_stats(m, "[k]batch pool", stats);
-}
-
static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
struct file_stats kstats = {};
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &i915->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- if (ce->state)
- per_file_stats(0, ce->state->obj, &kstats);
- if (ce->ring)
+ if (intel_context_pin_if_active(ce)) {
+ rcu_read_lock();
+ if (ce->state)
+ per_file_stats(0,
+ ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ rcu_read_unlock();
+ intel_context_unpin(ce);
+ }
}
i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = { .vm = ctx->vm, };
+ struct file_stats stats = {
+ .vm = rcu_access_pointer(ctx->vm),
+ };
struct drm_file *file = ctx->file_priv->file;
struct task_struct *task;
char name[80];
- spin_lock(&file->table_lock);
+ rcu_read_lock();
idr_for_each(&file->object_idr, per_file_stats, &stats);
- spin_unlock(&file->table_lock);
+ rcu_read_unlock();
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
@@ -359,7 +353,12 @@ static void print_context_stats(struct seq_file *m,
print_file_stats(m, name, stats);
}
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
print_file_stats(m, "[k]contexts", kstats);
}
@@ -367,66 +366,19 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- int ret;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
- seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+ seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
+ atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
-
+ for_each_memory_region(mr, i915, id)
+ seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
+ mr->name, &mr->total, &mr->avail);
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- print_batch_pool_stats(m, i915);
print_context_stats(m, i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return 0;
-}
-
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int total = 0;
- int ret, j;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
- int count;
-
- count = 0;
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link)
- count++;
- seq_printf(m, "%s cache[%d]: %d objects\n",
- engine->name, j, count);
-
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link) {
- seq_puts(m, " ");
- describe_obj(m, obj);
- seq_putc(m, '\n');
- }
-
- total += count;
- }
- }
-
- seq_printf(m, "total: %d\n", total);
-
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -434,7 +386,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- int pipe;
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
@@ -487,7 +439,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
int i, pipe;
@@ -586,6 +537,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
gen8_display_interrupt_info(m);
} else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_wakeref_t pref;
+
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
@@ -596,7 +549,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
- intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
pref = intel_display_power_get_if_enabled(dev_priv,
@@ -630,12 +582,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "PM IMR:\t\t%08x\n",
I915_READ(GEN6_PMIMR));
+ pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -690,7 +644,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
} else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_engine(engine, dev_priv, id) {
+ for_each_uabi_engine(engine, dev_priv) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, ENGINE_READ(engine, RING_IMR));
@@ -711,10 +665,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
rcu_read_lock();
for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ struct i915_vma *vma = reg->vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, i915->ggtt.fence_regs[i].pin_count);
+ i, atomic_read(&reg->pin_count));
if (!vma)
seq_puts(m, "unused");
else
@@ -730,7 +685,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
ssize_t ret;
void *buf;
@@ -743,7 +698,7 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
if (!buf)
return -ENOMEM;
- ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
if (ret <= 0)
goto out;
@@ -759,19 +714,19 @@ out:
static int gpu_state_release(struct inode *inode, struct file *file)
{
- i915_gpu_state_put(file->private_data);
+ i915_gpu_coredump_put(file->private_data);
return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_state *gpu;
+ struct i915_gpu_coredump *gpu;
intel_wakeref_t wakeref;
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_capture_gpu_state(i915);
+ gpu = i915_gpu_coredump(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -793,7 +748,7 @@ i915_error_state_write(struct file *filp,
size_t cnt,
loff_t *ppos)
{
- struct i915_gpu_state *error = filp->private_data;
+ struct i915_gpu_coredump *error = filp->private_data;
if (!error)
return 0;
@@ -806,7 +761,7 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
error = i915_first_error_state(inode->i_private);
if (IS_ERR(error))
@@ -830,7 +785,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uncore *uncore = &dev_priv->uncore;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
int ret = 0;
@@ -866,23 +821,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "actual GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -916,7 +871,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
else
reqf >>= 25;
}
- reqf = intel_gpu_freq(dev_priv, reqf);
+ reqf = intel_gpu_freq(rps, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -929,8 +884,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_gpu_freq(dev_priv,
- intel_get_cagf(dev_priv, rpstat));
+ cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1007,37 +961,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else {
seq_puts(m, "no P-state info available\n");
}
@@ -1050,115 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
- struct seq_file *m,
- struct intel_instdone *instdone)
-{
- int slice;
- int subslice;
-
- seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
- instdone->instdone);
-
- if (INTEL_GEN(dev_priv) <= 3)
- return;
-
- seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
- instdone->slice_common);
-
- if (INTEL_GEN(dev_priv) <= 6)
- return;
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->sampler[slice][subslice]);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->row[slice][subslice]);
-}
-
-static int i915_hangcheck_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- u64 acthd[I915_NUM_ENGINES];
- struct intel_instdone instdone;
- intel_wakeref_t wakeref;
- enum intel_engine_id id;
-
- seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
- if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
- seq_puts(m, "\tWedged\n");
- if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
- seq_puts(m, "\tDevice (global) reset in progress\n");
-
- if (!i915_modparams.enable_hangcheck) {
- seq_puts(m, "Hangcheck disabled\n");
- return 0;
- }
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- for_each_engine(engine, dev_priv, id)
- acthd[id] = intel_engine_get_active_head(engine);
-
- intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
- }
-
- if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
- seq_printf(m, "Hangcheck active, timer fires in %dms\n",
- jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
- jiffies));
- else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
- seq_puts(m, "Hangcheck active, work pending\n");
- else
- seq_puts(m, "Hangcheck inactive\n");
-
- seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s: %d ms ago\n",
- engine->name,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
-
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)engine->hangcheck.acthd,
- (long long)acthd[id]);
-
- if (engine->id == RCS0) {
- seq_puts(m, "\tinstdone read =\n");
-
- i915_instdone_info(dev_priv, m, &instdone);
-
- seq_puts(m, "\tinstdone accu =\n");
-
- i915_instdone_info(dev_priv, m,
- &engine->hangcheck.instdone);
- }
- }
-
- return 0;
-}
-
-static int i915_reset_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s = %u\n", engine->name,
- i915_reset_engine_count(error, engine));
- }
-
- return 0;
-}
-
-static int ironlake_drpc_info(struct seq_file *m)
+static int ilk_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore *uncore = &i915->uncore;
@@ -1224,7 +1070,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake.count);
+ uncore->user_forcewake_count);
for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
@@ -1238,11 +1084,13 @@ static void print_rc6_res(struct seq_file *m,
const char *title,
const i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- seq_printf(m, "%s %u (%llu us)\n",
- title, I915_READ(reg),
- intel_rc6_residency_us(dev_priv, reg));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(&i915->uncore, reg),
+ intel_rc6_residency_us(&i915->gt.rc6, reg));
}
static int vlv_drpc_info(struct seq_file *m)
@@ -1364,7 +1212,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
else if (INTEL_GEN(dev_priv) >= 6)
err = gen6_drpc_info(m);
else
- err = ironlake_drpc_info(m);
+ err = ilk_drpc_info(m);
}
return err;
@@ -1517,34 +1365,10 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_emon_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(i915, 5))
- return -ENODEV;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- unsigned long temp, chipset, gfx;
-
- temp = i915_mch_val(i915);
- chipset = i915_chipset_val(i915);
- gfx = i915_gfx_val(i915);
-
- seq_printf(m, "GMCH temp: %ld\n", temp);
- seq_printf(m, "Chipset power: %ld\n", chipset);
- seq_printf(m, "GFX power: %ld\n", gfx);
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
- }
-
- return 0;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
@@ -1569,10 +1393,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1583,21 +1408,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_opregion *opregion = &dev_priv->opregion;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
- mutex_unlock(&dev->struct_mutex);
-
-out:
return 0;
}
@@ -1617,11 +1432,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
@@ -1656,7 +1466,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1669,23 +1478,20 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
seq_puts(m, "HW context ");
- if (!list_empty(&ctx->hw_id_link))
- seq_printf(m, "%x [pin %u]", ctx->hw_id,
- atomic_read(&ctx->hw_id_pin_count));
if (ctx->pid) {
struct task_struct *task;
@@ -1706,19 +1512,24 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- describe_obj(m, ce->state->obj);
- if (ce->ring)
+ if (intel_context_pin_if_active(ce)) {
+ seq_printf(m, "%s: ", ce->engine->name);
+ if (ce->state)
+ describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
+ seq_putc(m, '\n');
+ intel_context_unpin(ce);
+ }
}
i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n');
- }
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock(&i915->gem.contexts.lock);
return 0;
}
@@ -1756,9 +1567,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
@@ -1813,22 +1624,7 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 act_freq = rps->cur_freq;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
- act_freq = vlv_punit_read(dev_priv,
- PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
- act_freq = (act_freq >> 8) & 0xff;
- } else {
- act_freq = intel_get_cagf(dev_priv,
- I915_READ(GEN6_RPSTAT1));
- }
- }
+ struct intel_rps *rps = &dev_priv->gt.rps;
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
@@ -1836,17 +1632,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq),
- intel_gpu_freq(dev_priv, act_freq));
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_rps_read_actual_frequency(rps));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
- intel_gpu_freq(dev_priv, rps->idle_freq),
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
@@ -1894,11 +1690,11 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
struct drm_printer p;
- if (!HAS_HUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->huc.fw, &p);
+ intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -1912,11 +1708,11 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
struct drm_printer p;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->guc.fw, &p);
+ intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
@@ -1959,11 +1755,11 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
static void i915_guc_log_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_guc_log *log = &dev_priv->guc.log;
+ struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
- if (!intel_guc_log_relay_enabled(log)) {
- seq_puts(m, "GuC log relay disabled\n");
+ if (!intel_guc_log_relay_created(log)) {
+ seq_puts(m, "GuC log relay not created\n");
return;
}
@@ -1980,55 +1776,15 @@ static void i915_guc_log_info(struct seq_file *m,
}
}
-static void i915_guc_client_info(struct seq_file *m,
- struct drm_i915_private *dev_priv,
- struct intel_guc_client *client)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u64 tot = 0;
-
- seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
- client->priority, client->stage_id, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
-
- for_each_engine(engine, dev_priv, id) {
- u64 submissions = client->submissions[id];
- tot += submissions;
- seq_printf(m, "\tSubmissions: %llu %s\n",
- submissions, engine->name);
- }
- seq_printf(m, "\tTotal: %llu\n", tot);
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
if (!USES_GUC(dev_priv))
return -ENODEV;
i915_guc_log_info(m, dev_priv);
- if (!USES_GUC_SUBMISSION(dev_priv))
- return 0;
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- seq_printf(m, "\nDoorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
-
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
- i915_guc_client_info(m, dev_priv, guc->execbuf_client);
- if (guc->preempt_client) {
- seq_printf(m, "\nGuC preempt client @ %p:\n",
- guc->preempt_client);
- i915_guc_client_info(m, dev_priv, guc->preempt_client);
- }
-
/* Add more as required ... */
return 0;
@@ -2037,10 +1793,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
+ const struct intel_guc *guc = &dev_priv->gt.uc.guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
- struct intel_guc_client *client = guc->execbuf_client;
- intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
@@ -2069,7 +1823,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
desc->wq_addr, desc->wq_size);
seq_putc(m, '\n');
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
+ for_each_uabi_engine(engine, dev_priv) {
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc =
&desc->lrc[guc_engine_id];
@@ -2097,13 +1851,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
if (dump_load_err)
- obj = dev_priv->guc.load_err_log;
- else if (dev_priv->guc.log.vma)
- obj = dev_priv->guc.log.vma->obj;
+ obj = dev_priv->gt.uc.load_err_log;
+ else if (dev_priv->gt.uc.guc.log.vma)
+ obj = dev_priv->gt.uc.guc.log.vma->obj;
if (!obj)
return 0;
@@ -2134,7 +1888,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
return 0;
}
@@ -2146,7 +1900,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2155,14 +1909,16 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
+ struct intel_guc *guc = &i915->gt.uc.guc;
+ struct intel_guc_log *log = &guc->log;
- if (!USES_GUC(dev_priv))
+ if (!intel_guc_is_running(guc))
return -ENODEV;
- file->private_data = &dev_priv->guc.log;
+ file->private_data = log;
- return intel_guc_log_relay_open(&dev_priv->guc.log);
+ return intel_guc_log_relay_open(log);
}
static ssize_t
@@ -2172,18 +1928,31 @@ i915_guc_log_relay_write(struct file *filp,
loff_t *ppos)
{
struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
- intel_guc_log_relay_flush(log);
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
- return cnt;
+ /*
+ * Enable and start the guc log relay on value of 1.
+ * Flush log relay for any other value.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
}
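For reference, the handler above accepts either decimal or 0x-prefixed hex (base 0), and the `ret ?: cnt` idiom propagates the error from a failed relay start while a successful write reports the whole buffer as consumed. A minimal sketch of the same pattern, with demo_start()/demo_flush() as hypothetical stand-ins for the relay calls:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

int demo_start(void *priv);	/* hypothetical */
void demo_flush(void *priv);	/* hypothetical */

static ssize_t demo_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	int val, ret;

	/* base 0: plain decimal or 0x-prefixed hex */
	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
	if (ret < 0)
		return ret;

	if (val == 1)
		ret = demo_start(filp->private_data);
	else
		demo_flush(filp->private_data);

	/* ret ?: cnt evaluates to ret when non-zero, otherwise cnt */
	return ret ?: cnt;
}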
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- intel_guc_log_relay_close(&dev_priv->guc.log);
+ struct drm_i915_private *i915 = inode->i_private;
+ struct intel_guc *guc = &i915->gt.uc.guc;
+ intel_guc_log_relay_close(&guc->log);
return 0;
}
@@ -2210,7 +1979,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
struct drm_connector *connector = m->private;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
int ret;
if (!CAN_PSR(dev_priv)) {
@@ -2258,7 +2027,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = I915_READ(EDP_PSR2_STATUS);
+ val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2274,7 +2043,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = I915_READ(EDP_PSR_STATUS);
+ val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2313,14 +2082,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
- if (!psr->enabled)
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
goto unlock;
+ }
if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2333,7 +2106,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+ val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@@ -2351,8 +2125,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
* Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
- su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
+ frame));
+ su_frames_val[frame / 3] = val;
+ }
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
@@ -2499,6 +2276,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
if (!HAS_CSR(dev_priv))
return -ENODEV;
@@ -2516,15 +2294,26 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (WARN_ON(INTEL_GEN(dev_priv) > 11))
- goto out;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+ * NOTE: DMC_DEBUG3 is a general purpose reg.
+ * According to Bspec 49196, the DMC f/w reuses the DC5/6 counter
+ * reg for DC3CO debugging and validation, but the TGL DMC f/w
+ * uses the DMC_DEBUG3 reg for the DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT));
- if (!IS_GEN9_LP(dev_priv))
- seq_printf(m, "DC5 -> DC6 count: %d\n",
- I915_READ(SKL_CSR_DC5_DC6_COUNT));
+ seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -2537,7 +2326,7 @@ out:
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int i;
@@ -2548,66 +2337,61 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
}
static void intel_encoder_info(struct seq_file *m,
- struct intel_crtc *intel_crtc,
- struct intel_encoder *intel_encoder)
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_connector *intel_connector;
- struct drm_encoder *encoder;
-
- encoder = &intel_encoder->base;
- seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
- encoder->base.id, encoder->name);
- for_each_connector_on_encoder(dev, encoder, intel_connector) {
- struct drm_connector *connector = &intel_connector->base;
- seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(connector->status));
- if (connector->status == connector_status_connected) {
- struct drm_display_mode *mode = &crtc->mode;
- seq_printf(m, ", mode:\n");
- intel_seq_print_mode(m, 2, mode);
- } else {
- seq_putc(m, '\n');
- }
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
}
+ drm_connector_list_iter_end(&conn_iter);
}
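The rework above replaces for_each_connector_on_encoder() with the reference-counted connector iterator and matches connectors by the best_encoder recorded in their committed state. A minimal sketch of that iteration pattern, assuming the caller already holds the modeset locks as i915_display_info() does:

#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>

static void demo_list_connectors_on(struct drm_device *dev,
				    struct drm_encoder *encoder)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* connector->state is the committed (uapi) state */
		if (connector->state->best_encoder != encoder)
			continue;

		/* connector is attached to this encoder; use it here */
	}
	drm_connector_list_iter_end(&conn_iter);
}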
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder;
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct drm_framebuffer *fb = plane_state->fb;
+ const struct drm_display_mode *mode = panel->fixed_mode;
- if (fb)
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- fb->base.id, plane_state->src_x >> 16,
- plane_state->src_y >> 16, fb->width, fb->height);
- else
- seq_puts(m, "\tprimary plane disabled\n");
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
- intel_encoder_info(m, intel_crtc, intel_encoder);
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
{
- struct drm_display_mode *mode = panel->fixed_mode;
+ bool hdcp_cap, hdcp2_cap;
- seq_printf(m, "\tfixed mode:\n");
- intel_seq_print_mode(m, 2, mode);
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
}
static void intel_dp_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
@@ -2616,6 +2400,10 @@ static void intel_dp_info(struct seq_file *m,
drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
&intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
}
static void intel_dp_mst_info(struct seq_file *m,
@@ -2623,7 +2411,7 @@ static void intel_dp_mst_info(struct seq_file *m,
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(&intel_encoder->base);
+ enc_to_mst(intel_encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
@@ -2636,9 +2424,13 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
}
static void intel_lvds_info(struct seq_file *m,
@@ -2651,10 +2443,12 @@ static void intel_connector_info(struct seq_file *m,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct drm_display_mode *mode;
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
- seq_printf(m, "connector %d: type %s, status: %s\n",
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
@@ -2668,24 +2462,24 @@ static void intel_connector_info(struct seq_file *m,
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
- if (!intel_encoder)
+ if (!encoder)
return;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_LVDS:
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ if (encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
- intel_encoder->type == INTEL_OUTPUT_DDI)
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
intel_hdmi_info(m, intel_connector);
break;
default:
@@ -2732,70 +2526,88 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
rotation);
}
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_plane *intel_plane;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane_state *state;
- struct drm_plane *plane = &intel_plane->base;
- struct drm_format_name_buf format_name;
- char rot_str[48];
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
- if (!plane->state) {
- seq_puts(m, "plane->state is NULL!\n");
- continue;
- }
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
- state = plane->state;
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
- if (state->fb) {
- drm_get_format_name(state->fb->format->format,
- &format_name);
- } else {
- sprintf(format_name.str, "N/A");
- }
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ drm_get_format_name(fb->format->format, &format_name);
- plane_rotation(rot_str, sizeof(rot_str), state->rotation);
-
- seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
- plane->base.id,
- plane_type(intel_plane->base.type),
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- (state->src_x >> 16),
- ((state->src_x & 0xffff) * 15625) >> 10,
- (state->src_y >> 16),
- ((state->src_y & 0xffff) * 15625) >> 10,
- (state->src_w >> 16),
- ((state->src_w & 0xffff) * 15625) >> 10,
- (state->src_h >> 16),
- ((state->src_h & 0xffff) * 15625) >> 10,
- format_name.str,
- rot_str);
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
}
}
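In the uapi/hw split above, the source rectangle is expressed in 16.16 fixed point while the destination is in whole pixels; drm_plane_state_src()/drm_plane_state_dest() build those rectangles from the raw plane-state fields, and the DRM_RECT_FP_* / DRM_RECT_* macros print them. A small i915-independent sketch of that conversion (header locations assumed):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>

static void demo_print_plane_rects(struct drm_printer *p,
				   const struct drm_plane_state *state)
{
	/* src_* fields are 16.16 fixed point, crtc_* are integer pixels */
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dst = drm_plane_state_dest(state);

	drm_printf(p, "src=" DRM_RECT_FP_FMT " dst=" DRM_RECT_FMT "\n",
		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst));
}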
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct intel_crtc_state *pipe_config;
- int num_scalers = intel_crtc->num_scalers;
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
int i;
- pipe_config = to_intel_crtc_state(intel_crtc->base.state);
-
/* Not all platforms have a scaler */
if (num_scalers) {
seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
for (i = 0; i < num_scalers; i++) {
- struct intel_scaler *sc =
- &pipe_config->scaler_state.scalers[i];
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
i, yesno(sc->in_use), sc->mode);
@@ -2806,6 +2618,44 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
}
}
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2817,52 +2667,22 @@ static int i915_display_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ drm_modeset_lock_all(dev);
+
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *pipe_config;
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- pipe_config = to_intel_crtc_state(crtc->base.state);
-
- seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
- crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(pipe_config->base.active),
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
- yesno(pipe_config->dither), pipe_config->pipe_bpp);
-
- if (pipe_config->base.active) {
- struct intel_plane *cursor =
- to_intel_plane(crtc->base.cursor);
-
- intel_crtc_info(m, crtc);
-
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
- yesno(cursor->base.state->visible),
- cursor->base.state->crtc_x,
- cursor->base.state->crtc_y,
- cursor->base.state->crtc_w,
- cursor->base.state->crtc_h,
- cursor->cursor.base);
- intel_scaler_info(m, crtc);
- intel_plane_info(m, crtc);
- }
-
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
- drm_modeset_unlock(&crtc->base.mutex);
- }
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- mutex_lock(&dev->mode_config.mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
+
+ drm_modeset_unlock_all(dev);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -2874,7 +2694,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
- enum intel_engine_id id;
struct drm_printer p;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -2886,7 +2705,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
- for_each_engine(engine, dev_priv, id)
+ for_each_uabi_engine(engine, dev_priv)
intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -2899,7 +2718,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
+ intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
return 0;
}
@@ -2966,14 +2785,27 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
- struct i915_wa *wa;
- unsigned int i;
+ struct intel_engine_cs *engine;
+
+ for_each_uabi_engine(engine, i915) {
+ const struct i915_wa_list *wal = &engine->ctx_wa_list;
+ const struct i915_wa *wa;
+ unsigned int count;
+
+ count = wal->count;
+ if (!count)
+ continue;
+
+ seq_printf(m, "%s: Workarounds applied: %u\n",
+ engine->name, count);
- seq_printf(m, "Workarounds applied: %u\n", wal->count);
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
+ for (wa = wal->list; count--; wa++)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ i915_mmio_reg_offset(wa->reg),
+ wa->val, wa->mask);
+
+ seq_printf(m, "\n");
+ }
return 0;
}
@@ -3182,16 +3014,17 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
- intel_encoder = intel_attached_encoder(connector);
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ intel_dig_port = enc_to_dig_port(intel_encoder);
if (!intel_dig_port->dp.can_mst)
continue;
- seq_printf(m, "MST Source Port %c\n",
- port_name(intel_dig_port->base.port));
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -3235,7 +3068,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
@@ -3244,9 +3077,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
else
- intel_dp->compliance.test_active = 0;
+ intel_dp->compliance.test_active = false;
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -3279,7 +3112,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
@@ -3329,7 +3162,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
@@ -3373,7 +3206,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
@@ -3620,7 +3453,8 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- int ret = i915_terminally_wedged(data);
+ struct drm_i915_private *i915 = data;
+ int ret = intel_gt_terminally_wedged(&i915->gt);
switch (ret) {
case -EIO:
@@ -3640,11 +3474,11 @@ i915_wedged_set(void *data, u64 val)
struct drm_i915_private *i915 = data;
/* Flush any previous reset before applying for a new one */
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
+ wait_event(i915->gt.reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
- i915_handle_error(i915, val, I915_ERROR_CAPTURE,
- "Manually set wedged engine mask = %llx", val);
+ intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
+ "Manually set wedged engine mask = %llx", val);
return 0;
}
@@ -3652,6 +3486,37 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set,
"%llu\n");
+static int
+i915_perf_noa_delay_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+
+ /*
+ * Larger values would lead to infinite waits, as we're doing
+ * timestamp differences on the CS with only 32 bits.
+ */
+ if (val > mul_u32_u32(U32_MAX, clk))
+ return -EINVAL;
+
+ atomic64_set(&i915->perf.noa_programming_delay, val);
+ return 0;
+}
+
+static int
+i915_perf_noa_delay_get(void *data, u64 *val)
+{
+ struct drm_i915_private *i915 = data;
+
+ *val = atomic64_read(&i915->perf.noa_programming_delay);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
+ i915_perf_noa_delay_get,
+ i915_perf_noa_delay_set,
+ "%llu\n");
+
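DEFINE_SIMPLE_ATTRIBUTE() wraps a get/set pair such as the one above into a complete file_operations whose read and write paths print and parse a u64 with the given format string. A self-contained sketch of the same pattern (all demo_* names and the sanity limit are hypothetical):

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
	*val = demo_value;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	if (val > 1000000)	/* arbitrary upper bound for the demo */
		return -EINVAL;

	demo_value = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

/* registered from a driver's debugfs init, e.g.:
 * debugfs_create_file("demo_value", 0644, parent, NULL, &demo_fops);
 */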
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
@@ -3661,6 +3526,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
+#define DROP_RCU BIT(9)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
@@ -3669,7 +3535,8 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
DROP_SHRINK_ALL |\
DROP_IDLE | \
DROP_RESET_ACTIVE | \
- DROP_RESET_SEQNO)
+ DROP_RESET_SEQNO | \
+ DROP_RCU)
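Each DROP_* flag selects an independent action, so user space ORs together the bits it wants and writes the mask to the i915_gem_drop_caches debugfs file. A hypothetical userspace sketch requesting DROP_IDLE | DROP_RCU; the debugfs mount point and card index are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* DROP_IDLE is BIT(6) = 0x40, DROP_RCU is BIT(9) = 0x200 */
	const char *mask = "0x240";
	int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mask, strlen(mask)) < 0)
		perror("write");
	close(fd);
	return 0;
}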
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -3677,54 +3544,48 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
-
static int
-i915_drop_caches_set(void *data, u64 val)
+gt_drop_caches(struct intel_gt *gt, u64 val)
{
- struct drm_i915_private *i915 = data;
-
- DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
- val, val & DROP_ALL);
+ int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
- i915_gem_set_wedged(i915);
+ wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ intel_gt_set_wedged(gt);
- /* No need to check and wait for gpu resets, only libdrm auto-restarts
- * on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
- int ret;
+ if (val & DROP_RETIRE)
+ intel_gt_retire_requests(gt);
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (val & (DROP_IDLE | DROP_ACTIVE)) {
+ ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
+ }
- /*
- * To finish the flush of the idle_worker, we must complete
- * the switch-to-kernel-context, which requires a double
- * pass through wait_for_idle: first queues the switch,
- * second waits for the switch.
- */
- if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
-
- if (ret == 0 && val & DROP_IDLE)
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
-
- if (val & DROP_RETIRE)
- i915_retire_requests(i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
+ if (val & DROP_IDLE) {
+ ret = intel_gt_pm_wait_for_idle(gt);
+ if (ret)
+ return ret;
}
- if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
- i915_handle_error(i915, ALL_ENGINES, 0, NULL);
+ if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
+ intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
+
+ return 0;
+}
+
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ int ret;
+
+ DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+ val, val & DROP_ALL);
+
+ ret = gt_drop_caches(&i915->gt, val);
+ if (ret)
+ return ret;
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -3737,10 +3598,8 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(i915);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE) {
- flush_delayed_work(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
- }
+ if (val & DROP_RCU)
+ rcu_barrier();
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
@@ -3796,6 +3655,15 @@ i915_cache_sharing_set(void *data, u64 val)
return 0;
}
+static void
+intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
+ u8 *to_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
+
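The helper above treats subslice_mask as a flat byte array with ss_stride bytes per slice; the gen9 status code later in this patch indexes individual subslices the same way (slice * ss_stride + ss / BITS_PER_BYTE, then BIT(ss % BITS_PER_BYTE)). A small sketch of that packing, with demo_* names and the stride value standing in for the real sseu fields:

#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_SS_STRIDE	2	/* bytes of subslice mask per slice (assumed) */

/* mark subslice 'ss' of slice 's' as present in a packed mask */
static void demo_set_subslice(u8 *mask, int s, int ss)
{
	mask[s * DEMO_SS_STRIDE + ss / BITS_PER_BYTE] |= BIT(ss % BITS_PER_BYTE);
}

/* copy one slice's worth of subslice bits, as intel_sseu_copy_subslices() does */
static void demo_copy_slice(const u8 *from, u8 *to, int s)
{
	int offset = s * DEMO_SS_STRIDE;

	memcpy(&to[offset], &from[offset], DEMO_SS_STRIDE);
}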
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
@@ -3869,12 +3737,13 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
continue;
sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
@@ -3920,18 +3789,21 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv))
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
+ u8 ss_idx = s * info->sseu.ss_stride +
+ ss / BITS_PER_BYTE;
if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[s] |= BIT(ss);
+ sseu->subslice_mask[ss_idx] |=
+ BIT(ss % BITS_PER_BYTE);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
@@ -3945,28 +3817,26 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
#undef SS_MAX
}
-static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
+static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
int s;
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->eu_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
- }
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu =
- RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
@@ -4013,6 +3883,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
@@ -4020,20 +3891,19 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
+ i915_print_sseu_info(m, true, &info->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
- sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
- sseu.max_eus_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+ intel_sseu_set_info(&sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
- broadwell_sseu_device_status(dev_priv, &sseu);
+ bdw_sseu_device_status(dev_priv, &sseu);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_device_status(dev_priv, &sseu);
else if (INTEL_GEN(dev_priv) >= 10)
@@ -4048,13 +3918,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- file->private_data =
- (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_user_get(&i915->uncore);
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
return 0;
}
@@ -4062,13 +3931,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(&i915->runtime_pm,
- (intel_wakeref_t)(uintptr_t)file->private_data);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_put(&i915->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
return 0;
}
@@ -4087,9 +3955,9 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
flush_work(&dev_priv->hotplug.dig_port_work);
- flush_work(&dev_priv->hotplug.hotplug_work);
+ flush_delayed_work(&dev_priv->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -4256,11 +4124,11 @@ static int i915_drrs_ctl_set(void *data, u64 val)
crtc_state = to_intel_crtc_state(crtc->base.state);
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
!crtc_state->has_drrs)
goto out;
- commit = crtc_state->base.commit;
+ commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
@@ -4272,18 +4140,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
struct intel_encoder *encoder;
struct intel_dp *intel_dp;
- if (!(crtc_state->base.connector_mask &
+ if (!(crtc_state->uapi.connector_mask &
drm_connector_mask(connector)))
continue;
- encoder = intel_attached_encoder(connector);
+ encoder = intel_attached_encoder(to_intel_connector(connector));
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
val ? "en" : "dis", val);
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (val)
intel_edp_drrs_enable(intel_dp,
crtc_state);
@@ -4331,14 +4199,14 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
crtc_state = to_intel_crtc_state(intel_crtc->base.state);
- commit = crtc_state->base.commit;
+ commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (!ret)
ret = wait_for_completion_interruptible(&commit->flip_done);
}
- if (!ret && crtc_state->base.active) {
+ if (!ret && crtc_state->hw.active) {
DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -4370,7 +4238,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
@@ -4378,10 +4245,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_hangcheck_info", i915_hangcheck_info, 0},
- {"i915_reset_info", i915_reset_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
- {"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_fbc_status", i915_fbc_status, 0},
@@ -4417,6 +4281,7 @@ static const struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {
+ {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -4490,7 +4355,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
u8 buf[16];
ssize_t err;
int i;
@@ -4525,7 +4390,7 @@ static int i915_panel_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4547,7 +4412,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_connector *intel_connector = to_intel_connector(connector);
- bool hdcp_cap, hdcp2_cap;
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4558,17 +4422,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
- seq_puts(m, "\n");
+ intel_hdcp_info(m, intel_connector);
return 0;
}
@@ -4614,10 +4468,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
} else if (ret) {
break;
}
- intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc_params.compression_enable));
+ yesno(crtc_state->dsc.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "Force_DSC_Enable: %s\n",
@@ -4641,8 +4495,8 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
int ret;
struct drm_connector *connector =
((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (len == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..f7385abdd74b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,7 +36,6 @@
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>
@@ -51,446 +50,100 @@
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
-#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
+#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_reset.h"
-#include "gt/intel_workarounds.h"
+#include "gt/intel_rc6.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_pmu.h"
+#include "i915_memcpy.h"
+#include "i915_perf.h"
#include "i915_query.h"
+#include "i915_suspend.h"
+#include "i915_switcheroo.h"
+#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
-#include "intel_drv.h"
+#include "intel_memory_region.h"
#include "intel_pm.h"
-#include "intel_uc.h"
static struct drm_driver driver;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-static unsigned int i915_load_fail_count;
-
-bool __i915_inject_load_failure(const char *func, int line)
-{
- if (i915_load_fail_count >= i915_modparams.inject_load_failure)
- return false;
-
- if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
- DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
- i915_modparams.inject_load_failure, func, line);
- i915_modparams.inject_load_failure = 0;
- return true;
- }
-
- return false;
-}
-
-bool i915_error_injected(void)
-{
- return i915_load_fail_count && !i915_modparams.inject_load_failure;
-}
-
-#endif
-
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
- "providing the dmesg log by booting with drm.debug=0xf"
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...)
-{
- static bool shown_bug_once;
- struct device *kdev = dev_priv->drm.dev;
- bool is_error = level[1] <= KERN_ERR[1];
- bool is_debug = level[1] == KERN_DEBUG[1];
- struct va_format vaf;
- va_list args;
-
- if (is_debug && !(drm_debug & DRM_UT_DRIVER))
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (is_error)
- dev_printk(level, kdev, "%pV", &vaf);
- else
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
-
- va_end(args);
-
- if (is_error && !shown_bug_once) {
- /*
- * Ask the user to file a bug report for the error, except
- * if they may have caused the bug by fiddling with unsafe
- * module parameters.
- */
- if (!test_taint(TAINT_USER))
- dev_notice(kdev, "%s", FDO_BUG_MSG);
- shown_bug_once = true;
- }
-}
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 5));
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
- /* PantherPoint is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- return PCH_LPT;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- return PCH_LPT;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
- return PCH_LPT;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
- return PCH_LPT;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
- /* CometPoint is CNP Compatible */
- return PCH_CNP;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
- return PCH_MCC;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 5))
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
- else
- DRM_DEBUG_KMS("Assuming no PCH\n");
-
- return id;
-}
-
-static void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is an irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (WARN_ON(id && pch_type == PCH_NONE))
- id = 0;
-
- dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- dev_priv->pch_id = 0;
- }
-
- if (!pch)
- DRM_DEBUG_KMS("No PCH found.\n");
-
- pci_dev_put(pch);
-}
-
-static int i915_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- drm_i915_getparam_t *param = data;
- int value;
-
- switch (param->param) {
- case I915_PARAM_IRQ_ACTIVE:
- case I915_PARAM_ALLOW_BATCHBUFFER:
- case I915_PARAM_LAST_DISPATCH:
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- /* Reject all old ums/dri params. */
- return -ENODEV;
- case I915_PARAM_CHIPSET_ID:
- value = pdev->device;
- break;
- case I915_PARAM_REVISION:
- value = pdev->revision;
- break;
- case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->ggtt.num_fences;
- break;
- case I915_PARAM_HAS_OVERLAY:
- value = dev_priv->overlay ? 1 : 0;
- break;
- case I915_PARAM_HAS_BSD:
- value = !!dev_priv->engine[VCS0];
- break;
- case I915_PARAM_HAS_BLT:
- value = !!dev_priv->engine[BCS0];
- break;
- case I915_PARAM_HAS_VEBOX:
- value = !!dev_priv->engine[VECS0];
- break;
- case I915_PARAM_HAS_BSD2:
- value = !!dev_priv->engine[VCS1];
- break;
- case I915_PARAM_HAS_LLC:
- value = HAS_LLC(dev_priv);
- break;
- case I915_PARAM_HAS_WT:
- value = HAS_WT(dev_priv);
- break;
- case I915_PARAM_HAS_ALIASING_PPGTT:
- value = INTEL_PPGTT(dev_priv);
- break;
- case I915_PARAM_HAS_SEMAPHORES:
- value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
- break;
- case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
- break;
- case I915_PARAM_CMD_PARSER_VERSION:
- value = i915_cmd_parser_get_version(dev_priv);
- break;
- case I915_PARAM_SUBSLICE_TOTAL:
- value = intel_sseu_subslice_total(sseu);
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_EU_TOTAL:
- value = sseu->eu_total;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_HAS_GPU_RESET:
- value = i915_modparams.enable_hangcheck &&
- intel_has_gpu_reset(dev_priv);
- if (value && intel_has_reset_engine(dev_priv))
- value = 2;
- break;
- case I915_PARAM_HAS_RESOURCE_STREAMER:
- value = 0;
- break;
- case I915_PARAM_HAS_POOLED_EU:
- value = HAS_POOLED_EU(dev_priv);
- break;
- case I915_PARAM_MIN_EU_IN_POOL:
- value = sseu->min_eu_in_pool;
- break;
- case I915_PARAM_HUC_STATUS:
- value = intel_huc_check_status(&dev_priv->huc);
- if (value < 0)
- return value;
- break;
- case I915_PARAM_MMAP_GTT_VERSION:
- /* Though we've started our numbering from 1, and so class all
- * earlier versions as 0, in effect their value is undefined as
- * the ioctl will report EINVAL for the unknown param!
- */
- value = i915_gem_mmap_gtt_version();
- break;
- case I915_PARAM_HAS_SCHEDULER:
- value = dev_priv->caps.scheduler;
- break;
-
- case I915_PARAM_MMAP_VERSION:
- /* Remember to bump this if the version changes! */
- case I915_PARAM_HAS_GEM:
- case I915_PARAM_HAS_PAGEFLIPPING:
- case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
- case I915_PARAM_HAS_RELAXED_FENCING:
- case I915_PARAM_HAS_COHERENT_RINGS:
- case I915_PARAM_HAS_RELAXED_DELTA:
- case I915_PARAM_HAS_GEN7_SOL_RESET:
- case I915_PARAM_HAS_WAIT_TIMEOUT:
- case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
- case I915_PARAM_HAS_PINNED_BATCHES:
- case I915_PARAM_HAS_EXEC_NO_RELOC:
- case I915_PARAM_HAS_EXEC_HANDLE_LUT:
- case I915_PARAM_HAS_COHERENT_PHYS_GTT:
- case I915_PARAM_HAS_EXEC_SOFTPIN:
- case I915_PARAM_HAS_EXEC_ASYNC:
- case I915_PARAM_HAS_EXEC_FENCE:
- case I915_PARAM_HAS_EXEC_CAPTURE:
- case I915_PARAM_HAS_EXEC_BATCH_FIRST:
- case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
- case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
- /* For the time being all of these are always true;
- * if some supported hardware does not have one of these
- * features this value needs to be provided from
- * INTEL_INFO(), a feature macro, or similar.
- */
- value = 1;
- break;
- case I915_PARAM_HAS_CONTEXT_ISOLATION:
- value = intel_engines_has_context_isolation(dev_priv);
- break;
- case I915_PARAM_SLICE_MASK:
- value = sseu->slice_mask;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_SUBSLICE_MASK:
- value = sseu->subslice_mask[0];
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
- break;
- case I915_PARAM_MMAP_GTT_COHERENT:
- value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
- break;
- default:
- DRM_DEBUG("Unknown parameter %d\n", param->param);
- return -EINVAL;
- }
-
- if (put_user(value, param->value))
- return -EFAULT;
-
- return 0;
-}
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 pcbr;
+ u32 clock_gate_dis2;
+};
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
@@ -619,176 +272,97 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-/* true = enable decode, false = disable decoder */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = cookie;
-
- intel_modeset_vga_set_state(dev_priv, state);
- if (state)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static int i915_resume_switcheroo(struct drm_device *dev);
-static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
-
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
- if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- /* i915 resume handler doesn't set to D0 */
- pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(dev);
- dev->switch_power_state = DRM_SWITCH_POWER_ON;
- } else {
- pr_info("switched off\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(dev, pmm);
- dev->switch_power_state = DRM_SWITCH_POWER_OFF;
- }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- /*
- * FIXME: open_count is protected by drm_global_mutex but that would lead to
- * locking inversion with the driver load path. And the access here is
- * completely racy anyway. So don't bother with locking for now.
- */
- return dev->open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
- .set_gpu_state = i915_switcheroo_set_state,
- .reprobe = NULL,
- .can_switch = i915_switcheroo_can_switch,
-};
-
-static int i915_load_modeset_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(dev_priv)) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
if (ret)
goto out;
}
- intel_bios_init(dev_priv);
+ intel_bios_init(i915);
- /* If we have > 1 VGA cards, then we need to arbitrate access
- * to the common VGA resources.
- *
- * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
- * then we do not take part in VGA arbitration and the
- * vga_client_register() fails with -ENODEV.
- */
- ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
+ ret = intel_vga_register(i915);
+ if (ret)
goto out;
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+ ret = i915_switcheroo_register(i915);
if (ret)
goto cleanup_vga_client;
- /* must happen before intel_power_domains_init_hw() on VLV/CHV */
- intel_update_rawclk(dev_priv);
-
- intel_power_domains_init_hw(dev_priv, false);
+ intel_power_domains_init_hw(i915, false);
- intel_csr_ucode_init(dev_priv);
+ intel_csr_ucode_init(i915);
- ret = intel_irq_install(dev_priv);
+ ret = intel_irq_install(i915);
if (ret)
goto cleanup_csr;
- intel_gmbus_setup(dev_priv);
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- ret = intel_modeset_init(dev);
+ ret = intel_modeset_init(i915);
if (ret)
goto cleanup_irq;
- ret = i915_gem_init(dev_priv);
+ ret = i915_gem_init(i915);
if (ret)
goto cleanup_modeset;
- intel_overlay_setup(dev_priv);
+ intel_overlay_setup(i915);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
return 0;
- ret = intel_fbdev_init(dev);
+ ret = intel_fbdev_init(&i915->drm);
if (ret)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev_priv);
+ intel_hpd_init(i915);
- intel_init_ipc(dev_priv);
+ intel_init_ipc(i915);
return 0;
cleanup_gem:
- i915_gem_suspend(dev_priv);
- i915_gem_fini_hw(dev_priv);
- i915_gem_fini(dev_priv);
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
cleanup_modeset:
- intel_modeset_cleanup(dev);
+ intel_modeset_driver_remove(i915);
cleanup_irq:
- drm_irq_uninstall(dev);
- intel_gmbus_teardown(dev_priv);
+ intel_irq_uninstall(i915);
cleanup_csr:
- intel_csr_ucode_fini(dev_priv);
- intel_power_domains_fini_hw(dev_priv);
- vga_switcheroo_unregister_client(pdev);
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
+ i915_switcheroo_unregister(i915);
cleanup_vga_client:
- vga_client_register(pdev, NULL, NULL, NULL);
+ intel_vga_unregister(i915);
out:
return ret;
}
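Note: i915_driver_modeset_probe() keeps the kernel's usual goto-unwind shape: every setup step has a matching cleanup label, and the labels run in reverse order on failure. A compressed, self-contained sketch of that pattern (the step names are invented for illustration):

#include <stdio.h>

static int step_a(void)  { return 0; }
static void undo_a(void) { puts("undo_a"); }
static int step_b(void)  { return -1; }    /* pretend this step fails */

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto cleanup_a;        /* unwind only what succeeded */

	return 0;

cleanup_a:
	undo_a();
out:
	return ret;
}

int main(void) { return probe() ? 1 : 0; }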
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
- struct apertures_struct *ap;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool primary;
- int ret;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
+ intel_modeset_driver_remove(i915);
- ap->ranges[0].base = ggtt->gmadr.start;
- ap->ranges[0].size = ggtt->mappable_end;
+ intel_irq_uninstall(i915);
- primary =
- pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ intel_bios_driver_remove(i915);
- ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ i915_switcheroo_unregister(i915);
- kfree(ap);
+ intel_vga_unregister(i915);
- return ret;
+ intel_csr_ucode_fini(i915);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -840,15 +414,6 @@ out_err:
return -ENOMEM;
}
-static void i915_engines_cleanup(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- kfree(engine);
-}
-
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->hotplug.dp_wq);
@@ -881,8 +446,37 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
}
}
+static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
+{
+ if (!IS_VALLEYVIEW(i915))
+ return 0;
+
+ /* we write all the values in the struct, so no need to zero it out */
+ i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+ GFP_KERNEL);
+ if (!i915->vlv_s0ix_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vlv_free_s0ix_state(struct drm_i915_private *i915)
+{
+ if (!i915->vlv_s0ix_state)
+ return;
+
+ kfree(i915->vlv_s0ix_state);
+ i915->vlv_s0ix_state = NULL;
+}
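Note: the two helpers above follow an allocate-only-where-needed pattern: other platforms keep the pointer NULL, the buffer is deliberately not zeroed because every field is written before use, and the free path nulls the pointer so later callers (and the NULL guards in the save/restore paths further down) stay safe. A rough userspace equivalent, with the needs_blob flag and types invented:

#include <stdlib.h>

struct blob { int payload; };

struct dev_state {
	int needs_blob;              /* think IS_VALLEYVIEW() */
	struct blob *blob;
};

static int dev_alloc_blob(struct dev_state *d)
{
	if (!d->needs_blob)
		return 0;                           /* nothing to do elsewhere */

	d->blob = malloc(sizeof(*d->blob));     /* fully written before use */
	return d->blob ? 0 : -1;
}

static void dev_free_blob(struct dev_state *d)
{
	free(d->blob);               /* free(NULL) is a no-op */
	d->blob = NULL;              /* later callers see "not present" */
}

int main(void)
{
	struct dev_state d = { .needs_blob = 1 };

	if (dev_alloc_blob(&d))
		return 1;
	dev_free_blob(&d);
	dev_free_blob(&d);           /* second call is harmless */
	return 0;
}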
+
+static void sanitize_gpu(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(&i915->gt, ALL_ENGINES);
+}
+
/**
- * i915_driver_init_early - setup state not requiring device access
+ * i915_driver_early_probe - setup state not requiring device access
* @dev_priv: device private
*
* Initialize everything that is a "SW-only" state, that is state not
@@ -891,16 +485,17 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
* system memory allocation, setting up device specific attributes and
* function hooks not requiring accessing the device.
*/
-static int i915_driver_init_early(struct drm_i915_private *dev_priv)
+static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
int ret = 0;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
- intel_uncore_init_early(&dev_priv->uncore);
+ intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
+ intel_uncore_init_early(&dev_priv->uncore, dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -920,24 +515,27 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
- goto err_engines;
+ return ret;
- ret = i915_gem_init_early(dev_priv);
+ ret = vlv_alloc_s0ix_state(dev_priv);
if (ret < 0)
goto err_workqueues;
+ intel_wopcm_init_early(&dev_priv->wopcm);
+
+ intel_gt_init_early(&dev_priv->gt, dev_priv);
+
+ i915_gem_init_early(dev_priv);
+
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_wopcm_init_early(&dev_priv->wopcm);
- intel_uc_init_early(dev_priv);
intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
ret = intel_power_domains_init(dev_priv);
if (ret < 0)
- goto err_uc;
+ goto err_gem;
intel_irq_init(dev_priv);
- intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
@@ -947,35 +545,35 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
return 0;
-err_uc:
- intel_uc_cleanup_early(dev_priv);
+err_gem:
i915_gem_cleanup_early(dev_priv);
+ intel_gt_driver_late_release(&dev_priv->gt);
+ vlv_free_s0ix_state(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
-err_engines:
- i915_engines_cleanup(dev_priv);
return ret;
}
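Note: the i915_inject_probe_failure() calls sprinkled through the probe chain exist so debug builds can force each error path to run at least once. A toy version of the same idea, gated on a build flag; every name here is invented and only the shape matches the driver's hook:

#include <errno.h>
#include <stdio.h>

#ifdef DEBUG_FAULT_INJECT
static int fail_after = 2;                      /* fail the 2nd call site */
static int inject_probe_failure(void)
{
	return --fail_after == 0 ? -ENODEV : 0;
}
#else
static int inject_probe_failure(void) { return 0; }
#endif

static int probe_step(const char *name)
{
	int ret = inject_probe_failure();

	if (ret)
		fprintf(stderr, "%s: injected failure\n", name);
	return ret;
}

int main(void)
{
	if (probe_step("early") || probe_step("mmio") || probe_step("hw"))
		return 1;
	return 0;
}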
/**
- * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * i915_driver_late_release - cleanup the setup done in
+ * i915_driver_early_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
- intel_uc_cleanup_early(dev_priv);
i915_gem_cleanup_early(dev_priv);
+ intel_gt_driver_late_release(&dev_priv->gt);
+ vlv_free_s0ix_state(dev_priv);
i915_workqueues_cleanup(dev_priv);
- i915_engines_cleanup(dev_priv);
pm_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
}
/**
- * i915_driver_init_mmio - setup device MMIO
+ * i915_driver_mmio_probe - setup device MMIO
* @dev_priv: device private
*
* Setup minimal device state necessary for MMIO accesses later in the
@@ -983,11 +581,11 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
* side effects or exposing the driver via kernel internal or user space
* interfaces.
*/
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (i915_get_bridge_dev(dev_priv))
@@ -1004,13 +602,14 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_uncore_prune_mmio_domains(&dev_priv->uncore);
- intel_uc_init_mmio(dev_priv);
+ intel_uc_init_mmio(&dev_priv->gt.uc);
- ret = intel_engines_init_mmio(dev_priv);
+ ret = intel_engines_init_mmio(&dev_priv->gt);
if (ret)
goto err_uncore;
- i915_gem_init_mmio(dev_priv);
+ /* As early as possible, scrub existing GPU state before clobbering */
+ sanitize_gpu(dev_priv);
return 0;
@@ -1024,10 +623,10 @@ err_bridge:
}
/**
- * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
@@ -1462,7 +1061,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
- if (INTEL_GEN(dev_priv) < 9)
+ if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
return;
if (IS_GEN9_LP(dev_priv))
@@ -1482,8 +1081,8 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
@@ -1516,22 +1115,23 @@ static void edram_detect(struct drm_i915_private *dev_priv)
dev_priv->edram_size_mb =
gen9_edram_size_mb(dev_priv, edram_cap);
- DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
+ dev_info(dev_priv->drm.dev,
+ "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
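Note: gen9_edram_size_mb() is a small table-driven decode of the eDRAM capability register; making ways[]/sets[] static const u8 keeps the tables out of the stack frame and shrinks them. A self-contained model of the decode — the bitfield positions below are placeholders, not the real EDRAM_* field definitions:

#include <stdint.h>
#include <stdio.h>

static const uint8_t ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
static const uint8_t sets[4] = { 1, 1, 2, 2 };

static unsigned int edram_size_mb(uint32_t cap)
{
	unsigned int banks   = (cap >> 8) & 0xf;    /* assumed field layout */
	unsigned int way_idx = (cap >> 4) & 0x7;    /* assumed field layout */
	unsigned int set_idx = (cap >> 2) & 0x3;    /* assumed field layout */

	return banks * ways[way_idx] * sets[set_idx];
}

int main(void)
{
	printf("%u MB\n", edram_size_mb(0x0112));
	return 0;
}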
/**
- * i915_driver_init_hw - setup state requiring device access
+ * i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
*
* Setup state that requires accessing the device, but doesn't require
* exposing the driver via kernel internal or userspace interfaces.
*/
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
intel_device_info_runtime_init(dev_priv);
@@ -1570,41 +1170,41 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ if (ret)
goto err_ggtt;
- }
- ret = vga_remove_vgacon(pdev);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
goto err_ggtt;
- }
- ret = i915_ggtt_init_hw(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
goto err_ggtt;
+ intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
pci_set_master(pdev);
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
@@ -1622,15 +1222,13 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- intel_uncore_sanitize(dev_priv);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1676,18 +1274,20 @@ err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
+err_mem_regions:
+ intel_memory_regions_driver_release(dev_priv);
err_ggtt:
- i915_ggtt_cleanup_hw(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
err_perf:
i915_perf_fini(dev_priv);
return ret;
}
/**
- * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -1710,7 +1310,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- i915_gem_shrinker_register(dev_priv);
+ i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
/*
@@ -1730,14 +1330,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (HAS_DISPLAY(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
}
- if (IS_GEN(dev_priv, 5))
- intel_gpu_ips_init(dev_priv);
+ intel_gt_driver_register(&dev_priv->gt);
intel_audio_init(dev_priv);
@@ -1754,7 +1353,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1780,7 +1379,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
- intel_gpu_ips_teardown();
+ intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
@@ -1790,12 +1389,12 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_teardown_sysfs(dev_priv);
drm_dev_unplug(&dev_priv->drm);
- i915_gem_shrinker_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
}
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
struct drm_printer p = drm_debug_printer("i915 device info:");
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
@@ -1806,8 +1405,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
- intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
- intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1837,9 +1436,10 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.pdev = pdev;
i915->drm.dev_private = i915;
- pci_set_drvdata(pdev, &i915->drm);
+
+ i915->drm.pdev = pdev;
+ pci_set_drvdata(pdev, i915);
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
@@ -1863,17 +1463,17 @@ static void i915_driver_destroy(struct drm_i915_private *i915)
}
/**
- * i915_driver_load - setup chip and create an initial config
+ * i915_driver_probe - setup chip and create an initial config
* @pdev: PCI device
* @ent: matching PCI ID entry
*
- * The driver load routine has to do several things:
+ * The driver probe routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
-int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
@@ -1888,25 +1488,44 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ /*
+ * Check if we support fake LMEM -- for now we only unleash this for
+ * the live selftests (test-and-exit).
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
+ if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ i915_modparams.fake_lmem_start) {
+ mkwrite_device_info(dev_priv)->memory_regions =
+ REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+ mkwrite_device_info(dev_priv)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(dev_priv));
+ GEM_BUG_ON(!IS_DGFX(dev_priv));
+ }
+ }
+#endif
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
- ret = i915_driver_init_early(dev_priv);
+ ret = i915_driver_early_probe(dev_priv);
if (ret < 0)
goto out_pci_disable;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- ret = i915_driver_init_mmio(dev_priv);
+ i915_detect_vgpu(dev_priv);
+
+ ret = i915_driver_mmio_probe(dev_priv);
if (ret < 0)
goto out_runtime_pm_put;
- ret = i915_driver_init_hw(dev_priv);
+ ret = i915_driver_hw_probe(dev_priv);
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_load_modeset_init(&dev_priv->drm);
+ ret = i915_driver_modeset_probe(dev_priv);
if (ret < 0)
goto out_cleanup_hw;
@@ -1919,66 +1538,54 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_hw:
- i915_driver_cleanup_hw(dev_priv);
- i915_ggtt_cleanup_hw(dev_priv);
+ i915_driver_hw_remove(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
out_cleanup_mmio:
- i915_driver_cleanup_mmio(dev_priv);
+ i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- i915_driver_cleanup_early(dev_priv);
+ i915_driver_late_release(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
- i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+ i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
i915_driver_destroy(dev_priv);
return ret;
}
-void i915_driver_unload(struct drm_device *dev)
+void i915_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
-
- i915_driver_unregister(dev_priv);
+ i915_driver_unregister(i915);
/*
* After unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&i915->gt);
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
- i915_gem_suspend(dev_priv);
-
- drm_atomic_helper_shutdown(dev);
-
- intel_gvt_cleanup(dev_priv);
-
- intel_modeset_cleanup(dev);
-
- intel_bios_cleanup(dev_priv);
+ i915_gem_suspend(i915);
- vga_switcheroo_unregister_client(pdev);
- vga_client_register(pdev, NULL, NULL, NULL);
+ drm_atomic_helper_shutdown(&i915->drm);
- intel_csr_ucode_fini(dev_priv);
+ intel_gvt_driver_remove(i915);
- /* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_reset_error_state(dev_priv);
+ i915_driver_modeset_remove(i915);
- i915_gem_fini_hw(dev_priv);
+ i915_reset_error_state(i915);
+ i915_gem_driver_remove(i915);
- intel_power_domains_fini_hw(dev_priv);
+ intel_power_domains_driver_remove(i915);
- i915_driver_cleanup_hw(dev_priv);
+ i915_driver_hw_remove(i915);
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static void i915_driver_release(struct drm_device *dev)
@@ -1988,15 +1595,17 @@ static void i915_driver_release(struct drm_device *dev)
disable_rpm_wakeref_asserts(rpm);
- i915_gem_fini(dev_priv);
+ i915_gem_driver_release(dev_priv);
+
+ intel_memory_regions_driver_release(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
- i915_ggtt_cleanup_hw(dev_priv);
- i915_driver_cleanup_mmio(dev_priv);
+ i915_driver_mmio_release(dev_priv);
enable_rpm_wakeref_asserts(rpm);
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
- i915_driver_cleanup_early(dev_priv);
+ i915_driver_late_release(dev_priv);
i915_driver_destroy(dev_priv);
}
@@ -2034,12 +1643,13 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
i915_gem_context_close(file);
i915_gem_release(dev, file);
- mutex_unlock(&dev->struct_mutex);
- kfree(file_priv);
+ kfree_rcu(file_priv, rcu);
+
+ /* Catch up with all the deferred frees from "this" client */
+ i915_gem_flush_free_objects(to_i915(dev));
}
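Note: i915_driver_postclose() now frees the file private with kfree_rcu(), which works because drm_i915_file_private (changed later in this patch) puts the rcu_head in a union with the drm_file back-pointer: once the object is being torn down the pointer is dead, so its storage can be reused. A userspace analogue of that storage-reuse trick, with a plain deferred-free list standing in for RCU:

#include <stdio.h>
#include <stdlib.h>

struct defer_node { struct defer_node *next; };

struct file_priv {
	union {
		void *file;                 /* valid while the client is open */
		struct defer_node node;     /* valid once retired */
	};
};

static struct defer_node *defer_list;

static void retire(struct file_priv *fp)
{
	fp->node.next = defer_list;         /* reuse the pointer's storage */
	defer_list = &fp->node;
}

static void flush_deferred(void)
{
	while (defer_list) {
		struct defer_node *n = defer_list;

		defer_list = n->next;
		free((struct file_priv *)n);    /* union is the first member */
	}
}

int main(void)
{
	struct file_priv *fp = malloc(sizeof(*fp));

	fp->file = stdout;
	retire(fp);
	flush_deferred();
	return 0;
}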
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
@@ -2144,7 +1754,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ int ret = 0;
disable_rpm_wakeref_asserts(rpm);
@@ -2155,12 +1765,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
- ret = 0;
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
- bxt_enable_dc9(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hsw_enable_pc8(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_display_power_suspend_late(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
if (ret) {
@@ -2188,34 +1795,28 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(rpm);
- if (!dev_priv->uncore.user_forcewake.count)
- intel_runtime_pm_cleanup(rpm);
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
return ret;
}
-static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
- if (!dev) {
- DRM_ERROR("dev: %p\n", dev);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
state.event != PM_EVENT_FREEZE))
return -EINVAL;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_suspend(dev);
+ error = i915_drm_suspend(&i915->drm);
if (error)
return error;
- return i915_drm_suspend_late(dev, false);
+ return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_drm_resume(struct drm_device *dev)
@@ -2224,14 +1825,16 @@ static int i915_drm_resume(struct drm_device *dev)
int ret;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_sanitize_gt_powersave(dev_priv);
- i915_gem_sanitize(dev_priv);
+ sanitize_gpu(dev_priv);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
+ i915_gem_restore_gtt_mappings(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
+
intel_csr_ucode_resume(dev_priv);
i915_restore_state(dev_priv);
@@ -2255,7 +1858,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -2348,75 +1951,64 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(&dev_priv->uncore);
- i915_check_and_clear_faults(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
- gen9_sanitize_dc_state(dev_priv);
- bxt_disable_dc9(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_disable_pc8(dev_priv);
- }
+ intel_gt_check_and_clear_faults(&dev_priv->gt);
- intel_uncore_sanitize(dev_priv);
+ intel_display_power_resume_early(dev_priv);
intel_power_domains_resume(dev_priv);
- intel_gt_sanitize(dev_priv, true);
-
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
-static int i915_resume_switcheroo(struct drm_device *dev)
+int i915_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- ret = i915_drm_resume_early(dev);
+ ret = i915_drm_resume_early(&i915->drm);
if (ret)
return ret;
- return i915_drm_resume(dev);
+ return i915_drm_resume(&i915->drm);
}
static int i915_pm_prepare(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (!dev) {
+ if (!i915) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_prepare(dev);
+ return i915_drm_prepare(&i915->drm);
}
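Note: the PM entry points now resolve the driver private in one step with kdev_to_i915() (the pointer stored via pci_set_drvdata() earlier in this patch) instead of going device → drm_device → to_i915(). The to_i915() step they used to need is the classic embedded-struct conversion; a self-contained illustration with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct drm_device { int dummy; };

struct i915_private {
	struct drm_device drm;      /* embedded base object */
	int magic;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct i915_private *to_i915(struct drm_device *dev)
{
	return container_of(dev, struct i915_private, drm);
}

int main(void)
{
	struct i915_private priv = { .magic = 0x1915 };
	struct drm_device *dev = &priv.drm;

	printf("0x%x\n", to_i915(dev)->magic);
	return 0;
}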
static int i915_pm_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (!dev) {
+ if (!i915) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend(dev);
+ return i915_drm_suspend(&i915->drm);
}
static int i915_pm_suspend_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -2427,55 +2019,55 @@ static int i915_pm_suspend_late(struct device *kdev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(dev, false);
+ return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_pm_poweroff_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(dev, true);
+ return i915_drm_suspend_late(&i915->drm, true);
}
static int i915_pm_resume_early(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume_early(dev);
+ return i915_drm_resume_early(&i915->drm);
}
static int i915_pm_resume(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume(dev);
+ return i915_drm_resume(&i915->drm);
}
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
int ret;
- if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
- ret = i915_drm_suspend(dev);
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(&i915->drm);
if (ret)
return ret;
}
- ret = i915_gem_freeze(kdev_to_i915(kdev));
+ ret = i915_gem_freeze(i915);
if (ret)
return ret;
@@ -2484,16 +2076,16 @@ static int i915_pm_freeze(struct device *kdev)
static int i915_pm_freeze_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
int ret;
- if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
- ret = i915_drm_suspend_late(dev, true);
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(&i915->drm, true);
if (ret)
return ret;
}
- ret = i915_gem_freeze_late(kdev_to_i915(kdev));
+ ret = i915_gem_freeze_late(i915);
if (ret)
return ret;
@@ -2550,9 +2142,12 @@ static int i915_pm_restore(struct device *kdev)
*/
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
- struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
int i;
+ if (!s)
+ return;
+
/* GAM 0x4000-0x4770 */
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
@@ -2631,10 +2226,13 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
- struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
u32 val;
int i;
+ if (!s)
+ return;
+
/* GAM 0x4000-0x4770 */
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
@@ -2843,8 +2441,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err)
goto err2;
- if (!IS_CHERRYVIEW(dev_priv))
- vlv_save_gunit_s0ix_state(dev_priv);
+ vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
@@ -2874,8 +2471,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- if (!IS_CHERRYVIEW(dev_priv))
- vlv_restore_gunit_s0ix_state(dev_priv);
+ vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
@@ -2895,14 +2491,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
static int intel_runtime_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
-
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
- return -ENODEV;
+ int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -2917,24 +2508,16 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_uc_runtime_suspend(dev_priv);
+ intel_gt_runtime_suspend(&dev_priv->gt);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_uncore_suspend(&dev_priv->uncore);
- ret = 0;
- if (INTEL_GEN(dev_priv) >= 11) {
- icl_display_core_uninit(dev_priv);
- bxt_enable_dc9(dev_priv);
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_display_core_uninit(dev_priv);
- bxt_enable_dc9(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_enable_pc8(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_display_power_suspend(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
- }
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
@@ -2942,10 +2525,9 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(dev_priv);
+ intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_init_swizzling(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
enable_rpm_wakeref_asserts(rpm);
@@ -2953,7 +2535,7 @@ static int intel_runtime_suspend(struct device *kdev)
}
enable_rpm_wakeref_asserts(rpm);
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
@@ -2994,9 +2576,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
@@ -3013,41 +2593,21 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (INTEL_GEN(dev_priv) >= 11) {
- bxt_disable_dc9(dev_priv);
- icl_display_core_init(dev_priv, true);
- if (dev_priv->csr.dmc_payload) {
- if (dev_priv->csr.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
- skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
- gen9_enable_dc5(dev_priv);
- }
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_disable_dc9(dev_priv);
- bxt_display_core_init(dev_priv, true);
- if (dev_priv->csr.dmc_payload &&
- (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
- gen9_enable_dc5(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_disable_pc8(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_display_power_resume(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true);
- }
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(dev_priv);
-
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ intel_gt_runtime_resume(&dev_priv->gt);
+ i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
@@ -3109,18 +2669,12 @@ const struct dev_pm_ops i915_pm_ops = {
.runtime_resume = intel_runtime_resume,
};
-static const struct vm_operations_struct i915_gem_vm_ops = {
- .fault = i915_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = i915_compat_ioctl,
@@ -3167,7 +2721,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
@@ -3188,9 +2742,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
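Note: i915_ioctls[] is a static table mapping ioctl numbers to handlers plus permission flags, walked by the DRM core for driver-range commands; the hunk above only drops the now-redundant DRM_UNLOCKED bit and renames the mmap-offset entry. A toy version of that table-driven dispatch, with invented command numbers and flag bits:

#include <stdio.h>

#define ALLOW_RENDER 0x1

struct ioctl_desc {
	unsigned int cmd;
	int (*handler)(void *arg);
	unsigned int flags;
};

static int do_query(void *arg) { (void)arg; return 0; }

static const struct ioctl_desc table[] = {
	{ .cmd = 0x39, .handler = do_query, .flags = ALLOW_RENDER },
};

static int dispatch(unsigned int cmd, void *arg, unsigned int caller_caps)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].cmd != cmd)
			continue;
		if ((table[i].flags & ALLOW_RENDER) && !(caller_caps & ALLOW_RENDER))
			return -1;              /* caller lacks required access */
		return table[i].handler(arg);
	}
	return -1;                              /* unknown command */
}

int main(void) { return dispatch(0x39, NULL, ALLOW_RENDER); }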
@@ -3200,7 +2754,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
@@ -3209,15 +2763,18 @@ static struct drm_driver driver = {
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
- .gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
+ .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
+
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_mmap_offset,
+
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
@@ -3228,7 +2785,3 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_drm.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fe7a6ec2c199..077af22b8340 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,9 +43,10 @@
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
+#include <linux/xarray.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
@@ -67,29 +68,41 @@
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
+#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
+#include "gem/i915_gem_context_types.h"
+#include "gem/i915_gem_shrinker.h"
+#include "gem/i915_gem_stolen.h"
+
#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
#include "gt/intel_workarounds.h"
+#include "gt/uc/intel_uc.h"
#include "intel_device_info.h"
+#include "intel_pch.h"
#include "intel_runtime_pm.h"
-#include "intel_uc.h"
+#include "intel_memory_region.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "gem/i915_gem_context_types.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
+#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#include "i915_vma.h"
+#include "i915_irq.h"
+
+#include "intel_region_lmem.h"
#include "intel_gvt.h"
@@ -98,45 +111,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190619"
-#define DRIVER_TIMESTAMP 1560947544
-
-/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
- * WARN_ON()) for hw state sanity checks to check for unexpected conditions
- * which may not necessarily be a user visible problem. This will either
- * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
- * enable distros and users to tailor their preferred amount of i915 abrt
- * spam.
- */
-#define I915_STATE_WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- if (!WARN(i915_modparams.verbose_state_checks, format)) \
- DRM_ERROR(format); \
- unlikely(__ret_warn_on); \
-})
-
-#define I915_STATE_WARN_ON(x) \
- I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-bool __i915_inject_load_failure(const char *func, int line);
-#define i915_inject_load_failure() \
- __i915_inject_load_failure(__func__, __LINE__)
-
-bool i915_error_injected(void);
-
-#else
-
-#define i915_inject_load_failure() false
-#define i915_error_injected() false
-
-#endif
-
-#define i915_load_error(i915, fmt, ...) \
- __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
+#define DRIVER_DATE "20200114"
+#define DRIVER_TIMESTAMP 1579001978
struct drm_i915_gem_object;
@@ -152,6 +128,10 @@ enum hpd_pin {
HPD_PORT_D,
HPD_PORT_E,
HPD_PORT_F,
+ HPD_PORT_G,
+ HPD_PORT_H,
+ HPD_PORT_I,
+
HPD_NUM_PINS
};
@@ -162,7 +142,7 @@ enum hpd_pin {
#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct {
unsigned long last_jiffies;
@@ -174,6 +154,7 @@ struct i915_hotplug {
} state;
} stats[HPD_NUM_PINS];
u32 event_bits;
+ u32 retry_bits;
struct delayed_work reenable_work;
u32 long_port_mask;
@@ -210,15 +191,18 @@ struct i915_mmu_object;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
- struct drm_file *file;
+
+ union {
+ struct drm_file *file;
+ struct rcu_head rcu;
+ };
struct {
spinlock_t lock;
struct list_head request_list;
} mm;
- struct idr context_idr;
- struct mutex context_idr_lock; /* guards context_idr */
+ struct xarray context_xa;
struct idr vm_idr;
struct mutex vm_idr_lock; /* guards vm_idr */
@@ -286,17 +270,18 @@ struct drm_i915_display_funcs {
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
- int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
- int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
+ int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
+ int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc *crtc);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc *crtc);
void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -305,11 +290,12 @@ struct drm_i915_display_funcs {
struct intel_initial_plane_config *);
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
- void (*crtc_enable)(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state);
- void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state);
- void (*update_crtcs)(struct drm_atomic_state *state);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+ void (*commit_modeset_disables)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -356,6 +342,7 @@ struct intel_csr {
i915_reg_t mmioaddr[20];
u32 mmiodata[20];
u32 dc_state;
+ u32 target_dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
};
@@ -379,7 +366,6 @@ struct intel_fbc {
unsigned threshold;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
- unsigned int visible_pipes_mask;
struct intel_crtc *crtc;
struct drm_mm_node compressed_fb;
@@ -387,8 +373,8 @@ struct intel_fbc {
bool false_color;
- bool enabled;
bool active;
+ bool activated;
bool flip_pending;
bool underrun_detected;
@@ -400,9 +386,6 @@ struct intel_fbc {
* these problems.
*/
struct intel_fbc_state_cache {
- struct i915_vma *vma;
- unsigned long flags;
-
struct {
unsigned int mode_flags;
u32 hsw_bdw_pixel_rate;
@@ -431,6 +414,8 @@ struct intel_fbc {
const struct drm_format_info *format;
unsigned int stride;
} fb;
+ u16 gen9_wa_cfb_stride;
+ s8 fence_id;
} state_cache;
/*
@@ -441,9 +426,6 @@ struct intel_fbc {
* are supposed to read from it in order to program the registers.
*/
struct intel_fbc_reg_params {
- struct i915_vma *vma;
- unsigned long flags;
-
struct {
enum pipe pipe;
enum i9xx_plane_id i9xx_plane;
@@ -456,7 +438,9 @@ struct intel_fbc {
} fb;
int cfb_size;
- unsigned int gen9_wa_cfb_stride;
+ u16 gen9_wa_cfb_stride;
+ s8 fence_id;
+ bool plane_visible;
} params;
const char *no_fbc_reason;
@@ -504,6 +488,7 @@ struct i915_psr {
bool enabled;
struct intel_dp *dp;
enum pipe pipe;
+ enum transcoder transcoder;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -517,24 +502,10 @@ struct i915_psr {
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
-};
-
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake PCH */
- PCH_MCC, /* Mule Creek Canyon PCH */
+ bool dc3co_enabled;
+ u32 dc3co_exit_delay;
+ struct delayed_work idle_work;
+ bool initially_probed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -570,167 +541,7 @@ struct i915_suspend_saved_registers {
u16 saveGCDGMBUS;
};
-struct vlv_s0ix_state {
- /* GAM */
- u32 wr_watermark;
- u32 gfx_prio_ctrl;
- u32 arb_mode;
- u32 gfx_pend_tlb0;
- u32 gfx_pend_tlb1;
- u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
- u32 media_max_req_count;
- u32 gfx_max_req_count;
- u32 render_hwsp;
- u32 ecochk;
- u32 bsd_hwsp;
- u32 blt_hwsp;
- u32 tlb_rd_addr;
-
- /* MBC */
- u32 g3dctl;
- u32 gsckgctl;
- u32 mbctl;
-
- /* GCP */
- u32 ucgctl1;
- u32 ucgctl3;
- u32 rcgctl1;
- u32 rcgctl2;
- u32 rstctl;
- u32 misccpctl;
-
- /* GPM */
- u32 gfxpause;
- u32 rpdeuhwtc;
- u32 rpdeuc;
- u32 ecobus;
- u32 pwrdwnupctl;
- u32 rp_down_timeout;
- u32 rp_deucsw;
- u32 rcubmabdtmr;
- u32 rcedata;
- u32 spare2gh;
-
- /* Display 1 CZ domain */
- u32 gt_imr;
- u32 gt_ier;
- u32 pm_imr;
- u32 pm_ier;
- u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
- /* GT SA CZ domain */
- u32 tilectl;
- u32 gt_fifoctl;
- u32 gtlc_wake_ctrl;
- u32 gtlc_survive;
- u32 pmwgicz;
-
- /* Display 2 CZ domain */
- u32 gu_ctl0;
- u32 gu_ctl1;
- u32 pcbr;
- u32 clock_gate_dis2;
-};
-
-struct intel_rps_ei {
- ktime_t ktime;
- u32 render_c0;
- u32 media_c0;
-};
-
-struct intel_rps {
- struct mutex lock; /* protects enabling and the worker */
-
- /*
- * work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
- */
- struct work_struct work;
- bool interrupts_enabled;
- u32 pm_iir;
-
- /* PM interrupt bits that should never be masked */
- u32 pm_intrmsk_mbz;
-
- /* Frequencies are stored in potentially platform dependent multiples.
- * In other words, *_freq needs to be multiplied by X to be interesting.
- * Soft limits are those which are used for the dynamic reclocking done
- * by the driver (raise frequencies under heavy loads, and lower for
- * lighter loads). Hard limits are those imposed by the hardware.
- *
- * A distinction is made for overclocking, which is never enabled by
- * default, and is considered to be above the hard limit if it's
- * possible at all.
- */
- u8 cur_freq; /* Current frequency (cached, may not == HW) */
- u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
- u8 max_freq_softlimit; /* Max frequency permitted by the driver */
- u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
- u8 min_freq; /* AKA RPn. Minimum frequency */
- u8 boost_freq; /* Frequency to request when wait boosting */
- u8 idle_freq; /* Frequency to request when we are idle */
- u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
- u8 rp1_freq; /* "less than" RP0 power/freqency */
- u8 rp0_freq; /* Non-overclocked max frequency. */
- u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
-
- int last_adj;
-
- struct {
- struct mutex mutex;
-
- enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
- unsigned int interactive;
-
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
- } power;
-
- bool enabled;
- atomic_t num_waiters;
- atomic_t boosts;
-
- /* manual wa residency calculations */
- struct intel_rps_ei ei;
-};
-
-struct intel_rc6 {
- bool enabled;
- u64 prev_hw_residency[4];
- u64 cur_residency[4];
-};
-
-struct intel_llc_pstate {
- bool enabled;
-};
-
-struct intel_gen6_power_mgmt {
- struct intel_rps rps;
- struct intel_rc6 rc6;
- struct intel_llc_pstate llc_pstate;
-};
-
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- u64 last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
-};
+struct vlv_s0ix_state;
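Note: with the definition moved into i915_drv.c, the header keeps only this forward declaration, so other translation units can hold a vlv_s0ix_state pointer without seeing or depending on its layout. The same opaque-handle idiom in miniature, with invented names and the "header" and "implementation" collapsed into one snippet:

/* widget.h: consumers see only an opaque handle */
struct widget;
struct widget *widget_create(void);
void widget_destroy(struct widget *w);

/* widget.c: the only place that knows the layout */
#include <stdlib.h>

struct widget {
	int payload;
};

struct widget *widget_create(void)
{
	return calloc(1, sizeof(struct widget));
}

void widget_destroy(struct widget *w)
{
	free(w);
}

int main(void)
{
	struct widget *w = widget_create();

	widget_destroy(w);
	return 0;
}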
#define MAX_L3_SLICES 2
struct intel_l3_parity {
@@ -764,7 +575,6 @@ struct i915_gem_mm {
*/
struct llist_head free_list;
struct work_struct free_work;
- spinlock_t free_lock;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -781,8 +591,7 @@ struct i915_gem_mm {
*/
struct vfsmount *gemfs;
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_ppgtt *aliasing_ppgtt;
+ struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
@@ -795,16 +604,6 @@ struct i915_gem_mm {
*/
struct workqueue_struct *userptr_wq;
- u64 unordered_timeline;
-
- /* the indicator for dispatch video commands on two BSD rings */
- atomic_t bsd_engine_dispatch_index;
-
- /** Bit 6 swizzling required for X tiling */
- u32 bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- u32 bit_6_swizzle_y;
-
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
u32 shrink_count;
@@ -820,19 +619,18 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
+/* Number of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
struct ddi_vbt_port_info {
/* Non-NULL if port present. */
const struct child_device_config *child;
int max_tmds_clock;
- /*
- * This is an index in the HDMI/DVI DDI buffer translation table.
- * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
- * populate this field.
- */
-#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
+ /* This is an index in the HDMI/DVI DDI buffer translation table. */
u8 hdmi_level_shift;
+ u8 hdmi_level_shift_set:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
@@ -923,8 +721,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
- int child_dev_num;
- struct child_device_config *child_dev;
+ struct list_head display_devices;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@@ -1073,6 +870,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_virtual_gpu {
+ struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
u32 caps;
};
@@ -1084,230 +882,15 @@ struct intel_wm_config {
bool sprites_scaled;
};
-struct i915_oa_format {
- u32 format;
- int size;
-};
-
-struct i915_oa_reg {
- i915_reg_t addr;
- u32 value;
-};
-
-struct i915_oa_config {
- char uuid[UUID_STRING_LEN + 1];
- int id;
-
- const struct i915_oa_reg *mux_regs;
- u32 mux_regs_len;
- const struct i915_oa_reg *b_counter_regs;
- u32 b_counter_regs_len;
- const struct i915_oa_reg *flex_regs;
- u32 flex_regs_len;
-
- struct attribute_group sysfs_metric;
- struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
-
- atomic_t ref_count;
-};
-
-struct i915_perf_stream;
-
-/**
- * struct i915_perf_stream_ops - the OPs to support a specific stream type
- */
-struct i915_perf_stream_ops {
- /**
- * @enable: Enables the collection of HW samples, either in response to
- * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
- * without `I915_PERF_FLAG_DISABLED`.
- */
- void (*enable)(struct i915_perf_stream *stream);
-
- /**
- * @disable: Disables the collection of HW samples, either in response
- * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
- * the stream.
- */
- void (*disable)(struct i915_perf_stream *stream);
-
- /**
- * @poll_wait: Call poll_wait, passing a wait queue that will be woken
- * once there is something ready to read() for the stream
- */
- void (*poll_wait)(struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait);
-
- /**
- * @wait_unlocked: For handling a blocking read, wait until there is
- * something to ready to read() for the stream. E.g. wait on the same
- * wait queue that would be passed to poll_wait().
- */
- int (*wait_unlocked)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy buffered metrics as records to userspace
- * **buf**: the userspace, destination buffer
- * **count**: the number of bytes to copy, requested by userspace
- * **offset**: zero at the start of the read, updated as the read
- * proceeds, it represents how many bytes have been copied so far and
- * the buffer offset for copying the next record.
- *
- * Copy as many buffered i915 perf samples and records for this stream
- * to userspace as will fit in the given buffer.
- *
- * Only write complete records; returning -%ENOSPC if there isn't room
- * for a complete record.
- *
- * Return any error condition that results in a short read such as
- * -%ENOSPC or -%EFAULT, even though these may be squashed before
- * returning to userspace.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @destroy: Cleanup any stream specific resources.
- *
- * The stream will always be disabled before this is called.
- */
- void (*destroy)(struct i915_perf_stream *stream);
-};
-
-/**
- * struct i915_perf_stream - state for a single open stream FD
- */
-struct i915_perf_stream {
- /**
- * @dev_priv: i915 drm device
- */
- struct drm_i915_private *dev_priv;
-
- /**
- * @link: Links the stream into ``&drm_i915_private->streams``
- */
- struct list_head link;
-
- /**
- * @wakeref: As we keep the device awake while the perf stream is
- * active, we track our runtime pm reference for later release.
- */
- intel_wakeref_t wakeref;
-
- /**
- * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
- * properties given when opening a stream, representing the contents
- * of a single sample as read() by userspace.
- */
- u32 sample_flags;
-
- /**
- * @sample_size: Considering the configured contents of a sample
- * combined with the required header size, this is the total size
- * of a single sample record.
- */
- int sample_size;
-
- /**
- * @ctx: %NULL if measuring system-wide across all contexts or a
- * specific context that is being monitored.
- */
- struct i915_gem_context *ctx;
-
- /**
- * @enabled: Whether the stream is currently enabled, considering
- * whether the stream was opened in a disabled state and based
- * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
- */
- bool enabled;
-
- /**
- * @ops: The callbacks providing the implementation of this specific
- * type of configured stream.
- */
- const struct i915_perf_stream_ops *ops;
-
- /**
- * @oa_config: The OA configuration used by the stream.
- */
- struct i915_oa_config *oa_config;
-};
-
-/**
- * struct i915_oa_ops - Gen specific implementation of an OA unit stream
- */
-struct i915_oa_ops {
- /**
- * @is_valid_b_counter_reg: Validates register's address for
- * programming boolean counters for a particular platform.
- */
- bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
- u32 addr);
-
- /**
- * @is_valid_mux_reg: Validates register's address for programming mux
- * for a particular platform.
- */
- bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @is_valid_flex_reg: Validates register's address for programming
- * flex EU filtering for a particular platform.
- */
- bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @enable_metric_set: Selects and applies any MUX configuration to set
- * up the Boolean and Custom (B/C) counters that are part of the
- * counter reports being sampled. May apply system constraints such as
- * disabling EU clock gating as required.
- */
- int (*enable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @disable_metric_set: Remove system constraints associated with using
- * the OA unit.
- */
- void (*disable_metric_set)(struct drm_i915_private *dev_priv);
-
- /**
- * @oa_enable: Enable periodic sampling
- */
- void (*oa_enable)(struct i915_perf_stream *stream);
-
- /**
- * @oa_disable: Disable periodic sampling
- */
- void (*oa_disable)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy data from the circular OA buffer into a given userspace
- * buffer.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @oa_hw_tail_read: read the OA tail pointer register
- *
- * In particular this enables us to share all the fiddly code for
- * handling the OA unit tail pointer race that affects multiple
- * generations.
- */
- u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
-};
-
struct intel_cdclk_state {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
};
+struct i915_selftest_stash {
+ atomic_t counter;
+};
+
struct drm_i915_private {
struct drm_device drm;
@@ -1340,6 +923,7 @@ struct drm_i915_private {
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
struct intel_uncore uncore;
+ struct intel_uncore_mmio_debug mmio_debug;
struct i915_virtual_gpu vgpu;
@@ -1347,9 +931,6 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_huc huc;
- struct intel_guc guc;
-
struct intel_csr csr;
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
@@ -1364,23 +945,19 @@ struct drm_i915_private {
*/
u32 gpio_mmio_base;
+ u32 hsw_psr_mmio_adjust;
+
/* MMIO base address for MIPI regs */
u32 mipi_mmio_base;
- u32 psr_mmio_base;
-
u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
+
struct intel_engine_cs *engine[I915_NUM_ENGINES];
- /* Context used internally to idle the GPU and setup initial state */
- struct i915_gem_context *kernel_context;
- /* Context only to be used for injecting preemption commands */
- struct i915_gem_context *preempt_context;
- struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
- [MAX_ENGINE_INSTANCE + 1];
+ struct rb_root uabi_engines;
struct resource mch_res;
@@ -1401,11 +978,6 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
- u32 gt_irq_mask;
- u32 pm_imr;
- u32 pm_ier;
- u32 pm_rps_events;
- u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1422,9 +994,6 @@ struct drm_i915_private {
/* backlight registers and fields in struct intel_panel */
struct mutex backlight_lock;
- /* LVDS info */
- bool no_aux_handshake;
-
/* protects panel power sequencer state */
struct mutex pps_mutex;
@@ -1438,13 +1007,14 @@ struct drm_i915_private {
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
+ /*
+ * For reading holding any crtc lock is sufficient,
+ * for writing must hold all of them.
+ */
struct {
/*
* The current logical cdclk state.
* See intel_atomic_state.cdclk.logical
- *
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
*/
struct intel_cdclk_state logical;
/*
@@ -1455,6 +1025,9 @@ struct drm_i915_private {
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
int force_min_cdclk;
} cdclk;
@@ -1469,6 +1042,8 @@ struct drm_i915_private {
/* ordered wq for modesets */
struct workqueue_struct *modeset_wq;
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip_wq;
/* Display functions */
struct drm_i915_display_funcs display;
@@ -1488,8 +1063,6 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- struct intel_ppat ppat;
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1511,7 +1084,11 @@ struct drm_i915_private {
*/
struct mutex dpll_lock;
- unsigned int active_crtcs;
+ /*
+ * For reading active_pipes, min_cdclk, min_voltage_level holding
+ * any crtc lock is sufficient, for writing must hold all of them.
+ */
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -1540,13 +1117,6 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- /* gen6+ GT PM state */
- struct intel_gen6_power_mgmt gt_pm;
-
- /* ilk-only ips/rps state. Everything in here is protected by the global
- * mchdev_lock in intel_pm.c */
- struct intel_ilk_power_mgmt ips;
-
struct i915_power_domains power_domains;
struct i915_psr psr;
@@ -1571,23 +1141,7 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
int audio_power_refcount;
-
- struct {
- struct mutex mutex;
- struct list_head list;
- struct llist_head free_list;
- struct work_struct free_work;
-
- /* The hw wants to have a stable context identifier for the
- * lifetime of the context (for OA, PASID, faults, etc).
- * This is limited in execlists to 21 bits.
- */
- struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
- struct list_head hw_id_list;
- } contexts;
+ u32 audio_freq_cntrl;
u32 fdi_rx_config;
@@ -1604,7 +1158,7 @@ struct drm_i915_private {
u32 suspend_count;
bool power_domains_suspended;
struct i915_suspend_saved_registers regfile;
- struct vlv_s0ix_state vlv_s0ix_state;
+ struct vlv_s0ix_state *vlv_s0ix_state;
enum {
I915_SAGV_UNKNOWN = 0,
@@ -1613,6 +1167,8 @@ struct drm_i915_private {
I915_SAGV_NOT_CONTROLLED
} sagv_status;
+ u32 sagv_block_time_us;
+
struct {
/*
* Raw watermark latency values:
@@ -1645,7 +1201,7 @@ struct drm_i915_private {
/*
* Should be held around atomic WM register writing; also
* protects intel_crtc->wm.active and
- * cstate->wm.need_postvbl_update.
+ * crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
@@ -1674,7 +1230,8 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- unsigned int deratedbw[3]; /* for each QGV point */
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
u8 num_qgv_points;
u8 num_planes;
} max_bw[6];
@@ -1683,210 +1240,35 @@ struct drm_i915_private {
struct intel_runtime_pm runtime_pm;
- struct {
- bool initialized;
-
- struct kobject *metrics_kobj;
- struct ctl_table_header *sysctl_header;
-
- /*
- * Lock associated with adding/modifying/removing OA configs
- * in dev_priv->perf.metrics_idr.
- */
- struct mutex metrics_lock;
-
- /*
- * List of dynamic configurations, you need to hold
- * dev_priv->perf.metrics_lock to access it.
- */
- struct idr metrics_idr;
-
- /*
- * Lock associated with anything below within this structure
- * except exclusive_stream.
- */
- struct mutex lock;
- struct list_head streams;
-
- struct {
- /*
- * The stream currently using the OA unit. If accessed
- * outside a syscall associated to its file
- * descriptor, you need to hold
- * dev_priv->drm.struct_mutex.
- */
- struct i915_perf_stream *exclusive_stream;
-
- struct intel_context *pinned_ctx;
- u32 specific_ctx_id;
- u32 specific_ctx_id_mask;
-
- struct hrtimer poll_check_timer;
- wait_queue_head_t poll_wq;
- bool pollin;
-
- /**
- * For rate limiting any notifications of spurious
- * invalid OA reports
- */
- struct ratelimit_state spurious_report_rs;
-
- bool periodic;
- int period_exponent;
-
- struct i915_oa_config test_config;
-
- struct {
- struct i915_vma *vma;
- u8 *vaddr;
- u32 last_ctx_id;
- int format;
- int format_size;
-
- /**
- * Locks reads and writes to all head/tail state
- *
- * Consider: the head and tail pointer state
- * needs to be read consistently from a hrtimer
- * callback (atomic context) and read() fop
- * (user context) with tail pointer updates
- * happening in atomic context and head updates
- * in user context and the (unlikely)
- * possibility of read() errors needing to
- * reset all head/tail state.
- *
- * Note: Contention or performance aren't
- * currently a significant concern here
- * considering the relatively low frequency of
- * hrtimer callbacks (5ms period) and that
- * reads typically only happen in response to a
- * hrtimer event and likely complete before the
- * next callback.
- *
- * Note: This lock is not held *while* reading
- * and copying data to userspace so the value
- * of head observed in hrtimer callbacks won't
- * represent any partial consumption of data.
- */
- spinlock_t ptr_lock;
-
- /**
- * One 'aging' tail pointer and one 'aged'
- * tail pointer ready to used for reading.
- *
- * Initial values of 0xffffffff are invalid
- * and imply that an update is required
- * (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * Index for the aged tail ready to read()
- * data up to.
- */
- unsigned int aged_tail_idx;
-
- /**
- * A monotonic timestamp for when the current
- * aging tail pointer was read; used to
- * determine when it is old enough to trust.
- */
- u64 aging_timestamp;
-
- /**
- * Although we can always read back the head
- * pointer register, we prefer to avoid
- * trusting the HW state, just to avoid any
- * risk that some hardware condition could
- * somehow bump the head pointer unpredictably
- * and cause us to forward the wrong OA buffer
- * data to userspace.
- */
- u32 head;
- } oa_buffer;
-
- u32 gen7_latched_oastatus1;
- u32 ctx_oactxctrl_offset;
- u32 ctx_flexeu0_offset;
-
- /**
- * The RPT_ID/reason field for Gen8+ includes a bit
- * to determine if the CTX ID in the report is valid
- * but the specific bit differs between Gen 8 and 9
- */
- u32 gen8_valid_ctx_bit;
-
- struct i915_oa_ops ops;
- const struct i915_oa_format *oa_formats;
- } oa;
- } perf;
+ struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
- struct {
- struct i915_gt_timelines {
- struct mutex mutex; /* protects list, tainted by GPU */
- struct list_head active_list;
-
- /* Pack multiple timelines' seqnos into the same page */
- spinlock_t hwsp_lock;
- struct list_head hwsp_free_list;
- } timelines;
+ struct intel_gt gt;
- struct list_head active_rings;
-
- struct intel_wakeref wakeref;
-
- struct list_head closed_vma;
- spinlock_t closed_lock; /* guards the list of closed_vma */
-
- /**
- * Is the GPU currently considered idle, or busy executing
- * userspace requests? Whilst idle, we allow runtime power
- * management to power down the hardware and display clocks.
- * In order to reduce the effect on performance, there
- * is a slight delay before we do so.
- */
- intel_wakeref_t awake;
-
- struct blocking_notifier_head pm_notifications;
-
- ktime_t last_init_time;
+ struct {
+ struct i915_gem_contexts {
+ spinlock_t lock; /* locks list */
+ struct list_head list;
- struct i915_vma *scratch;
- } gt;
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } contexts;
- struct {
- struct notifier_block pm_notifier;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * When we detect an idle GPU, we want to turn on
- * powersaving features. So once we see that there
- * are no more requests outstanding and no more
- * arrive within a small period of time, we fire
- * off the idle_work.
+ /*
+ * We replace the local file with a global mapping as the
+ * backing storage for the mmap is on the device and not
+ * on the struct file, and we do not want to prolong the
+ * lifetime of the local fd. To minimise the number of
+ * anonymous inodes we create, we use a global singleton to
+ * share the global mapping.
*/
- struct work_struct idle_work;
+ struct file *mmap_singleton;
} gem;
- /* For i945gm vblank irq vs. C3 workaround */
- struct {
- struct work_struct work;
- struct pm_qos_request pm_qos;
- u8 c3_disable_latency;
- u8 enabled;
- } i945gm_vblank;
+ u8 pch_ssc_use;
+
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -1910,6 +1292,8 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
+ I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1933,27 +1317,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return to_i915(dev_get_drvdata(kdev));
-}
-
-static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
-{
- return container_of(wopcm, struct drm_i915_private, wopcm);
-}
-
-static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
-{
- return container_of(guc, struct drm_i915_private, guc);
-}
-
-static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
-{
- return container_of(huc, struct drm_i915_private, huc);
+ return dev_get_drvdata(kdev);
}
-static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
+static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
- return container_of(uncore, struct drm_i915_private, uncore);
+ return pci_get_drvdata(pdev);
}
/* Simple iterator over all initialised engines */
@@ -1964,18 +1333,19 @@ static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncor
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
(tmp__) ? \
- ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
-enum hdmi_force_audio {
- HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
- HDMI_AUDIO_OFF, /* force turn off HDMI audio */
- HDMI_AUDIO_AUTO, /* trust EDID */
- HDMI_AUDIO_ON, /* force turn on HDMI audio */
-};
+#define rb_to_uabi_engine(rb) \
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
+
+#define for_each_uabi_engine(engine__, i915__) \
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
+ (engine__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
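+
+/*
+ * Example (hypothetical) usage of the iterator above; the printout is only
+ * illustrative and not part of the driver:
+ *
+ *	struct intel_engine_cs *engine;
+ *
+ *	for_each_uabi_engine(engine, i915)
+ *		pr_info("uabi engine: %s\n", engine->name);
+ */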
#define I915_GTT_OFFSET_NONE ((u32)-1)
@@ -2022,6 +1392,8 @@ enum hdmi_force_audio {
(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
INTEL_INFO(dev_priv)->gen == (n))
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -2093,6 +1465,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
}
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2127,6 +1500,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
+#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -2226,6 +1600,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
+#define TGL_REVID_A0 0x0
+
+#define IS_TGL_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
@@ -2243,9 +1622,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define VEBOX_MASK(dev_priv) \
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+ * All later gens can run the final buffer from the ppgtt
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2278,10 +1664,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
+ (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+
/* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || \
- IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+ (IS_CANNONLAKE(dev_priv) || \
+ IS_SKL_GT3(dev_priv) || \
+ IS_SKL_GT4(dev_priv))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -2323,63 +1713,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-/*
- * For now, anything with a GuC requires uCode loading, and then supports
- * command submission once loaded. But these are logically independent
- * properties, so we have separate macros to test them.
- */
-#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
-/* For now, anything with a GuC has also HuC */
-#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
-#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv)
+/* Having GuC is not the same as using GuC */
+#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
-#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
+
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -2393,9 +1739,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
+
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
-#include "i915_trace.h"
+/* Only valid when HAS_DISPLAY() is true */
+#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
static inline bool intel_vtd_active(void)
{
@@ -2418,47 +1767,20 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-void __printf(3, 4)
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...);
-
-#define i915_report_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-
#ifdef CONFIG_COMPAT
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;
-extern int i915_driver_load(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-extern void i915_driver_unload(struct drm_device *dev);
-
-extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
-extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+void i915_driver_remove(struct drm_i915_private *i915);
-static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
-{
- unsigned long delay;
-
- if (unlikely(!i915_modparams.enable_hangcheck))
- return;
-
- /* Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
+int i915_resume_switcheroo(struct drm_i915_private *i915);
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
- queue_delayed_work(system_long_wq,
- &dev_priv->gpu_error.hangcheck_work, delay);
-}
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
@@ -2470,29 +1792,32 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
+
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
- if (!atomic_read(&i915->mm.free_count))
- return;
-
- /* A single pass should suffice to release all the freed objects (along
+ /*
+ * A single pass should suffice to release all the freed objects (along
* most call paths), but be a little more paranoid in that freeing
* the objects does take a little amount of time, during which the rcu
* callbacks could have added new objects into the freed list, and
* armed the work again.
*/
- do {
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
rcu_barrier();
- } while (flush_work(&i915->mm.free_work));
+ }
}
static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
@@ -2510,6 +1835,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
*/
int pass = 3;
do {
+ flush_workqueue(i915->wq);
rcu_barrier();
i915_gem_drain_freed_objects(i915);
} while (--pass);
@@ -2523,7 +1849,10 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
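+/*
+ * Example (hypothetical) caller: drop every binding, including active ones,
+ * and retry across any concurrent address-space teardown:
+ *
+ *	err = i915_gem_object_unbind(obj,
+ *				     I915_GEM_OBJECT_UNBIND_ACTIVE |
+ *				     I915_GEM_OBJECT_UNBIND_BARRIER);
+ */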
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -2536,52 +1865,28 @@ i915_mutex_lock_interruptible(struct drm_device *dev)
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- u32 handle, u64 *offset);
-int i915_gem_mmap_gtt_version(void);
-
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits);
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-static inline bool __i915_wedged(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_WEDGED, &error->flags));
-}
-
-static inline bool i915_reset_failed(struct drm_i915_private *i915)
-{
- return __i915_wedged(&i915->gpu_error);
-}
-
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return READ_ONCE(error->reset_count);
+ return atomic_read(&error->reset_count);
}
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
- struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine)
{
- return READ_ONCE(error->reset_engine_count[engine->id]);
+ return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-
-void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
-void i915_gem_fini(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags, long timeout);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
+void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -2592,13 +1897,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags);
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
- return idr_find(&file_priv->context_idr, id);
+ return xa_load(&file_priv->context_xa, id);
}
static inline struct i915_gem_context *
@@ -2615,20 +1919,10 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
return ctx;
}
-int i915_perf_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *reg_state);
-
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
@@ -2636,65 +1930,17 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
-
-/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
-{
- wmb();
- if (INTEL_GEN(dev_priv) < 6)
- intel_gtt_chipset_flush();
-}
-
-/* i915_gem_stolen.c */
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node, u64 size,
- unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node, u64 size,
- unsigned alignment, u64 start,
- u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- resource_size_t stolen_offset,
- resource_size_t gtt_offset,
- resource_size_t size);
-
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
-/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
- unsigned long target,
- unsigned long *nr_scanned,
- unsigned flags);
-#define I915_SHRINK_UNBOUND BIT(0)
-#define I915_SHRINK_BOUND BIT(1)
-#define I915_SHRINK_ACTIVE BIT(2)
-#define I915_SHRINK_VMAPS BIT(3)
-#define I915_SHRINK_WRITEBACK BIT(4)
-
-unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
-void i915_gem_shrinker_register(struct drm_i915_private *i915);
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
- struct mutex *mutex);
-
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
@@ -2710,25 +1956,12 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool is_master);
-
-/* i915_perf.c */
-extern void i915_perf_init(struct drm_i915_private *dev_priv);
-extern void i915_perf_fini(struct drm_i915_private *dev_priv);
-extern void i915_perf_register(struct drm_i915_private *dev_priv);
-extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
-
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_i915_private *dev_priv);
-extern int i915_restore_state(struct drm_i915_private *dev_priv);
-
-/* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_i915_private *dev_priv);
-void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
+ struct i915_vma *batch,
+ u32 batch_offset,
+ u32 batch_length,
+ struct i915_vma *shadow,
+ bool trampoline);
+#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */
static inline struct intel_device_info *
@@ -2737,25 +1970,9 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-/* modesetting */
-extern void intel_modeset_init_hw(struct drm_device *dev);
-extern int intel_modeset_init(struct drm_device *dev);
-extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
- bool state);
-extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
-extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-extern struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_display_error_state *error);
-
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
@@ -2793,34 +2010,27 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-/* "Broadcast RGB" property */
-#define INTEL_BROADCAST_RGB_AUTO 0
-#define INTEL_BROADCAST_RGB_FULL 1
-#define INTEL_BROADCAST_RGB_LIMITED 2
-
-void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
-bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+/* register wait wrappers for display regs */
+#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
+ intel_wait_for_register(&(dev_priv_)->uncore, \
+ (reg_), (mask_), (value_), (timeout_))
-/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
- * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
- * perform the operation. To check beforehand, pass in the parameters to
- * to i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
- * you only need to pass in the minor offsets, page-aligned pointers are
- * always valid.
- *
- * For just checking for SSE4.1, in the foreknowledge that the future use
- * will be correctly aligned, just use i915_has_memcpy_from_wc().
- */
-#define i915_can_memcpy_from_wc(dst, src, len) \
- i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \
+ u32 mask__ = (mask_); \
+ intel_de_wait_for_register((dev_priv_), (reg_), \
+ mask__, mask__, (timeout_)); \
+})
-#define i915_has_memcpy_from_wc() \
- i915_memcpy_from_wc(NULL, NULL, 0)
+#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
+ intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
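+
+/*
+ * Example (hypothetical) use of the wrappers above, waiting up to 100ms for
+ * a placeholder status bit to clear; EXAMPLE_REG and EXAMPLE_STATUS_BIT are
+ * stand-ins, not real register definitions:
+ *
+ *	if (intel_de_wait_for_clear(dev_priv, EXAMPLE_REG, EXAMPLE_STATUS_BIT, 100))
+ *		DRM_ERROR("timed out waiting for EXAMPLE_STATUS_BIT to clear\n");
+ */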
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase);
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
@@ -2830,26 +2040,16 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
-static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
-{
- return i915_ggtt_offset(i915->gt.scratch);
-}
-
static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline void add_taint_for_CI(unsigned int taint)
+static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
{
- /*
- * The system is "ok", just about surviving for the user, but
- * CI results are now unreliable as the HW is very suspect.
- * CI checks the taint state after every test and will reboot
- * the machine if the kernel is tainted.
- */
- add_taint(taint, LOCKDEP_STILL_OK);
+ return intel_guc_is_submission_supported(guc) &&
+ intel_guc_is_running(guc);
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index 6621595fe74c..a327094de2bd 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -6,6 +6,11 @@
#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
typedef struct {
u32 val;
} uint_fixed_16_16_t;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8a659d3d7435..c2de2f45b459 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
@@ -44,36 +44,45 @@
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
-#include "gem/i915_gem_pm.h"
-#include "gem/i915_gemfs.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_region.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_mocs.h"
-#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
-#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
static int
-insert_mappable_node(struct i915_ggtt *ggtt,
- struct drm_mm_node *node, u32 size)
+insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
+ int err;
+
+ err = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (err)
+ return err;
+
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
- size, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
+ size, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ return err;
}
static void
-remove_mappable_node(struct drm_mm_node *node)
+remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(node);
+ mutex_unlock(&ggtt->vm.mutex);
}
int
@@ -85,7 +94,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- mutex_lock(&ggtt->vm.mutex);
+ if (mutex_lock_interruptible(&ggtt->vm.mutex))
+ return -EINTR;
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
@@ -100,28 +110,68 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags)
{
- struct i915_vma *vma;
+ struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
LIST_HEAD(still_in_list);
- int ret = 0;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ int ret;
+
+ if (!atomic_read(&obj->bind_count))
+ return 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ /*
+ * As some machines use ACPI to handle runtime-resume callbacks, and
+ * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
+ * as they are required by the shrinker. Ergo, we wake the device up
+ * first just in case.
+ */
+ wakeref = intel_runtime_pm_get(rpm);
+try_again:
+ ret = 0;
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
+ struct i915_address_space *vm = vma->vm;
+
list_move_tail(&vma->obj_link, &still_in_list);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
+ continue;
+
+ ret = -EAGAIN;
+ if (!i915_vm_tryopen(vm))
+ break;
+
+ /* Prevent vma being freed by i915_vma_parked as we unbind */
+ vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
- ret = i915_vma_unbind(vma);
+ if (vma) {
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))
+ ret = i915_vma_unbind(vma);
+
+ __i915_vma_put(vma);
+ }
+ i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
- list_splice(&still_in_list, &obj->vma.list);
+ list_splice_init(&still_in_list, &obj->vma.list);
spin_unlock(&obj->vma.lock);
+ if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
+ rcu_barrier(); /* flush the i915_vm_release() */
+ goto try_again;
+ }
+
+ intel_runtime_pm_put(rpm, wakeref);
+
return ret;
}
@@ -133,23 +183,25 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- /* We manually control the domain here and pretend that it
+ /*
+ * We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
return 0;
}
static int
i915_gem_create(struct drm_file *file,
- struct drm_i915_private *dev_priv,
+ struct intel_memory_region *mr,
u64 *size_p,
u32 *handle_p)
{
@@ -158,12 +210,16 @@ i915_gem_create(struct drm_file *file,
u64 size;
int ret;
- size = round_up(*size_p, PAGE_SIZE);
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
if (size == 0)
return -EINVAL;
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
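+ /*
+ * Hypothetical example: a 5000 byte request against a region with a
+ * 64KiB minimum page size rounds up to 65536 bytes, which is still
+ * PAGE_SIZE aligned (assuming a 4KiB system page), so the assertion
+ * above holds.
+ */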
+
/* Allocate the new object */
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_region(mr, size, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -183,6 +239,7 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ enum intel_memory_type mem_type;
int cpp = DIV_ROUND_UP(args->bpp, 8);
u32 format;
@@ -208,8 +265,18 @@ i915_gem_dumb_create(struct drm_file *file,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
- args->size = args->pitch * args->height;
- return i915_gem_create(file, to_i915(dev),
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
&args->size, &args->handle);
}
@@ -223,55 +290,17 @@ int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create *args = data;
- i915_gem_flush_free_objects(dev_priv);
+ i915_gem_flush_free_objects(i915);
- return i915_gem_create(file, dev_priv,
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
&args->size, &args->handle);
}
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * No actual flushing is required for the GTT write domain for reads
- * from the GTT domain. Writes to it "immediately" go to main memory
- * as far as we know, so there's no chipset flush. It also doesn't
- * land in the GPU render cache.
- *
- * However, we do have to enforce the order so that all writes through
- * the GTT land before any writes to the device, such as updates to
- * the GATT itself.
- *
- * We also have to wait a bit for the writes to land from the GTT.
- * An uncached read (i.e. mmio) seems to be ideal for the round-trip
- * timing. This issue has only been observed when switching quickly
- * between GTT writes and CPU reads from inside the kernel on recent hw,
- * and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour, until Cannonlake
- * that was!).
- */
-
- wmb();
-
- if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
- return;
-
- i915_gem_chipset_flush(dev_priv);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
- }
-}
-
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
@@ -370,33 +399,23 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
u64 remain, offset;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ node.flags = 0;
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out_unlock;
- GEM_BUG_ON(!node.allocated);
+ goto out_rpm;
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -429,12 +448,10 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
- wmb();
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
- wmb();
} else {
page_base += offset & PAGE_MASK;
}
@@ -452,18 +469,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
- if (node.allocated) {
- wmb();
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
-out_unlock:
+out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return ret;
}
@@ -570,10 +583,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
void __user *user_data;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
@@ -583,37 +592,29 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* using the cache bypass of indirect GGTT access.
*/
wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (!wakeref) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ if (!wakeref)
+ return -EFAULT;
} else {
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(rpm);
}
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ node.flags = 0;
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
- GEM_BUG_ON(!node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -631,7 +632,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_unpin;
}
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -647,8 +648,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
- wmb(); /* flush the write before we modify the GGTT */
+ if (drm_mm_node_allocated(&node)) {
+ /* flush the write before we modify the GGTT */
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -672,22 +674,20 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
- if (node.allocated) {
- wmb();
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -765,7 +765,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
return ret;
@@ -899,7 +899,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
list_for_each_entry_safe(obj, on,
&i915->ggtt.userfault_list, userfault_link)
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
/*
* The fence will be lost when the device powers down. If any were
@@ -929,93 +929,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static int wait_for_engines(struct drm_i915_private *i915)
-{
- if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
- dev_err(i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
- return -EIO;
- }
-
- return 0;
-}
-
-static long
-wait_for_timelines(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
-{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *tl;
-
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
- struct i915_request *rq;
-
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
- continue;
-
- mutex_unlock(&gt->mutex);
-
- /*
- * "Race-to-idle".
- *
- * Switching to the kernel context is often used a synchronous
- * step prior to idling, e.g. in suspend for flushing all
- * current operations to memory before sleeping. These we
- * want to complete as quickly as possible to avoid prolonged
- * stalls, so allow the gpu to boost to maximum clocks.
- */
- if (flags & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq);
-
- timeout = i915_request_wait(rq, flags, timeout);
- i915_request_put(rq);
- if (timeout < 0)
- return timeout;
-
- /* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
- }
- mutex_unlock(&gt->mutex);
-
- return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
-{
- GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
- yesno(i915->gt.awake));
-
- /* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
- return 0;
-
- timeout = wait_for_timelines(i915, flags, timeout);
- if (timeout < 0)
- return timeout;
-
- if (flags & I915_WAIT_LOCKED) {
- int err;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- err = wait_for_engines(i915);
- if (err)
- return err;
-
- i915_retire_requests(i915);
- }
-
- return 0;
-}
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -1023,26 +936,29 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.vm;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ if (i915_gem_object_never_bind_ggtt(obj))
+ return ERR_PTR(-ENODEV);
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
- /* If the required space is larger than the available
+ /*
+ * If the required space is larger than the available
* aperture, we will not able to find a slot for the
* object and unbinding the object now will be in
* vain. Worse, doing so may cause us to ping-pong
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
- if (obj->base.size > dev_priv->ggtt.mappable_end)
+ if (obj->base.size > ggtt->mappable_end)
return ERR_PTR(-E2BIG);
- /* If NONBLOCK is set the caller is optimistically
+ /*
+ * If NONBLOCK is set the caller is optimistically
* trying to cache the full object within the mappable
* aperture, and *must* have a fallback in place for
* situations where we cannot bind the object. We
@@ -1058,11 +974,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
- obj->base.size > dev_priv->ggtt.mappable_end / 2)
+ obj->base.size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
- vma = i915_vma_instance(obj, vm, view);
+ vma = i915_vma_instance(obj, &ggtt->vm, view);
if (IS_ERR(vma))
return vma;
@@ -1072,22 +988,23 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE &&
- vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+ vma->fence_size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
- WARN(i915_vma_is_pinned(vma),
- "bo is already pinned in ggtt with incorrect alignment:"
- " offset=%08x, req.alignment=%llx,"
- " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
- i915_ggtt_offset(vma), alignment,
- !!(flags & PIN_MAPPABLE),
- i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
}
+ if (vma->fence && !i915_gem_object_is_tiled(obj)) {
+ mutex_lock(&ggtt->vm.mutex);
+ ret = i915_vma_revoke_fence(vma);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
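For context on the PIN_MAPPABLE/PIN_NONBLOCK comments above, here is a minimal caller-side sketch of the optimistic-mappable pattern they describe, with a fallback when the aperture is contended (the object pointer and error handling are illustrative, not taken from this patch):

    struct i915_vma *vma;

    /* Optimistically try the mappable aperture without evicting others. */
    vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                   PIN_MAPPABLE | PIN_NONBLOCK);
    if (IS_ERR(vma))
            /* Fall back to any GGTT placement. */
            vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
    if (IS_ERR(vma))
            return PTR_ERR(vma);
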
@@ -1169,333 +1086,6 @@ out:
return err;
}
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- /*
- * As we have just resumed the machine and woken the device up from
- * deep PCI sleep (presumably D3_cold), assume the HW has been reset
- * back to defaults, recovering from whatever wedged state we left it
- * in and so worth trying to use the device once more.
- */
- if (i915_terminally_wedged(i915))
- i915_gem_unset_wedged(i915);
-
- /*
- * If we inherit context state from the BIOS or earlier occupants
- * of the GPU, the GPU may be in an inconsistent state when we
- * try to take over. The only way to remove the earlier state
- * is by resetting. However, resetting on earlier gen is tricky as
- * it may impact the display and we are uncertain about the stability
- * of the reset, so this could be applied to even earlier gen.
- */
- intel_gt_sanitize(i915, false);
-
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 5 ||
- dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
- return;
-
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_TILE_SURFACE_SWIZZLING);
-
- if (IS_GEN(dev_priv, 5))
- return;
-
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN(dev_priv, 7))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN(dev_priv, 8))
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
- else
- BUG();
-}
-
-static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
-{
- I915_WRITE(RING_CTL(base), 0);
- I915_WRITE(RING_HEAD(base), 0);
- I915_WRITE(RING_TAIL(base), 0);
- I915_WRITE(RING_START(base), 0);
-}
-
-static void init_unused_rings(struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- init_unused_ring(dev_priv, SRB2_BASE);
- init_unused_ring(dev_priv, SRB3_BASE);
- } else if (IS_GEN(dev_priv, 2)) {
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- } else if (IS_GEN(dev_priv, 3)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, PRB2_BASE);
- }
-}
-
-int i915_gem_init_hw(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- dev_priv->gt.last_init_time = ktime_get();
-
- /* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
- I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
-
- if (IS_HASWELL(dev_priv))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
-
- /* Apply the GT workarounds... */
- intel_gt_apply_workarounds(dev_priv);
- /* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(dev_priv, "init");
-
- i915_gem_init_swizzling(dev_priv);
-
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Makes sure all unused rings
- * are totally idle.
- */
- init_unused_rings(dev_priv);
-
- BUG_ON(!dev_priv->kernel_context);
- ret = i915_terminally_wedged(dev_priv);
- if (ret)
- goto out;
-
- ret = i915_ppgtt_init_hw(dev_priv);
- if (ret) {
- DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
- goto out;
- }
-
- ret = intel_wopcm_init_hw(&dev_priv->wopcm);
- if (ret) {
- DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
- goto out;
- }
-
- /* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(dev_priv);
- if (ret) {
- DRM_ERROR("Enabling uc failed (%d)\n", ret);
- goto out;
- }
-
- intel_mocs_init_l3cc_table(dev_priv);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_engines_set_scheduler_caps(dev_priv);
- return 0;
-
-out:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- return ret;
-}
-
-static int __intel_engines_record_defaults(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- struct i915_gem_engines *e;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * As we reset the gpu during very early sanitisation, the current
- * register state on the GPU should reflect its default values.
- * We load a context onto the hw (with restore-inhibit), then switch
- * over to a second context to save that default register state. We
- * can then prime every new context with that state so they all start
- * from the same default HW values.
- */
-
- ctx = i915_gem_context_create_kernel(i915, 0);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- e = i915_gem_context_lock_engines(ctx);
-
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
- struct i915_request *rq;
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_active;
- }
-
- err = 0;
- if (rq->engine->init_context)
- err = rq->engine->init_context(rq);
-
- i915_request_add(rq);
- if (err)
- goto err_active;
- }
-
- /* Flush the default context image to memory, and enable powersaving. */
- if (!i915_gem_load_power_context(i915)) {
- err = -EIO;
- goto err_active;
- }
-
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
- struct i915_vma *state = ce->state;
- void *vaddr;
-
- if (!state)
- continue;
-
- GEM_BUG_ON(intel_context_is_pinned(ce));
-
- /*
- * As we will hold a reference to the logical state, it will
- * not be torn down with the context, and importantly the
- * object will hold onto its vma (making it possible for a
- * stray GTT write to corrupt our defaults). Unmap the vma
- * from the GTT to prevent such accidents and reclaim the
- * space.
- */
- err = i915_vma_unbind(state);
- if (err)
- goto err_active;
-
- i915_gem_object_lock(state->obj);
- err = i915_gem_object_set_to_cpu_domain(state->obj, false);
- i915_gem_object_unlock(state->obj);
- if (err)
- goto err_active;
-
- engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_set_cache_coherency(engine->default_state,
- I915_CACHE_LLC);
-
- /* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_FORCE_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_active;
- }
-
- i915_gem_object_unpin_map(engine->default_state);
- }
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
- unsigned int found = intel_engines_has_context_isolation(i915);
-
- /*
- * Make sure that classes with multiple engine instances all
- * share the same basic configuration.
- */
- for_each_engine(engine, i915, id) {
- unsigned int bit = BIT(engine->uabi_class);
- unsigned int expected = engine->default_state ? bit : 0;
-
- if ((found & bit) != expected) {
- DRM_ERROR("mismatching default context state for class %d on engine %s\n",
- engine->uabi_class, engine->name);
- }
- }
- }
-
-out_ctx:
- i915_gem_context_unlock_engines(ctx);
- i915_gem_context_set_closed(ctx);
- i915_gem_context_put(ctx);
- return err;
-
-err_active:
- /*
- * If we have to abandon now, we expect the engines to be idle
- * and ready to be torn-down. The quickest way we can accomplish
- * this is by declaring ourselves wedged.
- */
- i915_gem_set_wedged(i915);
- goto out_ctx;
-}
-
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- i915->gt.scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-static void i915_gem_fini_scratch(struct drm_i915_private *i915)
-{
- i915_vma_unpin_and_release(&i915->gt.scratch, 0);
-}
-
-static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return 0;
-
- for_each_engine(engine, i915, id) {
- if (intel_engine_verify_workarounds(engine, "load"))
- err = -EIO;
- }
-
- return err;
-}
-
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -1505,77 +1095,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
- i915_timelines_init(dev_priv);
-
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
- if (ret)
- return ret;
+ intel_uc_fetch_firmwares(&dev_priv->gt.uc);
+ intel_wopcm_init(&dev_priv->wopcm);
- ret = intel_wopcm_init(&dev_priv->wopcm);
- if (ret)
- goto err_uc_misc;
-
- /* This is just a security blanket to placate dragons.
- * On some systems, we very sporadically observe that the first TLBs
- * used by the CS may be stale, despite us poking the TLB reset. If
- * we hold the forcewake during initialisation these problems
- * just magically go away.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- ret = i915_gem_init_ggtt(dev_priv);
+ ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
- ret = i915_gem_init_scratch(dev_priv,
- IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_ggtt;
- }
-
- ret = intel_engines_setup(dev_priv);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_unlock;
- }
-
- ret = i915_gem_contexts_init(dev_priv);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_scratch;
- }
-
- ret = intel_engines_init(dev_priv);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_context;
- }
-
- intel_init_gt_powersave(dev_priv);
-
- ret = intel_uc_init(dev_priv);
- if (ret)
- goto err_pm;
-
- ret = i915_gem_init_hw(dev_priv);
- if (ret)
- goto err_uc_init;
-
- /* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_gt_resume(dev_priv);
- if (ret)
- goto err_init_hw;
-
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1587,26 +1119,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_engines_verify_workarounds(dev_priv);
+ ret = intel_gt_init(&dev_priv->gt);
if (ret)
- goto err_gt;
-
- ret = __intel_engines_record_defaults(dev_priv);
- if (ret)
- goto err_gt;
-
- if (i915_inject_load_failure()) {
- ret = -ENODEV;
- goto err_gt;
- }
-
- if (i915_inject_load_failure()) {
- ret = -EIO;
- goto err_gt;
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ goto err_unlock;
return 0;
@@ -1616,120 +1131,82 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 * HW as irreversibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_gt:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_gem_set_wedged(dev_priv);
- i915_gem_suspend(dev_priv);
- i915_gem_suspend_late(dev_priv);
-
- i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
-err_init_hw:
- intel_uc_fini_hw(dev_priv);
-err_uc_init:
- intel_uc_fini(dev_priv);
-err_pm:
- if (ret != -EIO) {
- intel_cleanup_gt_powersave(dev_priv);
- intel_engines_cleanup(dev_priv);
- }
-err_context:
- if (ret != -EIO)
- i915_gem_contexts_fini(dev_priv);
-err_scratch:
- i915_gem_fini_scratch(dev_priv);
-err_ggtt:
err_unlock:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
-err_uc_misc:
- intel_uc_fini_misc(dev_priv);
+ i915_gem_drain_workqueue(dev_priv);
if (ret != -EIO) {
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
}
if (ret == -EIO) {
- mutex_lock(&dev_priv->drm.struct_mutex);
-
/*
- * Allow engine initialisation to fail by marking the GPU as
- * wedged. But we only want to do this where the GPU is angry,
+ * Allow engines or uC initialisation to fail by marking the GPU
+ * as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!i915_reset_failed(dev_priv)) {
- i915_load_error(dev_priv,
- "Failed to initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(dev_priv);
+ if (!intel_gt_is_wedged(&dev_priv->gt)) {
+ i915_probe_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ intel_gt_set_wedged(&dev_priv->gt);
}
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
i915_gem_drain_freed_objects(dev_priv);
return ret;
}
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
+void i915_gem_driver_register(struct drm_i915_private *i915)
{
- GEM_BUG_ON(dev_priv->gt.awake);
+ i915_gem_driver_register__shrinker(i915);
+
+ intel_engines_driver_register(i915);
+}
+
+void i915_gem_driver_unregister(struct drm_i915_private *i915)
+{
+ i915_gem_driver_unregister__shrinker(i915);
+}
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
+{
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_disable_gt_powersave(dev_priv);
+ intel_gt_driver_remove(&dev_priv->gt);
+ dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
i915_gem_drain_freed_objects(dev_priv);
}
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_engines_cleanup(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- i915_gem_fini_scratch(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_driver_release__contexts(dev_priv);
- intel_wa_list_free(&dev_priv->gt_wa_list);
+ intel_gt_driver_release(&dev_priv->gt);
- intel_cleanup_gt_powersave(dev_priv);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
- intel_uc_fini_misc(dev_priv);
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
-void i915_gem_init_mmio(struct drm_i915_private *i915)
-{
- i915_gem_sanitize(i915);
+ WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.obj_lock);
- spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
@@ -1739,33 +1216,12 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
i915_gem_init__objects(i915);
}
-int i915_gem_init_early(struct drm_i915_private *dev_priv)
+void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err;
-
- intel_gt_pm_init(dev_priv);
-
- INIT_LIST_HEAD(&dev_priv->gt.active_rings);
- INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
- spin_lock_init(&dev_priv->gt.closed_lock);
-
i915_gem_init__mm(dev_priv);
- i915_gem_init__pm(dev_priv);
-
- init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
- init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- mutex_init(&dev_priv->gpu_error.wedge_mutex);
- init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
- atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+ i915_gem_init__contexts(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
-
- err = i915_gemfs_init(dev_priv);
- if (err)
- DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
-
- return 0;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -1774,10 +1230,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
-
- cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
- i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -1869,39 +1321,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
return ret;
}
-/**
- * i915_gem_track_fb - update frontbuffer tracking
- * @old: current GEM buffer for the frontbuffer slots
- * @new: new GEM buffer for the frontbuffer slots
- * @frontbuffer_bits: bitmask of frontbuffer slots
- *
- * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
- * from @old and setting them in @new. Both @old and @new can be NULL.
- */
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits)
-{
- /* Control of individual bits within the mask are guarded by
- * the owning plane->mutex, i.e. we can never see concurrent
- * manipulation of individual bits. But since the bitfield as a whole
- * is updated using RMW, we need to use atomics in order to update
- * the bits.
- */
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- BITS_PER_TYPE(atomic_t));
-
- if (old) {
- WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
- atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
- }
-
- if (new) {
- WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
- atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
- }
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index fe82d3571072..1753c84d6c0d 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,17 +28,20 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
+#include <drm/drm_drv.h>
+
+#include "i915_utils.h"
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_DEBUG_GEM
-#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
+#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
- pr_err("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
- GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_DUMP(); \
BUG(); \
} \
} while(0)
@@ -64,17 +67,34 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
-#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_ERR(...) do { \
+ pr_err(__VA_ARGS__); \
+ trace_printk(__VA_ARGS__); \
+} while (0)
+#define GEM_TRACE_DUMP() \
+ do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
#define GEM_TRACE_DUMP_ON(expr) \
- do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
+ do { if (expr) GEM_TRACE_DUMP(); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_ERR(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
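As an illustration of how the tracing macros above might be used (the request wait below is a hypothetical example, not taken from this patch):

    long ret;

    ret = i915_request_wait(rq, 0, HZ);     /* wait up to a second */
    GEM_TRACE_DUMP_ON(ret == -ETIME);       /* ftrace dump (and CI taint) on timeout */
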
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+ while (!tasklet_trylock(t))
+ cpu_relax();
+}
+
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
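A brief sketch of how these helpers are typically paired; tasklet_lock() as defined here spins until the tasklet body is no longer running, and tasklet_unlock() from <linux/interrupt.h> releases it again (the execlists tasklet is used purely as an illustration):

    tasklet_lock(&execlists->tasklet);      /* exclude the softirq handler */
    GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet));
    /* ... inspect or fix up state also touched by the tasklet ... */
    tasklet_unlock(&execlists->tasklet);
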
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
deleted file mode 100644
index 25a3e4d09a2f..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "i915_gem_batch_pool.h"
-#include "i915_drv.h"
-
-/**
- * DOC: batch pool
- *
- * In order to submit batch buffers as 'secure', the software command parser
- * must ensure that a batch buffer cannot be modified after parsing. It does
- * this by copying the user provided batch buffer contents to a kernel owned
- * buffer from which the hardware will actually execute, and by carefully
- * managing the address space bindings for such buffers.
- *
- * The batch pool framework provides a mechanism for the driver to manage a
- * set of scratch buffers to use for this purpose. The framework can be
- * extended to support other use cases should they arise.
- */
-
-/**
- * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @pool: the batch buffer pool
- * @engine: the associated request submission engine
- */
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
- struct intel_engine_cs *engine)
-{
- int n;
-
- pool->engine = engine;
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
- INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-/**
- * i915_gem_batch_pool_fini() - clean up a batch buffer pool
- * @pool: the pool to clean up
- *
- * Note: Callers must hold the struct_mutex.
- */
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
-{
- int n;
-
- lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &pool->cache_list[n],
- batch_pool_link)
- i915_gem_object_put(obj);
-
- INIT_LIST_HEAD(&pool->cache_list[n]);
- }
-}
-
-/**
- * i915_gem_batch_pool_get() - allocate a buffer from the pool
- * @pool: the batch buffer pool
- * @size: the minimum desired size of the returned buffer
- *
- * Returns an inactive buffer from @pool with at least @size bytes,
- * with the pages pinned. The caller must i915_gem_object_unpin_pages()
- * on the returned object.
- *
- * Note: Callers must hold the struct_mutex
- *
- * Return: the buffer object or an error pointer
- */
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
- size_t size)
-{
- struct drm_i915_gem_object *obj;
- struct list_head *list;
- int n, ret;
-
- lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
- /* Compute a power-of-two bucket, but throw everything greater than
- * 16KiB into the same bucket: i.e. the buckets hold objects of
- * (1 page, 2 pages, 4 pages, 8+ pages).
- */
- n = fls(size >> PAGE_SHIFT) - 1;
- if (n >= ARRAY_SIZE(pool->cache_list))
- n = ARRAY_SIZE(pool->cache_list) - 1;
- list = &pool->cache_list[n];
-
- list_for_each_entry(obj, list, batch_pool_link) {
- /* The batches are strictly LRU ordered */
- if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->base.resv;
-
- if (!reservation_object_test_signaled_rcu(resv, true))
- break;
-
- i915_retire_requests(pool->engine->i915);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
-
- /*
- * The object is now idle, clear the array of shared
- * fences before we add a new request. Although, we
- * remain on the same engine, we may be on a different
- * timeline and so may continually grow the array,
- * trapping a reference to all the old fences, rather
- * than replace the existing fence.
- */
- if (rcu_access_pointer(resv->fence)) {
- reservation_object_lock(resv, NULL);
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
- }
-
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
- true));
-
- if (obj->base.size >= size)
- goto found;
- }
-
- obj = i915_gem_object_create_internal(pool->engine->i915, size);
- if (IS_ERR(obj))
- return obj;
-
-found:
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- list_move_tail(&obj->batch_pool_link, list);
- return obj;
-}
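As an aside on the bucket arithmetic the removed pool used: fls(size >> PAGE_SHIFT) - 1 maps objects of 1, 2, 4 and 8-or-more pages onto the four cache lists. A self-contained restatement of that mapping (not driver code; the clamp assumes the four-entry array above and sizes of at least one page):

    /* 1 page -> 0, 2 pages -> 1, 4 pages -> 2, >= 8 pages -> 3 */
    static int batch_pool_bucket(size_t size)
    {
            int n = fls(size >> PAGE_SHIFT) - 1;

            if (n >= 4)             /* 8+ pages share the last bucket */
                    n = 3;
            return n;
    }
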
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
deleted file mode 100644
index feeeeeaa54d8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef I915_GEM_BATCH_POOL_H
-#define I915_GEM_BATCH_POOL_H
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct intel_engine_cs;
-
-struct i915_gem_batch_pool {
- struct intel_engine_cs *engine;
- struct list_head cache_list[4];
-};
-
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
- struct intel_engine_cs *engine);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
-#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a5783c4cb98b..0697bedebeef 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -29,16 +29,16 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
-#include "intel_drv.h"
#include "i915_trace.h"
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
-static int ggtt_flush(struct drm_i915_private *i915)
+static int ggtt_flush(struct intel_gt *gt)
{
/*
* Not everything in the GGTT is tracked via vma (otherwise we
@@ -47,10 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- return i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
static bool
@@ -62,9 +59,6 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
- return false;
-
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -74,7 +68,7 @@ mark_free(struct drm_mm_scan *scan,
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
@@ -95,11 +89,10 @@ mark_free(struct drm_mm_scan *scan,
int
i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan;
struct list_head eviction_list;
struct i915_vma *vma, *next;
@@ -108,7 +101,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *active;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -128,17 +121,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
drm_mm_scan_init_with_range(&scan, &vm->mm,
- min_size, alignment, cache_level,
+ min_size, alignment, color,
start, end, mode);
- /*
- * Retire before we search the active list. Although we have
- * reasonable accuracy in our retirement lists, we may have
- * a stray pin (preventing eviction) that can only be resolved by
- * retiring.
- */
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(dev_priv);
+ intel_gt_retire_requests(vm->gt);
search_again:
active = NULL;
@@ -211,7 +197,7 @@ search_again:
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
- ret = ggtt_flush(dev_priv);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
@@ -239,12 +225,12 @@ found:
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -270,25 +256,23 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
u64 start = target->start;
u64 end = start + target->size;
struct i915_vma *vma, *next;
- bool check_color;
int ret = 0;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
trace_i915_gem_evict_node(vm, target, flags);
- /* Retire before we search the active list. Although we have
+ /*
+ * Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
*/
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(vm->i915);
+ intel_gt_retire_requests(vm->gt);
- check_color = vm->mm.color_adjust;
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start)
start -= I915_GTT_PAGE_SIZE;
@@ -305,7 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
@@ -314,7 +298,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 * abut and conflict. If they are in conflict, then we evict
* those as well to make room for our guard pages.
*/
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
if (node->start + node->size == target->start) {
if (node->color == target->color)
continue;
@@ -331,11 +315,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
- ret = -ENOSPC;
- break;
- }
-
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
@@ -360,7 +339,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -380,11 +359,9 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
*/
int i915_gem_evict_vm(struct i915_address_space *vm)
{
- struct list_head eviction_list;
- struct i915_vma *vma, *next;
- int ret;
+ int ret = 0;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict_vm(vm);
/* Switch back to the default context in order to unpin
@@ -393,28 +370,35 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
- ret = ggtt_flush(vm->i915);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
}
- INIT_LIST_HEAD(&eviction_list);
- mutex_lock(&vm->mutex);
- list_for_each_entry(vma, &vm->bound_list, vm_link) {
- if (i915_vma_is_pinned(vma))
- continue;
+ do {
+ struct i915_vma *vma, *vn;
+ LIST_HEAD(eviction_list);
- __i915_vma_pin(vma);
- list_add(&vma->evict_link, &eviction_list);
- }
- mutex_unlock(&vm->mutex);
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ __i915_vma_pin(vma);
+ list_add(&vma->evict_link, &eviction_list);
+ }
+ if (list_empty(&eviction_list))
+ break;
+
+ ret = 0;
+ list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
+ __i915_vma_unpin(vma);
+ if (ret == 0)
+ ret = __i915_vma_unbind(vma);
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ } while (ret == 0);
- ret = 0;
- list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
- __i915_vma_unpin(vma);
- if (ret == 0)
- ret = i915_vma_unbind(vma);
- }
return ret;
}
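Note that both eviction entry points now assert vm->mutex rather than struct_mutex, so the locking burden moves to the caller. A minimal, hypothetical caller sketch (error handling abbreviated):

    int err;

    mutex_lock(&vm->mutex);
    err = i915_gem_evict_vm(vm);    /* unbind everything not pinned in vm */
    mutex_unlock(&vm->mutex);
    if (err)
            return err;
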
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0bf53ac1c835..d9c34a23cd67 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -59,6 +59,16 @@
#define pipelined 0
+static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.i915;
+}
+
+static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.gt->uncore;
+}
+
static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
@@ -66,7 +76,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_GEN(fence->i915) >= 6) {
+ if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -95,7 +105,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
/*
* To w/a incoherency with non-atomic 64-bit register updates,
@@ -132,7 +142,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
- if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
else
stride /= 512;
@@ -148,7 +158,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -180,7 +190,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -191,15 +201,17 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = fence_to_i915(fence);
+
/*
* Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
- if (IS_GEN(fence->i915, 2))
+ if (IS_GEN(i915, 2))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN(fence->i915, 3))
+ else if (IS_GEN(i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -215,6 +227,8 @@ static void fence_write(struct i915_fence_reg *fence,
static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = fence->ggtt;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
@@ -230,16 +244,15 @@ static int fence_update(struct i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_active_request_retire(&vma->last_fence,
- &vma->obj->base.dev->struct_mutex);
+ ret = i915_vma_sync(vma);
if (ret)
return ret;
}
old = xchg(&fence->vma, NULL);
if (old) {
- ret = i915_active_request_retire(&old->last_fence,
- &old->obj->base.dev->struct_mutex);
+ /* XXX Ideally we would move the waiting to outside the mutex */
+ ret = i915_vma_sync(old);
if (ret) {
fence->vma = old;
return ret;
@@ -257,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence,
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move(&fence->link, &ggtt->fence_list);
}
/*
@@ -270,7 +283,7 @@ static int fence_update(struct i915_fence_reg *fence,
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
@@ -281,15 +294,15 @@ static int fence_update(struct i915_fence_reg *fence,
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
}
- intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
}
/**
- * i915_vma_put_fence - force-remove fence for a VMA
+ * i915_vma_revoke_fence - force-remove fence for a VMA
* @vma: vma to map linearly (not through a fence reg)
*
* This function force-removes any fence from the given object, which is useful
@@ -299,88 +312,68 @@ static int fence_update(struct i915_fence_reg *fence,
*
* 0 on success, negative error code on failure.
*/
-int i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
+ lockdep_assert_held(&vma->vm->mutex);
if (!fence)
return 0;
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
return -EBUSY;
return fence_update(fence, NULL);
}
-static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
+static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *fence;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
+ list_for_each_entry(fence, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
continue;
return fence;
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(i915))
+ if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
}
-/**
- * i915_vma_pin_fence - set up fencing for a vma
- * @vma: vma to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- *
- * Returns:
- *
- * 0 on success, negative error code on failure.
- */
-int i915_vma_pin_fence(struct i915_vma *vma)
+int __i915_vma_pin_fence(struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
- /*
- * Note that we revoke fences on runtime suspend. Therefore the user
- * must keep the device awake whilst using the fence.
- */
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ lockdep_assert_held(&vma->vm->mutex);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
GEM_BUG_ON(fence->vma != vma);
- fence->pin_count++;
+ atomic_inc(&fence->pin_count);
if (!fence->dirty) {
- list_move_tail(&fence->link,
- &fence->i915->ggtt.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
return 0;
}
} else if (set) {
- fence = fence_find(vma->vm->i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return PTR_ERR(fence);
- GEM_BUG_ON(fence->pin_count);
- fence->pin_count++;
- } else
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+ atomic_inc(&fence->pin_count);
+ } else {
return 0;
+ }
err = fence_update(fence, set);
if (err)
@@ -393,33 +386,76 @@ int i915_vma_pin_fence(struct i915_vma *vma)
return 0;
out_unpin:
- fence->pin_count--;
+ atomic_dec(&fence->pin_count);
+ return err;
+}
+
+/**
+ * i915_vma_pin_fence - set up fencing for a vma
+ * @vma: vma to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int i915_vma_pin_fence(struct i915_vma *vma)
+{
+ int err;
+
+ if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
+ return 0;
+
+ /*
+ * Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ err = mutex_lock_interruptible(&vma->vm->mutex);
+ if (err)
+ return err;
+
+ err = __i915_vma_pin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+
return err;
}
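Under the reworked interface the caller pins the vma first and the fence locking is handled internally; __i915_vma_pin_fence() remains for paths that already hold vm->mutex. A minimal usage sketch under those assumptions (error handling abbreviated):

    /* vma must already be pinned in the GGTT and the device held awake. */
    err = i915_vma_pin_fence(vma);
    if (err)
            return err;

    if (vma->fence) {
            /* ... detiled CPU access through the aperture ... */
            i915_vma_unpin_fence(vma);
    }
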
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* This function walks the fence regs looking for a free one and remove
* it from the fence_list. It is used to reserve fence for vGPU to use.
*/
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&ggtt->vm.mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link)
- count += !fence->pin_count;
+ list_for_each_entry(fence, &ggtt->fence_list, link)
+ count += !atomic_read(&fence->pin_count);
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return fence;
@@ -431,6 +467,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
}
list_del(&fence->link);
+
return fence;
}
@@ -442,26 +479,28 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- lockdep_assert_held(&fence->i915->drm.struct_mutex);
+ struct i915_ggtt *ggtt = fence->ggtt;
- list_add(&fence->link, &fence->i915->ggtt.fence_list);
+ lockdep_assert_held(&ggtt->vm.mutex);
+
+ list_add(&fence->link, &ggtt->fence_list);
}
/**
* i915_gem_restore_fences - restore fence state
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *i915)
+void i915_gem_restore_fences(struct i915_ggtt *ggtt)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &ggtt->fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -527,15 +566,16 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
*/
/**
- * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @i915: i915 device private
+ * detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @ggtt: Global GTT
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-static void detect_bit_6_swizzle(struct drm_i915_private *i915)
+static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -697,8 +737,8 @@ static void detect_bit_6_swizzle(struct drm_i915_private *i915)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- i915->mm.bit_6_swizzle_x = swizzle_x;
- i915->mm.bit_6_swizzle_y = swizzle_y;
+ i915->ggtt.bit_6_swizzle_x = swizzle_x;
+ i915->ggtt.bit_6_swizzle_y = swizzle_y;
}
/*
@@ -799,17 +839,20 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
int num_fences;
int i;
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
- detect_bit_6_swizzle(i915);
+ detect_bit_6_swizzle(ggtt);
- if (INTEL_GEN(i915) >= 7 &&
- !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ if (!i915_ggtt_has_aperture(ggtt))
+ num_fences = 0;
+ else if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
IS_I945G(i915) || IS_I945GM(i915) ||
@@ -819,18 +862,50 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
num_fences = 8;
if (intel_vgpu_active(i915))
- num_fences = intel_uncore_read(&i915->uncore,
+ num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
- fence->i915 = i915;
+ fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(ggtt);
+}
+
+void intel_gt_init_swizzling(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ if (INTEL_GEN(i915) < 5 ||
+ i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN(i915, 5))
+ return;
+
+ intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
+
+ if (IS_GEN(i915, 6))
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else if (IS_GEN(i915, 7))
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else if (IS_GEN(i915, 8))
+ intel_uncore_write(uncore,
+ GAMTARBMODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ else
+ MISSING_CASE(INTEL_GEN(i915));
}
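The intel_uncore_rmw() calls above replace the old I915_WRITE(reg, I915_READ(reg) | bits) sequences; intel_uncore_rmw(uncore, reg, clear, set) is roughly equivalent to the following read-modify-write:

    u32 val = intel_uncore_read(uncore, reg);

    val &= ~clear;
    val |= set;
    intel_uncore_write(uncore, reg, val);
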
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index d2da98828179..7bd521cd7cd7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -29,18 +29,18 @@
#include <linux/types.h>
struct drm_i915_gem_object;
-struct drm_i915_private;
struct i915_ggtt;
struct i915_vma;
+struct intel_gt;
struct sg_table;
#define I965_FENCE_PAGE 4096UL
struct i915_fence_reg {
struct list_head link;
- struct drm_i915_private *i915;
+ struct i915_ggtt *ggtt;
struct i915_vma *vma;
- int pin_count;
+ atomic_t pin_count;
int id;
/**
* Whether the tiling parameters for the currently
@@ -54,10 +54,10 @@ struct i915_fence_reg {
};
/* i915_gem_fence_reg.c */
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct drm_i915_private *i915);
+void i915_gem_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -66,4 +66,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_gt_init_swizzling(struct intel_gt *gt);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7015a97b1097..e039eb56900f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,26 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2010 Daniel Vetter
- * Copyright © 2011-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2020 Intel Corporation
*/
#include <linux/slab.h> /* fault-inject.h is not standalone! */
@@ -32,2291 +13,18 @@
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
+#include <asm/smp.h>
#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
-
-#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
-
-/**
- * DOC: Global GTT views
- *
- * Background and previous state
- *
- * Historically objects could exist (be bound) in global GTT space only as
- * singular instances with a view representing all of the object's backing pages
- * in a linear fashion. This view will be called a normal view.
- *
- * To support multiple views of the same object, where the number of mapped
- * pages is not equal to the backing store, or where the layout of the pages
- * is not linear, concept of a GGTT view was added.
- *
- * One example of an alternative view is a stereo display driven by a single
- * image. In this case we would have a framebuffer looking like this
- * (2x2 pages):
- *
- * 12
- * 34
- *
- * Above would represent a normal GGTT view as normally mapped for GPU or CPU
- * rendering. In contrast, fed to the display engine would be an alternative
- * view which could look something like this:
- *
- * 1212
- * 3434
- *
- * In this example both the size and layout of pages in the alternative view are
- * different from the normal view.
- *
- * Implementation and usage
- *
- * GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
- *
- * A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
- * renaming in large amounts of code. They take the struct i915_ggtt_view
- * parameter encapsulating all metadata required to implement a view.
- *
- * As a helper for callers which are only interested in the normal view,
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
- * GEM API functions, the ones not taking the view parameter, are operating on,
- * or with the normal GGTT view.
- *
- * Code wanting to add or use a new GGTT view needs to:
- *
- * 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
- * 3. Add support to i915_get_vma_pages().
- *
- * New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
- * exists for the lifetime of a VMA.
- *
- * Core API is designed to have copy semantics which means that passed in
- * struct i915_ggtt_view does not need to be persistent (left around after
- * calling the core API functions).
- *
- */
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
-static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
-
- /*
- * Note that as an uncached mmio write, this will flush the
- * WCB of the writes into the GGTT before it triggers the invalidate.
- */
- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-}
-
-static void guc_ggtt_invalidate(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
-
- gen6_ggtt_invalidate(i915);
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-}
-
-static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
-{
- intel_gtt_chipset_flush();
-}
-
-static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
-{
- i915->ggtt.invalidate(i915);
-}
-
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- u32 pte_flags;
- int err;
-
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- err = vma->vm->allocate_va_range(vma->vm,
- vma->node.start, vma->size);
- if (err)
- return err;
- }
-
- /* Applicable to VLV, and gen8+ */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- return 0;
-}
-
-static void ppgtt_unbind_vma(struct i915_vma *vma)
-{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int ppgtt_set_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->pages);
-
- vma->pages = vma->obj->mm.pages;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
-static void clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- if (vma->pages != vma->obj->mm.pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
-}
-
-static u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
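For instance, under the gen8_pte_encode() shown above, a writable mapping of a page at dma address addr with the default (LLC cached) level would be encoded as:

    gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED;
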
-
-static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
-{
- gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
- pde |= addr;
- if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE;
- else
- pde |= PPAT_UNCACHED;
- return pde;
-}
-
-#define gen8_pdpe_encode gen8_pde_encode
-#define gen8_pml4e_encode gen8_pde_encode
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static void stash_init(struct pagestash *stash)
-{
- pagevec_init(&stash->pvec);
- spin_lock_init(&stash->lock);
-}
-
-static struct page *stash_pop_page(struct pagestash *stash)
-{
- struct page *page = NULL;
-
- spin_lock(&stash->lock);
- if (likely(stash->pvec.nr))
- page = stash->pvec.pages[--stash->pvec.nr];
- spin_unlock(&stash->lock);
-
- return page;
-}
-
-static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
-{
- unsigned int nr;
-
- spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
-
- nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
- memcpy(stash->pvec.pages + stash->pvec.nr,
- pvec->pages + pvec->nr - nr,
- sizeof(pvec->pages[0]) * nr);
- stash->pvec.nr += nr;
-
- spin_unlock(&stash->lock);
-
- pvec->nr -= nr;
-}
-
-static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
-{
- struct pagevec stack;
- struct page *page;
-
- if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
- i915_gem_shrink_all(vm->i915);
-
- page = stash_pop_page(&vm->free_pages);
- if (page)
- return page;
-
- if (!vm->pt_kmap_wc)
- return alloc_page(gfp);
-
- /* Look in our global stash of WC pages... */
- page = stash_pop_page(&vm->i915->mm.wc_stash);
- if (page)
- return page;
-
- /*
- * Otherwise batch allocate pages to amortize cost of set_pages_wc.
- *
- * We have to be careful as page allocation may trigger the shrinker
- * (via direct reclaim) which will fill up the WC stash underneath us.
- * So we add our WB pages into a temporary pvec on the stack and merge
- * them into the WC stash after all the allocations are complete.
- */
- pagevec_init(&stack);
- do {
- struct page *page;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- break;
-
- stack.pages[stack.nr++] = page;
- } while (pagevec_space(&stack));
-
- if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
- page = stack.pages[--stack.nr];
-
- /* Merge spare WC pages to the global stash */
- if (stack.nr)
- stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
-
- /* Push any surplus WC pages onto the local VM stash */
- if (stack.nr)
- stash_push_pagevec(&vm->free_pages, &stack);
- }
-
- /* Return unwanted leftovers */
- if (unlikely(stack.nr)) {
- WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
- __pagevec_release(&stack);
- }
-
- return page;
-}
-
-static void vm_free_pages_release(struct i915_address_space *vm,
- bool immediate)
-{
- struct pagevec *pvec = &vm->free_pages.pvec;
- struct pagevec stack;
-
- lockdep_assert_held(&vm->free_pages.lock);
- GEM_BUG_ON(!pagevec_count(pvec));
-
- if (vm->pt_kmap_wc) {
- /*
- * When we use WC, first fill up the global stash and then,
- * only if that is full, immediately free the overflow.
- */
- stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
-
- /*
- * As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
- return;
-
- /*
- * We have to drop the lock to allow ourselves to sleep,
- * so take a copy of the pvec and clear the stash for
- * others to use it as we sleep.
- */
- stack = *pvec;
- pagevec_reinit(pvec);
- spin_unlock(&vm->free_pages.lock);
-
- pvec = &stack;
- set_pages_array_wb(pvec->pages, pvec->nr);
-
- spin_lock(&vm->free_pages.lock);
- }
-
- __pagevec_release(pvec);
-}
-
-static void vm_free_page(struct i915_address_space *vm, struct page *page)
-{
- /*
- * On !llc, we need to change the pages back to WB. We only do so
- * in bulk, so we rarely need to change the page attributes here,
- * but doing so requires a stop_machine() from deep inside arch/x86/mm.
- * To make detection of the possible sleep more likely, use an
- * unconditional might_sleep() for everybody.
- */
- might_sleep();
- spin_lock(&vm->free_pages.lock);
- while (!pagevec_space(&vm->free_pages.pvec))
- vm_free_pages_release(vm, false);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
- pagevec_add(&vm->free_pages.pvec, page);
- spin_unlock(&vm->free_pages.lock);
-}
-
-static void i915_address_space_init(struct i915_address_space *vm, int subclass)
-{
- kref_init(&vm->ref);
-
- /*
- * The vm->mutex must be reclaim safe (for use in the shrinker).
- * Do a dummy acquire now under fs_reclaim so that any allocation
- * attempt holding the lock is immediately reported by lockdep.
- */
- mutex_init(&vm->mutex);
- lockdep_set_subclass(&vm->mutex, subclass);
- i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
-
- GEM_BUG_ON(!vm->total);
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
-
- stash_init(&vm->free_pages);
-
- INIT_LIST_HEAD(&vm->unbound_list);
- INIT_LIST_HEAD(&vm->bound_list);
-}
-
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
- spin_lock(&vm->free_pages.lock);
- if (pagevec_count(&vm->free_pages.pvec))
- vm_free_pages_release(vm, true);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
- spin_unlock(&vm->free_pages.lock);
-
- drm_mm_takedown(&vm->mm);
-
- mutex_destroy(&vm->mutex);
-}
-
-static int __setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- gfp_t gfp)
-{
- p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
- if (unlikely(!p->page))
- return -ENOMEM;
-
- p->daddr = dma_map_page_attrs(vm->dma,
- p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
- vm_free_page(vm, p->page);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
-{
- return __setup_page_dma(vm, p, __GFP_HIGHMEM);
-}
-
-static void cleanup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
-{
- dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- vm_free_page(vm, p->page);
-}
-
-#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-
-#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
-#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
-
-static void fill_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u64 val)
-{
- u64 * const vaddr = kmap_atomic(p->page);
-
- memset64(vaddr, val, PAGE_SIZE / sizeof(val));
-
- kunmap_atomic(vaddr);
-}
-
-static void fill_page_dma_32(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u32 v)
-{
- fill_page_dma(vm, p, (u64)v << 32 | v);
-}
-
-static int
-setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
-{
- unsigned long size;
-
- /*
- * In order to utilize 64K pages for an object with a size < 2M, we will
- * need to support a 64K scratch page, given that every 16th entry for a
- * page-table operating in 64K mode must point to a properly aligned 64K
- * region, including any PTEs which happen to point to scratch.
- *
- * This is only relevant for the 48b PPGTT where we support
- * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
- * scratch (read-only) between all vm, we create one 64k scratch page
- * for all.
- */
- size = I915_GTT_PAGE_SIZE_4K;
- if (i915_vm_is_4lvl(vm) &&
- HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
- size = I915_GTT_PAGE_SIZE_64K;
- gfp |= __GFP_NOWARN;
- }
- gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
-
- do {
- int order = get_order(size);
- struct page *page;
- dma_addr_t addr;
-
- page = alloc_pages(gfp, order);
- if (unlikely(!page))
- goto skip;
-
- addr = dma_map_page_attrs(vm->dma,
- page, 0, size,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, addr)))
- goto free_page;
-
- if (unlikely(!IS_ALIGNED(addr, size)))
- goto unmap_page;
-
- vm->scratch_page.page = page;
- vm->scratch_page.daddr = addr;
- vm->scratch_order = order;
- return 0;
-
-unmap_page:
- dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
-free_page:
- __free_pages(page, order);
-skip:
- if (size == I915_GTT_PAGE_SIZE_4K)
- return -ENOMEM;
-
- size = I915_GTT_PAGE_SIZE_4K;
- gfp &= ~__GFP_NOWARN;
- } while (1);
-}
-
-static void cleanup_scratch_page(struct i915_address_space *vm)
-{
- struct i915_page_dma *p = &vm->scratch_page;
- int order = vm->scratch_order;
-
- dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
- PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, order);
-}
-
-static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
-{
- struct i915_page_table *pt;
-
- pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
- if (unlikely(!pt))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_px(vm, pt))) {
- kfree(pt);
- return ERR_PTR(-ENOMEM);
- }
-
- atomic_set(&pt->used, 0);
-
- return pt;
-}
-
-static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
-{
- cleanup_px(vm, pt);
- kfree(pt);
-}
-
-static void gen8_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- fill_px(vm, pt, vm->scratch_pte);
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- fill32_px(vm, pt, vm->scratch_pte);
-}
-
-static struct i915_page_directory *__alloc_pd(void)
-{
- struct i915_page_directory *pd;
-
- pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
-
- if (unlikely(!pd))
- return NULL;
-
- memset(&pd->base, 0, sizeof(pd->base));
- atomic_set(&pd->used, 0);
- spin_lock_init(&pd->lock);
-
- /* for safety */
- pd->entry[0] = NULL;
-
- return pd;
-}
-
-static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
-{
- struct i915_page_directory *pd;
-
- pd = __alloc_pd();
- if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_px(vm, pd))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
-
- return pd;
-}
-
-static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
-{
- return pd->base.page;
-}
-
-static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- if (likely(pd_has_phys_page(pd)))
- cleanup_px(vm, pd);
-
- kfree(pd);
-}
-
-static void init_pd_with_page(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_table *pt)
-{
- fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
- memset_p(pd->entry, pt, 512);
-}
-
-static void init_pd(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_directory * const to)
-{
- GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
-
- fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
- memset_p(pd->entry, to, 512);
-}
-
-/*
- * PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_engines = ALL_ENGINES;
-}
-
-/* Removes entries from a single page table, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries.
- */
-static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
- struct i915_page_table *pt,
- u64 start, u64 length)
-{
- unsigned int num_entries = gen8_pte_count(start, length);
- gen8_pte_t *vaddr;
-
- vaddr = kmap_atomic_px(pt);
- memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
- kunmap_atomic(vaddr);
-
- GEM_BUG_ON(num_entries > atomic_read(&pt->used));
- return !atomic_sub_return(num_entries, &pt->used);
-}
-
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- struct i915_page_table *pt,
- unsigned int pde)
-{
- gen8_pde_t *vaddr;
-
- vaddr = kmap_atomic_px(pd);
- vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
-}
-
-static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
-{
- struct i915_page_table *pt;
- u32 pde;
-
- gen8_for_each_pde(pt, pd, start, length, pde) {
- bool free = false;
-
- GEM_BUG_ON(pt == vm->scratch_pt);
-
- if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
- continue;
-
- spin_lock(&pd->lock);
- if (!atomic_read(&pt->used)) {
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
- pd->entry[pde] = vm->scratch_pt;
-
- GEM_BUG_ON(!atomic_read(&pd->used));
- atomic_dec(&pd->used);
- free = true;
- }
- spin_unlock(&pd->lock);
- if (free)
- free_pt(vm, pt);
- }
-
- return !atomic_read(&pd->used);
-}
-
-static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
- struct i915_page_directory *pd,
- unsigned int pdpe)
-{
- gen8_ppgtt_pdpe_t *vaddr;
-
- if (!pd_has_phys_page(pdp))
- return;
-
- vaddr = kmap_atomic_px(pdp);
- vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
-}
-
-/* Removes entries from a single page dir pointer, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries
- */
-static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
- struct i915_page_directory * const pdp,
- u64 start, u64 length)
-{
- struct i915_page_directory *pd;
- unsigned int pdpe;
-
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- bool free = false;
-
- GEM_BUG_ON(pd == vm->scratch_pd);
-
- if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
- continue;
-
- spin_lock(&pdp->lock);
- if (!atomic_read(&pd->used)) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- pdp->entry[pdpe] = vm->scratch_pd;
-
- GEM_BUG_ON(!atomic_read(&pdp->used));
- atomic_dec(&pdp->used);
- free = true;
- }
- spin_unlock(&pdp->lock);
- if (free)
- free_pd(vm, pd);
- }
-
- return !atomic_read(&pdp->used);
-}
-
-static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
-}
-
-static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
- struct i915_page_directory *pdp,
- unsigned int pml4e)
-{
- gen8_ppgtt_pml4e_t *vaddr;
-
- vaddr = kmap_atomic_px(pml4);
- vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
-}
-
-/* Removes entries from a single pml4.
- * This is the top-level structure in 4-level page tables used on gen8+.
- * Empty entries are always scratch pml4e.
- */
-static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp;
- unsigned int pml4e;
-
- GEM_BUG_ON(!i915_vm_is_4lvl(vm));
-
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- bool free = false;
- GEM_BUG_ON(pdp == vm->scratch_pdp);
-
- if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
- continue;
-
- spin_lock(&pml4->lock);
- if (!atomic_read(&pdp->used)) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- pml4->entry[pml4e] = vm->scratch_pdp;
- free = true;
- }
- spin_unlock(&pml4->lock);
- if (free)
- free_pd(vm, pdp);
- }
-}
-
-static inline struct sgt_dma {
- struct scatterlist *sg;
- dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
- struct scatterlist *sg = vma->pages->sgl;
- dma_addr_t addr = sg_dma_address(sg);
- return (struct sgt_dma) { sg, addr, addr + sg->length };
-}
-
-struct gen8_insert_pte {
- u16 pml4e;
- u16 pdpe;
- u16 pde;
- u16 pte;
-};
-
-static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
-{
- return (struct gen8_insert_pte) {
- gen8_pml4e_index(start),
- gen8_pdpe_index(start),
- gen8_pde_index(start),
- gen8_pte_index(start),
- };
-}
-
-static __always_inline bool
-gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
- struct i915_page_directory *pdp,
- struct sgt_dma *iter,
- struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
- gen8_pte_t *vaddr;
- bool ret;
-
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = i915_pd_entry(pdp, idx->pdpe);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
- do {
- vaddr[idx->pte] = pte_encode | iter->dma;
-
- iter->dma += I915_GTT_PAGE_SIZE;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg) {
- ret = false;
- break;
- }
-
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + iter->sg->length;
- }
-
- if (++idx->pte == GEN8_PTES) {
- idx->pte = 0;
-
- if (++idx->pde == I915_PDES) {
- idx->pde = 0;
-
- /* Limited by sg length for 3lvl */
- if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
- idx->pdpe = 0;
- ret = true;
- break;
- }
-
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->entry[idx->pdpe];
- }
-
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
- }
- } while (1);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
-static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
-
- gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
- cache_level, flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
- struct i915_page_directory *pml4,
- struct sgt_dma *iter,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
- u64 start = vma->node.start;
- dma_addr_t rem = iter->sg->length;
-
- do {
- struct gen8_insert_pte idx = gen8_insert_pte(start);
- struct i915_page_directory *pdp =
- i915_pdp_entry(pml4, idx.pml4e);
- struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
- unsigned int page_size;
- bool maybe_64K = false;
- gen8_pte_t encode = pte_encode;
- gen8_pte_t *vaddr;
- u16 index, max;
-
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
- IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
- rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
- index = idx.pde;
- max = I915_PDES;
- page_size = I915_GTT_PAGE_SIZE_2M;
-
- encode |= GEN8_PDE_PS_2M;
-
- vaddr = kmap_atomic_px(pd);
- } else {
- struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
-
- index = idx.pte;
- max = GEN8_PTES;
- page_size = I915_GTT_PAGE_SIZE;
-
- if (!index &&
- vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
- IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
- (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) * I915_GTT_PAGE_SIZE))
- maybe_64K = true;
-
- vaddr = kmap_atomic_px(pt);
- }
-
- do {
- GEM_BUG_ON(iter->sg->length < page_size);
- vaddr[index++] = encode | iter->dma;
-
- start += page_size;
- iter->dma += page_size;
- rem -= page_size;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg)
- break;
-
- rem = iter->sg->length;
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + rem;
-
- if (maybe_64K && index < max &&
- !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
- (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) * I915_GTT_PAGE_SIZE)))
- maybe_64K = false;
-
- if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
- break;
- }
- } while (rem >= page_size && index < max);
-
- kunmap_atomic(vaddr);
-
- /*
- * Is it safe to mark the 2M block as 64K? -- Either we have
- * filled the whole page-table with 64K entries, or we have filled
- * part of it, reached the end of the sg table, and have
- * enough padding.
- */
- if (maybe_64K &&
- (index == max ||
- (i915_vm_has_scratch_64K(vma->vm) &&
- !iter->sg && IS_ALIGNED(vma->node.start +
- vma->node.size,
- I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
- vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
- page_size = I915_GTT_PAGE_SIZE_64K;
-
- /*
- * We write all 4K page entries, even when using 64K
- * pages. In order to verify that the HW isn't cheating
- * by using the 4K PTE instead of the 64K PTE, we want
- * to remove all the surplus entries. If the HW skipped
- * the 64K PTE, it will read/write into the scratch page
- * instead - which we detect as missing results during
- * selftests.
- */
- if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
- u16 i;
-
- encode = vma->vm->scratch_pte;
- vaddr = kmap_atomic_px(i915_pt_entry(pd,
- idx.pde));
-
- for (i = 1; i < index; i += 16)
- memset64(vaddr + i, encode, 15);
-
- kunmap_atomic(vaddr);
- }
- }
-
- vma->page_sizes.gtt |= page_size;
- } while (iter->sg);
-}
-
-static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
- struct i915_page_directory * const pml4 = ppgtt->pd;
-
- if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
- flags);
- } else {
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
-
- while (gen8_ppgtt_insert_pte_entries(ppgtt,
- i915_pdp_entry(pml4, idx.pml4e++),
- &iter, &idx, cache_level,
- flags))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
- }
-}
-
-static void gen8_free_page_tables(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- int i;
-
- for (i = 0; i < I915_PDES; i++) {
- if (pd->entry[i] != vm->scratch_pt)
- free_pt(vm, pd->entry[i]);
- }
-}
-
-static int gen8_init_scratch(struct i915_address_space *vm)
-{
- int ret;
-
- /*
- * If everybody agrees not to write into the scratch page,
- * we can reuse it for all vm, keeping contexts and processes separate.
- */
- if (vm->has_read_only &&
- vm->i915->kernel_context &&
- vm->i915->kernel_context->vm) {
- struct i915_address_space *clone = vm->i915->kernel_context->vm;
-
- GEM_BUG_ON(!clone->has_read_only);
-
- vm->scratch_order = clone->scratch_order;
- vm->scratch_pte = clone->scratch_pte;
- vm->scratch_pt = clone->scratch_pt;
- vm->scratch_pd = clone->scratch_pd;
- vm->scratch_pdp = clone->scratch_pdp;
- return 0;
- }
-
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
- if (ret)
- return ret;
-
- vm->scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- vm->has_read_only);
-
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
- ret = PTR_ERR(vm->scratch_pt);
- goto free_scratch_page;
- }
-
- vm->scratch_pd = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pd)) {
- ret = PTR_ERR(vm->scratch_pd);
- goto free_pt;
- }
-
- if (i915_vm_is_4lvl(vm)) {
- vm->scratch_pdp = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pdp)) {
- ret = PTR_ERR(vm->scratch_pdp);
- goto free_pd;
- }
- }
-
- gen8_initialize_pt(vm, vm->scratch_pt);
- init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
- if (i915_vm_is_4lvl(vm))
- init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
-
- return 0;
-
-free_pd:
- free_pd(vm, vm->scratch_pd);
-free_pt:
- free_pt(vm, vm->scratch_pt);
-free_scratch_page:
- cleanup_scratch_page(vm);
-
- return ret;
-}
-
-static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- struct drm_i915_private *dev_priv = vm->i915;
- enum vgt_g2v_type msg;
- int i;
-
- if (i915_vm_is_4lvl(vm)) {
- const u64 daddr = px_dma(ppgtt->pd);
-
- I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
-
- msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
- } else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++) {
- const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
- }
-
- msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
- }
-
- I915_WRITE(vgtif_reg(g2v_notify), msg);
-
- return 0;
-}
-
-static void gen8_free_scratch(struct i915_address_space *vm)
-{
- if (!vm->scratch_page.daddr)
- return;
-
- if (i915_vm_is_4lvl(vm))
- free_pd(vm, vm->scratch_pdp);
- free_pd(vm, vm->scratch_pd);
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
-}
-
-static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory *pdp)
-{
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- int i;
-
- for (i = 0; i < pdpes; i++) {
- if (pdp->entry[i] == vm->scratch_pd)
- continue;
-
- gen8_free_page_tables(vm, pdp->entry[i]);
- free_pd(vm, pdp->entry[i]);
- }
-
- free_pd(vm, pdp);
-}
-
-static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pml4 = ppgtt->pd;
- int i;
-
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
-
- if (pdp == ppgtt->vm.scratch_pdp)
- continue;
-
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
- }
-
- free_pd(&ppgtt->vm, pml4);
-}
-
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
- if (intel_vgpu_active(i915))
- gen8_ppgtt_notify_vgt(ppgtt, false);
-
- if (i915_vm_is_4lvl(vm))
- gen8_ppgtt_cleanup_4lvl(ppgtt);
- else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
-
- gen8_free_scratch(vm);
-}
-
-static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
-{
- struct i915_page_table *pt, *alloc = NULL;
- u64 from = start;
- unsigned int pde;
- int ret = 0;
-
- spin_lock(&pd->lock);
- gen8_for_each_pde(pt, pd, start, length, pde) {
- const int count = gen8_pte_count(start, length);
-
- if (pt == vm->scratch_pt) {
- spin_unlock(&pd->lock);
-
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind;
- }
-
- if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
- gen8_initialize_pt(vm, pt);
-
- spin_lock(&pd->lock);
- if (pd->entry[pde] == vm->scratch_pt) {
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
- pd->entry[pde] = pt;
- atomic_inc(&pd->used);
- } else {
- alloc = pt;
- pt = pd->entry[pde];
- }
- }
-
- atomic_add(count, &pt->used);
- }
- spin_unlock(&pd->lock);
- goto out;
-
-unwind:
- gen8_ppgtt_clear_pd(vm, pd, from, start - from);
-out:
- if (alloc)
- free_pt(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
- struct i915_page_directory *pdp,
- u64 start, u64 length)
-{
- struct i915_page_directory *pd, *alloc = NULL;
- u64 from = start;
- unsigned int pdpe;
- int ret = 0;
-
- spin_lock(&pdp->lock);
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- if (pd == vm->scratch_pd) {
- spin_unlock(&pdp->lock);
-
- pd = fetch_and_zero(&alloc);
- if (!pd)
- pd = alloc_pd(vm);
- if (IS_ERR(pd)) {
- ret = PTR_ERR(pd);
- goto unwind;
- }
-
- init_pd_with_page(vm, pd, vm->scratch_pt);
-
- spin_lock(&pdp->lock);
- if (pdp->entry[pdpe] == vm->scratch_pd) {
- gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
- pdp->entry[pdpe] = pd;
- atomic_inc(&pdp->used);
- } else {
- alloc = pd;
- pd = pdp->entry[pdpe];
- }
- }
- atomic_inc(&pd->used);
- spin_unlock(&pdp->lock);
-
- ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
- if (unlikely(ret))
- goto unwind_pd;
-
- spin_lock(&pdp->lock);
- atomic_dec(&pd->used);
- }
- spin_unlock(&pdp->lock);
- goto out;
-
-unwind_pd:
- spin_lock(&pdp->lock);
- if (atomic_dec_and_test(&pd->used)) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- pdp->entry[pdpe] = vm->scratch_pd;
- GEM_BUG_ON(!atomic_read(&pdp->used));
- atomic_dec(&pdp->used);
- GEM_BUG_ON(alloc);
- alloc = pd; /* defer the free to after the lock */
- }
- spin_unlock(&pdp->lock);
-unwind:
- gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
-out:
- if (alloc)
- free_pd(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- return gen8_ppgtt_alloc_pdp(vm,
- i915_vm_to_ppgtt(vm)->pd, start, length);
-}
-
-static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp, *alloc = NULL;
- u64 from = start;
- int ret = 0;
- u32 pml4e;
-
- spin_lock(&pml4->lock);
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pdp == vm->scratch_pdp) {
- spin_unlock(&pml4->lock);
-
- pdp = fetch_and_zero(&alloc);
- if (!pdp)
- pdp = alloc_pd(vm);
- if (IS_ERR(pdp)) {
- ret = PTR_ERR(pdp);
- goto unwind;
- }
-
- init_pd(vm, pdp, vm->scratch_pd);
-
- spin_lock(&pml4->lock);
- if (pml4->entry[pml4e] == vm->scratch_pdp) {
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
- pml4->entry[pml4e] = pdp;
- } else {
- alloc = pdp;
- pdp = pml4->entry[pml4e];
- }
- }
- atomic_inc(&pdp->used);
- spin_unlock(&pml4->lock);
-
- ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
- if (unlikely(ret))
- goto unwind_pdp;
-
- spin_lock(&pml4->lock);
- atomic_dec(&pdp->used);
- }
- spin_unlock(&pml4->lock);
- goto out;
-
-unwind_pdp:
- spin_lock(&pml4->lock);
- if (atomic_dec_and_test(&pdp->used)) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- pml4->entry[pml4e] = vm->scratch_pdp;
- GEM_BUG_ON(alloc);
- alloc = pdp; /* defer the free until after the lock */
- }
- spin_unlock(&pml4->lock);
-unwind:
- gen8_ppgtt_clear_4lvl(vm, from, start - from);
-out:
- if (alloc)
- free_pd(vm, alloc);
- return ret;
-}
-
-static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pdp = ppgtt->pd;
- struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->vm.total;
- u64 from = start;
- unsigned int pdpe;
-
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
- goto unwind;
-
- init_pd_with_page(vm, pd, vm->scratch_pt);
- gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-
- atomic_inc(&pdp->used);
- }
-
- atomic_inc(&pdp->used); /* never remove */
-
- return 0;
-
-unwind:
- start -= from;
- gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- free_pd(vm, pd);
- }
- atomic_set(&pdp->used, 0);
- return -ENOMEM;
-}
-
-static void ppgtt_init(struct drm_i915_private *i915,
- struct i915_ppgtt *ppgtt)
-{
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
-}
-
-/*
- * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
- * with a net effect resembling a 2-level page table in normal x86 terms. Each
- * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of legacy 32b address
- * space.
- *
- */
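
The arithmetic in the comment above works out exactly: four PDPs, each pointing at 512 PDEs of 512 PTEs of 4 KiB pages, cover the full 4 GiB legacy 32-bit address space. A minimal compile-time check of just that product (C11):

#include <assert.h>

int main(void)
{
	/* 4 PDPs x 512 PDEs x 512 PTEs x 4096-byte pages == 4 GiB */
	static_assert(4ull * 512 * 512 * 4096 == (1ull << 32),
		      "gen8 3-level PPGTT spans 4 GiB");
	return 0;
}
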
-static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ppgtt *ppgtt;
- int err;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- ppgtt_init(i915, ppgtt);
-
- /*
- * From bdw, there is hw support for read-only pages in the PPGTT.
- *
- * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
- * for now.
- */
- ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
-
- /* There are only a few exceptions for gen >= 6: chv and bxt.
- * And we are not sure about the latter, so play it safe for now.
- */
- if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
- ppgtt->vm.pt_kmap_wc = true;
-
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = __alloc_pd();
- if (!ppgtt->pd) {
- err = -ENOMEM;
- goto err_free_scratch;
- }
-
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- err = setup_px(&ppgtt->vm, ppgtt->pd);
- if (err)
- goto err_free_pdp;
-
- init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
-
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
- } else {
- /*
- * We don't need to set up dma for the top level pdp, only
- * for its entries. So point the entries at scratch.
- */
- memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
- GEN8_3LVL_PDPES);
-
- if (intel_vgpu_active(i915)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pdp;
- }
-
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
- }
-
- if (intel_vgpu_active(i915))
- gen8_ppgtt_notify_vgt(ppgtt, true);
-
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
- return ppgtt;
-
-err_free_pdp:
- free_pd(&ppgtt->vm, ppgtt->pd);
-err_free_scratch:
- gen8_free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
- return ERR_PTR(err);
-}
-
-/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
-{
- /* Caller needs to make sure the write completes if necessary */
- iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- u32 ecochk, ecobits;
- enum intel_engine_id id;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
- ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev_priv)) {
- ecochk |= ECOCHK_PPGTT_WB_HSW;
- } else {
- ecochk |= ECOCHK_PPGTT_LLC_IVB;
- ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
- }
- I915_WRITE(GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, dev_priv, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
-}
-
-static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
- u32 ecochk, gab_ctl, ecobits;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
- ECOBITS_PPGTT_CACHE64B);
-
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
-
- if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-}
-
-/* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch_pte;
- unsigned int pde = first_entry / GEN6_PTES;
- unsigned int pte = first_entry % GEN6_PTES;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-
- while (num_entries) {
- struct i915_page_table * const pt =
- i915_pt_entry(ppgtt->base.pd, pde++);
- const unsigned int count = min(num_entries, GEN6_PTES - pte);
- gen6_pte_t *vaddr;
-
- GEM_BUG_ON(pt == vm->scratch_pt);
-
- num_entries -= count;
-
- GEM_BUG_ON(count > atomic_read(&pt->used));
- if (!atomic_sub_return(count, &pt->used))
- ppgtt->scan_for_unused_pt = true;
-
- /*
- * Note that the hw doesn't support removing PDE on the fly
- * (they are cached inside the context with no means to
- * invalidate the cache), so we can only reset the PTE
- * entries back to scratch.
- */
-
- vaddr = kmap_atomic_px(pt);
- memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
-
- pte = 0;
- }
-}
-
-static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pd = ppgtt->pd;
- unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
- unsigned act_pt = first_entry / GEN6_PTES;
- unsigned act_pte = first_entry % GEN6_PTES;
- const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter = sgt_dma(vma);
- gen6_pte_t *vaddr;
-
- GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt);
-
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
- do {
- vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
-
- iter.dma += I915_GTT_PAGE_SIZE;
- if (iter.dma == iter.max) {
- iter.sg = __sg_next(iter.sg);
- if (!iter.sg)
- break;
-
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
- }
-
- if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
- act_pte = 0;
- }
- } while (1);
- kunmap_atomic(vaddr);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static int gen6_alloc_va_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt, *alloc = NULL;
- intel_wakeref_t wakeref;
- u64 from = start;
- unsigned int pde;
- bool flush = false;
- int ret = 0;
-
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
- spin_lock(&pd->lock);
- gen6_for_each_pde(pt, pd, start, length, pde) {
- const unsigned int count = gen6_pte_count(start, length);
-
- if (pt == vm->scratch_pt) {
- spin_unlock(&pd->lock);
-
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind_out;
- }
-
- gen6_initialize_pt(vm, pt);
-
- spin_lock(&pd->lock);
- if (pd->entry[pde] == vm->scratch_pt) {
- pd->entry[pde] = pt;
- if (i915_vma_is_bound(ppgtt->vma,
- I915_VMA_GLOBAL_BIND)) {
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
- }
- } else {
- alloc = pt;
- pt = pd->entry[pde];
- }
- }
-
- atomic_add(count, &pt->used);
- }
- spin_unlock(&pd->lock);
-
- if (flush) {
- mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(vm->i915);
- }
-
- goto out;
-
-unwind_out:
- gen6_ppgtt_clear_range(vm, from, start - from);
-out:
- if (alloc)
- free_pt(vm, alloc);
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
- return ret;
-}
-
-static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
-{
- struct i915_address_space * const vm = &ppgtt->base.vm;
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *unused;
- u32 pde;
- int ret;
-
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
- if (ret)
- return ret;
-
- vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_NONE,
- PTE_READ_ONLY);
-
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(vm);
- return PTR_ERR(vm->scratch_pt);
- }
-
- gen6_initialize_pt(vm, vm->scratch_pt);
-
- gen6_for_all_pdes(unused, pd, pde)
- pd->entry[pde] = vm->scratch_pt;
-
- return 0;
-}
-
-static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
-{
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
-}
-
-static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt;
- u32 pde;
-
- gen6_for_all_pdes(pt, pd, pde)
- if (pt != ppgtt->base.vm.scratch_pt)
- free_pt(&ppgtt->base.vm, pt);
-}
-
-struct gen6_ppgtt_cleanup_work {
- struct work_struct base;
- struct i915_vma *vma;
-};
-
-static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
-{
- struct gen6_ppgtt_cleanup_work *work =
- container_of(wrk, typeof(*work), base);
- /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
- struct drm_i915_private *i915 = work->vma->vm->i915;
-
- mutex_lock(&i915->drm.struct_mutex);
- i915_vma_destroy(work->vma);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kfree(work);
-}
-
-static int nop_set_pages(struct i915_vma *vma)
-{
- return -ENODEV;
-}
-
-static void nop_clear_pages(struct i915_vma *vma)
-{
-}
-
-static int nop_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- return -ENODEV;
-}
-
-static void nop_unbind(struct i915_vma *vma)
-{
-}
-
-static const struct i915_vma_ops nop_vma_ops = {
- .set_pages = nop_set_pages,
- .clear_pages = nop_clear_pages,
- .bind_vma = nop_bind,
- .unbind_vma = nop_unbind,
-};
-
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
-
- /* FIXME remove the struct_mutex to bring the locking under control */
- INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
- work->vma = ppgtt->vma;
- work->vma->ops = &nop_vma_ops;
- schedule_work(&work->base);
-
- gen6_ppgtt_free_pd(ppgtt);
- gen6_ppgtt_free_scratch(vm);
- kfree(ppgtt->base.pd);
-}
-
-static int pd_vma_set_pages(struct i915_vma *vma)
-{
- vma->pages = ERR_PTR(-ENODEV);
- return 0;
-}
-
-static void pd_vma_clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- vma->pages = NULL;
-}
-
-static int pd_vma_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
- struct gen6_ppgtt *ppgtt = vma->private;
- u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
- struct i915_page_table *pt;
- unsigned int pde;
-
- ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
-
- gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(ppgtt->base.vm.i915);
-
- return 0;
-}
-
-static void pd_vma_unbind(struct i915_vma *vma)
-{
- struct gen6_ppgtt *ppgtt = vma->private;
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
- struct i915_page_table *pt;
- unsigned int pde;
-
- if (!ppgtt->scan_for_unused_pt)
- return;
-
- /* Free all no longer used page tables */
- gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
- if (atomic_read(&pt->used) || pt == scratch_pt)
- continue;
-
- free_pt(&ppgtt->base.vm, pt);
- pd->entry[pde] = scratch_pt;
- }
-
- ppgtt->scan_for_unused_pt = false;
-}
-
-static const struct i915_vma_ops pd_vma_ops = {
- .set_pages = pd_vma_set_pages,
- .clear_pages = pd_vma_clear_pages,
- .bind_vma = pd_vma_bind,
- .unbind_vma = pd_vma_unbind,
-};
-
-static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
-{
- struct drm_i915_private *i915 = ppgtt->base.vm.i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(size > ggtt->vm.total);
-
- vma = i915_vma_alloc();
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- i915_active_init(i915, &vma->active, NULL);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
-
- vma->vm = &ggtt->vm;
- vma->ops = &pd_vma_ops;
- vma->private = ppgtt;
-
- vma->size = size;
- vma->fence_size = size;
- vma->flags = I915_VMA_GGTT;
- vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
-
- INIT_LIST_HEAD(&vma->obj_link);
- INIT_LIST_HEAD(&vma->closed_link);
-
- mutex_lock(&vma->vm->mutex);
- list_add(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
-
- return vma;
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- int err;
-
- GEM_BUG_ON(ppgtt->base.vm.closed);
-
- /*
- * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
- * which will be pinned into every active context.
- * (When vma->pin_count becomes atomic, I expect we will naturally
- * need a larger, unpacked, type and kill this redundancy.)
- */
- if (ppgtt->pin_count++)
- return 0;
-
- /*
- * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- err = i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
- if (err)
- goto unpin;
-
- return 0;
-
-unpin:
- ppgtt->pin_count = 0;
- return err;
-}
-
-void gen6_ppgtt_unpin(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
- GEM_BUG_ON(!ppgtt->pin_count);
- if (--ppgtt->pin_count)
- return;
-
- i915_vma_unpin(ppgtt->vma);
-}
-
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
- if (!ppgtt->pin_count)
- return;
-
- ppgtt->pin_count = 0;
- i915_vma_unpin(ppgtt->vma);
-}
-
-static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ggtt * const ggtt = &i915->ggtt;
- struct gen6_ppgtt *ppgtt;
- int err;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- ppgtt_init(i915, &ppgtt->base);
-
- ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
- ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
-
- ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
-
- ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
- if (!ppgtt->work) {
- err = -ENOMEM;
- goto err_free;
- }
-
- ppgtt->base.pd = __alloc_pd();
- if (!ppgtt->base.pd) {
- err = -ENOMEM;
- goto err_work;
- }
-
- err = gen6_ppgtt_init_scratch(ppgtt);
- if (err)
- goto err_pd;
-
- ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
- if (IS_ERR(ppgtt->vma)) {
- err = PTR_ERR(ppgtt->vma);
- goto err_scratch;
- }
-
- return &ppgtt->base;
-
-err_scratch:
- gen6_ppgtt_free_scratch(&ppgtt->base.vm);
-err_pd:
- kfree(ppgtt->base.pd);
-err_work:
- kfree(ppgtt->work);
-err_free:
- kfree(ppgtt);
- return ERR_PTR(err);
-}
-
-static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
-{
- /* This function is for gtt related workarounds. It is
- * called on driver load and after a GPU reset, so you can place
- * workarounds here even if they get overwritten by a GPU reset.
- */
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
-
- /*
- * To support 64K PTEs we need to first enable the use of the
- * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
- * mmio, otherwise the page-walker will simply ignore the IPS bit. This
- * shouldn't be needed after GEN10.
- *
- * 64K pages were first introduced with BDW, although technically they
- * only *work* from gen9+. For pre-BDW we instead have the option for
- * 32K pages, but we don't currently have any support for it in our
- * driver.
- */
- if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(dev_priv) <= 10)
- I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
- I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
- GAMW_ECO_ENABLE_64K_IPS_FIELD);
-}
-
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
-{
- gtt_write_workarounds(dev_priv);
-
- if (IS_GEN(dev_priv, 6))
- gen6_ppgtt_enable(dev_priv);
- else if (IS_GEN(dev_priv, 7))
- gen7_ppgtt_enable(dev_priv);
-
- return 0;
-}
-
-static struct i915_ppgtt *
-__ppgtt_create(struct drm_i915_private *i915)
-{
- if (INTEL_GEN(i915) < 8)
- return gen6_ppgtt_create(i915);
- else
- return gen8_ppgtt_create(i915);
-}
-
-struct i915_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ppgtt *ppgtt;
-
- ppgtt = __ppgtt_create(i915);
- if (IS_ERR(ppgtt))
- return ppgtt;
-
- trace_i915_ppgtt_create(&ppgtt->vm);
-
- return ppgtt;
-}
-
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
-{
- struct list_head *phases[] = {
- &vm->bound_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
-
- vm->closed = true;
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
-
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- i915_vma_destroy(vma);
- }
-}
-
-void i915_vm_release(struct kref *kref)
-{
- struct i915_address_space *vm =
- container_of(kref, struct i915_address_space, ref);
-
- GEM_BUG_ON(i915_is_ggtt(vm));
- trace_i915_ppgtt_release(vm);
-
- ppgtt_destroy_vma(vm);
-
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
- vm->cleanup(vm);
- i915_address_space_fini(vm);
-
- kfree(vm);
-}
-
-/* Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *dev_priv)
-{
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
-}
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- /* Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(dev_priv) < 6)
- return;
-
- i915_check_and_clear_faults(dev_priv);
-
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
- i915_ggtt_invalidate(dev_priv);
-}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
@@ -2344,364 +52,6 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
return -ENOSPC;
}
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
-
- ggtt->invalidate(vm->i915);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_dma(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(vm->i915);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(vm->i915);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level. The object
- * will be accessible to the GPU via commands whose operands reference offsets
- * within the global GTT, as well as through the GMADR-mapped
- * BAR (dev_priv->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
- struct sgt_iter iter;
- dma_addr_t addr;
- for_each_sgt_dma(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(vm->i915);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch_pte;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- struct drm_i915_private *dev_priv = vm->i915;
-
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma *vma;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch_pte;
-
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
- flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static int ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
- intel_wakeref_t wakeref;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
-
- return 0;
-}
-
-static void ggtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- u32 pte_flags;
- int ret;
-
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
-
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = appgtt->vm.allocate_va_range(&appgtt->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
- }
-
- appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
- pte_flags);
- }
-
- if (flags & I915_VMA_GLOBAL_BIND) {
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- vma->vm->insert_entries(vma->vm, vma,
- cache_level, pte_flags);
- }
- }
-
- return 0;
-}
-
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (vma->flags & I915_VMA_GLOBAL_BIND) {
- struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
-
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-}
-
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -2710,7 +60,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+ /* XXX This does not prevent more requests being submitted! */
+ if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
+ -MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2720,1203 +72,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
-static int ggtt_set_pages(struct i915_vma *vma)
-{
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
-static void i915_gtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
-{
- if (node->allocated && node->color != color)
- *start += I915_GTT_PAGE_SIZE;
-
- /* Also leave a space between the unallocated reserved node after the
- * GTT and any objects within the GTT, i.e. we use the color adjustment
- * to insert a guard page to prevent prefetches crossing over the
- * GTT boundary.
- */
- node = list_next_entry(node, node_list);
- if (node->color != color)
- *end -= I915_GTT_PAGE_SIZE;
-}
-
-static int init_aliasing_ppgtt(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_ppgtt *ppgtt;
- int err;
-
- ppgtt = i915_ppgtt_create(i915);
- if (IS_ERR(ppgtt))
- return PTR_ERR(ppgtt);
-
- if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
- err = -ENODEV;
- goto err_ppgtt;
- }
-
- /*
- * Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
- if (err)
- goto err_ppgtt;
-
- i915->mm.aliasing_ppgtt = ppgtt;
-
- GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
- ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
-
- GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
- ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
-
- return 0;
-
-err_ppgtt:
- i915_vm_put(&ppgtt->vm);
- return err;
-}
-
-static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_ppgtt *ppgtt;
-
- ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
- if (!ppgtt)
- return;
-
- i915_vm_put(&ppgtt->vm);
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-}
-
-static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
-{
- u64 size;
- int ret;
-
- if (!USES_GUC(ggtt->vm.i915))
- return 0;
-
- GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
- size = ggtt->vm.total - GUC_GGTT_TOP;
-
- ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
-
- return ret;
-}
-
-static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
-{
- if (drm_mm_node_allocated(&ggtt->uc_fw))
- drm_mm_remove_node(&ggtt->uc_fw);
-}
-
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
-{
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently prefetches
- * past the end of the object, and we've seen multiple hangs with the
- * GPU head pointer stuck in a batchbuffer bound at the last page of the
- * aperture. One page should be enough to keep any prefetching inside
- * of the aperture.
- */
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long hole_start, hole_end;
- struct drm_mm_node *entry;
- int ret;
-
- /*
- * GuC requires all resources that we're sharing with it to be placed in
- * non-WOPCM memory. If GuC is not present or not in use we still need a
- * small bias as ring wraparound at offset 0 sometimes hangs. No idea
- * why.
- */
- ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_wopcm_guc_size(&dev_priv->wopcm));
-
- ret = intel_vgt_balloon(dev_priv);
- if (ret)
- return ret;
-
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
-
- /*
- * The upper portion of the GuC address space has a sizeable hole
- * (several MB) that is inaccessible by GuC. Reserve this range within
- * GGTT as it can comfortably hold GuC/HuC firmware images.
- */
- ret = ggtt_reserve_guc_top(ggtt);
- if (ret)
- goto err_reserve;
-
- /* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
- ggtt->vm.clear_range(&ggtt->vm, hole_start,
- hole_end - hole_start);
- }
-
- /* And finally clear the reserved guard page */
- ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
-
- if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(dev_priv);
- if (ret)
- goto err_appgtt;
- }
-
- return 0;
-
-err_appgtt:
- ggtt_release_guc_top(ggtt);
-err_reserve:
- drm_mm_remove_node(&ggtt->error_capture);
- return ret;
-}
-
-/**
- * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev_priv: i915 device
- */
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma, *vn;
- struct pagevec *pvec;
-
- ggtt->vm.closed = true;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- fini_aliasing_ppgtt(dev_priv);
-
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
-
- if (drm_mm_node_allocated(&ggtt->error_capture))
- drm_mm_remove_node(&ggtt->error_capture);
-
- ggtt_release_guc_top(ggtt);
-
- if (drm_mm_initialized(&ggtt->vm.mm)) {
- intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->vm);
- }
-
- ggtt->vm.cleanup(&ggtt->vm);
-
- pvec = &dev_priv->mm.wc_stash.pvec;
- if (pvec->nr) {
- set_pages_array_wb(pvec->pages, pvec->nr);
- __pagevec_release(pvec);
- }
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->iomap);
-
- i915_gem_cleanup_stolen(dev_priv);
-}
-
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- phys_addr_t phys_addr;
- int ret;
-
- /* For Modern GENs the PTEs and register space are split in the BAR */
- phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
-
- /*
- * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- ggtt->gsm = ioremap_nocache(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
- if (ret) {
- DRM_ERROR("Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- ggtt->vm.scratch_pte =
- ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
- I915_CACHE_NONE, 0);
-
- return 0;
-}
-
-static struct intel_ppat_entry *
-__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
-{
- struct intel_ppat_entry *entry = &ppat->entries[index];
-
- GEM_BUG_ON(index >= ppat->max_entries);
- GEM_BUG_ON(test_bit(index, ppat->used));
-
- entry->ppat = ppat;
- entry->value = value;
- kref_init(&entry->ref);
- set_bit(index, ppat->used);
- set_bit(index, ppat->dirty);
-
- return entry;
-}
-
-static void __free_ppat_entry(struct intel_ppat_entry *entry)
-{
- struct intel_ppat *ppat = entry->ppat;
- unsigned int index = entry - ppat->entries;
-
- GEM_BUG_ON(index >= ppat->max_entries);
- GEM_BUG_ON(!test_bit(index, ppat->used));
-
- entry->value = ppat->clear_value;
- clear_bit(index, ppat->used);
- set_bit(index, ppat->dirty);
-}
-
-/**
- * intel_ppat_get - get a usable PPAT entry
- * @i915: i915 device instance
- * @value: the PPAT value required by the caller
- *
- * The function searches for an existing PPAT entry that matches the required
- * value. On a perfect match, the existing PPAT entry is reused. On a partial
- * match, it checks whether a free PPAT index is available. If so, a new PPAT
- * index is allocated for the required value and the HW is updated; if not,
- * the partially matched entry is used.
- */
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value)
-{
- struct intel_ppat *ppat = &i915->ppat;
- struct intel_ppat_entry *entry = NULL;
- unsigned int scanned, best_score;
- int i;
-
- GEM_BUG_ON(!ppat->max_entries);
-
- scanned = best_score = 0;
- for_each_set_bit(i, ppat->used, ppat->max_entries) {
- unsigned int score;
-
- score = ppat->match(ppat->entries[i].value, value);
- if (score > best_score) {
- entry = &ppat->entries[i];
- if (score == INTEL_PPAT_PERFECT_MATCH) {
- kref_get(&entry->ref);
- return entry;
- }
- best_score = score;
- }
- scanned++;
- }
-
- if (scanned == ppat->max_entries) {
- if (!entry)
- return ERR_PTR(-ENOSPC);
-
- kref_get(&entry->ref);
- return entry;
- }
-
- i = find_first_zero_bit(ppat->used, ppat->max_entries);
- entry = __alloc_ppat_entry(ppat, i, value);
- ppat->update_hw(i915);
- return entry;
-}
-
-static void release_ppat(struct kref *kref)
-{
- struct intel_ppat_entry *entry =
- container_of(kref, struct intel_ppat_entry, ref);
- struct drm_i915_private *i915 = entry->ppat->i915;
-
- __free_ppat_entry(entry);
- entry->ppat->update_hw(i915);
-}
-
-/**
- * intel_ppat_put - put back a PPAT entry obtained from intel_ppat_get()
- * @entry: an intel PPAT entry
- *
- * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
- * the entry is dynamically allocated, its reference count is decreased. Once
- * the reference count reaches zero, the PPAT index becomes free again.
- */
-void intel_ppat_put(const struct intel_ppat_entry *entry)
-{
- struct intel_ppat *ppat = entry->ppat;
- unsigned int index = entry - ppat->entries;
-
- GEM_BUG_ON(!ppat->max_entries);
-
- kref_put(&ppat->entries[index].ref, release_ppat);
-}
-
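Since intel_ppat_get()/intel_ppat_put() form a kref-style pair, a hypothetical caller (a sketch for illustration only, not code taken from this patch) would look roughly like:

	static int example_use_ppat(struct drm_i915_private *i915)
	{
		const struct intel_ppat_entry *entry;

		/* Ask for a WB + LLC entry; a partial match may be returned. */
		entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
		if (IS_ERR(entry))
			return PTR_ERR(entry);

		/* entry - entry->ppat->entries is the PPAT index to program;
		 * entry->value is the value actually backing that index.
		 */

		intel_ppat_put(entry);
		return 0;
	}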
-static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
-{
- struct intel_ppat *ppat = &dev_priv->ppat;
- int i;
-
- for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
- I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
- clear_bit(i, ppat->dirty);
- }
-}
-
-static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
-{
- struct intel_ppat *ppat = &dev_priv->ppat;
- u64 pat = 0;
- int i;
-
- for (i = 0; i < ppat->max_entries; i++)
- pat |= GEN8_PPAT(i, ppat->entries[i].value);
-
- bitmap_clear(ppat->dirty, 0, ppat->max_entries);
-
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
-
-static unsigned int bdw_private_pat_match(u8 src, u8 dst)
-{
- unsigned int score = 0;
- enum {
- AGE_MATCH = BIT(0),
- TC_MATCH = BIT(1),
- CA_MATCH = BIT(2),
- };
-
- /* Cache attribute has to be matched. */
- if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
- return 0;
-
- score |= CA_MATCH;
-
- if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
- score |= TC_MATCH;
-
- if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
- score |= AGE_MATCH;
-
- if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
- return INTEL_PPAT_PERFECT_MATCH;
-
- return score;
-}
-
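To illustrate the scoring above: comparing GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3) against GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0) gives CA_MATCH | TC_MATCH (cache attribute and target cache agree, age does not), a partial match that intel_ppat_get() above may still fall back to once all PPAT indices are in use; a mismatch on the cache attribute alone scores 0 and is never reused.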
-static unsigned int chv_private_pat_match(u8 src, u8 dst)
-{
- return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
- INTEL_PPAT_PERFECT_MATCH : 0;
-}
-
-static void cnl_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = cnl_private_pat_update_hw;
- ppat->match = bdw_private_pat_match;
- ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
- __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
- __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = bdw_private_pat_update_hw;
- ppat->match = bdw_private_pat_match;
- ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
- if (!HAS_PPGTT(ppat->i915)) {
- /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * So let's disable cache for GGTT to avoid screen corruptions.
- * MOCS still can be used though.
- * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
- * before this patch, i.e. the same uncached + snooping access
- * like on gen6/7 seems to be in effect.
- * - So this just fixes blitter/render access. Again it looks
- * like it's not just uncached access, but uncached + snooping.
- * So we can still hold onto all our assumptions wrt cpu
- * clflushing on LLC machines.
- */
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
- return;
- }
-
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
- __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
- __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
- __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
- __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-static void chv_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = bdw_private_pat_update_hw;
- ppat->match = chv_private_pat_match;
- ppat->clear_value = CHV_PPAT_SNOOP;
-
- /*
- * Map WB on BDW to snooped on CHV.
- *
- * Only the snoop bit has meaning for CHV, the rest is
- * ignored.
- *
- * The hardware will never snoop for certain types of accesses:
- * - CPU GTT (GMADR->GGTT->no snoop->memory)
- * - PPGTT page tables
- * - some other special cycles
- *
- * As with BDW, we also need to consider the following for GT accesses:
- * "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * Which means we must set the snoop bit in PAT entry 0
- * in order to keep the global status page working.
- */
-
- __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 1, 0);
- __alloc_ppat_entry(ppat, 2, 0);
- __alloc_ppat_entry(ppat, 3, 0);
- __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- cleanup_scratch_page(vm);
-}
-
-static void setup_private_pat(struct drm_i915_private *dev_priv)
-{
- struct intel_ppat *ppat = &dev_priv->ppat;
- int i;
-
- ppat->i915 = dev_priv;
-
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(ppat);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(ppat);
- else
- bdw_setup_private_ppat(ppat);
-
- GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
-
- for_each_clear_bit(i, ppat->used, ppat->max_entries) {
- ppat->entries[i].value = ppat->clear_value;
- ppat->entries[i].ppat = ppat;
- set_bit(i, ppat->dirty);
- }
-
- ppat->update_hw(dev_priv);
-}
-
-static int gen8_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(dev_priv))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->vm.clear_range != nop_clear_range)
- ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
-
- /* Prevent recursively calling stop_machine() and deadlocks. */
- dev_info(dev_priv->drm.dev,
- "Disabling error capture for VT-d workaround\n");
- i915_disable_error_state(dev_priv, -ENODEV);
- }
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- ggtt->vm.pte_encode = gen8_pte_encode;
-
- setup_private_pat(dev_priv);
-
- return ggtt_probe_common(ggtt, size);
-}
-
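A worked example of the GGTT sizing in gen8_gmch_probe(), using an assumed (not fixed) GGMS field value of 3 read from SNB_GMCH_CTRL: gen8_get_total_gtt_size() returns (1 << 3) << 20 = 8 MiB of PTE space, and the probe then derives ggtt->vm.total = (8 MiB / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE = (8 MiB / 8) * 4 KiB = 4 GiB of addressable global GTT.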
-static int gen6_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
-
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /* 64/512MB is the current min/max we actually know of, but this is just
- * a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
- return -ENXIO;
- }
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(dev_priv))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(dev_priv))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(dev_priv))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static int i915_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(gmadr_base,
- ggtt->mappable_end);
-
- ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->vm.insert_page = i915_ggtt_insert_page;
- ggtt->vm.insert_entries = i915_ggtt_insert_entries;
- ggtt->vm.clear_range = i915_ggtt_clear_range;
- ggtt->vm.cleanup = i915_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- if (unlikely(ggtt->do_idle_maps))
- DRM_INFO("applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @dev_priv: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
-
- ggtt->vm.i915 = dev_priv;
- ggtt->vm.dma = &dev_priv->drm.pdev->dev;
-
- if (INTEL_GEN(dev_priv) <= 5)
- ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(dev_priv) < 8)
- ret = gen6_gmch_probe(ggtt);
- else
- ret = gen8_gmch_probe(ggtt);
- if (ret)
- return ret;
-
- if ((ggtt->vm.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->vm.total >> 20);
- ggtt->vm.total = 1ULL << 32;
- ggtt->mappable_end =
- min_t(u64, ggtt->mappable_end, ggtt->vm.total);
- }
-
- if (ggtt->mappable_end > ggtt->vm.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->vm.total);
- ggtt->mappable_end = ggtt->vm.total;
- }
-
- /* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("DSM size = %lluM\n",
- (u64)resource_size(&intel_graphics_stolen_res) >> 20);
- if (intel_vtd_active())
- DRM_INFO("VT-d active for gfx access\n");
-
- return 0;
-}
-
-/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
-
- stash_init(&dev_priv->mm.wc_stash);
-
- /* Note that we use page colouring to enforce a guard page at the
- * end of the address space. This is required as the CS may prefetch
- * beyond the end of the batch buffer, across the page boundary,
- * and beyond the end of the GTT if we do not provide a guard.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
- ggtt->vm.is_ggtt = true;
-
- /* Only VLV supports read-only GGTT mappings */
- ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
-
- if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
- ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
- dev_priv->ggtt.gmadr.start,
- dev_priv->ggtt.mappable_end)) {
- ret = -EIO;
- goto out_gtt_cleanup;
- }
-
- ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
-
- i915_ggtt_init_fences(ggtt);
-
- /*
- * Initialise stolen early so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev_priv);
- if (ret)
- goto out_gtt_cleanup;
-
- return 0;
-
-out_gtt_cleanup:
- ggtt->vm.cleanup(&ggtt->vm);
- return ret;
-}
-
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
-}
-
-void i915_ggtt_enable_guc(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
-
- i915->ggtt.invalidate = guc_ggtt_invalidate;
-
- i915_ggtt_invalidate(i915);
-}
-
-void i915_ggtt_disable_guc(struct drm_i915_private *i915)
-{
- /* XXX Temporary pardon for error unload */
- if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
- return;
-
- /* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
-
- i915->ggtt.invalidate = gen6_ggtt_invalidate;
-
- i915_ggtt_invalidate(i915);
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma, *vn;
-
- i915_check_and_clear_faults(dev_priv);
-
- mutex_lock(&ggtt->vm.mutex);
-
- /* First fill our portion of the GTT with scratch pages */
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
-
- /* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
- continue;
-
- mutex_unlock(&ggtt->vm.mutex);
-
- if (!i915_vma_unbind(vma))
- goto lock;
-
- WARN_ON(i915_vma_bind(vma,
- obj ? obj->cache_level : 0,
- PIN_UPDATE));
- if (obj) {
- i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
- }
-
-lock:
- mutex_lock(&ggtt->vm.mutex);
- }
-
- ggtt->vm.closed = false;
- i915_ggtt_invalidate(dev_priv);
-
- mutex_unlock(&ggtt->vm.mutex);
-
- if (INTEL_GEN(dev_priv) >= 8) {
- struct intel_ppat *ppat = &dev_priv->ppat;
-
- bitmap_set(ppat->dirty, 0, ppat->max_entries);
- dev_priv->ppat.update_hw(dev_priv);
- return;
- }
-}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * All we need are the DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= stride;
- }
- }
-
- return sg;
-}
-
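As a concrete example of the column walk in rotate_pages() above: with width = 2, height = 2, stride = 2 and offset = 0, source pages laid out row-major as 0 1 / 2 3 are emitted in the order 2, 0, 3, 1, i.e. each source column is walked from the bottom row upwards to build the rotated view.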
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_rotation_info_size(rot_info);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].stride, st, sg);
- }
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int row;
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * All we need are the DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += stride - width;
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_remapped_info_size(rem_info);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- sg = remap_pages(obj, rem_info->plane[i].offset,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].stride, st, sg);
- }
-
- i915_sg_trim(st);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
-{
- struct sg_table *st;
- struct scatterlist *sg, *iter;
- unsigned int count = view->partial.size;
- unsigned int offset;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
- st->nents = 0;
- do {
- unsigned int len;
-
- len = min(iter->length - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
- }
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
- return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
-{
- int ret;
-
- /* The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
-
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- /* fall through */
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
-
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
-
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
-
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
-
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
-}
-
/**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
@@ -3953,7 +108,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -4042,7 +197,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 offset;
int err;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
+
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
@@ -4050,7 +206,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -4093,7 +249,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (flags & PIN_NOEVICT)
return -ENOSPC;
- /* No free space, pick a slot at random.
+ /*
+ * No free space, pick a slot at random.
*
* There is a pathological case here using a GTT shared between
* mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
@@ -4121,6 +278,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (flags & PIN_NOSEARCH)
+ return -ENOSPC;
+
/* Randomly selected placement is pinned, do a search */
err = i915_gem_evict_something(vm, size, alignment, color,
start, end, flags);
@@ -4133,6 +293,5 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 812717ccc69b..f6226df9f972 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -1,682 +1,21 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Please try to maintain the following order within this file unless it makes
- * sense to do otherwise. From top to bottom:
- * 1. typedefs
- * 2. #defines, and macros
- * 3. structure definitions
- * 4. function prototypes
- *
- * Within each section, please try to order by generation in ascending order,
- * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ * Copyright © 2020 Intel Corporation
*/
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
-#include <linux/mm.h>
-#include <linux/pagevec.h>
+#include <linux/types.h>
-#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_request.h"
-#include "i915_scatterlist.h"
-#include "i915_selftest.h"
-#include "i915_timeline.h"
-
-#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
-#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
-#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
-
-#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
-#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
-
-#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+#include <drm/drm_mm.h>
-#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
-
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
+#include "gt/intel_gtt.h"
+#include "i915_scatterlist.h"
-struct drm_i915_file_private;
struct drm_i915_gem_object;
-struct i915_vma;
-
-typedef u32 gen6_pte_t;
-typedef u64 gen8_pte_t;
-typedef u64 gen8_pde_t;
-typedef u64 gen8_ppgtt_pdpe_t;
-typedef u64 gen8_ppgtt_pml4e_t;
-
-#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
-
-/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define GEN6_PTE_VALID (1 << 0)
-
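A small worked example of the gen6 address packing noted above: GEN6_GTT_ADDR_ENCODE(0x1_0000_0000) = 0x1_0000_0000 | ((0x1_0000_0000 >> 28) & 0xff0) = 0x1_0000_0010, so physical address bit 32 lands in PTE bit 4; the bits above 31 are then simply truncated when the value is stored in the 32-bit gen6_pte_t.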
-#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
-#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
-#define I915_PDES 512
-#define I915_PDE_MASK (I915_PDES - 1)
-#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
-
-#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
-#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
-#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
-#define GEN6_PDE_SHIFT 22
-#define GEN6_PDE_VALID (1 << 0)
-
-#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
-
-#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-#define BYT_PTE_WRITEABLE (1 << 1)
-
-/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
- * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
- */
-#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
- (((bits) & 0x8) << (11 - 3)))
-#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
-#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
-#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
-#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
-#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
-#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
-#define HSW_PTE_UNCACHED (0)
-#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
-#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-
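To make the packing above concrete: HSW_WB_ELLC_LLC_AGE3 = HSW_CACHEABILITY_CONTROL(0x8) = ((0x8 & 0x7) << 1) | ((0x8 & 0x8) << (11 - 3)) = 0x800, i.e. only PTE bit 11 is set, matching the note that the fourth control bit lives in bit 11 while the low three bits occupy bits 3:1.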
-/* GEN8 32b style address is defined as a 3 level page table:
- * 31:30 | 29:21 | 20:12 | 11:0
- * PDPE | PDE | PTE | offset
- * The difference as compared to normal x86 3 level page table is the PDPEs are
- * programmed via register.
- */
-#define GEN8_3LVL_PDPES 4
-#define GEN8_PDE_SHIFT 21
-#define GEN8_PDE_MASK 0x1ff
-#define GEN8_PTE_SHIFT 12
-#define GEN8_PTE_MASK 0x1ff
-#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-
-/* GEN8 48b style address is defined as a 4 level page table:
- * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
- * PML4E | PDPE | PDE | PTE | offset
- */
-#define GEN8_PML4ES_PER_PML4 512
-#define GEN8_PML4E_SHIFT 39
-#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
-#define GEN8_PDPE_SHIFT 30
-/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
- * tables */
-#define GEN8_PDPE_MASK 0x1ff
-
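A worked decode of the 4-level layout above, using the arbitrary GPU address 0x1_2345_6000: page offset = 0x000 (bits 11:0), PTE index = (addr >> 12) & 0x1ff = 0x56, PDE index = (addr >> 21) & 0x1ff = 0x11a, PDPE index = (addr >> 30) & 0x1ff = 0x4, PML4E index = (addr >> 39) & 0x1ff = 0; this is exactly what the gen8_pte_index()/gen8_pde_index()/gen8_pdpe_index()/gen8_pml4e_index() helpers further down compute.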
-#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE 0 /* WB LLC */
-#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
-
-#define CHV_PPAT_SNOOP (1<<6)
-#define GEN8_PPAT_AGE(x) ((x)<<4)
-#define GEN8_PPAT_LLCeLLC (3<<2)
-#define GEN8_PPAT_LLCELLC (2<<2)
-#define GEN8_PPAT_LLC (1<<2)
-#define GEN8_PPAT_WB (3<<0)
-#define GEN8_PPAT_WT (2<<0)
-#define GEN8_PPAT_WC (1<<0)
-#define GEN8_PPAT_UC (0<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
-#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
-
-#define GEN8_PPAT_GET_CA(x) ((x) & 3)
-#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
-#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
-#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
-
-#define GEN8_PDE_IPS_64K BIT(11)
-#define GEN8_PDE_PS_2M BIT(7)
-
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
- __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
-
-struct intel_remapped_plane_info {
- /* in gtt pages */
- unsigned int width, height, stride, offset;
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[2];
- unsigned int unused_mbz;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-struct intel_partial_info {
- u64 offset;
- unsigned int size;
-} __packed;
-
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
-
- /* Check that rotation/remapped shares offsets for simplicity */
- BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
- offsetof(struct intel_rotation_info, plane[0]));
- BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
- offsetofend(struct intel_rotation_info, plane[1]));
-
- /* As we encode the size of each branch inside the union into its type,
- * we have to be careful that each branch has a unique size.
- */
- switch ((enum i915_ggtt_view_type)0) {
- case I915_GGTT_VIEW_NORMAL:
- case I915_GGTT_VIEW_PARTIAL:
- case I915_GGTT_VIEW_ROTATED:
- case I915_GGTT_VIEW_REMAPPED:
- /* gcc complains if these are identical cases */
- break;
- }
-}
-
-struct i915_ggtt_view {
- enum i915_ggtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_partial_info partial;
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
-
-enum i915_cache_level;
-
-struct i915_vma;
-
-struct i915_page_dma {
- struct page *page;
- union {
- dma_addr_t daddr;
-
- /* For gen6/gen7 only. This is the offset in the GGTT
- * where the page directory entries for PPGTT begin
- */
- u32 ggtt_offset;
- };
-};
-
-#define px_base(px) (&(px)->base)
-#define px_dma(px) (px_base(px)->daddr)
-
-struct i915_page_table {
- struct i915_page_dma base;
- atomic_t used;
-};
-
-struct i915_page_directory {
- struct i915_page_dma base;
- atomic_t used;
- spinlock_t lock;
- void *entry[512];
-};
-
-struct i915_vma_ops {
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- /*
- * Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page.
- */
- void (*unbind_vma)(struct i915_vma *vma);
-
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
-};
-
-struct pagestash {
- spinlock_t lock;
- struct pagevec pvec;
-};
-
-struct i915_address_space {
- struct kref ref;
-
- struct drm_mm mm;
- struct drm_i915_private *i915;
- struct device *dma;
- /* Every address space belongs to a struct file - except for the global
- * GTT that is owned by the driver (and so @file is set to NULL). In
- * principle, no information should leak from one context to another
- * (or between files/processes etc) unless explicitly shared by the
- * owner. Tracking the owner is important in order to free up per-file
- * objects along with the file, to aid resource tracking, and to
- * assign blame.
- */
- struct drm_i915_file_private *file;
- u64 total; /* size addr space maps (ex. 2GB for ggtt) */
- u64 reserved; /* size addr space reserved */
-
- bool closed;
-
- struct mutex mutex; /* protects vma and our lists */
-#define VM_CLASS_GGTT 0
-#define VM_CLASS_PPGTT 1
-
- u64 scratch_pte;
- int scratch_order;
- struct i915_page_dma scratch_page;
- struct i915_page_table *scratch_pt;
- struct i915_page_directory *scratch_pd;
- struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */
-
- /**
- * List of vma currently bound.
- */
- struct list_head bound_list;
-
- /**
- * List of vma that are not unbound.
- */
- struct list_head unbound_list;
-
- struct pagestash free_pages;
-
- /* Global GTT */
- bool is_ggtt:1;
-
- /* Some systems require uncached updates of the page directories */
- bool pt_kmap_wc:1;
-
- /* Some systems support read-only mappings for GGTT and/or PPGTT */
- bool has_read_only:1;
-
- u64 (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags); /* Create a valid PTE */
-#define PTE_READ_ONLY (1<<0)
-
- int (*allocate_va_range)(struct i915_address_space *vm,
- u64 start, u64 length);
- void (*clear_range)(struct i915_address_space *vm,
- u64 start, u64 length);
- void (*insert_page)(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 flags);
- void (*insert_entries)(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- void (*cleanup)(struct i915_address_space *vm);
-
- struct i915_vma_ops vma_ops;
-
- I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
- I915_SELFTEST_DECLARE(bool scrub_64K);
-};
-
-#define i915_is_ggtt(vm) ((vm)->is_ggtt)
-
-static inline bool
-i915_vm_is_4lvl(const struct i915_address_space *vm)
-{
- return (vm->total - 1) >> 32;
-}
-
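i915_vm_is_4lvl() above is purely a size check: a 4 GiB address space gives (2^32 - 1) >> 32 = 0 and is treated as 3-level, while any space larger than 4 GiB yields a non-zero result and takes the 4-level (48b) paths.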
-static inline bool
-i915_vm_has_scratch_64K(struct i915_address_space *vm)
-{
- return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
-}
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_ggtt {
- struct i915_address_space vm;
-
- struct io_mapping iomap; /* Mapping to our CPU mappable region */
- struct resource gmadr; /* GMADR resource */
- resource_size_t mappable_end; /* End offset that we can CPU map */
-
- /** "Graphics Stolen Memory" holds the global PTEs */
- void __iomem *gsm;
- void (*invalidate)(struct drm_i915_private *dev_priv);
-
- bool do_idle_maps;
-
- int mtrr;
-
- u32 pin_bias;
-
- unsigned int num_fences;
- struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
- struct list_head fence_list;
-
- /** List of all objects in gtt_space, currently mmaped by userspace.
- * All objects within this list must also be on bound_list.
- */
- struct list_head userfault_list;
-
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
- struct drm_mm_node error_capture;
- struct drm_mm_node uc_fw;
-};
-
-struct i915_ppgtt {
- struct i915_address_space vm;
-
- intel_engine_mask_t pd_dirty_engines;
- struct i915_page_directory *pd;
-};
-
-struct gen6_ppgtt {
- struct i915_ppgtt base;
-
- struct i915_vma *vma;
- gen6_pte_t __iomem *pd_addr;
-
- unsigned int pin_count;
- bool scan_for_unused_pt;
-
- struct gen6_ppgtt_cleanup_work *work;
-};
-
-#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
-
-static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
-{
- BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
- return __to_gen6_ppgtt(base);
-}
-
-/*
- * gen6_for_each_pde() iterates over every pde from start until start+length.
- * If start and start+length are not aligned to a PDE boundary, the macro will
- * round down and up as needed. Start=0 and length=2G effectively iterates over
- * every PDE in the system. The macro modifies ALL its parameters except 'pd',
- * so each of the other parameters should preferably be a simple variable, or
- * at most an lvalue with no side-effects!
- */
-#define gen6_for_each_pde(pt, pd, start, length, iter) \
- for (iter = gen6_pde_index(start); \
- length > 0 && iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen6_for_all_pdes(pt, pd, iter) \
- for (iter = 0; \
- iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ++iter)
-
-static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
-{
- const u32 mask = NUM_PTE(pde_shift) - 1;
-
- return (address >> PAGE_SHIFT) & mask;
-}
-
-/* Helper to count the number of PTEs within the given length. This count
- * does not cross a page table boundary, so the max value would be
- * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
- */
-static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
-{
- const u64 mask = ~((1ULL << pde_shift) - 1);
- u64 end;
-
- GEM_BUG_ON(length == 0);
- GEM_BUG_ON(offset_in_page(addr | length));
-
- end = addr + length;
-
- if ((addr & mask) != (end & mask))
- return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
-
- return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
-}
-
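A quick sanity example for i915_pte_count() with GEN6_PDE_SHIFT (22, i.e. 4 MiB covered per PDE): for addr = 0x3ff000 and length = 0x2000 the range crosses a PDE boundary, so the helper returns NUM_PTE(22) - i915_pte_index(0x3ff000, 22) = 1024 - 1023 = 1, the single PTE left in the current page table; per-PDE callers (e.g. loops built on gen6_for_each_pde() above) pick up the remaining page on the next iteration.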
-static inline u32 i915_pde_index(u64 addr, u32 shift)
-{
- return (addr >> shift) & I915_PDE_MASK;
-}
-
-static inline u32 gen6_pte_index(u32 addr)
-{
- return i915_pte_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pte_count(u32 addr, u32 length)
-{
- return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pde_index(u32 addr)
-{
- return i915_pde_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline unsigned int
-i915_pdpes_per_pdp(const struct i915_address_space *vm)
-{
- if (i915_vm_is_4lvl(vm))
- return GEN8_PML4ES_PER_PML4;
-
- return GEN8_3LVL_PDPES;
-}
-
-static inline struct i915_page_table *
-i915_pt_entry(const struct i915_page_directory * const pd,
- const unsigned short n)
-{
- return pd->entry[n];
-}
-
-static inline struct i915_page_directory *
-i915_pd_entry(const struct i915_page_directory * const pdp,
- const unsigned short n)
-{
- return pdp->entry[n];
-}
-
-static inline struct i915_page_directory *
-i915_pdp_entry(const struct i915_page_directory * const pml4,
- const unsigned short n)
-{
- return pml4->entry[n];
-}
-
-/* Equivalent to the gen6 version: for each pde, iterate over every pde
- * from start until start + length. On gen8+ it simply iterates
- * over every page directory entry in a page directory.
- */
-#define gen8_for_each_pde(pt, pd, start, length, iter) \
- for (iter = gen8_pde_index(start); \
- length > 0 && iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
- for (iter = gen8_pdpe_index(start); \
- length > 0 && iter < i915_pdpes_per_pdp(vm) && \
- (pd = i915_pd_entry(pdp, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
- for (iter = gen8_pml4e_index(start); \
- length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
- (pdp = i915_pdp_entry(pml4, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-static inline u32 gen8_pte_index(u64 address)
-{
- return i915_pte_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pde_index(u64 address)
-{
- return i915_pde_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pdpe_index(u64 address)
-{
- return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
-}
-
-static inline u32 gen8_pml4e_index(u64 address)
-{
- return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
-}
-
-static inline u64 gen8_pte_count(u64 address, u64 length)
-{
- return i915_pte_count(address, length, GEN8_PDE_SHIFT);
-}
-
-static inline dma_addr_t
-i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
-{
- struct i915_page_directory *pd;
-
- pd = i915_pdp_entry(ppgtt->pd, n);
- return px_dma(pd);
-}
-
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, vm);
-}
-
-static inline struct i915_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
- GEM_BUG_ON(i915_is_ggtt(vm));
- return container_of(vm, struct i915_ppgtt, vm);
-}
-
-#define INTEL_MAX_PPAT_ENTRIES 8
-#define INTEL_PPAT_PERFECT_MATCH (~0U)
-
-struct intel_ppat;
-
-struct intel_ppat_entry {
- struct intel_ppat *ppat;
- struct kref ref;
- u8 value;
-};
-
-struct intel_ppat {
- struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
- DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
- DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
- unsigned int max_entries;
- u8 clear_value;
- /*
- * Return a score indicating how well two PPAT values match;
- * an INTEL_PPAT_PERFECT_MATCH indicates a perfect match
- */
- unsigned int (*match)(u8 src, u8 dst);
- void (*update_hw)(struct drm_i915_private *i915);
-
- struct drm_i915_private *i915;
-};
-
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value);
-void intel_ppat_put(const struct intel_ppat_entry *entry);
-
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct drm_i915_private *i915);
-void i915_ggtt_disable_guc(struct drm_i915_private *i915);
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
-
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
-
-struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-
-static inline struct i915_address_space *
-i915_vm_get(struct i915_address_space *vm)
-{
- kref_get(&vm->ref);
- return vm;
-}
-
-void i915_vm_release(struct kref *kref);
-
-static inline void i915_vm_put(struct i915_address_space *vm)
-{
- kref_put(&vm->ref, i915_vm_release);
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
+struct i915_address_space;
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -694,20 +33,19 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT_ULL(0)
-#define PIN_NONFAULT BIT_ULL(1)
-#define PIN_NOEVICT BIT_ULL(2)
+#define PIN_NOEVICT BIT_ULL(0)
+#define PIN_NOSEARCH BIT_ULL(1)
+#define PIN_NONBLOCK BIT_ULL(2)
#define PIN_MAPPABLE BIT_ULL(3)
#define PIN_ZONE_4G BIT_ULL(4)
#define PIN_HIGH BIT_ULL(5)
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT_ULL(11)
+#define PIN_UPDATE BIT_ULL(9)
+#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
-#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
+#define PIN_OFFSET_MASK I915_GTT_PAGE_MASK
#endif
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
new file mode 100644
index 000000000000..54fce81d5724
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -0,0 +1,173 @@
+/*
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "gem/i915_gem_mman.h"
+#include "gt/intel_engine_user.h"
+
+#include "i915_drv.h"
+#include "i915_perf.h"
+
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+ case I915_PARAM_CHIPSET_ID:
+ value = i915->drm.pdev->device;
+ break;
+ case I915_PARAM_REVISION:
+ value = i915->drm.pdev->revision;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = i915->ggtt.num_fences;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = !!i915->overlay;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO, 0);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY, 0);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE, 0);
+ break;
+ case I915_PARAM_HAS_BSD2:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO, 1);
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(i915);
+ break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(i915);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = INTEL_PPGTT(i915);
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version(i915);
+ break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = intel_sseu_subslice_total(sseu);
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = sseu->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915_modparams.enable_hangcheck &&
+ intel_has_gpu_reset(&i915->gt);
+ if (value && intel_has_reset_engine(&i915->gt))
+ value = 2;
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = 0;
+ break;
+ case I915_PARAM_HAS_POOLED_EU:
+ value = HAS_POOLED_EU(i915);
+ break;
+ case I915_PARAM_MIN_EU_IN_POOL:
+ value = sseu->min_eu_in_pool;
+ break;
+ case I915_PARAM_HUC_STATUS:
+ value = intel_huc_check_status(&i915->gt.uc.huc);
+ if (value < 0)
+ return value;
+ break;
+ case I915_PARAM_MMAP_GTT_VERSION:
+ /* Though we've started our numbering from 1, and so class all
+ * earlier versions as 0, in effect their value is undefined as
+ * the ioctl will report EINVAL for the unknown param!
+ */
+ value = i915_gem_mmap_gtt_version();
+ break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = i915->caps.scheduler;
+ break;
+
+ case I915_PARAM_MMAP_VERSION:
+ /* Remember to bump this if the version changes! */
+ case I915_PARAM_HAS_GEM:
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ case I915_PARAM_HAS_EXEC_ASYNC:
+ case I915_PARAM_HAS_EXEC_FENCE:
+ case I915_PARAM_HAS_EXEC_CAPTURE:
+ case I915_PARAM_HAS_EXEC_BATCH_FIRST:
+ case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
+ case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
+ /* For the time being all of these are always true;
+ * if some supported hardware does not have one of these
+ * features this value needs to be provided from
+ * INTEL_INFO(), a feature macro, or similar.
+ */
+ value = 1;
+ break;
+ case I915_PARAM_HAS_CONTEXT_ISOLATION:
+ value = intel_engines_has_context_isolation(i915);
+ break;
+ case I915_PARAM_SLICE_MASK:
+ value = sseu->slice_mask;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_SUBSLICE_MASK:
+ value = sseu->subslice_mask[0];
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+ value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+ break;
+ case I915_PARAM_MMAP_GTT_COHERENT:
+ value = INTEL_INFO(i915)->has_coherent_ggtt;
+ break;
+ case I915_PARAM_PERF_REVISION:
+ value = i915_perf_ioctl_version();
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ if (put_user(value, param->value))
+ return -EFAULT;
+
+ return 0;
+}
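For context only, not part of the patch: a minimal userspace sketch of how a parameter is queried through this ioctl. It assumes the usual uapi header location and the render node path /dev/dri/renderD128; both may differ on a given system.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	struct drm_i915_getparam gp;
	int value = 0;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0)
		return 1;

	gp.param = I915_PARAM_CHIPSET_ID;	/* handled by the switch above */
	gp.value = &value;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("PCI device id: 0x%04x\n", value);

	close(fd);
	return 0;
}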
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 2d5fcba98841..3aa213684293 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -20,7 +20,10 @@ static LIST_HEAD(globals);
static atomic_t active;
static atomic_t epoch;
static struct park_work {
- struct rcu_work work;
+ struct delayed_work work;
+ struct rcu_head rcu;
+ unsigned long flags;
+#define PENDING 0
int epoch;
} park;
@@ -37,11 +40,33 @@ static void i915_globals_shrink(void)
global->shrink();
}
+static void __i915_globals_grace(struct rcu_head *rcu)
+{
+ /* Ratelimit parking as shrinking is quite slow */
+ schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
+}
+
+static void __i915_globals_queue_rcu(void)
+{
+ park.epoch = atomic_inc_return(&epoch);
+ if (!atomic_read(&active)) {
+ init_rcu_head(&park.rcu);
+ call_rcu(&park.rcu, __i915_globals_grace);
+ }
+}
+
static void __i915_globals_park(struct work_struct *work)
{
+ destroy_rcu_head(&park.rcu);
+
/* Confirm nothing woke up in the last grace period */
- if (park.epoch == atomic_read(&epoch))
- i915_globals_shrink();
+ if (park.epoch != atomic_read(&epoch)) {
+ __i915_globals_queue_rcu();
+ return;
+ }
+
+ clear_bit(PENDING, &park.flags);
+ i915_globals_shrink();
}
void __init i915_global_register(struct i915_global *global)
@@ -62,6 +87,7 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
+ i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
i915_global_objects_init,
@@ -84,7 +110,7 @@ int __init i915_globals_init(void)
}
}
- INIT_RCU_WORK(&park.work, __i915_globals_park);
+ INIT_DELAYED_WORK(&park.work, __i915_globals_park);
return 0;
}
@@ -102,8 +128,9 @@ void i915_globals_park(void)
if (!atomic_dec_and_test(&active))
return;
- park.epoch = atomic_inc_return(&epoch);
- queue_rcu_work(system_wq, &park.work);
+ /* Queue cleanup after the next RCU grace period has freed slabs */
+ if (!test_and_set_bit(PENDING, &park.flags))
+ __i915_globals_queue_rcu();
}
void i915_globals_unpark(void)
@@ -112,12 +139,21 @@ void i915_globals_unpark(void)
atomic_inc(&active);
}
+static void __exit __i915_globals_flush(void)
+{
+ atomic_inc(&active); /* skip shrinking */
+
+ rcu_barrier(); /* wait for the work to be queued */
+ flush_delayed_work(&park.work);
+
+ atomic_dec(&active);
+}
+
void __exit i915_globals_exit(void)
{
- /* Flush any residual park_work */
- atomic_inc(&epoch);
- flush_rcu_work(&park.work);
+ GEM_BUG_ON(atomic_read(&active));
+ __i915_globals_flush();
__i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
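A condensed view of the reworked parking flow in this file, for readability only; it restates the hunks above rather than adding behaviour:

/*
 * i915_globals_park()
 *     last user gone -> test_and_set_bit(PENDING) so only one request is
 *     queued -> __i915_globals_queue_rcu(): bump the epoch and, if still
 *     idle, call_rcu(__i915_globals_grace)
 *
 * __i915_globals_grace()        (runs after an RCU grace period)
 *     schedule_delayed_work(&park.work, ~2s) to ratelimit shrinking
 *
 * __i915_globals_park()         (the delayed work)
 *     epoch changed (someone woke up)? -> re-queue via
 *     __i915_globals_queue_rcu() and try again later
 *     otherwise -> clear PENDING and i915_globals_shrink()
 */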
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 04c1ce107fc0..b2f5cd9b9b1a 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -7,6 +7,8 @@
#ifndef _I915_GLOBALS_H_
#define _I915_GLOBALS_H_
+#include <linux/types.h>
+
typedef void (*i915_global_func_t)(void);
struct i915_global {
@@ -25,6 +27,7 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
+int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8bc76fcff70d..594341e27a47 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -29,8 +29,8 @@
#include <linux/ascii85.h>
#include <linux/nmi.h>
+#include <linux/pagevec.h>
#include <linux/scatterlist.h>
-#include <linux/stop_machine.h>
#include <linux/utsname.h>
#include <linux/zlib.h>
@@ -40,52 +40,17 @@
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
+#include "i915_memcpy.h"
#include "i915_scatterlist.h"
#include "intel_csr.h"
-static inline const struct intel_engine_cs *
-engine_lookup(const struct drm_i915_private *i915, unsigned int id)
-{
- if (id >= I915_NUM_ENGINES)
- return NULL;
-
- return i915->engine[id];
-}
-
-static inline const char *
-__engine_name(const struct intel_engine_cs *engine)
-{
- return engine ? engine->name : "";
-}
-
-static const char *
-engine_name(const struct drm_i915_private *i915, unsigned int id)
-{
- return __engine_name(engine_lookup(i915, id));
-}
-
-static const char *tiling_flag(int tiling)
-{
- switch (tiling) {
- default:
- case I915_TILING_NONE: return "";
- case I915_TILING_X: return " X";
- case I915_TILING_Y: return " Y";
- }
-}
-
-static const char *dirty_flag(int dirty)
-{
- return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
- return purgeable ? " purgeable" : "";
-}
+#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
static void __sg_set_buf(struct scatterlist *sg,
void *addr, unsigned int len, loff_t it)
@@ -114,7 +79,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
if (e->cur == e->end) {
struct scatterlist *sgl;
- sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
if (!sgl) {
e->err = -ENOMEM;
return false;
@@ -134,7 +99,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
}
e->size = ALIGN(len + 1, SZ_64K);
- e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ e->buf = kmalloc(e->size, ALLOW_FAIL);
if (!e->buf) {
e->size = PAGE_ALIGN(len + 1);
e->buf = kmalloc(e->size, GFP_KERNEL);
@@ -211,63 +176,132 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
return p;
}
+/* A single-threaded page allocator with a reserved stash for emergencies */
+static void pool_fini(struct pagevec *pv)
+{
+ pagevec_release(pv);
+}
+
+static int pool_refill(struct pagevec *pv, gfp_t gfp)
+{
+ while (pagevec_space(pv)) {
+ struct page *p;
+
+ p = alloc_page(gfp);
+ if (!p)
+ return -ENOMEM;
+
+ pagevec_add(pv, p);
+ }
+
+ return 0;
+}
+
+static int pool_init(struct pagevec *pv, gfp_t gfp)
+{
+ int err;
+
+ pagevec_init(pv);
+
+ err = pool_refill(pv, gfp);
+ if (err)
+ pool_fini(pv);
+
+ return err;
+}
+
+static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
+{
+ struct page *p;
+
+ p = alloc_page(gfp);
+ if (!p && pagevec_count(pv))
+ p = pv->pages[--pv->nr];
+
+ return p ? page_address(p) : NULL;
+}
+
+static void pool_free(struct pagevec *pv, void *addr)
+{
+ struct page *p = virt_to_page(addr);
+
+ if (pagevec_space(pv))
+ pagevec_add(pv, p);
+ else
+ __free_page(p);
+}
+
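A brief illustration of how these pool helpers are meant to be paired by the capture code below; it is a sketch of the calling convention using only names introduced by this patch, not additional patch content:

	struct pagevec stash;
	void *page;

	if (pool_init(&stash, ALLOW_FAIL))	/* pre-fill the reserve */
		return false;

	page = pool_alloc(&stash, ALLOW_FAIL);	/* dips into the stash if
						 * alloc_page() fails */
	if (page) {
		/* ... write capture data into the page ... */
		pool_free(&stash, page);	/* back to the stash, or freed */
	}

	pool_fini(&stash);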
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
-struct compress {
+struct i915_vma_compress {
+ struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
{
- struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
+ struct z_stream_s *zstream = &c->zstream;
- zstream->workspace =
- kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
- GFP_ATOMIC | __GFP_NOWARN);
- if (!zstream->workspace)
+ if (pool_init(&c->pool, ALLOW_FAIL))
return false;
- if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
- kfree(zstream->workspace);
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ ALLOW_FAIL);
+ if (!zstream->workspace) {
+ pool_fini(&c->pool);
return false;
}
c->tmp = NULL;
if (i915_has_memcpy_from_wc())
- c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
return true;
}
-static void *compress_next_page(struct drm_i915_error_object *dst)
+static bool compress_start(struct i915_vma_compress *c)
+{
+ struct z_stream_s *zstream = &c->zstream;
+ void *workspace = zstream->workspace;
+
+ memset(zstream, 0, sizeof(*zstream));
+ zstream->workspace = workspace;
+
+ return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
+}
+
+static void *compress_next_page(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
- unsigned long page;
+ void *page;
if (dst->page_count >= dst->num_pages)
return ERR_PTR(-ENOSPC);
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = pool_alloc(&c->pool, ALLOW_FAIL);
if (!page)
return ERR_PTR(-ENOMEM);
- return dst->pages[dst->page_count++] = (void *)page;
+ return dst->pages[dst->page_count++] = page;
}
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
void *src,
- struct drm_i915_error_object *dst)
+ struct i915_vma_coredump *dst,
+ bool wc)
{
struct z_stream_s *zstream = &c->zstream;
zstream->next_in = src;
- if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
do {
if (zstream->avail_out == 0) {
- zstream->next_out = compress_next_page(dst);
+ zstream->next_out = compress_next_page(c, dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
@@ -276,8 +310,6 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
-
- touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -287,15 +319,15 @@ static int compress_page(struct compress *c,
return 0;
}
-static int compress_flush(struct compress *c,
- struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
struct z_stream_s *zstream = &c->zstream;
do {
switch (zlib_deflate(zstream, Z_FINISH)) {
case Z_OK: /* more space requested */
- zstream->next_out = compress_next_page(dst);
+ zstream->next_out = compress_next_page(c, dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
@@ -316,15 +348,17 @@ end:
return 0;
}
-static void compress_fini(struct compress *c,
- struct drm_i915_error_object *dst)
+static void compress_finish(struct i915_vma_compress *c)
{
- struct z_stream_s *zstream = &c->zstream;
+ zlib_deflateEnd(&c->zstream);
+}
- zlib_deflateEnd(zstream);
- kfree(zstream->workspace);
+static void compress_fini(struct i915_vma_compress *c)
+{
+ kfree(c->zstream.workspace);
if (c->tmp)
- free_page((unsigned long)c->tmp);
+ pool_free(&c->pool, c->tmp);
+ pool_fini(&c->pool);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -334,44 +368,53 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
-struct compress {
+struct i915_vma_compress {
+ struct pagevec pool;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
+{
+ return pool_init(&c->pool, ALLOW_FAIL) == 0;
+}
+
+static bool compress_start(struct i915_vma_compress *c)
{
return true;
}
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
void *src,
- struct drm_i915_error_object *dst)
+ struct i915_vma_coredump *dst,
+ bool wc)
{
- unsigned long page;
void *ptr;
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page)
+ ptr = pool_alloc(&c->pool, ALLOW_FAIL);
+ if (!ptr)
return -ENOMEM;
- ptr = (void *)page;
- if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
return 0;
}
-static int compress_flush(struct compress *c,
- struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
return 0;
}
-static void compress_fini(struct compress *c,
- struct drm_i915_error_object *dst)
+static void compress_finish(struct i915_vma_compress *c)
{
}
+static void compress_fini(struct i915_vma_compress *c)
+{
+ pool_fini(&c->pool);
+}
+
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
err_puts(m, "~");
@@ -379,46 +422,17 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#endif
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
- const char *name,
- struct drm_i915_error_buffer *err,
- int count)
-{
- err_printf(m, "%s [%d]:\n", name, count);
-
- while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x",
- upper_32_bits(err->gtt_offset),
- lower_32_bits(err->gtt_offset),
- err->size,
- err->read_domains,
- err->write_domain);
- err_puts(m, tiling_flag(err->tiling));
- err_puts(m, dirty_flag(err->dirty));
- err_puts(m, purgeable_flag(err->purgeable));
- err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
-
- if (err->name)
- err_printf(m, " (name: %d)", err->name);
- if (err->fence_reg != I915_FENCE_REG_NONE)
- err_printf(m, " (fence: %d)", err->fence_reg);
-
- err_puts(m, "\n");
- err++;
- }
-}
-
static void error_print_instdone(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee)
+ const struct intel_engine_coredump *ee)
{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
int subslice;
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@@ -427,12 +441,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
@@ -440,41 +454,56 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
- const struct drm_i915_error_request *erq,
- const unsigned long epoch)
+ const struct i915_request_coredump *erq)
{
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&erq->flags) ? "+" : "",
erq->sched_attr.priority,
- jiffies_to_msecs(erq->jiffies - epoch),
erq->start, erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
- const struct drm_i915_error_context *ctx)
+ const struct i915_gem_context_coredump *ctx)
+{
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
+ ctx->guilty, ctx->active);
+}
+
+static struct i915_vma_coredump *
+__find_vma(struct i915_vma_coredump *vma, const char *name)
+{
+ while (vma) {
+ if (strcmp(vma->name, name) == 0)
+ return vma;
+ vma = vma->next;
+ }
+
+ return NULL;
+}
+
+static struct i915_vma_coredump *
+find_batch(const struct intel_engine_coredump *ee)
{
- err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->hw_id,
- ctx->sched_attr.priority, ctx->guilty, ctx->active);
+ return __find_vma(ee->vma, "batch");
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee,
- const unsigned long epoch)
+ const struct intel_engine_coredump *ee)
{
+ struct i915_vma_coredump *batch;
int n;
- err_printf(m, "%s command stream:\n",
- engine_name(m->i915, ee->engine_id));
- err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
+ err_printf(m, "%s command stream:\n", ee->engine->name);
+ err_printf(m, " CCID: 0x%08x\n", ee->ccid);
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -489,9 +518,10 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_instdone(m, ee);
- if (ee->batchbuffer) {
- u64 start = ee->batchbuffer->gtt_offset;
- u64 end = start + ee->batchbuffer->gtt_size;
+ batch = find_batch(ee);
+ if (batch) {
+ u64 start = batch->gtt_offset;
+ u64 end = start + batch->gtt_size;
err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
upper_32_bits(start), lower_32_bits(start),
@@ -523,17 +553,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
- err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
- jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
- ee->hangcheck_timestamp,
- ee->hangcheck_timestamp == epoch ? "; epoch" : "");
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
- error_print_request(m, " ", &ee->execlist[n], epoch);
+ error_print_request(m, " ", &ee->execlist[n]);
}
error_print_context(m, " Active context: ", &ee->context);
@@ -548,35 +572,35 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static void print_error_obj(struct drm_i915_error_state_buf *m,
- struct intel_engine_cs *engine,
- const char *name,
- struct drm_i915_error_object *obj)
+static void print_error_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
int page;
- if (!obj)
+ if (!vma)
return;
- if (name) {
- err_printf(m, "%s --- %s = 0x%08x %08x\n",
- engine ? engine->name : "global", name,
- upper_32_bits(obj->gtt_offset),
- lower_32_bits(obj->gtt_offset));
- }
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", vma->name,
+ upper_32_bits(vma->gtt_offset),
+ lower_32_bits(vma->gtt_offset));
+
+ if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+ err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
err_compression_marker(m);
- for (page = 0; page < obj->page_count; page++) {
+ for (page = 0; page < vma->page_count; page++) {
int i, len;
len = PAGE_SIZE;
- if (page == obj->page_count - 1)
- len -= obj->unused;
+ if (page == vma->page_count - 1)
+ len -= vma->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++)
- err_puts(m, ascii85_encode(obj->pages[page][i], out));
+ err_puts(m, ascii85_encode(vma->pages[page][i], out));
}
err_puts(m, "\n");
}
@@ -588,9 +612,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_dump_flags(info, &p);
+ intel_device_info_print_static(info, &p);
+ intel_device_info_print_runtime(runtime, &p);
+ intel_device_info_print_topology(&runtime->sseu, &p);
intel_driver_caps_print(caps, &p);
- intel_device_info_dump_topology(&runtime->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -614,18 +639,13 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
}
static void err_print_uc(struct drm_i915_error_state_buf *m,
- const struct i915_error_uc *error_uc)
+ const struct intel_uc_coredump *error_uc)
{
struct drm_printer p = i915_error_printer(m);
- const struct i915_gpu_state *error =
- container_of(error_uc, typeof(*error), uc);
-
- if (!error->device_info.has_guc)
- return;
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
+ print_error_vma(m, NULL, error_uc->guc_log);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -645,18 +665,76 @@ static void err_free_sgl(struct scatterlist *sgl)
}
}
+static void err_print_gt(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ const struct intel_engine_coredump *ee;
+ int i;
+
+ err_printf(m, "GT awake: %s\n", yesno(gt->awake));
+ err_printf(m, "EIR: 0x%08x\n", gt->eir);
+ err_printf(m, "IER: 0x%08x\n", gt->ier);
+ for (i = 0; i < gt->ngtier; i++)
+ err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+
+ for (i = 0; i < gt->nfence; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+
+ if (IS_GEN_RANGE(m->i915, 6, 11)) {
+ err_printf(m, "ERROR: 0x%08x\n", gt->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
+ }
+
+ if (INTEL_GEN(m->i915) >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ gt->fault_data1, gt->fault_data0);
+
+ if (IS_GEN(m->i915, 7))
+ err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
+
+ if (IS_GEN_RANGE(m->i915, 8, 11))
+ err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
+
+ if (IS_GEN(m->i915, 12))
+ err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
+
+ if (INTEL_GEN(m->i915) >= 12) {
+ int i;
+
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
+ gt->sfc_done[i]);
+
+ err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
+ }
+
+ for (ee = gt->engine; ee; ee = ee->next) {
+ const struct i915_vma_coredump *vma;
+
+ error_print_engine(m, ee);
+ for (vma = ee->vma; vma; vma = vma->next)
+ print_error_vma(m, ee->engine, vma);
+ }
+
+ if (gt->uc)
+ err_print_uc(m, gt->uc);
+}
+
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
- struct i915_gpu_state *error)
+ struct i915_gpu_coredump *error)
{
- struct drm_i915_error_object *obj;
+ const struct intel_engine_coredump *ee;
struct timespec64 ts;
- int i, j;
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
+ err_printf(m, "Driver: %s\n", DRIVER_DATE);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -666,21 +744,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
ts = ktime_to_timespec64(error->uptime);
err_printf(m, "Uptime: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
- err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
- err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
- error->capture,
- jiffies_to_msecs(jiffies - error->capture),
- jiffies_to_msecs(error->capture - error->epoch));
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (!error->engine[i].context.pid)
- continue;
+ err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
+ error->capture, jiffies_to_msecs(jiffies - error->capture));
+ for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
- engine_name(m->i915, i),
- error->engine[i].context.comm,
- error->engine[i].context.pid);
- }
+ ee->engine->name,
+ ee->context.comm,
+ ee->context.pid);
+
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
@@ -701,114 +773,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
CSR_VERSION_MINOR(csr->version));
}
- err_printf(m, "GT awake: %s\n", yesno(error->awake));
err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
- err_printf(m, "EIR: 0x%08x\n", error->eir);
- err_printf(m, "IER: 0x%08x\n", error->ier);
- for (i = 0; i < error->ngtier; i++)
- err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
- err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
- err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
- for (i = 0; i < error->nfence; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
- if (INTEL_GEN(m->i915) >= 6) {
- err_printf(m, "ERROR: 0x%08x\n", error->error);
-
- if (INTEL_GEN(m->i915) >= 8)
- err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
- error->fault_data1, error->fault_data0);
-
- err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
- }
-
- if (IS_GEN(m->i915, 7))
- err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].engine_id != -1)
- error_print_engine(m, &error->engine[i], error->epoch);
- }
-
- for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
- char buf[128];
- int len, first = 1;
-
- if (!error->active_vm[i])
- break;
-
- len = scnprintf(buf, sizeof(buf), "Active (");
- for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
- if (error->engine[j].vm != error->active_vm[i])
- continue;
-
- len += scnprintf(buf + len, sizeof(buf), "%s%s",
- first ? "" : ", ",
- m->i915->engine[j]->name);
- first = 0;
- }
- scnprintf(buf + len, sizeof(buf), ")");
- print_error_buffers(m, buf,
- error->active_bo[i],
- error->active_bo_count[i]);
- }
-
- print_error_buffers(m, "Pinned (global)",
- error->pinned_bo,
- error->pinned_bo_count);
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- const struct drm_i915_error_engine *ee = &error->engine[i];
-
- obj = ee->batchbuffer;
- if (obj) {
- err_puts(m, m->i915->engine[i]->name);
- if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d])",
- ee->context.comm,
- ee->context.pid);
- err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
- upper_32_bits(obj->gtt_offset),
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, m->i915->engine[i], NULL, obj);
- }
-
- for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, m->i915->engine[i],
- "user", ee->user_bo[j]);
-
- if (ee->num_requests) {
- err_printf(m, "%s --- %d requests\n",
- m->i915->engine[i]->name,
- ee->num_requests);
- for (j = 0; j < ee->num_requests; j++)
- error_print_request(m, " ",
- &ee->requests[j],
- error->epoch);
- }
-
- print_error_obj(m, m->i915->engine[i],
- "ringbuffer", ee->ringbuffer);
-
- print_error_obj(m, m->i915->engine[i],
- "HW Status", ee->hws_page);
- print_error_obj(m, m->i915->engine[i],
- "HW context", ee->ctx);
-
- print_error_obj(m, m->i915->engine[i],
- "WA context", ee->wa_ctx);
-
- print_error_obj(m, m->i915->engine[i],
- "WA batchbuffer", ee->wa_batchbuffer);
-
- print_error_obj(m, m->i915->engine[i],
- "NULL context", ee->default_state);
- }
+ if (error->gt)
+ err_print_gt(m, error->gt);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -819,10 +788,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->runtime_info,
&error->driver_caps);
err_print_params(m, &error->params);
- err_print_uc(m, &error->uc);
}
-static int err_print_to_sgl(struct i915_gpu_state *error)
+static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
struct drm_i915_error_state_buf m;
@@ -859,8 +827,8 @@ static int err_print_to_sgl(struct i915_gpu_state *error)
return 0;
}
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
- char *buf, loff_t off, size_t rem)
+ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+ char *buf, loff_t off, size_t rem)
{
struct scatterlist *sg;
size_t count;
@@ -923,249 +891,224 @@ ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
return count;
}
-static void i915_error_object_free(struct drm_i915_error_object *obj)
+static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
- int page;
-
- if (obj == NULL)
- return;
+ while (vma) {
+ struct i915_vma_coredump *next = vma->next;
+ int page;
- for (page = 0; page < obj->page_count; page++)
- free_page((unsigned long)obj->pages[page]);
+ for (page = 0; page < vma->page_count; page++)
+ free_page((unsigned long)vma->pages[page]);
- kfree(obj);
+ kfree(vma);
+ vma = next;
+ }
}
-
-static void cleanup_params(struct i915_gpu_state *error)
+static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
}
-static void cleanup_uc_state(struct i915_gpu_state *error)
+static void cleanup_uc(struct intel_uc_coredump *uc)
{
- struct i915_error_uc *error_uc = &error->uc;
+ kfree(uc->guc_fw.path);
+ kfree(uc->huc_fw.path);
+ i915_vma_coredump_free(uc->guc_log);
- kfree(error_uc->guc_fw.path);
- kfree(error_uc->huc_fw.path);
- i915_error_object_free(error_uc->guc_log);
+ kfree(uc);
}
-void __i915_gpu_state_free(struct kref *error_ref)
+static void cleanup_gt(struct intel_gt_coredump *gt)
{
- struct i915_gpu_state *error =
- container_of(error_ref, typeof(*error), ref);
- long i, j;
+ while (gt->engine) {
+ struct intel_engine_coredump *ee = gt->engine;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
+ gt->engine = ee->next;
- for (j = 0; j < ee->user_bo_count; j++)
- i915_error_object_free(ee->user_bo[j]);
- kfree(ee->user_bo);
+ i915_vma_coredump_free(ee->vma);
+ kfree(ee);
+ }
- i915_error_object_free(ee->batchbuffer);
- i915_error_object_free(ee->wa_batchbuffer);
- i915_error_object_free(ee->ringbuffer);
- i915_error_object_free(ee->hws_page);
- i915_error_object_free(ee->ctx);
- i915_error_object_free(ee->wa_ctx);
+ if (gt->uc)
+ cleanup_uc(gt->uc);
- kfree(ee->requests);
- }
+ kfree(gt);
+}
- for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
- kfree(error->active_bo[i]);
- kfree(error->pinned_bo);
+void __i915_gpu_coredump_free(struct kref *error_ref)
+{
+ struct i915_gpu_coredump *error =
+ container_of(error_ref, typeof(*error), ref);
+
+ while (error->gt) {
+ struct intel_gt_coredump *gt = error->gt;
+
+ error->gt = gt->next;
+ cleanup_gt(gt);
+ }
kfree(error->overlay);
kfree(error->display);
cleanup_params(error);
- cleanup_uc_state(error);
err_free_sgl(error->sgl);
kfree(error);
}
-static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *i915,
- struct i915_vma *vma)
+static struct i915_vma_coredump *
+i915_vma_coredump_create(const struct intel_gt *gt,
+ const struct i915_vma *vma,
+ const char *name,
+ struct i915_vma_compress *compress)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
- struct drm_i915_error_object *dst;
- struct compress compress;
+ struct i915_vma_coredump *dst;
unsigned long num_pages;
struct sgt_iter iter;
- dma_addr_t dma;
int ret;
- if (!vma || !vma->pages)
+ might_sleep();
+
+ if (!vma || !vma->pages || !compress)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
- GFP_ATOMIC | __GFP_NOWARN);
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
if (!dst)
return NULL;
+ if (!compress_start(compress)) {
+ kfree(dst);
+ return NULL;
+ }
+
+ strcpy(dst->name, name);
+ dst->next = NULL;
+
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->gtt_page_sizes = vma->page_sizes.gtt;
dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
- if (!compress_init(&compress)) {
- kfree(dst);
- return NULL;
- }
-
ret = -EINVAL;
- for_each_sgt_dma(dma, iter, vma->pages) {
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+ dma_addr_t dma;
+
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
+ mb();
+
+ s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
+ ret = compress_page(compress,
+ (void __force *)s, dst,
+ true);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else if (i915_gem_object_is_lmem(vma->obj)) {
+ struct intel_memory_region *mem = vma->obj->mm.region;
+ dma_addr_t dma;
+
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ void __iomem *s;
+
+ s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ ret = compress_page(compress,
+ (void __force *)s, dst,
+ true);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else {
+ struct page *page;
- ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+ for_each_sgt_page(page, iter, vma->pages) {
+ void *s;
- s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
- ret = compress_page(&compress, (void __force *)s, dst);
- io_mapping_unmap_atomic(s);
- if (ret)
- break;
+ drm_clflush_pages(&page, 1);
+
+ s = kmap(page);
+ ret = compress_page(compress, s, dst, false);
+ kunmap(page);
+
+ drm_clflush_pages(&page, 1);
+
+ if (ret)
+ break;
+ }
}
- if (ret || compress_flush(&compress, dst)) {
+ if (ret || compress_flush(compress, dst)) {
while (dst->page_count--)
- free_page((unsigned long)dst->pages[dst->page_count]);
+ pool_free(&compress->pool, dst->pages[dst->page_count]);
kfree(dst);
dst = NULL;
}
+ compress_finish(compress);
- compress_fini(&compress, dst);
return dst;
}
-static void capture_bo(struct drm_i915_error_buffer *err,
- struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
-
- err->size = obj->base.size;
- err->name = obj->base.name;
-
- err->gtt_offset = vma->node.start;
- err->read_domains = obj->read_domains;
- err->write_domain = obj->write_domain;
- err->fence_reg = vma->fence ? vma->fence->id : -1;
- err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->mm.dirty;
- err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
- err->userptr = obj->userptr.mm != NULL;
- err->cache_level = obj->cache_level;
-}
-
-static u32 capture_error_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head,
- unsigned int flags)
-#define ACTIVE_ONLY BIT(0)
-#define PINNED_ONLY BIT(1)
-{
- struct i915_vma *vma;
- int i = 0;
-
- list_for_each_entry(vma, head, vm_link) {
- if (!vma->obj)
- continue;
-
- if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
- continue;
-
- if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
- continue;
-
- capture_bo(err++, vma);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-/*
- * Generate a semi-unique error code. The code is not meant to have meaning;
- * its only purpose is to try to prevent false duplicate bug reports by
- * grossly estimating a GPU error state.
- *
- * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
- * the hang if we could strip the GTT offset information from it.
- *
- * It's only a small step better than a random number in its current form.
- */
-static u32 i915_error_generate_code(struct i915_gpu_state *error,
- intel_engine_mask_t engine_mask)
+static void gt_record_fences(struct intel_gt_coredump *gt)
{
- /*
- * IPEHR would be an ideal way to detect errors, as it's the gross
- * measure of "the command that hung." However, it has some very common
- * synchronization commands which almost always appear in the case of
- * strictly a client bug. Use instdone to help differentiate those.
- */
- if (engine_mask) {
- struct drm_i915_error_engine *ee =
- &error->engine[ffs(engine_mask)];
-
- return ee->ipehr ^ ee->instdone.instdone;
- }
-
- return 0;
-}
-
-static void gem_record_fences(struct i915_gpu_state *error)
-{
- struct drm_i915_private *dev_priv = error->i915;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+ struct intel_uncore *uncore = gt->_gt->uncore;
int i;
- if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ if (INTEL_GEN(uncore->i915) >= 6) {
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_GEN6_LO(i));
- } else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ } else if (INTEL_GEN(uncore->i915) >= 4) {
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read(uncore, FENCE_REG(i));
}
- error->nfence = i;
+ gt->nfence = i;
}
-static void error_record_engine_registers(struct i915_gpu_state *error,
- struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
+static void engine_record_registers(struct intel_engine_coredump *ee)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ const struct intel_engine_cs *engine = ee->engine;
+ struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(dev_priv) >= 8)
- ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
+
+ if (INTEL_GEN(i915) >= 12)
+ ee->fault_reg = intel_uncore_read(engine->uncore,
+ GEN12_RING_FAULT_REG);
+ else if (INTEL_GEN(i915) >= 8)
+ ee->fault_reg = intel_uncore_read(engine->uncore,
+ GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (INTEL_GEN(i915) >= 4) {
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
ee->instps = ENGINE_READ(engine, RING_INSTPS);
ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
- if (INTEL_GEN(dev_priv) >= 8) {
+ ee->ccid = ENGINE_READ(engine, CCID);
+ if (INTEL_GEN(i915) >= 8) {
ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
@@ -1184,13 +1127,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->head = ENGINE_READ(engine, RING_HEAD);
ee->tail = ENGINE_READ(engine, RING_TAIL);
ee->ctl = ENGINE_READ(engine, RING_CTL);
- if (INTEL_GEN(dev_priv) > 2)
+ if (INTEL_GEN(i915) > 2)
ee->mode = ENGINE_READ(engine, RING_MI_MODE);
- if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
+ if (!HWS_NEEDS_PHYSICAL(i915)) {
i915_reg_t mmio;
- if (IS_GEN(dev_priv, 7)) {
+ if (IS_GEN(i915, 7)) {
switch (engine->id) {
default:
MISSING_CASE(engine->id);
@@ -1215,184 +1158,155 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
mmio = RING_HWS_PGA(engine->mmio_base);
}
- ee->hws = I915_READ(mmio);
+ ee->hws = intel_uncore_read(engine->uncore, mmio);
}
- ee->idle = intel_engine_is_idle(engine);
- if (!ee->idle)
- ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
- ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
- engine);
+ ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
- if (HAS_PPGTT(dev_priv)) {
+ if (HAS_PPGTT(i915)) {
int i;
ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_GEN(i915, 6)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
- } else if (IS_GEN(dev_priv, 7)) {
+ } else if (IS_GEN(i915, 7)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE);
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (INTEL_GEN(i915) >= 8) {
u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(base, i));
+ intel_uncore_read(engine->uncore,
+ GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(base, i));
+ intel_uncore_read(engine->uncore,
+ GEN8_RING_PDP_LDW(base, i));
}
}
}
}
-static void record_request(struct i915_request *request,
- struct drm_i915_error_request *erq)
+static void record_request(const struct i915_request *request,
+ struct i915_request_coredump *erq)
{
- struct i915_gem_context *ctx = request->gem_context;
+ const struct i915_gem_context *ctx;
erq->flags = request->fence.flags;
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
erq->tail = request->tail;
+ erq->pid = 0;
rcu_read_lock();
- erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+ ctx = rcu_dereference(request->context->gem_context);
+ if (ctx)
+ erq->pid = pid_nr(ctx->pid);
rcu_read_unlock();
}
-static void engine_record_requests(struct intel_engine_cs *engine,
- struct i915_request *first,
- struct drm_i915_error_engine *ee)
+static void engine_record_execlists(struct intel_engine_coredump *ee)
{
- struct i915_request *request;
- int count;
+ const struct intel_engine_execlists * const el = &ee->engine->execlists;
+ struct i915_request * const *port = el->active;
+ unsigned int n = 0;
- count = 0;
- request = first;
- list_for_each_entry_from(request, &engine->active.requests, sched.link)
- count++;
- if (!count)
- return;
-
- ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
- if (!ee->requests)
- return;
-
- ee->num_requests = count;
-
- count = 0;
- request = first;
- list_for_each_entry_from(request,
- &engine->active.requests, sched.link) {
- if (count >= ee->num_requests) {
- /*
- * If the ring request list changed between
- * the point where the error request list was
- * created and dimensioned and this point, then
- * just exit early to avoid crashes.
- *
- * We don't need to communicate that the
- * request list changed state during error
- * state capture and that the error state is
- * slightly incorrect as a consequence since we
- * are typically only interested in the request
- * list state at the point of error state
- * capture, not in any changes happening during
- * the capture.
- */
- break;
- }
+ while (*port)
+ record_request(*port++, &ee->execlist[n++]);
- record_request(request, &ee->requests[count++]);
- }
- ee->num_requests = count;
+ ee->num_ports = n;
}
-static void error_record_engine_execlists(struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
+static bool record_context(struct i915_gem_context_coredump *e,
+ const struct i915_request *rq)
{
- const struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned int n;
-
- for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct i915_request *rq = port_request(&execlists->port[n]);
+ struct i915_gem_context *ctx;
+ struct task_struct *task;
+ bool capture;
- if (!rq)
- break;
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return false;
- record_request(rq, &ee->execlist[n]);
+ rcu_read_lock();
+ task = pid_task(ctx->pid, PIDTYPE_PID);
+ if (task) {
+ strcpy(e->comm, task->comm);
+ e->pid = task->pid;
}
+ rcu_read_unlock();
- ee->num_ports = n;
+ e->sched_attr = ctx->sched;
+ e->guilty = atomic_read(&ctx->guilty_count);
+ e->active = atomic_read(&ctx->active_count);
+
+ capture = i915_gem_context_no_error_capture(ctx);
+
+ i915_gem_context_put(ctx);
+ return capture;
}
-static void record_context(struct drm_i915_error_context *e,
- struct i915_gem_context *ctx)
+struct intel_engine_capture_vma {
+ struct intel_engine_capture_vma *next;
+ struct i915_vma *vma;
+ char name[16];
+};
+
+static struct intel_engine_capture_vma *
+capture_vma(struct intel_engine_capture_vma *next,
+ struct i915_vma *vma,
+ const char *name,
+ gfp_t gfp)
{
- if (ctx->pid) {
- struct task_struct *task;
+ struct intel_engine_capture_vma *c;
- rcu_read_lock();
- task = pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- strcpy(e->comm, task->comm);
- e->pid = task->pid;
- }
- rcu_read_unlock();
+ if (!vma)
+ return next;
+
+ c = kmalloc(sizeof(*c), gfp);
+ if (!c)
+ return next;
+
+ if (!i915_active_acquire_if_busy(&vma->active)) {
+ kfree(c);
+ return next;
}
- e->hw_id = ctx->hw_id;
- e->sched_attr = ctx->sched;
- e->guilty = atomic_read(&ctx->guilty_count);
- e->active = atomic_read(&ctx->active_count);
+ strcpy(c->name, name);
+ c->vma = i915_vma_get(vma);
+
+ c->next = next;
+ return c;
}
-static void request_record_user_bo(struct i915_request *request,
- struct drm_i915_error_engine *ee)
+static struct intel_engine_capture_vma *
+capture_user(struct intel_engine_capture_vma *capture,
+ const struct i915_request *rq,
+ gfp_t gfp)
{
struct i915_capture_list *c;
- struct drm_i915_error_object **bo;
- long count, max;
- max = 0;
- for (c = request->capture_list; c; c = c->next)
- max++;
- if (!max)
- return;
-
- bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
- if (!bo) {
- /* If we can't capture everything, try to capture something. */
- max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
- bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
- }
- if (!bo)
- return;
+ for (c = rq->capture_list; c; c = c->next)
+ capture = capture_vma(capture, c->vma, "user", gfp);
- count = 0;
- for (c = request->capture_list; c; c = c->next) {
- bo[count] = i915_error_object_create(request->i915, c->vma);
- if (!bo[count])
- break;
- if (++count == max)
- break;
- }
-
- ee->user_bo = bo;
- ee->user_bo_count = count;
+ return capture;
}
-static struct drm_i915_error_object *
-capture_object(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj)
+static struct i915_vma_coredump *
+capture_object(const struct intel_gt *gt,
+ struct drm_i915_gem_object *obj,
+ const char *name,
+ struct i915_vma_compress *compress)
{
if (obj && i915_gem_object_has_pages(obj)) {
struct i915_vma fake = {
@@ -1402,190 +1316,221 @@ capture_object(struct drm_i915_private *dev_priv,
.obj = obj,
};
- return i915_error_object_create(dev_priv, &fake);
+ return i915_vma_coredump_create(gt, &fake, name, compress);
} else {
return NULL;
}
}
-static void gem_record_rings(struct i915_gpu_state *error)
+static void add_vma(struct intel_engine_coredump *ee,
+ struct i915_vma_coredump *vma)
{
- struct drm_i915_private *i915 = error->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
- int i;
+ if (vma) {
+ vma->next = ee->vma;
+ ee->vma = vma;
+ }
+}
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = i915->engine[i];
- struct drm_i915_error_engine *ee = &error->engine[i];
- struct i915_request *request;
- unsigned long flags;
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+ struct intel_engine_coredump *ee;
- ee->engine_id = -1;
+ ee = kzalloc(sizeof(*ee), gfp);
+ if (!ee)
+ return NULL;
- if (!engine)
- continue;
+ ee->engine = engine;
- ee->engine_id = i;
+ engine_record_registers(ee);
+ engine_record_execlists(ee);
- error_record_engine_registers(error, engine, ee);
- error_record_engine_execlists(engine, ee);
+ return ee;
+}
- spin_lock_irqsave(&engine->active.lock, flags);
- request = intel_engine_find_active_request(engine);
- if (request) {
- struct i915_gem_context *ctx = request->gem_context;
- struct intel_ring *ring = request->ring;
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp)
+{
+ struct intel_engine_capture_vma *vma = NULL;
- ee->vm = ctx->vm ?: &ggtt->vm;
+ ee->simulated |= record_context(&ee->context, rq);
+ if (ee->simulated)
+ return NULL;
- record_context(&ee->context, ctx);
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ vma = capture_vma(vma, rq->batch, "batch", gfp);
+ vma = capture_user(vma, rq, gfp);
+ vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
+ vma = capture_vma(vma, rq->context->state, "HW context", gfp);
- /* We need to copy these to an anonymous buffer
- * as the simplest method to avoid being overwritten
- * by userspace.
- */
- ee->batchbuffer =
- i915_error_object_create(i915, request->batch);
+ ee->rq_head = rq->head;
+ ee->rq_post = rq->postfix;
+ ee->rq_tail = rq->tail;
- if (HAS_BROKEN_CS_TLB(i915))
- ee->wa_batchbuffer =
- i915_error_object_create(i915,
- i915->gt.scratch);
- request_record_user_bo(request, ee);
+ return vma;
+}
- ee->ctx =
- i915_error_object_create(i915,
- request->hw_context->state);
+void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress)
+{
+ const struct intel_engine_cs *engine = ee->engine;
- error->simulated |=
- i915_gem_context_no_error_capture(ctx);
+ while (capture) {
+ struct intel_engine_capture_vma *this = capture;
+ struct i915_vma *vma = this->vma;
- ee->rq_head = request->head;
- ee->rq_post = request->postfix;
- ee->rq_tail = request->tail;
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ vma, this->name,
+ compress));
- ee->cpu_ring_head = ring->head;
- ee->cpu_ring_tail = ring->tail;
- ee->ringbuffer =
- i915_error_object_create(i915, ring->vma);
+ i915_active_release(&vma->active);
+ i915_vma_put(vma);
- engine_record_requests(engine, request, ee);
- }
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ capture = this->next;
+ kfree(this);
+ }
- ee->hws_page =
- i915_error_object_create(i915,
- engine->status_page.vma);
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ engine->status_page.vma,
+ "HW Status",
+ compress));
- ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ engine->wa_ctx.vma,
+ "WA context",
+ compress));
- ee->default_state = capture_object(i915, engine->default_state);
- }
+ add_vma(ee,
+ capture_object(engine->gt,
+ engine->default_state,
+ "NULL context",
+ compress));
}
-static void gem_capture_vm(struct i915_gpu_state *error,
- struct i915_address_space *vm,
- int idx)
+static struct intel_engine_coredump *
+capture_engine(struct intel_engine_cs *engine,
+ struct i915_vma_compress *compress)
{
- struct drm_i915_error_buffer *active_bo;
- struct i915_vma *vma;
- int count;
+ struct intel_engine_capture_vma *capture = NULL;
+ struct intel_engine_coredump *ee;
+ struct i915_request *rq;
+ unsigned long flags;
- count = 0;
- list_for_each_entry(vma, &vm->bound_list, vm_link)
- if (i915_vma_is_active(vma))
- count++;
-
- active_bo = NULL;
- if (count)
- active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
- if (active_bo)
- count = capture_error_bo(active_bo,
- count, &vm->bound_list,
- ACTIVE_ONLY);
- else
- count = 0;
+ ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ if (!ee)
+ return NULL;
- error->active_vm[idx] = vm;
- error->active_bo[idx] = active_bo;
- error->active_bo_count[idx] = count;
+ spin_lock_irqsave(&engine->active.lock, flags);
+ rq = intel_engine_find_active_request(engine);
+ if (rq)
+ capture = intel_engine_coredump_add_request(ee, rq,
+ ATOMIC_MAYFAIL);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ if (!capture) {
+ kfree(ee);
+ return NULL;
+ }
+
+ intel_engine_coredump_add_vma(ee, capture, compress);
+
+ return ee;
}
-static void capture_active_buffers(struct i915_gpu_state *error)
+static void
+gt_record_engines(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
{
- int cnt = 0, i, j;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt->_gt, id) {
+ struct intel_engine_coredump *ee;
- BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
- BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
- BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
+ /* Refill our page pool before entering atomic section */
+ pool_refill(&compress->pool, ALLOW_FAIL);
- /* Scan each engine looking for unique active contexts/vm */
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
- bool found;
+ ee = capture_engine(engine, compress);
+ if (!ee)
+ continue;
- if (!ee->vm)
+ gt->simulated |= ee->simulated;
+ if (ee->simulated) {
+ kfree(ee);
continue;
+ }
- found = false;
- for (j = 0; j < i && !found; j++)
- found = error->engine[j].vm == ee->vm;
- if (!found)
- gem_capture_vm(error, ee->vm, cnt++);
+ ee->next = gt->engine;
+ gt->engine = ee;
}
}
-static void capture_pinned_buffers(struct i915_gpu_state *error)
+static struct intel_uc_coredump *
+gt_record_uc(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
{
- struct i915_address_space *vm = &error->i915->ggtt.vm;
- struct drm_i915_error_buffer *bo;
- struct i915_vma *vma;
- int count;
+ const struct intel_uc *uc = &gt->_gt->uc;
+ struct intel_uc_coredump *error_uc;
- count = 0;
- list_for_each_entry(vma, &vm->bound_list, vm_link)
- count++;
+ error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
+ if (!error_uc)
+ return NULL;
- bo = NULL;
- if (count)
- bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
- if (!bo)
- return;
+ memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
+ memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
- error->pinned_bo_count =
- capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
- error->pinned_bo = bo;
+ /* Non-default firmware paths will be specified by the modparam.
+ * As modparams are generally accessible from userspace, make
+ * explicit copies of the firmware paths.
+ */
+ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
+ error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
+ error_uc->guc_log =
+ i915_vma_coredump_create(gt->_gt,
+ uc->guc.log.vma, "GuC log buffer",
+ compress);
+
+ return error_uc;
}
-static void capture_uc_state(struct i915_gpu_state *error)
+static void gt_capture_prepare(struct intel_gt_coredump *gt)
{
- struct drm_i915_private *i915 = error->i915;
- struct i915_error_uc *error_uc = &error->uc;
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
- /* Capturing uC state won't be useful if there is no GuC */
- if (!error->device_info.has_guc)
- return;
+ mutex_lock(&ggtt->error_mutex);
+}
- error_uc->guc_fw = i915->guc.fw;
- error_uc->huc_fw = i915->huc.fw;
+static void gt_capture_finish(struct intel_gt_coredump *gt)
+{
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
- /* Non-default firmware paths will be specified by the modparam.
- * As modparams are generally accesible from the userspace make
- * explicit copies of the firmware paths.
- */
- error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
- error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
- error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ ggtt->vm.clear_range(&ggtt->vm,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ mutex_unlock(&ggtt->error_mutex);
}
/* Capture all registers which don't fit into another category. */
-static void capture_reg_state(struct i915_gpu_state *error)
+static void gt_record_regs(struct intel_gt_coredump *gt)
{
- struct drm_i915_private *i915 = error->i915;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
int i;
- /* General organization
+ /*
+ * General organization
* 1. Registers specific to a single generation
* 2. Registers which belong to multiple generations
* 3. Feature specific registers.
@@ -1595,122 +1540,162 @@ static void capture_reg_state(struct i915_gpu_state *error)
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(i915)) {
- error->gtier[0] = intel_uncore_read(uncore, GTIER);
- error->ier = intel_uncore_read(uncore, VLV_IER);
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ier = intel_uncore_read(uncore, VLV_IER);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
if (IS_GEN(i915, 7))
- error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
+ gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(i915) >= 8) {
- error->fault_data0 = intel_uncore_read(uncore,
- GEN8_FAULT_TLB_DATA0);
- error->fault_data1 = intel_uncore_read(uncore,
- GEN8_FAULT_TLB_DATA1);
+ if (INTEL_GEN(i915) >= 12) {
+ gt->fault_data0 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA0);
+ gt->fault_data1 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA1);
+ } else if (INTEL_GEN(i915) >= 8) {
+ gt->fault_data0 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA0);
+ gt->fault_data1 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA1);
}
if (IS_GEN(i915, 6)) {
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
- error->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
- error->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
+ gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
+ gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
if (INTEL_GEN(i915) >= 7)
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
if (INTEL_GEN(i915) >= 6) {
- error->derrmr = intel_uncore_read(uncore, DERRMR);
- error->error = intel_uncore_read(uncore, ERROR_GEN6);
- error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ gt->derrmr = intel_uncore_read(uncore, DERRMR);
+ if (INTEL_GEN(i915) < 12) {
+ gt->error = intel_uncore_read(uncore, ERROR_GEN6);
+ gt->done_reg = intel_uncore_read(uncore, DONE_REG);
+ }
}
- if (INTEL_GEN(i915) >= 5)
- error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE));
-
/* 3: Feature specific registers */
if (IS_GEN_RANGE(i915, 6, 7)) {
- error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
- error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
+ gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
+ }
+
+ if (IS_GEN_RANGE(i915, 8, 11))
+ gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+
+ if (IS_GEN(i915, 12))
+ gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+
+ if (INTEL_GEN(i915) >= 12) {
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ gt->sfc_done[i] =
+ intel_uncore_read(uncore, GEN12_SFC_DONE(i));
+ }
+
+ gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
}
/* 4: Everything else */
if (INTEL_GEN(i915) >= 11) {
- error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- error->gtier[0] =
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ gt->gtier[0] =
intel_uncore_read(uncore,
GEN11_RENDER_COPY_INTR_ENABLE);
- error->gtier[1] =
+ gt->gtier[1] =
intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
- error->gtier[2] =
+ gt->gtier[2] =
intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
- error->gtier[3] =
+ gt->gtier[3] =
intel_uncore_read(uncore,
GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- error->gtier[4] =
+ gt->gtier[4] =
intel_uncore_read(uncore,
GEN11_CRYPTO_RSVD_INTR_ENABLE);
- error->gtier[5] =
+ gt->gtier[5] =
intel_uncore_read(uncore,
GEN11_GUNIT_CSME_INTR_ENABLE);
- error->ngtier = 6;
+ gt->ngtier = 6;
} else if (INTEL_GEN(i915) >= 8) {
- error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
- error->gtier[i] = intel_uncore_read(uncore,
- GEN8_GT_IER(i));
- error->ngtier = 4;
+ gt->gtier[i] =
+ intel_uncore_read(uncore, GEN8_GT_IER(i));
+ gt->ngtier = 4;
} else if (HAS_PCH_SPLIT(i915)) {
- error->ier = intel_uncore_read(uncore, DEIER);
- error->gtier[0] = intel_uncore_read(uncore, GTIER);
- error->ngtier = 1;
+ gt->ier = intel_uncore_read(uncore, DEIER);
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
} else if (IS_GEN(i915, 2)) {
- error->ier = intel_uncore_read16(uncore, GEN2_IER);
+ gt->ier = intel_uncore_read16(uncore, GEN2_IER);
} else if (!IS_VALLEYVIEW(i915)) {
- error->ier = intel_uncore_read(uncore, GEN2_IER);
+ gt->ier = intel_uncore_read(uncore, GEN2_IER);
}
- error->eir = intel_uncore_read(uncore, EIR);
- error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+ gt->eir = intel_uncore_read(uncore, EIR);
+ gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+}
+
+/*
+ * Generate a semi-unique error code. The code is not meant to have meaning; its
+ * only purpose is to try to prevent false duplicate bug reports by grossly
+ * estimating a GPU error state.
+ *
+ * TODO: Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static u32 generate_ecode(const struct intel_engine_coredump *ee)
+{
+ /*
+ * IPEHR would be an ideal way to detect errors, as it's the gross
+ * measure of "the command that hung". However, it contains some very common
+ * synchronization commands which almost always appear in cases that are
+ * strictly a client bug. Use instdone to help differentiate those.
+ */
+ return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
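/*
 * A worked example with purely hypothetical register values: for
 * ee->ipehr == 0x0b160001 and ee->instdone.instdone == 0xffffffff,
 * generate_ecode() returns 0x0b160001 ^ 0xffffffff == 0xf4e9fffe, which
 * error_msg() below would report as "GPU HANG: ecode 9:1:f4e9fffe" on a
 * gen9 device whose only hung engine is the render engine (mask 0x1).
 */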
-static const char *
-error_msg(struct i915_gpu_state *error,
- intel_engine_mask_t engines, const char *msg)
+static const char *error_msg(struct i915_gpu_coredump *error)
{
+ struct intel_engine_coredump *first = NULL;
+ struct intel_gt_coredump *gt;
+ intel_engine_mask_t engines;
int len;
- int i;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++)
- if (!error->engine[i].context.pid)
- engines &= ~BIT(i);
+ engines = 0;
+ for (gt = error->gt; gt; gt = gt->next) {
+ struct intel_engine_coredump *cs;
+
+ if (gt->engine && !first)
+ first = gt->engine;
+
+ for (cs = gt->engine; cs; cs = cs->next)
+ engines |= cs->engine->mask;
+ }
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%x:0x%08x",
+ "GPU HANG: ecode %d:%x:%08x",
INTEL_GEN(error->i915), engines,
- i915_error_generate_code(error, engines));
- if (engines) {
+ generate_ecode(first));
+ if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
- i = __ffs(engines);
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine[i].context.comm,
- error->engine[i].context.pid);
+ first->context.comm, first->context.pid);
}
- if (msg)
- len += scnprintf(error->error_msg + len,
- sizeof(error->error_msg) - len,
- ", %s", msg);
return error->error_msg;
}
-static void capture_gen_state(struct i915_gpu_state *error)
+static void capture_gen(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915 = error->i915;
- error->awake = i915->gt.awake;
error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
error->suspended = i915->runtime_pm.suspended;
@@ -1721,6 +1706,7 @@ static void capture_gen_state(struct i915_gpu_state *error)
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
+ i915_params_copy(&error->params, &i915_modparams);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -1730,155 +1716,182 @@ static void capture_gen_state(struct i915_gpu_state *error)
error->driver_caps = i915->caps;
}
-static void capture_params(struct i915_gpu_state *error)
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
- i915_params_copy(&error->params, &i915_modparams);
-}
+ struct i915_gpu_coredump *error;
-static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
-{
- unsigned long epoch = error->capture;
- int i;
+ if (!i915_modparams.error_capture)
+ return NULL;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- const struct drm_i915_error_engine *ee = &error->engine[i];
+ error = kzalloc(sizeof(*error), gfp);
+ if (!error)
+ return NULL;
- if (ee->hangcheck_timestamp &&
- time_before(ee->hangcheck_timestamp, epoch))
- epoch = ee->hangcheck_timestamp;
- }
+ kref_init(&error->ref);
+ error->i915 = i915;
+
+ error->time = ktime_get_real();
+ error->boottime = ktime_get_boottime();
+ error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->capture = jiffies;
+
+ capture_gen(error);
- return epoch;
+ return error;
}
-static void capture_finish(struct i915_gpu_state *error)
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
- struct i915_ggtt *ggtt = &error->i915->ggtt;
- const u64 slot = ggtt->error_capture.start;
+ struct intel_gt_coredump *gc;
+
+ gc = kzalloc(sizeof(*gc), gfp);
+ if (!gc)
+ return NULL;
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ gc->_gt = gt;
+ gc->awake = intel_gt_pm_is_awake(gt);
+
+ gt_record_regs(gc);
+ gt_record_fences(gc);
+
+ return gc;
}
-static int capture(void *data)
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
- struct i915_gpu_state *error = data;
-
- error->time = ktime_get_real();
- error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(),
- error->i915->gt.last_init_time);
- error->capture = jiffies;
+ struct i915_vma_compress *compress;
- capture_params(error);
- capture_gen_state(error);
- capture_uc_state(error);
- capture_reg_state(error);
- gem_record_fences(error);
- gem_record_rings(error);
- capture_active_buffers(error);
- capture_pinned_buffers(error);
+ compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
+ if (!compress)
+ return NULL;
- error->overlay = intel_overlay_capture_error_state(error->i915);
- error->display = intel_display_capture_error_state(error->i915);
+ if (!compress_init(compress)) {
+ kfree(compress);
+ return NULL;
+ }
- error->epoch = capture_find_epoch(error);
+ gt_capture_prepare(gt);
- capture_finish(error);
- return 0;
+ return compress;
}
-#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+ if (!compress)
+ return;
+
+ gt_capture_finish(gt);
+
+ compress_fini(compress);
+ kfree(compress);
+}
-struct i915_gpu_state *
-i915_capture_gpu_state(struct drm_i915_private *i915)
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error) {
- i915_disable_error_state(i915, -ENOMEM);
+ error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
+ if (!error)
return ERR_PTR(-ENOMEM);
- }
- kref_init(&error->ref);
- error->i915 = i915;
+ error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
+ if (error->gt) {
+ struct i915_vma_compress *compress;
+
+ compress = i915_vma_capture_prepare(error->gt);
+ if (!compress) {
+ kfree(error->gt);
+ kfree(error);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gt_record_engines(error->gt, compress);
+
+ if (INTEL_INFO(i915)->has_gt_uc)
+ error->gt->uc = gt_record_uc(error->gt, compress);
- stop_machine(capture, error, NULL);
+ i915_vma_capture_finish(error->gt, compress);
+
+ error->simulated |= error->gt->simulated;
+ }
+
+ error->overlay = intel_overlay_capture_error_state(i915);
+ error->display = intel_display_capture_error_state(i915);
return error;
}
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @i915: i915 device
- * @engine_mask: the mask of engines triggering the hang
- * @msg: a message to insert into the error capture header
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-void i915_capture_error_state(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- const char *msg)
+void i915_error_state_store(struct i915_gpu_coredump *error)
{
+ struct drm_i915_private *i915;
static bool warned;
- struct i915_gpu_state *error;
- unsigned long flags;
- if (!i915_modparams.error_capture)
+ if (IS_ERR_OR_NULL(error))
return;
- if (READ_ONCE(i915->gpu_error.first_error))
- return;
+ i915 = error->i915;
+ dev_info(i915->drm.dev, "%s\n", error_msg(error));
- error = i915_capture_gpu_state(i915);
- if (IS_ERR(error))
+ if (error->simulated ||
+ cmpxchg(&i915->gpu_error.first_error, NULL, error))
return;
- dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg));
+ i915_gpu_coredump_get(error);
- if (!error->simulated) {
- spin_lock_irqsave(&i915->gpu_error.lock, flags);
- if (!i915->gpu_error.first_error) {
- i915->gpu_error.first_error = error;
- error = NULL;
- }
- spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
+ if (!xchg(&warned, true) &&
+ ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
+ pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
+ pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
+}
- if (error) {
- __i915_gpu_state_free(&error->ref);
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @i915: i915 device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_i915_private *i915)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_gpu_coredump(i915);
+ if (IS_ERR(error)) {
+ cmpxchg(&i915->gpu_error.first_error, NULL, error);
return;
}
- if (!warned &&
- ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
- DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- warned = true;
- }
+ i915_error_state_store(error);
+ i915_gpu_coredump_put(error);
}
-struct i915_gpu_state *
+struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
if (!IS_ERR_OR_NULL(error))
- i915_gpu_state_get(error);
+ i915_gpu_coredump_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
return error;
@@ -1886,7 +1899,7 @@ i915_first_error_state(struct drm_i915_private *i915)
void i915_reset_error_state(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
@@ -1895,7 +1908,7 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_unlock_irq(&i915->gpu_error.lock);
if (!IS_ERR_OR_NULL(error))
- i915_gpu_state_put(error);
+ i915_gpu_coredump_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 2ecd0c6a1c94..e4a6afed3bbf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -7,6 +7,7 @@
#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_
+#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>
@@ -14,9 +15,9 @@
#include <drm/drm_mm.h>
#include "gt/intel_engine.h"
+#include "gt/uc/intel_uc_fw.h"
#include "intel_device_info.h"
-#include "intel_uc_fw.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
@@ -24,44 +25,100 @@
#include "i915_scheduler.h"
struct drm_i915_private;
+struct i915_vma_compress;
+struct intel_engine_capture_vma;
struct intel_overlay_error_state;
struct intel_display_error_state;
-struct i915_gpu_state {
- struct kref ref;
- ktime_t time;
- ktime_t boottime;
- ktime_t uptime;
- unsigned long capture;
- unsigned long epoch;
+struct i915_vma_coredump {
+ struct i915_vma_coredump *next;
- struct drm_i915_private *i915;
+ char name[20];
+
+ u64 gtt_offset;
+ u64 gtt_size;
+ u32 gtt_page_sizes;
+
+ int num_pages;
+ int page_count;
+ int unused;
+ u32 *pages[0];
+};
+
+struct i915_request_coredump {
+ unsigned long flags;
+ pid_t pid;
+ u32 context;
+ u32 seqno;
+ u32 start;
+ u32 head;
+ u32 tail;
+ struct i915_sched_attr sched_attr;
+};
+
+struct intel_engine_coredump {
+ const struct intel_engine_cs *engine;
- char error_msg[128];
bool simulated;
- bool awake;
- bool wakelock;
- bool suspended;
- int iommu;
u32 reset_count;
- u32 suspend_count;
- struct intel_device_info device_info;
- struct intel_runtime_info runtime_info;
- struct intel_driver_caps driver_caps;
- struct i915_params params;
- struct i915_error_uc {
- struct intel_uc_fw guc_fw;
- struct intel_uc_fw huc_fw;
- struct drm_i915_error_object *guc_log;
- } uc;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
+ /* Register state */
+ u32 ccid;
+ u32 start;
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 mode;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u64 faddr;
+ u32 rc_psmi; /* sleep state */
+ struct intel_instdone instdone;
+
+ struct i915_gem_context_coredump {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ int active;
+ int guilty;
+ struct i915_sched_attr sched_attr;
+ } context;
+
+ struct i915_vma_coredump *vma;
+
+ struct i915_request_coredump execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+
+ struct intel_engine_coredump *next;
+};
+
+struct intel_gt_coredump {
+ const struct intel_gt *_gt;
+ bool awake;
+ bool simulated;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 gtier[6], ngtier;
- u32 ccid;
u32 derrmr;
u32 forcewake;
u32 error; /* gen6+ */
@@ -73,172 +130,68 @@ struct i915_gpu_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
+ u32 gtt_cache;
+ u32 aux_err; /* gen12 */
+ u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */
+ u32 gam_done; /* gen12 */
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
+
+ struct intel_engine_coredump *engine;
+
+ struct intel_uc_coredump {
+ struct intel_uc_fw guc_fw;
+ struct intel_uc_fw huc_fw;
+ struct i915_vma_coredump *guc_log;
+ } *uc;
+
+ struct intel_gt_coredump *next;
+};
+
+struct i915_gpu_coredump {
+ struct kref ref;
+ ktime_t time;
+ ktime_t boottime;
+ ktime_t uptime;
+ unsigned long capture;
+
+ struct drm_i915_private *i915;
+
+ struct intel_gt_coredump *gt;
+
+ char error_msg[128];
+ bool simulated;
+ bool wakelock;
+ bool suspended;
+ int iommu;
+ u32 reset_count;
+ u32 suspend_count;
+
+ struct intel_device_info device_info;
+ struct intel_runtime_info runtime_info;
+ struct intel_driver_caps driver_caps;
+ struct i915_params params;
+
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
- struct drm_i915_error_engine {
- int engine_id;
- /* Software tracked state */
- bool idle;
- unsigned long hangcheck_timestamp;
- struct i915_address_space *vm;
- int num_requests;
- u32 reset_count;
-
- /* position of active request inside the ring */
- u32 rq_head, rq_post, rq_tail;
-
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head;
- u32 cpu_ring_tail;
-
- /* Register state */
- u32 start;
- u32 tail;
- u32 head;
- u32 ctl;
- u32 mode;
- u32 hws;
- u32 ipeir;
- u32 ipehr;
- u32 bbstate;
- u32 instpm;
- u32 instps;
- u64 bbaddr;
- u64 acthd;
- u32 fault_reg;
- u64 faddr;
- u32 rc_psmi; /* sleep state */
- struct intel_instdone instdone;
-
- struct drm_i915_error_context {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- u32 hw_id;
- int active;
- int guilty;
- struct i915_sched_attr sched_attr;
- } context;
-
- struct drm_i915_error_object {
- u64 gtt_offset;
- u64 gtt_size;
- int num_pages;
- int page_count;
- int unused;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
- struct drm_i915_error_object **user_bo;
- long user_bo_count;
-
- struct drm_i915_error_object *wa_ctx;
- struct drm_i915_error_object *default_state;
-
- struct drm_i915_error_request {
- unsigned long flags;
- long jiffies;
- pid_t pid;
- u32 context;
- u32 seqno;
- u32 start;
- u32 head;
- u32 tail;
- struct i915_sched_attr sched_attr;
- } *requests, execlist[EXECLIST_MAX_PORTS];
- unsigned int num_ports;
-
- struct {
- u32 gfx_mode;
- union {
- u64 pdp[4];
- u32 pp_dir_base;
- };
- } vm_info;
- } engine[I915_NUM_ENGINES];
-
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u64 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- u32 userptr:1;
- u32 cache_level:3;
- } *active_bo[I915_NUM_ENGINES], *pinned_bo;
- u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
- struct i915_address_space *active_vm[I915_NUM_ENGINES];
-
struct scatterlist *sgl, *fit;
};
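/*
 * A minimal sketch of how the linked coredump layout above replaces the old
 * fixed-size arrays: one i915_gpu_coredump chains intel_gt_coredump nodes,
 * each of which chains intel_engine_coredump nodes, each of which chains
 * i915_vma_coredump nodes. The function name and the use of pr_info() are
 * purely illustrative.
 */
static void example_walk_coredump(const struct i915_gpu_coredump *error)
{
	const struct intel_gt_coredump *gt;

	for (gt = error->gt; gt; gt = gt->next) {
		const struct intel_engine_coredump *ee;

		for (ee = gt->engine; ee; ee = ee->next) {
			const struct i915_vma_coredump *vma;

			for (vma = ee->vma; vma; vma = vma->next)
				pr_info("%s: %s (%d pages)\n",
					ee->engine->name, vma->name,
					vma->page_count);
		}
	}
}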
struct i915_gpu_error {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work hangcheck_work;
-
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
- struct i915_gpu_state *first_error;
+ struct i915_gpu_coredump *first_error;
atomic_t pending_fb_pin;
- /**
- * flags: Control various stages of the GPU reset
- *
- * #I915_RESET_BACKOFF - When we start a global reset, we need to
- * serialise with any other users attempting to do the same, and
- * any global resources that may be clobber by the reset (such as
- * FENCE registers).
- *
- * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
- * flag to prevent two concurrent reset attempts in the same engine.
- * As the number of engines continues to grow, allocate the flags from
- * the most significant bits.
- *
- * #I915_WEDGED - If reset fails and we can no longer use the GPU,
- * we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_request_alloc(), this bit is checked and the sequence
- * aborted (with -EIO reported to userspace) if set.
- */
- unsigned long flags;
-#define I915_RESET_BACKOFF 0
-#define I915_RESET_MODESET 1
-#define I915_RESET_ENGINE 2
-#define I915_WEDGED (BITS_PER_LONG - 1)
-
/** Number of times the device has been reset (global) */
- u32 reset_count;
+ atomic_t reset_count;
/** Number of times an engine has been reset */
- u32 reset_engine_count[I915_NUM_ENGINES];
-
- struct mutex wedge_mutex; /* serialises wedging/unwedging */
-
- /**
- * Waitqueue to signal when a hang is detected. Used to for waiters
- * to release the struct_mutex for the reset to procede.
- */
- wait_queue_head_t wait_queue;
-
- /**
- * Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
- */
- wait_queue_head_t reset_queue;
-
- struct srcu_struct reset_backoff_srcu;
+ atomic_t reset_engine_count[I915_NUM_ENGINES];
};
struct drm_i915_error_state_buf {
@@ -258,41 +211,118 @@ struct drm_i915_error_state_buf {
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
- intel_engine_mask_t engine_mask,
- const char *error_msg);
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915);
+void i915_capture_error_state(struct drm_i915_private *i915);
+
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp);
+
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
-static inline struct i915_gpu_state *
-i915_gpu_state_get(struct i915_gpu_state *gpu)
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp);
+
+void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress);
+
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt);
+
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress);
+
+void i915_error_state_store(struct i915_gpu_coredump *error);
+
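/*
 * A minimal sketch, mirroring the ordering used by i915_gpu_coredump() and
 * i915_capture_error_state() in i915_gpu_error.c, of how the entry points
 * above compose. The function name is purely illustrative, GFP_KERNEL stands
 * in for the ALLOW_FAIL flags used in the .c file, error handling is elided,
 * and the body assumes the full i915_drv.h definitions are visible.
 */
static void example_capture_and_store(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;
	struct i915_vma_compress *compress;

	error = i915_gpu_coredump_alloc(i915, GFP_KERNEL);
	if (!error)
		return;

	error->gt = intel_gt_coredump_alloc(&i915->gt, GFP_KERNEL);
	if (error->gt) {
		compress = i915_vma_capture_prepare(error->gt);
		if (compress) {
			/* per-engine capture would go here, as in gt_record_engines() */
			i915_vma_capture_finish(error->gt, compress);
		}
	}

	i915_error_state_store(error);	/* takes its own reference */
	i915_gpu_coredump_put(error);	/* drop ours */
}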
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_get(struct i915_gpu_coredump *gpu)
{
kref_get(&gpu->ref);
return gpu;
}
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
- char *buf, loff_t offset, size_t count);
+ssize_t
+i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+ char *buf, loff_t offset, size_t count);
-void __i915_gpu_state_free(struct kref *kref);
-static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+void __i915_gpu_coredump_free(struct kref *kref);
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
if (gpu)
- kref_put(&gpu->ref, __i915_gpu_state_free);
+ kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
-struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *error_msg)
+static inline void i915_capture_error_state(struct drm_i915_private *i915)
+{
+}
+
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress)
+{
+}
+
+static inline struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+ return NULL;
+}
+
+static inline void
+i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+}
+
+static inline void
+i915_error_state_store(struct i915_gpu_coredump *error)
+{
+}
+
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
-static inline struct i915_gpu_state *
+static inline struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b2e27b5b0df9..afc6aad9bf8c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/circ_buf.h>
-#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
@@ -37,15 +36,20 @@
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "intel_drv.h"
#include "intel_pm.h"
/**
@@ -56,6 +60,8 @@
* and related files, but that will be described in separate chapters.
*/
+typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
+
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};
@@ -133,23 +139,38 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};
+static const u32 hpd_gen12[HPD_NUM_PINS] = {
+ [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
+ [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
+ [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+};
+
static const u32 hpd_icp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};
-static const u32 hpd_mcc[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
+static const u32 hpd_tgp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
+ [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
+ [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
-static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier)
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
{
intel_uncore_write(uncore, imr, 0xffffffff);
intel_uncore_posting_read(uncore, imr);
@@ -163,7 +184,7 @@ static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
intel_uncore_posting_read(uncore, iir);
}
-static void gen2_irq_reset(struct intel_uncore *uncore)
+void gen2_irq_reset(struct intel_uncore *uncore)
{
intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IMR);
@@ -177,19 +198,6 @@ static void gen2_irq_reset(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
- GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
-})
-
-#define GEN3_IRQ_RESET(uncore, type) \
- gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
-
-#define GEN2_IRQ_RESET(uncore) \
- gen2_irq_reset(uncore)
-
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
@@ -223,10 +231,10 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-static void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir)
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
{
gen3_assert_iir_is_zero(uncore, iir);
@@ -235,8 +243,8 @@ static void gen3_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read(uncore, imr);
}
-static void gen2_irq_init(struct intel_uncore *uncore,
- u32 imr_val, u32 ier_val)
+void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
{
gen2_assert_iir_is_zero(uncore);
@@ -245,27 +253,6 @@ static void gen2_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read16(uncore, GEN2_IMR);
}
-#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_init((uncore), \
- GEN8_##type##_IMR(which_), imr_val, \
- GEN8_##type##_IER(which_), ier_val, \
- GEN8_##type##_IIR(which_)); \
-})
-
-#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
- gen3_irq_init((uncore), \
- type##IMR, imr_val, \
- type##IER, ier_val, \
- type##IIR)
-
-#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
- gen2_irq_init((uncore), imr_val, ier_val)
-
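/*
 * An illustrative expansion of the removed wrappers above, now that
 * gen3_irq_reset()/gen3_irq_init() are non-static. The GT* registers are
 * existing i915 register names, used here purely as an example:
 *
 *	GEN3_IRQ_RESET(uncore, GT)
 *		==  gen3_irq_reset(uncore, GTIMR, GTIIR, GTIER);
 *	GEN3_IRQ_INIT(uncore, GT, imr_val, ier_val)
 *		==  gen3_irq_init(uncore, GTIMR, imr_val, GTIER, ier_val, GTIIR);
 */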
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
@@ -304,41 +291,6 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
- const unsigned int bank, const unsigned int bit);
-
-static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit)
-{
- void __iomem * const regs = i915->uncore.regs;
- u32 dw;
-
- lockdep_assert_held(&i915->irq_lock);
-
- dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (dw & BIT(bit)) {
- /*
- * According to the BSpec, DW_IIR bits cannot be cleared without
- * first servicing the Selector & Shared IIR registers.
- */
- gen11_gt_engine_identity(i915, bank, bit);
-
- /*
- * We locked GT INT DW by reading it. If we want to (try
- * to) recover from this succesfully, we need to clear
- * our bit, otherwise we are locking the register for
- * everybody.
- */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
-
- return true;
- }
-
- return false;
-}
-
/**
* ilk_update_display_irq - update DEIMR
* @dev_priv: driver private
@@ -370,305 +322,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
/**
- * ilk_update_gt_irq - update GTIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- dev_priv->gt_irq_mask &= ~interrupt_mask;
- dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-}
-
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_gt_irq(dev_priv, mask, mask);
- intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
-}
-
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_gt_irq(dev_priv, mask, 0);
-}
-
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
-{
- WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
-
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
-}
-
-static void write_pm_imr(struct drm_i915_private *dev_priv)
-{
- i915_reg_t reg;
- u32 mask = dev_priv->pm_imr;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
- /* pm is in upper half */
- mask = mask << 16;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- reg = GEN8_GT_IMR(2);
- } else {
- reg = GEN6_PMIMR;
- }
-
- I915_WRITE(reg, mask);
- POSTING_READ(reg);
-}
-
-static void write_pm_ier(struct drm_i915_private *dev_priv)
-{
- i915_reg_t reg;
- u32 mask = dev_priv->pm_ier;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- /* pm is in upper half */
- mask = mask << 16;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- reg = GEN8_GT_IER(2);
- } else {
- reg = GEN6_PMIER;
- }
-
- I915_WRITE(reg, mask);
-}
-
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
-{
- u32 new_val;
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- new_val = dev_priv->pm_imr;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != dev_priv->pm_imr) {
- dev_priv->pm_imr = new_val;
- write_pm_imr(dev_priv);
- }
-}
-
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- snb_update_pm_irq(dev_priv, mask, mask);
-}
-
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- snb_update_pm_irq(dev_priv, mask, 0);
-}
-
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- __gen6_mask_pm_irq(dev_priv, mask);
-}
-
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
-{
- i915_reg_t reg = gen6_pm_iir(dev_priv);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- I915_WRITE(reg, reset_mask);
- I915_WRITE(reg, reset_mask);
- POSTING_READ(reg);
-}
-
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- dev_priv->pm_ier |= enable_mask;
- write_pm_ier(dev_priv);
- gen6_unmask_pm_irq(dev_priv, enable_mask);
- /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
-}
-
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- dev_priv->pm_ier &= ~disable_mask;
- __gen6_mask_pm_irq(dev_priv, disable_mask);
- write_pm_ier(dev_priv);
- /* though a barrier is missing here, but don't really need a one */
-}
-
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
-
- while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
- ;
-
- dev_priv->gt_pm.rps.pm_iir = 0;
-
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
- dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON_ONCE(rps->pm_iir);
-
- if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
- else
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
- rps->interrupts_enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (!READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&dev_priv->irq_lock);
- rps->interrupts_enabled = false;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
- gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
-
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
-
- /* Now that we will not be generating any more work, flush any
- * outstanding tasks. As we are called on the RPS idle path,
- * we will reset the GPU to minimum frequencies, so the current
- * state of the worker can be discarded.
- */
- cancel_work_sync(&rps->work);
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
-
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts.enabled) {
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
- dev_priv->pm_guc_events);
- dev_priv->guc.interrupts.enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
- }
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts.enabled = false;
-
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
-
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
-
- gen9_reset_guc_interrupts(dev_priv);
-}
-
-void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
-{
- spin_lock_irq(&i915->irq_lock);
- gen11_reset_one_iir(i915, 0, GEN11_GUC);
- spin_unlock_irq(&i915->irq_lock);
-}
-
-void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK,
- GEN11_GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
- dev_priv->guc.interrupts.enabled = true;
- }
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts.enabled = false;
-
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
-
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
-
- gen11_reset_guc_interrupts(dev_priv);
-}
-
-/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
@@ -924,11 +577,12 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
const struct drm_display_mode *mode = &vblank->hwmode;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
unsigned long irqflags;
@@ -989,9 +643,10 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
-static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
@@ -1107,14 +762,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
@@ -1157,7 +812,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- position = __intel_get_crtc_scanline(intel_crtc);
+ position = __intel_get_crtc_scanline(crtc);
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -1237,200 +892,8 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
return position;
}
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay;
-
- spin_lock(&mchdev_lock);
-
- intel_uncore_write16(uncore,
- MEMINTRSTS,
- intel_uncore_read(uncore, MEMINTRSTS));
-
- new_delay = dev_priv->ips.cur_delay;
-
- intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
- busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
- max_avg = intel_uncore_read(uncore, RCBMAXAVG);
- min_avg = intel_uncore_read(uncore, RCBMINAVG);
-
- /* Handle RCS change request from hw */
- if (busy_up > max_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.cur_delay - 1;
- if (new_delay < dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.max_delay;
- } else if (busy_down < min_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.cur_delay + 1;
- if (new_delay > dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.min_delay;
- }
-
- if (ironlake_set_drps(dev_priv, new_delay))
- dev_priv->ips.cur_delay = new_delay;
-
- spin_unlock(&mchdev_lock);
-
- return;
-}
-
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *ei)
-{
- ei->ktime = ktime_get_raw();
- ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
- ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
-}
-
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
-{
- memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
-}
-
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const struct intel_rps_ei *prev = &rps->ei;
- struct intel_rps_ei now;
- u32 events = 0;
-
- if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
- return 0;
-
- vlv_c0_read(dev_priv, &now);
-
- if (prev->ktime) {
- u64 time, c0;
- u32 render, media;
-
- time = ktime_us_delta(now.ktime, prev->ktime);
-
- time *= dev_priv->czclk_freq;
-
- /* Workload can be split between render + media,
- * e.g. SwapBuffers being blitted in X after being rendered in
- * mesa. To account for this we need to combine both engines
- * into our activity counter.
- */
- render = now.render_c0 - prev->render_c0;
- media = now.media_c0 - prev->media_c0;
- c0 = max(render, media);
- c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
-
- if (c0 > time * rps->power.up_threshold)
- events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->power.down_threshold)
- events = GEN6_PM_RP_DOWN_THRESHOLD;
- }
-
- rps->ei = now;
- return events;
-}
-
-static void gen6_pm_rps_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, gt_pm.rps.work);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- bool client_boost = false;
- int new_delay, adj, min, max;
- u32 pm_iir = 0;
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (rps->interrupts_enabled) {
- pm_iir = fetch_and_zero(&rps->pm_iir);
- client_boost = atomic_read(&rps->num_waiters);
- }
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Make sure we didn't queue anything we're not going to process. */
- WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
- if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- goto out;
-
- mutex_lock(&rps->lock);
-
- pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
-
- adj = rps->last_adj;
- new_delay = rps->cur_freq;
- min = rps->min_freq_softlimit;
- max = rps->max_freq_softlimit;
- if (client_boost)
- max = rps->max_freq;
- if (client_boost && new_delay < rps->boost_freq) {
- new_delay = rps->boost_freq;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
-
- if (new_delay >= rps->max_freq_softlimit)
- adj = 0;
- } else if (client_boost) {
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (rps->cur_freq > rps->efficient_freq)
- new_delay = rps->efficient_freq;
- else if (rps->cur_freq > rps->min_freq_softlimit)
- new_delay = rps->min_freq_softlimit;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
- if (adj < 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
-
- if (new_delay <= rps->min_freq_softlimit)
- adj = 0;
- } else { /* unknown event */
- adj = 0;
- }
-
- rps->last_adj = adj;
-
- /*
- * Limit deboosting and boosting to keep ourselves at the extremes
- * when in the respective power modes (i.e. slowly decrease frequencies
- * while in the HIGH_POWER zone and slowly increase frequencies while
- * in the LOW_POWER zone). On idle, we will hit the timeout and drop
- * to the next level quickly, and conversely if busy we expect to
- * hit a waitboost and rapidly switch into max power.
- */
- if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
- (adj > 0 && rps->power.mode == LOW_POWER))
- rps->last_adj = 0;
-
- /* sysfs frequency interfaces may have snuck in while servicing the
- * interrupt
- */
- new_delay += adj;
- new_delay = clamp_t(int, new_delay, min, max);
-
- if (intel_set_rps(dev_priv, new_delay)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- rps->last_adj = 0;
- }
-
- mutex_unlock(&rps->lock);
-
-out:
- /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&dev_priv->irq_lock);
- if (rps->interrupts_enabled)
- gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-
/**
- * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * ivb_parity_work - Workqueue called when a parity error interrupt
* occurred.
* @work: workqueue struct
*
@@ -1438,10 +901,11 @@ out:
* this event, userspace should try to remap the bad rows since statistically
* it is likely that the same row will go bad again.
*/
-static void ivybridge_parity_work(struct work_struct *work)
+static void ivb_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
+ struct intel_gt *gt = &dev_priv->gt;
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
@@ -1503,144 +967,13 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irq(&dev_priv->irq_lock);
- gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
+ gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
+ spin_unlock_irq(&gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
- u32 iir)
-{
- if (!HAS_L3_DPF(dev_priv))
- return;
-
- spin_lock(&dev_priv->irq_lock);
- gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock(&dev_priv->irq_lock);
-
- iir &= GT_PARITY_ERROR(dev_priv);
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
- dev_priv->l3_parity.which_slice |= 1 << 1;
-
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- dev_priv->l3_parity.which_slice |= 1 << 0;
-
- queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
-}
-
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-}
-
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
- if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
-
- if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
- GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
- DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
-
- if (gt_iir & GT_PARITY_ERROR(dev_priv))
- ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
-}
-
-static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
- bool tasklet = false;
-
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
- tasklet = true;
-
- if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_breadcrumbs_irq(engine);
- tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
- }
-
- if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
-static void gen8_gt_irq_ack(struct drm_i915_private *i915,
- u32 master_ctl, u32 gt_iir[4])
-{
- void __iomem * const regs = i915->uncore.regs;
-
-#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
- GEN8_GT_BCS_IRQ | \
- GEN8_GT_VCS0_IRQ | \
- GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VECS_IRQ | \
- GEN8_GT_PM_IRQ | \
- GEN8_GT_GUC_IRQ)
-
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
- if (likely(gt_iir[0]))
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
- if (likely(gt_iir[1]))
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2]))
- raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
- if (likely(gt_iir[3]))
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
- }
-}
-
-static void gen8_gt_irq_handler(struct drm_i915_private *i915,
- u32 master_ctl, u32 gt_iir[4])
-{
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS0],
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS0],
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS0],
- gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS1],
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS0],
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(i915, gt_iir[2]);
- gen9_guc_irq_handler(i915, gt_iir[2]);
- }
-}
-
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1657,6 +990,26 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
+static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_G:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ case HPD_PORT_H:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
+ case HPD_PORT_I:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
+ default:
+ return false;
+ }
+}
+
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1675,9 +1028,11 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
+ case HPD_PORT_C:
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
default:
return false;
}
@@ -1699,6 +1054,26 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
+static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_G:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ case HPD_PORT_H:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
+ case HPD_PORT_I:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
+ default:
+ return false;
+ }
+}
+
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1778,6 +1153,8 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
{
enum hpd_pin pin;
+ BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
+
for_each_hpd_pin(pin) {
if ((hpd[pin] & hotplug_trigger) == 0)
continue;
@@ -1888,64 +1265,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
res1, res2);
}
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
-{
- struct intel_rps *rps = &i915->gt_pm.rps;
- const u32 events = i915->pm_rps_events & pm_iir;
-
- lockdep_assert_held(&i915->irq_lock);
-
- if (unlikely(!events))
- return;
-
- gen6_mask_pm_irq(i915, events);
-
- if (!rps->interrupts_enabled)
- return;
-
- rps->pm_iir |= events;
- schedule_work(&rps->work);
-}
-
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&dev_priv->irq_lock);
- gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- if (rps->interrupts_enabled) {
- rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&rps->work);
- }
- spin_unlock(&dev_priv->irq_lock);
- }
-
- if (INTEL_GEN(dev_priv) >= 8)
- return;
-
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
-
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-}
-
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
-{
- if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
- intel_guc_to_host_event_handler(&dev_priv->guc);
-}
-
-static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
-{
- if (iir & GEN11_GUC_INTR_GUC2HOST)
- intel_guc_to_host_event_handler(&i915->guc);
-}
-
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -1962,7 +1281,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- int pipe;
+ enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1987,6 +1306,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
status_mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
+ default:
case PIPE_A:
iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
@@ -2185,8 +1505,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -2254,9 +1573,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
- snb_gt_irq_handler(dev_priv, gt_iir);
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2271,8 +1590,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -2313,7 +1631,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2337,7 +1655,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2384,7 +1702,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
@@ -2470,7 +1788,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
@@ -2504,12 +1822,36 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
- const u32 *pins)
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
+ bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
+ const u32 *pins;
+
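+ /* Select the trigger masks, long-pulse detector and HPD pin table per PCH. */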
+ if (HAS_PCH_TGP(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
+ tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_JSP(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = 0;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_MCC(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
+ } else {
+ WARN(!HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
+ }
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
@@ -2532,7 +1874,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger,
dig_hotplug_reg, pins,
- icp_tc_port_hotplug_long_detect);
+ tc_port_hotplug_long_detect);
}
if (pin_mask)
@@ -2637,7 +1979,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev_priv);
+ gen5_rps_irq_handler(&dev_priv->gt.rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2689,10 +2031,9 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
* 4 - Process the interrupt(s) that had bits set in the IIRs.
* 5 - Re-enable Master Interrupt Control.
*/
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2723,9 +2064,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_GEN(dev_priv) >= 6)
- snb_gt_irq_handler(dev_priv, gt_iir);
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
else
- ilk_gt_irq_handler(dev_priv, gt_iir);
+ gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
}
de_iir = I915_READ(DEIIR);
@@ -2743,7 +2084,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
}
}
@@ -2778,6 +2119,16 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+ long_pulse_detect_func long_pulse_detect;
+ const u32 *hpd;
+
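+ /* Gen12 uses its own HPD pin table and long-pulse detection helper. */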
+ if (INTEL_GEN(dev_priv) >= 12) {
+ long_pulse_detect = gen12_port_hotplug_long_detect;
+ hpd = hpd_gen12;
+ } else {
+ long_pulse_detect = gen11_port_hotplug_long_detect;
+ hpd = hpd_gen11;
+ }
if (trigger_tc) {
u32 dig_hotplug_reg;
@@ -2786,8 +2137,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
- dig_hotplug_reg, hpd_gen11,
- gen11_port_hotplug_long_detect);
+ dig_hotplug_reg, hpd, long_pulse_detect);
}
if (trigger_tbt) {
@@ -2797,8 +2147,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
- dig_hotplug_reg, hpd_gen11,
- gen11_port_hotplug_long_detect);
+ dig_hotplug_reg, hpd, long_pulse_detect);
}
if (pin_mask)
@@ -2809,23 +2158,77 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
- u32 mask = GEN8_AUX_CHANNEL_A;
+ u32 mask;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ return TGL_DE_PORT_AUX_DDIA |
+ TGL_DE_PORT_AUX_DDIB |
+ TGL_DE_PORT_AUX_DDIC |
+ TGL_DE_PORT_AUX_USBC1 |
+ TGL_DE_PORT_AUX_USBC2 |
+ TGL_DE_PORT_AUX_USBC3 |
+ TGL_DE_PORT_AUX_USBC4 |
+ TGL_DE_PORT_AUX_USBC5 |
+ TGL_DE_PORT_AUX_USBC6;
+
+ mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
mask |= CNL_AUX_CHANNEL_F;
- if (INTEL_GEN(dev_priv) >= 11)
- mask |= ICL_AUX_CHANNEL_E |
- CNL_AUX_CHANNEL_F;
+ if (IS_GEN(dev_priv, 11))
+ mask |= ICL_AUX_CHANNEL_E;
return mask;
}
+static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 11)
+ return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
+ else if (INTEL_GEN(dev_priv) >= 9)
+ return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+}
+
+static void
+gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ bool found = false;
+
+ if (iir & GEN8_DE_MISC_GSE) {
+ intel_opregion_asle_intr(dev_priv);
+ found = true;
+ }
+
+ if (iir & GEN8_DE_EDP_PSR) {
+ u32 psr_iir;
+ i915_reg_t iir_reg;
+
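+ /* On gen12 the PSR interrupt bits live in the per-transcoder IIR. */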
+ if (INTEL_GEN(dev_priv) >= 12)
+ iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
+ else
+ iir_reg = EDP_PSR_IIR;
+
+ psr_iir = I915_READ(iir_reg);
+ I915_WRITE(iir_reg, psr_iir);
+
+ if (psr_iir)
+ found = true;
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ }
+
+ if (!found)
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2836,29 +2239,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
- bool found = false;
-
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
-
- if (iir & GEN8_DE_MISC_GSE) {
- intel_opregion_asle_intr(dev_priv);
- found = true;
- }
-
- if (iir & GEN8_DE_EDP_PSR) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
-
- intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
- found = true;
- }
-
- if (!found)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- }
- else
+ gen8_de_misc_irq_handler(dev_priv, iir);
+ } else {
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+ }
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
@@ -2938,12 +2324,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- fault_errors = iir;
- if (INTEL_GEN(dev_priv) >= 9)
- fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- else
- fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-
+ fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
@@ -2962,10 +2343,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
- icp_irq_handler(dev_priv, iir, hpd_mcc);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir, hpd_icp);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_handler(dev_priv, iir);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
@@ -3002,7 +2381,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
- struct drm_i915_private *dev_priv = to_i915(arg);
+ struct drm_i915_private *dev_priv = arg;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -3017,7 +2396,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
/* Find, clear, then process each source of interrupt */
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -3028,140 +2407,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
return IRQ_HANDLED;
}
static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
- const unsigned int bank, const unsigned int bit)
-{
- void __iomem * const regs = i915->uncore.regs;
- u32 timeout_ts;
- u32 ident;
-
- lockdep_assert_held(&i915->irq_lock);
-
- raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
-
- /*
- * NB: Specs do not specify how long to spin wait,
- * so we do ~100us as an educated guess.
- */
- timeout_ts = (local_clock() >> 10) + 100;
- do {
- ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
- } while (!(ident & GEN11_INTR_DATA_VALID) &&
- !time_after32(local_clock() >> 10, timeout_ts));
-
- if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
- DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
- bank, bit, ident);
- return 0;
- }
-
- raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
- GEN11_INTR_DATA_VALID);
-
- return ident;
-}
-
-static void
-gen11_other_irq_handler(struct drm_i915_private * const i915,
- const u8 instance, const u16 iir)
-{
- if (instance == OTHER_GUC_INSTANCE)
- return gen11_guc_irq_handler(i915, iir);
-
- if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(i915, iir);
-
- WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
- instance, iir);
-}
-
-static void
-gen11_engine_irq_handler(struct drm_i915_private * const i915,
- const u8 class, const u8 instance, const u16 iir)
-{
- struct intel_engine_cs *engine;
-
- if (instance <= MAX_ENGINE_INSTANCE)
- engine = i915->engine_class[class][instance];
- else
- engine = NULL;
-
- if (likely(engine))
- return gen8_cs_irq_handler(engine, iir);
-
- WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
- class, instance);
-}
-
-static void
-gen11_gt_identity_handler(struct drm_i915_private * const i915,
- const u32 identity)
+gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
- const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
- const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
- const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
-
- if (unlikely(!intr))
- return;
-
- if (class <= COPY_ENGINE_CLASS)
- return gen11_engine_irq_handler(i915, class, instance, intr);
-
- if (class == OTHER_CLASS)
- return gen11_other_irq_handler(i915, instance, intr);
-
- WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
- class, instance, intr);
-}
-
-static void
-gen11_gt_bank_handler(struct drm_i915_private * const i915,
- const unsigned int bank)
-{
- void __iomem * const regs = i915->uncore.regs;
- unsigned long intr_dw;
- unsigned int bit;
-
- lockdep_assert_held(&i915->irq_lock);
-
- intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-
- for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
-
- gen11_gt_identity_handler(i915, ident);
- }
-
- /* Clear must be after shared has been served for engine */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
-}
-
-static void
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
- const u32 master_ctl)
-{
- unsigned int bank;
-
- spin_lock(&i915->irq_lock);
-
- for (bank = 0; bank < 2; bank++) {
- if (master_ctl & GEN11_GT_DW_IRQ(bank))
- gen11_gt_bank_handler(i915, bank);
- }
-
- spin_unlock(&i915->irq_lock);
-}
-
-static u32
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
-{
- void __iomem * const regs = dev_priv->uncore.regs;
+ void __iomem * const regs = gt->uncore->regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3175,10 +2429,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
}
static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
+gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(gt->i915);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -3199,53 +2453,74 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
-static irqreturn_t gen11_irq_handler(int irq, void *arg)
+static void
+gen11_display_irq_handler(struct drm_i915_private *i915)
+{
+ void __iomem * const regs = i915->uncore.regs;
+ const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ /*
+ * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
+ * for the display related bits.
+ */
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
+ gen8_de_irq_handler(i915, disp_ctl);
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
+ GEN11_DISPLAY_IRQ_ENABLE);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+}
+
+static __always_inline irqreturn_t
+__gen11_irq_handler(struct drm_i915_private * const i915,
+ u32 (*intr_disable)(void __iomem * const regs),
+ void (*intr_enable)(void __iomem * const regs))
{
- struct drm_i915_private * const i915 = to_i915(arg);
void __iomem * const regs = i915->uncore.regs;
+ struct intel_gt *gt = &i915->gt;
u32 master_ctl;
u32 gu_misc_iir;
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = gen11_master_intr_disable(regs);
+ master_ctl = intr_disable(regs);
if (!master_ctl) {
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
return IRQ_NONE;
}
/* Find, clear, then process each source of interrupt. */
- gen11_gt_irq_handler(i915, master_ctl);
+ gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- if (master_ctl & GEN11_DISPLAY_IRQ) {
- const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
-
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
- /*
- * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
- * for the display related bits.
- */
- gen8_de_irq_handler(i915, disp_ctl);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
- }
+ if (master_ctl & GEN11_DISPLAY_IRQ)
+ gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(gt, gu_misc_iir);
return IRQ_HANDLED;
}
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+ return __gen11_irq_handler(arg,
+ gen11_master_intr_disable,
+ gen11_master_intr_enable);
+}
+
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3255,19 +2530,26 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- if (dev_priv->i945gm_vblank.enabled++ == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ /*
+ * Vblank interrupts fail to wake the device up from C2+.
+ * Disabling render clock gating during C-states avoids
+ * the problem. There is a small power cost so we do this
+ * only when vblank interrupts are actually enabled.
+ */
+ if (dev_priv->vblank_enabled++ == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
- return i8xx_enable_vblank(dev, pipe);
+ return i8xx_enable_vblank(crtc);
}
-static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3278,9 +2560,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3293,14 +2576,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
* PSR is active as no frames are generated.
*/
if (HAS_PSR(dev_priv))
- drm_vblank_restore(dev, pipe);
+ drm_crtc_vblank_restore(crtc);
return 0;
}
-static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int bdw_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3311,7 +2595,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
* PSR is active as no frames are generated, so check only for PSR.
*/
if (HAS_PSR(dev_priv))
- drm_vblank_restore(dev, pipe);
+ drm_crtc_vblank_restore(crtc);
return 0;
}
@@ -3319,9 +2603,10 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3329,19 +2614,20 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- i8xx_disable_vblank(dev, pipe);
+ i8xx_disable_vblank(crtc);
- if (--dev_priv->i945gm_vblank.enabled == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ if (--dev_priv->vblank_enabled == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3350,9 +2636,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3362,9 +2649,10 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void bdw_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3372,60 +2660,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_vblank_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, i945gm_vblank.work);
-
- /*
- * Vblank interrupts fail to wake up the device from C3,
- * hence we want to prevent C3 usage while vblank interrupts
- * are enabled.
- */
- pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
- READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
- dev_priv->i945gm_vblank.c3_disable_latency :
- PM_QOS_DEFAULT_VALUE);
-}
-
-static int cstate_disable_latency(const char *name)
-{
- const struct cpuidle_driver *drv;
- int i;
-
- drv = cpuidle_get_driver();
- if (!drv)
- return 0;
-
- for (i = 0; i < drv->state_count; i++) {
- const struct cpuidle_state *state = &drv->states[i];
-
- if (!strcmp(state->name, name))
- return state->exit_latency ?
- state->exit_latency - 1 : 0;
- }
-
- return 0;
-}
-
-static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
-{
- INIT_WORK(&dev_priv->i945gm_vblank.work,
- i945gm_vblank_work_func);
-
- dev_priv->i945gm_vblank.c3_disable_latency =
- cstate_disable_latency("C3");
- pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-}
-
-static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
-{
- cancel_work_sync(&dev_priv->i945gm_vblank.work);
- pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3447,10 +2681,8 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
*
* This function needs to be called before interrupts are enabled.
*/
-static void ibx_irq_pre_postinstall(struct drm_device *dev)
+static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_NOP(dev_priv))
return;
@@ -3459,26 +2691,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- GEN3_IRQ_RESET(uncore, GT);
- if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(uncore, GEN6_PM);
-}
-
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3519,33 +2742,30 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
/* drm_dma.h hooks
*/
-static void ironlake_irq_reset(struct drm_device *dev)
+static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
- I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+ intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
- gen5_gt_irq_reset(dev_priv);
+ gen5_gt_irq_reset(&dev_priv->gt);
ibx_irq_reset(dev_priv);
}
-static void valleyview_irq_reset(struct drm_device *dev)
+static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev_priv);
+ gen5_gt_irq_reset(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3553,28 +2773,17 @@ static void valleyview_irq_reset(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
-
- GEN8_IRQ_RESET_NDX(uncore, GT, 0);
- GEN8_IRQ_RESET_NDX(uncore, GT, 1);
- GEN8_IRQ_RESET_NDX(uncore, GT, 2);
- GEN8_IRQ_RESET_NDX(uncore, GT, 3);
-}
-
-static void gen8_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen8_master_intr_disable(dev_priv->uncore.regs);
- gen8_gt_irq_reset(dev_priv);
+ gen8_gt_irq_reset(&dev_priv->gt);
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3589,39 +2798,30 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
-static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
- /* Disable RCS, BCS, VCS and VECS class engines. */
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
-
- /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
-
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
-}
-
-static void gen11_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
- gen11_master_intr_disable(dev_priv->uncore.regs);
+ intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
- gen11_gt_irq_reset(dev_priv);
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
- I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
+ intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
+ }
+ } else {
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ }
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3631,13 +2831,24 @@ static void gen11_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
- GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
}
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ gen11_master_intr_disable(dev_priv->uncore.regs);
+
+ gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_display_irq_reset(dev_priv);
+
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+}
+
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
@@ -3680,18 +2891,17 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
-static void cherryview_irq_reset(struct drm_device *dev)
+static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(dev_priv);
+ gen8_gt_irq_reset(&dev_priv->gt);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3756,33 +2966,63 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 ddi_hotplug_enable_mask,
+ u32 tc_hotplug_enable_mask)
{
u32 hotplug;
hotplug = I915_READ(SHOTPLUG_CTL_DDI);
- hotplug |= ICP_DDIA_HPD_ENABLE |
- ICP_DDIB_HPD_ENABLE;
+ hotplug |= ddi_hotplug_enable_mask;
I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
- hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
- ICP_TC_HPD_ENABLE(PORT_TC2) |
- ICP_TC_HPD_ENABLE(PORT_TC3) |
- ICP_TC_HPD_ENABLE(PORT_TC4);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ if (tc_hotplug_enable_mask) {
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= tc_hotplug_enable_mask;
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ }
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
+ u32 sde_ddi_mask, u32 sde_tc_mask,
+ u32 ddi_enable_mask, u32 tc_enable_mask,
+ const u32 *pins)
{
u32 hotplug_irqs, enabled_irqs;
- hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+ hotplug_irqs = sde_ddi_mask | sde_tc_mask;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
+
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv);
+ icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
+}
+
+/*
+ * EHL doesn't need most of gen11_hpd_irq_setup since it handles only the
+ * equivalent of SDE.
+ */
+static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
+ hpd_icp);
+}
+
+/*
+ * JSP behaves exactly the same as MCC above except that port C is mapped to
+ * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's
+ * masks & tables rather than ICP's masks & tables.
+ */
+static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_TGP, 0,
+ TGP_DDI_HPD_ENABLE_MASK, 0,
+ hpd_tgp);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3807,9 +3047,11 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
+ const u32 *hpd;
u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3819,8 +3061,14 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
+ TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
+ ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3852,6 +3100,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
@@ -3950,9 +3201,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}
-static void ibx_irq_postinstall(struct drm_device *dev)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
if (HAS_PCH_NOP(dev_priv))
@@ -3975,48 +3225,8 @@ static void ibx_irq_postinstall(struct drm_device *dev)
spt_hpd_detection_setup(dev_priv);
}
-static void gen5_gt_irq_postinstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 pm_irqs, gt_irqs;
-
- pm_irqs = gt_irqs = 0;
-
- dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev_priv)) {
- /* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
- gt_irqs |= GT_PARITY_ERROR(dev_priv);
- }
-
- gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 5)) {
- gt_irqs |= ILK_BSD_USER_INTERRUPT;
- } else {
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- }
-
- GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
-
- if (INTEL_GEN(dev_priv) >= 6) {
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS
- * itself is enabled/disabled.
- */
- if (HAS_ENGINE(dev_priv, VECS0)) {
- pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
- }
-
- dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
- }
-}
-
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
@@ -4037,22 +3247,21 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (IS_HASWELL(dev_priv)) {
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
dev_priv->irq_mask = ~display_mask;
- ibx_irq_pre_postinstall(dev);
+ ibx_irq_pre_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
- gen5_gt_irq_postinstall(dev);
+ gen5_gt_irq_postinstall(&dev_priv->gt);
ilk_hpd_detection_setup(dev_priv);
- ibx_irq_postinstall(dev);
+ ibx_irq_postinstall(dev_priv);
if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
@@ -4064,8 +3273,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irq(&dev_priv->irq_lock);
}
-
- return 0;
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@ -4097,11 +3304,9 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
}
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen5_gt_irq_postinstall(dev);
+ gen5_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4110,42 +3315,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
POSTING_READ(VLV_MASTER_IER);
-
- return 0;
-}
-
-static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- /* These are interrupts we'll toggle with the ring mask register */
- u32 gt_interrupts[] = {
- (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
- 0,
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
- };
-
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
- GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled. Same will be the case for GuC interrupts.
- */
- GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
- GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4187,8 +3356,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
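+ /* Gen12 PSR IIR is per transcoder; check only powered-up transcoders. */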
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
+ }
+ } else {
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ }
for_each_pipe(dev_priv, pipe) {
dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
@@ -4218,58 +3400,22 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static int gen8_irq_postinstall(struct drm_device *dev)
+static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_pre_postinstall(dev);
+ ibx_irq_pre_postinstall(dev_priv);
- gen8_gt_irq_postinstall(dev_priv);
+ gen8_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev);
+ ibx_irq_postinstall(dev_priv);
gen8_master_intr_enable(dev_priv->uncore.regs);
-
- return 0;
-}
-
-static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
-
- BUILD_BUG_ON(irqs & 0xffff0000);
-
- /* Enable RCS, BCS, VCS and VECS class interrupts. */
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
-
- /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
-
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
- */
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
-
- /* Same thing for GuC interrupts */
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
-static void icp_irq_postinstall(struct drm_device *dev)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask = SDE_GMBUS_ICP;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -4279,36 +3425,41 @@ static void icp_irq_postinstall(struct drm_device *dev)
gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
- icp_hpd_detection_setup(dev_priv);
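+ /* Program the hotplug enable bits that match the detected PCH. */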
+ if (HAS_PCH_TGP(dev_priv))
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK);
+ else if (HAS_PCH_JSP(dev_priv))
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ else if (HAS_PCH_MCC(dev_priv))
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE(PORT_TC1));
+ else
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK);
}
-static int gen11_irq_postinstall(struct drm_device *dev)
+static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev);
+ icp_irq_postinstall(dev_priv);
- gen11_gt_irq_postinstall(dev_priv);
+ gen11_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->uncore.regs);
+ gen11_master_intr_enable(uncore->regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
-
- return 0;
}
-static int cherryview_irq_postinstall(struct drm_device *dev)
+static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen8_gt_irq_postinstall(dev_priv);
+ gen8_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4317,13 +3468,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- return 0;
}
-static void i8xx_irq_reset(struct drm_device *dev)
+static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
@@ -4331,9 +3479,8 @@ static void i8xx_irq_reset(struct drm_device *dev)
GEN2_IRQ_RESET(uncore);
}
-static int i8xx_irq_postinstall(struct drm_device *dev)
+static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
@@ -4362,8 +3509,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
-
- return 0;
}
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
@@ -4444,8 +3589,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4475,7 +3619,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4488,9 +3632,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
return ret;
}
-static void i915_irq_reset(struct drm_device *dev)
+static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4503,9 +3646,8 @@ static void i915_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN2_);
}
-static int i915_irq_postinstall(struct drm_device *dev)
+static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -4543,14 +3685,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
-
- return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4585,7 +3724,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4601,9 +3740,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
return ret;
}
-static void i965_irq_reset(struct drm_device *dev)
+static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4614,9 +3752,8 @@ static void i965_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN2_);
}
-static int i965_irq_postinstall(struct drm_device *dev)
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4666,8 +3803,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
-
- return 0;
}
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -4697,8 +3832,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4732,10 +3866,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4761,54 +3895,17 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
- if (IS_I945GM(dev_priv))
- i945gm_vblank_work_init(dev_priv);
-
intel_hpd_init_work(dev_priv);
- INIT_WORK(&rps->work, gen6_pm_rps_work);
-
- INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
- dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
-
- /* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev_priv))
- /* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
-
- /* We share the register with other engine */
- if (INTEL_GEN(dev_priv) > 9)
- GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
-
- rps->pm_intrmsk_mbz = 0;
-
- /*
- * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_GEN(dev_priv) <= 7)
- rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_GEN(dev_priv) >= 8)
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- dev->driver->get_vblank_counter = g4x_get_vblank_counter;
- else if (INTEL_GEN(dev_priv) >= 3)
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ /* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
+ if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
+ dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
dev->vblank_disable_immediate = true;
@@ -4831,86 +3928,22 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev->driver->irq_handler = cherryview_irq_handler;
- dev->driver->irq_preinstall = cherryview_irq_reset;
- dev->driver->irq_postinstall = cherryview_irq_postinstall;
- dev->driver->irq_uninstall = cherryview_irq_reset;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev->driver->irq_handler = valleyview_irq_handler;
- dev->driver->irq_preinstall = valleyview_irq_reset;
- dev->driver->irq_postinstall = valleyview_irq_postinstall;
- dev->driver->irq_uninstall = valleyview_irq_reset;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (INTEL_GEN(dev_priv) >= 11) {
- dev->driver->irq_handler = gen11_irq_handler;
- dev->driver->irq_preinstall = gen11_irq_reset;
- dev->driver->irq_postinstall = gen11_irq_postinstall;
- dev->driver->irq_uninstall = gen11_irq_reset;
- dev->driver->enable_vblank = gen8_enable_vblank;
- dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- dev->driver->irq_handler = gen8_irq_handler;
- dev->driver->irq_preinstall = gen8_irq_reset;
- dev->driver->irq_postinstall = gen8_irq_postinstall;
- dev->driver->irq_uninstall = gen8_irq_reset;
- dev->driver->enable_vblank = gen8_enable_vblank;
- dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_GEN9_LP(dev_priv))
+ if (HAS_GMCH(dev_priv)) {
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ if (HAS_PCH_JSP(dev_priv))
+ dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
+ else if (HAS_PCH_MCC(dev_priv))
+ dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_reset;
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_reset;
- dev->driver->enable_vblank = ironlake_enable_vblank;
- dev->driver->disable_vblank = ironlake_disable_vblank;
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else {
- if (IS_GEN(dev_priv, 2)) {
- dev->driver->irq_preinstall = i8xx_irq_reset;
- dev->driver->irq_postinstall = i8xx_irq_postinstall;
- dev->driver->irq_handler = i8xx_irq_handler;
- dev->driver->irq_uninstall = i8xx_irq_reset;
- dev->driver->enable_vblank = i8xx_enable_vblank;
- dev->driver->disable_vblank = i8xx_disable_vblank;
- } else if (IS_I945GM(dev_priv)) {
- dev->driver->irq_preinstall = i915_irq_reset;
- dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_reset;
- dev->driver->irq_handler = i915_irq_handler;
- dev->driver->enable_vblank = i945gm_enable_vblank;
- dev->driver->disable_vblank = i945gm_disable_vblank;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->driver->irq_preinstall = i915_irq_reset;
- dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_reset;
- dev->driver->irq_handler = i915_irq_handler;
- dev->driver->enable_vblank = i8xx_enable_vblank;
- dev->driver->disable_vblank = i8xx_disable_vblank;
- } else {
- dev->driver->irq_preinstall = i965_irq_reset;
- dev->driver->irq_postinstall = i965_irq_postinstall;
- dev->driver->irq_uninstall = i965_irq_reset;
- dev->driver->irq_handler = i965_irq_handler;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- }
- if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
}
@@ -4924,13 +3957,79 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
- if (IS_I945GM(i915))
- i945gm_vblank_work_fini(i915);
-
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
+static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return cherryview_irq_handler;
+ else if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_irq_handler;
+ else if (IS_GEN(dev_priv, 4))
+ return i965_irq_handler;
+ else if (IS_GEN(dev_priv, 3))
+ return i915_irq_handler;
+ else
+ return i8xx_irq_handler;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ return gen11_irq_handler;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ return gen8_irq_handler;
+ else
+ return ilk_irq_handler;
+ }
+}
+
+static void intel_irq_reset(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_reset(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_reset(dev_priv);
+ else if (IS_GEN(dev_priv, 4))
+ i965_irq_reset(dev_priv);
+ else if (IS_GEN(dev_priv, 3))
+ i915_irq_reset(dev_priv);
+ else
+ i8xx_irq_reset(dev_priv);
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_irq_reset(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_irq_reset(dev_priv);
+ else
+ ilk_irq_reset(dev_priv);
+ }
+}
+
+static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_postinstall(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_postinstall(dev_priv);
+ else if (IS_GEN(dev_priv, 4))
+ i965_irq_postinstall(dev_priv);
+ else if (IS_GEN(dev_priv, 3))
+ i915_irq_postinstall(dev_priv);
+ else
+ i8xx_irq_postinstall(dev_priv);
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_irq_postinstall(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_irq_postinstall(dev_priv);
+ else
+ ilk_irq_postinstall(dev_priv);
+ }
+}
+
/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
@@ -4944,6 +4043,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
*/
int intel_irq_install(struct drm_i915_private *dev_priv)
{
+ int irq = dev_priv->drm.pdev->irq;
+ int ret;
+
/*
* We enable some interrupt sources in our postinstall hooks, so mark
* interrupts as enabled _before_ actually enabling them to avoid
@@ -4951,7 +4053,20 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
dev_priv->runtime_pm.irqs_enabled = true;
- return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
+ dev_priv->drm.irq_enabled = true;
+
+ intel_irq_reset(dev_priv);
+
+ ret = request_irq(irq, intel_irq_handler(dev_priv),
+ IRQF_SHARED, DRIVER_NAME, dev_priv);
+ if (ret < 0) {
+ dev_priv->drm.irq_enabled = false;
+ return ret;
+ }
+
+ intel_irq_postinstall(dev_priv);
+
+ return ret;
}
/**
@@ -4963,7 +4078,23 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- drm_irq_uninstall(&dev_priv->drm);
+ int irq = dev_priv->drm.pdev->irq;
+
+ /*
+ * FIXME we can get called twice during driver probe
+ * error handling as well as during driver remove due to
+ * intel_modeset_driver_remove() calling us out of sequence.
+ * Would be nice if it didn't do that...
+ */
+ if (!dev_priv->drm.irq_enabled)
+ return;
+
+ dev_priv->drm.irq_enabled = false;
+
+ intel_irq_reset(dev_priv);
+
+ free_irq(irq, dev_priv);
+
intel_hpd_cancel_work(dev_priv);
dev_priv->runtime_pm.irqs_enabled = false;
}
@@ -4977,9 +4108,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
+ intel_irq_reset(dev_priv);
dev_priv->runtime_pm.irqs_enabled = false;
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
/**
@@ -4992,6 +4123,20 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->runtime_pm.irqs_enabled = true;
- dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
- dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
+ intel_irq_reset(dev_priv);
+ intel_irq_postinstall(dev_priv);
+}
+
+bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return dev_priv->runtime_pm.irqs_enabled;
+}
+
+void intel_synchronize_irq(struct drm_i915_private *i915)
+{
+ synchronize_irq(i915->drm.pdev->irq);
}
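
The i915_irq.c hunks above drop the drm_driver irq hooks and drm_irq_install() in favour of driver-internal dispatch: intel_irq_handler(), intel_irq_reset() and intel_irq_postinstall() pick the per-platform routine, and intel_irq_install() calls request_irq() directly, with the reset done before the line can fire and the postinstall after. A minimal sketch of that shape in plain hosted C follows; the toy_dev type, its gen/has_gmch fields and the handler names are illustrative stand-ins, not the driver's own.

/* Illustrative model only: pick a handler by platform, and mirror the
 * reset -> request_irq -> postinstall ordering used by intel_irq_install(). */
#include <stdio.h>

struct toy_dev { int gen; int has_gmch; int irq_enabled; };

typedef void (*toy_irq_handler_t)(struct toy_dev *);

static void gmch_handler(struct toy_dev *d) { printf("GMCH-style irq, gen%d\n", d->gen); }
static void ilk_handler(struct toy_dev *d)  { printf("ilk-style irq, gen%d\n", d->gen); }
static void gen8_handler(struct toy_dev *d) { printf("gen8+ irq, gen%d\n", d->gen); }

static toy_irq_handler_t pick_handler(const struct toy_dev *d)
{
        if (d->has_gmch)
                return gmch_handler;
        return d->gen >= 8 ? gen8_handler : ilk_handler;
}

static void toy_irq_reset(struct toy_dev *d)       { (void)d; /* mask and ack everything */ }
static void toy_irq_postinstall(struct toy_dev *d) { (void)d; /* unmask the wanted sources */ }

static int toy_irq_install(struct toy_dev *d)
{
        toy_irq_reset(d);               /* quiesce before the line can fire */
        d->irq_enabled = 1;             /* stands in for request_irq() */
        toy_irq_postinstall(d);         /* then enable the sources we want */
        return 0;
}

int main(void)
{
        struct toy_dev d = { .gen = 11, .has_gmch = 0, .irq_enabled = 0 };

        toy_irq_install(&d);
        pick_handler(&d)(&d);           /* prints "gen8+ irq, gen11" */
        return 0;
}
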
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index cb25dd213308..812c47a9c2d6 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -6,15 +6,21 @@
#ifndef __I915_IRQ_H__
#define __I915_IRQ_H__
+#include <linux/ktime.h>
#include <linux/types.h>
-#include "i915_drv.h"
+#include "display/intel_display.h"
+#include "i915_reg.h"
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
+struct intel_uncore;
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
+void intel_irq_init(struct drm_i915_private *dev_priv);
+void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
@@ -77,41 +83,83 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-
-static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
- u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
+u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
-static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
-{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
- return dev_priv->runtime_pm.irqs_enabled;
-}
+bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
+void intel_synchronize_irq(struct drm_i915_private *i915);
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
-void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
-void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
+
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+
+int i8xx_enable_vblank(struct drm_crtc *crtc);
+int i915gm_enable_vblank(struct drm_crtc *crtc);
+int i965_enable_vblank(struct drm_crtc *crtc);
+int ilk_enable_vblank(struct drm_crtc *crtc);
+int bdw_enable_vblank(struct drm_crtc *crtc);
+void i8xx_disable_vblank(struct drm_crtc *crtc);
+void i915gm_disable_vblank(struct drm_crtc *crtc);
+void i965_disable_vblank(struct drm_crtc *crtc);
+void ilk_disable_vblank(struct drm_crtc *crtc);
+void bdw_disable_vblank(struct drm_crtc *crtc);
+
+void gen2_irq_reset(struct intel_uncore *uncore);
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier);
+
+void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val);
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir);
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
#endif /* __I915_IRQ_H__ */
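
The GEN2/GEN3/GEN8 helpers declared above are thin token-pasting wrappers: for instance the GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked) call seen earlier in gen11_irq_postinstall() pastes the prefix onto IMR/IER/IIR and becomes gen3_irq_init(uncore, GEN11_GU_MISC_IMR, ..., GEN11_GU_MISC_IER, ..., GEN11_GU_MISC_IIR). Below is a self-contained demo of the same pasting pattern, using invented FOO_* placeholder values rather than real i915 registers.

/* Self-contained demo of the token-pasting shape behind GEN3_IRQ_RESET();
 * the FOO_* "registers" are invented placeholders, not i915 names. */
#include <stdio.h>

#define FOO_IMR 0x10u
#define FOO_IIR 0x14u
#define FOO_IER 0x18u

static void gen3_irq_reset_demo(unsigned int imr, unsigned int iir, unsigned int ier)
{
        printf("reset: IMR=%#x IIR=%#x IER=%#x\n", imr, iir, ier);
}

/* Same shape as the real macro: paste the prefix onto IMR/IIR/IER. */
#define DEMO_IRQ_RESET(type) \
        gen3_irq_reset_demo(type##IMR, type##IIR, type##IER)

int main(void)
{
        DEMO_IRQ_RESET(FOO_);   /* gen3_irq_reset_demo(FOO_IMR, FOO_IIR, FOO_IER) */
        return 0;
}
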
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 79f8ec756362..fdd550405fd3 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,7 +25,13 @@
#include <linux/kernel.h>
#include <asm/fpu/api.h>
-#include "i915_drv.h"
+#include "i915_memcpy.h"
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define CI_BUG_ON(expr) BUG_ON(expr)
+#else
+#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
@@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
- len >>= 4;
while (len >= 4) {
asm("movntdqa (%0), %%xmm0\n"
"movntdqa 16(%0), %%xmm1\n"
@@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
kernel_fpu_end();
}
+
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
+{
+ kernel_fpu_begin();
+
+ while (len >= 4) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movntdqa 16(%0), %%xmm1\n"
+ "movntdqa 32(%0), %%xmm2\n"
+ "movntdqa 48(%0), %%xmm3\n"
+ "movups %%xmm0, (%1)\n"
+ "movups %%xmm1, 16(%1)\n"
+ "movups %%xmm2, 32(%1)\n"
+ "movups %%xmm3, 48(%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 64;
+ dst += 64;
+ len -= 4;
+ }
+ while (len--) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movups %%xmm0, (%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 16;
+ dst += 16;
+ }
+
+ kernel_fpu_end();
+}
+#else
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
#endif
/**
@@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
return false;
-#ifdef CONFIG_AS_MOVNTDQA
if (static_branch_likely(&has_movntdqa)) {
if (likely(len))
- __memcpy_ntdqa(dst, src, len);
+ __memcpy_ntdqa(dst, src, len >> 4);
return true;
}
-#endif
return false;
}
+/**
+ * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
+ * @src to @dst using non-temporal instructions where available, but
+ * accepts that its arguments may not be aligned, provided they remain
+ * valid for the potential 16-byte read past the end.
+ */
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
+{
+ unsigned long addr;
+
+ CI_BUG_ON(!i915_has_memcpy_from_wc());
+
+ addr = (unsigned long)src;
+ if (!IS_ALIGNED(addr, 16)) {
+ unsigned long x = min(ALIGN(addr, 16) - addr, len);
+
+ memcpy(dst, src, x);
+
+ len -= x;
+ dst += x;
+ src += x;
+ }
+
+ if (likely(len))
+ __memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
+}
+
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
/*
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
new file mode 100644
index 000000000000..e36d30edd987
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_MEMCPY_H__
+#define __I915_MEMCPY_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+void i915_memcpy_init_early(struct drm_i915_private *i915);
+
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len);
+
+/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
+ * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
+ * perform the operation. To check beforehand, pass in the parameters to
+ * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
+ * you only need to pass in the minor offsets; page-aligned pointers are
+ * always valid.
+ *
+ * To check for SSE4.1 alone, in the foreknowledge that the future use
+ * will be correctly aligned, use i915_has_memcpy_from_wc().
+ */
+#define i915_can_memcpy_from_wc(dst, src, len) \
+ i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+
+#define i915_has_memcpy_from_wc() \
+ i915_memcpy_from_wc(NULL, NULL, 0)
+
+#endif /* __I915_MEMCPY_H__ */
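
i915_can_memcpy_from_wc() relies on the fact that ORing both pointers and the length together preserves any set low bit, so a single check of the low 4 bits covers all three operands, and passing len == 0 means the helper only reports SSE4.1/alignment without copying anything. A small self-contained demo of that OR trick follows; check16() is a local stand-in, not the driver helper.

/* Demonstrates the "or everything together" alignment probe behind
 * i915_can_memcpy_from_wc(); check16() is a stand-in for the real helper. */
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

static int check16(const void *dst, const void *src, unsigned long len)
{
        /* Any misaligned low bit in dst, src or len survives the OR. */
        return (((uintptr_t)dst | (uintptr_t)src | len) & 15) == 0;
}

int main(void)
{
        static alignas(16) char buf[64];

        printf("%d\n", check16(buf, buf + 16, 32));     /* 1: everything 16-byte aligned */
        printf("%d\n", check16(buf, buf + 8, 32));      /* 0: src misaligned */
        printf("%d\n", check16(buf, buf + 16, 20));     /* 0: length not a multiple of 16 */
        return 0;
}
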
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index c23bb29e6d3e..b6376b25ef63 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -33,6 +33,9 @@ struct remap_pfn {
struct mm_struct *mm;
unsigned long pfn;
pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
};
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -46,6 +49,35 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
return 0;
}
+#define use_dma(io) ((io) != -1)
+
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.pfn))
+ return -EINVAL;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
/**
* remap_io_mapping - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -63,9 +95,8 @@ int remap_io_mapping(struct vm_area_struct *vma,
struct remap_pfn r;
int err;
- GEM_BUG_ON((vma->vm_flags &
- (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
- (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
r.mm = vma->vm_mm;
@@ -81,3 +112,40 @@ int remap_io_mapping(struct vm_area_struct *vma,
return 0;
}
+
+/**
+ * remap_io_sg - remap an IO scatterlist to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @size: size of map area
+ * @sgl: Start sg entry
+ * @iobase: Use the stored DMA address offset by this base, or the page pfn if -1
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase)
+{
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
+ int err;
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+ if (!use_dma(iobase))
+ flush_cache_range(vma, addr, size);
+
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
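
remap_io_sg() advances through the scatterlist one PAGE_SIZE at a time, and sgt_pfn() chooses the pfn to insert either from the DMA address plus @iobase or, when @iobase is the -1 sentinel (use_dma()), from the backing page pfn. A self-contained model of just that selection follows; the TOY_* names and the example values are illustrative only.

/* Toy model of the pfn selection done by sgt_pfn()/use_dma(); all names and
 * values here are illustrative, not the driver's. */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12
#define TOY_NO_IOBASE ((uint64_t)-1)    /* stands in for the -1 sentinel */

static uint64_t toy_sgt_pfn(uint64_t dma, uint64_t pfn, uint64_t curr,
                            uint64_t iobase)
{
        if (iobase != TOY_NO_IOBASE)    /* use_dma(): remap DMA address + iobase */
                return (dma + curr + iobase) >> TOY_PAGE_SHIFT;

        return pfn + (curr >> TOY_PAGE_SHIFT);  /* else walk struct-page pfns */
}

int main(void)
{
        /* DMA-backed mapping: 0x100000 + 0x2000 offset + 0xf0000000 base */
        printf("%#llx\n", (unsigned long long)
               toy_sgt_pfn(0x100000, 0, 0x2000, 0xf0000000));
        /* Page-backed mapping: start pfn 0x1234, two pages in */
        printf("%#llx\n", (unsigned long long)
               toy_sgt_pfn(0, 0x1234, 0x2000, TOY_NO_IOBASE));
        return 0;
}
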
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
deleted file mode 100644
index 0e667f1a8aa1..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BDW_H__
-#define __I915_OA_BDW_H__
-
-extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
deleted file mode 100644
index 679e92cf4f1d..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BXT_H__
-#define __I915_OA_BXT_H__
-
-extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
deleted file mode 100644
index 4d6025559bbe..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT2_H__
-#define __I915_OA_CFLGT2_H__
-
-extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
deleted file mode 100644
index 0697f4077402..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT3_H__
-#define __I915_OA_CFLGT3_H__
-
-extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
deleted file mode 100644
index 0986eae3135f..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CHV_H__
-#define __I915_OA_CHV_H__
-
-extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
deleted file mode 100644
index e830a406aff2..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CNL_H__
-#define __I915_OA_CNL_H__
-
-extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
deleted file mode 100644
index 06dedf991edb..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_GLK_H__
-#define __I915_OA_GLK_H__
-
-extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
deleted file mode 100644
index 3d0c870cd0bd..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_HSW_H__
-#define __I915_OA_HSW_H__
-
-extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
deleted file mode 100644
index 24eaa97d61ba..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_ICL_H__
-#define __I915_OA_ICL_H__
-
-extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
deleted file mode 100644
index a55398a904de..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT2_H__
-#define __I915_OA_KBLGT2_H__
-
-extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
deleted file mode 100644
index 3ddd3483b7cc..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT3_H__
-#define __I915_OA_KBLGT3_H__
-
-extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
deleted file mode 100644
index be6256037239..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT2_H__
-#define __I915_OA_SKLGT2_H__
-
-extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
deleted file mode 100644
index 650beb068e56..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT3_H__
-#define __I915_OA_SKLGT3_H__
-
-extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
deleted file mode 100644
index 8dcf849d131e..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT4_H__
-#define __I915_OA_SKLGT4_H__
-
-extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 5b07766a1c26..1dd1f3652795 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ i915_param_named(modeset, int, 0400,
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
@@ -165,18 +166,24 @@ i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-i915_param_named_unsafe(inject_load_failure, uint, 0400,
+i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, bool, 0600,
- "Enable support for DPCD backlight control (default:false)");
+i915_param_named(enable_dpcd_backlight, int, 0600,
+ "Enable support for DPCD backlight control"
+ "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
#endif
+#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
+i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+ "Fake LMEM start offset (default: 0)");
+#endif
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
@@ -188,6 +195,8 @@ static __always_inline void _print_param(struct drm_printer *p,
drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x);
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index a4770ce46bd2..31b88f297fbc 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -61,10 +61,12 @@ struct drm_printer;
param(char *, dmc_firmware_path, NULL) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
param(int, edp_vswing, 0) \
- param(int, reset, 2) \
- param(unsigned int, inject_load_failure, 0) \
+ param(int, reset, 3) \
+ param(unsigned int, inject_probe_failure, 0) \
param(int, fastboot, -1) \
+ param(int, enable_dpcd_backlight, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
+ param(unsigned long, fake_lmem_start, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
@@ -76,7 +78,6 @@ struct drm_printer;
param(bool, verbose_state_checks, true) \
param(bool, nuclear_pageflip, false) \
param(bool, enable_dp_mst, true) \
- param(bool, enable_dpcd_backlight, false) \
param(bool, enable_gvt, false)
#define MEMBER(T, member, ...) T member;
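
The param() table above is an X-macro: the same list is expanded once through MEMBER() to declare the parameter struct fields and again elsewhere to print or register each parameter. A self-contained sketch of the pattern follows; TOY_PARAMS and the PRINT() expansion are invented for illustration, while the three entries mirror ones from the table in this patch.

/* Self-contained X-macro demo mirroring the param()/MEMBER() pattern above;
 * TOY_PARAMS and PRINT() are invented names used only for this sketch. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_PARAMS(param) \
        param(int, reset, 3) \
        param(unsigned int, inject_probe_failure, 0) \
        param(bool, enable_hangcheck, true)

/* First expansion: declare the struct members, just like MEMBER() above. */
#define MEMBER(T, member, ...) T member;
struct toy_params { TOY_PARAMS(MEMBER) };
#undef MEMBER

/* Second expansion: emit the defaults as "name=value" lines. */
#define PRINT(T, member, value) printf(#member "=%d\n", (int)(value));

int main(void)
{
        printf("sizeof(struct toy_params)=%zu\n", sizeof(struct toy_params));
        TOY_PARAMS(PRINT)
        return 0;
}
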
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6c9f46fc3e12..83f01401b8b5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/console.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
@@ -31,6 +30,7 @@
#include "display/intel_fbdev.h"
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_globals.h"
#include "i915_selftest.h"
@@ -118,6 +118,14 @@
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
+#define TGL_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ [PIPE_D] = TGL_CURSOR_D_OFFSET, \
+ }
+
#define I9XX_COLORS \
.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
@@ -144,10 +152,13 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
+#define GEN_DEFAULT_REGIONS \
+ .memory_regions = REGION_SMEM | REGION_STOLEN
+
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -161,11 +172,12 @@
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define I845_FEATURES \
GEN(2), \
- .num_pipes = 1, \
+ .pipe_mask = BIT(PIPE_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -178,32 +190,33 @@
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i830_info = {
+static const struct intel_device_info i830_info = {
I830_FEATURES,
PLATFORM(INTEL_I830),
};
-static const struct intel_device_info intel_i845g_info = {
+static const struct intel_device_info i845g_info = {
I845_FEATURES,
PLATFORM(INTEL_I845G),
};
-static const struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info i85x_info = {
I830_FEATURES,
PLATFORM(INTEL_I85X),
.display.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
};
#define GEN3_FEATURES \
GEN(3), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
@@ -212,9 +225,10 @@ static const struct intel_device_info intel_i865g_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
@@ -225,7 +239,7 @@ static const struct intel_device_info intel_i915g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
@@ -238,7 +252,7 @@ static const struct intel_device_info intel_i915gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
.display.has_hotplug = 1,
@@ -249,7 +263,7 @@ static const struct intel_device_info intel_i945g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
@@ -263,21 +277,21 @@ static const struct intel_device_info intel_i945gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info = {
+static const struct intel_device_info g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_g_info = {
+static const struct intel_device_info pnv_g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_m_info = {
+static const struct intel_device_info pnv_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
@@ -287,7 +301,7 @@ static const struct intel_device_info intel_pineview_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
@@ -297,9 +311,10 @@ static const struct intel_device_info intel_pineview_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
.display.has_overlay = 1,
@@ -307,7 +322,7 @@ static const struct intel_device_info intel_i965g_info = {
.has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
.is_mobile = 1,
@@ -318,14 +333,14 @@ static const struct intel_device_info intel_i965gm_info = {
.has_snoop = false,
};
-static const struct intel_device_info intel_g45_info = {
+static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
-static const struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
@@ -337,7 +352,7 @@ static const struct intel_device_info intel_gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
@@ -347,14 +362,15 @@ static const struct intel_device_info intel_gm45_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info ilk_d_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
};
-static const struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info ilk_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
@@ -363,7 +379,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
#define GEN6_FEATURES \
GEN(6), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -377,18 +393,19 @@ static const struct intel_device_info intel_ironlake_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
PLATFORM(INTEL_SANDYBRIDGE)
-static const struct intel_device_info intel_sandybridge_d_gt1_info = {
+static const struct intel_device_info snb_d_gt1_info = {
SNB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_d_gt2_info = {
+static const struct intel_device_info snb_d_gt2_info = {
SNB_D_PLATFORM,
.gt = 2,
};
@@ -399,19 +416,19 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = {
.is_mobile = 1
-static const struct intel_device_info intel_sandybridge_m_gt1_info = {
+static const struct intel_device_info snb_m_gt1_info = {
SNB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_gt2_info = {
+static const struct intel_device_info snb_m_gt2_info = {
SNB_M_PLATFORM,
.gt = 2,
};
#define GEN7_FEATURES \
GEN(7), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -425,19 +442,20 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
PLATFORM(INTEL_IVYBRIDGE), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_d_gt1_info = {
+static const struct intel_device_info ivb_d_gt1_info = {
IVB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_d_gt2_info = {
+static const struct intel_device_info ivb_d_gt2_info = {
IVB_D_PLATFORM,
.gt = 2,
};
@@ -448,29 +466,29 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = {
.is_mobile = 1, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_m_gt1_info = {
+static const struct intel_device_info ivb_m_gt1_info = {
IVB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_gt2_info = {
+static const struct intel_device_info ivb_m_gt2_info = {
IVB_M_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info = {
+static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
- .num_pipes = 0, /* legal, last one wins */
+ .pipe_mask = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info = {
+static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .num_pipes = 2,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
@@ -486,6 +504,7 @@ static const struct intel_device_info intel_valleyview_info = {
I9XX_CURSOR_OFFSETS,
I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define G75_FEATURES \
@@ -504,17 +523,17 @@ static const struct intel_device_info intel_valleyview_info = {
PLATFORM(INTEL_HASWELL), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_haswell_gt1_info = {
+static const struct intel_device_info hsw_gt1_info = {
HSW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_haswell_gt2_info = {
+static const struct intel_device_info hsw_gt2_info = {
HSW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_haswell_gt3_info = {
+static const struct intel_device_info hsw_gt3_info = {
HSW_PLATFORM,
.gt = 3,
};
@@ -522,8 +541,6 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
@@ -534,17 +551,17 @@ static const struct intel_device_info intel_haswell_gt3_info = {
GEN8_FEATURES, \
PLATFORM(INTEL_BROADWELL)
-static const struct intel_device_info intel_broadwell_gt1_info = {
+static const struct intel_device_info bdw_gt1_info = {
BDW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_broadwell_gt2_info = {
+static const struct intel_device_info bdw_gt2_info = {
BDW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_broadwell_rsvd_info = {
+static const struct intel_device_info bdw_rsvd_info = {
BDW_PLATFORM,
.gt = 3,
/* According to the device ID those devices are GT3, they were
@@ -552,17 +569,17 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
*/
};
-static const struct intel_device_info intel_broadwell_gt3_info = {
+static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
.engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
-static const struct intel_device_info intel_cherryview_info = {
+static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .num_pipes = 3,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -572,7 +589,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
@@ -582,12 +599,12 @@ static const struct intel_device_info intel_cherryview_info = {
CHV_CURSOR_OFFSETS,
CHV_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define GEN9_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ I915_GTT_PAGE_SIZE_64K
#define GEN9_FEATURES \
GEN8_FEATURES, \
@@ -595,7 +612,8 @@ static const struct intel_device_info intel_cherryview_info = {
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
- .has_guc = 1, \
+ .has_gt_uc = 1, \
+ .display.has_hdcp = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
@@ -603,12 +621,12 @@ static const struct intel_device_info intel_cherryview_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_SKYLAKE)
-static const struct intel_device_info intel_skylake_gt1_info = {
+static const struct intel_device_info skl_gt1_info = {
SKL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_skylake_gt2_info = {
+static const struct intel_device_info skl_gt2_info = {
SKL_PLATFORM,
.gt = 2,
};
@@ -619,12 +637,12 @@ static const struct intel_device_info intel_skylake_gt2_info = {
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
-static const struct intel_device_info intel_skylake_gt3_info = {
+static const struct intel_device_info skl_gt3_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 3,
};
-static const struct intel_device_info intel_skylake_gt4_info = {
+static const struct intel_device_info skl_gt4_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 4,
};
@@ -634,11 +652,12 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.is_lp = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_fbc = 1, \
+ .display.has_hdcp = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
.display.has_csr = 1, \
@@ -647,7 +666,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
- .has_guc = 1, \
+ .has_gt_uc = 1, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_reset_engine = 1, \
@@ -657,15 +676,16 @@ static const struct intel_device_info intel_skylake_gt4_info = {
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN9_DEFAULT_PAGE_SIZES
+ GEN9_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_broxton_info = {
+static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_BROXTON),
.ddb_size = 512,
};
-static const struct intel_device_info intel_geminilake_info = {
+static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
.ddb_size = 1024,
@@ -676,17 +696,17 @@ static const struct intel_device_info intel_geminilake_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_KABYLAKE)
-static const struct intel_device_info intel_kabylake_gt1_info = {
+static const struct intel_device_info kbl_gt1_info = {
KBL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt2_info = {
+static const struct intel_device_info kbl_gt2_info = {
KBL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_kabylake_gt3_info = {
+static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
.engine_mask =
@@ -697,17 +717,17 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_COFFEELAKE)
-static const struct intel_device_info intel_coffeelake_gt1_info = {
+static const struct intel_device_info cfl_gt1_info = {
CFL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_coffeelake_gt2_info = {
+static const struct intel_device_info cfl_gt2_info = {
CFL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info = {
+static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
.engine_mask =
@@ -718,17 +738,24 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
GEN9_FEATURES, \
GEN(10), \
.ddb_size = 1024, \
+ .display.has_dsc = 1, \
.has_coherent_ggtt = false, \
GLK_COLORS
-static const struct intel_device_info intel_cannonlake_info = {
+static const struct intel_device_info cnl_info = {
GEN10_FEATURES,
PLATFORM(INTEL_CANNONLAKE),
.gt = 2,
};
+#define GEN11_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
+
#define GEN11_FEATURES \
GEN10_FEATURES, \
+ GEN11_DEFAULT_PAGE_SIZES, \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -750,21 +777,59 @@ static const struct intel_device_info intel_cannonlake_info = {
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
-static const struct intel_device_info intel_icelake_11_info = {
+static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
-static const struct intel_device_info intel_elkhartlake_info = {
+static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
- .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+ .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
+#define GEN12_FEATURES \
+ GEN11_FEATURES, \
+ GEN(12), \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_D] = PIPE_D_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }, \
+ TGL_CURSOR_OFFSETS, \
+ .has_global_mocs = 1, \
+ .display.has_dsb = 1
+
+static const struct intel_device_info tgl_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_TIGERLAKE),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .require_force_probe = 1,
+ .display.has_modular_fia = 1,
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .has_rps = false, /* XXX disabled for debugging */
+};
+
+#define GEN12_DGFX_FEATURES \
+ GEN12_FEATURES, \
+ .is_dgfx = 1
+
#undef GEN
#undef PLATFORM
@@ -775,83 +840,86 @@ static const struct intel_device_info intel_elkhartlake_info = {
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
static const struct pci_device_id pciidlist[] = {
- INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_i845g_info),
- INTEL_I85X_IDS(&intel_i85x_info),
- INTEL_I865G_IDS(&intel_i865g_info),
- INTEL_I915G_IDS(&intel_i915g_info),
- INTEL_I915GM_IDS(&intel_i915gm_info),
- INTEL_I945G_IDS(&intel_i945g_info),
- INTEL_I945GM_IDS(&intel_i945gm_info),
- INTEL_I965G_IDS(&intel_i965g_info),
- INTEL_G33_IDS(&intel_g33_info),
- INTEL_I965GM_IDS(&intel_i965gm_info),
- INTEL_GM45_IDS(&intel_gm45_info),
- INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
- INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
- INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
- INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
- INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
- INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
- INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
- INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
- INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
- INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
- INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
- INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
- INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
- INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
- INTEL_VLV_IDS(&intel_valleyview_info),
- INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
- INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
- INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
- INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
- INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
- INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
- INTEL_BXT_IDS(&intel_broxton_info),
- INTEL_GLK_IDS(&intel_geminilake_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
- INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
- INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
- INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CNL_IDS(&intel_cannonlake_info),
- INTEL_ICL_11_IDS(&intel_icelake_11_info),
- INTEL_EHL_IDS(&intel_elkhartlake_info),
+ INTEL_I830_IDS(&i830_info),
+ INTEL_I845G_IDS(&i845g_info),
+ INTEL_I85X_IDS(&i85x_info),
+ INTEL_I865G_IDS(&i865g_info),
+ INTEL_I915G_IDS(&i915g_info),
+ INTEL_I915GM_IDS(&i915gm_info),
+ INTEL_I945G_IDS(&i945g_info),
+ INTEL_I945GM_IDS(&i945gm_info),
+ INTEL_I965G_IDS(&i965g_info),
+ INTEL_G33_IDS(&g33_info),
+ INTEL_I965GM_IDS(&i965gm_info),
+ INTEL_GM45_IDS(&gm45_info),
+ INTEL_G45_IDS(&g45_info),
+ INTEL_PINEVIEW_G_IDS(&pnv_g_info),
+ INTEL_PINEVIEW_M_IDS(&pnv_m_info),
+ INTEL_IRONLAKE_D_IDS(&ilk_d_info),
+ INTEL_IRONLAKE_M_IDS(&ilk_m_info),
+ INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info),
+ INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info),
+ INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info),
+ INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info),
+ INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */
+ INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info),
+ INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info),
+ INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info),
+ INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info),
+ INTEL_HSW_GT1_IDS(&hsw_gt1_info),
+ INTEL_HSW_GT2_IDS(&hsw_gt2_info),
+ INTEL_HSW_GT3_IDS(&hsw_gt3_info),
+ INTEL_VLV_IDS(&vlv_info),
+ INTEL_BDW_GT1_IDS(&bdw_gt1_info),
+ INTEL_BDW_GT2_IDS(&bdw_gt2_info),
+ INTEL_BDW_GT3_IDS(&bdw_gt3_info),
+ INTEL_BDW_RSVD_IDS(&bdw_rsvd_info),
+ INTEL_CHV_IDS(&chv_info),
+ INTEL_SKL_GT1_IDS(&skl_gt1_info),
+ INTEL_SKL_GT2_IDS(&skl_gt2_info),
+ INTEL_SKL_GT3_IDS(&skl_gt3_info),
+ INTEL_SKL_GT4_IDS(&skl_gt4_info),
+ INTEL_BXT_IDS(&bxt_info),
+ INTEL_GLK_IDS(&glk_info),
+ INTEL_KBL_GT1_IDS(&kbl_gt1_info),
+ INTEL_KBL_GT2_IDS(&kbl_gt2_info),
+ INTEL_KBL_GT3_IDS(&kbl_gt3_info),
+ INTEL_KBL_GT4_IDS(&kbl_gt3_info),
+ INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info),
+ INTEL_CFL_S_GT1_IDS(&cfl_gt1_info),
+ INTEL_CFL_S_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_H_GT1_IDS(&cfl_gt1_info),
+ INTEL_CFL_H_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_U_GT3_IDS(&cfl_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&cfl_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
+ INTEL_CML_GT1_IDS(&cfl_gt1_info),
+ INTEL_CML_GT2_IDS(&cfl_gt2_info),
+ INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
+ INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CNL_IDS(&cnl_info),
+ INTEL_ICL_11_IDS(&icl_info),
+ INTEL_EHL_IDS(&ehl_info),
+ INTEL_TGL_12_IDS(&tgl_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
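For readers unfamiliar with the ID macros above: each INTEL_*_IDS() wrapper expands (in include/drm/i915_pciids.h) into one or more pci_device_id entries that stash the matching intel_device_info pointer in driver_data. A rough, hand-written sketch of what a single expanded entry looks like; the device ID below is made up purely for illustration:

/* Illustrative only: approximate shape of one expanded table entry.
 * Vendor 0x8086, a specific device ID, any subvendor/subdevice, the VGA
 * display class (0x030000 masked with 0xff0000), and the device info
 * pointer carried through driver_data. */
static const struct pci_device_id example_tgl_entry = {
	0x8086, 0x1234, PCI_ANY_ID, PCI_ANY_ID,
	0x030000, 0xff0000, (unsigned long)&tgl_info,
};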
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev;
+ struct drm_i915_private *i915;
- dev = pci_get_drvdata(pdev);
- if (!dev) /* driver load aborted, nothing to cleanup */
+ i915 = pci_get_drvdata(pdev);
+ if (!i915) /* driver load aborted, nothing to cleanup */
return;
- i915_driver_unload(dev);
- drm_dev_put(dev);
-
+ i915_driver_remove(i915);
pci_set_drvdata(pdev, NULL);
+
+ drm_dev_put(&i915->drm);
}
/* is device_id present in comma separated list of ids */
@@ -923,11 +991,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
- err = i915_driver_load(pdev, ent);
+ err = i915_driver_probe(pdev, ent);
if (err)
return err;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure(pci_get_drvdata(pdev))) {
i915_pci_remove(pdev);
return -ENODEV;
}
@@ -938,6 +1006,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err > 0 ? -ENOTTY : err;
}
+ err = i915_perf_selftests(pdev);
+ if (err) {
+ i915_pci_remove(pdev);
+ return err > 0 ? -ENOTTY : err;
+ }
+
return 0;
}
@@ -980,7 +1054,12 @@ static int __init i915_init(void)
return 0;
}
- return pci_register_driver(&i915_pci_driver);
+ err = pci_register_driver(&i915_pci_driver);
+ if (err)
+ return err;
+
+ i915_perf_sysctl_register();
+ return 0;
}
static void __exit i915_exit(void)
@@ -988,6 +1067,7 @@ static void __exit i915_exit(void)
if (!i915_pci_driver.driver.owner)
return;
+ i915_perf_sysctl_unregister();
pci_unregister_driver(&i915_pci_driver);
i915_globals_exit();
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5140017f9a39..0f556d80ba36 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,24 +196,29 @@
#include <linux/uuid.h>
#include "gem/i915_gem_context.h"
-#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-#include "i915_oa_bdw.h"
-#include "i915_oa_chv.h"
-#include "i915_oa_sklgt2.h"
-#include "i915_oa_sklgt3.h"
-#include "i915_oa_sklgt4.h"
-#include "i915_oa_bxt.h"
-#include "i915_oa_kblgt2.h"
-#include "i915_oa_kblgt3.h"
-#include "i915_oa_glk.h"
-#include "i915_oa_cflgt2.h"
-#include "i915_oa_cflgt3.h"
-#include "i915_oa_cnl.h"
-#include "i915_oa_icl.h"
+#include "i915_perf.h"
+#include "oa/i915_oa_hsw.h"
+#include "oa/i915_oa_bdw.h"
+#include "oa/i915_oa_chv.h"
+#include "oa/i915_oa_sklgt2.h"
+#include "oa/i915_oa_sklgt3.h"
+#include "oa/i915_oa_sklgt4.h"
+#include "oa/i915_oa_bxt.h"
+#include "oa/i915_oa_kblgt2.h"
+#include "oa/i915_oa_kblgt3.h"
+#include "oa/i915_oa_glk.h"
+#include "oa/i915_oa_cflgt2.h"
+#include "oa/i915_oa_cflgt3.h"
+#include "oa/i915_oa_cnl.h"
+#include "oa/i915_oa_icl.h"
+#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -290,6 +295,7 @@ static u32 i915_perf_stream_paranoid = true;
/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK 0x3f
+#define OAREPORT_REASON_MASK_EXTENDED 0x7f
#define OAREPORT_REASON_SHIFT 19
#define OAREPORT_REASON_TIMER (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
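A small sketch (not part of the patch; the helper name is invented) of how these definitions are combined when decoding the first dword of a report, mirroring the logic used later in gen8_append_oa_reports():

/* Sketch: pull the 'reason' bits out of an OA report header, using the
 * wider mask on Gen12 where the field was extended. */
static u32 oa_report_reason(struct drm_i915_private *i915, const u32 *report32)
{
	u32 mask = IS_GEN(i915, 12) ? OAREPORT_REASON_MASK_EXTENDED :
				      OAREPORT_REASON_MASK;

	return (report32[0] >> OAREPORT_REASON_SHIFT) & mask;
}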
@@ -335,17 +341,24 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
+static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
+ [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+};
+
#define SAMPLE_OA_REPORT (1<<0)
/**
* struct perf_open_properties - for validated properties given to open a stream
* @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
* @single_context: Whether a single or all gpu contexts should be monitored
+ * @hold_preemption: Whether the preemption is disabled for the filtered
+ * context
* @ctx_handle: A gem ctx handle for use with @single_context
* @metrics_set: An ID for an OA unit metric set advertised via sysfs
* @oa_format: An OA unit HW report format
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
+ * @engine: The engine (typically rcs0) being monitored by the OA unit
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -355,6 +368,7 @@ struct perf_open_properties {
u32 sample_flags;
u64 single_context:1;
+ u64 hold_preemption:1;
u64 ctx_handle;
/* OA sampling state */
@@ -362,71 +376,83 @@ struct perf_open_properties {
int oa_format;
bool oa_periodic;
int oa_period_exponent;
+
+ struct intel_engine_cs *engine;
};
-static void free_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
-{
- if (!PTR_ERR(oa_config->flex_regs))
- kfree(oa_config->flex_regs);
- if (!PTR_ERR(oa_config->b_counter_regs))
- kfree(oa_config->b_counter_regs);
- if (!PTR_ERR(oa_config->mux_regs))
- kfree(oa_config->mux_regs);
- kfree(oa_config);
-}
+struct i915_oa_config_bo {
+ struct llist_node node;
-static void put_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
+ struct i915_oa_config *oa_config;
+ struct i915_vma *vma;
+};
+
+static struct ctl_table_header *sysctl_header;
+
+static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
+
+void i915_oa_config_release(struct kref *ref)
{
- if (!atomic_dec_and_test(&oa_config->ref_count))
- return;
+ struct i915_oa_config *oa_config =
+ container_of(ref, typeof(*oa_config), ref);
+
+ kfree(oa_config->flex_regs);
+ kfree(oa_config->b_counter_regs);
+ kfree(oa_config->mux_regs);
- free_oa_config(dev_priv, oa_config);
+ kfree_rcu(oa_config, rcu);
}
-static int get_oa_config(struct drm_i915_private *dev_priv,
- int metrics_set,
- struct i915_oa_config **out_config)
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
- int ret;
+ struct i915_oa_config *oa_config;
- if (metrics_set == 1) {
- *out_config = &dev_priv->perf.oa.test_config;
- atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
- return 0;
- }
+ rcu_read_lock();
+ if (metrics_set == 1)
+ oa_config = &perf->test_config;
+ else
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ if (oa_config)
+ oa_config = i915_oa_config_get(oa_config);
+ rcu_read_unlock();
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
- if (ret)
- return ret;
+ return oa_config;
+}
- *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
- if (!*out_config)
- ret = -EINVAL;
- else
- atomic_inc(&(*out_config)->ref_count);
+static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
+{
+ i915_oa_config_put(oa_bo->oa_config);
+ i915_vma_put(oa_bo->vma);
+ kfree(oa_bo);
+}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- return ret;
+ return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
+ GEN12_OAG_OATAILPTR_MASK;
}
-static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
+ struct intel_uncore *uncore = stream->uncore;
+
+ return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}
-static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
/**
* oa_buffer_check_unlocked - check for data and update tail ptr state
- * @dev_priv: i915 device instance
+ * @stream: i915 stream instance
*
* This is either called via fops (for blocking reads in user ctx) or the poll
* check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
@@ -448,9 +474,9 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
*
* Returns: %true if the OA buffer contains data, else %false
*/
-static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
+static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format_size;
unsigned long flags;
unsigned int aged_idx;
u32 head, hw_tail, aged_tail, aging_tail;
@@ -460,19 +486,19 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
* could result in an OA buffer reset which might reset the head,
* tails[] and aged_tail state.
*/
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* NB: The head we observe here might effectively be a little out of
* date (between head and tails[aged_idx].offset if there is currently
* a read() in progress.
*/
- head = dev_priv->perf.oa.oa_buffer.head;
+ head = stream->oa_buffer.head;
- aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
- aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
+ aged_idx = stream->oa_buffer.aged_tail_idx;
+ aged_tail = stream->oa_buffer.tails[aged_idx].offset;
+ aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
+ hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
* not in report_size steps...
@@ -492,16 +518,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
* available) without needing to wait for a later hrtimer callback.
*/
if (aging_tail != INVALID_TAIL_PTR &&
- ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
+ ((now - stream->oa_buffer.aging_timestamp) >
OA_TAIL_MARGIN_NSEC)) {
aged_idx ^= 1;
- dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
+ stream->oa_buffer.aged_tail_idx = aged_idx;
aged_tail = aging_tail;
/* Mark that we need a new pointer to start aging... */
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
aging_tail = INVALID_TAIL_PTR;
}
@@ -516,7 +542,7 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
if (aging_tail == INVALID_TAIL_PTR &&
(aged_tail == INVALID_TAIL_PTR ||
OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
+ struct i915_vma *vma = stream->oa_buffer.vma;
u32 gtt_offset = i915_ggtt_offset(vma);
/* Be paranoid and do a bounds check on the pointer read back
@@ -525,16 +551,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
*/
if (hw_tail >= gtt_offset &&
hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
+ stream->oa_buffer.tails[!aged_idx].offset =
aging_tail = hw_tail;
- dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
+ stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
+ DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
hw_tail);
}
}
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
return aged_tail == INVALID_TAIL_PTR ?
false : OA_TAKEN(aged_tail, head) >= report_size;
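OA_TAKEN() here is simply the modular distance between two offsets in the power-of-two ring buffer; a standalone sketch of the same arithmetic (names are illustrative, not the driver's macro):

/* Bytes available between head and tail in a power-of-two ring buffer,
 * wrapping correctly when tail has passed the end of the buffer. */
static inline u32 oa_ring_taken(u32 tail, u32 head, u32 buf_size)
{
	return (tail - head) & (buf_size - 1);
}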
@@ -597,8 +623,7 @@ static int append_oa_sample(struct i915_perf_stream *stream,
size_t *offset,
const u8 *report)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format_size;
struct drm_i915_perf_record_header header;
u32 sample_flags = stream->sample_flags;
@@ -649,10 +674,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ int report_size = stream->oa_buffer.format_size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
@@ -664,13 +689,13 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (WARN_ON(!stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ aged_tail_idx = stream->oa_buffer.aged_tail_idx;
+ tail = stream->oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* An invalid tail pointer here means we're still waiting for the poll
@@ -732,14 +757,16 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* it to userspace...
*/
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- OAREPORT_REASON_MASK);
+ (IS_GEN(stream->perf->i915, 12) ?
+ OAREPORT_REASON_MASK_EXTENDED :
+ OAREPORT_REASON_MASK));
if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
- ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
+ ctx_id = report32[2] & stream->specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -749,7 +776,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
*/
- if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
+ if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
+ INTEL_GEN(stream->perf->i915) <= 11)
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -783,18 +811,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.oa.exclusive_stream->ctx ||
- dev_priv->perf.oa.specific_ctx_id == ctx_id ||
- (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
- dev_priv->perf.oa.specific_ctx_id) ||
+ if (!stream->perf->exclusive_stream->ctx ||
+ stream->specific_ctx_id == ctx_id ||
+ stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
/*
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.oa.exclusive_stream->ctx &&
- dev_priv->perf.oa.specific_ctx_id != ctx_id) {
+ if (stream->perf->exclusive_stream->ctx &&
+ stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -803,7 +830,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
+ stream->oa_buffer.last_ctx_id = ctx_id;
}
/*
@@ -817,18 +844,23 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ i915_reg_t oaheadptr;
+
+ oaheadptr = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
* We removed the gtt_offset for the copy loop above, indexing
* relative to oa_buf_base so put back here...
*/
head += gtt_offset;
+ intel_uncore_write(uncore, oaheadptr,
+ head & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = head;
- I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
- dev_priv->perf.oa.oa_buffer.head = head;
-
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -859,14 +891,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus;
+ i915_reg_t oastatus_reg;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OASTATUS : GEN8_OASTATUS;
+
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
/*
* We treat OABUFFER_OVERFLOW as a significant error:
@@ -889,16 +925,16 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
* reset GEN8_OASTATUS for us
*/
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
}
if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
@@ -906,8 +942,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- I915_WRITE(GEN8_OASTATUS,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+ intel_uncore_write(uncore, oastatus_reg,
+ oastatus & ~GEN8_OASTATUS_REPORT_LOST);
}
return gen8_append_oa_reports(stream, buf, count, offset);
@@ -938,10 +974,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ int report_size = stream->oa_buffer.format_size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
@@ -953,13 +989,13 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (WARN_ON(!stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ aged_tail_idx = stream->oa_buffer.aged_tail_idx;
+ tail = stream->oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* An invalid tail pointer here means we're still waiting for the poll
* hrtimer callback to give us a pointer
@@ -1012,7 +1048,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -1031,19 +1067,19 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* We removed the gtt_offset for the copy loop above, indexing
* relative to oa_buf_base so put back here...
*/
head += gtt_offset;
- I915_WRITE(GEN7_OASTATUS2,
- ((head & GEN7_OASTATUS2_HEAD_MASK) |
- GEN7_OASTATUS2_MEM_SELECT_GGTT));
- dev_priv->perf.oa.oa_buffer.head = head;
+ intel_uncore_write(uncore, GEN7_OASTATUS2,
+ (head & GEN7_OASTATUS2_HEAD_MASK) |
+ GEN7_OASTATUS2_MEM_SELECT_GGTT);
+ stream->oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -1070,21 +1106,21 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus1;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
/* XXX: On Haswell we don't have a safe way to clear oastatus1
* bits while the OA unit is enabled (while the tail pointer
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+ oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1113,12 +1149,12 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
}
if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
@@ -1126,7 +1162,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.oa.gen7_latched_oastatus1 |=
+ stream->perf->gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
@@ -1149,14 +1185,12 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
*/
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
/* We would wait indefinitely if periodic sampling is not enabled */
- if (!dev_priv->perf.oa.periodic)
+ if (!stream->periodic)
return -EIO;
- return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
- oa_buffer_check_unlocked(dev_priv));
+ return wait_event_interruptible(stream->poll_wq,
+ oa_buffer_check_unlocked(stream));
}
/**
@@ -1173,9 +1207,7 @@ static void i915_oa_poll_wait(struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+ poll_wait(file, &stream->poll_wq, wait);
}
/**
@@ -1195,24 +1227,18 @@ static int i915_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+ return stream->perf->ops.read(stream, buf, count, offset);
}
-static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
struct i915_gem_engines_iter it;
+ struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
int err;
- err = i915_mutex_lock_interruptible(&i915->drm);
- if (err)
- return ERR_PTR(err);
-
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- if (ce->engine->class != RENDER_CLASS)
+ if (ce->engine != stream->engine) /* first match! */
continue;
/*
@@ -1221,17 +1247,13 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
*/
err = intel_context_pin(ce);
if (err == 0) {
- i915->perf.oa.pinned_ctx = ce;
+ stream->pinned_ctx = ce;
break;
}
}
i915_gem_context_unlock_engines(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err)
- return ERR_PTR(err);
-
- return i915->perf.oa.pinned_ctx;
+ return stream->pinned_ctx;
}
/**
@@ -1246,28 +1268,31 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
- ce = oa_pin_context(i915, stream->ctx);
+ ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(i915)) {
+ switch (INTEL_GEN(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
* and don't need to use the mask.
*/
- i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
- i915->perf.oa.specific_ctx_id_mask = 0;
+ stream->specific_ctx_id = i915_ggtt_offset(ce->state);
+ stream->specific_ctx_id_mask = 0;
break;
}
case 8:
case 9:
case 10:
- if (USES_GUC_SUBMISSION(i915)) {
+ if (intel_engine_in_execlists_submission_mode(ce->engine)) {
+ stream->specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ } else {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1278,43 +1303,35 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* dropped by GuC. They won't be part of the context
* ID in the OA reports, so squash those lower bits.
*/
- i915->perf.oa.specific_ctx_id =
+ stream->specific_ctx_id =
lower_32_bits(ce->lrc_desc) >> 12;
/*
* GuC uses the top bit to signal proxy submission, so
* ignore that bit.
*/
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
- } else {
- i915->perf.oa.specific_ctx_id_mask =
- (1U << GEN8_CTX_ID_WIDTH) - 1;
- i915->perf.oa.specific_ctx_id =
- upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
}
break;
- case 11: {
- i915->perf.oa.specific_ctx_id_mask =
- ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
- ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
- ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ case 11:
+ case 12: {
+ stream->specific_ctx_id_mask =
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
break;
}
default:
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
+ ce->tag = stream->specific_ctx_id_mask;
+
DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- i915->perf.oa.specific_ctx_id,
- i915->perf.oa.specific_ctx_id_mask);
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
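The id/mask pair computed above is later used as a plain masked compare against the CTX ID field of each report; in sketch form (a hypothetical helper, not taken from the patch):

/* Sketch: does this OA report belong to the context the stream filters on?
 * report32[2] carries the hardware's context ID field. */
static bool report_matches_ctx(const struct i915_perf_stream *stream,
			       const u32 *report32)
{
	return (report32[2] & stream->specific_ctx_id_mask) ==
		stream->specific_ctx_id;
}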
@@ -1328,93 +1345,104 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- dev_priv->perf.oa.specific_ctx_id_mask = 0;
-
- ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
- mutex_lock(&dev_priv->drm.struct_mutex);
+ ce->tag = 0; /* recomputed on next submission after parking */
intel_context_unpin(ce);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
+
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
}
static void
-free_oa_buffer(struct drm_i915_private *i915)
+free_oa_buffer(struct i915_perf_stream *stream)
{
- mutex_lock(&i915->drm.struct_mutex);
-
- i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+ i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
- mutex_unlock(&i915->drm.struct_mutex);
+ stream->oa_buffer.vaddr = NULL;
+}
- i915->perf.oa.oa_buffer.vaddr = NULL;
+static void
+free_oa_configs(struct i915_perf_stream *stream)
+{
+ struct i915_oa_config_bo *oa_bo, *tmp;
+
+ i915_oa_config_put(stream->oa_config);
+ llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+ free_oa_config_bo(oa_bo);
+}
+
+static void
+free_noa_wait(struct i915_perf_stream *stream)
+{
+ i915_vma_unpin_and_release(&stream->noa_wait, 0);
}
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+ BUG_ON(stream != perf->exclusive_stream);
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
if (stream->ctx)
oa_put_render_ctx_id(stream);
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
+ free_noa_wait(stream);
- if (dev_priv->perf.oa.spurious_report_rs.missed) {
+ if (perf->spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.oa.spurious_report_rs.missed);
+ perf->spurious_report_rs.missed);
}
}
-static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
- I915_WRITE(GEN7_OASTATUS2,
- gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
+ gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
+ stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
- I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+ intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
+ gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* On Haswell we have to track which OASTATUS1 flags we've
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+ stream->perf->gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1427,26 +1455,24 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
-static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- I915_WRITE(GEN8_OASTATUS, 0);
- I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ intel_uncore_write(uncore, GEN8_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
+ stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN8_OABUFFER_UDW, 0);
+ intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
/*
* PRM says:
@@ -1456,22 +1482,22 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* to enable proper functionality of the overflow
* bit."
*/
- I915_WRITE(GEN8_OABUFFER, gtt_offset |
+ intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
- I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
+ intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
/*
* Reset state used to recognise context switches, affecting which
* reports we will forward to userspace while filtering for a single
* context.
*/
- dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* NB: although the OA buffer will initially be allocated
@@ -1485,36 +1511,84 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+
+ stream->pollin = false;
+}
+
+static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+
+ intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
+ gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = gtt_offset;
+
+ /*
+ * PRM says:
+ *
+ * "This MMIO must be set before the OATAILPTR
+ * register and after the OAHEADPTR register. This is
+ * to enable proper functionality of the overflow
+ * bit."
+ */
+ intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
+ OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
+ intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
+ gtt_offset & GEN12_OAG_OATAILPTR_MASK);
+
+ /* Mark that we need updated tail pointers to read from... */
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
/*
- * Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
+ * Reset state used to recognise context switches, affecting which
+ * reports we will forward to userspace while filtering for a single
+ * context.
+ */
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
+
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+
+ /*
+ * NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen8_append_oa_reports() that looks at the
+ * reason field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
*/
- dev_priv->perf.oa.pollin = false;
+ memset(stream->oa_buffer.vaddr, 0,
+ stream->oa_buffer.vma->size);
+
+ stream->pollin = false;
}
-static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
int ret;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ if (WARN_ON(stream->oa_buffer.vma))
return -ENODEV;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
DRM_ERROR("Failed to allocate OA buffer\n");
- ret = PTR_ERR(bo);
- goto unlock;
+ return PTR_ERR(bo);
}
i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
@@ -1525,20 +1599,16 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
ret = PTR_ERR(vma);
goto err_unref;
}
- dev_priv->perf.oa.oa_buffer.vma = vma;
+ stream->oa_buffer.vma = vma;
- dev_priv->perf.oa.oa_buffer.vaddr =
+ stream->oa_buffer.vaddr =
i915_gem_object_pin_map(bo, I915_MAP_WB);
- if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
- ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+ if (IS_ERR(stream->oa_buffer.vaddr)) {
+ ret = PTR_ERR(stream->oa_buffer.vaddr);
goto err_unpin;
}
- DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
- dev_priv->perf.oa.oa_buffer.vaddr);
-
- goto unlock;
+ return 0;
err_unpin:
__i915_vma_unpin(vma);
@@ -1546,58 +1616,396 @@ err_unpin:
err_unref:
i915_gem_object_put(bo);
- dev_priv->perf.oa.oa_buffer.vaddr = NULL;
- dev_priv->perf.oa.oa_buffer.vma = NULL;
+ stream->oa_buffer.vaddr = NULL;
+ stream->oa_buffer.vma = NULL;
+
+ return ret;
+}
+
+static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
+ bool save, i915_reg_t reg, u32 offset,
+ u32 dword_count)
+{
+ u32 cmd;
+ u32 d;
+
+ cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(stream->perf->i915) >= 8)
+ cmd++;
+
+ for (d = 0; d < dword_count; d++) {
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
+ *cs++ = intel_gt_scratch_offset(stream->engine->gt,
+ offset) + 4 * d;
+ *cs++ = 0;
+ }
+
+ return cs;
+}
+
+static int alloc_noa_wait(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ const u64 delay_ticks = 0xffffffffffffffff -
+ DIV64_U64_ROUND_UP(
+ atomic64_read(&stream->perf->noa_programming_delay) *
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
+ 1000000ull);
+ const u32 base = stream->engine->mmio_base;
+#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
+ u32 *batch, *ts0, *cs, *jump;
+ int ret, i;
+ enum {
+ START_TS,
+ NOW_TS,
+ DELTA_TS,
+ JUMP_PREDICATE,
+ DELTA_TARGET,
+ N_CS_GPR
+ };
+
+ bo = i915_gem_object_create_internal(i915, 4096);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ return PTR_ERR(bo);
+ }
+
+ /*
+	 * We pin this buffer in the GGTT because multiple OA config BOs
+	 * will contain a jump to this address, and that address needs to
+	 * stay fixed for the lifetime of the i915/perf stream.
+ */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ ret = PTR_ERR(batch);
+ goto err_unpin;
+ }
+
+ /* Save registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, true /* save */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* First timestamp snapshot location. */
+ ts0 = cs;
+
+ /*
+ * Initial snapshot of the timestamp register to implement the wait.
+	 * We work with 32-bit values, so clear out the top 32 bits of the
+	 * register because the ALU works on 64 bits.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
+
+ /*
+ * This is the location we're going to jump back into until the
+ * required amount of time has passed.
+ */
+ jump = cs;
+
+ /*
+ * Take another snapshot of the timestamp register. Take care to clear
+	 * out the top 32 bits of CS_GPR(1) as we're using it for other
+ * operations below.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
+
+ /*
+ * Do a diff between the 2 timestamps and store the result back into
+ * CS_GPR(1).
+ */
+ *cs++ = MI_MATH(5);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
+ *cs++ = MI_MATH_SUB;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
+	 * timestamp has rolled over the 32 bits) into the predicate register
+ * to be used for the predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Restart from the beginning if we had timestamps roll over. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
+ *cs++ = 0;
+
+ /*
+	 * Now take the diff between the two previous timestamps and add it to:
+	 * (((1 << 64) - 1) - delay_ns)
+ *
+ * When the Carry Flag contains 1 this means the elapsed time is
+ * longer than the expected delay, and we can exit the wait loop.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
+ *cs++ = lower_32_bits(delay_ticks);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
+ *cs++ = upper_32_bits(delay_ticks);
+
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ *cs++ = MI_ARB_CHECK;
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ /*
+ * Transfer the result into the predicate register to be used for the
+ * predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Predicate the jump. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
+ *cs++ = 0;
+
+ /* Restore registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, false /* restore */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* And return to the ring. */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
+
+ i915_gem_object_flush_map(bo);
+ i915_gem_object_unpin_map(bo);
+
+ stream->noa_wait = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin_and_release(&vma, 0);
+err_unref:
+ i915_gem_object_put(bo);
return ret;
}
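The delay_ticks constant computed at the top of alloc_noa_wait() is the usual overflow trick: it is (2^64 - 1) minus the delay expressed in command-streamer timestamp ticks, so that the MI_MATH ADD of the elapsed delta sets the carry flag exactly when the delay has expired. A host-side sketch of the same conversion, with the frequency and delay treated as plain parameters rather than the driver's runtime values:

#include <stdint.h>

/* (2^64 - 1) - delay_in_ticks, where ticks = ns * freq_khz / 10^6,
 * rounded up.  Adding the elapsed tick count to this constant carries out
 * of 64 bits exactly when elapsed >= delay, which is what the predicated
 * MI_BATCH_BUFFER_START above tests. */
static uint64_t noa_delay_target(uint64_t delay_ns, uint64_t ts_freq_khz)
{
	uint64_t delay_ticks = (delay_ns * ts_freq_khz + 999999) / 1000000;

	return UINT64_MAX - delay_ticks;
}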
-static void config_oa_regs(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg *regs,
- u32 n_regs)
+static u32 *write_cs_mi_lri(u32 *cs,
+ const struct i915_oa_reg *reg_data,
+ u32 n_regs)
{
u32 i;
for (i = 0; i < n_regs; i++) {
- const struct i915_oa_reg *reg = regs + i;
+ if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
+ u32 n_lri = min_t(u32,
+ n_regs - i,
+ MI_LOAD_REGISTER_IMM_MAX_REGS);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
+ }
+ *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
+ *cs++ = reg_data[i].value;
+ }
+
+ return cs;
+}
+
+static int num_lri_dwords(int num_regs)
+{
+ int count = 0;
+
+ if (num_regs > 0) {
+ count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
+ count += num_regs * 2;
+ }
+
+ return count;
+}
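As a quick worked example of this sizing (assuming MI_LOAD_REGISTER_IMM_MAX_REGS is 126 here): a config with 175 MUX registers needs DIV_ROUND_UP(175, 126) = 2 MI_LRI header dwords plus 175 * 2 = 350 payload dwords, i.e. 352 dwords for that block alone, before the b_counter and flex blocks, the trailing 3-dword MI_BATCH_BUFFER_START and the page-size alignment are added in alloc_oa_config_buffer() below.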
+
+static struct i915_oa_config_bo *
+alloc_oa_config_buffer(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_oa_config_bo *oa_bo;
+ size_t config_length = 0;
+ u32 *cs;
+ int err;
+
+ oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
+ if (!oa_bo)
+ return ERR_PTR(-ENOMEM);
- I915_WRITE(reg->addr, reg->value);
+ config_length += num_lri_dwords(oa_config->mux_regs_len);
+ config_length += num_lri_dwords(oa_config->b_counter_regs_len);
+ config_length += num_lri_dwords(oa_config->flex_regs_len);
+ config_length += 3; /* MI_BATCH_BUFFER_START */
+ config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_oa_bo;
}
+
+ cs = write_cs_mi_lri(cs,
+ oa_config->mux_regs,
+ oa_config->mux_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->flex_regs,
+ oa_config->flex_regs_len);
+
+ /* Jump into the active wait. */
+ *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8);
+ *cs++ = i915_ggtt_offset(stream->noa_wait);
+ *cs++ = 0;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ oa_bo->vma = i915_vma_instance(obj,
+ &stream->engine->gt->ggtt->vm,
+ NULL);
+ if (IS_ERR(oa_bo->vma)) {
+ err = PTR_ERR(oa_bo->vma);
+ goto err_oa_bo;
+ }
+
+ oa_bo->oa_config = i915_oa_config_get(oa_config);
+ llist_add(&oa_bo->node, &stream->oa_config_bos);
+
+ return oa_bo;
+
+err_oa_bo:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(oa_bo);
+ return ERR_PTR(err);
}
-static void delay_after_mux(void)
+static struct i915_vma *
+get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
{
+ struct i915_oa_config_bo *oa_bo;
+
/*
- * It apparently takes a fairly long time for a new MUX
- * configuration to be applied after these register writes.
- * This delay duration was derived empirically based on the
- * render_basic config but hopefully it covers the maximum
- * configuration latency.
- *
- * As a fallback, the checks in _append_oa_reports() to skip
- * invalid OA reports do also seem to work to discard reports
- * generated before this config has completed - albeit not
- * silently.
- *
- * Unfortunately this is essentially a magic number, since we
- * don't currently know of a reliable mechanism for predicting
- * how long the MUX config will take to apply and besides
- * seeing invalid reports we don't know of a reliable way to
- * explicitly check that the MUX config has landed.
- *
- * It's even possible we've mischaracterized the underlying
- * problem - it just seems like the simplest explanation why
- * a delay at this location would mitigate any invalid reports.
+ * Look for the buffer in the already allocated BOs attached
+ * to the stream.
*/
- usleep_range(15000, 20000);
+ llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
+ if (oa_bo->oa_config == oa_config &&
+ memcmp(oa_bo->oa_config->uuid,
+ oa_config->uuid,
+ sizeof(oa_config->uuid)) == 0)
+ goto out;
+ }
+
+ oa_bo = alloc_oa_config_buffer(stream, oa_config);
+ if (IS_ERR(oa_bo))
+ return ERR_CAST(oa_bo);
+
+out:
+ return i915_vma_get(oa_bo->vma);
+}
+
+static int emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = get_oa_vma(stream, oa_config);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_vma_put;
+
+ intel_engine_pm_get(ce->engine);
+ rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (err)
+ goto err_add_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start, 0,
+ I915_DISPATCH_SECURE);
+err_add_request:
+ i915_request_add(rq);
+err_vma_unpin:
+ i915_vma_unpin(vma);
+err_vma_put:
+ i915_vma_put(vma);
+ return err;
+}
+
+static struct intel_context *oa_context(struct i915_perf_stream *stream)
+{
+ return stream->pinned_ctx ?: stream->engine->kernel_context;
}
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
/*
* PRM:
@@ -1609,31 +2017,47 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* count the events from non-render domain. Unit level clock
* gating for RCS should also be disabled.
*/
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
+ return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+}
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+static void hsw_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- return 0;
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN7_DOP_CLOCK_GATE_ENABLE);
+
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
-static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
+ i915_reg_t reg)
{
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
- ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
- GEN7_DOP_CLOCK_GATE_ENABLE));
+ u32 mmio = i915_mmio_reg_offset(reg);
+ int i;
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
-}
+ /*
+ * This arbitrary default will select the 'EU FPU0 Pipeline
+ * Active' event. In the future it's anticipated that there
+ * will be an explicit 'No Event' we can select, but not yet...
+ */
+ if (!oa_config)
+ return 0;
+
+ for (i = 0; i < oa_config->flex_regs_len; i++) {
+ if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
+ return oa_config->flex_regs[i].value;
+ }
+ return 0;
+}
/*
* NB: It must always remain pointer safe to run this even if the OA unit
* has been disabled.
@@ -1642,13 +2066,11 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* in the case that the OA unit has been disabled.
*/
static void
-gen8_update_reg_state_unlocked(struct intel_context *ce,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
+gen8_update_reg_state_unlocked(const struct intel_context *ce,
+ const struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = ce->gem_context->i915;
- u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1659,41 +2081,187 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
EU_PERF_CNTL5,
EU_PERF_CNTL6,
};
+ u32 *reg_state = ce->lrc_reg_state;
int i;
- CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME);
+ reg_state[ctx_oactxctrl + 1] =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
- for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- u32 state_offset = ctx_flexeu0 + i * 2;
- u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
+ for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
+ reg_state[ctx_flexeu0 + i * 2 + 1] =
+ oa_config_flex_reg(stream->oa_config, flex_regs[i]);
- /*
- * This arbitrary default will select the 'EU FPU0 Pipeline
- * Active' event. In the future it's anticipated that there
- * will be an explicit 'No Event' we can select, but not yet...
- */
- u32 value = 0;
+ reg_state[CTX_R_PWR_CLK_STATE] =
+ intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
+}
- if (oa_config) {
- u32 j;
+struct flex {
+ i915_reg_t reg;
+ u32 offset;
+ u32 value;
+};
- for (j = 0; j < oa_config->flex_regs_len; j++) {
- if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
- value = oa_config->flex_regs[j].value;
- break;
- }
- }
- }
+static int
+gen8_store_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ do {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset + flex->offset * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = flex->value;
+ } while (flex++, --count);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_load_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(!count || count > 63);
+
+ cs = intel_ring_begin(rq, 2 * count + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *cs++ = i915_mmio_reg_offset(flex->reg);
+ *cs++ = flex->value;
+ } while (flex++, --count);
+ *cs++ = MI_NOOP;
- CTX_REG(reg_state, state_offset, flex_regs[i], value);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen8_modify_context(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ /* Serialise with the remote context */
+ err = intel_context_prepare_remote_request(ce, rq);
+ if (err == 0)
+ err = gen8_store_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = gen8_load_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_configure_context(struct i915_gem_context *ctx,
+ struct flex *flex, unsigned int count)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ GEM_BUG_ON(ce == ce->engine->kernel_context);
+
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ /* Otherwise OA settings will be set upon first use */
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+ err = gen8_modify_context(ce, flex, count);
+
+ intel_context_unpin(ce);
+ if (err)
+ break;
}
+ i915_gem_context_unlock_engines(ctx);
+
+ return err;
+}
+
+static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
+{
+ int err;
+ struct intel_context *ce = stream->pinned_ctx;
+ u32 format = stream->oa_buffer.format;
+ struct flex regs_context[] = {
+ {
+ GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
+ enable ? GEN8_OA_COUNTER_RESUME : 0,
+ },
+ };
+ /* Offsets in regs_lri are not used since this configuration is only
+ * applied using LRI. Initialize the correct offsets for posterity.
+ */
+#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
+ struct flex regs_lri[] = {
+ {
+ GEN12_OAR_OACONTROL,
+ GEN12_OAR_OACONTROL_OFFSET + 1,
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+ },
+ {
+ RING_CONTEXT_CONTROL(ce->engine->mmio_base),
+ CTX_CONTEXT_CONTROL,
+ _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ enable ?
+ GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
+ 0)
+ },
+ };
- CTX_REG(reg_state,
- CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- intel_sseu_make_rpcs(i915, &ce->sseu));
+ /* Modify the context image of the pinned context with regs_context */
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ return err;
+
+ err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
+ intel_context_unlock_pinned(ce);
+ if (err)
+ return err;
+
+ /* Apply regs_lri using LRI with pinned context */
+ return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
}
/*
@@ -1719,16 +2287,18 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
* per-context OA state.
*
* Note: it's only the RCS/Render context that has any OA state.
+ * Note: the first flex register passed must always be R_PWR_CLK_STATE
*/
-static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int oa_configure_all_contexts(struct i915_perf_stream *stream,
+ struct flex *regs,
+ size_t num_regs)
{
- unsigned int map_type = i915_coherent_map_type(dev_priv);
- struct i915_gem_context *ctx;
- struct i915_request *rq;
- int ret;
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx, *cn;
+ int err;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&stream->perf->lock);
/*
* The OA register config is setup through the context image. This image
@@ -1740,66 +2310,106 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* this might leave small interval of time where the OA unit is
* configured at an invalid sampling period.
*
- * So far the best way to work around this issue seems to be draining
- * the GPU from any submitted work.
+ * Note that since we emit all requests from a single ring, there
+ * is still an implicit global barrier here that may cause a high
+ * priority context to wait for an otherwise independent low priority
+ * context. Contexts idle at the time of reconfiguration are not
+ * trapped behind the barrier.
*/
- ret = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- /* Update all contexts now that we've stalled the submission. */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx),
- it) {
- u32 *regs;
+ spin_unlock(&i915->gem.contexts.lock);
- if (ce->engine->class != RENDER_CLASS)
- continue;
+ err = gen8_configure_context(ctx, regs, num_regs);
+ if (err) {
+ i915_gem_context_put(ctx);
+ return err;
+ }
- /* OA settings will be set upon first use */
- if (!ce->state)
- continue;
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock(&i915->gem.contexts.lock);
- regs = i915_gem_object_pin_map(ce->state->obj,
- map_type);
- if (IS_ERR(regs)) {
- i915_gem_context_unlock_engines(ctx);
- return PTR_ERR(regs);
- }
+ /*
+ * After updating all other contexts, we need to modify ourselves.
+ * If we don't modify the kernel_context, we do not get events while
+ * idle.
+ */
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce = engine->kernel_context;
- ce->state->obj->mm.dirty = true;
- regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+ if (engine->class != RENDER_CLASS)
+ continue;
- gen8_update_reg_state_unlocked(ce, regs, oa_config);
+ regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- i915_gem_object_unpin_map(ce->state->obj);
- }
- i915_gem_context_unlock_engines(ctx);
+ err = gen8_modify_self(ce, regs, num_regs);
+ if (err)
+ return err;
}
- /*
- * Apply the configuration by doing one context restore of the edited
- * context image.
- */
- rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ return 0;
+}
- i915_request_add(rq);
+static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
+{
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE,
+ CTX_R_PWR_CLK_STATE,
+ },
+ };
- return 0;
+ return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
+{
+ /* The MMIO offsets for Flex EU registers aren't contiguous */
+ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE,
+ CTX_R_PWR_CLK_STATE,
+ },
+ {
+ GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
+ },
+ { EU_PERF_CNTL0, ctx_flexeuN(0) },
+ { EU_PERF_CNTL1, ctx_flexeuN(1) },
+ { EU_PERF_CNTL2, ctx_flexeuN(2) },
+ { EU_PERF_CNTL3, ctx_flexeuN(3) },
+ { EU_PERF_CNTL4, ctx_flexeuN(4) },
+ { EU_PERF_CNTL5, ctx_flexeuN(5) },
+ { EU_PERF_CNTL6, ctx_flexeuN(6) },
+ };
+#undef ctx_flexeuN
+ int i;
+
+ regs[1].value =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
+
+ for (i = 2; i < ARRAY_SIZE(regs); i++)
+ regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+ return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
}
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1825,10 +2435,10 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 11)) {
- I915_WRITE(GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
+ intel_uncore_write(uncore, GEN8_OA_DEBUG,
+ _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -1836,46 +2446,111 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
+ return emit_oa_config(stream, oa_config, oa_context(stream));
+}
+
+static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
+{
+ return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+ (stream->sample_flags & SAMPLE_OA_REPORT) ?
+ 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+}
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ int ret;
- return 0;
+ intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
+ /* Disable clk ratio reports, like previous Gens. */
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ /*
+ * If the user didn't require OA reports, instruct
+ * the hardware not to emit ctx switch reports.
+ */
+ oag_report_ctx_switches(stream));
+
+ intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
+ (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
+ (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
+ : 0);
+
+ /*
+ * Update all contexts prior to writing the mux configurations as we need
+ * to make sure all slices/subslices are ON before writing to NOA
+ * registers.
+ */
+ ret = gen12_configure_all_contexts(stream, oa_config);
+ if (ret)
+ return ret;
+
+ /*
+ * For Gen12, performance counters are context
+ * saved/restored. Only enable it for the context that
+ * requested this.
+ */
+ if (stream->ctx) {
+ ret = gen12_configure_oar_context(stream, true);
+ if (ret)
+ return ret;
+ }
+
+ return emit_oa_config(stream, oa_config, oa_context(stream));
}
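
A note on the masked-register writes above: GEN12_OAG_OA_DEBUG, like the other _MASKED_BIT_ENABLE()/_MASKED_FIELD() users in this patch, follows i915's masked-register convention, where the upper 16 bits of the written value select which of the lower 16 bits the hardware may change. The helper below is an illustrative standalone sketch of that encoding, not part of the patch.

#include <stdint.h>

/*
 * Illustrative only -- mirrors the semantics of _MASKED_FIELD() in
 * i915_reg.h: the high 16 bits act as a per-bit write enable and the
 * low 16 bits carry the new values.  oag_report_ctx_switches() uses
 * this to set or clear DISABLE_CTX_SWITCH_REPORTS while always marking
 * that bit writable, leaving the other OA_DEBUG bits untouched.
 */
static inline uint32_t masked_field(uint32_t mask, uint32_t value)
{
	return (mask << 16) | value;
}
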
-static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct intel_uncore *uncore = stream->uncore;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ lrc_configure_all_contexts(stream, NULL);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
-static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct intel_uncore *uncore = stream->uncore;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ lrc_configure_all_contexts(stream, NULL);
/* Make sure we disable noa to save power. */
- I915_WRITE(RPM_CONFIG1,
- I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+}
+
+static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ /* Reset all contexts' slices/subslices configurations. */
+ gen12_configure_all_contexts(stream, NULL);
+
+ /* disable the context save/restore or OAR counters */
+ if (stream->ctx)
+ gen12_configure_oar_context(stream, false);
+
+ /* Make sure we disable noa to save power. */
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
struct i915_gem_context *ctx = stream->ctx;
- u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
- bool periodic = dev_priv->perf.oa.periodic;
- u32 period_exponent = dev_priv->perf.oa.period_exponent;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ u32 ctx_id = stream->specific_ctx_id;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 report_format = stream->oa_buffer.format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -1886,22 +2561,22 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
* memory which this helps maintain.
*/
- gen7_init_oa_buffer(dev_priv);
-
- I915_WRITE(GEN7_OACONTROL,
- (ctx_id & GEN7_OACONTROL_CTX_MASK) |
- (period_exponent <<
- GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
- (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
- (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
- (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
- GEN7_OACONTROL_ENABLE);
+ gen7_init_oa_buffer(stream);
+
+ intel_uncore_write(uncore, GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ struct intel_uncore *uncore = stream->uncore;
+ u32 report_format = stream->oa_buffer.format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -1912,16 +2587,35 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
* memory which this helps maintain.
*/
- gen8_init_oa_buffer(dev_priv);
+ gen8_init_oa_buffer(stream);
/*
* Note: we don't rely on the hardware to perform single context
* filtering and instead filter on the cpu based on the context-id
* field of reports
*/
- I915_WRITE(GEN8_OACONTROL, (report_format <<
- GEN8_OA_REPORT_FORMAT_SHIFT) |
- GEN8_OA_COUNTER_ENABLE);
+ intel_uncore_write(uncore, GEN8_OACONTROL,
+ (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
+ GEN8_OA_COUNTER_ENABLE);
+}
+
+static void gen12_oa_enable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 report_format = stream->oa_buffer.format;
+
+ /*
+ * If we don't want OA reports from the OA buffer, then we don't even
+ * need to program the OAG unit.
+ */
+ if (!(stream->sample_flags & SAMPLE_OA_REPORT))
+ return;
+
+ gen12_init_oa_buffer(stream);
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
+ (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}
/**
@@ -1935,19 +2629,17 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ stream->perf->ops.oa_enable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
-
- if (dev_priv->perf.oa.periodic)
- hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
+ if (stream->periodic)
+ hrtimer_start(&stream->poll_check_timer,
ns_to_ktime(POLL_PERIOD),
HRTIMER_MODE_REL_PINNED);
}
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN7_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -1958,7 +2650,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN8_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -1967,6 +2659,18 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
+static void gen12_oa_disable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
+ GEN12_OAG_OACONTROL,
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
+ 50))
+ DRM_ERROR("wait for OA to be disabled timed out\n");
+}
+
/**
* i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
* @stream: An i915 perf stream opened for OA metrics
@@ -1977,12 +2681,10 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ stream->perf->ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_disable(stream);
-
- if (dev_priv->perf.oa.periodic)
- hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+ if (stream->periodic)
+ hrtimer_cancel(&stream->poll_check_timer);
}
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
@@ -2016,34 +2718,42 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
int format_size;
int ret;
- /* If the sysfs metrics/ directory wasn't registered for some
+ if (!props->engine) {
+ DRM_DEBUG("OA engine not specified\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the sysfs metrics/ directory wasn't registered for some
* reason then don't let userspace try their luck with config
* IDs
*/
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
- if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
+ if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
+ (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
DRM_DEBUG("Only OA report sampling supported\n");
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.enable_metric_set) {
+ if (!perf->ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
- /* To avoid the complexity of having to accurately filter
+ /*
+ * To avoid the complexity of having to accurately filter
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.oa.exclusive_stream) {
+ if (perf->exclusive_stream) {
DRM_DEBUG("OA unit already in use\n");
return -EBUSY;
}
@@ -2053,43 +2763,28 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- /* We set up some ratelimit state to potentially throttle any _NOTES
- * about spurious, invalid OA reports which we don't forward to
- * userspace.
- *
- * The initialization is associated with opening the stream (not driver
- * init) considering we print a _NOTE about any throttling when closing
- * the stream instead of waiting until driver _fini which no one would
- * ever see.
- *
- * Using the same limiting factors as printk_ratelimit()
- */
- ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
- 5 * HZ, 10);
- /* Since we use a DRM_NOTE for spurious reports it would be
- * inconsistent to let __ratelimit() automatically print a warning for
- * throttling.
- */
- ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
- RATELIMIT_MSG_ON_RELEASE);
+ stream->engine = props->engine;
+ stream->uncore = stream->engine->gt->uncore;
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
+ format_size = perf->oa_formats[props->oa_format].size;
- stream->sample_flags |= SAMPLE_OA_REPORT;
+ stream->sample_flags = props->sample_flags;
stream->sample_size += format_size;
- dev_priv->perf.oa.oa_buffer.format_size = format_size;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+ stream->oa_buffer.format_size = format_size;
+ if (WARN_ON(stream->oa_buffer.format_size == 0))
return -EINVAL;
- dev_priv->perf.oa.oa_buffer.format =
- dev_priv->perf.oa.oa_formats[props->oa_format].format;
+ stream->hold_preemption = props->hold_preemption;
+
+ stream->oa_buffer.format =
+ perf->oa_formats[props->oa_format].format;
- dev_priv->perf.oa.periodic = props->oa_periodic;
- if (dev_priv->perf.oa.periodic)
- dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+ stream->periodic = props->oa_periodic;
+ if (stream->periodic)
+ stream->period_exponent = props->oa_period_exponent;
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
@@ -2099,9 +2794,16 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
}
- ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+ ret = alloc_noa_wait(stream);
if (ret) {
+ DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
+ goto err_noa_wait_alloc;
+ }
+
+ stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
+ if (!stream->oa_config) {
DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ ret = -EINVAL;
goto err_config;
}
@@ -2117,63 +2819,72 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_get(stream->engine);
+ intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
- ret = alloc_oa_buffer(dev_priv);
+ ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
stream->ops = &i915_oa_stream_ops;
- dev_priv->perf.oa.exclusive_stream = stream;
+ perf->exclusive_stream = stream;
- ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
+ ret = perf->ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ DRM_DEBUG("opening stream oa config uuid=%s\n",
+ stream->oa_config->uuid);
+
+ hrtimer_init(&stream->poll_check_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ init_waitqueue_head(&stream->poll_wq);
+ spin_lock_init(&stream->oa_buffer.ptr_lock);
return 0;
err_enable:
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
-err_lock:
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
err_oa_buf_alloc:
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
err_config:
+ free_noa_wait(stream);
+
+err_noa_wait_alloc:
if (stream->ctx)
oa_put_render_ctx_id(stream);
return ret;
}
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *regs)
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct i915_perf_stream *stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.oa.exclusive_stream;
- if (stream)
- gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
+ stream = engine->i915->perf.exclusive_stream;
+ /*
+ * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
+ * is already doing that, so nothing to be done for gen12 here.
+ */
+ if (stream && INTEL_GEN(stream->perf->i915) < 12)
+ gen8_update_reg_state_unlocked(ce, stream);
}
/**
@@ -2243,7 +2954,7 @@ static ssize_t i915_perf_read(struct file *file,
loff_t *ppos)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
ssize_t ret;
/* To ensure it's handled consistently we simply treat all reads of a
@@ -2266,15 +2977,15 @@ static ssize_t i915_perf_read(struct file *file,
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file,
buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
} while (ret == -EAGAIN);
} else {
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file, buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -2289,7 +3000,7 @@ static ssize_t i915_perf_read(struct file *file,
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
return ret;
@@ -2297,13 +3008,12 @@ static ssize_t i915_perf_read(struct file *file,
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
- struct drm_i915_private *dev_priv =
- container_of(hrtimer, typeof(*dev_priv),
- perf.oa.poll_check_timer);
+ struct i915_perf_stream *stream =
+ container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (oa_buffer_check_unlocked(dev_priv)) {
- dev_priv->perf.oa.pollin = true;
- wake_up(&dev_priv->perf.oa.poll_wq);
+ if (oa_buffer_check_unlocked(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
}
hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
@@ -2313,7 +3023,6 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
/**
* i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
- * @dev_priv: i915 device instance
* @stream: An i915 perf stream
* @file: An i915 perf stream file
* @wait: poll() state table
@@ -2322,15 +3031,14 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
* &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
* will be woken for new stream data.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: any poll events that are ready without sleeping
*/
-static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
- struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait)
+static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
{
__poll_t events = 0;
@@ -2342,7 +3050,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
* the hrtimer/oa_poll_check_timer_cb to notify us when there are
* samples to read.
*/
- if (dev_priv->perf.oa.pollin)
+ if (stream->pollin)
events |= EPOLLIN;
return events;
@@ -2364,12 +3072,12 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
__poll_t ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_poll_locked(stream, file, wait);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2394,6 +3102,9 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
if (stream->ops->enable)
stream->ops->enable(stream);
+
+ if (stream->hold_preemption)
+ intel_context_set_nopreempt(stream->pinned_ctx);
}
/**
@@ -2418,17 +3129,54 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
/* Allow stream->ops->disable() to refer to this */
stream->enabled = false;
+ if (stream->hold_preemption)
+ intel_context_clear_nopreempt(stream->pinned_ctx);
+
if (stream->ops->disable)
stream->ops->disable(stream);
}
+static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ unsigned long metrics_set)
+{
+ struct i915_oa_config *config;
+ long ret = stream->oa_config->id;
+
+ config = i915_perf_get_oa_config(stream->perf, metrics_set);
+ if (!config)
+ return -EINVAL;
+
+ if (config != stream->oa_config) {
+ int err;
+
+ /*
+ * If OA is bound to a specific context, emit the
+ * reconfiguration inline from that context. The update
+ * will then be ordered with respect to submission on that
+ * context.
+ *
+ * When set globally, we use a low priority kernel context,
+ * so it will effectively take effect when idle.
+ */
+ err = emit_oa_config(stream, config, oa_context(stream));
+ if (err == 0)
+ config = xchg(&stream->oa_config, config);
+ else
+ ret = err;
+ }
+
+ i915_oa_config_put(config);
+
+ return ret;
+}
+
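
i915_perf_config_locked() above backs the new I915_PERF_IOCTL_CONFIG request, which lets userspace switch an open stream to a different metrics set without reopening it. A minimal userspace sketch, assuming a libdrm-style include path and a metrics-set ID previously read from the sysfs metrics/ directory:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* I915_PERF_IOCTL_CONFIG; header path may vary */

/*
 * Switch an already open i915-perf stream to another metrics set.
 * On success the ioctl returns the ID of the previously active config
 * (see i915_perf_config_locked() above); on failure it returns -1 with
 * errno set.
 */
static int perf_stream_set_config(int stream_fd, uint64_t metrics_set_id)
{
	return ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set_id);
}
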
/**
* i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
* @stream: An i915 perf stream
* @cmd: the ioctl request
* @arg: the ioctl data
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: zero on success or a negative error code. Returns -EINVAL for
@@ -2445,6 +3193,8 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
case I915_PERF_IOCTL_DISABLE:
i915_perf_disable_locked(stream);
return 0;
+ case I915_PERF_IOCTL_CONFIG:
+ return i915_perf_config_locked(stream, arg);
}
return -EINVAL;
@@ -2466,12 +3216,12 @@ static long i915_perf_ioctl(struct file *file,
unsigned long arg)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
long ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_ioctl_locked(stream, cmd, arg);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2483,7 +3233,7 @@ static long i915_perf_ioctl(struct file *file,
* Frees all resources associated with the given i915 perf @stream, disabling
* any associated data capture in the process.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*/
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
@@ -2494,8 +3244,6 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
if (stream->ops->destroy)
stream->ops->destroy(stream);
- list_del(&stream->link);
-
if (stream->ctx)
i915_gem_context_put(stream->ctx);
@@ -2516,14 +3264,14 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
i915_perf_destroy_locked(stream);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
/* Release the reference the perf stream kept on the driver. */
- drm_dev_put(&dev_priv->drm);
+ drm_dev_put(&perf->i915->drm);
return 0;
}
@@ -2545,7 +3293,7 @@ static const struct file_operations fops = {
/**
* i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
* @props: individually validated u64 property value pairs
* @file: drm file
@@ -2553,7 +3301,7 @@ static const struct file_operations fops = {
* See i915_perf_ioctl_open() for interface details.
*
* Implements further stream config validation and stream initialization on
- * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
* taken to serialize with any non-file-operation driver hooks.
*
* Note: at this point the @props have only been validated in isolation and
@@ -2568,7 +3316,7 @@ static const struct file_operations fops = {
* Returns: zero on success or a negative error code.
*/
static int
-i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+i915_perf_open_ioctl_locked(struct i915_perf *perf,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props,
struct drm_file *file)
@@ -2599,16 +3347,33 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
* rest of the system, which we consider acceptable for a
* non-privileged client.
*
- * For Gen8+ the OA unit no longer supports clock gating off for a
+ * For Gen8->11 the OA unit no longer supports clock gating off for a
* specific context and the kernel can't securely stop the counters
* from updating as system-wide / global values. Even though we can
* filter reports based on the included context ID we can't block
* clients from seeing the raw / global counter values via
* MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
* enable the OA unit by default.
+ *
+ * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
+ * per context basis. So we can relax requirements there if the user
+ * doesn't request global stream access (i.e. query based sampling
+ * using MI_REPORT_PERF_COUNT).
*/
- if (IS_HASWELL(dev_priv) && specific_ctx)
+ if (IS_HASWELL(perf->i915) && specific_ctx)
privileged_op = false;
+ else if (IS_GEN(perf->i915, 12) && specific_ctx &&
+ (props->sample_flags & SAMPLE_OA_REPORT) == 0)
+ privileged_op = false;
+
+ if (props->hold_preemption) {
+ if (!props->single_context) {
+ DRM_DEBUG("preemption disable with no context\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ privileged_op = true;
+ }
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
@@ -2617,7 +3382,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
*/
if (privileged_op &&
i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
}
@@ -2628,7 +3393,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_ctx;
}
- stream->dev_priv = dev_priv;
+ stream->perf = perf;
stream->ctx = specific_ctx;
ret = i915_oa_stream_init(stream, param, props);
@@ -2644,8 +3409,6 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_flags;
}
- list_add(&stream->link, &dev_priv->perf.streams);
-
if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
@@ -2654,7 +3417,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
if (stream_fd < 0) {
ret = stream_fd;
- goto err_open;
+ goto err_flags;
}
if (!(param->flags & I915_PERF_FLAG_DISABLED))
@@ -2663,12 +3426,10 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
/* Take a reference on the driver that will be kept with stream_fd
* until its release.
*/
- drm_dev_get(&dev_priv->drm);
+ drm_dev_get(&perf->i915->drm);
return stream_fd;
-err_open:
- list_del(&stream->link);
err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
@@ -2681,15 +3442,15 @@ err:
return ret;
}
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
}
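
For a concrete feel of the conversion above, which expresses the periodic sampling interval as (2 << exponent) timestamp ticks: with a 12000 kHz command-streamer timestamp clock (an example value only; the real one comes from RUNTIME_INFO()->cs_timestamp_frequency_khz), an exponent of 16 works out to roughly 10.9 ms between reports. A standalone sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same formula as oa_exponent_to_ns(), in plain C for illustration. */
static uint64_t oa_exponent_to_ns_example(uint64_t timestamp_khz, int exponent)
{
	return (1000000000ULL * (2ULL << exponent)) / (1000ULL * timestamp_khz);
}

int main(void)
{
	/* 12000 kHz and exponent 16 are example inputs only. */
	printf("%llu ns\n",
	       (unsigned long long)oa_exponent_to_ns_example(12000, 16));
	return 0;	/* prints 10922666, i.e. ~10.9 ms */
}
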
/**
* read_properties_unlocked - validate + copy userspace stream open properties
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @uprops: The array of u64 key value pairs given by userspace
* @n_props: The number of key value pairs expected in @uprops
* @props: The stream configuration built up while validating properties
@@ -2702,7 +3463,7 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
* we shouldn't validate or assume anything about ordering here. This doesn't
* rule out defining new properties with ordering requirements in the future.
*/
-static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+static int read_properties_unlocked(struct i915_perf *perf,
u64 __user *uprops,
u32 n_props,
struct perf_open_properties *props)
@@ -2717,6 +3478,15 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
return -EINVAL;
}
+ /* At the moment we only support using i915-perf on the RCS. */
+ props->engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0);
+ if (!props->engine) {
+ DRM_DEBUG("No RENDER-capable engines\n");
+ return -EINVAL;
+ }
+
/* Considering that ID = 0 is reserved and assuming that we don't
* (currently) expect any configurations to ever specify duplicate
* values for a particular property ID then the last _PROP_MAX value is
@@ -2768,7 +3538,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa.oa_formats[value].size) {
+ if (!perf->oa_formats[value].size) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -2789,7 +3559,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
*/
BUILD_BUG_ON(sizeof(oa_period) != 8);
- oa_period = oa_exponent_to_ns(dev_priv, value);
+ oa_period = oa_exponent_to_ns(perf, value);
/* This check is primarily to ensure that oa_period <=
* UINT32_MAX (before passing to do_div which only
@@ -2814,6 +3584,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
+ case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
+ props->hold_preemption = !!value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
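
To show where the new DRM_I915_PERF_PROP_HOLD_PREEMPTION property fits on the userspace side: it is passed as one more (key, value) pair in the property array handed to DRM_IOCTL_I915_PERF_OPEN, and per the open path above it requires a context handle plus either CAP_SYS_ADMIN or dev.i915.perf_stream_paranoid=0. A minimal sketch; the context handle, metrics-set ID and report format are placeholders:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* header path may vary with libdrm */

static int open_perf_stream(int drm_fd, uint32_t ctx_handle,
			    uint64_t metrics_set_id)
{
	uint64_t properties[] = {
		/* Bind the stream to one GEM context... */
		DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle,
		/* ...and keep that context non-preemptible while enabled. */
		DRM_I915_PERF_PROP_HOLD_PREEMPTION, 1,
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}
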
@@ -2843,7 +3616,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
* mutex to avoid an awkward lockdep with mmap_sem.
*
* Most of the implementation details are handled by
- * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * i915_perf_open_ioctl_locked() after taking the &perf->lock
* mutex for serializing with any non-file-operation driver hooks.
*
* Return: A newly opened i915 Perf stream file descriptor or negative
@@ -2852,13 +3625,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_open_param *param = data;
struct perf_open_properties props;
u32 known_open_flags;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -2871,124 +3644,130 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = read_properties_unlocked(dev_priv,
+ ret = read_properties_unlocked(perf,
u64_to_user_ptr(param->properties_ptr),
param->num_properties,
&props);
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
+ mutex_unlock(&perf->lock);
return ret;
}
/**
* i915_perf_register - exposes i915-perf to userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* In particular OA metric sets are advertised under a sysfs metrics/
* directory allowing userspace to enumerate valid IDs that can be
* used to open an i915-perf stream.
*/
-void i915_perf_register(struct drm_i915_private *dev_priv)
+void i915_perf_register(struct drm_i915_private *i915)
{
+ struct i915_perf *perf = &i915->perf;
int ret;
- if (!dev_priv->perf.initialized)
+ if (!perf->i915)
return;
/* To be sure we're synchronized with an attempted
* i915_perf_open_ioctl(); considering that we register after
* being exposed to userspace.
*/
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
- dev_priv->perf.metrics_kobj =
+ perf->metrics_kobj =
kobject_create_and_add("metrics",
- &dev_priv->drm.primary->kdev->kobj);
- if (!dev_priv->perf.metrics_kobj)
+ &i915->drm.primary->kdev->kobj);
+ if (!perf->metrics_kobj)
goto exit;
- sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- i915_perf_load_test_config_icl(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv))
- i915_perf_load_test_config_sklgt2(dev_priv);
- else if (IS_SKL_GT3(dev_priv))
- i915_perf_load_test_config_sklgt3(dev_priv);
- else if (IS_SKL_GT4(dev_priv))
- i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
-}
-
- if (dev_priv->perf.oa.test_config.id == 0)
+ sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
+
+ if (IS_TIGERLAKE(i915)) {
+ i915_perf_load_test_config_tgl(i915);
+ } else if (INTEL_GEN(i915) >= 11) {
+ i915_perf_load_test_config_icl(i915);
+ } else if (IS_CANNONLAKE(i915)) {
+ i915_perf_load_test_config_cnl(i915);
+ } else if (IS_COFFEELAKE(i915)) {
+ if (IS_CFL_GT2(i915))
+ i915_perf_load_test_config_cflgt2(i915);
+ if (IS_CFL_GT3(i915))
+ i915_perf_load_test_config_cflgt3(i915);
+ } else if (IS_GEMINILAKE(i915)) {
+ i915_perf_load_test_config_glk(i915);
+ } else if (IS_KABYLAKE(i915)) {
+ if (IS_KBL_GT2(i915))
+ i915_perf_load_test_config_kblgt2(i915);
+ else if (IS_KBL_GT3(i915))
+ i915_perf_load_test_config_kblgt3(i915);
+ } else if (IS_BROXTON(i915)) {
+ i915_perf_load_test_config_bxt(i915);
+ } else if (IS_SKYLAKE(i915)) {
+ if (IS_SKL_GT2(i915))
+ i915_perf_load_test_config_sklgt2(i915);
+ else if (IS_SKL_GT3(i915))
+ i915_perf_load_test_config_sklgt3(i915);
+ else if (IS_SKL_GT4(i915))
+ i915_perf_load_test_config_sklgt4(i915);
+ } else if (IS_CHERRYVIEW(i915)) {
+ i915_perf_load_test_config_chv(i915);
+ } else if (IS_BROADWELL(i915)) {
+ i915_perf_load_test_config_bdw(i915);
+ } else if (IS_HASWELL(i915)) {
+ i915_perf_load_test_config_hsw(i915);
+ }
+
+ if (perf->test_config.id == 0)
goto sysfs_error;
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ ret = sysfs_create_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
if (ret)
goto sysfs_error;
- atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
+ perf->test_config.perf = perf;
+ kref_init(&perf->test_config.ref);
goto exit;
sysfs_error:
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
exit:
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/**
* i915_perf_unregister - hide i915-perf from userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* i915-perf state cleanup is split up into an 'unregister' and
* 'deinit' phase where the interface is first hidden from
* userspace by i915_perf_unregister() before cleaning up
* remaining state in i915_perf_fini().
*/
-void i915_perf_unregister(struct drm_i915_private *dev_priv)
+void i915_perf_unregister(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.metrics_kobj)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj)
return;
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
}
-static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
static const i915_reg_t flex_eu_regs[] = {
EU_PERF_CNTL0,
@@ -3008,56 +3787,80 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
return false;
}
-static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+#define ADDR_IN_RANGE(addr, start, end) \
+ ((addr) >= (start) && \
+ (addr) <= (end))
+
+#define REG_IN_RANGE(addr, start, end) \
+ ((addr) >= i915_mmio_reg_offset(start) && \
+ (addr) <= i915_mmio_reg_offset(end))
+
+#define REG_EQUAL(addr, mmio) \
+ ((addr) == i915_mmio_reg_offset(mmio))
+
+static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
+}
+
+static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
- addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
- addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OACEC0_0) &&
- addr <= i915_mmio_reg_offset(OACEC7_1));
+ return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
+ REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
+ REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
+ REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
}
-static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
- (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
- addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
}
-static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
- (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
- addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+ return gen8_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
}
-static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen8_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
+ REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
+ REG_EQUAL(addr, HSW_MBVID2_MISR0);
}
-static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x25100 && addr <= 0x2FF90) ||
- (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
- addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
- addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
}
-static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x182300 && addr <= 0x1823A4);
+ return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
+ REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
+ REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
+ REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
+ REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
+}
+
+static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_EQUAL(addr, NOA_WRITE) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_EQUAL(addr, RPM_CONFIG0) ||
+ REG_EQUAL(addr, RPM_CONFIG1) ||
+ REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
}
static u32 mask_reg_value(u32 reg, u32 val)
@@ -3066,21 +3869,21 @@ static u32 mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
+ if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
+ if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
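
For context on why stripping a single _MASKED_BIT_ENABLE() is enough in mask_reg_value(): both registers handled there are "masked" registers, where the upper 16 bits of a write select which of the lower 16 bits actually take effect. A sketch of the usual i915 helpers (assumed here, they are not part of this hunk):

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)

	/*
	 * val &= ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE) clears both the
	 * value bit and its write-enable bit, so an OA config submitted by
	 * userspace can never toggle that bit, whatever it asked for.
	 */
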
-static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
- bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
+ bool (*is_valid)(struct i915_perf *perf, u32 addr),
u32 __user *regs,
u32 n_regs)
{
@@ -3110,7 +3913,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (err)
goto addr_err;
- if (!is_valid(dev_priv, addr)) {
+ if (!is_valid(perf, addr)) {
DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
err = -EINVAL;
goto addr_err;
@@ -3143,7 +3946,7 @@ static ssize_t show_dynamic_id(struct device *dev,
return sprintf(buf, "%d\n", oa_config->id);
}
-static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
struct i915_oa_config *oa_config)
{
sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
@@ -3158,7 +3961,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
oa_config->sysfs_metric.name = oa_config->uuid;
oa_config->sysfs_metric.attrs = oa_config->attrs;
- return sysfs_create_group(dev_priv->perf.metrics_kobj,
+ return sysfs_create_group(perf->metrics_kobj,
&oa_config->sysfs_metric);
}
@@ -3178,17 +3981,18 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
+ struct i915_oa_reg *regs;
int err, id;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -3211,7 +4015,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- atomic_set(&oa_config->ref_count, 1);
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
if (!uuid_is_valid(args->uuid)) {
DRM_DEBUG("Invalid uuid format for OA config\n");
@@ -3225,59 +4030,59 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
oa_config->mux_regs_len = args->n_mux_regs;
- oa_config->mux_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_mux_reg,
- u64_to_user_ptr(args->mux_regs_ptr),
- args->n_mux_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_mux_reg,
+ u64_to_user_ptr(args->mux_regs_ptr),
+ args->n_mux_regs);
- if (IS_ERR(oa_config->mux_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for mux_regs\n");
- err = PTR_ERR(oa_config->mux_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->mux_regs = regs;
oa_config->b_counter_regs_len = args->n_boolean_regs;
- oa_config->b_counter_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_b_counter_reg,
- u64_to_user_ptr(args->boolean_regs_ptr),
- args->n_boolean_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_b_counter_reg,
+ u64_to_user_ptr(args->boolean_regs_ptr),
+ args->n_boolean_regs);
- if (IS_ERR(oa_config->b_counter_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
- err = PTR_ERR(oa_config->b_counter_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->b_counter_regs = regs;
- if (INTEL_GEN(dev_priv) < 8) {
+ if (INTEL_GEN(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
}
} else {
oa_config->flex_regs_len = args->n_flex_regs;
- oa_config->flex_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_flex_reg,
- u64_to_user_ptr(args->flex_regs_ptr),
- args->n_flex_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_flex_reg,
+ u64_to_user_ptr(args->flex_regs_ptr),
+ args->n_flex_regs);
- if (IS_ERR(oa_config->flex_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for flex_regs\n");
- err = PTR_ERR(oa_config->flex_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->flex_regs = regs;
}
- err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ err = mutex_lock_interruptible(&perf->metrics_lock);
if (err)
goto reg_err;
/* We shouldn't have too many configs, so this iteration shouldn't be
* too costly.
*/
- idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
if (!strcmp(tmp->uuid, oa_config->uuid)) {
DRM_DEBUG("OA config already exists with this uuid\n");
err = -EADDRINUSE;
@@ -3285,14 +4090,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
}
}
- err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+ err = create_dynamic_oa_sysfs_entry(perf, oa_config);
if (err) {
DRM_DEBUG("Failed to create sysfs entry for OA config\n");
goto sysfs_err;
}
/* Config id 0 is invalid, id 1 for kernel stored test config. */
- oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+ oa_config->id = idr_alloc(&perf->metrics_idr,
oa_config, 2,
0, GFP_KERNEL);
if (oa_config->id < 0) {
@@ -3301,16 +4106,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
goto sysfs_err;
}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
return oa_config->id;
sysfs_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
reg_err:
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
DRM_DEBUG("Failed to add new OA config\n");
return err;
}
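
For reference, a hedged userspace sketch of driving this ioctl; error handling is trimmed and the register payload is a placeholder rather than a valid OA configuration (validity is ultimately decided by the per-generation is_valid_* hooks above):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* mux_regs is a flat array of (mmio address, value) u32 pairs. */
	static int add_oa_config(int drm_fd, const char uuid[36],
				 const uint32_t *mux_regs, uint32_t n_mux_regs)
	{
		struct drm_i915_perf_oa_config config;

		memset(&config, 0, sizeof(config));
		memcpy(config.uuid, uuid, sizeof(config.uuid)); /* 36 chars, no NUL */

		config.n_mux_regs = n_mux_regs;
		config.mux_regs_ptr = (uintptr_t)mux_regs;
		/* boolean and flex register lists left empty in this sketch */

		/* On success the ioctl returns the new metrics-set id (>= 2). */
		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
	}

The returned id can be used as DRM_I915_PERF_PROP_OA_METRICS_SET when opening a stream, or handed back to the remove ioctl below.
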
@@ -3329,12 +4134,12 @@ reg_err:
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
u64 *arg = data;
struct i915_oa_config *oa_config;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3344,31 +4149,33 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -EACCES;
}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ ret = mutex_lock_interruptible(&perf->metrics_lock);
if (ret)
- goto lock_err;
+ return ret;
- oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+ oa_config = idr_find(&perf->metrics_idr, *arg);
if (!oa_config) {
DRM_DEBUG("Failed to remove unknown OA config\n");
ret = -ENOENT;
- goto config_err;
+ goto err_unlock;
}
GEM_BUG_ON(*arg != oa_config->id);
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &oa_config->sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
+
+ idr_remove(&perf->metrics_idr, *arg);
- idr_remove(&dev_priv->perf.metrics_idr, *arg);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
-config_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
-lock_err:
+ return 0;
+
+err_unlock:
+ mutex_unlock(&perf->metrics_lock);
return ret;
}
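
The matching teardown, reusing the headers from the previous sketch, simply passes the metrics-set id back as the single u64 argument consumed by the u64 *arg = data line above:

	static int remove_oa_config(int drm_fd, uint64_t config_id)
	{
		/* config_id: the value returned by DRM_IOCTL_I915_PERF_ADD_CONFIG */
		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
	}
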
@@ -3415,140 +4222,209 @@ static struct ctl_table dev_root[] = {
};
/**
- * i915_perf_init - initialize i915-perf state on module load
- * @dev_priv: i915 device instance
+ * i915_perf_init - initialize i915-perf state on module bind
+ * @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
*
* Note: i915-perf initialization is split into an 'init' and 'register'
* phase with the i915_perf_register() exposing state to userspace.
*/
-void i915_perf_init(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
- hsw_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.oa.ops.read = gen7_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read =
- gen7_oa_hw_tail_read;
-
- dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
+void i915_perf_init(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ /* XXX const struct i915_perf_ops! */
+
+ if (IS_HASWELL(i915)) {
+ perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg = NULL;
+ perf->ops.enable_metric_set = hsw_enable_metric_set;
+ perf->ops.disable_metric_set = hsw_disable_metric_set;
+ perf->ops.oa_enable = gen7_oa_enable;
+ perf->ops.oa_disable = gen7_oa_disable;
+ perf->ops.read = gen7_oa_read;
+ perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
+
+ perf->oa_formats = hsw_oa_formats;
+ } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
 /* Note that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
+ perf->ops.read = gen8_oa_read;
- dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.oa.ops.read = gen8_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ if (IS_GEN_RANGE(i915, 8, 9)) {
+ perf->oa_formats = gen8_plus_oa_formats;
- if (IS_GEN_RANGE(dev_priv, 8, 9)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ if (IS_CHERRYVIEW(i915)) {
+ perf->ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 8)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
+ if (IS_GEN(i915, 8)) {
+ perf->ctx_oactxctrl_offset = 0x120;
+ perf->ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
+ perf->gen8_valid_ctx_bit = BIT(25);
} else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ perf->gen8_valid_ctx_bit = BIT(16);
}
- } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ } else if (IS_GEN_RANGE(i915, 10, 11)) {
+ perf->oa_formats = gen8_plus_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 10)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ if (IS_GEN(i915, 10)) {
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
} else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+ perf->ctx_oactxctrl_offset = 0x124;
+ perf->ctx_flexeu0_offset = 0x78e;
}
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ perf->gen8_valid_ctx_bit = BIT(16);
+ } else if (IS_GEN(i915, 12)) {
+ perf->oa_formats = gen12_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
+ gen12_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg =
+ gen12_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ perf->ops.oa_enable = gen12_oa_enable;
+ perf->ops.oa_disable = gen12_oa_disable;
+ perf->ops.enable_metric_set = gen12_enable_metric_set;
+ perf->ops.disable_metric_set = gen12_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
+
+ perf->ctx_flexeu0_offset = 0;
+ perf->ctx_oactxctrl_offset = 0x144;
}
}
- if (dev_priv->perf.oa.ops.enable_metric_set) {
- hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
- init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
-
- INIT_LIST_HEAD(&dev_priv->perf.streams);
- mutex_init(&dev_priv->perf.lock);
- spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
+ if (perf->ops.enable_metric_set) {
+ mutex_init(&perf->lock);
oa_sample_rate_hard_limit = 1000 *
- (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
- dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+ (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
- mutex_init(&dev_priv->perf.metrics_lock);
- idr_init(&dev_priv->perf.metrics_idr);
+ mutex_init(&perf->metrics_lock);
+ idr_init(&perf->metrics_idr);
- dev_priv->perf.initialized = true;
+ /* We set up some ratelimit state to potentially throttle any
+ * _NOTES about spurious, invalid OA reports which we don't
+ * forward to userspace.
+ *
+ * We print a _NOTE about any throttling when closing the
+ * stream instead of waiting until driver _fini which no one
+ * would ever see.
+ *
+ * Using the same limiting factors as printk_ratelimit()
+ */
+ ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
+ /* Since we use a DRM_NOTE for spurious reports it would be
+ * inconsistent to let __ratelimit() automatically print a
+ * warning for throttling.
+ */
+ ratelimit_set_flags(&perf->spurious_report_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ atomic64_set(&perf->noa_programming_delay,
+ 500 * 1000 /* 500us */);
+
+ perf->i915 = i915;
}
}
static int destroy_config(int id, void *p, void *data)
{
- struct drm_i915_private *dev_priv = data;
- struct i915_oa_config *oa_config = p;
+ i915_oa_config_put(p);
+ return 0;
+}
- put_oa_config(dev_priv, oa_config);
+void i915_perf_sysctl_register(void)
+{
+ sysctl_header = register_sysctl_table(dev_root);
+}
- return 0;
+void i915_perf_sysctl_unregister(void)
+{
+ unregister_sysctl_table(sysctl_header);
}
/**
* i915_perf_fini - Counter part to i915_perf_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*/
-void i915_perf_fini(struct drm_i915_private *dev_priv)
+void i915_perf_fini(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.initialized)
- return;
+ struct i915_perf *perf = &i915->perf;
- idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
- idr_destroy(&dev_priv->perf.metrics_idr);
+ if (!perf->i915)
+ return;
- unregister_sysctl_table(dev_priv->perf.sysctl_header);
+ idr_for_each(&perf->metrics_idr, destroy_config, perf);
+ idr_destroy(&perf->metrics_idr);
- memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+ memset(&perf->ops, 0, sizeof(perf->ops));
+ perf->i915 = NULL;
+}
- dev_priv->perf.initialized = false;
+/**
+ * i915_perf_ioctl_version - Version of the i915-perf subsystem
+ *
+ * This version number is used by userspace to detect available features.
+ */
+int i915_perf_ioctl_version(void)
+{
+ /*
+ * 1: Initial version
+ * I915_PERF_IOCTL_ENABLE
+ * I915_PERF_IOCTL_DISABLE
+ *
+ * 2: Added runtime modification of OA config.
+ * I915_PERF_IOCTL_CONFIG
+ *
+ * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
+ * preemption on a particular context so that performance data is
+ * accessible from a delta of MI_RPC reports without looking at the
+ * OA buffer.
+ */
+ return 3;
}
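
Userspace is expected to discover this revision through the getparam ioctl. A hedged sketch, assuming the value is exposed as I915_PARAM_PERF_REVISION (the parameter name is an assumption; it is not part of this diff):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int perf_revision(int drm_fd)
	{
		struct drm_i915_getparam gp = { 0 };
		int value = 0;

		gp.param = I915_PARAM_PERF_REVISION;	/* assumed parameter name */
		gp.value = &value;

		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;

		return value;	/* 1, 2 or 3 per the list above */
	}
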
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_perf.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
new file mode 100644
index 000000000000..882fdd0a7680
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_PERF_H__
+#define __I915_PERF_H__
+
+#include <linux/kref.h>
+#include <linux/types.h>
+
+#include "i915_perf_types.h"
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_private;
+struct i915_oa_config;
+struct intel_context;
+struct intel_engine_cs;
+
+void i915_perf_init(struct drm_i915_private *i915);
+void i915_perf_fini(struct drm_i915_private *i915);
+void i915_perf_register(struct drm_i915_private *i915);
+void i915_perf_unregister(struct drm_i915_private *i915);
+int i915_perf_ioctl_version(void);
+void i915_perf_sysctl_register(void);
+void i915_perf_sysctl_unregister(void);
+
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set);
+
+static inline struct i915_oa_config *
+i915_oa_config_get(struct i915_oa_config *oa_config)
+{
+ if (kref_get_unless_zero(&oa_config->ref))
+ return oa_config;
+ else
+ return NULL;
+}
+
+void i915_oa_config_release(struct kref *ref);
+static inline void i915_oa_config_put(struct i915_oa_config *oa_config)
+{
+ if (!oa_config)
+ return;
+
+ kref_put(&oa_config->ref, i915_oa_config_release);
+}
+
+#endif /* __I915_PERF_H__ */
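
As a usage sketch of the get/put pair above (this is not the driver's i915_perf_get_oa_config(), just an illustration assuming configs are freed via their rcu_head so an RCU-protected lookup is safe): a lookup takes a reference with i915_oa_config_get(), which gracefully returns NULL once the last reference is gone, and every successful lookup is balanced with i915_oa_config_put():

	#include <linux/idr.h>
	#include <linux/rcupdate.h>

	#include "i915_perf.h"

	static struct i915_oa_config *
	lookup_oa_config(struct i915_perf *perf, int metrics_set)
	{
		struct i915_oa_config *oa_config;

		rcu_read_lock();
		oa_config = idr_find(&perf->metrics_idr, metrics_set);
		if (oa_config)
			oa_config = i915_oa_config_get(oa_config); /* NULL if dying */
		rcu_read_unlock();

		return oa_config; /* caller must i915_oa_config_put() when done */
	}
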
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
new file mode 100644
index 000000000000..45e581455f5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_PERF_TYPES_H_
+#define _I915_PERF_TYPES_H_
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/llist.h>
+#include <linux/poll.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/wait.h>
+
+#include "i915_reg.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct file;
+struct i915_gem_context;
+struct i915_perf;
+struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
+struct i915_oa_format {
+ u32 format;
+ int size;
+};
+
+struct i915_oa_reg {
+ i915_reg_t addr;
+ u32 value;
+};
+
+struct i915_oa_config {
+ struct i915_perf *perf;
+
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ struct kref ref;
+ struct rcu_head rcu;
+};
+
+struct i915_perf_stream;
+
+/**
+ * struct i915_perf_stream_ops - the OPs to support a specific stream type
+ */
+struct i915_perf_stream_ops {
+ /**
+ * @enable: Enables the collection of HW samples, either in response to
+ * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
+ * without `I915_PERF_FLAG_DISABLED`.
+ */
+ void (*enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable: Disables the collection of HW samples, either in response
+ * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
+ * the stream.
+ */
+ void (*disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @poll_wait: Call poll_wait, passing a wait queue that will be woken
+ * once there is something ready to read() for the stream
+ */
+ void (*poll_wait)(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait);
+
+ /**
+ * @wait_unlocked: For handling a blocking read, wait until there is
+	 * something ready to read() for the stream. E.g. wait on the same
+ * wait queue that would be passed to poll_wait().
+ */
+ int (*wait_unlocked)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy buffered metrics as records to userspace
+	 * **buf**: the userspace destination buffer
+ * **count**: the number of bytes to copy, requested by userspace
+ * **offset**: zero at the start of the read, updated as the read
+	 * proceeds; it represents how many bytes have been copied so far and
+ * the buffer offset for copying the next record.
+ *
+ * Copy as many buffered i915 perf samples and records for this stream
+ * to userspace as will fit in the given buffer.
+ *
+	 * Only write complete records, returning -%ENOSPC if there isn't room
+ * for a complete record.
+ *
+ * Return any error condition that results in a short read such as
+ * -%ENOSPC or -%EFAULT, even though these may be squashed before
+ * returning to userspace.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @destroy: Cleanup any stream specific resources.
+ *
+ * The stream will always be disabled before this is called.
+ */
+ void (*destroy)(struct i915_perf_stream *stream);
+};
+
+/**
+ * struct i915_perf_stream - state for a single open stream FD
+ */
+struct i915_perf_stream {
+ /**
+ * @perf: i915_perf backpointer
+ */
+ struct i915_perf *perf;
+
+ /**
+ * @uncore: mmio access path
+ */
+ struct intel_uncore *uncore;
+
+ /**
+ * @engine: Engine associated with this performance stream.
+ */
+ struct intel_engine_cs *engine;
+
+ /**
+ * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties given when opening a stream, representing the contents
+ * of a single sample as read() by userspace.
+ */
+ u32 sample_flags;
+
+ /**
+ * @sample_size: Considering the configured contents of a sample
+ * combined with the required header size, this is the total size
+ * of a single sample record.
+ */
+ int sample_size;
+
+ /**
+	 * @ctx: %NULL if measuring system-wide across all contexts, otherwise
+	 * the specific context that is being monitored.
+ */
+ struct i915_gem_context *ctx;
+
+ /**
+ * @enabled: Whether the stream is currently enabled, considering
+ * whether the stream was opened in a disabled state and based
+ * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
+ */
+ bool enabled;
+
+ /**
+ * @hold_preemption: Whether preemption is put on hold for command
+ * submissions done on the @ctx. This is useful for some drivers that
+ * cannot easily post process the OA buffer context to subtract delta
+ * of performance counters not associated with @ctx.
+ */
+ bool hold_preemption;
+
+ /**
+ * @ops: The callbacks providing the implementation of this specific
+ * type of configured stream.
+ */
+ const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
+
+ /**
+ * @oa_config_bos: A list of struct i915_oa_config_bo allocated lazily
+ * each time @oa_config changes.
+ */
+ struct llist_head oa_config_bos;
+
+ /**
+ * @pinned_ctx: The OA context specific information.
+ */
+ struct intel_context *pinned_ctx;
+
+ /**
+ * @specific_ctx_id: The id of the specific context.
+ */
+ u32 specific_ctx_id;
+
+ /**
+	 * @specific_ctx_id_mask: The mask used to mask specific_ctx_id bits.
+ */
+ u32 specific_ctx_id_mask;
+
+ /**
+ * @poll_check_timer: High resolution timer that will periodically
+ * check for data in the circular OA buffer for notifying userspace
+ * (e.g. during a read() or poll()).
+ */
+ struct hrtimer poll_check_timer;
+
+ /**
+ * @poll_wq: The wait queue that hrtimer callback wakes when it
+ * sees data ready to read in the circular OA buffer.
+ */
+ wait_queue_head_t poll_wq;
+
+ /**
+ * @pollin: Whether there is data available to read.
+ */
+ bool pollin;
+
+ /**
+ * @periodic: Whether periodic sampling is currently enabled.
+ */
+ bool periodic;
+
+ /**
+ * @period_exponent: The OA unit sampling frequency is derived from this.
+ */
+ int period_exponent;
+
+ /**
+ * @oa_buffer: State of the OA buffer.
+ */
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ u32 last_ctx_id;
+ int format;
+ int format_size;
+ int size_exponent;
+
+ /**
+ * @ptr_lock: Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state needs to be read
+ * consistently from a hrtimer callback (atomic context) and
+ * read() fop (user context) with tail pointer updates happening
+ * in atomic context and head updates in user context and the
+ * (unlikely) possibility of read() errors needing to reset all
+ * head/tail state.
+ *
+ * Note: Contention/performance aren't currently a significant
+ * concern here considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that reads typically only
+ * happen in response to a hrtimer event and likely complete
+ * before the next callback.
+ *
+ * Note: This lock is not held *while* reading and copying data
+		 * to userspace so the value of head observed in hrtimer
+ * callbacks won't represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to
+		 * be used for reading.
+ *
+ * Initial values of 0xffffffff are invalid and imply that an
+ * update is required (and should be ignored by an attempted
+ * read)
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
+ * was read; used to determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * @head: Although we can always read back the head pointer register,
+ * we prefer to avoid trusting the HW state, just to avoid any
+		 * risk that some hardware condition could somehow bump the
+ * head pointer unpredictably and cause us to forward the wrong
+ * OA buffer data to userspace.
+ */
+ u32 head;
+ } oa_buffer;
+
+ /**
+ * @noa_wait: A batch buffer doing a wait on the GPU for the NOA logic to be
+ * reprogrammed.
+ */
+ struct i915_vma *noa_wait;
+};
+
+/**
+ * struct i915_oa_ops - Gen specific implementation of an OA unit stream
+ */
+struct i915_oa_ops {
+ /**
+ * @is_valid_b_counter_reg: Validates register's address for
+ * programming boolean counters for a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates register's address for programming mux
+ * for a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates register's address for programming
+ * flex EU filtering for a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @enable_metric_set: Selects and applies any MUX configuration to set
+ * up the Boolean and Custom (B/C) counters that are part of the
+ * counter reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+ * the OA unit.
+ */
+ void (*disable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_enable: Enable periodic sampling
+ */
+ void (*oa_enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_disable: Disable periodic sampling
+ */
+ void (*oa_disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy data from the circular OA buffer into a given userspace
+ * buffer.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @oa_hw_tail_read: read the OA tail pointer register
+ *
+ * In particular this enables us to share all the fiddly code for
+ * handling the OA unit tail pointer race that affects multiple
+ * generations.
+ */
+ u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
+};
+
+struct i915_perf {
+ struct drm_i915_private *i915;
+
+ struct kobject *metrics_kobj;
+
+ /*
+ * Lock associated with adding/modifying/removing OA configs
+ * in perf->metrics_idr.
+ */
+ struct mutex metrics_lock;
+
+ /*
+ * List of dynamic configurations (struct i915_oa_config), you
+ * need to hold perf->metrics_lock to access it.
+ */
+ struct idr metrics_idr;
+
+ /*
+ * Lock associated with anything below within this structure
+ * except exclusive_stream.
+ */
+ struct mutex lock;
+
+ /*
+	 * The stream currently using the OA unit, if any. May be
+	 * accessed outside of a syscall associated with its file
+	 * descriptor.
+ */
+ struct i915_perf_stream *exclusive_stream;
+
+ /**
+ * For rate limiting any notifications of spurious
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
+
+ struct i915_oa_config test_config;
+
+ u32 gen7_latched_oastatus1;
+ u32 ctx_oactxctrl_offset;
+ u32 ctx_flexeu0_offset;
+
+ /**
+ * The RPT_ID/reason field for Gen8+ includes a bit
+ * to determine if the CTX ID in the report is valid
+ * but the specific bit differs between Gen 8 and 9
+ */
+ u32 gen8_valid_ctx_bit;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+
+ atomic64_t noa_programming_delay;
+};
+
+#endif /* _I915_PERF_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8fe46ee920a0..ec0299490dd4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -8,6 +8,11 @@
#include <linux/pm_runtime.h>
#include "gt/intel_engine.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_pmu.h"
@@ -74,8 +79,9 @@ static unsigned int event_enabled_bit(struct perf_event *event)
return config_enabled_bit(event->attr.config);
}
-static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
u64 enable;
/*
@@ -83,7 +89,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* We start with a bitmask of all currently enabled events.
*/
- enable = i915->pmu.enable;
+ enable = pmu->enable;
/*
* Mask out all the ones which do not need the timer, or in
@@ -102,10 +108,8 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
/*
	 * Also, when there is software busyness tracking available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
- *
- * Use RCS as proxy for all engines.
*/
- else if (intel_engine_supports_stats(i915->engine[RCS0]))
+ else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@@ -114,42 +118,139 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
return enable;
}
+static u64 __get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ u64 val;
+
+ val = intel_rc6_residency_ns(&gt->rc6,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+#if IS_ENABLED(CONFIG_PM)
+
+static inline s64 ktime_since(const ktime_t kt)
+{
+ return ktime_to_ns(ktime_sub(ktime_get(), kt));
+}
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct i915_pmu *pmu = &i915->pmu;
+ unsigned long flags;
+ bool awake = false;
+ u64 val;
+
+ if (intel_gt_pm_get_if_awake(gt)) {
+ val = __get_rc6(gt);
+ intel_gt_pm_put_async(gt);
+ awake = true;
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (awake) {
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ /*
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ val = ktime_since(pmu->sleep_last);
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
+ }
+
+ if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+ val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
+ else
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return val;
+}
+
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+
+ pmu->sleep_last = ktime_get();
+}
+
+#else
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ return __get_rc6(gt);
+}
+
+static void park_rc6(struct drm_i915_private *i915) {}
+
+#endif
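
To see these samplers from the other side, a hedged userspace sketch of reading the RC6 residency counter through perf_event_open(2). The dynamic PMU type id comes from sysfs (plain "i915" here; with the is_igp()/pmu->name change further below, a non-integrated device would register under a name derived from its PCI address instead), and since this is an uncore, system-wide event it is bound to a CPU rather than a task. Sufficient privileges are assumed:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>
	#include <drm/i915_drm.h>

	static long open_rc6_counter(void)
	{
		struct perf_event_attr attr;
		FILE *f;
		int type;

		f = fopen("/sys/bus/event_source/devices/i915/type", "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &type) != 1) {
			fclose(f);
			return -1;
		}
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = I915_PMU_RC6_RESIDENCY;

		/* uncore event: no task (pid == -1), pinned to CPU 0 */
		return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	}

An 8-byte read() from the returned fd yields the accumulated RC6 time in nanoseconds, i.e. the value get_rc6() above computes (or estimates while the device is runtime suspended).
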
+
+static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
+{
+ if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
+ pmu->timer_enabled = true;
+ pmu->timer_last = ktime_get();
+ hrtimer_start_range_ns(&pmu->timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- spin_lock_irq(&i915->pmu.lock);
+ spin_lock_irq(&pmu->lock);
+
+ park_rc6(i915);
+
/*
* Signal sampling timer to stop if only engine events are enabled and
* GPU went idle.
*/
- i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
- spin_unlock_irq(&i915->pmu.lock);
-}
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
-static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
-{
- if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
- i915->pmu.timer_enabled = true;
- i915->pmu.timer_last = ktime_get();
- hrtimer_start_range_ns(&i915->pmu.timer,
- ns_to_ktime(PERIOD), 0,
- HRTIMER_MODE_REL_PINNED);
- }
+ spin_unlock_irq(&pmu->lock);
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- spin_lock_irq(&i915->pmu.lock);
+ spin_lock_irq(&pmu->lock);
+
/*
* Re-enable sampling timer when GPU goes active.
*/
- __i915_pmu_maybe_start_timer(i915);
- spin_unlock_irq(&i915->pmu.lock);
+ __i915_pmu_maybe_start_timer(pmu);
+
+ spin_unlock_irq(&pmu->lock);
}
static void
@@ -158,38 +259,59 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
sample->cur += val;
}
+static bool exclusive_mmio_access(const struct drm_i915_private *i915)
+{
+ /*
+ * We have to avoid concurrent mmio cache line access on gen7 or
+ * risk a machine hang. For a fun history lesson dig out the old
+ * userspace intel_gpu_top and run it on Ivybridge or Haswell!
+ */
+ return IS_GEN(i915, 7);
+}
+
static void
-engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long flags;
- if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- wakeref = 0;
- if (READ_ONCE(dev_priv->gt.awake))
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
+ if (!intel_gt_pm_is_awake(gt))
return;
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
+ spinlock_t *mmio_lock;
+ unsigned long flags;
bool busy;
u32 val;
- val = I915_READ_FW(RING_CTL(engine->mmio_base));
- if (val == 0) /* powerwell off => engine idle */
+ if (!intel_engine_pm_get_if_awake(engine))
continue;
+ mmio_lock = NULL;
+ if (exclusive_mmio_access(i915))
+ mmio_lock = &engine->uncore->lock;
+
+ if (unlikely(mmio_lock))
+ spin_lock_irqsave(mmio_lock, flags);
+
+ val = ENGINE_READ_FW(engine, RING_CTL);
+ if (val == 0) /* powerwell off => engine idle */
+ goto skip;
+
if (val & RING_WAIT)
add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ goto skip;
+
/*
* While waiting on a semaphore or event, MI_MODE reports the
* ring as idle. However, previously using the seqno, and with
@@ -199,15 +321,17 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
*/
busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
if (!busy) {
- val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+ val = ENGINE_READ_FW(engine, RING_MI_MODE);
busy = !(val & MODE_IDLE);
}
if (busy)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
- }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+skip:
+ if (unlikely(mmio_lock))
+ spin_unlock_irqrestore(mmio_lock, flags);
+ intel_engine_pm_put_async(engine);
+ }
}
static void
@@ -216,52 +340,74 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
sample->cur += mul_u32_u32(val, mul);
}
+static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+{
+ return pmu->enable &
+ (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+}
+
static void
-frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
- if (dev_priv->pmu.enable &
- config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
- u32 val;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct i915_pmu *pmu = &i915->pmu;
+ struct intel_rps *rps = &gt->rps;
- val = dev_priv->gt_pm.rps.cur_freq;
- if (dev_priv->gt.awake) {
- intel_wakeref_t wakeref;
+ if (!frequency_sampling_enabled(pmu))
+ return;
- with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
- wakeref) {
- val = intel_uncore_read_notrace(&dev_priv->uncore,
- GEN6_RPSTAT1);
- val = intel_get_cagf(dev_priv, val);
- }
- }
+ /* Report 0/0 (actual/requested) frequency while parked. */
+ if (!intel_gt_pm_get_if_awake(gt))
+ return;
- add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(dev_priv, val),
- period_ns / 1000);
+ if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ u32 val;
+
+ /*
+ * We take a quick peek here without using forcewake
+ * so that we don't perturb the system under observation
+ * (forcewake => !rc6 => increased power use). We expect
+ * that if the read fails because it is outside of the
+ * mmio power well, then it will return 0 -- in which
+ * case we assume the system is running at the intended
+ * frequency. Fortunately, the read should rarely fail!
+ */
+ val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
+ if (val)
+ val = intel_rps_get_cagf(rps, val);
+ else
+ val = rps->cur_freq;
+
+ add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(rps, val), period_ns / 1000);
}
- if (dev_priv->pmu.enable &
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq),
+ if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
}
+
+ intel_gt_pm_put_async(gt);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ struct i915_pmu *pmu = &i915->pmu;
+ struct intel_gt *gt = &i915->gt;
unsigned int period_ns;
ktime_t now;
- if (!READ_ONCE(i915->pmu.timer_enabled))
+ if (!READ_ONCE(pmu->timer_enabled))
return HRTIMER_NORESTART;
now = ktime_get();
- period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
- i915->pmu.timer_last = now;
+ period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
+ pmu->timer_last = now;
/*
* Strictly speaking the passed in period may not be 100% accurate for
@@ -269,8 +415,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
* grabbing the forcewake. However the potential error from timer call-
* back delay greatly dominates this so we keep it simple.
*/
- engines_sample(i915, period_ns);
- frequency_sample(i915, period_ns);
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
@@ -423,105 +569,11 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __get_rc6(struct drm_i915_private *i915)
-{
- u64 val;
-
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
-
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
-
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
-
- return val;
-}
-
-static u64 get_rc6(struct drm_i915_private *i915)
-{
-#if IS_ENABLED(CONFIG_PM)
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t wakeref;
- unsigned long flags;
- u64 val;
-
- wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (wakeref) {
- val = __get_rc6(i915);
- intel_runtime_pm_put(rpm, wakeref);
-
- /*
- * If we are coming back from being runtime suspended we must
- * be careful not to report a larger value than returned
- * previously.
- */
-
- spin_lock_irqsave(&i915->pmu.lock, flags);
-
- if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
- } else {
- val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- }
-
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
- } else {
- struct device *kdev = rpm->kdev;
-
- /*
- * We are runtime suspended.
- *
- * Report the delta from when the device was suspended to now,
- * on top of the last known real value, as the approximated RC6
- * counter value.
- */
- spin_lock_irqsave(&i915->pmu.lock, flags);
-
- /*
- * After the above branch intel_runtime_pm_get_if_in_use failed
- * to get the runtime PM reference we cannot assume we are in
- * runtime suspend since we can either: a) race with coming out
- * of it before we took the power.lock, or b) there are other
- * states than suspended which can bring us here.
- *
- * We need to double-check that we are indeed currently runtime
- * suspended and if not we cannot do better than report the last
- * known RC6 value.
- */
- if (pm_runtime_status_suspended(kdev)) {
- val = pm_runtime_suspended_time(kdev);
-
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_time_last = val;
-
- val -= i915->pmu.suspended_time_last;
- val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
-
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- } else {
- val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
- }
-
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
- }
-
- return val;
-#else
- return __get_rc6(i915);
-#endif
-}
-
static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = &i915->pmu;
u64 val = 0;
if (is_engine_event(event)) {
@@ -544,19 +596,19 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
switch (event->attr.config) {
case I915_PMU_ACTUAL_FREQUENCY:
val =
- div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+ div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
- div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+ div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(i915);
+ val = get_rc6(&i915->gt);
break;
}
}
@@ -584,24 +636,35 @@ static void i915_pmu_enable(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
+ struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ spin_lock_irqsave(&pmu->lock, flags);
/*
* Update the bitmask of enabled events and increment
* the event reference counter.
*/
- BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
- GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
- GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
- i915->pmu.enable |= BIT_ULL(bit);
- i915->pmu.enable_count[bit]++;
+ BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+
+ if (pmu->enable_count[bit] == 0 &&
+ config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sleep_last = ktime_get();
+ }
+
+ pmu->enable |= BIT_ULL(bit);
+ pmu->enable_count[bit]++;
/*
* Start the sampling timer if needed and not already enabled.
*/
- __i915_pmu_maybe_start_timer(i915);
+ __i915_pmu_maybe_start_timer(pmu);
/*
* For per-engine events the bitmask and reference counting
@@ -627,7 +690,7 @@ static void i915_pmu_enable(struct perf_event *event)
engine->pmu.enable_count[sample]++;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
/*
* Store the current counter value so we can report the correct delta
@@ -635,6 +698,8 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
@@ -642,9 +707,10 @@ static void i915_pmu_disable(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
+ struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
u8 sample = engine_event_sample(event);
@@ -666,18 +732,18 @@ static void i915_pmu_disable(struct perf_event *event)
engine->pmu.enable &= ~BIT(sample);
}
- GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
- GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == 0);
/*
* Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away.
*/
- if (--i915->pmu.enable_count[bit] == 0) {
- i915->pmu.enable &= ~BIT_ULL(bit);
- i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+ if (--pmu->enable_count[bit] == 0) {
+ pmu->enable &= ~BIT_ULL(bit);
+ pmu->timer_enabled &= pmu_needs_timer(pmu, true);
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
@@ -826,15 +892,16 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
}
static struct attribute **
-create_event_attributes(struct drm_i915_private *i915)
+create_event_attributes(struct i915_pmu *pmu)
{
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
static const struct {
u64 config;
const char *name;
const char *unit;
} events[] = {
- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
};
@@ -851,7 +918,6 @@ create_event_attributes(struct drm_i915_private *i915)
struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
struct attribute **attr = NULL, **attr_iter;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
unsigned int i;
/* Count how many counters we will be exposing. */
@@ -860,7 +926,7 @@ create_event_attributes(struct drm_i915_private *i915)
count++;
}
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
if (!engine_event_status(engine,
engine_events[i].sample))
@@ -911,7 +977,7 @@ create_event_attributes(struct drm_i915_private *i915)
}
/* Initialize supported engine counters. */
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
char *str;
@@ -928,7 +994,7 @@ create_event_attributes(struct drm_i915_private *i915)
i915_iter =
add_i915_attr(i915_iter, str,
__I915_PMU_ENGINE(engine->uabi_class,
- engine->instance,
+ engine->uabi_instance,
engine_events[i].sample));
str = kasprintf(GFP_KERNEL, "%s-%s.unit",
@@ -941,8 +1007,8 @@ create_event_attributes(struct drm_i915_private *i915)
}
}
- i915->pmu.i915_attr = i915_attr;
- i915->pmu.pmu_attr = pmu_attr;
+ pmu->i915_attr = i915_attr;
+ pmu->pmu_attr = pmu_attr;
return attr;
@@ -958,7 +1024,7 @@ err_alloc:
return NULL;
}
-static void free_event_attributes(struct drm_i915_private *i915)
+static void free_event_attributes(struct i915_pmu *pmu)
{
struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
@@ -966,12 +1032,12 @@ static void free_event_attributes(struct drm_i915_private *i915)
kfree((*attr_iter)->name);
kfree(i915_pmu_events_attr_group.attrs);
- kfree(i915->pmu.i915_attr);
- kfree(i915->pmu.pmu_attr);
+ kfree(pmu->i915_attr);
+ kfree(pmu->pmu_attr);
i915_pmu_events_attr_group.attrs = NULL;
- i915->pmu.i915_attr = NULL;
- i915->pmu.pmu_attr = NULL;
+ pmu->i915_attr = NULL;
+ pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
@@ -1008,7 +1074,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
int ret;
@@ -1021,7 +1087,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+ ret = cpuhp_state_add_instance(slot, &pmu->node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
@@ -1031,72 +1097,104 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return 0;
}
-static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+ WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
cpuhp_remove_multi_state(cpuhp_slot);
}
+static bool is_igp(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /* IGP is 0000:00:02.0 */
+ return pci_domain_nr(pdev->bus) == 0 &&
+ pdev->bus->number == 0 &&
+ PCI_SLOT(pdev->devfn) == 2 &&
+ PCI_FUNC(pdev->devfn) == 0;
+}
+
void i915_pmu_register(struct drm_i915_private *i915)
{
- int ret;
+ struct i915_pmu *pmu = &i915->pmu;
+ int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
- DRM_INFO("PMU not supported for this GPU.");
+ dev_info(i915->drm.dev, "PMU not supported for this GPU.");
return;
}
- i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
- if (!i915_pmu_events_attr_group.attrs) {
- ret = -ENOMEM;
- goto err;
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+
+ if (!is_igp(i915)) {
+ pmu->name = kasprintf(GFP_KERNEL,
+ "i915_%s",
+ dev_name(i915->drm.dev));
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
+ pmu->name = "i915";
}
+ if (!pmu->name)
+ goto err;
- i915->pmu.base.attr_groups = i915_pmu_attr_groups;
- i915->pmu.base.task_ctx_nr = perf_invalid_context;
- i915->pmu.base.event_init = i915_pmu_event_init;
- i915->pmu.base.add = i915_pmu_event_add;
- i915->pmu.base.del = i915_pmu_event_del;
- i915->pmu.base.start = i915_pmu_event_start;
- i915->pmu.base.stop = i915_pmu_event_stop;
- i915->pmu.base.read = i915_pmu_event_read;
- i915->pmu.base.event_idx = i915_pmu_event_event_idx;
-
- spin_lock_init(&i915->pmu.lock);
- hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- i915->pmu.timer.function = i915_sample;
-
- ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+ i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
+ if (!i915_pmu_events_attr_group.attrs)
+ goto err_name;
+
+ pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = i915_pmu_event_init;
+ pmu->base.add = i915_pmu_event_add;
+ pmu->base.del = i915_pmu_event_del;
+ pmu->base.start = i915_pmu_event_start;
+ pmu->base.stop = i915_pmu_event_stop;
+ pmu->base.read = i915_pmu_event_read;
+ pmu->base.event_idx = i915_pmu_event_event_idx;
+
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err;
+ goto err_attr;
- ret = i915_pmu_register_cpuhp_state(i915);
+ ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
goto err_unreg;
return;
err_unreg:
- perf_pmu_unregister(&i915->pmu.base);
+ perf_pmu_unregister(&pmu->base);
+err_attr:
+ pmu->base.event_init = NULL;
+ free_event_attributes(pmu);
+err_name:
+ if (!is_igp(i915))
+ kfree(pmu->name);
err:
- i915->pmu.base.event_init = NULL;
- free_event_attributes(i915);
- DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+ dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- WARN_ON(i915->pmu.enable);
+ WARN_ON(pmu->enable);
- hrtimer_cancel(&i915->pmu.timer);
+ hrtimer_cancel(&pmu->timer);
- i915_pmu_unregister_cpuhp_state(i915);
+ i915_pmu_unregister_cpuhp_state(pmu);
- perf_pmu_unregister(&i915->pmu.base);
- i915->pmu.base.event_init = NULL;
- free_event_attributes(i915);
+ perf_pmu_unregister(&pmu->base);
+ pmu->base.event_init = NULL;
+ if (!is_igp(i915))
+ kfree(pmu->name);
+ free_event_attributes(pmu);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 4fc4f2478301..6c1647c5daf2 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -18,7 +18,7 @@ enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
__I915_SAMPLE_RC6,
- __I915_SAMPLE_RC6_ESTIMATED,
+ __I915_SAMPLE_RC6_LAST_REPORTED,
__I915_NUM_PMU_SAMPLERS
};
@@ -47,6 +47,10 @@ struct i915_pmu {
*/
struct pmu base;
/**
+ * @name: Name as registered with perf core.
+ */
+ const char *name;
+ /**
* @lock: Lock protecting enable mask and ref count handling.
*/
spinlock_t lock;
@@ -97,9 +101,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_time_last: Cached suspend time from PM core.
+ * @sleep_last: Last time GT parked for RC6 estimation.
*/
- u64 suspended_time_last;
+ ktime_t sleep_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 49709de69875..732aad148881 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -17,7 +17,11 @@ enum {
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
- I915_PRIORITY_INVALID = INT_MIN
+ /* A preemptive pulse used to monitor the health of each engine */
+ I915_PRIORITY_HEARTBEAT,
+
+ /* Interactive workload, scheduled for immediate pageflipping */
+ I915_PRIORITY_DISPLAY,
};
#define I915_USER_PRIORITY_SHIFT 2
@@ -29,6 +33,20 @@ enum {
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(1))
+/* Smallest priority value that cannot be bumped. */
+#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
+
+/*
+ * Requests containing performance queries must not be preempted by
+ * another context. They get scheduled with their default priority and,
+ * once they reach the execlist ports, we ensure that they stick on the
+ * HW until finished by pretending that they have maximum priority,
+ * i.e. nothing can have higher priority and force us to usurp the
+ * active request.
+ */
+#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+#define I915_PRIORITY_BARRIER INT_MAX
+
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
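A hedged sanity check of the "cannot be bumped" property of I915_PRIORITY_INVALID above, assuming I915_PRIORITY_MASK covers the low I915_USER_PRIORITY_SHIFT bits (0x3) and that priority bumps are OR-ed into those bits, as the WAIT/NOSEMAPHORE flags suggest:

#include <assert.h>
#include <limits.h>

int main(void)
{
	/* I915_PRIORITY_INVALID with the assumed mask value of 0x3 */
	int invalid = INT_MIN | 0x3;

	/* OR-ing in either internal bump bit cannot raise it any further. */
	assert((invalid | 0x1) == invalid);	/* I915_PRIORITY_WAIT */
	assert((invalid | 0x2) == invalid);	/* I915_PRIORITY_NOSEMAPHORE */
	return 0;
}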
struct i915_priolist {
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 969e514916ab..683e97ac2430 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -24,6 +24,8 @@
#ifndef _I915_PVINFO_H_
#define _I915_PVINFO_H_
+#include <linux/types.h>
+
/* The MMIO offset of the shared info between guest and host emulator */
#define VGT_PVINFO_PAGE 0x78000
#define VGT_PVINFO_SIZE 0x1000
@@ -110,8 +112,9 @@ struct vgt_if {
u32 rsv7[0x200 - 24]; /* pad to one page */
} __packed;
-#define vgtif_reg(x) \
- _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+#define vgtif_offset(x) (offsetof(struct vgt_if, x))
+
+#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x))
/* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 7b7016171057..ef25ce6e395e 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -7,6 +7,7 @@
#include <linux/nospec.h>
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -37,8 +38,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
- u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
@@ -50,8 +49,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * subslice_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
+ subslice_length = sseu->max_slices * sseu->ss_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
@@ -69,9 +68,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = subslice_stride;
+ topo.subslice_stride = sseu->ss_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = eu_stride;
+ topo.eu_stride = sseu->eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -104,16 +103,18 @@ query_engine_info(struct drm_i915_private *i915,
struct drm_i915_engine_info __user *info_ptr;
struct drm_i915_query_engine_info query;
struct drm_i915_engine_info info = { };
+ unsigned int num_uabi_engines = 0;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int len, ret;
if (query_item->flags)
return -EINVAL;
+ for_each_uabi_engine(engine, i915)
+ num_uabi_engines++;
+
len = sizeof(struct drm_i915_query_engine_info) +
- RUNTIME_INFO(i915)->num_engines *
- sizeof(struct drm_i915_engine_info);
+ num_uabi_engines * sizeof(struct drm_i915_engine_info);
ret = copy_query_item(&query, sizeof(query), len, query_item);
if (ret != 0)
@@ -125,9 +126,9 @@ query_engine_info(struct drm_i915_private *i915,
info_ptr = &query_ptr->engines[0];
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
info.engine.engine_class = engine->uabi_class;
- info.engine.engine_instance = engine->instance;
+ info.engine.engine_instance = engine->uabi_instance;
info.capabilities = engine->uabi_capabilities;
if (__copy_to_user(info_ptr, &info, sizeof(info)))
@@ -143,10 +144,305 @@ query_engine_info(struct drm_i915_private *i915,
return len;
}
+static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
+ u64 user_regs_ptr,
+ u32 kernel_n_regs)
+{
+ /*
+ * We'll just report the number of registers, and won't copy the
+ * registers themselves.
+ */
+ if (user_n_regs == 0)
+ return 0;
+
+ if (user_n_regs < kernel_n_regs)
+ return -EINVAL;
+
+ if (!access_ok(u64_to_user_ptr(user_regs_ptr),
+ 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
+ u32 kernel_n_regs,
+ u64 user_regs_ptr,
+ u32 *user_n_regs)
+{
+ u32 r;
+
+ if (*user_n_regs == 0) {
+ *user_n_regs = kernel_n_regs;
+ return 0;
+ }
+
+ *user_n_regs = kernel_n_regs;
+
+ for (r = 0; r < kernel_n_regs; r++) {
+ u32 __user *user_reg_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
+ u32 __user *user_val_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
+ sizeof(u32));
+ int ret;
+
+ ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ user_reg_ptr);
+ if (ret)
+ return -EFAULT;
+
+ ret = __put_user(kernel_regs[r].value, user_val_ptr);
+ if (ret)
+ return -EFAULT;
+ }
+
+ return 0;
+}
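The pair of helpers above implements a count-then-copy handshake for each register array: a first pass with the user count set to 0 only reports how many registers there are, and a second pass with enough space receives the (offset, value) pairs. A toy, self-contained model of that flow (register offsets and values made up, no uapi involved):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void copy_regs_or_number(const uint32_t (*kregs)[2], uint32_t kn,
				uint32_t (*uregs)[2], uint32_t *user_n)
{
	uint32_t want = *user_n;

	*user_n = kn;
	if (want == 0)
		return;		/* caller only asked for the count */
	for (uint32_t r = 0; r < kn; r++) {
		uregs[r][0] = kregs[r][0];	/* register offset */
		uregs[r][1] = kregs[r][1];	/* register value */
	}
}

int main(void)
{
	const uint32_t kernel_regs[][2] = { { 0x2710, 0x1 }, { 0x2714, 0x0 } };
	uint32_t n = 0;
	uint32_t (*user_regs)[2];

	copy_regs_or_number(kernel_regs, 2, NULL, &n);		/* 1st: count */
	user_regs = calloc(n, sizeof(*user_regs));
	if (!user_regs)
		return 1;
	copy_regs_or_number(kernel_regs, 2, user_regs, &n);	/* 2nd: copy */
	printf("%u registers, first offset 0x%x\n", n, user_regs[0][0]);
	free(user_regs);
	return 0;
}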
+
+static int query_perf_config_data(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item,
+ bool use_uuid)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_perf_oa_config __user *user_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr +
+ sizeof(struct drm_i915_query_perf_config));
+ struct drm_i915_perf_oa_config user_config;
+ struct i915_perf *perf = &i915->perf;
+ struct i915_oa_config *oa_config;
+ char uuid[UUID_STRING_LEN + 1];
+ u64 config_id;
+ u32 flags, total_size;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ total_size =
+ sizeof(struct drm_i915_query_perf_config) +
+ sizeof(struct drm_i915_perf_oa_config);
+
+ if (query_item->length == 0)
+ return total_size;
+
+ if (query_item->length < total_size) {
+ DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
+ query_item->length, total_size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_query_config_ptr, total_size))
+ return -EFAULT;
+
+ if (__get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ if (use_uuid) {
+ struct i915_oa_config *tmp;
+ int id;
+
+ BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
+
+ memset(&uuid, 0, sizeof(uuid));
+ if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ sizeof(user_query_config_ptr->uuid)))
+ return -EFAULT;
+
+ oa_config = NULL;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, uuid)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ } else {
+ if (__get_user(config_id, &user_query_config_ptr->config))
+ return -EFAULT;
+
+ oa_config = i915_perf_get_oa_config(perf, config_id);
+ }
+ if (!oa_config)
+ return -ENOENT;
+
+ if (__copy_from_user(&user_config, user_config_ptr,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
+ user_config.boolean_regs_ptr,
+ oa_config->b_counter_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
+ user_config.flex_regs_ptr,
+ oa_config->flex_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
+ user_config.mux_regs_ptr,
+ oa_config->mux_regs_len);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len,
+ user_config.boolean_regs_ptr,
+ &user_config.n_boolean_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
+ oa_config->flex_regs_len,
+ user_config.flex_regs_ptr,
+ &user_config.n_flex_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
+ oa_config->mux_regs_len,
+ user_config.mux_regs_ptr,
+ &user_config.n_mux_regs);
+ if (ret)
+ goto out;
+
+ memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
+
+ if (__copy_to_user(user_config_ptr, &user_config,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = total_size;
+
+out:
+ i915_oa_config_put(oa_config);
+ return ret;
+}
+
+static size_t sizeof_perf_config_list(size_t count)
+{
+ return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
+}
+
+static size_t sizeof_perf_metrics(struct i915_perf *perf)
+{
+ struct i915_oa_config *tmp;
+ size_t i;
+ int id;
+
+ i = 1;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id)
+ i++;
+ rcu_read_unlock();
+
+ return sizeof_perf_config_list(i);
+}
+
+static int query_perf_config_list(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct i915_perf *perf = &i915->perf;
+ u64 *oa_config_ids = NULL;
+ int alloc, n_configs;
+ u32 flags;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ if (query_item->length == 0)
+ return sizeof_perf_metrics(perf);
+
+ if (get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ n_configs = 1;
+ do {
+ struct i915_oa_config *tmp;
+ u64 *ids;
+ int id;
+
+ ids = krealloc(oa_config_ids,
+ n_configs * sizeof(*oa_config_ids),
+ GFP_KERNEL);
+ if (!ids)
+ return -ENOMEM;
+
+ alloc = fetch_and_zero(&n_configs);
+
+ ids[n_configs++] = 1ull; /* reserved for test_config */
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (n_configs < alloc)
+ ids[n_configs] = id;
+ n_configs++;
+ }
+ rcu_read_unlock();
+
+ oa_config_ids = ids;
+ } while (n_configs > alloc);
+
+ if (query_item->length < sizeof_perf_config_list(n_configs)) {
+ DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
+ query_item->length,
+ sizeof_perf_config_list(n_configs));
+ kfree(oa_config_ids);
+ return -EINVAL;
+ }
+
+ if (put_user(n_configs, &user_query_config_ptr->config)) {
+ kfree(oa_config_ids);
+ return -EFAULT;
+ }
+
+ ret = copy_to_user(user_query_config_ptr + 1,
+ oa_config_ids,
+ n_configs * sizeof(*oa_config_ids));
+ kfree(oa_config_ids);
+ if (ret)
+ return -EFAULT;
+
+ return sizeof_perf_config_list(n_configs);
+}
+
+static int query_perf_config(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ switch (query_item->flags) {
+ case DRM_I915_QUERY_PERF_CONFIG_LIST:
+ return query_perf_config_list(i915, query_item);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
+ return query_perf_config_data(i915, query_item, true);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
+ return query_perf_config_data(i915, query_item, false);
+ default:
+ return -EINVAL;
+ }
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
+ query_perf_config,
};
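A minimal userspace sketch (not part of the patch) of how the new query can be consumed, assuming the matching uapi additions from this series (DRM_I915_QUERY_PERF_CONFIG, DRM_I915_QUERY_PERF_CONFIG_LIST and struct drm_i915_query_perf_config in <drm/i915_drm.h>) and a hypothetical render node path. It uses the usual probe-then-fetch pattern: a zero-length item returns the required size, a second call fills the buffer, and the config IDs follow the header.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int list_oa_config_ids(int drm_fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_PERF_CONFIG,
		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_perf_config *hdr;
	const uint64_t *ids;
	uint64_t n, i;

	/* First pass: item.length == 0 asks the kernel for the size. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return -1;

	hdr = calloc(1, item.length);
	if (!hdr)
		return -1;
	item.data_ptr = (uintptr_t)hdr;

	/* Second pass: the kernel writes the count plus the config IDs. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) {
		free(hdr);
		return -1;
	}

	ids = (const uint64_t *)(hdr + 1);	/* IDs follow the header */
	n = (item.length - sizeof(*hdr)) / sizeof(*ids);
	for (i = 0; i < n; i++)
		printf("OA config id %llu\n", (unsigned long long)ids[i]);

	free(hdr);
	return 0;
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* hypothetical node */

	if (fd < 0 || list_oa_config_ids(fd))
		fprintf(stderr, "perf config query failed\n");
	if (fd >= 0)
		close(fd);
	return 0;
}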
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d6483b5dc8e5..6cc55c103f67 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -242,6 +242,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
@@ -250,9 +251,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
DISPLAY_MMIO_BASE(dev_priv))
-#define _MMIO_TRANS2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
- INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
+#define _TRANS2(tran, reg) (INTEL_INFO(dev_priv)->trans_offsets[(tran)] - \
+ INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(tran, reg) _MMIO(_TRANS2(tran, reg))
#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
DISPLAY_MMIO_BASE(dev_priv))
@@ -270,30 +272,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
-/* Engine ID */
-
-#define RCS0_HW 0
-#define VCS0_HW 1
-#define BCS0_HW 2
-#define VECS0_HW 3
-#define VCS1_HW 4
-#define VCS2_HW 6
-#define VCS3_HW 7
-#define VECS1_HW 12
-
-/* Engine class */
-
-#define RENDER_CLASS 0
-#define VIDEO_DECODE_CLASS 1
-#define VIDEO_ENHANCEMENT_CLASS 2
-#define COPY_ENGINE_CLASS 3
-#define OTHER_CLASS 4
-#define MAX_ENGINE_CLASS 4
-
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GTPM_INSTANCE 1
-#define MAX_ENGINE_INSTANCE 3
-
/* PCI config space */
#define MCHBAR_I915 0x44
@@ -435,6 +413,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE_MAX 4
+
#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
@@ -493,6 +474,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1 << 13)
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
@@ -567,7 +550,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
#define MI_PREDICATE_SRC1 _MMIO(0x2408)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
-
+#define MI_PREDICATE_DATA _MMIO(0x2410)
+#define MI_PREDICATE_RESULT _MMIO(0x2418)
+#define MI_PREDICATE_RESULT_1 _MMIO(0x241c)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -577,6 +562,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define BCS_SWCTRL _MMIO(0x22200)
+/* There are 16 GPR registers */
+#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
#define HS_INVOCATION_COUNT _MMIO(0x2300)
@@ -704,6 +693,45 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+/* Gen12 OAR unit */
+#define GEN12_OAR_OACONTROL _MMIO(0x2960)
+#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
+#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OACTXCONTROL _MMIO(0x2360)
+#define GEN12_OAR_OASTATUS _MMIO(0x2968)
+
+/* Gen12 OAG unit */
+#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00)
+#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0
+#define GEN12_OAG_OATAILPTR _MMIO(0xdb04)
+#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0
+
+#define GEN12_OAG_OABUFFER _MMIO(0xdb08)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3)
+#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */
+
+#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28)
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1)
+#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0)
+
+#define GEN12_OAG_OACONTROL _MMIO(0xdaf4)
+#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2
+#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8)
+#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
+
+#define GEN12_OAG_OASTATUS _MMIO(0xdafc)
+#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1)
+#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0)
+
/*
* Flexible, Aggregate EU Counter Registers.
* Note: these aren't contiguous
@@ -940,6 +968,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+/* Same layout as OASTARTTRIGX */
+#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900)
+#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904)
+#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908)
+#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c)
+#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910)
+#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914)
+#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918)
+#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c)
+
+/* Same layout as OAREPORTTRIGX */
+#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920)
+#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924)
+#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928)
+#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c)
+#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930)
+#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934)
+#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938)
+#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c)
+
/* CECX_0 */
#define OACEC_COMPARE_LESS_OR_EQUAL 6
#define OACEC_COMPARE_NOT_EQUAL 5
@@ -956,6 +1004,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_SELECT_PREV (1 << 19)
#define OACEC_SELECT_BOOLEAN (2 << 19)
+/* 11-bit array 0: pass-through, 1: negated */
+#define GEN12_OASCEC_NEGATE_MASK 0x7ff
+#define GEN12_OASCEC_NEGATE_SHIFT 21
+
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
#define OACEC_CONSIDERATIONS_MASK 0xffff
@@ -978,6 +1030,42 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC7_0 _MMIO(0x27a8)
#define OACEC7_1 _MMIO(0x27ac)
+/* Same layout as CECX_Y */
+#define GEN12_OAG_CEC0_0 _MMIO(0xd940)
+#define GEN12_OAG_CEC0_1 _MMIO(0xd944)
+#define GEN12_OAG_CEC1_0 _MMIO(0xd948)
+#define GEN12_OAG_CEC1_1 _MMIO(0xd94c)
+#define GEN12_OAG_CEC2_0 _MMIO(0xd950)
+#define GEN12_OAG_CEC2_1 _MMIO(0xd954)
+#define GEN12_OAG_CEC3_0 _MMIO(0xd958)
+#define GEN12_OAG_CEC3_1 _MMIO(0xd95c)
+#define GEN12_OAG_CEC4_0 _MMIO(0xd960)
+#define GEN12_OAG_CEC4_1 _MMIO(0xd964)
+#define GEN12_OAG_CEC5_0 _MMIO(0xd968)
+#define GEN12_OAG_CEC5_1 _MMIO(0xd96c)
+#define GEN12_OAG_CEC6_0 _MMIO(0xd970)
+#define GEN12_OAG_CEC6_1 _MMIO(0xd974)
+#define GEN12_OAG_CEC7_0 _MMIO(0xd978)
+#define GEN12_OAG_CEC7_1 _MMIO(0xd97c)
+
+/* Same layout as CECX_Y + negate 11-bit array */
+#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00)
+#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04)
+#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08)
+#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c)
+#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10)
+#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14)
+#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18)
+#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c)
+#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20)
+#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24)
+#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28)
+#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c)
+#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30)
+#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34)
+#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38)
+#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c)
+
/* OA perf counters */
#define OA_PERFCNT1_LO _MMIO(0x91B8)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
@@ -1058,6 +1146,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
+#define GEN12_OAA_DBG_REG _MMIO(0xdc44)
+#define GEN12_OAG_OA_PESS _MMIO(0x2b2c)
+#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40)
+
#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080
@@ -1161,27 +1253,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define PUNIT_REG_ISPSSPM0 0x39
#define PUNIT_REG_ISPSSPM1 0x3a
-/*
- * i915_power_well_id:
- *
- * IDs used to look up power wells. Power wells accessed directly bypassing
- * the power domains framework must be assigned a unique ID. The rest of power
- * wells must be assigned DISP_PW_ID_NONE.
- */
-enum i915_power_well_id {
- DISP_PW_ID_NONE,
-
- VLV_DISP_PW_DISP2D,
- BXT_DISP_PW_DPIO_CMN_A,
- VLV_DISP_PW_DPIO_CMN_BC,
- GLK_DISP_PW_DPIO_CMN_C,
- CHV_DISP_PW_DPIO_CMN_D,
- HSW_DISP_PW_GLOBAL,
- SKL_DISP_PW_MISC_IO,
- SKL_DISP_PW_1,
- SKL_DISP_PW_2,
-};
-
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2))
@@ -1793,19 +1864,21 @@ enum i915_power_well_id {
*/
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
-#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
- _ICL_COMBOPHY_B)
+#define _EHL_COMBOPHY_C 0x160000
+#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B, \
+ _EHL_COMBOPHY_C)
/* CNL/ICL Port CL_DW registers */
-#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
4 * (dw))
#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
#define CL_POWER_DOWN_ENABLE (1 << 4)
#define SUS_CLOCK_CONFIG (3 << 0)
-#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
@@ -1820,23 +1893,23 @@ enum i915_power_well_id {
#define PWR_DOWN_LN_MASK (0xf << 4)
#define PWR_DOWN_LN_SHIFT 4
-#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
#define ICL_LANE_ENABLE_AUX (1 << 0)
/* CNL/ICL Port COMP_DW registers */
#define _ICL_PORT_COMP 0x100
-#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_COMP + 4 * (dw))
#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
#define COMP_INIT (1 << 31)
#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
#define PROCESS_INFO_DOT_0 (0 << 26)
#define PROCESS_INFO_DOT_1 (1 << 26)
#define PROCESS_INFO_DOT_4 (2 << 26)
@@ -1848,14 +1921,14 @@ enum i915_power_well_id {
#define VOLTAGE_INFO_MASK (3 << 24)
#define VOLTAGE_INFO_SHIFT 24
-#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port))
+#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
#define IREFGEN (1 << 24)
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy))
/* CNL/ICL Port PCS registers */
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
@@ -1868,14 +1941,14 @@ enum i915_power_well_id {
#define _CNL_PORT_PCS_DW1_LN0_C 0x162C04
#define _CNL_PORT_PCS_DW1_LN0_D 0x162E04
#define _CNL_PORT_PCS_DW1_LN0_F 0x162804
-#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \
+#define CNL_PORT_PCS_DW1_GRP(phy) _MMIO(_PICK(phy, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_B, \
_CNL_PORT_PCS_DW1_GRP_C, \
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F))
-#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
+#define CNL_PORT_PCS_DW1_LN0(phy) _MMIO(_PICK(phy, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
_CNL_PORT_PCS_DW1_LN0_C, \
@@ -1886,16 +1959,18 @@ enum i915_power_well_id {
#define _ICL_PORT_PCS_AUX 0x300
#define _ICL_PORT_PCS_GRP 0x600
#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
-#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_AUX + 4 * (dw))
-#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_GRP + 4 * (dw))
-#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_LN(ln) + 4 * (dw))
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
+#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
+#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
+#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
#define COMMON_KEEPER_EN (1 << 26)
+#define LATENCY_OPTIM_MASK (0x3 << 2)
+#define LATENCY_OPTIM_VAL(x) ((x) << 2)
/* CNL/ICL Port TX registers */
#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
@@ -1929,18 +2004,18 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_GRP 0x680
#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
-#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_AUX + 4 * (dw))
-#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_GRP + 4 * (dw))
-#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_LN(ln) + 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
+#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
+#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
+#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1957,10 +2032,10 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
-#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
+#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
+#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
+#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1971,9 +2046,9 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
+#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
+#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
+#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1984,15 +2059,19 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
-#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
-#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
-#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
+#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
+#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
+#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
-#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
- _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+#define _ICL_DPHY_CHKN_REG 0x194
+#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
+#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
+
+#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \
+ _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
@@ -2002,10 +2081,10 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+#define MG_TX1_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX1LN1_PORT1)
#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
@@ -2015,10 +2094,10 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define MG_TX2_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
@@ -2029,10 +2108,10 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+#define MG_TX1_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX1LN1_PORT1)
#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
@@ -2042,10 +2121,10 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define MG_TX2_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
@@ -2056,10 +2135,10 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
- MG_TX_SWINGCTRL_TX1LN0_PORT2, \
- MG_TX_SWINGCTRL_TX1LN1_PORT1)
+#define MG_TX1_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX1LN1_PORT1)
#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
@@ -2069,10 +2148,10 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
- MG_TX_SWINGCTRL_TX2LN0_PORT2, \
- MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define MG_TX2_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
@@ -2084,10 +2163,10 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
- MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
- MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+#define MG_TX1_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+ MG_TX_DRVCTRL_TX1LN1_TXPORT1)
#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
@@ -2097,10 +2176,10 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
- MG_TX_DRVCTRL_TX2LN0_PORT2, \
- MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define MG_TX2_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
@@ -2117,10 +2196,10 @@ enum i915_power_well_id {
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
- MG_CLKHUB_LN0_PORT2, \
- MG_CLKHUB_LN1_PORT1)
+#define MG_CLKHUB(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \
+ MG_CLKHUB_LN0_PORT2, \
+ MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
#define MG_TX_DCC_TX1LN0_PORT1 0x168110
@@ -2131,10 +2210,10 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
- MG_TX_DCC_TX1LN0_PORT2, \
- MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX1_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \
+ MG_TX_DCC_TX1LN0_PORT2, \
+ MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
#define MG_TX_DCC_TX2LN1_PORT1 0x168490
#define MG_TX_DCC_TX2LN0_PORT2 0x169090
@@ -2143,10 +2222,10 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
- MG_TX_DCC_TX2LN0_PORT2, \
- MG_TX_DCC_TX2LN1_PORT1)
+#define MG_TX2_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \
+ MG_TX_DCC_TX2LN0_PORT2, \
+ MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24)
@@ -2159,32 +2238,12 @@ enum i915_power_well_id {
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
- MG_DP_MODE_LN0_ACU_PORT2, \
- MG_DP_MODE_LN1_ACU_PORT1)
+#define MG_DP_MODE(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \
+ MG_DP_MODE_LN0_ACU_PORT2, \
+ MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
-#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
-#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4)
-#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3)
-#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2)
-#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1)
-
-#define MG_MISC_SUS0_PORT1 0x168814
-#define MG_MISC_SUS0_PORT2 0x169814
-#define MG_MISC_SUS0_PORT3 0x16A814
-#define MG_MISC_SUS0_PORT4 0x16B814
-#define MG_MISC_SUS0(tc_port) \
- _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2))
-#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14)
-#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14)
-#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12)
-#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11)
-#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10)
-#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7)
-#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6)
-#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5)
/* The spec defines this only for BXT PHY0, but let's assume that this
* would exist for PHY1 too if it had a second channel.
@@ -2195,15 +2254,19 @@ enum i915_power_well_id {
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
#define FIA1_BASE 0x163000
+#define FIA2_BASE 0x16E000
+#define FIA3_BASE 0x16F000
+#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -2477,15 +2540,18 @@ enum i915_power_well_id {
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
+#define GEN12_GAM_DONE _MMIO(0xcf68)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
+#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -2512,14 +2578,25 @@ enum i915_power_well_id {
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
+#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
+#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
-#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
-#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
-#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
+#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28)
#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0)
+#define RING_FORCE_TO_NONPRIV_MASK_VALID \
+ (RING_FORCE_TO_NONPRIV_RANGE_MASK \
+ | RING_FORCE_TO_NONPRIV_ACCESS_MASK)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
@@ -2614,9 +2691,13 @@ enum i915_power_well_id {
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
+#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1 << 31)
@@ -2726,6 +2807,7 @@ enum i915_power_well_id {
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
@@ -2899,6 +2981,7 @@ enum i915_power_well_id {
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
@@ -2977,6 +3060,8 @@ enum i915_power_well_id {
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
+#define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3229,25 +3314,7 @@ enum i915_power_well_id {
#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
-#define GMBUS_PIN_DISABLED 0
-#define GMBUS_PIN_SSC 1
-#define GMBUS_PIN_VGADDC 2
-#define GMBUS_PIN_PANEL 3
-#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
-#define GMBUS_PIN_DPC 4 /* HDMIC */
-#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
-#define GMBUS_PIN_DPD 6 /* HDMID */
-#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
-#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
-#define GMBUS_PIN_2_BXT 2
-#define GMBUS_PIN_3_BXT 3
-#define GMBUS_PIN_4_CNP 4
-#define GMBUS_PIN_9_TC1_ICP 9
-#define GMBUS_PIN_10_TC2_ICP 10
-#define GMBUS_PIN_11_TC3_ICP 11
-#define GMBUS_PIN_12_TC4_ICP 12
-
-#define GMBUS_NUM_PINS 13 /* including 0 */
+
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1 << 31)
#define GMBUS_SW_RDY (1 << 30)
@@ -3597,6 +3664,9 @@ enum i915_power_well_id {
#define _PALETTE_A 0xa000
#define _PALETTE_B 0xa800
#define _CHV_PALETTE_C 0xc000
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
@@ -4077,12 +4147,23 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
#define GWUNIT_CLKGATE_DIS (1 << 16)
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
-#define VFUNIT_CLKGATE_DIS (1 << 20)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
#define CGPSF_CLKGATE_DIS (1 << 3)
@@ -4174,6 +4255,7 @@ enum {
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
+#define _EXITLINE_A 0x60018
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
@@ -4209,6 +4291,7 @@ enum {
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
+#define TRANSCODER_D_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
@@ -4224,10 +4307,22 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* HSW+ eDP PSR registers */
-#define HSW_EDP_PSR_BASE 0x64800
-#define BDW_EDP_PSR_BASE 0x6f800
-#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
+#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define EXITLINE_ENABLE REG_BIT(31)
+#define EXITLINE_MASK REG_GENMASK(12, 0)
+#define EXITLINE_SHIFT 0
+
+/*
+ * HSW+ eDP PSR registers
+ *
+ * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one
+ * instance of it
+ */
+#define _HSW_EDP_PSR_BASE 0x64800
+#define _SRD_CTL_A 0x60800
+#define _SRD_CTL_EDP 0x6f800
+#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust)
+#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A))
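A hedged worked example of the _PSR_ADJ() math above, assuming the rest of the series initializes hsw_psr_mmio_adjust to _SRD_CTL_EDP - _HSW_EDP_PSR_BASE on HSW (where only the legacy 0x648xx instance exists) and to 0 on BDW+, and using the TRANSCODER_A/EDP offsets of 0x60000 and 0x6f000 defined in this file:

#include <stdio.h>

#define XCODER_A_OFF	0x60000	/* TRANSCODER_A_OFFSET */
#define XCODER_EDP_OFF	0x6f000	/* TRANSCODER_EDP_OFFSET */

/* Userspace model of _TRANS2(tran, reg) with DISPLAY_MMIO_BASE == 0. */
static unsigned int trans2(unsigned int tran_off, unsigned int reg)
{
	return tran_off - XCODER_A_OFF + reg;
}

int main(void)
{
	unsigned int srd_ctl_a = 0x60800;		/* _SRD_CTL_A */
	unsigned int hsw_adjust = 0x6f800 - 0x64800;	/* assumed: _SRD_CTL_EDP - _HSW_EDP_PSR_BASE */

	/* HSW: the single instance folds back onto the legacy 0x64800 block. */
	printf("HSW  EDP_PSR_CTL(EDP) = 0x%x\n",
	       trans2(XCODER_EDP_OFF, srd_ctl_a) - hsw_adjust);	/* 0x64800 */

	/* BDW+ (adjust == 0): one instance per transcoder. */
	printf("BDW+ EDP_PSR_CTL(A)   = 0x%x\n", trans2(XCODER_A_OFF, srd_ctl_a));	/* 0x60800 */
	printf("BDW+ EDP_PSR_CTL(EDP) = 0x%x\n", trans2(XCODER_EDP_OFF, srd_ctl_a));	/* 0x6f800 */
	return 0;
}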
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -4253,27 +4348,40 @@ enum {
#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
-/* Bspec claims those aren't shifted but stay at 0x64800 */
+/*
+ * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are
+ * relative to the transcoder, with the bits for each one defined as if no
+ * shift were applied (i.e. as if it were for TRANSCODER_EDP)
+ */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
-#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
-#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
-#define EDP_PSR_TRANSCODER_C_SHIFT 24
-#define EDP_PSR_TRANSCODER_B_SHIFT 16
-#define EDP_PSR_TRANSCODER_A_SHIFT 8
-#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
-
-#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
+#define _PSR_IMR_A 0x60814
+#define _PSR_IIR_A 0x60818
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
+ 0 : ((trans) - TRANSCODER_A + 1) * 8)
+#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+
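A quick sanity check that the new per-transcoder shift helper above reproduces the old fixed EDP_PSR_TRANSCODER_{EDP,A,B,C}_SHIFT values it replaces (stand-in enum values are used, only the relative math matters):

#include <assert.h>

enum { XCODER_A = 0, XCODER_B, XCODER_C, XCODER_EDP = 100 };

#define TRANS_SHIFT(t)	((t) == XCODER_EDP ? 0 : ((t) - XCODER_A + 1) * 8)

int main(void)
{
	/* Matches the removed shifts of 0, 8, 16 and 24 above. */
	assert(TRANS_SHIFT(XCODER_EDP) == 0);
	assert(TRANS_SHIFT(XCODER_A) == 8);
	assert(TRANS_SHIFT(XCODER_B) == 16);
	assert(TRANS_SHIFT(XCODER_C) == 24);
	return 0;
}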
+#define _SRD_AUX_CTL_A 0x60810
+#define _SRD_AUX_CTL_EDP 0x6f810
+#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A))
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
-#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
+#define _SRD_AUX_DATA_A 0x60814
+#define _SRD_AUX_DATA_EDP 0x6f814
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
+#define _SRD_STATUS_A 0x60840
+#define _SRD_STATUS_EDP 0x6f840
+#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A))
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -4298,10 +4406,15 @@ enum {
#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
-#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
+#define _SRD_PERF_CNT_A 0x60844
+#define _SRD_PERF_CNT_EDP 0x6f844
+#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A))
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
+/* PSR_MASK on SKL+ */
+#define _SRD_DEBUG_A 0x60860
+#define _SRD_DEBUG_EDP 0x6f860
+#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A))
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -4309,7 +4422,9 @@ enum {
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
-#define EDP_PSR2_CTL _MMIO(0x6f900)
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE (1 << 31)
#define EDP_SU_TRACK_ENABLE (1 << 30)
#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
@@ -4331,8 +4446,8 @@ enum {
#define _PSR_EVENT_TRANS_B 0x61848
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
-#define _PSR_EVENT_TRANS_EDP 0x6F848
-#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define _PSR_EVENT_TRANS_EDP 0x6f848
+#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
#define PSR_EVENT_PSR2_DISABLED (1 << 16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
@@ -4350,15 +4465,16 @@ enum {
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
-#define EDP_PSR2_STATUS _MMIO(0x6f940)
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
-#define _PSR2_SU_STATUS_0 0x6F914
-#define _PSR2_SU_STATUS_1 0x6F918
-#define _PSR2_SU_STATUS_2 0x6F91C
-#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
-#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3))
+#define _PSR2_SU_STATUS_A 0x60914
+#define _PSR2_SU_STATUS_EDP 0x6f914
+#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
+#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
#define PSR2_SU_STATUS_FRAMES 8
@@ -4684,6 +4800,7 @@ enum {
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
@@ -4911,14 +5028,20 @@ enum {
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
-#define UTIL_PIN_CTL _MMIO(0x48400)
-#define UTIL_PIN_ENABLE (1 << 31)
-
-#define UTIL_PIN_PIPE(x) ((x) << 29)
-#define UTIL_PIN_PIPE_MASK (3 << 29)
-#define UTIL_PIN_MODE_PWM (1 << 24)
-#define UTIL_PIN_MODE_MASK (0xf << 24)
-#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
/* BXT backlight register definition. */
#define _BXT_BLC_PWM_CTL1 0xC8250
@@ -5520,45 +5643,9 @@ enum {
*/
#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
-#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
-#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
-#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
-#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
-#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
-
-#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
-#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
-#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
-#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
-#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
-
-#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
-#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
-#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
-#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
-#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
-
-#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
-#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
-#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
-#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
-#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
-
-#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
-#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
-#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
-#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
-#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5656,7 +5743,8 @@ enum {
#define PIPECONF_DOUBLE_WIDE (1 << 30)
#define I965_PIPECONF_ACTIVE (1 << 30)
#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) /* pre-hsw */
+#define PIPECONF_FRAME_START_DELAY(x) ((x) << 27) /* pre-hsw: 0-3 */
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
@@ -5690,6 +5778,11 @@ enum {
#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0 << 5)
#define PIPECONF_10BPC (1 << 5)
@@ -5755,6 +5848,7 @@ enum {
#define PIPE_A_OFFSET 0x70000
#define PIPE_B_OFFSET 0x71000
#define PIPE_C_OFFSET 0x72000
+#define PIPE_D_OFFSET 0x73000
#define CHV_PIPE_C_OFFSET 0x74000
/*
* There's actually no pipe EDP. Some pipe registers have
@@ -5776,12 +5870,13 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1 << 27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
@@ -6238,6 +6333,7 @@ enum {
#define CHV_CURSOR_C_OFFSET 0x700e0
#define IVB_CURSOR_B_OFFSET 0x71080
#define IVB_CURSOR_C_OFFSET 0x72080
+#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
#define _DSPACNTR 0x70180
@@ -6256,6 +6352,7 @@ enum {
#define DISPPLANE_RGBX101010 (0x8 << 26)
#define DISPPLANE_RGBA101010 (0x9 << 26)
#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_BGRA101010 (0xb << 26)
#define DISPPLANE_RGBX161616 (0xc << 26)
#define DISPPLANE_RGBX888 (0xe << 26)
#define DISPPLANE_RGBA888 (0xf << 26)
@@ -6284,6 +6381,7 @@ enum {
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
+#define _DSPAGAMC 0x701E0
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
@@ -6295,6 +6393,7 @@ enum {
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -6397,6 +6496,7 @@ enum {
#define _DVSAKEYMAXVAL 0x721a0
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
+#define _DVSAGAMC_G4X 0x721e0 /* g4x */
#define _DVSASCALE 0x72204
#define DVS_SCALE_ENABLE (1 << 31)
#define DVS_FILTER_MASK (3 << 29)
@@ -6405,7 +6505,8 @@ enum {
#define DVS_FILTER_SOFTENING (2 << 29)
#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
-#define _DVSAGAMC 0x72300
+#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
#define _DVSBCNTR 0x73180
#define _DVSBLINOFF 0x73184
@@ -6418,8 +6519,10 @@ enum {
#define _DVSBKEYMAXVAL 0x731a0
#define _DVSBTILEOFF 0x731a4
#define _DVSBSURFLIVE 0x731ac
+#define _DVSBGAMC_G4X 0x731e0 /* g4x */
#define _DVSBSCALE 0x73204
-#define _DVSBGAMC 0x73300
+#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
@@ -6433,6 +6536,9 @@ enum {
#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1 << 31)
@@ -6457,7 +6563,7 @@ enum {
#define SPRITE_YUV_ORDER_VYUY (3 << 16)
#define SPRITE_ROTATE_180 (1 << 15)
#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
-#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_INT_GAMMA_DISABLE (1 << 13)
#define SPRITE_TILED (1 << 10)
#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
@@ -6480,6 +6586,8 @@ enum {
#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
+#define _SPRA_GAMC16 0x70440
+#define _SPRA_GAMC17 0x7044c
#define _SPRB_CTL 0x71280
#define _SPRB_LINOFF 0x71284
@@ -6495,6 +6603,8 @@ enum {
#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
+#define _SPRB_GAMC16 0x71440
+#define _SPRB_GAMC17 0x7144c
#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
@@ -6508,19 +6618,24 @@ enum {
#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define SP_ENABLE (1 << 31)
#define SP_GAMMA_ENABLE (1 << 30)
#define SP_PIXFORMAT_MASK (0xf << 26)
-#define SP_FORMAT_YUV422 (0 << 26)
-#define SP_FORMAT_BGR565 (5 << 26)
-#define SP_FORMAT_BGRX8888 (6 << 26)
-#define SP_FORMAT_BGRA8888 (7 << 26)
-#define SP_FORMAT_RGBX1010102 (8 << 26)
-#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_YUV422 (0x0 << 26)
+#define SP_FORMAT_8BPP (0x2 << 26)
+#define SP_FORMAT_BGR565 (0x5 << 26)
+#define SP_FORMAT_BGRX8888 (0x6 << 26)
+#define SP_FORMAT_BGRA8888 (0x7 << 26)
+#define SP_FORMAT_RGBX1010102 (0x8 << 26)
+#define SP_FORMAT_RGBA1010102 (0x9 << 26)
+#define SP_FORMAT_BGRX1010102 (0xa << 26) /* CHV pipe B */
+#define SP_FORMAT_BGRA1010102 (0xb << 26) /* CHV pipe B */
#define SP_FORMAT_RGBX8888 (0xe << 26)
#define SP_FORMAT_RGBA8888 (0xf << 26)
#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
@@ -6551,7 +6666,7 @@ enum {
#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */
#define SP_SH_COS(x) (x) /* u3.7 */
-#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
@@ -6566,10 +6681,12 @@ enum {
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+ _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
@@ -6584,7 +6701,7 @@ enum {
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
+#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
/*
* CHV pipe B sprite CSC
@@ -6669,6 +6786,7 @@ enum {
#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
+#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR (0 << 10)
@@ -6676,6 +6794,7 @@ enum {
#define PLANE_CTL_TILED_Y (4 << 10)
#define PLANE_CTL_TILED_YF (5 << 10)
#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
+#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
@@ -7191,11 +7310,17 @@ enum {
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define LGC_PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define LGC_PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
/* ilk/snb precision palette */
#define _PREC_PALETTE_A 0x4b000
#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PALETTE_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PALETTE_BLUE_MASK REG_GENMASK(9, 0)
#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
#define _PREC_PIPEAGCMAX 0x4d000
@@ -7228,6 +7353,14 @@ enum {
#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
+#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+
+#define DMC_DEBUG3 _MMIO(0x101090)
+
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -7317,16 +7450,6 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
-#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
-#define GEN9_GUC_DB_RING_EVENT (1 << 26)
-#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
-
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
@@ -7350,6 +7473,9 @@ enum {
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN11_PIPE_PLANE7_FAULT (1 << 22)
+#define GEN11_PIPE_PLANE6_FAULT (1 << 21)
+#define GEN11_PIPE_PLANE5_FAULT (1 << 20)
#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
@@ -7369,16 +7495,25 @@ enum {
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
+#define GEN11_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
+ GEN11_PIPE_PLANE7_FAULT | \
+ GEN11_PIPE_PLANE6_FAULT | \
+ GEN11_PIPE_PLANE5_FAULT)
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
#define BXT_DE_PORT_HP_DDIC (1 << 5)
#define BXT_DE_PORT_HP_DDIB (1 << 4)
#define BXT_DE_PORT_HP_DDIA (1 << 3)
@@ -7388,6 +7523,15 @@ enum {
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 (1 << 13)
+#define TGL_DE_PORT_AUX_USBC5 (1 << 12)
+#define TGL_DE_PORT_AUX_USBC4 (1 << 11)
+#define TGL_DE_PORT_AUX_USBC3 (1 << 10)
+#define TGL_DE_PORT_AUX_USBC2 (1 << 9)
+#define TGL_DE_PORT_AUX_USBC1 (1 << 8)
+#define TGL_DE_PORT_AUX_DDIC (1 << 2)
+#define TGL_DE_PORT_AUX_DDIB (1 << 1)
+#define TGL_DE_PORT_AUX_DDIA (1 << 0)
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
@@ -7431,21 +7575,29 @@ enum {
#define GEN11_DE_HPD_IMR _MMIO(0x44474)
#define GEN11_DE_HPD_IIR _MMIO(0x44478)
#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN12_TC6_HOTPLUG (1 << 21)
+#define GEN12_TC5_HOTPLUG (1 << 20)
#define GEN11_TC4_HOTPLUG (1 << 19)
#define GEN11_TC3_HOTPLUG (1 << 18)
#define GEN11_TC2_HOTPLUG (1 << 17)
#define GEN11_TC1_HOTPLUG (1 << 16)
#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \
+ GEN12_TC5_HOTPLUG | \
+ GEN11_TC4_HOTPLUG | \
GEN11_TC3_HOTPLUG | \
GEN11_TC2_HOTPLUG | \
GEN11_TC1_HOTPLUG)
+#define GEN12_TBT6_HOTPLUG (1 << 5)
+#define GEN12_TBT5_HOTPLUG (1 << 4)
#define GEN11_TBT4_HOTPLUG (1 << 3)
#define GEN11_TBT3_HOTPLUG (1 << 2)
#define GEN11_TBT2_HOTPLUG (1 << 1)
#define GEN11_TBT1_HOTPLUG (1 << 0)
#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \
+ GEN12_TBT5_HOTPLUG | \
+ GEN11_TBT4_HOTPLUG | \
GEN11_TBT3_HOTPLUG | \
GEN11_TBT2_HOTPLUG | \
GEN11_TBT1_HOTPLUG)
@@ -7479,6 +7631,9 @@ enum {
#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
+/* irq instances for OTHER_CLASS */
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GTPM_INSTANCE 1
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
@@ -7562,10 +7717,19 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A _MMIO(0x420c0)
-#define CHICKEN_TRANS_B _MMIO(0x420c4)
-#define CHICKEN_TRANS_C _MMIO(0x420c8)
-#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define HSW_FRAME_START_DELAY_MASK (3 << 27)
+#define HSW_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7589,6 +7753,14 @@ enum {
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define BW_BUDDY1_CTL _MMIO(0x45140)
+#define BW_BUDDY2_CTL _MMIO(0x45150)
+#define BW_BUDDY_DISABLE REG_BIT(31)
+
+#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
+#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
+
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
@@ -7598,14 +7770,19 @@ enum {
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
@@ -7622,7 +7799,10 @@ enum {
#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
@@ -7647,6 +7827,7 @@ enum {
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
+ #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
@@ -7690,6 +7871,9 @@ enum {
#define GEN7_L3SQCREG4 _MMIO(0xb034)
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+#define GEN11_SCRATCH2 _MMIO(0xb140)
+#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
@@ -7734,6 +7918,10 @@ enum {
#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+
/* PCH */
#define PCH_DISPLAY_BASE 0xc0000u
@@ -7827,22 +8015,25 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
-/* south display engine interrupt: ICP */
-#define SDE_TC4_HOTPLUG_ICP (1 << 27)
-#define SDE_TC3_HOTPLUG_ICP (1 << 26)
-#define SDE_TC2_HOTPLUG_ICP (1 << 25)
-#define SDE_TC1_HOTPLUG_ICP (1 << 24)
+/* south display engine interrupt: ICP/TGP */
#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
-#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16))
-#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
- SDE_DDIA_HOTPLUG_ICP)
-#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
- SDE_TC3_HOTPLUG_ICP | \
- SDE_TC2_HOTPLUG_ICP | \
- SDE_TC1_HOTPLUG_ICP)
+#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
+#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
@@ -7909,23 +8100,20 @@ enum {
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define ICP_DDIB_HPD_ENABLE (1 << 7)
-#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
-#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
-#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
-#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
-#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
-#define ICP_DDIA_HPD_ENABLE (1 << 3)
-#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
-#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
-#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
-#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
-#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
-#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port)))
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+
/* Icelake DSC Rate Control Range Parameter Registers */
#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
@@ -8033,6 +8221,19 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
+#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
+ ICP_TC_HPD_ENABLE(PORT_TC3) | \
+ ICP_TC_HPD_ENABLE(PORT_TC2) | \
+ ICP_TC_HPD_ENABLE(PORT_TC1))
+#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
+#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
+ ICP_TC_HPD_ENABLE(PORT_TC5) | \
+ ICP_TC_HPD_ENABLE_MASK)
+
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
@@ -8263,10 +8464,8 @@ enum {
#define TRANS_STATE_MASK (1 << 30)
#define TRANS_STATE_DISABLE (0 << 30)
#define TRANS_STATE_ENABLE (1 << 30)
-#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
-#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
-#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
-#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_FRAME_START_DELAY_MASK (3 << 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) ((x) << 27) /* ibx: 0-3 */
#define TRANS_INTERLACE_MASK (7 << 21)
#define TRANS_PROGRESSIVE (0 << 21)
#define TRANS_INTERLACED (3 << 21)
@@ -8287,6 +8486,7 @@ enum {
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
@@ -8578,6 +8778,10 @@ enum {
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+#define POWERGATE_ENABLE _MMIO(0xa210)
+#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3)
+#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4)
+
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
@@ -8815,6 +9019,7 @@ enum {
#define GEN9_SAGV_DISABLE 0x0
#define GEN9_SAGV_IS_DISABLED 0x1
#define GEN9_SAGV_ENABLE 0x3
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -9078,6 +9283,10 @@ enum {
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
/*
* HSW - ICL power wells
*
@@ -9119,7 +9328,8 @@ enum {
#define GLK_PW_CTL_IDX_DDI_A 1
#define SKL_PW_CTL_IDX_MISC_IO 0
-/* ICL - power wells */
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
#define ICL_PW_CTL_IDX_PW_4 3
#define ICL_PW_CTL_IDX_PW_3 2
#define ICL_PW_CTL_IDX_PW_2 1
@@ -9128,13 +9338,25 @@ enum {
#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
#define ICL_PW_CTL_IDX_AUX_C 2
#define ICL_PW_CTL_IDX_AUX_B 1
#define ICL_PW_CTL_IDX_AUX_A 0
@@ -9142,9 +9364,15 @@ enum {
#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
#define ICL_PW_CTL_IDX_DDI_C 2
#define ICL_PW_CTL_IDX_DDI_B 1
#define ICL_PW_CTL_IDX_DDI_A 0
@@ -9219,12 +9447,20 @@ enum skl_power_gate {
/* HDCP Repeater Registers */
#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT BIT(31)
+#define HDCP_TRANSB_REP_PRESENT BIT(30)
+#define HDCP_TRANSC_REP_PRESENT BIT(29)
+#define HDCP_TRANSD_REP_PRESENT BIT(28)
#define HDCP_DDIB_REP_PRESENT BIT(30)
#define HDCP_DDIA_REP_PRESENT BIT(29)
#define HDCP_DDIC_REP_PRESENT BIT(28)
#define HDCP_DDID_REP_PRESENT BIT(27)
#define HDCP_DDIF_REP_PRESENT BIT(26)
#define HDCP_DDIE_REP_PRESENT BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
#define HDCP_DDIB_SHA1_M0 (1 << 20)
#define HDCP_DDIA_SHA1_M0 (2 << 20)
#define HDCP_DDIC_SHA1_M0 (3 << 20)
@@ -9264,15 +9500,92 @@ enum skl_power_gate {
_PORTE_HDCP_AUTHENC, \
_PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
#define HDCP_STATUS_STREAM_A_ENC BIT(31)
#define HDCP_STATUS_STREAM_B_ENC BIT(30)
#define HDCP_STATUS_STREAM_C_ENC BIT(29)
@@ -9299,28 +9612,50 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
-
-#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
#define AUTH_LINK_AUTHENTICATED BIT(31)
#define AUTH_LINK_TYPE BIT(30)
#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
#define AUTH_CLR_KEYS BIT(18)
-
-#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-
-#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define STREAM_ENCRYPTION_STATUS_A BIT(31)
-#define STREAM_ENCRYPTION_STATUS_B BIT(30)
-#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
#define LINK_TYPE_STATUS BIT(22)
#define LINK_AUTH_STATUS BIT(21)
#define LINK_ENCRYPTION_STATUS BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
@@ -9328,10 +9663,14 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7 << 28)
#define TRANS_DDI_PORT_SHIFT 28
-#define TRANS_DDI_SELECT_PORT(x) ((x) << 28)
-#define TRANS_DDI_PORT_NONE (0 << 28)
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
@@ -9350,6 +9689,10 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
@@ -9377,7 +9720,9 @@ enum skl_power_gate {
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -9397,7 +9742,9 @@ enum skl_power_gate {
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -9541,6 +9888,9 @@ enum skl_power_gate {
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
+
#define CDCLK_FREQ _MMIO(0x46200)
@@ -9549,17 +9899,7 @@ enum skl_power_gate {
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-
-#define TRANS_MSA_SYNC_CLK (1 << 0)
-#define TRANS_MSA_SAMPLING_444 (2 << 1)
-#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
-#define TRANS_MSA_6_BPC (0 << 5)
-#define TRANS_MSA_8_BPC (1 << 5)
-#define TRANS_MSA_10_BPC (2 << 5)
-#define TRANS_MSA_12_BPC (3 << 5)
-#define TRANS_MSA_16_BPC (4 << 5)
-#define TRANS_MSA_CEA_RANGE (1 << 3)
-#define TRANS_MSA_USE_VSC_SDP (1 << 14)
+/* See DP_MSA_MISC_* for the bit definitions */
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
@@ -9600,7 +9940,10 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
@@ -9672,17 +10015,22 @@ enum skl_power_gate {
* CNL Clocks
*/
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
-#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
(port) + 10))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) + 10))
-#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \
- 21 : (tc_port) + 12))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
(port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - PORT_TC4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
#define DPLL1_ENABLE 0x46014
@@ -9887,6 +10235,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_PDIV_7 (8 << 2)
#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
#define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
#define _ICL_DPLL0_CFGCR0 0x164000
@@ -9899,6 +10248,182 @@ enum skl_power_gate {
#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
_ICL_DPLL1_CFGCR1)
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+/* TODO: add DPLL4 */
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0)
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+/* TODO: add DPLL4 */
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1)
+
+#define _DKL_PHY1_BASE 0x168000
+#define _DKL_PHY2_BASE 0x169000
+#define _DKL_PHY3_BASE 0x16A000
+#define _DKL_PHY4_BASE 0x16B000
+#define _DKL_PHY5_BASE 0x16C000
+#define _DKL_PHY6_BASE 0x16D000
+
+/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
+#define _DKL_PLL_DIV0 0x200
+#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
+#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
+#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
+#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12)
+#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8)
+#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV0)
+
+#define _DKL_PLL_DIV1 0x204
+#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16)
+#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0)
+#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV1)
+
+#define _DKL_PLL_SSC 0x210
+#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29)
+#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29)
+#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16)
+#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16)
+#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11)
+#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11)
+#define DKL_PLL_SSC_EN (1 << 9)
+#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_SSC)
+
+#define _DKL_PLL_BIAS 0x214
+#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30)
+#define DKL_PLL_BIAS_FBDIV_SHIFT (8)
+#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_BIAS)
+
+#define _DKL_PLL_TDC_COLDST_BIAS 0x218
+#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8)
+#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8)
+#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0)
+#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0)
+#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_TDC_COLDST_BIAS)
+
+#define _DKL_REFCLKIN_CTL 0x12C
+/* Bits are the same as MG_REFCLKIN_CTL */
+#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_REFCLKIN_CTL)
+
+#define _DKL_CLKTOP2_HSCLKCTL 0xD4
+/* Bits are the same as MG_CLKTOP2_HSCLKCTL */
+#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_HSCLKCTL)
+
+#define _DKL_CLKTOP2_CORECLKCTL1 0xD8
+/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */
+#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_CORECLKCTL1)
+
+#define _DKL_TX_DPCNTL0 0x2C0
+#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13)
+#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13)
+#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8)
+#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8)
+#define DKL_TX_VSWING_CONTROL(x) ((x) << 0)
+#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0)
+#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL0)
+
+#define _DKL_TX_DPCNTL1 0x2C4
+/* Bits are the same as DKL_TX_DPCNTL0 */
+#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL1)
+
+#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_DP20BITMODE (1 << 2)
+#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL2)
+
+#define _DKL_TX_FW_CALIB 0x2F8
+#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7)
+#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_FW_CALIB)
+
+#define _DKL_TX_PMD_LANE_SUS 0xD00
+#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_PMD_LANE_SUS)
+
+#define _DKL_TX_DW17 0xDC4
+#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW17)
+
+#define _DKL_TX_DW18 0xDC8
+#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW18)
+
+#define _DKL_DP_MODE 0xA0
+#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_DP_MODE)
+
+#define _DKL_CMN_UC_DW27 0x36C
+#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15)
+#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CMN_UC_DW27)
+
+/*
+ * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than
+ * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0
+ * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address
+ * bits that point the 4KB window into the full PHY register space.
+ */
+#define _HIP_INDEX_REG0 0x1010A0
+#define _HIP_INDEX_REG1 0x1010A4
+#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \
+ : _HIP_INDEX_REG1)
+#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4))
+#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port))
+
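/*
 * Illustration only (not part of this patch, and not in i915_reg.h): putting
 * the HIP index macros above together, selecting the 4KB window of a Dekel
 * PHY before touching one of its registers could look roughly like the sketch
 * below. The helper name is hypothetical, and the read-modify-write of the
 * per-port index field is an assumption rather than a documented requirement.
 */
static inline void dkl_phy_select_window_sketch(struct drm_i915_private *dev_priv,
						enum tc_port tc_port, u32 index)
{
	u32 val = I915_READ(HIP_INDEX_REG(tc_port));

	/* Clear this port's index field (one byte per port), set the new window. */
	val &= ~((u32)0xff << _HIP_INDEX_SHIFT(tc_port));
	val |= HIP_INDEX_VAL(tc_port, index);
	I915_WRITE(HIP_INDEX_REG(tc_port), val);
}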
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -9913,6 +10438,8 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@@ -10041,11 +10568,11 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
-#define ICL_CSC_ENABLE (1 << 31)
-#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31) /* icl+ */
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30) /* icl+ */
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2) /* ilk/snb */
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1) /* pre-glk */
+#define CSC_MODE_YUV_TO_RGB (1 << 0) /* ilk/snb */
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
@@ -10161,6 +10688,9 @@ enum skl_power_gate {
#define _PAL_PREC_GC_MAX_A 0x4A410
#define _PAL_PREC_GC_MAX_B 0x4AC10
#define _PAL_PREC_GC_MAX_C 0x4B410
+#define PREC_PAL_DATA_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PAL_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PAL_DATA_BLUE_MASK REG_GENMASK(9, 0)
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
@@ -10213,6 +10743,9 @@ enum skl_power_gate {
#define CGM_PIPE_MODE_GAMMA (1 << 2)
#define CGM_PIPE_MODE_CSC (1 << 1)
#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
+#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0)
+#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
+#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0)
#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
@@ -10262,6 +10795,57 @@ enum skl_power_gate {
#define ICL_ESC_CLK_DIV_SHIFT 0
#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+#define _DSI_CMD_FRMCTL_0 0x6b034
+#define _DSI_CMD_FRMCTL_1 0x6b834
+#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
+ _DSI_CMD_FRMCTL_0,\
+ _DSI_CMD_FRMCTL_1)
+#define DSI_FRAME_UPDATE_REQUEST (1 << 31)
+#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29)
+#define DSI_NULL_PACKET_ENABLE (1 << 28)
+#define DSI_FRAME_IN_PROGRESS (1 << 0)
+
+#define _DSI_INTR_MASK_REG_0 0x6b070
+#define _DSI_INTR_MASK_REG_1 0x6b870
+#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_MASK_REG_0,\
+ _DSI_INTR_MASK_REG_1)
+
+#define _DSI_INTR_IDENT_REG_0 0x6b074
+#define _DSI_INTR_IDENT_REG_1 0x6b874
+#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_IDENT_REG_0,\
+ _DSI_INTR_IDENT_REG_1)
+#define DSI_TE_EVENT (1 << 31)
+#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30)
+#define DSI_TX_DATA (1 << 29)
+#define DSI_ULPS_ENTRY_DONE (1 << 28)
+#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27)
+#define DSI_HOST_CHKSUM_ERROR (1 << 26)
+#define DSI_HOST_MULTI_ECC_ERROR (1 << 25)
+#define DSI_HOST_SINGL_ECC_ERROR (1 << 24)
+#define DSI_HOST_CONTENTION_DETECTED (1 << 23)
+#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22)
+#define DSI_HOST_TIMEOUT_ERROR (1 << 21)
+#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20)
+#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19)
+#define DSI_FRAME_UPDATE_DONE (1 << 16)
+#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15)
+#define DSI_INVALID_TX_LENGTH (1 << 13)
+#define DSI_INVALID_VC (1 << 12)
+#define DSI_INVALID_DATA_TYPE (1 << 11)
+#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10)
+#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9)
+#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8)
+#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7)
+#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6)
+#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5)
+#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4)
+#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3)
+#define DSI_EOT_SYNC_ERROR (1 << 2)
+#define DSI_SOT_SYNC_ERROR (1 << 1)
+#define DSI_SOT_ERROR (1 << 0)
+
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -10866,6 +11450,7 @@ enum skl_power_gate {
#define CMD_MODE_TE_GATE (0x1 << 28)
#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define TE_SOURCE_GPIO (1 << 27)
#define LINK_READY (1 << 20)
#define PIX_FMT_MASK (0x3 << 16)
#define PIX_FMT_SHIFT 16
@@ -10896,6 +11481,7 @@ enum skl_power_gate {
#define CALIBRATION_DISABLED (0x0 << 4)
#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define BLANKING_PACKET_ENABLE (1 << 2)
#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
#define EOTP_DISABLED (1 << 0)
@@ -11117,19 +11703,26 @@ enum skl_power_gate {
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
-#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */
-#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */
-#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
-#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
-#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
-/* Media decoder 2 MOCS registers */
-#define GEN11_MFX2_MOCS(i) _MMIO(0x10000 + (i) * 4)
+#define __GEN9_RCS0_MOCS0 0xc800
+#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0 0xc900
+#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0 0xca00
+#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0 0xcb00
+#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0 0xcc00
+#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+#define __GEN11_VCS2_MOCS0 0x10000
+#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
#define PMFLUSHDONE_LNICRSDROP (1 << 20)
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
#define PMFLUSHDONE_LNEBLK (1 << 22)
+#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
@@ -11145,6 +11738,7 @@ enum skl_power_gate {
#define _ICL_PHY_MISC_B 0x64C04
#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
_ICL_PHY_MISC_B)
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
/* Icelake Display Stream Compression Registers */
@@ -11454,17 +12048,33 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
-#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
-#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
-#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
-
-#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
-
-#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
+
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+/* This register controls the Display State Buffer (DSB) engines. */
+#define _DSBSL_INSTANCE_BASE 0x70B00
+#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
+ (pipe) * 0x1000 + (id) * 0x100)
+#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
+#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
+#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
+#define DSB_ENABLE (1 << 31)
+#define DSB_STATUS (1 << 0)
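/*
 * Illustration only (not part of this patch): one way a caller might arm a
 * DSB instance using the registers above. The assumptions here -- that
 * DSB_HEAD/DSB_TAIL take the offsets of the start and end of a command
 * buffer, and that DSB_STATUS in DSB_CTRL reads back as a busy indication --
 * are for the sketch only; the helper name and parameters are hypothetical.
 */
static inline void dsb_commit_sketch(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int id,
				     u32 buf_offset, u32 tail)
{
	/* Don't touch an engine that is still consuming a previous buffer. */
	if (I915_READ(DSB_CTRL(pipe, id)) & DSB_STATUS)
		return;

	I915_WRITE(DSB_HEAD(pipe, id), buf_offset);
	I915_WRITE(DSB_TAIL(pipe, id), buf_offset + tail);
	I915_WRITE(DSB_CTRL(pipe, id), DSB_ENABLE);
}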
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a195a92d0105..78a5f5d3c070 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,10 +31,13 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/intel_rps.h"
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_trace.h"
#include "intel_pm.h"
struct execute_cb {
@@ -54,11 +57,13 @@ static struct i915_global_request {
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return "i915";
+ return dev_name(to_request(fence)->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ const struct i915_gem_context *ctx;
+
/*
* The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
@@ -71,7 +76,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->gem_context->name ?: "[i915]";
+ ctx = i915_request_gem_context(to_request(fence));
+ if (!ctx)
+ return "[" DRIVER_NAME "]";
+
+ return ctx->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -119,62 +128,73 @@ const struct dma_fence_ops i915_fence_ops = {
.release = i915_fence_release,
};
-static inline void
-i915_request_remove_from_client(struct i915_request *request)
+static void irq_execute_cb(struct irq_work *wrk)
{
- struct drm_i915_file_private *file_priv;
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
- file_priv = request->file_priv;
- if (!file_priv)
- return;
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
- spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
- list_del(&request->client_link);
- request->file_priv = NULL;
- }
- spin_unlock(&file_priv->mm.lock);
+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ cb->hook(container_of(cb->fence, struct i915_request, submit),
+ &cb->signal->fence);
+ i915_request_put(cb->signal);
+
+ irq_execute_cb(wrk);
}
-static void advance_ring(struct i915_request *request)
+static void __notify_execute_cb(struct i915_request *rq)
{
- struct intel_ring *ring = request->ring;
- unsigned int tail;
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
/*
- * We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
+ * XXX Rollback on __i915_request_unsubmit()
*
- * Note this requires that we are always called in request
- * completion order.
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy, we have to carefully rollback the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
*/
- GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
- if (list_is_last(&request->ring_link, &ring->request_list)) {
- /*
- * We may race here with execlists resubmitting this request
- * as we retire it. The resubmission will move the ring->tail
- * forwards (to request->wa_tail). We either read the
- * current value that was written to hw, or the value that
- * is just about to be. Either works, if we miss the last two
- * noops - they are safe to be replayed on a reset.
- */
- tail = READ_ONCE(request->tail);
- list_del(&ring->active_link);
- } else {
- tail = request->postfix;
- }
- list_del_init(&request->ring_link);
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
+static inline void
+remove_from_client(struct i915_request *request)
+{
+ struct drm_i915_file_private *file_priv;
+
+ if (!READ_ONCE(request->file_priv))
+ return;
- ring->head = tail;
+ rcu_read_lock();
+ file_priv = xchg(&request->file_priv, NULL);
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_link);
+ spin_unlock(&file_priv->mm.lock);
+ }
+ rcu_read_unlock();
}
static void free_capture_list(struct i915_request *request)
{
struct i915_capture_list *capture;
- capture = request->capture_list;
+ capture = fetch_and_zero(&request->capture_list);
while (capture) {
struct i915_capture_list *next = capture->next;
@@ -183,79 +203,83 @@ static void free_capture_list(struct i915_request *request)
}
}
-static bool i915_request_retire(struct i915_request *rq)
+static void remove_from_engine(struct i915_request *rq)
{
- struct i915_active_request *active, *next;
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Virtual engines complicate acquiring the engine timeline lock,
+ * as their rq->engine pointer is not stable until under that
+ * engine lock. The simple ploy we use is to take the lock then
+ * check that the rq still belongs to the newly locked engine.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
+ }
+ list_del_init(&rq->sched.link);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+ spin_unlock_irq(&locked->active.lock);
+}
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+bool i915_request_retire(struct i915_request *rq)
+{
if (!i915_request_completed(rq))
return false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
- advance_ring(rq);
-
/*
- * Walk through the active list, calling retire on each. This allows
- * objects to track their GPU activity and mark themselves as idle
- * when their *last* active request is completed (updating state
- * tracking lists for eviction, active references for GEM, etc).
+ * We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
*
- * As the ->retire() may free the node, we decouple it first and
- * pass along the auxiliary information (to avoid dereferencing
- * the node after the callback).
+ * Note this requires that we are always called in request
+ * completion order.
*/
- list_for_each_entry_safe(active, next, &rq->active_list, link) {
- /*
- * In microbenchmarks or focusing upon time inside the kernel,
- * we may spend an inordinate amount of time simply handling
- * the retirement of requests and processing their callbacks.
- * Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of
- * i915_active_request. So we try to keep this loop as
- * streamlined as possible and also prefetch the next
- * i915_active_request to try and hide the likely cache miss.
- */
- prefetchw(next);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, rq);
- }
-
- local_irq_disable();
+ GEM_BUG_ON(!list_is_first(&rq->link,
+ &i915_request_timeline(rq)->requests));
+ rq->ring->head = rq->postfix;
- spin_lock(&rq->engine->active.lock);
- list_del(&rq->sched.link);
- spin_unlock(&rq->engine->active.lock);
+ /*
+ * We only loosely track inflight requests across preemption,
+ * and so we may find ourselves attempting to retire a _completed_
+ * request that we have removed from the HW and put back on a run
+ * queue.
+ */
+ remove_from_engine(rq);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
i915_request_mark_complete(rq);
if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ if (i915_request_has_waitboost(rq)) {
+ GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ atomic_dec(&rq->engine->gt->rps.num_waiters);
}
- spin_unlock(&rq->lock);
-
- local_irq_enable();
-
- intel_context_exit(rq->hw_context);
- intel_context_unpin(rq->hw_context);
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+ __notify_execute_cb(rq);
+ }
+ GEM_BUG_ON(!list_empty(&rq->execute_cb));
+ spin_unlock_irq(&rq->lock);
- i915_request_remove_from_client(rq);
+ remove_from_client(rq);
list_del(&rq->link);
+ intel_context_exit(rq->context);
+ intel_context_unpin(rq->context);
+
free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
@@ -265,76 +289,24 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_ring *ring = rq->ring;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&rq->ring_link))
- return;
-
do {
- tmp = list_first_entry(&ring->request_list,
- typeof(*tmp), ring_link);
+ tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
} while (i915_request_retire(tmp) && tmp != rq);
}
-static void irq_execute_cb(struct irq_work *wrk)
-{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- i915_sw_fence_complete(cb->fence);
- kmem_cache_free(global.slab_execute_cbs, cb);
-}
-
-static void irq_execute_cb_hook(struct irq_work *wrk)
-{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- cb->hook(container_of(cb->fence, struct i915_request, submit),
- &cb->signal->fence);
- i915_request_put(cb->signal);
-
- irq_execute_cb(wrk);
-}
-
-static void __notify_execute_cb(struct i915_request *rq)
-{
- struct execute_cb *cb;
-
- lockdep_assert_held(&rq->lock);
-
- if (list_empty(&rq->execute_cb))
- return;
-
- list_for_each_entry(cb, &rq->execute_cb, link)
- irq_work_queue(&cb->work);
-
- /*
- * XXX Rollback on __i915_request_unsubmit()
- *
- * In the future, perhaps when we have an active time-slicing scheduler,
- * it will be interesting to unsubmit parallel execution and remove
- * busywaits from the GPU until their master is restarted. This is
- * quite hairy, we have to carefully rollback the fence and do a
- * preempt-to-idle cycle on the target engine, all the while the
- * master execute_cb may refire.
- */
- INIT_LIST_HEAD(&rq->execute_cb);
-}
-
static int
-__i915_request_await_execution(struct i915_request *rq,
- struct i915_request *signal,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal),
- gfp_t gfp)
+__await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal),
+ gfp_t gfp)
{
struct execute_cb *cb;
@@ -371,22 +343,41 @@ __i915_request_await_execution(struct i915_request *rq,
}
spin_unlock_irq(&signal->lock);
+ /* Copy across semaphore status as we need the same behaviour */
+ rq->sched.flags |= signal->sched.flags;
return 0;
}
-void __i915_request_submit(struct i915_request *request)
+bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ bool result = false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
- if (i915_gem_context_is_banned(request->gem_context))
+ /*
+ * With the advent of preempt-to-busy, we frequently encounter
+ * requests that we have unsubmitted from HW, but left running
+ * until the next ack and so have completed in the meantime. On
+ * resubmission of that completed request, we can skip
+ * updating the payload, and execlists can even skip submitting
+ * the request.
+ *
+ * We must remove the request from the caller's priority queue,
+ * and the caller must only call us when the request is in their
+ * priority queue, under the active.lock. This ensures that the
+ * request has *not* yet been retired and we can safely move
+ * the request into the engine->active.list where it will be
+ * dropped upon retiring. (Otherwise if we resubmit a *retired*
+ * request, this would be a horrible use-after-free.)
+ */
+ if (i915_request_completed(request))
+ goto xfer;
+
+ if (intel_context_is_banned(request->context))
i915_request_skip(request, -EIO);
/*
@@ -409,29 +400,31 @@ void __i915_request_submit(struct i915_request *request)
i915_sw_fence_signaled(&request->semaphore))
engine->saturated |= request->sched.semaphores;
- /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ engine->emit_fini_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
- list_move_tail(&request->sched.link, &engine->active.requests);
+ trace_i915_request_execute(request);
+ engine->serial++;
+ result = true;
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
- set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+xfer: /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
+ if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
+ list_move_tail(&request->sched.link, &engine->active.requests);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ }
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
__notify_execute_cb(request);
spin_unlock(&request->lock);
- engine->emit_fini_breadcrumb(request,
- request->ring->vaddr + request->postfix);
-
- engine->serial++;
-
- trace_i915_request_execute(request);
+ return result;
}
void i915_request_submit(struct i915_request *request)
@@ -451,10 +444,7 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
@@ -512,6 +502,10 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_request_submit(request);
+
+ if (unlikely(fence->error))
+ i915_request_skip(request, fence->error);
+
/*
* We need to serialize use of the submit_request() callback
* with its hotplugging performed during an emergency
@@ -552,29 +546,28 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_context *ce, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
- struct intel_ring *ring = ce->ring;
struct i915_request *rq;
- if (list_empty(&ring->request_list))
+ if (list_empty(&tl->requests))
goto out;
if (!gfpflags_allow_blocking(gfp))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
i915_request_retire(rq);
rq = kmem_cache_alloc(global.slab_requests,
@@ -583,20 +576,35 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
return rq;
/* Ratelimit ourselves to prevent oom from malicious clients */
- rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
cond_synchronize_rcu(rq->rcustate);
/* Retire our old requests in the hope that we free some */
- ring_retire_requests(ring);
+ retire_requests(tl);
out:
return kmem_cache_alloc(global.slab_requests, gfp);
}
+static void __i915_request_ctor(void *arg)
+{
+ struct i915_request *rq = arg;
+
+ spin_lock_init(&rq->lock);
+ i915_sched_node_init(&rq->sched);
+ i915_sw_fence_init(&rq->submit, submit_notify);
+ i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+ rq->file_priv = NULL;
+ rq->capture_list = NULL;
+
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
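A brief sketch of how the constructor pairs with object reuse (illustrative only, not part of this patch; it simply mirrors the reinit calls in __i915_request_create further down): with SLAB_TYPESAFE_BY_RCU the ctor runs once per slab object, so each allocation only re-arms the per-use state.

	/* Illustrative reuse of a ctor-initialised request (sketch) */
	rq = kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
	i915_sw_fence_reinit(&rq->submit);	/* pending = 1, error = 0 */
	i915_sw_fence_reinit(&rq->semaphore);
	i915_sched_node_reinit(&rq->sched);	/* priority back to INVALID */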
+
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct i915_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
@@ -638,46 +646,43 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(ce, gfp);
+ rq = request_alloc_slow(tl, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
rq->i915 = ce->engine->i915;
- rq->hw_context = ce;
- rq->gem_context = ce->gem_context;
+ rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
- rq->timeline = tl;
+ rq->execution_mask = ce->engine->mask;
+
+ RCU_INIT_POINTER(rq->timeline, tl);
+ RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- rq->hwsp_cacheline = tl->hwsp_cacheline;
+
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
- spin_lock_init(&rq->lock);
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
tl->fence_context, seqno);
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
- i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
+ i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+ i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
- i915_sched_node_init(&rq->sched);
+ i915_sched_node_reinit(&rq->sched);
- /* No zalloc, must clear what we need by hand */
- rq->file_priv = NULL;
+ /* No zalloc, everything must be cleared after use */
rq->batch = NULL;
- rq->capture_list = NULL;
- rq->waitboost = false;
- rq->execution_mask = ALL_ENGINES;
-
- INIT_LIST_HEAD(&rq->active_list);
- INIT_LIST_HEAD(&rq->execute_cb);
+ GEM_BUG_ON(rq->file_priv);
+ GEM_BUG_ON(rq->capture_list);
+ GEM_BUG_ON(!list_empty(&rq->execute_cb));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -715,7 +720,6 @@ err_unwind:
ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&rq->active_list));
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
@@ -730,15 +734,15 @@ struct i915_request *
i915_request_create(struct intel_context *ce)
{
struct i915_request *rq;
- int err;
+ struct intel_timeline *tl;
- err = intel_context_timeline_lock(ce);
- if (err)
- return ERR_PTR(err);
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return ERR_CAST(tl);
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
+ if (!list_is_last(&rq->link, &tl->requests))
i915_request_retire(rq);
intel_context_enter(ce);
@@ -748,28 +752,58 @@ i915_request_create(struct intel_context *ce)
goto err_unlock;
/* Check that we do not interrupt ourselves with a new request */
- rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+ rq->cookie = lockdep_pin_lock(&tl->mutex);
return rq;
err_unlock:
- intel_context_timeline_unlock(ce);
+ intel_context_timeline_unlock(tl);
return rq;
}
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->ring_link, &signal->ring->request_list))
- return 0;
+ struct dma_fence *fence;
+ int err;
+
+ GEM_BUG_ON(i915_request_timeline(rq) ==
+ rcu_access_pointer(signal->timeline));
+
+ fence = NULL;
+ rcu_read_lock();
+ spin_lock_irq(&signal->lock);
+ if (!i915_request_started(signal) &&
+ !list_is_first(&signal->link,
+ &rcu_dereference(signal->timeline)->requests)) {
+ struct i915_request *prev = list_prev_entry(signal, link);
- signal = list_prev_entry(signal, ring_link);
- if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ /*
+ * Peek at the request before us in the timeline. That
+ * request will only be valid before it is retired, so
+ * after acquiring a reference to it, confirm that it is
+ * still part of the signaler's timeline.
+ */
+ if (i915_request_get_rcu(prev)) {
+ if (list_next_entry(prev, link) == signal)
+ fence = &prev->fence;
+ else
+ i915_request_put(prev);
+ }
+ }
+ spin_unlock_irq(&signal->lock);
+ rcu_read_unlock();
+ if (!fence)
return 0;
- return i915_sw_fence_await_dma_fence(&rq->submit,
- &signal->fence, 0,
- I915_FENCE_GFP);
+ err = 0;
+ if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ err = i915_sw_fence_await_dma_fence(&rq->submit,
+ fence, 0,
+ I915_FENCE_GFP);
+ dma_fence_put(fence);
+
+ return err;
}
static intel_engine_mask_t
@@ -791,38 +825,27 @@ already_busywaiting(struct i915_request *rq)
}
static int
-emit_semaphore_wait(struct i915_request *to,
- struct i915_request *from,
- gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ u32 seqno)
{
+ const int has_token = INTEL_GEN(to->i915) >= 12;
u32 hwsp_offset;
+ int len, err;
u32 *cs;
- int err;
- GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
- /* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
- return i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
-
- err = i915_request_await_start(to, from);
- if (err < 0)
- return err;
-
- /* Only submit our spinner after the signaler is running! */
- err = __i915_request_await_execution(to, from, NULL, gfp);
- if (err)
- return err;
-
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
- cs = intel_ring_begin(to, 4);
+ len = 4;
+ if (has_token)
+ len += 2;
+
+ cs = intel_ring_begin(to, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -834,18 +857,50 @@ emit_semaphore_wait(struct i915_request *to,
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = from->fence.seqno;
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD) +
+ has_token;
+ *cs++ = seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
+ if (has_token) {
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
intel_ring_advance(to, cs);
+ return 0;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (already_busywaiting(to) & from->engine->mask)
+ goto await_fence;
+
+ if (i915_request_await_start(to, from) < 0)
+ goto await_fence;
+
+ /* Only submit our spinner after the signaler is running! */
+ if (__await_execution(to, from, NULL, gfp))
+ goto await_fence;
+
+ if (__emit_semaphore_wait(to, from, from->fence.seqno))
+ goto await_fence;
+
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
+
+await_fence:
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
static int
@@ -865,18 +920,16 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return ret;
}
- if (to->engine == from->engine) {
+ if (to->engine == from->engine)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- } else if (intel_engine_has_semaphores(to->engine) &&
- to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ else if (intel_context_use_semaphores(to->context))
ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
- } else {
+ else
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
- }
if (ret < 0)
return ret;
@@ -916,8 +969,10 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
continue;
+ }
/*
* Requests on the same timeline are explicitly ordered, along
@@ -928,27 +983,80 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != rq->i915->mm.unordered_timeline &&
- i915_timeline_sync_is_later(rq->timeline, fence))
+ if (fence->context &&
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
continue;
if (dma_fence_is_i915(fence))
ret = i915_request_await_request(rq, to_request(fence));
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
+ fence->context ? I915_FENCE_TIMEOUT : 0,
I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != rq->i915->mm.unordered_timeline)
- i915_timeline_sync_set(rq->timeline, fence);
+ if (fence->context)
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
}
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl,
+ fence->context,
+ fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
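A note on the seqno - 1 convention used by these helpers (my reading of the surrounding code, stated as an assumption rather than taken from the patch text): on the signaler's timeline the HWSP reaching seqno - 1 means every request ahead of it has completed, i.e. the signaler is about to start.

	/*
	 * Illustration (assumption): seqnos on 'from's timeline
	 *
	 *	... [seqno - 2] [seqno - 1] | [seqno == from->fence.seqno] ...
	 *
	 * so __emit_semaphore_wait(to, from, from->fence.seqno - 1) below
	 * releases 'to' roughly when 'from' begins, not when it completes,
	 * and recording seqno - 1 in 'to's sync map de-duplicates repeated
	 * "start together" couplings against the same timeline.
	 */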
+
+static int
+__i915_request_await_execution(struct i915_request *to,
+ struct i915_request *from,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal))
+{
+ int err;
+
+ /* Submit both requests at the same time */
+ err = __await_execution(to, from, hook, I915_FENCE_GFP);
+ if (err)
+ return err;
+
+ /* Squash repeated dependencies to the same timelines */
+ if (intel_timeline_sync_has_start(i915_request_timeline(to),
+ &from->fence))
+ return 0;
+
+ /* Ensure both start together [after all semaphores in signal] */
+ if (intel_engine_has_semaphores(to->engine))
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ else
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
+ /* Couple the dependency tree for PI on this exposed to->fence */
+ if (to->engine->schedule) {
+ err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ if (err < 0)
+ return err;
+ }
+
+ return intel_timeline_sync_set_start(i915_request_timeline(to),
+ &from->fence);
+}
+
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence,
@@ -971,8 +1079,10 @@ i915_request_await_execution(struct i915_request *rq,
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
continue;
+ }
/*
* We don't squash repeated fence dependencies here as we
@@ -982,8 +1092,7 @@ i915_request_await_execution(struct i915_request *rq,
if (dma_fence_is_i915(fence))
ret = __i915_request_await_execution(rq,
to_request(fence),
- hook,
- I915_FENCE_GFP);
+ hook);
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
I915_FENCE_TIMEOUT,
@@ -1027,7 +1136,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -1044,7 +1153,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
@@ -1065,6 +1174,9 @@ void i915_request_skip(struct i915_request *rq, int error)
GEM_BUG_ON(!IS_ERR_VALUE((long)error));
dma_fence_set_error(&rq->fence, error);
+ if (rq->infix == rq->postfix)
+ return;
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -1076,12 +1188,13 @@ void i915_request_skip(struct i915_request *rq, int error)
head = 0;
}
memset(vaddr + head, 0, rq->postfix - head);
+ rq->infix = rq->postfix;
}
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct i915_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
/*
@@ -1104,7 +1217,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = rcu_dereference_protected(timeline->last_request.request, 1);
+ prev = to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1129,7 +1243,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
- __i915_active_request_set(&timeline->last_request, rq);
return prev;
}
@@ -1143,11 +1256,9 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_ring *ring = rq->ring;
- struct i915_request *prev;
u32 *cs;
- GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, rq->fence.context, rq->fence.seqno);
+ RQ_TRACE(rq, "\n");
/*
* To ensure that this call will not fail, space for its emissions
@@ -1156,6 +1267,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
*/
GEM_BUG_ON(rq->reserved_space > ring->space);
rq->reserved_space = 0;
+ rq->emitted_jiffies = jiffies;
/*
* Record the position of the start of the breadcrumb so that
@@ -1167,13 +1279,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
GEM_BUG_ON(IS_ERR(cs));
rq->postfix = intel_ring_offset(rq, cs);
- prev = __i915_request_add_to_timeline(rq);
-
- list_add_tail(&rq->ring_link, &ring->request_list);
- if (list_is_first(&rq->ring_link, &ring->request_list))
- list_add(&ring->active_link, &rq->i915->gt.active_rings);
- rq->emitted_jiffies = jiffies;
+ return __i915_request_add_to_timeline(rq);
+}
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
/*
* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@@ -1185,56 +1296,56 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- local_bh_disable();
i915_sw_fence_commit(&rq->semaphore);
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule) {
- struct i915_sched_attr attr = rq->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- engine->schedule(rq, &attr);
- }
- rcu_read_unlock();
+ if (attr && rq->engine->schedule)
+ rq->engine->schedule(rq, attr);
i915_sw_fence_commit(&rq->submit);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
-
- return prev;
}
void i915_request_add(struct i915_request *rq)
{
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
struct i915_request *prev;
- lockdep_assert_held(&rq->timeline->mutex);
- lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
prev = __i915_request_commit(rq);
+ if (rcu_access_pointer(rq->context->gem_context))
+ attr = i915_request_gem_context(rq)->sched;
+
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distance, past useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
/*
* In typical scenarios, we do not expect the previous request on
* the timeline to be still tracked by timeline->last_request if it
@@ -1252,10 +1363,12 @@ void i915_request_add(struct i915_request *rq)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_request_completed(prev))
+ if (prev &&
+ i915_request_completed(prev) &&
+ rcu_access_pointer(prev->timeline) == tl)
i915_request_retire_upto(prev);
- mutex_unlock(&rq->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1390,8 +1503,7 @@ long i915_request_wait(struct i915_request *rq,
* serialise wait/reset with an explicit lock, we do want
* lockdep to detect potential dependency cycles.
*/
- mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
- 0, 0, _THIS_IP_);
+ mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
/*
* Optimistic spin before touching IRQs.
@@ -1416,7 +1528,7 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (CONFIG_DRM_I915_SPIN_REQUEST &&
+ if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
__i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
dma_fence_signal(&rq->fence);
goto out;
@@ -1436,7 +1548,7 @@ long i915_request_wait(struct i915_request *rq,
*/
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
@@ -1447,8 +1559,10 @@ long i915_request_wait(struct i915_request *rq,
for (;;) {
set_current_state(state);
- if (i915_request_completed(rq))
+ if (i915_request_completed(rq)) {
+ dma_fence_signal(&rq->fence);
break;
+ }
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
@@ -1460,6 +1574,7 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1467,27 +1582,11 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
- mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
-bool i915_retire_requests(struct drm_i915_private *i915)
-{
- struct intel_ring *ring, *tmp;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(ring, tmp,
- &i915->gt.active_rings, active_link) {
- intel_ring_get(ring); /* last rq holds reference! */
- ring_retire_requests(ring);
- intel_ring_put(ring);
- }
-
- return !list_empty(&i915->gt.active_rings);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
@@ -1514,10 +1613,14 @@ static struct i915_global_request global = { {
int __init i915_global_request_init(void)
{
- global.slab_requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
+ global.slab_requests =
+ kmem_cache_create("i915_request",
+ sizeof(struct i915_request),
+ __alignof__(struct i915_request),
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU,
+ __i915_request_ctor);
if (!global.slab_requests)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index edbbdfec24ab..f57eadcf3583 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,7 +28,10 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gem/i915_gem_context_types.h"
+#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
+#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
@@ -40,14 +43,19 @@
struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
-struct i915_timeline;
-struct i915_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
struct i915_vma *vma;
};
+#define RQ_TRACE(rq, fmt, ...) do { \
+ const struct i915_request *rq__ = (rq); \
+ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
+ rq__->fence.context, rq__->fence.seqno, \
+ hwsp_seqno(rq__), ##__VA_ARGS__); \
+} while (0)
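A usage sketch for the new macro (illustrative only): RQ_TRACE already prints the engine name, the fence context:seqno pair and the current HWSP seqno, so callers supply just the trailing message.

	/* e.g. (illustrative) */
	RQ_TRACE(rq, "completed? %d\n", i915_request_completed(rq));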
+
enum {
/*
* I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
@@ -63,12 +71,63 @@ enum {
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
/*
+ * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+ *
+ * Using the scheduler, when a request is ready for execution it is put
+ * into the priority queue, and removed from that queue when transferred
+ * to the HW runlists. We want to track its membership within the
+ * priority queue so that we can easily check before rescheduling.
+ *
+ * See i915_request_in_priority_queue()
+ */
+ I915_FENCE_FLAG_PQUEUE,
+
+ /*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
* Internal bookkeeping used by the breadcrumb code to track when
* a request is on the various signal_list.
*/
I915_FENCE_FLAG_SIGNAL,
+
+ /*
+ * I915_FENCE_FLAG_HOLD - this request is currently on hold
+ *
+ * This request has been suspended, pending an ongoing investigation.
+ */
+ I915_FENCE_FLAG_HOLD,
+
+ /*
+ * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
+ *
+ * The execution of some requests should not be interrupted. This is
+ * a sensitive operation as it makes the request super important,
+ * blocking other higher priority work. Abuse of this flag will
+ * lead to quality of service issues.
+ */
+ I915_FENCE_FLAG_NOPREEMPT,
+
+ /*
+ * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
+ *
+ * A high priority sentinel request may be submitted to clear the
+ * submission queue. As it will be the only request in-flight, upon
+ * execution all other active requests will have been preempted and
+ * unsubmitted. This preemptive pulse is used to re-evaluate the
+ * in-flight requests, particularly in cases where an active context
+ * is banned and those active requests need to be cancelled.
+ */
+ I915_FENCE_FLAG_SENTINEL,
+
+ /*
+ * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
+ *
+ * Some requests are more important than others! In particular, a
+ * request that the user is waiting on is typically required for
+ * interactive latency, which we want to minimise by upclocking
+ * the GPU. Here we track such boost requests on a per-request basis.
+ */
+ I915_FENCE_FLAG_BOOST,
};
/**
@@ -108,11 +167,10 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
- struct intel_context *hw_context;
+ struct intel_context *context;
struct intel_ring *ring;
- struct i915_timeline *timeline;
+ struct intel_timeline __rcu *timeline;
struct list_head signal_link;
/*
@@ -143,6 +201,10 @@ struct i915_request {
union {
wait_queue_entry_t submitq;
struct i915_sw_dma_fence_cb dmaq;
+ struct i915_request_duration_cb {
+ struct dma_fence_cb cb;
+ ktime_t emitted;
+ } duration;
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
@@ -175,7 +237,7 @@ struct i915_request {
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
- struct i915_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline __rcu *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -210,19 +272,13 @@ struct i915_request {
* on the active_list (of their final request).
*/
struct i915_capture_list *capture_list;
- struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- bool waitboost;
-
/** timeline->request entry for this request */
struct list_head link;
- /** ring->request_list entry for this request */
- struct list_head ring_link;
-
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
@@ -248,7 +304,10 @@ struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
struct i915_request *__i915_request_commit(struct i915_request *request);
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr);
+bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request *
@@ -290,7 +349,7 @@ int i915_request_await_execution(struct i915_request *rq,
void i915_request_add(struct i915_request *rq);
-void __i915_request_submit(struct i915_request *request);
+bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
void i915_request_skip(struct i915_request *request, int error);
@@ -307,10 +366,8 @@ long i915_request_wait(struct i915_request *rq,
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
-#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
-#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline bool i915_request_signaled(const struct i915_request *rq)
{
@@ -323,6 +380,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -416,6 +478,27 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
return __i915_request_has_started(rq);
}
+/**
+ * i915_request_is_ready - check if the request is ready for execution
+ * @rq: the request
+ *
+ * Upon construction, the request is instructed to wait upon various
+ * signals before it is ready to be executed by the HW. That is, we do
+ * not want to start execution and read data before it is written. In practice,
+ * this is controlled with a mixture of interrupts and semaphores. Once
+ * the submit fence is completed, the backend scheduler will place the
+ * request into its queue and from there submit it for execution. So we
+ * can detect when a request is eligible for execution (and is under control
+ * of the scheduler) by querying where it is in any of the scheduler's lists.
+ *
+ * Returns true if the request is ready for execution (it may be inflight),
+ * false otherwise.
+ */
+static inline bool i915_request_is_ready(const struct i915_request *rq)
+{
+ return !list_empty(&rq->sched.link);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
if (i915_request_signaled(rq))
@@ -429,6 +512,62 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
-bool i915_retire_requests(struct drm_i915_private *i915);
+static inline bool i915_request_has_waitboost(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+}
+
+static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
+{
+ /* Preemption should only be disabled very rarely */
+ return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
+}
+
+static inline bool i915_request_has_sentinel(const struct i915_request *rq)
+{
+ return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
+}
+
+static inline bool i915_request_on_hold(const struct i915_request *rq)
+{
+ return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
+}
+
+static inline void i915_request_set_hold(struct i915_request *rq)
+{
+ set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
+static inline void i915_request_clear_hold(struct i915_request *rq)
+{
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
+static inline struct intel_timeline *
+i915_request_timeline(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+}
+
+static inline struct i915_gem_context *
+i915_request_gem_context(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->context->gem_context, true);
+}
+
+static inline struct intel_timeline *
+i915_request_active_timeline(struct i915_request *rq)
+{
+ /*
+ * When in use during submission, we are protected by a guarantee that
+ * the context/timeline is pinned and must remain pinned until after
+ * this submission.
+ */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rq->engine->active.lock));
+}
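A short sketch contrasting the two timeline accessors above (illustrative; the named callers are only examples of where each lock is held):

	/*
	 * Illustrative only:
	 *
	 *	- while constructing or retiring the request, i.e. under the
	 *	  timeline mutex held by i915_request_add():
	 *		tl = i915_request_timeline(rq);
	 *
	 *	- during submission, i.e. under engine->active.lock as asserted
	 *	  in __i915_request_submit():
	 *		tl = i915_request_active_timeline(rq);
	 */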
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index 6617963df9ed..b7b59328cb76 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -67,15 +67,15 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
}
/**
- * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
- * @__dmap: DMA address (output)
+ * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
+ * @__dp: Device address (output)
* @__iter: 'struct sgt_iter' (iterator state, internal)
* @__sgt: sg_table to iterate over (input)
* @__step: step size
*/
-#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \
+#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
- ((__dmap) = (__iter).dma + (__iter).curr); \
+ ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
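A hedged usage sketch for the renamed iterator (illustrative; obj->mm.pages, the PAGE_SIZE step and the per-address callback are assumptions for the example, not taken from this hunk):

	/* Illustrative only: visit each device address backing an object. */
	struct sgt_iter iter;
	dma_addr_t daddr;

	__for_each_sgt_daddr(daddr, iter, obj->mm.pages, PAGE_SIZE)
		insert_entry(vm, daddr);	/* hypothetical per-page hook */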
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2e9b38bdc33c..5d96cfba40f8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -177,22 +177,51 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority | __NO_PREEMPTION;
}
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
{
- const struct i915_request *inflight =
- port_request(engine->execlists.port);
+ /*
+ * Allow preemption of low -> normal -> high, but we do
+ * not allow low priority tasks to preempt other low priority
+ * tasks under the impression that latency for low priority
+ * tasks does not matter (as much as background throughput),
+ * so kiss.
+ */
+ return prio >= max(I915_PRIORITY_NORMAL, active);
+}
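A quick worked example of the helper above (illustrative), assuming low < I915_PRIORITY_NORMAL < high:

	/*
	 * e.g. (illustrative)
	 *	need_preempt(low, low)                   -> false (low never preempts low)
	 *	need_preempt(I915_PRIORITY_NORMAL, low)  -> true
	 *	need_preempt(high, I915_PRIORITY_NORMAL) -> true
	 */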
+
+static void kick_submission(struct intel_engine_cs *engine,
+ const struct i915_request *rq,
+ int prio)
+{
+ const struct i915_request *inflight;
/*
- * If we are already the currently executing context, don't
- * bother evaluating if we should preempt ourselves, or if
- * we expect nothing to change as a result of running the
- * tasklet, i.e. we have not change the priority queue
- * sufficiently to oust the running context.
+ * We only need to kick the tasklet once for the high priority
+ * new context we add into the queue.
*/
- if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+ if (prio <= engine->execlists.queue_priority_hint)
return;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ rcu_read_lock();
+
+ /* Nothing currently active? We're overdue for a submission! */
+ inflight = execlists_active(&engine->execlists);
+ if (!inflight)
+ goto unlock;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (inflight->context == rq->context)
+ goto unlock;
+
+ engine->execlists.queue_priority_hint = prio;
+ if (need_preempt(prio, rq_prio(inflight)))
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+ rcu_read_unlock();
}
static void __i915_schedule(struct i915_sched_node *node,
@@ -297,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
node->attr.priority = prio;
- if (list_empty(&node->link)) {
- /*
- * If the request is not in the priolist queue because
- * it is not yet runnable, then it doesn't contribute
- * to our preemption decisions. On the other hand,
- * if the request is on the HW, it too is not in the
- * queue; but in that case we may still need to reorder
- * the inflight requests.
- */
+ /*
+ * Once the request is ready, it will be placed into the
+ * priority lists and then onto the HW runlist. Before the
+ * request is ready, it does not contribute to our preemption
+ * decisions and we can safely ignore it, as it, and any
+ * preemption required, will be dealt with upon submission.
+ * See engine->submit_request()
+ */
+ if (list_empty(&node->link))
continue;
- }
- if (!intel_engine_is_virtual(engine) &&
- !i915_request_is_active(node_to_request(node))) {
+ if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
@@ -318,13 +345,8 @@ static void __i915_schedule(struct i915_sched_node *node,
list_move_tail(&node->link, cache.priolist);
}
- if (prio <= engine->execlists.queue_priority_hint)
- continue;
-
- engine->execlists.queue_priority_hint = prio;
-
/* Defer (tasklet) submission until after all of our updates. */
- kick_submission(engine, prio);
+ kick_submission(engine, node_to_request(node), prio);
}
spin_unlock(&engine->active.lock);
@@ -350,8 +372,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
-
- if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+ if (READ_ONCE(rq->sched.attr.priority) & bump)
return;
spin_lock_irqsave(&schedule_lock, flags);
@@ -364,9 +385,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
+
+ i915_sched_node_reinit(node);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
node->attr.priority = I915_PRIORITY_INVALID;
node->semaphores = 0;
node->flags = 0;
+
+ GEM_BUG_ON(!list_empty(&node->signalers_list));
+ GEM_BUG_ON(!list_empty(&node->waiters_list));
+ GEM_BUG_ON(!list_empty(&node->link));
}
static struct i915_dependency *
@@ -395,6 +426,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
list_add(&dep->wait_link, &signal->waiters_list);
list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
+ dep->waiter = node;
dep->flags = flags;
/* Keep track of whether anyone on this chain has a semaphore */
@@ -450,13 +482,13 @@ void i915_sched_node_fini(struct i915_sched_node *node)
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->signalers_list);
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -467,6 +499,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->waiters_list);
spin_unlock_irq(&schedule_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 7eefccff39bf..d1dc4efef77b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -26,6 +26,7 @@
sched.link)
void i915_sched_node_init(struct i915_sched_node *node);
+void i915_sched_node_reinit(struct i915_sched_node *node);
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
@@ -52,22 +53,4 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
-static inline bool i915_scheduler_need_preempt(int prio, int active)
-{
- /*
- * Allow preemption of low -> normal -> high, but we do
- * not allow low priority tasks to preempt other low priority
- * tasks under the impression that latency for low priority
- * tasks does not matter (as much as background throughput),
- * so kiss.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- return prio > max(I915_PRIORITY_NORMAL - 1, active);
-}
-
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 3e309631bd0b..d18e70550054 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -49,6 +49,15 @@ struct i915_sched_attr {
* DAG of each request, we are able to insert it into a sorted queue when it
* is ready, and are able to reorder its portion of the graph to accommodate
* dynamic priority changes.
+ *
+ * Ok, there is now one active element to the "scheduler" in the backends.
+ * We let a new context run for a small amount of time before re-evaluating
+ * the run order. As we re-evaluate, we maintain the strict ordering of
+ * dependencies, but attempt to rotate the active contexts (the current context
+ * is put to the back of its priority queue, and its dependents are then reshuffled).
+ * This provides minimal timeslicing and prevents a userspace hog (e.g.
+ * something waiting on a user semaphore [VkEvent]) from denying service to
+ * others.
*/
struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
@@ -62,6 +71,7 @@ struct i915_sched_node {
struct i915_dependency {
struct i915_sched_node *signaler;
+ struct i915_sched_node *waiter;
struct list_head signal_link;
struct list_head wait_link;
struct list_head dfs_link;
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 207e21b478f2..98bcb6fa0ab4 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -24,6 +24,8 @@
#ifndef __I915_SELFTEST_H__
#define __I915_SELFTEST_H__
+#include <linux/types.h>
+
struct pci_dev;
struct drm_i915_private;
@@ -34,6 +36,7 @@ struct i915_selftest {
char *filter;
int mock;
int live;
+ int perf;
};
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -43,6 +46,7 @@ extern struct i915_selftest i915_selftest;
int i915_mock_selftests(void);
int i915_live_selftests(struct pci_dev *pdev);
+int i915_perf_selftests(struct pci_dev *pdev);
/* We extract the function declarations from i915_mock_selftests.h and
* i915_live_selftests.h Add your unit test declarations there!
@@ -59,6 +63,7 @@ int i915_live_selftests(struct pci_dev *pdev);
#undef selftest
#define selftest(name, func) int func(struct drm_i915_private *i915);
#include "selftests/i915_live_selftests.h"
+#include "selftests/i915_perf_selftests.h"
#undef selftest
struct i915_subtest {
@@ -66,12 +71,37 @@ struct i915_subtest {
const char *name;
};
+int __i915_nop_setup(void *data);
+int __i915_nop_teardown(int err, void *data);
+
+int __i915_live_setup(void *data);
+int __i915_live_teardown(int err, void *data);
+
+int __intel_gt_live_setup(void *data);
+int __intel_gt_live_teardown(int err, void *data);
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data);
#define i915_subtests(T, data) \
- __i915_subtests(__func__, T, ARRAY_SIZE(T), data)
+ __i915_subtests(__func__, \
+ __i915_nop_setup, __i915_nop_teardown, \
+ T, ARRAY_SIZE(T), data)
+#define i915_live_subtests(T, data) ({ \
+ typecheck(struct drm_i915_private *, data); \
+ __i915_subtests(__func__, \
+ __i915_live_setup, __i915_live_teardown, \
+ T, ARRAY_SIZE(T), data); \
+})
+#define intel_gt_live_subtests(T, data) ({ \
+ typecheck(struct intel_gt *, data); \
+ __i915_subtests(__func__, \
+ __intel_gt_live_setup, __intel_gt_live_teardown, \
+ T, ARRAY_SIZE(T), data); \
+})
#define SUBTEST(x) { x, #x }
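A hedged sketch of how the new live-subtest wrapper is typically wired up (live_example and i915_example_live_selftests are hypothetical names, and the subtest callback is assumed to take a void *arg):

	static int live_example(void *arg)
	{
		struct drm_i915_private *i915 = arg;

		return i915 ? 0 : -ENODEV;	/* placeholder body */
	}

	int i915_example_live_selftests(struct drm_i915_private *i915)
	{
		static const struct i915_subtest tests[] = {
			SUBTEST(live_example),
		};

		return i915_live_subtests(tests, i915);
	}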
@@ -82,6 +112,7 @@ int __i915_subtests(const char *caller,
static inline int i915_mock_selftests(void) { return 0; }
static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; }
+static inline int i915_perf_selftests(struct pci_dev *pdev) { return 0; }
#define I915_SELFTEST_DECLARE(x)
#define I915_SELFTEST_ONLY(x) 0
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a08d7d16621b..8812cdd9007f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -28,9 +28,11 @@
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
+#include "display/intel_vga.h"
+#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "i915_suspend.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
@@ -56,7 +58,7 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev_priv);
+ intel_vga_redisable(dev_priv);
}
int i915_save_state(struct drm_i915_private *dev_priv)
@@ -64,8 +66,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
i915_save_display(dev_priv);
if (IS_GEN(dev_priv, 4))
@@ -99,8 +99,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return 0;
}
@@ -109,8 +107,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
@@ -144,8 +140,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
intel_gmbus_reset(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h
new file mode 100644
index 000000000000..3a36fb4ecc05
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_suspend.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SUSPEND_H__
+#define __I915_SUSPEND_H__
+
+struct drm_i915_private;
+
+int i915_save_state(struct drm_i915_private *i915);
+int i915_restore_state(struct drm_i915_private *i915);
+
+#endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 5387aafd3424..51ba97daf2a0 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -7,11 +7,17 @@
#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_sw_fence.h"
#include "i915_selftest.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
+#else
+#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
static DEFINE_SPINLOCK(i915_sw_fence_lock);
@@ -157,8 +163,11 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
LIST_HEAD(extra);
do {
- list_for_each_entry_safe(pos, next, &x->head, entry)
- pos->func(pos, TASK_NORMAL, 0, &extra);
+ list_for_each_entry_safe(pos, next, &x->head, entry) {
+ pos->func(pos,
+ TASK_NORMAL, fence->error,
+ &extra);
+ }
if (list_empty(&extra))
break;
@@ -215,11 +224,21 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
{
BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
+ __init_waitqueue_head(&fence->wait, name, key);
+ fence->flags = (unsigned long)fn;
+
+ i915_sw_fence_reinit(fence);
+}
+
+void i915_sw_fence_reinit(struct i915_sw_fence *fence)
+{
debug_fence_init(fence);
- __init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
- fence->flags = (unsigned long)fn;
+ fence->error = 0;
+
+ I915_SW_FENCE_BUG_ON(!fence->flags);
+ I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
void i915_sw_fence_commit(struct i915_sw_fence *fence)
@@ -230,6 +249,8 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
+ i915_sw_fence_set_error_once(wq->private, flags);
+
list_del(&wq->entry);
__i915_sw_fence_complete(wq->private, key);
@@ -302,8 +323,10 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (i915_sw_fence_done(signaler))
+ if (i915_sw_fence_done(signaler)) {
+ i915_sw_fence_set_error_once(fence, signaler->error);
return 0;
+ }
debug_fence_assert(signaler);
@@ -319,6 +342,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return -ENOMEM;
i915_sw_fence_wait(signaler);
+ i915_sw_fence_set_error_once(fence, signaler->error);
return 0;
}
@@ -337,7 +361,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
__add_wait_queue_entry_tail(&signaler->wait, wq);
pending = 1;
} else {
- i915_sw_fence_wake(wq, 0, 0, NULL);
+ i915_sw_fence_wake(wq, 0, signaler->error, NULL);
pending = 0;
}
spin_unlock_irqrestore(&signaler->wait.lock, flags);
@@ -372,6 +396,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ i915_sw_fence_set_error_once(cb->fence, dma->error);
i915_sw_fence_complete(cb->fence);
kfree(cb);
}
@@ -391,6 +416,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
}
@@ -402,8 +428,10 @@ static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
struct i915_sw_fence *fence;
fence = xchg(&cb->base.fence, NULL);
- if (fence)
+ if (fence) {
+ i915_sw_fence_set_error_once(fence, dma->error);
i915_sw_fence_complete(fence);
+ }
irq_work_queue(&cb->work);
}
@@ -431,8 +459,10 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (dma_fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma)) {
+ i915_sw_fence_set_error_once(fence, dma->error);
return 0;
+ }
cb = kmalloc(timeout ?
sizeof(struct i915_sw_dma_fence_cb_timer) :
@@ -442,7 +472,12 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return dma_fence_wait(dma, false);
+ ret = dma_fence_wait(dma, false);
+ if (ret)
+ return ret;
+
+ i915_sw_fence_set_error_once(fence, dma->error);
+ return 0;
}
cb->fence = fence;
@@ -480,6 +515,7 @@ static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ i915_sw_fence_set_error_once(cb->fence, dma->error);
i915_sw_fence_complete(cb->fence);
}
@@ -491,8 +527,10 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
- if (dma_fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma)) {
+ i915_sw_fence_set_error_once(fence, dma->error);
return 0;
+ }
cb->fence = fence;
i915_sw_fence_await(fence);
@@ -501,7 +539,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (ret == 0) {
ret = 1;
} else {
- i915_sw_fence_complete(fence);
+ __dma_i915_sw_fence_wake(dma, &cb->base);
if (ret == -ENOENT) /* fence already signaled */
ret = 0;
}
@@ -510,7 +548,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
}
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -526,8 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -551,7 +588,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 9cb5c3b307a6..19e806ce43bc 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,12 +16,13 @@
#include <linux/wait.h>
struct completion;
-struct reservation_object;
+struct dma_resv;
struct i915_sw_fence {
wait_queue_head_t wait;
unsigned long flags;
atomic_t pending;
+ int error;
};
#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
@@ -53,6 +54,8 @@ do { \
__i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif
+void i915_sw_fence_reinit(struct i915_sw_fence *fence);
+
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence);
#else
@@ -82,7 +85,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -106,4 +109,11 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
wait_event(fence->wait, i915_sw_fence_done(fence));
}
+static inline void
+i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
+{
+ if (unlikely(error))
+ cmpxchg(&fence->error, 0, error);
+}
+
#endif /* _I915_SW_FENCE_H_ */
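
The new fence->error field latches only the first failure: i915_sw_fence_set_error_once() uses cmpxchg() so later errors are dropped once a non-zero value is recorded. A small userspace analogue of that set-once pattern, using C11 atomics rather than the kernel cmpxchg() (illustrative only, not i915 code):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int fence_error;

static void set_error_once(int error)
{
	int expected = 0;

	/* only the first non-zero error is stored */
	if (error)
		atomic_compare_exchange_strong(&fence_error, &expected, error);
}

int main(void)
{
	set_error_once(0);          /* no-op: zero means "no error"   */
	set_error_once(-EIO);       /* records -EIO                   */
	set_error_once(-ETIMEDOUT); /* dropped: -EIO is already latched */

	printf("fence error = %d\n", atomic_load(&fence_error)); /* -EIO */
	return 0;
}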
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
new file mode 100644
index 000000000000..997b2998f1f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_sw_fence_work.h"
+
+static void fence_complete(struct dma_fence_work *f)
+{
+ if (f->ops->release)
+ f->ops->release(f);
+ dma_fence_signal(&f->dma);
+}
+
+static void fence_work(struct work_struct *work)
+{
+ struct dma_fence_work *f = container_of(work, typeof(*f), work);
+ int err;
+
+ err = f->ops->work(f);
+ if (err)
+ dma_fence_set_error(&f->dma, err);
+
+ fence_complete(f);
+ dma_fence_put(&f->dma);
+}
+
+static int __i915_sw_fence_call
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (fence->error)
+ dma_fence_set_error(&f->dma, fence->error);
+
+ if (!f->dma.error) {
+ dma_fence_get(&f->dma);
+ queue_work(system_unbound_wq, &f->work);
+ } else {
+ fence_complete(f);
+ }
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&f->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "dma-fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ return f->ops->name ?: "work";
+}
+
+static void fence_release(struct dma_fence *fence)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ i915_sw_fence_fini(&f->chain);
+
+ BUILD_BUG_ON(offsetof(typeof(*f), dma));
+ dma_fence_free(&f->dma);
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+ .release = fence_release,
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+ const struct dma_fence_work_ops *ops)
+{
+ f->ops = ops;
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
+ i915_sw_fence_init(&f->chain, fence_notify);
+ INIT_WORK(&f->work, fence_work);
+}
+
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
+{
+ if (!signal)
+ return 0;
+
+ return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
new file mode 100644
index 000000000000..3a22b287e201
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef I915_SW_FENCE_WORK_H
+#define I915_SW_FENCE_WORK_H
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "i915_sw_fence.h"
+
+struct dma_fence_work;
+
+struct dma_fence_work_ops {
+ const char *name;
+ int (*work)(struct dma_fence_work *f);
+ void (*release)(struct dma_fence_work *f);
+};
+
+struct dma_fence_work {
+ struct dma_fence dma;
+ spinlock_t lock;
+
+ struct i915_sw_fence chain;
+ struct i915_sw_dma_fence_cb cb;
+
+ struct work_struct work;
+ const struct dma_fence_work_ops *ops;
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+ const struct dma_fence_work_ops *ops);
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
+
+static inline void dma_fence_work_commit(struct dma_fence_work *f)
+{
+ i915_sw_fence_commit(&f->chain);
+}
+
+#endif /* I915_SW_FENCE_WORK_H */
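
A hedged sketch of how a consumer of the new dma_fence_work helper is expected to look; the ops, function names and caller below are invented for illustration and are not part of this series. The shape is: allocate, dma_fence_work_init(), optionally chain behind an incoming fence, then dma_fence_work_commit(); the work runs from system_unbound_wq once the chained fence signals, and any error is reported on the embedded dma_fence.

/* Hypothetical user of dma_fence_work. */
static int example_work(struct dma_fence_work *f)
{
	/* runs after the chained fence (if any) has signaled */
	return 0; /* non-zero is recorded via dma_fence_set_error() */
}

static const struct dma_fence_work_ops example_ops = {
	.name = "example",
	.work = example_work,
};

static int example_queue_after(struct dma_fence *prereq)
{
	struct dma_fence_work *f;
	int err;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	dma_fence_work_init(f, &example_ops);

	/* a NULL @prereq is a no-op and the work runs immediately */
	err = dma_fence_work_chain(f, prereq);
	if (err < 0)
		dma_fence_set_error(&f->dma, err); /* ops->work is skipped */

	dma_fence_work_commit(f); /* arms the chain; f frees itself when done */
	return 0;
}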
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
new file mode 100644
index 000000000000..39c79e1c5b52
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+#include "i915_switcheroo.h"
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (!i915) {
+ dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ return;
+ }
+
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(pdev, PCI_D0);
+ i915_resume_switcheroo(i915);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_info("switched off\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(i915, pmm);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return i915 && i915->drm.open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+
+int i915_switcheroo_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+}
+
+void i915_switcheroo_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_switcheroo_unregister_client(pdev);
+}
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.h b/drivers/gpu/drm/i915/i915_switcheroo.h
new file mode 100644
index 000000000000..59b6c1e07d75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SWITCHEROO__
+#define __I915_SWITCHEROO__
+
+struct drm_i915_private;
+
+int i915_switcheroo_register(struct drm_i915_private *i915);
+void i915_switcheroo_unregister(struct drm_i915_private *i915);
+
+#endif /* __I915_SWITCHEROO__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ecac1c386109..0cef3130db05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,8 +30,11 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -49,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
u64 res = 0;
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(dev_priv, reg);
+ res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -142,12 +145,12 @@ static const struct attribute_group media_rc6_attr_group = {
};
#endif
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
- if (!HAS_L3_DPF(dev_priv))
+ if (!HAS_L3_DPF(i915))
return -EPERM;
- if (offset % 4 != 0)
+ if (!IS_ALIGNED(offset, sizeof(u32)))
return -EINVAL;
if (offset >= GEN7_L3LOG_SIZE)
@@ -162,31 +165,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
int ret;
- count = round_down(count, 4);
-
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
+ count = round_down(count, sizeof(u32));
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+ memset(buf, 0, count);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- if (dev_priv->l3_parity.remap_info[slice])
+ spin_lock(&i915->gem.contexts.lock);
+ if (i915->l3_parity.remap_info[slice])
memcpy(buf,
- dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
count);
- else
- memset(buf, 0, count);
-
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock(&i915->gem.contexts.lock);
return count;
}
@@ -197,46 +193,49 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
- u32 **remap_info;
+ u32 *remap_info, *freeme = NULL;
+ struct i915_gem_context *ctx;
int ret;
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ if (count < sizeof(u32))
+ return -EINVAL;
- remap_info = &dev_priv->l3_parity.remap_info[slice];
- if (!*remap_info) {
- *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
- if (!*remap_info) {
- ret = -ENOMEM;
- goto out;
- }
+ remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!remap_info)
+ return -ENOMEM;
+
+ spin_lock(&i915->gem.contexts.lock);
+
+ if (i915->l3_parity.remap_info[slice]) {
+ freeme = remap_info;
+ remap_info = i915->l3_parity.remap_info[slice];
+ } else {
+ i915->l3_parity.remap_info[slice] = remap_info;
}
- /* TODO: Ideally we really want a GPU reset here to make sure errors
- * aren't propagated. Since I cannot find a stable way to reset the GPU
- * at this point it is left as a TODO.
- */
- memcpy(*remap_info + (offset/4), buf, count);
+ count = round_down(count, sizeof(u32));
+ memcpy(remap_info + offset / sizeof(u32), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- ctx->remap_slice |= (1<<slice);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+ ctx->remap_slice |= BIT(slice);
- ret = count;
+ spin_unlock(&i915->gem.contexts.lock);
+ kfree(freeme);
-out:
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
- return ret;
+ return count;
}
static const struct bin_attribute dpf_attrs = {
@@ -260,44 +259,30 @@ static const struct bin_attribute dpf_attrs_1 = {
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- intel_wakeref_t wakeref;
- u32 freq;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
- freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
-
- freq = (freq >> 8) & 0xff;
- } else {
- freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_rps_read_actual_frequency(rps));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -305,7 +290,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
bool boost = false;
ssize_t ret;
u32 val;
@@ -315,7 +300,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
return ret;
/* Validate against (static) hardware limits */
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
@@ -335,19 +320,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.max_freq_softlimit));
+ intel_gpu_freq(rps, rps->max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -355,19 +340,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
@@ -377,7 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- intel_gpu_freq(dev_priv, val));
+ intel_gpu_freq(rps, val));
rps->max_freq_softlimit = val;
@@ -385,14 +368,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new max_delay and
+ /*
+ * We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -400,10 +384,10 @@ unlock:
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.min_freq_softlimit));
+ intel_gpu_freq(rps, rps->min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -411,19 +395,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
@@ -437,14 +419,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new min_delay and
+ /*
+ * We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -466,15 +449,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+ val = intel_gpu_freq(rps, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+ val = intel_gpu_freq(rps, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->min_freq);
+ val = intel_gpu_freq(rps, rps->min_freq);
else
BUG();
@@ -515,15 +498,15 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_state *gpu;
+ struct i915_gpu_coredump *gpu;
ssize_t ret;
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
- ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_state_put(gpu);
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
} else {
const char *str = "No error state collected\n";
size_t len = strlen(str);
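
The l3 parity write path above now allocates with GFP_KERNEL before taking the contexts spinlock, installs the buffer only if the slot is still empty, and frees the redundant copy afterwards, so no sleeping allocation happens under the lock and a racing writer is harmless. A generic illustration of that allocate-then-publish pattern as a hypothetical helper (not i915 code):

static int publish_remap_info(struct drm_i915_private *i915, int slice)
{
	u32 *remap_info, *freeme = NULL;

	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice]) {
		freeme = remap_info;	/* lost the race, use the winner */
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info; /* publish */
	}
	/* ... update *remap_info while still holding the lock ... */
	spin_unlock(&i915->gem.contexts.lock);

	kfree(freeme);
	return 0;
}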
diff --git a/drivers/gpu/drm/i915/i915_sysfs.h b/drivers/gpu/drm/i915/i915_sysfs.h
new file mode 100644
index 000000000000..41afd4366416
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SYSFS_H__
+#define __I915_SYSFS_H__
+
+struct drm_i915_private;
+
+void i915_setup_sysfs(struct drm_i915_private *i915);
+void i915_teardown_sysfs(struct drm_i915_private *i915);
+
+#endif /* __I915_SYSFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
deleted file mode 100644
index 36e5e5a65155..000000000000
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef I915_TIMELINE_H
-#define I915_TIMELINE_H
-
-#include <linux/lockdep.h>
-
-#include "i915_active.h"
-#include "i915_syncmap.h"
-#include "i915_timeline_types.h"
-
-int i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *tl,
- struct i915_vma *hwsp);
-void i915_timeline_fini(struct i915_timeline *tl);
-
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
- struct i915_vma *global_hwsp);
-
-static inline struct i915_timeline *
-i915_timeline_get(struct i915_timeline *timeline)
-{
- kref_get(&timeline->kref);
- return timeline;
-}
-
-void __i915_timeline_free(struct kref *kref);
-static inline void i915_timeline_put(struct i915_timeline *timeline)
-{
- kref_put(&timeline->kref, __i915_timeline_free);
-}
-
-static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
- u64 context, u32 seqno)
-{
- return i915_syncmap_set(&tl->sync, context, seqno);
-}
-
-static inline int i915_timeline_sync_set(struct i915_timeline *tl,
- const struct dma_fence *fence)
-{
- return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
-}
-
-static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
- u64 context, u32 seqno)
-{
- return i915_syncmap_is_later(&tl->sync, context, seqno);
-}
-
-static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
- const struct dma_fence *fence)
-{
- return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
-}
-
-int i915_timeline_pin(struct i915_timeline *tl);
-int i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno);
-void i915_timeline_unpin(struct i915_timeline *tl);
-
-int i915_timeline_read_hwsp(struct i915_request *from,
- struct i915_request *until,
- u32 *hwsp_offset);
-
-void i915_timelines_init(struct drm_i915_private *i915);
-void i915_timelines_park(struct drm_i915_private *i915);
-void i915_timelines_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
deleted file mode 100644
index fce5cb4f1090..000000000000
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2016 Intel Corporation
- */
-
-#ifndef __I915_TIMELINE_TYPES_H__
-#define __I915_TIMELINE_TYPES_H__
-
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-
-#include "i915_active_types.h"
-
-struct drm_i915_private;
-struct i915_vma;
-struct i915_timeline_cacheline;
-struct i915_syncmap;
-
-struct i915_timeline {
- u64 fence_context;
- u32 seqno;
-
- struct mutex mutex; /* protects the flow of requests */
-
- unsigned int pin_count;
- const u32 *hwsp_seqno;
- struct i915_vma *hwsp_ggtt;
- u32 hwsp_offset;
-
- struct i915_timeline_cacheline *hwsp_cacheline;
-
- bool has_initial_breadcrumb;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head requests;
-
- /* Contains an RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_active_request last_request;
-
- /**
- * We track the most recent seqno that we wait on in every context so
- * that we only have to emit a new await and dependency on a more
- * recent sync point. As the contexts may be executed out-of-order, we
- * have to track each individually and can not rely on an absolute
- * global_seqno. When we know that all tracked fences are completed
- * (i.e. when the driver is idle), we know that the syncmap is
- * redundant and we can discard it without loss of generality.
- */
- struct i915_syncmap *sync;
-
- struct list_head link;
- struct drm_i915_private *i915;
-
- struct kref kref;
-};
-
-#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index cce426b23a24..233a97a2c276 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -8,11 +8,11 @@
#include <drm/drm_drv.h>
+#include "display/intel_display_types.h"
#include "gt/intel_engine.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -293,16 +293,16 @@ TRACE_EVENT(intel_update_plane,
TP_STRUCT__entry(
__field(enum pipe, pipe)
- __field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
__array(int, dst, 4)
+ __string(name, plane->name)
),
TP_fast_assign(
+ __assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
- __entry->name = plane->name;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
@@ -310,7 +310,7 @@ TRACE_EVENT(intel_update_plane,
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
- pipe_name(__entry->pipe), __entry->name,
+ pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
@@ -322,26 +322,26 @@ TRACE_EVENT(intel_disable_plane,
TP_STRUCT__entry(
__field(enum pipe, pipe)
- __field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
+ __string(name, plane->name)
),
TP_fast_assign(
+ __assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
- __entry->name = plane->name;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->name,
+ pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline)
);
/* pipe updates */
-TRACE_EVENT(i915_pipe_update_start,
+TRACE_EVENT(intel_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
@@ -366,7 +366,7 @@ TRACE_EVENT(i915_pipe_update_start,
__entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_pipe_update_vblank_evaded,
+TRACE_EVENT(intel_pipe_update_vblank_evaded,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
@@ -391,7 +391,7 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
__entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_pipe_update_end,
+TRACE_EVENT(intel_pipe_update_end,
TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
TP_ARGS(crtc, frame, scanline_end),
@@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -675,18 +674,16 @@ TRACE_EVENT(i915_request_queue,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->flags)
+ __entry->ctx, __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno)
+ __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -749,18 +743,17 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->prio = rq->sched.attr.priority;
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
);
@@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -780,18 +772,16 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->completed)
+ __entry->ctx, __entry->seqno, __entry->completed)
);
#else
@@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -845,17 +834,16 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->flags)
);
@@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context,
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_gem_context *, ctx)
- __field(u32, hw_id)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
- __entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->vm;
+ __entry->vm = rcu_access_pointer(ctx->vm);
),
- TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
- __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
)
DEFINE_EVENT(i915_context, i915_context_create,
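
The plane tracepoints above switch from storing a bare const char * to copying the plane name into the trace entry: a stored pointer can be stale or unreadable by the time the ring buffer is consumed from userspace, whereas __string()/__assign_str() embed the characters in the record. A minimal standalone TRACE_EVENT showing that pattern (illustrative; not one of the i915 events in this file):

TRACE_EVENT(example_plane_event,
	TP_PROTO(const char *name, u32 frame),
	TP_ARGS(name, frame),

	TP_STRUCT__entry(
		__string(name, name)	/* reserves space and copies the string */
		__field(u32, frame)
	),

	TP_fast_assign(
		__assign_str(name, name);
		__entry->frame = frame;
	),

	TP_printk("plane %s, frame=%u", __get_str(name), __entry->frame)
);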
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
new file mode 100644
index 000000000000..c47261ae86ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *kdev = dev_priv->drm.dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !drm_debug_enabled(DRM_UT_DRIVER))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+
+ if (is_error && !shown_bug_once) {
+ /*
+ * Ask the user to file a bug report for the error, except
+ * if they may have caused the bug by fiddling with unsafe
+ * module parameters.
+ */
+ if (!test_taint(TAINT_USER))
+ dev_notice(kdev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+static unsigned int i915_probe_fail_count;
+
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
+{
+ if (i915_probe_fail_count >= i915_modparams.inject_probe_failure)
+ return 0;
+
+ if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
+ return 0;
+
+ __i915_printk(i915, KERN_INFO,
+ "Injecting failure %d at checkpoint %u [%s:%d]\n",
+ err, i915_modparams.inject_probe_failure, func, line);
+ i915_modparams.inject_probe_failure = 0;
+ return err;
+}
+
+bool i915_error_injected(void)
+{
+ return i915_probe_fail_count && !i915_modparams.inject_probe_failure;
+}
+
+#endif
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return;
+
+ del_timer(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies_timeout(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffie.
+ */
+ barrier();
+
+ mod_timer(t, jiffies + timeout);
+}
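
The new timer helpers rely on t->expires being zeroed while the timer is disarmed: set_timer_ms() with a timeout of 0 cancels rather than arming with no delay, and cancel_timer() clears ->expires so timer_expired() reports false again. A sketch of a hypothetical consumer (the watchdog name and callback are invented):

static struct timer_list watchdog;

static void watchdog_callback(struct timer_list *t)
{
	/* ->expires stays non-zero here, so timer_expired() becomes true */
}

static void watchdog_init(void)
{
	timer_setup(&watchdog, watchdog_callback, 0);
}

static void watchdog_arm(unsigned long timeout_ms)
{
	/* timeout_ms == 0 disarms instead of firing immediately */
	set_timer_ms(&watchdog, timeout_ms);
}

static void watchdog_disarm(void)
{
	cancel_timer(&watchdog);
}

static bool watchdog_fired(void)
{
	/* true only if it was armed and has since expired */
	return timer_expired(&watchdog);
}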
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2987219a6300..b0ade76bec90 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -31,6 +31,9 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+struct drm_i915_private;
+struct timer_list;
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -49,6 +52,34 @@
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line);
+#define i915_inject_probe_error(_i915, _err) \
+ __i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
+bool i915_error_injected(void);
+
+#else
+
+#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
+#define i915_error_injected() false
+
+#endif
+
+#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
+
+#define i915_probe_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
__builtin_add_overflow_p((A), (B), (T)0)
@@ -131,6 +162,16 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})
+#define ptr_dec(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v - 1); \
+})
+
+#define ptr_inc(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v + 1); \
+})
+
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
@@ -370,4 +411,36 @@ static inline const char *enableddisabled(bool v)
return v ? "enabled" : "disabled";
}
+static inline void add_taint_for_CI(unsigned int taint)
+{
+ /*
+ * The system is "ok", just about surviving for the user, but
+ * CI results are now unreliable as the HW is very suspect.
+ * CI checks the taint state after every test and will reboot
+ * the machine if the kernel is tainted.
+ */
+ add_taint(taint, LOCKDEP_STILL_OK);
+}
+
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires) && !timer_pending(t);
+}
+
+/*
+ * This is a lookalike for IS_ENABLED() that takes a kconfig value,
+ * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero
+ * i.e. whether the configuration is active. Wrapping up the config inside
+ * a boolean context prevents clang and smatch from complaining about potential
+ * issues in confusing logical-&& with bitwise-& for constants.
+ *
+ * Sadly IS_ENABLED() itself does not work with kconfig values.
+ *
+ * Returns 0 if @config is 0, 1 if set to any value.
+ */
+#define IS_ACTIVE(config) ((config) != 0)
+
#endif /* !__I915_UTILS_H */
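
IS_ACTIVE() exists because IS_ENABLED() only understands boolean/tristate symbols; for an integer kconfig value such as CONFIG_DRM_I915_SPIN_REQUEST the interesting question is whether it is non-zero. A tiny hypothetical use (the wrapper function is invented for illustration):

static bool i915_may_spin_on_request(void)
{
	/* false when CONFIG_DRM_I915_SPIN_REQUEST is configured as 0 */
	return IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST);
}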
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..968be26735c5 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,7 +21,6 @@
* SOFTWARE.
*/
-#include "intel_drv.h"
#include "i915_vgpu.h"
/**
@@ -52,34 +51,54 @@
*/
/**
- * i915_check_vgpu - detect virtual GPU
+ * i915_detect_vgpu - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_check_vgpu(struct drm_i915_private *dev_priv)
+void i915_detect_vgpu(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
u16 version_major;
+ void __iomem *shared_area;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
- if (magic != VGT_MAGIC)
+ /*
+ * This is called before we setup the main MMIO BAR mappings used via
+ * the uncore structure, so we need to access the BAR directly. Since
+ * we do not support VGT on older gens, return early so we don't have
+ * to consider differently numbered or sized MMIO bars
+ */
+ if (INTEL_GEN(dev_priv) < 6)
return;
- version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
+ shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
+ if (!shared_area) {
+ DRM_ERROR("failed to map MMIO bar to check for VGT\n");
+ return;
+ }
+
+ magic = readq(shared_area + vgtif_offset(magic));
+ if (magic != VGT_MAGIC)
+ goto out;
+
+ version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
- return;
+ goto out;
}
- dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+ dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
dev_priv->vgpu.active = true;
+ mutex_init(&dev_priv->vgpu.lock);
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+
+out:
+ pci_iounmap(pdev, shared_area);
}
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
@@ -101,6 +120,9 @@ static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ if (!drm_mm_node_allocated(node))
+ return;
+
DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
node->start,
node->start + node->size,
@@ -112,22 +134,22 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
/**
* intel_vgt_deballoon - deballoon reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which we reserved earlier
*
* This function is called to deallocate the ballooned-out graphic memory, when
* driver is unloaded or when ballooning fails.
*/
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
+void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
int i;
- if (!intel_vgpu_active(dev_priv))
+ if (!intel_vgpu_active(ggtt->vm.i915))
return;
DRM_DEBUG("VGT deballoon.\n");
for (i = 0; i < 4; i++)
- vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
+ vgt_deballoon_space(ggtt, &bl_info.space[i]);
}
static int vgt_balloon_space(struct i915_ggtt *ggtt,
@@ -153,7 +175,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which to reserve
*
* This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as
@@ -195,22 +217,26 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
*/
-int intel_vgt_balloon(struct drm_i915_private *dev_priv)
+int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
- if (!intel_vgpu_active(dev_priv))
+ if (!intel_vgpu_active(ggtt->vm.i915))
return 0;
- mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
- mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
- unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
- unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+ mappable_base =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
+ mappable_size =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
+ unmappable_base =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
+ unmappable_size =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index ebe1b7bced98..8b3663dad193 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,9 +24,10 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
+#include "i915_drv.h"
#include "i915_pvinfo.h"
-void i915_check_vgpu(struct drm_i915_private *dev_priv);
+void i915_detect_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
@@ -42,7 +43,7 @@ intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
}
-int intel_vgt_balloon(struct drm_i915_private *dev_priv);
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct i915_ggtt *ggtt);
+void intel_vgt_deballoon(struct i915_ggtt *ggtt);
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a57729be8312..4ff380770b32 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,14 +22,20 @@
*
*/
+#include <linux/sched/mm.h>
#include <drm/drm_gem.h>
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_sw_fence_work.h"
+#include "i915_trace.h"
#include "i915_vma.h"
static struct i915_global_vma {
@@ -77,43 +83,20 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
#endif
-static void obj_bump_mru(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ return container_of(ref, typeof(struct i915_vma), active);
+}
- obj->mm.dirty = true; /* be paranoid */
+static int __i915_vma_active(struct i915_active *ref)
+{
+ return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
+__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
- struct i915_vma *vma = container_of(ref, typeof(*vma), active);
- struct drm_i915_gem_object *obj = vma->obj;
-
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- if (--obj->active_count)
- return;
-
- /* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->base.resv)) {
- if (reservation_object_test_signaled_rcu(obj->base.resv, true))
- reservation_object_add_excl_fence(obj->base.resv, NULL);
- reservation_object_unlock(obj->base.resv);
- }
-
- /*
- * Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (i915_gem_object_is_shrinkable(obj))
- obj_bump_mru(obj);
-
- i915_gem_object_put(obj); /* and drop the active reference */
+ i915_vma_put(active_to_vma(ref));
}
static struct i915_vma *
@@ -125,21 +108,29 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- vma->vm = vm;
+ kref_init(&vma->ref);
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
+
+ /* Declare ourselves safe for use inside shrinkers */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&vma->active.mutex);
+ fs_reclaim_release(GFP_KERNEL);
+ }
INIT_LIST_HEAD(&vma->closed_link);
@@ -185,7 +176,7 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
- vma->flags |= I915_VMA_GGTT;
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
spin_lock(&obj->vma.lock);
@@ -232,10 +223,6 @@ vma_create(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
- mutex_lock(&vm->mutex);
- list_add(&vma->vm_link, &vm->unbound_list);
- mutex_unlock(&vm->mutex);
-
return vma;
err_vma:
@@ -279,8 +266,6 @@ vma_lookup(struct drm_i915_gem_object *obj,
* Once created, the VMA is kept until either the object is freed, or the
* address space is closed.
*
- * Must be called with struct_mutex held.
- *
* Returns the vma, or an error pointer.
*/
struct i915_vma *
@@ -291,7 +276,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(vm->closed);
+ GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
vma = vma_lookup(obj, vm, view);
@@ -305,18 +290,70 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
return vma;
}
+struct i915_vma_work {
+ struct dma_fence_work base;
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *pinned;
+ enum i915_cache_level cache_level;
+ unsigned int flags;
+};
+
+static int __vma_bind(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+ struct i915_vma *vma = vw->vma;
+ int err;
+
+ err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ if (err)
+ atomic_or(I915_VMA_ERROR, &vma->flags);
+
+ return err;
+}
+
+static void __vma_release(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+ if (vw->pinned)
+ __i915_gem_object_unpin_pages(vw->pinned);
+}
+
+static const struct dma_fence_work_ops bind_ops = {
+ .name = "bind",
+ .work = __vma_bind,
+ .release = __vma_release,
+};
+
+struct i915_vma_work *i915_vma_work(void)
+{
+ struct i915_vma_work *vw;
+
+ vw = kzalloc(sizeof(*vw), GFP_KERNEL);
+ if (!vw)
+ return NULL;
+
+ dma_fence_work_init(&vw->base, &bind_ops);
+ vw->base.dma.error = -EAGAIN; /* disable the worker by default */
+
+ return vw;
+}
+
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
+ * @work: preallocated worker for allocating and binding the PTE
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work)
{
u32 bind_flags;
u32 vma_flags;
@@ -333,13 +370,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
+ bind_flags = flags;
+ bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ vma_flags = atomic_read(&vma->flags);
+ vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
if (flags & PIN_UPDATE)
bind_flags |= vma_flags;
else
@@ -350,11 +385,36 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
+ if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ work->vma = vma;
+ work->cache_level = cache_level;
+ work->flags = bind_flags | I915_VMA_ALLOC;
+
+ /*
+ * Note we only want to chain up to the migration fence on
+ * the pages (not the object itself). As we don't track that yet,
+ * we have to use the exclusive fence instead.
+ *
+ * Also note that we do not want to track the async vma as
+ * part of the obj->resv->excl_fence as it only affects
+ * execution, not the content or the object's backing store lifetime.
+ */
+ GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
+ i915_active_set_exclusive(&vma->active, &work->base.dma);
+ work->base.dma.error = 0; /* enable the queue_work() */
- vma->flags |= bind_flags;
+ if (vma->obj) {
+ __i915_gem_object_pin_pages(vma->obj);
+ work->pinned = vma->obj;
+ }
+ } else {
+ GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+ }
+
+ atomic_or(bind_flags, &vma->flags);
return 0;
}
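/*
 * Illustrative call pattern (not part of the patch), condensed from the new
 * i915_vma_pin() later in this file. It assumes the vma's backing pages are
 * already set up (vma_get_pages()) and that the caller provides the vm
 * locking that i915_vma_pin() normally handles; the helper name is made up.
 */
static int example_bind_maybe_async(struct i915_vma *vma,
				    enum i915_cache_level cache_level,
				    u32 flags)
{
	struct i915_vma_work *work = NULL;
	int err;

	/* Only allocate a worker if this vm wants asynchronous binds */
	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work)
			return -ENOMEM;
	}

	err = i915_vma_bind(vma, cache_level, flags, work);

	/* Always commit the worker, mirroring the err_fence path in i915_vma_pin() */
	if (work)
		dma_fence_work_commit(&work->base);

	return err;
}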
@@ -363,19 +423,15 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
int err;
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
-
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+ GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
- ptr = vma->iomap;
+ ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
@@ -385,7 +441,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err;
}
- vma->iomap = ptr;
+ if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
+ io_mapping_unmap(ptr);
+ ptr = vma->iomap;
+ }
}
__i915_vma_pin(vma);
@@ -395,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err_unpin;
i915_vma_set_ggtt_write(vma);
+
+ /* NB Access through the GTT requires the device to be awake. */
return ptr;
err_unpin:
@@ -405,18 +466,12 @@ err:
void i915_vma_flush_writes(struct i915_vma *vma)
{
- if (!i915_vma_has_ggtt_write(vma))
- return;
-
- i915_gem_flush_ggtt_writes(vma->vm->i915);
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_flush_writes(vma);
@@ -452,6 +507,9 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
if (!drm_mm_node_allocated(&vma->node))
return false;
+ if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
+ return true;
+
if (vma->node.size < size)
return true;
@@ -486,17 +544,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
- vma->flags |= I915_VMA_CAN_FENCE;
+ set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-static bool color_differs(struct drm_mm_node *node, unsigned long color)
-{
- return node->allocated && node->color != color;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
@@ -508,7 +561,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
- if (vma->vm->mm.color_adjust == NULL)
+ if (!i915_vm_has_cache_coloring(vma->vm))
return true;
/* Only valid to be called on an already inserted vma */
@@ -516,11 +569,13 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(node))
return false;
return true;
@@ -555,13 +610,12 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = vma->vm->i915;
- unsigned int cache_level;
+ unsigned long color;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
@@ -581,7 +635,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
@@ -597,35 +651,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return -ENOSPC;
}
- if (vma->obj) {
- ret = i915_gem_object_pin_pages(vma->obj);
- if (ret)
- return ret;
-
- cache_level = vma->obj->cache_level;
- } else {
- cache_level = 0;
- }
-
- GEM_BUG_ON(vma->pages);
-
- ret = vma->ops->set_pages(vma);
- if (ret)
- goto err_unpin;
+ color = 0;
+ if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+ color = vma->obj->cache_level;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
- range_overflows(offset, size, end)) {
- ret = -EINVAL;
- goto err_clear;
- }
+ range_overflows(offset, size, end))
+ return -EINVAL;
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, cache_level,
+ size, offset, color,
flags);
if (ret)
- goto err_clear;
+ return ret;
} else {
/*
* We only support huge gtt pages through the 48b PPGTT,
@@ -661,116 +701,297 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, cache_level,
+ size, alignment, color,
start, end, flags);
if (ret)
- goto err_clear;
+ return ret;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
-
- mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
if (vma->obj) {
- atomic_inc(&vma->obj->bind_count);
- assert_bind_count(vma->obj);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ atomic_inc(&obj->bind_count);
+ assert_bind_count(obj);
}
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
-
-err_clear:
- vma->ops->clear_pages(vma);
-err_unpin:
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
- return ret;
}
static void
-i915_vma_remove(struct i915_vma *vma)
+i915_vma_detach(struct i915_vma *vma)
{
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
-
- vma->ops->clear_pages(vma);
-
- mutex_lock(&vma->vm->mutex);
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
/*
- * Since the unbound list is global, only move to that list if
- * no more VMAs exist.
+ * Now that the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to
+ * be reaped by the shrinker.
*/
+ list_del(&vma->vm_link);
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
+ assert_bind_count(obj);
atomic_dec(&obj->bind_count);
+ }
+}
- /*
- * And finally now the object is completely decoupled from this
- * vma, we can drop its hold on the backing storage and allow
- * it to be reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- assert_bind_count(obj);
+static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
+{
+ unsigned int bound;
+ bool pinned = true;
+
+ bound = atomic_read(&vma->flags);
+ do {
+ if (unlikely(flags & ~bound))
+ return false;
+
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
+ return false;
+
+ if (!(bound & I915_VMA_PIN_MASK))
+ goto unpinned;
+
+ GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+
+ return true;
+
+unpinned:
+ /*
+ * If pin_count==0, but we are bound, check under the lock to avoid
+ * racing with a concurrent i915_vma_unbind().
+ */
+ mutex_lock(&vma->vm->mutex);
+ do {
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
+ pinned = false;
+ break;
+ }
+
+ if (unlikely(flags & ~bound)) {
+ pinned = false;
+ break;
+ }
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+ mutex_unlock(&vma->vm->mutex);
+
+ return pinned;
+}
+
+static int vma_get_pages(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
+ return 0;
+
+ /* Allocations ahoy! */
+ if (mutex_lock_interruptible(&vma->pages_mutex))
+ return -EINTR;
+
+ if (!atomic_read(&vma->pages_count)) {
+ if (vma->obj) {
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ goto unlock;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err) {
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ goto unlock;
+ }
}
+ atomic_inc(&vma->pages_count);
+
+unlock:
+ mutex_unlock(&vma->pages_mutex);
+
+ return err;
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
+static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
- const unsigned int bound = vma->flags;
- int ret;
+ /* We allocate under vma_get_pages, so beware the shrinker */
+ mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
+ GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
+ if (atomic_sub_return(count, &vma->pages_count) == 0) {
+ vma->ops->clear_pages(vma);
+ GEM_BUG_ON(vma->pages);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ }
+ mutex_unlock(&vma->pages_mutex);
+}
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+static void vma_put_pages(struct i915_vma *vma)
+{
+ if (atomic_add_unless(&vma->pages_count, -1, 1))
+ return;
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err_unpin;
+ __vma_put_pages(vma, 1);
+}
+
+static void vma_unbind_pages(struct i915_vma *vma)
+{
+ unsigned int count;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ /* The upper portion of pages_count is the number of bindings */
+ count = atomic_read(&vma->pages_count);
+ count >>= I915_VMA_PAGES_BIAS;
+ GEM_BUG_ON(!count);
+
+ __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
+}
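/*
 * Illustrative note (not part of the patch): pages_count packs two counters
 * into a single atomic_t. The low bits count references on vma->pages taken
 * by vma_get_pages(); bits at and above I915_VMA_PAGES_BIAS (24) count
 * bindings. With I915_VMA_PAGES_ACTIVE == BIT(24) | 1, a successful pin
 * walks through roughly:
 *
 *   0x0000001  after vma_get_pages() in i915_vma_pin()
 *   0x1000002  after atomic_add(I915_VMA_PAGES_ACTIVE) on a new binding
 *   0x1000001  once i915_vma_pin() drops its transient reference on exit
 *
 * vma_unbind_pages() then recovers the binding count (count >>= 24) and
 * releases both halves at once via count | count << I915_VMA_PAGES_BIAS.
 */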
+
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_vma_work *work = NULL;
+ intel_wakeref_t wakeref = 0;
+ unsigned int bound;
+ int err;
+
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ GEM_BUG_ON(flags & PIN_UPDATE);
+ GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
+
+ /* First try and grab the pin without rebinding the vma */
+ if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ return 0;
+
+ err = vma_get_pages(vma);
+ if (err)
+ return err;
+
+ if (flags & vma->vm->bind_async_flags) {
+ work = i915_vma_work();
+ if (!work) {
+ err = -ENOMEM;
+ goto err_pages;
+ }
}
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err_unpin;
+ if (flags & PIN_GLOBAL)
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
+ /* No more allocations allowed once we hold vm->mutex */
+ err = mutex_lock_interruptible(&vma->vm->mutex);
+ if (err)
+ goto err_fence;
+
+ bound = atomic_read(&vma->flags);
+ if (unlikely(bound & I915_VMA_ERROR)) {
+ err = -ENOMEM;
+ goto err_unlock;
}
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
- if (ret)
- goto err_remove;
+ if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
+ err = -EAGAIN; /* pins are meant to be fairly temporary */
+ goto err_unlock;
+ }
+
+ if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
+ __i915_vma_pin(vma);
+ goto err_unlock;
+ }
- GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unlock;
+
+ if (!(bound & I915_VMA_BIND_MASK)) {
+ err = i915_vma_insert(vma, size, alignment, flags);
+ if (err)
+ goto err_active;
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
+ if (i915_is_ggtt(vma->vm))
+ __i915_vma_set_map_and_fenceable(vma);
+ }
+
+ GEM_BUG_ON(!vma->pages);
+ err = i915_vma_bind(vma,
+ vma->obj ? vma->obj->cache_level : 0,
+ flags, work);
+ if (err)
+ goto err_remove;
+
+ /* There should only be at most 2 active bindings (user, global) */
+ GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
+ atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
err_remove:
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- i915_vma_remove(vma);
- GEM_BUG_ON(vma->pages);
- GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
+ i915_vma_detach(vma);
+ drm_mm_remove_node(&vma->node);
}
-err_unpin:
- __i915_vma_unpin(vma);
- return ret;
+err_active:
+ i915_active_release(&vma->active);
+err_unlock:
+ mutex_unlock(&vma->vm->mutex);
+err_fence:
+ if (work)
+ dma_fence_work_commit(&work->base);
+ if (wakeref)
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+err_pages:
+ vma_put_pages(vma);
+ return err;
+}
+
+static void flush_idle_contexts(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_barriers(engine);
+
+ intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ do {
+ err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Unlike i915_vma_pin, we don't take no for an answer! */
+ flush_idle_contexts(vm->gt);
+ if (mutex_lock_interruptible(&vm->mutex) == 0) {
+ i915_gem_evict_vm(vm);
+ mutex_unlock(&vm->mutex);
+ }
+ } while (1);
}
void i915_vma_close(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
+ struct intel_gt *gt = vma->vm->gt;
unsigned long flags;
GEM_BUG_ON(i915_vma_is_closed(vma));
@@ -787,81 +1008,91 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- spin_lock_irqsave(&i915->gt.closed_lock, flags);
- list_add(&vma->closed_link, &i915->gt.closed_vma);
- spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
+ spin_lock_irqsave(&gt->closed_lock, flags);
+ list_add(&vma->closed_link, &gt->closed_vma);
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
+ struct intel_gt *gt = vma->vm->gt;
- if (!i915_vma_is_closed(vma))
- return;
-
- spin_lock_irq(&i915->gt.closed_lock);
+ spin_lock_irq(&gt->closed_lock);
list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
- __i915_vma_remove_closed(vma);
+ if (i915_vma_is_closed(vma))
+ __i915_vma_remove_closed(vma);
}
-static void __i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref)
{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(vma->fence);
-
- GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
-
- mutex_lock(&vma->vm->mutex);
- list_del(&vma->vm_link);
- mutex_unlock(&vma->vm->mutex);
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
+ if (drm_mm_node_allocated(&vma->node)) {
+ mutex_lock(&vma->vm->mutex);
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+ }
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
}
- i915_active_fini(&vma->active);
+ __i915_vma_remove_closed(vma);
+ i915_vm_put(vma->vm);
+ i915_active_fini(&vma->active);
i915_vma_free(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_parked(struct intel_gt *gt)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct i915_vma *vma, *next;
- GEM_BUG_ON(i915_vma_is_pinned(vma));
+ spin_lock_irq(&gt->closed_lock);
+ list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- __i915_vma_remove_closed(vma);
+ /* XXX All to avoid keeping a reference on i915_vma itself */
- WARN_ON(i915_vma_unbind(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
- __i915_vma_destroy(vma);
-}
+ if (i915_vm_tryopen(vm)) {
+ list_del_init(&vma->closed_link);
+ } else {
+ i915_gem_object_put(obj);
+ obj = NULL;
+ }
-void i915_vma_parked(struct drm_i915_private *i915)
-{
- struct i915_vma *vma, *next;
+ spin_unlock_irq(&gt->closed_lock);
- spin_lock_irq(&i915->gt.closed_lock);
- list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
- list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ if (obj) {
+ __i915_vma_put(vma);
+ i915_gem_object_put(obj);
+ }
- i915_vma_destroy(vma);
+ i915_vm_close(vm);
- spin_lock_irq(&i915->gt.closed_lock);
+ /* Restart after dropping lock */
+ spin_lock_irq(&gt->closed_lock);
+ next = list_first_entry(&gt->closed_vma,
+ typeof(*next), closed_link);
}
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -877,17 +1108,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ struct drm_vma_offset_node *node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -899,21 +1129,18 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-static void export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
+int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
- struct reservation_object *resv = vma->resv;
+ int err;
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv, 1) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ /* Wait for the vma to be bound before we start! */
+ err = i915_request_await_active(rq, &vma->active);
+ if (err)
+ return err;
+
+ return i915_active_add_request(&vma->active, rq);
}
int i915_vma_move_to_active(struct i915_vma *vma,
@@ -921,97 +1148,80 @@ int i915_vma_move_to_active(struct i915_vma *vma,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
+ int err;
- assert_vma_held(vma);
assert_object_held(obj);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- if (!vma->active.count && !obj->active_count++)
- i915_gem_object_get(obj); /* once more for the active ref */
-
- if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
- if (!vma->active.count && !--obj->active_count)
- i915_gem_object_put(obj);
- return -ENOMEM;
- }
-
- GEM_BUG_ON(!i915_vma_is_active(vma));
- GEM_BUG_ON(!obj->active_count);
+ err = __i915_vma_move_to_active(vma, rq);
+ if (unlikely(err))
+ return err;
- obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ struct intel_frontbuffer *front;
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- __i915_active_request_set(&obj->frontbuffer_write, rq);
+ front = __intel_frontbuffer_get(obj);
+ if (unlikely(front)) {
+ if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ i915_active_add_request(&front->write, rq);
+ intel_frontbuffer_put(front);
+ }
+ dma_resv_add_excl_fence(vma->resv, &rq->fence);
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
+ } else {
+ err = dma_resv_reserve_shared(vma->resv, 1);
+ if (unlikely(err))
+ return err;
+
+ dma_resv_add_shared_fence(vma->resv, &rq->fence);
+ obj->write_domain = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->mm.dirty = true;
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- __i915_active_request_set(&vma->last_fence, rq);
-
- export_fence(vma, rq, flags);
+ GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
}
-int i915_vma_unbind(struct i915_vma *vma)
+int __i915_vma_unbind(struct i915_vma *vma)
{
int ret;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vma->vm->mutex);
/*
* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
+ *
+ * XXX Actually waiting under the vm->mutex is a hindrance and
+ * should be pipelined wherever possible. In cases where that is
+ * unavoidable, we should lift the wait to before the mutex.
*/
- might_sleep();
- if (i915_vma_is_active(vma)) {
- /*
- * When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- *
- * Even more scary is that the retire callback may free
- * the object (last active vma). To prevent the explosion
- * we defer the actual object free to a worker that can
- * only proceed once it acquires the struct_mutex (which
- * we currently hold, therefore it cannot free this object
- * before we are finished).
- */
- __i915_vma_pin(vma);
-
- ret = i915_active_wait(&vma->active);
- if (ret)
- goto unpin;
-
- ret = i915_active_request_retire(&vma->last_fence,
- &vma->vm->i915->drm.struct_mutex);
-unpin:
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
- }
- GEM_BUG_ON(i915_vma_is_active(vma));
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
- return -EBUSY;
+ return -EAGAIN;
}
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
if (!drm_mm_node_allocated(&vma->node))
return 0;
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
@@ -1023,7 +1233,7 @@ unpin:
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
+ ret = i915_vma_revoke_fence(vma);
if (ret)
return ret;
@@ -1031,22 +1241,66 @@ unpin:
i915_vma_revoke_mmap(vma);
__i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(!vma->vm->closed)) {
+ if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
- i915_vma_remove(vma);
+ i915_vma_detach(vma);
+ vma_unbind_pages(vma);
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
+ int err;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ /* XXX not always required: nop_clear_range */
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err)
+ return err;
+
+ err = __i915_vma_unbind(vma);
+ mutex_unlock(&vm->mutex);
+
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
+ return err;
+}
+
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
+{
+ i915_gem_object_make_unshrinkable(vma->obj);
+ return vma;
+}
+
+void i915_vma_make_shrinkable(struct i915_vma *vma)
+{
+ i915_gem_object_make_shrinkable(vma->obj);
+}
+
+void i915_vma_make_purgeable(struct i915_vma *vma)
+{
+ i915_gem_object_make_purgeable(vma->obj);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..02b31a62951e 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,121 +30,14 @@
#include <drm/drm_mm.h>
+#include "gem/i915_gem_object.h"
+
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
-#include "gem/i915_gem_object.h"
#include "i915_active.h"
#include "i915_request.h"
-
-enum i915_cache_level;
-
-/**
- * DOC: Virtual Memory Address
- *
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- const struct i915_vma_ops *ops;
- struct i915_fence_reg *fence;
- struct reservation_object *resv; /** Alias of obj->resv */
- struct sg_table *pages;
- void __iomem *iomap;
- void *private; /* owned by creator */
- u64 size;
- u64 display_alignment;
- struct i915_page_sizes page_sizes;
-
- u32 fence_size;
- u32 fence_alignment;
-
- /**
- * Count of the number of times this vma has been opened by different
- * handles (but same file) for execbuf, i.e. the number of aliases
- * that exist in the ctx->handle_vmas LUT for this vma.
- */
- atomic_t open_count;
- unsigned long flags;
- /**
- * How many users have pinned this object in GTT space.
- *
- * This is a tightly bound, fairly small number of users, so we
- * stuff inside the flags field so that we can both check for overflow
- * and detect a no-op i915_vma_pin() in a single check, while also
- * pinning the vma.
- *
- * The worst case display setup would have the same vma pinned for
- * use on each plane on each crtc, while also building the next atomic
- * state and holding a pin for the length of the cleanup queue. In the
- * future, the flip queue may be increased from 1.
- * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
- *
- * For GEM, the number of concurrent users for pwrite/pread is
- * unbounded. For execbuffer, it is currently one but will in future
- * be extended to allow multiple clients to pin vma concurrently.
- *
- * We also use suballocated pages, with each suballocation claiming
- * its own pin on the shared vma. At present, this is limited to
- * exclusive cachelines of a single page, so a maximum of 64 possible
- * users.
- */
-#define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW BIT(8)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(9)
-#define I915_VMA_LOCAL_BIND BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(11)
-#define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_USERFAULT_BIT 13
-#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(14)
-
- struct i915_active active;
- struct i915_active_request last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
- struct rb_node obj_node;
- struct hlist_node obj_hash;
-
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
- /** This vma's place in the eviction list */
- struct list_head evict_link;
-
- struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
-};
+#include "i915_vma_types.h"
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
@@ -159,52 +52,57 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
return !i915_active_is_idle(&vma->active);
}
+int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq);
int __must_check i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags);
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT;
+ return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT_WRITE;
+ return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- vma->flags |= I915_VMA_GGTT_WRITE;
+ set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
- vma->flags &= ~I915_VMA_GGTT_WRITE;
+ return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+ __i915_vma_flags(vma));
}
void i915_vma_flush_writes(struct i915_vma *vma);
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_CAN_FENCE;
+ return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
- return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -215,7 +113,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(upper_32_bits(vma->node.start));
GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
return lower_32_bits(vma->node.start);
@@ -232,6 +130,14 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
return vma;
}
+static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
+{
+ if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
+ return vma;
+
+ return NULL;
+}
+
static inline void i915_vma_put(struct i915_vma *vma)
{
i915_gem_object_put(vma->obj);
@@ -286,55 +192,56 @@ i915_vma_compare(struct i915_vma *vma,
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work);
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
-
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
-static inline void i915_vma_lock(struct i915_vma *vma)
+static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
{
- reservation_object_lock(vma->resv, NULL);
+ if (kref_get_unless_zero(&vma->ref))
+ return vma;
+
+ return NULL;
}
-static inline void i915_vma_unlock(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref);
+static inline void __i915_vma_put(struct i915_vma *vma)
{
- reservation_object_unlock(vma->resv);
+ kref_put(&vma->ref, i915_vma_release);
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
- }
+static inline void i915_vma_lock(struct i915_vma *vma)
+{
+ dma_resv_lock(vma->resv, NULL);
+}
- return __i915_vma_do_pin(vma, size, alignment, flags);
+static inline void i915_vma_unlock(struct i915_vma *vma)
+{
+ dma_resv_unlock(vma->resv);
}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags);
+
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_PIN_MASK;
+ return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -344,18 +251,18 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma)
{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+ atomic_inc(&vma->flags);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- vma->flags--;
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ atomic_dec(&vma->flags);
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
@@ -363,7 +270,13 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
static inline bool i915_vma_is_bound(const struct i915_vma *vma,
unsigned int where)
{
- return vma->flags & where;
+ return atomic_read(&vma->flags) & where;
+}
+
+static inline bool i915_node_color_differs(const struct drm_mm_node *node,
+ unsigned long color)
+{
+ return drm_mm_node_allocated(node) && node->color != color;
}
/**
@@ -375,8 +288,6 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
- * Callers must hold the struct_mutex.
- *
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
@@ -388,8 +299,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
*/
void i915_vma_unpin_iomap(struct i915_vma *vma);
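/*
 * Illustrative usage sketch (not part of the patch): the helper name and the
 * written value are made up. It assumes the vma is already pinned in the
 * GGTT (e.g. via i915_ggtt_pin()), is map-and-fenceable, and that the device
 * is awake, as required by i915_vma_pin_iomap().
 */
static int example_write_through_aperture(struct i915_vma *vma)
{
	void __iomem *map;

	map = i915_vma_pin_iomap(vma);	/* returns a WC iomap or an ERR_PTR */
	if (IS_ERR(map))
		return PTR_ERR(map);

	writel(0, map);			/* any CPU access through the aperture */

	i915_vma_unpin_iomap(vma);	/* drops the pin taken by pin_iomap */
	return 0;
}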
@@ -414,13 +325,15 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
*
* True if the vma has a fence, false otherwise.
*/
-int i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+int __must_check i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+
+int __i915_vma_pin_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
+ GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
+ atomic_dec(&vma->fence->pin_count);
}
/**
@@ -434,12 +347,11 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
-void i915_vma_parked(struct drm_i915_private *i915);
+void i915_vma_parked(struct intel_gt *gt);
#define for_each_until(cond) if (cond) break; else
@@ -459,4 +371,14 @@ void i915_vma_parked(struct drm_i915_private *i915);
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
+void i915_vma_make_shrinkable(struct i915_vma *vma);
+void i915_vma_make_purgeable(struct i915_vma *vma);
+
+static inline int i915_vma_sync(struct i915_vma *vma)
+{
+ /* Wait for the asynchronous bindings and pending GPU reads */
+ return i915_active_wait(&vma->active);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
new file mode 100644
index 000000000000..e0942efd5236
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_TYPES_H__
+#define __I915_VMA_TYPES_H__
+
+#include <linux/rbtree.h>
+
+#include <drm/drm_mm.h>
+
+#include "gem/i915_gem_object_types.h"
+
+enum i915_cache_level;
+
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically, objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ * 12
+ * 34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ * 1212
+ * 3434
+ *
+ * In this example both the size and layout of pages in the alternative view are
+ * different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view,
+ * a globally const i915_ggtt_view_normal singleton instance exists. All old
+ * core GEM API functions, the ones not taking the view parameter, operate
+ * on, or with, the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * The core API is designed to have copy semantics, which means that the
+ * passed-in struct i915_ggtt_view does not need to be persistent (kept
+ * around after calling the core API functions).
+ *
+ */
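/*
 * Illustrative sketch (not part of the patch): requesting a PARTIAL view of
 * an object. i915_vma_instance() and the view structures are the ones
 * declared in this series (the structs follow below in this file); the
 * wrapper name and the four-page size are made up. partial.offset is in
 * GTT pages, as shown by the << PAGE_SHIFT conversion in
 * i915_vma_revoke_mmap().
 */
static struct i915_vma *
example_partial_view(struct drm_i915_gem_object *obj,
		     struct i915_address_space *ggtt)
{
	const struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
		.partial = {
			.offset = 0,	/* start of the object, in pages */
			.size = 4,	/* map only the first four pages */
		},
	};

	/* Looks up, or creates, the vma describing this view of obj in the GGTT */
	return i915_vma_instance(obj, ggtt, &view);
}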
+
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ unsigned int width, height, stride, offset;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[2];
+ unsigned int unused_mbz;
+} __packed;
+
+struct intel_rotation_info {
+ struct intel_remapped_plane_info plane[2];
+} __packed;
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+static inline void assert_i915_gem_gtt_types(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+
+ /* Check that rotation/remapped shares offsets for simplicity */
+ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+ offsetof(struct intel_rotation_info, plane[0]));
+ BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+ offsetofend(struct intel_rotation_info, plane[1]));
+
+ /* As we encode the size of each branch inside the union into its type,
+ * we have to be careful that each branch has a unique size.
+ */
+ switch ((enum i915_ggtt_view_type)0) {
+ case I915_GGTT_VIEW_NORMAL:
+ case I915_GGTT_VIEW_PARTIAL:
+ case I915_GGTT_VIEW_ROTATED:
+ case I915_GGTT_VIEW_REMAPPED:
+ /* gcc complains if these are identical cases */
+ break;
+ }
+}
+
+struct i915_ggtt_view {
+ enum i915_ggtt_view_type type;
+ union {
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
+ struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
+ };
+};
+
+/**
+ * DOC: Virtual Memory Address
+ *
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+
+ struct i915_address_space *vm;
+ const struct i915_vma_ops *ops;
+
+ struct drm_i915_gem_object *obj;
+ struct dma_resv *resv; /** Alias of obj->resv */
+
+ struct sg_table *pages;
+ void __iomem *iomap;
+ void *private; /* owned by creator */
+
+ struct i915_fence_reg *fence;
+
+ u64 size;
+ u64 display_alignment;
+ struct i915_page_sizes page_sizes;
+
+ /* mmap-offset associated with fencing for this vma */
+ struct i915_mmap_offset *mmo;
+
+ u32 fence_size;
+ u32 fence_alignment;
+
+ /**
+ * Count of the number of times this vma has been opened by different
+ * handles (but same file) for execbuf, i.e. the number of aliases
+ * that exist in the ctx->handle_vmas LUT for this vma.
+ */
+ struct kref ref;
+ atomic_t open_count;
+ atomic_t flags;
+ /**
+ * How many users have pinned this object in GTT space.
+ *
+ * This is a tightly bound, fairly small number of users, so we
+ * stuff inside the flags field so that we can both check for overflow
+ * and detect a no-op i915_vma_pin() in a single check, while also
+ * pinning the vma.
+ *
+ * The worst case display setup would have the same vma pinned for
+ * use on each plane on each crtc, while also building the next atomic
+ * state and holding a pin for the length of the cleanup queue. In the
+ * future, the flip queue may be increased from 1.
+ * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
+ *
+ * For GEM, the number of concurrent users for pwrite/pread is
+ * unbounded. For execbuffer, it is currently one but will in future
+ * be extended to allow multiple clients to pin vma concurrently.
+ *
+ * We also use suballocated pages, with each suballocation claiming
+ * its own pin on the shared vma. At present, this is limited to
+ * exclusive cachelines of a single page, so a maximum of 64 possible
+ * users.
+ */
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT 11
+
+#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT 12
+#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT 14
+#define I915_VMA_CAN_FENCE_BIT 15
+#define I915_VMA_USERFAULT_BIT 16
+#define I915_VMA_GGTT_WRITE_BIT 17
+
+#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+
+ struct i915_active active;
+
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+ atomic_t pages_count; /* number of active binds to the pages */
+ struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
+ * assumed in GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+ struct hlist_node obj_hash;
+
+ /** This vma's place in the execbuf reservation list */
+ struct list_head exec_link;
+ struct list_head reloc_link;
+
+ /** This vma's place in the eviction list */
+ struct list_head evict_link;
+
+ struct list_head closed_link;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ unsigned int *exec_flags;
+ struct hlist_node exec_node;
+ u32 exec_handle;
+};
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 6ef74531588a..09870a31b4f0 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -39,8 +39,13 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(TGL_CSR_PATH);
+
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9)
#define ICL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);
@@ -674,6 +679,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
+ csr->fw_path = TGL_CSR_PATH;
+ csr->required_version = TGL_CSR_VERSION_REQUIRED;
/* Allow to load fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (IS_GEN(dev_priv, 11)) {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7135d8dc32a7..6670a0763be2 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -58,6 +58,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
PLATFORM_NAME(ELKHARTLAKE),
+ PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME
@@ -72,9 +73,30 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump_flags(const struct intel_device_info *info,
- struct drm_printer *p)
+static const char *iommu_name(void)
{
+ const char *msg = "n/a";
+
+#ifdef CONFIG_INTEL_IOMMU
+ msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+
+ return msg;
+}
+
+void intel_device_info_print_static(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "engines: %x\n", info->engine_mask);
+ drm_printf(p, "gen: %d\n", info->gen);
+ drm_printf(p, "gt: %d\n", info->gt);
+ drm_printf(p, "iommu: %s\n", iommu_name());
+ drm_printf(p, "memory-regions: %x\n", info->memory_regions);
+ drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+ drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
+ drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
+ drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -92,9 +114,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
hweight8(sseu->slice_mask), sseu->slice_mask);
drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
@@ -105,8 +127,8 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p)
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
+ struct drm_printer *p)
{
sseu_dump(&info->sseu, p);
@@ -117,10 +139,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- int slice_stride = sseu->max_subslices * subslice_stride;
+ int slice_stride = sseu->max_subslices * sseu->eu_stride;
- return slice * slice_stride + subslice * subslice_stride;
+ return slice * slice_stride + subslice * sseu->eu_stride;
}
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
@@ -129,7 +150,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
int i, offset = sseu_eu_idx(sseu, slice, subslice);
u16 eu_mask = 0;
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
(i * BITS_PER_BYTE);
}
@@ -142,14 +163,14 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
{
int i, offset = sseu_eu_idx(sseu, slice, subslice);
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
sseu->eu_mask[offset + i] =
(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
}
}
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
{
int s, ss;
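The hunks above replace the recomputed GEN_SSEU_STRIDE() with the cached sseu->eu_stride. A stand-alone illustration of the byte-packed EU mask layout being indexed; the structure and values are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct sketch_sseu {
	int max_subslices;
	int eu_stride;		/* bytes of eu_mask per subslice, e.g. 2 for 16 EUs */
	uint8_t eu_mask[64];
};

static int sketch_eu_idx(const struct sketch_sseu *s, int slice, int subslice)
{
	int slice_stride = s->max_subslices * s->eu_stride;

	return slice * slice_stride + subslice * s->eu_stride;
}

int main(void)
{
	struct sketch_sseu s = { .max_subslices = 6, .eu_stride = 2 };

	/* Subslice 3 of slice 0 starts at byte 6 of the packed mask. */
	printf("offset = %d\n", sketch_eu_idx(&s, 0, 3));
	return 0;
}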
@@ -159,9 +180,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
}
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
@@ -182,44 +203,80 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u8 s_en, u32 ss_en, u16 eu_en)
+{
+ int s, ss;
+
+ /* ss_en represents entire subslice mask across all slices */
+ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+ sizeof(ss_en) * BITS_PER_BYTE);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if ((s_en & BIT(s)) == 0)
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ intel_sseu_set_subslices(sseu, s, ss_en);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ u8 s_en;
+ u32 dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
+
+ dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
+
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
- u32 ss_en, ss_en_mask;
+ u32 ss_en;
u8 eu_en;
- int s;
- if (IS_ELKHARTLAKE(dev_priv)) {
- sseu->max_slices = 1;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
- } else {
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
- }
+ if (IS_ELKHARTLAKE(dev_priv))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
- ss_en_mask = BIT(sseu->max_subslices) - 1;
eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
- for (s = 0; s < sseu->max_slices; s++) {
- if (s_en & BIT(s)) {
- int ss_idx = sseu->max_subslices * s;
- int ss;
-
- sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu->subslice_mask[s] & BIT(ss))
- sseu_set_eus(sseu, s, ss, eu_en);
- }
- }
- }
- sseu->eu_per_subslice = hweight8(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
+ gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
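The new gen12_sseu_info_init() above reads one fuse bit per pair of EUs and expands each set bit into two enable bits. A stand-alone run of that expansion with a made-up fuse value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t eu_en_fuse = 0x0f;	/* example: lower four EU pairs enabled */
	uint16_t eu_en = 0;
	int eu;

	for (eu = 0; eu < 8; eu++)
		if (eu_en_fuse & (1u << eu))
			eu_en |= (1u << (eu * 2)) | (1u << (eu * 2 + 1));

	printf("eu_en = 0x%04x\n", eu_en);	/* prints 0x00ff: EUs 0-7 enabled */
	return 0;
}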
@@ -235,23 +292,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
const int eu_mask = 0xff;
u32 subslice_mask, eu_en;
+ intel_sseu_set_info(sseu, 6, 4, 8);
+
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
- sseu->max_slices = 6;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
-
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- sseu->subslice_mask[0] = subslice_mask;
- for (s = 1; s < sseu->max_slices; s++)
- sseu->subslice_mask[s] = subslice_mask & 0x3;
/* Slice0 */
eu_en = ~I915_READ(GEN8_EU_DISABLE0);
@@ -276,14 +320,25 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
eu_en = ~I915_READ(GEN10_EU_DISABLE3);
sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
- /* Do a second pass where we mark the subslices disabled if all their
- * eus are off.
- */
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
for (s = 0; s < sseu->max_slices; s++) {
+ u32 subslice_mask_with_eus = subslice_mask;
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
if (sseu_get_eus(sseu, s, ss) == 0)
- sseu->subslice_mask[s] &= ~BIT(ss);
+ subslice_mask_with_eus &= ~BIT(ss);
}
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ intel_sseu_set_subslices(sseu, s, s == 0 ?
+ subslice_mask_with_eus :
+ subslice_mask_with_eus & 0x3);
}
sseu->eu_total = compute_eu_total(sseu);
@@ -309,13 +364,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
+ u8 subslice_mask = 0;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
- sseu->max_slices = 1;
- sseu->max_subslices = 2;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 1, 2, 8);
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
@@ -324,7 +378,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(0);
+ subslice_mask |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
@@ -335,10 +389,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(1);
+ subslice_mask |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
+ intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
sseu->eu_total = compute_eu_total(sseu);
/*
@@ -371,9 +427,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
/* BXT has a single slice and at most 3 subslices. */
- sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
- sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
+ IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -392,14 +447,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
u8 eu_disabled_mask;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -464,7 +519,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
}
}
-static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
@@ -472,9 +527,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- sseu->max_slices = 3;
- sseu->max_subslices = 3;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 3, 3, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -501,13 +554,13 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -547,10 +600,11 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
-static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
+ u8 subslice_mask = 0;
int s, ss;
/*
@@ -563,22 +617,18 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0);
+ subslice_mask = BIT(0);
break;
case 2:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
case 3:
sseu->slice_mask = BIT(0) | BIT(1);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
- sseu->subslice_mask[1] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
}
- sseu->max_slices = hweight8(sseu->slice_mask);
- sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
-
fuse1 = I915_READ(HSW_PAVP_FUSE1);
switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
default:
@@ -595,9 +645,14 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->eu_per_subslice = 6;
break;
}
- sseu->max_eus_per_subslice = sseu->eu_per_subslice;
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
(1UL << sseu->eu_per_subslice) - 1);
@@ -715,7 +770,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
}
return freq;
- } else if (INTEL_GEN(dev_priv) <= 11) {
+ } else if (INTEL_GEN(dev_priv) <= 12) {
u32 ctc_reg = I915_READ(CTC_MODE);
u32 freq = 0;
@@ -774,6 +829,8 @@ static const u16 subplatform_ult_ids[] = {
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
INTEL_WHL_U_GT3_IDS(0),
+ INTEL_CML_U_GT1_IDS(0),
+ INTEL_CML_U_GT2_IDS(0),
};
static const u16 subplatform_ulx_ids[] = {
@@ -899,12 +956,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
- if (i915_modparams.disable_display) {
- DRM_INFO("Display disabled (module parameter)\n");
- info->num_pipes = 0;
- } else if (HAS_DISPLAY(dev_priv) &&
- (IS_GEN_RANGE(dev_priv, 7, 8)) &&
- HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
+ HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -922,57 +975,65 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
- info->num_pipes = 0;
+ info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
DRM_INFO("PipeC fused off\n");
- info->num_pipes -= 1;
+ info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 disabled_mask = 0;
- bool invalid;
- int num_bits;
+ u8 enabled_mask = info->pipe_mask;
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- disabled_mask |= BIT(PIPE_A);
+ enabled_mask &= ~BIT(PIPE_A);
if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- disabled_mask |= BIT(PIPE_B);
+ enabled_mask &= ~BIT(PIPE_B);
if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- disabled_mask |= BIT(PIPE_C);
-
- num_bits = hweight8(disabled_mask);
-
- switch (disabled_mask) {
- case BIT(PIPE_A):
- case BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_C):
- invalid = true;
- break;
- default:
- invalid = false;
- }
+ enabled_mask &= ~BIT(PIPE_C);
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE))
+ enabled_mask &= ~BIT(PIPE_D);
- if (num_bits > info->num_pipes || invalid)
- DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
- disabled_mask);
+ /*
+ * At least one pipe should be enabled and if there are
+ * disabled pipes, they should be the last ones, with no holes
+ * in the mask.
+ */
+ if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
+ DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
+ enabled_mask);
else
- info->num_pipes -= num_bits;
+ info->pipe_mask = enabled_mask;
+
+ if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
+ info->display.has_hdcp = 0;
+
+ if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+ info->display.has_fbc = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ info->display.has_csr = 0;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
+ info->display.has_dsc = 0;
}
/* Initialize slice/subslice/EU info */
if (IS_HASWELL(dev_priv))
- haswell_sseu_info_init(dev_priv);
+ hsw_sseu_info_init(dev_priv);
else if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
- broadwell_sseu_info_init(dev_priv);
+ bdw_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (IS_GEN(dev_priv, 11))
gen11_sseu_info_init(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 12)
+ gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
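The pipe-fuse rework above replaces the old disabled_mask switch statement with a single contiguity test: the enabled pipes must form an unbroken run starting at pipe A, which is exactly the "mask + 1 is a power of two" condition. A stand-alone version with a couple of worked values:

#include <stdbool.h>
#include <stdio.h>

static bool sketch_pipe_mask_valid(unsigned int mask)
{
	/* Same idea as is_power_of_2(mask + 1): no holes, at least one pipe. */
	return mask != 0 && ((mask + 1) & mask) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       sketch_pipe_mask_valid(0x7),	/* 1: pipes A-C enabled */
	       sketch_pipe_mask_valid(0x5),	/* 0: hole where pipe B was fused off */
	       sketch_pipe_mask_valid(0x0));	/* 0: no pipes at all */
	return 0;
}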
@@ -1016,8 +1077,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEN11_GT_VEBOX_DISABLE_SHIFT;
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(dev_priv, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
@@ -1028,8 +1091,9 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
/*
* In Gen11, only even numbered logical VDBOXes are
* hooked up to an SFC (Scaler & Format Converter) unit.
+ * In TGL each VDBOX has access to an SFC.
*/
- if (logical_vdbox++ % 2 == 0)
+ if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
@@ -1037,8 +1101,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(dev_priv, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
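A simplified stand-alone restatement of the SFC-access rule changed above: on gen11 only every other logical (enabled) VDBOX gets a Scaler & Format Converter, while from gen12 on every VDBOX does. This ignores the HAS_ENGINE() filtering in the real loop; the mask width and gen handling are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_vdbox_sfc_access(int gen, uint32_t vdbox_mask)
{
	uint32_t sfc = 0;
	int i, logical = 0;

	for (i = 0; i < 8; i++) {
		if (!(vdbox_mask & (1u << i)))
			continue;
		if (gen >= 12 || logical++ % 2 == 0)
			sfc |= 1u << i;
	}

	return sfc;
}

int main(void)
{
	/* vdbox 0 and 2 enabled: gen11 grants an SFC only to the first logical
	 * vdbox (0x1), gen12 grants one to both (0x5). */
	printf("gen11: 0x%x, gen12: 0x%x\n",
	       sketch_vdbox_sfc_access(11, 0x5),
	       sketch_vdbox_sfc_access(12, 0x5));
	return 0;
}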
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index ddafc819bf30..2725cb7fc169 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -78,6 +78,8 @@ enum intel_platform {
/* gen11 */
INTEL_ICELAKE,
INTEL_ELKHARTLAKE,
+ /* gen12 */
+ INTEL_TIGERLAKE,
INTEL_MAX_PLATFORMS
};
@@ -105,12 +107,14 @@ enum intel_ppgtt_type {
func(is_mobile); \
func(is_lp); \
func(require_force_probe); \
+ func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
- func(has_guc); \
+ func(has_global_mocs); \
+ func(has_gt_uc); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
@@ -132,10 +136,14 @@ enum intel_ppgtt_type {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_dsb); \
+ func(has_dsc); \
func(has_fbc); \
func(has_gmch); \
+ func(has_hdcp); \
func(has_hotplug); \
func(has_ipc); \
+ func(has_modular_fia); \
func(has_overlay); \
func(has_psr); \
func(overlay_needs_physical); \
@@ -155,9 +163,11 @@ struct intel_device_info {
unsigned int page_sizes; /* page sizes supported by the HW */
+ u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
- u8 num_pipes;
+ u8 pipe_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -220,12 +230,13 @@ const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump_flags(const struct intel_device_info *info,
- struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
+
+void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p);
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
deleted file mode 100644
index 41ba593a4df7..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _INTEL_GUC_CT_H_
-#define _INTEL_GUC_CT_H_
-
-struct intel_guc;
-struct i915_vma;
-
-#include "intel_guc_fwif.h"
-
-/**
- * DOC: Command Transport (CT).
- *
- * Buffer based command transport is a replacement for the MMIO based
- * mechanism. It can be used for both host-to-GuC and GuC-to-host
- * communication.
- */
-
-/** Represents single command transport buffer.
- *
- * A single command transport buffer consists of two parts, the header
- * record (command transport buffer descriptor) and the actual buffer which
- * holds the commands.
- *
- * @desc: pointer to the buffer descriptor
- * @cmds: pointer to the commands buffer
- */
-struct intel_guc_ct_buffer {
- struct guc_ct_buffer_desc *desc;
- u32 *cmds;
-};
-
-/** Represents pair of command transport buffers.
- *
- * Buffers go in pairs to allow bi-directional communication.
- * To simplify the code we place both of them in the same vma.
- * Buffers from the same pair must share unique owner id.
- *
- * @vma: pointer to the vma with pair of CT buffers
- * @ctbs: buffers for sending(0) and receiving(1) commands
- * @owner: unique identifier
- * @next_fence: fence to be used with next send command
- */
-struct intel_guc_ct_channel {
- struct i915_vma *vma;
- struct intel_guc_ct_buffer ctbs[2];
- u32 owner;
- u32 next_fence;
- bool enabled;
-};
-
-/** Holds all command transport channels.
- *
- * @host_channel: main channel used by the host
- */
-struct intel_guc_ct {
- struct intel_guc_ct_channel host_channel;
- /* other channels are tbd */
-
- /** @lock: protects pending requests list */
- spinlock_t lock;
-
- /** @pending_requests: list of requests waiting for response */
- struct list_head pending_requests;
-
- /** @incoming_requests: list of incoming requests */
- struct list_head incoming_requests;
-
- /** @worker: worker for handling incoming requests */
- struct work_struct worker;
-};
-
-void intel_guc_ct_init_early(struct intel_guc_ct *ct);
-int intel_guc_ct_init(struct intel_guc_ct *ct);
-void intel_guc_ct_fini(struct intel_guc_ct *ct);
-int intel_guc_ct_enable(struct intel_guc_ct *ct);
-void intel_guc_ct_disable(struct intel_guc_ct *ct);
-
-static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
-{
- ct->host_channel.enabled = false;
-}
-
-#endif /* _INTEL_GUC_CT_H_ */
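The removed header paired a descriptor with a command buffer and kept a send/receive pair in one allocation. A conceptual, non-ABI sketch of appending dwords to such a ring with made-up field names, assuming a power-of-two size measured in dwords:

#include <stdint.h>

struct sketch_ctb_desc {
	uint32_t head;		/* consumer offset, in dwords */
	uint32_t tail;		/* producer offset, in dwords */
	uint32_t size;		/* power of two, in dwords */
};

struct sketch_ctb {
	struct sketch_ctb_desc *desc;
	uint32_t *cmds;
};

/* Append if there is room; returns 0 on success, -1 when the ring is full. */
static int sketch_ctb_write(struct sketch_ctb *ctb, const uint32_t *data,
			    uint32_t dwords)
{
	struct sketch_ctb_desc *d = ctb->desc;
	uint32_t used = (d->tail - d->head) & (d->size - 1);
	uint32_t i;

	if (d->size - used <= dwords)	/* keep one slot free to tell full from empty */
		return -1;

	for (i = 0; i < dwords; i++)
		ctb->cmds[(d->tail + i) & (d->size - 1)] = data[i];

	d->tail = (d->tail + dwords) & (d->size - 1);
	return 0;
}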
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
deleted file mode 100644
index 72cdafd9636a..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Vinit Azad <vinit.azad@intel.com>
- * Ben Widawsky <ben@bwidawsk.net>
- * Dave Gordon <david.s.gordon@intel.com>
- * Alex Dai <yu.dai@intel.com>
- */
-
-#include "intel_guc_fw.h"
-#include "i915_drv.h"
-
-#define __MAKE_GUC_FW_PATH(KEY) \
- "i915/" \
- __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
- __stringify(KEY##_GUC_FW_MAJOR) "." \
- __stringify(KEY##_GUC_FW_MINOR) "." \
- __stringify(KEY##_GUC_FW_PATCH) ".bin"
-
-#define SKL_GUC_FW_PREFIX skl
-#define SKL_GUC_FW_MAJOR 32
-#define SKL_GUC_FW_MINOR 0
-#define SKL_GUC_FW_PATCH 3
-#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
-MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
-
-#define BXT_GUC_FW_PREFIX bxt
-#define BXT_GUC_FW_MAJOR 32
-#define BXT_GUC_FW_MINOR 0
-#define BXT_GUC_FW_PATCH 3
-#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
-MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
-
-#define KBL_GUC_FW_PREFIX kbl
-#define KBL_GUC_FW_MAJOR 32
-#define KBL_GUC_FW_MINOR 0
-#define KBL_GUC_FW_PATCH 3
-#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
-MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
-
-#define GLK_GUC_FW_PREFIX glk
-#define GLK_GUC_FW_MAJOR 32
-#define GLK_GUC_FW_MINOR 0
-#define GLK_GUC_FW_PATCH 3
-#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK)
-MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH);
-
-#define ICL_GUC_FW_PREFIX icl
-#define ICL_GUC_FW_MAJOR 32
-#define ICL_GUC_FW_MINOR 0
-#define ICL_GUC_FW_PATCH 3
-#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL)
-MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH);
-
-static void guc_fw_select(struct intel_uc_fw *guc_fw)
-{
- struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *i915 = guc_to_i915(guc);
-
- GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
- if (!HAS_GUC(i915))
- return;
-
- if (i915_modparams.guc_firmware_path) {
- guc_fw->path = i915_modparams.guc_firmware_path;
- guc_fw->major_ver_wanted = 0;
- guc_fw->minor_ver_wanted = 0;
- } else if (IS_ICELAKE(i915)) {
- guc_fw->path = ICL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
- } else if (IS_GEMINILAKE(i915)) {
- guc_fw->path = GLK_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
- } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
- guc_fw->path = KBL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
- } else if (IS_BROXTON(i915)) {
- guc_fw->path = BXT_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
- } else if (IS_SKYLAKE(i915)) {
- guc_fw->path = SKL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
- }
-}
-
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with a GuC, selects the firmware to be uploaded.
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
- struct intel_uc_fw *guc_fw = &guc->fw;
-
- intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC);
- guc_fw_select(guc_fw);
-}
-
-static void guc_prepare_xfer(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /* Must program this register before loading the ucode with DMA */
- I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
- GUC_ENABLE_READ_CACHE_LOGIC |
- GUC_ENABLE_MIA_CACHING |
- GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
- GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
- GUC_ENABLE_MIA_CLOCK_GATING);
-
- if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- else
- I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
-
- if (IS_GEN(dev_priv, 9)) {
- /* DOP Clock Gating Enable for GuC clocks */
- I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- I915_READ(GEN7_MISCCPCTL)));
-
- /* allows for 5us (in 10ns units) before GT can go to RC6 */
- I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
- }
-}
-
-/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *fw = &guc->fw;
- struct sg_table *pages = fw->obj->mm.pages;
- u32 rsa[UOS_RSA_SCRATCH_COUNT];
- int i;
-
- sg_pcopy_to_buffer(pages->sgl, pages->nents,
- rsa, sizeof(rsa), fw->rsa_offset);
-
- for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
- I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-}
-
-static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /* Did we complete the xfer? */
- *status = I915_READ(DMA_CTRL);
- return !(*status & START_DMA);
-}
-
-/*
- * Read the GuC status register (GUC_STATUS) and store it in the
- * specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
- *
- * This is used for polling the GuC status in a wait_for()
- * loop below.
- */
-static inline bool guc_ready(struct intel_guc *guc, u32 *status)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 val = I915_READ(GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
-
- *status = val;
- return (uk_val == GS_UKERNEL_READY) ||
- ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
-}
-
-static int guc_wait_ucode(struct intel_guc *guc)
-{
- u32 status;
- int ret;
-
- /*
- * Wait for the GuC to start up.
- * NB: Docs recommend not using the interrupt for completion.
- * Measurements indicate this should take no more than 20ms, so a
- * timeout here indicates that the GuC has failed and is unusable.
- * (Higher levels of the driver may decide to reset the GuC and
- * attempt the ucode load again if this happens.)
- */
- ret = wait_for(guc_ready(guc, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
- if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- DRM_ERROR("GuC firmware signature verification failed\n");
- ret = -ENOEXEC;
- }
-
- if (ret == 0 && !guc_xfer_completed(guc, &status)) {
- DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
- status);
- ret = -ENXIO;
- }
-
- return ret;
-}
-
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- unsigned long offset;
-
- /*
- * The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components
- */
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
- /* Set the source address for the new blob */
- offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
- /*
- * Set the DMA destination. Current uCode expects the code to be
- * loaded at 8k; locations below this are used for the stack.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- /* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
- return guc_wait_ucode(guc);
-}
-/*
- * Load the GuC firmware blob into the MinuteIA.
- */
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
-{
- struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- guc_prepare_xfer(guc);
-
- /*
- * Note that GuC needs the CSS header plus uKernel code to be copied
- * by the DMA engine in one operation, whereas the RSA signature is
- * loaded via MMIO.
- */
- guc_xfer_rsa(guc);
-
- ret = guc_xfer_ucode(guc);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * intel_guc_fw_upload() - load GuC uCode to device
- * @guc: intel_guc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_guc_fw_upload(struct intel_guc *guc)
-{
- return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
-}
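The removed firmware table builds its paths with a two-level stringify. A stand-alone reproduction showing what the SKL entry above expands to (prefix and version numbers copied from the removed file):

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define MAKE_PATH(KEY) \
	"i915/" \
	__stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
	__stringify(KEY##_GUC_FW_MAJOR) "." \
	__stringify(KEY##_GUC_FW_MINOR) "." \
	__stringify(KEY##_GUC_FW_PATCH) ".bin"

#define SKL_GUC_FW_PREFIX skl
#define SKL_GUC_FW_MAJOR 32
#define SKL_GUC_FW_MINOR 0
#define SKL_GUC_FW_PATCH 3

int main(void)
{
	puts(MAKE_PATH(SKL));	/* prints: i915/skl_guc_32.0.3.bin */
	return 0;
}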
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
deleted file mode 100644
index db531ebc7704..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ /dev/null
@@ -1,1458 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/circ_buf.h>
-
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_lrc_reg.h"
-#include "gt/intel_context.h"
-#include "gem/i915_gem_context.h"
-
-#include "intel_guc_submission.h"
-#include "i915_drv.h"
-
-#define GUC_PREEMPT_FINISHED 0x1
-#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
-#define GUC_PREEMPT_BREADCRUMB_BYTES \
- (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
-
-/**
- * DOC: GuC-based command submission
- *
- * GuC client:
- * An intel_guc_client refers to a submission path through GuC. Currently, there
- * are two clients. One of them (the execbuf_client) is charged with all
- * submissions to the GuC, the other one (preempt_client) is responsible for
- * preempting the execbuf_client. This struct is the owner of a doorbell, a
- * process descriptor and a workqueue (all of them inside a single gem object
- * that contains all required pages for these elements).
- *
- * GuC stage descriptor:
- * During initialization, the driver allocates a static pool of 1024 such
- * descriptors, and shares them with the GuC.
- * Currently, there exists a 1:1 mapping between an intel_guc_client and a
- * guc_stage_desc (via the client's stage_id), so effectively only one
- * gets used. This stage descriptor lets the GuC know about the doorbell,
- * workqueue and process descriptor. Theoretically, it also lets the GuC
- * know about our HW contexts (context ID, etc...), but we actually
- * employ a kind of submission where the GuC uses the LRCA sent via the work
- * item instead (the single guc_stage_desc associated to execbuf client
- * contains information about the default kernel context only, but this is
- * essentially unused). This is called a "proxy" submission.
- *
- * The Scratch registers:
- * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
- * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
- * It then triggers an interrupt on the GuC via another register write (0xC4C8).
- * Firmware writes a success/fail code back to the action register after
- * processing the request. The kernel driver polls waiting for this update and
- * then proceeds.
- * See intel_guc_send()
- *
- * Doorbells:
- * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
- * mapped into process space.
- *
- * Work Items:
- * There are several types of work items that the host may place into a
- * workqueue, each with its own requirements and limitations. Currently only
- * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
- * represents an in-order queue. The kernel driver packs a ring tail pointer
- * and an ELSP context descriptor dword into the Work Item.
- * See guc_add_request()
- *
- */
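The "Scratch registers" paragraph above describes the MMIO action handshake: write the request to the action register, ring the GuC, then poll the same register for a success/fail code. A shape-only sketch of that loop; write_scratch0(), ring_guc_irq(), read_scratch0() and status_to_errno() are hypothetical stand-ins, not driver functions, and the "firmware overwrites the action register" convention is only what the comment above states.

static int sketch_send_action(u32 action, unsigned int timeout_ms)
{
	unsigned int waited;
	u32 status;

	write_scratch0(action);		/* hypothetical: SOFT_SCRATCH_0 write */
	ring_guc_irq();			/* hypothetical: interrupt the firmware */

	for (waited = 0; waited < timeout_ms; waited++) {
		status = read_scratch0();	/* hypothetical: poll for the reply */
		if (status != action)
			return status_to_errno(status);	/* hypothetical mapping */
		msleep(1);
	}

	return -ETIMEDOUT;
}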
-
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline bool is_high_priority(struct intel_guc_client *client)
-{
- return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
- client->priority == GUC_CLIENT_PRIORITY_HIGH);
-}
-
-static int reserve_doorbell(struct intel_guc_client *client)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
-
- /*
- * The bitmap tracks which doorbell registers are currently in use.
- * It is split into two halves; the first half is used for normal
- * priority contexts, the second half for high-priority ones.
- */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (is_high_priority(client)) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
- if (id == end)
- return -ENOSPC;
-
- __set_bit(id, client->guc->doorbell_bitmap);
- client->doorbell_id = id;
- DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
- client->stage_id, yesno(is_high_priority(client)),
- id);
- return 0;
-}
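reserve_doorbell() above splits the doorbell id space into a normal-priority half and a high-priority half. A stand-alone illustration of that split using a plain bit array instead of the kernel bitmap API; the doorbell count here is illustrative.

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_NUM_DOORBELLS 256	/* illustrative, not necessarily GUC_NUM_DOORBELLS */

/* Find the first free id in the requested half, mirroring the search above. */
static int sketch_reserve(bool used[SKETCH_NUM_DOORBELLS], bool high_prio)
{
	int from = high_prio ? SKETCH_NUM_DOORBELLS / 2 : 0;
	int to = high_prio ? SKETCH_NUM_DOORBELLS : SKETCH_NUM_DOORBELLS / 2;
	int id;

	for (id = from; id < to; id++) {
		if (!used[id]) {
			used[id] = true;
			return id;
		}
	}

	return -1;	/* the removed code returns -ENOSPC here */
}

int main(void)
{
	bool used[SKETCH_NUM_DOORBELLS] = { false };

	printf("%d %d\n", sketch_reserve(used, false), sketch_reserve(used, true));
	/* prints "0 128": one id from each half of the space */
	return 0;
}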
-
-static bool has_doorbell(struct intel_guc_client *client)
-{
- if (client->doorbell_id == GUC_DOORBELL_INVALID)
- return false;
-
- return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
-}
-
-static void unreserve_doorbell(struct intel_guc_client *client)
-{
- GEM_BUG_ON(!has_doorbell(client));
-
- __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
- client->doorbell_id = GUC_DOORBELL_INVALID;
-}
-
-/*
- * Tell the GuC to allocate or deallocate a specific doorbell
- */
-
-static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
- u32 action[] = {
- INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
- stage_id
- };
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
- u32 action[] = {
- INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
- stage_id
- };
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
-{
- struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
-
- return &base[client->stage_id];
-}
-
-/*
- * Initialise, update, or clear doorbell data shared with the GuC
- *
- * These functions modify shared data and so need access to the mapped
- * client object which contains the page being used for the doorbell
- */
-
-static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
-{
- struct guc_stage_desc *desc;
-
- /* Update the GuC's idea of the doorbell ID */
- desc = __get_stage_desc(client);
- desc->db_id = new_id;
-}
-
-static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
-{
- return client->vaddr + client->doorbell_offset;
-}
-
-static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
-}
-
-static void __init_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *doorbell;
-
- doorbell = __get_doorbell(client);
- doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = 0;
-}
-
-static void __fini_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *doorbell;
- u16 db_id = client->doorbell_id;
-
- doorbell = __get_doorbell(client);
- doorbell->db_status = GUC_DOORBELL_DISABLED;
-
- /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
- * to go to zero after updating db_status before we call the GuC to
- * release the doorbell
- */
- if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
- WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-}
-
-static int create_doorbell(struct intel_guc_client *client)
-{
- int ret;
-
- if (WARN_ON(!has_doorbell(client)))
- return -ENODEV; /* internal setup error, should never happen */
-
- __update_doorbell_desc(client, client->doorbell_id);
- __init_doorbell(client);
-
- ret = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (ret) {
- __fini_doorbell(client);
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
- client->stage_id, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int destroy_doorbell(struct intel_guc_client *client)
-{
- int ret;
-
- GEM_BUG_ON(!has_doorbell(client));
-
- __fini_doorbell(client);
- ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
- if (ret)
- DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
- client->stage_id, ret);
-
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
-
- return ret;
-}
-
-static unsigned long __select_cacheline(struct intel_guc *guc)
-{
- unsigned long offset;
-
- /* Doorbell uses a single cache line within a page */
- offset = offset_in_page(guc->db_cacheline);
-
- /* Moving to next cache line to reduce contention */
- guc->db_cacheline += cache_line_size();
-
- DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cache_line_size());
- return offset;
-}
-
-static inline struct guc_process_desc *
-__get_process_desc(struct intel_guc_client *client)
-{
- return client->vaddr + client->proc_desc_offset;
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static void guc_proc_desc_init(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc;
-
- desc = memset(__get_process_desc(client), 0, sizeof(*desc));
-
- /*
- * XXX: pDoorbell and WQVBaseAddress are pointers in process address
- * space for ring3 clients (set them as in mmap_ioctl) or kernel
- * space for kernel clients (map on demand instead? May make debug
- * easier to have it mapped).
- */
- desc->wq_base_addr = 0;
- desc->db_base_addr = 0;
-
- desc->stage_id = client->stage_id;
- desc->wq_size_bytes = GUC_WQ_SIZE;
- desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = client->priority;
-}
-
-static void guc_proc_desc_fini(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc;
-
- desc = __get_process_desc(client);
- memset(desc, 0, sizeof(*desc));
-}
-
-static int guc_stage_desc_pool_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc,
- PAGE_ALIGN(sizeof(struct guc_stage_desc) *
- GUC_MAX_STAGE_DESCRIPTORS));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- guc->stage_desc_pool = vma;
- guc->stage_desc_pool_vaddr = vaddr;
- ida_init(&guc->stage_ids);
-
- return 0;
-}
-
-static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
-{
- ida_destroy(&guc->stage_ids);
- i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise/clear the stage descriptor shared with the GuC firmware.
- *
- * This descriptor tells the GuC where (in GGTT space) to find the important
- * data structures relating to this client (doorbell, process descriptor,
- * write queue, etc).
- */
-static void guc_stage_desc_init(struct intel_guc_client *client)
-{
- struct intel_guc *guc = client->guc;
- struct i915_gem_context *ctx = client->owner;
- struct i915_gem_engines_iter it;
- struct guc_stage_desc *desc;
- struct intel_context *ce;
- u32 gfx_addr;
-
- desc = __get_stage_desc(client);
- memset(desc, 0, sizeof(*desc));
-
- desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
- GUC_STAGE_DESC_ATTR_KERNEL;
- if (is_high_priority(client))
- desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
- desc->stage_id = client->stage_id;
- desc->priority = client->priority;
- desc->db_id = client->doorbell_id;
-
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct guc_execlist_context *lrc;
-
- if (!(ce->engine->mask & client->engines))
- continue;
-
- /* TODO: We have a design issue to be solved here. Only when we
- * receive the first batch do we know which engine the user is
- * using. But here the GuC expects the lrc and ring to be pinned.
- * This is not an issue for the default context, which is currently
- * the only owner of a GuC client, but any future owner of a GuC
- * client needs to make sure the lrc is pinned prior to entering
- * here.
- */
- if (!ce->state)
- break; /* XXX: continue? */
-
- /*
- * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
- * submission or, in other words, not using a direct submission
- * model) the KMD's LRCA is not used for any work submission.
- * Instead, the GuC uses the LRCA of the user mode context (see
- * guc_add_request below).
- */
- lrc = &desc->lrc[ce->engine->guc_id];
- lrc->context_desc = lower_32_bits(ce->lrc_desc);
-
- /* The state page is after PPHWSP */
- lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
- LRC_STATE_PN * PAGE_SIZE;
-
- /* XXX: In direct submission, the GuC wants the HW context id
- * here. In proxy submission, it wants the stage id
- */
- lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
- (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
-
- lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
- lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
- lrc->ring_next_free_location = lrc->ring_begin;
- lrc->ring_current_tail_pointer_value = 0;
-
- desc->engines_used |= BIT(ce->engine->guc_id);
- }
- i915_gem_context_unlock_engines(ctx);
-
- DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc->engines_used);
- WARN_ON(desc->engines_used == 0);
-
- /*
- * The doorbell, process descriptor, and workqueue are all parts
- * of the client object, which the GuC will reference via the GGTT
- */
- gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
- desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
- client->doorbell_offset;
- desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
- desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
- desc->process_desc = gfx_addr + client->proc_desc_offset;
- desc->wq_addr = gfx_addr + GUC_DB_SIZE;
- desc->wq_size = GUC_WQ_SIZE;
-
- desc->desc_private = ptr_to_u64(client);
-}
-
-static void guc_stage_desc_fini(struct intel_guc_client *client)
-{
- struct guc_stage_desc *desc;
-
- desc = __get_stage_desc(client);
- memset(desc, 0, sizeof(*desc));
-}
-
-/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc_client *client,
- u32 target_engine, u32 context_desc,
- u32 ring_tail, u32 fence_id)
-{
- /* wqi_len is in DWords, and does not include the one-word header */
- const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = __get_process_desc(client);
- struct guc_wq_item *wqi;
- u32 wq_off;
-
- lockdep_assert_held(&client->wq_lock);
-
- /* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages,
- * so a wqi can neither straddle a page boundary nor wrap around to the
- * beginning. This simplifies the implementation below.
- *
- * XXX: if that ever stops being true, we need to stage the data in a temp
- * wqi and copy it into the workqueue buffer dw by dw.
- */
- BUILD_BUG_ON(wqi_size != 16);
-
- /* We expect the WQ to be active if we're appending items to it */
- GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
-
- /* Free space is guaranteed. */
- wq_off = READ_ONCE(desc->tail);
- GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
- GUC_WQ_SIZE) < wqi_size);
- GEM_BUG_ON(wq_off & (wqi_size - 1));
-
- /* WQ starts from the page after doorbell / process_desc */
- wqi = client->vaddr + wq_off + GUC_DB_SIZE;
-
- if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
- wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
- } else {
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
- }
-
- /* Make the update visible to GuC */
- WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
-}
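guc_wq_item_append() above advances the tail with a power-of-two mask, which keeps the 16-byte items from wrapping mid-item as long as GUC_WQ_SIZE is a power of two and a multiple of the item size. A stand-alone illustration with example sizes:

#include <stdio.h>

#define SKETCH_WQ_SIZE  8192	/* example: two 4K pages, power of two */
#define SKETCH_WQI_SIZE 16	/* one 4-dword work item */

int main(void)
{
	unsigned int tail = SKETCH_WQ_SIZE - SKETCH_WQI_SIZE;	/* last slot */

	/* Same update as the removed code: mask instead of modulo. */
	tail = (tail + SKETCH_WQI_SIZE) & (SKETCH_WQ_SIZE - 1);
	printf("tail wraps to %u\n", tail);	/* prints 0 */
	return 0;
}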
-
-static void guc_ring_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *db;
- u32 cookie;
-
- lockdep_assert_held(&client->wq_lock);
-
- /* pointer of current doorbell cacheline */
- db = __get_doorbell(client);
-
- /*
- * We're not expecting the doorbell cookie to change behind our back;
- * we also need to treat 0 as a reserved value.
- */
- cookie = READ_ONCE(db->cookie);
- WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
-
- /* XXX: doorbell was lost and need to acquire it again */
- GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
-}
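guc_ring_doorbell() above bumps the cookie with "cookie + 1 ?: cookie + 2", a GNU-C conditional that skips the reserved value 0 on wrap-around. The same thing in portable C, with the wrap-around case worked through:

#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_next_cookie(uint32_t cookie)
{
	uint32_t next = cookie + 1;

	return next ? next : cookie + 2;	/* 0 is reserved, skip it */
}

int main(void)
{
	printf("0x%08x -> 0x%08x\n", 0xffffffffu, sketch_next_cookie(0xffffffffu));
	/* prints 0xffffffff -> 0x00000001 */
	return 0;
}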
-
-static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
-{
- struct intel_guc_client *client = guc->execbuf_client;
- struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
- u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
-
- spin_lock(&client->wq_lock);
-
- guc_wq_item_append(client, engine->guc_id, ctx_desc,
- ring_tail, rq->fence.seqno);
- guc_ring_doorbell(client);
-
- client->submissions[engine->id] += 1;
-
- spin_unlock(&client->wq_lock);
-}
-
-/*
- * When we're doing submissions using the regular execlists backend, writing
- * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
- * pages pinned in the mappable aperture portion of the GGTT are visible to
- * the command streamer. Writes done by the GuC on our behalf do not guarantee
- * such ordering, so to ensure the flush we issue a POSTING READ.
- */
-static void flush_ggtt_writes(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (i915_vma_is_map_and_fenceable(vma))
- intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
-}
-
-static void inject_preempt_context(struct work_struct *work)
-{
- struct guc_preempt_work *preempt_work =
- container_of(work, typeof(*preempt_work), work);
- struct intel_engine_cs *engine = preempt_work->engine;
- struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
- preempt_work[engine->id]);
- struct intel_guc_client *client = guc->preempt_client;
- struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = engine->preempt_context;
- u32 data[7];
-
- if (!ce->ring->emit) { /* recreate upon load/resume */
- u32 addr = intel_hws_preempt_done_address(engine);
- u32 *cs;
-
- cs = ce->ring->vaddr;
- if (engine->class == RENDER_CLASS) {
- cs = gen8_emit_ggtt_write_rcs(cs,
- GUC_PREEMPT_FINISHED,
- addr,
- PIPE_CONTROL_CS_STALL);
- } else {
- cs = gen8_emit_ggtt_write(cs,
- GUC_PREEMPT_FINISHED,
- addr,
- 0);
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
- GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
-
- flush_ggtt_writes(ce->ring->vma);
- }
-
- spin_lock_irq(&client->wq_lock);
- guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
- GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
- spin_unlock_irq(&client->wq_lock);
-
- /*
- * If GuC firmware performs an engine reset while that engine had
- * a preemption pending, it will set the terminated attribute bit
- * on our preemption stage descriptor. GuC firmware retains all
- * pending work items for a high-priority GuC client, unlike the
- * normal-priority GuC client where work items are dropped. It
- * wants to make sure the preempt-to-idle work doesn't run when
- * scheduling resumes, and uses this bit to inform its scheduler
- * and presumably us as well. Our job is to clear it for the next
- * preemption after reset, otherwise that and future preemptions
- * will never complete. We'll just clear it every time.
- */
- stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
-
- data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
- data[1] = client->stage_id;
- data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
- INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
- data[3] = engine->guc_id;
- data[4] = guc->execbuf_client->priority;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
-
- if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
- execlists_clear_active(&engine->execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- tasklet_schedule(&engine->execlists.tasklet);
- }
-
- (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
-}
-
-/*
- * We're using user interrupt and HWSP value to mark that preemption has
- * finished and GPU is idle. Normally, we could unwind and continue similar to
- * execlists submission path. Unfortunately, with GuC we also need to wait for
- * it to finish its own postprocessing, before attempting to submit. Otherwise
- * GuC may silently ignore our submissions, and thus we risk losing request at
- * best, executing out-of-order and causing kernel panic at worst.
- */
-#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
-static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
-{
- struct intel_guc *guc = &engine->i915->guc;
- struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
- struct guc_ctx_report *report =
- &data->preempt_ctx_report[engine->guc_id];
-
- if (wait_for_atomic(report->report_return_status ==
- INTEL_GUC_REPORT_STATUS_COMPLETE,
- GUC_PREEMPT_POSTPROCESS_DELAY_MS))
- DRM_ERROR("Timed out waiting for GuC preemption report\n");
- /*
- * GuC is expecting that we're also going to clear the affected context
- * counter; let's also reset the return status so as not to depend on the GuC
- * resetting it after receiving another preempt action.
- */
- report->affected_count = 0;
- report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
-}
-
-static void complete_preempt_context(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
- if (inject_preempt_hang(execlists))
- return;
-
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- wait_for_guc_preempt_report(engine);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
-}
-
-/**
- * guc_submit() - Submit commands through GuC
- * @engine: engine associated with the commands
- *
- * The only error here arises if the doorbell hardware isn't functioning
- * as expected, which really shouldn't happen.
- */
-static void guc_submit(struct intel_engine_cs *engine)
-{
- struct intel_guc *guc = &engine->i915->guc;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- unsigned int n;
-
- for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct i915_request *rq;
- unsigned int count;
-
- rq = port_unpack(&port[n], &count);
- if (rq && count == 0) {
- port_set(&port[n], port_pack(rq, ++count));
-
- flush_ggtt_writes(rq->ring->vma);
-
- guc_add_request(guc, rq);
- }
- }
-}
-
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
- GEM_BUG_ON(port_isset(port));
-
- port_set(port, i915_request_get(rq));
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
- return rq->sched.attr.priority;
-}
-
-static inline int port_prio(const struct execlist_port *port)
-{
- return rq_prio(port_request(port)) | __NO_PREEMPTION;
-}
-
-static bool __guc_dequeue(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct i915_request *last = NULL;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- bool submit = false;
- struct rb_node *rb;
-
- lockdep_assert_held(&engine->active.lock);
-
- if (port_isset(port)) {
- if (intel_engine_has_preemption(engine)) {
- struct guc_preempt_work *preempt_work =
- &engine->i915->guc.preempt_work[engine->id];
- int prio = execlists->queue_priority_hint;
-
- if (i915_scheduler_need_preempt(prio,
- port_prio(port))) {
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- queue_work(engine->i915->guc.preempt_wq,
- &preempt_work->work);
- return false;
- }
- }
-
- port++;
- if (port_isset(port))
- return false;
- }
- GEM_BUG_ON(port_isset(port));
-
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- struct i915_request *rq, *rn;
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- if (last && rq->hw_context != last->hw_context) {
- if (port == last_port)
- goto done;
-
- if (submit)
- port_assign(port, last);
- port++;
- }
-
- list_del_init(&rq->sched.link);
-
- __i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
-
- last = rq;
- submit = true;
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-done:
- execlists->queue_priority_hint =
- rb ? to_priolist(rb)->priority : INT_MIN;
- if (submit)
- port_assign(port, last);
- if (last)
- execlists_user_begin(execlists, execlists->port);
-
- /* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(port_isset(execlists->port) &&
- !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
- !port_isset(execlists->port));
-
- return submit;
-}
-
-static void guc_dequeue(struct intel_engine_cs *engine)
-{
- if (__guc_dequeue(engine))
- guc_submit(engine);
-}
-
-static void guc_submission_tasklet(unsigned long data)
-{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct i915_request *rq;
- unsigned long flags;
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- rq = port_request(port);
- while (rq && i915_request_completed(rq)) {
- trace_i915_request_out(rq);
- i915_request_put(rq);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port)) {
- execlists_user_begin(execlists, port);
- rq = port_request(port);
- } else {
- execlists_user_end(execlists);
- rq = NULL;
- }
- }
-
- if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
- intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
- GUC_PREEMPT_FINISHED)
- complete_preempt_context(engine);
-
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
- guc_dequeue(engine);
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void guc_reset_prepare(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- GEM_TRACE("%s\n", engine->name);
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- */
- __tasklet_disable_sync_once(&execlists->tasklet);
-
- /*
- * We're using a worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though the tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
-}
-
-static void guc_reset(struct intel_engine_cs *engine, bool stalled)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq;
- unsigned long flags;
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- execlists_cancel_port_requests(execlists);
-
- /* Push back any incomplete requests for replay after the reset. */
- rq = execlists_unwind_incomplete_requests(execlists);
- if (!rq)
- goto out_unlock;
-
- if (!i915_request_started(rq))
- stalled = false;
-
- i915_reset_request(rq, stalled);
- intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
-
-out_unlock:
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void guc_cancel_requests(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- GEM_TRACE("%s\n", engine->name);
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- execlists_cancel_port_requests(execlists);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- list_del_init(&rq->sched.link);
- __i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void guc_reset_finish(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (__tasklet_enable(&execlists->tasklet))
- /* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
-
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
-}
-
-/*
- * Everything below here is concerned with setup & teardown, and is
- * therefore not part of the somewhat time-critical batch-submission
- * path of guc_submit() above.
- */
-
-/* Check that a doorbell register is in the expected state */
-static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
-{
- bool valid;
-
- GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
-
- valid = __doorbell_valid(guc, db_id);
-
- if (test_bit(db_id, guc->doorbell_bitmap) == valid)
- return true;
-
- DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
- db_id, yesno(valid));
-
- return false;
-}
-
-static bool guc_verify_doorbells(struct intel_guc *guc)
-{
- bool doorbells_ok = true;
- u16 db_id;
-
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
- if (!doorbell_ok(guc, db_id))
- doorbells_ok = false;
-
- return doorbells_ok;
-}
-
-/**
- * guc_client_alloc() - Allocate an intel_guc_client
- * @dev_priv: driver private data structure
- * @engines: The set of engines to enable for this client
- * @priority: one of the four priority levels: _CRITICAL, _HIGH, _NORMAL
- * and _LOW. The kernel client that replaces execlist submission is
- * created with NORMAL priority. A scheduler client can use HIGH
- * priority, while a preemption context can use CRITICAL.
- * @ctx: the context that owns the client (we use the default render
- * context)
- *
- * Return: An intel_guc_client object if success, else NULL.
- */
-static struct intel_guc_client *
-guc_client_alloc(struct drm_i915_private *dev_priv,
- u32 engines,
- u32 priority,
- struct i915_gem_context *ctx)
-{
- struct intel_guc_client *client;
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_vma *vma;
- void *vaddr;
- int ret;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- client->guc = guc;
- client->owner = ctx;
- client->engines = engines;
- client->priority = priority;
- client->doorbell_id = GUC_DOORBELL_INVALID;
- spin_lock_init(&client->wq_lock);
-
- ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
- GFP_KERNEL);
- if (ret < 0)
- goto err_client;
-
- client->stage_id = ret;
-
- /* The first page is the doorbell/proc_desc; the following two pages are the wq. */
- vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_id;
- }
-
- /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
- client->vma = vma;
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_vma;
- }
- client->vaddr = vaddr;
-
- ret = reserve_doorbell(client);
- if (ret)
- goto err_vaddr;
-
- client->doorbell_offset = __select_cacheline(guc);
-
- /*
- * Since the doorbell only requires a single cacheline, we can save
- * space by putting the application process descriptor in the same
- * page. Use the half of the page that doesn't include the doorbell.
- */
- if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
- client->proc_desc_offset = 0;
- else
- client->proc_desc_offset = (GUC_DB_SIZE / 2);
-
- DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
- priority, client, client->engines, client->stage_id);
- DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
-
- return client;
-
-err_vaddr:
- i915_gem_object_unpin_map(client->vma->obj);
-err_vma:
- i915_vma_unpin_and_release(&client->vma, 0);
-err_id:
- ida_simple_remove(&guc->stage_ids, client->stage_id);
-err_client:
- kfree(client);
- return ERR_PTR(ret);
-}
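
The cacheline/half-page split chosen at the end of guc_client_alloc() can be sketched in isolation. The sketch below assumes GUC_DB_SIZE is one 4 KiB page and a 64-byte cacheline; these are illustrative values, not the i915 definitions.

/*
 * Standalone sketch of the doorbell/proc_desc page sharing above.
 * DB_SIZE and CACHELINE are assumed illustrative values.
 */
#include <stdio.h>

#define DB_SIZE   4096u  /* assumed: doorbell area is one page */
#define CACHELINE 64u    /* assumed cacheline size */

static unsigned int pick_proc_desc_offset(unsigned int doorbell_offset)
{
	/* Use whichever half of the page the doorbell does not occupy. */
	return doorbell_offset >= DB_SIZE / 2 ? 0 : DB_SIZE / 2;
}

int main(void)
{
	unsigned int db;

	for (db = 0; db < DB_SIZE; db += CACHELINE)
		printf("doorbell @%4u -> proc_desc @%4u\n",
		       db, pick_proc_desc_offset(db));
	return 0;
}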
-
-static void guc_client_free(struct intel_guc_client *client)
-{
- unreserve_doorbell(client);
- i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
- ida_simple_remove(&client->guc->stage_ids, client->stage_id);
- kfree(client);
-}
-
-static inline bool ctx_save_restore_disabled(struct intel_context *ce)
-{
- u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
-
-#define SR_DISABLED \
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
-
- return (sr & SR_DISABLED) == SR_DISABLED;
-
-#undef SR_DISABLED
-}
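
The SR_DISABLED comparison relies on the masked-bit register convention, where _MASKED_BIT_ENABLE() is generally understood to set both a value bit and its write-enable twin in the upper 16 bits. A small sketch with hypothetical bit positions and that assumed expansion:

/*
 * Sketch of the masked-bit check in ctx_save_restore_disabled().
 * The bit positions are hypothetical; MASKED_BIT_ENABLE() reproduces
 * the assumed "value | write-mask in the upper 16 bits" encoding.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(x)	(((x) << 16) | (x))
#define CTX_RESTORE_INHIBIT	(1u << 0)	/* hypothetical */
#define CTX_SAVE_INHIBIT	(1u << 1)	/* hypothetical */
#define SR_DISABLED \
	MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT | CTX_SAVE_INHIBIT)

static bool save_restore_disabled(uint32_t context_control)
{
	/* True only if both inhibit bits (and their mask twins) are set. */
	return (context_control & SR_DISABLED) == SR_DISABLED;
}

int main(void)
{
	printf("%d\n", save_restore_disabled(SR_DISABLED));            /* 1 */
	printf("%d\n", save_restore_disabled(CTX_SAVE_INHIBIT << 16)); /* 0 */
	return 0;
}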
-
-static int guc_clients_create(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_guc_client *client;
-
- GEM_BUG_ON(guc->execbuf_client);
- GEM_BUG_ON(guc->preempt_client);
-
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- GUC_CLIENT_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for submission!\n");
- return PTR_ERR(client);
- }
- guc->execbuf_client = client;
-
- if (dev_priv->preempt_context) {
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- GUC_CLIENT_PRIORITY_KMD_HIGH,
- dev_priv->preempt_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for preemption!\n");
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
- return PTR_ERR(client);
- }
- guc->preempt_client = client;
- }
-
- return 0;
-}
-
-static void guc_clients_destroy(struct intel_guc *guc)
-{
- struct intel_guc_client *client;
-
- client = fetch_and_zero(&guc->preempt_client);
- if (client)
- guc_client_free(client);
-
- client = fetch_and_zero(&guc->execbuf_client);
- if (client)
- guc_client_free(client);
-}
-
-static int __guc_client_enable(struct intel_guc_client *client)
-{
- int ret;
-
- guc_proc_desc_init(client);
- guc_stage_desc_init(client);
-
- ret = create_doorbell(client);
- if (ret)
- goto fail;
-
- return 0;
-
-fail:
- guc_stage_desc_fini(client);
- guc_proc_desc_fini(client);
- return ret;
-}
-
-static void __guc_client_disable(struct intel_guc_client *client)
-{
- /*
- * By the time we're here, GuC may have already been reset. If that is
- * the case, instead of trying (in vain) to communicate with it, let's
- * just cleanup the doorbell HW and our internal state.
- */
- if (intel_guc_is_loaded(client->guc))
- destroy_doorbell(client);
- else
- __fini_doorbell(client);
-
- guc_stage_desc_fini(client);
- guc_proc_desc_fini(client);
-}
-
-static int guc_clients_enable(struct intel_guc *guc)
-{
- int ret;
-
- ret = __guc_client_enable(guc->execbuf_client);
- if (ret)
- return ret;
-
- if (guc->preempt_client) {
- ret = __guc_client_enable(guc->preempt_client);
- if (ret) {
- __guc_client_disable(guc->execbuf_client);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void guc_clients_disable(struct intel_guc *guc)
-{
- if (guc->preempt_client)
- __guc_client_disable(guc->preempt_client);
-
- if (guc->execbuf_client)
- __guc_client_disable(guc->execbuf_client);
-}
-
-/*
- * Set up the memory resources to be shared with the GuC (via the GGTT)
- * at firmware loading time.
- */
-int intel_guc_submission_init(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int ret;
-
- if (guc->stage_desc_pool)
- return 0;
-
- ret = guc_stage_desc_pool_create(guc);
- if (ret)
- return ret;
- /*
- * Keep static analysers happy, let them know that we allocated the
- * vma after testing that it didn't exist earlier.
- */
- GEM_BUG_ON(!guc->stage_desc_pool);
-
- WARN_ON(!guc_verify_doorbells(guc));
- ret = guc_clients_create(guc);
- if (ret)
- goto err_pool;
-
- for_each_engine(engine, dev_priv, id) {
- guc->preempt_work[id].engine = engine;
- INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
- }
-
- return 0;
-
-err_pool:
- guc_stage_desc_pool_destroy(guc);
- return ret;
-}
-
-void intel_guc_submission_fini(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- cancel_work_sync(&guc->preempt_work[id].work);
-
- guc_clients_destroy(guc);
- WARN_ON(!guc_verify_doorbells(guc));
-
- if (guc->stage_desc_pool)
- guc_stage_desc_pool_destroy(guc);
-}
-
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
-
- /* tell all command streamers to forward interrupts (but not vblank)
- * to GuC
- */
- irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
-
- /* route USER_INTERRUPT to Host, all others are sent to GuC. */
- irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- /* These three registers have the same bit definitions */
- I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
- I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
- I915_WRITE(GUC_WD_VECS_IER, ~irqs);
-
- /*
- * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
- * (unmasked) PM interrupts to the GuC. All other bits of this
- * register *disable* generation of a specific interrupt.
- *
- * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
- * writing to the PM interrupt mask register, i.e. interrupts
- * that must not be disabled.
- *
- * If the GuC is handling these interrupts, then we must not let
- * the PM code disable ANY interrupt that the GuC is expecting.
- * So for each ENABLED (0) bit in this register, we must SET the
- * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
- * GuC needs the ARAT expired interrupt unmasked, hence it is set in
- * pm_intrmsk_mbz.
- *
- * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
- * result in the register bit being left SET!
- */
- rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
- rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-}
-
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
-
- /*
- * tell all command streamers NOT to forward interrupts or vblank
- * to GuC.
- */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
-
- /* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
-
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
-}
-
-static void guc_submission_park(struct intel_engine_cs *engine)
-{
- intel_engine_park(engine);
- intel_engine_unpin_breadcrumbs_irq(engine);
- engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
-static void guc_submission_unpark(struct intel_engine_cs *engine)
-{
- engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
- intel_engine_pin_breadcrumbs_irq(engine);
-}
-
-static void guc_set_default_submission(struct intel_engine_cs *engine)
-{
- /*
- * We inherit a bunch of functions from execlists that we'd like
- * to keep using:
- *
- * engine->submit_request = execlists_submit_request;
- * engine->cancel_requests = execlists_cancel_requests;
- * engine->schedule = execlists_schedule;
- *
- * But we need to override the actual submission backend in order
- * to talk to the GuC.
- */
- intel_execlists_set_default_submission(engine);
-
- engine->execlists.tasklet.func = guc_submission_tasklet;
-
- engine->park = guc_submission_park;
- engine->unpark = guc_submission_unpark;
-
- engine->reset.prepare = guc_reset_prepare;
- engine->reset.reset = guc_reset;
- engine->reset.finish = guc_reset_finish;
-
- engine->cancel_requests = guc_cancel_requests;
-
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
-}
-
-int intel_guc_submission_enable(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- /*
- * We're using GuC work items for submitting work through GuC. Since
- * we're coalescing multiple requests from a single context into a
- * single work item prior to assigning it to execlist_port, we can
- * never have more work items than the total number of ports (for all
- * engines). The GuC firmware controls the HEAD of the work queue,
- * and it is guaranteed that it will remove the work item from the
- * queue before our request is completed.
- */
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
- sizeof(struct guc_wq_item) *
- I915_NUM_ENGINES > GUC_WQ_SIZE);
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = guc_clients_enable(guc);
- if (err)
- return err;
-
- /* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(dev_priv);
-
- for_each_engine(engine, dev_priv, id) {
- engine->set_default_submission = guc_set_default_submission;
- engine->set_default_submission(engine);
- }
-
- return 0;
-}
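
The BUILD_BUG_ON() above bounds the worst case of one coalesced work item per execlist port per engine against the work-queue size. The sketch below redoes that arithmetic with assumed values (2 ports, 16-byte work items, 8 engines, 8 KiB work queue), none of which are taken from the i915/GuC headers.

/*
 * Back-of-the-envelope version of the sizing check in
 * intel_guc_submission_enable(). All constants are assumed
 * illustrative values.
 */
#include <assert.h>
#include <stdio.h>

#define NUM_PORTS	2	/* assumed execlist ports per engine */
#define WQ_ITEM_BYTES	16	/* assumed sizeof(struct guc_wq_item) */
#define NUM_ENGINES	8	/* assumed engine count */
#define WQ_SIZE		8192	/* assumed GUC_WQ_SIZE */

int main(void)
{
	unsigned int worst_case = NUM_PORTS * WQ_ITEM_BYTES * NUM_ENGINES;

	/* One coalesced work item per port per engine must always fit. */
	assert(worst_case <= WQ_SIZE);
	printf("worst case %u bytes of %u available\n", worst_case, WQ_SIZE);
	return 0;
}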
-
-void intel_guc_submission_disable(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
-
- guc_interrupts_release(dev_priv);
- guc_clients_disable(guc);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_guc.c"
-#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
deleted file mode 100644
index 7d823a513b9c..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_GUC_SUBMISSION_H_
-#define _INTEL_GUC_SUBMISSION_H_
-
-#include <linux/spinlock.h>
-
-#include "gt/intel_engine_types.h"
-
-#include "i915_gem.h"
-#include "i915_selftest.h"
-
-struct drm_i915_private;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- */
-struct intel_guc_client {
- struct i915_vma *vma;
- void *vaddr;
- struct i915_gem_context *owner;
- struct intel_guc *guc;
-
- /* bitmap of (host) engine ids */
- u32 engines;
- u32 priority;
- u32 stage_id;
- u32 proc_desc_offset;
-
- u16 doorbell_id;
- unsigned long doorbell_offset;
-
- /* Protects GuC client's WQ access */
- spinlock_t wq_lock;
- /* Per-engine counts of GuC submissions */
- u64 submissions[I915_NUM_ENGINES];
-
- /* For testing purposes, use nop WQ items instead of real ones */
- I915_SELFTEST_DECLARE(bool use_nop_wqi);
-};
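
For orientation, the layout described above amounts to a handful of byte offsets. A sketch assuming a 4 KiB doorbell/proc_desc page followed by an 8 KiB work queue, matching the "first page + two pages" split noted in guc_client_alloc():

/*
 * Byte layout of the client object, with assumed 4 KiB + 8 KiB sizes.
 */
#include <stdio.h>

#define DB_PAGE_SIZE 4096u /* assumed GUC_DB_SIZE */
#define WQ_SIZE      8192u /* assumed GUC_WQ_SIZE */

int main(void)
{
	printf("doorbell/proc_desc page: [0x%05x, 0x%05x)\n",
	       0u, DB_PAGE_SIZE);
	printf("work queue (circular):   [0x%05x, 0x%05x)\n",
	       DB_PAGE_SIZE, DB_PAGE_SIZE + WQ_SIZE);
	return 0;
}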
-
-int intel_guc_submission_init(struct intel_guc *guc);
-int intel_guc_submission_enable(struct intel_guc *guc);
-void intel_guc_submission_disable(struct intel_guc *guc);
-void intel_guc_submission_fini(struct intel_guc *guc);
-int intel_guc_preempt_work_create(struct intel_guc *guc);
-void intel_guc_preempt_work_destroy(struct intel_guc *guc);
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 1d7d26e4cf14..2b6c016387c2 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (!i915_modparams.enable_gvt) {
@@ -122,13 +122,14 @@ bail:
}
/**
- * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
+ * intel_gvt_driver_remove - cleanup GVT components when i915 driver is
+ * unbinding
* @dev_priv: drm i915 private *
*
* This function is called at the i915 driver unloading stage, to shutdown
* GVT components and release the related resources.
*/
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
if (!intel_gvt_active(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 61b246470282..502fad8a8652 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,11 +24,11 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-struct intel_gvt;
+struct drm_i915_private;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
+void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
@@ -38,7 +38,8 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
return 0;
}
-static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+
+static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
deleted file mode 100644
index fb6f693d3cac..000000000000
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-
-#include "intel_huc.h"
-#include "i915_drv.h"
-
-void intel_huc_init_early(struct intel_huc *huc)
-{
- struct drm_i915_private *i915 = huc_to_i915(huc);
-
- intel_huc_fw_init_early(huc);
-
- if (INTEL_GEN(i915) >= 11) {
- huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
- huc->status.mask = HUC_LOAD_SUCCESSFUL;
- huc->status.value = HUC_LOAD_SUCCESSFUL;
- } else {
- huc->status.reg = HUC_STATUS2;
- huc->status.mask = HUC_FW_VERIFIED;
- huc->status.value = HUC_FW_VERIFIED;
- }
-}
-
-int intel_huc_init_misc(struct intel_huc *huc)
-{
- struct drm_i915_private *i915 = huc_to_i915(huc);
-
- intel_uc_fw_fetch(i915, &huc->fw);
- return 0;
-}
-
-static int intel_huc_rsa_data_create(struct intel_huc *huc)
-{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->guc;
- struct i915_vma *vma;
- void *vaddr;
-
- /*
- * HuC firmware will sit above GUC_GGTT_TOP and will not map
- * through GTT. Unfortunately, this means GuC cannot perform
- * the HuC auth, as the RSA offset now falls within the GuC
- * inaccessible range. We resort to perma-pinning an additional
- * vma within the accessible range that only contains the rsa
- * signature. The GuC can use this extra pinning to perform
- * the authentication since its GGTT offset will be GuC
- * accessible.
- */
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- huc->rsa_data = vma;
- huc->rsa_data_vaddr = vaddr;
-
- return 0;
-}
-
-static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
-{
- i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
-}
-
-int intel_huc_init(struct intel_huc *huc)
-{
- int err;
-
- err = intel_huc_rsa_data_create(huc);
- if (err)
- return err;
-
- return intel_uc_fw_init(&huc->fw);
-}
-
-void intel_huc_fini(struct intel_huc *huc)
-{
- intel_uc_fw_fini(&huc->fw);
- intel_huc_rsa_data_destroy(huc);
-}
-
-/**
- * intel_huc_auth() - Authenticate HuC uCode
- * @huc: intel_huc structure
- *
- * Called after HuC and GuC firmware loading during intel_uc_init_hw().
- *
- * This function pins the HuC firmware image object into the GGTT.
- * Then it invokes the GuC action to authenticate, passing the offset of
- * the RSA signature through intel_guc_auth_huc(). It then waits up to
- * 50ms for the firmware verification ACK and unpins the object.
- */
-int intel_huc_auth(struct intel_huc *huc)
-{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->guc;
- int ret;
-
- if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, huc->rsa_data));
- if (ret) {
- DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
- goto fail;
- }
-
- /* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(&i915->uncore,
- huc->status.reg,
- huc->status.mask,
- huc->status.value,
- 2, 50, NULL);
- if (ret) {
- DRM_ERROR("HuC: Firmware not verified %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
-
- DRM_ERROR("HuC: Authentication failed %d\n", ret);
- return ret;
-}
-
-/**
- * intel_huc_check_status() - check HuC status
- * @huc: intel_huc structure
- *
- * This function reads the status register to verify whether the HuC
- * firmware was successfully loaded.
- *
- * Returns: 1 if HuC firmware is loaded and verified,
- * 0 if HuC firmware is not loaded and -ENODEV if HuC
- * is not present on this platform.
- */
-int intel_huc_check_status(struct intel_huc *huc)
-{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- intel_wakeref_t wakeref;
- bool status = false;
-
- if (!HAS_HUC(dev_priv))
- return -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- status = (I915_READ(huc->status.reg) & huc->status.mask) ==
- huc->status.value;
-
- return status;
-}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
deleted file mode 100644
index 2a6c94e79f17..000000000000
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_HUC_H_
-#define _INTEL_HUC_H_
-
-#include "i915_reg.h"
-#include "intel_uc_fw.h"
-#include "intel_huc_fw.h"
-
-struct intel_huc {
- /* Generic uC firmware management */
- struct intel_uc_fw fw;
-
- /* HuC-specific additions */
- struct i915_vma *rsa_data;
- void *rsa_data_vaddr;
-
- struct {
- i915_reg_t reg;
- u32 mask;
- u32 value;
- } status;
-};
-
-void intel_huc_init_early(struct intel_huc *huc);
-int intel_huc_init_misc(struct intel_huc *huc);
-int intel_huc_init(struct intel_huc *huc);
-void intel_huc_fini(struct intel_huc *huc);
-int intel_huc_auth(struct intel_huc *huc);
-int intel_huc_check_status(struct intel_huc *huc);
-
-static inline void intel_huc_fini_misc(struct intel_huc *huc)
-{
- intel_uc_fw_cleanup_fetch(&huc->fw);
-}
-
-static inline int intel_huc_sanitize(struct intel_huc *huc)
-{
- intel_uc_fw_sanitize(&huc->fw);
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
deleted file mode 100644
index 05cbf8338f53..000000000000
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "intel_huc_fw.h"
-#include "i915_drv.h"
-
-/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as for the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 8
-#define BXT_BLD_NUM 2893
-
-#define SKL_HUC_FW_MAJOR 01
-#define SKL_HUC_FW_MINOR 07
-#define SKL_BLD_NUM 1398
-
-#define KBL_HUC_FW_MAJOR 02
-#define KBL_HUC_FW_MINOR 00
-#define KBL_BLD_NUM 1810
-
-#define GLK_HUC_FW_MAJOR 03
-#define GLK_HUC_FW_MINOR 01
-#define GLK_BLD_NUM 2893
-
-#define ICL_HUC_FW_MAJOR 8
-#define ICL_HUC_FW_MINOR 4
-#define ICL_BLD_NUM 3238
-
-#define HUC_FW_PATH(platform, major, minor, bld_num) \
- "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
- __stringify(minor) "_" __stringify(bld_num) ".bin"
-
-#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
- SKL_HUC_FW_MINOR, SKL_BLD_NUM)
-MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
-
-#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
- BXT_HUC_FW_MINOR, BXT_BLD_NUM)
-MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
-
-#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
- KBL_HUC_FW_MINOR, KBL_BLD_NUM)
-MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
-
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
- GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-MODULE_FIRMWARE(I915_GLK_HUC_UCODE);
-
-#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \
- ICL_HUC_FW_MINOR, ICL_BLD_NUM)
-MODULE_FIRMWARE(I915_ICL_HUC_UCODE);
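
HUC_FW_PATH() builds the firmware file name purely with the preprocessor; with the SKL constants above it evaluates to "i915/skl_huc_ver01_07_1398.bin". A standalone sketch of the same stringify trick:

/*
 * Reproduction of the HUC_FW_PATH() expansion; prints the SKL path.
 */
#include <stdio.h>

#define __stringify_1(x...) #x
#define __stringify(x...)   __stringify_1(x)

#define HUC_FW_PATH(platform, major, minor, bld_num) \
	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
	__stringify(minor) "_" __stringify(bld_num) ".bin"

#define SKL_HUC_FW_MAJOR 01
#define SKL_HUC_FW_MINOR 07
#define SKL_BLD_NUM      1398

int main(void)
{
	/* "i915/skl_huc_ver01_07_1398.bin" */
	puts(HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, SKL_HUC_FW_MINOR,
			 SKL_BLD_NUM));
	return 0;
}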
-
-static void huc_fw_select(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- if (!HAS_HUC(dev_priv))
- return;
-
- if (i915_modparams.huc_firmware_path) {
- huc_fw->path = i915_modparams.huc_firmware_path;
- huc_fw->major_ver_wanted = 0;
- huc_fw->minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc_fw->path = I915_SKL_HUC_UCODE;
- huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc_fw->path = I915_BXT_HUC_UCODE;
- huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc_fw->path = I915_KBL_HUC_UCODE;
- huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc_fw->path = I915_GLK_HUC_UCODE;
- huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
- } else if (IS_ICELAKE(dev_priv)) {
- huc_fw->path = I915_ICL_HUC_UCODE;
- huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR;
- }
-}
-
-/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with a HuC, this selects the firmware to upload.
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
- struct intel_uc_fw *huc_fw = &huc->fw;
-
- intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC);
- huc_fw_select(huc_fw);
-}
-
-static void huc_xfer_rsa(struct intel_huc *huc)
-{
- struct intel_uc_fw *fw = &huc->fw;
- struct sg_table *pages = fw->obj->mm.pages;
-
- /*
- * HuC firmware image is outside GuC accessible range.
- * Copy the RSA signature out of the image into
- * the perma-pinned region set aside for it
- */
- sg_pcopy_to_buffer(pages->sgl, pages->nents,
- huc->rsa_data_vaddr, fw->rsa_size,
- fw->rsa_offset);
-}
-
-static int huc_xfer_ucode(struct intel_huc *huc)
-{
- struct intel_uc_fw *huc_fw = &huc->fw;
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- struct intel_uncore *uncore = &dev_priv->uncore;
- unsigned long offset = 0;
- u32 size;
- int ret;
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- /* Set the source address for the uCode */
- offset = intel_uc_fw_ggtt_offset(huc_fw) +
- huc_fw->header_offset;
- intel_uncore_write(uncore, DMA_ADDR_0_LOW,
- lower_32_bits(offset));
- intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
- upper_32_bits(offset) & 0xFFFF);
-
- /*
- * Hardware doesn't look at destination address for HuC. Set it to 0,
- * but still program the correct address space.
- */
- intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
- intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- size = huc_fw->header_size + huc_fw->ucode_size;
- intel_uncore_write(uncore, DMA_COPY_SIZE, size);
-
- /* Start the DMA */
- intel_uncore_write(uncore, DMA_CTRL,
- _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
-
- /* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
-
- DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
-
- /* Disable the bits once DMA is over */
- intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
-
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
- return ret;
-}
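
The source-address programming above splits the GGTT offset across DMA_ADDR_0_LOW/HIGH and keeps only 16 bits of the upper half. A sketch of that split for a hypothetical offset:

/*
 * Sketch of the low/high split in huc_xfer_ucode(); the offset value
 * is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0x123456000ull; /* hypothetical GGTT offset */
	unsigned int low  = (unsigned int)offset;		  /* lower_32_bits() */
	unsigned int high = (unsigned int)(offset >> 32) & 0xFFFF; /* upper_32_bits() & 0xFFFF */

	printf("DMA_ADDR_0_LOW  = 0x%08x\n", low);  /* 0x23456000 */
	printf("DMA_ADDR_0_HIGH = 0x%04x\n", high); /* 0x0001 */
	return 0;
}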
-
-/**
- * huc_fw_xfer() - DMAs the firmware
- * @huc_fw: the firmware descriptor
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
-
- huc_xfer_rsa(huc);
-
- return huc_xfer_ucode(huc);
-}
-
-/**
- * intel_huc_fw_upload() - load HuC uCode to device
- * @huc: intel_huc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset. Note that HuC must be loaded before GuC.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_huc_fw_upload(struct intel_huc *huc)
-{
- return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
-}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
new file mode 100644
index 000000000000..d0d038b3cd79
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_drv.h"
+
+/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
+#define REGION_MAP(type, inst) \
+ BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
+
+const u32 intel_region_map[] = {
+ [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
+ [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
+ [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
+};
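
REGION_MAP() packs the memory type into bits 16 and up and the instance into the low 16 bits; it is decoded by MEMORY_TYPE_FROM_REGION()/MEMORY_INSTANCE_FROM_REGION() in intel_memory_region.h later in this patch. A self-contained round-trip of that encoding, with the macros reproduced locally for the sketch:

/*
 * INTEL_MEMORY_LOCAL (type 1), instance 0 encodes to
 * BIT(1 + 16) | BIT(0) == 0x20001, and decodes back to (1, 0).
 */
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define INTEL_MEMORY_TYPE_SHIFT	16
#define REGION_MAP(type, inst)	(BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst))

/* ilog2() stand-in for the power-of-two values produced by REGION_MAP() */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

#define MEMORY_TYPE_FROM_REGION(r)	ilog2_u32((r) >> INTEL_MEMORY_TYPE_SHIFT)
#define MEMORY_INSTANCE_FROM_REGION(r)	ilog2_u32((r) & 0xffff)

int main(void)
{
	unsigned int r = REGION_MAP(1, 0);	/* INTEL_MEMORY_LOCAL, inst 0 */

	printf("encoded  0x%05x\n", r);				 /* 0x20001 */
	printf("type     %u\n", MEMORY_TYPE_FROM_REGION(r));	 /* 1 */
	printf("instance %u\n", MEMORY_INSTANCE_FROM_REGION(r)); /* 0 */
	return 0;
}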
+
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type)
+{
+ struct intel_memory_region *mr;
+ int id;
+
+ for_each_memory_region(mr, i915, id)
+ if (mr->type == mem_type)
+ return mr;
+
+ return NULL;
+}
+
+static u64
+intel_memory_region_free_pages(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ struct i915_buddy_block *block, *on;
+ u64 size = 0;
+
+ list_for_each_entry_safe(block, on, blocks, link) {
+ size += i915_buddy_block_size(&mem->mm, block);
+ i915_buddy_free(&mem->mm, block);
+ }
+ INIT_LIST_HEAD(blocks);
+
+ return size;
+}
+
+void
+__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ mutex_lock(&mem->mm_lock);
+ mem->avail += intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+}
+
+void
+__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+{
+ struct list_head blocks;
+
+ INIT_LIST_HEAD(&blocks);
+ list_add(&block->link, &blocks);
+ __intel_memory_region_put_pages_buddy(block->private, &blocks);
+}
+
+int
+__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks)
+{
+ unsigned int min_order = 0;
+ unsigned long n_pages;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
+ GEM_BUG_ON(!list_empty(blocks));
+
+ if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
+ min_order = ilog2(mem->min_page_size) -
+ ilog2(mem->mm.chunk_size);
+ }
+
+ if (flags & I915_ALLOC_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+ }
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return -E2BIG;
+
+ n_pages = size >> ilog2(mem->mm.chunk_size);
+
+ mutex_lock(&mem->mm_lock);
+
+ do {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ order = fls(n_pages) - 1;
+ GEM_BUG_ON(order > mem->mm.max_order);
+ GEM_BUG_ON(order < min_order);
+
+ do {
+ block = i915_buddy_alloc(&mem->mm, order);
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order)
+ goto err_free_blocks;
+ } while (1);
+
+ n_pages -= BIT(order);
+
+ block->private = mem;
+ list_add(&block->link, blocks);
+
+ if (!n_pages)
+ break;
+ } while (1);
+
+ mem->avail -= size;
+ mutex_unlock(&mem->mm_lock);
+ return 0;
+
+err_free_blocks:
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+ return -ENXIO;
+}
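
The order arithmetic at the top of this function is easiest to see with numbers. A sketch using assumed values, a 4 KiB buddy chunk size and a 64 KiB region minimum page size:

/*
 * Numeric sketch of the min_order and contiguous round-up computations
 * in __intel_memory_region_get_pages_buddy(); chunk/min_page sizes are
 * assumed illustrative values.
 */
#include <stdio.h>

static unsigned int ilog2_u64(unsigned long long x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

static unsigned long long roundup_pow_of_two(unsigned long long x)
{
	unsigned long long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long long chunk = 4096, min_page = 65536;
	unsigned long long size = 3 * 65536;	/* 192 KiB request */

	/* I915_ALLOC_MIN_PAGE_SIZE: never hand out blocks below 64 KiB. */
	printf("min_order = %u\n", ilog2_u64(min_page) - ilog2_u64(chunk)); /* 4 */

	/* I915_ALLOC_CONTIGUOUS: one block, so round up to 256 KiB. */
	size = roundup_pow_of_two(size);
	printf("contiguous order = %u\n",
	       ilog2_u64(size) - ilog2_u64(chunk));	/* 6 */
	return 0;
}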
+
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct i915_buddy_block *block;
+ LIST_HEAD(blocks);
+ int ret;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
+ if (ret)
+ return ERR_PTR(ret);
+
+ block = list_first_entry(&blocks, typeof(*block), link);
+ list_del_init(&block->link);
+ return block;
+}
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem)
+{
+ return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+ PAGE_SIZE);
+}
+
+void intel_memory_region_release_buddy(struct intel_memory_region *mem)
+{
+ i915_buddy_fini(&mem->mm);
+}
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops)
+{
+ struct intel_memory_region *mem;
+ int err;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ mem->i915 = i915;
+ mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->io_start = io_start;
+ mem->min_page_size = min_page_size;
+ mem->ops = ops;
+ mem->total = size;
+ mem->avail = mem->total;
+
+ mutex_init(&mem->objects.lock);
+ INIT_LIST_HEAD(&mem->objects.list);
+ INIT_LIST_HEAD(&mem->objects.purgeable);
+
+ mutex_init(&mem->mm_lock);
+
+ if (ops->init) {
+ err = ops->init(mem);
+ if (err)
+ goto err_free;
+ }
+
+ kref_init(&mem->kref);
+ return mem;
+
+err_free:
+ kfree(mem);
+ return ERR_PTR(err);
+}
+
+void intel_memory_region_set_name(struct intel_memory_region *mem,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
+ va_end(ap);
+}
+
+static void __intel_memory_region_destroy(struct kref *kref)
+{
+ struct intel_memory_region *mem =
+ container_of(kref, typeof(*mem), kref);
+
+ if (mem->ops->release)
+ mem->ops->release(mem);
+
+ mutex_destroy(&mem->mm_lock);
+ mutex_destroy(&mem->objects.lock);
+ kfree(mem);
+}
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem)
+{
+ kref_get(&mem->kref);
+ return mem;
+}
+
+void intel_memory_region_put(struct intel_memory_region *mem)
+{
+ kref_put(&mem->kref, __intel_memory_region_destroy);
+}
+
+/* Global memory region registration -- only slight layer inversions! */
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+ u32 type;
+
+ if (!HAS_REGION(i915, BIT(i)))
+ continue;
+
+ type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ mem = i915_gem_shmem_setup(i915);
+ break;
+ case INTEL_MEMORY_STOLEN:
+ mem = i915_gem_stolen_setup(i915);
+ break;
+ case INTEL_MEMORY_LOCAL:
+ mem = intel_setup_fake_lmem(i915);
+ break;
+ }
+
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ goto out_cleanup;
+ }
+
+ mem->id = intel_region_map[i];
+ mem->type = type;
+ mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+ i915->mm.regions[i] = mem;
+ }
+
+ return 0;
+
+out_cleanup:
+ intel_memory_regions_driver_release(i915);
+ return err;
+}
+
+void intel_memory_regions_driver_release(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *region =
+ fetch_and_zero(&i915->mm.regions[i]);
+
+ if (region)
+ intel_memory_region_put(region);
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_memory_region.c"
+#include "selftests/mock_region.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
new file mode 100644
index 000000000000..232490d89a83
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_MEMORY_REGION_H__
+#define __INTEL_MEMORY_REGION_H__
+
+#include <linux/kref.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/io-mapping.h>
+#include <drm/drm_mm.h>
+
+#include "i915_buddy.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * Base memory type
+ */
+enum intel_memory_type {
+ INTEL_MEMORY_SYSTEM = 0,
+ INTEL_MEMORY_LOCAL,
+ INTEL_MEMORY_STOLEN,
+};
+
+enum intel_region_id {
+ INTEL_REGION_SMEM = 0,
+ INTEL_REGION_LMEM,
+ INTEL_REGION_STOLEN,
+ INTEL_REGION_UNKNOWN, /* Should be last */
+};
+
+#define REGION_SMEM BIT(INTEL_REGION_SMEM)
+#define REGION_LMEM BIT(INTEL_REGION_LMEM)
+#define REGION_STOLEN BIT(INTEL_REGION_STOLEN)
+
+#define INTEL_MEMORY_TYPE_SHIFT 16
+
+#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
+#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
+
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS BIT(1)
+
+#define for_each_memory_region(mr, i915, id) \
+ for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
+ for_each_if((mr) = (i915)->mm.regions[id])
+
+/**
+ * Memory regions encoded as type | instance
+ */
+extern const u32 intel_region_map[];
+
+struct intel_memory_region_ops {
+ unsigned int flags;
+
+ int (*init)(struct intel_memory_region *mem);
+ void (*release)(struct intel_memory_region *mem);
+
+ struct drm_i915_gem_object *
+ (*create_object)(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+};
+
+struct intel_memory_region {
+ struct drm_i915_private *i915;
+
+ const struct intel_memory_region_ops *ops;
+
+ struct io_mapping iomap;
+ struct resource region;
+
+ /* For fake LMEM */
+ struct drm_mm_node fake_mappable;
+
+ struct i915_buddy_mm mm;
+ struct mutex mm_lock;
+
+ struct kref kref;
+
+ resource_size_t io_start;
+ resource_size_t min_page_size;
+ resource_size_t total;
+ resource_size_t avail;
+
+ unsigned int type;
+ unsigned int instance;
+ unsigned int id;
+ char name[8];
+
+ dma_addr_t remap_addr;
+
+ struct {
+ struct mutex lock; /* Protects access to objects */
+ struct list_head list;
+ struct list_head purgeable;
+ } objects;
+};
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem);
+void intel_memory_region_release_buddy(struct intel_memory_region *mem);
+
+int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks);
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks);
+void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops);
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem);
+void intel_memory_region_put(struct intel_memory_region *mem);
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
+void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type);
+
+__printf(2, 3) void
+intel_memory_region_set_name(struct intel_memory_region *mem,
+ const char *fmt, ...);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
new file mode 100644
index 000000000000..4ed60e1f01db
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#include "i915_drv.h"
+#include "intel_pch.h"
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 5));
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ /* PantherPoint is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* Comet Lake PCH is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
+ WARN_ON(!IS_ICELAKE(dev_priv));
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_MCC;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
+ WARN_ON(!IS_TIGERLAKE(dev_priv));
+ return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_JSP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static unsigned short
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can be in a
+ * setup where the ISA bridge is not able to be passed through.
+ * In this case, a south bridge can be emulated and we have to
+ * make an educated guess as to which PCH is really there.
+ */
+
+ if (IS_TIGERLAKE(dev_priv))
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (IS_ELKHARTLAKE(dev_priv))
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 5))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
+
+ return id;
+}
+
+void intel_detect_pch(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pch = NULL;
+
+ /*
+ * The reason to probe ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough work easy for VMM, that only
+ * need to expose ISA bridge to let driver know the real hardware
+ * underneath. This is a requirement from virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there is irrelevant
+ * ISA bridge in the system. To work reliably, we should scan trhough
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(dev_priv, id);
+ if (pch_type != PCH_NONE) {
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ id = intel_virt_detect_pch(dev_priv);
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
+ if (!pch)
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+
+ pci_dev_put(pch);
+}
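
For reference, a minimal sketch of how the detection above narrows a raw LPC device ID down to a PCH type. It is illustrative only and not part of the patch; the raw ID 0x9d84 and the helper name example_pch_masking() are hypothetical.

/*
 * Illustrative sketch only -- would have to live next to intel_pch_type(),
 * since that function is static. The raw device ID below is hypothetical.
 */
static void example_pch_masking(struct drm_i915_private *dev_priv)
{
	unsigned short raw = 0x9d84;	/* hypothetical CNP-LP LPC device ID */
	unsigned short id = raw & INTEL_PCH_DEVICE_ID_MASK;	/* 0x9d80 */
	enum intel_pch pch_type = intel_pch_type(dev_priv, id);	/* PCH_CNP */

	drm_dbg_kms(&dev_priv->drm, "raw %04x -> id %04x -> type %d\n",
		    raw, id, pch_type);
}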
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
new file mode 100644
index 000000000000..3053d1ce398b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+struct drm_i915_private;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake PCH */
+ PCH_JSP, /* Jasper Lake PCH */
+ PCH_MCC, /* Mule Creek Canyon PCH */
+ PCH_TGP, /* Tiger Lake PCH */
+};
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
+#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
+#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
+#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
+#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+void intel_detect_pch(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PCH__ */
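
Usage note (illustrative, not part of the patch): because enum intel_pch is sorted by south display engine compatibility, callers can combine the exact-match HAS_PCH_*() helpers with ordered comparisons on INTEL_PCH_TYPE(). A hedged sketch, where example_south_display_setup() is a hypothetical caller:

static void example_south_display_setup(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
		/* ICP and every later entry in the enum share the newer
		 * south display engine, so one ordered check covers them. */
	} else if (HAS_PCH_LPT_LP(dev_priv)) {
		/* LPT-LP/WPT-LP need the low-power variant handling,
		 * keyed off the raw PCH ID rather than the PCH type. */
	}
}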
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d9a7a13ce32a..bd2d30ecc030 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,7 +25,6 @@
*
*/
-#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -34,36 +33,19 @@
#include <drm/drm_plane_helper.h>
#include "display/intel_atomic.h"
+#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
+#include "gt/intel_llc.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
-/**
- * DOC: RC6
- *
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -125,6 +107,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+ /*
+ * Lower the display internal timeout.
+ * This is needed to avoid any hard hangs when DSI port PLL
+ * is off and an MMIO access is attempted by any privileged
+ * application, using batch buffers or any other means.
+ */
+ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -150,7 +140,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
}
-static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
+static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
@@ -188,7 +178,7 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
+static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
u16 ddrpll, csipll;
@@ -209,14 +199,12 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->mem_freq = 1600;
break;
default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
}
- dev_priv->ips.r_t = dev_priv->mem_freq;
-
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
@@ -240,19 +228,11 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq = 6400;
break;
default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
}
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->ips.c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->ips.c_m = 1;
- } else {
- dev_priv->ips.c_m = 2;
- }
}
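
The logging changes throughout this file follow the same mechanical pattern; a minimal before/after sketch (illustrative only, reusing a message from a later hunk):

/* Before: the old macro logs without any device context. */
DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
	      enableddisabled(enable), enableddisabled(was_enabled));

/* After: drm_dbg_kms() takes the drm_device, so the message is prefixed
 * with the device it came from -- useful on multi-GPU systems. */
drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
	    enableddisabled(enable), enableddisabled(was_enabled));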
static const struct cxsr_latency cxsr_latency_table[] = {
@@ -334,7 +314,8 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
@@ -403,9 +384,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
- DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
- enableddisabled(enable),
- enableddisabled(was_enabled));
+ drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ enableddisabled(enable),
+ enableddisabled(was_enabled));
return was_enabled;
}
@@ -483,7 +464,7 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
@@ -530,8 +511,8 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -547,8 +528,8 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -562,41 +543,45 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pnv_display_wm = {
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_cursor_wm = {
+
+static const struct intel_watermark_params pnv_cursor_wm = {
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@@ -604,6 +589,7 @@ static const struct intel_watermark_params i965_cursor_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i945_wm_info = {
.fifo_size = I945_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -611,6 +597,7 @@ static const struct intel_watermark_params i945_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i915_wm_info = {
.fifo_size = I915_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -618,6 +605,7 @@ static const struct intel_watermark_params i915_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i830_a_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -625,6 +613,7 @@ static const struct intel_watermark_params i830_a_wm_info = {
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i830_bc_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM/2,
@@ -632,6 +621,7 @@ static const struct intel_watermark_params i830_bc_wm_info = {
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i845_wm_info = {
.fifo_size = I830_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -814,10 +804,10 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
/* FIXME check the 'enable' instead */
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return false;
/*
@@ -829,9 +819,28 @@ static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
* around this problem with the watermark code.
*/
if (plane->id == PLANE_CURSOR)
- return plane_state->base.fb != NULL;
+ return plane_state->hw.fb != NULL;
else
- return plane_state->base.visible;
+ return plane_state->uapi.visible;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->state->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
+ */
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->hw.adjusted_mode.crtc_clock;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
@@ -849,7 +858,7 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pineview_update_wm(struct intel_crtc *unused_crtc)
+static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
@@ -862,7 +871,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
dev_priv->fsb_freq,
dev_priv->mem_freq);
if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -870,25 +880,25 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
crtc = single_enabled_crtc(dev_priv);
if (crtc) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp = fb->format->cpp[0];
int clock = adjusted_mode->crtc_clock;
/* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_display_wm,
+ pnv_display_wm.fifo_size,
cpp, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_cursor_wm,
+ pnv_display_wm.fifo_size,
4, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -896,8 +906,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
@@ -905,14 +915,14 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
} else {
@@ -1103,10 +1113,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
unsigned int clock, htotal, cpp, width, wm;
@@ -1116,6 +1126,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
+ cpp = plane_state->hw.fb->format->cpp[0];
+
/*
* Not 100% sure which way ELK should go here as the
* spec only says CL/CTG should assume 32bpp and BW
@@ -1129,17 +1141,12 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
*/
if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
level != G4X_WM_LEVEL_NORMAL)
- cpp = 4;
- else
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = max(cpp, 4u);
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- if (plane->id == PLANE_CURSOR)
- width = plane_state->base.crtc_w;
- else
- width = drm_rect_width(&plane_state->base.dst);
+ width = drm_rect_width(&plane_state->uapi.dst);
if (plane->id == PLANE_CURSOR) {
wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1166,7 +1173,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
for (; level < intel_wm_num_levels(dev_priv); level++) {
@@ -1182,7 +1189,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
@@ -1198,14 +1205,15 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
enum plane_id plane_id = plane->id;
bool dirty = false;
@@ -1258,16 +1266,18 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
- plane->base.name,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
}
return dirty;
@@ -1284,7 +1294,7 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (level > dev_priv->wm.max_level)
return false;
@@ -1322,12 +1332,12 @@ static void g4x_invalidate_wms(struct intel_crtc *crtc,
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1339,8 +1349,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
- if (new_plane_state->base.crtc != &crtc->base &&
- old_plane_state->base.crtc != &crtc->base)
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
continue;
if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1411,17 +1421,17 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
- if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -1489,7 +1499,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->cxsr = true;
wm->hpll_en = true;
@@ -1508,10 +1518,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->fbc_en)
wm->fbc_en = false;
- num_active_crtcs++;
+ num_active_pipes++;
}
- if (num_active_crtcs != 1) {
+ if (num_active_pipes != 1) {
wm->cxsr = false;
wm->hpll_en = false;
wm->fbc_en = false;
@@ -1551,10 +1561,11 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
@@ -1563,16 +1574,17 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
g4x_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
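
The g4x (and vlv) hooks above implement a two-phase watermark update; a hedged sketch of the ordering, where example_wm_commit_order() is a hypothetical stand-in for the atomic commit code that actually drives these hooks:

static void example_wm_commit_order(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	/* Pre plane update: program the safe intermediate watermarks. */
	g4x_initial_watermarks(state, crtc);

	/* ... plane programming and the vblank wait happen here ... */

	/* Post vblank: switch to the optimal watermarks; the hook itself
	 * bails out unless crtc_state->wm.need_postvbl_update is set. */
	g4x_optimize_watermarks(state, crtc);
}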
@@ -1612,10 +1624,10 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
@@ -1624,7 +1636,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
width = crtc_state->pipe_src_w;
@@ -1653,12 +1665,12 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight32(active_planes);
+ int num_active_planes = hweight8(active_planes);
const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
int sprite0_fifo_extra = 0;
@@ -1765,7 +1777,7 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int num_levels = intel_wm_num_levels(dev_priv);
bool dirty = false;
@@ -1782,7 +1794,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
int level;
@@ -1810,11 +1823,12 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
- plane->base.name,
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
return dirty;
}
@@ -1840,16 +1854,16 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
- bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
@@ -1860,8 +1874,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
- if (new_plane_state->base.crtc != &crtc->base &&
- old_plane_state->base.crtc != &crtc->base)
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
continue;
if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1908,7 +1922,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1946,11 +1960,12 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
@@ -2044,17 +2059,17 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
- if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -2097,7 +2112,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->level = dev_priv->wm.max_level;
wm->cxsr = true;
@@ -2111,14 +2126,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->cxsr)
wm->cxsr = false;
- num_active_crtcs++;
+ num_active_pipes++;
wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
}
- if (num_active_crtcs != 1)
+ if (num_active_pipes != 1)
wm->cxsr = false;
- if (num_active_crtcs > 1)
+ if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2170,10 +2185,11 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
@@ -2182,16 +2198,17 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
vlv_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -2210,7 +2227,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
@@ -2226,8 +2243,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
entries = intel_wm_method2(clock, htotal,
crtc->base.cursor->state->crtc_w, 4,
@@ -2240,8 +2258,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
cxsr_enabled = true;
} else {
@@ -2250,8 +2269,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
/* 965 has limitations... */
I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
@@ -2291,7 +2311,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
@@ -2318,7 +2338,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
@@ -2341,7 +2361,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planeb_wm = wm_info->max_wm;
}
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ drm_dbg_kms(&dev_priv->drm,
+ "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
@@ -2366,7 +2387,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
- &enabled->config->base.adjusted_mode;
+ &enabled->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
@@ -2383,7 +2404,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
@@ -2395,8 +2417,9 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
@@ -2424,7 +2447,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
if (crtc == NULL)
return;
- adjusted_mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->hw.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
@@ -2432,7 +2455,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
@@ -2493,8 +2517,8 @@ struct ilk_wm_maximums {
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value, bool is_lp)
{
u32 method1, method2;
@@ -2503,19 +2527,19 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
- method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
if (!is_lp)
return method1;
- method2 = ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->base.dst),
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
return min(method1, method2);
@@ -2525,8 +2549,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value)
{
u32 method1, method2;
@@ -2535,15 +2559,15 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
- method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
- method2 = ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->base.dst),
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
return min(method1, method2);
}
@@ -2552,8 +2576,8 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value)
{
int cpp;
@@ -2561,29 +2585,31 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
- return ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- pstate->base.crtc_w, cpp, mem_value);
+ return ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
+ cpp, mem_value);
}
/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 pri_val)
{
int cpp;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
- return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
+ cpp);
}
static unsigned int
@@ -2647,7 +2673,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
/*
* For some reason the non self refresh
@@ -2752,7 +2778,7 @@ static bool ilk_validate_wm_level(int level,
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
- struct intel_crtc_state *cstate,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
const struct intel_plane_state *sprstate,
const struct intel_plane_state *curstate,
@@ -2770,30 +2796,30 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
if (pristate) {
- result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+ result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
pri_latency, level);
- result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
}
if (sprstate)
- result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+ result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
if (curstate)
- result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+ result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
result->enable = true;
}
static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(cstate->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
const struct drm_display_mode *adjusted_mode =
- &cstate->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
u32 linetime, ips_linetime;
- if (!cstate->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
@@ -2829,7 +2855,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
&val, NULL);
if (ret) {
- DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ drm_err(&dev_priv->drm,
+ "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -2847,7 +2874,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
- DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ drm_err(&dev_priv->drm,
+ "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -2965,8 +2993,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
unsigned int latency = wm[level];
if (latency == 0) {
- DRM_DEBUG_KMS("%s WM%d latency not provided\n",
- name, level);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
continue;
}
@@ -2979,9 +3008,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
else if (level > 0)
latency *= 5;
- DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
- name, level, wm[level],
- latency / 10, latency % 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
}
}
@@ -3015,7 +3044,8 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
if (!changed)
return;
- DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "WM latency values increased to avoid potential underruns\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3043,7 +3073,8 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
dev_priv->wm.spr_latency[3] = 0;
dev_priv->wm.cur_latency[3] = 0;
- DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LP3 watermarks disabled due to potential for lost interrupts\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3093,7 +3124,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
- DRM_DEBUG_KMS("LP0 watermark invalid\n");
+ drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
return false;
}
@@ -3101,40 +3132,36 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
}
/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
- struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane *plane;
- const struct drm_plane_state *plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
const struct intel_plane_state *pristate = NULL;
const struct intel_plane_state *sprstate = NULL;
const struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
- pipe_wm = &cstate->wm.ilk.optimal;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
- const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+ pipe_wm = &crtc_state->wm.ilk.optimal;
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- pristate = ps;
- else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = ps;
- else if (plane->type == DRM_PLANE_TYPE_CURSOR)
- curstate = ps;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
}
- pipe_wm->pipe_enabled = cstate->base.active;
+ pipe_wm->pipe_enabled = crtc_state->hw.active;
if (sprstate) {
- pipe_wm->sprites_enabled = sprstate->base.visible;
- pipe_wm->sprites_scaled = sprstate->base.visible &&
- (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
- drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
+ pipe_wm->sprites_enabled = sprstate->uapi.visible;
+ pipe_wm->sprites_scaled = sprstate->uapi.visible &&
+ (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
+ drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
}
usable_level = max_level;
@@ -3148,11 +3175,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+ pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
@@ -3162,7 +3189,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+ ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3186,11 +3213,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
*/
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(newstate->base.state);
+ to_intel_atomic_state(newstate->uapi.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
@@ -3202,7 +3229,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
- if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+ if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
intel_state->skip_intermediate_wm)
return 0;
@@ -3612,10 +3639,8 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->wm.hw = *results;
}
-bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
@@ -3653,10 +3678,47 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
+ /* HACK! */
+ if (IS_GEN(dev_priv, 12))
+ return false;
+
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
+static void
+skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (!ret) {
+ dev_priv->sagv_block_time_us = val;
+ return;
+ }
+
+ drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
+ } else if (IS_GEN(dev_priv, 11)) {
+ dev_priv->sagv_block_time_us = 10;
+ return;
+ } else if (IS_GEN(dev_priv, 10)) {
+ dev_priv->sagv_block_time_us = 20;
+ return;
+ } else if (IS_GEN(dev_priv, 9)) {
+ dev_priv->sagv_block_time_us = 30;
+ return;
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ }
+
+ /* Default to an unusable block time */
+ dev_priv->sagv_block_time_us = -1;
+}
+
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
@@ -3679,7 +3741,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
- DRM_DEBUG_KMS("Enabling SAGV\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -3690,11 +3752,11 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
* don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to enable SAGV\n");
+ drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
return ret;
}
@@ -3713,7 +3775,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
- DRM_DEBUG_KMS("Disabling SAGV\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -3724,11 +3786,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
* don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
return ret;
}
@@ -3736,52 +3798,43 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-bool intel_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct intel_crtc *crtc;
struct intel_plane *plane;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
enum pipe pipe;
int level, latency;
- int sagv_block_time_us;
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN(dev_priv, 9))
- sagv_block_time_us = 30;
- else if (IS_GEN(dev_priv, 10))
- sagv_block_time_us = 20;
- else
- sagv_block_time_us = 10;
-
/*
* If there are no active CRTCs, no additional checks need be performed
*/
- if (hweight32(intel_state->active_crtcs) == 0)
+ if (hweight8(state->active_pipes) == 0)
return true;
/*
* SKL+ workaround: bspec recommends we disable SAGV when we have
* more then one pipe enabled
*/
- if (hweight32(intel_state->active_crtcs) > 1)
+ if (hweight8(state->active_pipes) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(intel_state->active_crtcs) - 1;
+ pipe = ffs(state->active_pipes) - 1;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- cstate = to_intel_crtc_state(crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
- if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane->id];
+ &crtc_state->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
if (!wm->wm[0].plane_en)
@@ -3804,7 +3857,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
* incur memory latencies higher than sagv_block_time_us we
* can't enable SAGV.
*/
- if (latency < sagv_block_time_us)
+ if (latency < dev_priv->sagv_block_time_us)
return false;
}
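
The gating policy that intel_can_enable_sagv() encodes can be summarised in a small self-contained sketch: at most one active, non-interlaced pipe, and every enabled plane must tolerate a memory latency of at least the per-platform SAGV block time. The plane/latency numbers below are invented for illustration, and the driver additionally walks each plane's watermark levels to find the highest enabled one.

/*
 * Simplified sketch of the SAGV gating policy above; the plane data is
 * hypothetical and 'latency_us' stands for the latency of the plane's
 * highest enabled watermark level.
 */
#include <stdbool.h>
#include <stdio.h>

struct plane_sample {
	bool enabled;
	int latency_us;
};

static bool can_enable_sagv(int active_pipes, bool interlaced,
			    int sagv_block_time_us,
			    const struct plane_sample *planes, int n)
{
	if (active_pipes == 0)
		return true;
	if (active_pipes > 1 || interlaced)
		return false;

	for (int i = 0; i < n; i++) {
		if (!planes[i].enabled)
			continue;
		if (planes[i].latency_us < sagv_block_time_us)
			return false;
	}
	return true;
}

int main(void)
{
	struct plane_sample planes[] = { { true, 35 }, { false, 0 }, { true, 12 } };

	/* prints 0: the third plane's 12us latency is below a 30us block time */
	printf("SAGV allowed: %d\n", can_enable_sagv(1, false, 30, planes, 3));
	return 0;
}
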
@@ -3812,7 +3865,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
}
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+ const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
const int num_active,
struct skl_ddb_allocation *ddb)
@@ -3826,7 +3879,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &cstate->base.adjusted_mode;
+ adjusted_mode = &crtc_state->hw.adjusted_mode;
total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
@@ -3849,35 +3902,34 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+ const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
- struct drm_atomic_state *state = cstate->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = cstate->base.crtc;
- const struct drm_crtc_state *crtc_state;
- const struct drm_crtc *crtc;
+ struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
+ const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
u32 i;
- if (WARN_ON(!state) || !cstate->base.active) {
+ if (WARN_ON(!state) || !crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
return;
}
if (intel_state->active_pipe_changes)
- *num_active = hweight32(intel_state->active_crtcs);
+ *num_active = hweight8(intel_state->active_pipes);
else
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
- ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+ ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
*num_active, ddb);
/*
@@ -3902,16 +3954,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* framebuffer, So instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- const struct drm_display_mode *adjusted_mode;
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
- enum pipe pipe;
- if (!crtc_state->enable)
+ if (!crtc_state->hw.enable)
continue;
- pipe = to_intel_crtc(crtc)->pipe;
- adjusted_mode = &crtc_state->adjusted_mode;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
total_width += hdisplay;
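
One way to realise the width-proportional split described in the comment above is sketched below: each active pipe gets a slice of the global DDB proportional to its hdisplay. The exact rounding used by the driver may differ, so treat this only as an illustration of the idea.

/*
 * Width-proportional DDB split across active pipes (illustrative only).
 */
#include <stdio.h>

struct ddb_entry { int start, end; };

static struct ddb_entry pipe_ddb_slice(int ddb_size, int width_before_pipe,
				       int pipe_width, int total_width)
{
	struct ddb_entry e;

	e.start = ddb_size * width_before_pipe / total_width;
	e.end = ddb_size * (width_before_pipe + pipe_width) / total_width;
	return e;
}

int main(void)
{
	/* two pipes, 1920 and 3840 wide, sharing a hypothetical 1024-block DDB */
	struct ddb_entry a = pipe_ddb_slice(1024, 0, 1920, 5760);
	struct ddb_entry b = pipe_ddb_slice(1024, 1920, 3840, 5760);

	printf("pipe A: %d-%d, pipe B: %d-%d\n", a.start, a.end, b.start, b.end);
	return 0;
}
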
@@ -3930,7 +3981,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane);
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
@@ -3940,7 +3991,7 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
@@ -4007,7 +4058,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (is_planar_yuv_format(fourcc))
+ if (fourcc &&
+ drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4062,38 +4114,27 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
* Caller should take care of dividing & rounding off the value.
*/
static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(pstate->base.plane);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
- if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+ if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
- /* n.b., src is 16.16 fixed point, dst is whole integer */
- if (plane->id == PLANE_CURSOR) {
- /*
- * Cursors only support 0/180 degree rotation,
- * hence no need to account for rotation here.
- */
- src_w = pstate->base.src_w >> 16;
- src_h = pstate->base.src_h >> 16;
- dst_w = pstate->base.crtc_w;
- dst_h = pstate->base.crtc_h;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&pstate->base.src) >> 16;
- src_h = drm_rect_height(&pstate->base.src) >> 16;
- dst_w = drm_rect_width(&pstate->base.dst);
- dst_h = drm_rect_height(&pstate->base.dst);
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ *
+ * n.b., src is 16.16 fixed point, dst is whole integer.
+ */
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4103,121 +4144,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
return mul_fixed16(downscale_w, downscale_h);
}
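
A minimal model of the 16.16 fixed-point math used by skl_plane_downscale_amount(): the per-axis factor is max(src/dst, 1) and the two axes are multiplied together. The fp16 helpers below only approximate the driver's uint_fixed_16_16_t.

/*
 * Toy 16.16 fixed-point downscale computation mirroring the function above.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t fp16;			/* 16.16 fixed point */

static fp16 fp_from_u32(uint32_t v)	{ return v << 16; }
static fp16 fp_div(uint32_t a, uint32_t b) { return (fp16)(((uint64_t)a << 16) / b); }
static fp16 fp_mul(fp16 a, fp16 b)	{ return (fp16)(((uint64_t)a * b) >> 16); }
static fp16 fp_max(fp16 a, fp16 b)	{ return a > b ? a : b; }

static fp16 downscale_amount(uint32_t src_w, uint32_t src_h,
			     uint32_t dst_w, uint32_t dst_h)
{
	fp16 w = fp_max(fp_div(src_w, dst_w), fp_from_u32(1));
	fp16 h = fp_max(fp_div(src_h, dst_h), fp_from_u32(1));

	return fp_mul(w, h);
}

int main(void)
{
	/* 3840x2160 scaled down to 1920x1080: 2x2 -> 4.0 in 16.16 */
	fp16 d = downscale_amount(3840, 2160, 1920, 1080);

	printf("downscale = %u + %u/65536\n", d >> 16, d & 0xffff);
	return 0;
}
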
-static uint_fixed_16_16_t
-skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
-{
- uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return pipe_downscale;
-
- if (crtc_state->pch_pfit.enabled) {
- u32 src_w, src_h, dst_w, dst_h;
- u32 pfit_size = crtc_state->pch_pfit.size;
- uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
- uint_fixed_16_16_t downscale_h, downscale_w;
-
- src_w = crtc_state->pipe_src_w;
- src_h = crtc_state->pipe_src_h;
- dst_w = pfit_size >> 16;
- dst_h = pfit_size & 0xffff;
-
- if (!dst_w || !dst_h)
- return pipe_downscale;
-
- fp_w_ratio = div_fixed16(src_w, dst_w);
- fp_h_ratio = div_fixed16(src_h, dst_h);
- downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
- downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
- pipe_downscale = mul_fixed16(downscale_w, downscale_h);
- }
-
- return pipe_downscale;
-}
-
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct drm_crtc_state *crtc_state = &cstate->base;
- struct drm_atomic_state *state = crtc_state->state;
- struct drm_plane *plane;
- const struct drm_plane_state *pstate;
- struct intel_plane_state *intel_pstate;
- int crtc_clock, dotclk;
- u32 pipe_max_pixel_rate;
- uint_fixed_16_16_t pipe_downscale;
- uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
-
- if (!cstate->base.enable)
- return 0;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
- uint_fixed_16_16_t plane_downscale;
- uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
- int bpp;
-
- if (!intel_wm_plane_visible(cstate,
- to_intel_plane_state(pstate)))
- continue;
-
- if (WARN_ON(!pstate->fb))
- return -EINVAL;
-
- intel_pstate = to_intel_plane_state(pstate);
- plane_downscale = skl_plane_downscale_amount(cstate,
- intel_pstate);
- bpp = pstate->fb->format->cpp[0] * 8;
- if (bpp == 64)
- plane_downscale = mul_fixed16(plane_downscale,
- fp_9_div_8);
-
- max_downscale = max_fixed16(plane_downscale, max_downscale);
- }
- pipe_downscale = skl_pipe_downscale_amount(cstate);
-
- pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
-
- crtc_clock = crtc_state->adjusted_mode.crtc_clock;
- dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- dotclk *= 2;
-
- pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
-
- if (pipe_max_pixel_rate < crtc_clock) {
- DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static u64
-skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- const int plane)
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
- struct intel_plane *intel_plane =
- to_intel_plane(intel_pstate->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
u32 data_rate;
u32 width = 0, height = 0;
- struct drm_framebuffer *fb;
- u32 format;
uint_fixed_16_16_t down_scale_amount;
u64 rate;
- if (!intel_pstate->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- fb = intel_pstate->base.fb;
- format = fb->format->format;
-
- if (intel_plane->id == PLANE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && !is_planar_yuv_format(format))
+
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
return 0;
/*
@@ -4225,55 +4171,50 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ height = drm_rect_height(&plane_state->uapi.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && is_planar_yuv_format(format)) {
+ if (color_plane == 1) {
width /= 2;
height /= 2;
}
data_rate = width * height;
- down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
+ down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
- rate *= fb->format->cpp[plane];
+ rate *= fb->format->cpp[color_plane];
return rate;
}
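
Ignoring the fixed-point downscale factor (taken as 1.0 here), the data-rate rule above boils down to source area times bytes per pixel, with the UV colour plane of a semiplanar format subsampled by two in each direction. A hypothetical NV12-like example:

/*
 * Standalone sketch of skl_plane_relative_data_rate() with downscale = 1.0.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t relative_data_rate(uint32_t src_w, uint32_t src_h,
				   uint32_t cpp, int color_plane)
{
	if (color_plane == 1) {		/* UV plane: 1/2 horizontal and vertical */
		src_w /= 2;
		src_h /= 2;
	}
	return (uint64_t)src_w * src_h * cpp;
}

int main(void)
{
	/* NV12-like layout: 1 byte/pixel Y plane, 2 bytes/pixel UV plane */
	printf("Y  plane rate: %llu\n",
	       (unsigned long long)relative_data_rate(1920, 1080, 1, 0));
	printf("UV plane rate: %llu\n",
	       (unsigned long long)relative_data_rate(1920, 1080, 2, 1));
	return 0;
}
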
static u64
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_crtc_state *cstate = &intel_cstate->base;
- struct drm_atomic_state *state = cstate->state;
- struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
/* packed/y */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
/* uv-plane */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 1);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
uv_plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4282,28 +4223,23 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static u64
-icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate)
{
- struct drm_crtc_state *cstate = &intel_cstate->base;
- struct drm_atomic_state *state = cstate->state;
- struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
+ if (WARN_ON(!crtc_state->uapi.state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
- if (!intel_pstate->linked_plane) {
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
+ if (!plane_state->planar_linked_plane) {
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
} else {
@@ -4311,23 +4247,21 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
/*
* The slave plane might not iterate in
- * drm_atomic_crtc_state_for_each_plane_state(),
+ * intel_atomic_crtc_state_for_each_plane_state(),
* and needs the master plane state which may be
* NULL if we try get_new_plane_state(), so we
* always calculate from the master.
*/
- if (intel_pstate->slave)
+ if (plane_state->planar_slave)
continue;
/* Y plane rate is calculated on the slave */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
- y_plane_id = intel_pstate->linked_plane->id;
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
+ y_plane_id = plane_state->planar_linked_plane->id;
plane_data_rate[y_plane_id] = rate;
total_data_rate += rate;
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 1);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4337,14 +4271,14 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_crtc *crtc = cstate->base.crtc;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+ struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@@ -4357,40 +4291,40 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
int level;
/* Clear the partitioning for disabled planes. */
- memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
- memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
- if (!cstate->base.active) {
+ if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
}
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
- icl_get_total_relative_data_rate(cstate,
+ icl_get_total_relative_data_rate(crtc_state,
plane_data_rate);
else
total_data_rate =
- skl_get_total_relative_data_rate(cstate,
+ skl_get_total_relative_data_rate(crtc_state,
plane_data_rate,
uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+ skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
ddb, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
+ total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
alloc_size -= total[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+ crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+ crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
if (total_data_rate == 0)
return 0;
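
The fixed cursor reservation a few lines above simply carves the cursor's DDB entry out of the top end of the pipe's allocation and leaves the rest for the other planes. A tiny sketch with hypothetical numbers:

/*
 * Sketch of the cursor DDB reservation: the cursor occupies the top of the
 * pipe's slice, the remainder is distributed among the other planes.
 */
#include <stdio.h>

struct ddb_entry { int start, end; };

int main(void)
{
	struct ddb_entry alloc = { .start = 0, .end = 512 };	/* hypothetical pipe slice */
	int cursor_blocks = 8;					/* would come from skl_cursor_allocation() */

	struct ddb_entry cursor = {
		.start = alloc.end - cursor_blocks,
		.end = alloc.end,
	};
	int remaining = (alloc.end - alloc.start) - cursor_blocks;

	printf("cursor: %d-%d, %d blocks left for the other planes\n",
	       cursor.start, cursor.end, remaining);
	return 0;
}
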
@@ -4403,11 +4337,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (WARN_ON(wm->wm[level].min_ddb_alloc >
- total[PLANE_CURSOR])) {
+ if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+ WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
}
@@ -4425,9 +4359,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
if (level < 0) {
- DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
- DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
- alloc_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Requested display configuration exceeds system DDB limitations");
+ drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
+ blocks, alloc_size);
return -EINVAL;
}
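
The level-selection step above picks the highest watermark level whose summed per-plane minimum DDB requirement still fits the pipe's allocation; if even level 0 does not fit, the configuration is rejected with -EINVAL. The descending-level loop itself sits outside this hunk, so the sketch below is a reconstruction with made-up numbers.

/*
 * Find the highest watermark level whose total minimum DDB fits.
 */
#include <stdio.h>

#define MAX_LEVEL 7

static int highest_fitting_level(unsigned int min_ddb[][MAX_LEVEL + 1],
				 int num_planes, unsigned int alloc_size)
{
	for (int level = MAX_LEVEL; level >= 0; level--) {
		unsigned int blocks = 0;

		for (int p = 0; p < num_planes; p++)
			blocks += min_ddb[p][level];

		if (blocks < alloc_size)
			return level;
	}
	return -1;	/* nothing fits: exceeds system DDB limitations */
}

int main(void)
{
	/* two planes; deeper levels need more blocks (made-up numbers) */
	unsigned int min_ddb[2][MAX_LEVEL + 1] = {
		{ 16, 20, 24, 32, 48, 64, 96, 128 },
		{  8, 10, 12, 16, 24, 32, 48,  64 },
	};

	printf("highest level that fits 100 blocks: %d\n",
	       highest_fitting_level(min_ddb, 2, 100));
	return 0;
}
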
@@ -4438,7 +4373,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@@ -4477,9 +4412,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
- &cstate->wm.skl.plane_ddb_y[plane_id];
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
- &cstate->wm.skl.plane_ddb_uv[plane_id];
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
@@ -4510,7 +4445,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
/*
* We only disable the watermarks for each plane if
@@ -4547,7 +4482,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
@@ -4599,43 +4534,43 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
}
static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
- if (!cstate->base.active)
+ if (!crtc_state->hw.active)
return u32_to_fixed16(0);
- pixel_rate = cstate->pixel_rate;
+ pixel_rate = crtc_state->pixel_rate;
if (WARN_ON(pixel_rate == 0))
return u32_to_fixed16(0);
- crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
+ crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
}
static u32
-skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+ if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return 0;
/*
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = cstate->pixel_rate;
- downscale_amount = skl_plane_downscale_amount(cstate, pstate);
+ adjusted_pixel_rate = crtc_state->pixel_rate;
+ downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
return mul_round_up_u32_fixed16(adjusted_pixel_rate,
downscale_amount);
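
For reference, a plain floating-point rendering of the two small helpers above: line time in microseconds from htotal and the pipe pixel rate (in kHz), and the plane pixel rate adjusted by its downscale factor. The driver does this in 16.16 fixed point rather than double.

/*
 * Floating-point model of intel_get_linetime_us() and
 * skl_adjusted_plane_pixel_rate().
 */
#include <math.h>
#include <stdio.h>

static double linetime_us(unsigned int crtc_htotal, unsigned int pixel_rate_khz)
{
	return (double)crtc_htotal * 1000.0 / pixel_rate_khz;
}

static unsigned int adjusted_plane_pixel_rate(unsigned int pipe_pixel_rate_khz,
					      double downscale)
{
	return (unsigned int)ceil(pipe_pixel_rate_khz * downscale);
}

int main(void)
{
	/* 4k-ish timings: htotal 4400, pixel rate 594000 kHz */
	printf("linetime: %.3f us\n", linetime_us(4400, 594000));
	printf("adjusted rate (2x2 downscale): %u kHz\n",
	       adjusted_plane_pixel_rate(594000, 4.0));
	return 0;
}
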
@@ -4648,13 +4583,15 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
- if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
- DRM_DEBUG_KMS("Non planar format have single plane\n");
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non planar format have single plane\n");
return -EINVAL;
}
@@ -4665,7 +4602,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = is_planar_yuv_format(format->format);
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
@@ -4737,24 +4674,19 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int width;
- if (plane->id == PLANE_CURSOR) {
- width = plane_state->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->base.src) >> 16;
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
- plane_state->base.rotation,
+ plane_state->hw.rotation,
skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
wp, color_plane);
}
@@ -4768,13 +4700,13 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
return level > 0;
}
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4800,14 +4732,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
+ crtc_state->hw.adjusted_mode.crtc_htotal,
latency,
wp->plane_blocks_per_line);
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
+ if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
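
A rough floating-point rendering of the two bspec watermark methods chosen between above: method 1 expresses the latency in DDB blocks at the plane's byte rate, method 2 expresses it in whole lines times the blocks needed per line. The exact fixed-point formulas and per-platform adjustments live in skl_wm_method1()/skl_wm_method2(), which are outside this hunk, so the arithmetic below is an approximation.

/*
 * Approximate floating-point versions of the two watermark methods.
 * latency in us, pixel rate in kHz, dbuf block size in bytes.
 */
#include <math.h>
#include <stdio.h>

static double wm_method1(double pixel_rate_khz, double cpp, double latency_us,
			 double dbuf_block_size)
{
	return latency_us * pixel_rate_khz * cpp / (1000.0 * dbuf_block_size);
}

static double wm_method2(double pixel_rate_khz, double htotal, double latency_us,
			 double plane_blocks_per_line)
{
	double lines = ceil(latency_us * pixel_rate_khz / (htotal * 1000.0));

	return lines * plane_blocks_per_line;
}

int main(void)
{
	double m1 = wm_method1(594000, 4, 7, 512);
	double m2 = wm_method2(594000, 4400, 7, 31);

	printf("method1 = %.1f blocks, method2 = %.1f blocks\n", m1, m2);
	return 0;
}
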
@@ -4894,18 +4826,18 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
}
static void
-skl_compute_wm_levels(const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- skl_compute_plane_wm(cstate, level, wm_params,
+ skl_compute_plane_wm(crtc_state, level, wm_params,
result_prev, result);
result_prev = result;
@@ -4913,14 +4845,14 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
}
static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
u32 linetime_wm;
- linetime_us = intel_get_linetime_us(cstate);
+ linetime_us = intel_get_linetime_us(crtc_state);
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
/* Display WA #1135: BXT:ALL GLK:ALL */
@@ -4930,11 +4862,11 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
return linetime_wm;
}
-static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
- struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_y_tile_min;
const u16 trans_amount = 10; /* This is configurable amount */
@@ -5032,8 +4964,8 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
int ret;
@@ -5058,16 +4990,16 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
int ret;
/* Watermarks calculated in master */
- if (plane_state->slave)
+ if (plane_state->planar_slave)
return 0;
- if (plane_state->linked_plane) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id y_plane_id = plane_state->linked_plane->id;
+ if (plane_state->planar_linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
WARN_ON(!fb->format->is_yuv ||
@@ -5092,13 +5024,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
+static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- struct drm_crtc_state *crtc_state = &cstate->base;
- struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
int ret;
/*
@@ -5107,19 +5038,18 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
+ crtc_state) {
if (INTEL_GEN(dev_priv) >= 11)
- ret = icl_build_plane_wm(cstate, intel_pstate);
+ ret = icl_build_plane_wm(crtc_state, plane_state);
else
- ret = skl_build_plane_wm(cstate, intel_pstate);
+ ret = skl_build_plane_wm(crtc_state, plane_state);
if (ret)
return ret;
}
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
+ pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
return 0;
}
@@ -5269,25 +5199,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static u32
-pipes_modified(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
- u32 i, ret = 0;
-
- for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
- ret |= drm_crtc_mask(&crtc->base);
-
- return ret;
-}
-
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
@@ -5353,7 +5270,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -5373,10 +5290,11 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
- plane->base.base.id, plane->base.name,
- old->start, old->end, new->start, new->end,
- skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -5389,104 +5307,99 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
- enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
- enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
- enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
- enast(old_wm->trans_wm.plane_en),
- enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
- enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
- enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
- enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
- enast(new_wm->trans_wm.plane_en));
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+ enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+ enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+ enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+ enast(old_wm->trans_wm.plane_en),
+ enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+ enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+ enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+ enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+ enast(new_wm->trans_wm.plane_en));
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
-
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
- old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
- old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
- old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
- old_wm->trans_wm.plane_res_b,
- new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
- new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
- new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
- new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
- new_wm->trans_wm.plane_res_b);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc);
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+ old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+ old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+ old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+ old_wm->trans_wm.plane_res_b,
+ new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+ new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+ new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+ new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+ new_wm->trans_wm.plane_res_b);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc);
}
}
}
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
+static int intel_add_all_pipes(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 realloc_pipes = pipes_modified(state);
- int ret, i;
- /*
- * When we distrust bios wm we always need to recompute to set the
- * expected DDB allocations for each CRTC.
- */
- if (dev_priv->wm.distrust_bios_wm)
- (*changed) = true;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
- /*
- * If this transaction isn't actually touching any CRTC's, don't
- * bother with watermark calculation. Note that if we pass this
- * test, we're guaranteed to hold at least one CRTC state mutex,
- * which means we can safely use values like dev_priv->active_crtcs
- * since any racing commits that want to update them would need to
- * hold _all_ CRTC state mutexes.
- */
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- (*changed) = true;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- if (!*changed)
- return 0;
+ return 0;
+}
+
+static int
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
/*
* If this is our first atomic update following hardware readout,
@@ -5495,21 +5408,21 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* ensure a full DDB recompute.
*/
if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
state->base.acquire_ctx);
if (ret)
return ret;
- state->active_pipe_changes = ~0;
+ state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
/*
- * We usually only initialize state->active_crtcs if we
+ * We usually only initialize state->active_pipes if
* we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
if (!state->modeset)
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
}
/*
@@ -5526,18 +5439,11 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- realloc_pipes = ~0;
- state->wm_results.dirty_pipes = ~0;
- }
+ state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
- /*
- * We're not recomputing for the pipes not included in the commit, so
- * make sure we start with the current state.
- */
- for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ ret = intel_add_all_pipes(state);
+ if (ret)
+ return ret;
}
return 0;
@@ -5587,7 +5493,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* power well the hardware state will go out of sync
* with the software state.
*/
- if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
skl_plane_wm_equals(dev_priv,
&old_crtc_state->wm.skl.optimal.planes[plane_id],
&new_crtc_state->wm.skl.optimal.planes[plane_id]))
@@ -5610,14 +5516,13 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- bool changed = false;
int ret, i;
/* Clear all dirty flags */
results->dirty_pipes = 0;
- ret = skl_ddb_add_affected_pipes(state, &changed);
- if (ret || !changed)
+ ret = skl_ddb_add_affected_pipes(state);
+ if (ret)
return ret;
/*
@@ -5639,7 +5544,7 @@ skl_compute_wm(struct intel_atomic_state *state)
if (!skl_pipe_wm_equals(crtc,
&old_crtc_state->wm.skl.optimal,
&new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
@@ -5652,34 +5557,35 @@ skl_compute_wm(struct intel_atomic_state *state)
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
- if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
+ if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
}
static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct skl_ddb_values *results = &state->wm_results;
- if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- if (cstate->base.active_changed)
- skl_atomic_update_crtc_wm(state, cstate);
+ if (crtc_state->uapi.active_changed)
+ skl_atomic_update_crtc_wm(state, crtc);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5735,28 +5641,31 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
+ crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
mutex_lock(&dev_priv->wm.wm_mutex);
- if (cstate->wm.need_postvbl_update) {
- intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- }
+ crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
+ ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5812,19 +5721,19 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
skl_ddb_get_hw_state(dev_priv, ddb);
for_each_intel_crtc(&dev_priv->drm, crtc) {
- cstate = to_intel_crtc_state(crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
- skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
if (crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ hw->dirty_pipes |= BIT(crtc->pipe);
}
- if (dev_priv->active_crtcs) {
+ if (dev_priv->active_pipes) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
}
@@ -5835,8 +5744,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
- struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
@@ -6057,19 +5966,22 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
- wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
- yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -6090,7 +6002,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
enum plane_id plane_id = plane->id;
int level;
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
continue;
for (level = 0; level < 3; level++) {
@@ -6161,8 +6073,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
- "assuming DDR DVFS is disabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
@@ -6213,16 +6126,18 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0],
- wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -6245,7 +6160,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
enum plane_id plane_id = plane->id;
int level;
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
continue;
for (level = 0; level < wm_state->num_levels; level++) {
@@ -6402,2377 +6317,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
intel_enable_ipc(dev_priv);
}
-/*
- * Lock protecting IPS related data structures
- */
-DEFINE_SPINLOCK(mchdev_lock);
-
-bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- lockdep_assert_held(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- intel_uncore_posting_read16(uncore, MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 rgvmodectl;
- u8 fmax, fmin, fstart, vstart;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
-
- /* Enable temp reporting */
- intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- intel_uncore_write(uncore, RCUPEI, 100000);
- intel_uncore_write(uncore, RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- intel_uncore_write(uncore, RCBMAXAVG, 90000);
- intel_uncore_write(uncore, RCBMINAVG, 80000);
-
- intel_uncore_write(uncore, MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
- PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
-
- dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
- dev_priv->ips.fstart = fstart;
-
- dev_priv->ips.max_delay = fstart;
- dev_priv->ips.min_delay = fmin;
- dev_priv->ips.cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- intel_uncore_write(uncore,
- MEMINTREN,
- MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- intel_uncore_write(uncore, VIDSTART, vstart);
- intel_uncore_posting_read(uncore, VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
-
- if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
- MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- mdelay(1);
-
- ironlake_set_drps(dev_priv, fstart);
-
- dev_priv->ips.last_count1 =
- intel_uncore_read(uncore, DMIEC) +
- intel_uncore_read(uncore, DDREC) +
- intel_uncore_read(uncore, CSIEC);
- dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
- dev_priv->ips.last_time2 = ktime_get_raw_ns();
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-static void ironlake_disable_drps(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- intel_uncore_write(uncore,
- MEMINTREN,
- intel_uncore_read(uncore, MEMINTREN) &
- ~MEMINT_EVAL_CHG_EN);
- intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- intel_uncore_write(uncore,
- DEIER,
- intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
- intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
- intel_uncore_write(uncore,
- DEIMR,
- intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(i915, i915->ips.fstart);
- mdelay(1);
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
- mdelay(1);
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-/* There's a funny hw issue where the hw returns all 0 when reading from
- * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
- * ourselves, instead of doing a rmw cycle (which might result in us clearing
- * all limits and the gpu getting stuck at whatever frequency it currently runs at).
- */
-static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 limits;
-
- /* Only set the down limit when we've reached the lowest level to avoid
- * getting more interrupts; otherwise leave this clear. This prevents a
- * race in the hw when coming out of rc6: There's a tiny window where
- * the hw runs at the minimal clock before selecting the desired
- * frequency; if the down threshold expires in that window we will not
- * receive a down interrupt. */
- if (INTEL_GEN(dev_priv) >= 9) {
- limits = (rps->max_freq_softlimit) << 23;
- if (val <= rps->min_freq_softlimit)
- limits |= (rps->min_freq_softlimit) << 14;
- } else {
- limits = rps->max_freq_softlimit << 24;
- if (val <= rps->min_freq_softlimit)
- limits |= rps->min_freq_softlimit << 16;
- }
-
- return limits;
-}
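-/*
- * Illustration of the packing above (values picked arbitrarily): with
- * max_freq_softlimit = 0x20 and val at the floor alongside
- * min_freq_softlimit = 0x8, gen9+ packs (0x20 << 23) | (0x8 << 14), while
- * earlier gens place the same fields at bits 24 and 16, i.e.
- * (0x20 << 24) | (0x8 << 16).
- */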
-
-static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 threshold_up = 0, threshold_down = 0; /* in % */
- u32 ei_up = 0, ei_down = 0;
-
- lockdep_assert_held(&rps->power.mutex);
-
- if (new_power == rps->power.mode)
- return;
-
- /* Note the units here are not exactly 1us, but 1280ns. */
- switch (new_power) {
- case LOW_POWER:
- /* Upclock if more than 95% busy over 16ms */
- ei_up = 16000;
- threshold_up = 95;
-
- /* Downclock if less than 85% busy over 32ms */
- ei_down = 32000;
- threshold_down = 85;
- break;
-
- case BETWEEN:
- /* Upclock if more than 90% busy over 13ms */
- ei_up = 13000;
- threshold_up = 90;
-
- /* Downclock if less than 75% busy over 32ms */
- ei_down = 32000;
- threshold_down = 75;
- break;
-
- case HIGH_POWER:
- /* Upclock if more than 85% busy over 10ms */
- ei_up = 10000;
- threshold_up = 85;
-
- /* Downclock if less than 60% busy over 32ms */
- ei_down = 32000;
- threshold_down = 60;
- break;
- }
-
- /* Once BYT can survive dynamic sw freq adjustments without a
- * system hang, this restriction can be lifted.
- */
- if (IS_VALLEYVIEW(dev_priv))
- goto skip_hw_write;
-
- I915_WRITE(GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_up));
- I915_WRITE(GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_up * threshold_up / 100));
-
- I915_WRITE(GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_down));
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_down * threshold_down / 100));
-
- I915_WRITE(GEN6_RP_CONTROL,
- (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
-skip_hw_write:
- rps->power.mode = new_power;
- rps->power.up_threshold = threshold_up;
- rps->power.down_threshold = threshold_down;
-}
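-/*
- * Rough numbers for the conversion above, assuming the ~1280ns tick noted
- * in rps_set_power(): the LOW_POWER ei_up of 16000us corresponds to about
- * 16000 / 1.28 = 12500 evaluation ticks, which is the order of magnitude
- * GT_INTERVAL_FROM_US() is expected to program.
- */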
-
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
-
- new_power = rps->power.mode;
- switch (rps->power.mode) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
-
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
-
- mutex_lock(&rps->power.mutex);
- if (rps->power.interactive)
- new_power = HIGH_POWER;
- rps_set_power(dev_priv, new_power);
- mutex_unlock(&rps->power.mutex);
-}
-
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
-{
- struct intel_rps *rps = &i915->gt_pm.rps;
-
- if (INTEL_GEN(i915) < 6)
- return;
-
- mutex_lock(&rps->power.mutex);
- if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
- rps_set_power(i915, HIGH_POWER);
- } else {
- GEM_BUG_ON(!rps->power.interactive);
- rps->power.interactive--;
- }
- mutex_unlock(&rps->power.mutex);
-}
-
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 mask = 0;
-
- /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
- if (val > rps->min_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < rps->max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
-
- mask &= dev_priv->pm_rps_events;
-
- return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
-}
-
-/* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
- * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* min/max delay may still have been modified so be sure to
- * write the limits value.
- */
- if (val != rps->cur_freq) {
- gen6_set_rps_thresholds(dev_priv, val);
-
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN6_RPNSWREQ,
- GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(val));
- else
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(val) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- }
-
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- rps->cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- int err;
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- if (val != dev_priv->gt_pm.rps.cur_freq) {
- vlv_punit_get(dev_priv);
- err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(dev_priv);
- if (err)
- return err;
-
- gen6_set_rps_thresholds(dev_priv, val);
- }
-
- dev_priv->gt_pm.rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
- */
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val = rps->idle_freq;
- int err;
-
- if (rps->cur_freq <= val)
- return;
-
- /* The punit delays the write of the frequency and voltage until it
- * determines the GPU is awake. During normal usage we don't want to
- * waste power changing the frequency if the GPU is sleeping (rc6).
- * However, the GPU and driver are now idle and we do not want to delay
- * switching to minimum voltage (reducing power whilst idle) as we do
- * not expect to be woken in the near future and so must flush the
- * change by waking the device.
- *
- * We choose to take the media powerwell (either would do to trick the
- * punit into committing the voltage change) as that takes a lot less
- * power than the render powerwell.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
- err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
-
- if (err)
- DRM_ERROR("Failed to set RPS for idle\n");
-}
-
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- u8 freq;
-
- if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
- gen6_rps_reset_ei(dev_priv);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, rps->cur_freq));
-
- gen6_enable_rps_interrupts(dev_priv);
-
- /* Use the user's desired frequency as a guide, but for better
- * performance, jump directly to RPe as our starting frequency.
- */
- freq = max(rps->cur_freq,
- rps->efficient_freq);
-
- if (intel_set_rps(dev_priv,
- clamp(freq,
- rps->min_freq_softlimit,
- rps->max_freq_softlimit)))
- DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* Flush our bottom-half so that it does not race with us
- * setting the idle frequency and so that it is bounded by
- * our rpm wakeref. And then disable the interrupts to stop any
- * further RPS reclocking whilst we are asleep.
- */
- gen6_disable_rps_interrupts(dev_priv);
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_set_rps_idle(dev_priv);
- else
- gen6_set_rps(dev_priv, rps->idle_freq);
- rps->last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_sanitize_rps_pm_mask(dev_priv, ~0));
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_boost(struct i915_request *rq)
-{
- struct intel_rps *rps = &rq->i915->gt_pm.rps;
- unsigned long flags;
- bool boost;
-
- /* This is intentionally racy! We peek at the state here, then
- * validate inside the RPS worker.
- */
- if (!rps->enabled)
- return;
-
- if (i915_request_signaled(rq))
- return;
-
- /* Serializes with i915_request_retire() */
- boost = false;
- spin_lock_irqsave(&rq->lock, flags);
- if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
- boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->waitboost = true;
- }
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!boost)
- return;
-
- if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
- schedule_work(&rps->work);
-
- atomic_inc(&rps->boosts);
-}
-
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int err;
-
- lockdep_assert_held(&rps->lock);
- GEM_BUG_ON(val > rps->max_freq);
- GEM_BUG_ON(val < rps->min_freq);
-
- if (!rps->enabled) {
- rps->cur_freq = val;
- return 0;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = valleyview_set_rps(dev_priv, val);
- else
- err = gen6_set_rps(dev_priv, val);
-
- return err;
-}
-
-static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN9_PG_ENABLE, 0);
-}
-
-static void gen9_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- /* We're doing forcewake before disabling RC6,
- * which is what the BIOS expects when going into suspend */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
-{
- bool enable_rc6 = true;
- unsigned long rc6_ctx_base;
- u32 rc_ctl;
- int rc_sw_target;
-
- rc_ctl = I915_READ(GEN6_RC_CONTROL);
- rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
- RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
- "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
- onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
- onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
- rc_sw_target);
-
- if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
- enable_rc6 = false;
- }
-
- /*
- * The exact context size is not known for BXT, so assume a page size
- * for this check.
- */
- rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
- (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
- enable_rc6 = false;
- }
-
- if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
- !I915_READ(GEN8_PUSHBUS_ENABLE) ||
- !I915_READ(GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
- enable_rc6 = false;
- }
-
- return enable_rc6;
-}
-
-static bool sanitize_rc6(struct drm_i915_private *i915)
-{
- struct intel_device_info *info = mkwrite_device_info(i915);
-
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(i915)) {
- info->has_rc6 = 0;
- info->has_rps = false;
- }
-
- if (info->has_rc6 &&
- IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
- DRM_INFO("RC6 disabled by BIOS\n");
- info->has_rc6 = 0;
- }
-
- /*
- * We assume that we do not have any deep rc6 levels if we don't
- * have the previous rc6 level supported, i.e. we use HAS_RC6()
- * as the initial coarse check for rc6 in general, moving on to
- * progressively finer/deeper levels.
- */
- if (!info->has_rc6 && info->has_rc6p)
- info->has_rc6p = 0;
-
- return info->has_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* All of these values are in units of 50MHz */
-
- /* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_GEN9_LP(dev_priv)) {
- u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 0) & 0xff;
- } else {
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 16) & 0xff;
- }
- /* hw_max = RP0 until we check for overclocking */
- rps->max_freq = rps->rp0_freq;
-
- rps->efficient_freq = rps->rp1_freq;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- u32 ddcc_status = 0;
-
- if (sandybridge_pcode_read(dev_priv,
- HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status, NULL) == 0)
- rps->efficient_freq =
- clamp_t(u8,
- ((ddcc_status >> 8) & 0xff),
- rps->min_freq,
- rps->max_freq);
- }
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Store the frequency values in 16.66 MHz units, which is
- * the natural hardware unit for SKL
- */
- rps->rp0_freq *= GEN9_FREQ_SCALER;
- rps->rp1_freq *= GEN9_FREQ_SCALER;
- rps->min_freq *= GEN9_FREQ_SCALER;
- rps->max_freq *= GEN9_FREQ_SCALER;
- rps->efficient_freq *= GEN9_FREQ_SCALER;
- }
-}
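-/*
- * Sanity check for the scaling above (illustrative numbers, assuming
- * GEN9_FREQ_SCALER is the 50MHz/16.66MHz ratio of 3): a raw RP0 of 22 in
- * 50MHz units is 1100MHz; after scaling it becomes 66 units of ~16.66MHz,
- * i.e. still ~1100MHz, so only the unit changes.
- */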
-
-static void reset_rps(struct drm_i915_private *dev_priv,
- int (*set)(struct drm_i915_private *, u8))
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u8 freq = rps->cur_freq;
-
- /* force a reset */
- rps->power.mode = -1;
- rps->cur_freq = -1;
-
- if (set(dev_priv, freq))
- DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
-/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_i915_private *dev_priv)
-{
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Program defaults and thresholds for RPS */
- if (IS_GEN(dev_priv, 9))
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
-
- /* 1 second timeout*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
- GT_INTERVAL_FROM_US(dev_priv, 1000000));
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
-
- /* Leaning on the below call to gen6_set_rps to program/setup the
- * Up/Down EI & threshold registers, as well as the RP_CONTROL,
- * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /*
- * 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GUC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from intel_pstate is that we
- * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_EI_MODE(1));
-
- /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6_mode;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- if (INTEL_GEN(dev_priv) >= 10) {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
- } else if (IS_SKYLAKE(dev_priv)) {
- /*
- * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
- * when CPG is enabled
- */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
- } else {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
- }
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GUC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from intel_pstate is that we
- * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
- /* WaRsUseTimeoutMode:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- else
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode);
-
- /*
- * 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
- */
- if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- I915_WRITE(GEN9_PG_ENABLE, 0);
- else
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-
- /* 3: Enable RC6 */
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_RC6_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1 Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
-
- /* Docs recommend 900MHz, and 300 MHz respectively */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- rps->max_freq_softlimit << 24 |
- rps->min_freq_softlimit << 16);
-
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
- I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
- I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6vids, rc6_mask;
- u32 gtfifodbg;
- int ret;
-
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* Clear the DBG now so we don't get confused by earlier errors */
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
- else
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- /* We don't use those on Haswell */
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- if (HAS_RC6p(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (HAS_RC6pp(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
- if (IS_GEN(dev_priv, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
- rc6vids &= 0xffff00;
- rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
- if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * There might be some value in exposing these to
- * userspace...
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Power down if completely idle for over 50ms */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const int min_freq = 15;
- const int scaling_factor = 180;
- unsigned int gpu_freq;
- unsigned int max_ia_freq, min_ring_freq;
- unsigned int max_gpu_freq, min_gpu_freq;
- struct cpufreq_policy *policy;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->max_freq <= rps->min_freq)
- return;
-
- policy = cpufreq_cpu_get(0);
- if (policy) {
- max_ia_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- } else {
- /*
- * Default to the measured freq if none is found; the PCU will ensure we
- * don't go over
- */
- max_ia_freq = tsc_khz;
- }
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- min_ring_freq = I915_READ(DCLK) & 0xf;
- /* convert DDR frequency from units of 266.6MHz to bandwidth */
- min_ring_freq = mult_frac(min_ring_freq, 8, 3);
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 MHz units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
- const int diff = max_gpu_freq - gpu_freq;
- unsigned int ia_freq = 0, ring_freq = 0;
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /*
- * ring_freq = 2 * GT. ring_freq is in 100MHz units
- * No floor required for ring frequency on SKL.
- */
- ring_freq = gpu_freq;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- /* max(2 * GT, DDR). NB: GT is 50MHz units */
- ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev_priv)) {
- ring_freq = mult_frac(gpu_freq, 5, 4);
- ring_freq = max(min_ring_freq, ring_freq);
- /* leave ia_freq as the default, chosen by cpufreq */
- } else {
- /* On older processors, there is no separate ring
- * clock domain, so in order to boost the bandwidth
- * of the ring, we need to upclock the CPU (ia_freq).
- *
- * For GPU frequencies less than 750MHz,
- * just use the lowest ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
- }
-
- sandybridge_pcode_write(dev_priv,
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
- ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
- ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
- gpu_freq);
- }
-}
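-/*
- * Worked example for the Haswell branch above (numbers are illustrative):
- * a GT ratio of 20 (20 * 50MHz = 1000MHz) yields mult_frac(20, 5, 4) = 25,
- * floored at the DDR-derived min_ring_freq, before being sent to the PCU
- * via GEN6_PCODE_WRITE_MIN_FREQ_TABLE.
- */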
-
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-
- switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
- case 8:
- /* (2 * 4) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
- break;
- case 12:
- /* (2 * 6) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
- break;
- case 16:
- /* (2 * 8) config */
- default:
- /* Setting (2 * 8) Min RP0 for any other combination */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
- break;
- }
-
- rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
-
- return rp0;
-}
-
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
- rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
-
- return rpe;
-}
-
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
-
- return rp1;
-}
-
-static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpn;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
- rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
- FB_GFX_FREQ_FUSE_MASK);
-
- return rpn;
-}
-
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
-
- return rp1;
-}
-
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
- /* Clamp to max */
- rp0 = min_t(u32, rp0, 0xea);
-
- return rp0;
-}
-
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
- rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
- rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
-
- return rpe;
-}
-
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
- /*
- * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
- * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
- * a BYT-M B0 the above register contains 0xbf. Moreover when setting
- * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
- * to make sure it matches what Punit accepts.
- */
- return max_t(u32, val, 0xc0);
-}
-
-/* Check that the pctx buffer wasn't moved under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON(pctx_addr != dev_priv->dsm.start +
- dev_priv->vlv_pctx->stolen->start);
-}
-
-
-/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
-}
-
-static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- resource_size_t pctx_paddr, paddr;
- resource_size_t pctx_size = 32*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = dev_priv->dsm.end + 1 - pctx_size;
- GEM_BUG_ON(paddr > U32_MAX);
-
- pctx_paddr = (paddr & (~4095));
- I915_WRITE(VLV_PCBR, pctx_paddr);
- }
-
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-}
-
-static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
- resource_size_t pctx_paddr;
- resource_size_t pctx_size = 24*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if (pcbr) {
- /* BIOS set it up already, grab the pre-alloc'd space */
- resource_size_t pcbr_offset;
-
- pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
- pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- pcbr_offset,
- I915_GTT_OFFSET_NONE,
- pctx_size);
- goto out;
- }
-
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-
- /*
- * From the Gunit register HAS:
- * The Gfx driver is expected to program this register and ensure
- * proper allocation within Gfx stolen memory. For example, this
- * register should be programmed such that the PCBR range does not
- * overlap with other ranges, such as the frame buffer, protected
- * memory, or any other relevant ranges.
- */
- pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
- if (!pctx) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
- goto out;
- }
-
- GEM_BUG_ON(range_overflows_t(u64,
- dev_priv->dsm.start,
- pctx->stolen->start,
- U32_MAX));
- pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
- I915_WRITE(VLV_PCBR, pctx_paddr);
-
-out:
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
- dev_priv->vlv_pctx = pctx;
-}
-
-static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
-
- pctx = fetch_and_zero(&dev_priv->vlv_pctx);
- if (pctx)
- i915_gem_object_put(pctx);
-}
-
-static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.gpll_ref_freq =
- vlv_get_cck_clock(dev_priv, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- dev_priv->czclk_freq);
-
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
- dev_priv->gt_pm.rps.gpll_ref_freq);
-}
-
-static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- valleyview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- dev_priv->mem_freq = 800;
- break;
- case 2:
- dev_priv->mem_freq = 1066;
- break;
- case 3:
- dev_priv->mem_freq = 1333;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = valleyview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = valleyview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-}
-
-static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- cherryview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- dev_priv->mem_freq = 2000;
- break;
- default:
- dev_priv->mem_freq = 1600;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = cherryview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = cherryview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
-}
-
-static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- valleyview_cleanup_pctx(dev_priv);
-}
-
-static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode, pcbr;
-
- gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
- GT_FIFO_FREE_ENTRIES_CHV);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- cherryview_check_pctx(dev_priv);
-
- /* 1a & 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2a: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- /* For now we assume BIOS is allocating and populating the PCBR */
- pcbr = I915_READ(VLV_PCBR);
-
- /* 3: Enable RC6 */
- rc6_mode = 0;
- if (pcbr >> VLV_PCBR_ADDR_SHIFT)
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1: Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- /* Setting Fixed Bias */
- vlv_punit_get(dev_priv);
-
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg;
-
- valleyview_check_pctx(dev_priv);
-
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- vlv_punit_get(dev_priv);
-
- /* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
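-/*
- * Example of the decode above (field values chosen for illustration):
- * div = 16, post = 1, pre = 1 gives 16 * 133333 / ((1 << 1) * 1), i.e.
- * roughly 1066664, which suggests a 133.33MHz reference clock.
- */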
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- lockdep_assert_held(&mchdev_lock);
-
- diff1 = now - dev_priv->ips.last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->ips.chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->ips.last_count1) {
- diff = ~0UL - dev_priv->ips.last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->ips.last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->ips.c_m &&
- cparams[i].t == dev_priv->ips.r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->ips.last_count1 = total_count;
- dev_priv->ips.last_time1 = now;
-
- dev_priv->ips.chipset_power = ret;
-
- return ret;
-}
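-/*
- * Rough worked example for the calculation above, using the first cparams
- * row (m = 301, c = 28664): if the three counters advanced by 1000 over a
- * diff1 of 20ms, diff becomes 50 and the result is
- * (301 * 50 + 28664) / 10 = 4371 in the driver's chipset_power units.
- */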
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_chipset_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *i915)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = intel_uncore_read(&i915->uncore, TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = intel_uncore_read8(&i915->uncore, TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static int _pxvid_to_vd(u8 pxvid)
-{
- if (pxvid == 0)
- return 0;
-
- if (pxvid >= 8 && pxvid < 31)
- pxvid = 31;
-
- return (pxvid + 2) * 125;
-}
-
-static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- const int vd = _pxvid_to_vd(pxvid);
- const int vm = vd - 1125;
-
- if (INTEL_INFO(dev_priv)->is_mobile)
- return vm > 0 ? vm : 0;
-
- return vd;
-}
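-/*
- * Example for the two helpers above (pxvid values are illustrative): a
- * pxvid of 10 falls in [8, 31) and is clamped to 31, so vd = (31 + 2) * 125
- * = 4125; on mobile parts the returned value is then vm = 4125 - 1125 = 3000.
- */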
-
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- u64 now, diff, diffms;
- u32 count;
-
- lockdep_assert_held(&mchdev_lock);
-
- now = ktime_get_raw_ns();
- diffms = now - dev_priv->ips.last_time2;
- do_div(diffms, NSEC_PER_MSEC);
-
- /* Don't divide by 0 */
- if (!diffms)
- return;
-
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->ips.last_count2) {
- diff = ~0UL - dev_priv->ips.last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->ips.last_count2;
- }
-
- dev_priv->ips.last_count2 = count;
- dev_priv->ips.last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->ips.gfx_power = diff;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(dev_priv, 5))
- return;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- __i915_update_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-}
-
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- lockdep_assert_held(&mchdev_lock);
-
- pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->ips.corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- __i915_update_gfx_val(dev_priv);
-
- return dev_priv->ips.gfx_power + state2;
-}
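-/*
- * Example for the correction factor above (t chosen for illustration): at
- * t = 90 the first branch applies, giving corr = 90 * 2349 + 135940 =
- * 347350, in the 1/100000 units noted in the comment.
- */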
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-static struct drm_i915_private __rcu *i915_mch_dev;
-
-static struct drm_i915_private *mchdev_get(void)
-{
- struct drm_i915_private *i915;
-
- rcu_read_lock();
- i915 = rcu_dereference(i915_mch_dev);
- if (!kref_get_unless_zero(&i915->drm.ref))
- i915 = NULL;
- rcu_read_unlock();
-
- return i915;
-}
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *i915;
- unsigned long chipset_val = 0;
- unsigned long graphics_val = 0;
- intel_wakeref_t wakeref;
-
- i915 = mchdev_get();
- if (!i915)
- return 0;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- chipset_val = __i915_chipset_val(i915);
- graphics_val = __i915_gfx_val(i915);
- spin_unlock_irq(&mchdev_lock);
- }
-
- drm_dev_put(&i915->drm);
- return chipset_val + graphics_val;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay > i915->ips.fmax)
- i915->ips.max_delay--;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay < i915->ips.min_delay)
- i915->ips.max_delay++;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU busyness to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- ret = i915->gt.awake;
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- i915->ips.max_delay = i915->ips.fstart;
- ret = ironlake_set_drps(i915, i915->ips.fstart);
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
- }
-}
-
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
-{
- /* We only register the i915 ips part with intel-ips once everything is
- * set up, to avoid intel-ips sneaking in and reading bogus values. */
- rcu_assign_pointer(i915_mch_dev, dev_priv);
-
- ips_ping_for_i915_load();
-}
-
-void intel_gpu_ips_teardown(void)
-{
- rcu_assign_pointer(i915_mch_dev, NULL);
-}
-
-static void intel_init_emon(struct drm_i915_private *dev_priv)
-{
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable to program */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW(i), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW(i), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ(i));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW(i), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL(i), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!sanitize_rc6(dev_priv)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- pm_runtime_get(&dev_priv->drm.pdev->dev);
- }
-
- /* Initialize RPS limits (for userspace) */
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_init_gt_powersave(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_init_gt_powersave(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_init_rps_frequencies(dev_priv);
-
- /* Derive initial user preferences/limits from the hardware limits */
- rps->max_freq_softlimit = rps->max_freq;
- rps->min_freq_softlimit = rps->min_freq;
-
- /* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN(dev_priv, 6) ||
- IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- u32 params = 0;
-
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
- &params, NULL);
- if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
- rps->max_freq = params & 0xff;
- }
- }
-
- /* Finally allow us to boost to max by default */
- rps->boost_freq = rps->max_freq;
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
-}
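
The overclock probe above reads a pcode mailbox word in which bit 31 advertises overclocking support and the low byte carries the overclock ceiling in 50 MHz units. A small sketch of that decode, with an invented mailbox reply:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int params = BIT(31) | 0x2c;	/* invented reply: OC supported, 0x2c * 50 = 2200 MHz */
	unsigned int max_freq = 0x28;		/* assumed non-OC maximum, also in 50 MHz units */

	if (params & BIT(31)) {			/* OC supported */
		printf("max: %u MHz, overclock: %u MHz\n",
		       (max_freq & 0xff) * 50, (params & 0xff) * 50);
		max_freq = params & 0xff;	/* adopt the overclock ceiling */
	}
	printf("new max opcode: 0x%02x\n", max_freq);
	return 0;
}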
-
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv))
- valleyview_cleanup_gt_powersave(dev_priv);
-
- if (!HAS_RC6(dev_priv))
- pm_runtime_put(&dev_priv->drm.pdev->dev);
-}
-
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
- dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
- intel_disable_gt_powersave(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (!i915->gt_pm.llc_pstate.enabled)
- return;
-
- /* Currently there is no HW configuration to be done to disable. */
-
- i915->gt_pm.llc_pstate.enabled = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rc6(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = false;
-}
-
-static void intel_disable_rps(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rps.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rps(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rps(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rps(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rps(dev_priv);
- else if (IS_IRONLAKE_M(dev_priv))
- ironlake_disable_drps(dev_priv);
-
- dev_priv->gt_pm.rps.enabled = false;
-}
-
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- intel_disable_rc6(dev_priv);
- intel_disable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_disable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
-static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (i915->gt_pm.llc_pstate.enabled)
- return;
-
- gen6_update_ring_freq(i915);
-
- i915->gt_pm.llc_pstate.enabled = true;
-}
-
-static void intel_enable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_enable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
- gen11_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 9)
- gen9_enable_rc6(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- gen8_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_enable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = true;
-}
-
-static void intel_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_enable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- valleyview_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 9) {
- gen9_enable_rps(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- gen8_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- gen6_enable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_enable_drps(dev_priv);
- intel_init_emon(dev_priv);
- }
-
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
-
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
-
- rps->enabled = true;
-}
-
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
- return;
-
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- if (HAS_RC6(dev_priv))
- intel_enable_rc6(dev_priv);
- if (HAS_RPS(dev_priv))
- intel_enable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_enable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
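
The enable/disable helpers above stay idempotent by checking an enabled flag under the RPS lock before touching hardware. A minimal sketch of that guarded-toggle pattern, with a pthread mutex standing in for the kernel mutex and lockdep assertion (names are illustrative; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct powersave {
	pthread_mutex_t lock;
	bool rc6_enabled;
};

/* Must be called with ps->lock held; a no-op if RC6 is already enabled */
static void enable_rc6(struct powersave *ps)
{
	if (ps->rc6_enabled)
		return;
	/* platform-specific enable sequence would go here */
	ps->rc6_enabled = true;
}

static void disable_rc6(struct powersave *ps)
{
	if (!ps->rc6_enabled)
		return;
	/* platform-specific disable sequence would go here */
	ps->rc6_enabled = false;
}

int main(void)
{
	struct powersave ps = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&ps.lock);
	enable_rc6(&ps);
	enable_rc6(&ps);	/* second call is a no-op, as in the driver */
	disable_rc6(&ps);
	pthread_mutex_unlock(&ps.lock);

	printf("rc6 enabled: %d\n", ps.rc6_enabled);
	return 0;
}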
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -8870,7 +6414,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- int pipe;
+ enum pipe pipe;
u32 val;
/*
@@ -8892,7 +6436,6 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
I915_WRITE(TRANS_CHICKEN2(pipe), val);
@@ -8910,8 +6453,9 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
- DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
- tmp);
+ drm_dbg_kms(&dev_priv->drm,
+ "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+ tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9088,6 +6632,37 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaEnable32PlaneMode:icl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
+ 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, PSDUNIT_CLKGATE_DIS);
+}
+
+static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 vd_pg_enable = 0;
+ unsigned int i;
+
+ /* Wa_1408615072:tgl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, VSUNIT_CLKGATE_DIS_TGL);
+
+ /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (HAS_ENGINE(dev_priv, _VCS(i)))
+ vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
+ VDN_MFX_POWERGATE_ENABLE(i);
+ }
+
+ I915_WRITE(POWERGATE_ENABLE,
+ I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
}
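
The new Tiger Lake hook ORs per-engine powergate bits into a single mask, but only for the video decode engines that are actually present. A sketch of that mask construction; the bit layout here is invented purely for illustration, not the real register definition:

#include <stdio.h>

#define MAX_VCS 4
/* Invented bit layout, for illustration only */
#define HCP_POWERGATE_ENABLE(n)	(1u << (2 * (n)))
#define MFX_POWERGATE_ENABLE(n)	(1u << (2 * (n) + 1))

int main(void)
{
	unsigned int present_mask = 0x5;	/* pretend VCS0 and VCS2 are fused in */
	unsigned int vd_pg_enable = 0;
	unsigned int i;

	for (i = 0; i < MAX_VCS; i++) {
		if (present_mask & (1u << i))
			vd_pg_enable |= HCP_POWERGATE_ENABLE(i) |
					MFX_POWERGATE_ENABLE(i);
	}
	printf("vd_pg_enable = 0x%08x\n", vd_pg_enable);
	return 0;
}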
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9181,9 +6756,6 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* The GTT cache must be disabled if the system is using 2M pages. */
- bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
- I915_GTT_PAGE_SIZE_2M);
enum pipe pipe;
/* WaSwitchSolVfFArbitrationPriority:bdw */
@@ -9216,9 +6788,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
- /* WaGttCachingOffByDefault:bdw */
- I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
-
/* WaKVMNotificationOnConfigChange:bdw */
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
@@ -9483,12 +7052,6 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
* LSQC Setting Recommendations.
*/
gen8_set_l3sqc_credits(dev_priv, 38, 2);
-
- /*
- * GTT cache may not work with big pages, so if those
- * are ever enabled GTT cache may need to be disabled.
- */
- I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9607,7 +7170,8 @@ void intel_suspend_hw(struct drm_i915_private *dev_priv)
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No clock gating settings or workarounds applied.\n");
}
/**
@@ -9621,7 +7185,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
+ if (IS_GEN(dev_priv, 12))
+ dev_priv->display.init_clock_gating = tgl_init_clock_gating;
+ else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
@@ -9672,9 +7238,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
{
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
- i915_pineview_get_mem_freq(dev_priv);
+ pnv_get_mem_freq(dev_priv);
else if (IS_GEN(dev_priv, 5))
- i915_ironlake_get_mem_freq(dev_priv);
+ ilk_get_mem_freq(dev_priv);
+
+ if (intel_has_sagv(dev_priv))
+ skl_setup_sagv_block_time(dev_priv);
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
@@ -9697,8 +7266,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.optimize_watermarks =
ilk_optimize_watermarks;
} else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to read display plane latency. "
+ "Disable CxSR\n");
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
@@ -9718,7 +7288,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
+ drm_info(&dev_priv->drm,
+ "failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
(dev_priv->is_ddr3 == 1) ? "3" : "2",
@@ -9727,14 +7298,14 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
- dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.update_wm = pnv_update_wm;
} else if (IS_GEN(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
} else if (IS_GEN(dev_priv, 3)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_GEN(dev_priv, 2)) {
- if (INTEL_INFO(dev_priv)->num_pipes == 1) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -9742,221 +7313,13 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
} else {
- DRM_ERROR("unexpected fall-through in intel_init_pm\n");
+ drm_err(&dev_priv->drm,
+ "unexpected fall-through in %s\n", __func__);
}
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val - 0xb7
- * Slow = Fast = GPLL ref * N
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
-}
-
-static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
-}
-
-static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val / 2
- * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
-}
-
-static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* CHV needs even values */
- return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
-}
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
- GEN9_FREQ_SCALER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_gpu_freq(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_gpu_freq(dev_priv, val);
- else
- return val * GT_FREQUENCY_MULTIPLIER;
-}
-
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
- GT_FREQUENCY_MULTIPLIER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_freq_opcode(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_freq_opcode(dev_priv, val);
- else
- return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
-}
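
byt_gpu_freq()/byt_freq_opcode() above are linear mappings around the GPLL reference clock: N = opcode - 0xb7 and frequency = GPLL ref * N. A runnable round-trip sketch with an assumed reference frequency (the driver reads the real value from hardware):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Assumed GPLL reference in kHz; the driver reads the real value from the CCK unit */
static const long gpll_ref_freq = 27000;

static long byt_gpu_freq(long opcode)
{
	/* N = opcode - 0xb7, frequency = GPLL ref * N (result in MHz) */
	return DIV_ROUND_CLOSEST(gpll_ref_freq * (opcode - 0xb7), 1000);
}

static long byt_freq_opcode(long mhz)
{
	return DIV_ROUND_CLOSEST(1000 * mhz, gpll_ref_freq) + 0xb7;
}

int main(void)
{
	long opcode = 0xd0;
	long mhz = byt_gpu_freq(opcode);

	printf("opcode 0x%lx -> %ld MHz -> opcode 0x%lx\n",
	       (unsigned long)opcode, mhz, (unsigned long)byt_freq_opcode(mhz));
	return 0;
}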
-
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->gt_pm.rps.lock);
- mutex_init(&dev_priv->gt_pm.rps.power.mutex);
-
- atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
-
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- u32 lower, upper, tmp;
- int loop = 2;
-
- /*
- * The registers accessed do not need forcewake. We borrow the
- * uncore lock to prevent concurrent access to the register range.
- */
- lockdep_assert_held(&dev_priv->uncore.lock);
-
- /*
- * vlv and chv residency counters are 40 bits in width.
- * With a control bit, we can choose between the upper and lower
- * 32-bit windows into this counter.
- *
- * Although we always use the counter in high-range mode elsewhere,
- * userspace may attempt to read the value before rc6 is initialised,
- * before we have set the default VLV_COUNTER_CONTROL value. So always
- * set the high bit to be safe.
- */
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- do {
- tmp = upper;
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
- lower = I915_READ_FW(reg);
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- } while (upper != tmp && --loop);
-
- /*
- * Everywhere else we always use VLV_COUNTER_CONTROL with the
- * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
- * now.
- */
-
- return lower | (u64)upper << 8;
-}
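
vlv_residency_raw() assembles a 40-bit value from two 32-bit windows selected by a control bit, re-reading the upper window until it is stable across the lower read. A self-contained sketch of that sequence against a simulated counter, assuming the low window covers bits 31:0 and the high window bits 39:8:

#include <stdio.h>

/* Simulated 40-bit hardware counter */
static unsigned long long hw_counter = 0x12ABCDEF01ULL;

static unsigned int read_low(void)  { return (unsigned int)hw_counter; }	 /* bits 31:0 */
static unsigned int read_high(void) { return (unsigned int)(hw_counter >> 8); } /* bits 39:8 */

static unsigned long long residency_raw(void)
{
	unsigned int lower, upper, tmp;
	int loop = 2;

	upper = read_high();
	do {
		tmp = upper;
		lower = read_low();
		upper = read_high();	/* retry if the counter moved between reads */
	} while (upper != tmp && --loop);

	return lower | (unsigned long long)upper << 8;
}

int main(void)
{
	printf("raw residency: 0x%010llx\n", residency_raw());
	return 0;
}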
-
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u64 time_hw, prev_hw, overflow_hw;
- unsigned int fw_domains;
- unsigned long flags;
- unsigned int i;
- u32 mul, div;
-
- if (!HAS_RC6(dev_priv))
- return 0;
-
- /*
- * Store previous hw counter values for counter wrap-around handling.
- *
- * There are only four interesting registers and they live next to each
- * other, so we can use the offset relative to the smallest one as the
- * index into driver storage.
- */
- i = (i915_mmio_reg_offset(reg) -
- i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
- return 0;
-
- fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
-
- spin_lock_irqsave(&uncore->lock, flags);
- intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
- /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- mul = 1000000;
- div = dev_priv->czclk_freq;
- overflow_hw = BIT_ULL(40);
- time_hw = vlv_residency_raw(dev_priv, reg);
- } else {
- /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
- if (IS_GEN9_LP(dev_priv)) {
- mul = 10000;
- div = 12;
- } else {
- mul = 1280;
- div = 1;
- }
-
- overflow_hw = BIT_ULL(32);
- time_hw = intel_uncore_read_fw(uncore, reg);
- }
-
- /*
- * Counter wrap handling.
- *
- * This relies on a sufficient frequency of queries; otherwise the
- * counters can still wrap.
- */
- prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
- dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
-
- /* RC6 delta from last sample. */
- if (time_hw >= prev_hw)
- time_hw -= prev_hw;
- else
- time_hw += overflow_hw - prev_hw;
-
- /* Add delta to RC6 extended raw driver copy. */
- time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
- dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
-
- intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irqrestore(&uncore->lock, flags);
-
- return mul_u64_u32_div(time_hw, mul, div);
-}
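
The wrap handling above keeps the previous raw sample per register and folds the modulo distance into a 64-bit software accumulator before scaling to nanoseconds. A hedged sketch of that bookkeeping, using the 32-bit counter width and 1.28 us tick of the non-VLV path:

#include <stdio.h>

#define OVERFLOW_HW (1ULL << 32)	/* 32-bit hardware counter, as on most gens */

static unsigned long long prev_hw, cur_residency;	/* driver-side copies */

/* Fold one raw sample into the extended software counter, then scale to ns */
static unsigned long long update_residency_ns(unsigned long long time_hw)
{
	unsigned long long delta;

	if (time_hw >= prev_hw)
		delta = time_hw - prev_hw;
	else
		delta = time_hw + OVERFLOW_HW - prev_hw;	/* counter wrapped */
	prev_hw = time_hw;

	cur_residency += delta;
	return cur_residency * 1280;	/* 1.28 us units -> ns (mul = 1280, div = 1) */
}

int main(void)
{
	printf("%llu ns\n", update_residency_ns(0xFFFFFFF0ULL));
	printf("%llu ns\n", update_residency_ns(0x10ULL));	/* wrapped sample */
	return 0;
}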
-
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-{
- u32 cagf;
-
- if (INTEL_GEN(dev_priv) >= 9)
- cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-
- return cagf;
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 1b489fa399e1..c06c6a846d9a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -10,10 +10,10 @@
#include "i915_reg.h"
-struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
struct i915_request;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -29,16 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -52,7 +42,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
+bool intel_can_enable_sagv(struct intel_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
@@ -64,27 +54,10 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
new file mode 100644
index 000000000000..14b59b899c9b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "intel_region_lmem.h"
+
+static int init_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ unsigned long n;
+ int ret;
+
+ /* We want to 1:1 map the mappable aperture to our reserved region */
+
+ mem->fake_mappable.start = 0;
+ mem->fake_mappable.size = resource_size(&mem->region);
+ mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
+
+ ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
+ if (ret)
+ return ret;
+
+ mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->region.start,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ drm_mm_remove_node(&mem->fake_mappable);
+ return -EINVAL;
+ }
+
+ for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ mem->remap_addr + (n << PAGE_SHIFT),
+ n << PAGE_SHIFT,
+ I915_CACHE_NONE, 0);
+ }
+
+ mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
+ mem->fake_mappable.size);
+
+ return 0;
+}
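
init_fake_lmem_bar() above builds a 1:1 mapping, so GGTT offset n * PAGE_SIZE resolves to remap_addr + n * PAGE_SIZE. A tiny sketch of that offset arithmetic with invented addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long remap_addr = 0x800000000ULL;	/* invented DMA address */
	unsigned long long size = 4ULL << PAGE_SHIFT;	/* pretend a 4-page window */
	unsigned long long n;

	/* 1:1 mapping: GGTT offset n pages -> region offset n pages */
	for (n = 0; n < size >> PAGE_SHIFT; n++)
		printf("GGTT 0x%06llx -> 0x%llx\n",
		       n << PAGE_SHIFT, remap_addr + (n << PAGE_SHIFT));
	return 0;
}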
+
+static void release_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ if (!drm_mm_node_allocated(&mem->fake_mappable))
+ return;
+
+ drm_mm_remove_node(&mem->fake_mappable);
+
+ dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ mem->remap_addr,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+ release_fake_lmem_bar(mem);
+ io_mapping_fini(&mem->iomap);
+ intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+ int ret;
+
+ if (i915_modparams.fake_lmem_start) {
+ ret = init_fake_lmem_bar(mem);
+ GEM_BUG_ON(ret);
+ }
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ resource_size(&mem->region)))
+ return -EIO;
+
+ ret = intel_memory_region_init_buddy(mem);
+ if (ret)
+ io_mapping_fini(&mem->iomap);
+
+ intel_memory_region_set_name(mem, "local");
+
+ return ret;
+}
+
+const struct intel_memory_region_ops intel_region_lmem_ops = {
+ .init = region_lmem_init,
+ .release = region_lmem_release,
+ .create_object = __i915_gem_lmem_object_create,
+};
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t mappable_end;
+ resource_size_t io_start;
+ resource_size_t start;
+
+ GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
+ GEM_BUG_ON(!i915_modparams.fake_lmem_start);
+
+ /* Your mappable aperture belongs to me now! */
+ mappable_end = pci_resource_len(pdev, 2);
+ io_start = pci_resource_start(pdev, 2);
+ start = i915_modparams.fake_lmem_start;
+
+ mem = intel_memory_region_create(i915,
+ start,
+ mappable_end,
+ PAGE_SIZE,
+ io_start,
+ &intel_region_lmem_ops);
+ if (!IS_ERR(mem)) {
+ drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
+ &mem->region);
+ drm_info(&i915->drm,
+ "Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
+ (u64)resource_size(&mem->region));
+ }
+
+ return mem;
+}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h
new file mode 100644
index 000000000000..213def7c7b8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_REGION_LMEM_H
+#define __INTEL_REGION_LMEM_H
+
+struct drm_i915_private;
+
+extern const struct intel_memory_region_ops intel_region_lmem_ops;
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915);
+
+#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d1aebc3e857..ad719c9602af 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -27,11 +27,11 @@
*/
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_trace.h"
/**
* DOC: runtime pm
@@ -592,7 +592,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
pm_runtime_put(kdev);
}
-void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm)
+void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
int count = atomic_read(&rpm->wakeref_count);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 2ee8f9522e05..ae64ff14c642 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -173,7 +173,7 @@ enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
-void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index a115625e980c..cbfb7171d62d 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -24,10 +24,8 @@
#include <asm/iosf_mbi.h>
-#include "intel_sideband.h"
-
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_sideband.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
@@ -107,8 +105,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
if (intel_wait_for_register(uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
- DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
- is_read ? "read" : "write");
+ drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
return -EAGAIN;
}
@@ -131,8 +129,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
err = 0;
} else {
- DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
- is_read ? "read" : "write");
+ drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
err = -ETIMEDOUT;
}
@@ -285,7 +283,8 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
if (intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
- DRM_ERROR("timeout waiting for SBI to become ready\n");
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to become ready\n");
return -EBUSY;
}
@@ -303,12 +302,13 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
if (__intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100, 100, &cmd)) {
- DRM_ERROR("timeout waiting for SBI to complete read\n");
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to complete read\n");
return -ETIMEDOUT;
}
if (cmd & SBI_RESPONSE_FAIL) {
- DRM_ERROR("error during SBI read of reg %x\n", reg);
+ drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
return -ENXIO;
}
@@ -428,8 +428,9 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
mutex_unlock(&i915->sb_lock);
if (err) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), err);
+ drm_dbg(&i915->drm,
+ "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
}
return err;
@@ -449,8 +450,9 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
mutex_unlock(&i915->sb_lock);
if (err) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), err);
+ drm_dbg(&i915->drm,
+ "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
}
return err;
@@ -521,7 +523,8 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
- DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ drm_dbg_kms(&i915->drm,
+ "PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
deleted file mode 100644
index ae45651ac73c..000000000000
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "gt/intel_reset.h"
-#include "intel_uc.h"
-#include "intel_guc.h"
-#include "intel_guc_ads.h"
-#include "intel_guc_submission.h"
-#include "i915_drv.h"
-
-static void guc_free_load_err_log(struct intel_guc *guc);
-
-/* Reset GuC, providing us with fresh state for both GuC and HuC. */
-static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 guc_status;
-
- ret = intel_reset_guc(dev_priv);
- if (ret) {
- DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
- return ret;
- }
-
- guc_status = I915_READ(GUC_STATUS);
- WARN(!(guc_status & GS_MIA_IN_RESET),
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
-
- return ret;
-}
-
-static int __get_platform_enable_guc(struct drm_i915_private *i915)
-{
- struct intel_uc_fw *guc_fw = &i915->guc.fw;
- struct intel_uc_fw *huc_fw = &i915->huc.fw;
- int enable_guc = 0;
-
- /* Default is to use the HuC if we know both the GuC and HuC firmwares */
- if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
- enable_guc |= ENABLE_GUC_LOAD_HUC;
-
- /* Any platform specific fine-tuning can be done here */
-
- return enable_guc;
-}
-
-static int __get_default_guc_log_level(struct drm_i915_private *i915)
-{
- int guc_log_level;
-
- if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
- guc_log_level = GUC_LOG_LEVEL_DISABLED;
- else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
- IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- guc_log_level = GUC_LOG_LEVEL_MAX;
- else
- guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
-
- /* Any platform specific fine-tuning can be done here */
-
- return guc_log_level;
-}
-
-/**
- * sanitize_options_early - sanitize uC related modparam options
- * @i915: device private
- *
- * For the "enable_guc" option, this function will attempt to modify it
- * only if it was initially set to "auto(-1)". The default value for this
- * modparam varies between platforms and is hardcoded in the driver code.
- * Any other modparam value is only checked against the availability of the
- * related hardware or firmware definitions.
- *
- * For the "guc_log_level" option, this function will attempt to modify it
- * only if it was initially set to "auto(-1)", or if the initial value was
- * "enable(1..4)" on platforms without the GuC. The default value for this
- * modparam varies between platforms and is usually "disable(0)", unless the
- * GuC is enabled on the given platform and the driver is compiled with a
- * debug config, in which case it defaults to "enable(1..4)".
- */
-static void sanitize_options_early(struct drm_i915_private *i915)
-{
- struct intel_uc_fw *guc_fw = &i915->guc.fw;
- struct intel_uc_fw *huc_fw = &i915->huc.fw;
-
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(i915);
-
- DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_is_using_guc_submission(i915)),
- yesno(intel_uc_is_using_huc(i915)));
-
- /* Verify GuC firmware availability */
- if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(i915) ? "no GuC hardware" :
- "no GuC firmware");
- }
-
- /* Verify HuC firmware availability */
- if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(i915) ? "no HuC hardware" :
- "no HuC firmware");
- }
-
- /* XXX: GuC submission is unavailable for now */
- if (intel_uc_is_using_guc_submission(i915)) {
- DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- "GuC submission not supported");
- DRM_INFO("Switching to non-GuC submission mode!\n");
- i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
- }
-
- /* A negative value means "use platform/config default" */
- if (i915_modparams.guc_log_level < 0)
- i915_modparams.guc_log_level =
- __get_default_guc_log_level(i915);
-
- if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(i915) ? "no GuC hardware" :
- "GuC not enabled");
- i915_modparams.guc_log_level = 0;
- }
-
- if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
- "verbosity too high");
- i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
- }
-
- DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
- i915_modparams.guc_log_level,
- yesno(i915_modparams.guc_log_level),
- yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
- GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
-
- /* Make sure that sanitization was done */
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-}
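
sanitize_options_early() resolves "auto" (-1) to a platform default and then clamps values that make no sense for the hardware or build. A compact sketch of that clamping, with an invented verbosity ceiling:

#include <stdio.h>

#define LOG_LEVEL_DISABLED	0
#define LOG_LEVEL_MAX		5	/* invented ceiling, for illustration only */

/* Negative means "auto": take the platform/config default, then clamp */
static int sanitize_log_level(int requested, int platform_default, int has_guc)
{
	int level = requested < 0 ? platform_default : requested;

	if (level > 0 && !has_guc)
		level = LOG_LEVEL_DISABLED;
	if (level > LOG_LEVEL_MAX)
		level = LOG_LEVEL_MAX;
	return level;
}

int main(void)
{
	printf("%d\n", sanitize_log_level(-1, 2, 1));	/* auto -> platform default */
	printf("%d\n", sanitize_log_level(9, 2, 1));	/* too verbose -> clamped */
	printf("%d\n", sanitize_log_level(3, 2, 0));	/* no GuC -> disabled */
	return 0;
}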
-
-void intel_uc_init_early(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- intel_guc_init_early(guc);
- intel_huc_init_early(huc);
-
- sanitize_options_early(i915);
-}
-
-void intel_uc_cleanup_early(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- guc_free_load_err_log(guc);
-}
-
-/**
- * intel_uc_init_mmio - setup uC MMIO access
- * @i915: device private
- *
- * Setup minimal state necessary for MMIO accesses later in the
- * initialization sequence.
- */
-void intel_uc_init_mmio(struct drm_i915_private *i915)
-{
- intel_guc_init_send_regs(&i915->guc);
-}
-
-static void guc_capture_load_err_log(struct intel_guc *guc)
-{
- if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
- return;
-
- if (!guc->load_err_log)
- guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
-
- return;
-}
-
-static void guc_free_load_err_log(struct intel_guc *guc)
-{
- if (guc->load_err_log)
- i915_gem_object_put(guc->load_err_log);
-}
-
-static void guc_reset_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.reset(guc_to_i915(guc));
-}
-
-static void guc_enable_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.enable(guc_to_i915(guc));
-}
-
-static void guc_disable_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.disable(guc_to_i915(guc));
-}
-
-static int guc_enable_communication(struct intel_guc *guc)
-{
- guc_enable_interrupts(guc);
-
- return intel_guc_ct_enable(&guc->ct);
-}
-
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
-{
- intel_guc_ct_disable(&guc->ct);
-
- guc_disable_interrupts(guc);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-int intel_uc_init_misc(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret;
-
- if (!USES_GUC(i915))
- return 0;
-
- ret = intel_guc_init_misc(guc);
- if (ret)
- return ret;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_init_misc(huc);
- if (ret)
- goto err_guc;
- }
-
- return 0;
-
-err_guc:
- intel_guc_fini_misc(guc);
- return ret;
-}
-
-void intel_uc_fini_misc(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- if (!USES_GUC(i915))
- return;
-
- if (USES_HUC(i915))
- intel_huc_fini_misc(huc);
-
- intel_guc_fini_misc(guc);
-}
-
-int intel_uc_init(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret;
-
- if (!USES_GUC(i915))
- return 0;
-
- if (!HAS_GUC(i915))
- return -ENODEV;
-
- /* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(USES_GUC_SUBMISSION(i915));
-
- ret = intel_guc_init(guc);
- if (ret)
- return ret;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_init(huc);
- if (ret)
- goto err_guc;
- }
-
- if (USES_GUC_SUBMISSION(i915)) {
- /*
- * This is stuff we need to have available at fw load time
- * if we are planning to enable submission later
- */
- ret = intel_guc_submission_init(guc);
- if (ret)
- goto err_huc;
- }
-
- return 0;
-
-err_huc:
- if (USES_HUC(i915))
- intel_huc_fini(huc);
-err_guc:
- intel_guc_fini(guc);
- return ret;
-}
-
-void intel_uc_fini(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!USES_GUC(i915))
- return;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- if (USES_GUC_SUBMISSION(i915))
- intel_guc_submission_fini(guc);
-
- if (USES_HUC(i915))
- intel_huc_fini(&i915->huc);
-
- intel_guc_fini(guc);
-}
-
-static void __uc_sanitize(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- intel_huc_sanitize(huc);
- intel_guc_sanitize(guc);
-
- __intel_uc_reset_hw(i915);
-}
-
-void intel_uc_sanitize(struct drm_i915_private *i915)
-{
- if (!USES_GUC(i915))
- return;
-
- __uc_sanitize(i915);
-}
-
-int intel_uc_init_hw(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret, attempts;
-
- if (!USES_GUC(i915))
- return 0;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- guc_reset_interrupts(guc);
-
- /* WaEnableuKernelHeaderValidFix:skl */
- /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN(i915, 9))
- attempts = 3;
- else
- attempts = 1;
-
- while (attempts--) {
- /*
- * Always reset the GuC just before (re)loading, so
- * that the state and timing are fairly predictable
- */
- ret = __intel_uc_reset_hw(i915);
- if (ret)
- goto err_out;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_fw_upload(huc);
- if (ret)
- goto err_out;
- }
-
- intel_guc_ads_reset(guc);
- intel_guc_init_params(guc);
- ret = intel_guc_fw_upload(guc);
- if (ret == 0)
- break;
-
- DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
- "retry %d more time(s)\n", ret, attempts);
- }
-
- /* Did we succeed or run out of retries? */
- if (ret)
- goto err_log_capture;
-
- ret = guc_enable_communication(guc);
- if (ret)
- goto err_log_capture;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_auth(huc);
- if (ret)
- goto err_communication;
- }
-
- ret = intel_guc_sample_forcewake(guc);
- if (ret)
- goto err_communication;
-
- if (USES_GUC_SUBMISSION(i915)) {
- ret = intel_guc_submission_enable(guc);
- if (ret)
- goto err_communication;
- }
-
- dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
- guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(i915->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(i915)));
- dev_info(i915->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(i915)));
-
- return 0;
-
- /*
- * We've failed to load the firmware :(
- */
-err_communication:
- guc_disable_communication(guc);
-err_log_capture:
- guc_capture_load_err_log(guc);
-err_out:
- __uc_sanitize(i915);
-
- /*
- * Note that there is no fallback, as either the user explicitly asked
- * for the GuC or the driver default was to run with the GuC enabled.
- */
- if (GEM_WARN_ON(ret == -EIO))
- ret = -EINVAL;
-
- dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
- return ret;
-}
-
-void intel_uc_fini_hw(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- if (USES_GUC_SUBMISSION(i915))
- intel_guc_submission_disable(guc);
-
- guc_disable_communication(guc);
- __uc_sanitize(i915);
-}
-
-/**
- * intel_uc_reset_prepare - Prepare for reset
- * @i915: device private
- *
- * Preparing for full gpu reset.
- */
-void intel_uc_reset_prepare(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- guc_stop_communication(guc);
- __uc_sanitize(i915);
-}
-
-void intel_uc_runtime_suspend(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- int err;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- err = intel_guc_suspend(guc);
- if (err)
- DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
-
- guc_disable_communication(guc);
-}
-
-void intel_uc_suspend(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- intel_wakeref_t wakeref;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- intel_uc_runtime_suspend(i915);
-}
-
-int intel_uc_resume(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- int err;
-
- if (!intel_guc_is_loaded(guc))
- return 0;
-
- guc_enable_communication(guc);
-
- err = intel_guc_resume(guc);
- if (err) {
- DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
- return err;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
deleted file mode 100644
index 3ea06c87dfcd..000000000000
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-#ifndef _INTEL_UC_H_
-#define _INTEL_UC_H_
-
-#include "intel_guc.h"
-#include "intel_huc.h"
-#include "i915_params.h"
-
-void intel_uc_init_early(struct drm_i915_private *dev_priv);
-void intel_uc_cleanup_early(struct drm_i915_private *dev_priv);
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-int intel_uc_init_misc(struct drm_i915_private *dev_priv);
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
-void intel_uc_sanitize(struct drm_i915_private *dev_priv);
-int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_uc_init(struct drm_i915_private *dev_priv);
-void intel_uc_fini(struct drm_i915_private *dev_priv);
-void intel_uc_reset_prepare(struct drm_i915_private *i915);
-void intel_uc_suspend(struct drm_i915_private *i915);
-void intel_uc_runtime_suspend(struct drm_i915_private *i915);
-int intel_uc_resume(struct drm_i915_private *dev_priv);
-
-static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc > 0;
-}
-
-static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
-}
-
-static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
deleted file mode 100644
index f342ddd47df8..000000000000
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/bitfield.h>
-#include <linux/firmware.h>
-#include <drm/drm_print.h>
-
-#include "intel_uc_fw.h"
-#include "i915_drv.h"
-
-/**
- * intel_uc_fw_fetch - fetch uC firmware
- *
- * @dev_priv: device private
- * @uc_fw: uC firmware
- *
- * Fetch uC firmware into GEM obj.
- */
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- if (!uc_fw->path) {
- dev_info(dev_priv->drm.dev,
- "%s: No firmware was defined for %s!\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_platform_name(INTEL_INFO(dev_priv)->platform));
- return;
- }
-
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err) {
- DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- fw->size, sizeof(struct uc_css_header));
- err = -ENODATA;
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) *
- sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Mismatched firmware header definition\n",
- intel_uc_fw_type_repr(uc_fw->type));
- err = -ENOEXEC;
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
- DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
- intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
- err = -ENOEXEC;
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At the very least it must contain a header, uCode and RSA; check their combined size. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
- err = -ENOEXEC;
- goto fail;
- }
-
- /* Get version numbers from the CSS header */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
- css->sw_version);
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
- css->sw_version);
- break;
-
- default:
- MISSING_CASE(uc_fw->type);
- break;
- }
-
- DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("%s: Skipping firmware version check\n",
- intel_uc_fw_type_repr(uc_fw->type));
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- obj = i915_gem_object_create_shmem_from_data(dev_priv,
- fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- release_firmware(fw);
- return;
-
-fail:
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- DRM_INFO("%s: Firmware can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
-
- release_firmware(fw); /* OK even if fw is NULL */
-}
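
intel_uc_fw_fetch() derives the blob layout purely from the CSS header dword counts: the header is whatever is not RSA modulus/key/exponent, the uCode fills the rest of size_dw, and the RSA key follows. A sketch of that offset arithmetic with made-up dword counts:

#include <stdio.h>

int main(void)
{
	/* Invented CSS header fields, all in dwords */
	unsigned int header_size_dw = 161, modulus_size_dw = 64;
	unsigned int key_size_dw = 64, exponent_size_dw = 1, size_dw = 4200;

	unsigned int header_offset = 0;	/* firmware bits always start at the header */
	unsigned int header_size = (header_size_dw - modulus_size_dw -
				    key_size_dw - exponent_size_dw) * 4;
	unsigned int ucode_offset = header_offset + header_size;
	unsigned int ucode_size = (size_dw - header_size_dw) * 4;
	unsigned int rsa_offset = ucode_offset + ucode_size;
	unsigned int rsa_size = key_size_dw * 4;

	printf("header %u+%u, uCode %u+%u, RSA %u+%u, total %u bytes\n",
	       header_offset, header_size, ucode_offset, ucode_size,
	       rsa_offset, rsa_size, header_size + ucode_size + rsa_size);
	return 0;
}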
-
-static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- struct i915_vma dummy = {
- .node.start = intel_uc_fw_ggtt_offset(uc_fw),
- .node.size = obj->base.size,
- .pages = obj->mm.pages,
- .vm = &ggtt->vm,
- };
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
-
- /* uc_fw->obj cache domains were not controlled across suspend */
- drm_clflush_sg(dummy.pages);
-
- ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
-}
-
-static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- u64 start = intel_uc_fw_ggtt_offset(uc_fw);
-
- ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
-}
-
-/**
- * intel_uc_fw_upload - load uC firmware using custom loader
- * @uc_fw: uC firmware
- * @xfer: custom uC firmware loader function
- *
- * Loads uC firmware using custom loader and updates internal flags.
- *
- * Return: 0 on success, non-zero on failure.
- */
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw))
-{
- int err;
-
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- /* Call custom loader */
- intel_uc_fw_ggtt_bind(uc_fw);
- err = xfer(uc_fw);
- intel_uc_fw_ggtt_unbind(uc_fw);
- if (err)
- goto fail;
-
- uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
-
- return 0;
-
-fail:
- uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
-
- return err;
-}
-
-int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
-{
- int err;
-
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- err = i915_gem_object_pin_pages(uc_fw->obj);
- if (err)
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
-
- return err;
-}
-
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
-{
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
-
- i915_gem_object_unpin_pages(uc_fw->obj);
-}
-
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_mm_node *node = &ggtt->uc_fw;
-
- GEM_BUG_ON(!node->allocated);
- GEM_BUG_ON(upper_32_bits(node->start));
- GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
-
- return lower_32_bits(node->start);
-}
-
-/**
- * intel_uc_fw_cleanup_fetch - cleanup uC firmware
- *
- * @uc_fw: uC firmware
- *
- * Cleans up uC firmware by releasing the firmware GEM obj.
- */
-void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&uc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
-
-/**
- * intel_uc_fw_dump - dump information about uC firmware
- * @uc_fw: uC firmware
- * @p: the &drm_printer
- *
- * Pretty printer for uC firmware.
- */
-void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
-{
- drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
- drm_printf(p, "\tstatus: fetch %s, load %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status),
- intel_uc_fw_status_repr(uc_fw->load_status));
- drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
- drm_printf(p, "\theader: offset %u, size %u\n",
- uc_fw->header_offset, uc_fw->header_size);
- drm_printf(p, "\tuCode: offset %u, size %u\n",
- uc_fw->ucode_offset, uc_fw->ucode_size);
- drm_printf(p, "\tRSA: offset %u, size %u\n",
- uc_fw->rsa_offset, uc_fw->rsa_size);
-}
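
For context on the deleted loader above: intel_uc_fw_upload() only frames the transfer (GGTT bind, status bookkeeping, unbind) and delegates the actual copy to the xfer callback. A minimal sketch of how a caller might have plugged in, assuming a hardware-specific DMA helper; guc_xfer() and start_fw_dma_copy() are hypothetical, only the intel_uc_fw_upload() and intel_uc_fw_ggtt_offset() signatures come from the removed code.

/* Hypothetical transfer callback: only the framing calls are real. */
static int guc_xfer(struct intel_uc_fw *uc_fw)
{
	/* The firmware object is GGTT-bound at this point, so the offset is valid. */
	u32 offset = intel_uc_fw_ggtt_offset(uc_fw);
	u32 size = uc_fw->header_size + uc_fw->ucode_size;

	/* hypothetical hardware-specific DMA of @size bytes from @offset */
	return start_fw_dma_copy(offset, size);
}

static int guc_fw_load(struct intel_uc_fw *uc_fw)
{
	/* Binds, transfers via the callback, unbinds and updates load_status. */
	return intel_uc_fw_upload(uc_fw, guc_xfer);
}
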
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
deleted file mode 100644
index ff98f8661d72..000000000000
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_UC_FW_H_
-#define _INTEL_UC_FW_H_
-
-struct drm_printer;
-struct drm_i915_private;
-
-/* Home of GuC, HuC and DMC firmwares */
-#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
-
-enum intel_uc_fw_status {
- INTEL_UC_FIRMWARE_FAIL = -1,
- INTEL_UC_FIRMWARE_NONE = 0,
- INTEL_UC_FIRMWARE_PENDING,
- INTEL_UC_FIRMWARE_SUCCESS
-};
-
-enum intel_uc_fw_type {
- INTEL_UC_FW_TYPE_GUC,
- INTEL_UC_FW_TYPE_HUC
-};
-
-/*
- * This structure encapsulates all the data needed during the process
- * of fetching, caching, and loading the firmware image into the uC.
- */
-struct intel_uc_fw {
- const char *path;
- size_t size;
- struct drm_i915_gem_object *obj;
- enum intel_uc_fw_status fetch_status;
- enum intel_uc_fw_status load_status;
-
- /*
- * The firmware build process generates a version header file with the major and
- * minor version defined. The versions are built into the CSS header of the firmware.
- * The i915 kernel driver sets the minimal firmware version required per platform.
- */
- u16 major_ver_wanted;
- u16 minor_ver_wanted;
- u16 major_ver_found;
- u16 minor_ver_found;
-
- enum intel_uc_fw_type type;
- u32 header_size;
- u32 header_offset;
- u32 rsa_size;
- u32 rsa_offset;
- u32 ucode_size;
- u32 ucode_offset;
-};
-
-static inline
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- }
- return "<invalid>";
-}
-
-static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
-{
- switch (type) {
- case INTEL_UC_FW_TYPE_GUC:
- return "GuC";
- case INTEL_UC_FW_TYPE_HUC:
- return "HuC";
- }
- return "uC";
-}
-
-static inline
-void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type)
-{
- uc_fw->path = NULL;
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
- uc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
- uc_fw->type = type;
-}
-
-static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
-{
- return uc_fw->path != NULL;
-}
-
-static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
-{
- return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
-}
-
-static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
-{
- if (intel_uc_fw_is_loaded(uc_fw))
- uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
-}
-
-/**
- * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded.
- * @uc_fw: uC firmware.
- *
- * Get the size of the firmware and header that will be uploaded to WOPCM.
- *
- * Return: Upload firmware size, or zero on firmware fetch failure.
- */
-static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
-{
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- return uc_fw->header_size + uc_fw->ucode_size;
-}
-
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
-void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw));
-int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
-void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
-
-#endif
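
The inline helpers in the deleted header were the sanctioned way to inspect firmware state from outside the fetch/upload code. A small usage sketch, assuming a caller that already owns a drm_printer; report_uc_fw() itself is hypothetical, the helpers are those declared above.

static void report_uc_fw(struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	if (!intel_uc_fw_is_selected(uc_fw)) {
		drm_printf(p, "%s firmware: not selected\n",
			   intel_uc_fw_type_repr(uc_fw->type));
		return;
	}

	drm_printf(p, "%s firmware: fetch %s, load %s, upload size %uK\n",
		   intel_uc_fw_type_repr(uc_fw->type),
		   intel_uc_fw_status_repr(uc_fw->fetch_status),
		   intel_uc_fw_status_repr(uc_fw->load_status),
		   intel_uc_fw_get_upload_size(uc_fw) / 1024);
}
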
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index da33aa672c3d..5f2cf6f43b8b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -25,8 +25,8 @@
#include <asm/iosf_mbi.h>
#include "i915_drv.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -34,6 +34,32 @@
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ spin_lock_init(&mmio_debug->lock);
+ mmio_debug->unclaimed_mmio_check = 1;
+}
+
+static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ lockdep_assert_held(&mmio_debug->lock);
+
+ /* Save and disable mmio debugging for the user bypass */
+ if (!mmio_debug->suspend_count++) {
+ mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
+ mmio_debug->unclaimed_mmio_check = 0;
+ }
+}
+
+static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ lockdep_assert_held(&mmio_debug->lock);
+
+ if (!--mmio_debug->suspend_count)
+ mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+}
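
The two helpers above implement a save/restore counter rather than a plain flag, so nested user bypasses behave sanely: only the outermost suspend saves and clears the check, and only the matching outermost resume restores it. A sketch of the intended nesting; since the helpers are static, a real caller would live in this file, and it must hold mmio_debug->lock as the lockdep assertions demand.

static void mmio_debug_bypass_example(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock(&mmio_debug->lock);

	mmio_debug_suspend(mmio_debug);	/* 0 -> 1: saves and clears the check */
	mmio_debug_suspend(mmio_debug);	/* 1 -> 2: saved state untouched */

	mmio_debug_resume(mmio_debug);	/* 2 -> 1: still suspended */
	mmio_debug_resume(mmio_debug);	/* 1 -> 0: unclaimed_mmio_check restored */

	spin_unlock(&mmio_debug->lock);
}
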
+
static const char * const forcewake_domain_names[] = {
"render",
"blitter",
@@ -78,6 +104,8 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
+ GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
+ d->uncore->fw_domains_timer |= d->mask;
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
NSEC_PER_MSEC,
@@ -322,7 +350,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
/* On VLV, FIFO will be shared by both SW and HW.
 * So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+ if (IS_VALLEYVIEW(uncore->i915))
n = fifo_free_entries(uncore);
else
n = uncore->fifo_count;
@@ -331,7 +359,8 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
if (wait_for_atomic((n = fifo_free_entries(uncore)) >
GT_FIFO_NUM_RESERVED_ENTRIES,
GT_FIFO_TIMEOUT_MS)) {
- DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
+ drm_dbg(&uncore->i915->drm,
+ "GT_FIFO timeout, entries: %u\n", n);
return;
}
}
@@ -344,7 +373,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
+ struct intel_uncore *uncore = domain->uncore;
unsigned long irqflags;
assert_rpm_device_not_suspended(uncore->rpm);
@@ -353,9 +382,10 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_RESTART;
spin_lock_irqsave(&uncore->lock, irqflags);
- if (WARN_ON(domain->wake_count == 0))
- domain->wake_count++;
+ uncore->fw_domains_timer &= ~domain->mask;
+
+ GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count == 0)
uncore->funcs.force_wake_put(uncore, domain->mask);
@@ -403,7 +433,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
break;
if (--retry_count == 0) {
- DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+ drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
break;
}
@@ -461,7 +491,7 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore)
fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
- DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
+ drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
@@ -473,6 +503,11 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret = false;
+ lockdep_assert_held(&uncore->debug->lock);
+
+ if (uncore->debug->suspend_count)
+ return false;
+
if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
ret |= fpga_check_for_unclaimed_mmio(uncore);
@@ -485,15 +520,13 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
return ret;
}
-static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
- unsigned int restore_forcewake)
+static void forcewake_early_sanitize(struct intel_uncore *uncore,
+ unsigned int restore_forcewake)
{
- /* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+ if (IS_CHERRYVIEW(uncore->i915)) {
__raw_uncore_write32(uncore, GTFIFOCTL,
__raw_uncore_read32(uncore, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
@@ -515,6 +548,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
void intel_uncore_suspend(struct intel_uncore *uncore)
{
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
+
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&uncore->pmic_bus_access_nb);
@@ -526,21 +562,24 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
{
unsigned int restore_forcewake;
+ if (intel_uncore_unclaimed_mmio(uncore))
+ drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
+
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
+
restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
- __intel_uncore_early_sanitize(uncore, restore_forcewake);
+ forcewake_early_sanitize(uncore, restore_forcewake);
iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
- iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
-}
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
-{
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_sanitize_gt_powersave(dev_priv);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
@@ -601,17 +640,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
- if (!uncore->user_forcewake.count++) {
+ if (!uncore->user_forcewake_count++) {
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
-
- /* Save and disable mmio debugging for the user bypass */
- uncore->user_forcewake.saved_mmio_check =
- uncore->unclaimed_mmio_check;
- uncore->user_forcewake.saved_mmio_debug =
- i915_modparams.mmio_debug;
-
- uncore->unclaimed_mmio_check = 0;
- i915_modparams.mmio_debug = 0;
+ spin_lock(&uncore->debug->lock);
+ mmio_debug_suspend(uncore->debug);
+ spin_unlock(&uncore->debug->lock);
}
spin_unlock_irq(&uncore->lock);
}
@@ -626,15 +659,14 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
- if (!--uncore->user_forcewake.count) {
- if (intel_uncore_unclaimed_mmio(uncore))
- dev_info(uncore_to_i915(uncore)->drm.dev,
- "Invalid mmio detected during user access\n");
+ if (!--uncore->user_forcewake_count) {
+ spin_lock(&uncore->debug->lock);
+ mmio_debug_resume(uncore->debug);
- uncore->unclaimed_mmio_check =
- uncore->user_forcewake.saved_mmio_check;
- i915_modparams.mmio_debug =
- uncore->user_forcewake.saved_mmio_debug;
+ if (check_for_unclaimed_mmio(uncore))
+ dev_info(uncore->i915->drm.dev,
+ "Invalid mmio detected during user access\n");
+ spin_unlock(&uncore->debug->lock);
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
@@ -669,8 +701,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
fw_domains &= uncore->fw_domains;
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
- if (WARN_ON(domain->wake_count == 0))
- continue;
+ GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count) {
domain->active = true;
@@ -734,23 +765,47 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
+ struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+ return;
+
if (!uncore->funcs.force_wake_get)
return;
+ spin_lock_irq(&uncore->lock);
+
assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
WARN(fw_domains & ~uncore->fw_domains_active,
"Expected %08x fw_domains to be active, but %08x are off\n",
fw_domains, fw_domains & ~uncore->fw_domains_active);
+
+ /*
+ * Check that the caller has an explicit wakeref and we don't mistake
+ * it for the auto wakeref.
+ */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ unsigned int actual = READ_ONCE(domain->wake_count);
+ unsigned int expect = 1;
+
+ if (uncore->fw_domains_timer & domain->mask)
+ expect++; /* pending automatic release */
+
+ if (WARN(actual < expect,
+ "Expected domain %d to be held awake by caller, count=%d\n",
+ domain->id, actual))
+ break;
+ }
+
+ spin_unlock_irq(&uncore->lock);
}
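
The new per-domain loop above distinguishes a caller's explicit reference from the auto-release timer, which is why the expected wake_count can be either 1 or 2. A usage sketch under the assumption that the caller already holds a runtime-pm wakeref; check_held_domain() is hypothetical, the entry points are real.

static void check_held_domain(struct intel_uncore *uncore)
{
	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);

	/*
	 * Passes: the explicit get contributes one reference, and a domain
	 * whose auto-release timer is still pending contributes one more,
	 * so actual >= expect either way.
	 */
	assert_forcewakes_active(uncore, FORCEWAKE_RENDER);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
}
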
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
-#define GEN11_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-
#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
@@ -846,12 +901,10 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
})
#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
+ find_fw_domain(uncore, offset)
+
+#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
+ find_fw_domain(uncore, offset)
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
@@ -878,6 +931,20 @@ static const i915_reg_t gen11_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
+static const i915_reg_t gen12_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+ RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
+ RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
+ RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
+ RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
+ RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
+ RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
+ /* TODO: Other registers are not yet used */
+};
+
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -900,6 +967,13 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
+__is_genX_shadowed(12)
+
+static enum forcewake_domains
+gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ return FORCEWAKE_RENDER;
+}
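
The shadowed-register tables above are kept sorted by offset because the generated is_gen##x##_shadowed() helpers test membership with a binary search (the search body sits outside this hunk). A standalone sketch of that lookup over a sorted offset array, illustrative rather than the driver's exact implementation:

static bool offset_in_sorted_table(u32 offset, const u32 *table, unsigned int num)
{
	unsigned int lo = 0, hi = num;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (table[mid] == offset)
			return true;
		if (table[mid] < offset)
			lo = mid + 1;
		else
			hi = mid;
	}

	return false;
}
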
#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
@@ -942,8 +1016,18 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(uncore, offset); \
+ const u32 __offset = (offset); \
+ if (!is_gen11_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
+ __fwd; \
+})
+
+#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ const u32 __offset = (offset); \
+ if (!is_gen12_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
__fwd; \
})
@@ -1002,9 +1086,51 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
+ GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
+ GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+};
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen12_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
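
As the comment above each table insists, the GEN_FW_RANGE entries must stay sorted and non-overlapping: find_fw_domain() (outside this hunk) resolves an MMIO offset to its forcewake domains with a range binary search. A hedged, standalone sketch of that idea; the struct name here is illustrative, the driver's real type is struct intel_forcewake_range.

struct fw_range_sketch {
	u32 start;
	u32 end;
	u32 domains;
};

static u32 lookup_fw_domains(const struct fw_range_sketch *ranges,
			     unsigned int num, u32 offset)
{
	unsigned int lo = 0, hi = num;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offset < ranges[mid].start)
			hi = mid;
		else if (offset > ranges[mid].end)
			lo = mid + 1;
		else
			return ranges[mid].domains;	/* 0 means no forcewake needed */
	}

	return 0;
}
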
@@ -1049,7 +1175,16 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
if (likely(!i915_modparams.mmio_debug))
return;
+ /* interrupts are disabled and re-enabled around uncore->lock usage */
+ lockdep_assert_held(&uncore->lock);
+
+ if (before)
+ spin_lock(&uncore->debug->lock);
+
__unclaimed_reg_debug(uncore, reg, read, before);
+
+ if (!before)
+ spin_unlock(&uncore->debug->lock);
}
#define GEN2_READ_HEADER(x) \
@@ -1123,8 +1258,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore,
static inline void __force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- if (WARN_ON(!fw_domains))
- return;
+ GEM_BUG_ON(!fw_domains);
/* Turn on all requested but inactive supported forcewake domains. */
fw_domains &= uncore->fw_domains;
@@ -1145,26 +1279,24 @@ func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
-#define __gen6_read(x) __gen_read(gen6, x)
-#define __fwtable_read(x) __gen_read(fwtable, x)
-#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
-
-__gen11_fwtable_read(8)
-__gen11_fwtable_read(16)
-__gen11_fwtable_read(32)
-__gen11_fwtable_read(64)
-__fwtable_read(8)
-__fwtable_read(16)
-__fwtable_read(32)
-__fwtable_read(64)
-__gen6_read(8)
-__gen6_read(16)
-__gen6_read(32)
-__gen6_read(64)
-
-#undef __gen11_fwtable_read
-#undef __fwtable_read
-#undef __gen6_read
+
+#define __gen_reg_read_funcs(func) \
+static enum forcewake_domains \
+func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+ return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_read(func, 8) \
+__gen_read(func, 16) \
+__gen_read(func, 32) \
+__gen_read(func, 64)
+
+__gen_reg_read_funcs(gen12_fwtable);
+__gen_reg_read_funcs(gen11_fwtable);
+__gen_reg_read_funcs(fwtable);
+__gen_reg_read_funcs(gen6);
+
+#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
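
The new __gen_reg_read_funcs() macro is a small token-pasting generator: one invocation stamps out the fw-domain query plus the 8/16/32/64-bit readers for a given access family. A standalone, simplified demonstration of the pattern (placeholder bodies, not the driver's readers):

#define DEFINE_FAMILY_HELPERS(family)					\
static unsigned int family##_read32(unsigned int reg)			\
{									\
	return reg;							\
}									\
									\
static unsigned int family##_read_fw_domains(unsigned int reg)		\
{									\
	return family##_read32(reg) & 0xf;				\
}

DEFINE_FAMILY_HELPERS(fwtable)		/* fwtable_read32, fwtable_read_fw_domains */
DEFINE_FAMILY_HELPERS(gen12_fwtable)	/* gen12_fwtable_read32, ... */

#undef DEFINE_FAMILY_HELPERS
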
@@ -1225,6 +1357,9 @@ gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace)
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
#define __gen_write(func, x) \
static void \
@@ -1237,38 +1372,34 @@ func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trac
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen8_write(x) __gen_write(gen8, x)
-#define __fwtable_write(x) __gen_write(fwtable, x)
-#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
-
-__gen11_fwtable_write(8)
-__gen11_fwtable_write(16)
-__gen11_fwtable_write(32)
-__fwtable_write(8)
-__fwtable_write(16)
-__fwtable_write(32)
-__gen8_write(8)
-__gen8_write(16)
-__gen8_write(32)
-__gen6_write(8)
-__gen6_write(16)
-__gen6_write(32)
-#undef __gen11_fwtable_write
-#undef __fwtable_write
-#undef __gen8_write
-#undef __gen6_write
+#define __gen_reg_write_funcs(func) \
+static enum forcewake_domains \
+func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+ return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_write(func, 8) \
+__gen_write(func, 16) \
+__gen_write(func, 32)
+
+__gen_reg_write_funcs(gen12_fwtable);
+__gen_reg_write_funcs(gen11_fwtable);
+__gen_reg_write_funcs(fwtable);
+__gen_reg_write_funcs(gen8);
+
+#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_writeb = x##_write8; \
(uncore)->funcs.mmio_writew = x##_write16; \
(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_readb = x##_read8; \
(uncore)->funcs.mmio_readw = x##_read16; \
@@ -1276,24 +1407,39 @@ do { \
(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+do { \
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
+ (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+do { \
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
+ (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
+} while (0)
-static void fw_domain_init(struct intel_uncore *uncore,
- enum forcewake_domain_id domain_id,
- i915_reg_t reg_set,
- i915_reg_t reg_ack)
+static int __fw_domain_init(struct intel_uncore *uncore,
+ enum forcewake_domain_id domain_id,
+ i915_reg_t reg_set,
+ i915_reg_t reg_ack)
{
struct intel_uncore_forcewake_domain *d;
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
- return;
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
+ GEM_BUG_ON(uncore->fw_domain[domain_id]);
- d = &uncore->fw_domain[domain_id];
+ if (i915_inject_probe_failure(uncore->i915))
+ return -ENOMEM;
- WARN_ON(d->wake_count);
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
WARN_ON(!i915_mmio_reg_valid(reg_set));
WARN_ON(!i915_mmio_reg_valid(reg_ack));
+ d->uncore = uncore;
d->wake_count = 0;
d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
@@ -1310,7 +1456,6 @@ static void fw_domain_init(struct intel_uncore *uncore,
BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
-
d->mask = BIT(domain_id);
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1319,6 +1464,10 @@ static void fw_domain_init(struct intel_uncore *uncore,
uncore->fw_domains |= BIT(domain_id);
fw_domain_reset(d);
+
+ uncore->fw_domain[domain_id] = d;
+
+ return 0;
}
static void fw_domain_fini(struct intel_uncore *uncore,
@@ -1326,30 +1475,41 @@ static void fw_domain_fini(struct intel_uncore *uncore,
{
struct intel_uncore_forcewake_domain *d;
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
- return;
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
- d = &uncore->fw_domain[domain_id];
+ d = fetch_and_zero(&uncore->fw_domain[domain_id]);
+ if (!d)
+ return;
+ uncore->fw_domains &= ~BIT(domain_id);
WARN_ON(d->wake_count);
WARN_ON(hrtimer_cancel(&d->timer));
- memset(d, 0, sizeof(*d));
+ kfree(d);
+}
- uncore->fw_domains &= ~BIT(domain_id);
+static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
+{
+ struct intel_uncore_forcewake_domain *d;
+ int tmp;
+
+ for_each_fw_domain(d, uncore, tmp)
+ fw_domain_fini(uncore, d->id);
}
-static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
+static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret = 0;
- if (!intel_uncore_has_forcewake(uncore))
- return;
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
+
+#define fw_domain_init(uncore__, id__, set__, ack__) \
+ (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
if (INTEL_GEN(i915) >= 11) {
int i;
- uncore->funcs.force_wake_get =
- fw_domains_get_with_fallback;
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1357,6 +1517,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
+
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(i915, _VCS(i)))
continue;
@@ -1374,8 +1535,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
} else if (IS_GEN_RANGE(i915, 9, 10)) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_fallback;
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1424,8 +1584,10 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
__raw_uncore_write32(uncore, FORCEWAKE, 0);
__raw_posting_read(uncore, ECOBUS);
- fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
- FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+ ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+ if (ret)
+ goto out;
spin_lock_irq(&uncore->lock);
fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
@@ -1434,8 +1596,9 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
spin_unlock_irq(&uncore->lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
+ drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
+ drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
+ fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
@@ -1447,8 +1610,16 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE, FORCEWAKE_ACK);
}
+#undef fw_domain_init
+
/* All future platforms are expected to require complex power gating */
- WARN_ON(uncore->fw_domains == 0);
+ WARN_ON(!ret && uncore->fw_domains == 0);
+
+out:
+ if (ret)
+ intel_uncore_fw_domains_fini(uncore);
+
+ return ret;
}
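
The temporary fw_domain_init() wrapper above uses the GNU a ?: b extension to latch the first error: once ret is non-zero, every later __fw_domain_init() call is skipped and the single out: path tears down whatever was created. A standalone illustration of that idiom; step() and its failure are made up.

static int step(int n)
{
	return n == 2 ? -1 : 0;	/* pretend the second step fails */
}

static int run_steps(void)
{
	int ret = 0;

#define do_step(n) (ret ?: (ret = step(n)))
	do_step(1);	/* runs, ret stays 0 */
	do_step(2);	/* runs, ret becomes -1 */
	do_step(3);	/* skipped: ret is already non-zero */
#undef do_step

	return ret;	/* the first recorded failure */
}
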
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
@@ -1493,7 +1664,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
struct pci_dev *pdev = i915->drm.pdev;
int mmio_bar;
int mmio_size;
@@ -1513,8 +1684,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
mmio_size = 2 * 1024 * 1024;
uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
if (uncore->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
+ drm_err(&i915->drm, "failed to map registers\n");
return -EIO;
}
@@ -1523,49 +1693,46 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = uncore->i915->drm.pdev;
pci_iounmap(pdev, uncore->regs);
}
-void intel_uncore_init_early(struct intel_uncore *uncore)
+void intel_uncore_init_early(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
spin_lock_init(&uncore->lock);
+ uncore->i915 = i915;
+ uncore->rpm = &i915->runtime_pm;
+ uncore->debug = &i915->mmio_debug;
}
-int intel_uncore_init_mmio(struct intel_uncore *uncore)
+static void uncore_raw_init(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- int ret;
-
- ret = uncore_mmio_setup(uncore);
- if (ret)
- return ret;
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
- i915_check_vgpu(i915);
-
- if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
- uncore->flags |= UNCORE_HAS_FORCEWAKE;
+ if (IS_GEN(uncore->i915, 5)) {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
+ } else {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
+ }
+}
- intel_uncore_fw_domains_init(uncore);
- __intel_uncore_early_sanitize(uncore, 0);
+static int uncore_forcewake_init(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret;
- uncore->unclaimed_mmio_check = 1;
- uncore->pmic_bus_access_nb.notifier_call =
- i915_pmic_bus_access_notifier;
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
- uncore->rpm = &i915->runtime_pm;
+ ret = intel_uncore_fw_domains_init(uncore);
+ if (ret)
+ return ret;
+ forcewake_early_sanitize(uncore, 0);
- if (!intel_uncore_has_forcewake(uncore)) {
- if (IS_GEN(i915, 5)) {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
- }
- } else if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GEN_RANGE(i915, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
if (IS_VALLEYVIEW(i915)) {
@@ -1579,7 +1746,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-
} else {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
@@ -1588,12 +1754,48 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- } else {
+ } else if (IS_GEN(i915, 11)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
}
+ uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
+
+ if (!intel_uncore_has_forcewake(uncore)) {
+ uncore_raw_init(uncore);
+ } else {
+ ret = uncore_forcewake_init(uncore);
+ if (ret)
+ goto out_mmio_cleanup;
+ }
+
+ /* make sure fw funcs are set if and only if we have fw */
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
+
if (HAS_FPGA_DBG_UNCLAIMED(i915))
uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
@@ -1603,9 +1805,16 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (IS_GEN_RANGE(i915, 6, 7))
uncore->flags |= UNCORE_HAS_FIFO;
- iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+ /* clear out unclaimed reg detection bit */
+ if (intel_uncore_unclaimed_mmio(uncore))
+ drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
return 0;
+
+out_mmio_cleanup:
+ uncore_mmio_cleanup(uncore);
+
+ return ret;
}
/*
@@ -1615,45 +1824,46 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
*/
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
+ enum forcewake_domains fw_domains = uncore->fw_domains;
+ enum forcewake_domain_id domain_id;
+ int i;
- if (INTEL_GEN(i915) >= 11) {
- enum forcewake_domains fw_domains = uncore->fw_domains;
- enum forcewake_domain_id domain_id;
- int i;
+ if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+ return;
- for (i = 0; i < I915_MAX_VCS; i++) {
- domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(i915, _VCS(i)))
- continue;
+ if (HAS_ENGINE(i915, _VCS(i)))
+ continue;
- if (fw_domains & BIT(domain_id))
- fw_domain_fini(uncore, domain_id);
- }
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(uncore, domain_id);
+ }
- for (i = 0; i < I915_MAX_VECS; i++) {
- domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(i915, _VECS(i)))
- continue;
+ if (HAS_ENGINE(i915, _VECS(i)))
+ continue;
- if (fw_domains & BIT(domain_id))
- fw_domain_fini(uncore, domain_id);
- }
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(uncore, domain_id);
}
}
void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(uncore_to_i915(uncore));
+ if (intel_uncore_has_forcewake(uncore)) {
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ &uncore->pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(uncore);
+ intel_uncore_fw_domains_fini(uncore);
+ iosf_mbi_punit_release();
+ }
- iosf_mbi_punit_acquire();
- iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &uncore->pmic_bus_access_nb);
- intel_uncore_forcewake_reset(uncore);
- iosf_mbi_punit_release();
uncore_mmio_cleanup(uncore);
}
@@ -1665,7 +1875,7 @@ static const struct reg_whitelist {
} reg_read_whitelist[] = { {
.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .gen_mask = INTEL_GEN_MASK(4, 11),
+ .gen_mask = INTEL_GEN_MASK(4, 12),
.size = 8
} };
@@ -1749,7 +1959,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* wish to wait without holding forcewake for the duration (i.e. you expect
* the wait to be slow).
*
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
@@ -1797,7 +2007,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
*
* Otherwise, the wait will timeout after @timeout_ms milliseconds.
*
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
@@ -1841,7 +2051,13 @@ int __intel_wait_for_register(struct intel_uncore *uncore,
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
- return check_for_unclaimed_mmio(uncore);
+ bool ret;
+
+ spin_lock_irq(&uncore->debug->lock);
+ ret = check_for_unclaimed_mmio(uncore);
+ spin_unlock_irq(&uncore->debug->lock);
+
+ return ret;
}
bool
@@ -1849,84 +2065,29 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
- spin_lock_irq(&uncore->lock);
+ spin_lock_irq(&uncore->debug->lock);
- if (unlikely(uncore->unclaimed_mmio_check <= 0))
+ if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
goto out;
- if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
+ if (unlikely(check_for_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
- DRM_DEBUG("Unclaimed register detected, "
- "enabling oneshot unclaimed register reporting. "
- "Please use i915.mmio_debug=N for more information.\n");
+ drm_dbg(&uncore->i915->drm,
+ "Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
- uncore->unclaimed_mmio_check--;
+ uncore->debug->unclaimed_mmio_check--;
ret = true;
}
out:
- spin_unlock_irq(&uncore->lock);
+ spin_unlock_irq(&uncore->debug->lock);
return ret;
}
-static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
- i915_reg_t reg)
-{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- u32 offset = i915_mmio_reg_offset(reg);
- enum forcewake_domains fw_domains;
-
- if (INTEL_GEN(i915) >= 11) {
- fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
- } else if (HAS_FWTABLE(i915)) {
- fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
- } else if (INTEL_GEN(i915) >= 6) {
- fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
- } else {
- /* on devices with FW we expect to hit one of the above cases */
- if (intel_uncore_has_forcewake(uncore))
- MISSING_CASE(INTEL_GEN(i915));
-
- fw_domains = 0;
- }
-
- WARN_ON(fw_domains & ~uncore->fw_domains);
-
- return fw_domains;
-}
-
-static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
- i915_reg_t reg)
-{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- u32 offset = i915_mmio_reg_offset(reg);
- enum forcewake_domains fw_domains;
-
- if (INTEL_GEN(i915) >= 11) {
- fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
- } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
- fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
- } else if (IS_GEN(i915, 8)) {
- fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
- } else if (IS_GEN_RANGE(i915, 6, 7)) {
- fw_domains = FORCEWAKE_RENDER;
- } else {
- /* on devices with FW we expect to hit one of the above cases */
- if (intel_uncore_has_forcewake(uncore))
- MISSING_CASE(INTEL_GEN(i915));
-
- fw_domains = 0;
- }
-
- WARN_ON(fw_domains & ~uncore->fw_domains);
-
- return fw_domains;
-}
-
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
@@ -1953,10 +2114,12 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return 0;
if (op & FW_REG_READ)
- fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
+ fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
if (op & FW_REG_WRITE)
- fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
+ fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
+
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 804a0faacc91..dcfa243892c6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -36,6 +36,13 @@ struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
+struct intel_uncore_mmio_debug {
+ spinlock_t lock; /** lock is also taken in irq contexts. */
+ int unclaimed_mmio_check;
+ int saved_mmio_check;
+ u32 suspend_count;
+};
+
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
FW_DOMAIN_ID_BLITTER,
@@ -70,6 +77,11 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
+ enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
+ i915_reg_t r);
+ enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
+ i915_reg_t r);
+
u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
u16 (*mmio_readw)(struct intel_uncore *uncore,
@@ -97,6 +109,7 @@ struct intel_forcewake_range {
struct intel_uncore {
void __iomem *regs;
+ struct drm_i915_private *i915;
struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -117,9 +130,11 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ enum forcewake_domains fw_domains_timer;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
struct intel_uncore_forcewake_domain {
+ struct intel_uncore *uncore;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned int wake_count;
@@ -127,32 +142,21 @@ struct intel_uncore {
struct hrtimer timer;
u32 __iomem *reg_set;
u32 __iomem *reg_ack;
- } fw_domain[FW_DOMAIN_ID_COUNT];
+ } *fw_domain[FW_DOMAIN_ID_COUNT];
- struct {
- unsigned int count;
+ unsigned int user_forcewake_count;
- int saved_mmio_check;
- int saved_mmio_debug;
- } user_forcewake;
-
- int unclaimed_mmio_check;
+ struct intel_uncore_mmio_debug *debug;
};
/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
- for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+ for (tmp__ = (mask__); tmp__ ;) \
+ for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])
#define for_each_fw_domain(domain__, uncore__, tmp__) \
for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
-static inline struct intel_uncore *
-forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
-{
- return container_of(d, struct intel_uncore, fw_domain[d->id]);
-}
-
static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
@@ -177,8 +181,10 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init_early(struct intel_uncore *uncore);
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
+void intel_uncore_init_early(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
@@ -372,23 +378,35 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
static inline void intel_uncore_rmw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write(uncore, reg, val);
+ old = intel_uncore_read(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write(uncore, reg, val);
}
static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
+
+ old = intel_uncore_read_fw(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 val,
+ u32 mask, u32 expected_val)
+{
+ u32 reg_val;
+
+ intel_uncore_write(uncore, reg, val);
+ reg_val = intel_uncore_read(uncore, reg);
- val = intel_uncore_read_fw(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write_fw(uncore, reg, val);
+ return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
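
Usage sketch for the two helpers above; the register and bit are placeholders. The reworked intel_uncore_rmw() now skips the MMIO write when the masked update is a no-op, and intel_uncore_write_and_verify() turns a failed readback into -EINVAL.

static int enable_feature_bit(struct intel_uncore *uncore, i915_reg_t reg)
{
	/* read-modify-write: clear nothing, set bit 0; writes only on change */
	intel_uncore_rmw(uncore, reg, 0, BIT(0));

	/* write bit 0 again and verify it sticks under the same mask */
	return intel_uncore_write_and_verify(uncore, reg, BIT(0), BIT(0), BIT(0));
}
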
#define raw_reg_read(base, reg) \
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 3db6fa682823..8fbf6f4d3f26 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -4,25 +4,25 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/wait_bit.h>
+
#include "intel_runtime_pm.h"
-#include "i915_gem.h"
+#include "intel_wakeref.h"
-static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_get(struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(rpm);
+ wf->wakeref = intel_runtime_pm_get(wf->rpm);
}
-static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(rpm, wakeref);
- GEM_BUG_ON(!wakeref);
+ intel_runtime_pm_put(wf->rpm, wakeref);
+ INTEL_WAKEREF_BUG_ON(!wakeref);
}
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
/*
* Treat get/put as different subclasses, as we may need to run
@@ -34,11 +34,11 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
if (!atomic_read(&wf->count)) {
int err;
- rpm_get(rpm, wf);
+ rpm_get(wf);
- err = fn(wf);
+ err = wf->ops->get(wf);
if (unlikely(err)) {
- rpm_put(rpm, wf);
+ rpm_put(wf);
mutex_unlock(&wf->mutex);
return err;
}
@@ -48,30 +48,79 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
atomic_inc(&wf->count);
mutex_unlock(&wf->mutex);
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
return 0;
}
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- int err;
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ if (unlikely(!atomic_dec_and_test(&wf->count)))
+ goto unlock;
+
+ /* ops->put() must reschedule its own release on error/deferral */
+ if (likely(!wf->ops->put(wf))) {
+ rpm_put(wf);
+ wake_up_var(&wf->wakeref);
+ }
- err = fn(wf);
- if (likely(!err))
- rpm_put(rpm, wf);
- else
- atomic_inc(&wf->count);
+unlock:
mutex_unlock(&wf->mutex);
+}
+
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
+{
+ INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
- return err;
+ /* Assume we are not in process context and so cannot sleep. */
+ if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
+ schedule_work(&wf->work);
+ return;
+ }
+
+ ____intel_wakeref_put_last(wf);
}
-void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+static void __intel_wakeref_put_work(struct work_struct *wrk)
{
- __mutex_init(&wf->mutex, "wakeref", key);
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+
+ if (atomic_add_unless(&wf->count, -1, 1))
+ return;
+
+ mutex_lock(&wf->mutex);
+ ____intel_wakeref_put_last(wf);
+}
+
+void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
+ struct intel_wakeref_lockclass *key)
+{
+ wf->rpm = rpm;
+ wf->ops = ops;
+
+ __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
atomic_set(&wf->count, 0);
wf->wakeref = 0;
+
+ INIT_WORK(&wf->work, __intel_wakeref_put_work);
+ lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
+}
+
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
+{
+ int err;
+
+ might_sleep();
+
+ err = wait_var_event_killable(&wf->wakeref,
+ !intel_wakeref_is_active(wf));
+ if (err)
+ return err;
+
+ intel_wakeref_unlock_wait(wf);
+ return 0;
}
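
The rework above splits the final release into a direct path and a deferred one. A short sketch of the two call sites the new API expects; the caller names are hypothetical, the put helpers are those added to intel_wakeref.h in this patch.

static void release_from_process_context(struct intel_wakeref *wf)
{
	/* annotated might_sleep(); the final put may run ops->put() inline */
	intel_wakeref_put(wf);
}

static void release_from_atomic_context(struct intel_wakeref *wf)
{
	/* INTEL_WAKEREF_PUT_ASYNC: the final park is punted to wf->work */
	intel_wakeref_put_async(wf);
}
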
static void wakeref_auto_timeout(struct timer_list *t)
@@ -115,7 +164,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
if (!refcount_inc_not_zero(&wf->count)) {
spin_lock_irqsave(&wf->lock, flags);
if (!refcount_inc_not_zero(&wf->count)) {
- GEM_BUG_ON(wf->wakeref);
+ INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
refcount_set(&wf->count, 1);
}
@@ -134,5 +183,5 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
intel_wakeref_auto(wf, 0);
- GEM_BUG_ON(wf->wakeref);
+ INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 38275310b196..7d1e676b71ef 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,41 +8,63 @@
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
+#else
+#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
struct intel_runtime_pm;
+struct intel_wakeref;
typedef depot_stack_handle_t intel_wakeref_t;
+struct intel_wakeref_ops {
+ int (*get)(struct intel_wakeref *wf);
+ int (*put)(struct intel_wakeref *wf);
+};
+
struct intel_wakeref {
atomic_t count;
struct mutex mutex;
+
intel_wakeref_t wakeref;
+
+ struct intel_runtime_pm *rpm;
+ const struct intel_wakeref_ops *ops;
+
+ struct work_struct work;
+};
+
+struct intel_wakeref_lockclass {
+ struct lock_class_key mutex;
+ struct lock_class_key work;
};
void __intel_wakeref_init(struct intel_wakeref *wf,
- struct lock_class_key *key);
-#define intel_wakeref_init(wf) do { \
- static struct lock_class_key __key; \
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
+ struct intel_wakeref_lockclass *key);
+#define intel_wakeref_init(wf, rpm, ops) do { \
+ static struct intel_wakeref_lockclass __key; \
\
- __intel_wakeref_init((wf), &__key); \
+ __intel_wakeref_init((wf), (rpm), (ops), &__key); \
} while (0)
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_get_first(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
/**
* intel_wakeref_get: Acquire the wakeref
- * @i915: the drm_i915_private device
* @wf: the wakeref
- * @fn: callback for acquired the wakeref, called only on first acquire.
*
* Acquire a hold on the wakeref. The first user to do so, will acquire
* the runtime pm wakeref and then call the @fn underneath the wakeref
@@ -55,17 +77,32 @@ int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
* code otherwise.
*/
static inline int
-intel_wakeref_get(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+intel_wakeref_get(struct intel_wakeref *wf)
{
+ might_sleep();
if (unlikely(!atomic_inc_not_zero(&wf->count)))
- return __intel_wakeref_get_first(rpm, wf, fn);
+ return __intel_wakeref_get_first(wf);
return 0;
}
/**
+ * __intel_wakeref_get: Acquire the wakeref, again
+ * @wf: the wakeref
+ *
+ * Increment the wakeref counter, only valid if it is already held by
+ * the caller.
+ *
+ * See intel_wakeref_get().
+ */
+static inline void
+__intel_wakeref_get(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ atomic_inc(&wf->count);
+}
+
+/**
* intel_wakeref_get_if_in_use: Acquire the wakeref
* @wf: the wakeref
*
@@ -81,10 +118,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
}
/**
- * intel_wakeref_put: Release the wakeref
- * @i915: the drm_i915_private device
+ * intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref
- * @fn: callback for releasing the wakeref, called only on final release.
+ * @flags: control flags
*
* Release our hold on the wakeref. When there are no more users,
* the runtime pm wakeref will be released after the @fn callback is called
@@ -96,15 +132,26 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
* Returns: 0 if the wakeref was released successfully, or a negative error
* code otherwise.
*/
-static inline int
-intel_wakeref_put(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static inline void
+__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
{
- if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
- return __intel_wakeref_put_last(rpm, wf, fn);
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
+ __intel_wakeref_put_last(wf, flags);
+}
- return 0;
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
+{
+ might_sleep();
+ __intel_wakeref_put(wf, 0);
+}
+
+static inline void
+intel_wakeref_put_async(struct intel_wakeref *wf)
+{
+ __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
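
Putting the new header pieces together: a user now supplies an intel_wakeref_ops with get/put callbacks and lets intel_wakeref_init() set up the lock classes and worker. A hedged wiring sketch; the unit_* names are hypothetical, the types and entry points come from the patch.

static int unit_unpark(struct intel_wakeref *wf)
{
	/* bring the guarded unit up; returning 0 keeps the wakeref held */
	return 0;
}

static int unit_park(struct intel_wakeref *wf)
{
	/* returning 0 means the runtime-pm reference may be dropped now */
	return 0;
}

static const struct intel_wakeref_ops unit_wakeref_ops = {
	.get = unit_unpark,
	.put = unit_park,
};

static void unit_wakeref_setup(struct intel_wakeref *wf,
			       struct intel_runtime_pm *rpm)
{
	intel_wakeref_init(wf, rpm, &unit_wakeref_ops);
}
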
/**
@@ -136,17 +183,57 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
}
/**
- * intel_wakeref_active: Query whether the wakeref is currently held
+ * intel_wakeref_unlock_wait: Wait until the active callback is complete
+ * @wf: the wakeref
+ *
+ * Waits until the active callback (running under @wf->mutex or on another
+ * CPU) has completed.
+ */
+static inline void
+intel_wakeref_unlock_wait(struct intel_wakeref *wf)
+{
+ mutex_lock(&wf->mutex);
+ mutex_unlock(&wf->mutex);
+ flush_work(&wf->work);
+}
+
+/**
+ * intel_wakeref_is_active: Query whether the wakeref is currently held
* @wf: the wakeref
*
* Returns: true if the wakeref is currently held.
*/
static inline bool
-intel_wakeref_active(struct intel_wakeref *wf)
+intel_wakeref_is_active(const struct intel_wakeref *wf)
{
return READ_ONCE(wf->wakeref);
}
+/**
+ * __intel_wakeref_defer_park: Defer the current park callback
+ * @wf: the wakeref
+ */
+static inline void
+__intel_wakeref_defer_park(struct intel_wakeref *wf)
+{
+ lockdep_assert_held(&wf->mutex);
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
+ atomic_set_release(&wf->count, 1);
+}
+
+/**
+ * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
+ * @wf: the wakeref
+ *
+ * Wait for the earlier asynchronous release of the wakeref. Note
+ * this will wait for any third party as well, so make sure you only wait
+ * when you have control over the wakeref and trust no one else is acquiring
+ * it.
+ *
+ * Return: 0 on success, error code if killed.
+ */
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+
struct intel_wakeref_auto {
struct intel_runtime_pm *rpm;
struct timer_list timer;
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 7b4ba84b9fb8..2bb9f9f9a50a 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
+ * Copyright © 2017-2019 Intel Corporation
*/
#include "intel_wopcm.h"
@@ -64,6 +63,11 @@
#define GEN9_GUC_FW_RESERVED SZ_128K
#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
+static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+{
+ return container_of(wopcm, struct drm_i915_private, wopcm);
+}
+
/**
* intel_wopcm_init_early() - Early initialization of the WOPCM.
* @wopcm: pointer to intel_wopcm.
@@ -74,7 +78,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- if (!HAS_GUC(i915))
+ if (!HAS_GT_UC(i915))
return;
if (INTEL_GEN(i915) >= 11)
@@ -82,7 +86,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
else
wopcm->size = GEN9_WOPCM_SIZE;
- DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024);
}
static inline u32 context_reserved_size(struct drm_i915_private *i915)
@@ -95,7 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
return 0;
}
-static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
+static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size)
{
u32 offset;
@@ -107,16 +112,18 @@ static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
- DRM_ERROR("GuC WOPCM size %uKiB is too small. %uKiB needed.\n",
- guc_wopcm_size / 1024,
- (u32)(offset + sizeof(u32)) / 1024);
- return -E2BIG;
+ dev_err(i915->drm.dev,
+ "WOPCM: invalid GuC region size: %uK < %uK\n",
+ guc_wopcm_size / SZ_1K,
+ (u32)(offset + sizeof(u32)) / SZ_1K);
+ return false;
}
- return 0;
+ return true;
}
-static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
+static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
+ u32 guc_wopcm_size, u32 huc_fw_size)
{
/*
* On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
@@ -124,29 +131,81 @@ static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
- DRM_ERROR("HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB).\n",
- huc_fw_size / 1024,
- (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
- return -E2BIG;
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ (guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
+ huc_fw_size / SZ_1K);
+ return false;
}
- return 0;
+ return true;
+}
+
+static inline bool check_hw_restrictions(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 huc_fw_size)
+{
+ if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
+ guc_wopcm_size))
+ return false;
+
+ if ((IS_GEN(i915, 9) ||
+ IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+ !gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
+ return false;
+
+ return true;
}
-static inline int check_hw_restriction(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 huc_fw_size)
+static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 guc_fw_size, u32 huc_fw_size)
{
- int err = 0;
+ const u32 ctx_rsvd = context_reserved_size(i915);
+ u32 size;
+
+ size = wopcm_size - ctx_rsvd;
+ if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
+ dev_err(i915->drm.dev,
+ "WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
+ size / SZ_1K);
+ return false;
+ }
+
+ size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+ if (unlikely(guc_wopcm_size < size)) {
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
+ guc_wopcm_size / SZ_1K, size / SZ_1K);
+ return false;
+ }
- if (IS_GEN(i915, 9))
- err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
+ size = huc_fw_size + WOPCM_RESERVED_SIZE;
+ if (unlikely(guc_wopcm_base < size)) {
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ guc_wopcm_base / SZ_1K, size / SZ_1K);
+ return false;
+ }
+
+ return check_hw_restrictions(i915, guc_wopcm_base, guc_wopcm_size,
+ huc_fw_size);
+}
- if (!err &&
- (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
- err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
+static bool __wopcm_regs_locked(struct intel_uncore *uncore,
+ u32 *guc_wopcm_base, u32 *guc_wopcm_size)
+{
+ u32 reg_base = intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET);
+ u32 reg_size = intel_uncore_read(uncore, GUC_WOPCM_SIZE);
+
+ if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
+ !(reg_base & GUC_WOPCM_OFFSET_VALID))
+ return false;
- return err;
+ *guc_wopcm_base = reg_base & GUC_WOPCM_OFFSET_MASK;
+ *guc_wopcm_size = reg_size & GUC_WOPCM_SIZE_MASK;
+ return true;
}
/**
@@ -156,135 +215,66 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
* This function will partition WOPCM space based on GuC and HuC firmware sizes
* and will allocate max remaining for use by GuC. This function will also
* enforce platform dependent hardware restrictions on GuC WOPCM offset and
- * size. It will fail the WOPCM init if any of these checks were failed, so that
- * the following GuC firmware uploading would be aborted.
- *
- * Return: 0 on success, non-zero error code on failure.
+ * size. It will fail the WOPCM init if any of these checks fail, so that the
+ * subsequent WOPCM register setup and GuC firmware upload are aborted.
*/
-int intel_wopcm_init(struct intel_wopcm *wopcm)
+void intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
- u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+ struct intel_gt *gt = &i915->gt;
+ u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
+ u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
u32 guc_wopcm_base;
u32 guc_wopcm_size;
- u32 guc_wopcm_rsvd;
- int err;
- if (!USES_GUC(i915))
- return 0;
+ if (!guc_fw_size)
+ return;
GEM_BUG_ON(!wopcm->size);
+ GEM_BUG_ON(wopcm->guc.base);
+ GEM_BUG_ON(wopcm->guc.size);
+ GEM_BUG_ON(guc_fw_size >= wopcm->size);
+ GEM_BUG_ON(huc_fw_size >= wopcm->size);
+ GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
- if (i915_inject_load_failure())
- return -E2BIG;
+ if (i915_inject_probe_failure(i915))
+ return;
- if (guc_fw_size >= wopcm->size) {
- DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
- guc_fw_size / 1024);
- return -E2BIG;
+ if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
+ "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K,
+ guc_wopcm_size / SZ_1K);
+ goto check;
}
- if (huc_fw_size >= wopcm->size) {
- DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.",
- huc_fw_size / 1024);
- return -E2BIG;
- }
+ /*
+ * The aligned value of guc_wopcm_base determines the WOPCM space
+ * available for the HuC firmware and the mandatory reserved area.
+ */
+ guc_wopcm_base = huc_fw_size + WOPCM_RESERVED_SIZE;
+ guc_wopcm_base = ALIGN(guc_wopcm_base, GUC_WOPCM_OFFSET_ALIGNMENT);
- guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
- GUC_WOPCM_OFFSET_ALIGNMENT);
- if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) {
- DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n",
- guc_wopcm_base / 1024);
- return -E2BIG;
- }
+ /*
+ * guc_wopcm_base needs to be clamped now so that the following math is
+ * correct. A formal check of the whole WOPCM layout is done below.
+ */
+ guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
- guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd;
+ /* The aligned remainder of the usable WOPCM space can be assigned to GuC. */
+ guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
- DRM_DEBUG_DRIVER("Calculated GuC WOPCM Region: [%uKiB, %uKiB)\n",
- guc_wopcm_base / 1024, guc_wopcm_size / 1024);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
- guc_wopcm_rsvd = GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
- if ((guc_fw_size + guc_wopcm_rsvd) > guc_wopcm_size) {
- DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n",
- (guc_fw_size + guc_wopcm_rsvd) / 1024,
- guc_wopcm_size / 1024);
- return -E2BIG;
+check:
+ if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
+ guc_fw_size, huc_fw_size)) {
+ wopcm->guc.base = guc_wopcm_base;
+ wopcm->guc.size = guc_wopcm_size;
+ GEM_BUG_ON(!wopcm->guc.base);
+ GEM_BUG_ON(!wopcm->guc.size);
}
-
- err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size,
- huc_fw_size);
- if (err)
- return err;
-
- wopcm->guc.base = guc_wopcm_base;
- wopcm->guc.size = guc_wopcm_size;
-
- return 0;
-}
-
-static inline int write_and_verify(struct drm_i915_private *dev_priv,
- i915_reg_t reg, u32 val, u32 mask,
- u32 locked_bit)
-{
- u32 reg_val;
-
- GEM_BUG_ON(val & ~mask);
-
- I915_WRITE(reg, val);
-
- reg_val = I915_READ(reg);
-
- return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
-}
-
-/**
- * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
- * @wopcm: pointer to intel_wopcm.
- *
- * Setup the GuC WOPCM size and offset registers with the calculated values. It
- * will verify the register values to make sure the registers are locked with
- * correct values.
- *
- * Return: 0 on success. -EIO if registers were locked with incorrect values.
- */
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
-{
- struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
- u32 huc_agent;
- u32 mask;
- int err;
-
- if (!USES_GUC(dev_priv))
- return 0;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
- GEM_BUG_ON(!wopcm->guc.size);
- GEM_BUG_ON(!wopcm->guc.base);
-
- err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
- GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
- GUC_WOPCM_SIZE_LOCKED);
- if (err)
- goto err_out;
-
- huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
- mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
- err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
- wopcm->guc.base | huc_agent, mask,
- GUC_WOPCM_OFFSET_VALID);
- if (err)
- goto err_out;
-
- return 0;
-
-err_out:
- DRM_ERROR("Failed to init WOPCM registers:\n");
- DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
- I915_READ(DMA_GUC_WOPCM_OFFSET));
- DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE));
-
- return err;
}
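
The rewritten intel_wopcm_init() above replaces the old error-returning flow with a compute-then-validate one: if the WOPCM registers are already locked, the partition is read back via __wopcm_regs_locked() and only validated; otherwise the HuC firmware plus WOPCM_RESERVED_SIZE is placed at the bottom, the aligned boundary above it becomes guc.base, and whatever is left below the context-reserved top (masked with GUC_WOPCM_SIZE_MASK) becomes guc.size. Either way, __check_layout() is the single gatekeeper, and a failed check simply leaves guc.base/guc.size at zero. A small self-contained sketch of the partition arithmetic with made-up sizes follows; the real constants are platform dependent and live in the i915 headers (not part of the patch):

/* Sketch only: all DEMO_* values are invented. */
#include <stdint.h>
#include <stdio.h>

#define SZ_1K			1024u
#define DEMO_WOPCM_SIZE		(2048u * SZ_1K)	/* total WOPCM */
#define DEMO_CTX_RSVD		(24u * SZ_1K)	/* context reserve at the top */
#define DEMO_BOTTOM_RSVD	(16u * SZ_1K)	/* stands in for WOPCM_RESERVED_SIZE */
#define DEMO_ALIGN		(16u * SZ_1K)	/* stands in for GUC_WOPCM_OFFSET_ALIGNMENT */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t huc_fw_size = 211u * SZ_1K;	/* invented firmware size */
	uint32_t guc_base, guc_size;

	/* HuC firmware and the mandatory reserve sit below the GuC region. */
	guc_base = ALIGN_UP(huc_fw_size + DEMO_BOTTOM_RSVD, DEMO_ALIGN);

	/* Everything between that boundary and the context reserve is GuC's;
	 * the driver additionally clamps the base and masks the size. */
	guc_size = DEMO_WOPCM_SIZE - DEMO_CTX_RSVD - guc_base;

	printf("guc.base = %uK, guc.size = %uK\n",
	       guc_base / SZ_1K, guc_size / SZ_1K);
	return 0;
}
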
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 114401971520..17d6aa86008a 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -25,6 +25,21 @@ struct intel_wopcm {
};
/**
+ * intel_wopcm_guc_base()
+ * @wopcm: intel_wopcm structure
+ *
+ * Returns the base of the WOPCM shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM base.
+ */
+static inline u32 intel_wopcm_guc_base(struct intel_wopcm *wopcm)
+{
+ return wopcm->guc.base;
+}
+
+/**
* intel_wopcm_guc_size()
* @wopcm: intel_wopcm structure
*
@@ -40,7 +55,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
}
void intel_wopcm_init_early(struct intel_wopcm *wopcm);
-int intel_wopcm_init(struct intel_wopcm *wopcm);
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
+void intel_wopcm_init(struct intel_wopcm *wopcm);
#endif
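
With intel_wopcm_init_hw() gone (its definition removed from intel_wopcm.c above and its declaration dropped from this header), the WOPCM register programming no longer lives in these files. The deleted write_and_verify() helper remains a useful idiom for self-locking hardware registers: write the value, read it back, and require that the readback equals the value with the lock bit set. A standalone sketch of that pattern, with a fake register file instead of real MMIO and hypothetical names rather than the i915 implementation:

/* Sketch only. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[4];			/* stands in for MMIO space */

static uint32_t read_reg(unsigned int reg)
{
	return fake_regs[reg];
}

static void write_reg(unsigned int reg, uint32_t val)
{
	fake_regs[reg] = val | 0x1;		/* hardware latches and locks */
}

static int write_and_verify(unsigned int reg, uint32_t val,
			    uint32_t mask, uint32_t locked_bit)
{
	write_reg(reg, val);
	/* The readback must be the programmed value plus the lock bit. */
	return (read_reg(reg) & mask) != (val | locked_bit) ? -EIO : 0;
}

int main(void)
{
	int err = write_and_verify(0, 0x4000, 0xffff, 0x1);

	printf("WOPCM register programming %s\n", err ? "failed" : "verified");
	return 0;
}
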
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
index 4acdb94555b7..14da5c3b569d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -66,26 +65,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"d6de6f55-e526-4f79-a6a6-d7315c09044e",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
new file mode 100644
index 000000000000..0cee3334f0a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_BDW_H__
+#define __I915_OA_BDW_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
index a44195c39923..3e785bafcf99 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"5ee72f5c-092f-421e-8b70-225f7c3e9612",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
new file mode 100644
index 000000000000..0bdf391323ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_BXT_H__
+#define __I915_OA_BXT_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
index 7f60d51b8761..0ea86f70a06c 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
new file mode 100644
index 000000000000..6b862280ab78
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CFLGT2_H__
+#define __I915_OA_CFLGT2_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
index a92c38e3a0ce..fc632dd890bf 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
new file mode 100644
index 000000000000..4ca9d8f89b2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CFLGT3_H__
+#define __I915_OA_CFLGT3_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
index 71ec889a0114..6cd4e9921a8a 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"4a534b07-cba3-414d-8d60-874830e883aa",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
new file mode 100644
index 000000000000..3cac7bbc9c71
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CHV_H__
+#define __I915_OA_CHV_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
index 5c23d883d6c9..1041e8914993 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -77,26 +76,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
new file mode 100644
index 000000000000..db379f5fcbb9
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CNL_H__
+#define __I915_OA_CNL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
index 4bdda66df7d2..bd15ebe9aeeb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
new file mode 100644
index 000000000000..779f343efd11
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_GLK_H__
+#define __I915_OA_GLK_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
index cc6526fdd2bd..133721a8619f 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -94,26 +93,26 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
void
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
+ dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
new file mode 100644
index 000000000000..ba97f732f136
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_HSW_H__
+#define __I915_OA_HSW_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
index baa51427a543..2d92041b754f 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -74,26 +73,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"a291665e-244b-4b76-9b9a-01de9d3c8068",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
new file mode 100644
index 000000000000..5c64112d720e
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_ICL_H__
+#define __I915_OA_ICL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
index 168e49ab0d4d..1c3a67c9cfe0 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"baa3c7e4-52b6-4b85-801e-465a94b746dd",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
new file mode 100644
index 000000000000..810532fa6b63
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_KBLGT2_H__
+#define __I915_OA_KBLGT2_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
index 6ffa553c388e..ebbe5a9c9fdc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"f1792f32-6db2-4b50-b4b2-557128f1688d",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
new file mode 100644
index 000000000000..13d70456fabd
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_KBLGT3_H__
+#define __I915_OA_KBLGT3_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
index 7ce6ee851d43..1bc359ed34e8 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
new file mode 100644
index 000000000000..fda70c51a6ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_SKLGT2_H__
+#define __I915_OA_SKLGT2_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
index 086ca2631e1c..6e352f881310 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"2b985803-d3c9-4629-8a4f-634bfecba0e8",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
new file mode 100644
index 000000000000..df74eba5799e
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_SKLGT3_H__
+#define __I915_OA_SKLGT3_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
index b291a6eb8a87..8f345115a306 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"882fa433-1f4a-4a67-a962-c741888fe5f5",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
new file mode 100644
index 000000000000..378ab7ab78d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_SKLGT4_H__
+#define __I915_OA_SKLGT4_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
new file mode 100644
index 000000000000..a29d93707345
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_tgl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0xD920), 0x00000000 },
+ { _MMIO(0xD900), 0x00000000 },
+ { _MMIO(0xD904), 0xF0800000 },
+ { _MMIO(0xD910), 0x00000000 },
+ { _MMIO(0xD914), 0xF0800000 },
+ { _MMIO(0xDC40), 0x00FF0000 },
+ { _MMIO(0xD940), 0x00000004 },
+ { _MMIO(0xD944), 0x0000FFFF },
+ { _MMIO(0xDC00), 0x00000004 },
+ { _MMIO(0xDC04), 0x0000FFFF },
+ { _MMIO(0xD948), 0x00000003 },
+ { _MMIO(0xD94C), 0x0000FFFF },
+ { _MMIO(0xDC08), 0x00000003 },
+ { _MMIO(0xDC0C), 0x0000FFFF },
+ { _MMIO(0xD950), 0x00000007 },
+ { _MMIO(0xD954), 0x0000FFFF },
+ { _MMIO(0xDC10), 0x00000007 },
+ { _MMIO(0xDC14), 0x0000FFFF },
+ { _MMIO(0xD958), 0x00100002 },
+ { _MMIO(0xD95C), 0x0000FFF7 },
+ { _MMIO(0xDC18), 0x00100002 },
+ { _MMIO(0xDC1C), 0x0000FFF7 },
+ { _MMIO(0xD960), 0x00100002 },
+ { _MMIO(0xD964), 0x0000FFCF },
+ { _MMIO(0xDC20), 0x00100002 },
+ { _MMIO(0xDC24), 0x0000FFCF },
+ { _MMIO(0xD968), 0x00100082 },
+ { _MMIO(0xD96C), 0x0000FFEF },
+ { _MMIO(0xDC28), 0x00100082 },
+ { _MMIO(0xDC2C), 0x0000FFEF },
+ { _MMIO(0xD970), 0x001000C2 },
+ { _MMIO(0xD974), 0x0000FFE7 },
+ { _MMIO(0xDC30), 0x001000C2 },
+ { _MMIO(0xDC34), 0x0000FFE7 },
+ { _MMIO(0xD978), 0x00100001 },
+ { _MMIO(0xD97C), 0x0000FFE7 },
+ { _MMIO(0xDC38), 0x00100001 },
+ { _MMIO(0xDC3C), 0x0000FFE7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x0D04), 0x00000200 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x280E0000 },
+ { _MMIO(0x9888), 0x1E0E0147 },
+ { _MMIO(0x9888), 0x180E0000 },
+ { _MMIO(0x9888), 0x160E0000 },
+ { _MMIO(0x9888), 0x1E0F1000 },
+ { _MMIO(0x9888), 0x1E104000 },
+ { _MMIO(0x9888), 0x2E020100 },
+ { _MMIO(0x9888), 0x2C030004 },
+ { _MMIO(0x9888), 0x38003000 },
+ { _MMIO(0x9888), 0x1E0A8000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x49110000 },
+ { _MMIO(0x9888), 0x5D101400 },
+ { _MMIO(0x9888), 0x1D140020 },
+ { _MMIO(0x9888), 0x1D1103A3 },
+ { _MMIO(0x9888), 0x01110000 },
+ { _MMIO(0x9888), 0x61111000 },
+ { _MMIO(0x9888), 0x1F128000 },
+ { _MMIO(0x9888), 0x17100000 },
+ { _MMIO(0x9888), 0x55100630 },
+ { _MMIO(0x9888), 0x57100000 },
+ { _MMIO(0x9888), 0x31100000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x65100002 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x42000001 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.test_config.uuid,
+ "80a833f0-2504-4321-8894-e9277844ce7b",
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
+
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
+
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
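The tables in this new TGL file are plain (register, value) pairs emitted by GPU Top, and a consumer simply walks them in order. Below is a hedged sketch of that consumption pattern; struct oa_reg and the caller-supplied write helper are illustrative stand-ins for whatever MMIO accessor the driver actually uses.

/* Sketch only: walk a generated (register, value) table in order. */
#include <linux/types.h>

struct oa_reg {
	u32 addr;
	u32 value;
};

static void load_oa_config(const struct oa_reg *regs, int n_regs,
			   void (*write_reg)(u32 addr, u32 value))
{
	int i;

	for (i = 0; i < n_regs; i++)
		write_reg(regs[i].addr, regs[i].value);
}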
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
new file mode 100644
index 000000000000..4c25f0be825c
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_TGL_H__
+#define __I915_OA_TGL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index c0b3537a5fa6..ef572a0c2566 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,10 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/kref.h>
+
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -13,42 +16,90 @@
struct live_active {
struct i915_active base;
+ struct kref ref;
bool retired;
};
-static void __live_active_retire(struct i915_active *base)
+static void __live_get(struct live_active *active)
+{
+ kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+ i915_active_fini(&active->base);
+ kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+ struct live_active *active = container_of(ref, typeof(*active), ref);
+
+ __live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+ kref_put(&active->ref, __live_release);
+}
+
+static int __live_active(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ __live_get(active);
+ return 0;
+}
+
+static void __live_retire(struct i915_active *base)
{
struct live_active *active = container_of(base, typeof(*active), base);
active->retired = true;
+ __live_put(active);
+}
+
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+ struct live_active *active;
+
+ active = kzalloc(sizeof(*active), GFP_KERNEL);
+ if (!active)
+ return NULL;
+
+ kref_init(&active->ref);
+ i915_active_init(&active->base, __live_active, __live_retire);
+
+ return active;
}
-static int __live_active_setup(struct drm_i915_private *i915,
- struct live_active *active)
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
- enum intel_engine_id id;
+ struct live_active *active;
unsigned int count = 0;
int err = 0;
- submit = heap_fence_create(GFP_KERNEL);
- if (!submit)
- return -ENOMEM;
+ active = __live_alloc(i915);
+ if (!active)
+ return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &active->base, __live_active_retire);
- active->retired = false;
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ kfree(active);
+ return ERR_PTR(-ENOMEM);
+ }
- if (!i915_active_acquire(&active->base)) {
- pr_err("First i915_active_acquire should report being idle\n");
- err = -EINVAL;
+ err = i915_active_acquire(&active->base);
+ if (err)
goto out;
- }
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -58,8 +109,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base,
- rq->fence.context, rq);
+ err = i915_active_add_request(&active->base, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
@@ -70,78 +120,84 @@ static int __live_active_setup(struct drm_i915_private *i915,
}
i915_active_release(&active->base);
- if (active->retired && count) {
+ if (READ_ONCE(active->retired) && count) {
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
- if (active->base.count != count) {
+ if (atomic_read(&active->base.count) != count) {
pr_err("i915_active not tracking all requests, found %d, expected %d\n",
- active->base.count, count);
+ atomic_read(&active->base.count), count);
err = -EINVAL;
}
out:
i915_sw_fence_commit(submit);
heap_fence_put(submit);
+ if (err) {
+ __live_put(active);
+ active = ERR_PTR(err);
+ }
- return err;
+ return active;
}
static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
- intel_wakeref_t wakeref;
- int err;
+ struct live_active *active;
+ int err = 0;
/* Check that we get a callback when requests retire upon waiting */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
- err = __live_active_setup(i915, &active);
+ i915_active_wait(&active->base);
+ if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
- i915_active_wait(&active.base);
- if (!active.retired) {
pr_err("i915_active not retired after waiting!\n");
+ i915_active_print(&active->base, &p);
+
err = -EINVAL;
}
- i915_active_fini(&active.base);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ __live_put(active);
+
+ if (igt_flush_test(i915))
err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
- intel_wakeref_t wakeref;
- int err;
+ struct live_active *active;
+ int err = 0;
/* Check that we get a callback when requests are indirectly retired */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
/* waits for & retires all requests */
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- if (!active.retired) {
+ if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
pr_err("i915_active not retired after flushing!\n");
+ i915_active_print(&active->base, &p);
+
err = -EINVAL;
}
- i915_active_fini(&active.base);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ __live_put(active);
+
return err;
}
@@ -152,8 +208,86 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
}
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+ struct intel_engine_cs *engine;
+
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+ drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+ drm_printf(m, "\tpreallocated barriers? %s\n",
+ yesno(!llist_empty(&ref->preallocated_barriers)));
+
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct intel_engine_cs *engine;
+
+ engine = node_to_barrier(it);
+ if (engine) {
+ drm_printf(m, "\tbarrier: %s\n", engine->name);
+ continue;
+ }
+
+ if (i915_active_fence_isset(&it->base)) {
+ drm_printf(m,
+ "\ttimeline: %llx\n", it->timeline);
+ continue;
+ }
+ }
+
+ i915_active_release(ref);
+ }
+}
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock_irq(lock);
+ spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rcu_read_lock();
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct dma_fence *f;
+
+ /* Wait for all active callbacks */
+ f = rcu_dereference(it->base.fence);
+ if (f)
+ spin_unlock_wait(f->lock);
+ }
+ rcu_read_unlock();
+
+ i915_active_release(ref);
+ }
+
+ /* And wait for the retire callback */
+ spin_lock_irq(&ref->tree_lock);
+ spin_unlock_irq(&ref->tree_lock);
+
+ /* ... which may have been on a thread instead */
+ flush_work(&ref->work);
+}
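The selftest rework above moves struct live_active from the stack onto the heap and guards it with a kref, so the retire callback, which may run after the test function is done with the object, can safely drop the last reference. A minimal sketch of that pattern in isolation; my_obj and its helpers are illustrative names, not code from the patch.

#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref ref;
	bool retired;
};

static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->ref);	/* refcount starts at 1 */

	return obj;
}

/* Each holder (test path, retire callback, ...) drops its own reference. */
static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->ref, my_obj_release);
}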
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
new file mode 100644
index 000000000000..1b856bae67b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#define SZ_8G (1ULL << 33)
+
+static void __igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ bool buddy)
+{
+ pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
+ block->header,
+ i915_buddy_block_state(block),
+ i915_buddy_block_order(block),
+ i915_buddy_block_offset(block),
+ i915_buddy_block_size(mm, block),
+ yesno(!block->parent),
+ yesno(buddy));
+}
+
+static void igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+
+ __igt_dump_block(mm, block, false);
+
+ buddy = get_buddy(block);
+ if (buddy)
+ __igt_dump_block(mm, buddy, true);
+}
+
+static int igt_check_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = i915_buddy_block_state(block);
+
+ if (block_state != I915_BUDDY_ALLOCATED &&
+ block_state != I915_BUDDY_FREE &&
+ block_state != I915_BUDDY_SPLIT) {
+ pr_err("block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = i915_buddy_block_size(mm, block);
+ offset = i915_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ pr_err("block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ pr_err("block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ pr_err("block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ pr_err("block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ pr_err("block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = get_buddy(block);
+
+ if (!buddy && block->parent) {
+ pr_err("buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ pr_err("buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_size(mm, buddy) != block_size) {
+ pr_err("buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_state(buddy) == block_state &&
+ block_state == I915_BUDDY_FREE) {
+ pr_err("block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static int igt_check_blocks(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 expected_size,
+ bool is_contiguous)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = igt_check_block(mm, block);
+
+ if (!i915_buddy_block_is_allocated(block)) {
+ pr_err("block not allocated\n"),
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += i915_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ pr_err("size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev block, dump:\n");
+ igt_dump_block(mm, prev);
+ }
+
+ if (block) {
+ pr_err("bad block, dump:\n");
+ igt_dump_block(mm, block);
+ }
+
+ return err;
+}
+
+static int igt_check_mm(struct i915_buddy_mm *mm)
+{
+ struct i915_buddy_block *root;
+ struct i915_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ pr_err("n_roots is zero\n");
+ return -EINVAL;
+ }
+
+ if (mm->n_roots != hweight64(mm->size)) {
+ pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ pr_err("root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = igt_check_block(mm, root);
+
+ if (!i915_buddy_block_is_free(root)) {
+ pr_err("root not free\n");
+ err = -EINVAL;
+ }
+
+ order = i915_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ pr_err("max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct i915_buddy_block,
+ link);
+ if (block != root) {
+ pr_err("root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += i915_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ pr_err("expected mm size=%llx, found=%llx\n", mm->size,
+ total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev root(%u), dump:\n", i - 1);
+ igt_dump_block(mm, prev);
+ }
+
+ if (root) {
+ pr_err("bad root(%u), dump:\n", i);
+ igt_dump_block(mm, root);
+ }
+
+ return err;
+}
+
+static void igt_mm_config(u64 *size, u64 *chunk_size)
+{
+ I915_RND_STATE(prng);
+ u64 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
+ ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
+ s = max(s & -ms, ms);
+
+ *chunk_size = ms;
+ *size = s;
+}
+
+static int igt_buddy_alloc_smoke(void *arg)
+{
+ struct i915_buddy_mm mm;
+ int max_order;
+ u64 chunk_size;
+ u64 mm_size;
+ int err;
+
+ igt_mm_config(&mm_size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
+
+ err = i915_buddy_init(&mm, mm_size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ struct i915_buddy_block *block;
+ int order;
+ LIST_HEAD(blocks);
+ u64 total;
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort\n");
+ break;
+ }
+
+ pr_info("filling from max_order=%u\n", max_order);
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ err = PTR_ERR(block);
+ if (err == -ENOMEM) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ pr_err("buddy_alloc with order=%d failed(%d)\n",
+ order, err);
+ }
+
+ break;
+ }
+
+ list_add_tail(&block->link, &blocks);
+
+ if (i915_buddy_block_order(block) != order) {
+ pr_err("buddy_alloc order mismatch\n");
+ err = -EINVAL;
+ break;
+ }
+
+ total += i915_buddy_block_size(&mm, block);
+ } while (total < mm.size);
+
+ if (!err)
+ err = igt_check_blocks(&mm, &blocks, total, false);
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+ if (err)
+ break;
+
+ cond_resched();
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+ const unsigned int max_order = 16;
+ struct i915_buddy_block *block, *bn;
+ struct i915_buddy_mm mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
+ */
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order < max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ block = i915_buddy_alloc(&mm, max_order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ int order;
+ int err;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
+ */
+
+ err = i915_buddy_init(&mm,
+ PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+ PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ block = i915_buddy_alloc(&mm, 0);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ int order, top;
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
+ */
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+ }
+
+ for (order = top; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM for hole\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &holes);
+
+ block = i915_buddy_alloc(&mm, top);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+ top, max_order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ i915_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ list_splice_tail(&holes, &blocks);
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_range(void *arg)
+{
+ struct i915_buddy_mm mm;
+ unsigned long page_num;
+ LIST_HEAD(blocks);
+ u64 chunk_size;
+ u64 offset;
+ u64 size;
+ u64 rem;
+ int err;
+
+ igt_mm_config(&size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
+
+ err = i915_buddy_init(&mm, size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort, abort, abort!\n");
+ goto err_fini;
+ }
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct i915_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+
+ err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("alloc_range hit -ENOMEM with size=%llx\n",
+ size);
+ } else {
+ pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
+ offset, size, err);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct i915_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_range has no blocks\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_buddy_block_offset(block) != offset) {
+ pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ i915_buddy_block_offset(block), offset);
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = igt_check_blocks(&mm, &tmp, size, true);
+
+ list_splice_tail(&tmp, &blocks);
+
+ if (err)
+ break;
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+
+ cond_resched();
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+err_fini:
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+int i915_buddy_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_buddy_alloc_pessimistic),
+ SUBTEST(igt_buddy_alloc_optimistic),
+ SUBTEST(igt_buddy_alloc_pathological),
+ SUBTEST(igt_buddy_alloc_smoke),
+ SUBTEST(igt_buddy_alloc_range),
+ };
+
+ return i915_subtests(tests, NULL);
+}
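igt_check_block() above relies on the defining property of a buddy allocator: a block of size 2^k at an offset aligned to 2^k has its buddy at offset ^ 2^k, i.e. with bit k toggled, and the two together span one block of the next order. A small standalone (userspace) illustration of that relation, with arbitrary example numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk  = 4096;		/* minimum (PAGE_SIZE-like) chunk */
	uint64_t size   = chunk << 3;	/* a block of order 3 */
	uint64_t offset = size * 5;	/* any offset aligned to the block size */

	printf("block @ %#llx\n", (unsigned long long)offset);
	printf("buddy @ %#llx\n", (unsigned long long)(offset ^ size));
	/* Together the pair covers exactly one block of the next order up. */
	return 0;
}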
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6e87f1..78f36faf2bbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,29 +8,34 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "mock_drm.h"
-static int switch_to_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static int switch_to_context(struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_add(rq);
}
+ i915_gem_context_unlock_engines(ctx);
- return 0;
+ return err;
}
static void trash_stolen(struct drm_i915_private *i915)
@@ -41,6 +46,10 @@ static void trash_stolen(struct drm_i915_private *i915)
unsigned long page;
u32 prng = 0x12345678;
+ /* XXX: fsck. needs some more thought... */
+ if (!i915_ggtt_has_aperture(ggtt))
+ return;
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
@@ -115,8 +124,9 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(i915, false);
- i915_gem_sanitize(i915);
+ i915_gem_restore_gtt_mappings(i915);
+ i915_gem_restore_fences(&i915->ggtt);
+
i915_gem_resume(i915);
}
}
@@ -125,7 +135,7 @@ static int igt_gem_suspend(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
int err;
file = mock_file(i915);
@@ -133,11 +143,9 @@ static int igt_gem_suspend(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -152,13 +160,9 @@ static int igt_gem_suspend(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -166,7 +170,7 @@ static int igt_gem_hibernate(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
int err;
file = mock_file(i915);
@@ -174,11 +178,9 @@ static int igt_gem_hibernate(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -193,13 +195,9 @@ static int igt_gem_hibernate(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -210,8 +208,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
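The reworked switch_to_context() and most of the surrounding selftests lean on the kernel's ERR_PTR encoding, where a pointer return carries either a valid object or an errno. A short, self-contained sketch of the idiom; make_thing() and struct thing are illustrative, not part of the patch.

#include <linux/err.h>
#include <linux/slab.h>

struct thing {
	int payload;
};

static struct thing *make_thing(gfp_t gfp)
{
	struct thing *t = kzalloc(sizeof(*t), gfp);

	if (!t)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */

	return t;
}

static int use_thing(void)
{
	struct thing *t = make_thing(GFP_KERNEL);

	if (IS_ERR(t))
		return PTR_ERR(t);	/* recover -ENOMEM (or any other errno) */

	kfree(t);
	return 0;
}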
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index a3cb0aade6f1..06ef88510209 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -25,6 +25,7 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -42,31 +43,34 @@ static void quirk_add(struct drm_i915_gem_object *obj,
list_add(&obj->st_link, objects);
}
-static int populate_ggtt(struct drm_i915_private *i915,
- struct list_head *objects)
+static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
- u64 size;
count = 0;
- for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- size += I915_GTT_PAGE_SIZE) {
+ do {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- quirk_add(obj, objects);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ if (vma == ERR_PTR(-ENOSPC))
+ break;
+
return PTR_ERR(vma);
+ }
+ quirk_add(obj, objects);
count++;
- }
+ } while (1);
+ pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+ count, ggtt->vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
@@ -92,7 +96,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
return -EINVAL;
}
- if (list_empty(&i915->ggtt.vm.bound_list)) {
+ if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -100,20 +104,16 @@ static int populate_ggtt(struct drm_i915_private *i915,
return 0;
}
-static void unpin_ggtt(struct drm_i915_private *i915)
+static void unpin_ggtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
- mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (vma->obj->mm.quirked)
i915_vma_unpin(vma);
- mutex_unlock(&ggtt->vm.mutex);
}
-static void cleanup_objects(struct drm_i915_private *i915,
- struct list_head *list)
+static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
struct drm_i915_gem_object *obj, *on;
@@ -123,44 +123,44 @@ static void cleanup_objects(struct drm_i915_private *i915,
i915_gem_object_put(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
}
static int igt_evict_something(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict one. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict something */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
@@ -168,13 +168,14 @@ static int igt_evict_something(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_overcommit(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
LIST_HEAD(objects);
@@ -184,11 +185,11 @@ static int igt_overcommit(void *arg)
* We expect it to fail.
*/
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -197,21 +198,21 @@ static int igt_overcommit(void *arg)
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
- pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
+ if (vma != ERR_PTR(-ENOSPC)) {
+ pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma));
err = -EINVAL;
goto cleanup;
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_for_vma(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_mm_node target = {
.start = 0,
.size = 4096,
@@ -221,22 +222,26 @@ static int igt_evict_for_vma(void *arg)
/* Fill the GGTT with pinned objects and try to evict a range. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict the node */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -244,7 +249,7 @@ static int igt_evict_for_vma(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
@@ -257,8 +262,8 @@ static void mock_color_adjust(const struct drm_mm_node *node,
static int igt_evict_for_cache_color(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
const unsigned long flags = PIN_OFFSET_FIXED;
struct drm_mm_node target = {
.start = I915_GTT_PAGE_SIZE * 2,
@@ -270,14 +275,16 @@ static int igt_evict_for_cache_color(void *arg)
LIST_HEAD(objects);
int err;
- /* Currently the use of color_adjust is limited to cache domains within
- * the ggtt, and so the presence of mm.color_adjust is assumed to be
- * i915_gtt_color_adjust throughout our driver, so using a mock color
- * adjust will work just fine for our purposes.
+ /*
+ * Currently the use of color_adjust for the GGTT is limited to cache
+ * coloring and guard pages, and so the presence of mm.color_adjust for
+ * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+ * color adjust will work just fine for our purposes.
*/
ggtt->vm.mm.color_adjust = mock_color_adjust;
+ GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -293,7 +300,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -313,7 +320,9 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -324,7 +333,9 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -334,36 +345,40 @@ static int igt_evict_for_cache_color(void *arg)
err = 0;
cleanup:
- unpin_ggtt(i915);
- cleanup_objects(i915, &objects);
+ unpin_ggtt(ggtt);
+ cleanup_objects(ggtt, &objects);
ggtt->vm.mm.color_adjust = NULL;
return err;
}
static int igt_evict_vm(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict everything. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -371,14 +386,16 @@ static int igt_evict_vm(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_contexts(void *arg)
{
const u64 PRETEND_GGTT_SIZE = 16ull << 20;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct reserved {
@@ -404,14 +421,14 @@ static int igt_evict_contexts(void *arg)
if (!HAS_FULL_PPGTT(i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_gtt_insert(&ggtt->vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -421,15 +438,17 @@ static int igt_evict_contexts(void *arg)
do {
struct reserved *r;
+ mutex_unlock(&ggtt->vm.mutex);
r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ mutex_lock(&ggtt->vm.mutex);
if (!r) {
err = -ENOMEM;
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
+ if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -441,13 +460,13 @@ static int igt_evict_contexts(void *arg)
count++;
} while (1);
drm_mm_remove_node(&hole);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
/* Overfill the GGTT with context objects and so try to evict one. */
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct drm_file *file;
+ struct file *file;
file = mock_file(i915);
if (IS_ERR(file)) {
@@ -456,7 +475,6 @@ static int igt_evict_contexts(void *arg)
}
count = 0;
- mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
struct i915_request *rq;
@@ -474,8 +492,8 @@ static int igt_evict_contexts(void *arg)
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
if (PTR_ERR(rq) != -EBUSY) {
- pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
- ctx->hw_id, engine->name,
+ pr_err("Unexpected error from request alloc (on %s): %d\n",
+ engine->name,
(int)PTR_ERR(rq));
err = PTR_ERR(rq);
}
@@ -493,20 +511,18 @@ static int igt_evict_contexts(void *arg)
count++;
err = 0;
} while (1);
- mutex_unlock(&i915->drm.struct_mutex);
-
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
- mock_file_free(i915, file);
+ fput(file);
if (err)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
out_locked:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -518,8 +534,8 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
+ mutex_unlock(&ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -541,11 +557,8 @@ int i915_gem_evict_mock_selftests(void)
if (!i915)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, &i915->gt);
drm_dev_put(&i915->drm);
return err;
@@ -557,8 +570,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
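The eviction selftests above now take the GGTT's own vm.mutex around each i915_gem_evict_* call instead of relying on the old struct_mutex. A condensed sketch of that locking shape, assuming the usual i915 selftest headers are in scope; evict_one() is an illustrative wrapper, not a function added by the patch.

/* Condensed from igt_evict_something() above. */
static int evict_one(struct i915_ggtt *ggtt)
{
	int err;

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);

	return err;	/* -ENOSPC while everything is still pinned */
}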
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 1a60b9fe8221..b342bef5e7c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,26 +25,21 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
#include "i915_random.h"
#include "i915_selftest.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
+#include "mock_gtt.h"
+#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
- /*
- * As we may hold onto the struct_mutex for inordinate lengths of
- * time, the NMI khungtaskd detector may fire for the free objects
- * worker.
- */
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
}
static void fake_free_pages(struct drm_i915_gem_object *obj,
@@ -88,8 +83,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
}
GEM_BUG_ON(rem);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
return 0;
@@ -101,7 +94,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
{
fake_free_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -113,6 +105,7 @@ static const struct drm_i915_gem_object_ops fake_ops = {
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -126,7 +119,9 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -157,7 +152,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = __ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -208,22 +203,21 @@ static int igt_ppgtt_alloc(void *arg)
}
err_ppgtt_cleanup:
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_vm_put(&ppgtt->vm);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
-static int lowlevel_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int lowlevel_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
I915_RND_STATE(seed_prng);
+ struct i915_vma *mock_vma;
unsigned int size;
- struct i915_vma mock_vma;
- memset(&mock_vma, 0, sizeof(struct i915_vma));
+ mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
+ if (!mock_vma)
+ return -ENOMEM;
/* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -247,8 +241,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
if (order)
break;
} while (count >>= 1);
- if (!count)
+ if (!count) {
+ kfree(mock_vma);
return -ENOMEM;
+ }
GEM_BUG_ON(!order);
GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
@@ -260,7 +256,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -291,22 +287,24 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break;
- mock_vma.pages = obj->mm.pages;
- mock_vma.node.size = BIT_ULL(size);
- mock_vma.node.start = addr;
+ mock_vma->pages = obj->mm.pages;
+ mock_vma->node.size = BIT_ULL(size);
+ mock_vma->node.start = addr;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+ vm->insert_entries(vm, mock_vma,
+ I915_CACHE_NONE, 0);
}
count = n;
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
- vm->clear_range(vm, addr, BIT_ULL(size));
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+ vm->clear_range(vm, addr, BIT_ULL(size));
}
i915_gem_object_unpin_pages(obj);
@@ -314,9 +312,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
kfree(order);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
+ kfree(mock_vma);
return 0;
}
@@ -341,8 +340,7 @@ static void close_object_list(struct list_head *objects,
}
}
-static int fill_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int fill_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -375,7 +373,7 @@ static int fill_hole(struct drm_i915_private *i915,
{ }
}, *p;
- obj = fake_dma_object(i915, full_size);
+ obj = fake_dma_object(vm->i915, full_size);
if (IS_ERR(obj))
break;
@@ -543,7 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
@@ -553,8 +551,7 @@ err:
return err;
}
-static int walk_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int walk_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -576,7 +573,7 @@ static int walk_hole(struct drm_i915_private *i915,
u64 addr;
int err = 0;
- obj = fake_dma_object(i915, size << PAGE_SHIFT);
+ obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
if (IS_ERR(obj))
break;
@@ -631,14 +628,13 @@ err_put:
if (err)
return err;
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
}
-static int pot_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int pot_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -652,7 +648,7 @@ static int pot_hole(struct drm_i915_private *i915,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
- obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -713,8 +709,7 @@ err_obj:
return err;
}
-static int drunk_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int drunk_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -759,7 +754,7 @@ static int drunk_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -817,14 +812,13 @@ err_obj:
if (err)
return err;
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
}
-static int __shrink_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int __shrink_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -841,7 +835,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
u64 size = BIT_ULL(order++);
size = min(size, hole_end - addr);
- obj = fake_dma_object(i915, size);
+ obj = fake_dma_object(vm->i915, size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
@@ -877,6 +871,15 @@ static int __shrink_hole(struct drm_i915_private *i915,
i915_vma_unpin(vma);
addr += size;
+ /*
+ * Since we are injecting allocation faults at random intervals,
+ * wait for this allocation to complete before we change the
+ * fault injection.
+ */
+ err = i915_vma_sync(vma);
+ if (err)
+ break;
+
if (igt_timeout(end_time,
"%s timed out at ofset %llx [%llx - %llx]\n",
__func__, addr, hole_start, hole_end)) {
@@ -886,12 +889,11 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
return err;
}
-static int shrink_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int shrink_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -903,7 +905,7 @@ static int shrink_hole(struct drm_i915_private *i915,
for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
vm->fault_attr.interval = prime;
- err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+ err = __shrink_hole(vm, hole_start, hole_end, end_time);
if (err)
break;
}
@@ -913,8 +915,7 @@ static int shrink_hole(struct drm_i915_private *i915,
return err;
}
-static int shrink_boom(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int shrink_boom(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -936,7 +937,7 @@ static int shrink_boom(struct drm_i915_private *i915,
unsigned int size = sizes[i];
struct i915_vma *vma;
- purge = fake_dma_object(i915, size);
+ purge = fake_dma_object(vm->i915, size);
if (IS_ERR(purge))
return PTR_ERR(purge);
@@ -953,7 +954,7 @@ static int shrink_boom(struct drm_i915_private *i915,
/* Should now be ripe for purging */
i915_vma_unpin(vma);
- explode = fake_dma_object(i915, size);
+ explode = fake_dma_object(vm->i915, size);
if (IS_ERR(explode)) {
err = PTR_ERR(explode);
goto err_purge;
@@ -979,7 +980,7 @@ static int shrink_boom(struct drm_i915_private *i915,
i915_gem_object_put(explode);
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
@@ -993,14 +994,13 @@ err_purge:
}
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- struct drm_file *file;
struct i915_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
+ struct file *file;
int err;
if (!HAS_FULL_PPGTT(dev_priv))
@@ -1010,22 +1010,20 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
- goto out_unlock;
+ goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(ppgtt->vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
- err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+ err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
- mock_file_free(dev_priv, file);
+out_free:
+ fput(file);
return err;
}
@@ -1076,8 +1074,7 @@ static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
}
static int exercise_ggtt(struct drm_i915_private *i915,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
@@ -1087,7 +1084,6 @@ static int exercise_ggtt(struct drm_i915_private *i915,
IGT_TIMEOUT(end_time);
int err = 0;
- mutex_lock(&i915->drm.struct_mutex);
restart:
list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
@@ -1100,7 +1096,7 @@ restart:
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
+ err = func(&ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1108,7 +1104,6 @@ restart:
last = hole_end;
goto restart;
}
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1150,24 +1145,25 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
- mutex_lock(&i915->drm.struct_mutex);
+ if (!i915_ggtt_has_aperture(ggtt))
+ return 0;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
err = i915_gem_object_pin_pages(obj);
if (err)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err)
goto out_unpin;
@@ -1195,7 +1191,7 @@ static int igt_ggtt_page(void *arg)
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
}
- i915_gem_flush_ggtt_writes(i915);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
@@ -1219,13 +1215,13 @@ static int igt_ggtt_page(void *arg)
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&tmp);
+ mutex_unlock(&ggtt->vm.mutex);
out_unpin:
i915_gem_object_unpin_pages(obj);
out_free:
i915_gem_object_put(obj);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1236,20 +1232,23 @@ static void track_vma_bind(struct i915_vma *vma)
atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
+ GEM_BUG_ON(vma->pages);
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+ __i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
mutex_unlock(&vma->vm->mutex);
}
static int exercise_mock(struct drm_i915_private *i915,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
IGT_TIMEOUT(end_time);
int err;
@@ -1258,7 +1257,9 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ err = func(vm, 0, min(vm->total, limit), end_time);
+ i915_vm_put(vm);
mock_context_close(ctx);
return err;
@@ -1296,6 +1297,7 @@ static int igt_gtt_reserve(void *arg)
{
struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
+ I915_RND_STATE(prng);
LIST_HEAD(objects);
u64 total;
int err = -ENODEV;
@@ -1332,11 +1334,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1382,11 +1386,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1422,15 +1428,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, ggtt->vm.total,
- 2*I915_GTT_PAGE_SIZE,
- I915_GTT_MIN_ALIGNMENT);
+ offset = igt_random_offset(&prng,
+ 0, ggtt->vm.total,
+ 2 * I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1499,11 +1508,13 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
ii->size, ii->alignment, ii->start, ii->end,
@@ -1539,10 +1550,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
i915_gem_object_put(obj);
@@ -1597,10 +1610,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1644,10 +1659,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1691,13 +1708,10 @@ int i915_gem_gtt_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -1705,6 +1719,312 @@ out_put:
return err;
}
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ long timeout;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 5);
+ i915_request_put(rq);
+
+ return timeout < 0 ? -EIO : 0;
+}
+
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return err ? ERR_PTR(err) : rq;
+}
+
+static u32 *spinner(u32 *batch, int i)
+{
+ return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+ *spinner(batch, i) = MI_BATCH_BUFFER_END;
+ wmb();
+}
+
+static int igt_cs_tlb(void *arg)
+{
+ const unsigned int count = PAGE_SIZE / 64;
+ const unsigned int chunk_size = count * PAGE_SIZE;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *bbe, *act, *out;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct i915_vma *vma;
+ I915_RND_STATE(prng);
+ struct file *file;
+ unsigned int i;
+ u32 *result;
+ u32 *batch;
+ int err = 0;
+
+ /*
+ * Our mission here is to fool the hardware into executing something
+ * from scratch, as it has not seen the batch move (due to the
+ * missing TLB invalidate).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (i915_is_ggtt(vm))
+ goto out_vm;
+
+ /* Create two pages: a dummy with which to prefill the TLB, and the intended target */
+ bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(bbe)) {
+ err = PTR_ERR(bbe);
+ goto out_vm;
+ }
+
+ batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_bbe;
+ }
+ memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+ i915_gem_object_flush_map(bbe);
+ i915_gem_object_unpin_map(bbe);
+
+ act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
+ goto out_put_bbe;
+ }
+
+ /* Track the execution of each request by writing into a different slot */
+ batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_act;
+ }
+ for (i = 0; i < count; i++) {
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 6);
+ cs[0] = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ cs[1] = lower_32_bits(addr);
+ cs[2] = upper_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START_GEN8;
+ } else {
+ cs[1] = 0;
+ cs[2] = lower_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START;
+ }
+ }
+
+ out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(out)) {
+ err = PTR_ERR(out);
+ goto out_put_batch;
+ }
+ i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(out, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put_batch;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ PIN_USER |
+ PIN_OFFSET_FIXED |
+ (vm->total - PAGE_SIZE));
+ if (err)
+ goto out_put_out;
+ GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+ result = i915_gem_object_pin_map(out, I915_MAP_WB);
+ if (IS_ERR(result)) {
+ err = PTR_ERR(result);
+ goto out_put_out;
+ }
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ IGT_TIMEOUT(end_time);
+ unsigned long pass = 0;
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ struct i915_request *rq;
+ u64 offset;
+
+ offset = igt_random_offset(&prng,
+ 0, vm->total - PAGE_SIZE,
+ chunk_size, PAGE_SIZE);
+
+ err = vm->allocate_va_range(vm, offset, chunk_size);
+ if (err)
+ goto end;
+
+ memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ vma = i915_vma_instance(bbe, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Prime the TLB with the dummy pages */
+ for (i = 0; i < count; i++) {
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ rq = submit_batch(ce, vma->node.start);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_put(rq);
+ }
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: dummy setup timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ vma = i915_vma_instance(act, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Replace the TLB with target batches */
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr;
+
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ addr = vma->node.start + i * 64;
+ cs[4] = MI_NOOP;
+ cs[6] = lower_32_bits(addr);
+ cs[7] = upper_32_bits(addr);
+ wmb();
+
+ rq = submit_batch(ce, addr);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+
+ /* Wait until the context chain has started */
+ if (i == 0) {
+ while (READ_ONCE(result[i]) &&
+ !i915_request_completed(rq))
+ cond_resched();
+ } else {
+ end_spin(batch, i - 1);
+ }
+
+ i915_request_put(rq);
+ }
+ end_spin(batch, count - 1);
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: writes timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (result[i] != i) {
+ pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+ ce->engine->name, pass,
+ offset, i, result[i], i);
+ err = -EINVAL;
+ goto end;
+ }
+ }
+
+ vm->clear_range(vm, offset, chunk_size);
+ pass++;
+ }
+ }
+end:
+ if (igt_flush_test(i915))
+ err = -EIO;
+ i915_gem_context_unlock_engines(ctx);
+ i915_gem_object_unpin_map(out);
+out_put_out:
+ i915_gem_object_put(out);
+out_put_batch:
+ i915_gem_object_unpin_map(act);
+out_put_act:
+ i915_gem_object_put(act);
+out_put_bbe:
+ i915_gem_object_put(bbe);
+out_vm:
+ i915_vm_put(vm);
+out_unlock:
+ fput(file);
+ return err;
+}
+
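For reference, each 64-byte slot of the "act" batch built by igt_cs_tlb() above follows the layout below. This is a minimal sketch restating what the loop emits for gen8+; the helper name is hypothetical and not part of the patch. The trailing MI_BATCH_BUFFER_START branches back to the slot's own GPU address, so the slot spins (re-running the store) until end_spin() flips the MI_NOOP at dword 4 into MI_BATCH_BUFFER_END.

static void emit_tlb_slot_gen8(u32 *cs, u32 index, u64 result_addr)
{
	cs[0] = MI_STORE_DWORD_IMM_GEN4;
	cs[1] = lower_32_bits(result_addr);	/* result[index], pinned at the top of the ppgtt */
	cs[2] = upper_32_bits(result_addr);
	cs[3] = index;				/* value expected back in result[index] */
	cs[4] = MI_NOOP;			/* later replaced by MI_BATCH_BUFFER_END via end_spin() */
	cs[5] = MI_BATCH_BUFFER_START_GEN8;
	/* cs[6]/cs[7] are filled in at submit time with the slot's own address */
}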
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -1722,6 +2042,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_pot),
SUBTEST(igt_ggtt_fill),
SUBTEST(igt_ggtt_page),
+ SUBTEST(igt_cs_tlb),
};
GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d5dc4427d664..34138c7bdd15 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
@@ -12,7 +18,13 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, i915_timeline_live_selftests)
+selftest(gt_engines, intel_engine_live_selftests)
+selftest(gt_timelines, intel_timeline_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
+selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
+selftest(gt_pm, intel_gt_pm_live_selftests)
+selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -24,10 +36,13 @@ selftest(gtt, i915_gem_gtt_live_selftests)
selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
+selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
-selftest(guc, intel_guc_live_selftest)
+selftest(perf, i915_perf_live_selftests)
+/* Here be dragons: keep last to run last! */
+selftest(late_gt_pm, intel_gt_pm_late_selftests)
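These list headers are consumed with the usual X-macro trick: the including C file defines selftest() to whatever expansion it needs before pulling the list in, which is why each header now carries an empty fallback definition of selftest() for standalone inclusion. A minimal sketch of the pattern, assuming illustrative names rather than the exact i915 definitions:

/* Build an enum of test ids from the list... */
enum {
#define selftest(name, func) live_##name,
#include "i915_live_selftests.h"
#undef selftest
};

/* ...and a matching table of named entry points. */
static const struct {
	const char *name;
	int (*func)(struct drm_i915_private *i915);
} live_selftests[] = {
#define selftest(name, func) { #name, func },
#include "i915_live_selftests.h"
#undef selftest
};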
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 510eb176bb2c..5b39bab4da1d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
@@ -15,7 +21,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
-selftest(timelines, i915_timeline_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
@@ -25,3 +31,5 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
+selftest(buddy, i915_buddy_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
new file mode 100644
index 000000000000..d1a1568c47ba
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kref.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+static struct i915_perf_stream *
+test_stream(struct i915_perf *perf)
+{
+ struct drm_i915_perf_open_param param = {};
+ struct perf_open_properties props = {
+ .engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0),
+ .sample_flags = SAMPLE_OA_REPORT,
+ .oa_format = IS_GEN(perf->i915, 12) ?
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
+ .metrics_set = 1,
+ };
+ struct i915_perf_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return NULL;
+
+ stream->perf = perf;
+
+ mutex_lock(&perf->lock);
+ if (i915_oa_stream_init(stream, &param, &props)) {
+ kfree(stream);
+ stream = NULL;
+ }
+ mutex_unlock(&perf->lock);
+
+ return stream;
+}
+
+static void stream_destroy(struct i915_perf_stream *stream)
+{
+ struct i915_perf *perf = stream->perf;
+
+ mutex_lock(&perf->lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&perf->lock);
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+
+ /* Quick check we can create a perf stream */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -EINVAL;
+
+ stream_destroy(stream);
+ return 0;
+}
+
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 *cs;
+ int len;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ len = 5;
+ if (INTEL_GEN(rq->i915) >= 8)
+ len++;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(len);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_WRITE_TIMESTAMP;
+ *cs++ = slot * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static ktime_t poll_status(struct i915_request *rq, int slot)
+{
+ while (!intel_read_status_page(rq->engine, slot) &&
+ !i915_request_completed(rq))
+ cpu_relax();
+
+ return ktime_get();
+}
+
+static int live_noa_delay(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u64 expected;
+ u32 delay;
+ int err;
+ int i;
+
+ /* Check that the GPU delay matches expectations */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ expected = atomic64_read(&stream->perf->noa_programming_delay);
+
+ if (stream->engine->class != RENDER_CLASS) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < 4; i++)
+ intel_write_status_page(stream->engine, 0x100 + i, 0);
+
+ rq = intel_engine_create_kernel_request(stream->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb &&
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ err = write_timestamp(rq, 0x100);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = write_timestamp(rq, 0x102);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ preempt_disable();
+ t0 = poll_status(rq, 0x100);
+ t1 = poll_status(rq, 0x102);
+ preempt_enable();
+
+ pr_info("CPU delay: %lluns, expected %lluns\n",
+ ktime_sub(t1, t0), expected);
+
+ delay = intel_read_status_page(stream->engine, 0x102);
+ delay -= intel_read_status_page(stream->engine, 0x100);
+ delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+ pr_info("GPU delay: %uns, expected %lluns\n",
+ delay, expected);
+
+ if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
+ pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
+ delay / 1000,
+ div_u64(3 * expected, 4000),
+ div_u64(3 * expected, 2000));
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
+int i915_perf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_noa_delay),
+ };
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
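As a worked restatement of the tolerance check in live_noa_delay() above: the GPU delay is read back in CS timestamp ticks and converted with delay_ns = ticks * 1,000,000 / cs_timestamp_frequency_khz, and the test then accepts anything between 3/4 and 3/2 of the programmed delay. A minimal sketch of that acceptance window (helper name hypothetical):

static bool noa_delay_within_tolerance(u64 delay_ns, u64 expected_ns)
{
	/* Equivalent to expected * 3/4 <= delay <= expected * 3/2,
	 * written multiplicatively to avoid rounding the bounds. */
	return 4 * delay_ns >= 3 * expected_ns &&
	       2 * delay_ns <= 3 * expected_ns;
}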
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
new file mode 100644
index 000000000000..5a577a1332f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
+selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(blt, i915_gem_object_blt_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 716a3f19f030..abdfadcf626b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "i915_random.h"
+#include "i915_utils.h"
u64 i915_prandom_u64_state(struct rnd_state *rnd)
{
@@ -87,3 +88,22 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
i915_random_reorder(order, count, state);
return order;
}
+
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align)
+{
+ u64 range, addr;
+
+ BUG_ON(range_overflows(start, len, end));
+ BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ addr = i915_prandom_u64_state(state);
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
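A minimal usage sketch of the new helper, mirroring the igt_gtt_reserve() and igt_cs_tlb() call sites above (the wrapper name is hypothetical): pick a random, suitably aligned slot for a two-page reservation anywhere in the GGTT.

static u64 pick_random_ggtt_offset(struct i915_ggtt *ggtt,
				   struct rnd_state *prng)
{
	return igt_random_offset(prng,
				 0, ggtt->vm.total,
				 2 * I915_GTT_PAGE_SIZE,
				 I915_GTT_MIN_ALIGNMENT);
}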
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 8e1ff9c105b6..05364eca20f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -25,6 +25,7 @@
#ifndef __I915_SELFTESTS_RANDOM_H__
#define __I915_SELFTESTS_RANDOM_H__
+#include <linux/math64.h>
#include <linux/random.h>
#include "../i915_selftest.h"
@@ -57,4 +58,8 @@ void i915_random_reorder(unsigned int *order,
void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
struct rnd_state *state);
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align);
+
#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 298bb7116c51..f89d9c42f1fa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,35 +27,44 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
+#include "igt_spinner.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ return count;
+}
+
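With for_each_uabi_engine() there is no engine id to index per-engine arrays by, so the rewritten tests below count the engines once, size their arrays with kcalloc(), and walk the list again with a manual idx when filling them in. A minimal sketch of that allocation step (helper name hypothetical):

static struct i915_request **
alloc_per_engine_requests(struct drm_i915_private *i915)
{
	return kcalloc(num_uabi_engines(i915),
		       sizeof(struct i915_request *),
		       GFP_KERNEL);
}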
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *request;
- int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
- mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0],
- i915->kernel_context,
- HZ / 10);
+ request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
- goto out_unlock;
+ return -ENOMEM;
i915_request_add(request);
- err = 0;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
static int igt_wait_request(void *arg)
@@ -67,64 +76,63 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ if (!request)
+ return -ENOMEM;
+
+ i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
- goto out_unlock;
+ goto out_request;
}
i915_request_add(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
- goto out_unlock;
+ goto out_request;
}
if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
- goto out_unlock;
+ goto out_request;
}
err = 0;
-out_unlock:
+out_request:
+ i915_request_put(request);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -137,52 +145,45 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_locked;
- }
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ if (!request)
+ return -ENOMEM;
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_locked;
+ goto out;
}
i915_request_add(request);
- mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
pr_err("fence signaled immediately!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
pr_err("fence wait success after submit (expected timeout)!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out (expected success)!\n");
- goto out_device;
+ goto out;
}
if (!dma_fence_is_signaled(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out when complete (expected success)!\n");
- goto out_device;
+ goto out;
}
err = 0;
-out_device:
- mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -191,11 +192,15 @@ static int igt_request_rewind(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
+ struct intel_context *ce;
int err = -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
+
+ ce = i915_gem_context_get_engine(ctx[0], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ request = mock_request(ce, 2 * HZ);
+ intel_context_put(ce);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +210,11 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS0], ctx[1], 0);
+
+ ce = i915_gem_context_get_engine(ctx[1], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ vip = mock_request(ce, 0);
+ intel_context_put(ce);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -223,7 +232,6 @@ static int igt_request_rewind(void *arg)
request->engine->submit_request(request);
rcu_read_unlock();
- mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request\n");
@@ -238,14 +246,12 @@ static int igt_request_rewind(void *arg)
err = 0;
err:
i915_request_put(vip);
- mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -254,28 +260,24 @@ struct smoketest {
struct i915_gem_context **contexts;
atomic_long_t num_waits, num_fences;
int ncontexts, max_batch;
- struct i915_request *(*request_alloc)(struct i915_gem_context *,
- struct intel_engine_cs *);
+ struct i915_request *(*request_alloc)(struct intel_context *ce);
};
static struct i915_request *
-__mock_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__mock_request_alloc(struct intel_context *ce)
{
- return mock_request(engine, ctx, 0);
+ return mock_request(ce, 0);
}
static struct i915_request *
-__live_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__live_request_alloc(struct intel_context *ce)
{
- return igt_request_alloc(ctx, engine);
+ return intel_context_create_request(ce);
}
static int __igt_breadcrumbs_smoketest(void *arg)
{
struct smoketest *t = arg;
- struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@@ -293,7 +295,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
* that the fences were marked as signaled.
*/
- requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+ requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
if (!requests)
return -ENOMEM;
@@ -328,12 +330,13 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_gem_context *ctx =
t->contexts[order[n] % t->ncontexts];
struct i915_request *rq;
+ struct intel_context *ce;
- mutex_lock(BKL);
-
- rq = t->request_alloc(ctx, t->engine);
+ ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+ rq = t->request_alloc(ce);
+ intel_context_put(ce);
if (IS_ERR(rq)) {
- mutex_unlock(BKL);
err = PTR_ERR(rq);
count = n;
break;
@@ -346,8 +349,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
requests[n] = i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(BKL);
-
if (err >= 0)
err = i915_sw_fence_await_dma_fence(wait,
&rq->fence,
@@ -366,14 +367,16 @@ static int __igt_breadcrumbs_smoketest(void *arg)
if (!wait_event_timeout(wait->wait,
i915_sw_fence_done(wait),
- HZ / 2)) {
+ 5 * HZ)) {
struct i915_request *rq = requests[count - 1];
- pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
- count,
+ pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
+ atomic_read(&wait->pending), count,
rq->fence.context, rq->fence.seqno,
t->engine->name);
- i915_gem_set_wedged(t->engine->i915);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(t->engine->gt);
GEM_BUG_ON(!i915_request_completed(rq));
i915_sw_fence_wait(wait);
err = -EIO;
@@ -433,18 +436,16 @@ static int mock_breadcrumbs_smoketest(void *arg)
* See __igt_breadcrumbs_smoketest();
*/
- threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
- t.contexts =
- kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+ t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
if (!t.contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < t.ncontexts; n++) {
t.contexts[n] = mock_context(t.engine->i915, "mock");
if (!t.contexts[n]) {
@@ -452,7 +453,6 @@ static int mock_breadcrumbs_smoketest(void *arg)
goto out_contexts;
}
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@@ -466,6 +466,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
get_task_struct(threads[n]);
}
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
for (n = 0; n < ncpus; n++) {
@@ -482,18 +483,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
atomic_long_read(&t.num_fences),
ncpus);
- mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
for (n = 0; n < t.ncontexts; n++) {
if (!t.contexts[n])
break;
mock_context_close(t.contexts[n]);
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
kfree(t.contexts);
out_threads:
kfree(threads);
-
return ret;
}
@@ -526,40 +524,38 @@ static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
int err = -ENODEV;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit variously sized batches of empty requests to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- for_each_engine(engine, i915, id) {
- struct i915_request *request = NULL;
+ for_each_uabi_engine(engine, i915) {
unsigned long n, prime;
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ return err;
+ intel_engine_pm_get(engine);
for_each_prime_number_from(prime, 1, 8192) {
+ struct i915_request *request = NULL;
+
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = i915_request_create(engine->kernel_context);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- goto out_unlock;
- }
+ if (IS_ERR(request))
+ return PTR_ERR(request);
- /* This space is left intentionally blank.
+ /*
+ * This space is left intentionally blank.
*
* We do not actually want to perform any
* action with this request, we just want
@@ -572,9 +568,11 @@ static int live_nop_request(void *arg)
* for latency.
*/
+ i915_request_get(request);
i915_request_add(request);
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -583,10 +581,11 @@ static int live_nop_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ intel_engine_pm_put(engine);
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ return err;
pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -594,9 +593,6 @@ static int live_nop_request(void *arg)
prime, div64_u64(ktime_to_ns(times[1]), prime));
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -622,7 +618,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -634,8 +630,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
+ /* Force the wait now to avoid including it in the benchmark */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
return vma;
+err_pin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -659,6 +662,7 @@ empty_request(struct intel_engine_cs *engine,
if (err)
goto out_request;
+ i915_request_get(request);
out_request:
i915_request_add(request);
return err ? ERR_PTR(err) : request;
@@ -668,27 +672,21 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
int err = 0;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit variously sized batches of empty requests to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
batch = empty_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_unlock;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
unsigned long n, prime;
@@ -698,10 +696,13 @@ static int live_empty_request(void *arg)
if (err)
goto out_batch;
+ intel_engine_pm_get(engine);
+
/* Warmup / preload */
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
goto out_batch;
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
@@ -710,9 +711,11 @@ static int live_empty_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
goto out_batch;
}
}
@@ -725,6 +728,8 @@ static int live_empty_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ i915_request_put(request);
+ intel_engine_pm_put(engine);
err = igt_live_test_end(&t);
if (err)
@@ -739,16 +744,11 @@ static int live_empty_request(void *arg)
out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -759,7 +759,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, i915->gt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -791,7 +791,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
return vma;
@@ -809,7 +809,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(batch->vm->i915);
+ intel_gt_chipset_flush(batch->vm->gt);
i915_gem_object_unpin_map(batch->obj);
@@ -819,65 +819,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
static int live_all_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
struct intel_engine_cs *engine;
- struct i915_request *request[I915_NUM_ENGINES];
- intel_wakeref_t wakeref;
+ struct i915_request **request;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines simultaneously. We
+ /*
+ * Check we can submit requests to all engines simultaneously. We
* send a recursive batch to each engine - checking that we don't
* block doing so, and that they don't complete too soon.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
batch = recursive_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_unlock;
+ goto out_free;
}
- for_each_engine(engine, i915, id) {
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ request[idx] = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
goto out_request;
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[idx], batch->obj, 0);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
+ idx++;
}
- for_each_engine(engine, i915, id) {
- if (i915_request_completed(request[id])) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
+ idx++;
}
err = recursive_batch_resolve(batch);
@@ -886,10 +894,11 @@ static int live_all_engines(void *arg)
goto out_request;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -898,50 +907,56 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
- i915_request_put(request[id]);
- request[id] = NULL;
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ i915_request_put(request[idx]);
+ request[idx] = NULL;
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id)
- if (request[id])
- i915_request_put(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (request[idx])
+ i915_request_put(request[idx]);
+ idx++;
+ }
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
return err;
}
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_request *request[I915_NUM_ENGINES] = {};
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct i915_request **request;
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines sequentially, such
+ /*
+ * Check we can submit requests to all engines sequentially, such
* that each successive request waits for the earlier ones. This
* tests that we don't execute requests out of order, even though
* they are running on independent engines.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
batch = recursive_batch(i915);
@@ -949,64 +964,69 @@ static int live_sequential_engines(void *arg)
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
__func__, engine->name, err);
- goto out_unlock;
+ goto out_free;
}
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ request[idx] = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
if (prev) {
- err = i915_request_await_dma_fence(request[id],
+ err = i915_request_await_dma_fence(request[idx],
&prev->fence);
if (err) {
- i915_request_add(request[id]);
+ i915_request_add(request[idx]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[idx],
+ batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
- prev = request[id];
+ prev = request[idx];
+ idx++;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- if (i915_request_completed(request[id])) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
- err = recursive_batch_resolve(request[id]->batch);
+ err = recursive_batch_resolve(request[idx]->batch);
if (err) {
pr_err("%s: failed to resolve batch, err=%d\n",
__func__, err);
goto out_request;
}
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1015,33 +1035,244 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
u32 *cmd;
- if (!request[id])
+ if (!request[idx])
break;
- cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+ cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
- i915_gem_object_unpin_map(request[id]->batch->obj);
+ i915_gem_object_unpin_map(request[idx]->batch->obj);
}
- i915_vma_put(request[id]->batch);
- i915_request_put(request[id]);
+ i915_vma_put(request[idx]->batch);
+ i915_request_put(request[idx]);
+ idx++;
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
+ return err;
+}
+
+static int __live_parallel_engine1(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+
+ count = 0;
+ intel_engine_pm_get(engine);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
+
+ pr_info("%s: %lu request + sync\n", engine->name, count);
+ return err;
+}
+
+static int __live_parallel_engineN(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+
+ count = 0;
+ intel_engine_pm_get(engine);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
+
+ pr_info("%s: %lu requests\n", engine->name, count);
+ return err;
+}
+
+static bool wake_all(struct drm_i915_private *i915)
+{
+ if (atomic_dec_and_test(&i915->selftest.counter)) {
+ wake_up_var(&i915->selftest.counter);
+ return true;
+ }
+
+ return false;
+}
+
+static int wait_for_all(struct drm_i915_private *i915)
+{
+ if (wake_all(i915))
+ return 0;
+
+ if (wait_var_event_timeout(&i915->selftest.counter,
+ !atomic_read(&i915->selftest.counter),
+ i915_selftest.timeout_jiffies))
+ return 0;
+
+ return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ /*
+ * Create a spinner running for eternity on each engine. If a second
+ * spinner is incorrectly placed on the same engine, it will not be
+ * able to start in time.
+ */
+
+ if (igt_spinner_init(&spin, engine->gt)) {
+ wake_all(engine->i915);
+ return -ENOMEM;
+ }
+
+ intel_engine_pm_get(engine);
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP); /* no preemption */
+ intel_engine_pm_put(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ if (err == -ENODEV)
+ err = 0;
+ wake_all(engine->i915);
+ goto out_spin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin, rq)) {
+ /* Occupy this engine for the whole test */
+ err = wait_for_all(engine->i915);
+ } else {
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ err = -EINVAL;
+ }
+ igt_spinner_end(&spin);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_engine1,
+ __live_parallel_engineN,
+ __live_parallel_spin,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct task_struct **tsk;
+ int err = 0;
+
+ /*
+ * Check we can submit requests to all engines concurrently. This
+ * tests that we load up the system maximally.
+ */
+
+ tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk)
+ return -ENOMEM;
+
+ for (fn = func; !err && *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%pS", fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ tsk[idx] = kthread_run(*fn, engine,
+ "igt/parallel:%s",
+ engine->name);
+ if (IS_ERR(tsk[idx])) {
+ err = PTR_ERR(tsk[idx]);
+ break;
+ }
+ get_task_struct(tsk[idx++]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(tsk[idx]))
+ break;
+
+ status = kthread_stop(tsk[idx]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[idx++]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+ kfree(tsk);
return err;
}
@@ -1085,16 +1316,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
static int live_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct smoketest t[I915_NUM_ENGINES];
- unsigned int ncpus = num_online_cpus();
+ const unsigned int nengines = num_uabi_engines(i915);
+ const unsigned int ncpus = num_online_cpus();
unsigned long num_waits, num_fences;
struct intel_engine_cs *engine;
struct task_struct **threads;
struct igt_live_test live;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
- struct drm_file *file;
- unsigned int n;
+ struct smoketest *smoke;
+ unsigned int n, idx;
+ struct file *file;
int ret = 0;
/*
@@ -1113,29 +1344,31 @@ static int live_breadcrumbs_smoketest(void *arg)
goto out_rpm;
}
- threads = kcalloc(ncpus * I915_NUM_ENGINES,
- sizeof(*threads),
- GFP_KERNEL);
- if (!threads) {
+ smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+ if (!smoke) {
ret = -ENOMEM;
goto out_file;
}
- memset(&t[0], 0, sizeof(t[0]));
- t[0].request_alloc = __live_request_alloc;
- t[0].ncontexts = 64;
- t[0].contexts = kmalloc_array(t[0].ncontexts,
- sizeof(*t[0].contexts),
- GFP_KERNEL);
- if (!t[0].contexts) {
+ threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ ret = -ENOMEM;
+ goto out_smoke;
+ }
+
+ smoke[0].request_alloc = __live_request_alloc;
+ smoke[0].ncontexts = 64;
+ smoke[0].contexts = kcalloc(smoke[0].ncontexts,
+ sizeof(*smoke[0].contexts),
+ GFP_KERNEL);
+ if (!smoke[0].contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&i915->drm.struct_mutex);
- for (n = 0; n < t[0].ncontexts; n++) {
- t[0].contexts[n] = live_context(i915, file);
- if (!t[0].contexts[n]) {
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+ if (!smoke[0].contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
}
@@ -1145,45 +1378,48 @@ static int live_breadcrumbs_smoketest(void *arg)
if (ret)
goto out_contexts;
- for_each_engine(engine, i915, id) {
- t[id] = t[0];
- t[id].engine = engine;
- t[id].max_batch = max_batches(t[0].contexts[0], engine);
- if (t[id].max_batch < 0) {
- ret = t[id].max_batch;
- mutex_unlock(&i915->drm.struct_mutex);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ smoke[idx] = smoke[0];
+ smoke[idx].engine = engine;
+ smoke[idx].max_batch =
+ max_batches(smoke[0].contexts[0], engine);
+ if (smoke[idx].max_batch < 0) {
+ ret = smoke[idx].max_batch;
goto out_flush;
}
/* One ring interleaved between requests from all cpus */
- t[id].max_batch /= num_online_cpus() + 1;
+ smoke[idx].max_batch /= num_online_cpus() + 1;
pr_debug("Limiting batches to %d requests on %s\n",
- t[id].max_batch, engine->name);
+ smoke[idx].max_batch, engine->name);
for (n = 0; n < ncpus; n++) {
struct task_struct *tsk;
tsk = kthread_run(__igt_breadcrumbs_smoketest,
- &t[id], "igt/%d.%d", id, n);
+ &smoke[idx], "igt/%d.%d", idx, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
- mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
get_task_struct(tsk);
- threads[id * ncpus + n] = tsk;
+ threads[idx * ncpus + n] = tsk;
}
+
+ idx++;
}
- mutex_unlock(&i915->drm.struct_mutex);
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
out_flush:
+ idx = 0;
num_waits = 0;
num_fences = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (n = 0; n < ncpus; n++) {
- struct task_struct *tsk = threads[id * ncpus + n];
+ struct task_struct *tsk = threads[idx * ncpus + n];
int err;
if (!tsk)
@@ -1196,21 +1432,22 @@ out_flush:
put_task_struct(tsk);
}
- num_waits += atomic_long_read(&t[id].num_waits);
- num_fences += atomic_long_read(&t[id].num_fences);
+ num_waits += atomic_long_read(&smoke[idx].num_waits);
+ num_fences += atomic_long_read(&smoke[idx].num_fences);
+ idx++;
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
- mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
- mutex_unlock(&i915->drm.struct_mutex);
- kfree(t[0].contexts);
+ kfree(smoke[0].contexts);
out_threads:
kfree(threads);
+out_smoke:
+ kfree(smoke);
out_file:
- mock_file_free(i915, file);
+ fput(file);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1223,11 +1460,12 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_nop_request),
SUBTEST(live_all_engines),
SUBTEST(live_sequential_engines),
+ SUBTEST(live_parallel_engines),
SUBTEST(live_empty_request),
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index b18eaefef798..d3bf9eefb682 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,11 +23,14 @@
#include <linux/random.h>
-#include "../i915_drv.h"
-#include "../i915_selftest.h"
+#include "gt/intel_gt_pm.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
- .timeout_ms = 1000,
+ .timeout_ms = 500,
};
int i915_mock_sanitycheck(void)
@@ -54,6 +57,12 @@ enum {
#undef selftest
};
+enum {
+#define selftest(name, func) perf_##name,
+#include "i915_perf_selftests.h"
+#undef selftest
+};
+
struct selftest {
bool enabled;
const char *name;
@@ -75,6 +84,12 @@ static struct selftest live_selftests[] = {
};
#undef selftest
+#define selftest(n, f) [perf_##n] = { .name = #n, { .live = f } },
+static struct selftest perf_selftests[] = {
+#include "i915_perf_selftests.h"
+};
+#undef selftest
+
/* Embed the line number into the parameter name so that we can order tests */
#define selftest(n, func) selftest_0(n, func, param(n))
#define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
@@ -90,6 +105,13 @@ module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
#include "i915_live_selftests.h"
#undef selftest_0
#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __perf_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, perf_selftests[perf_##n].enabled, bool, 0400);
+#include "i915_perf_selftests.h"
+#undef selftest_0
+#undef param
#undef selftest
static void set_default_test_all(struct selftest *st, unsigned int count)
@@ -183,7 +205,7 @@ int i915_live_selftests(struct pci_dev *pdev)
if (!i915_selftest.live)
return 0;
- err = run_selftests(live, to_i915(pci_get_drvdata(pdev)));
+ err = run_selftests(live, pdev_to_i915(pdev));
if (err) {
i915_selftest.live = err;
return err;
@@ -197,6 +219,27 @@ int i915_live_selftests(struct pci_dev *pdev)
return 0;
}
+int i915_perf_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.perf)
+ return 0;
+
+ err = run_selftests(perf, pdev_to_i915(pdev));
+ if (err) {
+ i915_selftest.perf = err;
+ return err;
+ }
+
+ if (i915_selftest.perf < 0) {
+ i915_selftest.perf = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
static bool apply_subtest_filter(const char *caller, const char *name)
{
char *filter, *sep, *tok;
@@ -240,7 +283,65 @@ static bool apply_subtest_filter(const char *caller, const char *name)
return result;
}
+int __i915_nop_setup(void *data)
+{
+ return 0;
+}
+
+int __i915_nop_teardown(int err, void *data)
+{
+ return err;
+}
+
+int __i915_live_setup(void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(&i915->gt))
+ return -EIO;
+
+ return intel_gt_terminally_wedged(&i915->gt);
+}
+
+int __i915_live_teardown(int err, void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ i915_gem_drain_freed_objects(i915);
+
+ return err;
+}
+
+int __intel_gt_live_setup(void *data)
+{
+ struct intel_gt *gt = data;
+
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(gt))
+ return -EIO;
+
+ return intel_gt_terminally_wedged(gt);
+}
+
+int __intel_gt_live_teardown(int err, void *data)
+{
+ struct intel_gt *gt = data;
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_gem_drain_freed_objects(gt->i915);
+
+ return err;
+}
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data)
@@ -255,10 +356,17 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
+ err = setup(data);
+ if (err) {
+ pr_err(DRIVER_NAME "/%s: setup failed for %s\n",
+ caller, st->name);
+ return err;
+ }
+
pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
- err = st->func(data);
+ err = teardown(st->func(data), data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
caller, st->name, err);
@@ -297,3 +405,6 @@ MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardw
module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
+
+module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400);
+MODULE_PARM_DESC(perf_selftests, "Run performance orientated selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fbc79b14823a..58b5f40a07dd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,6 +24,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h"
@@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != ctx->vm) {
+ if (vma->vm != rcu_access_pointer(ctx->vm)) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = ctx->vm;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = checked_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -170,7 +173,7 @@ static int igt_vma_create(void *arg)
}
nc = 0;
- for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+ for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
for (; nc < num_ctx; nc++) {
ctx = mock_context(i915, "mock");
if (!ctx)
@@ -193,6 +196,8 @@ static int igt_vma_create(void *arg)
list_del_init(&ctx->link);
mock_context_close(ctx);
}
+
+ cond_resched();
}
end:
@@ -341,6 +346,8 @@ static int igt_vma_pin1(void *arg)
goto out;
}
}
+
+ cond_resched();
}
err = 0;
@@ -597,6 +604,8 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
+
+ cond_resched();
}
}
}
@@ -617,7 +626,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
struct sgt_iter sgt;
dma_addr_t dma;
- for_each_sgt_dma(dma, sgt, vma->pages) {
+ for_each_sgt_daddr(dma, sgt, vma->pages) {
dma_addr_t src;
if (!size) {
@@ -752,6 +761,8 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
+
+ cond_resched();
}
}
@@ -823,13 +834,10 @@ int i915_vma_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -871,8 +879,6 @@ static int igt_vma_remapped_gtt(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
@@ -961,12 +967,13 @@ static int igt_vma_remapped_gtt(void *arg)
}
}
i915_vma_unpin_iomap(vma);
+
+ cond_resched();
}
}
out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c
new file mode 100644
index 000000000000..fb506b699095
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+#include "igt_atomic.h"
+
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+const struct igt_atomic_section igt_atomic_phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+};
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
index 93ec89f487ec..1991798abf4b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_atomic.h
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -6,51 +6,12 @@
#ifndef IGT_ATOMIC_H
#define IGT_ATOMIC_H
-#include <linux/preempt.h>
-#include <linux/bottom_half.h>
-#include <linux/irqflags.h>
-
-static void __preempt_begin(void)
-{
- preempt_disable();
-}
-
-static void __preempt_end(void)
-{
- preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
- local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
- local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
- local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
- local_irq_enable();
-}
-
struct igt_atomic_section {
const char *name;
void (*critical_section_begin)(void);
void (*critical_section_end)(void);
};
-static const struct igt_atomic_section igt_atomic_phases[] = {
- { "preempt", __preempt_begin, __preempt_end },
- { "softirq", __softirq_begin, __softirq_end },
- { "hardirq", __hardirq_begin, __hardirq_end },
- { }
-};
+extern const struct igt_atomic_section igt_atomic_phases[];
#endif /* IGT_ATOMIC_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 5bfd1b2626a2..7b0939e3f007 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,38 +4,32 @@
* Copyright © 2018 Intel Corporation
*/
-#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_selftest.h"
#include "igt_flush_test.h"
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+int igt_flush_test(struct drm_i915_private *i915)
{
- int ret = i915_terminally_wedged(i915) ? -EIO : 0;
- int repeat = !!(flags & I915_WAIT_LOCKED);
+ struct intel_gt *gt = &i915->gt;
+ int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched();
- do {
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- GEM_TRACE("%pS timed out.\n",
- __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
- repeat = 0;
- ret = -EIO;
- }
-
- /* Ensure we also flush after wedging. */
- if (flags & I915_WAIT_LOCKED)
- i915_retire_requests(i915);
- } while (repeat--);
+ intel_gt_set_wedged(gt);
+ ret = -EIO;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
index 63e009927c43..7541fa74e641 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -9,6 +9,6 @@
struct drm_i915_private;
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+int igt_flush_test(struct drm_i915_private *i915);
#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 3e902761cd16..c130010a7033 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "i915_drv.h"
+#include "gt/intel_gt_requests.h"
#include "../i915_selftest.h"
#include "igt_flush_test.h"
@@ -15,20 +16,16 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
t->i915 = i915;
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -37,7 +34,7 @@ int igt_live_test_begin(struct igt_live_test *t,
t->reset_global = i915_reset_count(&i915->gpu_error);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
t->reset_engine[id] =
i915_reset_engine_count(&i915->gpu_error, engine);
@@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
return -EIO;
if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
@@ -62,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, &i915->gt, id) {
if (t->reset_engine[id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index c0e9f99d50de..36ed42736c52 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,7 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
-#include "../i915_gem.h"
+#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c
new file mode 100644
index 000000000000..583a4ff8b8c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "igt_mmap.h"
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+ struct drm_vma_offset_node *node,
+ unsigned long addr,
+ unsigned long prot,
+ unsigned long flags)
+{
+ struct file *file;
+ int err;
+
+ /* Pretend to open("/dev/dri/card0") */
+ file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = drm_vma_node_allow(node, file->private_data);
+ if (err) {
+ addr = err;
+ goto out_file;
+ }
+
+ addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
+ prot, flags, drm_vma_node_offset_addr(node));
+
+ drm_vma_node_revoke(node, file->private_data);
+out_file:
+ fput(file);
+ return addr;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h
new file mode 100644
index 000000000000..6e716cb59d7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_MMAP_H
+#define IGT_MMAP_H
+
+struct drm_i915_private;
+struct drm_vma_offset_node;
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+ struct drm_vma_offset_node *node,
+ unsigned long addr,
+ unsigned long prot,
+ unsigned long flags);
+
+#endif /* IGT_MMAP_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 587df6fd4ffe..9f8590b868a9 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -7,47 +7,45 @@
#include "igt_reset.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
#include "../i915_drv.h"
-void igt_global_reset_lock(struct drm_i915_private *i915)
+void igt_global_reset_lock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
+ pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags);
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
+ while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
TASK_UNINTERRUPTIBLE);
}
}
-void igt_global_reset_unlock(struct drm_i915_private *i915)
+void igt_global_reset_unlock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ for_each_engine(engine, gt, id)
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+ wake_up_all(&gt->reset.queue);
}
-bool igt_force_reset(struct drm_i915_private *i915)
+bool igt_force_reset(struct intel_gt *gt)
{
- i915_gem_set_wedged(i915);
- i915_reset(i915, 0, NULL);
+ intel_gt_set_wedged(gt);
+ intel_gt_reset(gt, 0, NULL);
- return !i915_reset_failed(i915);
+ return !intel_gt_is_wedged(gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
index 363bd853e50f..851873b67ab3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.h
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -7,10 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_RESET_H__
#define __I915_SELFTESTS_IGT_RESET_H__
-#include "../i915_drv.h"
+#include <linux/types.h>
-void igt_global_reset_lock(struct drm_i915_private *i915);
-void igt_global_reset_unlock(struct drm_i915_private *i915);
-bool igt_force_reset(struct drm_i915_private *i915);
+struct intel_gt;
+
+void igt_global_reset_lock(struct intel_gt *gt);
+void igt_global_reset_unlock(struct intel_gt *gt);
+bool igt_force_reset(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 1e59b543cf27..e8a58fe49c39 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,29 +3,28 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "igt_spinner.h"
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
unsigned int mode;
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
+ spin->gt = gt;
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->hws)) {
err = PTR_ERR(spin->hws);
goto err;
}
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->obj)) {
err = PTR_ERR(spin->obj);
goto err_hws;
@@ -39,7 +38,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
}
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
- mode = i915_coherent_map_type(i915);
+ mode = i915_coherent_map_type(gt->i915);
vaddr = i915_gem_object_pin_map(spin->obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -77,7 +76,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -85,20 +87,26 @@ static int move_to_active(struct i915_vma *vma,
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
+ unsigned int flags;
u32 *batch;
int err;
- vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
+ GEM_BUG_ON(spin->gt != ce->vm->gt);
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ return ERR_PTR(-ENODEV);
+
+ vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
+ hws = i915_vma_instance(spin->hws, ce->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
@@ -110,7 +118,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err)
goto unpin_vma;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
@@ -126,28 +134,52 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ } else if (INTEL_GEN(rq->i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else if (INTEL_GEN(rq->i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = hws_address(hws, rq);
+ }
*batch++ = rq->fence.seqno;
*batch++ = arbitration_command;
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ if (INTEL_GEN(rq->i915) >= 8)
+ *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ else if (IS_HASWELL(rq->i915))
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+ else if (INTEL_GEN(rq->i915) >= 6)
+ *batch++ = MI_BATCH_BUFFER_START;
+ else
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
+
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
- rq->timeline->has_initial_breadcrumb) {
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
}
- err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+ flags = 0;
+ if (INTEL_GEN(rq->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
cancel_rq:
if (err) {
@@ -172,7 +204,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
void igt_spinner_end(struct igt_spinner *spin)
{
*spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(spin->gt);
}
void igt_spinner_fini(struct igt_spinner *spin)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 34a88ac9b47a..ec62c9ef320b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -14,21 +14,22 @@
#include "i915_request.h"
#include "i915_selftest.h"
+struct intel_gt;
+
struct igt_spinner {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
u32 *batch;
void *seqno;
};
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
void igt_spinner_fini(struct igt_spinner *spin);
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command);
void igt_spinner_end(struct igt_spinner *spin);
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
deleted file mode 100644
index 08e5ff11bbd9..000000000000
--- a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#ifndef IGT_WEDGE_ME_H
-#define IGT_WEDGE_ME_H
-
-#include <linux/workqueue.h>
-
-#include "../i915_gem.h"
-
-struct drm_i915_private;
-
-struct igt_wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const char *name;
-};
-
-static void __igt_wedge_me(struct work_struct *work)
-{
- struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%s timed out, cancelling test.\n", w->name);
-
- GEM_TRACE("%s timed out.\n", w->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __igt_init_wedge(struct igt_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
-{
- w->i915 = i915;
- w->name = name;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __igt_fini_wedge(struct igt_wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __igt_fini_wedge((W)))
-
-#endif /* IGT_WEDGE_ME_H */
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
deleted file mode 100644
index 6ca8584cd64c..000000000000
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_selftest.h"
-#include "gem/i915_gem_pm.h"
-
-/* max doorbell number + negative test for each client type */
-#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
-
-static struct intel_guc_client *clients[ATTEMPTS];
-
-static bool available_dbs(struct intel_guc *guc, u32 priority)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- /* first half is used for normal priority, second half for high */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
- if (id < end)
- return true;
-
- return false;
-}
-
-static int check_all_doorbells(struct intel_guc *guc)
-{
- u16 db_id;
-
- pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS);
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
- if (!doorbell_ok(guc, db_id)) {
- pr_err("doorbell %d, not ok\n", db_id);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int ring_doorbell_nop(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
- int err;
-
- client->use_nop_wqi = true;
-
- spin_lock_irq(&client->wq_lock);
-
- guc_wq_item_append(client, 0, 0, 0, 0);
- guc_ring_doorbell(client);
-
- spin_unlock_irq(&client->wq_lock);
-
- client->use_nop_wqi = false;
-
- /* if there are no issues GuC will update the WQ head and keep the
- * WQ in active status
- */
- err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
- if (err) {
- pr_err("doorbell %u ring failed!\n", client->doorbell_id);
- return -EIO;
- }
-
- if (desc->wq_status != WQ_STATUS_ACTIVE) {
- pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
- client->doorbell_id, desc->wq_status);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Basic client sanity check, handy to validate create_clients.
- */
-static int validate_client(struct intel_guc_client *client,
- int client_priority,
- bool is_preempt_client)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
- struct i915_gem_context *ctx_owner = is_preempt_client ?
- dev_priv->preempt_context : dev_priv->kernel_context;
-
- if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->engine_mask ||
- client->priority != client_priority ||
- client->doorbell_id == GUC_DOORBELL_INVALID)
- return -EINVAL;
- else
- return 0;
-}
-
-static bool client_doorbell_in_sync(struct intel_guc_client *client)
-{
- return !client || doorbell_ok(client->guc, client->doorbell_id);
-}
-
-/*
- * Check that we're able to synchronize guc_clients with their doorbells
- *
- * We're creating clients and reserving doorbells once, at module load. During
- * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
- * GuC being reset. In other words - GuC clients are still around, but the
- * status of their doorbells may be incorrect. This is the reason behind
- * validating that the doorbells status expected by the driver matches what the
- * GuC/HW have.
- */
-static int igt_guc_clients(void *args)
-{
- struct drm_i915_private *dev_priv = args;
- intel_wakeref_t wakeref;
- struct intel_guc *guc;
- int err = 0;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- /*
- * Get rid of clients created during driver load because the test will
- * recreate them.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- if (guc->execbuf_client || guc->preempt_client) {
- pr_err("guc_clients_destroy lied!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = guc_clients_create(guc);
- if (err) {
- pr_err("Failed to create clients\n");
- goto unlock;
- }
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = validate_client(guc->execbuf_client,
- GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
- if (err) {
- pr_err("execbug client validation failed\n");
- goto out;
- }
-
- if (guc->preempt_client) {
- err = validate_client(guc->preempt_client,
- GUC_CLIENT_PRIORITY_KMD_HIGH, true);
- if (err) {
- pr_err("preempt client validation failed\n");
- goto out;
- }
- }
-
- /* each client should now have reserved a doorbell */
- if (!has_doorbell(guc->execbuf_client) ||
- (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
- pr_err("guc_clients_create didn't reserve doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /* Now enable the clients */
- guc_clients_enable(guc);
-
- /* each client should now have received a doorbell */
- if (!client_doorbell_in_sync(guc->execbuf_client) ||
- !client_doorbell_in_sync(guc->preempt_client)) {
- pr_err("failed to initialize the doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Basic test - an attempt to reallocate a valid doorbell to the
- * client it is currently assigned should not cause a failure.
- */
- err = create_doorbell(guc->execbuf_client);
-
-out:
- /*
- * Leave clean state for other test, plus the driver always destroy the
- * clients during unload.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- guc_clients_create(guc);
- guc_clients_enable(guc);
-unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return err;
-}
-
-/*
- * Create as many clients as number of doorbells. Note that there's already
- * client(s)/doorbell(s) created during driver load, but this test creates
- * its own and do not interact with the existing ones.
- */
-static int igt_guc_doorbells(void *arg)
-{
- struct drm_i915_private *dev_priv = arg;
- intel_wakeref_t wakeref;
- struct intel_guc *guc;
- int i, err = 0;
- u16 db_id;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- for (i = 0; i < ATTEMPTS; i++) {
- clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- i % GUC_CLIENT_PRIORITY_NUM,
- dev_priv->kernel_context);
-
- if (!clients[i]) {
- pr_err("[%d] No guc client\n", i);
- err = -EINVAL;
- goto out;
- }
-
- if (IS_ERR(clients[i])) {
- if (PTR_ERR(clients[i]) != -ENOSPC) {
- pr_err("[%d] unexpected error\n", i);
- err = PTR_ERR(clients[i]);
- goto out;
- }
-
- if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
- pr_err("[%d] non-db related alloc fail\n", i);
- err = -EINVAL;
- goto out;
- }
-
- /* expected, ran out of dbs for this client type */
- continue;
- }
-
- /*
- * The check below is only valid because we keep a doorbell
- * assigned during the whole life of the client.
- */
- if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
- pr_err("[%d] more clients than doorbells (%d >= %d)\n",
- i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
- err = -EINVAL;
- goto out;
- }
-
- err = validate_client(clients[i],
- i % GUC_CLIENT_PRIORITY_NUM, false);
- if (err) {
- pr_err("[%d] client_alloc sanity check failed!\n", i);
- err = -EINVAL;
- goto out;
- }
-
- db_id = clients[i]->doorbell_id;
-
- err = __guc_client_enable(clients[i]);
- if (err) {
- pr_err("[%d] Failed to create a doorbell\n", i);
- goto out;
- }
-
- /* doorbell id shouldn't change, we are holding the mutex */
- if (db_id != clients[i]->doorbell_id) {
- pr_err("[%d] doorbell id changed (%d != %d)\n",
- i, db_id, clients[i]->doorbell_id);
- err = -EINVAL;
- goto out;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto out;
-
- err = ring_doorbell_nop(clients[i]);
- if (err)
- goto out;
- }
-
-out:
- for (i = 0; i < ATTEMPTS; i++)
- if (!IS_ERR_OR_NULL(clients[i])) {
- __guc_client_disable(clients[i]);
- guc_client_free(clients[i]);
- }
-unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return err;
-}
-
-int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_guc_clients),
- SUBTEST(igt_guc_doorbells),
- };
-
- if (!USES_GUC_SUBMISSION(dev_priv))
- return 0;
-
- return i915_subtests(tests, dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
new file mode 100644
index 000000000000..3ef3620e0da5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_region.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void close_objects(struct intel_memory_region *mem,
+ struct list_head *objects)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+ /* No polluting the memory region between tests */
+ __i915_gem_object_put_pages(obj);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+
+ cond_resched();
+
+ i915_gem_drain_freed_objects(i915);
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t total = resource_size(&mem->region);
+ resource_size_t page_size;
+ resource_size_t rem;
+ unsigned long max_pages;
+ unsigned long page_num;
+ LIST_HEAD(objects);
+ int err = 0;
+
+ page_size = mem->mm.chunk_size;
+ max_pages = div64_u64(total, page_size);
+ rem = total;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ resource_size_t size = page_num * page_size;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_region(mem, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+ rem -= size;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+ if (err == -ENXIO) {
+ if (page_num * page_size <= rem) {
+ pr_err("%s failed, space still left in region\n",
+ __func__);
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
+ }
+
+ close_objects(mem, &objects);
+
+ return err;
+}
+
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+ struct list_head *objects,
+ u64 size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mem, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto put;
+
+ list_add(&obj->st_link, objects);
+ return obj;
+
+put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ LIST_HEAD(holes);
+ I915_RND_STATE(prng);
+ resource_size_t total;
+ resource_size_t min;
+ u64 target;
+ int err = 0;
+
+ total = resource_size(&mem->region);
+
+ /* Min size */
+ obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s min object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Max size */
+ obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s max object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Internal fragmentation should not bleed into the object size */
+ target = i915_prandom_u64_state(&prng);
+ div64_u64_rem(target, total, &target);
+ target = round_up(target, PAGE_SIZE);
+ target = max_t(u64, PAGE_SIZE, target);
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != target) {
+ pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
+ obj->base.size, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Try to fragment the address space, such that half of it is free, but
+ * the max contiguous block size is SZ_64K.
+ */
+
+ target = SZ_64K;
+ n_objects = div64_u64(total, target);
+
+ while (n_objects--) {
+ struct list_head *list;
+
+ if (n_objects % 2)
+ list = &holes;
+ else
+ list = &objects;
+
+ obj = igt_object_create(mem, list, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+ }
+
+ close_objects(mem, &holes);
+
+ min = target;
+ target = total >> 1;
+
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Even though we have enough free space, we don't have a big enough
+ * contiguous block. Make sure that holds true.
+ */
+
+ do {
+ bool should_fail = target > min;
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (should_fail != IS_ERR(obj)) {
+ pr_err("%s target allocation(%llx) mismatch\n",
+ __func__, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ target >>= 1;
+ } while (target >= mem->mm.chunk_size);
+
+err_close_objects:
+ list_splice_tail(&holes, &objects);
+ close_objects(mem, &objects);
+ return err;
+}
+
+static int igt_gpu_write_dw(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dword,
+ u32 value)
+{
+ return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+ vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
+ int err;
+
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
+ err = -EINVAL;
+ break;
+ }
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
+ }
+
+ i915_gem_object_unpin_map(obj);
+ return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ struct i915_vma *vma;
+ int *order;
+ int i, n;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ n = 0;
+ count = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ vm = ce->vm;
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ if (!n)
+ return 0;
+
+ order = i915_random_order(count * count, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_free;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_free;
+
+ i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
+ do {
+ u32 rng = prandom_u32_state(&prng);
+ u32 dword = offset_in_page(rng) / 4;
+
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = igt_gpu_write_dw(ce, vma, dword, rng);
+ if (err)
+ break;
+
+ err = igt_cpu_check(obj, dword, rng);
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_gem_context_unlock_engines(ctx);
+
+out_free:
+ kfree(order);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_lmem_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_lmem_write_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct file *file;
+ I915_RND_STATE(prng);
+ u32 sz;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+ obj = i915_gem_object_create_lmem(i915, sz, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_file;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ err = igt_gpu_write(ctx, obj);
+ if (err)
+ pr_err("igt_gpu_write failed(%d)\n", err);
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_file:
+ fput(file);
+ return err;
+}
+
+static struct intel_engine_cs *
+random_engine_class(struct drm_i915_private *i915,
+ unsigned int class,
+ struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for (engine = intel_engine_lookup_user(i915, class, 0);
+ engine && engine->uabi_class == class;
+ engine = rb_entry_safe(rb_next(&engine->uabi_node),
+ typeof(*engine), uabi_node))
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ return intel_engine_lookup_user(i915, class, count);
+}
+
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 bytes[] = {
+ 0, /* rng placeholder */
+ sizeof(u32),
+ sizeof(u64),
+ 64, /* cl */
+ PAGE_SIZE,
+ PAGE_SIZE - sizeof(u32),
+ PAGE_SIZE - sizeof(u64),
+ PAGE_SIZE - 64,
+ };
+ struct intel_engine_cs *engine;
+ u32 *vaddr;
+ u32 sz;
+ u32 i;
+ int *order;
+ int count;
+ int err;
+
+ engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
+ if (!engine)
+ return 0;
+
+ pr_info("%s: using %s\n", __func__, engine->name);
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+ sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ /* Put the pages into a known state -- from the gpu for added fun */
+ intel_engine_pm_get(engine);
+ err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+ intel_engine_pm_put(engine);
+ if (err)
+ goto out_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ count = ARRAY_SIZE(bytes);
+ order = i915_random_order(count * count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_unpin;
+ }
+
+ /* We want to throw in a random width/align */
+ bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+ sizeof(u32));
+
+ i = 0;
+ do {
+ u32 offset;
+ u32 align;
+ u32 dword;
+ u32 size;
+ u32 val;
+
+ size = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+ offset = igt_random_offset(&prng, 0, obj->base.size,
+ size, align);
+
+ val = prandom_u32_state(&prng);
+ memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+ size / sizeof(u32));
+
+ /*
+ * Sample random dw -- don't waste precious time reading every
+ * single dw.
+ */
+ dword = igt_random_offset(&prng, offset,
+ offset + size,
+ sizeof(u32), sizeof(u32));
+ dword /= sizeof(u32);
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+ __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+ size, align, offset);
+ err = -EINVAL;
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_mock_contiguous),
+ };
+ struct intel_memory_region *mem;
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_unref;
+ }
+
+ err = i915_subtests(tests, mem);
+
+ intel_memory_region_put(mem);
+out_unref:
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
+int intel_memory_region_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_write_cpu),
+ SUBTEST(igt_lmem_write_gpu),
+ };
+
+ if (!HAS_LMEM(i915)) {
+ pr_info("device lacks LMEM support, skipping\n");
+ return 0;
+ }
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 86815c6072a1..0e4e6be0101d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -67,6 +67,7 @@ static int intel_shadow_table_check(void)
} reg_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
};
const i915_reg_t *reg;
unsigned int i, j;
@@ -101,6 +102,7 @@ int intel_uncore_mock_selftests(void)
{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
+ { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
};
int err, i;
@@ -138,19 +140,19 @@ static int live_forcewake_ops(void *arg)
}
};
const struct reg *r;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_uncore_forcewake_domain *domain;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned int tmp;
int err = 0;
- GEM_BUG_ON(i915->gt.awake);
+ GEM_BUG_ON(gt->awake);
/* vlv/chv with their pcu behave differently wrt reads */
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
pr_debug("PCU fakes forcewake badly; skipping\n");
return 0;
}
@@ -168,15 +170,15 @@ static int live_forcewake_ops(void *arg)
/* We have to pick carefully to get the exact behaviour we need */
for (r = registers; r->name; r++)
- if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ if (r->platforms & INTEL_INFO(gt->i915)->gen_mask)
break;
if (!r->name) {
pr_debug("Forcewaked register not known for %s; skipping\n",
- intel_platform_name(INTEL_INFO(i915)->platform));
+ intel_platform_name(INTEL_INFO(gt->i915)->platform));
return 0;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -186,7 +188,7 @@ static int live_forcewake_ops(void *arg)
intel_uncore_fw_release_timer(&domain->timer);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
@@ -247,22 +249,22 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return err;
}
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
- struct drm_i915_private *dev_priv = arg;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = arg;
+ struct intel_uncore *uncore = gt->uncore;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
+ !IS_VALLEYVIEW(gt->i915) &&
+ !IS_CHERRYVIEW(gt->i915))
return 0;
/*
@@ -281,7 +283,7 @@ static int live_forcewake_domains(void *arg)
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
- (void)I915_READ_FW(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
@@ -298,7 +300,7 @@ static int live_forcewake_domains(void *arg)
check_for_unclaimed_mmio(uncore);
- (void)I915_READ(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
@@ -310,21 +312,23 @@ static int live_forcewake_domains(void *arg)
return err;
}
+static int live_fw_table(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ /* Confirm the table we load is still valid */
+ return intel_fw_table_check(gt->uncore->fw_domains_table,
+ gt->uncore->fw_domains_table_entries,
+ INTEL_GEN(gt->i915) >= 9);
+}
+
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_fw_table),
SUBTEST(live_forcewake_ops),
SUBTEST(live_forcewake_domains),
};
- int err;
-
- /* Confirm the table we load is still valid */
- err = intel_fw_table_check(i915->uncore.fw_domains_table,
- i915->uncore.fw_domains_table_entries,
- INTEL_GEN(i915) >= 9);
- if (err)
- return err;
-
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index b976c12817c5..080b90b63d16 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -40,6 +40,7 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
+ fence->error = 0;
fence->flags = (unsigned long)nop_fence_notify;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
deleted file mode 100644
index 09c704153456..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_drm.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_drm.h"
-
-struct drm_file *mock_file(struct drm_i915_private *i915)
-{
- struct file *filp;
- struct inode *inode;
- struct drm_file *file;
- int err;
-
- inode = kzalloc(sizeof(*inode), GFP_KERNEL);
- if (!inode) {
- err = -ENOMEM;
- goto err;
- }
-
- inode->i_rdev = i915->drm.primary->index;
-
- filp = kzalloc(sizeof(*filp), GFP_KERNEL);
- if (!filp) {
- err = -ENOMEM;
- goto err_inode;
- }
-
- err = drm_open(inode, filp);
- if (err)
- goto err_filp;
-
- file = filp->private_data;
- memset(&file->filp, POISON_INUSE, sizeof(file->filp));
- file->authenticated = true;
-
- kfree(filp);
- kfree(inode);
- return file;
-
-err_filp:
- kfree(filp);
-err_inode:
- kfree(inode);
-err:
- return ERR_PTR(err);
-}
-
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
-{
- struct file filp = { .private_data = file };
-
- drm_release(NULL, &filp);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
index b39beee9f8f6..9916b6f95526 100644
--- a/drivers/gpu/drm/i915/selftests/mock_drm.h
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -25,7 +25,21 @@
#ifndef __MOCK_DRM_H
#define __MOCK_DRM_H
-struct drm_file *mock_file(struct drm_i915_private *i915);
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file);
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+
+struct drm_file;
+struct file;
+
+static inline struct file *mock_file(struct drm_i915_private *i915)
+{
+ return mock_drm_getfile(i915->drm.primary, O_RDWR);
+}
+
+static inline struct drm_file *to_drm_file(struct file *f)
+{
+ return f->private_data;
+}
#endif /* !__MOCK_DRM_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 64bc51400ae7..3b8986983afc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,60 +25,49 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "intel_memory_region.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "mock_region.h"
#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"
void mock_device_flush(struct drm_i915_private *i915)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
do {
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
mock_engine_flush(engine);
- } while (i915_retire_requests(i915));
+ } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
}
static void mock_device_release(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- flush_work(&i915->gem.idle_work);
- i915_gem_drain_workqueue(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
- mock_engine_free(engine);
- i915_gem_contexts_fini(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_gt_driver_remove(&i915->gt);
- i915_timelines_fini(i915);
+ i915_gem_driver_release__contexts(i915);
- drain_workqueue(i915->wq);
+ i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
mock_fini_ggtt(&i915->ggtt);
- mutex_unlock(&i915->drm.struct_mutex);
-
destroy_workqueue(i915->wq);
- i915_gemfs_fini(i915);
+ intel_gt_driver_late_release(&i915->gt);
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -102,14 +91,6 @@ static void release_dev(struct device *dev)
kfree(pdev);
}
-static void mock_retire_work_handler(struct work_struct *work)
-{
-}
-
-static void mock_idle_work_handler(struct work_struct *work)
-{
-}
-
static int pm_domain_resume(struct device *dev)
{
return pm_generic_runtime_resume(dev);
@@ -177,15 +158,15 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(&i915->uncore);
+ mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ intel_memory_regions_hw_probe(i915);
+
+ mock_uncore_init(&i915->uncore, i915);
+
i915_gem_init__mm(i915);
- intel_gt_pm_init(i915);
+ intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
-
- init_waitqueue_head(&i915->gpu_error.wait_queue);
- init_waitqueue_head(&i915->gpu_error.reset_queue);
- init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
- mutex_init(&i915->gpu_error.wedge_mutex);
+ i915->gt.awake = -ENODEV;
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
@@ -193,20 +174,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
- INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
-
- i915->gt.awake = true;
-
- i915_timelines_init(i915);
-
- INIT_LIST_HEAD(&i915->gt.active_rings);
- INIT_LIST_HEAD(&i915->gt.closed_vma);
- spin_lock_init(&i915->gt.closed_lock);
-
- mutex_lock(&i915->drm.struct_mutex);
-
mock_init_ggtt(i915, &i915->ggtt);
+ i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
mkwrite_device_info(i915)->engine_mask = BIT(0);
@@ -214,28 +183,21 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->engine[RCS0])
goto err_unlock;
- i915->kernel_context = mock_context(i915, NULL);
- if (!i915->kernel_context)
- goto err_engine;
-
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
- mutex_unlock(&i915->drm.struct_mutex);
-
- WARN_ON(i915_gemfs_init(i915));
+ __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
+ intel_engines_driver_register(i915);
return i915;
err_context:
- i915_gem_contexts_fini(i915);
-err_engine:
- mock_engine_free(i915->engine[RCS0]);
+ intel_gt_driver_remove(&i915->gt);
err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
+ intel_gt_driver_late_release(&i915->gt);
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
drm_dev_fini(&i915->drm);
put_device:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f625c307a406..edc5e3dda8ca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->flags |= I915_VMA_LOCAL_BIND;
+ set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
return 0;
}
@@ -55,6 +55,11 @@ static void mock_cleanup(struct i915_address_space *vm)
{
}
+static void mock_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
{
struct i915_ppgtt *ppgtt;
@@ -63,13 +68,14 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
if (!ppgtt)
return NULL;
+ ppgtt->vm.gt = &i915->gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
- ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.clear_range = mock_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
ppgtt->vm.insert_entries = mock_insert_entries;
ppgtt->vm.cleanup = mock_cleanup;
@@ -86,7 +92,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -98,6 +104,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
memset(ggtt, 0, sizeof(*ggtt));
+ ggtt->vm.gt = &i915->gt;
ggtt->vm.i915 = i915;
ggtt->vm.is_ggtt = true;
@@ -105,7 +112,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->vm.total = 4096 * PAGE_SIZE;
- ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.clear_range = mock_clear_range;
ggtt->vm.insert_page = mock_insert_page;
ggtt->vm.insert_entries = mock_insert_entries;
ggtt->vm.cleanup = mock_cleanup;
@@ -116,6 +123,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+ i915->gt.ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 3387393286de..e3f224f43beb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -25,6 +25,9 @@
#ifndef __MOCK_GTT_H
#define __MOCK_GTT_H
+struct drm_i915_private;
+struct i915_ggtt;
+
void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
new file mode 100644
index 000000000000..b2ad41c27e67
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_region.h"
+#include "intel_memory_region.h"
+
+#include "mock_region.h"
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+static struct drm_i915_gem_object *
+mock_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+ .init = intel_memory_region_init_buddy,
+ .release = intel_memory_region_release_buddy,
+ .create_object = mock_object_create,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start)
+{
+ return intel_memory_region_create(i915, start, size, min_page_size,
+ io_start, &mock_region_ops);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
new file mode 100644
index 000000000000..329bf74dfaca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start);
+
+#endif /* !__MOCK_REGION_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 9390fc09984b..09f747228dff 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -28,14 +28,12 @@
#include "mock_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay)
+mock_request(struct intel_context *ce, unsigned long delay)
{
struct i915_request *request;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = igt_request_alloc(context, engine);
+ request = intel_context_create_request(ce);
if (IS_ERR(request))
return NULL;
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 4acf0211df20..8907b60c290d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -30,9 +30,7 @@
#include "../i915_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay);
+mock_request(struct intel_context *ce, unsigned long delay);
bool mock_cancel_request(struct i915_request *request);
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
deleted file mode 100644
index b6deaa61110d..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#ifndef __MOCK_TIMELINE__
-#define __MOCK_TIMELINE__
-
-struct i915_timeline;
-
-void mock_timeline_init(struct i915_timeline *timeline, u64 context);
-void mock_timeline_fini(struct i915_timeline *timeline);
-
-#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ff8999c63a12..ca57e4008701 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,11 @@ __nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct intel_uncore *uncore)
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
- ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
+ intel_uncore_init_early(uncore, i915);
+
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index dacb36b5ffcd..7acf1ef4d488 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,10 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct intel_uncore *uncore);
+struct drm_i915_private;
+struct intel_uncore;
+
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index ab6c83caceb7..21cdcc2faabc 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -8,5 +8,4 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 06393cd1067d..f22cfbf9353e 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -3,19 +3,21 @@
*
* derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now)
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
+
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
-#include <drm/bridge/dw_hdmi.h>
-#include <video/imx-ipu-v3.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_of.h>
#include "imx-drm.h"
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 3e8bece620df..da87c70e413b 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -4,14 +4,18 @@
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
*/
+
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -19,7 +23,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <video/imx-ipu-v3.h>
+#include <drm/drm_vblank.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
@@ -147,16 +151,13 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
};
static struct drm_driver imx_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 383733302280..8cb2665b2c74 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -5,25 +5,28 @@
* Copyright (C) 2012 Sascha Hauer, Pengutronix
*/
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <video/of_display_timing.h>
-#include <video/of_videomode.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "imx-drm.h"
#define DRIVER_NAME "imx-ldb"
@@ -122,14 +125,11 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
- int num_modes = 0;
+ int num_modes;
- if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
- imx_ldb_ch->panel->funcs->get_modes) {
- num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
- if (num_modes > 0)
- return num_modes;
- }
+ num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector);
+ if (num_modes > 0)
+ return num_modes;
if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
@@ -460,9 +460,10 @@ static int imx_ldb_register(struct drm_device *drm,
*/
drm_connector_helper_add(&imx_ldb_ch->connector,
&imx_ldb_connector_helper_funcs);
- drm_connector_init(drm, &imx_ldb_ch->connector,
- &imx_ldb_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
+ &imx_ldb_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS,
+ imx_ldb_ch->ddc);
drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e725af8a0025..5bbfaa2cd0f4 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -5,20 +5,22 @@
* Copyright (C) 2013 Philipp Zabel, Pengutronix
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
-#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
@@ -482,8 +484,10 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
- drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ drm_connector_init_with_ddc(drm, &tve->connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ tve->ddc);
drm_connector_attach_encoder(&tve->connector, &tve->encoder);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index c436a28d50e4..63c0284f8b3c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -4,21 +4,25 @@
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
*/
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
-#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 2a1e071d39ee..28826c0aa24a 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -5,15 +5,16 @@
* Copyright (C) 2013 Philipp Zabel, Pengutronix
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
-#include "video/imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
+
#include "imx-drm.h"
#include "ipuv3-plane.h"
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 1a76de1e8e7b..3dca424059f7 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -7,14 +7,17 @@
#include <linux/component.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <video/of_display_timing.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
-#include <linux/videodev2.h>
-#include <video/of_display_timing.h>
#include "imx-drm.h"
@@ -45,14 +48,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
struct device_node *np = imxpd->dev->of_node;
- int num_modes = 0;
+ int num_modes;
- if (imxpd->panel && imxpd->panel->funcs &&
- imxpd->panel->funcs->get_modes) {
- num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
- if (num_modes > 0)
- return num_modes;
- }
+ num_modes = drm_panel_get_modes(imxpd->panel, connector);
+ if (num_modes > 0)
+ return num_modes;
if (imxpd->edid) {
drm_connector_update_edid_property(connector, imxpd->edid);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index e9f9e9fb9b17..6d47ef7b148c 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -152,6 +153,7 @@ struct ingenic_dma_hwdesc {
struct jz_soc_info {
bool needs_dev_clk;
+ unsigned int max_width, max_height;
};
struct ingenic_drm {
@@ -163,9 +165,12 @@ struct ingenic_drm {
struct device *dev;
struct regmap *map;
struct clk *lcd_clk, *pix_clk;
+ const struct jz_soc_info *soc_info;
struct ingenic_dma_hwdesc *dma_hwdesc;
dma_addr_t dma_hwdesc_phys;
+
+ bool panel_is_sharp;
};
static const u32 ingenic_drm_primary_formats[] = {
@@ -283,6 +288,13 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
regmap_write(priv->map, JZ_REG_LCD_DAV,
vds << JZ_LCD_DAV_VDS_OFFSET |
vde << JZ_LCD_DAV_VDE_OFFSET);
+
+ if (priv->panel_is_sharp) {
+ regmap_write(priv->map, JZ_REG_LCD_PS, hde << 16 | (hde + 1));
+ regmap_write(priv->map, JZ_REG_LCD_CLS, hde << 16 | (hde + 1));
+ regmap_write(priv->map, JZ_REG_LCD_SPL, hpe << 16 | (hpe + 1));
+ regmap_write(priv->map, JZ_REG_LCD_REV, mode->htotal << 16);
+ }
}
static void ingenic_drm_crtc_update_ctrl(struct ingenic_drm *priv,
@@ -316,6 +328,10 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (!drm_atomic_crtc_needs_modeset(state))
return 0;
+ if (state->mode.hdisplay > priv->soc_info->max_width ||
+ state->mode.vdisplay > priv->soc_info->max_height)
+ return -EINVAL;
+
rate = clk_round_rate(priv->pix_clk,
state->adjusted_mode.clock * 1000);
if (rate < 0)
@@ -362,14 +378,18 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct ingenic_drm *priv = drm_plane_get_priv(plane);
struct drm_plane_state *state = plane->state;
unsigned int width, height, cpp;
+ dma_addr_t addr;
- width = state->crtc->state->adjusted_mode.hdisplay;
- height = state->crtc->state->adjusted_mode.vdisplay;
- cpp = state->fb->format->cpp[plane->index];
+ if (state && state->fb) {
+ addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+ width = state->src_w >> 16;
+ height = state->src_h >> 16;
+ cpp = state->fb->format->cpp[plane->index];
- priv->dma_hwdesc->addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
- priv->dma_hwdesc->cmd = width * height * cpp / 4;
- priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
+ priv->dma_hwdesc->addr = addr;
+ priv->dma_hwdesc->cmd = width * height * cpp / 4;
+ priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
+ }
}
static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
@@ -378,11 +398,18 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
{
struct ingenic_drm *priv = drm_encoder_get_priv(encoder);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- struct drm_display_info *info = &conn_state->connector->display_info;
- unsigned int cfg = JZ_LCD_CFG_PS_DISABLE
- | JZ_LCD_CFG_CLS_DISABLE
- | JZ_LCD_CFG_SPL_DISABLE
- | JZ_LCD_CFG_REV_DISABLE;
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_display_info *info = &conn->display_info;
+ unsigned int cfg;
+
+ priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
+
+ if (priv->panel_is_sharp) {
+ cfg = JZ_LCD_CFG_MODE_SPECIAL_TFT_1 | JZ_LCD_CFG_REV_POLARITY;
+ } else {
+ cfg = JZ_LCD_CFG_PS_DISABLE | JZ_LCD_CFG_CLS_DISABLE
+ | JZ_LCD_CFG_SPL_DISABLE | JZ_LCD_CFG_REV_DISABLE;
+ }
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
@@ -393,24 +420,29 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;
- if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- cfg |= JZ_LCD_CFG_MODE_TV_OUT_I;
- else
- cfg |= JZ_LCD_CFG_MODE_TV_OUT_P;
- } else {
- switch (*info->bus_formats) {
- case MEDIA_BUS_FMT_RGB565_1X16:
- cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT;
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- cfg |= JZ_LCD_CFG_MODE_GENERIC_18BIT;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
- break;
- default:
- break;
+ if (!priv->panel_is_sharp) {
+ if (conn->connector_type == DRM_MODE_CONNECTOR_TV) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ cfg |= JZ_LCD_CFG_MODE_TV_OUT_I;
+ else
+ cfg |= JZ_LCD_CFG_MODE_TV_OUT_P;
+ } else {
+ switch (*info->bus_formats) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ cfg |= JZ_LCD_CFG_MODE_GENERIC_18BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
+ break;
+ default:
+ break;
+ }
}
}
@@ -433,6 +465,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_3X8:
return 0;
default:
return -EINVAL;
@@ -484,8 +517,7 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
static struct drm_driver ingenic_drm_driver_data = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
- | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",
.date = "20190422",
@@ -581,7 +613,6 @@ static int ingenic_drm_probe(struct platform_device *pdev)
struct drm_bridge *bridge;
struct drm_panel *panel;
struct drm_device *drm;
- struct resource *mem;
void __iomem *base;
long parent_rate;
int ret, irq;
@@ -596,6 +627,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->soc_info = soc_info;
priv->dev = dev;
drm = &priv->drm;
drm->dev_private = priv;
@@ -611,12 +643,11 @@ static int ingenic_drm_probe(struct platform_device *pdev)
drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
- drm->mode_config.max_width = 800;
- drm->mode_config.max_height = 600;
+ drm->mode_config.max_width = soc_info->max_width;
+ drm->mode_config.max_height = 4095;
drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, mem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource");
return PTR_ERR(base);
@@ -656,10 +687,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}
- if (panel) {
- bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_Unknown);
- }
+ if (panel)
+ bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
@@ -791,15 +821,26 @@ static int ingenic_drm_remove(struct platform_device *pdev)
static const struct jz_soc_info jz4740_soc_info = {
.needs_dev_clk = true,
+ .max_width = 800,
+ .max_height = 600,
};
static const struct jz_soc_info jz4725b_soc_info = {
.needs_dev_clk = false,
+ .max_width = 800,
+ .max_height = 600,
+};
+
+static const struct jz_soc_info jz4770_soc_info = {
+ .needs_dev_clk = false,
+ .max_width = 1280,
+ .max_height = 720,
};
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ /* sentinel */ },
};
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index bb4ddc6bb0a6..d589f09d04d9 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -9,5 +9,6 @@ config DRM_LIMA
depends on COMMON_CLK
depends on OF
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
- DRM driver for ARM Mali 400/450 GPUs.
+ DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index 38cc70281ba5..a85444b0a1d4 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -13,9 +13,7 @@ lima-y := \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
- lima_gem_prime.o \
lima_dlbu.o \
- lima_bcast.o \
- lima_object.o
+ lima_bcast.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 570d0e93f9a9..19829b543024 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -80,26 +80,23 @@ const char *lima_ip_name(struct lima_ip *ip)
static int lima_clk_init(struct lima_device *dev)
{
int err;
- unsigned long bus_rate, gpu_rate;
dev->clk_bus = devm_clk_get(dev->dev, "bus");
if (IS_ERR(dev->clk_bus)) {
- dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
- return PTR_ERR(dev->clk_bus);
+ err = PTR_ERR(dev->clk_bus);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get bus clk failed %d\n", err);
+ return err;
}
dev->clk_gpu = devm_clk_get(dev->dev, "core");
if (IS_ERR(dev->clk_gpu)) {
- dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
- return PTR_ERR(dev->clk_gpu);
+ err = PTR_ERR(dev->clk_gpu);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get core clk failed %d\n", err);
+ return err;
}
- bus_rate = clk_get_rate(dev->clk_bus);
- dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
-
- gpu_rate = clk_get_rate(dev->clk_gpu);
- dev_info(dev->dev, "mod rate = %lu", gpu_rate);
-
err = clk_prepare_enable(dev->clk_bus);
if (err)
return err;
@@ -108,14 +105,21 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get reset controller failed %d\n",
+ err);
goto error_out1;
} else if (dev->reset != NULL) {
err = reset_control_deassert(dev->reset);
- if (err)
+ if (err) {
+ dev_err(dev->dev,
+ "reset controller deassert failed %d\n", err);
goto error_out1;
+ }
}
return 0;
@@ -145,7 +149,8 @@ static int lima_regulator_init(struct lima_device *dev)
dev->regulator = NULL;
if (ret == -ENODEV)
return 0;
- dev_err(dev->dev, "failed to get regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
@@ -291,16 +296,12 @@ int lima_device_init(struct lima_device *ldev)
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
err = lima_clk_init(ldev);
- if (err) {
- dev_err(ldev->dev, "clk init fail %d\n", err);
+ if (err)
return err;
- }
err = lima_regulator_init(ldev);
- if (err) {
- dev_err(ldev->dev, "regulator init fail %d\n", err);
+ if (err)
goto err_out0;
- }
ldev->empty_vm = lima_vm_create(ldev);
if (!ldev->empty_vm) {
@@ -313,7 +314,7 @@ int lima_device_init(struct lima_device *ldev)
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
- &ldev->dlbu_dma, GFP_KERNEL);
+ &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
@@ -343,6 +344,9 @@ int lima_device_init(struct lima_device *ldev)
if (err)
goto err_out5;
+ dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
+ dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
+
return 0;
err_out5:
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index b29c26cd13b2..124efe4fa97b 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -12,7 +12,6 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
@@ -231,37 +230,24 @@ static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *f
}
static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
- DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};
-static const struct file_operations lima_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .mmap = lima_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
static struct drm_driver lima_drm_driver = {
- .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
.postclose = lima_drm_driver_postclose,
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
- .gem_free_object_unlocked = lima_gem_free_object,
- .gem_open_object = lima_gem_object_open,
- .gem_close_object = lima_gem_object_close,
- .gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
@@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = {
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = lima_gem_create_object,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
- .gem_prime_mmap = lima_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
@@ -307,10 +293,8 @@ static int lima_pdev_probe(struct platform_device *pdev)
ldev->ddev = ddev;
err = lima_device_init(ldev);
- if (err) {
- dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ if (err)
goto err_out1;
- }
/*
* Register the DRM device with the core and the connectors with
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 477c0f766663..d0059d8c97d8 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
-#include <linux/pfn_t.h>
+#include <linux/pagemap.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -13,40 +13,55 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
-#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
- struct lima_bo *bo;
- struct lima_device *ldev = to_lima_dev(dev);
+ gfp_t mask;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ struct sg_table *sgt;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ obj = &shmem->base;
- bo = lima_bo_create(ldev, size, flags, NULL, NULL);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ /* Mali Utgard GPU can only support 32bit address space */
+ mask = mapping_gfp_mask(obj->filp->f_mapping);
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- err = drm_gem_handle_create(file, &bo->gem, handle);
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
+
+ err = drm_gem_handle_create(file, obj, handle);
+out:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(obj);
return err;
}
-void lima_gem_free_object(struct drm_gem_object *obj)
+static void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
- lima_bo_destroy(bo);
+ drm_gem_shmem_free_object(obj);
}
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
return lima_vm_bo_add(vm, bo, true);
}
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
lima_vm_bo_del(vm, bo);
}
+static const struct drm_gem_object_funcs lima_gem_funcs = {
+ .free = lima_gem_free_object,
+ .open = lima_gem_object_open,
+ .close = lima_gem_object_close,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct lima_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+
+ bo->base.base.funcs = &lima_gem_funcs;
+
+ return &bo->base.base;
+}
+
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
- int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
@@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
*va = lima_vm_get_va(vm, bo);
- err = drm_gem_create_mmap_offset(obj);
- if (!err)
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
- return err;
-}
-
-static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct lima_bo *bo = to_lima_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
-
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
-
- return vmf_insert_mixed(vma, vmf->address, pfn);
-}
-
-const struct vm_operations_struct lima_gem_vm_ops = {
- .fault = lima_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-void lima_set_vma_flags(struct vm_area_struct *vma)
-{
- pgprot_t prot = vm_get_page_prot(vma->vm_flags);
-
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_page_prot = pgprot_writecombine(prot);
-}
-
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
return 0;
}
@@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
int err = 0;
if (!write) {
- err = reservation_object_reserve_shared(bo->gem.resv, 1);
+ err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
if (err)
return err;
}
@@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
-}
-
-static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i, ret = 0, contended, slow_locked = -1;
-
- ww_acquire_init(ctx, &reservation_ww_class);
-
-retry:
- for (i = 0; i < nr_bos; i++) {
- if (i == slow_locked) {
- slow_locked = -1;
- continue;
- }
-
- ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
- if (ret < 0) {
- contended = i;
- goto err;
- }
- }
-
- ww_acquire_done(ctx);
- return 0;
-
-err:
- for (i--; i >= 0; i--)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
-
- if (slow_locked >= 0)
- ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
-
- if (ret == -EDEADLK) {
- /* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(
- &bos[contended]->gem.resv->lock, ctx);
- if (!ret) {
- slow_locked = contended;
- goto retry;
- }
- }
- ww_acquire_fini(ctx);
-
- return ret;
-}
-
-static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < nr_bos; i++)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
- ww_acquire_fini(ctx);
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
bos[i] = bo;
}
- err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
if (err)
goto err_out0;
@@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
else
- reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
}
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
@@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err_out2:
lima_sched_task_fini(submit->task);
err_out1:
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
err_out0:
for (i = 0; i < submit->nr_bos; i++) {
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
}
if (out_sync)
drm_syncobj_put(out_sync);
@@ -341,8 +288,8 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
timeout = drm_timeout_abs_to_jiffies(timeout_ns);
- ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
- if (ret == 0)
+ ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
+ if (ret == -ETIME)
ret = timeout ? -ETIMEDOUT : -EBUSY;
return ret;
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 556111a01135..1800feb3e47f 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -4,19 +4,37 @@
#ifndef __LIMA_GEM_H__
#define __LIMA_GEM_H__
-struct lima_bo;
+#include <drm/drm_gem_shmem_helper.h>
+
struct lima_submit;
-extern const struct vm_operations_struct lima_gem_vm_ops;
+struct lima_bo {
+ struct drm_gem_shmem_object base;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base);
+}
+
+static inline size_t lima_bo_size(struct lima_bo *bo)
+{
+ return bo->base.base.size;
+}
+
+static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
+{
+ return bo->base.base.resv;
+}
-struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-void lima_gem_free_object(struct drm_gem_object *obj);
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
deleted file mode 100644
index 9c6d9f1dba55..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <linux/dma-buf.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-
-#include "lima_device.h"
-#include "lima_object.h"
-#include "lima_gem.h"
-#include "lima_gem_prime.h"
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct lima_device *ldev = to_lima_dev(dev);
- struct lima_bo *bo;
-
- bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt,
- attach->dmabuf->resv);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
-
- return &bo->gem;
-}
-
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct lima_bo *bo = to_lima_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
-
- return drm_prime_pages_to_sg(bo->pages, npages);
-}
-
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
- return 0;
-}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
deleted file mode 100644
index 34b4d35c21e3..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_GEM_PRIME_H__
-#define __LIMA_GEM_PRIME_H__
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 8e1651d6a61f..97ec09dee572 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -8,7 +8,6 @@
#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
-#include "lima_object.h"
#include "lima_regs.h"
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
deleted file mode 100644
index 5c41f859a72f..000000000000
--- a/drivers/gpu/drm/lima/lima_object.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <drm/drm_prime.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-
-#include "lima_object.h"
-
-void lima_bo_destroy(struct lima_bo *bo)
-{
- if (bo->sgt) {
- kfree(bo->pages);
- drm_prime_gem_destroy(&bo->gem, bo->sgt);
- } else {
- if (bo->pages_dma_addr) {
- int i, npages = bo->gem.size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (bo->pages_dma_addr[i])
- dma_unmap_page(bo->gem.dev->dev,
- bo->pages_dma_addr[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
- }
-
- if (bo->pages)
- drm_gem_put_pages(&bo->gem, bo->pages, true, true);
- }
-
- kfree(bo->pages_dma_addr);
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
- struct reservation_object *resv)
-{
- struct lima_bo *bo;
- int err;
-
- size = PAGE_ALIGN(size);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&bo->lock);
- INIT_LIST_HEAD(&bo->va);
- bo->gem.resv = resv;
-
- err = drm_gem_object_init(dev->ddev, &bo->gem, size);
- if (err) {
- kfree(bo);
- return ERR_PTR(err);
- }
-
- return bo;
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt,
- struct reservation_object *resv)
-{
- int i, err;
- size_t npages;
- struct lima_bo *bo, *ret;
-
- bo = lima_bo_create_struct(dev, size, flags, resv);
- if (IS_ERR(bo))
- return bo;
-
- npages = bo->gem.size >> PAGE_SHIFT;
-
- bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bo->pages_dma_addr) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- if (sgt) {
- bo->sgt = sgt;
-
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- err = drm_prime_sg_to_page_addr_arrays(
- sgt, bo->pages, bo->pages_dma_addr, npages);
- if (err) {
- ret = ERR_PTR(err);
- goto err_out;
- }
- } else {
- mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
- bo->pages = drm_gem_get_pages(&bo->gem);
- if (IS_ERR(bo->pages)) {
- ret = ERR_CAST(bo->pages);
- bo->pages = NULL;
- goto err_out;
- }
-
- for (i = 0; i < npages; i++) {
- dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = ERR_PTR(-EFAULT);
- goto err_out;
- }
- bo->pages_dma_addr[i] = addr;
- }
-
- }
-
- return bo;
-
-err_out:
- lima_bo_destroy(bo);
- return ret;
-}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
deleted file mode 100644
index 6738724afb7b..000000000000
--- a/drivers/gpu/drm/lima/lima_object.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_OBJECT_H__
-#define __LIMA_OBJECT_H__
-
-#include <drm/drm_gem.h>
-
-#include "lima_device.h"
-
-struct lima_bo {
- struct drm_gem_object gem;
-
- struct page **pages;
- dma_addr_t *pages_dma_addr;
- struct sg_table *sgt;
- void *vaddr;
-
- struct mutex lock;
- struct list_head va;
-};
-
-static inline struct lima_bo *
-to_lima_bo(struct drm_gem_object *obj)
-{
- return container_of(obj, struct lima_bo, gem);
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt,
- struct reservation_object *resv);
-void lima_bo_destroy(struct lima_bo *bo);
-void *lima_bo_vmap(struct lima_bo *bo);
-void lima_bo_vunmap(struct lima_bo *bo);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 4127cacac454..b561dd05bd62 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -10,7 +10,7 @@
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
-#include "lima_object.h"
+#include "lima_gem.h"
struct lima_fence {
struct dma_fence base;
@@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
return -ENOMEM;
for (i = 0; i < num_bos; i++)
- drm_gem_object_get(&bos[i]->gem);
+ drm_gem_object_get(&bos[i]->base.base);
err = drm_sched_job_init(&task->base, &context->base, vm);
if (err) {
@@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task)
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
- drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ drm_gem_object_put_unlocked(&task->bos[i]->base.base);
kfree(task->bos);
}
@@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
struct lima_sched_context *context,
atomic_t *guilty)
{
- struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+ struct drm_gpu_scheduler *sched = &pipe->base;
- return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+ return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, guilty);
}
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
@@ -255,13 +256,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
return task->fence;
}
-static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
- struct lima_sched_task *task)
+static void lima_sched_timedout_job(struct drm_sched_job *job)
{
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_sched_task *task = to_lima_task(job);
+
+ if (!pipe->error)
+ DRM_ERROR("lima job timeout\n");
+
drm_sched_stop(&pipe->base, &task->base);
- if (task)
- drm_sched_increase_karma(&task->base);
+ drm_sched_increase_karma(&task->base);
pipe->task_error(pipe);
@@ -284,16 +289,6 @@ static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
drm_sched_start(&pipe->base, true);
}
-static void lima_sched_timedout_job(struct drm_sched_job *job)
-{
- struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
- struct lima_sched_task *task = to_lima_task(job);
-
- DRM_ERROR("lima job timeout\n");
-
- lima_sched_handle_error_task(pipe, task);
-}
-
static void lima_sched_free_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
@@ -318,15 +313,6 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
.free_job = lima_sched_free_job,
};
-static void lima_sched_error_work(struct work_struct *work)
-{
- struct lima_sched_pipe *pipe =
- container_of(work, struct lima_sched_pipe, error_work);
- struct lima_sched_task *task = pipe->current_task;
-
- lima_sched_handle_error_task(pipe, task);
-}
-
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -335,8 +321,6 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
- INIT_WORK(&pipe->error_work, lima_sched_error_work);
-
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
msecs_to_jiffies(timeout), name);
}
@@ -349,7 +333,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
if (pipe->error)
- schedule_work(&pipe->error_work);
+ drm_sched_fault(&pipe->base);
else {
struct lima_sched_task *task = pipe->current_task;
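
The lima_sched.c hunks above adopt the reworked drm_sched_entity_init() signature (a scheduler priority plus an array of drm_gpu_scheduler pointers instead of a pre-resolved run-queue) and drop the driver-private error worker in favour of drm_sched_fault(), so the scheduler core funnels both real timeouts and hardware-reported errors into lima_sched_timedout_job(). A minimal sketch of entity setup against a single scheduler; the wrapper name is illustrative, not part of the patch:

	static int example_sched_context_init(struct drm_gpu_scheduler *sched,
					      struct drm_sched_entity *entity,
					      atomic_t *guilty)
	{
		/* One scheduler in the list, normal priority for all jobs */
		struct drm_gpu_scheduler *sched_list[] = { sched };

		return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
					     sched_list, ARRAY_SIZE(sched_list),
					     guilty);
	}
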
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 928af91c1118..1d814fecbcc0 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -68,8 +68,6 @@ struct lima_sched_pipe {
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
-
- struct work_struct error_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 19e88ca16527..840e2350d872 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -6,7 +6,7 @@
#include "lima_device.h"
#include "lima_vm.h"
-#include "lima_object.h"
+#include "lima_gem.h"
#include "lima_regs.h"
struct lima_bo_va {
@@ -32,7 +32,7 @@ struct lima_bo_va {
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
-static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
u32 addr;
@@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
}
}
-static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
- u32 start, u32 end)
+static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
- u64 addr;
- int i = 0;
-
- for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
- u32 pbe = LIMA_PBE(addr);
- u32 bte = LIMA_BTE(addr);
-
- if (!vm->bts[pbe].cpu) {
- dma_addr_t pts;
- u32 *pd;
- int j;
-
- vm->bts[pbe].cpu = dma_alloc_wc(
- vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
- &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
- if (!vm->bts[pbe].cpu) {
- if (addr != start)
- lima_vm_unmap_page_table(vm, start, addr - 1);
- return -ENOMEM;
- }
-
- pts = vm->bts[pbe].dma;
- pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
- for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
- pd[j] = pts | LIMA_VM_FLAG_PRESENT;
- pts += LIMA_PAGE_SIZE;
- }
+ u32 pbe = LIMA_PBE(va);
+ u32 bte = LIMA_BTE(va);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu)
+ return -ENOMEM;
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
}
-
- vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
}
+ vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
+
return 0;
}
@@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
struct lima_bo_va *bo_va;
- int err;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
mutex_lock(&bo->lock);
@@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
mutex_lock(&vm->lock);
- err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
if (err)
goto err_out1;
- err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
- if (err)
- goto err_out2;
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ bo_va->node.start + offset);
+ if (err)
+ goto err_out2;
+
+ offset += PAGE_SIZE;
+ }
mutex_unlock(&vm->lock);
@@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
return 0;
err_out2:
+ if (offset)
+ lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
drm_mm_remove_node(&bo_va->node);
err_out1:
mutex_unlock(&vm->lock);
@@ -168,8 +166,8 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
- lima_vm_unmap_page_table(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ lima_vm_unmap_range(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev)
kref_init(&vm->refcount);
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!vm->pd.cpu)
goto err_out0;
if (dev->dlbu_cpu) {
- int err = lima_vm_map_page_table(
- vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
- LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ int err = lima_vm_map_page(
+ vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
if (err)
goto err_out1;
}
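
With lima_object.c gone, the buffer object embeds its backing GEM object as bo->base and exposes its pages through an sg_table (bo->base.sgt, as the scheduler and VM hunks show), so the VM now maps one DMA page at a time instead of consuming a pre-built pages_dma_addr array. A minimal sketch of that iteration pattern; the wrapper is illustrative and lima_vm_map_page() is the helper introduced above:

	/* Illustrative only: map every DMA page of an sg_table into a lima VM */
	static int example_map_sgt(struct lima_vm *vm, struct sg_table *sgt,
				   u32 base_va)
	{
		struct sg_dma_page_iter sg_iter;
		u32 offset = 0;
		int err;

		for_each_sg_dma_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
			err = lima_vm_map_page(vm,
					       sg_page_iter_dma_address(&sg_iter),
					       base_va + offset);
			if (err)
				return err; /* caller unmaps [base_va, base_va + offset) */
			offset += PAGE_SIZE;
		}

		return 0;
	}
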
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index caee2f8a29b4..e0bdedcf14dd 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -15,9 +15,9 @@
#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
-#define LIMA_VA_RESERVE_START 0xFFF00000
+#define LIMA_VA_RESERVE_START 0x0FFF00000ULL
#define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START
-#define LIMA_VA_RESERVE_END 0x100000000
+#define LIMA_VA_RESERVE_END 0x100000000ULL
struct lima_device;
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 751454ae3cd1..e59907e68854 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -498,24 +498,20 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
}
/* Set up channel 0 sync (based on chnl_update_registers()) */
- if (mcde->te_sync) {
- /*
- * Turn on hardware TE0 synchronization
- */
+ if (mcde->video_mode || mcde->te_sync)
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
- val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
- << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
- } else {
- /*
- * Set up sync source to software, out sync formatter
- * Code mostly from mcde_hw.c chnl_update_registers()
- */
+ else
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+
+ if (mcde->te_sync)
+ val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
+ << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ else
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
- }
+
writel(val, mcde->regs + sync);
/* Set up pixels per line and lines per frame */
@@ -934,10 +930,17 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
val = readl(mcde->regs + MCDE_CRC);
val |= MCDE_CRC_SYCEN0;
writel(val, mcde->regs + MCDE_CRC);
-
- drm_crtc_vblank_on(crtc);
}
+ drm_crtc_vblank_on(crtc);
+
+ if (mcde->video_mode)
+ /*
+ * Keep FIFO permanently enabled in video mode,
+ * otherwise MCDE will stop feeding data to the panel.
+ */
+ mcde_enable_fifo(mcde, MCDE_FIFO_A);
+
dev_info(drm->dev, "MCDE display is enabled\n");
}
@@ -946,13 +949,22 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = drm->dev_private;
+ struct drm_pending_vblank_event *event;
- if (mcde->te_sync)
- drm_crtc_vblank_off(crtc);
+ drm_crtc_vblank_off(crtc);
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+
dev_info(drm->dev, "MCDE display is disabled\n");
}
@@ -1048,8 +1060,9 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
- /* Send a single frame using software sync */
- mcde_display_send_one_frame(mcde);
+ if (!mcde->video_mode)
+ /* Send a single frame using software sync */
+ mcde_display_send_one_frame(mcde);
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*
@@ -1097,6 +1110,8 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
.enable = mcde_display_enable,
.disable = mcde_display_disable,
.update = mcde_display_update,
+ .enable_vblank = mcde_display_enable_vblank,
+ .disable_vblank = mcde_display_disable_vblank,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
@@ -1123,12 +1138,6 @@ int mcde_display_init(struct drm_device *drm)
DRM_FORMAT_YUV422,
};
- /* Provide vblank only when we have TE enabled */
- if (mcde->te_sync) {
- mcde_display_funcs.enable_vblank = mcde_display_enable_vblank;
- mcde_display_funcs.disable_vblank = mcde_display_disable_vblank;
- }
-
ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
&mcde_display_funcs,
formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index dab4db021231..80edd6628979 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -19,6 +19,7 @@ struct mcde {
struct mipi_dsi_device *mdsi;
s16 stride;
bool te_sync;
+ bool video_mode;
bool oneshot_mode;
unsigned int flow_active;
spinlock_t flow_lock; /* Locks the channel flow control */
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index baf63fb6850a..9008ddcfc528 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -179,18 +179,10 @@ static int mcde_modeset_init(struct drm_device *drm)
mode_config->min_height = 1;
mode_config->max_height = 1080;
- /*
- * Currently we only support vblank handling on the DSI bridge, using
- * TE synchronization. If TE sync is not set up, it is still possible
- * to push out a single update on demand, but this is hard for DRM to
- * exploit.
- */
- if (mcde->te_sync) {
- ret = drm_vblank_init(drm, 1);
- if (ret) {
- dev_err(drm->dev, "failed to init vblank\n");
- goto out_config;
- }
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank\n");
+ goto out_config;
}
ret = mcde_display_init(drm);
@@ -237,7 +229,7 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver mcde_drm_driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.release = mcde_release,
.lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
@@ -254,8 +246,6 @@ static struct drm_driver mcde_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -319,7 +309,7 @@ static int mcde_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct mcde *mcde;
- struct component_match *match;
+ struct component_match *match = NULL;
struct resource *res;
u32 pid;
u32 val;
@@ -341,8 +331,6 @@ static int mcde_probe(struct platform_device *pdev)
drm->dev_private = mcde;
platform_set_drvdata(pdev, drm);
- /* Enable use of the TE signal and interrupt */
- mcde->te_sync = true;
/* Enable continuous updates: this is what Linux' framebuffer expects */
mcde->oneshot_mode = false;
drm->dev_private = mcde;
@@ -477,14 +465,18 @@ static int mcde_probe(struct platform_device *pdev)
struct device_driver *drv = &mcde_component_drivers[i]->driver;
struct device *p = NULL, *d;
- while ((d = bus_find_device(&platform_bus_type, p, drv,
- (void *)platform_bus_type.match))) {
+ while ((d = platform_find_device_by_driver(p, drv))) {
put_device(p);
component_match_add(dev, &match, mcde_compare_dev, d);
p = d;
}
put_device(p);
}
+ if (!match) {
+ dev_err(dev, "no matching components\n");
+ ret = -ENODEV;
+ goto clk_disable;
+ }
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
ret = PTR_ERR(match);
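
mcde_probe() now walks the platform bus with platform_find_device_by_driver() and fails early with -ENODEV when no component device was found at all; component_match_add() can still leave an ERR_PTR in the match pointer on allocation failure, which is why the IS_ERR() check stays. Condensed, with explanatory comments added, the matching loop is:

	struct component_match *match = NULL;
	struct device *p = NULL, *d;

	/* Collect every platform device that matches this component driver */
	while ((d = platform_find_device_by_driver(p, drv))) {
		put_device(p);	/* drop the reference taken on the previous hit */
		component_match_add(dev, &match, mcde_compare_dev, d);
		p = d;		/* resume the bus walk after this device */
	}
	put_device(p);

	if (!match)
		return -ENODEV;	/* nothing to bind against */
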
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 07f7090d08b3..bb6528b01cd0 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -39,7 +39,6 @@ struct mcde_dsi {
struct device *dev;
struct mcde *mcde;
struct drm_bridge bridge;
- struct drm_connector connector;
struct drm_panel *panel;
struct drm_bridge *bridge_out;
struct mipi_dsi_host dsi_host;
@@ -64,11 +63,6 @@ static inline struct mcde_dsi *host_to_mcde_dsi(struct mipi_dsi_host *h)
return container_of(h, struct mcde_dsi, dsi_host);
}
-static inline struct mcde_dsi *connector_to_mcde_dsi(struct drm_connector *c)
-{
- return container_of(c, struct mcde_dsi, connector);
-}
-
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
{
struct mcde_dsi *d;
@@ -124,12 +118,41 @@ bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
val = readl(d->regs + DSI_VID_MODE_STS_FLAG);
if (val)
- dev_err(d->dev, "some video mode error status\n");
+ dev_dbg(d->dev, "DSI_VID_MODE_STS_FLAG = %08x\n", val);
+ if (val & DSI_VID_MODE_STS_VSG_RUNNING)
+ dev_dbg(d->dev, "VID mode VSG running\n");
+ if (val & DSI_VID_MODE_STS_ERR_MISSING_DATA)
+ dev_err(d->dev, "VID mode missing data\n");
+ if (val & DSI_VID_MODE_STS_ERR_MISSING_HSYNC)
+ dev_err(d->dev, "VID mode missing HSYNC\n");
+ if (val & DSI_VID_MODE_STS_ERR_MISSING_VSYNC)
+ dev_err(d->dev, "VID mode missing VSYNC\n");
+ if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH)
+ dev_err(d->dev, "VID mode less bytes than expected between two HSYNC\n");
+ if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT)
+ dev_err(d->dev, "VID mode less lines than expected between two VSYNC\n");
+ if (val & (DSI_VID_MODE_STS_ERR_BURSTWRITE |
+ DSI_VID_MODE_STS_ERR_LINEWRITE |
+ DSI_VID_MODE_STS_ERR_LONGREAD))
+ dev_err(d->dev, "VID mode read/write error\n");
+ if (val & DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH)
+ dev_err(d->dev, "VID mode received packets differ from expected size\n");
+ if (val & DSI_VID_MODE_STS_VSG_RECOVERY)
+ dev_err(d->dev, "VID mode VSG in recovery mode\n");
writel(val, d->regs + DSI_VID_MODE_STS_CLR);
return te_received;
}
+static void mcde_dsi_attach_to_mcde(struct mcde_dsi *d)
+{
+ d->mcde->mdsi = d->mdsi;
+
+ d->mcde->video_mode = !!(d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+ /* Enable use of the TE signal for all command mode panels */
+ d->mcde->te_sync = !d->mcde->video_mode;
+}
+
static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
@@ -148,7 +171,7 @@ static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
d->mdsi = mdsi;
if (d->mcde)
- d->mcde->mdsi = mdsi;
+ mcde_dsi_attach_to_mcde(d);
return 0;
}
@@ -178,22 +201,26 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
const u32 loop_delay_us = 10; /* us */
const u8 *tx = msg->tx_buf;
u32 loop_counter;
- size_t txlen;
+ size_t txlen = msg->tx_len;
+ size_t rxlen = msg->rx_len;
u32 val;
int ret;
int i;
- txlen = msg->tx_len;
- if (txlen > 12) {
+ if (txlen > 16) {
dev_err(d->dev,
- "dunno how to write more than 12 bytes yet\n");
+ "dunno how to write more than 16 bytes yet\n");
+ return -EIO;
+ }
+ if (rxlen > 4) {
+ dev_err(d->dev,
+ "dunno how to read more than 4 bytes yet\n");
return -EIO;
}
dev_dbg(d->dev,
- "message to channel %d, %zd bytes",
- msg->channel,
- txlen);
+ "message to channel %d, write %zd bytes read %zd bytes\n",
+ msg->channel, txlen, rxlen);
/* Command "nature" */
if (MCDE_DSI_HOST_IS_READ(msg->type))
@@ -210,9 +237,7 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
if (mipi_dsi_packet_format_is_long(msg->type))
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT;
val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
- /* Add one to the length for the MIPI DCS command */
- val |= txlen
- << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
+ val |= txlen << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
val |= msg->type << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
@@ -221,25 +246,25 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
if (txlen > 0) {
val = 0;
for (i = 0; i < 4 && i < txlen; i++)
- val |= tx[i] << (i & 3) * 8;
+ val |= tx[i] << (i * 8);
}
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
if (txlen > 4) {
val = 0;
for (i = 0; i < 4 && (i + 4) < txlen; i++)
- val |= tx[i + 4] << (i & 3) * 8;
+ val |= tx[i + 4] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
}
if (txlen > 8) {
val = 0;
for (i = 0; i < 4 && (i + 8) < txlen; i++)
- val |= tx[i + 8] << (i & 3) * 8;
+ val |= tx[i + 8] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
}
if (txlen > 12) {
val = 0;
for (i = 0; i < 4 && (i + 12) < txlen; i++)
- val |= tx[i + 12] << (i & 3) * 8;
+ val |= tx[i + 12] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
}
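
The shift change above is behaviour-neutral, since (i & 3) * 8 equals i * 8 for i < 4, but it makes the little-endian packing into the WRDAT registers explicit. With a hypothetical payload tx = { 0x11, 0x22, 0x33, 0x44 } and txlen = 4:

	val = 0x11 | 0x22 << 8 | 0x33 << 16 | 0x44 << 24 = 0x44332211

so the first payload byte lands in bits 7:0 of DSI_DIRECT_CMD_WRDAT0 and the fourth in bits 31:24; bytes 4..15 of longer packets fill WRDAT1..WRDAT3 the same way.
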
@@ -249,17 +274,36 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
writel(1, d->regs + DSI_DIRECT_CMD_SEND);
loop_counter = 1000 * 1000 / loop_delay_us;
- while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
- DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
- && --loop_counter)
- usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
-
- if (!loop_counter) {
- dev_err(d->dev, "DSI write timeout!\n");
- return -ETIME;
+ if (MCDE_DSI_HOST_IS_READ(msg->type)) {
+ /* Read command */
+ while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
+ (DSI_DIRECT_CMD_STS_READ_COMPLETED |
+ DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR))
+ && --loop_counter)
+ usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
+ if (!loop_counter) {
+ dev_err(d->dev, "DSI read timeout!\n");
+ return -ETIME;
+ }
+ } else {
+ /* Writing only */
+ while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
+ DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
+ && --loop_counter)
+ usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
+
+ if (!loop_counter) {
+ dev_err(d->dev, "DSI write timeout!\n");
+ return -ETIME;
+ }
}
val = readl(d->regs + DSI_DIRECT_CMD_STS);
+ if (val & DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR) {
+ dev_err(d->dev, "read completed with error\n");
+ writel(1, d->regs + DSI_DIRECT_CMD_RD_INIT);
+ return -EIO;
+ }
if (val & DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED) {
val >>= DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT;
dev_err(d->dev, "error during transmission: %04x\n",
@@ -269,10 +313,7 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
if (!MCDE_DSI_HOST_IS_READ(msg->type)) {
/* Return number of bytes written */
- if (mipi_dsi_packet_format_is_long(msg->type))
- ret = 4 + txlen;
- else
- ret = 4;
+ ret = txlen;
} else {
/* OK this is a read command, get the response */
u32 rdsz;
@@ -282,7 +323,13 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
rdsz = readl(d->regs + DSI_DIRECT_CMD_RD_PROPERTY);
rdsz &= DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK;
rddat = readl(d->regs + DSI_DIRECT_CMD_RDDAT);
- for (i = 0; i < 4 && i < rdsz; i++)
+ if (rdsz < rxlen) {
+ dev_err(d->dev, "read error, requested %zd got %d\n",
+ rxlen, rdsz);
+ return -EIO;
+ }
+ /* FIXME: read more than 4 bytes */
+ for (i = 0; i < 4 && i < rxlen; i++)
rx[i] = (rddat >> (i * 8)) & 0xff;
ret = rdsz;
}
@@ -312,7 +359,7 @@ void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
val |= 2 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
- val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 <<
+ val |= MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM <<
DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
@@ -341,13 +388,14 @@ void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
const struct drm_display_mode *mode)
{
- u8 bpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format);
+ /* cpp, characters per pixel, number of bytes per pixel */
+ u8 cpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format) / 8;
+ u64 pclk;
u64 bpl;
- u32 hfp;
- u32 hbp;
- u32 hsa;
+ int hfp;
+ int hbp;
+ int hsa;
u32 blkline_pck, line_duration;
- u32 blkeol_pck, blkeol_duration;
u32 val;
val = 0;
@@ -384,11 +432,21 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
return;
}
- /* TODO: TVG could be enabled here */
+ /* TODO: TVG (test video generator) could be enabled here */
- /* Send blanking packet */
+ /*
+ * During vertical blanking: go to LP mode
+ * Like with the EOL setting, if this is not set, the EOL area will be
+ * filled with NULL or blanking packets in the vblank area.
+ * FIXME: some Samsung phones and display panels such as s6e63m0 use
+ * DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING here instead,
+ * figure out how to properly configure that from the panel.
+ */
val |= DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0;
- /* Send EOL packet */
+ /*
+ * During EOL: go to LP mode. If this is not set, the EOL area will be
+ * filled with NULL or blanking packets.
+ */
val |= DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0;
/* Recovery mode 1 */
val |= 1 << DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT;
@@ -396,13 +454,13 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
writel(val, d->regs + DSI_VID_MAIN_CTL);
/* Vertical frame parameters are pretty straight-forward */
- val = mode->vdisplay << DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
+ val = mode->vdisplay << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
/* vertical front porch */
val |= (mode->vsync_start - mode->vdisplay)
<< DSI_VID_VSIZE_VFP_LENGTH_SHIFT;
/* vertical sync active */
val |= (mode->vsync_end - mode->vsync_start)
- << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
+ << DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
/* vertical back porch */
val |= (mode->vtotal - mode->vsync_end)
<< DSI_VID_VSIZE_VBP_LENGTH_SHIFT;
@@ -410,36 +468,54 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
/*
* Horizontal frame parameters:
- * horizontal resolution is given in pixels and must be re-calculated
- * into bytes since this is what the hardware expects.
+ * horizontal resolution is given in pixels but must be re-calculated
+ * into bytes since this is what the hardware expects, these registers
+ * define the payload size of the packet.
+ *
+ * hfp = horizontal front porch in bytes
+ * hbp = horizontal back porch in bytes
+ * hsa = horizontal sync active in bytes
*
* 6 + 2 is HFP header + checksum
*/
- hfp = (mode->hsync_start - mode->hdisplay) * bpp - 6 - 2;
+ hfp = (mode->hsync_start - mode->hdisplay) * cpp - 6 - 2;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
/*
+ * Use sync pulse for sync: explicit HSA time
* 6 is HBP header + checksum
* 4 is RGB header + checksum
*/
- hbp = (mode->htotal - mode->hsync_end) * bpp - 4 - 6;
+ hbp = (mode->htotal - mode->hsync_end) * cpp - 4 - 6;
/*
* 6 is HBP header + checksum
* 4 is HSW packet bytes
* 4 is RGB header + checksum
*/
- hsa = (mode->hsync_end - mode->hsync_start) * bpp - 4 - 4 - 6;
+ hsa = (mode->hsync_end - mode->hsync_start) * cpp - 4 - 4 - 6;
} else {
/*
- * HBP includes both back porch and sync
+ * Use event for sync: HBP includes both back porch and sync
* 6 is HBP header + checksum
* 4 is HSW packet bytes
* 4 is RGB header + checksum
*/
- hbp = (mode->htotal - mode->hsync_start) * bpp - 4 - 4 - 6;
- /* HSA is not considered in this mode and set to 0 */
+ hbp = (mode->htotal - mode->hsync_start) * cpp - 4 - 4 - 6;
+ /* HSA is not present in this mode and set to 0 */
hsa = 0;
}
- dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u\n",
+ if (hfp < 0) {
+ dev_info(d->dev, "hfp negative, set to 0\n");
+ hfp = 0;
+ }
+ if (hbp < 0) {
+ dev_info(d->dev, "hbp negative, set to 0\n");
+ hbp = 0;
+ }
+ if (hsa < 0) {
+ dev_info(d->dev, "hsa negative, set to 0\n");
+ hsa = 0;
+ }
+ dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u bytes\n",
hfp, hbp, hsa);
/* Frame parameters: horizontal sync active */
@@ -450,91 +526,185 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
val |= hfp << DSI_VID_HSIZE1_HFP_LENGTH_SHIFT;
writel(val, d->regs + DSI_VID_HSIZE1);
- /* RGB data length (bytes on one scanline) */
- val = mode->hdisplay * (bpp / 8);
+ /* RGB data length (visible bytes on one scanline) */
+ val = mode->hdisplay * cpp;
writel(val, d->regs + DSI_VID_HSIZE2);
+ dev_dbg(d->dev, "RGB length, visible area on a line: %u bytes\n", val);
- /* TODO: further adjustments for TVG mode here */
+ /*
+ * Calculate the time between two pixels in picoseconds using
+ * the supplied refresh rate and total resolution including
+ * porches and sync.
+ */
+ /* (ps/s) / (pixels/s) = ps/pixels */
+ pclk = DIV_ROUND_UP_ULL(1000000000000,
+ (mode->vrefresh * mode->htotal * mode->vtotal));
+ dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
+ pclk);
/*
- * EOL packet length from bits per line calculations: pixel clock
- * is given in kHz, calculate the time between two pixels in
- * picoseconds.
+ * How many bytes per line will this update frequency yield?
+ *
+ * Calculate the number of picoseconds for one scanline (1), then
+ * divide by 1000000000000 (2) to convert from picoseconds to
+ * seconds at the pixel rate we want to output.
+ *
+ * Multiply with number of bytes per second at this video display
+ * frequency (3) to get number of bytes transferred during this
+ * time. Notice that we use the frequency the display wants,
+ * not what we actually get from the DSI PLL, which is hs_freq.
+ *
+ * These arithmetics are done in a different order to avoid
+ * overflow.
*/
- bpl = mode->clock * mode->htotal;
- bpl *= (d->hs_freq / 8);
- do_div(bpl, 1000000); /* microseconds */
- do_div(bpl, 1000000); /* seconds */
+ bpl = pclk * mode->htotal; /* (1) picoseconds per line */
+ dev_dbg(d->dev, "picoseconds per line: %llu\n", bpl);
+ /* Multiply with bytes per second (3) */
+ bpl *= (d->mdsi->hs_rate / 8);
+ /* Pixels per second (2) */
+ bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* microseconds */
+ bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* seconds */
+ /* parallel transactions in all lanes */
bpl *= d->mdsi->lanes;
- dev_dbg(d->dev, "calculated bytes per line: %llu\n", bpl);
+ dev_dbg(d->dev,
+ "calculated bytes per line: %llu @ %d Hz with HS %lu Hz\n",
+ bpl, mode->vrefresh, d->mdsi->hs_rate);
+
/*
* 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
* 4 is short packet for vsync/hsync
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
- /* Fixme: isn't the hsync width in pixels? */
+ /* Set the event packet size to 0 (not used) */
+ writel(0, d->regs + DSI_VID_BLKSIZE1);
+ /*
+ * FIXME: isn't the hsync width in pixels? The porch and
+ * sync area size is in pixels here, but this -6
+ * seems to be for bytes. It looks like this in the vendor
+ * code though. Is it completely untested?
+ */
blkline_pck = bpl - (mode->hsync_end - mode->hsync_start) - 6;
val = blkline_pck << DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT;
writel(val, d->regs + DSI_VID_BLKSIZE2);
} else {
+ /* Set the sync pulse packet size to 0 (not used) */
+ writel(0, d->regs + DSI_VID_BLKSIZE2);
+ /* Specifying payload size in bytes (-4-6 from manual) */
blkline_pck = bpl - 4 - 6;
+ if (blkline_pck > 0x1FFF)
+ dev_err(d->dev, "blkline_pck too big %d bytes\n",
+ blkline_pck);
val = blkline_pck << DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT;
+ val &= DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK;
writel(val, d->regs + DSI_VID_BLKSIZE1);
}
- line_duration = (blkline_pck + 6) / d->mdsi->lanes;
- dev_dbg(d->dev, "line duration %u\n", line_duration);
+ /*
+ * The line duration is used to scale back the frequency from
+ * the max frequency supported by the HS clock to the desired
+ * update frequency in vrefresh.
+ */
+ line_duration = blkline_pck + 6;
+ /*
+ * The datasheet contains this complex condition for decreasing
+ * the line duration by 1 under very specific circumstances.
+ * Here we also imply that LP is used during burst EOL.
+ */
+ if (d->mdsi->lanes == 2 && (hsa & 0x01) && (hfp & 0x01)
+ && (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST))
+ line_duration--;
+ line_duration = DIV_ROUND_CLOSEST(line_duration, d->mdsi->lanes);
+ dev_dbg(d->dev, "line duration %u bytes\n", line_duration);
val = line_duration << DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT;
/*
* This is the time to perform LP->HS on D-PHY
* FIXME: nowhere to get this from: DT property on the DSI?
+ * The manual says this is "system dependent";
+ * values like 48 and 72 are seen in the vendor code.
*/
- val |= 0 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
+ val |= 48 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
writel(val, d->regs + DSI_VID_DPHY_TIME);
- /* Calculate block end of line */
- blkeol_pck = bpl - mode->hdisplay * bpp - 6;
- blkeol_duration = (blkeol_pck + 6) / d->mdsi->lanes;
- dev_dbg(d->dev, "blkeol pck: %u, duration: %u\n",
- blkeol_pck, blkeol_duration);
-
+ /*
+ * See the manual figure 657 page 2203 for understanding the impact
+ * of the different burst mode settings.
+ */
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- /* Set up EOL clock for burst mode */
+ int blkeol_pck, blkeol_duration;
+ /*
+ * Packet size at EOL for burst mode, this is only used
+ * if DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is NOT set,
+ * but we instead send NULL or blanking packets at EOL.
+ * This is given in number of bytes.
+ *
+ * See the manual page 2198 for the 13 reg_blkeol_pck bits.
+ */
+ blkeol_pck = bpl - (mode->htotal * cpp) - 6;
+ if (blkeol_pck < 0) {
+ dev_err(d->dev, "video block does not fit on line!\n");
+ dev_err(d->dev,
+ "calculated bytes per line: %llu @ %d Hz\n",
+ bpl, mode->vrefresh);
+ dev_err(d->dev,
+ "bytes per line (blkline_pck) %u bytes\n",
+ blkline_pck);
+ dev_err(d->dev,
+ "blkeol_pck becomes %d bytes\n", blkeol_pck);
+ return;
+ }
+ dev_dbg(d->dev, "BLKEOL packet: %d bytes\n", blkeol_pck);
+
val = readl(d->regs + DSI_VID_BLKSIZE1);
+ val &= ~DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK;
val |= blkeol_pck << DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT;
writel(val, d->regs + DSI_VID_BLKSIZE1);
- writel(blkeol_pck, d->regs + DSI_VID_VCA_SETTING2);
-
- writel(blkeol_duration, d->regs + DSI_VID_PCK_TIME);
- writel(blkeol_duration - 6, d->regs + DSI_VID_VCA_SETTING1);
+ /* Use the same value for exact burst limit */
+ val = blkeol_pck <<
+ DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+ val &= DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK;
+ writel(val, d->regs + DSI_VID_VCA_SETTING2);
+ /*
+ * This BLKEOL duration is claimed to be the duration in clock
+ * cycles of the BLLP end-of-line (EOL) period for each line if
+ * DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is set.
+ *
+ * It is hard to trust the manual's claim that this is in clock
+ * cycles as we mimic the behaviour of the vendor code, which
+ * appears to write a number of bytes that would have been
+ * transferred on a single lane.
+ *
+ * See the manual figure 657 page 2203 and page 2198 for the 13
+ * reg_blkeol_duration bits.
+ *
+ * FIXME: should this also be set up for non-burst mode
+ * according to figure 565 page 2202?
+ */
+ blkeol_duration = DIV_ROUND_CLOSEST(blkeol_pck + 6,
+ d->mdsi->lanes);
+ dev_dbg(d->dev, "BLKEOL duration: %d clock cycles\n",
+ blkeol_duration);
+
+ val = readl(d->regs + DSI_VID_PCK_TIME);
+ val &= ~DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK;
+ val |= blkeol_duration <<
+ DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT;
+ writel(val, d->regs + DSI_VID_PCK_TIME);
+
+ /* Max burst limit, this is given in bytes */
+ val = readl(d->regs + DSI_VID_VCA_SETTING1);
+ val &= ~DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK;
+ val |= (blkeol_pck - 6) <<
+ DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT;
+ writel(val, d->regs + DSI_VID_VCA_SETTING1);
}
/* Maximum line limit */
val = readl(d->regs + DSI_VID_VCA_SETTING2);
- val |= blkline_pck <<
- DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+ val &= ~DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK;
+ val |= (blkline_pck - 6) <<
+ DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT;
writel(val, d->regs + DSI_VID_VCA_SETTING2);
-
- /* Put IF1 into video mode */
- val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
- val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
- writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
-
- /* Disable command mode on IF1 */
- val = readl(d->regs + DSI_CMD_MODE_CTL);
- val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
- writel(val, d->regs + DSI_CMD_MODE_CTL);
-
- /* Enable some error interrupts */
- val = readl(d->regs + DSI_VID_MODE_STS_CTL);
- val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
- val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
- writel(val, d->regs + DSI_VID_MODE_STS_CTL);
-
- /* Enable video mode */
- val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
- val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
- writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ dev_dbg(d->dev, "blkline pck: %d bytes\n", blkline_pck - 6);
}
static void mcde_dsi_start(struct mcde_dsi *d)
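
To make the bytes-per-line arithmetic above concrete, here is a worked example with purely hypothetical numbers (none of them come from the patch): assume vrefresh = 60 Hz, htotal = 400, vtotal = 680, hdisplay = 360, cpp = 3 (RGB888), hs_rate = 420 MHz and 2 lanes.

	pclk        = DIV_ROUND_UP(10^12, 60 * 400 * 680)    = 61275 ps/pixel
	ps per line = 61275 * 400                             = 24510000 ps
	bytes/s     = 420000000 / 8                           = 52500000 per lane
	bpl         = floor(24510000 * 52500000 / 10^12) * 2  = 1286 * 2 = 2572 bytes
	blkline_pck = 2572 - 4 - 6  (event sync)              = 2562 bytes
	blkeol_pck  = 2572 - 400 * 3 - 6  (burst mode)        = 1366 bytes

blkeol_pck stays positive, so with these numbers the video block fits on the line and the burst branch can program DSI_VID_BLKSIZE1 and the VCA limits without hitting the new error path.
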
@@ -646,30 +816,25 @@ static void mcde_dsi_start(struct mcde_dsi *d)
static void mcde_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+ u32 val;
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ /* Enable video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ }
dev_info(d->dev, "enable DSI master\n");
};
-static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj)
+static void mcde_dsi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
- unsigned long pixel_clock_hz = mode->clock * 1000;
unsigned long hs_freq, lp_freq;
u32 val;
int ret;
- if (!d->mdsi) {
- dev_err(d->dev, "no DSI device attached to encoder!\n");
- return;
- }
-
- dev_info(d->dev, "set DSI master to %dx%d %lu Hz %s mode\n",
- mode->hdisplay, mode->vdisplay, pixel_clock_hz,
- (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
- );
-
/* Copy maximum clock frequencies */
if (d->mdsi->lp_rate)
lp_freq = d->mdsi->lp_rate;
@@ -708,7 +873,21 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
d->hs_freq);
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- mcde_dsi_setup_video_mode(d, mode);
+ /* Put IF1 into video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+
+ /* Disable command mode on IF1 */
+ val = readl(d->regs + DSI_CMD_MODE_CTL);
+ val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
+ writel(val, d->regs + DSI_CMD_MODE_CTL);
+
+ /* Enable some error interrupts */
+ val = readl(d->regs + DSI_VID_MODE_STS_CTL);
+ val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
+ val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
+ writel(val, d->regs + DSI_VID_MODE_STS_CTL);
} else {
/* Command mode, clear IF1 ID */
val = readl(d->regs + DSI_CMD_MODE_CTL);
@@ -722,6 +901,26 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
}
+static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj)
+{
+ struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+
+ if (!d->mdsi) {
+ dev_err(d->dev, "no DSI device attached to encoder!\n");
+ return;
+ }
+
+ dev_info(d->dev, "set DSI master to %dx%d %u Hz %s mode\n",
+ mode->hdisplay, mode->vdisplay, mode->clock * 1000,
+ (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
+ );
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
+ mcde_dsi_setup_video_mode(d, mode);
+}
+
static void mcde_dsi_wait_for_command_mode_stop(struct mcde_dsi *d)
{
u32 val;
@@ -787,67 +986,23 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
clk_disable_unprepare(d->lp_clk);
}
-/*
- * This connector needs no special handling, just use the default
- * helpers for everything. It's pretty dummy.
- */
-static const struct drm_connector_funcs mcde_dsi_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int mcde_dsi_get_modes(struct drm_connector *connector)
-{
- struct mcde_dsi *d = connector_to_mcde_dsi(connector);
-
- /* Just pass the question to the panel */
- if (d->panel)
- return drm_panel_get_modes(d->panel);
-
- /* TODO: deal with bridges */
-
- return 0;
-}
-
-static const struct drm_connector_helper_funcs
-mcde_dsi_connector_helper_funcs = {
- .get_modes = mcde_dsi_get_modes,
-};
-
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
int ret;
- drm_connector_helper_add(&d->connector,
- &mcde_dsi_connector_helper_funcs);
-
if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
dev_err(d->dev, "we need atomic updates\n");
return -ENOTSUPP;
}
- ret = drm_connector_init(drm, &d->connector,
- &mcde_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- if (ret) {
- dev_err(d->dev, "failed to initialize DSI bridge connector\n");
- return ret;
- }
- d->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- /* The encoder in the bridge attached to the DSI bridge */
- drm_connector_attach_encoder(&d->connector, bridge->encoder);
- /* Then we attach the DSI bridge to the output (panel etc) bridge */
+ /* Attach the DSI bridge to the output (panel etc) bridge */
ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
if (ret) {
dev_err(d->dev, "failed to attach the DSI bridge\n");
return ret;
}
- d->connector.status = connector_status_connected;
return 0;
}
@@ -857,6 +1012,7 @@ static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
.mode_set = mcde_dsi_bridge_mode_set,
.disable = mcde_dsi_bridge_disable,
.enable = mcde_dsi_bridge_enable,
+ .pre_enable = mcde_dsi_bridge_pre_enable,
};
static int mcde_dsi_bind(struct device *dev, struct device *master,
@@ -877,7 +1033,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
d->mcde = mcde;
/* If the display attached before binding, set this up */
if (d->mdsi)
- d->mcde->mdsi = d->mdsi;
+ mcde_dsi_attach_to_mcde(d);
/* Obtain the clocks */
d->hs_clk = devm_clk_get(dev, "hs");
@@ -911,19 +1067,21 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
for_each_available_child_of_node(dev->of_node, child) {
panel = of_drm_find_panel(child);
if (IS_ERR(panel)) {
- dev_err(dev, "failed to find panel try bridge (%lu)\n",
+ dev_err(dev, "failed to find panel try bridge (%ld)\n",
PTR_ERR(panel));
+ panel = NULL;
+
bridge = of_drm_find_bridge(child);
if (IS_ERR(bridge)) {
- dev_err(dev, "failed to find bridge (%lu)\n",
+ dev_err(dev, "failed to find bridge (%ld)\n",
PTR_ERR(bridge));
return PTR_ERR(bridge);
}
}
}
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge)) {
dev_err(dev, "error adding panel bridge\n");
return PTR_ERR(bridge);
diff --git a/drivers/gpu/drm/mcde/mcde_dsi_regs.h b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
index c9253321a3be..16551af1089e 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi_regs.h
+++ b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
@@ -123,17 +123,6 @@
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT BIT(3)
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_TURN_ON_PERIPHERAL 50
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHUT_DOWN_PERIPHERAL 34
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN BIT(21)
@@ -239,6 +228,7 @@
#define DSI_VID_PCK_TIME 0x000000A8
#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00000FFF
#define DSI_VID_DPHY_TIME 0x000000AC
#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
@@ -248,6 +238,16 @@
#define DSI_VID_MODE_STS 0x000000BC
#define DSI_VID_MODE_STS_VSG_RUNNING BIT(0)
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA BIT(1)
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC BIT(2)
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC BIT(3)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH BIT(4)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT BIT(5)
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE BIT(6)
+#define DSI_VID_MODE_STS_ERR_LINEWRITE BIT(7)
+#define DSI_VID_MODE_STS_ERR_LONGREAD BIT(8)
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH BIT(9)
+#define DSI_VID_MODE_STS_VSG_RECOVERY BIT(10)
#define DSI_VID_VCA_SETTING1 0x000000C0
#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 82ae49c64221..b7a82ed5788f 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -7,11 +7,12 @@ mediatek-drm-y := mtk_disp_color.o \
mtk_drm_ddp.o \
mtk_drm_ddp_comp.o \
mtk_drm_drv.o \
- mtk_drm_fb.o \
mtk_drm_gem.o \
mtk_drm_plane.o \
mtk_dsi.o \
mtk_mipi_tx.o \
+ mtk_mt8173_mipi_tx.o \
+ mtk_mt8183_mipi_tx.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
@@ -19,7 +20,7 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi.o \
mtk_hdmi_ddc.o \
- mtk_mt2701_hdmi_phy.o \
+ mtk_mt2701_hdmi_phy.o \
mtk_mt8173_hdmi_phy.o \
mtk_hdmi_phy.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index f33d98b356d6..6fb0d6983a4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -3,12 +3,13 @@
* Copyright (c) 2017 MediaTek Inc.
*/
-#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_color *color = comp_to_color(comp);
- writel(w, comp->regs + DISP_COLOR_WIDTH(color));
- writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+ mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
+ mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
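
The conversion in mtk_color_config() above (and in the OVL and RDMA callbacks below) is mechanical but easy to misread during review: the value becomes the second argument, the component and register offset move to the end, and for masked updates the mask now trails the offset. Side by side, taken from the RDMA hunk further down:

	/* before: CPU-only read-modify-write */
	rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);

	/* after: same update via mtk_ddp_write_mask(), which takes an optional cmdq_pkt */
	mtk_ddp_write_mask(cmdq_pkt, width, comp,
			   DISP_REG_RDMA_SIZE_CON_0, 0xfff);
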
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index c4f07c28c74f..891d80c73e04 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -3,12 +3,15 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
+#include <drm/drm_fourcc.h>
+
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -19,6 +22,8 @@
#define DISP_REG_OVL_EN 0x000c
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
+#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
@@ -31,7 +36,9 @@
#define DISP_REG_OVL_ADDR_MT8173 0x0f40
#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
-#define OVL_RDMA_MEM_GMC 0x40402020
+#define GMC_THRESHOLD_BITS 16
+#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
+#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
#define OVL_CON_BYTE_SWAP BIT(24)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -46,9 +53,13 @@
OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+#define OVL_CON_VIRT_FLIP BIT(9)
+#define OVL_CON_HORZ_FLIP BIT(10)
struct mtk_disp_ovl_data {
unsigned int addr;
+ unsigned int gmc_bits;
+ unsigned int layer_nr;
bool fmt_rgb565_is_0;
};
@@ -114,42 +125,90 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
if (w != 0 && h != 0)
- writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+ mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+ DISP_REG_OVL_ROI_SIZE);
+ mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
- writel(0x1, comp->regs + DISP_REG_OVL_RST);
- writel(0x0, comp->regs + DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
}
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
{
- return 4;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+
+ return ovl->data->layer_nr;
}
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
{
- unsigned int reg;
+ return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+}
+
+static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *mtk_state)
+{
+ struct drm_plane_state *state = &mtk_state->base;
+ unsigned int rotation = 0;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ rotation &= ~DRM_MODE_ROTATE_0;
+
+ /* We can only do reflection, not rotation */
+ if ((rotation & DRM_MODE_ROTATE_MASK) != 0)
+ return -EINVAL;
+
+ /*
+ * TODO: Rotating/reflecting YUV buffers is not supported at this time.
+ * Only RGB[AX] variants are supported.
+ */
+ if (state->fb->format->is_yuv && rotation != 0)
+ return -EINVAL;
- writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
- writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+ state->rotation = rotation;
- reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
- reg = reg | BIT(idx);
- writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+ return 0;
}
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- unsigned int reg;
+ unsigned int gmc_thrshd_l;
+ unsigned int gmc_thrshd_h;
+ unsigned int gmc_value;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
- reg = reg & ~BIT(idx);
- writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+ mtk_ddp_write(cmdq_pkt, 0x1, comp,
+ DISP_REG_OVL_RDMA_CTRL(idx));
+ gmc_thrshd_l = GMC_THRESHOLD_LOW >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ if (ovl->data->gmc_bits == 10)
+ gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
+ else
+ gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
+ gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
+ mtk_ddp_write(cmdq_pkt, gmc_value,
+ comp, DISP_REG_OVL_RDMA_GMC(idx));
+ mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+ DISP_REG_OVL_SRC_CON, BIT(idx));
+}
- writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ DISP_REG_OVL_SRC_CON, BIT(idx));
+ mtk_ddp_write(cmdq_pkt, 0, comp,
+ DISP_REG_OVL_RDMA_CTRL(idx));
}
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
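
The GMC threshold computation in mtk_ovl_layer_on() generalises the old hard-coded OVL_RDMA_MEM_GMC value. Working it through for the two cases the code handles shows that the existing 8-bit parts keep exactly the register value they had before:

	gmc_bits = 8 (the mt2701/mt8173 data below):
		gmc_thrshd_l = ((1 << 16) / 8) >> (16 - 8) = 0x20
		gmc_thrshd_h = ((1 << 16) / 4) >> (16 - 8) = 0x40
		gmc_value    = 0x20 | 0x20 << 8 | 0x40 << 16 | 0x40 << 24
			     = 0x40402020   (the removed OVL_RDMA_MEM_GMC)

	gmc_bits = 10:
		gmc_thrshd_h = ((1 << 16) / 4) >> (16 - 10) = 0x100
		gmc_value    = 0x100 | 0x100 << 16 = 0x01000100
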
@@ -189,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
}
static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
struct mtk_plane_pending_state *pending = &state->pending;
@@ -200,21 +260,55 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int con;
- if (!pending->enable)
- mtk_ovl_layer_off(comp, idx);
+ if (!pending->enable) {
+ mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+ return;
+ }
con = ovl_fmt_convert(ovl, fmt);
- if (idx != 0)
+ if (state->base.fb->format->has_alpha)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
- writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
- writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
- writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
- writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
- writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
+ if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ con |= OVL_CON_VIRT_FLIP;
+ addr += (pending->height - 1) * pending->pitch;
+ }
- if (pending->enable)
- mtk_ovl_layer_on(comp, idx);
+ if (pending->rotation & DRM_MODE_REFLECT_X) {
+ con |= OVL_CON_HORZ_FLIP;
+ addr += pending->pitch - 1;
+ }
+
+ mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+ DISP_REG_OVL_CON(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+ DISP_REG_OVL_PITCH(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+ DISP_REG_OVL_SRC_SIZE(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+ DISP_REG_OVL_OFFSET(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+ DISP_REG_OVL_ADDR(ovl, idx));
+
+ mtk_ovl_layer_on(comp, idx, cmdq_pkt);
+}
+
+static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
+static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
}
static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
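
The reflection support in mtk_ovl_layer_config() adjusts the DMA start address to match the flipped read order: DRM_MODE_REFLECT_Y starts the fetch at the first byte of the last line, DRM_MODE_REFLECT_X at the last byte of the first line. With purely illustrative numbers, a 1920x1080 XRGB8888 plane with pitch = 1920 * 4 = 7680 bytes:

	REFLECT_Y: addr += (1080 - 1) * 7680 = 8286720   (start of line 1079)
	REFLECT_X: addr += 7680 - 1          = 7679      (end of line 0)
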
@@ -223,10 +317,12 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
- .layer_on = mtk_ovl_layer_on,
- .layer_off = mtk_ovl_layer_off,
+ .layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
};
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
@@ -276,7 +372,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ priv->data = of_device_get_match_data(dev);
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node,
+ priv->data->layer_nr == 4 ?
+ MTK_DISP_OVL :
+ MTK_DISP_OVL_2L);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
return comp_id;
@@ -289,8 +390,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
- priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -316,11 +415,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT2701,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = false,
};
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = true,
};
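[Editor's note, not part of the patch: the DRM_MODE_REFLECT_X/Y handling added to mtk_ovl_layer_config() above compensates for the reversed scan direction by moving the DMA start address to the opposite edge of the buffer. Below is a minimal standalone sketch of that address math; the buffer geometry and helper name are illustrative.]

/* Standalone sketch of the reflection start-address offsets used above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ovl_scanout_addr(uint32_t base, uint32_t pitch,
				 uint32_t height, int reflect_x, int reflect_y)
{
	uint32_t addr = base;

	if (reflect_y)		/* scan lines bottom-up: start at the last line */
		addr += (height - 1) * pitch;
	if (reflect_x)		/* scan pixels right-to-left: start at the line end */
		addr += pitch - 1;

	return addr;
}

int main(void)
{
	/* 1920x1080 XRGB8888 buffer at 0x40000000, pitch = 1920 * 4 bytes */
	printf("REFLECT_Y start: 0x%08x\n",
	       ovl_scanout_addr(0x40000000, 1920 * 4, 1080, 0, 1));
	return 0;
}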
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 9a6f0a29e43c..0cb848d64206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -3,12 +3,13 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
unsigned int height, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
unsigned int threshold;
unsigned int reg;
struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
+ mtk_ddp_write_mask(cmdq_pkt, width, comp,
+ DISP_REG_RDMA_SIZE_CON_0, 0xfff);
+ mtk_ddp_write_mask(cmdq_pkt, height, comp,
+ DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
@@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
reg = RDMA_FIFO_UNDERFLOW_EN |
RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
- writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
+ mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
}
static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
}
static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
struct mtk_plane_pending_state *pending = &state->pending;
@@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
con = rdma_fmt_convert(rdma, fmt);
- writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_INT_MTX_SEL,
- RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+ DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
+ comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL);
} else {
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_ENABLE, 0);
+ mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE);
}
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
+ mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+ DISP_RDMA_MEM_GMC_SETTING_0);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+ DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
- writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
- writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
- writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
- RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
}
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index bacd989cc9aa..01fa8b8d763d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -3,21 +3,24 @@
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_of.h>
-#include <linux/kernel.h>
+
+#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
-#include <linux/clk.h>
+
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
+
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a9007210dda1..0dfcd1787e65 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -3,14 +3,17 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
#include <asm/barrier.h>
-#include <drm/drmP.h>
+#include <soc/mediatek/smi.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <soc/mediatek/smi.h>
+#include <drm/drm_vblank.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
@@ -40,11 +43,20 @@ struct mtk_drm_crtc {
struct drm_plane *planes;
unsigned int layer_nr;
bool pending_planes;
+ bool pending_async_planes;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_client *cmdq_client;
+ u32 cmdq_event;
+#endif
void __iomem *config_regs;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
+
+ /* lock for display hardware access */
+ struct mutex hw_lock;
};
struct mtk_crtc_state {
@@ -205,6 +217,36 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
+static
+struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ unsigned int *local_layer)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp;
+ int i, count = 0;
+ unsigned int local_index = plane - mtk_crtc->planes;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ comp = mtk_crtc->ddp_comp[i];
+ if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+ *local_layer = local_index - count;
+ return comp;
+ }
+ count += mtk_ddp_comp_layer_nr(comp);
+ }
+
+ WARN(1, "Failed to find component for plane %d\n", plane->index);
+ return NULL;
+}
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+ cmdq_pkt_destroy(data.data);
+}
+#endif
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -270,7 +312,10 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
- mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_on(comp);
+
+ mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
mtk_ddp_comp_start(comp);
}
@@ -278,10 +323,14 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
+ struct mtk_ddp_comp *comp;
+ unsigned int local_layer;
plane_state = to_mtk_plane_state(plane->state);
- mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
- plane_state);
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state, NULL);
}
return 0;
@@ -296,11 +345,16 @@ err_pm_runtime_put:
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
+ struct drm_crtc *crtc = &mtk_crtc->base;
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
+ }
+
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
@@ -317,14 +371,23 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_disp_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
-static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
+ struct cmdq_pkt *cmdq_handle)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
+ unsigned int local_layer;
/*
* TODO: instead of updating the registers here, we should prepare
@@ -334,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
if (state->pending_config) {
mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
- state->pending_vrefresh, 0);
+ state->pending_vrefresh, 0,
+ cmdq_handle);
state->pending_config = false;
}
@@ -346,13 +410,116 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(comp, i, plane_state);
- plane_state->pending.config = false;
- }
+ if (!plane_state->pending.config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state,
+ cmdq_handle);
+ plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (!plane_state->pending.async_config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state,
+ cmdq_handle);
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+}
+
+static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_pkt *cmdq_handle;
+#endif
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+ unsigned int pending_planes = 0, pending_async_planes = 0;
+ int i;
+
+ mutex_lock(&mtk_crtc->hw_lock);
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ if (plane_state->pending.dirty) {
+ plane_state->pending.config = true;
+ plane_state->pending.dirty = false;
+ pending_planes |= BIT(i);
+ } else if (plane_state->pending.async_dirty) {
+ plane_state->pending.async_config = true;
+ plane_state->pending.async_dirty = false;
+ pending_async_planes |= BIT(i);
+ }
+ }
+ if (pending_planes)
+ mtk_crtc->pending_planes = true;
+ if (pending_async_planes)
+ mtk_crtc->pending_async_planes = true;
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc, NULL);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client) {
+ cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
+ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
+ mtk_crtc_ddp_config(crtc, cmdq_handle);
+ cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ }
+#endif
+ mutex_unlock(&mtk_crtc->hw_lock);
+}
+
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state)
+{
+ unsigned int local_layer;
+ struct mtk_ddp_comp *comp;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ if (comp)
+ return mtk_ddp_comp_layer_check(comp, local_layer, state);
+ return 0;
+}
+
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ const struct drm_plane_helper_funcs *plane_helper_funcs =
+ plane->helper_private;
+
+ if (!mtk_crtc->enabled)
+ return;
+
+ plane_helper_funcs->atomic_update(plane, new_state);
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -402,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
mtk_crtc->pending_planes = true;
+ mtk_drm_crtc_hw_config(mtk_crtc);
/* Wait for planes to be disabled */
drm_crtc_wait_one_vblank(crtc);
@@ -433,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_drm_private *priv = crtc->dev->dev_private;
- unsigned int pending_planes = 0;
int i;
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.dirty) {
- plane_state->pending.config = true;
- plane_state->pending.dirty = false;
- pending_planes |= BIT(i);
- }
- }
- if (pending_planes)
- mtk_crtc->pending_planes = true;
if (crtc->state->color_mgmt_changed)
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
-
- if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
- mtk_crtc_ddp_config(crtc);
- mtk_disp_mutex_release(mtk_crtc->mutex);
- }
+ mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
+ }
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -510,23 +660,80 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
if (!priv->data->shadow_register)
- mtk_crtc_ddp_config(crtc);
+#endif
+ mtk_crtc_ddp_config(crtc, NULL);
mtk_drm_finish_page_flip(mtk_crtc);
}
+static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx)
+{
+ struct mtk_ddp_comp *comp;
+
+ if (comp_idx > 1)
+ return 0;
+
+ comp = mtk_crtc->ddp_comp[comp_idx];
+ if (!comp->funcs)
+ return 0;
+
+ if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
+ return 0;
+
+ return mtk_ddp_comp_layer_nr(comp);
+}
+
+static inline
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+{
+ if (plane_idx == 0)
+ return DRM_PLANE_TYPE_PRIMARY;
+ else if (plane_idx == 1)
+ return DRM_PLANE_TYPE_CURSOR;
+ else
+ return DRM_PLANE_TYPE_OVERLAY;
+}
+
+static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
+ struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx, int pipe)
+{
+ int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
+ int i, ret;
+
+ for (i = 0; i < num_planes; i++) {
+ ret = mtk_plane_init(drm_dev,
+ &mtk_crtc->planes[mtk_crtc->layer_nr],
+ BIT(pipe),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_ddp_comp_supported_rotations(comp));
+ if (ret)
+ return ret;
+
+ mtk_crtc->layer_nr++;
+ }
+ return 0;
+}
+
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
struct mtk_drm_crtc *mtk_crtc;
- enum drm_plane_type type;
- unsigned int zpos;
+ unsigned int num_comp_planes = 0;
int pipe = priv->num_pipes;
int ret;
int i;
+ bool has_ctm = false;
+ uint gamma_lut_size = 0;
if (!path)
return 0;
@@ -577,19 +784,25 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->ddp_comp[i] = comp;
+
+ if (comp->funcs) {
+ if (comp->funcs->gamma_set)
+ gamma_lut_size = MTK_LUT_SIZE;
+
+ if (comp->funcs->ctm_set)
+ has_ctm = true;
+ }
}
- mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
- sizeof(struct drm_plane),
- GFP_KERNEL);
-
- for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
- type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
- (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
- DRM_PLANE_TYPE_OVERLAY;
- ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
- BIT(pipe), type);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+
+ mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+ sizeof(struct drm_plane), GFP_KERNEL);
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+ pipe);
if (ret)
return ret;
}
@@ -599,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
NULL, pipe);
if (ret < 0)
return ret;
- drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
- drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
- priv->num_pipes++;
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+ priv->num_pipes++;
+ mutex_init(&mtk_crtc->hw_lock);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ mtk_crtc->cmdq_client =
+ cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ 2000);
+ if (IS_ERR(mtk_crtc->cmdq_client)) {
+ dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mtk_crtc->cmdq_client = NULL;
+ }
+ ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ drm_crtc_index(&mtk_crtc->base),
+ &mtk_crtc->cmdq_event);
+ if (ret)
+ dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+ drm_crtc_index(&mtk_crtc->base));
+#endif
return 0;
}
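[Editor's note, not part of the patch: mtk_drm_ddp_comp_for_plane() above resolves a CRTC-global plane index into the owning component plus a component-local layer index by walking the display path in order and subtracting each component's layer count. A minimal userspace model of that lookup follows; the layer counts are illustrative, e.g. an OVL with 4 layers followed by an OVL_2L with 2.]

/* Minimal model of the plane-to-component lookup above. */
#include <stdio.h>

static int comp_for_plane(const unsigned int *layer_nr, int ncomp,
			  unsigned int plane_idx, unsigned int *local_layer)
{
	unsigned int count = 0;
	int i;

	for (i = 0; i < ncomp; i++) {
		if (plane_idx < count + layer_nr[i]) {
			*local_layer = plane_idx - count;
			return i;	/* index of the owning component */
		}
		count += layer_nr[i];
	}
	return -1;			/* no component owns this plane */
}

int main(void)
{
	const unsigned int layers[] = { 4, 2 };	/* OVL0 then OVL_2L0 */
	unsigned int local;
	int comp = comp_for_plane(layers, 2, 5, &local);

	printf("plane 5 -> comp %d, local layer %u\n", comp, local); /* 1, 1 */
	return 0;
}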
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index fcc134eb00c9..a2b4677a451c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -19,5 +19,9 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state);
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
#endif /* MTK_DRM_CRTC_H */
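[Editor's note, not part of the patch: the async update entry point declared above ends up in mtk_drm_crtc_hw_config(), which folds per-plane dirty and async_dirty flags into two separate bitmasks so the vblank/CMDQ flush and the cursor fast path each touch only the layers that changed. A small standalone model of that bookkeeping; the types and names are stand-ins for the driver's state structures.]

/* Model of the dirty vs. async_dirty split in mtk_drm_crtc_hw_config(). */
#include <stdio.h>

struct plane_pending { int dirty, async_dirty, config, async_config; };

static void collect(struct plane_pending *p, int n,
		    unsigned int *pending, unsigned int *pending_async)
{
	int i;

	for (i = 0; i < n; i++) {
		if (p[i].dirty) {		/* full atomic commit path */
			p[i].config = 1;
			p[i].dirty = 0;
			*pending |= 1u << i;
		} else if (p[i].async_dirty) {	/* async cursor path */
			p[i].async_config = 1;
			p[i].async_dirty = 0;
			*pending_async |= 1u << i;
		}
	}
}

int main(void)
{
	struct plane_pending planes[3] = { { .dirty = 1 }, { .async_dirty = 1 }, { 0 } };
	unsigned int pending = 0, pending_async = 0;

	collect(planes, 3, &pending, &pending_async);
	printf("pending 0x%x, async 0x%x\n", pending, pending_async);	/* 0x1, 0x2 */
	return 0;
}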
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8106a71a7404..13035c906035 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -33,12 +33,15 @@
#define DISP_REG_CONFIG_DSI_SEL 0x050
#define DISP_REG_CONFIG_DPI_SEL 0x064
-#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
-#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
-#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
-#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
+#define MT2701_DISP_MUTEX0_MOD0 0x2c
+#define MT2701_DISP_MUTEX0_SOF0 0x30
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
@@ -139,12 +142,30 @@ struct mtk_disp_mutex {
bool claimed;
};
+enum mtk_ddp_mutex_sof_id {
+ DDP_MUTEX_SOF_SINGLE_MODE,
+ DDP_MUTEX_SOF_DSI0,
+ DDP_MUTEX_SOF_DSI1,
+ DDP_MUTEX_SOF_DPI0,
+ DDP_MUTEX_SOF_DPI1,
+ DDP_MUTEX_SOF_DSI2,
+ DDP_MUTEX_SOF_DSI3,
+};
+
+struct mtk_ddp_data {
+ const unsigned int *mutex_mod;
+ const unsigned int *mutex_sof;
+ const unsigned int mutex_mod_reg;
+ const unsigned int mutex_sof_reg;
+ const bool no_clk;
+};
+
struct mtk_ddp {
struct device *dev;
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
- const unsigned int *mutex_mod;
+ const struct mtk_ddp_data *data;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+ [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+ .mutex_mod = mt2701_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+ .mutex_mod = mt2712_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next,
unsigned int *addr)
@@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int sof_id;
unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI1:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI2:
- reg = MUTEX_SOF_DSI2;
+ sof_id = DDP_MUTEX_SOF_DSI2;
break;
case DDP_COMPONENT_DSI3:
- reg = MUTEX_SOF_DSI3;
+ sof_id = DDP_MUTEX_SOF_DSI3;
break;
case DDP_COMPONENT_DPI0:
- reg = MUTEX_SOF_DPI0;
+ sof_id = DDP_MUTEX_SOF_DPI0;
break;
case DDP_COMPONENT_DPI1:
- reg = MUTEX_SOF_DPI1;
+ sof_id = DDP_MUTEX_SOF_DPI1;
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << ddp->mutex_mod[id];
+ reg |= 1 << ddp->data->mutex_mod[id];
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << (ddp->mutex_mod[id] - 32);
+ reg |= 1 << (ddp->data->mutex_mod[id] - 32);
writel_relaxed(reg, ddp->regs + offset);
}
return;
}
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ writel_relaxed(ddp->data->mutex_sof[sof_id],
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
}
void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
@@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
- ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+ mutex->id));
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << ddp->mutex_mod[id]);
+ reg &= ~(1 << ddp->data->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
writel_relaxed(reg, ddp->regs + offset);
}
break;
@@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev)
for (i = 0; i < 10; i++)
ddp->mutex[i].id = i;
- ddp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddp->clk);
+ ddp->data = of_device_get_match_data(dev);
+
+ if (!ddp->data->no_clk) {
+ ddp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddp->clk);
+ }
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
- ddp->mutex_mod = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, ddp);
return 0;
@@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
- { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
- { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = &mt2701_ddp_driver_data},
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = &mt2712_ddp_driver_data},
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = &mt8173_ddp_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
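[Editor's note, not part of the patch: with this change the MOD/SOF offsets are no longer hard-coded; each SoC hands the base of its mutex register block to the macros through struct mtk_ddp_data, so a future SoC with a different layout only needs a new match-data entry. A quick standalone check of the resulting addresses for the MT2701-style bases used above.]

/* Demonstration of the parameterised mutex register offsets. */
#include <stdio.h>

#define DISP_REG_MUTEX_MOD(mod_base, n)	((mod_base) + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(sof_base, n)	((sof_base) + 0x20 * (n))

int main(void)
{
	const unsigned int mt2701_mod0 = 0x2c, mt2701_sof0 = 0x30;
	int n;

	for (n = 0; n < 3; n++)		/* per-mutex stride stays 0x20 */
		printf("mutex %d: MOD @ 0x%03x, SOF @ 0x%03x\n", n,
		       DISP_REG_MUTEX_MOD(mt2701_mod0, n),
		       DISP_REG_MUTEX_SOF(mt2701_sof0, n));
	return 0;
}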
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index b38963f1f2ec..1f5a112bb034 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -12,7 +12,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
@@ -33,6 +33,26 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define CCORR_ENGINE_EN BIT(1)
+#define CCORR_GAMMA_OFF BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP BIT(3)
+#define DISP_CCORR_SIZE 0x0030
+#define DISP_CCORR_COEF_0 0x0080
+#define DISP_CCORR_COEF_1 0x0084
+#define DISP_CCORR_COEF_2 0x0088
+#define DISP_CCORR_COEF_3 0x008C
+#define DISP_CCORR_COEF_4 0x0090
+
+#define DISP_DITHER_EN 0x0000
+#define DITHER_EN BIT(0)
+#define DISP_DITHER_CFG 0x0020
+#define DITHER_RELAY_MODE BIT(0)
+#define DISP_DITHER_SIZE 0x0030
+
#define DISP_GAMMA_EN 0x0000
#define DISP_GAMMA_CFG 0x0020
#define DISP_GAMMA_SIZE 0x0030
@@ -64,36 +84,84 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt)
+ cmdq_pkt_write(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value);
+ else
+#endif
+ writel(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp,
+ unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt)
+ cmdq_pkt_write(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value);
+ else
+#endif
+ writel_relaxed(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
+ unsigned int value,
+ struct mtk_ddp_comp *comp,
+ unsigned int offset,
+ unsigned int mask)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt) {
+ cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value, mask);
+ } else {
+#endif
+ u32 tmp = readl(comp->regs + offset);
+
+ tmp = (tmp & ~mask) | (value & mask);
+ writel(tmp, comp->regs + offset);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ }
+#endif
+}
+
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG)
+ unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
{
 /* If bpc is 0, the dithering function is not enabled */
if (bpc == 0)
return;
if (bpc >= MTK_MIN_BPC) {
- writel(0, comp->regs + DISP_DITHER_5);
- writel(0, comp->regs + DISP_DITHER_7);
- writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
- DITHER_NEW_BIT_MODE,
- comp->regs + DISP_DITHER_15);
- writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
- DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- comp->regs + DISP_DITHER_16);
- writel(DISP_DITHERING, comp->regs + CFG);
+ mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt,
+ DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_NEW_BIT_MODE,
+ comp, DISP_DITHER_15);
+ mtk_ddp_write(cmdq_pkt,
+ DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+ comp, DISP_DITHER_16);
+ mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
}
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
- writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
- mtk_dither_set(comp, bpc, DISP_OD_CFG);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
+ mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
}
static void mtk_od_start(struct mtk_ddp_comp *comp)
@@ -108,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
}
static void mtk_aal_start(struct mtk_ddp_comp *comp)
@@ -123,12 +191,99 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
}
+static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
+ mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
+}
+
+static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
+{
+ writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+}
+
+/* Converts a DRM S31.32 value to the HW S1.10 format. */
+static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+{
+ u16 r;
+
+ /* Sign bit. */
+ r = in & BIT_ULL(63) ? BIT(11) : 0;
+
+ if ((in & GENMASK_ULL(62, 33)) > 0) {
+ /* identity value 0x100000000 -> 0x400; */
+ /* anything bigger is clamped to the max 0x7ff. */
+ r |= GENMASK(10, 0);
+ } else {
+ /* take the 11 most significant bits. */
+ r |= (in >> 22) & GENMASK(10, 0);
+ }
+
+ return r;
+}
+
+static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ struct drm_property_blob *blob = state->ctm;
+ struct drm_color_ctm *ctm;
+ const u64 *input;
+ uint16_t coeffs[9] = { 0 };
+ int i;
+ struct cmdq_pkt *cmdq_pkt = NULL;
+
+ if (!blob)
+ return;
+
+ ctm = (struct drm_color_ctm *)blob->data;
+ input = ctm->matrix;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+ coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
+
+ mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+ comp, DISP_CCORR_COEF_0);
+ mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+ comp, DISP_CCORR_COEF_1);
+ mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+ comp, DISP_CCORR_COEF_2);
+ mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+ comp, DISP_CCORR_COEF_3);
+ mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+ comp, DISP_CCORR_COEF_4);
+}
+
+static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
+}
+
+static void mtk_dither_start(struct mtk_ddp_comp *comp)
+{
+ writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+}
+
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
- mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
+ mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
}
static void mtk_gamma_start(struct mtk_ddp_comp *comp)
@@ -171,6 +326,19 @@ static const struct mtk_ddp_comp_funcs ddp_aal = {
.stop = mtk_aal_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .config = mtk_ccorr_config,
+ .start = mtk_ccorr_start,
+ .stop = mtk_ccorr_stop,
+ .ctm_set = mtk_ccorr_ctm_set,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .config = mtk_dither_config,
+ .start = mtk_dither_start,
+ .stop = mtk_dither_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
@@ -189,11 +357,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_OVL] = "ovl",
+ [MTK_DISP_OVL_2L] = "ovl_2l",
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_WDMA] = "wdma",
[MTK_DISP_COLOR] = "color",
+ [MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_AAL] = "aal",
[MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DSI] = "dsi",
[MTK_DPI] = "dpi",
@@ -213,8 +384,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
@@ -226,6 +399,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
@@ -259,6 +434,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct resource res;
+ struct cmdq_client_reg cmdq_reg;
+ int ret;
+#endif
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
@@ -312,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
comp->larb_dev = &larb_pdev->dev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (of_address_to_resource(node, 0, &res) != 0) {
+ dev_err(dev, "Missing reg in %s node\n", node->full_name);
+ return -EINVAL;
+ }
+ comp->regs_pa = res.start;
+
+ ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+ else
+ comp->subsys = cmdq_reg.subsys;
+#endif
return 0;
}
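[Editor's note, not part of the patch: mtk_ctm_s31_32_to_s1_10() above squeezes DRM's sign-magnitude S31.32 CTM coefficients into the CCORR block's 12-bit sign + 1.10 format. Below is a standalone copy of the conversion with a few worked values (1.0 maps to 0x400, out-of-range magnitudes clamp to 0x7ff); the userspace integer types stand in for the kernel's u64/u16.]

/* Worked example of the S31.32 -> S1.10 conversion.  DRM CTM entries are
 * sign-magnitude: bit 63 is the sign, bits 62:32 the integer part,
 * bits 31:0 the fraction. */
#include <stdint.h>
#include <stdio.h>

static uint16_t s31_32_to_s1_10(uint64_t in)
{
	uint16_t r = (in & (1ULL << 63)) ? (1 << 11) : 0;	/* sign bit */

	if (in & 0x7ffffffe00000000ULL)		/* magnitude >= 2.0: clamp */
		r |= 0x7ff;
	else					/* keep 1 integer + 10 fraction bits */
		r |= (in >> 22) & 0x7ff;
	return r;
}

int main(void)
{
	printf("1.0  -> 0x%03x\n", s31_32_to_s1_10(1ULL << 32));	/* 0x400 */
	printf("0.5  -> 0x%03x\n", s31_32_to_s1_10(1ULL << 31));	/* 0x200 */
	printf("-1.0 -> 0x%03x\n",
	       s31_32_to_s1_10((1ULL << 63) | (1ULL << 32)));		/* 0xc00 */
	return 0;
}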
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0ad287f427cc..debe36395fe7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -17,9 +17,12 @@ struct drm_crtc_state;
enum mtk_ddp_comp_type {
MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
MTK_DISP_RDMA,
MTK_DISP_WDMA,
MTK_DISP_COLOR,
+ MTK_DISP_CCORR,
+ MTK_DISP_DITHER,
MTK_DISP_AAL,
MTK_DISP_GAMMA,
MTK_DISP_UFOE,
@@ -36,8 +39,10 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
@@ -48,6 +53,8 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
@@ -62,21 +69,29 @@ enum mtk_ddp_comp_id {
};
struct mtk_ddp_comp;
-
+struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh, unsigned int bpc);
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
void (*start)(struct mtk_ddp_comp *comp);
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
- void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
- void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
+ int (*layer_check)(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state);
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
+ void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
+ void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
+ void (*ctm_set)(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state);
};
struct mtk_ddp_comp {
@@ -86,14 +101,17 @@ struct mtk_ddp_comp {
struct device *larb_dev;
enum mtk_ddp_comp_id id;
const struct mtk_ddp_comp_funcs *funcs;
+ resource_size_t regs_pa;
+ u8 subsys;
};
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
- unsigned int vrefresh, unsigned int bpc)
+ unsigned int vrefresh, unsigned int bpc,
+ struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->config)
- comp->funcs->config(comp, w, h, vrefresh, bpc);
+ comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
}
static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -121,34 +139,39 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
-static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+static inline
+unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
{
- if (comp->funcs && comp->funcs->layer_nr)
- return comp->funcs->layer_nr(comp);
+ if (comp->funcs && comp->funcs->supported_rotations)
+ return comp->funcs->supported_rotations(comp);
return 0;
}
-static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
- unsigned int idx)
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
{
- if (comp->funcs && comp->funcs->layer_on)
- comp->funcs->layer_on(comp, idx);
+ if (comp->funcs && comp->funcs->layer_nr)
+ return comp->funcs->layer_nr(comp);
+
+ return 0;
}
-static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
- unsigned int idx)
+static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state)
{
- if (comp->funcs && comp->funcs->layer_off)
- comp->funcs->layer_off(comp, idx);
+ if (comp->funcs && comp->funcs->layer_check)
+ return comp->funcs->layer_check(comp, idx, state);
+ return 0;
}
static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->layer_config)
- comp->funcs->layer_config(comp, idx, state);
+ comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
}
static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
@@ -158,6 +181,25 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
comp->funcs->gamma_set(comp, state);
}
+static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_on)
+ comp->funcs->bgclr_in_on(comp);
+}
+
+static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_off)
+ comp->funcs->bgclr_in_off(comp);
+}
+
+static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ if (comp->funcs && comp->funcs->ctm_set)
+ comp->funcs->ctm_set(comp, state);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -166,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG);
-
+ unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset,
+ unsigned int mask);
#endif /* MTK_DRM_DDP_COMP_H */
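[Editor's note, not part of the patch: the mtk_ddp_write*() helpers declared above are the single point where every register access either gets recorded into a GCE command packet (when a cmdq_pkt is passed) or falls back to an immediate MMIO write, so callers never branch on the CMDQ configuration themselves. The sketch below models that dispatch in userspace; struct fake_pkt and the print statements merely stand in for the CMDQ and MMIO back ends.]

/* Userspace model of the cmdq_pkt / MMIO dispatch in mtk_ddp_write(). */
#include <stdint.h>
#include <stdio.h>

struct fake_pkt { uint32_t n; };	/* stands in for struct cmdq_pkt */

static void pkt_write(struct fake_pkt *pkt, uint32_t pa, uint32_t val)
{
	pkt->n++;				/* command appended to the packet */
	printf("queued: *0x%08x = 0x%x\n", pa, val);
}

static void mmio_write(uint32_t pa, uint32_t val)
{
	printf("mmio:   *0x%08x = 0x%x\n", pa, val);
}

static void ddp_write(struct fake_pkt *pkt, uint32_t val,
		      uint32_t regs_pa, uint32_t offset)
{
	if (pkt)
		pkt_write(pkt, regs_pa + offset, val);	/* deferred, GCE replays it */
	else
		mmio_write(regs_pa + offset, val);	/* immediate CPU write */
}

int main(void)
{
	struct fake_pkt pkt = { 0 };

	ddp_write(NULL, 0x1, 0x14008000, 0x30);		/* CPU path */
	ddp_write(&pkt, 0x1, 0x14008000, 0x30);		/* CMDQ path */
	return 0;
}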
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..0563c6813333 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -4,25 +4,31 @@
* Author: YT SHEN <yt.shen@mediatek.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/iommu.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/pm_runtime.h>
+#include <drm/drm_vblank.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
#define DRIVER_NAME "mediatek"
@@ -31,99 +37,27 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static void mtk_atomic_schedule(struct mtk_drm_private *private,
- struct drm_atomic_state *state)
-{
- private->commit.state = state;
- schedule_work(&private->commit.work);
-}
-
-static void mtk_atomic_wait_for_fences(struct drm_atomic_state *state)
-{
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- int i;
-
- for_each_new_plane_in_state(state, plane, new_plane_state, i)
- mtk_fb_wait(new_plane_state->fb);
-}
-
-static void mtk_atomic_complete(struct mtk_drm_private *private,
- struct drm_atomic_state *state)
-{
- struct drm_device *drm = private->drm;
-
- mtk_atomic_wait_for_fences(state);
-
- /*
- * Mediatek drm supports runtime PM, so plane registers cannot be
- * written when their crtc is disabled.
- *
- * The comment for drm_atomic_helper_commit states:
- * For drivers supporting runtime PM the recommended sequence is
- *
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state,
- * DRM_PLANE_COMMIT_ACTIVE_ONLY);
- *
- * See the kerneldoc entries for these three functions for more details.
- */
- drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
- drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_put(state);
-}
-
-static void mtk_atomic_work(struct work_struct *work)
-{
- struct mtk_drm_private *private = container_of(work,
- struct mtk_drm_private, commit.work);
-
- mtk_atomic_complete(private, private->commit.state);
-}
+static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
-static int mtk_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state,
- bool async)
+static struct drm_framebuffer *
+mtk_drm_mode_fb_create(struct drm_device *dev,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *cmd)
{
- struct mtk_drm_private *private = drm->dev_private;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(drm, state);
- if (ret)
- return ret;
+ const struct drm_format_info *info = drm_get_format_info(dev, cmd);
- mutex_lock(&private->commit.lock);
- flush_work(&private->commit.work);
-
- ret = drm_atomic_helper_swap_state(state, true);
- if (ret) {
- mutex_unlock(&private->commit.lock);
- drm_atomic_helper_cleanup_planes(drm, state);
- return ret;
- }
+ if (info->num_planes != 1)
+ return ERR_PTR(-EINVAL);
- drm_atomic_state_get(state);
- if (async)
- mtk_atomic_schedule(private, state);
- else
- mtk_atomic_complete(private, state);
-
- mutex_unlock(&private->commit.lock);
-
- return 0;
+ return drm_gem_fb_create(dev, file, cmd);
}
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
.fb_create = mtk_drm_mode_fb_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = mtk_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
@@ -213,6 +147,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
struct mtk_drm_private *private = drm->dev_private;
struct platform_device *pdev;
struct device_node *np;
+ struct device *dma_dev;
int ret;
if (!iommu_present(&platform_bus_type))
@@ -240,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &mtk_drm_mode_config_helpers;
ret = component_bind_all(drm->dev, drm);
if (ret)
@@ -275,7 +211,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
goto err_component_unbind;
}
- private->dma_dev = &pdev->dev;
+ dma_dev = &pdev->dev;
+ private->dma_dev = dma_dev;
+
+ /*
+ * Configure the DMA segment size to make sure we get contiguous IOVA
+ * when importing PRIME buffers.
+ */
+ if (!dma_dev->dma_parms) {
+ private->dma_parms_allocated = true;
+ dma_dev->dma_parms =
+ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+ GFP_KERNEL);
+ }
+ if (!dma_dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_component_unbind;
+ }
+
+ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dma_dev, "Failed to set DMA segment size\n");
+ goto err_unset_dma_parms;
+ }
/*
* We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +243,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
drm->irq_enabled = true;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret < 0)
- goto err_component_unbind;
+ goto err_unset_dma_parms;
drm_kms_helper_poll_init(drm);
drm_mode_config_reset(drm);
return 0;
+err_unset_dma_parms:
+ if (private->dma_parms_allocated)
+ dma_dev->dma_parms = NULL;
err_component_unbind:
component_unbind_all(drm->dev, drm);
err_config_cleanup:
@@ -302,9 +263,14 @@ err_config_cleanup:
static void mtk_drm_kms_deinit(struct drm_device *drm)
{
+ struct mtk_drm_private *private = drm->dev_private;
+
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
+ if (private->dma_parms_allocated)
+ private->dma_dev->dma_parms = NULL;
+
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
}
@@ -320,9 +286,20 @@ static const struct file_operations mtk_drm_fops = {
.compat_ioctl = drm_compat_ioctl,
};
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct mtk_drm_private *private = dev->dev_private;
+
+ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
static struct drm_driver mtk_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -330,8 +307,7 @@ static struct drm_driver mtk_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = mtk_drm_gem_prime_import,
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -459,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!private)
return -ENOMEM;
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, mtk_atomic_work);
private->data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -511,6 +485,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
*/
if (comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_OVL ||
+ comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -524,12 +499,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
ret = -ENOMEM;
+ of_node_put(node);
goto err_node;
}
ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
- if (ret)
+ if (ret) {
+ of_node_put(node);
goto err_node;
+ }
private->ddp_comp[comp_id] = comp;
}
@@ -630,8 +608,8 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
- &mtk_dsi_driver,
&mtk_mipi_tx_driver,
+ &mtk_dsi_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..17bc99b9f5d4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -43,14 +43,9 @@ struct mtk_drm_private {
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
-
- struct {
- struct drm_atomic_state *state;
- struct work_struct work;
- struct mutex lock;
- } commit;
-
struct drm_atomic_state *suspend_state;
+
+ bool dma_parms_allocated;
};
extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
deleted file mode 100644
index 4c3ad7de2d3b..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/reservation.h>
-
-#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
-#include "mtk_drm_gem.h"
-
-static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
- .create_handle = drm_gem_fb_create_handle,
- .destroy = drm_gem_fb_destroy,
-};
-
-static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode,
- struct drm_gem_object *obj)
-{
- const struct drm_format_info *info = drm_get_format_info(dev, mode);
- struct drm_framebuffer *fb;
- int ret;
-
- if (info->num_planes != 1)
- return ERR_PTR(-EINVAL);
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
- drm_helper_mode_fill_fb_struct(dev, fb, mode);
-
- fb->obj[0] = obj;
-
- ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
- if (ret) {
- DRM_ERROR("failed to initialize framebuffer\n");
- kfree(fb);
- return ERR_PTR(ret);
- }
-
- return fb;
-}
-
-/*
- * Wait for any exclusive fence in fb's gem object's reservation object.
- *
- * Returns -ERESTARTSYS if interrupted, else 0.
- */
-int mtk_fb_wait(struct drm_framebuffer *fb)
-{
- struct drm_gem_object *gem;
- struct reservation_object *resv;
- long ret;
-
- if (!fb)
- return 0;
-
- gem = fb->obj[0];
- if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
- return 0;
-
- resv = gem->dma_buf->resv;
- ret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- /* MAX_SCHEDULE_TIMEOUT on success, -ERESTARTSYS if interrupted */
- if (WARN_ON(ret < 0))
- return ret;
-
- return 0;
-}
-
-struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *cmd)
-{
- const struct drm_format_info *info = drm_get_format_info(dev, cmd);
- struct drm_framebuffer *fb;
- struct drm_gem_object *gem;
- unsigned int width = cmd->width;
- unsigned int height = cmd->height;
- unsigned int size, bpp;
- int ret;
-
- if (info->num_planes != 1)
- return ERR_PTR(-EINVAL);
-
- gem = drm_gem_object_lookup(file, cmd->handles[0]);
- if (!gem)
- return ERR_PTR(-ENOENT);
-
- bpp = info->cpp[0];
- size = (height - 1) * cmd->pitches[0] + width * bpp;
- size += cmd->offsets[0];
-
- if (gem->size < size) {
- ret = -EINVAL;
- goto unreference;
- }
-
- fb = mtk_drm_framebuffer_init(dev, cmd, gem);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto unreference;
- }
-
- return fb;
-
-unreference:
- drm_gem_object_put_unlocked(gem);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
deleted file mode 100644
index 6b80c28e33cf..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_FB_H
-#define MTK_DRM_FB_H
-
-int mtk_fb_wait(struct drm_framebuffer *fb);
-struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *cmd);
-
-#endif /* MTK_DRM_FB_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 0d69698f8173..b04a3c2b111e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -3,10 +3,13 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
#include <linux/dma-buf.h>
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
@@ -268,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
pgprot_writecombine(PAGE_KERNEL));
out:
- kfree((void *)sgt);
+ kfree(sgt);
return mtk_gem->kvaddr;
}
@@ -282,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vunmap(vaddr);
mtk_gem->kvaddr = 0;
- kfree((void *)mtk_gem->pages);
+ kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f2ef83aed6f9..914cc7619cd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -4,21 +4,28 @@
* Author: CK Hu <ck.hu@mediatek.com>
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"
static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
@@ -69,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
kfree(to_mtk_plane_state(state));
}
+static int mtk_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane != state->crtc->cursor)
+ return -EINVAL;
+
+ if (!plane->state)
+ return -EINVAL;
+
+ if (!plane->state->fb)
+ return -EINVAL;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ else /* Special case for asynchronous cursor updates. */
+ crtc_state = state->crtc->state;
+
+ return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+ state->pending.async_dirty = true;
+
+ mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
+}
+
static const struct drm_plane_funcs mtk_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -83,13 +134,19 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
+ int ret;
if (!fb)
return 0;
- if (!state->crtc)
+ if (WARN_ON(!state->crtc))
return 0;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -131,6 +188,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.y = plane->state->dst.y1;
state->pending.width = drm_rect_width(&plane->state->dst);
state->pending.height = drm_rect_height(&plane->state->dst);
+ state->pending.rotation = plane->state->rotation;
wmb(); /* Make sure the above parameters are set before update */
state->pending.dirty = true;
}
@@ -146,13 +204,17 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
.atomic_disable = mtk_plane_atomic_disable,
+ .atomic_async_update = mtk_plane_atomic_async_update,
+ .atomic_async_check = mtk_plane_atomic_async_check,
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type)
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations)
{
int err;
@@ -164,6 +226,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ if (supported_rotations & ~DRM_MODE_ROTATE_0) {
+ err = drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+ if (err)
+ DRM_INFO("Create rotation property failed\n");
+ }
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 6f842df722c7..d454bece9535 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -20,7 +20,10 @@ struct mtk_plane_pending_state {
unsigned int y;
unsigned int width;
unsigned int height;
+ unsigned int rotation;
bool dirty;
+ bool async_dirty;
+ bool async_config;
};
struct mtk_plane_state {
@@ -35,6 +38,7 @@ to_mtk_plane_state(struct drm_plane_state *state)
}
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type);
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b91c4616644a..5fa1073cf26b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -3,12 +3,6 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
@@ -17,9 +11,18 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+
#include <video/mipi_display.h>
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "mtk_drm_ddp_comp.h"
#define DSI_START 0x00
@@ -37,6 +40,7 @@
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
+#define DPHY_RESET BIT(2)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
@@ -70,6 +74,7 @@
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
+#define DSI_SIZE_CON 0x38
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@@ -123,7 +128,10 @@
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_CMDQ0 0x180
+#define DSI_SHADOW_DEBUG 0x190U
+#define FORCE_COMMIT BIT(0)
+#define BYPASS_SHADOW BIT(1)
+
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
@@ -132,12 +140,6 @@
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
-#define T_LPX 5
-#define T_HS_PREP 6
-#define T_HS_TRAIL 8
-#define T_HS_EXIT 7
-#define T_HS_ZERO 10
-
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
#define MTK_DSI_HOST_IS_READ(type) \
@@ -146,8 +148,33 @@
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
+struct mtk_phy_timing {
+ u32 lpx;
+ u32 da_hs_prepare;
+ u32 da_hs_zero;
+ u32 da_hs_trail;
+
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+ u32 da_hs_exit;
+
+ u32 clk_hs_zero;
+ u32 clk_hs_trail;
+
+ u32 clk_hs_prepare;
+ u32 clk_hs_post;
+ u32 clk_hs_exit;
+};
+
struct phy;
+struct mtk_dsi_driver_data {
+ const u32 reg_cmdq_off;
+ bool has_shadow_ctl;
+ bool has_size_ctl;
+};
+
struct mtk_dsi {
struct mtk_ddp_comp ddp_comp;
struct device *dev;
@@ -170,10 +197,12 @@ struct mtk_dsi {
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct videomode vm;
+ struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
+ const struct mtk_dsi_driver_data *driver_data;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -201,18 +230,34 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- u32 ui, cycle_time;
-
- ui = 1000 / dsi->data_rate + 0x01;
- cycle_time = 8000 / dsi->data_rate + 0x01;
-
- timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
- timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
- T_HS_EXIT << 24;
- timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
- (NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
- NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
+ u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
+
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ timing->da_hs_prepare;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
+
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
+
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
+
+ timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
+ timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
+ timcon1 = timing->ta_go | timing->ta_sure << 8 |
+ timing->ta_get << 16 | timing->da_hs_exit << 24;
+ timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
+ timing->clk_hs_trail << 24;
+ timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
+ timing->clk_hs_exit << 16;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
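[Editor's note — illustrative sketch, not part of the patch] A minimal standalone C program mirroring the data-lane D-PHY timing formulas added in the hunk above. The 500 Mbit/s per-lane rate and the printed values are assumed examples; only the arithmetic is taken from the new mtk_dsi_phy_timconfig().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t data_rate_mhz = 500;	/* assumed example per-lane rate */
	uint32_t lpx, prepare, zero, trail, hs_exit;

	lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
	prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
	zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - prepare;
	trail = prepare + 1;
	hs_exit = 2 * lpx + 1;

	/* For 500 Mbit/s this prints: lpx=4 prepare=5 zero=7 trail=6 exit=9 */
	printf("lpx=%u prepare=%u zero=%u trail=%u exit=%u\n",
	       lpx, prepare, zero, trail, hs_exit);
	return 0;
}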
@@ -236,6 +281,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
+static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
+}
+
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
@@ -399,7 +450,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
- u32 dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -413,6 +465,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+ if (dsi->driver_data->has_size_ctl)
+ writel(vm->vactive << 16 | vm->hactive,
+ dsi->regs + DSI_SIZE_CON);
+
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -422,7 +478,46 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
dsi_tmp_buf_bpp - 10);
- horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+ data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+ timing->da_hs_zero + timing->da_hs_exit + 3;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 18) {
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+
+ horizontal_backporch_byte =
+ horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ } else {
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 12) {
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+ horizontal_backporch_byte = horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ }
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
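[Editor's note — illustrative sketch, not part of the patch] The burst-mode branch above subtracts the D-PHY overhead (data_phy_cycles * lanes + 18 bytes) from the front and back porch byte counts in proportion to their sizes. A standalone check with made-up timings; data_phy_cycles = 28 matches the values from the earlier timing sketch (4 + 5 + 7 + 9 + 3), and all porch/lane numbers are assumptions.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed videomode fragment and lane setup */
	uint32_t hfront_porch = 48, hback_porch = 80, hsync_len = 32;
	uint32_t bpp = 3;			/* bytes per pixel, RGB888 */
	uint32_t data_phy_cycles = 28, lanes = 4;

	uint32_t overhead = data_phy_cycles * lanes + 18;	/* burst mode */
	uint32_t hfp_byte, hbp_byte;

	hbp_byte = (hback_porch + hsync_len) * bpp - 10;
	if ((hfront_porch + hback_porch) * bpp > overhead) {
		hfp_byte = hfront_porch * bpp -
			   overhead * hfront_porch / (hfront_porch + hback_porch);
		hbp_byte -= overhead * hback_porch / (hfront_porch + hback_porch);
	} else {
		/* driver warns here: frame rate drops below the target */
		hfp_byte = hfront_porch * bpp;
	}

	printf("HFP_WC=%u HBP_WC=%u\n", hfp_byte, hbp_byte);	/* 96, 245 */
	return 0;
}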
@@ -520,10 +615,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
- struct device *dev = dsi->dev;
+ struct device *dev = dsi->host.dev;
int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+ u32 bit_per_pixel;
if (++dsi->refcount != 1)
return 0;
@@ -542,24 +636,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
break;
}
- /**
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
+ dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
+ dsi->lanes);
ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
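[Editor's note — illustrative sketch, not part of the patch] The hunk above replaces the htotal/overhead-based estimate with a plain pixelclock * bits-per-pixel / lanes rate. A quick standalone check of that formula; the 148.5 MHz pixel clock, 24 bpp and 4 lanes are assumed example values.

#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t pixelclock = 148500000;	/* Hz, assumed 1080p60-style timing */
	uint32_t bit_per_pixel = 24;		/* RGB888 */
	uint32_t lanes = 4;
	uint64_t data_rate = div_round_up_u64(pixelclock * bit_per_pixel, lanes);

	/* prints 891000000 bit/s per lane for the numbers above */
	printf("per-lane HS rate: %llu bit/s\n", (unsigned long long)data_rate);
	return 0;
}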
@@ -582,10 +660,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
}
mtk_dsi_enable(dsi);
+
+ if (dsi->driver_data->has_shadow_ctl)
+ writel(FORCE_COMMIT | BYPASS_SHADOW,
+ dsi->regs + DSI_SHADOW_DEBUG);
+
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
@@ -745,7 +830,7 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
struct mtk_dsi *dsi = connector_to_dsi(connector);
- return drm_panel_get_modes(dsi->panel);
+ return drm_panel_get_modes(dsi->panel, connector);
}
static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
@@ -936,6 +1021,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
const char *tx_buf = msg->tx_buf;
u8 config, cmdq_size, cmdq_off, type = msg->type;
u32 reg_val, cmdq_mask, i;
+ u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
if (MTK_DSI_HOST_IS_READ(type))
config = BTA;
@@ -955,9 +1041,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
}
for (i = 0; i < msg->tx_len; i++)
- writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+ mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
+ (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
+ tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
- mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
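[Editor's note — illustrative sketch, not part of the patch] The rewritten loop above packs each command byte into the correct byte lane of a 32-bit CMDQ register via a read-modify-write, instead of writeb(). The sketch models register space as a plain array; mask_write(), the offsets and the payload bytes are made-up stand-ins.

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[64];		/* stand-in for dsi->regs */

static void mask_write(uint32_t offset, uint32_t mask, uint32_t data)
{
	uint32_t tmp = regs[offset / 4];

	regs[offset / 4] = (tmp & ~mask) | (data & mask);
}

int main(void)
{
	const uint32_t reg_cmdq_off = 0x80;	/* assumed base, within regs[] */
	const uint32_t cmdq_off = 4;		/* e.g. a long-packet header */
	const uint8_t tx_buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t i;

	for (i = 0; i < sizeof(tx_buf); i++)
		mask_write((reg_cmdq_off + cmdq_off + i) & ~0x3U,
			   0xffU << (((i + cmdq_off) & 3U) * 8U),
			   (uint32_t)tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));

	/* prints reg[0x80]=0x00000000 reg[0x84]=0x44332211 reg[0x88]=0x00000055 */
	for (i = 0; i < 3; i++)
		printf("reg[0x%02x] = 0x%08x\n",
		       (unsigned)(reg_cmdq_off + i * 4),
		       (unsigned)regs[reg_cmdq_off / 4 + i]);
	return 0;
}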
@@ -1047,12 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = mipi_dsi_host_register(&dsi->host);
- if (ret < 0) {
- dev_err(dev, "failed to register DSI host: %d\n", ret);
- goto err_ddp_comp_unregister;
- }
-
ret = mtk_dsi_create_conn_enc(drm, dsi);
if (ret) {
DRM_ERROR("Encoder create failed with %d\n", ret);
@@ -1062,8 +1144,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_unregister:
- mipi_dsi_host_unregister(&dsi->host);
-err_ddp_comp_unregister:
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
@@ -1075,7 +1155,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_destroy_conn_enc(dsi);
- mipi_dsi_host_unregister(&dsi->host);
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
@@ -1099,31 +1178,38 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ return ret;
+ }
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&dsi->panel, &dsi->bridge);
if (ret)
- return ret;
+ goto err_unregister_host;
+
+ dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
dev_err(dev, "Failed to get engine clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
dev_err(dev, "Failed to get digital clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1131,33 +1217,35 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ ret = comp_id;
+ goto err_unregister_host;
}
ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
&mtk_dsi_funcs);
if (ret) {
dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
- return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
+ ret = irq_num;
+ goto err_unregister_host;
}
irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -1165,14 +1253,24 @@ static int mtk_dsi_probe(struct platform_device *pdev)
IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
- return -EPROBE_DEFER;
+ goto err_unregister_host;
}
init_waitqueue_head(&dsi->irq_wait_queue);
platform_set_drvdata(pdev, dsi);
- return component_add(&pdev->dev, &mtk_dsi_component_ops);
+ ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add component: %d\n", ret);
+ goto err_unregister_host;
+ }
+
+ return 0;
+
+err_unregister_host:
+ mipi_dsi_host_unregister(&dsi->host);
+ return ret;
}
static int mtk_dsi_remove(struct platform_device *pdev)
@@ -1181,13 +1279,32 @@ static int mtk_dsi_remove(struct platform_device *pdev)
mtk_output_dsi_disable(dsi);
component_del(&pdev->dev, &mtk_dsi_component_ops);
+ mipi_dsi_host_unregister(&dsi->host);
return 0;
}
+static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+};
+
+static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+ .reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+ .has_shadow_ctl = true,
+ .has_size_ctl = true,
+};
+
static const struct of_device_id mtk_dsi_of_match[] = {
- { .compatible = "mediatek,mt2701-dsi" },
- { .compatible = "mediatek,mt8173-dsi" },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = &mt2701_dsi_driver_data },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = &mt8173_dsi_driver_data },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = &mt8183_dsi_driver_data },
{ },
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5d6a9f094df5..5e4a4dbda443 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -3,11 +3,7 @@
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_edid.h>
+
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -23,7 +19,16 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+
#include <sound/hdmi-codec.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "mtk_cec.h"
#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
@@ -1233,17 +1238,19 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+ struct drm_bridge *next_bridge;
dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
mode->hdisplay, mode->vdisplay, mode->vrefresh,
!!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
- if (hdmi->bridge.next) {
+ next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge);
+ if (next_bridge) {
struct drm_display_mode adjusted_mode;
drm_mode_copy(&adjusted_mode, mode);
- if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
- &adjusted_mode))
+ if (!drm_bridge_chain_mode_fixup(next_bridge, mode,
+ &adjusted_mode))
return MODE_BAD;
}
@@ -1295,9 +1302,10 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
- ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
- &mtk_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
+ &mtk_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc_adpt);
if (ret) {
dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1842dc2caae9..e4d34484ecc8 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -3,292 +3,39 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-
-#define MIPITX_DSI_CON 0x00
-#define RG_DSI_LDOCORE_EN BIT(0)
-#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
-#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
-#define RG_DSI_LPTX_CLMP_EN BIT(11)
-
-#define MIPITX_DSI_CLOCK_LANE 0x04
-#define MIPITX_DSI_DATA_LANE0 0x08
-#define MIPITX_DSI_DATA_LANE1 0x0c
-#define MIPITX_DSI_DATA_LANE2 0x10
-#define MIPITX_DSI_DATA_LANE3 0x14
-#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
-#define RG_DSI_LNTx_CKLANE_EN BIT(1)
-#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
-#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
-#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
-#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
-#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
-
-#define MIPITX_DSI_TOP_CON 0x40
-#define RG_DSI_LNT_INTR_EN BIT(0)
-#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
-#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
-#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
-#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
-#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
-
-#define MIPITX_DSI_BG_CON 0x44
-#define RG_DSI_BG_CORE_EN BIT(0)
-#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
-#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
-
-#define MIPITX_DSI_PLL_CON0 0x50
-#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
-#define RG_DSI_MPPLL_MONVC_EN BIT(10)
-#define RG_DSI_MPPLL_MONREF_EN BIT(11)
-#define RG_DSI_MPPLL_VOD_EN BIT(12)
-
-#define MIPITX_DSI_PLL_CON1 0x54
-#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
-#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
-#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
-
-#define MIPITX_DSI_PLL_CON2 0x58
-
-#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
-
-#define MIPITX_DSI_PLL_PWR 0x68
-#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
-#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
-#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
-
-#define MIPITX_DSI_SW_CTRL 0x80
-#define SW_CTRL_EN BIT(0)
-
-#define MIPITX_DSI_SW_CTRL_CON0 0x84
-#define SW_LNTC_LPTX_PRE_OE BIT(0)
-#define SW_LNTC_LPTX_OE BIT(1)
-#define SW_LNTC_LPTX_P BIT(2)
-#define SW_LNTC_LPTX_N BIT(3)
-#define SW_LNTC_HSTX_PRE_OE BIT(4)
-#define SW_LNTC_HSTX_OE BIT(5)
-#define SW_LNTC_HSTX_ZEROCLK BIT(6)
-#define SW_LNT0_LPTX_PRE_OE BIT(7)
-#define SW_LNT0_LPTX_OE BIT(8)
-#define SW_LNT0_LPTX_P BIT(9)
-#define SW_LNT0_LPTX_N BIT(10)
-#define SW_LNT0_HSTX_PRE_OE BIT(11)
-#define SW_LNT0_HSTX_OE BIT(12)
-#define SW_LNT0_LPRX_EN BIT(13)
-#define SW_LNT1_LPTX_PRE_OE BIT(14)
-#define SW_LNT1_LPTX_OE BIT(15)
-#define SW_LNT1_LPTX_P BIT(16)
-#define SW_LNT1_LPTX_N BIT(17)
-#define SW_LNT1_HSTX_PRE_OE BIT(18)
-#define SW_LNT1_HSTX_OE BIT(19)
-#define SW_LNT2_LPTX_PRE_OE BIT(20)
-#define SW_LNT2_LPTX_OE BIT(21)
-#define SW_LNT2_LPTX_P BIT(22)
-#define SW_LNT2_LPTX_N BIT(23)
-#define SW_LNT2_HSTX_PRE_OE BIT(24)
-#define SW_LNT2_HSTX_OE BIT(25)
-
-struct mtk_mipitx_data {
- const u32 mppll_preserve;
-};
-
-struct mtk_mipi_tx {
- struct device *dev;
- void __iomem *regs;
- u32 data_rate;
- const struct mtk_mipitx_data *driver_data;
- struct clk_hw pll_hw;
- struct clk *pll;
-};
+#include "mtk_mipi_tx.h"
-static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
{
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp & ~bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp | bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 mask, u32 data)
{
u32 temp = readl(mipi_tx->regs + offset);
writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
}
-static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- u8 txdiv, txdiv0, txdiv1;
- u64 pcw;
-
- dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
-
- if (mipi_tx->data_rate >= 500000000) {
- txdiv = 1;
- txdiv0 = 0;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 250000000) {
- txdiv = 2;
- txdiv0 = 1;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 125000000) {
- txdiv = 4;
- txdiv0 = 2;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate > 62000000) {
- txdiv = 8;
- txdiv0 = 2;
- txdiv1 = 1;
- } else if (mipi_tx->data_rate >= 50000000) {
- txdiv = 16;
- txdiv0 = 2;
- txdiv1 = 2;
- } else {
- return -EINVAL;
- }
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- usleep_range(30, 100);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
-
- /*
- * PLL PCW config
- * PCW bit 24~30 = integer part of pcw
- * PCW bit 0~23 = fractional part of pcw
- * pcw = data_Rate*4*txdiv/(Ref_clk*2);
- * Post DIV =4, so need data_Rate*4
- * Ref_clk is 26MHz
- */
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
-
- usleep_range(20, 100);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
-
- return 0;
-}
-
-static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-
- dev_dbg(mipi_tx->dev, "unprepare\n");
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
-}
-
-static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- return clamp_val(rate, 50000000, 1250000000);
-}
-
-static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
@@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
return mipi_tx->data_rate;
}
-static const struct clk_ops mtk_mipi_tx_pll_ops = {
- .prepare = mtk_mipi_tx_pll_prepare,
- .unprepare = mtk_mipi_tx_pll_unprepare,
- .round_rate = mtk_mipi_tx_pll_round_rate,
- .set_rate = mtk_mipi_tx_pll_set_rate,
- .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
-};
-
-static int mtk_mipi_tx_power_on_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- return 0;
-}
-
static int mtk_mipi_tx_power_on(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
return ret;
/* Enable DSI Lane LDO outputs, disable pad tie low */
- mtk_mipi_tx_power_on_signal(phy);
-
+ mipi_tx->driver_data->mipi_tx_enable_signal(phy);
return 0;
}
-static void mtk_mipi_tx_power_off_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-}
-
static int mtk_mipi_tx_power_off(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
/* Enable pad tie low, disable DSI Lane LDO outputs */
- mtk_mipi_tx_power_off_signal(phy);
+ mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
clk_disable_unprepare(mipi_tx->pll);
@@ -383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_mipi_tx *mipi_tx;
struct resource *mem;
- struct clk *ref_clk;
const char *ref_clk_name;
+ struct clk *ref_clk;
struct clk_init_data clk_init = {
- .ops = &mtk_mipi_tx_pll_ops,
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
.flags = CLK_SET_RATE_GATE,
@@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get reference clock: %d\n", ret);
return ret;
}
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
+
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
if (IS_ERR(mipi_tx->pll)) {
@@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
-static const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8)
-};
-
-static const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8)
-};
-
static const struct of_device_id mtk_mipi_tx_match[] = {
{ .compatible = "mediatek,mt2701-mipi-tx",
.data = &mt2701_mipitx_data },
{ .compatible = "mediatek,mt8173-mipi-tx",
.data = &mt8173_mipitx_data },
- {},
+ { .compatible = "mediatek,mt8183-mipi-tx",
+ .data = &mt8183_mipitx_data },
+ { },
};
struct platform_driver mtk_mipi_tx_driver = {
@@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = {
.of_match_table = mtk_mipi_tx_match,
},
};
+
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
new file mode 100644
index 000000000000..413f35d86219
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#ifndef _MTK_MIPI_TX_H
+#define _MTK_MIPI_TX_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+ const struct clk_ops *mipi_tx_clk_ops;
+ void (*mipi_tx_enable_signal)(struct phy *phy);
+ void (*mipi_tx_disable_signal)(struct phy *phy);
+};
+
+struct mtk_mipi_tx {
+ struct device *dev;
+ void __iomem *regs;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+};
+
+struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
+ u32 data);
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+extern const struct mtk_mipitx_data mt2701_mipitx_data;
+extern const struct mtk_mipitx_data mt8173_mipitx_data;
+extern const struct mtk_mipitx_data mt8183_mipitx_data;
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
new file mode 100644
index 000000000000..f18db14d8b63
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_DSI_CON 0x00
+#define RG_DSI_LDOCORE_EN BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN BIT(1)
+#define RG_DSI_BCLK_SEL (3 << 2)
+#define RG_DSI_LD_IDX_SEL (7 << 4)
+#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
+#define RG_DSI_LPTX_CLMP_EN BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE 0x04
+#define MIPITX_DSI_DATA_LANE0 0x08
+#define MIPITX_DSI_DATA_LANE1 0x0c
+#define MIPITX_DSI_DATA_LANE2 0x10
+#define MIPITX_DSI_DATA_LANE3 0x14
+#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
+#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+
+#define MIPITX_DSI_TOP_CON 0x40
+#define RG_DSI_LNT_INTR_EN BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
+#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN BIT(12)
+#define RG_DSI_PRESERVE (7 << 13)
+
+#define MIPITX_DSI_BG_CON 0x44
+#define RG_DSI_BG_CORE_EN BIT(0)
+#define RG_DSI_BG_CKEN BIT(1)
+#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE BIT(4)
+#define RG_DSI_VOUT_MSK (0x3ffff << 5)
+#define RG_DSI_V12_SEL (7 << 5)
+#define RG_DSI_V10_SEL (7 << 8)
+#define RG_DSI_V072_SEL (7 << 11)
+#define RG_DSI_V04_SEL (7 << 14)
+#define RG_DSI_V032_SEL (7 << 17)
+#define RG_DSI_V02_SEL (7 << 20)
+#define RG_DSI_BG_R1_TRIM (0xf << 24)
+#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0 0x50
+#define RG_DSI_MPPLL_PLL_EN BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV (3 << 1)
+#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
+#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
+#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN BIT(11)
+#define RG_DSI_MPPLL_VOD_EN BIT(12)
+
+#define MIPITX_DSI_PLL_CON1 0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2 0x58
+
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
+#define MIPITX_DSI_PLL_PWR 0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
+
+#define MIPITX_DSI_SW_CTRL 0x80
+#define SW_CTRL_EN BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0 0x84
+#define SW_LNTC_LPTX_PRE_OE BIT(0)
+#define SW_LNTC_LPTX_OE BIT(1)
+#define SW_LNTC_LPTX_P BIT(2)
+#define SW_LNTC_LPTX_N BIT(3)
+#define SW_LNTC_HSTX_PRE_OE BIT(4)
+#define SW_LNTC_HSTX_OE BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK BIT(6)
+#define SW_LNT0_LPTX_PRE_OE BIT(7)
+#define SW_LNT0_LPTX_OE BIT(8)
+#define SW_LNT0_LPTX_P BIT(9)
+#define SW_LNT0_LPTX_N BIT(10)
+#define SW_LNT0_HSTX_PRE_OE BIT(11)
+#define SW_LNT0_HSTX_OE BIT(12)
+#define SW_LNT0_LPRX_EN BIT(13)
+#define SW_LNT1_LPTX_PRE_OE BIT(14)
+#define SW_LNT1_LPTX_OE BIT(15)
+#define SW_LNT1_LPTX_P BIT(16)
+#define SW_LNT1_LPTX_N BIT(17)
+#define SW_LNT1_HSTX_PRE_OE BIT(18)
+#define SW_LNT1_HSTX_OE BIT(19)
+#define SW_LNT2_LPTX_PRE_OE BIT(20)
+#define SW_LNT2_LPTX_OE BIT(21)
+#define SW_LNT2_LPTX_P BIT(22)
+#define SW_LNT2_LPTX_N BIT(23)
+#define SW_LNT2_HSTX_PRE_OE BIT(24)
+#define SW_LNT2_HSTX_OE BIT(25)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ u8 txdiv, txdiv0, txdiv1;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 250000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate > 62000000) {
+ txdiv = 8;
+ txdiv0 = 2;
+ txdiv1 = 1;
+ } else if (mipi_tx->data_rate >= 50000000) {
+ txdiv = 16;
+ txdiv0 = 2;
+ txdiv1 = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+ (4 << 20) | (4 << 17) | (4 << 14) |
+ (4 << 11) | (4 << 8) | (4 << 5) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ usleep_range(30, 100);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON |
+ RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ (txdiv0 << 3) | (txdiv1 << 5));
+
+ /*
+ * PLL PCW config
+ * PCW bit 24~30 = integer part of pcw
+ * PCW bit 0~23 = fractional part of pcw
+ * pcw = data_Rate*4*txdiv/(Ref_clk*2);
+ * Post DIV =4, so need data_Rate*4
+ * Ref_clk is 26MHz
+ */
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+ 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_FRA_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+ usleep_range(20, 100);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_SSC_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
+ return 0;
+}
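[Editor's note — illustrative sketch, not part of the patch] A worked example of the 7.24 fixed-point PCW word described in the comment above, for a hypothetical 400 MHz data rate (txdiv = 2 for the 250-500 MHz range). The kernel uses div_u64(); a plain 64-bit division is fine in this userspace sketch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t data_rate = 400000000;		/* Hz, assumed example */
	uint32_t txdiv = 2;			/* 250 MHz <= rate < 500 MHz */
	uint64_t pcw = ((data_rate * 2 * txdiv) << 24) / 26000000;

	/* prints pcw=0x3d89d89d (int=61 frac=0x89d89d) */
	printf("pcw=0x%llx (int=%llu frac=0x%06llx)\n",
	       (unsigned long long)pcw,
	       (unsigned long long)(pcw >> 24),
	       (unsigned long long)(pcw & 0xffffff));
	return 0;
}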
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "unprepare\n");
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN |
+ RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1250000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .prepare = mtk_mipi_tx_pll_prepare,
+ .unprepare = mtk_mipi_tx_pll_unprepare,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
new file mode 100644
index 000000000000..91f08a351fd0
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_LANE_CON 0x000c
+#define RG_DSI_CPHY_T1DRV_EN BIT(0)
+#define RG_DSI_ANA_CK_SEL BIT(1)
+#define RG_DSI_PHY_CK_SEL BIT(2)
+#define RG_DSI_CPHY_EN BIT(3)
+#define RG_DSI_PHYCK_INV_EN BIT(4)
+#define RG_DSI_PWR04_EN BIT(5)
+#define RG_DSI_BG_LPF_EN BIT(6)
+#define RG_DSI_BG_CORE_EN BIT(7)
+#define RG_DSI_PAD_TIEL_SEL BIT(8)
+
+#define MIPITX_PLL_PWR 0x0028
+#define MIPITX_PLL_CON0 0x002c
+#define MIPITX_PLL_CON1 0x0030
+#define MIPITX_PLL_CON2 0x0034
+#define MIPITX_PLL_CON3 0x0038
+#define MIPITX_PLL_CON4 0x003c
+#define RG_DSI_PLL_IBIAS (3 << 10)
+
+#define MIPITX_D2_SW_CTL_EN 0x0144
+#define MIPITX_D0_SW_CTL_EN 0x0244
+#define MIPITX_CK_CKMODE_EN 0x0328
+#define DSI_CK_CKMODE_EN BIT(0)
+#define MIPITX_CK_SW_CTL_EN 0x0344
+#define MIPITX_D1_SW_CTL_EN 0x0444
+#define MIPITX_D3_SW_CTL_EN 0x0544
+#define DSI_SW_CTL_EN BIT(0)
+#define AD_DSI_PLL_SDM_PWR_ON BIT(0)
+#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
+
+#define RG_DSI_PLL_EN BIT(4)
+#define RG_DSI_PLL_POSDIV (0x7 << 8)
+
+static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ unsigned int txdiv, txdiv0;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 2000000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ } else if (mipi_tx->data_rate >= 1000000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ } else if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ } else if (mipi_tx->data_rate > 250000000) {
+ txdiv = 8;
+ txdiv0 = 3;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 16;
+ txdiv0 = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ udelay(1);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
+ txdiv0 << 8);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1600000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .enable = mtk_mipi_tx_pll_enable,
+ .disable = mtk_mipi_tx_pll_disable,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* BG_LPF_EN / BG_CORE_EN */
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ usleep_range(30, 100);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+
+ /* Switch OFF each Lane */
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* Switch ON each Lane */
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+}
+
+const struct mtk_mipitx_data mt8183_mipitx_data = {
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c389e2399133..28a519cdf66b 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
+meson-drm-y += meson_rdma.o meson_osd_afbcd.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index aa8ea107524e..e66b6271ff58 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -9,23 +9,23 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/bitfield.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
+#include <linux/soc/amlogic/meson-canvas.h>
+
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_flip_work.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "meson_crtc.h"
#include "meson_plane.h"
+#include "meson_registers.h"
#include "meson_venc.h"
-#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_registers.h"
+#include "meson_rdma.h"
+#include "meson_vpp.h"
+#include "meson_osd_afbcd.h"
#define MESON_G12A_VIU_OFFSET 0x5ec0
@@ -37,7 +37,11 @@ struct meson_crtc {
struct meson_drm *priv;
void (*enable_osd1)(struct meson_drm *priv);
void (*enable_vd1)(struct meson_drm *priv);
+ void (*enable_osd1_afbc)(struct meson_drm *priv);
+ void (*disable_osd1_afbc)(struct meson_drm *priv);
unsigned int viu_offset;
+ bool vsync_forced;
+ bool vsync_disabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -48,6 +52,7 @@ static int meson_crtc_enable_vblank(struct drm_crtc *crtc)
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
+ meson_crtc->vsync_disabled = false;
meson_venc_enable_vsync(priv);
return 0;
@@ -58,7 +63,10 @@ static void meson_crtc_disable_vblank(struct drm_crtc *crtc)
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
- meson_venc_disable_vsync(priv);
+ if (!meson_crtc->vsync_forced) {
+ meson_crtc->vsync_disabled = true;
+ meson_venc_disable_vsync(priv);
+ }
}
static const struct drm_crtc_funcs meson_crtc_funcs = {
@@ -238,6 +246,26 @@ static void meson_crtc_enable_osd1(struct meson_drm *priv)
priv->io_base + _REG(VPP_MISC));
}
+static void meson_crtc_g12a_enable_osd1_afbc(struct meson_drm *priv)
+{
+ writel_relaxed(priv->viu.osd1_blk2_cfg4,
+ priv->io_base + _REG(VIU_OSD1_BLK2_CFG_W4));
+
+ writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+
+ writel_relaxed(priv->viu.osd1_blk1_cfg4,
+ priv->io_base + _REG(VIU_OSD1_BLK1_CFG_W4));
+
+ meson_viu_g12a_enable_osd1_afbc(priv);
+
+ writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+
+ writel_bits_relaxed(OSD_MALI_SRC_EN, OSD_MALI_SRC_EN,
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
+}
+
static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
{
writel_relaxed(priv->viu.osd_blend_din0_scope_h,
@@ -267,11 +295,11 @@ static void meson_crtc_enable_vd1(struct meson_drm *priv)
static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
{
- writel_relaxed(((1 << 16) | /* post bld premult*/
- (1 << 8) | /* post src */
- (1 << 4) | /* pre bld premult*/
- (1 << 0)),
- priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(VD_BLEND_PREBLD_SRC_VD1 |
+ VD_BLEND_PREBLD_PREMULT_EN |
+ VD_BLEND_POSTBLD_SRC_VD1 |
+ VD_BLEND_POSTBLD_PREMULT_EN,
+ priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
}
void meson_crtc_irq(struct meson_drm *priv)
@@ -283,6 +311,8 @@ void meson_crtc_irq(struct meson_drm *priv)
if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
writel_relaxed(priv->viu.osd1_ctrl_stat,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_relaxed(priv->viu.osd1_ctrl_stat2,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
writel_relaxed(priv->viu.osd1_blk0_cfg[0],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
writel_relaxed(priv->viu.osd1_blk0_cfg[1],
@@ -293,6 +323,20 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
writel_relaxed(priv->viu.osd1_blk0_cfg[4],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
+
+ if (priv->viu.osd1_afbcd) {
+ if (meson_crtc->enable_osd1_afbc)
+ meson_crtc->enable_osd1_afbc(priv);
+ } else {
+ if (meson_crtc->disable_osd1_afbc)
+ meson_crtc->disable_osd1_afbc(priv);
+ if (priv->afbcd.ops) {
+ priv->afbcd.ops->reset(priv);
+ priv->afbcd.ops->disable(priv);
+ }
+ meson_crtc->vsync_forced = false;
+ }
+
writel_relaxed(priv->viu.osd_sc_ctrl0,
priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(priv->viu.osd_sc_i_wh_m1,
@@ -314,15 +358,25 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.osd_sc_v_ctrl0,
priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR, 0);
+ if (!priv->viu.osd1_afbcd)
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+ priv->viu.osd1_addr,
+ priv->viu.osd1_stride,
+ priv->viu.osd1_height,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
if (meson_crtc->enable_osd1)
meson_crtc->enable_osd1(priv);
+ if (priv->viu.osd1_afbcd) {
+ priv->afbcd.ops->reset(priv);
+ priv->afbcd.ops->setup(priv);
+ priv->afbcd.ops->enable(priv);
+ meson_crtc->vsync_forced = true;
+ }
+
priv->viu.osd1_commit = false;
}
@@ -359,7 +413,7 @@ void meson_crtc_irq(struct meson_drm *priv)
MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
- };
+ }
writel_relaxed(priv->viu.vd1_if0_gen_reg,
priv->io_base + meson_crtc->viu_offset +
@@ -489,7 +543,12 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.vd1_range_map_cr,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_RANGE_MAP_CR));
- writel_relaxed(0x78404,
+ writel_relaxed(VPP_VSC_BANK_LENGTH(4) |
+ VPP_HSC_BANK_LENGTH(4) |
+ VPP_SC_VD_EN_ENABLE |
+ VPP_SC_TOP_EN_ENABLE |
+ VPP_SC_HSC_EN_ENABLE |
+ VPP_SC_VSC_EN_ENABLE,
priv->io_base + _REG(VPP_SC_MISC));
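(The old magic value and the new symbolic form are bit-identical given the VPP_SC_* and bank-length definitions added to meson_registers.h later in this patch:

    VPP_SC_VSC_EN_ENABLE | VPP_SC_HSC_EN_ENABLE | VPP_SC_TOP_EN_ENABLE |
    VPP_SC_VD_EN_ENABLE | VPP_HSC_BANK_LENGTH(4) | VPP_VSC_BANK_LENGTH(4)
        = 0x40000 | 0x20000 | 0x10000 | 0x8000 | 0x400 | 0x4
        = 0x78404

so the rewrite is purely cosmetic.)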
writel_relaxed(priv->viu.vpp_pic_in_height,
priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
@@ -540,6 +599,9 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->viu.vd1_commit = false;
}
+ if (meson_crtc->vsync_disabled)
+ return;
+
drm_crtc_handle_vblank(priv->crtc);
spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -572,14 +634,24 @@ int meson_crtc_create(struct meson_drm *priv)
return ret;
}
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
+ meson_crtc->enable_osd1_afbc =
+ meson_crtc_g12a_enable_osd1_afbc;
+ meson_crtc->disable_osd1_afbc =
+ meson_viu_g12a_disable_osd1_afbc;
drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
} else {
meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
+ meson_crtc->enable_osd1_afbc =
+ meson_viu_gxm_enable_osd1_afbc;
+ meson_crtc->disable_osd1_afbc =
+ meson_viu_gxm_disable_osd1_afbc;
+ }
drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2310c96fff46..b5f5eb7b4bb9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -8,35 +8,32 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/soc/amlogic/meson-canvas.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_flip_work.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
+#include "meson_crtc.h"
#include "meson_drv.h"
-#include "meson_plane.h"
#include "meson_overlay.h"
-#include "meson_crtc.h"
+#include "meson_plane.h"
+#include "meson_osd_afbcd.h"
+#include "meson_registers.h"
#include "meson_venc_cvbs.h"
-
-#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_venc.h"
-#include "meson_registers.h"
+#include "meson_vpp.h"
+#include "meson_rdma.h"
#define DRIVER_NAME "meson"
#define DRIVER_DESC "Amlogic Meson DRM driver"
@@ -93,9 +90,7 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* IRQ */
.irq_handler = meson_irq,
@@ -103,8 +98,6 @@ static struct drm_driver meson_driver = {
/* PRIME Ops */
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -149,10 +142,28 @@ static struct regmap_config meson_regmap_config = {
static void meson_vpu_init(struct meson_drm *priv)
{
- writel_relaxed(0x210000, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
- writel_relaxed(0x10000, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
- writel_relaxed(0x900000, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
- writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
+ u32 value;
+
+ /*
+ * Slave dc0 and dc5 connected to master port 1.
+ * By default other slaves are connected to master port 0.
+ */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) |
+ VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
+
+ /* Slave dc0 connected to master port 1 */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
+
+ /* Slave dc4 and dc7 connected to master port 1 */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) |
+ VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
+
+ /* Slave dc1 connected to master port 1 */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
}
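Decoding the replaced constants against the new helper shows the arbitration mapping is unchanged, assuming VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) expands to port << (16 + dc) as the old values suggest (the macro itself is defined outside this hunk):

    0x210000 = BIT(16) | BIT(21)   /* slaves dc0, dc5 -> master port 1 (RDARB L1C1) */
    0x10000  = BIT(16)             /* slave dc0       -> master port 1 (RDARB L1C2) */
    0x900000 = BIT(20) | BIT(23)   /* slaves dc4, dc7 -> master port 1 (RDARB L2C1) */
    0x20000  = BIT(17)             /* slave dc1       -> master port 1 (WRARB L2C1) */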
static void meson_remove_framebuffers(void)
@@ -175,6 +186,7 @@ static void meson_remove_framebuffers(void)
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct meson_drm_match_data *match;
struct meson_drm *priv;
struct drm_device *drm;
struct resource *res;
@@ -187,6 +199,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
return -ENODEV;
}
+ match = of_device_get_match_data(dev);
+ if (!match)
+ return -ENODEV;
+
drm = drm_dev_alloc(&meson_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
@@ -199,6 +215,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm->dev_private = priv;
priv->drm = drm;
priv->dev = dev;
+ priv->compat = match->compat;
+ priv->afbcd.ops = match->afbcd_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
regs = devm_ioremap_resource(dev, res);
@@ -278,6 +296,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
+ if (priv->afbcd.ops) {
+ ret = priv->afbcd.ops->init(priv);
+ if (ret)
+ return ret;
+ }
/* Encoder Initialization */
@@ -348,12 +371,16 @@ static void meson_drv_unbind(struct device *dev)
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
}
+ if (priv->afbcd.ops) {
+ priv->afbcd.ops->reset(priv);
+ meson_rdma_free(priv);
+ }
+
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
-
}
static const struct component_master_ops meson_drv_master_ops = {
@@ -361,6 +388,35 @@ static const struct component_master_ops meson_drv_master_ops = {
.unbind = meson_drv_unbind,
};
+static int __maybe_unused meson_drv_pm_suspend(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int __maybe_unused meson_drv_pm_resume(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ meson_vpu_init(priv);
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->init(priv);
+
+ drm_mode_config_helper_resume(priv->drm);
+
+ return 0;
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -443,20 +499,47 @@ static int meson_drv_probe(struct platform_device *pdev)
return 0;
};
+static struct meson_drm_match_data meson_drm_gxbb_data = {
+ .compat = VPU_COMPATIBLE_GXBB,
+};
+
+static struct meson_drm_match_data meson_drm_gxl_data = {
+ .compat = VPU_COMPATIBLE_GXL,
+};
+
+static struct meson_drm_match_data meson_drm_gxm_data = {
+ .compat = VPU_COMPATIBLE_GXM,
+ .afbcd_ops = &meson_afbcd_gxm_ops,
+};
+
+static struct meson_drm_match_data meson_drm_g12a_data = {
+ .compat = VPU_COMPATIBLE_G12A,
+ .afbcd_ops = &meson_afbcd_g12a_ops,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "amlogic,meson-gxbb-vpu" },
- { .compatible = "amlogic,meson-gxl-vpu" },
- { .compatible = "amlogic,meson-gxm-vpu" },
- { .compatible = "amlogic,meson-g12a-vpu" },
+ { .compatible = "amlogic,meson-gxbb-vpu",
+ .data = (void *)&meson_drm_gxbb_data },
+ { .compatible = "amlogic,meson-gxl-vpu",
+ .data = (void *)&meson_drm_gxl_data },
+ { .compatible = "amlogic,meson-gxm-vpu",
+ .data = (void *)&meson_drm_gxm_data },
+ { .compatible = "amlogic,meson-g12a-vpu",
+ .data = (void *)&meson_drm_g12a_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops meson_drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume)
+};
+
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
+ .pm = &meson_drv_pm_ops,
},
};
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 7b6593f33dfe..04fdf3826643 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -7,14 +7,32 @@
#ifndef __MESON_DRV_H
#define __MESON_DRV_H
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
+#include <linux/device.h>
#include <linux/of.h>
-#include <linux/soc/amlogic/meson-canvas.h>
-#include <drm/drmP.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+struct drm_crtc;
+struct drm_device;
+struct drm_plane;
+struct meson_drm;
+struct meson_afbcd_ops;
+
+enum vpu_compatible {
+ VPU_COMPATIBLE_GXBB = 0,
+ VPU_COMPATIBLE_GXL = 1,
+ VPU_COMPATIBLE_GXM = 2,
+ VPU_COMPATIBLE_G12A = 3,
+};
+
+struct meson_drm_match_data {
+ enum vpu_compatible compat;
+ struct meson_afbcd_ops *afbcd_ops;
+};
struct meson_drm {
struct device *dev;
+ enum vpu_compatible compat;
void __iomem *io_base;
struct regmap *hhi;
int vsync_irq;
@@ -35,11 +53,16 @@ struct meson_drm {
bool osd1_enabled;
bool osd1_interlace;
bool osd1_commit;
+ bool osd1_afbcd;
uint32_t osd1_ctrl_stat;
+ uint32_t osd1_ctrl_stat2;
uint32_t osd1_blk0_cfg[5];
+ uint32_t osd1_blk1_cfg4;
+ uint32_t osd1_blk2_cfg4;
uint32_t osd1_addr;
uint32_t osd1_stride;
uint32_t osd1_height;
+ uint32_t osd1_width;
uint32_t osd_sc_ctrl0;
uint32_t osd_sc_i_wh_m1;
uint32_t osd_sc_o_h_start_end;
@@ -110,12 +133,24 @@ struct meson_drm {
bool venc_repeat;
bool hdmi_use_enci;
} venc;
+
+ struct {
+ dma_addr_t addr_dma;
+ uint32_t *addr;
+ unsigned int offset;
+ } rdma;
+
+ struct {
+ struct meson_afbcd_ops *ops;
+ u64 modifier;
+ u32 format;
+ } afbcd;
};
static inline int meson_vpu_is_compatible(struct meson_drm *priv,
- const char *compat)
+ enum vpu_compatible family)
{
- return of_device_is_compatible(priv->dev->of_node, compat);
+ return priv->compat == family;
}
#endif /* __MESON_DRV_H */
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df3f9ddd2234..3bb7ffe5fc39 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -5,29 +5,30 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
+#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <linux/reset.h>
-#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_print.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include "meson_drv.h"
-#include "meson_venc.h"
-#include "meson_vclk.h"
#include "meson_dw_hdmi.h"
#include "meson_registers.h"
+#include "meson_vclk.h"
+#include "meson_venc.h"
#define DRIVER_NAME "meson-dw-hdmi"
#define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
@@ -428,6 +429,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
0x3, 0x3);
+
+ /* Enable cec_clk and hdcp22_tmdsclk_en */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
0x3 << 4, 0x3 << 4);
@@ -799,6 +802,47 @@ static bool meson_hdmi_connector_is_available(struct device *dev)
return false;
}
+static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+{
+ struct meson_drm *priv = meson_dw_hdmi->priv;
+
+ /* Enable clocks */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+
+ /* Bring HDMITX MEM output out of power down */
+ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+
+ /* Reset HDMITX APB & TX & PHY */
+ reset_control_reset(meson_dw_hdmi->hdmitx_apb);
+ reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
+ reset_control_reset(meson_dw_hdmi->hdmitx_phy);
+
+ /* Enable APB3 fail on error */
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
+
+ /* Bring out of reset */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ msleep(20);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
+
+ /* Enable HDMI-TX Interrupt */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
+
+}
+
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -922,40 +966,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
DRM_DEBUG_DRIVER("encoder initialized\n");
- /* Enable clocks */
- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
-
- /* Bring HDMITX MEM output of power down */
- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
-
- /* Reset HDMITX APB & TX & PHY */
- reset_control_reset(meson_dw_hdmi->hdmitx_apb);
- reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
- reset_control_reset(meson_dw_hdmi->hdmitx_phy);
-
- /* Enable APB3 fail on error */
- if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
- }
-
- /* Bring out of reset */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_SW_RESET, 0);
-
- msleep(20);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_CLK_CNTL, 0xff);
-
- /* Enable HDMI-TX Interrupt */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi_init(meson_dw_hdmi);
/* Bridge / Connector */
@@ -966,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+ dw_plat_data->use_drm_infoframe = true;
+
platform_set_drvdata(pdev, meson_dw_hdmi);
meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
@@ -991,6 +1007,34 @@ static const struct component_ops meson_dw_hdmi_ops = {
.unbind = meson_dw_hdmi_unbind,
};
+static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ /* Reset TOP */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ return 0;
+}
+
+static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ meson_dw_hdmi_init(meson_dw_hdmi);
+
+ dw_hdmi_resume(meson_dw_hdmi->hdmi);
+
+ return 0;
+}
+
static int meson_dw_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &meson_dw_hdmi_ops);
@@ -1003,6 +1047,11 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend,
+ meson_dw_hdmi_pm_resume)
+};
+
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
@@ -1022,6 +1071,7 @@ static struct platform_driver meson_dw_hdmi_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
+ .pm = &meson_dw_hdmi_pm_ops,
},
};
module_platform_driver(meson_dw_hdmi_platform_driver);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
index 1b2ef043eb5c..08e1c14e4ea0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -100,7 +100,8 @@
#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6)
#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7)
-/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
+/*
+ * Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
* 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
* Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern
* every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0.
@@ -135,7 +136,8 @@
/* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */
#define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B)
-/* Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern,
+/*
+ * Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern,
* used when TMDS CLK rate = TMDS character rate /4. Default 0.
* Bit 0 R Reserved. Default 0.
* [ 1] shift_tmds_clk_pttn
@@ -143,12 +145,14 @@
*/
#define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C)
-/* Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
+/*
+ * Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
* failure, write 1 to clear the failure flag. Default 0.
*/
#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
-/* Bit 1 R filtered RxSense status
+/*
+ * Bit 1 R filtered RxSense status
* Bit 0 R filtered HPD status.
*/
#define HDMITX_TOP_STAT0 (0x00E)
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
new file mode 100644
index 000000000000..f12e0271f166
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_fourcc.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_viu.h"
+#include "meson_rdma.h"
+#include "meson_osd_afbcd.h"
+
+/*
+ * DOC: Driver for the ARM FrameBuffer Compression Decoders
+ *
+ * The Amlogic GXM and G12A SoC families embed an AFBC Decoder,
+ * to decode compressed buffers generated by the ARM Mali GPU.
+ *
+ * For the GXM Family, Amlogic designed their own Decoder, named in
+ * the vendor source as "MESON_AFBC", and a single decoder is available
+ * for the 2 OSD planes.
+ * This decoder is compatible with the AFBC 1.0 specifications and the
+ * Mali T820 GPU capabilities.
+ * It supports:
+ * - basic AFBC buffers for RGB32 only, thus the YTR feature is mandatory
+ * - SPARSE layout and SPLIT layout
+ * - only 16x16 superblocks
+ *
+ * The decoder reads the data from the SDRAM, decodes and sends the
+ * decoded pixel stream to the OSD1 Plane pixel composer.
+ *
+ * For the G12A Family, Amlogic integrated an ARM AFBC Decoder, named
+ * in the vendor source as "MALI_AFBC", and the decoder can decode up
+ * to 4 surfaces, one for each of the 4 available OSDs.
+ * This decoder is compatible with the AFBC 1.2 specifications for the
+ * Mali G31 and G52 GPUs.
+ * It supports:
+ * - basic AFBC buffers for multiple RGB and YUV pixel formats
+ * - SPARSE layout and SPLIT layout
+ * - 16x16 and 32x8 "wideblk" superblocks
+ * - Tiled header
+ *
+ * The ARM AFBC Decoder is independent of the VPU Pixel Pipeline: it
+ * reads the data from the SDRAM, then decodes it into a private
+ * internal physical address where the OSD1 Plane pixel
+ * composer unpacks the decoded data.
+ */
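As a usage sketch (not taken from this patch), a buffer producer targeting the GXM decoder described above would end up advertising an AFBC modifier and format along these lines, using the uapi <drm/drm_fourcc.h> macros; the allocation path itself is omitted and the variable names are illustrative only:

    /* RGB32 with YTR (mandatory on GXM), 16x16 superblocks, SPARSE layout */
    uint64_t modifier = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
                                                AFBC_FORMAT_MOD_YTR |
                                                AFBC_FORMAT_MOD_SPARSE);
    uint32_t format = DRM_FORMAT_ABGR8888;  /* decoded as OSD1_AFBCD_RGB32 below */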
+
+/* Amlogic AFBC Decoder for GXM Family */
+
+#define OSD1_AFBCD_RGB32 0x15
+
+static int meson_gxm_afbcd_pixel_fmt(u64 modifier, uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return OSD1_AFBCD_RGB32;
+ /* TOFIX support more formats */
+ default:
+ DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
+{
+ if (modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ return false;
+
+ if (!(modifier & AFBC_FORMAT_MOD_YTR))
+ return false;
+
+ return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
+}
+
+static int meson_gxm_afbcd_init(struct meson_drm *priv)
+{
+ return 0;
+}
+
+static int meson_gxm_afbcd_reset(struct meson_drm *priv)
+{
+ writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
+ priv->io_base + _REG(VIU_SW_RESET));
+ writel_relaxed(0, priv->io_base + _REG(VIU_SW_RESET));
+
+ return 0;
+}
+
+static int meson_gxm_afbcd_enable(struct meson_drm *priv)
+{
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
+ OSD1_AFBCD_DEC_ENABLE,
+ priv->io_base + _REG(OSD1_AFBCD_ENABLE));
+
+ return 0;
+}
+
+static int meson_gxm_afbcd_disable(struct meson_drm *priv)
+{
+ writel_bits_relaxed(OSD1_AFBCD_DEC_ENABLE, 0,
+ priv->io_base + _REG(OSD1_AFBCD_ENABLE));
+
+ return 0;
+}
+
+static int meson_gxm_afbcd_setup(struct meson_drm *priv)
+{
+ u32 conv_lbuf_len;
+ u32 mode = FIELD_PREP(OSD1_AFBCD_MIF_URGENT, 3) |
+ FIELD_PREP(OSD1_AFBCD_HOLD_LINE_NUM, 4) |
+ FIELD_PREP(OSD1_AFBCD_RGBA_EXCHAN_CTRL, 0x34) |
+ meson_gxm_afbcd_pixel_fmt(priv->afbcd.modifier,
+ priv->afbcd.format);
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPARSE)
+ mode |= OSD1_AFBCD_HREG_HALF_BLOCK;
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
+ mode |= OSD1_AFBCD_HREG_BLOCK_SPLIT;
+
+ writel_relaxed(mode, priv->io_base + _REG(OSD1_AFBCD_MODE));
+
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_HREG_VSIZE_IN,
+ priv->viu.osd1_width) |
+ FIELD_PREP(OSD1_AFBCD_HREG_HSIZE_IN,
+ priv->viu.osd1_height),
+ priv->io_base + _REG(OSD1_AFBCD_SIZE_IN));
+
+ writel_relaxed(priv->viu.osd1_addr >> 4,
+ priv->io_base + _REG(OSD1_AFBCD_HDR_PTR));
+ writel_relaxed(priv->viu.osd1_addr >> 4,
+ priv->io_base + _REG(OSD1_AFBCD_FRAME_PTR));
+ /* TOFIX: bits 31:24 are not documented, nor the meaning of 0xe4 */
+ writel_relaxed((0xe4 << 24) | (priv->viu.osd1_addr & 0xffffff),
+ priv->io_base + _REG(OSD1_AFBCD_CHROMA_PTR));
+
+ if (priv->viu.osd1_width <= 128)
+ conv_lbuf_len = 32;
+ else if (priv->viu.osd1_width <= 256)
+ conv_lbuf_len = 64;
+ else if (priv->viu.osd1_width <= 512)
+ conv_lbuf_len = 128;
+ else if (priv->viu.osd1_width <= 1024)
+ conv_lbuf_len = 256;
+ else if (priv->viu.osd1_width <= 2048)
+ conv_lbuf_len = 512;
+ else
+ conv_lbuf_len = 1024;
+
+ writel_relaxed(conv_lbuf_len,
+ priv->io_base + _REG(OSD1_AFBCD_CONV_CTRL));
+
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_H, 0) |
+ FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_H,
+ priv->viu.osd1_width - 1),
+ priv->io_base + _REG(OSD1_AFBCD_PIXEL_HSCOPE));
+
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_V, 0) |
+ FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_V,
+ priv->viu.osd1_height - 1),
+ priv->io_base + _REG(OSD1_AFBCD_PIXEL_VSCOPE));
+
+ return 0;
+}
+
+struct meson_afbcd_ops meson_afbcd_gxm_ops = {
+ .init = meson_gxm_afbcd_init,
+ .reset = meson_gxm_afbcd_reset,
+ .enable = meson_gxm_afbcd_enable,
+ .disable = meson_gxm_afbcd_disable,
+ .setup = meson_gxm_afbcd_setup,
+ .supported_fmt = meson_gxm_afbcd_supported_fmt,
+};
+
+/* ARM AFBC Decoder for G12A Family */
+
+/* Amlogic G12A Mali AFBC Decoder supported formats */
+enum {
+ MAFBC_FMT_RGB565 = 0,
+ MAFBC_FMT_RGBA5551,
+ MAFBC_FMT_RGBA1010102,
+ MAFBC_FMT_YUV420_10B,
+ MAFBC_FMT_RGB888,
+ MAFBC_FMT_RGBA8888,
+ MAFBC_FMT_RGBA4444,
+ MAFBC_FMT_R8,
+ MAFBC_FMT_RG88,
+ MAFBC_FMT_YUV420_8B,
+ MAFBC_FMT_YUV422_8B = 11,
+ MAFBC_FMT_YUV422_10B = 14,
+};
+
+static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ /* YTR is forbidden for non XBGR formats */
+ if (modifier & AFBC_FORMAT_MOD_YTR)
+ return -EINVAL;
+ /* fall through */
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return MAFBC_FMT_RGBA8888;
+ case DRM_FORMAT_RGB888:
+ /* YTR is forbidden for non XBGR formats */
+ if (modifier & AFBC_FORMAT_MOD_YTR)
+ return -EINVAL;
+ return MAFBC_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ /* YTR is forbidden for non XBGR formats */
+ if (modifier & AFBC_FORMAT_MOD_YTR)
+ return -EINVAL;
+ return MAFBC_FMT_RGB565;
+ /* TOFIX support more formats */
+ default:
+ DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static int meson_g12a_afbcd_bpp(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return 32;
+ case DRM_FORMAT_RGB888:
+ return 24;
+ case DRM_FORMAT_RGB565:
+ return 16;
+ /* TOFIX support more formats */
+ default:
+ DRM_ERROR("unsupported afbc format[%08x]\n", format);
+ return 0;
+ }
+}
+
+static int meson_g12a_afbcd_fmt_to_blk_mode(u64 modifier, uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return OSD_MALI_COLOR_MODE_RGBA8888;
+ case DRM_FORMAT_RGB888:
+ return OSD_MALI_COLOR_MODE_RGB888;
+ case DRM_FORMAT_RGB565:
+ return OSD_MALI_COLOR_MODE_RGB565;
+ /* TOFIX support more formats */
+ default:
+ DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
+{
+ return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
+}
+
+static int meson_g12a_afbcd_init(struct meson_drm *priv)
+{
+ int ret;
+
+ ret = meson_rdma_init(priv);
+ if (ret)
+ return ret;
+
+ meson_rdma_setup(priv);
+
+ /* Handle AFBC Decoder reset manually */
+ writel_bits_relaxed(MALI_AFBCD_MANUAL_RESET, MALI_AFBCD_MANUAL_RESET,
+ priv->io_base + _REG(MALI_AFBCD_TOP_CTRL));
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+{
+ meson_rdma_reset(priv);
+
+ meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
+ VIU_SW_RESET_G12A_OSD1_AFBCD,
+ VIU_SW_RESET);
+ meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_enable(struct meson_drm *priv)
+{
+ meson_rdma_writel_sync(priv, VPU_MAFBC_IRQ_SURFACES_COMPLETED |
+ VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED |
+ VPU_MAFBC_IRQ_DECODE_ERROR |
+ VPU_MAFBC_IRQ_DETILING_ERROR,
+ VPU_MAFBC_IRQ_MASK);
+
+ meson_rdma_writel_sync(priv, VPU_MAFBC_S0_ENABLE,
+ VPU_MAFBC_SURFACE_CFG);
+
+ meson_rdma_writel_sync(priv, VPU_MAFBC_DIRECT_SWAP,
+ VPU_MAFBC_COMMAND);
+
+ /* This will enable the RDMA to replay the register writes on vsync */
+ meson_rdma_flush(priv);
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_disable(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPU_MAFBC_S0_ENABLE, 0,
+ priv->io_base + _REG(VPU_MAFBC_SURFACE_CFG));
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_setup(struct meson_drm *priv)
+{
+ u32 format = meson_g12a_afbcd_pixel_fmt(priv->afbcd.modifier,
+ priv->afbcd.format);
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_YTR)
+ format |= VPU_MAFBC_YUV_TRANSFORM;
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
+ format |= VPU_MAFBC_BLOCK_SPLIT;
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_TILED)
+ format |= VPU_MAFBC_TILED_HEADER_EN;
+
+ if ((priv->afbcd.modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ format |= FIELD_PREP(VPU_MAFBC_SUPER_BLOCK_ASPECT, 1);
+
+ meson_rdma_writel_sync(priv, format,
+ VPU_MAFBC_FORMAT_SPECIFIER_S0);
+
+ meson_rdma_writel_sync(priv, priv->viu.osd1_addr,
+ VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0);
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0);
+
+ meson_rdma_writel_sync(priv, priv->viu.osd1_width,
+ VPU_MAFBC_BUFFER_WIDTH_S0);
+ meson_rdma_writel_sync(priv, ALIGN(priv->viu.osd1_height, 32),
+ VPU_MAFBC_BUFFER_HEIGHT_S0);
+
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_BOUNDING_BOX_X_START_S0);
+ meson_rdma_writel_sync(priv, priv->viu.osd1_width - 1,
+ VPU_MAFBC_BOUNDING_BOX_X_END_S0);
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_BOUNDING_BOX_Y_START_S0);
+ meson_rdma_writel_sync(priv, priv->viu.osd1_height - 1,
+ VPU_MAFBC_BOUNDING_BOX_Y_END_S0);
+
+ meson_rdma_writel_sync(priv, MESON_G12A_AFBCD_OUT_ADDR,
+ VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0);
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0);
+
+ meson_rdma_writel_sync(priv, priv->viu.osd1_width *
+ (meson_g12a_afbcd_bpp(priv->afbcd.format) / 8),
+ VPU_MAFBC_OUTPUT_BUF_STRIDE_S0);
+
+ return 0;
+}
+
+struct meson_afbcd_ops meson_afbcd_g12a_ops = {
+ .init = meson_g12a_afbcd_init,
+ .reset = meson_g12a_afbcd_reset,
+ .enable = meson_g12a_afbcd_enable,
+ .disable = meson_g12a_afbcd_disable,
+ .setup = meson_g12a_afbcd_setup,
+ .fmt_to_blk_mode = meson_g12a_afbcd_fmt_to_blk_mode,
+ .supported_fmt = meson_g12a_afbcd_supported_fmt,
+};
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
new file mode 100644
index 000000000000..5e5523304f42
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OSD_AFBCD_H
+#define __MESON_OSD_AFBCD_H
+
+#include "meson_drv.h"
+
+/* This is an internal address used to transfer pixels from AFBC to the VIU */
+#define MESON_G12A_AFBCD_OUT_ADDR 0x1000000
+
+struct meson_afbcd_ops {
+ int (*init)(struct meson_drm *priv);
+ int (*reset)(struct meson_drm *priv);
+ int (*enable)(struct meson_drm *priv);
+ int (*disable)(struct meson_drm *priv);
+ int (*setup)(struct meson_drm *priv);
+ int (*fmt_to_blk_mode)(u64 modifier, uint32_t format);
+ bool (*supported_fmt)(u64 modifier, uint32_t format);
+};
+
+extern struct meson_afbcd_ops meson_afbcd_gxm_ops;
+extern struct meson_afbcd_ops meson_afbcd_g12a_ops;
+
+#endif /* __MESON_OSD_AFBCD_H */
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index cc7c6ae3013d..2468b0212d52 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -5,24 +5,21 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/bitfield.h>
-#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_rect.h>
#include "meson_overlay.h"
-#include "meson_vpp.h"
-#include "meson_viu.h"
#include "meson_registers.h"
+#include "meson_viu.h"
+#include "meson_vpp.h"
/* VD1_IF0_GEN_REG */
#define VD_URGENT_CHROMA BIT(28)
@@ -516,7 +513,7 @@ static void meson_overlay_atomic_disable(struct drm_plane *plane,
priv->viu.vd1_enabled = false;
/* Disable VD1 */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD1_IF0_GEN_REG + 0x17b0));
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 7a7e88dadd0b..d5cbc47835bf 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -9,24 +9,21 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/bitfield.h>
-#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_rect.h>
+#include <drm/drm_plane_helper.h>
#include "meson_plane.h"
-#include "meson_vpp.h"
-#include "meson_viu.h"
#include "meson_registers.h"
+#include "meson_viu.h"
+#include "meson_osd_afbcd.h"
/* OSD_SCI_WH_M1 */
#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
@@ -96,12 +93,38 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
false, true);
}
+#define MESON_MOD_AFBC_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | \
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | \
+ AFBC_FORMAT_MOD_YTR | \
+ AFBC_FORMAT_MOD_SPARSE | \
+ AFBC_FORMAT_MOD_SPLIT)
+
/* Takes a fixed 16.16 number and converts it to integer. */
static inline int64_t fixed16_to_int(int64_t value)
{
return value >> 16;
}
+static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv)
+{
+ u32 line_stride = 0;
+
+ switch (priv->afbcd.format) {
+ case DRM_FORMAT_RGB565:
+ line_stride = ((priv->viu.osd1_width << 4) + 127) >> 7;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ line_stride = ((priv->viu.osd1_width << 5) + 127) >> 7;
+ break;
+ }
+
+ return ((line_stride + 1) >> 1) << 1;
+}
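Worked example for the stride helper above: with a 1920 pixel wide DRM_FORMAT_ARGB8888 buffer, (1920 << 5) + 127 = 61567, and 61567 >> 7 = 480 units of 128 bits (7680 bytes, i.e. 1920 * 4); the final round-up keeps 480 since it is already even. A 1921 pixel wide buffer would yield 481, rounded up to 482 units.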
+
static void meson_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -130,59 +153,91 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
*/
spin_lock_irqsave(&priv->drm->event_lock, flags);
+ /* Check if AFBC decoder is required for this buffer */
+ if ((meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) &&
+ fb->modifier & DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
+ priv->viu.osd1_afbcd = true;
+ else
+ priv->viu.osd1_afbcd = false;
+
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
+ priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
+ _REG(VIU_OSD1_CTRL_STAT2));
+
canvas_id_osd1 = priv->canvas_id_osd1;
/* Set up BLK0 to point to the right canvas */
- priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
- OSD_ENDIANNESS_LE);
+ priv->viu.osd1_blk0_cfg[0] = canvas_id_osd1 << OSD_CANVAS_SEL;
+
+ if (priv->viu.osd1_afbcd) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ /* This is the internal decoding memory address */
+ priv->viu.osd1_blk1_cfg4 = MESON_G12A_AFBCD_OUT_ADDR;
+ priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_BE;
+ priv->viu.osd1_ctrl_stat2 |= OSD_PENDING_STAT_CLEAN;
+ priv->viu.osd1_ctrl_stat |= VIU_OSD1_CFG_SYN_EN;
+ }
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
+ priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
+ priv->viu.osd1_ctrl_stat2 |= OSD_DPATH_MALI_AFBCD;
+ }
+ } else {
+ priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+ priv->viu.osd1_ctrl_stat2 &= ~OSD_DPATH_MALI_AFBCD;
+ }
/* On GXBB, Use the old non-HDR RGB2YUV converter */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
+ if (priv->viu.osd1_afbcd &&
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ priv->viu.osd1_blk0_cfg[0] |= OSD_MALI_SRC_EN |
+ priv->afbcd.ops->fmt_to_blk_mode(fb->modifier,
+ fb->format->format);
+ } else {
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ABGR;
+ break;
+ case DRM_FORMAT_RGB888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+ OSD_COLOR_MATRIX_24_RGB;
+ break;
+ case DRM_FORMAT_RGB565:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
+ OSD_COLOR_MATRIX_16_RGB565;
+ break;
+ }
+ }
+
switch (fb->format->format) {
case DRM_FORMAT_XRGB8888:
- /* For XRGB, replace the pixel's alpha by 0xFF */
- writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ARGB;
- break;
case DRM_FORMAT_XBGR8888:
/* For XRGB, replace the pixel's alpha by 0xFF */
- writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ABGR;
+ priv->viu.osd1_ctrl_stat2 |= OSD_REPLACE_EN;
break;
case DRM_FORMAT_ARGB8888:
- /* For ARGB, use the pixel's alpha */
- writel_bits_relaxed(OSD_REPLACE_EN, 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ARGB;
- break;
case DRM_FORMAT_ABGR8888:
/* For ARGB, use the pixel's alpha */
- writel_bits_relaxed(OSD_REPLACE_EN, 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ABGR;
- break;
- case DRM_FORMAT_RGB888:
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
- OSD_COLOR_MATRIX_24_RGB;
- break;
- case DRM_FORMAT_RGB565:
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
- OSD_COLOR_MATRIX_16_RGB565;
+ priv->viu.osd1_ctrl_stat2 &= ~OSD_REPLACE_EN;
break;
- };
+ }
/* Default scaler parameters */
vsc_bot_rcv_num = 0;
@@ -296,7 +351,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
@@ -309,11 +364,22 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_addr = gem->paddr;
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
+ priv->viu.osd1_width = fb->width;
+
+ if (priv->viu.osd1_afbcd) {
+ priv->afbcd.modifier = fb->modifier;
+ priv->afbcd.format = fb->format->format;
+
+ /* Calculate decoder write stride */
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ priv->viu.osd1_blk2_cfg4 =
+ meson_g12a_afbcd_line_stride(priv);
+ }
if (!meson_plane->enabled) {
/* Reset OSD1 before enabling it on GXL+ SoCs */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_osd1_reset(priv);
meson_plane->enabled = true;
@@ -330,9 +396,14 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
struct meson_plane *meson_plane = to_meson_plane(plane);
struct meson_drm *priv = meson_plane->priv;
+ if (priv->afbcd.ops) {
+ priv->afbcd.ops->reset(priv);
+ priv->afbcd.ops->disable(priv);
+ }
+
/* Disable OSD1 */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- writel_bits_relaxed(3 << 8, 0,
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
else
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
@@ -349,6 +420,42 @@ static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
};
+static bool meson_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ struct meson_plane *meson_plane = to_meson_plane(plane);
+ struct meson_drm *priv = meson_plane->priv;
+ int i;
+
+ if (modifier == DRM_FORMAT_MOD_INVALID)
+ return false;
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) &&
+ !meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ return false;
+
+ if (modifier & ~DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
+ return false;
+
+ for (i = 0 ; i < plane->modifier_count ; ++i)
+ if (plane->modifiers[i] == modifier)
+ break;
+
+ if (i == plane->modifier_count) {
+ DRM_DEBUG_KMS("Unsupported modifier\n");
+ return false;
+ }
+
+ if (priv->afbcd.ops && priv->afbcd.ops->supported_fmt)
+ return priv->afbcd.ops->supported_fmt(modifier, format);
+
+ DRM_DEBUG_KMS("AFBC Unsupported\n");
+ return false;
+}
+
static const struct drm_plane_funcs meson_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -356,6 +463,7 @@ static const struct drm_plane_funcs meson_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = meson_plane_format_mod_supported,
};
static const uint32_t supported_drm_formats[] = {
@@ -367,10 +475,60 @@ static const uint32_t supported_drm_formats[] = {
DRM_FORMAT_RGB565,
};
+static const uint64_t format_modifiers_afbc_gxm[] = {
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_YTR),
+ /* SPLIT mandates SPARSE, RGB modes mandate YTR */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_afbc_g12a[] = {
+ /*
+ * - TOFIX Support AFBC modifiers for YUV formats (16x16 + TILED)
+ * - SPLIT is mandatory for performance reasons when in 16x16
+ * block size
+ * - 32x8 block size + SPLIT is mandatory with 4K frame size
+ * for performance reasons
+ */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_default[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int meson_plane_create(struct meson_drm *priv)
{
struct meson_plane *meson_plane;
struct drm_plane *plane;
+ const uint64_t *format_modifiers = format_modifiers_default;
meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
GFP_KERNEL);
@@ -380,11 +538,16 @@ int meson_plane_create(struct meson_drm *priv)
meson_plane->priv = priv;
plane = &meson_plane->base;
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+ format_modifiers = format_modifiers_afbc_gxm;
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ format_modifiers = format_modifiers_afbc_g12a;
+
drm_universal_plane_init(priv->drm, plane, 0xFF,
&meson_plane_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
- NULL,
+ format_modifiers,
DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
drm_plane_helper_add(plane, &meson_plane_helper_funcs);
diff --git a/drivers/gpu/drm/meson/meson_rdma.c b/drivers/gpu/drm/meson/meson_rdma.c
new file mode 100644
index 000000000000..130382178c63
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_rdma.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_rdma.h"
+
+/*
+ * The VPU embeds a "Register DMA" that can write a sequence of registers
+ * on the VPU AHB bus, either manually or triggered by an internal IRQ
+ * event like VSYNC or a line input counter.
+ * The initial implementation handles a single channel (out of the 8 available),
+ * triggered
+ * by the VSYNC irq and does not handle the RDMA irq.
+ */
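Concretely, the channel 1 replay buffer filled by meson_rdma_writel() below stores one RDMA_DESC_SIZE slot per queued write, two consecutive 32-bit words holding the register index and then the value:

    addr[0] = reg0;  addr[1] = val0;
    addr[2] = reg1;  addr[3] = val1;
    /* ... until meson_rdma_flush() hands the filled range to channel 1 */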
+
+#define RDMA_DESC_SIZE (sizeof(uint32_t) * 2)
+
+int meson_rdma_init(struct meson_drm *priv)
+{
+ if (!priv->rdma.addr) {
+ /* Allocate a PAGE buffer */
+ priv->rdma.addr =
+ dma_alloc_coherent(priv->dev, SZ_4K,
+ &priv->rdma.addr_dma,
+ GFP_KERNEL);
+ if (!priv->rdma.addr)
+ return -ENOMEM;
+ }
+
+ priv->rdma.offset = 0;
+
+ writel_relaxed(RDMA_CTRL_SW_RESET,
+ priv->io_base + _REG(RDMA_CTRL));
+ writel_relaxed(RDMA_DEFAULT_CONFIG |
+ FIELD_PREP(RDMA_CTRL_AHB_WR_BURST, 3) |
+ FIELD_PREP(RDMA_CTRL_AHB_RD_BURST, 0),
+ priv->io_base + _REG(RDMA_CTRL));
+
+ return 0;
+}
+
+void meson_rdma_free(struct meson_drm *priv)
+{
+ if (!priv->rdma.addr && !priv->rdma.addr_dma)
+ return;
+
+ meson_rdma_stop(priv);
+
+ dma_free_coherent(priv->dev, SZ_4K,
+ priv->rdma.addr, priv->rdma.addr_dma);
+
+ priv->rdma.addr = NULL;
+ priv->rdma.addr_dma = (dma_addr_t)0;
+}
+
+void meson_rdma_setup(struct meson_drm *priv)
+{
+ /* Channel 1: Write Flag, No Address Increment */
+ writel_bits_relaxed(RDMA_ACCESS_RW_FLAG_CHAN1 |
+ RDMA_ACCESS_ADDR_INC_CHAN1,
+ RDMA_ACCESS_RW_FLAG_CHAN1,
+ priv->io_base + _REG(RDMA_ACCESS_AUTO));
+}
+
+void meson_rdma_stop(struct meson_drm *priv)
+{
+ writel_bits_relaxed(RDMA_IRQ_CLEAR_CHAN1,
+ RDMA_IRQ_CLEAR_CHAN1,
+ priv->io_base + _REG(RDMA_CTRL));
+
+ /* Stop Channel 1 */
+ writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
+ FIELD_PREP(RDMA_ACCESS_ADDR_INC_CHAN1,
+ RDMA_ACCESS_TRIGGER_STOP),
+ priv->io_base + _REG(RDMA_ACCESS_AUTO));
+}
+
+void meson_rdma_reset(struct meson_drm *priv)
+{
+ meson_rdma_stop(priv);
+
+ priv->rdma.offset = 0;
+}
+
+static void meson_rdma_writel(struct meson_drm *priv, uint32_t val,
+ uint32_t reg)
+{
+ if (priv->rdma.offset >= (SZ_4K / RDMA_DESC_SIZE)) {
+ dev_warn_once(priv->dev, "%s: overflow\n", __func__);
+ return;
+ }
+
+ priv->rdma.addr[priv->rdma.offset++] = reg;
+ priv->rdma.addr[priv->rdma.offset++] = val;
+}
+
+/*
+ * This will add the register to the RDMA buffer and write it to the
+ * hardware at the same time.
+ * When meson_rdma_flush is called, the RDMA will replay the register
+ * writes in order.
+ */
+void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg)
+{
+ meson_rdma_writel(priv, val, reg);
+
+ writel_relaxed(val, priv->io_base + _REG(reg));
+}
+
+void meson_rdma_flush(struct meson_drm *priv)
+{
+ meson_rdma_stop(priv);
+
+ /* Start of Channel 1 register writes buffer */
+ writel(priv->rdma.addr_dma,
+ priv->io_base + _REG(RDMA_AHB_START_ADDR_1));
+
+ /* Last byte on Channel 1 register writes buffer */
+ writel(priv->rdma.addr_dma + (priv->rdma.offset * RDMA_DESC_SIZE) - 1,
+ priv->io_base + _REG(RDMA_AHB_END_ADDR_1));
+
+ /* Trigger Channel 1 on VSYNC event */
+ writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
+ FIELD_PREP(RDMA_ACCESS_TRIGGER_CHAN1,
+ RDMA_ACCESS_TRIGGER_VSYNC),
+ priv->io_base + _REG(RDMA_ACCESS_AUTO));
+
+ priv->rdma.offset = 0;
+}
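The calling pattern expected from users of this API, as exercised by the G12A AFBC code elsewhere in this patch, is to reset the channel, queue the register writes, then flush so they are replayed at the next VSYNC; a condensed sketch reusing calls and registers that appear in this patch:

    meson_rdma_reset(priv);                 /* stop channel 1, rewind the buffer */
    meson_rdma_writel_sync(priv, VPU_MAFBC_S0_ENABLE, VPU_MAFBC_SURFACE_CFG);
    meson_rdma_writel_sync(priv, VPU_MAFBC_DIRECT_SWAP, VPU_MAFBC_COMMAND);
    meson_rdma_flush(priv);                 /* arm replay on the next VSYNC */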
diff --git a/drivers/gpu/drm/meson/meson_rdma.h b/drivers/gpu/drm/meson/meson_rdma.h
new file mode 100644
index 000000000000..3870bff7b47f
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_rdma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_RDMA_H
+#define __MESON_RDMA_H
+
+#include "meson_drv.h"
+
+int meson_rdma_init(struct meson_drm *priv);
+void meson_rdma_free(struct meson_drm *priv);
+void meson_rdma_setup(struct meson_drm *priv);
+void meson_rdma_reset(struct meson_drm *priv);
+void meson_rdma_stop(struct meson_drm *priv);
+
+void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg);
+void meson_rdma_flush(struct meson_drm *priv);
+
+#endif /* __MESON_RDMA_H */
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 410e324d6f93..8ea00546cd4e 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -6,11 +6,13 @@
#ifndef __MESON_REGISTERS_H
#define __MESON_REGISTERS_H
+#include <linux/io.h>
+
/* Shift all registers by 2 */
#define _REG(reg) ((reg) << 2)
#define writel_bits_relaxed(mask, val, addr) \
- writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | ((val) & (mask)), addr)
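The added masking of val keeps stray bits outside mask from leaking into the register. A hypothetical call illustrates the difference:

    /* only bit 8 of VPP_MISC may change, even though val has other bits set */
    writel_bits_relaxed(BIT(8), 0xffffffff, priv->io_base + _REG(VPP_MISC));

With the old definition this would have set every bit of VPP_MISC; with the new one it only sets bit 8.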
/* vpp2 */
#define VPP2_DUMMY_DATA 0x1900
@@ -136,11 +138,25 @@
#define VIU_ADDR_START 0x1a00
#define VIU_ADDR_END 0x1aff
#define VIU_SW_RESET 0x1a01
+#define VIU_SW_RESET_OSD1_AFBCD BIT(31)
+#define VIU_SW_RESET_G12A_OSD1_AFBCD BIT(21)
+#define VIU_SW_RESET_G12A_AFBC_ARB BIT(19)
+#define VIU_SW_RESET_OSD1 BIT(0)
#define VIU_MISC_CTRL0 0x1a06
+#define VIU_CTRL0_VD1_AFBC_MASK 0x170000
#define VIU_MISC_CTRL1 0x1a07
+#define MALI_AFBC_MISC GENMASK(15, 8)
#define D2D3_INTF_LENGTH 0x1a08
#define D2D3_INTF_CTRL0 0x1a09
#define VIU_OSD1_CTRL_STAT 0x1a10
+#define VIU_OSD1_OSD_BLK_ENABLE BIT(0)
+#define VIU_OSD1_OSD_MEM_MODE_LINEAR BIT(2)
+#define VIU_OSD1_POSTBLD_SRC_VD1 (1 << 8)
+#define VIU_OSD1_POSTBLD_SRC_VD2 (2 << 8)
+#define VIU_OSD1_POSTBLD_SRC_OSD1 (3 << 8)
+#define VIU_OSD1_POSTBLD_SRC_OSD2 (4 << 8)
+#define VIU_OSD1_OSD_ENABLE BIT(21)
+#define VIU_OSD1_CFG_SYN_EN BIT(31)
#define VIU_OSD1_CTRL_STAT2 0x1a2d
#define VIU_OSD1_COLOR_ADDR 0x1a11
#define VIU_OSD1_COLOR 0x1a12
@@ -171,6 +187,16 @@
#define VIU_OSD1_FIFO_CTRL_STAT 0x1a2b
#define VIU_OSD1_TEST_RDDATA 0x1a2c
#define VIU_OSD1_PROT_CTRL 0x1a2e
+#define VIU_OSD1_MALI_UNPACK_CTRL 0x1a2f
+#define VIU_OSD1_MALI_UNPACK_EN BIT(31)
+#define VIU_OSD1_MALI_AFBCD_R_REORDER GENMASK(15, 12)
+#define VIU_OSD1_MALI_AFBCD_G_REORDER GENMASK(11, 8)
+#define VIU_OSD1_MALI_AFBCD_B_REORDER GENMASK(7, 4)
+#define VIU_OSD1_MALI_AFBCD_A_REORDER GENMASK(3, 0)
+#define VIU_OSD1_MALI_REORDER_R 1
+#define VIU_OSD1_MALI_REORDER_G 2
+#define VIU_OSD1_MALI_REORDER_B 3
+#define VIU_OSD1_MALI_REORDER_A 4
#define VIU_OSD2_CTRL_STAT 0x1a30
#define VIU_OSD2_CTRL_STAT2 0x1a4d
#define VIU_OSD2_COLOR_ADDR 0x1a31
@@ -230,6 +256,12 @@
#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f
#define VIU_OSD3_DIMM_CTRL 0x3da0
+#define VIU_OSD_DDR_PRIORITY_URGENT BIT(0)
+#define VIU_OSD_HOLD_FIFO_LINES(lines) ((lines & 0x1f) << 5)
+#define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12)
+#define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22)
+#define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24)
+
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
#define VD1_IF0_CANVAS1 0x1a52
@@ -339,6 +371,7 @@
#define VPP_LINE_IN_LENGTH 0x1d01
#define VPP_PIC_IN_HEIGHT 0x1d02
#define VPP_SCALE_COEF_IDX 0x1d03
+#define VPP_SCALE_HORIZONTAL_COEF BIT(8)
#define VPP_SCALE_COEF 0x1d04
#define VPP_VSC_REGION12_STARTP 0x1d05
#define VPP_VSC_REGION34_STARTP 0x1d06
@@ -360,6 +393,12 @@
#define VPP_HSC_REGION4_PHASE_SLOPE 0x1d17
#define VPP_HSC_PHASE_CTRL 0x1d18
#define VPP_SC_MISC 0x1d19
+#define VPP_SC_VD_EN_ENABLE BIT(15)
+#define VPP_SC_TOP_EN_ENABLE BIT(16)
+#define VPP_SC_HSC_EN_ENABLE BIT(17)
+#define VPP_SC_VSC_EN_ENABLE BIT(18)
+#define VPP_VSC_BANK_LENGTH(length) (length & 0x7)
+#define VPP_HSC_BANK_LENGTH(length) ((length & 0x7) << 8)
#define VPP_PREBLEND_VD1_H_START_END 0x1d1a
#define VPP_PREBLEND_VD1_V_START_END 0x1d1b
#define VPP_POSTBLEND_VD1_H_START_END 0x1d1c
@@ -369,24 +408,28 @@
#define VPP_PREBLEND_H_SIZE 0x1d20
#define VPP_POSTBLEND_H_SIZE 0x1d21
#define VPP_HOLD_LINES 0x1d22
+#define VPP_POSTBLEND_HOLD_LINES(lines) (lines & 0xf)
+#define VPP_PREBLEND_HOLD_LINES(lines) ((lines & 0xf) << 8)
#define VPP_BLEND_ONECOLOR_CTRL 0x1d23
#define VPP_PREBLEND_CURRENT_XY 0x1d24
#define VPP_POSTBLEND_CURRENT_XY 0x1d25
#define VPP_MISC 0x1d26
-#define VPP_PREBLEND_ENABLE BIT(6)
-#define VPP_POSTBLEND_ENABLE BIT(7)
-#define VPP_OSD2_ALPHA_PREMULT BIT(8)
-#define VPP_OSD1_ALPHA_PREMULT BIT(9)
-#define VPP_VD1_POSTBLEND BIT(10)
-#define VPP_VD2_POSTBLEND BIT(11)
-#define VPP_OSD1_POSTBLEND BIT(12)
-#define VPP_OSD2_POSTBLEND BIT(13)
-#define VPP_VD1_PREBLEND BIT(14)
-#define VPP_VD2_PREBLEND BIT(15)
-#define VPP_OSD1_PREBLEND BIT(16)
-#define VPP_OSD2_PREBLEND BIT(17)
-#define VPP_COLOR_MNG_ENABLE BIT(28)
+#define VPP_PREBLEND_ENABLE BIT(6)
+#define VPP_POSTBLEND_ENABLE BIT(7)
+#define VPP_OSD2_ALPHA_PREMULT BIT(8)
+#define VPP_OSD1_ALPHA_PREMULT BIT(9)
+#define VPP_VD1_POSTBLEND BIT(10)
+#define VPP_VD2_POSTBLEND BIT(11)
+#define VPP_OSD1_POSTBLEND BIT(12)
+#define VPP_OSD2_POSTBLEND BIT(13)
+#define VPP_VD1_PREBLEND BIT(14)
+#define VPP_VD2_PREBLEND BIT(15)
+#define VPP_OSD1_PREBLEND BIT(16)
+#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_COLOR_MNG_ENABLE BIT(28)
#define VPP_OFIFO_SIZE 0x1d27
+#define VPP_OFIFO_SIZE_MASK GENMASK(13, 0)
+#define VPP_OFIFO_SIZE_DEFAULT (0xfff << 20 | 0x1000)
#define VPP_FIFO_STATUS 0x1d28
#define VPP_SMOKE_CTRL 0x1d29
#define VPP_SMOKE1_VAL 0x1d2a
@@ -402,6 +445,8 @@
#define VPP_HSC_PHASE_CTRL1 0x1d34
#define VPP_HSC_INI_PAT_CTRL 0x1d35
#define VPP_VADJ_CTRL 0x1d40
+#define VPP_MINUS_BLACK_LVL_VADJ1_ENABLE BIT(1)
+
#define VPP_VADJ1_Y 0x1d41
#define VPP_VADJ1_MA_MB 0x1d42
#define VPP_VADJ1_MC_MD 0x1d43
@@ -461,6 +506,7 @@
#define VPP_PEAKING_VGAIN 0x1d92
#define VPP_PEAKING_NLP_1 0x1d93
#define VPP_DOLBY_CTRL 0x1d93
+#define VPP_PPS_DUMMY_DATA_MODE (1 << 17)
#define VPP_PEAKING_NLP_2 0x1d94
#define VPP_PEAKING_NLP_3 0x1d95
#define VPP_PEAKING_NLP_4 0x1d96
@@ -591,6 +637,7 @@
#define OSD34_SCI_WH_M1 0x3d29
#define OSD34_SCO_H_START_END 0x3d2a
#define OSD34_SCO_V_START_END 0x3d2b
+
/* viu2 */
#define VIU2_ADDR_START 0x1e00
#define VIU2_ADDR_END 0x1eff
@@ -704,6 +751,25 @@
#define VENC_UPSAMPLE_CTRL0 0x1b64
#define VENC_UPSAMPLE_CTRL1 0x1b65
#define VENC_UPSAMPLE_CTRL2 0x1b66
+#define VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO BIT(0)
+#define VENC_UPSAMPLE_CTRL_F1_EN BIT(5)
+#define VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN BIT(6)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA (0x0 << 12)
+#define VENC_UPSAMPLE_CTRL_CVBS (0x1 << 12)
+#define VENC_UPSAMPLE_CTRL_S_VIDEO_LUMA (0x2 << 12)
+#define VENC_UPSAMPLE_CTRL_S_VIDEO_CHROMA (0x3 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_PB (0x4 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_PR (0x5 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_R (0x6 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_G (0x7 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_B (0x8 << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_Y (0x9 << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PB (0xa << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PR (0xb << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_R (0xc << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_G (0xd << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_B (0xe << 12)
+#define VENC_UPSAMPLE_CTRL_VDAC_TEST_VALUE (0xf << 12)
#define TCON_INVERT_CTL 0x1b67
#define VENC_VIDEO_PROG_MODE 0x1b68
#define VENC_ENCI_LINE 0x1b69
@@ -712,6 +778,7 @@
#define VENC_ENCP_PIXEL 0x1b6c
#define VENC_STATA 0x1b6d
#define VENC_INTCTRL 0x1b6e
+#define VENC_INTCTRL_ENCI_LNRST_INT_EN BIT(1)
#define VENC_INTFLAG 0x1b6f
#define VENC_VIDEO_TST_EN 0x1b70
#define VENC_VIDEO_TST_MDSEL 0x1b71
@@ -722,6 +789,7 @@
#define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76
#define VENC_VIDEO_TST_VDCNT_STSET 0x1b77
#define VENC_VDAC_DACSEL0 0x1b78
+#define VENC_VDAC_SEL_ATV_DMD BIT(5)
#define VENC_VDAC_DACSEL1 0x1b79
#define VENC_VDAC_DACSEL2 0x1b7a
#define VENC_VDAC_DACSEL3 0x1b7b
@@ -742,6 +810,7 @@
#define VENC_VDAC_DAC5_GAINCTRL 0x1bfa
#define VENC_VDAC_DAC5_OFFSET 0x1bfb
#define VENC_VDAC_FIFO_CTRL 0x1bfc
+#define VENC_VDAC_FIFO_EN_ENCI_ENABLE BIT(13)
#define ENCL_TCON_INVERT_CTL 0x1bfd
#define ENCP_VIDEO_EN 0x1b80
#define ENCP_VIDEO_SYNC_MODE 0x1b81
@@ -757,6 +826,7 @@
#define ENCP_VIDEO_SYNC_OFFST 0x1b8b
#define ENCP_VIDEO_MACV_OFFST 0x1b8c
#define ENCP_VIDEO_MODE 0x1b8d
+#define ENCP_VIDEO_MODE_DE_V_HIGH BIT(14)
#define ENCP_VIDEO_MODE_ADV 0x1b8e
#define ENCP_DBG_PX_RST 0x1b90
#define ENCP_DBG_LN_RST 0x1b91
@@ -835,6 +905,11 @@
#define C656_FS_LNED 0x1be7
#define ENCI_VIDEO_MODE 0x1b00
#define ENCI_VIDEO_MODE_ADV 0x1b01
+#define ENCI_VIDEO_MODE_ADV_DMXMD(val) (val & 0x3)
+#define ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 BIT(2)
+#define ENCI_VIDEO_MODE_ADV_YBW_MEDIUM (0 << 4)
+#define ENCI_VIDEO_MODE_ADV_YBW_LOW (0x1 << 4)
+#define ENCI_VIDEO_MODE_ADV_YBW_HIGH (0x2 << 4)
#define ENCI_VIDEO_FSC_ADJ 0x1b02
#define ENCI_VIDEO_BRIGHT 0x1b03
#define ENCI_VIDEO_CONT 0x1b04
@@ -905,13 +980,17 @@
#define ENCI_DBG_MAXPX 0x1b4c
#define ENCI_DBG_MAXLN 0x1b4d
#define ENCI_MACV_MAX_AMP 0x1b50
+#define ENCI_MACV_MAX_AMP_ENABLE_CHANGE BIT(15)
+#define ENCI_MACV_MAX_AMP_VAL(val) (val & 0x83ff)
#define ENCI_MACV_PULSE_LO 0x1b51
#define ENCI_MACV_PULSE_HI 0x1b52
#define ENCI_MACV_BKP_MAX 0x1b53
#define ENCI_CFILT_CTRL 0x1b54
+#define ENCI_CFILT_CMPT_SEL_HIGH BIT(1)
#define ENCI_CFILT7 0x1b55
#define ENCI_YC_DELAY 0x1b56
#define ENCI_VIDEO_EN 0x1b57
+#define ENCI_VIDEO_EN_ENABLE BIT(0)
#define ENCI_DVI_HSO_BEGIN 0x1c00
#define ENCI_DVI_HSO_END 0x1c01
#define ENCI_DVI_VSO_BLINE_EVN 0x1c02
@@ -923,6 +1002,10 @@
#define ENCI_DVI_VSO_END_EVN 0x1c08
#define ENCI_DVI_VSO_END_ODD 0x1c09
#define ENCI_CFILT_CTRL2 0x1c0a
+#define ENCI_CFILT_CMPT_CR_DLY(delay) (delay & 0xf)
+#define ENCI_CFILT_CMPT_CB_DLY(delay) ((delay & 0xf) << 4)
+#define ENCI_CFILT_CVBS_CR_DLY(delay) ((delay & 0xf) << 8)
+#define ENCI_CFILT_CVBS_CB_DLY(delay) ((delay & 0xf) << 12)
#define ENCI_DACSEL_0 0x1c0b
#define ENCI_DACSEL_1 0x1c0c
#define ENCP_DACSEL_0 0x1c0d
@@ -937,6 +1020,8 @@
#define ENCI_TST_CLRBAR_WIDTH 0x1c16
#define ENCI_TST_VDCNT_STSET 0x1c17
#define ENCI_VFIFO2VD_CTL 0x1c18
+#define ENCI_VFIFO2VD_CTL_ENABLE BIT(0)
+#define ENCI_VFIFO2VD_CTL_VD_SEL(val) ((val & 0xff) << 8)
#define ENCI_VFIFO2VD_PIXEL_START 0x1c19
#define ENCI_VFIFO2VD_PIXEL_END 0x1c1a
#define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b
@@ -999,6 +1084,7 @@
#define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56
#define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57
#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58
+#define VENC_VDAC_DAC0_FILT_CTRL0_EN BIT(0)
#define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59
#define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a
#define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b
@@ -1125,11 +1211,59 @@
#define RDMA_AHB_START_ADDR_7 0x110e
#define RDMA_AHB_END_ADDR_7 0x110f
#define RDMA_ACCESS_AUTO 0x1110
+#define RDMA_ACCESS_TRIGGER_CHAN3 GENMASK(31, 24)
+#define RDMA_ACCESS_TRIGGER_CHAN2 GENMASK(23, 16)
+#define RDMA_ACCESS_TRIGGER_CHAN1 GENMASK(15, 8)
+#define RDMA_ACCESS_TRIGGER_STOP 0
+#define RDMA_ACCESS_TRIGGER_VSYNC 1
+#define RDMA_ACCESS_TRIGGER_LINE 32
+#define RDMA_ACCESS_RW_FLAG_CHAN3 BIT(7)
+#define RDMA_ACCESS_RW_FLAG_CHAN2 BIT(6)
+#define RDMA_ACCESS_RW_FLAG_CHAN1 BIT(5)
+#define RDMA_ACCESS_ADDR_INC_CHAN3 BIT(3)
+#define RDMA_ACCESS_ADDR_INC_CHAN2 BIT(2)
+#define RDMA_ACCESS_ADDR_INC_CHAN1 BIT(1)
#define RDMA_ACCESS_AUTO2 0x1111
+#define RDMA_ACCESS_RW_FLAG_CHAN7 BIT(7)
+#define RDMA_ACCESS_RW_FLAG_CHAN6 BIT(6)
+#define RDMA_ACCESS_RW_FLAG_CHAN5 BIT(5)
+#define RDMA_ACCESS_RW_FLAG_CHAN4 BIT(4)
+#define RDMA_ACCESS_ADDR_INC_CHAN7 BIT(3)
+#define RDMA_ACCESS_ADDR_INC_CHAN6 BIT(2)
+#define RDMA_ACCESS_ADDR_INC_CHAN5 BIT(1)
+#define RDMA_ACCESS_ADDR_INC_CHAN4 BIT(0)
#define RDMA_ACCESS_AUTO3 0x1112
+#define RDMA_ACCESS_TRIGGER_CHAN7 GENMASK(31, 24)
+#define RDMA_ACCESS_TRIGGER_CHAN6 GENMASK(23, 16)
+#define RDMA_ACCESS_TRIGGER_CHAN5 GENMASK(15, 8)
+#define RDMA_ACCESS_TRIGGER_CHAN4 GENMASK(7, 0)
#define RDMA_ACCESS_MAN 0x1113
+#define RDMA_ACCESS_MAN_RW_FLAG BIT(2)
+#define RDMA_ACCESS_MAN_ADDR_INC BIT(1)
+#define RDMA_ACCESS_MAN_START BIT(0)
#define RDMA_CTRL 0x1114
+#define RDMA_IRQ_CLEAR_CHAN7 BIT(31)
+#define RDMA_IRQ_CLEAR_CHAN6 BIT(30)
+#define RDMA_IRQ_CLEAR_CHAN5 BIT(29)
+#define RDMA_IRQ_CLEAR_CHAN4 BIT(28)
+#define RDMA_IRQ_CLEAR_CHAN3 BIT(27)
+#define RDMA_IRQ_CLEAR_CHAN2 BIT(26)
+#define RDMA_IRQ_CLEAR_CHAN1 BIT(25)
+#define RDMA_IRQ_CLEAR_CHAN_MAN BIT(24)
+#define RDMA_DEFAULT_CONFIG (BIT(7) | BIT(6))
+#define RDMA_CTRL_AHB_WR_BURST GENMASK(5, 4)
+#define RDMA_CTRL_AHB_RD_BURST GENMASK(3, 2)
+#define RDMA_CTRL_SW_RESET BIT(1)
+#define RDMA_CTRL_FREE_CLK_EN BIT(0)
#define RDMA_STATUS 0x1115
+#define RDMA_IRQ_STAT_CHAN7 BIT(31)
+#define RDMA_IRQ_STAT_CHAN6 BIT(30)
+#define RDMA_IRQ_STAT_CHAN5 BIT(29)
+#define RDMA_IRQ_STAT_CHAN4 BIT(28)
+#define RDMA_IRQ_STAT_CHAN3 BIT(27)
+#define RDMA_IRQ_STAT_CHAN2 BIT(26)
+#define RDMA_IRQ_STAT_CHAN1 BIT(25)
+#define RDMA_IRQ_STAT_CHAN_MAN BIT(24)
#define RDMA_STATUS2 0x1116
#define RDMA_STATUS3 0x1117
#define L_GAMMA_CNTL_PORT 0x1400
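The RDMA bit macros added above replace bare shifts when the driver programs a transfer. A minimal sketch of how they compose, assuming the driver's usual _REG()/io_base write convention seen elsewhere in this series; the helper itself is illustrative and not part of this patch:

/*
 * Illustrative only: kick off a manual-channel RDMA write using the
 * new RDMA_CTRL and RDMA_ACCESS_MAN bit definitions.
 */
static void rdma_manual_write_start(struct meson_drm *priv)
{
	/* Write the default config bits and enable the free-running clock. */
	writel_relaxed(RDMA_DEFAULT_CONFIG | RDMA_CTRL_FREE_CLK_EN,
		       priv->io_base + _REG(RDMA_CTRL));

	/* Manual channel: write direction, address auto-increment, start. */
	writel_relaxed(RDMA_ACCESS_MAN_RW_FLAG |
		       RDMA_ACCESS_MAN_ADDR_INC |
		       RDMA_ACCESS_MAN_START,
		       priv->io_base + _REG(RDMA_ACCESS_MAN));
}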
@@ -1404,6 +1538,18 @@
#define VIU2_SEL_VENC_ENCP (2 << 2)
#define VIU2_SEL_VENC_ENCT (3 << 2)
#define VPU_HDMI_SETTING 0x271b
+#define VPU_HDMI_ENCI_DATA_TO_HDMI BIT(0)
+#define VPU_HDMI_ENCP_DATA_TO_HDMI BIT(1)
+#define VPU_HDMI_INV_HSYNC BIT(2)
+#define VPU_HDMI_INV_VSYNC BIT(3)
+#define VPU_HDMI_OUTPUT_CRYCB (0 << 5)
+#define VPU_HDMI_OUTPUT_YCBCR (1 << 5)
+#define VPU_HDMI_OUTPUT_YCRCB (2 << 5)
+#define VPU_HDMI_OUTPUT_CBCRY (3 << 5)
+#define VPU_HDMI_OUTPUT_CBYCR (4 << 5)
+#define VPU_HDMI_OUTPUT_CRCBY (5 << 5)
+#define VPU_HDMI_WR_RATE(rate) (((rate & 0x1f) - 1) << 8)
+#define VPU_HDMI_RD_RATE(rate) (((rate & 0x1f) - 1) << 12)
#define ENCI_INFO_READ 0x271c
#define ENCP_INFO_READ 0x271d
#define ENCT_INFO_READ 0x271e
@@ -1480,6 +1626,7 @@
#define VPU_RDARB_MODE_L1C2 0x2799
#define VPU_RDARB_MODE_L2C1 0x279d
#define VPU_WRARB_MODE_L2C1 0x27a2
+#define VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) (port << (16 + dc))
/* osd super scale */
#define OSDSR_HV_SIZEIN 0x3130
@@ -1512,16 +1659,33 @@
/* osd afbcd on gxtvbb */
#define OSD1_AFBCD_ENABLE 0x31a0
+#define OSD1_AFBCD_ID_FIFO_THRD GENMASK(15, 9)
+#define OSD1_AFBCD_DEC_ENABLE BIT(8)
+#define OSD1_AFBCD_FRM_START BIT(0)
#define OSD1_AFBCD_MODE 0x31a1
+#define OSD1_AFBCD_SOFT_RESET BIT(31)
+#define OSD1_AFBCD_AXI_REORDER_MODE BIT(28)
+#define OSD1_AFBCD_MIF_URGENT GENMASK(25, 24)
+#define OSD1_AFBCD_HOLD_LINE_NUM GENMASK(22, 16)
+#define OSD1_AFBCD_RGBA_EXCHAN_CTRL GENMASK(15, 8)
+#define OSD1_AFBCD_HREG_BLOCK_SPLIT BIT(6)
+#define OSD1_AFBCD_HREG_HALF_BLOCK BIT(5)
+#define OSD1_AFBCD_HREG_PIXEL_PACKING_FMT GENMASK(4, 0)
#define OSD1_AFBCD_SIZE_IN 0x31a2
+#define OSD1_AFBCD_HREG_VSIZE_IN GENMASK(31, 16)
+#define OSD1_AFBCD_HREG_HSIZE_IN GENMASK(15, 0)
#define OSD1_AFBCD_HDR_PTR 0x31a3
#define OSD1_AFBCD_FRAME_PTR 0x31a4
#define OSD1_AFBCD_CHROMA_PTR 0x31a5
#define OSD1_AFBCD_CONV_CTRL 0x31a6
+#define OSD1_AFBCD_CONV_LBUF_LEN GENMASK(15, 0)
#define OSD1_AFBCD_STATUS 0x31a8
#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
+#define OSD1_AFBCD_DEC_PIXEL_BGN_H GENMASK(31, 16)
+#define OSD1_AFBCD_DEC_PIXEL_END_H GENMASK(15, 0)
#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
-#define VIU_MISC_CTRL1 0x1a07
+#define OSD1_AFBCD_DEC_PIXEL_BGN_V GENMASK(31, 16)
+#define OSD1_AFBCD_DEC_PIXEL_END_V GENMASK(15, 0)
/* add for gxm and 962e dv core2 */
#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
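The OSD1 AFBCD fields above are declared with GENMASK(), so they are meant to be filled with FIELD_PREP() from <linux/bitfield.h>. A short sketch under that assumption; the helper name and its existence are illustrative, not taken from this patch:

/* Sketch: program the AFBC decoder input size from a width/height pair. */
static void osd1_afbcd_set_size_in(struct meson_drm *priv, u32 w, u32 h)
{
	writel_relaxed(FIELD_PREP(OSD1_AFBCD_HREG_VSIZE_IN, h) |
		       FIELD_PREP(OSD1_AFBCD_HREG_HSIZE_IN, w),
		       priv->io_base + _REG(OSD1_AFBCD_SIZE_IN));
}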
@@ -1533,14 +1697,34 @@
#define VPU_MAFBC_IRQ_CLEAR 0x3a02
#define VPU_MAFBC_IRQ_MASK 0x3a03
#define VPU_MAFBC_IRQ_STATUS 0x3a04
+#define VPU_MAFBC_IRQ_SECURE_ID_ERROR BIT(5)
+#define VPU_MAFBC_IRQ_AXI_ERROR BIT(4)
+#define VPU_MAFBC_IRQ_DETILING_ERROR BIT(3)
+#define VPU_MAFBC_IRQ_DECODE_ERROR BIT(2)
+#define VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED BIT(1)
+#define VPU_MAFBC_IRQ_SURFACES_COMPLETED BIT(0)
#define VPU_MAFBC_COMMAND 0x3a05
+#define VPU_MAFBC_PENDING_SWAP BIT(1)
+#define VPU_MAFBC_DIRECT_SWAP BIT(0)
#define VPU_MAFBC_STATUS 0x3a06
+#define VPU_MAFBC_ERROR BIT(2)
+#define VPU_MAFBC_SWAPPING BIT(1)
+#define VPU_MAFBC_ACTIVE BIT(0)
#define VPU_MAFBC_SURFACE_CFG 0x3a07
-
-/* osd afbc on g12a */
+#define VPU_MAFBC_CONTINUOUS_DECODING_ENABLE BIT(16)
+#define VPU_MAFBC_S3_ENABLE BIT(3)
+#define VPU_MAFBC_S2_ENABLE BIT(2)
+#define VPU_MAFBC_S1_ENABLE BIT(1)
+#define VPU_MAFBC_S0_ENABLE BIT(0)
#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
+#define VPU_MAFBC_PAYLOAD_LIMIT_EN BIT(19)
+#define VPU_MAFBC_TILED_HEADER_EN BIT(18)
+#define VPU_MAFBC_SUPER_BLOCK_ASPECT GENMASK(17, 16)
+#define VPU_MAFBC_BLOCK_SPLIT BIT(9)
+#define VPU_MAFBC_YUV_TRANSFORM BIT(8)
+#define VPU_MAFBC_PIXEL_FORMAT GENMASK(3, 0)
#define VPU_MAFBC_BUFFER_WIDTH_S0 0x3a13
#define VPU_MAFBC_BUFFER_HEIGHT_S0 0x3a14
#define VPU_MAFBC_BOUNDING_BOX_X_START_S0 0x3a15
@@ -1551,6 +1735,8 @@
#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0 0x3a1a
#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S0 0x3a1b
#define VPU_MAFBC_PREFETCH_CFG_S0 0x3a1c
+#define VPU_MAFBC_PREFETCH_READ_DIRECTION_Y BIT(1)
+#define VPU_MAFBC_PREFETCH_READ_DIRECTION_X BIT(0)
#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S1 0x3a30
#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S1 0x3a31
@@ -1595,10 +1781,22 @@
#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c
#define DOLBY_PATH_CTRL 0x1a0c
+#define DOLBY_BYPASS_EN(val) (val & 0xf)
#define OSD_PATH_MISC_CTRL 0x1a0e
+#define OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD BIT(4)
+#define OSD_PATH_OSD_AXI_SEL_OSD2_AFBCD BIT(5)
+#define OSD_PATH_OSD_AXI_SEL_OSD3_AFBCD BIT(6)
#define MALI_AFBCD_TOP_CTRL 0x1a0f
+#define MALI_AFBCD_MANUAL_RESET BIT(23)
#define VIU_OSD_BLEND_CTRL 0x39b0
+#define VIU_OSD_BLEND_REORDER(dest, src) ((src) << (dest * 4))
+#define VIU_OSD_BLEND_DIN_EN(bits) ((bits & 0xf) << 20)
+#define VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 BIT(24)
+#define VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 BIT(25)
+#define VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 BIT(26)
+#define VIU_OSD_BLEND_BLEN2_PREMULT_EN(input) ((input & 0x3) << 27)
+#define VIU_OSD_BLEND_HOLD_LINES(lines) ((lines & 0x7) << 29)
#define VIU_OSD_BLEND_CTRL1 0x39c0
#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1
#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2
@@ -1628,13 +1826,27 @@
#define VPP_SLEEP_CTRL 0x1dfa
#define VD1_BLEND_SRC_CTRL 0x1dfb
#define VD2_BLEND_SRC_CTRL 0x1dfc
+#define VD_BLEND_PREBLD_SRC_VD1 (1 << 0)
+#define VD_BLEND_PREBLD_SRC_VD2 (2 << 0)
+#define VD_BLEND_PREBLD_SRC_OSD1 (3 << 0)
+#define VD_BLEND_PREBLD_SRC_OSD2 (4 << 0)
+#define VD_BLEND_PREBLD_PREMULT_EN BIT(4)
+#define VD_BLEND_POSTBLD_SRC_VD1 (1 << 8)
+#define VD_BLEND_POSTBLD_SRC_VD2 (2 << 8)
+#define VD_BLEND_POSTBLD_SRC_OSD1 (3 << 8)
+#define VD_BLEND_POSTBLD_SRC_OSD2 (4 << 8)
+#define VD_BLEND_POSTBLD_PREMULT_EN BIT(16)
#define OSD1_BLEND_SRC_CTRL 0x1dfd
#define OSD2_BLEND_SRC_CTRL 0x1dfe
+#define OSD_BLEND_POSTBLD_SRC_VD1 (1 << 8)
+#define OSD_BLEND_POSTBLD_SRC_VD2 (2 << 8)
+#define OSD_BLEND_POSTBLD_SRC_OSD1 (3 << 8)
+#define OSD_BLEND_POSTBLD_SRC_OSD2 (4 << 8)
+#define OSD_BLEND_PATH_SEL_ENABLE BIT(20)
#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968
#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969
#define VPP_RDARB_MODE 0x3978
#define VPP_RDARB_REQEN_SLV 0x3979
-#define VPU_RDARB_MODE_L2C1 0x279d
#endif /* __MESON_REGISTERS_H */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 26732f038d19..f690793ae2d5 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -5,9 +5,10 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
+#include <drm/drm_print.h>
+
#include "meson_drv.h"
#include "meson_vclk.h"
@@ -96,6 +97,7 @@
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL_EN BIT(30)
#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
@@ -240,7 +242,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
unsigned int val;
/* Setup PLL to output 1.485GHz */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00404e00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
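The VPU_COMPATIBLE_* identifiers used from this hunk onwards come from the meson_drv.h side of the same series, which is not shown here; meson_vpu_is_compatible() presumably compares a per-SoC id from the OF match data instead of a compatible string. A sketch of the assumed definitions, for readability only:

/* Assumed shape of the enum in meson_drv.h; values are illustrative. */
enum vpu_compatible {
	VPU_COMPATIBLE_GXBB = 0,
	VPU_COMPATIBLE_GXL = 1,
	VPU_COMPATIBLE_GXM = 2,
	VPU_COMPATIBLE_G12A = 3,
};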
@@ -252,8 +254,8 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0xa6212844);
@@ -270,7 +272,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00010000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
@@ -298,7 +300,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
VCLK2_DIV_MASK, (55 - 1));
/* select vid_pll for vclk2 */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
else
@@ -453,7 +455,7 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
{
unsigned int val;
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
if (frac)
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
@@ -468,13 +470,13 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Enable and unreset */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- 0x7 << 28, 0x4 << 28);
+ 0x7 << 28, HHI_HDMI_PLL_CNTL_EN);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
val, (val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
@@ -491,10 +493,11 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
/* Enable and reset */
+ /* TODO: add specific macro for g12a here */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
0x3 << 28, 0x3 << 28);
@@ -542,36 +545,36 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
} while(1);
}
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 16, pll_od_to_reg(od1) << 16);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 21, pll_od_to_reg(od1) << 21);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 16, pll_od_to_reg(od1) << 16);
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 22, pll_od_to_reg(od2) << 22);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 23, pll_od_to_reg(od2) << 23);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 18, pll_od_to_reg(od2) << 18);
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 18, pll_od_to_reg(od3) << 18);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 19, pll_od_to_reg(od3) << 19);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 20, pll_od_to_reg(od3) << 20);
}
@@ -582,7 +585,7 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
unsigned int pll_freq)
{
/* The GXBB PLL has a /2 pre-multiplier */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
pll_freq /= 2;
return pll_freq / XTAL_FREQ;
@@ -602,12 +605,12 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int frac;
/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
frac_max = HDMI_FRAC_MAX_GXBB;
parent_freq *= 2;
}
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
frac_max = HDMI_FRAC_MAX_G12A;
/* We can have a perfect match !*/
@@ -628,20 +631,25 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
unsigned int m,
unsigned int frac)
{
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
/* Empiric supported min/max dividers */
if (m < 53 || m > 123)
return false;
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
if (frac >= HDMI_FRAC_MAX_GXL)
return false;
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_G12A)
+ return false;
}
return true;
@@ -756,7 +764,7 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
/* Set HDMI PLL rate */
if (!od1 && !od2 && !od3) {
meson_hdmi_pll_generic_set(priv, pll_base_freq);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
switch (pll_base_freq) {
case 2970000:
m = 0x3d;
@@ -773,8 +781,8 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
switch (pll_base_freq) {
case 2970000:
m = 0x7b;
@@ -791,7 +799,7 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
switch (pll_base_freq) {
case 2970000:
m = 0x7b;
@@ -969,7 +977,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
meson_venci_cvbs_clock_config(priv);
return;
} else if (target == MESON_VCLK_TARGET_DMT) {
- /* The DMT clock path is fixed after the PLL:
+ /*
+ * The DMT clock path is fixed after the PLL:
* - automatic PLL freq + OD management
* - vid_pll_div = VID_PLL_DIV_5
* - vclk_div = 2
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index ed993d20abda..b62125540aef 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -9,6 +9,10 @@
#ifndef __MESON_VCLK_H
#define __MESON_VCLK_H
+#include <drm/drm_modes.h>
+
+struct meson_drm;
+
enum {
MESON_VCLK_TARGET_CVBS = 0,
MESON_VCLK_TARGET_HDMI = 1,
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 7b7a0d8d737c..4efd7864d5bf 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -5,14 +5,14 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
+#include <drm/drm_modes.h>
+
#include "meson_drv.h"
+#include "meson_registers.h"
#include "meson_venc.h"
#include "meson_vpp.h"
-#include "meson_vclk.h"
-#include "meson_registers.h"
/**
* DOC: Video Encoder
@@ -61,9 +61,9 @@
/* HHI Registers */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbb offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
-#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbc offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
@@ -192,7 +192,7 @@ union meson_hdmi_venc_mode meson_hdmi_enci_mode_480i = {
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
- .macv_max_amp = 0x810b,
+ .macv_max_amp = 0xb,
.video_prog_mode = 0xf0,
.video_mode = 0x8,
.sch_adjust = 0x20,
@@ -212,7 +212,7 @@ union meson_hdmi_venc_mode meson_hdmi_enci_mode_576i = {
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
- .macv_max_amp = 8107,
+ .macv_max_amp = 0x7,
.video_prog_mode = 0xff,
.video_mode = 0x13,
.sch_adjust = 0x28,
@@ -976,6 +976,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int eof_lines;
unsigned int sof_lines;
unsigned int vsync_lines;
+ u32 reg;
/* Use VENCI for 480i and 576i and double HDMI pixels */
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1048,8 +1049,11 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int lines_f1;
/* CVBS Filter settings */
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+ writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
+ priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
+ ENCI_CFILT_CMPT_CB_DLY(1),
+ priv->io_base + _REG(ENCI_CFILT_CTRL2));
/* Digital Video Select : Interlace, clk27 clk, external */
writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
@@ -1071,8 +1075,9 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
/* Macrovision max amplitude change */
- writel_relaxed(vmode->enci.macv_max_amp,
- priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+ writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
+ ENCI_MACV_MAX_AMP_VAL(vmode->enci.macv_max_amp),
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
/* Video mode */
writel_relaxed(vmode->enci.video_prog_mode,
@@ -1080,7 +1085,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
writel_relaxed(vmode->enci.video_mode,
priv->io_base + _REG(ENCI_VIDEO_MODE));
- /* Advanced Video Mode :
+ /*
+ * Advanced Video Mode :
* Demux shifting 0x2
* Blank line end at line17/22
* High bandwidth Luma Filter
@@ -1088,7 +1094,10 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
* Bypass luma low pass filter
* No macrovision on CSYNC
*/
- writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+ writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
+ ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
+ ENCI_VIDEO_MODE_ADV_YBW_HIGH,
+ priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
writel(vmode->enci.sch_adjust,
priv->io_base + _REG(ENCI_VIDEO_SCH));
@@ -1104,8 +1113,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
/* UNreset Interlaced TV Encoder */
writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
- /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
- writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+ /*
+ * Enable Vfifo2vd and set Y_Cb_Y_Cr:
+ * Corresponding value:
+ * Y => 00 or 10
+ * Cb => 01
+ * Cr => 11
+ * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
+ */
+ writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
+ ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
+ priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
/* Timings */
writel_relaxed(vmode->enci.pixel_start,
@@ -1127,7 +1145,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
/* Interlace video enable */
- writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(ENCI_VIDEO_EN_ENABLE,
+ priv->io_base + _REG(ENCI_VIDEO_EN));
lines_f0 = mode->vtotal >> 1;
lines_f1 = lines_f0 + 1;
@@ -1374,7 +1393,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
	/* Set DE signal polarity to active high */
- writel_bits_relaxed(BIT(14), BIT(14),
+ writel_bits_relaxed(ENCP_VIDEO_MODE_DE_V_HIGH,
+ ENCP_VIDEO_MODE_DE_V_HIGH,
priv->io_base + _REG(ENCP_VIDEO_MODE));
/* Program DE timing */
@@ -1493,13 +1513,39 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP);
}
- writel_relaxed((use_enci ? 1 : 2) |
- (mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 << 2 : 0) |
- (mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 << 3 : 0) |
- 4 << 5 |
- (venc_repeat ? 1 << 8 : 0) |
- (hdmi_repeat ? 1 << 12 : 0),
- priv->io_base + _REG(VPU_HDMI_SETTING));
+ /* Set VPU HDMI setting */
+ /* Select ENCP or ENCI data to HDMI */
+ if (use_enci)
+ reg = VPU_HDMI_ENCI_DATA_TO_HDMI;
+ else
+ reg = VPU_HDMI_ENCP_DATA_TO_HDMI;
+
+ /* Invert polarity of HSYNC from VENC */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ reg |= VPU_HDMI_INV_HSYNC;
+
+ /* Invert polarity of VSYNC from VENC */
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ reg |= VPU_HDMI_INV_VSYNC;
+
+ /* Output data format: CbYCr */
+ reg |= VPU_HDMI_OUTPUT_CBYCR;
+
+ /*
+ * Write rate to the async FIFO between VENC and HDMI.
+ * One write every 2 wr_clk.
+ */
+ if (venc_repeat)
+ reg |= VPU_HDMI_WR_RATE(2);
+
+ /*
+ * Read rate to the async FIFO between VENC and HDMI.
+ * One read every 2 wr_clk.
+ */
+ if (hdmi_repeat)
+ reg |= VPU_HDMI_RD_RATE(2);
+
+ writel_relaxed(reg, priv->io_base + _REG(VPU_HDMI_SETTING));
priv->venc.hdmi_repeat = hdmi_repeat;
priv->venc.venc_repeat = venc_repeat;
@@ -1512,12 +1558,17 @@ EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode)
{
+ u32 reg;
+
if (mode->mode_tag == priv->venc.current_mode)
return;
/* CVBS Filter settings */
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+ writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
+ priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
+ ENCI_CFILT_CMPT_CB_DLY(1),
+ priv->io_base + _REG(ENCI_CFILT_CTRL2));
/* Digital Video Select : Interlace, clk27 clk, external */
writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
@@ -1539,8 +1590,9 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
/* Macrovision max amplitude change */
- writel_relaxed(0x8100 + mode->macv_max_amp,
- priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+ writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
+ ENCI_MACV_MAX_AMP_VAL(mode->macv_max_amp),
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
/* Video mode */
writel_relaxed(mode->video_prog_mode,
@@ -1548,7 +1600,8 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
writel_relaxed(mode->video_mode,
priv->io_base + _REG(ENCI_VIDEO_MODE));
- /* Advanced Video Mode :
+ /*
+ * Advanced Video Mode :
* Demux shifting 0x2
* Blank line end at line17/22
* High bandwidth Luma Filter
@@ -1556,7 +1609,10 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
* Bypass luma low pass filter
* No macrovision on CSYNC
*/
- writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+ writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
+ ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
+ ENCI_VIDEO_MODE_ADV_YBW_HIGH,
+ priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH));
@@ -1588,16 +1644,50 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
/* UNreset Interlaced TV Encoder */
writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
- /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
- writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+ /*
+ * Enable Vfifo2vd and set Y_Cb_Y_Cr:
+ * Corresponding value:
+ * Y => 00 or 10
+ * Cb => 01
+ * Cr => 11
+ * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
+ */
+ writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
+ ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
+ priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
/* Power UP Dacs */
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING));
/* Video Upsampling */
- writel_relaxed(0x0061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
- writel_relaxed(0x4061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
- writel_relaxed(0x5061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
+ /*
+ * CTRL0, CTRL1 and CTRL2:
+	 * Filter0: input data sampled every 2 clocks
+ * Filter1: filtering and upsample enable
+ */
+ reg = VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO | VENC_UPSAMPLE_CTRL_F1_EN |
+ VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN;
+
+ /*
+ * Upsample CTRL0:
+ * Interlace High Bandwidth Luma
+ */
+ writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA | reg,
+ priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
+
+ /*
+ * Upsample CTRL1:
+ * Interlace Pb
+ */
+ writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PB | reg,
+ priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
+
+ /*
+ * Upsample CTRL2:
+	 * Interlace Pr
+ */
+ writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PR | reg,
+ priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
/* Select Interlace Y DACs */
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
@@ -1611,14 +1701,16 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
/* Enable ENCI FIFO */
- writel_relaxed(0x2000, priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
+ writel_relaxed(VENC_VDAC_FIFO_EN_ENCI_ENABLE,
+ priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
/* Select ENCI DACs 0, 1, 4, and 5 */
writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0));
writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1));
/* Interlace video enable */
- writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(ENCI_VIDEO_EN_ENABLE,
+ priv->io_base + _REG(ENCI_VIDEO_EN));
/* Configure Video Saturation / Contrast / Brightness / Hue */
writel_relaxed(mode->video_saturation,
@@ -1631,7 +1723,8 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
priv->io_base + _REG(ENCI_VIDEO_HUE));
/* Enable DAC0 Filter */
- writel_relaxed(0x1, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
+ writel_relaxed(VENC_VDAC_DAC0_FILT_CTRL0_EN,
+ priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1));
/* 0 in Macrovision register 0 */
@@ -1652,7 +1745,8 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
void meson_venc_enable_vsync(struct meson_drm *priv)
{
- writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+ writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN,
+ priv->io_base + _REG(VENC_INTCTRL));
regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
}
@@ -1665,7 +1759,7 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 8);
} else {
@@ -1680,7 +1774,8 @@ void meson_venc_init(struct meson_drm *priv)
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
/* Disable HDMI */
- writel_bits_relaxed(0x3, 0,
+ writel_bits_relaxed(VPU_HDMI_ENCI_DATA_TO_HDMI |
+ VPU_HDMI_ENCP_DATA_TO_HDMI, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));
/* Disable all encoders */
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 985642a1678e..576768bdd08d 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -14,6 +14,8 @@
#ifndef __MESON_VENC_H
#define __MESON_VENC_H
+struct drm_display_mode;
+
enum {
MESON_VENC_MODE_NONE = 0,
MESON_VENC_MODE_CVBS_PAL,
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 6313a519f257..1bd6b6d15ffb 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -9,19 +9,18 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
-#include "meson_venc_cvbs.h"
-#include "meson_venc.h"
-#include "meson_vclk.h"
#include "meson_registers.h"
+#include "meson_vclk.h"
+#include "meson_venc_cvbs.h"
/* HHI VDAC Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
@@ -65,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
},
};
+static const struct meson_cvbs_mode *
+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
+{
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_match(req_mode, &meson_mode->mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS))
+ return meson_mode;
+ }
+
+ return NULL;
+}
+
/* Connector */
static void meson_cvbs_connector_destroy(struct drm_connector *connector)
@@ -137,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- int i;
-
- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
- if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
- return 0;
- }
+ if (meson_cvbs_get_mode(&crtc_state->mode))
+ return 0;
return -EINVAL;
}
@@ -156,7 +168,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* Disable CVBS VDAC */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
} else {
@@ -172,16 +184,17 @@ static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* VDAC0 source is not from ATV */
- writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
+ writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
+ priv->io_base + _REG(VENC_VDAC_DACSEL0));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
}
@@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
struct meson_venc_cvbs *meson_venc_cvbs =
encoder_to_meson_venc_cvbs(encoder);
struct meson_drm *priv = meson_venc_cvbs->priv;
- int i;
-
- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
- if (drm_mode_equal(mode, &meson_mode->mode)) {
- meson_venci_cvbs_mode_set(priv,
- meson_mode->enci);
+ if (meson_mode) {
+ meson_venci_cvbs_mode_set(priv, meson_mode->enci);
- /* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, true);
- break;
- }
+ /* Setup 27MHz vclk2 for ENCI and VDAC */
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
}
}
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 4b2b3024d371..304f8ff1339c 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -6,13 +6,13 @@
* Copyright (C) 2014 Endless Mobile
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+#include <linux/bitfield.h>
+
+#include <drm/drm_fourcc.h>
+
#include "meson_drv.h"
#include "meson_viu.h"
-#include "meson_vpp.h"
-#include "meson_venc.h"
#include "meson_registers.h"
/**
@@ -323,9 +323,9 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
/* Reset OSD1 */
- writel_bits_relaxed(BIT(0), BIT(0),
+ writel_bits_relaxed(VIU_SW_RESET_OSD1, VIU_SW_RESET_OSD1,
priv->io_base + _REG(VIU_SW_RESET));
- writel_bits_relaxed(BIT(0), 0,
+ writel_bits_relaxed(VIU_SW_RESET_OSD1, 0,
priv->io_base + _REG(VIU_SW_RESET));
/* Rewrite these registers state lost in the reset */
@@ -338,38 +338,116 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
meson_viu_load_matrix(priv);
}
+#define OSD1_MALI_ORDER_ABGR \
+ (FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER, \
+ VIU_OSD1_MALI_REORDER_A) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER, \
+ VIU_OSD1_MALI_REORDER_B) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER, \
+ VIU_OSD1_MALI_REORDER_G) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER, \
+ VIU_OSD1_MALI_REORDER_R))
+
+#define OSD1_MALI_ORDER_ARGB \
+ (FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER, \
+ VIU_OSD1_MALI_REORDER_A) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER, \
+ VIU_OSD1_MALI_REORDER_R) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER, \
+ VIU_OSD1_MALI_REORDER_G) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER, \
+ VIU_OSD1_MALI_REORDER_B))
+
+void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv)
+{
+ u32 afbc_order = OSD1_MALI_ORDER_ARGB;
+
+ /* Enable Mali AFBC Unpack */
+ writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN,
+ VIU_OSD1_MALI_UNPACK_EN,
+ priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+
+ switch (priv->afbcd.format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ afbc_order = OSD1_MALI_ORDER_ABGR;
+ break;
+ }
+
+ /* Setup RGBA Reordering */
+ writel_bits_relaxed(VIU_OSD1_MALI_AFBCD_A_REORDER |
+ VIU_OSD1_MALI_AFBCD_B_REORDER |
+ VIU_OSD1_MALI_AFBCD_G_REORDER |
+ VIU_OSD1_MALI_AFBCD_R_REORDER,
+ afbc_order,
+ priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+
+ /* Select AFBCD path for OSD1 */
+ writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
+ OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
+ priv->io_base + _REG(OSD_PATH_MISC_CTRL));
+}
+
+void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv)
+{
+ /* Disable AFBCD path for OSD1 */
+ writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD, 0,
+ priv->io_base + _REG(OSD_PATH_MISC_CTRL));
+
+ /* Disable AFBCD unpack */
+ writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN, 0,
+ priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+}
+
+void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv)
+{
+ writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x90),
+ priv->io_base + _REG(VIU_MISC_CTRL1));
+}
+
+void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
+{
+ writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x00),
+ priv->io_base + _REG(VIU_MISC_CTRL1));
+}
+
+static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
+{
+ uint32_t val = (((length & 0x80) % 24) / 12);
+
+ return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
+}
+
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
/* Disable OSDs */
- writel_bits_relaxed(BIT(0) | BIT(21), 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
- writel_bits_relaxed(BIT(0) | BIT(21), 0,
- priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
+ writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
+ priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
/* On GXL/GXM, Use the 10bit HDR conversion matrix */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_load_matrix(priv);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
true);
/* Initialize OSD1 fifo control register */
- reg = BIT(0) | /* Urgent DDR request priority */
- (4 << 5); /* hold_fifo_lines */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- reg |= (1 << 10) | /* burst length 32 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24) |
- (1 << 31);
+ reg = VIU_OSD_DDR_PRIORITY_URGENT |
+ VIU_OSD_HOLD_FIFO_LINES(31) |
+ VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
+ VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
+ VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ reg |= meson_viu_osd_burst_length_reg(32);
else
- reg |= (3 << 10) | /* burst length 64 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24);
+ reg |= meson_viu_osd_burst_length_reg(64);
+
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
@@ -382,12 +460,9 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
/* Disable VD1 AFBC */
- /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
- writel_bits_relaxed(0x7 << 16, 0,
- priv->io_base + _REG(VIU_MISC_CTRL0));
- /* afbc vd1 set=0 */
- writel_bits_relaxed(BIT(20), 0,
- priv->io_base + _REG(VIU_MISC_CTRL0));
+	/* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 and afbc vd1 set=0 */
+ writel_bits_relaxed(VIU_CTRL0_VD1_AFBC_MASK, 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
writel_relaxed(0x00FF00C0,
@@ -395,30 +470,39 @@ void meson_viu_init(struct meson_drm *priv)
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
- writel_relaxed(4 << 29 |
- 1 << 27 |
- 1 << 26 | /* blend_din0 input to blend0 */
- 1 << 25 | /* blend1_dout to blend2 */
- 1 << 24 | /* blend1_din3 input to blend1 */
- 1 << 20 |
- 0 << 16 |
- 1,
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
- writel_relaxed(1 << 20,
- priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
- writel_relaxed(1 << 20,
- priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
+ VIU_OSD_BLEND_REORDER(1, 0) |
+ VIU_OSD_BLEND_REORDER(2, 0) |
+ VIU_OSD_BLEND_REORDER(3, 0) |
+ VIU_OSD_BLEND_DIN_EN(1) |
+ VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ VIU_OSD_BLEND_HOLD_LINES(4),
+ priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+
+ writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
+ writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
+ priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
writel_relaxed(0,
priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
writel_relaxed(0,
priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
- writel_bits_relaxed(0x3 << 2, 0x3 << 2,
- priv->io_base + _REG(DOLBY_PATH_CTRL));
+
+ writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc),
+ priv->io_base + _REG(DOLBY_PATH_CTRL));
+
+ meson_viu_g12a_disable_osd1_afbc(priv);
}
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+ meson_viu_gxm_disable_osd1_afbc(priv);
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.osd1_interlace = false;
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index a112e8d18850..e4a2f24d7c38 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -10,6 +10,8 @@
#define __MESON_VIU_H
/* OSDx_BLKx_CFG */
+#define OSD_MALI_SRC_EN BIT(30)
+
#define OSD_CANVAS_SEL 16
#define OSD_ENDIANNESS_LE BIT(15)
@@ -33,21 +35,38 @@
#define OSD_COLOR_MATRIX_16_RGB655 (0x00 << 2)
#define OSD_COLOR_MATRIX_16_RGB565 (0x04 << 2)
+#define OSD_MALI_COLOR_MODE_R8 (0 << 8)
+#define OSD_MALI_COLOR_MODE_YUV422 (1 << 8)
+#define OSD_MALI_COLOR_MODE_RGB565 (2 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA5551 (3 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA4444 (4 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA8888 (5 << 8)
+#define OSD_MALI_COLOR_MODE_RGB888 (7 << 8)
+#define OSD_MALI_COLOR_MODE_YUV422_10B (8 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA1010102 (9 << 8)
+
#define OSD_INTERLACE_ENABLED BIT(1)
#define OSD_INTERLACE_ODD BIT(0)
#define OSD_INTERLACE_EVEN (0)
/* OSDx_CTRL_STAT */
#define OSD_ENABLE BIT(21)
+#define OSD_MEM_LINEAR_ADDR BIT(2)
#define OSD_BLK0_ENABLE BIT(0)
#define OSD_GLOBAL_ALPHA_SHIFT 12
/* OSDx_CTRL_STAT2 */
+#define OSD_DPATH_MALI_AFBCD BIT(15)
#define OSD_REPLACE_EN BIT(14)
#define OSD_REPLACE_SHIFT 6
+#define OSD_PENDING_STAT_CLEAN BIT(1)
void meson_viu_osd1_reset(struct meson_drm *priv);
+void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv);
void meson_viu_init(struct meson_drm *priv);
#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index bfee30fa6e34..154837688ab0 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -6,12 +6,11 @@
* Copyright (C) 2014 Endless Mobile
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
#include "meson_drv.h"
-#include "meson_vpp.h"
#include "meson_registers.h"
+#include "meson_vpp.h"
/**
* DOC: Video Post Processing
@@ -57,7 +56,7 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
{
int i;
- writel_relaxed(is_horizontal ? BIT(8) : 0,
+ writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX));
for (i = 0; i < 33; i++)
writel_relaxed(coefs[i],
@@ -82,7 +81,7 @@ static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
{
int i;
- writel_relaxed(is_horizontal ? BIT(8) : 0,
+ writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
priv->io_base + _REG(VPP_SCALE_COEF_IDX));
for (i = 0; i < 33; i++)
writel_relaxed(coefs[i],
@@ -92,27 +91,29 @@ static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
void meson_vpp_init(struct meson_drm *priv)
{
/* set dummy data default YUV black */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
writel_relaxed(0x108080, priv->io_base + _REG(VPP_DUMMY_DATA1));
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) {
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
writel_bits_relaxed(0xff << 16, 0xff << 16,
priv->io_base + _REG(VIU_MISC_CTRL1));
- writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
+ writel_relaxed(VPP_PPS_DUMMY_DATA_MODE,
+ priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
/* Initialize vpu fifo control registers */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- writel_relaxed(0xfff << 20 | 0x1000,
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ writel_relaxed(VPP_OFIFO_SIZE_DEFAULT,
priv->io_base + _REG(VPP_OFIFO_SIZE));
else
- writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
- 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
- writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
+ writel_bits_relaxed(VPP_OFIFO_SIZE_MASK, 0x77f,
+ priv->io_base + _REG(VPP_OFIFO_SIZE));
+ writel_relaxed(VPP_POSTBLEND_HOLD_LINES(4) | VPP_PREBLEND_HOLD_LINES(4),
+ priv->io_base + _REG(VPP_HOLD_LINES));
- if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
/* Turn off preblend */
writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
@@ -138,10 +139,15 @@ void meson_vpp_init(struct meson_drm *priv)
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
- writel_relaxed(4 | (4 << 8) | BIT(15),
+
+ /* Set horizontal/vertical bank length and enable video scale out */
+ writel_relaxed(VPP_VSC_BANK_LENGTH(4) | VPP_HSC_BANK_LENGTH(4) |
+ VPP_SC_VD_EN_ENABLE,
priv->io_base + _REG(VPP_SC_MISC));
- writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
+ /* Enable minus black level for vadj1 */
+ writel_relaxed(VPP_MINUS_BLACK_LVL_VADJ1_ENABLE,
+ priv->io_base + _REG(VPP_VADJ_CTRL));
/* Write in the proper filter coefficients. */
meson_vpp_write_scaling_filter_coefs(priv,
diff --git a/drivers/gpu/drm/meson/meson_vpp.h b/drivers/gpu/drm/meson/meson_vpp.h
index 9fc82db8a12d..afc9553ed8d3 100644
--- a/drivers/gpu/drm/meson/meson_vpp.h
+++ b/drivers/gpu/drm/meson/meson_vpp.h
@@ -9,6 +9,9 @@
#ifndef __MESON_VPP_H
#define __MESON_VPP_H
+struct drm_rect;
+struct meson_drm;
+
/* Mux VIU/VPP to ENCI */
#define MESON_VIU_VPP_MUX_ENCI 0x5
/* Mux VIU/VPP to ENCP */
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 1ffdafea27e4..85c74364ce24 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -35,8 +35,8 @@
* \author Gareth Hughes <gareth@valinux.com>
*/
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
+#include <linux/delay.h>
+
#include "mga_drv.h"
#define MGA_DEFAULT_USEC_TIMEOUT 10000
@@ -62,7 +62,7 @@ int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
MGA_WRITE8(MGA_CRTC_INDEX, 0);
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
#if MGA_DMA_DEBUG
@@ -114,7 +114,7 @@ void mga_do_dma_flush(drm_mga_private_t *dev_priv)
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
if (status == MGA_ENDPRDMASTS)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (primary->tail == primary->last_flush) {
@@ -1120,7 +1120,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d->send_count);
+ task_pid_nr(current), d->send_count);
return -EINVAL;
}
@@ -1128,7 +1128,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d->request_count, dma->buf_count);
+ task_pid_nr(current), d->request_count,
+ dma->buf_count);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 6e1d1054ad06..71128e6f6ae9 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -31,12 +31,11 @@
#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
-#include "mga_drv.h"
-
+#include <drm/drm_drv.h>
#include <drm/drm_pciids.h>
+#include "mga_drv.h"
+
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index a45bb22275a7..66df51607896 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -31,7 +31,20 @@
#ifndef __MGA_DRV_H__
#define __MGA_DRV_H__
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_print.h>
+#include <drm/drm_sarea.h>
+#include <drm/drm_vblank.h>
+#include <drm/mga_drm.h>
/* General customization:
*/
@@ -188,7 +201,7 @@ extern int mga_warp_init(drm_mga_private_t *dev_priv);
extern int mga_enable_vblank(struct drm_device *dev, unsigned int pipe);
extern void mga_disable_vblank(struct drm_device *dev, unsigned int pipe);
extern u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
+extern void mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
extern void mga_driver_irq_preinstall(struct drm_device *dev);
@@ -199,10 +212,14 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
#define mga_flush_write_combine() wmb()
-#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
-#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
-#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define MGA_READ8(reg) \
+ readb(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define MGA_READ(reg) \
+ readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define MGA_WRITE8(reg, val) \
+ writeb(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define MGA_WRITE(reg, val) \
+ writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
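The rewritten MGA_READ*/MGA_WRITE* macros now go through readl()/writel() and friends on the mapped MMIO handle rather than the removed DRM_READ/DRM_WRITE wrappers. A small usage sketch with register names taken from the surrounding code; the helper itself is illustrative, not part of this patch:

/* Illustrative only: check whether the drawing engine is idle. */
static bool mga_engine_is_idle(drm_mga_private_t *dev_priv)
{
	return (MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK) ==
		MGA_ENDPRDMASTS;
}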
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 245fb2e359cf..6ccd270789c6 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -30,10 +30,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
+
#include <linux/compat.h>
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
typedef struct drm32_mga_init {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 693ba708cfed..a7e6ffc80a78 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -31,8 +31,6 @@
* Eric Anholt <anholt@FreeBSD.org>
*/
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
@@ -118,23 +116,21 @@ void mga_disable_vblank(struct drm_device *dev, unsigned int pipe)
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
-int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
+void mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
- int ret = 0;
/* Assume that the user has missed the current sequence number
	 * by about a day rather than wanting to wait for years
* using fences.
*/
- DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
+ wait_event_timeout(dev_priv->fence_queue,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- - *sequence) <= (1 << 23)));
+ - *sequence) <= (1 << 23)),
+ msecs_to_jiffies(3000));
*sequence = cur_fence;
-
- return ret;
}
void mga_driver_irq_preinstall(struct drm_device *dev)
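The wait condition preserved by the wait_event_timeout() conversion above relies on unsigned wrap-around arithmetic: the retired fence counter is treated as having reached the requested sequence as long as the difference fits within 2^23, so the check keeps working after the 32-bit counter wraps. A small standalone illustration of that comparison (fence_passed is an illustrative name; wait_event_timeout() just re-evaluates this condition until it holds or the 3000 ms timeout, formerly 3 * HZ, expires):

#include <stdint.h>
#include <stdio.h>

/* true if 'retired' has caught up to 'wanted', tolerating 32-bit wrap */
static int fence_passed(uint32_t retired, uint32_t wanted)
{
	return (uint32_t)(retired - wanted) <= (1u << 23);
}

int main(void)
{
	/* counter wrapped: retired=5 is "after" wanted=0xfffffffe */
	printf("%d\n", fence_passed(5, 0xfffffffeu));   /* prints 1 */
	/* retired far behind wanted -> keep waiting */
	printf("%d\n", fence_passed(100, 5000000));     /* prints 0 */
	return 0;
}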
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index e5f6b735f575..77a0b006f066 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -32,8 +32,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
/* ================================================================
@@ -1016,7 +1014,7 @@ int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
switch (param->param) {
case MGA_PARAM_IRQ_NR:
@@ -1048,7 +1046,7 @@ static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *fi
return -EINVAL;
}
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
	/* I would normally do this assignment in the declaration of fence,
* but dev_priv may be NULL.
@@ -1077,7 +1075,7 @@ file_priv)
return -EINVAL;
}
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
mga_driver_fence_wait(dev, fence);
return 0;
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 0b76352260a9..b5ef1d2c8b1c 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -29,11 +29,9 @@
#include <linux/firmware.h>
#include <linux/ihex.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
#define FIRMWARE_G200 "matrox/g200_warp.fw"
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 76fee0fbdcae..d60aa4b9ccd4 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -4,10 +4,12 @@ config DRM_MGAG200
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
	  This is a KMS driver for the MGA G200 server chips; it
- does not support the original MGA G200 or any of the desktop
- chips. It requires 0.3.0 of the modesetting userspace driver,
- and a version of mga driver that will fail on KMS enabled
- devices.
+ does not support the original MGA G200 or any of the desktop
+ chips. It requires 0.3.0 of the modesetting userspace driver,
+ and a version of mga driver that will fail on KMS enabled
+ devices.
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 98d204408bd0..04b281bcf655 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
mgag200-y := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
- mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+ mgag200_drv.o mgag200_i2c.o mgag200_ttm.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index f0c61a92351c..d491edd317ff 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -5,41 +5,17 @@
* Author: Christopher Harvey <charvey@matrox.com>
*/
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include "mgag200_drv.h"
static bool warn_transparent = true;
static bool warn_palette = true;
-/*
- Hide the cursor off screen. We can't disable the cursor hardware because it
- takes too long to re-activate and causes momentary corruption
-*/
-static void mga_hide_cursor(struct mga_device *mdev)
-{
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- if (mdev->cursor.pixels_current)
- drm_gem_vram_unpin(mdev->cursor.pixels_current);
- mdev->cursor.pixels_current = NULL;
-}
-
-int mga_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
+static int mgag200_cursor_update(struct mga_device *mdev, void *dst, void *src,
+ unsigned int width, unsigned int height)
{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
- struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1;
- struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2;
- struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current;
- struct drm_gem_vram_object *pixels_next;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo = NULL;
- int ret = 0;
- u8 *src, *dst;
+ struct drm_device *dev = mdev->dev;
unsigned int i, row, col;
uint32_t colour_set[16];
uint32_t *next_space = &colour_set[0];
@@ -47,78 +23,9 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t this_colour;
bool found = false;
int colour_count = 0;
- s64 gpu_addr;
- u64 dst_gpu;
u8 reg_index;
u8 this_row[48];
- if (!pixels_1 || !pixels_2) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -ENOTSUPP; /* Didn't allocate space for cursors */
- }
-
- if (WARN_ON(pixels_current &&
- pixels_1 != pixels_current &&
- pixels_2 != pixels_current)) {
- return -ENOTSUPP; /* inconsistent state */
- }
-
- if (!handle || !file_priv) {
- mga_hide_cursor(mdev);
- return 0;
- }
-
- if (width != 64 || height != 64) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -EINVAL;
- }
-
- if (pixels_current == pixels_1)
- pixels_next = pixels_2;
- else
- pixels_next = pixels_1;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj)
- return -ENOENT;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to lock user bo\n");
- goto err_drm_gem_object_put_unlocked;
- }
- src = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- dev_err(&dev->pdev->dev,
- "failed to kmap user buffer updates\n");
- goto err_drm_gem_vram_unpin_src;
- }
-
- /* Pin and map up-coming buffer to write colour indices */
- ret = drm_gem_vram_pin(pixels_next, 0);
- if (ret)
- dev_err(&dev->pdev->dev,
- "failed to pin cursor buffer: %d\n", ret);
- goto err_drm_gem_vram_kunmap_src;
- dst = drm_gem_vram_kmap(pixels_next, true, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- dev_err(&dev->pdev->dev,
- "failed to kmap cursor updates: %d\n", ret);
- goto err_drm_gem_vram_unpin_dst;
- }
- gpu_addr = drm_gem_vram_offset(pixels_2);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
- dev_err(&dev->pdev->dev,
- "failed to get cursor scanout address: %d\n", ret);
- goto err_drm_gem_vram_kunmap_dst;
- }
- dst_gpu = (u64)gpu_addr;
-
memset(&colour_set[0], 0, sizeof(uint32_t)*16);
/* width*height*4 = 16384 */
for (i = 0; i < 16384; i += 4) {
@@ -131,8 +38,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_transparent = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
/* Don't need to store transparent pixels as colours */
if (this_colour>>24 == 0x0)
@@ -153,8 +59,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_palette = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
*next_space = this_colour;
next_space++;
@@ -198,55 +103,217 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
memcpy_toio(dst + row*48, &this_row[0], 48);
}
+ return 0;
+}
+
+static void mgag200_cursor_set_base(struct mga_device *mdev, u64 address)
+{
+ u8 addrl = (address >> 10) & 0xff;
+ u8 addrh = (address >> 18) & 0x3f;
+
/* Program gpu address of cursor buffer */
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((dst_gpu>>10) & 0xff));
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((dst_gpu>>18) & 0x3f));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, addrl);
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, addrh);
+}
+
+static int mgag200_show_cursor(struct mga_device *mdev, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct drm_device *dev = mdev->dev;
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+
+ gbo = mdev->cursor.gbo[mdev->cursor.next_index];
+ if (!gbo) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -ENOTSUPP; /* Didn't allocate space for cursors */
+ }
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ dev_err(&dev->pdev->dev,
+ "failed to map cursor updates: %d\n", ret);
+ return ret;
+ }
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ dev_err(&dev->pdev->dev,
+ "failed to get cursor scanout address: %d\n", ret);
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = mgag200_cursor_update(mdev, dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ mgag200_cursor_set_base(mdev, off);
/* Adjust cursor control register to turn on the cursor */
	WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palettized cursor mode */
+ drm_gem_vram_vunmap(gbo, dst);
+
+ ++mdev->cursor.next_index;
+ mdev->cursor.next_index %= ARRAY_SIZE(mdev->cursor.gbo);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
+
+/*
+ * Hide the cursor off screen. We can't disable the cursor hardware because
+ * it takes too long to re-activate and causes momentary corruption.
+ */
+static void mgag200_hide_cursor(struct mga_device *mdev)
+{
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+}
+
+static void mgag200_move_cursor(struct mga_device *mdev, int x, int y)
+{
+ if (WARN_ON(x <= 0))
+ return;
+ if (WARN_ON(y <= 0))
+ return;
+ if (WARN_ON(x & ~0xffff))
+ return;
+ if (WARN_ON(y & ~0xffff))
+ return;
+
+ WREG8(MGA_CURPOSXL, x & 0xff);
+ WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
+
+ WREG8(MGA_CURPOSYL, y & 0xff);
+ WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
+}
+
+int mgag200_cursor_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ size_t ncursors = ARRAY_SIZE(mdev->cursor.gbo);
+ size_t size;
+ int ret;
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ size = roundup(64 * 48, PAGE_SIZE);
+ if (size * ncursors > mdev->vram_fb_available)
+ return -ENOMEM;
+
+ for (i = 0; i < ncursors; ++i) {
+ gbo = drm_gem_vram_create(dev, size, 0);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
+
+ mdev->cursor.gbo[i] = gbo;
+ }
+
+ /*
+ * At the high end of video memory, we reserve space for
+ * buffer objects. The cursor plane uses this memory to store
+ * a double-buffered image of the current cursor. Hence, it's
+ * not available for framebuffers.
+ */
+ mdev->vram_fb_available -= ncursors * size;
+
+ return 0;
+
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ mdev->cursor.gbo[i] = NULL;
+ }
+ return ret;
+}
+
+void mgag200_cursor_fini(struct mga_device *mdev)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(mdev->cursor.gbo); ++i) {
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
+}
+
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_gem_vram_object *gbo = NULL;
+ int ret;
+ u8 *src;
+
+ if (!handle || !file_priv) {
+ mgag200_hide_cursor(mdev);
+ return 0;
+ }
+
+ if (width != 64 || height != 64) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+ gbo = drm_gem_vram_of_gem(obj);
+ src = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ dev_err(&dev->pdev->dev,
+ "failed to map user buffer updates\n");
+ goto err_drm_gem_object_put_unlocked;
+ }
+
+ ret = mgag200_show_cursor(mdev, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+
/* Now update internal buffer pointers */
- if (pixels_current)
- drm_gem_vram_unpin(pixels_current);
- mdev->cursor.pixels_current = pixels_next;
-
- drm_gem_vram_kunmap(pixels_next);
- drm_gem_vram_unpin(pixels_next);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, src);
drm_gem_object_put_unlocked(obj);
return 0;
-
-err_drm_gem_vram_kunmap_dst:
- drm_gem_vram_kunmap(pixels_next);
-err_drm_gem_vram_unpin_dst:
- drm_gem_vram_unpin(pixels_next);
-err_drm_gem_vram_kunmap_src:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin_src:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
}
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+
/* Our origin is at (64,64) */
x += 64;
y += 64;
- BUG_ON(x <= 0);
- BUG_ON(y <= 0);
- BUG_ON(x & ~0xffff);
- BUG_ON(y & ~0xffff);
+ mgag200_move_cursor(mdev, x, y);
- WREG8(MGA_CURPOSXL, x & 0xff);
- WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
-
- WREG8(MGA_CURPOSYL, y & 0xff);
- WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
return 0;
}
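The rewritten cursor code double-buffers the hardware cursor: mgag200_cursor_init() pins two page-aligned BOs top-down in VRAM and subtracts their total size from vram_fb_available, while mgag200_show_cursor() writes the next buffer and then advances next_index so scanout never reads a half-written image. A rough arithmetic sketch of that bookkeeping (the 16 MiB VRAM figure and the helper names here are assumptions):

#include <stdio.h>

#define NCURSORS  2
#define PAGE_SIZE 4096ul

/* mirrors roundup(64 * 48, PAGE_SIZE) in mgag200_cursor_init() */
static unsigned long round_up_to(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	unsigned long size = round_up_to(64 * 48, PAGE_SIZE);   /* 4096 */
	unsigned long vram_fb_available = 16ul * 1024 * 1024;   /* assumed 16 MiB */
	unsigned int next_index = 0;
	int frame;

	vram_fb_available -= NCURSORS * size;   /* cursor BOs carved off the top */
	printf("fb space left: %lu bytes\n", vram_fb_available);

	/* each cursor update fills the "next" buffer, then flips */
	for (frame = 0; frame < 4; frame++) {
		printf("frame %d scans out cursor BO %u\n", frame, next_index);
		next_index = (next_index + 1) % NCURSORS;
	}
	return 0;
}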
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index aafa1cb31f50..7a5bad2f57d7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -5,14 +5,18 @@
* Authors: Matthew Garrett
* Dave Airlie
*/
-#include <linux/module.h>
-#include <linux/console.h>
-#include <drm/drmP.h>
-#include "mgag200_drv.h"
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
+#include "mgag200_drv.h"
+
/*
* This is the generic driver code. This binds the driver to the drm core,
* which then performs further device association and calls our graphics init
@@ -23,10 +27,15 @@ int mgag200_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, mgag200_modeset, int, 0400);
+int mgag200_hw_bug_no_startadd = -1;
+MODULE_PARM_DESC(hw_bug_no_startadd, "HW does not interpret scanout-buffer start address correctly");
+module_param_named(hw_bug_no_startadd, mgag200_hw_bug_no_startadd, int, 0400);
+
static struct drm_driver driver;
static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
@@ -42,27 +51,94 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb");
+ struct drm_device *dev;
+ int ret;
+
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_mgag200_driver_unload;
- return drm_get_pci_dev(pdev, ent, &driver);
+ return 0;
+
+err_mgag200_driver_unload:
+ mgag200_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
static void mga_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+ drm_dev_unregister(dev);
+ mgag200_driver_unload(dev);
+ drm_dev_put(dev);
}
-static const struct file_operations mgag200_driver_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
+
+static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
+{
+ if (mgag200_hw_bug_no_startadd > 0) {
+		DRM_WARN_ONCE("Option hw_bug_no_startadd is enabled. Please "
+ "report the output of 'lspci -vvnn' to "
+ "<dri-devel@lists.freedesktop.org> if this "
+ "option is required to make mgag200 work "
+ "correctly on your system.\n");
+ return true;
+ } else if (!mgag200_hw_bug_no_startadd) {
+ return false;
+ }
+ return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
+}
+
+int mgag200_driver_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct mga_device *mdev = dev->dev_private;
+ unsigned long pg_align;
+
+ if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
+ return -EINVAL;
+
+ pg_align = 0ul;
+
+ /*
+ * Aligning scanout buffers to the size of the video ram forces
+ * placement at offset 0. Works around a bug where HW does not
+ * respect 'startadd' field.
+ */
+ if (mgag200_pin_bo_at_0(mdev))
+ pg_align = PFN_UP(mdev->mc.vram_size);
+
+ return drm_gem_vram_fill_create_dumb(file, dev, pg_align, 0, args);
+}
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
- .load = mgag200_driver_load,
- .unload = mgag200_driver_unload,
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -70,7 +146,10 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- DRM_GEM_VRAM_DRIVER
+ .debugfs_init = drm_vram_mm_debugfs_init,
+ .dumb_create = mgag200_driver_dumb_create,
+ .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static struct pci_driver mgag200_pci_driver = {
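mgag200_driver_dumb_create() above works around the 'startadd' bug by requesting a page alignment equal to the entire VRAM size; inside a VRAM-sized range the only offset that satisfies such an alignment is 0, so scanout buffers are effectively pinned to the start of VRAM. A toy check of that argument (PAGE_SHIFT and PFN_UP are redefined locally and the 8 MiB VRAM size is an assumption):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long vram_size = 8ul * 1024 * 1024;   /* assumed 8 MiB of VRAM */
	unsigned long pg_align = PFN_UP(vram_size);    /* 2048 pages */

	/* the only page offset < vram_size that is a multiple of pg_align */
	for (unsigned long pfn = 0; pfn < PFN_UP(vram_size); pfn++)
		if (pfn % pg_align == 0)
			printf("allowed start pfn: %lu\n", pfn);   /* prints only 0 */
	return 0;
}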
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index c47671ce6c48..aa32aad222c2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -10,19 +10,16 @@
#ifndef __MGAG200_DRV_H__
#define __MGAG200_DRV_H__
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
#include <video/vga.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
-
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-
#include "mgag200_reg.h"
#define DRIVER_AUTHOR "Matthew Garrett"
@@ -100,21 +97,6 @@
#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
-#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
-
-struct mga_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
-struct mga_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct mga_framebuffer mfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
struct mga_crtc {
struct drm_crtc base;
@@ -147,16 +129,8 @@ struct mga_connector {
};
struct mga_cursor {
- /*
- We have to have 2 buffers for the cursor to avoid occasional
- corruption while switching cursor icons.
- If either of these is NULL, then don't do hardware cursors, and
- fall back to software.
- */
- struct drm_gem_vram_object *pixels_1;
- struct drm_gem_vram_object *pixels_2;
- /* The currently displayed icon, this points to one of pixels_1, or pixels_2 */
- struct drm_gem_vram_object *pixels_current;
+ struct drm_gem_vram_object *gbo[2];
+ unsigned int next_index;
};
struct mga_mc {
@@ -176,6 +150,12 @@ enum mga_type {
G200_EW3,
};
+/* HW does not handle the 'startadd' field correctly. */
+#define MGAG200_FLAG_HW_BUG_NO_STARTADD (1ul << 8)
+
+#define MGAG200_TYPE_MASK (0x000000ff)
+#define MGAG200_FLAG_MASK (0x00ffff00)
+
#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
struct mga_device {
@@ -189,9 +169,10 @@ struct mga_device {
struct mga_mc mc;
struct mga_mode_info mode_info;
- struct mga_fbdev *mfbdev;
struct mga_cursor cursor;
+ size_t vram_fb_available;
+
bool suspended;
int num_crtc;
enum mga_type type;
@@ -206,29 +187,25 @@ struct mga_device {
u32 unique_rev_id;
};
+static inline enum mga_type
+mgag200_type_from_driver_data(kernel_ulong_t driver_data)
+{
+ return (enum mga_type)(driver_data & MGAG200_TYPE_MASK);
+}
+
+static inline unsigned long
+mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
+{
+ return driver_data & MGAG200_FLAG_MASK;
+}
+
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
void mgag200_modeset_fini(struct mga_device *mdev);
- /* mgag200_fb.c */
-int mgag200_fbdev_init(struct mga_device *mdev);
-void mgag200_fbdev_fini(struct mga_device *mdev);
-
/* mgag200_main.c */
-int mgag200_framebuffer_init(struct drm_device *dev,
- struct mga_framebuffer *mfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-
int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
void mgag200_driver_unload(struct drm_device *dev);
-int mgag200_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int mgag200_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
/* mgag200_i2c.c */
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
@@ -238,8 +215,10 @@ int mgag200_mm_init(struct mga_device *mdev);
void mgag200_mm_fini(struct mga_device *mdev);
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
-int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height);
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+int mgag200_cursor_init(struct mga_device *mdev);
+void mgag200_cursor_fini(struct mga_device *mdev);
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
#endif /* __MGAG200_DRV_H__ */
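The PCI driver_data value now carries two fields: the chip type in the low byte and feature flags in the bytes above it, split apart again by the two inline helpers above. A quick decode of the G200 SE A table entry (the *_DEMO enum values are placeholders, not the driver's real enum values):

#include <stdio.h>

/* masks copied from the header above */
#define MGAG200_TYPE_MASK               (0x000000ff)
#define MGAG200_FLAG_MASK               (0x00ffff00)
#define MGAG200_FLAG_HW_BUG_NO_STARTADD (1ul << 8)

enum mga_type_demo { G200_SE_A_DEMO = 0, G200_SE_B_DEMO = 1 };

int main(void)
{
	unsigned long driver_data =
		G200_SE_A_DEMO | MGAG200_FLAG_HW_BUG_NO_STARTADD;

	printf("type  = %lu\n", driver_data & MGAG200_TYPE_MASK);   /* 0 */
	printf("flags = 0x%lx\n", driver_data & MGAG200_FLAG_MASK); /* 0x100 */
	return 0;
}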
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
deleted file mode 100644
index 8adb33228732..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2010 Matt Turner.
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Matt Turner
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "mgag200_drv.h"
-
-static void mga_dirty_update(struct mga_fbdev *mfbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo;
- int src_offset, dst_offset;
- int bpp = mfbdev->mfb.base.format->cpp[0];
- int ret;
- u8 *dst;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- obj = mfbdev->mfb.obj;
- gbo = drm_gem_vram_of_gem(obj);
-
- if (drm_can_sleep()) {
- /* We pin the BO so it won't be moved during the
- * update. The actual location, video RAM or system
- * memory, is not important.
- */
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
- } else {
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&mfbdev->dirty_lock, flags);
-
- if (mfbdev->y1 < y)
- y = mfbdev->y1;
- if (mfbdev->y2 > y2)
- y2 = mfbdev->y2;
- if (mfbdev->x1 < x)
- x = mfbdev->x1;
- if (mfbdev->x2 > x2)
- x2 = mfbdev->x2;
-
- if (store_for_later) {
- mfbdev->x1 = x;
- mfbdev->x2 = x2;
- mfbdev->y1 = y;
- mfbdev->y2 = y2;
- spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
- return;
- }
-
- mfbdev->x1 = mfbdev->y1 = INT_MAX;
- mfbdev->x2 = mfbdev->y2 = 0;
- spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
-
- dst = drm_gem_vram_kmap(gbo, false, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- } else if (!dst) {
- dst = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- }
- unmap = true;
- }
-
- for (i = y; i <= y2; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset =
- i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(dst + dst_offset, mfbdev->sysram + src_offset,
- (x2 - x + 1) * bpp);
- }
-
- if (unmap)
- drm_gem_vram_kunmap(gbo);
-
-out:
- drm_gem_vram_unpin(gbo);
-}
-
-static void mga_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct mga_fbdev *mfbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void mga_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct mga_fbdev *mfbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void mga_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct mga_fbdev *mfbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-
-static struct fb_ops mgag200fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = mga_fillrect,
- .fb_copyarea = mga_copyarea,
- .fb_imageblit = mga_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int mgag200fb_create_object(struct mga_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = mgag200_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int mgag200fb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct mga_fbdev *mfbdev =
- container_of(helper, struct mga_fbdev, helper);
- struct drm_device *dev = mfbdev->helper.dev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct mga_device *mdev = dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_gem_object *gobj = NULL;
- int ret;
- void *sysram;
- int size;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram) {
- ret = -ENOMEM;
- goto err_sysram;
- }
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_alloc_fbi;
- }
-
- ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
- if (ret)
- goto err_alloc_fbi;
-
- mfbdev->sysram = sysram;
- mfbdev->size = size;
-
- fb = &mfbdev->mfb.base;
-
- /* setup helper */
- mfbdev->helper.fb = fb;
-
- info->fbops = &mgag200fb_ops;
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
- info->apertures->ranges[0].size = mdev->mc.vram_size;
-
- drm_fb_helper_fill_info(info, &mfbdev->helper, sizes);
-
- info->screen_base = sysram;
- info->screen_size = size;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n",
- fb->width, fb->height);
-
- return 0;
-
-err_alloc_fbi:
- vfree(sysram);
-err_sysram:
- drm_gem_object_put_unlocked(gobj);
-
- return ret;
-}
-
-static int mga_fbdev_destroy(struct drm_device *dev,
- struct mga_fbdev *mfbdev)
-{
- struct mga_framebuffer *mfb = &mfbdev->mfb;
-
- drm_fb_helper_unregister_fbi(&mfbdev->helper);
-
- if (mfb->obj) {
- drm_gem_object_put_unlocked(mfb->obj);
- mfb->obj = NULL;
- }
- drm_fb_helper_fini(&mfbdev->helper);
- vfree(mfbdev->sysram);
- drm_framebuffer_unregister_private(&mfb->base);
- drm_framebuffer_cleanup(&mfb->base);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
- .fb_probe = mgag200fb_create,
-};
-
-int mgag200_fbdev_init(struct mga_device *mdev)
-{
- struct mga_fbdev *mfbdev;
- int ret;
- int bpp_sel = 32;
-
- /* prefer 16bpp on low end gpus with limited VRAM */
- if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
- bpp_sel = 16;
-
- mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
- if (!mfbdev)
- return -ENOMEM;
-
- mdev->mfbdev = mfbdev;
- spin_lock_init(&mfbdev->dirty_lock);
-
- drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
-
- ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
- MGAG200FB_CONN_LIMIT);
- if (ret)
- goto err_fb_helper;
-
- ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
- if (ret)
- goto err_fb_setup;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(mdev->dev);
-
- ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
- if (ret)
- goto err_fb_setup;
-
- return 0;
-
-err_fb_setup:
- drm_fb_helper_fini(&mfbdev->helper);
-err_fb_helper:
- mdev->mfbdev = NULL;
-
- return ret;
-}
-
-void mgag200_fbdev_fini(struct mga_device *mdev)
-{
- if (!mdev->mfbdev)
- return;
-
- mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 77d1c4771786..9f4635916d32 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -25,10 +25,11 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
+
#include <linux/export.h>
-#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <drm/drmP.h>
+#include <linux/i2c.h>
+#include <linux/pci.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index dd61ccc5af5c..e278b6a547bd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -7,70 +7,16 @@
* Matt Turner
* Dave Airlie
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include "mgag200_drv.h"
-
-static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
-
- drm_gem_object_put_unlocked(mga_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
-static const struct drm_framebuffer_funcs mga_fb_funcs = {
- .destroy = mga_user_framebuffer_destroy,
-};
-
-int mgag200_framebuffer_init(struct drm_device *dev,
- struct mga_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
+#include <linux/pci.h>
-static struct drm_framebuffer *
-mgag200_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct mga_framebuffer *mga_fb;
- int ret;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
- if (!mga_fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
- ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(mga_fb);
- return ERR_PTR(ret);
- }
- return &mga_fb->base;
-}
+#include "mgag200_drv.h"
static const struct drm_mode_config_funcs mga_mode_funcs = {
- .fb_create = mgag200_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create
};
static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
@@ -149,7 +95,8 @@ static int mgag200_device_init(struct drm_device *dev,
struct mga_device *mdev = dev->dev_private;
int ret, option;
- mdev->type = flags;
+ mdev->flags = mgag200_flags_from_driver_data(flags);
+ mdev->type = mgag200_type_from_driver_data(flags);
/* Hardcode the number of CRTCs to 1 */
mdev->num_crtc = 1;
@@ -172,8 +119,11 @@ static int mgag200_device_init(struct drm_device *dev,
return -ENOMEM;
/* stash G200 SE model number for later use */
- if (IS_G200_SE(mdev))
+ if (IS_G200_SE(mdev)) {
mdev->unique_rev_id = RREG32(0x1e24);
+ DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
+ mdev->unique_rev_id);
+ }
ret = mga_vram_init(mdev);
if (ret)
@@ -214,10 +164,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
dev->mode_config.preferred_depth = 16;
else
- dev->mode_config.preferred_depth = 24;
+ dev->mode_config.preferred_depth = 32;
dev->mode_config.prefer_shadow = 1;
r = mgag200_modeset_init(mdev);
@@ -226,25 +176,20 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
goto err_modeset;
}
- /* Make small buffers to store a hardware cursor (double buffered icon updates) */
- mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) {
- mdev->cursor.pixels_1 = NULL;
- mdev->cursor.pixels_2 = NULL;
+ r = mgag200_cursor_init(mdev);
+ if (r)
dev_warn(&dev->pdev->dev,
- "Could not allocate space for cursors. Not doing hardware cursors.\n");
- }
- mdev->cursor.pixels_current = NULL;
+ "Could not initialize cursors. Not doing hardware cursors.\n");
+
+ r = drm_fbdev_generic_setup(mdev->dev, 0);
+ if (r)
+ goto err_modeset;
return 0;
err_modeset:
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;
@@ -259,32 +204,8 @@ void mgag200_driver_unload(struct drm_device *dev)
if (mdev == NULL)
return;
mgag200_modeset_fini(mdev);
- mgag200_fbdev_fini(mdev);
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
}
-
-int mgag200_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &gbo->gem;
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a25054015e8c..62a8e9ccb16d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -9,9 +9,10 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -859,28 +860,16 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
- struct mga_device *mdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct mga_framebuffer *mga_fb;
struct drm_gem_vram_object *gbo;
int ret;
s64 gpu_addr;
- void *base;
if (!atomic && fb) {
- mga_fb = to_mga_framebuffer(fb);
- obj = mga_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&mdev->mfbdev->mfb == mga_fb)
- drm_gem_vram_kunmap(gbo);
+ gbo = drm_gem_vram_of_gem(fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
- mga_fb = to_mga_framebuffer(crtc->primary->fb);
- obj = mga_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
+ gbo = drm_gem_vram_of_gem(crtc->primary->fb->obj[0]);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
@@ -891,15 +880,6 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
goto err_drm_gem_vram_unpin;
}
- if (&mdev->mfbdev->mfb == mga_fb) {
- /* if pushing console in kmap it */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- DRM_ERROR("failed to kmap fbcon\n");
- }
- }
-
mga_set_start_address(crtc, (u32)gpu_addr);
return 0;
@@ -1423,14 +1403,9 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("\n");
mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- struct mga_device *mdev = crtc->dev->dev_private;
- struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->primary->fb);
- struct drm_gem_object *obj = mga_fb->obj;
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&mdev->mfbdev->mfb == mga_fb)
- drm_gem_vram_kunmap(gbo);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
@@ -1438,8 +1413,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
- .cursor_set = mga_crtc_cursor_set,
- .cursor_move = mga_crtc_cursor_move,
+ .cursor_set = mgag200_crtc_cursor_set,
+ .cursor_move = mgag200_crtc_cursor_move,
.gamma_set = mga_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = mga_crtc_destroy,
@@ -1654,7 +1629,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
bpp = connector->cmdline_mode.bpp;
}
- if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) {
if (connector->cmdline_mode.specified)
connector->cmdline_mode.specified = false;
return MODE_BAD;
@@ -1663,16 +1638,6 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static void mga_connector_destroy(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1684,7 +1649,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
.get_modes = mga_vga_get_modes,
.mode_valid = mga_vga_mode_valid,
- .best_encoder = mga_connector_best_encoder,
};
static const struct drm_connector_funcs mga_vga_connector_funcs = {
@@ -1703,18 +1667,19 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
return NULL;
connector = &mga_connector->base;
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
- drm_connector_init(dev, connector,
- &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ drm_connector_init_with_ddc(dev, connector,
+ &mga_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &mga_connector->i2c->adapter);
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
drm_connector_register(connector);
- mga_connector->i2c = mgag200_i2c_create(dev);
- if (!mga_connector->i2c)
- DRM_ERROR("failed to add ddc bus\n");
-
return connector;
}
@@ -1723,7 +1688,6 @@ int mgag200_modeset_init(struct mga_device *mdev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
- int ret;
mdev->mode_info.mode_config_initialized = true;
@@ -1748,12 +1712,6 @@ int mgag200_modeset_init(struct mga_device *mdev)
drm_connector_attach_encoder(connector, encoder);
- ret = mgag200_fbdev_init(mdev);
- if (ret) {
- DRM_ERROR("mga_fbdev_init failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 59294c0fd24a..e89657630ea7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -25,7 +25,8 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
#include "mgag200_drv.h"
@@ -36,8 +37,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- mdev->mc.vram_size,
- &drm_gem_vram_mm_funcs);
+ mdev->mc.vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
@@ -50,6 +50,8 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
+ mdev->vram_fb_available = mdev->mc.vram_size;
+
return 0;
}
@@ -57,6 +59,8 @@ void mgag200_mm_fini(struct mga_device *mdev)
{
struct drm_device *dev = mdev->dev;
+ mdev->vram_fb_available = 0;
+
drm_vram_helper_release_mm(dev);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 9c37e4de5896..6deaa7d01654 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -7,6 +7,7 @@ config DRM_MSM
depends on OF && COMMON_CLK
depends on MMU
depends on INTERCONNECT || !INTERCONNECT
+ depends on QCOM_OCMEM || QCOM_OCMEM=n
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
@@ -14,11 +15,11 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM if ARCH_QCOM
+ select QCOM_COMMAND_DB if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
- default y
help
DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 7a05cbf2f820..1579cf0d828f 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -75,6 +75,7 @@ msm-y := \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
+ msm_atomic_tracepoints.o \
msm_debugfs.o \
msm_drv.o \
msm_fb.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 5f7e98028eaf..b67f88872726 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -6,10 +6,6 @@
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
-#ifdef CONFIG_MSM_OCMEM
-# include <mach/ocmem.h>
-#endif
-
#include "a3xx_gpu.h"
#define A3XX_INT0_MASK \
@@ -195,9 +191,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
/* Set the OCMEM base address for A330, etc */
- if (a3xx_gpu->ocmem_hdl) {
+ if (a3xx_gpu->ocmem.hdl) {
gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
- (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+ (unsigned int)(a3xx_gpu->ocmem.base >> 14));
}
/* Turn on performance counters: */
@@ -318,10 +314,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
-#ifdef CONFIG_MSM_OCMEM
- if (a3xx_gpu->ocmem_base)
- ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
-#endif
+ adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
kfree(a3xx_gpu);
}
@@ -494,17 +487,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
/* if needed, allocate gmem: */
if (adreno_is_a330(adreno_gpu)) {
-#ifdef CONFIG_MSM_OCMEM
- /* TODO this is different/missing upstream: */
- struct ocmem_buf *ocmem_hdl =
- ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
-
- a3xx_gpu->ocmem_hdl = ocmem_hdl;
- a3xx_gpu->ocmem_base = ocmem_hdl->addr;
- adreno_gpu->gmem = ocmem_hdl->len;
- DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
- a3xx_gpu->ocmem_base);
-#endif
+ ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
+ adreno_gpu, &a3xx_gpu->ocmem);
+ if (ret)
+ goto fail;
}
if (!gpu->aspace) {
@@ -520,6 +506,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
return gpu;
fail:
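The icc_set_bw() calls added above size the interconnect request as the fastest core clock, read as bytes per second, multiplied by the 8-byte bus width. A back-of-the-envelope version of that arithmetic (the 450 MHz clock is an assumed example, and Bps_to_icc() is sketched locally rather than taken from the interconnect header):

#include <stdio.h>

/* interconnect bandwidth is requested in kB/s; Bps_to_icc() is assumed
 * here to scale a bytes-per-second figure down to those units */
#define Bps_to_icc(x) ((x) / 1000)

int main(void)
{
	unsigned long fast_rate = 450000000ul;                 /* assumed 450 MHz */
	unsigned long peak_kBps = Bps_to_icc(fast_rate) * 8;   /* 8-byte bus */

	printf("icc_set_bw peak request: %lu kB/s (~%lu MB/s)\n",
	       peak_kBps, peak_kBps / 1000);
	return 0;
}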
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 5dc33e5ea53b..c555fb13e0d7 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -19,8 +19,7 @@ struct a3xx_gpu {
struct adreno_gpu base;
/* if OCMEM is used for GMEM: */
- uint32_t ocmem_base;
- void *ocmem_hdl;
+ struct adreno_ocmem ocmem;
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index ab2b752566d8..253d8d85daad 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -2,9 +2,6 @@
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#include "a4xx_gpu.h"
-#ifdef CONFIG_MSM_OCMEM
-# include <soc/qcom/ocmem.h>
-#endif
#define A4XX_INT0_MASK \
(A4XX_INT0_RBBM_AHB_ERROR | \
@@ -188,7 +185,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
(1 << 30) | 0xFFFF);
gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
- (unsigned int)(a4xx_gpu->ocmem_base >> 14));
+ (unsigned int)(a4xx_gpu->ocmem.base >> 14));
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
@@ -318,10 +315,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
-#ifdef CONFIG_MSM_OCMEM
- if (a4xx_gpu->ocmem_base)
- ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
-#endif
+ adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
kfree(a4xx_gpu);
}
@@ -578,17 +572,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
-#ifdef CONFIG_MSM_OCMEM
- /* TODO this is different/missing upstream: */
- struct ocmem_buf *ocmem_hdl =
- ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
-
- a4xx_gpu->ocmem_hdl = ocmem_hdl;
- a4xx_gpu->ocmem_base = ocmem_hdl->addr;
- adreno_gpu->gmem = ocmem_hdl->len;
- DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
- a4xx_gpu->ocmem_base);
-#endif
+ ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+ &a4xx_gpu->ocmem);
+ if (ret)
+ goto fail;
}
if (!gpu->aspace) {
@@ -604,6 +591,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
goto fail;
}
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
return gpu;
fail:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index d506311ee240..a01448cba2ea 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -16,8 +16,7 @@ struct a4xx_gpu {
struct adreno_gpu base;
/* if OCMEM is used for GMEM: */
- uint32_t ocmem_base;
- void *ocmem_hdl;
+ struct adreno_ocmem ocmem;
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 9f2dd76bd67a..075ecce4b5e0 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -2,9 +2,11 @@
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*/
-
#include <linux/types.h>
#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "a5xx_gpu.h"
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e9c55d1d6c04..7d9e63e20ded 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -353,6 +353,9 @@ static int a5xx_me_init(struct msm_gpu *gpu)
* 2D mode 3 draw
*/
OUT_RING(ring, 0x0000000B);
+ } else if (adreno_is_a510(adreno_gpu)) {
+ /* Workaround for token and syncs */
+ OUT_RING(ring, 0x00000001);
} else {
/* No workarounds enabled */
OUT_RING(ring, 0x00000000);
@@ -568,15 +571,24 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
- if (adreno_is_a530(adreno_gpu))
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
- if (adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
- gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
- gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
-
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+ if (adreno_is_a510(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ } else {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x400 << 11 | 0x300 << 22));
+ }
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
@@ -589,6 +601,19 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Enable ME/PFP split notification */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+ /*
	 * In A5x, CCU can send a context_done event for a particular context to
	 * UCHE, which ultimately reaches CP, even when there is still a valid
	 * transaction for that context inside CCU. This can let CP program
	 * config registers, which makes the "valid transaction" inside CCU
	 * be interpreted differently and can cause a GPU fault. This bug is
	 * fixed in the latest A510 revision. To enable the fix, bit[11] of
	 * RB_DBG_ECO_CNTL needs to be set to 0 (the default is 1, i.e.
	 * disabled). For older A510 revisions this bit is unused.
+ */
+ if (adreno_is_a510(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
+
/* Enable HWCG */
a5xx_set_hwcg(gpu, true);
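The A510 workaround above enables the CCU fix by clearing bit 11 of RB_DBG_ECO_CNTL through gpu_rmw(). A tiny model of that read-modify-write step (treating gpu_rmw() as clear-the-mask-then-OR is an assumption, not copied from the msm headers):

#include <stdio.h>
#include <stdint.h>

/* read-modify-write: clear the bits in 'mask', then OR in 'or_bits' */
static uint32_t rmw(uint32_t val, uint32_t mask, uint32_t or_bits)
{
	return (val & ~mask) | or_bits;
}

int main(void)
{
	uint32_t rb_dbg_eco_cntl = 0x00000800;   /* bit 11 set = fix disabled */

	/* gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0) */
	rb_dbg_eco_cntl = rmw(rb_dbg_eco_cntl, 1u << 11, 0);
	printf("RB_DBG_ECO_CNTL -> 0x%08x\n", rb_dbg_eco_cntl);   /* 0x00000000 */
	return 0;
}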
@@ -635,7 +660,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a530(adreno_gpu))
+ if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
@@ -679,7 +704,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
a5xx_preempt_hw_init(gpu);
- a5xx_gpmu_ucode_init(gpu);
+ if (!adreno_is_a510(adreno_gpu))
+ a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
if (ret)
@@ -712,7 +738,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
}
/*
- * Try to load a zap shader into the secure world. If successful
+ * If the chip that we are using does support loading one, then
+ * try to load a zap shader into the secure world. If successful
* we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
* guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
@@ -726,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
- } else {
- /* Print a warning so if we die, we know why */
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ } else {
+ return ret;
}
/* Last step - yield the ringbuffer */
@@ -1066,6 +1100,7 @@ static void a5xx_dump(struct msm_gpu *gpu)
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Turn on the core power */
@@ -1073,6 +1108,15 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (adreno_is_a510(adreno_gpu)) {
+ /* Halt the sp_input_clk at HM level */
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
+ a5xx_set_hwcg(gpu, true);
+ /* Turn on sp_input_clk at HM level */
+ gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
+ return 0;
+ }
+
/* Turn the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
@@ -1101,9 +1145,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 mask = 0xf;
+
+ /* A510 has 3 XIN ports in VBIF */
+ if (adreno_is_a510(adreno_gpu))
+ mask = 0x7;
+
/* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
- spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
+ mask) == mask);
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
@@ -1289,7 +1341,7 @@ static void a5xx_gpu_state_destroy(struct kref *kref)
kfree(a5xx_state);
}
-int a5xx_gpu_state_put(struct msm_gpu_state *state)
+static int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
if (IS_ERR_OR_NULL(state))
return 1;
@@ -1299,8 +1351,8 @@ int a5xx_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
- struct drm_printer *p)
+static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
{
int i, j;
u32 pos = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index a3a06db675ba..321a8061fd32 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -297,6 +297,10 @@ int a5xx_power_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
+ /* Not all A5xx chips have a GPMU */
+ if (adreno_is_a510(adreno_gpu))
+ return 0;
+
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
@@ -326,6 +330,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
+ if (adreno_is_a510(adreno_gpu))
+ return;
+
if (a5xx_gpu->gpmu_bo)
return;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index f44553ec3193..ed78fee2a262 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -16,11 +16,11 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -2519,6 +2519,54 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
+
+#define REG_A6XX_GBIF_HALT 0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
+
#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 2ca470eb5cb8..983afeaee737 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/interconnect.h>
@@ -149,6 +149,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
if (freq == gmu->gpu_freqs[perf_index])
break;
+ gmu->current_perf_index = perf_index;
+
__a6xx_gmu_set_freq(gmu, perf_index);
}
@@ -433,6 +435,8 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
@@ -480,20 +484,34 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
@@ -741,8 +759,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
enable_irq(gmu->hfi_irq);
- /* Set the GPU to the highest power frequency */
- __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /* Set the GPU to the current freq */
+ __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
@@ -1166,13 +1184,15 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+ gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
- int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
+ int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
if (ret < 1)
return ret;
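The GMU clock probe above now uses devm_clk_bulk_get_all(), which pulls every clock described for the device in the device tree instead of a fixed table. A minimal, hedged consumer of that API (the function name below is illustrative; only the clk bulk calls, from linux/clk.h, are real kernel interfaces):

	/* Illustrative only: fetch all clocks attached to the device, then
	 * prepare and enable them as one group. */
	static int example_enable_all_clocks(struct device *dev,
					     struct clk_bulk_data **clocks)
	{
		int num = devm_clk_bulk_get_all(dev, clocks);

		if (num < 1)
			return num ? num : -ENOENT;	/* error, or no clocks listed */

		return clk_bulk_prepare_enable(num, *clocks);
	}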
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 39a26dd63674..2af91ed7ed0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -63,6 +63,9 @@ struct a6xx_gmu {
struct clk_bulk_data *clocks;
struct clk *core_clk;
+ /* current performance index set externally */
+ int current_perf_index;
+
int nr_gpu_freqs;
unsigned long gpu_freqs[16];
u32 gx_arc_votes[16];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index dc8ec2c94301..daf07800cde0 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include "msm_gem.h"
@@ -378,6 +378,18 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
+ /*
+	 * During a previous slumber, GBIF halt is asserted to ensure
+	 * that no further transactions can go through the GPU before the
+	 * GPU headswitch is turned off.
+	 *
+	 * This halt is deasserted once the headswitch goes off, but in
+	 * case the headswitch doesn't go off, clear the GBIF halt here
+	 * to ensure that GPU wake-up doesn't fail because of halted
+	 * GPU transactions.
+ */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
@@ -406,12 +418,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
- /* enable hardware clockgating */
- a6xx_set_hwcg(gpu, true);
+ /*
+ * enable hardware clockgating
+ * For now enable clock gating only for a630
+ */
+ if (adreno_is_a630(adreno_gpu))
+ a6xx_set_hwcg(gpu, true);
- /* VBIF start */
- gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+	/* VBIF/GBIF start */
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ if (adreno_is_a630(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
@@ -537,12 +554,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_flush(gpu, gpu->rb[0]);
if (!a6xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
- } else {
- /* Print a warning so if we die, we know why */
+ } else if (ret == -ENODEV) {
+ /*
+	 * This device does not use a zap shader (but print a warning
+	 * just in case someone got their DT wrong; hopefully they
+	 * have a debug UART to realize the error of their ways,
+	 * because if you mess this up you are about to crash horribly)
+ */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
ret = 0;
+ } else {
+ return ret;
}
out:
@@ -724,6 +748,39 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /*
+ * GMU needs DDR access in slumber path. Deassert GBIF halt now
+	 * to allow the GMU to access system memory.
+ */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
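a6xx_bus_clear_pending_transactions() above leans on the driver's spin_until() busy-wait for both halt acknowledgements. As a rough idea of what such a bounded poll looks like (the real macro, its timeout, and its delay granularity live in the adreno headers and may differ; example_spin_until() is purely illustrative and assumes linux/delay.h for udelay()):

	/* Illustrative only: busy-wait until "cond" becomes true or roughly
	 * 5 ms have passed.  Not the driver's actual spin_until(). */
	#define example_spin_until(cond) do {		\
		int _timeout_us = 5000;			\
		while (!(cond) && _timeout_us--)	\
			udelay(1);			\
	} while (0)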
@@ -748,6 +805,16 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+	 * may use the VBIF)
+ */
+ a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
+
return a6xx_gmu_stop(a6xx_gpu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 64399554f2dd..7239b8b60939 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__
@@ -42,6 +42,13 @@ struct a6xx_gpu {
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+	if (adreno_is_a630(gpu))
+ return false;
+
+ return true;
+}
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index e686331fa089..d6023ba8033c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#include <linux/ascii85.h>
#include "msm_gem.h"
@@ -320,6 +320,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
{
struct resource *res;
void __iomem *cxdbg = NULL;
+ int nr_debugbus_blocks;
/* Set up the GX debug bus */
@@ -352,31 +353,33 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg = ioremap(res->start, resource_size(res));
if (cxdbg) {
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
0x76543210);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
0xFEDCBA98);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
- a6xx_state->debugbus = state_kcalloc(a6xx_state,
- ARRAY_SIZE(a6xx_debugbus_blocks),
- sizeof(*a6xx_state->debugbus));
+ nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+ (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
if (a6xx_state->debugbus) {
int i;
@@ -388,15 +391,31 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+ /*
+	 * GBIF has the same debugbus as the other GPU blocks, so fall
+	 * back to the default path if the GPU uses GBIF; GBIF also uses
+	 * exactly the same debugbus ID as the VBIF.
+ */
+ if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_get_debugbus_block(gpu, a6xx_state,
+ &a6xx_gbif_debugbus_block,
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus += 1;
+ }
}
- a6xx_state->vbif_debugbus =
- state_kcalloc(a6xx_state, 1,
- sizeof(*a6xx_state->vbif_debugbus));
+ /* Dump the VBIF debugbus on applicable targets */
+ if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
- if (a6xx_state->vbif_debugbus)
- a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
- a6xx_state->vbif_debugbus);
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+ }
if (cxdbg) {
a6xx_state->cx_debugbus =
@@ -770,14 +789,16 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[1]);
}
+#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
ARRAY_SIZE(a6xx_reglist) +
- ARRAY_SIZE(a6xx_hlsq_reglist);
+ ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
a6xx_state->registers = state_kcalloc(a6xx_state,
count, sizeof(*a6xx_state->registers));
@@ -792,6 +813,15 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state, &a6xx_ahb_reglist[i],
&a6xx_state->registers[index++]);
+ if (a6xx_has_gbif(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_vbif_reglist,
+ &a6xx_state->registers[index++]);
+
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_crashdumper_registers(gpu,
a6xx_state, &a6xx_reglist[i],
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 68cccfa2870a..e67c20c415af 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#ifndef _A6XX_CRASH_DUMP_H_
#define _A6XX_CRASH_DUMP_H_
@@ -307,11 +307,20 @@ static const u32 a6xx_vbif_registers[] = {
0x3410, 0x3410, 0x3800, 0x3801,
};
+static const u32 a6xx_gbif_registers[] = {
+ 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A,
+};
+
static const struct a6xx_registers a6xx_ahb_reglist[] = {
REGS(a6xx_ahb_registers, 0, 0),
- REGS(a6xx_vbif_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_vbif_reglist =
+ REGS(a6xx_vbif_registers, 0, 0);
+
+static const struct a6xx_registers a6xx_gbif_reglist =
+ REGS(a6xx_gbif_registers, 0, 0);
+
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -422,6 +431,9 @@ static const struct a6xx_debugbus_block {
DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
};
+static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
+ DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
+
static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 40133a43960c..cb3a6e597d76 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -115,6 +115,21 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 1, 0, ANY_ID),
+ .revn = 510,
+ .name = "A510",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+	 * the GDSC, which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .init = a5xx_gpu_init,
+ }, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
@@ -152,6 +167,17 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
+ .rev = ADRENO_REV(6, 1, 8, ANY_ID),
+ .revn = 618,
+ .name = "A618",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ }, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
.name = "A630",
@@ -181,6 +207,7 @@ MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
+MODULE_FIRMWARE("qcom/a630_zap.mbn");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 048c8be426f3..7fd29829b2fa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -14,6 +14,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -25,6 +26,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
{
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
+ const char *signed_fwname = NULL;
struct device_node *np, *mem_np;
struct resource r;
phys_addr_t mem_phys;
@@ -57,8 +59,43 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
mem_phys = r.start;
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ /*
+ * Check for a firmware-name property. This is the new scheme
+ * to handle firmware that may be signed with device specific
+ * keys, allowing us to have a different zap fw path for different
+ * devices.
+ *
+ * If the firmware-name property is found, we bypass the
+ * adreno_request_fw() mechanism, because we don't need to handle
+ * the /lib/firmware/qcom/... vs /lib/firmware/... case.
+ *
+ * If the firmware-name property is not found, for backwards
+ * compatibility we fall back to the fwname from the gpulist
+ * table.
+ */
+ of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
+ if (signed_fwname) {
+ fwname = signed_fwname;
+ ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
+ if (ret)
+ fw = ERR_PTR(ret);
+ } else if (fwname) {
+ /* Request the MDT file from the default location: */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ } else {
+ /*
+		 * For new targets, if a zap shader is required, we require
+		 * the firmware-name property rather than falling back to a
+		 * firmware name specified in gpulist.
+		 *
+		 * Because the firmware is signed with a (potentially)
+		 * device-specific key, having the name come from gpulist
+		 * was a bad idea, and it is only kept for backwards
+		 * compatibility with older targets.
+ */
+ return -ENODEV;
+ }
+
if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
return PTR_ERR(fw);
@@ -94,7 +131,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
* not. But since we've already gotten through adreno_request_fw()
* we know which of the two cases it is:
*/
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
ret = qcom_mdt_load(dev, fw, fwname, pasid,
mem_region, mem_phys, mem_size, NULL);
} else {
@@ -145,14 +182,6 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return -EPROBE_DEFER;
}
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- zap_available = false;
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
@@ -825,7 +854,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -886,13 +915,63 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
/* Check for an interconnect path for the bus */
- gpu->icc_path = of_icc_get(dev, NULL);
+ gpu->icc_path = of_icc_get(dev, "gfx-mem");
+ if (!gpu->icc_path) {
+ /*
+		 * Keep compatibility with device trees that don't have an
+ * interconnect-names property.
+ */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ }
if (IS_ERR(gpu->icc_path))
gpu->icc_path = NULL;
+ gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+ if (IS_ERR(gpu->ocmem_icc_path))
+ gpu->ocmem_icc_path = NULL;
+
+ return 0;
+}
+
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *adreno_ocmem)
+{
+ struct ocmem_buf *ocmem_hdl;
+ struct ocmem *ocmem;
+
+ ocmem = of_get_ocmem(dev);
+ if (IS_ERR(ocmem)) {
+ if (PTR_ERR(ocmem) == -ENODEV) {
+ /*
+ * Return success since either the ocmem property was
+			 * not specified in the device tree, or ocmem support is
+ * not compiled into the kernel.
+ */
+ return 0;
+ }
+
+ return PTR_ERR(ocmem);
+ }
+
+ ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
+ if (IS_ERR(ocmem_hdl))
+ return PTR_ERR(ocmem_hdl);
+
+ adreno_ocmem->ocmem = ocmem;
+ adreno_ocmem->base = ocmem_hdl->addr;
+ adreno_ocmem->hdl = ocmem_hdl;
+ adreno_gpu->gmem = ocmem_hdl->len;
+
return 0;
}
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
+{
+ if (adreno_ocmem && adreno_ocmem->base)
+ ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
+ adreno_ocmem->hdl);
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
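The two OCMEM helpers added above are intended to be called from a GPU's own init and destroy hooks (the a3xx/a4xx callers are not part of these hunks). A hedged sketch of how a caller might wire them up; the function names below are illustrative, and only adreno_gpu_ocmem_init()/adreno_gpu_ocmem_cleanup() come from this patch:

	/* Illustrative only: carve GMEM out of OCMEM at init time and hand it
	 * back on teardown.  Assumes the per-GPU struct embeds an adreno_ocmem. */
	static int example_gpu_init_ocmem(struct device *dev,
					  struct adreno_gpu *adreno_gpu,
					  struct adreno_ocmem *ocmem)
	{
		int ret = adreno_gpu_ocmem_init(dev, adreno_gpu, ocmem);

		if (ret)
			return ret;	/* OCMEM described in DT but unusable */

		/* ... remaining GPU-specific init ... */
		return 0;
	}

	static void example_gpu_destroy_ocmem(struct adreno_ocmem *ocmem)
	{
		adreno_gpu_ocmem_cleanup(ocmem);	/* no-op if nothing was allocated */
	}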
@@ -937,6 +1016,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->fw[i]);
icc_put(gpu->icc_path);
+ icc_put(gpu->ocmem_icc_path);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c7441fb8313e..9ff4e550e7bd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -3,7 +3,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
*/
#ifndef __ADRENO_GPU_H__
@@ -126,6 +126,12 @@ struct adreno_gpu {
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+struct adreno_ocmem {
+ struct ocmem *ocmem;
+ unsigned long base;
+ void *hdl;
+};
+
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
@@ -206,6 +212,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a510(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 510;
+}
+
static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
@@ -216,6 +227,16 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline int adreno_is_a618(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 618;
+}
+
+static inline int adreno_is_a630(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 630;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -236,6 +257,10 @@ void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *ocmem);
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
@@ -315,10 +340,7 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
- if (offset_name >= REG_ADRENO_REGISTER_MAX ||
- !gpu->reg_offsets[offset_name]) {
- BUG();
- }
+ BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
/*
* REG_SKIP is a special value that tell us that the register in
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index cdbea38b8697..f1bc6a1af7a7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -55,8 +55,7 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
enum dpu_intr_type intr_type, u32 instance_idx)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
return -EINVAL;
return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
@@ -73,7 +72,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
unsigned long irq_flags;
int ret = 0, enable_count;
- if (!dpu_kms || !dpu_kms->hw_intr ||
+ if (!dpu_kms->hw_intr ||
!dpu_kms->irq_obj.enable_counts ||
!dpu_kms->irq_obj.irq_counts) {
DPU_ERROR("invalid params\n");
@@ -114,7 +113,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
- if (!dpu_kms || !irq_idxs || !irq_count) {
+ if (!irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -138,7 +137,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
int ret = 0, enable_count;
- if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -169,7 +168,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
- if (!dpu_kms || !irq_idxs || !irq_count) {
+ if (!irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -186,7 +185,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
+ if (!dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.get_interrupt_status)
return 0;
@@ -205,7 +204,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
{
unsigned long irq_flags;
- if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ if (!dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -240,7 +239,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
{
unsigned long irq_flags;
- if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ if (!dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -274,8 +273,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.clear_all_irqs)
+ if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
return;
dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
@@ -283,8 +281,7 @@ static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
- if (!dpu_kms || !dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.disable_all_irqs)
+ if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
return;
dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
@@ -343,18 +340,8 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
- struct msm_drm_private *priv;
int i;
- if (!dpu_kms->dev) {
- DPU_ERROR("invalid drm device\n");
- return;
- } else if (!dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid device private\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_clear_all_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
@@ -379,18 +366,8 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
- struct msm_drm_private *priv;
int i;
- if (!dpu_kms->dev) {
- DPU_ERROR("invalid drm device\n");
- return;
- } else if (!dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid device private\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 5cda96875e03..11f2bebe3869 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -32,18 +32,7 @@ enum dpu_perf_mode {
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
-
- if (!crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid device\n");
- return NULL;
- }
-
priv = crtc->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid kms\n");
- return NULL;
- }
-
return to_dpu_kms(priv->kms);
}
@@ -116,7 +105,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms || !kms->catalog) {
+ if (!kms->catalog) {
DPU_ERROR("invalid parameters\n");
return 0;
}
@@ -214,9 +203,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
*/
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
- struct drm_crtc *tmp_crtc;
struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
if (!crtc) {
@@ -225,30 +212,16 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms || !kms->catalog) {
+ if (!kms->catalog) {
DPU_ERROR("invalid kms\n");
return;
}
dpu_crtc = to_dpu_crtc(crtc);
- dpu_cstate = to_dpu_crtc_state(crtc->state);
- /* only do this for command mode rt client */
- if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ if (atomic_dec_return(&kms->bandwidth_ref) > 0)
return;
- /*
- * If video interface present, cmd panel bandwidth cannot be
- * released.
- */
- if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- dpu_crtc_get_intf_mode(tmp_crtc) ==
- INTF_MODE_VIDEO)
- return;
- }
-
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
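The release path above is now driven by a plain reference count on the kms object: every crtc that needs bus bandwidth takes a reference (the matching atomic_inc() calls appear in dpu_crtc.c further down), and the bandwidth is only dropped when the last holder lets go. A hedged sketch of that protocol, with illustrative function names:

	/* Illustrative only: take a reference while a crtc needs the bus... */
	static void example_get_bandwidth(atomic_t *bandwidth_ref)
	{
		atomic_inc(bandwidth_ref);
	}

	/* ...and only release the actual bandwidth votes when the count hits zero. */
	static void example_put_bandwidth(atomic_t *bandwidth_ref)
	{
		if (atomic_dec_return(bandwidth_ref) > 0)
			return;		/* another crtc still needs the bus */

		/* last user gone: drop the interconnect/bandwidth votes here */
	}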
@@ -301,7 +274,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
- struct msm_drm_private *priv;
struct dpu_kms *kms;
int ret;
@@ -311,11 +283,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms || !kms->catalog) {
+ if (!kms->catalog) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
- priv = kms->dev->dev_private;
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index b3417d56032d..bf513411b243 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -9,11 +9,13 @@
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
@@ -195,10 +197,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
DPU_DEBUG("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
- if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
- DPU_ERROR("invalid lm or ctl assigned to mixer\n");
- return;
- }
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
@@ -264,11 +262,20 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- if (!crtc || !crtc->dev) {
+ if (!crtc) {
DPU_ERROR("invalid crtc\n");
return INTF_MODE_NONE;
}
+ /*
+ * TODO: This function is called from dpu debugfs and as part of atomic
+ * check. When called from debugfs, the crtc->mutex must be held to
+	 * read crtc->state. However, reading crtc->state from atomic check isn't
+	 * allowed (unless you have a good reason, a big comment, and a deep
+	 * understanding of how the atomic/modeset locks work (<- and this is
+	 * probably not possible)). So we'll keep the WARN_ON here for now, but
+	 * really we need to figure out a better way to track our operating mode.
+ */
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* TODO: Returns the first INTF_MODE, could there be multiple values? */
@@ -292,19 +299,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
-static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
-{
- int ret = 0;
- struct drm_modeset_acquire_ctx ctx;
-
- DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
- dpu_core_perf_crtc_release_bw(crtc);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
- if (ret)
- DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
- ret);
-}
-
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -324,17 +318,12 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (atomic_read(&dpu_crtc->frame_pending) < 1) {
- /* this should not happen */
- DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
- crtc->base.id,
- fevent->event,
- ktime_to_ns(fevent->ts),
- atomic_read(&dpu_crtc->frame_pending));
+ /* ignore vblank when not pending */
} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
- dpu_crtc_release_bw_unlocked(crtc);
+ dpu_core_perf_crtc_release_bw(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
@@ -407,13 +396,8 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
-void dpu_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
- if (!crtc || !crtc->state) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
trace_dpu_crtc_complete_commit(DRMID(crtc));
}
@@ -623,13 +607,12 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
- int ret;
/*
* If no mixers has been allocated in dpu_crtc_atomic_check(),
@@ -647,37 +630,22 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
*/
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask)
- dpu_encoder_prepare_for_kickoff(encoder, async);
-
- if (!async) {
- /* wait for frame_event_done completion */
- DPU_ATRACE_BEGIN("wait_for_frame_done_event");
- ret = _dpu_crtc_wait_for_frame_done(crtc);
- DPU_ATRACE_END("wait_for_frame_done_event");
- if (ret) {
- DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
- crtc->base.id,
- atomic_read(&dpu_crtc->frame_pending));
- goto end;
- }
+ dpu_encoder_prepare_for_kickoff(encoder);
- if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
- /* acquire bandwidth and other resources */
- DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
- } else
- DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
- dpu_crtc->play_count++;
- }
+ dpu_crtc->play_count++;
dpu_vbif_clear_errors(dpu_kms);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
- dpu_encoder_kickoff(encoder, async);
+ dpu_encoder_kickoff(encoder);
-end:
- if (!async)
- reinit_completion(&dpu_crtc->frame_done_comp);
+ reinit_completion(&dpu_crtc->frame_done_comp);
DPU_ATRACE_END("crtc_commit");
}
@@ -729,8 +697,9 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_encoder *encoder;
struct msm_drm_private *priv;
unsigned long flags;
+ bool release_bandwidth = false;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ if (!crtc || !crtc->state) {
DPU_ERROR("invalid crtc\n");
return;
}
@@ -745,8 +714,15 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
drm_for_each_encoder_mask(encoder, crtc->dev,
- old_crtc_state->encoder_mask)
+ old_crtc_state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ release_bandwidth = true;
dpu_encoder_assign_crtc(encoder, NULL);
+ }
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -760,7 +736,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
if (atomic_read(&dpu_crtc->frame_pending)) {
trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
atomic_read(&dpu_crtc->frame_pending));
- dpu_core_perf_crtc_release_bw(crtc);
+ if (release_bandwidth)
+ dpu_core_perf_crtc_release_bw(crtc);
atomic_set(&dpu_crtc->frame_pending, 0);
}
@@ -792,8 +769,9 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
+ bool request_bandwidth;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
@@ -804,9 +782,19 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ request_bandwidth = true;
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ if (request_bandwidth)
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
dpu_crtc->enabled = true;
@@ -981,6 +969,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
rc = dpu_core_perf_crtc_check(crtc, state);
if (rc) {
DPU_ERROR("crtc%d failed performance check %d\n",
@@ -1119,14 +1109,9 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
for (i = 0; i < cstate->num_mixers; ++i) {
m = &cstate->mixers[i];
- if (!m->hw_lm)
- seq_printf(s, "\tmixer[%d] has no lm\n", i);
- else if (!m->lm_ctl)
- seq_printf(s, "\tmixer[%d] has no ctl\n", i);
- else
- seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
- m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
- out_width, mode->vdisplay);
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
}
seq_puts(s, "\n");
@@ -1303,13 +1288,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
{
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
- struct msm_drm_private *priv = NULL;
- struct dpu_kms *kms = NULL;
int i;
- priv = dev->dev_private;
- kms = to_dpu_kms(priv->kms);
-
dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
if (!dpu_crtc)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 5181f079a6a1..5174e86124cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -238,17 +238,14 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
- * @async: true if the commit is asynchronous, false otherwise
*/
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
* @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
*/
-void dpu_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state);
+void dpu_crtc_complete_commit(struct drm_crtc *crtc);
/**
* dpu_crtc_init - create a new crtc object
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 0aa8a12c9952..f8ac3bf60fd6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -6,14 +6,16 @@
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/kthread.h>
#include <linux/debugfs.h>
+#include <linux/kthread.h>
#include <linux/seq_file.h>
-#include "msm_drv.h"
-#include "dpu_kms.h"
#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
@@ -56,7 +58,7 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_VDISPLAY_SPLIT 1080
+#define MAX_HDISPLAY_SPLIT 1080
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -231,7 +233,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
u32 irq_status;
int ret;
- if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ if (!wait_info || intr_idx >= INTR_IDX_MAX) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -306,7 +308,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_irq *irq;
int ret = 0;
- if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ if (intr_idx >= INTR_IDX_MAX) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -361,10 +363,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_irq *irq;
int ret;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
irq = &phys_enc->irq[intr_idx];
/* silently skip irqs that weren't registered */
@@ -413,7 +411,7 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.get_hw_resources)
+ if (phys->ops.get_hw_resources)
phys->ops.get_hw_resources(phys, hw_res);
}
}
@@ -436,7 +434,7 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.destroy) {
+ if (phys->ops.destroy) {
phys->ops.destroy(phys);
--dpu_enc->num_phys_encs;
dpu_enc->phys_encs[i] = NULL;
@@ -462,7 +460,7 @@ void dpu_encoder_helper_split_config(
struct dpu_hw_mdp *hw_mdptop;
struct msm_display_info *disp_info;
- if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ if (!phys_enc->hw_mdptop || !phys_enc->parent) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -532,8 +530,23 @@ static struct msm_display_topology dpu_encoder_get_topology(
if (dpu_enc->phys_encs[i])
intf_count++;
- /* User split topology for width > 1080 */
- topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ /* Datapath topology selection
+ *
+ * Dual display
+	 * 2 LM, 2 INTF (split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ */
+ if (intf_count == 2)
+ topology.num_lm = 2;
+ else if (!dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = 1;
+ else
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+
topology.num_enc = 0;
topology.num_intf = intf_count;
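The topology rules in the comment above reduce to a small decision function. The sketch below simply restates the logic added in this hunk with illustrative names, so the three cases are easy to see at a glance:

	/* Illustrative restatement of the layer-mixer selection above. */
	static int example_num_lm(int intf_count, bool has_3d_merge, int hdisplay)
	{
		if (intf_count == 2)
			return 2;		/* dual-interface split display */
		if (!has_3d_merge)
			return 1;		/* hardware cannot merge LM streams */
		return hdisplay > 1080 ? 2 : 1;	/* wide panels need stream merge */
	}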
@@ -581,10 +594,10 @@ static int dpu_encoder_virt_atomic_check(
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.atomic_check)
+ if (phys->ops.atomic_check)
ret = phys->ops.atomic_check(phys, crtc_state,
conn_state);
- else if (phys && phys->ops.mode_fixup)
+ else if (phys->ops.mode_fixup)
if (!phys->ops.mode_fixup(phys, mode, adj_mode))
ret = -EINVAL;
@@ -643,11 +656,6 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- }
-
hw_mdptop = dpu_kms->hw_mdp;
if (!hw_mdptop) {
DPU_ERROR("invalid mdptop\n");
@@ -685,7 +693,7 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.irq_control)
+ if (phys->ops.irq_control)
phys->ops.irq_control(phys, enable);
}
@@ -733,8 +741,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
struct msm_drm_private *priv;
bool is_vid_mode = false;
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
- !drm_enc->crtc) {
+ if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
@@ -1036,46 +1043,43 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
- "at idx: %d\n", i);
- goto error;
- }
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ goto error;
+ }
- if (!hw_ctl[i]) {
- DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
- "at idx: %d\n", i);
- goto error;
- }
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ goto error;
+ }
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = hw_ctl[i];
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
- struct dpu_hw_intf *hw_intf;
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
- if (hw_intf->idx == phys->intf_idx)
- phys->hw_intf = hw_intf;
- }
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
- if (!phys->hw_intf) {
- DPU_ERROR_ENC(dpu_enc,
- "no intf block assigned at idx: %d\n",
- i);
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n", i);
goto error;
- }
-
- phys->connector = conn->state->connector;
- if (phys->ops.mode_set)
- phys->ops.mode_set(phys, mode, adj_mode);
}
+
+ phys->connector = conn->state->connector;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
}
dpu_enc->mode_set_complete = true;
@@ -1090,17 +1094,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc || !drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
@@ -1182,7 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- struct drm_display_mode *mode;
int i = 0;
if (!drm_enc) {
@@ -1191,9 +1190,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
} else if (!drm_enc->dev) {
DPU_ERROR("invalid dev\n");
return;
- } else if (!drm_enc->dev->dev_private) {
- DPU_ERROR("invalid dev_private\n");
- return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1202,8 +1198,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
- mode = &drm_enc->crtc->state->adjusted_mode;
-
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1217,7 +1211,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.disable)
+ if (phys->ops.disable)
phys->ops.disable(phys);
}
@@ -1230,8 +1224,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- if (dpu_enc->phys_encs[i])
- dpu_enc->phys_encs[i]->connector = NULL;
+ dpu_enc->phys_encs[i]->connector = NULL;
}
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
@@ -1321,7 +1314,7 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.control_vblank_irq)
+ if (phys->ops.control_vblank_irq)
phys->ops.control_vblank_irq(phys, enable);
}
}
@@ -1421,34 +1414,24 @@ static void dpu_encoder_off_work(struct work_struct *work)
* extra_flush_bits: Additional bit mask to include in flush trigger
*/
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
- struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
- bool async)
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
- if (!drm_enc || !phys) {
- DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
- drm_enc != 0, phys != 0);
- return;
- }
-
if (!phys->hw_pp) {
DPU_ERROR("invalid pingpong hw\n");
return;
}
ctl = phys->hw_ctl;
- if (!ctl || !ctl->ops.trigger_flush) {
+ if (!ctl->ops.trigger_flush) {
DPU_ERROR("missing trigger cb\n");
return;
}
- if (!async)
- pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
- else
- pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1487,13 +1470,8 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
ctl = phys_enc->hw_ctl;
- if (ctl && ctl->ops.trigger_start) {
+ if (ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
}
@@ -1530,14 +1508,10 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.reset)
+ if (!ctl->ops.reset)
return;
DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
@@ -1559,18 +1533,12 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
* a time.
* dpu_enc: Pointer to virtual encoder structure
*/
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
- bool async)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush;
unsigned long lock_flags;
- if (!dpu_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
pending_flush = 0x0;
/* update pending counts and trigger kickoff ctl flush atomically */
@@ -1580,25 +1548,22 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ if (phys->enable_state == DPU_ENC_DISABLED)
continue;
ctl = phys->hw_ctl;
- if (!ctl)
- continue;
/*
* This is cleared in frame_done worker, which isn't invoked
* for async commits. So don't set this for async, since it'll
* roll over to the next commit.
*/
- if (!async && phys->split_role != ENC_ROLE_SLAVE)
+ if (phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
- _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
- async);
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
else if (ctl->ops.get_pending_flush)
pending_flush |= ctl->ops.get_pending_flush(ctl);
}
@@ -1608,7 +1573,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
_dpu_encoder_trigger_flush(
&dpu_enc->base,
dpu_enc->cur_master,
- pending_flush, async);
+ pending_flush);
}
_dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1634,17 +1599,15 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->hw_ctl) {
- ctl = phys->hw_ctl;
- if (ctl->ops.clear_pending_flush)
- ctl->ops.clear_pending_flush(ctl);
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
- /* update only for command mode primary ctl */
- if ((phys == dpu_enc->cur_master) &&
- (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
- && ctl->ops.trigger_pending)
- ctl->ops.trigger_pending(ctl);
- }
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
}
}
@@ -1695,8 +1658,7 @@ static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
return line_time;
}
-static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
- ktime_t *wakeup_time)
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
struct drm_display_mode *mode;
struct dpu_encoder_virt *dpu_enc;
@@ -1750,8 +1712,7 @@ static void dpu_encoder_vsync_event_handler(struct timer_list *t)
struct msm_drm_private *priv;
struct msm_drm_thread *event_thread;
- if (!drm_enc->dev || !drm_enc->dev->dev_private ||
- !drm_enc->crtc) {
+ if (!drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return;
}
@@ -1783,7 +1744,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
return;
}
- if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
return;
trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
@@ -1791,17 +1752,13 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
bool needs_hw_reset = false;
unsigned int i;
- if (!drm_enc) {
- DPU_ERROR("invalid args\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(drm_enc);
trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
@@ -1810,12 +1767,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys);
- if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
- needs_hw_reset = true;
- }
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
}
DPU_ATRACE_END("enc_prepare_for_kickoff");
@@ -1830,49 +1785,38 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
}
}
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
ktime_t wakeup_time;
+ unsigned long timeout_ms;
unsigned int i;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
DPU_ATRACE_BEGIN("encoder_kickoff");
dpu_enc = to_dpu_encoder_virt(drm_enc);
trace_dpu_enc_kickoff(DRMID(drm_enc));
- /*
- * Asynchronous frames don't handle FRAME_DONE events. As such, they
- * shouldn't enable the frame_done watchdog since it will always time
- * out.
- */
- if (!async) {
- unsigned long timeout_ms;
- timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
- atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
- mod_timer(&dpu_enc->frame_done_timer,
- jiffies + msecs_to_jiffies(timeout_ms));
- }
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
/* All phys encs are ready to go, trigger the kickoff */
- _dpu_encoder_kickoff_phys(dpu_enc, async);
+ _dpu_encoder_kickoff_phys(dpu_enc);
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.handle_post_kickoff)
+ if (phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
- !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
trace_dpu_enc_early_kickoff(DRMID(drm_enc),
ktime_to_ms(wakeup_time));
mod_timer(&dpu_enc->vsync_event_timer,
@@ -1896,7 +1840,7 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.prepare_commit)
+ if (phys->ops.prepare_commit)
phys->ops.prepare_commit(phys);
}
}
@@ -1911,9 +1855,6 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys)
- continue;
-
seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
phys->intf_idx - INTF_0,
atomic_read(&phys->vsync_cnt),
@@ -1945,8 +1886,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
int i;
static const struct file_operations debugfs_status_fops = {
@@ -1958,14 +1897,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
- priv = drm_enc->dev->dev_private;
- dpu_kms = to_dpu_kms(priv->kms);
-
snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
/* create overall sub-directory for the encoder */
@@ -1977,8 +1913,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
for (i = 0; i < dpu_enc->num_phys_encs; i++)
- if (dpu_enc->phys_encs[i] &&
- dpu_enc->phys_encs[i]->ops.late_register)
+ if (dpu_enc->phys_encs[i]->ops.late_register)
dpu_enc->phys_encs[i]->ops.late_register(
dpu_enc->phys_encs[i],
dpu_enc->debugfs_root);
@@ -2073,9 +2008,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
enum dpu_intf_type intf_type;
struct dpu_enc_phys_init_params phys_params;
- if (!dpu_enc || !dpu_kms) {
- DPU_ERROR("invalid arg(s), enc %d kms %d\n",
- dpu_enc != 0, dpu_kms != 0);
+ if (!dpu_enc) {
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
return -EINVAL;
}
@@ -2148,11 +2082,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys) {
- atomic_set(&phys->vsync_cnt, 0);
- atomic_set(&phys->underrun_cnt, 0);
- }
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
}
mutex_unlock(&dpu_enc->enc_lock);
@@ -2164,14 +2095,12 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
frame_done_timer);
struct drm_encoder *drm_enc = &dpu_enc->base;
- struct msm_drm_private *priv;
u32 event;
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
- priv = drm_enc->dev->dev_private;
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
@@ -2296,8 +2225,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys)
- continue;
switch (event) {
case MSM_ENC_COMMIT_DONE:
@@ -2313,7 +2240,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
return -EINVAL;
- };
+ }
if (fn_wait) {
DPU_ATRACE_BEGIN("wait_for_completion_event");
@@ -2330,7 +2257,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
if (!encoder) {
DPU_ERROR("invalid encoder\n");
@@ -2341,12 +2267,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
if (dpu_enc->cur_master)
return dpu_enc->cur_master->intf_mode;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys)
- return phys->intf_mode;
- }
+ if (dpu_enc->num_phys_encs)
+ return dpu_enc->phys_encs[0]->intf_mode;
return INTF_MODE_NONE;
}
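
Editor's note on the dpu_encoder.c hunks above: with the async path removed, every kickoff now arms the frame-done watchdog, whose window is computed as frame-count * 1000 / vrefresh. A minimal standalone sketch of that arithmetic follows; the frame-count constant and refresh rates are illustrative assumptions, not values copied from the driver.

#include <stdio.h>

/* Hypothetical stand-in for DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES. */
#define FRAME_DONE_TIMEOUT_FRAMES 5

/* Watchdog window in milliseconds for a given vertical refresh rate. */
static unsigned long frame_done_timeout_ms(unsigned int vrefresh_hz)
{
	if (!vrefresh_hz)
		return 0;
	return FRAME_DONE_TIMEOUT_FRAMES * 1000UL / vrefresh_hz;
}

int main(void)
{
	unsigned int rates[] = { 30, 60, 90, 120 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%3u Hz -> %lu ms watchdog\n",
		       rates[i], frame_done_timeout_ms(rates[i]));
	return 0;
}
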
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index a8bf1147fc56..b4913465e602 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -68,9 +68,8 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Immediately: if no previous commit is outstanding.
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
- * @async: true if this is an asynchronous commit
*/
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, bool async);
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -83,9 +82,13 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @encoder: encoder pointer
- * @async: true if this is an asynchronous commit
*/
-void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_vsync_time - get the time of the next vsync
+ */
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
/**
* dpu_encoder_wait_for_event - Waits for encoder events
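
As the header changes show, both kickoff entry points lose their bool async argument. The sketch below only models the resulting call order from a hypothetical caller; struct drm_encoder and the two dpu_* calls are stubbed with printf so the sequence compiles standalone, the real prototypes live in dpu_encoder.h.

#include <stdio.h>

struct drm_encoder { int id; };

/* Stub: the real call waits for pending work and may request a HW reset. */
static void dpu_encoder_prepare_for_kickoff(struct drm_encoder *enc)
{
	printf("enc %d: prepare for kickoff\n", enc->id);
}

/* Stub: the real call flushes and starts the ctl path and arms the watchdog. */
static void dpu_encoder_kickoff(struct drm_encoder *enc)
{
	printf("enc %d: kickoff\n", enc->id);
}

int main(void)
{
	struct drm_encoder enc = { .id = 31 };

	dpu_encoder_prepare_for_kickoff(&enc);
	/* ... planes, mixers and ctl paths are programmed between the steps ... */
	dpu_encoder_kickoff(&enc);
	return 0;
}
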
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 1b3ab909f367..39e1e280ba44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -45,8 +45,7 @@ static bool dpu_encoder_phys_cmd_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (phys_enc)
- DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
return true;
}
@@ -58,11 +57,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
- if (!phys_enc)
- return;
-
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.setup_intf_cfg)
+ if (!ctl->ops.setup_intf_cfg)
return;
intf_cfg.intf = phys_enc->intf_idx;
@@ -79,7 +75,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
int new_cnt;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("pp_done_irq");
@@ -106,7 +102,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("rd_ptr_irq");
@@ -124,13 +120,8 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- struct dpu_encoder_phys_cmd *cmd_enc;
-
- if (!phys_enc || !phys_enc->hw_ctl)
- return;
DPU_ATRACE_BEGIN("ctl_start_irq");
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
@@ -143,9 +134,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc)
- return;
-
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
@@ -181,7 +169,7 @@ static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !mode || !adj_mode) {
+ if (!mode || !adj_mode) {
DPU_ERROR("invalid args\n");
return;
}
@@ -200,7 +188,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
- if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ if (!phys_enc->hw_pp)
return -EINVAL;
cmd_enc->pp_timeout_report_cnt++;
@@ -249,11 +237,6 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -275,7 +258,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
int ret = 0;
int refcount;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
@@ -316,13 +299,6 @@ end:
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- struct dpu_encoder_phys_cmd *cmd_enc;
-
- if (!phys_enc)
- return;
-
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
@@ -355,10 +331,9 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
struct drm_display_mode *mode;
bool tc_enable = true;
u32 vsync_hz;
- struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -373,11 +348,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
}
dpu_kms = phys_enc->dpu_kms;
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid device\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
/*
* TE default: dsi byte clock calculated base on 70 fps;
@@ -440,8 +410,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
- || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
@@ -470,7 +439,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -492,7 +461,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid phys encoder\n");
return;
}
@@ -511,8 +480,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
static void _dpu_encoder_phys_cmd_connect_te(
struct dpu_encoder_phys *phys_enc, bool enable)
{
- if (!phys_enc || !phys_enc->hw_pp ||
- !phys_enc->hw_pp->ops.connect_external_te)
+ if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
@@ -530,7 +498,7 @@ static int dpu_encoder_phys_cmd_get_line_count(
{
struct dpu_hw_pingpong *hw_pp;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return -EINVAL;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
@@ -548,7 +516,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -571,10 +539,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
kfree(cmd_enc);
}
@@ -592,7 +556,7 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
to_dpu_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -626,11 +590,6 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc || !phys_enc->hw_ctl) {
- DPU_ERROR("invalid argument(s)\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -650,12 +609,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
int rc;
- struct dpu_encoder_phys_cmd *cmd_enc;
-
- if (!phys_enc)
- return -EINVAL;
-
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
@@ -673,9 +626,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (!phys_enc)
- return -EINVAL;
-
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
@@ -696,9 +646,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
struct dpu_encoder_phys_cmd *cmd_enc;
struct dpu_encoder_wait_info wait_info;
- if (!phys_enc)
- return -EINVAL;
-
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
@@ -720,9 +667,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return;
-
/**
* re-enable external TE, either for the first time after enabling
* or if disabled for Autorefresh
@@ -733,9 +677,6 @@ static void dpu_encoder_phys_cmd_handle_post_kickoff(
static void dpu_encoder_phys_cmd_trigger_start(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return;
-
dpu_encoder_helper_trigger_start(phys_enc);
}
@@ -834,6 +775,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
return phys_enc;
-
- return ERR_PTR(ret);
}
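
The command-mode hunks above lean on the pending-kickoff accounting: the kickoff path increments a counter, the pingpong-done interrupt decrements it, and the wait helpers give up after a timeout. A minimal userspace model of that bookkeeping is sketched below; the timeout value is an assumed placeholder and the polled wait replaces the driver's wait_event machinery.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define KICKOFF_TIMEOUT_MS 84	/* assumed placeholder for the real constant */

static atomic_int pending_kickoff_cnt;

static void kickoff(void)     { atomic_fetch_add(&pending_kickoff_cnt, 1); }
static void pp_done_irq(void) { atomic_fetch_sub(&pending_kickoff_cnt, 1); }

/* Returns 0 once the counter drains, -1 on timeout. */
static int wait_for_idle(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (atomic_load(&pending_kickoff_cnt) > 0) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long ms = (now.tv_sec - start.tv_sec) * 1000 +
			  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (ms > KICKOFF_TIMEOUT_MS)
			return -1;
	}
	return 0;
}

int main(void)
{
	kickoff();
	pp_done_irq();	/* normally raised by the PINGPONG done interrupt */
	printf("wait_for_idle -> %d\n", wait_for_idle());
	return 0;
}
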
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 5055a5eec869..c71c18de5966 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -220,8 +220,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (phys_enc)
- DPU_DEBUG_VIDENC(phys_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -239,7 +238,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
- if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
return;
}
@@ -280,6 +279,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
+
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
@@ -293,12 +300,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
- if (!phys_enc)
- return;
-
hw_ctl = phys_enc->hw_ctl;
- if (!hw_ctl)
- return;
DPU_ATRACE_BEGIN("vblank_irq");
@@ -314,7 +316,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- if (hw_ctl && hw_ctl->ops.get_flush_register)
+ if (hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
@@ -324,6 +326,10 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
+
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
DPU_ATRACE_END("vblank_irq");
}
@@ -331,9 +337,6 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc)
- return;
-
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
@@ -370,11 +373,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (!phys_enc || !phys_enc->dpu_kms) {
- DPU_ERROR("invalid encoder/kms\n");
- return;
- }
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
@@ -391,11 +389,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
int ret = 0;
int refcount;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
@@ -431,6 +424,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
+ u32 intf_flush_mask = 0;
ctl = phys_enc->hw_ctl;
@@ -455,10 +449,18 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
+ if (ctl->ops.get_bitmask_active_intf)
+ ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask,
+ phys_enc->hw_intf->idx);
+
+ if (ctl->ops.update_pending_intf_flush)
+ ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
+
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
- "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ "update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n",
+ ctl->idx - CTL_0, flush_mask, intf_flush_mask);
+
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -467,11 +469,6 @@ skip_flush:
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
DPU_DEBUG_VIDENC(phys_enc, "\n");
kfree(phys_enc);
}
@@ -483,26 +480,17 @@ static void dpu_encoder_phys_vid_get_hw_resources(
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
}
-static int _dpu_encoder_phys_vid_wait_for_vblank(
- struct dpu_encoder_phys *phys_enc, bool notify)
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc) {
- pr_err("invalid encoder\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
- if (notify && phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc,
- DPU_ENCODER_FRAME_EVENT_DONE);
return 0;
}
@@ -512,18 +500,29 @@ static int _dpu_encoder_phys_vid_wait_for_vblank(
if (ret == -ETIMEDOUT) {
dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
- } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc,
- DPU_ENCODER_FRAME_EVENT_DONE);
+ }
return ret;
}
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+ int ret;
+
+ if (!hw_ctl)
+ return 0;
+
+ ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
+ (hw_ctl->ops.get_flush_register(hw_ctl) == 0),
+ msecs_to_jiffies(50));
+ if (ret <= 0) {
+ DPU_ERROR("vblank timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static void dpu_encoder_phys_vid_prepare_for_kickoff(
@@ -532,13 +531,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder/parameters\n");
- return;
- }
-
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.wait_reset_status)
+ if (!ctl->ops.wait_reset_status)
return;
/*
@@ -555,18 +549,15 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
unsigned long lock_flags;
int ret;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
+ if (!phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
- priv = phys_enc->parent->dev->dev_private;
- if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
@@ -595,7 +586,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
* scanout buffer) don't latch properly..
*/
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
- ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -612,11 +603,6 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
{
unsigned long lock_flags;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
/*
* Video mode must flush CTL before enabling timing engine
* Video encoders need to turn on their interfaces now
@@ -636,9 +622,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
{
int ret;
- if (!phys_enc)
- return;
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
enable,
@@ -659,9 +642,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return -EINVAL;
-
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
@@ -681,7 +661,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->destroy = dpu_encoder_phys_vid_destroy;
ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
- ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
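
The new dpu_encoder_phys_vid_wait_for_commit_done above keeps reading the CTL flush register until the hardware has consumed every pending bit, bounded by 50 ms. A standalone model of that poll loop follows; read_flush_register() is a stand-in for hw_ctl->ops.get_flush_register() and simply clears after a few samples so the example terminates.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_flush_reg = 0x80000001;	/* pretend bits still pending */

static uint32_t read_flush_register(void)
{
	/* A real implementation reads CTL_FLUSH from hardware. */
	static int polls;
	return (++polls > 3) ? 0 : fake_flush_reg;
}

static int wait_for_commit_done(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (read_flush_register() != 0) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long ms = (now.tv_sec - start.tv_sec) * 1000 +
			  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (ms > 50)
			return -ETIMEDOUT;	/* flush never consumed */
	}
	return 0;
}

int main(void)
{
	printf("wait_for_commit_done -> %d\n", wait_for_commit_done());
	return 0;
}
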
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 24ab6249083a..528632690f1e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -489,12 +489,28 @@ static const struct dpu_format dpu_format_map_ubwc[] = {
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+ /* ARGB8888 and ABGR8888 purposely have the same color
+ * ordering. The hardware only supports ABGR8888 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
@@ -550,7 +566,9 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
{
static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
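
The dpu_formats.c hunk extends the UBWC media-color map so ARGB8888 and XRGB8888 alias the same hardware layout as their ABGR/XBGR counterparts. A self-contained sketch of that fourcc lookup is below; the fourcc codes follow the usual DRM convention ('AR24' for ARGB8888 and so on), while the COLOR_FMT value is an arbitrary stand-in, not the driver's number.

#include <stdint.h>
#include <stdio.h>

#define fourcc_code(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define FMT_ARGB8888 fourcc_code('A', 'R', '2', '4')
#define FMT_ABGR8888 fourcc_code('A', 'B', '2', '4')
#define FMT_XRGB8888 fourcc_code('X', 'R', '2', '4')
#define FMT_XBGR8888 fourcc_code('X', 'B', '2', '4')

enum { COLOR_FMT_RGBA8888_UBWC = 1 };

static const struct { uint32_t fourcc; int media_color; } ubwc_map[] = {
	{ FMT_ABGR8888, COLOR_FMT_RGBA8888_UBWC },
	{ FMT_ARGB8888, COLOR_FMT_RGBA8888_UBWC },	/* same HW layout as ABGR */
	{ FMT_XBGR8888, COLOR_FMT_RGBA8888_UBWC },
	{ FMT_XRGB8888, COLOR_FMT_RGBA8888_UBWC },	/* same HW layout as XBGR */
};

static int ubwc_media_color(uint32_t fourcc)
{
	for (unsigned int i = 0; i < sizeof(ubwc_map) / sizeof(ubwc_map[0]); i++)
		if (ubwc_map[i].fourcc == fourcc)
			return ubwc_map[i].media_color;
	return -1;	/* not UBWC capable */
}

int main(void)
{
	printf("ARGB8888 -> %d\n", ubwc_media_color(FMT_ARGB8888));
	return 0;
}
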
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 04c8c44f5b9c..c567917541e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -11,11 +11,17 @@
#include "dpu_hw_catalog_format.h"
#include "dpu_kms.h"
-#define VIG_SDM845_MASK \
- (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+#define VIG_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_SDM845_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SC7180_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -27,6 +33,9 @@
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+#define MIXER_SC7180_MASK \
+ (BIT(DPU_DIM_LAYER))
+
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
#define PINGPONG_SDM845_SPLIT_MASK \
@@ -58,9 +67,20 @@ static const struct dpu_caps sdm845_dpu_caps = {
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
+ .has_3d_merge = true,
+};
+
+static const struct dpu_caps sc7180_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
};
-static struct dpu_mdp_cfg sdm845_mdp[] = {
+static const struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x45C,
@@ -85,10 +105,27 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
},
};
+static const struct dpu_mdp_cfg sc7180_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x3,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ },
+};
+
/*************************************************************
* CTL sub blocks config
*************************************************************/
-static struct dpu_ctl_cfg sdm845_ctl[] = {
+static const struct dpu_ctl_cfg sdm845_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0xE4,
@@ -116,6 +153,24 @@ static struct dpu_ctl_cfg sdm845_ctl[] = {
},
};
+static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+};
+
/*************************************************************
* SSPP sub blocks config
*************************************************************/
@@ -128,7 +183,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.maxvdeciexp = MAX_VERT_DECIMATION,
};
-#define _VIG_SBLK(num, sdma_pri) \
+#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
.common = &sdm845_sspp_common, \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
@@ -137,7 +192,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
- .id = DPU_SSPP_SCALER_QSEED3, \
+ .id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = STRCAT("sspp_csc", num), \
.id = DPU_SSPP_CSC_10BIT, \
@@ -162,10 +217,14 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
@@ -184,7 +243,7 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
.clk_ctrl = _clkctrl \
}
-static struct dpu_sspp_cfg sdm845_sspp[] = {
+static const struct dpu_sspp_cfg sdm845_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
@@ -203,9 +262,26 @@ static struct dpu_sspp_cfg sdm845_sspp[] = {
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+ _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+};
+
/*************************************************************
* MIXER sub blocks config
*************************************************************/
+
+/* SDM845 */
+
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -215,23 +291,46 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
- .features = MIXER_SDM845_MASK, \
- .sblk = &sdm845_lm_sblk, \
+ .features = _fmask, \
+ .sblk = _sblk, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
-static struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
- LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
- LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
- LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
- LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
- LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
+static const struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_3, LM_2),
+};
+
+/* SC7180 */
+
+static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
+ },
+};
+
+static const struct dpu_lm_cfg sc7180_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_1, LM_0),
};
/*************************************************************
@@ -264,13 +363,18 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.sblk = &sdm845_pp_sblk \
}
-static struct dpu_pingpong_cfg sdm845_pp[] = {
+static const struct dpu_pingpong_cfg sdm845_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
};
+static struct dpu_pingpong_cfg sc7180_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+};
+
/*************************************************************
* INTF sub blocks config
*************************************************************/
@@ -278,26 +382,32 @@ static struct dpu_pingpong_cfg sdm845_pp[] = {
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
+ .features = BIT(DPU_CTL_ACTIVE_CFG), \
.type = _type, \
.controller_id = _ctrl_id, \
.prog_fetch_lines_worst_case = 24 \
}
-static struct dpu_intf_cfg sdm845_intf[] = {
+static const struct dpu_intf_cfg sdm845_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
};
+static const struct dpu_intf_cfg sc7180_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+};
+
/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
-static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
-static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
-static struct dpu_vbif_cfg sdm845_vbif[] = {
+static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
.name = "vbif_0", .id = VBIF_0,
.base = 0, .len = 0x1040,
@@ -316,7 +426,7 @@ static struct dpu_vbif_cfg sdm845_vbif[] = {
},
};
-static struct dpu_reg_dma_cfg sdm845_regdma = {
+static const struct dpu_reg_dma_cfg sdm845_regdma = {
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
};
@@ -325,7 +435,7 @@ static struct dpu_reg_dma_cfg sdm845_regdma = {
*************************************************************/
/* SSPP QOS LUTs */
-static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 4, .lut = 0x357},
{.fl = 5, .lut = 0x3357},
{.fl = 6, .lut = 0x23357},
@@ -340,7 +450,11 @@ static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 0, .lut = 0x11222222223357}
};
-static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 10, .lut = 0x344556677},
{.fl = 11, .lut = 0x3344556677},
{.fl = 12, .lut = 0x23344556677},
@@ -349,11 +463,19 @@ static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 0, .lut = 0x112233344556677},
};
-static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
+ {.fl = 0, .lut = 0x0011223344556677},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
-static struct dpu_perf_cfg sdm845_perf_data = {
+static const struct dpu_perf_cfg sdm845_perf_data = {
.max_bw_low = 6800000,
.max_bw_high = 6800000,
.min_core_ib = 2400000,
@@ -392,6 +514,30 @@ static struct dpu_perf_cfg sdm845_perf_data = {
},
};
+static const struct dpu_perf_cfg sc7180_perf_data = {
+ .max_bw_low = 3900000,
+ .max_bw_high = 5500000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
/*************************************************************
* Hardware catalog init
*************************************************************/
@@ -421,12 +567,43 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sdm845_regdma,
.perf = sdm845_perf_data,
+ .mdss_irqs = 0x3ff,
+ };
+}
+
+/*
+ * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sc7180_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7180_mdp),
+ .mdp = sc7180_mdp,
+ .ctl_count = ARRAY_SIZE(sc7180_ctl),
+ .ctl = sc7180_ctl,
+ .sspp_count = ARRAY_SIZE(sc7180_sspp),
+ .sspp = sc7180_sspp,
+ .mixer_count = ARRAY_SIZE(sc7180_lm),
+ .mixer = sc7180_lm,
+ .pingpong_count = ARRAY_SIZE(sc7180_pp),
+ .pingpong = sc7180_pp,
+ .intf_count = ARRAY_SIZE(sc7180_intf),
+ .intf = sc7180_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sc7180_perf_data,
+ .mdss_irqs = 0x3f,
};
}
-static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
};
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
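
The catalog change above adds sc7180 (DPU_HW_VER_620) to the cfg_handler[] table, so the probe path picks a per-SoC init routine by hardware revision. The standalone sketch below models that dispatch; the revision encoding used here (major << 28 | minor << 16 | step) is an assumption for illustration.

#include <stdio.h>

#define HW_VER(major, minor, step) \
	((unsigned int)(major) << 28 | (unsigned int)(minor) << 16 | (step))

struct mdss_cfg { const char *soc; };

static void sdm845_cfg_init(struct mdss_cfg *c) { c->soc = "sdm845"; }
static void sc7180_cfg_init(struct mdss_cfg *c) { c->soc = "sc7180"; }

static const struct {
	unsigned int hw_rev;
	void (*cfg_init)(struct mdss_cfg *cfg);
} cfg_handler[] = {
	{ HW_VER(4, 0, 0), sdm845_cfg_init },
	{ HW_VER(4, 0, 1), sdm845_cfg_init },
	{ HW_VER(6, 2, 0), sc7180_cfg_init },	/* new in this change */
};

static int catalog_init(struct mdss_cfg *cfg, unsigned int hw_rev)
{
	for (unsigned int i = 0; i < sizeof(cfg_handler) / sizeof(cfg_handler[0]); i++) {
		if (cfg_handler[i].hw_rev == hw_rev) {
			cfg_handler[i].cfg_init(cfg);
			return 0;
		}
	}
	return -1;	/* unsupported revision */
}

int main(void)
{
	struct mdss_cfg cfg;

	if (!catalog_init(&cfg, HW_VER(6, 2, 0)))
		printf("catalog selected for %s\n", cfg.soc);
	return 0;
}
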
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 90f439812088..09df7d87dd43 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -9,7 +9,6 @@
#include <linux/bug.h>
#include <linux/bitmap.h>
#include <linux/err.h>
-#include <drm/drmP.h>
/**
* Max hardware block count: For ex: max 12 SSPP pipes or
@@ -39,6 +38,7 @@
#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
@@ -46,6 +46,7 @@
#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
#define DPU_HW_BLK_NAME_LEN 16
@@ -93,6 +94,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
 * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
 * @DPU_SSPP_CSC, Support of Color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
@@ -111,6 +113,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
@@ -167,6 +170,7 @@ enum {
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_ACTIVE_CFG,
DPU_CTL_MAX
};
@@ -270,7 +274,7 @@ struct dpu_qos_lut_entry {
*/
struct dpu_qos_lut_tbl {
u32 nentry;
- struct dpu_qos_lut_entry *entries;
+ const struct dpu_qos_lut_entry *entries;
};
/**
@@ -284,6 +288,7 @@ struct dpu_qos_lut_tbl {
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_3d_merge indicate if 3D merge is supported
*/
struct dpu_caps {
u32 max_mixer_width;
@@ -294,6 +299,7 @@ struct dpu_caps {
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
+ bool has_3d_merge;
};
/**
@@ -321,6 +327,7 @@ struct dpu_sspp_blks_common {
* @maxupscale: maxupscale ratio supported
* @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
* @src_blk:
* @scaler_blk:
* @csc_blk:
@@ -341,6 +348,7 @@ struct dpu_sspp_sub_blks {
u32 maxupscale;
u32 smart_dma_priority;
u32 max_per_pipe_bw;
+ u32 qseed_ver;
struct dpu_src_blk src_blk;
struct dpu_scaler_blk scaler_blk;
struct dpu_pp_blk csc_blk;
@@ -512,7 +520,7 @@ struct dpu_vbif_dynamic_ot_cfg {
*/
struct dpu_vbif_dynamic_ot_tbl {
u32 count;
- struct dpu_vbif_dynamic_ot_cfg *cfg;
+ const struct dpu_vbif_dynamic_ot_cfg *cfg;
};
/**
@@ -522,7 +530,7 @@ struct dpu_vbif_dynamic_ot_tbl {
*/
struct dpu_vbif_qos_tbl {
u32 npriority_lvl;
- u32 *priority_lvl;
+ const u32 *priority_lvl;
};
/**
@@ -647,6 +655,7 @@ struct dpu_perf_cfg {
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
+ * @mdss_irqs: Bitmap with the irqs supported by the target
*/
struct dpu_mdss_cfg {
u32 hwversion;
@@ -654,25 +663,25 @@ struct dpu_mdss_cfg {
const struct dpu_caps *caps;
u32 mdp_count;
- struct dpu_mdp_cfg *mdp;
+ const struct dpu_mdp_cfg *mdp;
u32 ctl_count;
- struct dpu_ctl_cfg *ctl;
+ const struct dpu_ctl_cfg *ctl;
u32 sspp_count;
- struct dpu_sspp_cfg *sspp;
+ const struct dpu_sspp_cfg *sspp;
u32 mixer_count;
- struct dpu_lm_cfg *mixer;
+ const struct dpu_lm_cfg *mixer;
u32 pingpong_count;
- struct dpu_pingpong_cfg *pingpong;
+ const struct dpu_pingpong_cfg *pingpong;
u32 intf_count;
- struct dpu_intf_cfg *intf;
+ const struct dpu_intf_cfg *intf;
u32 vbif_count;
- struct dpu_vbif_cfg *vbif;
+ const struct dpu_vbif_cfg *vbif;
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
@@ -682,9 +691,11 @@ struct dpu_mdss_cfg {
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
- struct dpu_format_extended *dma_formats;
- struct dpu_format_extended *cursor_formats;
- struct dpu_format_extended *vig_formats;
+ const struct dpu_format_extended *dma_formats;
+ const struct dpu_format_extended *cursor_formats;
+ const struct dpu_format_extended *vig_formats;
+
+ unsigned long mdss_irqs;
};
struct dpu_mdss_hw_cfg_handler {
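
The header now carries const-qualified QoS LUT tables keyed by fill level. The hedged sketch below shows one plausible way such a table is consumed: pick the first entry whose .fl covers the requested fill level, with .fl == 0 acting as a catch-all. That selection rule is an assumption for illustration; the authoritative lookup lives elsewhere in the dpu1 code.

#include <stdint.h>
#include <stdio.h>

struct qos_lut_entry { uint32_t fl; uint64_t lut; };

static const struct qos_lut_entry qos_linear[] = {
	{ .fl = 4, .lut = 0x357 },
	{ .fl = 5, .lut = 0x3357 },
	{ .fl = 0, .lut = 0x11222222223357 },	/* catch-all entry */
};

static uint64_t pick_qos_lut(const struct qos_lut_entry *tbl, unsigned int n,
			     uint32_t total_fl)
{
	for (unsigned int i = 0; i < n; i++)
		if (total_fl <= tbl[i].fl || tbl[i].fl == 0)
			return tbl[i].lut;
	return 0;
}

int main(void)
{
	printf("fl=5 -> lut 0x%llx\n",
	       (unsigned long long)pick_qos_lut(qos_linear, 3, 5));
	return 0;
}
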
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index bb6112c949ae..3766f0fd0bf0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -6,8 +6,12 @@
static const uint32_t qcom_compressed_supported_formats[] = {
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
};
static const uint32_t plane_formats[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index b2f7b0e886b5..831e5f7a9b7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -22,14 +22,18 @@
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
+#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_INTF_FLUSH 0x110
+#define CTL_INTF_MASTER 0x134
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
+#define INTF_IDX 31
-static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
- struct dpu_mdss_cfg *m,
+static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -100,14 +104,27 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= flushbits;
}
-static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
{
- if (!ctx)
- return 0x0;
+ ctx->pending_intf_flush_mask |= flushbits;
+}
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
return ctx->pending_flush_mask;
}
+static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
+{
+
+ if (ctx->pending_flush_mask & BIT(INTF_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
+ ctx->pending_intf_flush_mask);
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
@@ -225,6 +242,36 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
+static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ case INTF_1:
+ *flushbits |= BIT(31);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(0);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(1);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -425,6 +472,24 @@ exit:
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
+
+static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 mode_sel = 0;
+
+ if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
+ mode_sel |= BIT(17);
+
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+}
+
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
@@ -458,31 +523,41 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
+ ops->get_bitmask_active_intf =
+ dpu_hw_ctl_active_get_bitmask_intf;
+ ops->update_pending_intf_flush =
+ dpu_hw_ctl_update_pending_intf_flush;
+ } else {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ }
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
- ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
- ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_ctl *c;
- struct dpu_ctl_cfg *cfg;
+ const struct dpu_ctl_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
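
For the v1 ctl path wired up above: when DPU_CTL_ACTIVE_CFG is set, interface flushes accumulate in a separate mask and the dedicated CTL_INTF_FLUSH register is written before the main CTL_FLUSH whenever the generic INTF bit (31) is pending. A standalone model follows; reg_write() is a printf stand-in for DPU_REG_WRITE and the CTL_FLUSH offset is assumed for the example.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define INTF_IDX	31	/* "some interface needs flushing" bit */

#define CTL_FLUSH	0x018	/* assumed offset for this sketch */
#define CTL_INTF_FLUSH	0x110

struct hw_ctl {
	uint32_t pending_flush_mask;
	uint32_t pending_intf_flush_mask;
};

static void reg_write(uint32_t off, uint32_t val)
{
	printf("  write 0x%03x = 0x%08x\n", off, val);
}

/* OR per-interface active bits into the dedicated intf flush mask. */
static void update_pending_intf_flush(struct hw_ctl *ctl, uint32_t bits)
{
	ctl->pending_intf_flush_mask |= bits;
}

static void trigger_flush_v1(struct hw_ctl *ctl)
{
	if (ctl->pending_flush_mask & BIT(INTF_IDX))
		reg_write(CTL_INTF_FLUSH, ctl->pending_intf_flush_mask);
	reg_write(CTL_FLUSH, ctl->pending_flush_mask);
}

int main(void)
{
	struct hw_ctl ctl = { 0 };

	/* Encoder enable: generic INTF bit in the main mask, INTF_0 -> bit 0
	 * in the active-intf mask. */
	ctl.pending_flush_mask |= BIT(INTF_IDX);
	update_pending_intf_flush(&ctl, BIT(0));

	trigger_flush_v1(&ctl);
	return 0;
}
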
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index d3ae939ef9f8..09e1263c72e2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -91,6 +91,15 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
+ * OR in the given flushbits to the cached pending_intf_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -130,11 +139,24 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
+ /**
+ * Query the value of the intf flush mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_intf blk);
/**
+ * Query the value of the intf active flush mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf blk);
+
+ /**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
*/
@@ -159,6 +181,7 @@ struct dpu_hw_ctl_ops {
* @mixer_count: number of mixers
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @pending_intf_flush_mask: pending INTF flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -171,6 +194,7 @@ struct dpu_hw_ctl {
int mixer_count;
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
+ u32 pending_intf_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
@@ -195,7 +219,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_ctl_destroy(): Destroys ctl driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 8bfa7d0eede6..d84a84f7fe1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -800,8 +800,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
start_idx = reg_idx * 32;
end_idx = start_idx + 32;
- if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
- end_idx > ARRAY_SIZE(dpu_irq_map))
+ if (!test_bit(reg_idx, &intr->irq_mask) ||
+ start_idx >= ARRAY_SIZE(dpu_irq_map))
continue;
/*
@@ -955,8 +955,11 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].clr_off, 0xffffffff);
+ }
/* ensure register writes go through */
wmb();
@@ -971,8 +974,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].en_off, 0x00000000);
+ }
/* ensure register writes go through */
wmb();
@@ -991,6 +997,9 @@ static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (!test_bit(i, &intr->irq_mask))
+ continue;
+
/* Read interrupt status */
intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
dpu_intr_set[i].status_off);
@@ -1115,6 +1124,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
}
+ intr->irq_mask = m->mdss_irqs;
spin_lock_init(&intr->irq_lock);
return intr;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 4edcf402dc46..fc9c98617281 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -187,6 +187,7 @@ struct dpu_hw_intr {
u32 *save_irq_status;
u32 irq_idx_tbl_size;
spinlock_t irq_lock;
+ unsigned long irq_mask;
};
/**
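The interrupt hunks above gate every IRQ register block behind an irq_mask taken from the hardware catalog (m->mdss_irqs), so blocks a given SoC does not implement are never cleared, disabled, or read. A standalone model of that gating, with hypothetical block count and mask value:

#include <stdbool.h>
#include <stdio.h>

#define NUM_IRQ_REGS 13

/* Per-register-block presence mask, as taken from the catalog. */
static unsigned long irq_mask = 0x3ff; /* assume only blocks 0..9 exist */

static bool block_present(unsigned int reg_idx)
{
	return irq_mask & (1ul << reg_idx);
}

int main(void)
{
	for (unsigned int i = 0; i < NUM_IRQ_REGS; i++) {
		if (!block_present(i))
			continue; /* skip clr/en/status access for absent blocks */
		printf("would access IRQ register block %u\n", i);
	}
	return 0;
}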
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index dcd87cda13fe..efe9a5719c6b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -56,8 +56,10 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
-static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
- struct dpu_mdss_cfg *m,
+#define INTF_MUX 0x25C
+
+static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -218,6 +220,30 @@ static void dpu_hw_intf_setup_prg_fetch(
DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
}
+static void dpu_hw_intf_bind_pingpong_blk(
+ struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 mux_cfg;
+
+ if (!intf)
+ return;
+
+ c = &intf->hw;
+
+ mux_cfg = DPU_REG_READ(c, INTF_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
+}
+
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
struct intf_status *s)
@@ -254,16 +280,18 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG))
+ ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
}
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intf *c;
- struct dpu_intf_cfg *cfg;
+ const struct dpu_intf_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index b03acc225c9b..85468981632d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -52,6 +52,8 @@ struct intf_status {
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
* @ get_line_count: reads current vertical line counter
+ * @bind_pingpong_blk: enable/disable the connection with pingpong which will
+ * feed pixels to this interface
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
@@ -68,6 +70,10 @@ struct dpu_hw_intf_ops {
struct intf_status *status);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp);
};
struct dpu_hw_intf {
@@ -92,7 +98,7 @@ struct dpu_hw_intf {
*/
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_intf_destroy(): Destroys INTF driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 5bc39baa746a..37becd43bd54 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -24,8 +24,8 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
-static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
- struct dpu_mdss_cfg *m,
+static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -147,12 +147,13 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)
+ || IS_SC7180_TARGET(m->hwversion))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
@@ -164,10 +165,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mixer *c;
- struct dpu_lm_cfg *cfg;
+ const struct dpu_lm_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 147ace31cfc2..4a6b2de19ef6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -91,7 +91,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
*/
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_lm_destroy(): Destroys layer mixer driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 5dbaba9fd180..d110a40f0e73 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,8 +28,8 @@
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
-static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
- struct dpu_mdss_cfg *m,
+static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -195,10 +195,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_pingpong *c;
- struct dpu_pingpong_cfg *cfg;
+ const struct dpu_pingpong_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 58bdb9279aa8..3d6f46b1db30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -106,7 +106,7 @@ struct dpu_hw_pingpong {
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 4f8b813aab81..82c5dbfdabc7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -132,6 +132,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
+
static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
@@ -657,7 +658,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
- if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
}
@@ -666,7 +668,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
-static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
struct dpu_hw_blk_reg_map *b)
@@ -696,7 +698,7 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
bool is_virtual_pipe)
{
struct dpu_hw_pipe *hw_pipe;
- struct dpu_sspp_cfg *cfg;
+ const struct dpu_sspp_cfg *cfg;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index a3680b482b41..85b018a9b03c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -27,7 +27,8 @@ struct dpu_hw_pipe;
*/
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
- (1UL << DPU_SSPP_SCALER_QSEED3))
+ (1UL << DPU_SSPP_SCALER_QSEED3) | \
+ (1UL << DPU_SSPP_SCALER_QSEED4))
/**
* Component indices
@@ -373,7 +374,7 @@ struct dpu_hw_pipe {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
struct dpu_mdss_cfg *catalog;
- struct dpu_mdp_cfg *mdp;
+ const struct dpu_mdp_cfg *mdp;
/* Pipe */
enum dpu_sspp idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 71b6987bff1e..078afc5f5882 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -7,6 +7,7 @@
#include <linux/clk/clk-conf.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <drm/drm_print.h>
@@ -92,19 +93,12 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
DEV_DBG("%pS->%s: enable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
- if (clk_arry[i].clk) {
- rc = clk_prepare_enable(clk_arry[i].clk);
- if (rc)
- DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
- __builtin_return_address(0),
- __func__,
- clk_arry[i].clk_name, rc);
- } else {
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
- rc = -EPERM;
- }
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
if (rc && i) {
msm_dss_enable_clk(&clk_arry[i - 1],
@@ -118,12 +112,7 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
- if (clk_arry[i].clk)
- clk_disable_unprepare(clk_arry[i].clk);
- else
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
+ clk_disable_unprepare(clk_arry[i].clk);
}
}
@@ -186,6 +175,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
continue;
mp->clk_config[i].rate = rate;
mp->clk_config[i].type = DSS_CLK_PCLK;
+ mp->clk_config[i].max_rate = rate;
}
mp->num_clk = num_clk;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
index 09083e9f06bb..e6b5c772fa3b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -5,7 +5,6 @@
#ifndef __DPU_IO_UTIL_H__
#define __DPU_IO_UTIL_H__
-#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -14,12 +13,6 @@
#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
-struct dss_gpio {
- unsigned int gpio;
- unsigned int value;
- char gpio_name[32];
-};
-
enum dss_clk_type {
DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
DSS_CLK_PCLK,
@@ -34,8 +27,6 @@ struct dss_clk {
};
struct dss_module_power {
- unsigned int num_gpio;
- struct dss_gpio *gpio_config;
unsigned int num_clk;
struct dss_clk *clk_config;
};
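The dpu_io_util.c hunk above drops the per-entry NULL checks around clk_prepare_enable()/clk_disable_unprepare(); the common clock framework already treats a NULL struct clk * as an absent optional clock and returns success, so the "is not available" error paths were dead code. A trivial model of that NULL-tolerant convention (fake_ function name is illustrative, not a kernel API):

#include <stdio.h>

struct clk;

static int fake_clk_prepare_enable(struct clk *clk)
{
	if (!clk)
		return 0; /* optional clock not present: silently succeed */
	/* real code would prepare and enable the clock here */
	return 0;
}

int main(void)
{
	printf("rc = %d\n", fake_clk_prepare_enable(NULL));
	return 0;
}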
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index bb9d44e7bd26..cb08fafb1dc1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -7,10 +7,12 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-#include <drm/drm_crtc.h>
#include <linux/debugfs.h>
-#include <linux/of_irq.h>
#include <linux/dma-buf.h>
+#include <linux/of_irq.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -28,10 +30,6 @@
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
-static const char * const iommu_ports[] = {
- "mdp_0",
-};
-
/*
* To enable overall DRM driver logging
* # echo 0x2 > /sys/module/drm/parameters/debug
@@ -66,16 +64,14 @@ static int _dpu_danger_signal_status(struct seq_file *s,
bool danger_status)
{
struct dpu_kms *kms = (struct dpu_kms *)s->private;
- struct msm_drm_private *priv;
struct dpu_danger_safe_status status;
int i;
- if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ if (!kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
- priv = kms->dev->dev_private;
memset(&status, 0, sizeof(struct dpu_danger_safe_status));
pm_runtime_get_sync(&kms->pdev->dev);
@@ -151,13 +147,7 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
return 0;
dev = dpu_kms->dev;
- if (!dev)
- return 0;
-
priv = dev->dev_private;
- if (!priv)
- return 0;
-
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
@@ -248,11 +238,36 @@ static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
dpu_crtc_vblank(crtc, false);
}
+static void dpu_kms_enable_commit(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+}
+
+static void dpu_kms_disable_commit(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ ktime_t vsync_time;
+
+ if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
+ return vsync_time;
+ }
+
+ return ktime_get();
+}
+
static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct dpu_kms *dpu_kms;
- struct msm_drm_private *priv;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -264,11 +279,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- if (!dev || !dev->dev_private)
- return;
- priv = dev->dev_private;
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
-
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
drm_for_each_encoder_mask(encoder, crtc->dev,
@@ -278,6 +288,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
}
}
+static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
/*
* Override the encoder enable since we need to setup the inline rotator and do
* some crtc magic before enabling any bridge that might be present.
@@ -292,58 +316,23 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder)
if (funcs && funcs->commit)
funcs->commit(encoder);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
drm_for_each_crtc(crtc, dev) {
if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
continue;
trace_dpu_kms_enc_enable(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc, false);
}
}
-static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int i;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- /* If modeset is required, kickoff is run in encoder_enable */
- if (drm_atomic_crtc_needs_modeset(crtc_state))
- continue;
-
- if (crtc->state->active) {
- trace_dpu_kms_commit(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc,
- state->legacy_cursor_update);
- }
- }
-}
-
-static void dpu_kms_complete_commit(struct msm_kms *kms,
- struct drm_atomic_state *old_state)
-{
- struct dpu_kms *dpu_kms;
- struct msm_drm_private *priv;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
- int i;
-
- if (!kms || !old_state)
- return;
- dpu_kms = to_dpu_kms(kms);
-
- if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
- return;
- priv = dpu_kms->dev->dev_private;
DPU_ATRACE_BEGIN("kms_complete_commit");
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
- dpu_crtc_complete_commit(crtc, old_crtc_state);
-
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_crtc_complete_commit(crtc);
DPU_ATRACE_END("kms_complete_commit");
}
@@ -389,6 +378,15 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
}
}
+static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_kms_wait_for_commit_done(kms, crtc);
+}
+
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
@@ -448,16 +446,6 @@ static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
- DPU_ERROR("invalid dev\n");
- return;
- } else if (!dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid dev_private\n");
- return;
- }
priv = dpu_kms->dev->dev_private;
for (i = 0; i < priv->num_crtcs; i++)
@@ -489,12 +477,6 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
- DPU_ERROR("invalid dpu_kms\n");
- return -EINVAL;
- }
-
dev = dpu_kms->dev;
priv = dev->dev_private;
catalog = dpu_kms->catalog;
@@ -574,8 +556,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
int i;
dev = dpu_kms->dev;
- if (!dev)
- return;
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
@@ -686,10 +666,13 @@ static const struct msm_kms_funcs kms_funcs = {
.irq_preinstall = dpu_irq_preinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
+ .enable_commit = dpu_kms_enable_commit,
+ .disable_commit = dpu_kms_disable_commit,
+ .vsync_time = dpu_kms_vsync_time,
.prepare_commit = dpu_kms_prepare_commit,
- .commit = dpu_kms_commit,
+ .flush_commit = dpu_kms_flush_commit,
+ .wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
- .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
.enable_vblank = dpu_kms_enable_vblank,
.disable_vblank = dpu_kms_disable_vblank,
.check_modified_format = dpu_format_check_modified_format,
@@ -711,8 +694,7 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
mmu = dpu_kms->base.aspace->mmu;
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ mmu->funcs->detach(mmu);
msm_gem_address_space_put(dpu_kms->base.aspace);
dpu_kms->base.aspace = NULL;
@@ -738,8 +720,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return PTR_ERR(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
DPU_ERROR("failed to attach iommu %d\n", ret);
msm_gem_address_space_put(aspace);
@@ -789,16 +770,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- if (!dev) {
- DPU_ERROR("invalid device\n");
- return rc;
- }
-
priv = dev->dev_private;
- if (!priv) {
- DPU_ERROR("invalid private data\n");
- return rc;
- }
+
+ atomic_set(&dpu_kms->bandwidth_ref, 0);
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
if (IS_ERR(dpu_kms->mmio)) {
@@ -958,7 +932,7 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev)
struct dpu_kms *dpu_kms;
int irq;
- if (!dev || !dev->dev_private) {
+ if (!dev) {
DPU_ERROR("drm device node invalid\n");
return ERR_PTR(-EINVAL);
}
@@ -1048,11 +1022,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
- if (!ddev) {
- DPU_ERROR("invalid drm_device\n");
- return rc;
- }
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
@@ -1070,11 +1039,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
- if (!ddev) {
- DPU_ERROR("invalid drm_device\n");
- return rc;
- }
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
@@ -1095,6 +1059,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
+ { .compatible = "qcom,sc7180-dpu", },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
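dpu_kms.c above is converted from the old commit/wait_for_crtc_commit_done pair to the finer-grained enable_commit, prepare_commit, flush_commit, wait_flush, complete_commit, disable_commit callbacks, keyed by a crtc_mask instead of a full drm_atomic_state. A condensed standalone model of the ordering the msm atomic helper is expected to drive (stand-in ops table, not the driver's code):

#include <stdio.h>

struct kms_ops {
	void (*enable_commit)(void);
	void (*prepare_commit)(void);
	void (*flush_commit)(unsigned crtc_mask);
	void (*wait_flush)(unsigned crtc_mask);
	void (*complete_commit)(unsigned crtc_mask);
	void (*disable_commit)(void);
};

static void enable_commit(void)         { printf("enable_commit: pm_runtime_get\n"); }
static void prepare_commit(void)        { printf("prepare_commit: per-encoder prep\n"); }
static void flush_commit(unsigned m)    { printf("flush_commit: kickoff crtcs 0x%x\n", m); }
static void wait_flush(unsigned m)      { printf("wait_flush: wait for crtcs 0x%x\n", m); }
static void complete_commit(unsigned m) { printf("complete_commit: crtcs 0x%x\n", m); }
static void disable_commit(void)        { printf("disable_commit: pm_runtime_put\n"); }

int main(void)
{
	struct kms_ops ops = {
		enable_commit, prepare_commit, flush_commit,
		wait_flush, complete_commit, disable_commit,
	};
	unsigned crtc_mask = 0x3; /* say two CRTCs are touched by this commit */

	ops.enable_commit();
	ops.prepare_commit();
	ops.flush_commit(crtc_mask);
	ops.wait_flush(crtc_mask);
	ops.complete_commit(crtc_mask);
	ops.disable_commit();
	return 0;
}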
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 9e40f559c51f..c6169e7df19d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -8,6 +8,8 @@
#ifndef __DPU_KMS_H__
#define __DPU_KMS_H__
+#include <drm/drm_drv.h>
+
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_mmu.h"
@@ -29,7 +31,7 @@
*/
#define DPU_DEBUG(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_KMS)) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
DRM_DEBUG(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
@@ -41,7 +43,7 @@
*/
#define DPU_DEBUG_DRIVER(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
@@ -120,6 +122,14 @@ struct dpu_kms {
struct platform_device *pdev;
bool rpm_enabled;
struct dss_module_power mp;
+
+ /* reference count bandwidth requests, so we know when we can
+ * release bandwidth. Each atomic update increments, and frame-
+ * done event decrements. Additionally, for video mode, the
+ * reference is incremented when crtc is enabled, and decremented
+ * when disabled.
+ */
+ atomic_t bandwidth_ref;
};
struct vsync_info {
@@ -129,10 +139,6 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
-/* get struct msm_kms * from drm_device * */
-#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
- ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
-
/**
* Debugfs functions - extra helper functions for debugfs support
*
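The bandwidth_ref field documented above is a plain atomic refcount: incremented per atomic update (and on CRTC enable for video mode), decremented on frame-done (and on CRTC disable), with the bandwidth vote releasable once it reaches zero. A trivial user-space model of that bookkeeping, illustrative only:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int bandwidth_ref;

static void frame_pending(void)
{
	atomic_fetch_add(&bandwidth_ref, 1);
}

static void frame_done(void)
{
	/* fetch_sub returns the old value; 1 means this was the last user */
	if (atomic_fetch_sub(&bandwidth_ref, 1) == 1)
		printf("last reference dropped: bandwidth can be released\n");
}

int main(void)
{
	frame_pending(); /* atomic update queued */
	frame_pending(); /* video-mode CRTC enabled */
	frame_done();    /* frame-done event */
	frame_done();    /* CRTC disabled */
	return 0;
}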
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 986915bbbc02..29705e773a4b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -3,6 +3,10 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdesc.h>
+#include <linux/irqchip/chained_irq.h>
#include "dpu_kms.h"
#include <linux/interconnect.h>
@@ -22,7 +26,6 @@ struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
unsigned long mmio_len;
- u32 hwversion;
struct dss_module_power mp;
struct dpu_irq_controller irq_controller;
struct icc_path *path[2];
@@ -287,10 +290,6 @@ int dpu_mdss_init(struct drm_device *dev)
dpu_mdss_icc_request_bw(priv->mdss);
- pm_runtime_get_sync(dev->dev);
- dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
- pm_runtime_put_sync(dev->dev);
-
return ret;
irq_error:
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 45bfac9e3af7..3b9c33e694bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -10,8 +10,10 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
-#include <drm/drm_damage_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "dpu_kms.h"
@@ -51,8 +53,13 @@ enum {
R_MAX
};
+/*
+ * Default Preload Values
+ */
#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
#define DEFAULT_REFRESH_RATE 60
@@ -475,8 +482,16 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->src_width[i] /= chroma_subsmpl_h;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
- scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
- scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+
+ if (pdpu->pipe_hw->cap->features &
+ BIT(DPU_SSPP_SCALER_QSEED4)) {
+ scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
+ } else {
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ }
+
pstate->pixel_ext.num_ext_pxls_top[i] =
scale_cfg->src_height[i];
pstate->pixel_ext.num_ext_pxls_left[i] =
@@ -736,7 +751,7 @@ done:
} else {
pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
- };
+ }
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -764,8 +779,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
- struct drm_gem_object *obj;
- struct dma_fence *fence;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -782,10 +795,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* we can use msm_atomic_prepare_fb() instead of doing the
* implicit fence and fb prepare by hand here.
*/
- obj = msm_framebuffer_bo(new_state->fb, 0);
- fence = reservation_object_get_excl_rcu(obj->resv);
- if (fence)
- drm_atomic_set_fence_for_plane(new_state, fence);
+ drm_gem_fb_prepare_fb(plane, new_state);
if (pstate->aspace) {
ret = msm_framebuffer_prepare(new_state->fb,
@@ -861,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
pdpu->pipe_sblk->maxupscale << 16,
true, true);
if (ret) {
- DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
if (!state->visible)
@@ -887,13 +897,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(!(pdpu->features & DPU_SSPP_SCALER) ||
!(pdpu->features & (BIT(DPU_SSPP_CSC)
| BIT(DPU_SSPP_CSC_10BIT))))) {
- DPU_ERROR_PLANE(pdpu,
+ DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
- DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -E2BIG;
@@ -902,19 +912,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(src.x1 & 0x1 || src.y1 & 0x1 ||
drm_rect_width(&src) & 0x1 ||
drm_rect_height(&src) & 0x1)) {
- DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
- DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
- DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
return -E2BIG;
}
@@ -1040,8 +1050,21 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->multirect_mode);
if (pdpu->pipe_hw->ops.setup_format) {
+ unsigned int rotation;
+
src_flags = 0x0;
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ src_flags |= DPU_SSPP_FLIP_LR;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_flags |= DPU_SSPP_FLIP_UD;
+
/* update format */
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
pstate->multirect_index);
@@ -1327,7 +1350,8 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
sblk->scaler_blk.base + cfg->base,
sblk->scaler_blk.len,
@@ -1522,6 +1546,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
drm_plane_enable_fb_damage_clips(plane);
/* success! finalize initialization */
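The plane hunks above expose a rotation property limited to ROTATE_0/ROTATE_180 plus X/Y reflection, fold the requested rotation down with drm_rotation_simplify(), and translate the remaining reflections into the SSPP flip flags. The mapping is just two bit tests; a simplified standalone version, where the macro values are illustrative stand-ins rather than the real DRM/DPU constants:

#include <stdint.h>
#include <stdio.h>

#define REFLECT_X (1u << 0)
#define REFLECT_Y (1u << 1)
#define FLIP_LR   (1u << 0)
#define FLIP_UD   (1u << 1)

static uint32_t rotation_to_src_flags(uint32_t rotation)
{
	uint32_t src_flags = 0;

	if (rotation & REFLECT_X)
		src_flags |= FLIP_LR;
	if (rotation & REFLECT_Y)
		src_flags |= FLIP_UD;
	return src_flags;
}

int main(void)
{
	/* ROTATE_180 simplifies to REFLECT_X | REFLECT_Y for this hardware */
	printf("src_flags = 0x%x\n", rotation_to_src_flags(REFLECT_X | REFLECT_Y));
	return 0;
}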
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index ddc8412731af..23f5b1433b35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -141,11 +141,11 @@ int dpu_rm_destroy(struct dpu_rm *rm)
static int _dpu_rm_hw_blk_create(
struct dpu_rm *rm,
- struct dpu_mdss_cfg *cat,
+ const struct dpu_mdss_cfg *cat,
void __iomem *mmio,
enum dpu_hw_blk_type type,
uint32_t id,
- void *hw_catalog_info)
+ const void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
void *hw;
@@ -215,7 +215,7 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
- struct dpu_lm_cfg *lm = &cat->mixer[i];
+ const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 765484437d11..eecfe9b3199e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -392,7 +392,7 @@ TRACE_EVENT(dpu_enc_rc,
__entry->rc_state = rc_state;
__assign_str(stage_str, stage);
),
- TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
__get_str(stage_str), __entry->drm_id, __entry->sw_event,
__entry->idle_pc_supported ? "true" : "false",
__entry->rc_state)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 8bc3aea7cd86..93ab36bd8df3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include "dpu_vbif.h"
#include "dpu_hw_vbif.h"
@@ -153,10 +154,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
u32 ot_lim;
int ret, i;
- if (!dpu_kms) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
@@ -213,7 +210,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
- if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ if (!params || !dpu_kms->hw_mdp) {
DPU_ERROR("invalid arguments\n");
return;
}
@@ -264,11 +261,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
struct dpu_hw_vbif *vbif;
u32 i, pnd, src;
- if (!dpu_kms) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->ops.clear_errors) {
@@ -286,11 +278,6 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
struct dpu_hw_vbif *vbif;
int i, j;
- if (!dpu_kms) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->cap && vbif->ops.set_mem_type) {
@@ -312,7 +299,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
entry = debugfs_create_dir("vbif", debugfs_root);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
@@ -331,7 +318,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&vbif->default_ot_wr_limit);
for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
- struct dpu_vbif_dynamic_ot_cfg *cfg =
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_rd_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
@@ -345,7 +332,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
}
for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
- struct dpu_vbif_dynamic_ot_cfg *cfg =
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_wr_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 668c41975d74..f34dca5d4532 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -8,6 +8,7 @@
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mdp4_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 772f0753ed38..aaf2f26f8505 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
if (mdp4_dsi_encoder->enabled)
return;
- mdp4_crtc_set_config(encoder->crtc,
+ mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN |
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
index 62fbca302ac2..4d49f3ba6a96 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 7a9ab55b4608..dda05436f716 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -4,6 +4,9 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/delay.h>
+
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_gem.h"
@@ -93,40 +96,51 @@ out:
return ret;
}
-static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp4_enable_commit(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+}
+
+static void mdp4_disable_commit(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_disable(mdp4_kms);
+}
+
+static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
int i;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- mdp4_enable(mdp4_kms);
-
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
}
-static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i;
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
-
- drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
-
- /* see 119ecb7fd */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- drm_crtc_vblank_put(crtc);
- mdp4_disable(mdp4_kms);
+ for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
+ mdp4_crtc_wait_for_commit_done(crtc);
}
-static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms,
- struct drm_crtc *crtc)
+static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
- mdp4_crtc_wait_for_commit_done(crtc);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ /* see 119ecb7fd */
+ for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
+ drm_crtc_vblank_put(crtc);
}
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -143,10 +157,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
}
}
-static const char * const iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
@@ -158,8 +168,7 @@ static void mdp4_destroy(struct msm_kms *kms)
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
@@ -178,9 +187,12 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank,
+ .enable_commit = mdp4_enable_commit,
+ .disable_commit = mdp4_disable_commit,
.prepare_commit = mdp4_prepare_commit,
+ .flush_commit = mdp4_flush_commit,
+ .wait_flush = mdp4_wait_flush,
.complete_commit = mdp4_complete_commit,
- .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
.get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.destroy = mdp4_destroy,
@@ -507,8 +519,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 62e2ebe455ea..871f3514ef69 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -5,6 +5,8 @@
* Author: Vinay Simha <vinaysimha@inforcecomputing.com>
*/
+#include <linux/delay.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index ecef4f5b9f26..c7df71e2fafc 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -5,8 +5,6 @@
* Author: Vinay Simha <vinaysimha@inforcecomputing.com>
*/
-#include <linux/gpio.h>
-
#include "mdp4_kms.h"
struct mdp4_lvds_connector {
@@ -55,7 +53,7 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
if (panel) {
drm_panel_attach(panel, connector);
- ret = panel->funcs->get_modes(panel);
+ ret = drm_panel_get_modes(panel, connector);
drm_panel_detach(panel);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index e3010f023371..da3cc1d8c331 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -5,6 +5,8 @@
*/
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "mdp4_kms.h"
#define DOWN_SCALE_MAX 8
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index dd1daf0e305a..e3c4c250238b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -14,7 +14,7 @@ struct mdp5_cfg_handler {
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
-const struct mdp5_cfg_hw msm8x74v1_config = {
+static const struct mdp5_cfg_hw msm8x74v1_config = {
.name = "msm8x74v1",
.mdp = {
.count = 1,
@@ -98,7 +98,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.max_clk = 200000000,
};
-const struct mdp5_cfg_hw msm8x74v2_config = {
+static const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
@@ -180,7 +180,7 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.max_clk = 200000000,
};
-const struct mdp5_cfg_hw apq8084_config = {
+static const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.mdp = {
.count = 1,
@@ -275,7 +275,7 @@ const struct mdp5_cfg_hw apq8084_config = {
.max_clk = 320000000,
};
-const struct mdp5_cfg_hw msm8x16_config = {
+static const struct mdp5_cfg_hw msm8x16_config = {
.name = "msm8x16",
.mdp = {
.count = 1,
@@ -342,7 +342,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
-const struct mdp5_cfg_hw msm8x94_config = {
+static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
.count = 1,
@@ -437,7 +437,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
.max_clk = 400000000,
};
-const struct mdp5_cfg_hw msm8x96_config = {
+static const struct mdp5_cfg_hw msm8x96_config = {
.name = "msm8x96",
.mdp = {
.count = 1,
@@ -545,7 +545,104 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
-const struct mdp5_cfg_hw msm8917_config = {
+const struct mdp5_cfg_hw msm8x76_config = {
+ .name = "msm8x76",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_DSC |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .smp = {
+ .mmb_count = 10,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
+ [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .pipe_vig = {
+ .count = 2,
+ .base = { 0x04000, 0x06000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x440DC },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x70800, 0x72000 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 360000000,
+};
+
+static const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
.count = 1,
@@ -630,16 +727,129 @@ const struct mdp5_cfg_hw msm8917_config = {
.max_clk = 320000000,
};
-static const struct mdp5_cfg_handler cfg_handlers[] = {
+static const struct mdp5_cfg_hw msm8998_config = {
+ .name = "msm8998",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf7ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000, 0x2a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
};
+static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
+ { .revision = 0, .config = { .hw = &msm8998_config } },
+};
+
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
@@ -668,8 +878,9 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
struct drm_device *dev = mdp5_kms->dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp5_cfg_handler *cfg_handler;
+ const struct mdp5_cfg_handler *cfg_handlers;
struct mdp5_cfg_platform *pconfig;
- int i, ret = 0;
+ int i, ret = 0, num_handlers;
cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
@@ -677,7 +888,16 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
goto fail;
}
- if (major != 1) {
+ switch (major) {
+ case 1:
+ cfg_handlers = cfg_handlers_v1;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v1);
+ break;
+ case 3:
+ cfg_handlers = cfg_handlers_v3;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v3);
+ break;
+ default:
DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
@@ -685,7 +905,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
}
/* only after mdp5_cfg global pointer's init can we access the hw */
- for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+ for (i = 0; i < num_handlers; i++) {
if (cfg_handlers[i].revision != minor)
continue;
mdp5_cfg = cfg_handlers[i].config.hw;
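mdp5_cfg.c above splits the handler table by MDP major version (the existing v1 table for msm8x74 through msm8917, a new v3 table for msm8998) and then matches the minor revision within the selected table. A compact standalone model of that two-level lookup, with an abbreviated table:

#include <stddef.h>
#include <stdio.h>

struct cfg_handler { int revision; const char *name; };

static const struct cfg_handler handlers_v1[] = {
	{ 0, "msm8x74v1" }, { 7, "msm8x96" }, { 11, "msm8x76" }, { 15, "msm8917" },
};
static const struct cfg_handler handlers_v3[] = {
	{ 0, "msm8998" },
};

static const char *lookup(int major, int minor)
{
	const struct cfg_handler *tbl;
	size_t i, n;

	switch (major) {
	case 1: tbl = handlers_v1; n = sizeof(handlers_v1) / sizeof(*tbl); break;
	case 3: tbl = handlers_v3; n = sizeof(handlers_v3) / sizeof(*tbl); break;
	default: return NULL; /* unexpected MDP major version */
	}

	for (i = 0; i < n; i++)
		if (tbl[i].revision == minor)
			return tbl[i].name;
	return NULL;
}

int main(void)
{
	printf("v1.15 -> %s, v3.0 -> %s\n", lookup(1, 15), lookup(3, 0));
	return 0;
}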
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 78d5fa230c16..05cc04f729d6 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -6,10 +6,13 @@
*/
#include <linux/sort.h>
+
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mdp5_kms.h"
@@ -211,7 +214,6 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
- const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
@@ -229,8 +231,6 @@ static void blend_setup(struct drm_crtc *crtc)
u32 val;
#define blender(stage) ((stage) - STAGE0)
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* ctl could be released already when we are shutting down: */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 4804cf40de14..030279d7b64b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -253,7 +253,7 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
u32 blend_cfg;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
- if (unlikely(WARN_ON(!mixer))) {
+ if (WARN_ON(!mixer)) {
DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
@@ -695,7 +695,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
goto fail;
}
- if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
+ if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 58db08a2abfa..9b4c8d92ff32 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index fec6ef1ae3b9..e43ecd4be10a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -5,18 +5,20 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/of_irq.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_vblank.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -140,40 +142,52 @@ static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
return 0;
}
+static void mdp5_enable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+}
+
+static void mdp5_disable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
+}
+
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct device *dev = &mdp5_kms->pdev->dev;
struct mdp5_global_state *global_state;
global_state = mdp5_get_existing_global_state(mdp5_kms);
- pm_runtime_get_sync(dev);
-
if (mdp5_kms->smp)
mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}
-static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct device *dev = &mdp5_kms->pdev->dev;
- struct mdp5_global_state *global_state;
+ struct drm_crtc *crtc;
- drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+ for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
+ mdp5_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct mdp5_global_state *global_state;
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
-
- pm_runtime_put_sync(dev);
-}
-
-static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
- struct drm_crtc *crtc)
-{
- mdp5_crtc_wait_for_commit_done(crtc);
}
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -215,8 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
}
@@ -271,9 +284,12 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
+ .flush_commit = mdp5_flush_commit,
+ .enable_commit = mdp5_enable_commit,
+ .disable_commit = mdp5_disable_commit,
.prepare_commit = mdp5_prepare_commit,
+ .wait_flush = mdp5_wait_flush,
.complete_commit = mdp5_complete_commit,
- .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
@@ -293,6 +309,10 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
mdp5_kms->enable_count--;
WARN_ON(mdp5_kms->enable_count < 0);
+ if (mdp5_kms->tbu_rt_clk)
+ clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+ if (mdp5_kms->tbu_clk)
+ clk_disable_unprepare(mdp5_kms->tbu_clk);
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
@@ -313,6 +333,10 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->core_clk);
if (mdp5_kms->lut_clk)
clk_prepare_enable(mdp5_kms->lut_clk);
+ if (mdp5_kms->tbu_clk)
+ clk_prepare_enable(mdp5_kms->tbu_clk);
+ if (mdp5_kms->tbu_rt_clk)
+ clk_prepare_enable(mdp5_kms->tbu_rt_clk);
return 0;
}
@@ -445,14 +469,11 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct mdp5_cfg_hw *hw_cfg;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
/*
* Construct encoders and modeset initialize connector devices
* for each external display interface.
@@ -663,6 +684,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct msm_kms *kms;
struct msm_gem_address_space *aspace;
int irq, i, ret;
+ struct device *iommu_dev;
/* priv->kms would have been populated by the MDP5 driver */
kms = priv->kms;
@@ -702,7 +724,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu_dev = &pdev->dev;
+ if (!iommu_dev->iommu_fwspec)
+ iommu_dev = iommu_dev->parent;
+
+ aspace = msm_gem_address_space_create(iommu_dev,
config->platform.iommu, "mdp5");
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
@@ -711,8 +737,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
@@ -948,6 +973,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
+ get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
+ get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index d1bf4fdfc815..128866742593 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -53,6 +53,8 @@ struct mdp5_kms {
struct clk *ahb_clk;
struct clk *core_clk;
struct clk *lut_clk;
+ struct clk *tbu_clk;
+ struct clk *tbu_rt_clk;
struct clk *vsync_clk;
/*
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index c7e6725693ea..83423092de2f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -6,7 +6,9 @@
*/
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+
#include "mdp5_kms.h"
struct mdp5_plane {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 776337f85a68..d7fa2c49e741 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -5,6 +5,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/drm_fourcc.h>
#include <drm/drm_util.h>
#include "mdp5_kms.h"
@@ -120,7 +121,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
- u32 fmt = format->base.pixel_format;
uint32_t blkcfg = 0;
nplanes = info->num_planes;
@@ -134,7 +134,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
* them together, writes to SMP using a single client.
*/
if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
- fmt = DRM_FORMAT_NV24;
nplanes = 2;
/* if decimation is enabled, HW decimates less on the
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index 8afb0f9c04bb..5495d8b3f5b9 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -174,7 +174,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type)
{
- if (unlikely(WARN_ON(type >= CSC_MAX)))
+ if (WARN_ON(type >= CSC_MAX))
return NULL;
return &csc_convert[type];
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 0da8a4e428ad..4de771d6f0be 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -9,6 +9,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -177,6 +178,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index b7b7c1a9164a..813d69deb5e8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -66,6 +66,26 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
.num_dsi = 1,
};
+static const char * const dsi_8976_bus_clk_names[] = {
+ "mdp_core", "iface", "bus",
+};
+
+static const struct msm_dsi_config msm8976_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 3,
+ .regs = {
+ {"gdsc", -1, -1},
+ {"vdda", 100000, 100}, /* 1.2 V */
+ {"vddio", 100000, 100}, /* 1.8 V */
+ },
+ },
+ .bus_clk_names = dsi_8976_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
+ .io_start = { 0x1a94000, 0x1a96000 },
+ .num_dsi = 2,
+};
+
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
@@ -133,6 +153,10 @@ static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
+static const char * const dsi_sc7180_bus_clk_names[] = {
+ "iface", "bus",
+};
+
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
@@ -147,7 +171,22 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
-const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+static const struct msm_dsi_config sc7180_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 21800, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sc7180_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
+ .io_start = { 0xae94000 },
+ .num_dsi = 1,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
@@ -158,7 +197,8 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_v2,
};
-const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
@@ -169,7 +209,8 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
-const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
@@ -197,10 +238,15 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
+ &msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
+ &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
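
msm_dsi_cfg_get() (declared above, body not shown in this hunk) selects an entry from dsi_cfg_handlers[] by the controller's major/minor version pair, which is how the new msm8976 and sc7180 entries get picked up. A minimal sketch of such a lookup; the field names major/minor are assumed from the positional initializers:

/* hypothetical helper, illustrating the version-keyed scan */
static const struct msm_dsi_cfg_handler *example_cfg_lookup(u32 major, u32 minor)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dsi_cfg_handlers); i++)
		if (dsi_cfg_handlers[i].major == major &&
		    dsi_cfg_handlers[i].minor == minor)
			return &dsi_cfg_handlers[i];

	return NULL;
}
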
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index e2b7a7dfbe49..217e24a65178 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -17,8 +17,10 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
@@ -34,6 +36,7 @@ struct msm_dsi_config {
};
struct msm_dsi_host_cfg_ops {
+ int (*link_clk_set_rate)(struct msm_dsi_host *msm_host);
int (*link_clk_enable)(struct msm_dsi_host *msm_host);
void (*link_clk_disable)(struct msm_dsi_host *msm_host);
int (*clk_init_ver)(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index aa35d18ab43c..11ae5b8444c3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -5,19 +5,19 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/of_graph.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
+
#include <video/mipi_display.h>
#include "dsi.h"
@@ -26,6 +26,8 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@@ -421,15 +423,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
}
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
- if (!msm_host->byte_clk_src) {
- ret = -ENODEV;
+ if (IS_ERR(msm_host->byte_clk_src)) {
+ ret = PTR_ERR(msm_host->byte_clk_src);
pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
- if (!msm_host->pixel_clk_src) {
- ret = -ENODEV;
+ if (IS_ERR(msm_host->pixel_clk_src)) {
+ ret = PTR_ERR(msm_host->pixel_clk_src);
pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
@@ -503,7 +505,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -513,13 +515,13 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
if (msm_host->byte_intf_clk) {
@@ -528,10 +530,18 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
- goto error;
+ return ret;
}
}
+ return 0;
+}
+
+
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -571,7 +581,7 @@ error:
return ret;
}
-int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -582,27 +592,34 @@ int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
+ return 0;
+}
+
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
@@ -816,7 +833,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- u32 data = 0;
+ u32 data = 0, lane_ctrl = 0;
if (!enable) {
dsi_write(msm_host, REG_DSI_CTRL, 0);
@@ -904,9 +921,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
- if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
+ lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
dsi_write(msm_host, REG_DSI_LANE_CTRL,
- DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ }
data |= DSI_CTRL_ENABLE;
@@ -986,7 +1005,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
@@ -1291,14 +1310,13 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
u8 *buf, int rx_byte, int pkt_size)
{
- u32 *lp, *temp, data;
+ u32 *temp, data;
int i, j = 0, cnt;
u32 read_cnt;
u8 reg[16];
int repeated_bytes = 0;
int buf_offset = buf - msm_host->rx_buf;
- lp = (u32 *)buf;
temp = (u32 *)reg;
cnt = (rx_byte + 3) >> 2;
if (cnt > 4)
@@ -1396,7 +1414,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);
@@ -1995,6 +2013,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
+ cfg_hnd->ops->link_clk_set_rate(msm_host);
cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -2343,7 +2362,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = cfg_hnd->ops->link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
+ if (!ret)
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
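
With rate programming split out of link_clk_enable, callers set the byte/pixel rates first and only enable the link clocks if that succeeded, as the msm_dsi_host_power_on() hunk above shows. A condensed sketch of that caller-side ordering (the helper name is hypothetical):

/* 'ops' stands in for cfg_hnd->ops */
static int example_link_clk_on(const struct msm_dsi_host_cfg_ops *ops,
			       struct msm_dsi_host *msm_host)
{
	int ret;

	ret = ops->link_clk_set_rate(msm_host);
	if (!ret)
		ret = ops->link_clk_enable(msm_host);

	return ret;
}
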
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 271aa7bbca92..104115d112eb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -329,7 +329,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
* attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
- num = drm_panel_get_modes(panel);
+ num = drm_panel_get_modes(panel, connector);
if (!num)
return 0;
@@ -432,20 +432,8 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
- if (panel) {
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id,
- ret);
- goto panel_en_fail;
- }
- }
-
return;
-panel_en_fail:
- if (is_dual_dsi && msm_dsi1)
- msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
@@ -464,12 +452,51 @@ phy_en_fail:
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
- DBG("");
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is slave-DSI in case of dual DSI */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (panel) {
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+ ret);
+ }
+ }
}
static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
{
- DBG("");
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is slave-DSI in case of dual DSI */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (panel) {
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+ ret);
+ }
}
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
@@ -495,13 +522,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
- if (panel) {
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
- ret);
- }
-
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 4097eca1b3ef..b0cfa67d2a57 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -145,7 +145,7 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
- s32 ui, ui_x8, lpx;
+ s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
@@ -175,7 +175,6 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
- lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
tmin = max_t(s32, temp, 0);
@@ -262,7 +261,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
- s32 ui, ui_x8, lpx;
+ s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
@@ -284,7 +283,6 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
- lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
tmin = max_t(s32, temp, 0);
@@ -396,8 +394,12 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
+ if (ret != -EPROBE_DEFER) {
+ DRM_DEV_ERROR(dev,
+ "%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ }
+
return ret;
}
@@ -481,6 +483,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
.data = &dsi_phy_28nm_hpm_cfgs },
+ { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
+ .data = &dsi_phy_28nm_hpm_famb_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-lp",
.data = &dsi_phy_28nm_lp_cfgs },
#endif
@@ -584,10 +588,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
}
ret = dsi_phy_regulator_init(phy);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
+ if (ret)
goto fail;
- }
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c4069ce6afe6..24b294ed3059 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -40,6 +40,7 @@ struct msm_dsi_phy_cfg {
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index c3a61876470f..1594f1422372 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -3,6 +3,8 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
+#include <linux/delay.h>
+
#include "dsi_phy.h"
#include "dsi.xml.h"
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index b3f678f6c2aa..c3c580cfd8b1 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -39,15 +39,10 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
- if (!enable) {
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
- return;
- }
-
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
@@ -56,6 +51,39 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+}
+
+static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+
+ if (phy->cfg->type == MSM_DSI_PHY_28NM_LP)
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
+ else
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!enable) {
+ dsi_phy_write(phy->reg_base +
+ REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode)
+ dsi_28nm_phy_regulator_enable_ldo(phy);
+ else
+ dsi_28nm_phy_regulator_enable_dcdc(phy);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
@@ -77,8 +105,6 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
dsi_28nm_phy_regulator_ctrl(phy, true);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
dsi_28nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
@@ -142,6 +168,24 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.num_dsi_phy = 2,
};
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
+ .type = MSM_DSI_PHY_28NM_HPM,
+ .src_pll_truthtable = { {true, true}, {false, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
+ },
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.type = MSM_DSI_PHY_28NM_LP,
.src_pll_truthtable = { {true, true}, {true, true} },
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index a198f51d47b4..f22583353957 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -3,6 +3,8 @@
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
+#include <linux/delay.h>
+
#include "dsi_phy.h"
#include "dsi.xml.h"
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 118bebe53de3..c6a3623f905d 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -6,8 +6,8 @@
#ifndef __DSI_PLL_H__
#define __DSI_PLL_H__
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include "dsi.h"
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 8f6100db90ed..1c894548dd72 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -751,9 +751,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
hw = clk_hw_register_mux(dev, clk_name,
- (const char *[]){
+ ((const char *[]){
parent, parent2, parent3, parent4
- }, 4, 0, pll_10nm->phy_cmn_mmio +
+ }), 4, 0, pll_10nm->phy_cmn_mmio +
REG_DSI_10nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
if (IS_ERR(hw)) {
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 8c99e01ae332..6dffd7f4a99b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -554,9 +554,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
clks[num++] = clk_register_mux(dev, clk_name,
- (const char *[]){
+ ((const char *[]){
parent1, parent2
- }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+ }), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0f312ac5b624..ad4e963ccd9b 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,7 +178,9 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- encoder->bridge = edp->bridge;
+ ret = drm_bridge_attach(encoder, edp->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index f2c17858a703..eb34243dad53 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index 2950bba4aca9..b65b5cc2dba2 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -55,8 +55,14 @@ static void edp_bridge_mode_set(struct drm_bridge *bridge,
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if ((connector->encoder != NULL) &&
- (connector->encoder->bridge == bridge)) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_bridge *first_bridge;
+
+ if (!connector->encoder)
+ continue;
+
+ first_bridge = drm_bridge_chain_get_first_bridge(encoder);
+ if (bridge == first_bridge) {
msm_edp_ctrl_timing_cfg(edp->ctrl,
adjusted_mode, &connector->display_info);
break;
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7f3dd3ffe2c9..0d9657cc70db 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -89,7 +89,6 @@ struct edp_ctrl {
/* edid raw data */
struct edid *edid;
- struct drm_dp_link dp_link;
struct drm_dp_aux *drm_aux;
/* dpcd raw data */
@@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
u32 prate;
u32 lrate;
u32 bpp;
- u8 max_lane = ctrl->dp_link.num_lanes;
+ u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
u8 lane;
prate = ctrl->pixel_rate;
@@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
* By default, use the maximum link rate and minimum lane count,
* so that we can do rate down shift during link training.
*/
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
prate *= bpp;
prate /= 8; /* in kByte */
@@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl)
data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
- if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
depth = EDP_6BIT;
@@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
rate = ctrl->link_rate;
lane = ctrl->lane_cnt;
- max_lane = ctrl->dp_link.num_lanes;
+ max_lane = drm_dp_max_lane_count(ctrl->dpcd);
bpp = ctrl->color_depth * 3;
prate = ctrl->pixel_rate;
@@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
static int edp_do_link_train(struct edp_ctrl *ctrl)
{
+ u8 values[2];
int ret;
- struct drm_dp_link dp_link;
DBG("");
/*
* Set the current link rate and lane cnt to panel. They may have been
* adjusted and the values are different from them in DPCD CAP
*/
- dp_link.num_lanes = ctrl->lane_cnt;
- dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
- dp_link.capabilities = ctrl->dp_link.capabilities;
- if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
+ values[0] = ctrl->lane_cnt;
+ values[1] = ctrl->link_rate;
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
+ sizeof(values)) < 0)
return EDP_TRAIN_FAIL;
ctrl->v_level = 0; /* start from default level */
@@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work)
{
struct edp_ctrl *ctrl = container_of(
work, struct edp_ctrl, on_work);
+ u8 value;
int ret;
mutex_lock(&ctrl->dev_mutex);
@@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work)
edp_ctrl_link_enable(ctrl, 1);
edp_ctrl_irq_enable(ctrl, 1);
- ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link);
- if (ret)
- goto fail;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret < 0)
+ goto fail;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+ }
ctrl->power_on = true;
@@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work)
edp_state_ctrl(ctrl, 0);
- drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link);
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ u8 value;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret > 0) {
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ }
+ }
edp_ctrl_irq_enable(ctrl, 0);
@@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
edp_ctrl_irq_enable(ctrl, 1);
}
- ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link);
- if (ret) {
- pr_err("%s: read dpcd cap failed, %d\n", __func__, ret);
- goto disable_ret;
- }
-
/* Initialize link rate as panel max link rate */
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
if (!ctrl->edid) {
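
The removed drm_dp_link_* helpers are replaced throughout by raw DPCD accesses. A sketch condensing the DP_SET_POWER sequence used by the on/off workers above into one helper (the helper name is hypothetical):

#include <drm/drm_dp_helper.h>

static int example_edp_set_power(struct drm_dp_aux *aux,
				 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				 u8 state)
{
	u8 value;
	int ret;

	/* DP_SET_POWER only exists on DPCD v1.1 and later */
	if (dpcd[DP_DPCD_REV] < 0x11)
		return 0;

	ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
	if (ret < 0)
		return ret;

	value &= ~DP_SET_POWER_MASK;
	value |= state;		/* DP_SET_POWER_D0 or DP_SET_POWER_D3 */

	ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
	return ret < 0 ? ret : 0;
}
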
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 0e4217be3f00..1a9b6289637d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,7 +327,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- encoder->bridge = hdmi->bridge;
+ ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
@@ -425,38 +427,6 @@ static const struct {
{ "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" },
};
-static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
-{
- int gpio;
-
- /* try with the gpio names as in the table (downstream bindings) */
- gpio = of_get_named_gpio(of_node, name, 0);
- if (gpio < 0) {
- char name2[32];
-
- /* try with the gpio names as in the upstream bindings */
- snprintf(name2, sizeof(name2), "%s-gpios", name);
- gpio = of_get_named_gpio(of_node, name2, 0);
- if (gpio < 0) {
- char name3[32];
-
- /*
- * try again after stripping out the "qcom,hdmi-tx"
- * prefix. This is mainly to match "hpd-gpios" used
- * in the upstream bindings
- */
- if (sscanf(name2, "qcom,hdmi-tx-%s", name3))
- gpio = of_get_named_gpio(of_node, name3, 0);
- }
-
- if (gpio < 0) {
- DBG("failed to get gpio: %s (%d)", name, gpio);
- gpio = -1;
- }
- }
- return gpio;
-}
-
/*
* HDMI audio codec callbacks
*/
@@ -582,11 +552,39 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
- hdmi_cfg->gpios[i].num = msm_hdmi_get_gpio(of_node,
- msm_hdmi_gpio_pdata[i].name);
+ const char *name = msm_hdmi_gpio_pdata[i].name;
+ struct gpio_desc *gpiod;
+
+ /*
+ * We are fetching the GPIO lines "as is" since the connector
+ * code is enabling and disabling the lines. Until that point
+ * the power-on default value will be kept.
+ */
+ gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS);
+ /* This will catch e.g. -EPROBE_DEFER */
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (!gpiod) {
+ /* Try a second time, stripping down the name */
+ char name3[32];
+
+ /*
+ * Try again after stripping out the "qcom,hdmi-tx"
+ * prefix. This is mainly to match "hpd-gpios" used
+ * in the upstream bindings.
+ */
+ if (sscanf(name, "qcom,hdmi-tx-%s", name3))
+ gpiod = devm_gpiod_get_optional(dev, name3, GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (!gpiod)
+ DBG("failed to get gpio: %s", name);
+ }
+ hdmi_cfg->gpios[i].gpiod = gpiod;
+ if (gpiod)
+ gpiod_set_consumer_name(gpiod, msm_hdmi_gpio_pdata[i].label);
hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output;
hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value;
- hdmi_cfg->gpios[i].label = msm_hdmi_gpio_pdata[i].label;
}
dev->platform_data = hdmi_cfg;
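
The integer GPIO numbers and labels are replaced by GPIO descriptors fetched with devm_gpiod_get_optional(). A sketch of the fetch-and-name pattern used above, without the stripped-prefix fallback (the helper name is hypothetical):

#include <linux/gpio/consumer.h>

static struct gpio_desc *example_get_hdmi_gpio(struct device *dev,
					       const char *name,
					       const char *label)
{
	struct gpio_desc *gpiod;

	/* GPIOD_ASIS leaves the line at its power-on default until the
	 * connector code drives it */
	gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS);
	if (IS_ERR(gpiod))
		return gpiod;		/* e.g. -EPROBE_DEFER */

	if (gpiod)
		gpiod_set_consumer_name(gpiod, label);

	return gpiod;
}
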
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 982865866a29..d0b84f0abee1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -11,8 +11,11 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
+#include <drm/drm_bridge.h>
+
#include "msm_drv.h"
#include "hdmi.xml.h"
@@ -22,10 +25,9 @@ struct hdmi_phy;
struct hdmi_platform_config;
struct hdmi_gpio_data {
- int num;
+ struct gpio_desc *gpiod;
bool output;
int value;
- const char *label;
};
struct hdmi_audio {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index c8dbd82854c2..ba81338a9bf8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/delay.h>
+
#include "hdmi.h"
struct hdmi_bridge {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 07b4cb877d82..58707a1f3878 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -4,7 +4,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
-#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include "msm_kms.h"
@@ -68,30 +69,21 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi)
static int gpio_config(struct hdmi *hdmi, bool on)
{
- struct device *dev = &hdmi->pdev->dev;
const struct hdmi_platform_config *config = hdmi->config;
- int ret, i;
+ int i;
if (on) {
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
- if (gpio.num != -1) {
- ret = gpio_request(gpio.num, gpio.label);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "'%s'(%d) gpio_request failed: %d\n",
- gpio.label, gpio.num, ret);
- goto err;
- }
-
+ if (gpio.gpiod) {
if (gpio.output) {
- gpio_direction_output(gpio.num,
- gpio.value);
+ gpiod_direction_output(gpio.gpiod,
+ gpio.value);
} else {
- gpio_direction_input(gpio.num);
- gpio_set_value_cansleep(gpio.num,
- gpio.value);
+ gpiod_direction_input(gpio.gpiod);
+ gpiod_set_value_cansleep(gpio.gpiod,
+ gpio.value);
}
}
}
@@ -101,29 +93,20 @@ static int gpio_config(struct hdmi *hdmi, bool on)
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
- if (gpio.num == -1)
+ if (!gpio.gpiod)
continue;
if (gpio.output) {
int value = gpio.value ? 0 : 1;
- gpio_set_value_cansleep(gpio.num, value);
+ gpiod_set_value_cansleep(gpio.gpiod, value);
}
-
- gpio_free(gpio.num);
- };
+ }
DBG("gpio off");
}
return 0;
-err:
- while (i--) {
- if (config->gpios[i].num != -1)
- gpio_free(config->gpios[i].num);
- }
-
- return ret;
}
static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
@@ -311,7 +294,7 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
const struct hdmi_platform_config *config = hdmi->config;
struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
- return gpio_get_value(hpd_gpio.num) ?
+ return gpiod_get_value(hpd_gpio.gpiod) ?
connector_status_connected :
connector_status_disconnected;
}
@@ -330,7 +313,7 @@ static enum drm_connector_status hdmi_connector_detect(
* some platforms may not have hpd gpio. Rely only on the status
* provided by REG_HDMI_HPD_INT_STATUS in this case.
*/
- if (hpd_gpio.num == -1)
+ if (!hpd_gpio.gpiod)
return detect_reg(hdmi);
do {
@@ -450,8 +433,10 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector = &hdmi_connector->base;
- drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(hdmi->dev, connector,
+ &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->i2c);
drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 1697e61f9c2f..8a38d4b95102 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -29,8 +29,12 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
- cfg->reg_names[i], ret);
+ if (ret != -EPROBE_DEFER) {
+ DRM_DEV_ERROR(dev,
+ "failed to get phy regulator: %s (%d)\n",
+ cfg->reg_names[i], ret);
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index fe82ad38aa7a..a8f3b2cbfdc5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include "hdmi.h"
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 1acc33ce9d52..95f2928cb2cb 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/delay.h>
+
#include "hdmi.h"
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index e24a11d91945..562dfac67792 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -6,6 +6,8 @@
*/
#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
#include "hdmi.h"
struct hdmi_pll_8960 {
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index dd16babdd8c0..5ccfad794c6a 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -5,50 +5,138 @@
*/
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
-static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+int msm_atomic_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct msm_drm_private *priv = old_state->dev->dev_private;
+ struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
- int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->active)
- continue;
+ if (!new_state->fb)
+ return 0;
+
+ drm_gem_fb_prepare_fb(plane, new_state);
+
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace);
+}
+
+static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
+{
+ unsigned crtc_mask = BIT(crtc_idx);
- if (drm_crtc_vblank_get(crtc))
- continue;
+ trace_msm_atomic_async_commit_start(crtc_mask);
- kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+ mutex_lock(&kms->commit_lock);
- drm_crtc_vblank_put(crtc);
+ if (!(kms->pending_crtc_mask & crtc_mask)) {
+ mutex_unlock(&kms->commit_lock);
+ goto out;
}
+
+ kms->pending_crtc_mask &= ~crtc_mask;
+
+ kms->funcs->enable_commit(kms);
+
+ /*
+ * Flush hardware updates:
+ */
+ trace_msm_atomic_flush_commit(crtc_mask);
+ kms->funcs->flush_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+
+ /*
+ * Wait for flush to complete:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+ kms->funcs->complete_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+ kms->funcs->disable_commit(kms);
+
+out:
+ trace_msm_atomic_async_commit_finish(crtc_mask);
}
-int msm_atomic_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- struct drm_gem_object *obj;
- struct dma_fence *fence;
+ struct msm_pending_timer *timer = container_of(t,
+ struct msm_pending_timer, timer);
+ struct msm_drm_private *priv = timer->kms->dev->dev_private;
- if (!new_state->fb)
- return 0;
+ queue_work(priv->wq, &timer->work);
- obj = msm_framebuffer_bo(new_state->fb, 0);
- fence = reservation_object_get_excl_rcu(obj->resv);
+ return HRTIMER_NORESTART;
+}
- drm_atomic_set_fence_for_plane(new_state, fence);
+static void msm_atomic_pending_work(struct work_struct *work)
+{
+ struct msm_pending_timer *timer = container_of(work,
+ struct msm_pending_timer, work);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace);
+ msm_atomic_async_commit(timer->kms, timer->crtc_idx);
+}
+
+void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
+ struct msm_kms *kms, int crtc_idx)
+{
+ timer->kms = kms;
+ timer->crtc_idx = crtc_idx;
+ hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ timer->timer.function = msm_atomic_pending_timer;
+ INIT_WORK(&timer->work, msm_atomic_pending_work);
+}
+
+static bool can_do_async(struct drm_atomic_state *state,
+ struct drm_crtc **async_crtc)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, num_crtcs = 0;
+
+ if (!(state->legacy_cursor_update || state->async_update))
+ return false;
+
+ /* any connector change means slow path: */
+ for_each_new_connector_in_state(state, connector, connector_state, i)
+ return false;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+ if (++num_crtcs > 1)
+ return false;
+ *async_crtc = crtc;
+ }
+
+ return true;
+}
+
+/* Get bitmask of crtcs that will need to be flushed. The bitmask
+ * can be used with for_each_crtc_mask() iterator, to iterate
+ * affected crtcs without needing to preserve the atomic state.
+ */
+static unsigned get_crtc_mask(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned i, mask = 0;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
+ mask |= drm_crtc_mask(crtc);
+
+ return mask;
}
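
get_crtc_mask() folds every CRTC touched by the atomic state into a bitmask via drm_crtc_mask(). One way the for_each_crtc_mask() iterator mentioned in the comment can be expressed with core DRM helpers; this is a sketch only, the real definition lives in msm_kms.h and may differ:

#include <drm/drm_crtc.h>

static void example_for_each_masked_crtc(struct drm_device *dev,
					 unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, dev) {
		if (!(drm_crtc_mask(crtc) & crtc_mask))
			continue;
		/* operate on 'crtc' here */
	}
}
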
void msm_atomic_commit_tail(struct drm_atomic_state *state)
@@ -56,26 +144,104 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct drm_crtc *async_crtc = NULL;
+ unsigned crtc_mask = get_crtc_mask(state);
+ bool async = kms->funcs->vsync_time &&
+ can_do_async(state, &async_crtc);
+
+ trace_msm_atomic_commit_tail_start(async, crtc_mask);
+
+ kms->funcs->enable_commit(kms);
+ /*
+ * Ensure any previous (potentially async) commit has
+ * completed:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+
+ /*
+ * Now that there is no in-progress flush, prepare the
+ * current update:
+ */
kms->funcs->prepare_commit(kms, state);
+ /*
+ * Push atomic updates down to hardware:
+ */
drm_atomic_helper_commit_modeset_disables(dev, state);
-
drm_atomic_helper_commit_planes(dev, state, 0);
-
drm_atomic_helper_commit_modeset_enables(dev, state);
- if (kms->funcs->commit) {
- DRM_DEBUG_ATOMIC("triggering commit\n");
- kms->funcs->commit(kms, state);
- }
+ if (async) {
+ struct msm_pending_timer *timer =
+ &kms->pending_timers[drm_crtc_index(async_crtc)];
- if (!state->legacy_cursor_update)
- msm_atomic_wait_for_commit_done(dev, state);
+ /* async updates are limited to single-crtc updates: */
+ WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));
- kms->funcs->complete_commit(kms, state);
+ /*
+ * Start timer if we don't already have an update pending
+ * on this crtc:
+ */
+ if (!(kms->pending_crtc_mask & crtc_mask)) {
+ ktime_t vsync_time, wakeup_time;
- drm_atomic_helper_commit_hw_done(state);
+ kms->pending_crtc_mask |= crtc_mask;
+
+ vsync_time = kms->funcs->vsync_time(kms, async_crtc);
+ wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
+
+ hrtimer_start(&timer->timer, wakeup_time,
+ HRTIMER_MODE_ABS);
+ }
+ kms->funcs->disable_commit(kms);
+ mutex_unlock(&kms->commit_lock);
+
+ /*
+ * At this point, from drm core's perspective, we
+ * are done with the atomic update, so we can just
+ * go ahead and signal that it is done:
+ */
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ trace_msm_atomic_commit_tail_finish(async, crtc_mask);
+
+ return;
+ }
+
+ /*
+ * If there is any async flush pending on updated crtcs, fold
+ * them into the current flush.
+ */
+ kms->pending_crtc_mask &= ~crtc_mask;
+
+ /*
+ * Flush hardware updates:
+ */
+ trace_msm_atomic_flush_commit(crtc_mask);
+ kms->funcs->flush_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+
+ /*
+ * Wait for flush to complete:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+ kms->funcs->complete_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+ kms->funcs->disable_commit(kms);
+
+ drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
+
+ trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}
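
In the async path above, the pending-flush timer is armed 1 ms before the vsync time reported by the kms vsync_time hook, so the deferred flush runs shortly before scanout. A sketch of that wakeup-time arithmetic:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void example_arm_pending_timer(struct hrtimer *timer, ktime_t vsync_time)
{
	/* wake up 1 ms ahead of the expected vsync */
	ktime_t wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

	hrtimer_start(timer, wakeup_time, HRTIMER_MODE_ABS);
}
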
diff --git a/drivers/gpu/drm/msm/msm_atomic_trace.h b/drivers/gpu/drm/msm/msm_atomic_trace.h
new file mode 100644
index 000000000000..b4ca0ed3b4a3
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic_trace.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm_atomic
+#define TRACE_INCLUDE_FILE msm_atomic_trace
+
+TRACE_EVENT(msm_atomic_commit_tail_start,
+ TP_PROTO(bool async, unsigned crtc_mask),
+ TP_ARGS(async, crtc_mask),
+ TP_STRUCT__entry(
+ __field(bool, async)
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->async = async;
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("async=%d crtc_mask=%x",
+ __entry->async, __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_commit_tail_finish,
+ TP_PROTO(bool async, unsigned crtc_mask),
+ TP_ARGS(async, crtc_mask),
+ TP_STRUCT__entry(
+ __field(bool, async)
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->async = async;
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("async=%d crtc_mask=%x",
+ __entry->async, __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_async_commit_start,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_async_commit_finish,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_wait_flush_start,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_wait_flush_finish,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_flush_commit,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_atomic_tracepoints.c b/drivers/gpu/drm/msm/msm_atomic_tracepoints.c
new file mode 100644
index 000000000000..011dc881f391
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic_tracepoints.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include "msm_atomic_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index a0a8df591e93..1c74381a4fc9 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -5,7 +5,12 @@
*/
#ifdef CONFIG_DEBUG_FS
+
#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
@@ -42,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- int ret;
-
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&show_priv->dev->struct_mutex);
gpu->funcs->gpu_state_put(show_priv->state);
mutex_unlock(&show_priv->dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c356f5ccf253..e4b750b0c2d3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -5,9 +5,18 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-mapping.h>
#include <linux/kthread.h>
+#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_prime.h>
#include <drm/drm_of.h>
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_debugfs.h"
@@ -17,7 +26,6 @@
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"
-
/*
* MSM driver version:
* - 1.0.0 - initial interface
@@ -75,46 +83,6 @@ module_param(modeset, bool, 0600);
* Util/helpers:
*/
-int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
-{
- struct property *prop;
- const char *name;
- struct clk_bulk_data *local;
- int i = 0, ret, count;
-
- count = of_property_count_strings(dev->of_node, "clock-names");
- if (count < 1)
- return 0;
-
- local = devm_kcalloc(dev, sizeof(struct clk_bulk_data *),
- count, GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
- local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!local[i].id) {
- devm_kfree(dev, local);
- return -ENOMEM;
- }
-
- i++;
- }
-
- ret = devm_clk_bulk_get(dev, count, local);
-
- if (ret) {
- for (i = 0; i < count; i++)
- devm_kfree(dev, (void *) local[i].id);
- devm_kfree(dev, local);
-
- return ret;
- }
-
- *bulk = local;
- return count;
-}
-
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name)
{
@@ -170,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
size = resource_size(res);
- ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
@@ -473,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
@@ -505,6 +481,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ddev->mode_config.normalize_zpos = true;
if (kms) {
+ kms->dev = ddev;
ret = kms->funcs->hw_init(kms);
if (ret) {
DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
@@ -984,17 +961,17 @@ static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
}
static const struct drm_ioctl_desc msm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1017,7 +994,6 @@ static const struct file_operations fops = {
static struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
- DRIVER_PRIME |
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET,
@@ -1036,8 +1012,6 @@ static struct drm_driver msm_driver = {
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
@@ -1226,7 +1200,8 @@ static int add_display_components(struct device *dev,
* the interfaces to our components list.
*/
if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
- of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
@@ -1351,6 +1326,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
+ { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ee7b512dc158..740bf7c70d8f 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -25,7 +25,6 @@
#include <linux/sizes.h>
#include <linux/kthread.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -222,8 +221,12 @@ struct msm_format {
uint32_t pixel_format;
};
+struct msm_pending_timer;
+
int msm_atomic_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
+void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
+ struct msm_kms *kms, int crtc_idx);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
@@ -399,7 +402,6 @@ static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
-int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name);
@@ -452,8 +454,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
remaining_jiffies = 0;
} else {
ktime_t rem = ktime_sub(*timeout, now);
- struct timespec ts = ktime_to_timespec(rem);
- remaining_jiffies = timespec_to_jiffies(&ts);
+ remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
return remaining_jiffies;
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 5bcd5e502a6b..37674e886e99 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -6,6 +6,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 2429d5e6ce9f..db48867df47d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -6,6 +6,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -25,7 +26,7 @@ struct msm_fbdev {
struct drm_framebuffer *fb;
};
-static struct fb_ops msm_fb_ops = {
+static const struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
@@ -169,6 +170,9 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
if (ret)
goto fini;
+ /* the fw fb could be anywhere in memory */
+ drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
+
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8cf6362e64bf..5a6a79fbc9d6 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -9,6 +9,8 @@
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
+#include <drm/drm_prime.h>
+
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
@@ -50,7 +52,7 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev)) {
+ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
} else {
@@ -63,7 +65,7 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev)) {
+ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
} else {
@@ -700,13 +702,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
int i, ret;
- fobj = reservation_object_get_list(obj->resv);
+ fobj = dma_resv_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = reservation_object_get_excl(obj->resv);
+ fence = dma_resv_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -720,7 +722,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj->resv));
+ dma_resv_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
@@ -738,9 +740,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
- reservation_object_add_excl_fence(obj->resv, fence);
+ dma_resv_add_excl_fence(obj->resv, fence);
else
- reservation_object_add_shared_fence(obj->resv, fence);
+ dma_resv_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -765,7 +767,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = reservation_object_wait_timeout_rcu(obj->resv, write,
+ ret = dma_resv_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -797,8 +799,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object *robj = obj->resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj = obj->resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -975,7 +977,6 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
- struct reservation_object *resv,
struct drm_gem_object **obj,
bool struct_mutex_locked)
{
@@ -1002,9 +1003,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv)
- msm_obj->base.resv = resv;
-
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
@@ -1046,7 +1044,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+ ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
if (ret)
goto fail;
@@ -1123,7 +1121,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8cfcf8f09e3e..9e0953c2b7ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -8,7 +8,7 @@
#define __MSM_GEM_H__
#include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 5d64e0671f7a..d7c8948427fe 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-buf.h>
+
+#include <drm/drm_prime.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
-#include <linux/dma-buf.h>
-
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 348f8c2be806..385d4965a8d0 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -4,7 +4,11 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/file.h>
#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
@@ -26,8 +30,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
- ((u64)nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = struct_size(submit, bos, nr_bos) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -50,7 +54,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
return submit;
}
@@ -154,7 +157,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
- ww_mutex_unlock(&msm_obj->base.resv->lock);
+ dma_resv_unlock(msm_obj->base.resv);
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -177,8 +180,8 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
- &submit->ticket);
+ ret = dma_resv_lock_interruptible(msm_obj->base.resv,
+ &submit->ticket);
if (ret)
goto fail;
submit->bos[i].flags |= BO_LOCKED;
@@ -199,8 +202,8 @@ fail:
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
- &submit->ticket);
+ ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
+ &submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
@@ -225,7 +228,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->base.resv,
+ ret = dma_resv_reserve_shared(msm_obj->base.resv,
1);
if (ret)
return ret;
@@ -386,8 +389,6 @@ static void submit_cleanup(struct msm_gem_submit *submit)
list_del_init(&msm_obj->submit_entry);
drm_gem_object_put(&msm_obj->base);
}
-
- ww_acquire_fini(&submit->ticket);
}
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -404,6 +405,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
struct pid *pid = get_pid(task_pid(current));
+ bool has_ww_ticket = false;
unsigned i;
int ret, submitid;
if (!gpu)
@@ -485,6 +487,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
+ /* copy_*_user while holding a ww ticket upsets lockdep */
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ has_ww_ticket = true;
ret = submit_lock_objects(submit);
if (ret)
goto out;
@@ -584,6 +589,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
submit_cleanup(submit);
+ if (has_ww_ticket)
+ ww_acquire_fini(&submit->ticket);
if (ret)
msm_gem_submit_free(submit);
out_unlock:
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4edb874548b3..18f3a5c53ffb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -16,6 +16,7 @@
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>
+#include <linux/sched/task.h>
/*
* Power Management:
@@ -95,7 +96,8 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
*/
gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
- &msm_devfreq_profile, "simple_ondemand", NULL);
+ &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
if (IS_ERR(gpu->devfreq.devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
@@ -783,7 +785,7 @@ static irqreturn_t irq_handler(int irq, void *data)
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
- int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
+ int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
if (ret < 1) {
gpu->nr_clocks = 0;
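get_clocks() now uses devm_clk_bulk_get_all(), which reads every clock listed in the device tree node and allocates the clk_bulk_data array on the caller's behalf. A hedged sketch of the usual call pattern (example_get_clocks() is illustrative, not the msm function):

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_clocks(struct device *dev, struct clk_bulk_data **clks)
{
	int count = devm_clk_bulk_get_all(dev, clks);

	/* A negative value is an error, 0 means the node lists no clocks;
	 * the msm code collapses both cases to "no clocks". */
	if (count < 1) {
		*clks = NULL;
		return 0;
	}

	return count;
}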
@@ -837,7 +839,7 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
return ERR_CAST(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+ ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
msm_gem_address_space_put(aspace);
return ERR_PTR(ret);
@@ -994,8 +996,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
- NULL, 0);
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
msm_gem_address_space_put(gpu->aspace);
}
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ab8f0f9c9dc8..be5bc2e8425c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -111,8 +111,15 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ /* The gfx-mem interconnect path that's used by all GPU types. */
struct icc_path *icc_path;
+ /*
+ * Second interconnect path for some A3xx and all A4xx GPUs to the
+ * On Chip MEMory (OCMEM).
+ */
+ struct icc_path *ocmem_icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
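The new ocmem_icc_path field sits next to the existing gfx-mem path; a speculative sketch of how a probe path could look both up with the interconnect API (the path names here are illustrative and the helper is hypothetical):

#include <linux/err.h>
#include <linux/interconnect.h>

static int example_get_icc_paths(struct device *dev,
				 struct icc_path **gfx,
				 struct icc_path **ocmem)
{
	/* of_icc_get() keys off the DT "interconnects"/"interconnect-names"
	 * properties and may return NULL when the framework is disabled. */
	*gfx = of_icc_get(dev, "gfx-mem");
	if (IS_ERR(*gfx))
		return PTR_ERR(*gfx);

	*ocmem = of_icc_get(dev, "ocmem");
	if (IS_ERR(*ocmem)) {
		icc_put(*gfx);
		return PTR_ERR(*ocmem);
	}

	return 0;
}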
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 1155118a27a1..122b84789238 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -5,7 +5,7 @@
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM drm_msm
+#define TRACE_SYSTEM drm_msm_gpu
#define TRACE_INCLUDE_FILE msm_gpu_trace
TRACE_EVENT(msm_gpu_submit,
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 27312b553dd8..34980d8eb7ad 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+#include <linux/dma-mapping.h>
+
#include "msm_drv.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
@@ -19,14 +21,12 @@ struct msm_gpummu {
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static int msm_gpummu_attach(struct msm_mmu *mmu)
{
return 0;
}
-static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 8c95c31e2b12..ad58cfe5998e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -23,16 +23,14 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static int msm_iommu_attach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
- int cnt)
+static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index c7588a42635e..1cbef6b200b7 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -30,13 +30,76 @@ struct msm_kms_funcs {
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- /* modeset, bracketing atomic_commit(): */
+
+ /*
+ * Atomic commit handling:
+ *
+ * Note that in the case of async commits, the funcs which take
+ * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
+	 * might not be evenly balanced with ->prepare_commit(); however,
+	 * each crtc that is affected by a ->prepare_commit() (potentially
+	 * multiple times) will eventually (at the end of the vsync period)
+	 * be flushed and completed.
+ *
+	 * This has some implications for tracking cleanup state, for
+	 * example SMP blocks to release after the commit completes. Ie.
+	 * the cleanup state should also be duplicated in the various
+ * duplicate_state() methods, as the current cleanup state at
+ * ->complete_commit() time may have accumulated cleanup work
+ * from multiple commits.
+ */
+
+ /**
+ * Enable/disable power/clks needed for hw access done in other
+ * commit related methods.
+ *
+ * If mdp4 is migrated to runpm, we could probably drop these
+ * and use runpm directly.
+ */
+ void (*enable_commit)(struct msm_kms *kms);
+ void (*disable_commit)(struct msm_kms *kms);
+
+ /**
+ * If the kms backend supports async commit, it should implement
+ * this method to return the time of the next vsync. This is
+ * used to determine a time slightly before vsync, for the async
+ * commit timer to run and complete an async commit.
+ */
+ ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);
+
+ /**
+ * Prepare for atomic commit. This is called after any previous
+ * (async or otherwise) commit has completed.
+ */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- /* functions to wait for atomic commit completed on each CRTC */
- void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
- struct drm_crtc *crtc);
+
+ /**
+ * Flush an atomic commit. This is called after the hardware
+	 * updates have already been pushed down to the affected planes/
+ * crtcs/encoders/connectors.
+ */
+ void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /**
+ * Wait for any in-progress flush to complete on the specified
+ * crtcs. This should not block if there is no in-progress
+ * commit (ie. don't just wait for a vblank), as it will also
+ * be called before ->prepare_commit() to ensure any potential
+ * "async" commit has completed.
+ */
+ void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /**
+ * Clean up after commit is completed. This is called after
+ * ->wait_flush(), to give the backend a chance to do any
+ * post-commit cleanup.
+ */
+ void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /*
+ * Format handling:
+ */
+
/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
@@ -46,6 +109,7 @@ struct msm_kms_funcs {
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos);
+
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -64,20 +128,48 @@ struct msm_kms_funcs {
#endif
};
+struct msm_kms;
+
+/*
+ * A per-crtc timer for pending async atomic flushes. Scheduled to expire
+ * shortly before vblank to flush pending async updates.
+ */
+struct msm_pending_timer {
+ struct hrtimer timer;
+ struct work_struct work;
+ struct msm_kms *kms;
+ unsigned crtc_idx;
+};
+
struct msm_kms {
const struct msm_kms_funcs *funcs;
+ struct drm_device *dev;
/* irq number to be passed on to drm_irq_install */
int irq;
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
+
+ /*
+ * For async commit, where ->flush_commit() and later happens
+	 * For async commit, where ->flush_commit() and the later steps
+	 * happen from the crtc's pending_timer close to the end of the frame:
+ struct mutex commit_lock;
+ unsigned pending_crtc_mask;
+ struct msm_pending_timer pending_timers[MAX_CRTCS];
};
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
+ unsigned i;
+
+ mutex_init(&kms->commit_lock);
kms->funcs = funcs;
+
+ for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
+ msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
}
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
@@ -98,4 +190,8 @@ struct msm_mdss {
int mdp5_mdss_init(struct drm_device *dev);
int dpu_mdss_init(struct drm_device *dev);
+#define for_each_crtc_mask(dev, crtc, crtc_mask) \
+ drm_for_each_crtc(crtc, dev) \
+ for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
+
#endif /* __MSM_KMS_H__ */
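Taken together, the reworked msm_kms_funcs hooks split an atomic commit into explicit phases. The sketch below shows the order in which a backend can expect them to be called for a synchronous commit; it is an illustration of the documented contract, not a copy of the msm_atomic code, and NULL checks for optional hooks are omitted.

#include "msm_kms.h"	/* the header changed above */

static void example_commit_flow(struct msm_kms *kms,
				struct drm_atomic_state *state,
				unsigned crtc_mask)
{
	kms->funcs->enable_commit(kms);		/* power/clks on */
	kms->funcs->wait_flush(kms, crtc_mask);	/* drain any prior async commit */
	kms->funcs->prepare_commit(kms, state);

	/* ... push plane/crtc/encoder updates to the hardware here ... */

	kms->funcs->flush_commit(kms, crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	kms->funcs->disable_commit(kms);	/* power/clks off */
}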
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 871d56303697..67a623f14319 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -10,8 +10,8 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
+ int (*attach)(struct msm_mmu *mmu);
+ void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 490cadda2796..3a27153eef08 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -15,6 +15,9 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 76d3fdd17bf8..af7ceb246c7c 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -31,11 +31,14 @@
#ifdef CONFIG_DEBUG_FS
-#include <linux/kfifo.h>
-#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
+#include <drm/drm_file.h>
+
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
@@ -295,7 +298,7 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
- uint64_t iova, uint32_t size)
+ uint64_t iova, uint32_t size, bool full)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
unsigned offset = 0;
@@ -315,6 +318,9 @@ static void snapshot_buf(struct msm_rd_state *rd,
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
+ if (!full)
+ return;
+
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
@@ -378,18 +384,21 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; i < submit->nr_bos; i++)
- if (should_dump(submit, i))
- snapshot_buf(rd, submit, i, 0, 0);
+ snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
for (i = 0; i < submit->nr_cmds; i++) {
- uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
- submit->cmd[i].iova, szd * 4);
+ submit->cmd[i].iova, szd * 4, true);
}
+ }
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint64_t iova = submit->cmd[i].iova;
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index c70e00e22c4c..001fbf537440 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -3,6 +3,8 @@
*/
#include <linux/kref.h>
+#include <linux/uaccess.h>
+
#include "msm_gpu.h"
void msm_submitqueue_destroy(struct kref *kref)
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 93f413345e0d..b69ace8bf526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -8,21 +8,23 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
-#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+
+#include <video/videomode.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
@@ -93,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
reg = readl(mxsfb->base + LCDC_CTRL);
- if (mxsfb->connector.display_info.num_bus_formats)
- bus_format = mxsfb->connector.display_info.bus_formats[0];
+ if (mxsfb->connector->display_info.num_bus_formats)
+ bus_format = mxsfb->connector->display_info.bus_formats[0];
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
+ bus_format);
reg &= ~CTRL_BUS_WIDTH_MASK;
switch (bus_format) {
@@ -202,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb)
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
+ struct drm_device *drm = mxsfb->pipe.crtc.dev;
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
- const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 bus_flags = mxsfb->connector->display_info.bus_flags;
u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
int err;
@@ -227,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+ if (mxsfb->bridge && mxsfb->bridge->timings)
+ bus_flags = mxsfb->bridge->timings->input_bus_flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(mxsfb->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
mxsfb->base + mxsfb->devdata->transfer_count);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 6fafc90da4ec..497cf443a9af 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,29 +8,32 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
-#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
+#include <linux/spinlock.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
@@ -98,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_connector *connector;
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
struct drm_device *drm = pipe->plane.dev;
+ if (!mxsfb->connector) {
+ list_for_each_entry(connector,
+ &drm->mode_config.connector_list,
+ head)
+ if (connector->encoder == &mxsfb->pipe.encoder) {
+ mxsfb->connector = connector;
+ break;
+ }
+ }
+
+ if (!mxsfb->connector) {
+ dev_warn(drm->dev, "No connector attached, using default\n");
+ mxsfb->connector = &mxsfb->panel_connector;
+ }
+
pm_runtime_get_sync(drm->dev);
drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
@@ -126,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&drm->event_lock);
+
+ if (mxsfb->connector != &mxsfb->panel_connector)
+ mxsfb->connector = NULL;
}
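The pipe_enable() change looks up the connector that ended up attached to the pipe's encoder, since with a bridge there is no locally owned panel_connector. A minimal sketch of that lookup as a standalone helper (example_find_connector() is hypothetical; locking is assumed to be provided by the atomic commit path):

#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>

static struct drm_connector *
example_find_connector(struct drm_device *drm, struct drm_encoder *encoder)
{
	struct drm_connector *connector;

	/* Walk the mode_config connector list and pick the connector
	 * currently bound to our encoder. */
	list_for_each_entry(connector, &drm->mode_config.connector_list, head)
		if (connector->encoder == encoder)
			return connector;

	return NULL;
}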
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -223,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
- &mxsfb->connector);
+ mxsfb->connector);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
goto err_vblank;
}
- ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
- if (ret) {
- dev_err(drm->dev, "Cannot connect panel\n");
- goto err_vblank;
+ /*
+ * Attach panel only if there is one.
+	 * If there is no panel attached, it must be a bridge. In this case, we
+ * need a reference to its connector for a proper initialization.
+ * We will do this check in pipe->enable(), since the connector won't
+ * be attached to an encoder until then.
+ */
+
+ if (mxsfb->panel) {
+ ret = drm_panel_attach(mxsfb->panel, mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel: %d\n", ret);
+ goto err_vblank;
+ }
+ } else if (mxsfb->bridge) {
+ ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe,
+ mxsfb->bridge);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ goto err_vblank;
+ }
}
drm->mode_config.min_width = MXSFB_MIN_XRES;
@@ -313,8 +352,7 @@ static irqreturn_t mxsfb_irq_handler(int irq, void *data)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver mxsfb_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = mxsfb_irq_handler,
.irq_preinstall = mxsfb_irq_preinstall,
.irq_uninstall = mxsfb_irq_preinstall,
@@ -323,8 +361,6 @@ static struct drm_driver mxsfb_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
index d975300dca05..0b65b5194a9c 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -27,8 +27,10 @@ struct mxsfb_drm_private {
struct clk *clk_disp_axi;
struct drm_simple_display_pipe pipe;
- struct drm_connector connector;
+ struct drm_connector panel_connector;
+ struct drm_connector *connector;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
int mxsfb_setup_crtc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index 91e76f9cead6..9eca1605d11d 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -15,14 +15,14 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drmP.h>
#include "mxsfb_drv.h"
static struct mxsfb_drm_private *
drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
{
- return container_of(connector, struct mxsfb_drm_private, connector);
+ return container_of(connector, struct mxsfb_drm_private,
+ panel_connector);
}
static int mxsfb_panel_get_modes(struct drm_connector *connector)
@@ -31,7 +31,7 @@ static int mxsfb_panel_get_modes(struct drm_connector *connector)
drm_connector_to_mxsfb_drm_private(connector);
if (mxsfb->panel)
- return mxsfb->panel->funcs->get_modes(mxsfb->panel);
+ return drm_panel_get_modes(mxsfb->panel, connector);
return 0;
}
@@ -77,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
int mxsfb_create_output(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
+ &mxsfb->panel, &mxsfb->bridge);
if (ret)
return ret;
- mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
- mxsfb->connector.polled = 0;
- drm_connector_helper_add(&mxsfb->connector,
- &mxsfb_panel_connector_helper_funcs);
- ret = drm_connector_init(drm, &mxsfb->connector,
- &mxsfb_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (!ret)
- mxsfb->panel = panel;
+ if (mxsfb->panel) {
+ mxsfb->connector = &mxsfb->panel_connector;
+ mxsfb->connector->dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector->polled = 0;
+ drm_connector_helper_add(mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 96b9814e6d06..d6e4ae1ef705 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -2,7 +2,8 @@
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
depends on DRM && PCI && MMU
- select FW_LOADER
+ select IOMMU_API
+ select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
@@ -16,6 +17,7 @@ config DRM_NOUVEAU
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
+ select SND_HDA_COMPONENT if SND_HDA_CORE
help
Choose this option for open-source NVIDIA support.
@@ -86,9 +88,10 @@ config DRM_NOUVEAU_SVM
bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
depends on DEVICE_PRIVATE
depends on DRM_NOUVEAU
- depends on HMM_MIRROR
+ depends on MMU
depends on STAGING
- select MIGRATE_VMA_HELPER
+ select HMM_MIRROR
+ select MMU_NOTIFIER
default n
help
Say Y here if you want to enable experimental support for
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c79160c37f84..9d4a2d97507e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -21,8 +21,6 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "hw.h"
@@ -55,8 +53,8 @@ struct nv_sim_state {
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
- int pagemiss, cas, width, bpp;
- int nvclks, mclks, pclks, crtpagemiss;
+ int pagemiss, cas, bpp;
+ int nvclks, mclks, crtpagemiss;
int found, mclk_extra, mclk_loop, cbs, m1, p1;
int mclk_freq, pclk_freq, nvclk_freq;
int us_m, us_n, us_p, crtc_drain_rate;
@@ -67,11 +65,9 @@ nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
cas = arb->mem_latency;
- width = arb->memory_width >> 6;
bpp = arb->bpp;
cbs = 128;
- pclks = 2;
nvclks = 10;
mclks = 13 + cas;
mclk_extra = 3;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index f22f01020625..37c50ea8f847 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,11 +22,10 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/pm_runtime.h>
-
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
@@ -1031,53 +1030,6 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-static int
-nouveau_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev;
- struct nouveau_drm *drm;
- int ret;
- struct drm_crtc *crtc;
- bool active = false;
- if (!set || !set->crtc)
- return -EINVAL;
-
- dev = set->crtc->dev;
-
- /* get a pm reference here */
- ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
- return ret;
-
- ret = drm_crtc_helper_set_config(set, ctx);
-
- drm = nouveau_drm(dev);
-
- /* if we get here with no crtcs active then we can drop a reference */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled)
- active = true;
- }
-
- pm_runtime_mark_last_busy(dev->dev);
- /* if we have active crtcs and we don't have a power ref,
- take the current one */
- if (active && !drm->have_disp_power_ref) {
- drm->have_disp_power_ref = true;
- return ret;
- }
- /* if we have no active crtcs, then drop the power ref
- we got before */
- if (!active && drm->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
- drm->have_disp_power_ref = false;
- }
- /* drop the power reference we got coming in here */
- pm_runtime_put_autosuspend(dev->dev);
- return ret;
-}
-
struct nv04_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
@@ -1293,7 +1245,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
- .set_config = nouveau_crtc_set_config,
+ .set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index 16e09f6b9113..4c6440d29c3f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: MIT
-#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index e7af95d37ddb..e8eef88a8382 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -24,7 +24,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 73d41abbb510..f9f4482c79b5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -24,8 +24,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 5713bacaee80..44ee82d0c9b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -22,7 +22,6 @@
* Author: Ben Skeggs
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
@@ -210,7 +209,7 @@ nv04_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv04_display_fini;
/* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
- dev->driver->driver_features &= ~DRIVER_ATOMIC;
+ dev->driver_features &= ~DRIVER_ATOMIC;
/* Request page flip completion event. */
if (drm->nvsw.client) {
@@ -257,7 +256,7 @@ nv04_display_create(struct drm_device *dev)
list_for_each_entry_safe(connector, ct,
&dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
+ if (!connector->possible_encoders) {
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 6ccfc09bcf0f..495d3284e876 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -161,7 +161,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
-#include <subdev/bios.h>
#include <subdev/bios/init.h>
static inline void
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 0c9bdf023f5b..3fdfafa8b0ad 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -22,7 +22,6 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 3a2be47fb4f1..6987e1766cd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -23,7 +23,6 @@
#ifndef __NOUVEAU_HW_H__
#define __NOUVEAU_HW_H__
-#include <drm/drmP.h>
#include "disp.h"
#include "nvreg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index df4358e31075..a3a0a73ae8ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -23,7 +23,6 @@
* written by Arthur Huillet.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 2b83b2c39d1d..2f6d2b6711ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index de4490b4ed30..b701a4d8fe76 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 26fd71c06626..3a9489ed6544 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include "nouveau_drv.h"
@@ -645,16 +644,13 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
int i;
if (nouveau_tv_norm) {
- for (i = 0; i < num_tv_norms; i++) {
- if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
- tv_enc->tv_norm = i;
- break;
- }
- }
-
- if (i == num_tv_norms)
+ i = match_string(nv17_tv_norm_names, num_tv_norms,
+ nouveau_tv_norm);
+ if (i < 0)
NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
nouveau_tv_norm);
+ else
+ tv_enc->tv_norm = i;
}
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
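The nouveau_tv_norm parsing now uses match_string() instead of an open-coded loop. A small sketch of the helper's behaviour against a made-up table (example_norms and example_pick_norm are illustrative):

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const example_norms[] = {
	"PAL", "PAL-M", "NTSC-M", "hd720p",
};

/* match_string() returns the index of the matching entry or -EINVAL,
 * which is why a single "i < 0" test replaces the old sentinel check. */
static int example_pick_norm(const char *name)
{
	return match_string(example_norms, ARRAY_SIZE(example_norms), name);
}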
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index b5fae5ab3fa8..24f7700768da 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -114,6 +114,7 @@ struct nv50_head_atom {
u8 nhsync:1;
u8 nvsync:1;
u8 depth:4;
+ u8 bpc;
} or;
/* Currently only used for MST */
@@ -185,6 +186,11 @@ struct nv50_wndw_atom {
} xlut;
struct {
+ u32 matrix[12];
+ bool valid;
+ } csc;
+
+ struct {
u8 mode:2;
u8 interval:4;
@@ -216,14 +222,23 @@ struct nv50_wndw_atom {
u16 y;
} point;
+ struct {
+ u8 depth;
+ u8 k1;
+ u8 src_color:4;
+ u8 dst_color:4;
+ } blend;
+
union nv50_wndw_atom_mask {
struct {
bool ntfy:1;
bool sema:1;
bool xlut:1;
+ bool csc:1;
bool image:1;
bool scale:1;
bool point:1;
+ bool blend:1;
};
u8 mask;
} set, clr;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index d5e295ca2caa..00a85f1e1a4a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -25,7 +25,9 @@
#include <nvif/event.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "nouveau_bo.h"
void
@@ -56,12 +58,21 @@ static void
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
- if ((push = evo_wait(&wndw->wndw, 10))) {
+ if ((push = evo_wait(&wndw->wndw, 13))) {
evo_mthd(push, 0x0084, 1);
evo_data(push, asyw->image.mode << 8 |
asyw->image.interval << 4);
evo_mthd(push, 0x00c0, 1);
evo_data(push, asyw->image.handle[0]);
+ if (asyw->image.format == 0xca) {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 1);
+ evo_data(push, 0x6400);
+ } else {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0);
+ evo_data(push, 0);
+ }
evo_mthd(push, 0x0800, 5);
evo_data(push, asyw->image.offset[0] >> 8);
evo_data(push, 0x00000000);
@@ -179,9 +190,6 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
const struct drm_framebuffer *fb = asyw->state.fb;
int ret;
- if (!fb->format->depth)
- return -EINVAL;
-
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
@@ -200,6 +208,14 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
asyh->base.y = asyw->state.src.y1 >> 16;
asyh->base.w = asyw->state.fb->width;
asyh->base.h = asyw->state.fb->height;
+
+ /* Some newer formats, esp FP16 ones, don't have a
+ * "depth". There's nothing that really makes sense there
+ * either, so just set it to the implicit bit count.
+ */
+ if (!asyh->base.depth)
+ asyh->base.depth = asyh->base.cpp * 8;
+
return 0;
}
@@ -215,6 +231,8 @@ base507c_format[] = {
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 73646819a0d6..f4c05949dd62 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -25,12 +25,21 @@ static void
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
- if ((push = evo_wait(&wndw->wndw, 10))) {
+ if ((push = evo_wait(&wndw->wndw, 13))) {
evo_mthd(push, 0x0084, 1);
evo_data(push, asyw->image.mode << 8 |
asyw->image.interval << 4);
evo_mthd(push, 0x00c0, 1);
evo_data(push, asyw->image.handle[0]);
+ if (asyw->image.format == 0xca) {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 1);
+ evo_data(push, 0x6400);
+ } else {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0);
+ evo_data(push, 0);
+ }
evo_mthd(push, 0x0800, 5);
evo_data(push, asyw->image.offset[0] >> 8);
evo_data(push, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 049ce6da321c..224a34c340fe 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -75,12 +75,78 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
-base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- asyw->xlut.i.mode = 7;
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyw->xlut.i.mode = size == 1024 ? 4 : 7;
asyw->xlut.i.enable = 2;
asyw->xlut.i.load = head907d_olut_load;
+ return true;
+}
+
+static inline u32
+csc_drm_to_base(u64 in)
+{
+ /* base takes a 19-bit 2's complement value in S3.16 format */
+ bool sign = in & BIT_ULL(63);
+ u32 integer = (in >> 32) & 0x7fffffff;
+ u32 fraction = in & 0xffffffff;
+
+ if (integer >= 4) {
+ return (1 << 18) - (sign ? 0 : 1);
+ } else {
+ u32 ret = (integer << 16) | (fraction >> 16);
+ if (sign)
+ ret = -ret;
+ return ret & GENMASK(18, 0);
+ }
+}
+
+void
+base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ const struct drm_color_ctm *ctm)
+{
+ int i, j;
+
+ for (j = 0; j < 3; j++) {
+ for (i = 0; i < 4; i++) {
+ u32 *val = &asyw->csc.matrix[j * 4 + i];
+ /* DRM does not support constant offset, while
+ * HW CSC does. Skip it. */
+ if (i == 3) {
+ *val = 0;
+ } else {
+ *val = csc_drm_to_base(ctm->matrix[j * 3 + i]);
+ }
+ }
+ }
+}
+
+static void
+base907c_csc_clr(struct nv50_wndw *wndw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 2))) {
+ evo_mthd(push, 0x0140, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
+base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push, i;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x0140, 12);
+ evo_data(push, asyw->csc.matrix[0] | 0x80000000);
+ for (i = 1; i < 12; i++)
+ evo_data(push, asyw->csc.matrix[i]);
+ evo_kick(push, &wndw->wndw);
+ }
}
const struct nv50_wndw_func
@@ -94,7 +160,11 @@ base907c = {
.ntfy_clr = base507c_ntfy_clr,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = base907c_ilut,
+ .csc = base907c_csc,
+ .csc_set = base907c_csc_set,
+ .csc_clr = base907c_csc_clr,
.olut_core = true,
+ .ilut_size = 1024,
.xlut_set = base907c_xlut_set,
.xlut_clr = base907c_xlut_clr,
.image_set = base907c_image_set,
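csc_drm_to_base() above converts the DRM CTM's sign-magnitude S31.32 entries into the 19-bit two's-complement S3.16 values the base channel consumes. The hand-computed examples below illustrate that mapping; they are not taken from the patch:

/*
 *   CTM coefficient            S31.32 input (u64)      19-bit output
 *   +1.0                       0x0000000100000000      0x10000
 *   +1.5                       0x0000000180000000      0x18000
 *   -0.5                       0x8000000080000000      0x78000  (i.e. -0x8000)
 *   +5.0 (clamped near +4.0)   0x0000000500000000      0x3ffff
 */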
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
index 54d705bb81a5..a1baed4fe0e9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base917c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -36,6 +36,8 @@ base917c_format[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index df8336b593f7..ff94f3f6f264 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -6,6 +6,7 @@
struct nv50_core {
const struct nv50_core_func *func;
struct nv50_dmac chan;
+ bool assign_windows;
};
int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
@@ -18,6 +19,10 @@ struct nv50_core_func {
struct nvif_device *);
void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
+ struct {
+ void (*owner)(struct nv50_core *);
+ } wndw;
+
const struct nv50_head_func *head;
const struct nv50_outp_func {
void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
@@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void corec37d_update(struct nv50_core *, u32 *, bool);
+void corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 7860774b65bc..3b36dc8d36b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -25,6 +25,20 @@
#include <nouveau_bo.h>
void
+corec37d_wndw_owner(struct nv50_core *core)
+{
+ const u32 windows = 8; /*XXX*/
+ u32 *push, i;
+ if ((push = evo_wait(&core->chan, 2 * windows))) {
+ for (i = 0; i < windows; i++) {
+ evo_mthd(push, 0x1000 + (i * 0x080), 1);
+ evo_data(push, i >> 1);
+ }
+ evo_kick(push, &core->chan);
+ }
+}
+
+void
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
u32 *push;
@@ -76,20 +90,18 @@ corec37d_init(struct nv50_core *core)
{
const u32 windows = 8; /*XXX*/
u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+ if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
evo_mthd(push, 0x0208, 1);
evo_data(push, core->chan.sync.handle);
for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1000 + (i * 0x080), 3);
- evo_data(push, i >> 1);
- evo_data(push, 0x00000017);
+ evo_mthd(push, 0x1004 + (i * 0x080), 2);
+ evo_data(push, 0x0000001f);
evo_data(push, 0x00000000);
evo_mthd(push, 0x1010 + (i * 0x080), 1);
evo_data(push, 0x00127fff);
}
- evo_mthd(push, 0x0200, 1);
- evo_data(push, 0x00000001);
evo_kick(push, &core->chan);
+ core->assign_windows = true;
}
}
@@ -99,6 +111,7 @@ corec37d = {
.ntfy_init = corec37d_ntfy_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
+ .wndw.owner = corec37d_wndw_owner,
.head = &headc37d,
.sor = &sorc37d,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index b606d68cda10..147adcd60937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -27,20 +27,18 @@ corec57d_init(struct nv50_core *core)
{
const u32 windows = 8; /*XXX*/
u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+ if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
evo_mthd(push, 0x0208, 1);
evo_data(push, core->chan.sync.handle);
for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1000 + (i * 0x080), 3);
- evo_data(push, i >> 1);
+ evo_mthd(push, 0x1004 + (i * 0x080), 2);
evo_data(push, 0x0000000f);
evo_data(push, 0x00000000);
evo_mthd(push, 0x1010 + (i * 0x080), 1);
evo_data(push, 0x00117fff);
}
- evo_mthd(push, 0x0200, 1);
- evo_data(push, 0x00000001);
evo_kick(push, &core->chan);
+ core->assign_windows = true;
}
}
@@ -50,6 +48,7 @@ corec57d = {
.ntfy_init = corec37d_ntfy_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
+ .wndw.owner = corec37d_wndw_owner,
.head = &headc57d,
.sor = &sorc37d,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 5c36c75232e6..a3dc2ba19fb2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -29,15 +29,16 @@
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
+#include <linux/component.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_vblank.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -326,9 +327,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
* same size as the native one (e.g. different
* refresh rate)
*/
- if (adjusted_mode->hdisplay == native_mode->hdisplay &&
- adjusted_mode->vdisplay == native_mode->vdisplay &&
- adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
+ if (mode->hdisplay == native_mode->hdisplay &&
+ mode->vdisplay == native_mode->vdisplay &&
+ mode->type & DRM_MODE_TYPE_DRIVER)
break;
mode = native_mode;
asyc->scaler.full = true;
@@ -353,10 +354,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct nouveau_connector *nv_connector =
- nouveau_connector(conn_state->connector);
- return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
- nv_connector->native_mode);
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ int ret;
+
+ ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
+ if (ret)
+ return ret;
+
+ if (crtc_state->mode_changed || crtc_state->connectors_changed)
+ asyh->or.bpc = connector->display_info.bpc;
+
+ return 0;
}
/******************************************************************************
@@ -466,12 +477,113 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
return 0;
}
+/*
+ * audio component binding for ELD notification
+ */
+static void
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+{
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ port, -1);
+}
+
+static int
+nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+ bool *enabled, unsigned char *buf, int max_bytes)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder;
+ struct nouveau_connector *nv_connector;
+ struct nouveau_crtc *nv_crtc;
+ int ret = 0;
+
+ *enabled = false;
+ drm_for_each_encoder(encoder, drm->dev) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_crtc = nouveau_crtc(encoder->crtc);
+ if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+ continue;
+ *enabled = drm_detect_monitor_audio(nv_connector->edid);
+ if (*enabled) {
+ ret = drm_eld_size(nv_connector->base.eld);
+ memcpy(buf, nv_connector->base.eld,
+ min(max_bytes, ret));
+ }
+ break;
+ }
+ return ret;
+}
+
+static const struct drm_audio_component_ops nv50_audio_component_ops = {
+ .get_eld = nv50_audio_component_get_eld,
+};
+
+static int
+nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
+ drm_modeset_lock_all(drm_dev);
+ acomp->ops = &nv50_audio_component_ops;
+ acomp->dev = kdev;
+ drm->audio.component = acomp;
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+
+static void
+nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ drm_modeset_lock_all(drm_dev);
+ drm->audio.component = NULL;
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static const struct component_ops nv50_audio_component_bind_ops = {
+ .bind = nv50_audio_component_bind,
+ .unbind = nv50_audio_component_unbind,
+};
+
+static void
+nv50_audio_component_init(struct nouveau_drm *drm)
+{
+ if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ drm->audio.component_registered = true;
+}
+
+static void
+nv50_audio_component_fini(struct nouveau_drm *drm)
+{
+ if (drm->audio.component_registered) {
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ }
+}
+
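
The audio component glue above exposes an ELD (the EDID-derived audio descriptor) lookup to the HDA side. A minimal standalone sketch of the get_eld() contract it implements, using hypothetical types and names (fake_output, sketch_get_eld), not the kernel structures:

#include <string.h>

struct fake_output {
	int  crtc_index;
	int  has_audio;
	unsigned char eld[128];
	int  eld_size;
};

static int sketch_get_eld(const struct fake_output *outs, int n, int port,
			  int *enabled, unsigned char *buf, int max_bytes)
{
	int i, ret = 0;

	*enabled = 0;
	for (i = 0; i < n; i++) {
		if (outs[i].crtc_index != port)
			continue;
		*enabled = outs[i].has_audio;
		if (*enabled) {
			ret = outs[i].eld_size;
			memcpy(buf, outs[i].eld,
			       ret < max_bytes ? ret : max_bytes);
		}
		break;
	}
	return ret;	/* full ELD size, even if only max_bytes were copied */
}

The return value is the complete ELD size rather than the copied length, matching how drm_eld_size() is used in nv50_audio_component_get_eld() above.
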
/******************************************************************************
* Audio
*****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
@@ -486,11 +598,14 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+
+ nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
@@ -517,6 +632,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
+
+ nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
/******************************************************************************
@@ -650,7 +767,6 @@ struct nv50_mstm {
struct nouveau_encoder *outp;
struct drm_dp_mst_topology_mgr mgr;
- struct nv50_msto *msto[4];
bool modified;
bool disabled;
@@ -716,7 +832,6 @@ nv50_msto_cleanup(struct nv50_msto *msto)
drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
msto->mstc = NULL;
- msto->head = NULL;
msto->disabled = false;
}
@@ -770,32 +885,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
struct nv50_mstm *mstm = mstc->mstm;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
+ int ret;
+
+ ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+ if (ret)
+ return ret;
+
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ return 0;
+
+ /*
+ * When restoring duplicated states, we need to make sure that the bw
+ * remains the same and avoid recalculating it, as the connector's bpc
+ * may have changed after the state was duplicated
+ */
+ if (!state->duplicated) {
+ const int clock = crtc_state->adjusted_mode.clock;
- if (crtc_state->mode_changed || crtc_state->connectors_changed) {
/*
- * When restoring duplicated states, we need to make sure that
- * the bw remains the same and avoid recalculating it, as the
- * connector's bpc may have changed after the state was
- * duplicated
+ * XXX: Since we don't use HDR in userspace quite yet, limit
+ * the bpc to 8 to save bandwidth on the topology. In the
+ * future, we'll want to properly fix this by dynamically
+ * selecting the highest possible bpc that would fit in the
+ * topology
*/
- if (!state->duplicated) {
- const int bpp = connector->display_info.bpc * 3;
- const int clock = crtc_state->adjusted_mode.clock;
+ asyh->or.bpc = min(connector->display_info.bpc, 8U);
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
+ }
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
- }
+ slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
+ asyh->dp.pbn, 0);
+ if (slots < 0)
+ return slots;
- slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
- mstc->port,
- asyh->dp.pbn);
- if (slots < 0)
- return slots;
+ asyh->dp.tu = slots;
- asyh->dp.tu = slots;
- }
+ return 0;
+}
- return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
- mstc->native);
+static u8
+nv50_dp_bpc_to_depth(unsigned int bpc)
+{
+ switch (bpc) {
+ case 6: return 0x2;
+ case 8: return 0x5;
+ case 10: /* fall-through */
+ default: return 0x6;
+ }
}
static void
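
In the MST atomic check above, asyh->or.bpc is clamped to 8 and fed into drm_dp_calc_pbn_mode(). As a rough, illustrative approximation only (not the DRM helper itself): PBN expresses stream bandwidth in 54/64 MByte/s units with a 0.6% spreading margin, roughly:

#include <stdint.h>

static uint32_t sketch_pbn_estimate(uint32_t clock_khz, uint32_t bpc)
{
	/* pixel clock (kHz) * bits/pixel, scaled to 54/64 MB/s units
	 * with a 1.006 margin, rounded up */
	uint64_t num = (uint64_t)clock_khz * bpc * 3 * 64 * 1006;
	uint64_t den = 8ULL * 54 * 1000 * 1000;

	return (uint32_t)((num + den - 1) / den);
}

For a 148.5 MHz pixel clock at 8 bpc this sketch yields 532 PBN; the clamp to 8 bpc above keeps that figure down until dynamic bpc selection is implemented.
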
@@ -808,7 +945,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- u8 proto, depth;
+ u8 proto;
bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
@@ -837,16 +974,9 @@ nv50_msto_enable(struct drm_encoder *encoder)
else
proto = 0x9;
- switch (mstc->connector.display_info.bpc) {
- case 6: depth = 0x2; break;
- case 8: depth = 0x5; break;
- case 10:
- default: depth = 0x6; break;
- }
+ mstm->outp->update(mstm->outp, head->base.index, armh, proto,
+ nv50_dp_bpc_to_depth(armh->or.bpc));
- mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
-
- msto->head = head;
msto->mstc = mstc;
mstm->modified = true;
}
@@ -887,45 +1017,40 @@ nv50_msto = {
.destroy = nv50_msto_destroy,
};
-static int
-nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
- struct nv50_msto **pmsto)
+static struct nv50_msto *
+nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
struct nv50_msto *msto;
int ret;
- if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
- return -ENOMEM;
+ msto = kzalloc(sizeof(*msto), GFP_KERNEL);
+ if (!msto)
+ return ERR_PTR(-ENOMEM);
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
- DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ DRM_MODE_ENCODER_DPMST, "mst-%d", id);
if (ret) {
- kfree(*pmsto);
- *pmsto = NULL;
- return ret;
+ kfree(msto);
+ return ERR_PTR(ret);
}
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
- msto->encoder.possible_crtcs = heads;
- return 0;
+ msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
+ msto->head = head;
+ return msto;
}
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
- struct nv50_head *head = nv50_head(connector_state->crtc);
struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct drm_crtc *crtc = connector_state->crtc;
- return &mstc->mstm->msto[head->base.index]->encoder;
-}
-
-static struct drm_encoder *
-nv50_mstc_best_encoder(struct drm_connector *connector)
-{
- struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ return NULL;
- return &mstc->mstm->msto[0]->encoder;
+ return &nv50_head(crtc)->msto->encoder;
}
static enum drm_mode_status
@@ -986,20 +1111,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
-static const struct drm_connector_helper_funcs
-nv50_mstc_help = {
- .get_modes = nv50_mstc_get_modes,
- .mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
- .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
- .atomic_check = nv50_mstc_atomic_check,
-};
-
-static enum drm_connector_status
-nv50_mstc_detect(struct drm_connector *connector, bool force)
+static int
+nv50_mstc_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
- enum drm_connector_status conn_status;
int ret;
if (drm_connector_is_unregistered(connector))
@@ -1009,14 +1125,23 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
- conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
- mstc->port);
+ ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
+ mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
- return conn_status;
+ return ret;
}
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
+ .detect_ctx = nv50_mstc_detect,
+};
+
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
@@ -1031,7 +1156,6 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
- .detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
@@ -1045,8 +1169,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
const char *path, struct nv50_mstc **pmstc)
{
struct drm_device *dev = mstm->outp->base.base.dev;
+ struct drm_crtc *crtc;
struct nv50_mstc *mstc;
- int ret, i;
+ int ret;
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
return -ENOMEM;
@@ -1066,8 +1191,13 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
- for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
- drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+ drm_for_each_crtc(crtc, dev) {
+ if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ continue;
+
+ drm_connector_attach_encoder(&mstc->connector,
+ &nv50_head(crtc)->msto->encoder);
+ }
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
@@ -1309,14 +1439,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_init(struct nv50_mstm *mstm)
+nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{
int ret;
if (!mstm || !mstm->mgr.mst_state)
return;
- ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev);
@@ -1341,7 +1471,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
- int ret, i;
+ int ret;
u8 dpcd;
/* This is a workaround for some monitors not functioning
@@ -1364,13 +1494,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
if (ret)
return ret;
- for (i = 0; i < max_payloads; i++) {
- ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
- i, &mstm->msto[i]);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -1498,20 +1621,14 @@ nv50_sor_enable(struct drm_encoder *encoder)
lvds.lvds.script |= 0x0200;
}
- if (nv_connector->base.display_info.bpc == 8)
+ if (asyh->or.bpc == 8)
lvds.lvds.script |= 0x0200;
}
nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6)
- depth = 0x2;
- else
- if (nv_connector->base.display_info.bpc == 8)
- depth = 0x5;
- else
- depth = 0x6;
+ depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->link & 1)
proto = 0x8;
@@ -1549,17 +1666,24 @@ nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
+static bool nv50_has_mst(struct nouveau_drm *drm)
+{
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ u32 data;
+ u8 ver, hdr, cnt, len;
+
+ data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+ return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+}
+
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u8 ver, hdr, cnt, len;
- u32 data;
int type, ret;
switch (dcbe->type) {
@@ -1603,10 +1727,10 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->aux = aux;
}
- if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
- ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
- ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
- nv_connector->base.base.id,
+ if (nv_connector->type != DCB_CONNECTOR_eDP &&
+ nv50_has_mst(drm)) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
+ 16, nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
@@ -1652,7 +1776,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u8 owner = 1 << nv_crtc->index;
@@ -1660,8 +1783,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
nv50_outp_acquire(nv_encoder);
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_connector->base.display_info.bpc) {
+ switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
case 8: asyh->or.depth = 0x5; break;
case 6: asyh->or.depth = 0x2; break;
@@ -1811,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_core *core = disp->core;
struct nv50_outp_atom *outp, *outt;
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
int i;
@@ -1830,8 +1953,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
- if (old_crtc_state->active && !new_crtc_state->active)
+
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ pm_runtime_put_noidle(dev->dev);
drm_crtc_vblank_off(crtc);
+ }
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -1917,13 +2043,30 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
if (new_crtc_state->active) {
- if (!old_crtc_state->active)
+ if (!old_crtc_state->active) {
drm_crtc_vblank_on(crtc);
+ pm_runtime_get_noresume(dev->dev);
+ }
if (new_crtc_state->event)
drm_crtc_vblank_get(crtc);
}
}
+ /* Update window->head assignment.
+ *
+ * This has to happen in an update that's not interlocked with
+ * any window channels to avoid hitting HW error checks.
+ *
+ *TODO: Proper handling of window ownership (Turing apparently
+ * supports non-fixed mappings).
+ */
+ if (core->assign_windows) {
+ core->func->wndw.owner(core);
+ core->func->update(core, interlock, false);
+ core->assign_windows = false;
+ interlock[NV50_DISP_INTERLOCK_CORE] = 0;
+ }
+
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
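
The window-ownership update above is deliberately deferred from core init (see the corec37d/corec57d changes earlier in this patch) to the first commit tail, where it can be flushed on the core channel alone. A simplified sketch of that one-shot pattern, with hypothetical names:

struct sketch_core {
	int assign_windows;				/* set once by core init */
	void (*wndw_owner)(struct sketch_core *);	/* program owners */
	void (*flush)(struct sketch_core *);		/* core-only update */
};

static void sketch_assign_windows(struct sketch_core *core,
				  unsigned int *interlock, int core_idx)
{
	if (!core->assign_windows)
		return;
	core->wndw_owner(core);		/* window->head ownership */
	core->flush(core);		/* core channel flushed on its own */
	core->assign_windows = 0;
	interlock[core_idx] = 0;	/* core interlock already consumed */
}
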
@@ -1983,6 +2126,10 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
+
+ /* Drop the RPM ref we got from nv50_disp_atomic_commit() */
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
static void
@@ -1997,11 +2144,8 @@ static int
nv50_disp_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
- struct drm_crtc *crtc;
- bool active = false;
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
@@ -2038,27 +2182,17 @@ nv50_disp_atomic_commit(struct drm_device *dev,
drm_atomic_state_get(state);
+ /*
+ * Grab another RPM ref for the commit tail, which will release the
+ * ref when it's finished
+ */
+ pm_runtime_get_noresume(dev->dev);
+
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
nv50_disp_atomic_commit_tail(state);
- drm_for_each_crtc(crtc, dev) {
- if (crtc->state->active) {
- if (!drm->have_disp_power_ref) {
- drm->have_disp_power_ref = true;
- return 0;
- }
- active = true;
- break;
- }
- }
-
- if (!active && drm->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
- drm->have_disp_power_ref = false;
- }
-
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
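
The runtime-PM handling replaced above no longer tracks a single have_disp_power_ref flag: the commit path hands an extra reference to the commit tail, and each active CRTC holds its own. A toy model of the per-commit hand-off (hypothetical names, a plain counter standing in for the kernel's pm_runtime usage count):

#include <stdatomic.h>

static atomic_int sketch_rpm_usage;	/* stands in for the device usage count */

static void sketch_commit(void)		/* nv50_disp_atomic_commit() side */
{
	atomic_fetch_add(&sketch_rpm_usage, 1);	/* pm_runtime_get_noresume() */
	/* ...queue or run the commit tail, which now owns this reference... */
}

static void sketch_commit_tail(void)	/* nv50_disp_atomic_commit_tail() side */
{
	/* ...program the hardware... */
	atomic_fetch_sub(&sketch_rpm_usage, 1);	/* pm_runtime_put_autosuspend() */
}
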
@@ -2266,7 +2400,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
- nv50_mstm_init(nv_encoder->dp.mstm);
+ nv50_mstm_init(nv_encoder->dp.mstm, runtime);
}
}
@@ -2285,6 +2419,8 @@ nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
+ nv50_audio_component_fini(nouveau_drm(dev));
+
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
@@ -2306,6 +2442,7 @@ nv50_display_create(struct drm_device *dev)
struct nv50_disp *disp;
struct dcb_output *dcbe;
int crtcs, ret, i;
+ bool has_mst = nv50_has_mst(drm);
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
@@ -2320,6 +2457,7 @@ nv50_display_create(struct drm_device *dev)
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
+ dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2353,11 +2491,37 @@ nv50_display_create(struct drm_device *dev)
crtcs = 0x3;
for (i = 0; i < fls(crtcs); i++) {
+ struct nv50_head *head;
+
if (!(crtcs & (1 << i)))
continue;
- ret = nv50_head_create(dev, i);
- if (ret)
+
+ head = nv50_head_create(dev, i);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
goto out;
+ }
+
+ if (has_mst) {
+ head->msto = nv50_msto_new(dev, head, i);
+ if (IS_ERR(head->msto)) {
+ ret = PTR_ERR(head->msto);
+ head->msto = NULL;
+ goto out;
+ }
+
+ /*
+ * FIXME: This is a hack to workaround the following
+ * issues:
+ *
+ * https://gitlab.gnome.org/GNOME/mutter/issues/759
+ * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
+ *
+ * Once these issues are closed, this should be
+ * removed
+ */
+ head->msto->encoder.possible_crtcs = crtcs;
+ }
}
/* create encoder/connector objects based on VBIOS DCB table */
@@ -2394,7 +2558,7 @@ nv50_display_create(struct drm_device *dev)
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
+ if (connector->possible_encoders)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
@@ -2405,6 +2569,8 @@ nv50_display_create(struct drm_device *dev)
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
dev->vblank_disable_immediate = true;
+ nv50_audio_component_init(drm);
+
out:
if (ret)
nv50_display_destroy(dev);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 7c41b0599d1a..d54fe00ac3a3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -4,6 +4,8 @@
#include "nouveau_display.h"
+struct nv50_msto;
+
struct nv50_disp {
struct nvif_disp *disp;
struct nv50_core *core;
@@ -78,14 +80,14 @@ void evo_kick(u32 *, struct nv50_dmac *);
#define evo_mthd(p, m, s) do { \
const u32 _m = (m), _s = (s); \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
pr_err("%04x %d %s\n", _m, _s, __func__); \
*((p)++) = ((_s << 18) | _m); \
} while(0)
#define evo_data(p, d) do { \
const u32 _d = (d); \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
pr_err("\t%08x\n", _d); \
*((p)++) = _d; \
} while(0)
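
For reference, the evo_mthd()/evo_data() macros above emit one header word per method, packing the data count into bits 18 and up and the method offset into the low bits, followed by the payload words. A standalone sketch of that encoding (buffer management omitted):

#include <stdint.h>

static uint32_t *sketch_evo_mthd(uint32_t *p, uint32_t mthd, uint32_t count)
{
	*p++ = (count << 18) | mthd;	/* header: size, then method offset */
	return p;
}

static uint32_t *sketch_evo_data(uint32_t *p, uint32_t data)
{
	*p++ = data;			/* payload word */
	return p;
}
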
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 929d93b1677e..d9d64602947d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
struct nv50_head_atom *asyh,
struct nouveau_conn_atom *asyc)
{
- struct drm_connector *connector = asyc->state.connector;
u32 mode = 0x00;
if (asyc->dither.mode == DITHERING_MODE_AUTO) {
- if (asyh->base.depth > connector->display_info.bpc * 3)
+ if (asyh->base.depth > asyh->or.bpc * 3)
mode = DITHERING_MODE_DYNAMIC2X2;
} else {
mode = asyc->dither.mode;
}
if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
+ if (asyh->or.bpc >= 8)
mode |= DITHERING_DEPTH_8BPC;
} else {
mode |= asyc->dither.depth;
@@ -214,6 +213,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
{
struct nv50_disp *disp = nv50_disp(head->base.base.dev);
struct drm_property_blob *olut = asyh->state.gamma_lut;
+ int size;
/* Determine whether core output LUT should be enabled. */
if (olut) {
@@ -230,14 +230,23 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
}
}
- if (!olut && !head->func->olut_identity) {
- asyh->olut.handle = 0;
- return 0;
+ if (!olut) {
+ if (!head->func->olut_identity) {
+ asyh->olut.handle = 0;
+ return 0;
+ }
+ size = 0;
+ } else {
+ size = drm_color_lut_size(olut);
}
+ if (!head->func->olut(head, asyh, size)) {
+ DRM_DEBUG_KMS("Invalid olut\n");
+ return -EINVAL;
+ }
asyh->olut.handle = disp->core->chan.vram.handle;
asyh->olut.buffer = !asyh->olut.buffer;
- head->func->olut(head, asyh);
+
return 0;
}
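
The reworked LUT check above derives the table size from the gamma blob (or 0 for identity-capable heads) and lets the per-head olut hook reject sizes the hardware cannot load. A standalone sketch of that validation, mirroring the 256/1024 handling head907d_olut gains later in this patch:

#include <stdbool.h>

static bool sketch_olut_check(int size, int *mode)
{
	if (size != 256 && size != 1024)
		return false;			/* unsupported LUT size */
	*mode = (size == 1024) ? 4 : 7;		/* e.g. head907d's mode pick */
	return true;
}
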
@@ -474,53 +483,56 @@ nv50_head_func = {
.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
-int
+struct nv50_head *
nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
- struct nv50_wndw *curs, *wndw;
+ struct nv50_wndw *base, *ovly, *curs;
struct drm_crtc *crtc;
int ret;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
head->base.index = index;
if (disp->disp->object.oclass < GV100_DISP) {
- ret = nv50_ovly_new(drm, head->base.index, &wndw);
- ret = nv50_base_new(drm, head->base.index, &wndw);
+ ret = nv50_base_new(drm, head->base.index, &base);
+ ret = nv50_ovly_new(drm, head->base.index, &ovly);
} else {
- ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
- head->base.index * 2 + 1, &wndw);
ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_PRIMARY,
- head->base.index * 2 + 0, &wndw);
+ head->base.index * 2 + 0, &base);
+ ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
+ head->base.index * 2 + 1, &ovly);
}
if (ret == 0)
ret = nv50_curs_new(drm, head->base.index, &curs);
if (ret) {
kfree(head);
- return ret;
+ return ERR_PTR(ret);
}
crtc = &head->base.base;
- drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
+ drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
&nv50_head_func, "head-%d", head->base.index);
drm_crtc_helper_add(crtc, &nv50_head_help);
+ /* Keep the legacy gamma size at 256 to avoid compatibility issues */
drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size,
+ disp->disp->object.oclass >= GF110_DISP,
+ head->func->olut_size);
if (head->func->olut_set) {
ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
- if (ret)
- goto out;
+ if (ret) {
+ nv50_head_destroy(crtc);
+ return ERR_PTR(ret);
+ }
}
-out:
- if (ret)
- nv50_head_destroy(crtc);
- return ret;
+ return head;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index d1c002f534d4..c32b27cdaefc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -11,17 +11,19 @@ struct nv50_head {
const struct nv50_head_func *func;
struct nouveau_crtc base;
struct nv50_lut olut;
+ struct nv50_msto *msto;
};
-int nv50_head_create(struct drm_device *, int index);
+struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
struct nv50_head_func {
void (*view)(struct nv50_head *, struct nv50_head_atom *);
void (*mode)(struct nv50_head *, struct nv50_head_atom *);
- void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+ bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
bool olut_identity;
+ int olut_size;
void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
void (*olut_clr)(struct nv50_head *);
void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -43,7 +45,7 @@ struct nv50_head_func {
extern const struct nv50_head_func head507d;
void head507d_view(struct nv50_head *, struct nv50_head_atom *);
void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
void head507d_core_clr(struct nv50_head *);
int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
@@ -60,7 +62,7 @@ extern const struct nv50_head_func head827d;
extern const struct nv50_head_func head907d;
void head907d_view(struct nv50_head *, struct nv50_head_atom *);
void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head907d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut_clr(struct nv50_head *);
void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 7561be5ca707..66ccf36b56a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -271,15 +271,19 @@ head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 256)
+ return false;
+
if (asyh->base.cpp == 1)
asyh->olut.mode = 0;
else
asyh->olut.mode = 1;
asyh->olut.load = head507d_olut_load;
+ return true;
}
void
@@ -328,6 +332,7 @@ head507d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
+ .olut_size = 256,
.olut_set = head507d_olut_set,
.olut_clr = head507d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index af5e7bd5978b..11877119eea4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -108,6 +108,7 @@ head827d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
+ .olut_size = 256,
.olut_set = head827d_olut_set,
.olut_clr = head827d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index c2d09dd97b1f..3002ec23d7a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -230,11 +230,15 @@ head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
- asyh->olut.mode = 7;
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyh->olut.mode = size == 1024 ? 4 : 7;
asyh->olut.load = head907d_olut_load;
+ return true;
}
void
@@ -285,6 +289,7 @@ head907d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 303df8459ca8..76958cedd51f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -83,6 +83,7 @@ head917d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index ef6a99d95a9c..00011ce109a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -148,14 +148,18 @@ headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
-headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+static bool
+headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 256 && size != 1024)
+ return false;
+
asyh->olut.mode = 2;
- asyh->olut.size = 0;
+ asyh->olut.size = size == 1024 ? 2 : 0;
asyh->olut.range = 0;
asyh->olut.output_mode = 1;
asyh->olut.load = head907d_olut_load;
+ return true;
}
static void
@@ -201,6 +205,7 @@ headc37d = {
.view = headc37d_view,
.mode = headc37d_mode,
.olut = headc37d_olut,
+ .olut_size = 1024,
.olut_set = headc37d_olut_set,
.olut_clr = headc37d_olut_clr,
.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 32a7f9e85fb0..938d910a1b1e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -151,17 +151,20 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 0 && size != 256 && size != 1024)
+ return false;
+
asyh->olut.mode = 2; /* DIRECT10 */
asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
- if (asyh->state.gamma_lut &&
- asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+ if (size == 256)
asyh->olut.load = headc57d_olut_load_8;
else
asyh->olut.load = headc57d_olut_load;
+ return true;
}
static void
@@ -194,6 +197,7 @@ headc57d = {
.mode = headc57d_mode,
.olut = headc57d_olut,
.olut_identity = true,
+ .olut_size = 1024,
.olut_set = headc57d_olut_set,
.olut_clr = headc57d_olut_clr,
.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index 994def4fd51a..4e95ca5604ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -49,7 +49,7 @@ nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
kvfree(in);
}
} else {
- load(in, blob->length / sizeof(*in), mem);
+ load(in, drm_color_lut_size(blob), mem);
}
return addr;
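
drm_color_lut_size(), used above in place of the open-coded division, reports how many drm_color_lut entries a gamma blob holds. A stand-in for illustration (sketch_color_lut mirrors the u16 red/green/blue/reserved layout of struct drm_color_lut):

#include <stddef.h>

struct sketch_color_lut { unsigned short red, green, blue, reserved; };

static int sketch_color_lut_size(size_t blob_length)
{
	return blob_length / sizeof(struct sketch_color_lut);
}
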
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index cc417664f823..8ccd96113bad 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -23,6 +23,7 @@
#include "atom.h"
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <nvif/cl507e.h>
@@ -160,9 +161,7 @@ ovly507e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index aaa9fe5a4fc8..2e68fc736fe1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -90,11 +90,8 @@ ovly827e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index a3ce53046015..9efe5e9d5ce4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -61,10 +61,21 @@ ovly907e = {
.update = ovly507e_update,
};
+static const u32
+ovly907e_format[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+ 0
+};
+
int
ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
- return ovly507e_new_(&ovly907e, ovly827e_format, drm, head, oclass,
+ return ovly507e_new_(&ovly907e, ovly907e_format, drm, head, oclass,
0x00000004 << (head * 4), pwndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
index 505fa7e78523..e24d6fd23450 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
@@ -26,13 +26,10 @@ ovly917e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 283ff690350e..890315291b01 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -26,6 +26,8 @@
#include <nvif/cl0002.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "nouveau_bo.h"
static void
@@ -118,6 +120,7 @@ nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
if (clr.sema ) wndw->func-> sema_clr(wndw);
if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
if (clr.xlut ) wndw->func-> xlut_clr(wndw);
+ if (clr.csc ) wndw->func-> csc_clr(wndw);
if (clr.image) wndw->func->image_clr(wndw);
interlock[wndw->interlock.type] |= wndw->interlock.data;
@@ -145,7 +148,9 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
wndw->func->xlut_set(wndw, asyw);
}
+ if (asyw->set.csc ) wndw->func->csc_set (wndw, asyw);
if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
+ if (asyw->set.blend) wndw->func->blend_set(wndw, asyw);
if (asyw->set.point) {
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
@@ -202,18 +207,20 @@ static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
switch (asyw->state.fb->format->format) {
- case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
- case DRM_FORMAT_XRGB8888 :
- case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
- case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
- case DRM_FORMAT_XRGB1555 :
- case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
- case DRM_FORMAT_XBGR8888 :
- case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
+ case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
+ case DRM_FORMAT_XRGB8888 :
+ case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
+ case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
+ case DRM_FORMAT_XRGB1555 :
+ case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
+ case DRM_FORMAT_XBGR2101010 :
+ case DRM_FORMAT_ABGR2101010 : asyw->image.format = 0xd1; break;
+ case DRM_FORMAT_XBGR8888 :
+ case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
+ case DRM_FORMAT_XRGB2101010 :
+ case DRM_FORMAT_ARGB2101010 : asyw->image.format = 0xdf; break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
default:
return -EINVAL;
}
@@ -260,7 +267,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->image.pitch[0] = fb->base.pitches[0];
}
- if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
+ if (!asyh->state.async_flip)
asyw->image.interval = 1;
else
asyw->image.interval = 0;
@@ -279,6 +286,28 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->set.scale = true;
}
+ if (wndw->func->blend_set) {
+ asyw->blend.depth = 255 - asyw->state.normalized_zpos;
+ asyw->blend.k1 = asyw->state.alpha >> 8;
+ switch (asyw->state.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PREMULTI:
+ asyw->blend.src_color = 2; /* K1 */
+ asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ asyw->blend.src_color = 5; /* K1_TIMES_SRC */
+ asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+ break;
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ default:
+ asyw->blend.src_color = 2; /* K1 */
+ asyw->blend.dst_color = 4; /* NEG_K1 */
+ break;
+ }
+ if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
+ asyw->set.blend = true;
+ }
+
if (wndw->immd) {
asyw->point.x = asyw->state.crtc_x;
asyw->point.y = asyw->state.crtc_y;
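
The blend state derivation above maps DRM plane properties onto the window channel's composition controls: depth is the inverted normalized zpos, K1 is the 8-bit plane alpha, and the pixel blend mode selects the colour factors (per the comments in the patch: 2 = K1, 4 = NEG_K1, 5 = K1_TIMES_SRC, 7 = NEG_K1_TIMES_SRC). A standalone sketch with hypothetical names:

struct sketch_blend {
	unsigned depth, k1, src_color, dst_color;
};

enum sketch_blend_mode { SKETCH_PREMULTI, SKETCH_COVERAGE, SKETCH_NONE };

static struct sketch_blend
sketch_blend_state(unsigned normalized_zpos, unsigned alpha16,
		   enum sketch_blend_mode mode)
{
	struct sketch_blend b = {
		.depth = 255 - normalized_zpos,	/* smaller depth = closer */
		.k1    = alpha16 >> 8,		/* 16-bit alpha -> 8-bit K1 */
	};

	switch (mode) {
	case SKETCH_PREMULTI:
		b.src_color = 2; b.dst_color = 7;
		break;
	case SKETCH_COVERAGE:
		b.src_color = 5; b.dst_color = 7;
		break;
	case SKETCH_NONE:
	default:
		b.src_color = 2; b.dst_color = 4;
		break;
	}
	return b;
}
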
@@ -289,7 +318,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
return wndw->func->acquire(wndw, asyw, asyh);
}
-static void
+static int
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
struct nv50_wndw_atom *armw,
struct nv50_wndw_atom *asyw,
@@ -311,7 +340,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
*/
if (!(ilut = asyh->state.gamma_lut)) {
asyw->visible = false;
- return;
+ return 0;
}
if (wndw->func->ilut)
@@ -320,7 +349,9 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
asyh->wndw.olut &= ~BIT(wndw->id);
}
- if (!ilut && wndw->func->ilut_identity) {
+ if (!ilut && wndw->func->ilut_identity &&
+ asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
+ asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
static struct drm_property_blob dummy = {};
ilut = &dummy;
}
@@ -328,10 +359,15 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
- wndw->func->ilut(wndw, asyw);
+ if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) {
+ DRM_DEBUG_KMS("Invalid ilut\n");
+ return -EINVAL;
+ }
asyw->xlut.handle = wndw->wndw.vram.handle;
asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
asyw->set.xlut = true;
+ } else {
+ asyw->clr.xlut = armw->xlut.handle != 0;
}
/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
@@ -339,8 +375,19 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
(!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
asyw->set.xlut = true;
+ if (wndw->func->csc && asyh->state.ctm) {
+ const struct drm_color_ctm *ctm = asyh->state.ctm->data;
+ wndw->func->csc(wndw, asyw, ctm);
+ asyw->csc.valid = true;
+ asyw->set.csc = true;
+ } else {
+ asyw->csc.valid = false;
+ asyw->clr.csc = armw->csc.valid;
+ }
+
/* Can't do an immediate flip while changing the LUT. */
- asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
+ asyh->state.async_flip = false;
+ return 0;
}
static int
@@ -381,8 +428,11 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
(!armw->visible ||
asyh->state.color_mgmt_changed ||
asyw->state.fb->format->format !=
- armw->state.fb->format->format))
- nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ armw->state.fb->format->format)) {
+ ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
/* Calculate new window state. */
if (asyw->visible) {
@@ -408,6 +458,7 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
+ asyw->clr.csc = armw->csc.valid;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
}
@@ -457,7 +508,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+ asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
asyw->image.offset[0] = fb->nvbo->bo.offset;
if (wndw->func->prepare) {
@@ -499,6 +550,7 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
asyw->ntfy = armw->ntfy;
asyw->ilut = NULL;
asyw->xlut = armw->xlut;
+ asyw->csc = armw->csc;
asyw->image = armw->image;
asyw->point = armw->point;
asyw->clr.mask = 0;
@@ -506,6 +558,13 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
return &asyw->state;
}
+static int
+nv50_wndw_zpos_default(struct drm_plane *plane)
+{
+ return (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 :
+ (plane->type == DRM_PLANE_TYPE_OVERLAY) ? 1 : 255;
+}
+
static void
nv50_wndw_reset(struct drm_plane *plane)
{
@@ -516,9 +575,10 @@ nv50_wndw_reset(struct drm_plane *plane)
if (plane->state)
plane->funcs->atomic_destroy_state(plane, plane->state);
- plane->state = &asyw->state;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
+
+ __drm_atomic_helper_plane_reset(plane, &asyw->state);
+ plane->state->zpos = nv50_wndw_zpos_default(plane);
+ plane->state->normalized_zpos = nv50_wndw_zpos_default(plane);
}
static void
@@ -613,6 +673,30 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
}
wndw->notify.func = nv50_wndw_notify;
+
+ if (wndw->func->blend_set) {
+ ret = drm_plane_create_zpos_property(&wndw->plane,
+ nv50_wndw_zpos_default(&wndw->plane), 0, 254);
+ if (ret)
+ return ret;
+
+ ret = drm_plane_create_alpha_property(&wndw->plane);
+ if (ret)
+ return ret;
+
+ ret = drm_plane_create_blend_mode_property(&wndw->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ if (ret)
+ return ret;
+ } else {
+ ret = drm_plane_create_zpos_immutable_property(&wndw->plane,
+ nv50_wndw_zpos_default(&wndw->plane));
+ if (ret)
+ return ret;
+ }
+
return 0;
}
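
The zpos/alpha/blend properties registered above rely on DRM's zpos normalization (enabled earlier via dev->mode_config.normalize_zpos = true), which ranks the planes on a CRTC into dense 0..n-1 values consumed as normalized_zpos. A simplified sketch of that ranking, assuming unique zpos values (the real helper also tie-breaks by plane id):

static void sketch_normalize_zpos(const unsigned *zpos, unsigned *normalized,
				  int n)
{
	for (int i = 0; i < n; i++) {
		unsigned rank = 0;
		for (int j = 0; j < n; j++)
			if (zpos[j] < zpos[i])
				rank++;		/* count planes below us */
		normalized[i] = rank;
	}
}
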
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 03f3d8dc235a..caf397475918 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -64,14 +64,20 @@ struct nv50_wndw_func {
void (*ntfy_clr)(struct nv50_wndw *);
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
- void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+ void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
+ const struct drm_color_ctm *);
+ void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*csc_clr)(struct nv50_wndw *);
bool ilut_identity;
+ int ilut_size;
bool olut_core;
void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*xlut_clr)(struct nv50_wndw *);
void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*image_clr)(struct nv50_wndw *);
void (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*update)(struct nv50_wndw *, u32 *interlock);
};
@@ -81,6 +87,9 @@ extern const struct drm_plane_funcs nv50_wndw;
void base507c_ntfy_reset(struct nouveau_bo *, u32);
int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+void base907c_csc(struct nv50_wndw *, struct nv50_wndw_atom *,
+ const struct drm_color_ctm *);
+
struct nv50_wimm_func {
void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
@@ -102,8 +111,8 @@ void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_sema_clr(struct nv50_wndw *);
void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_ntfy_clr(struct nv50_wndw *);
-void wndwc37e_image_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_image_clr(struct nv50_wndw *);
+void wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index e52a85c83f7a..b92dc3461bbd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -29,6 +29,23 @@
#include <nvif/clc37e.h>
static void
+wndwc37e_csc_clr(struct nv50_wndw *wndw)
+{
+}
+
+static void
+wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push, i;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x02bc, 12);
+ for (i = 0; i < 12; i++)
+ evo_data(push, asyw->csc.matrix[i]);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -54,14 +71,38 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
-wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
+ if (size != 256 && size != 1024)
+ return false;
+
asyw->xlut.i.mode = 2;
- asyw->xlut.i.size = 0;
+ asyw->xlut.i.size = size == 1024 ? 2 : 0;
asyw->xlut.i.range = 0;
asyw->xlut.i.output_mode = 1;
asyw->xlut.i.load = head907d_olut_load;
+ return true;
+}
+
+void
+wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 8))) {
+ evo_mthd(push, 0x02ec, 7);
+ evo_data(push, asyw->blend.depth << 4);
+ evo_data(push, asyw->blend.k1);
+ evo_data(push, asyw->blend.dst_color << 12 |
+ asyw->blend.dst_color << 8 |
+ asyw->blend.src_color << 4 |
+ asyw->blend.src_color);
+ evo_data(push, 0xffff0000);
+ evo_data(push, 0xffff0000);
+ evo_data(push, 0xffff0000);
+ evo_data(push, 0xffff0000);
+ evo_kick(push, &wndw->wndw);
+ }
}
void
@@ -77,12 +118,12 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
}
}
-void
+static void
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
- if (!(push = evo_wait(&wndw->wndw, 25)))
+ if (!(push = evo_wait(&wndw->wndw, 17)))
return;
evo_mthd(push, 0x0308, 1);
@@ -90,7 +131,9 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
evo_mthd(push, 0x0224, 4);
evo_data(push, asyw->image.h << 16 | asyw->image.w);
evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
- evo_data(push, asyw->image.colorspace << 8 | asyw->image.format);
+ evo_data(push, asyw->csc.valid << 17 |
+ asyw->image.colorspace << 8 |
+ asyw->image.format);
evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
evo_mthd(push, 0x0240, 1);
evo_data(push, asyw->image.handle[0]);
@@ -105,16 +148,6 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
evo_mthd(push, 0x02a4, 1);
evo_data(push, asyw->state.crtc_h << 16 |
asyw->state.crtc_w);
-
- /*XXX: Composition-related stuff. Need to implement properly. */
- evo_mthd(push, 0x02ec, 1);
- evo_data(push, (2 - (wndw->id & 1)) << 4);
- evo_mthd(push, 0x02f4, 5);
- evo_data(push, 0x00000011);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
evo_kick(push, &wndw->wndw);
}
@@ -216,6 +249,8 @@ wndwc37e_format[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
0
};
@@ -230,10 +265,15 @@ wndwc37e = {
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc37e_ilut,
+ .ilut_size = 1024,
.xlut_set = wndwc37e_ilut_set,
.xlut_clr = wndwc37e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc37e_csc_set,
+ .csc_clr = wndwc37e_csc_clr,
.image_set = wndwc37e_image_set,
.image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index ba89f1a5fcfa..35c9c52fab26 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -29,6 +29,72 @@
#include <nvif/clc37e.h>
static void
+wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push;
+
+ if (!(push = evo_wait(&wndw->wndw, 17)))
+ return;
+
+ evo_mthd(push, 0x0308, 1);
+ evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
+ evo_mthd(push, 0x0224, 4);
+ evo_data(push, asyw->image.h << 16 | asyw->image.w);
+ evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
+ evo_data(push, asyw->image.colorspace << 8 |
+ asyw->image.format);
+ evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
+ evo_mthd(push, 0x0240, 1);
+ evo_data(push, asyw->image.handle[0]);
+ evo_mthd(push, 0x0260, 1);
+ evo_data(push, asyw->image.offset[0] >> 8);
+ evo_mthd(push, 0x0290, 1);
+ evo_data(push, (asyw->state.src_y >> 16) << 16 |
+ (asyw->state.src_x >> 16));
+ evo_mthd(push, 0x0298, 1);
+ evo_data(push, (asyw->state.src_h >> 16) << 16 |
+ (asyw->state.src_w >> 16));
+ evo_mthd(push, 0x02a4, 1);
+ evo_data(push, asyw->state.crtc_h << 16 |
+ asyw->state.crtc_w);
+ evo_kick(push, &wndw->wndw);
+}
+
+static void
+wndwc57e_csc_clr(struct nv50_wndw *wndw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x0400, 12);
+ evo_data(push, 0x00010000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00010000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00010000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
+wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push, i;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x0400, 12);
+ for (i = 0; i < 12; i++)
+ evo_data(push, asyw->csc.matrix[i]);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -90,19 +156,21 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static void
-wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+ if (size = size ? size : 1024, size != 256 && size != 1024)
+ return false;
+
if (size == 256) {
asyw->xlut.i.mode = 1; /* DIRECT8. */
} else {
asyw->xlut.i.mode = 2; /* DIRECT10. */
- size = 1024;
}
asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
asyw->xlut.i.load = wndwc57e_ilut_load;
+ return true;
}
static const struct nv50_wndw_func
@@ -117,10 +185,15 @@ wndwc57e = {
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc57e_ilut,
.ilut_identity = true,
+ .ilut_size = 1024,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
- .image_set = wndwc37e_image_set,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc57e_image_set,
.image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
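
wndwc57e_csc_clr() above resets the window CSC to an identity 3x4 matrix; the 0x00010000 diagonal entries suggest a 16.16 fixed-point format, though that interpretation is an assumption here. A sketch building the same 12-entry array:

#include <stdint.h>

static void sketch_csc_identity(uint32_t m[12])
{
	for (int i = 0; i < 12; i++)
		m[i] = 0;
	m[0] = m[5] = m[10] = 0x00010000;	/* diagonal = 1.0 */
}
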
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/acr.h b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
new file mode 100644
index 000000000000..e65d6a8db104
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
@@ -0,0 +1,152 @@
+#ifndef __NVFW_ACR_H__
+#define __NVFW_ACR_H__
+
+struct wpr_header {
+#define WPR_HEADER_V0_FALCON_ID_INVALID 0xffffffff
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+#define WPR_HEADER_V0_STATUS_NONE 0
+#define WPR_HEADER_V0_STATUS_COPY 1
+#define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED 2
+#define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED 3
+#define WPR_HEADER_V0_STATUS_VALIDATION_DONE 4
+#define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED 5
+#define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY 6
+ u32 status;
+};
+
+void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *);
+
+struct wpr_header_v1 {
+#define WPR_HEADER_V1_FALCON_ID_INVALID 0xffffffff
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 bin_version;
+#define WPR_HEADER_V1_STATUS_NONE 0
+#define WPR_HEADER_V1_STATUS_COPY 1
+#define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED 2
+#define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED 3
+#define WPR_HEADER_V1_STATUS_VALIDATION_DONE 4
+#define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED 5
+#define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY 6
+#define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED 7
+ u32 status;
+};
+
+void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *);
+
+struct lsf_signature {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+};
+
+struct lsf_signature_v1 {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+ u32 supports_versioning;
+ u32 version;
+ u32 depmap_count;
+ u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4];
+ u8 kdf[16];
+};
+
+struct lsb_header_tail {
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+};
+
+struct lsb_header {
+ struct lsf_signature signature;
+ struct lsb_header_tail tail;
+};
+
+void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *);
+
+struct lsb_header_v1 {
+ struct lsf_signature_v1 signature;
+ struct lsb_header_tail tail;
+};
+
+void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *);
+
+struct flcn_acr_desc {
+ union {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ } ucode_reserved_space;
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_mem_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *);
+
+struct flcn_acr_desc_v1 {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_memory_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ u32 shadow_mem_start_addr;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/flcn.h b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
new file mode 100644
index 000000000000..e090f347d220
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FLCN_H__
+#define __NVFW_FLCN_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct loader_config {
+ u32 dma_idx;
+ u32 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+ u32 overlay_dma_base1;
+};
+
+void
+loader_config_dump(struct nvkm_subdev *, const struct loader_config *);
+
+struct loader_config_v1 {
+ u32 reserved;
+ u32 dma_idx;
+ u64 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+ u64 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+} __packed;
+
+void
+loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *);
+
+struct flcn_bl_dmem_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u32 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+};
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *);
+
+struct flcn_bl_dmem_desc_v1 {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+} __packed;
+
+void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *,
+ const struct flcn_bl_dmem_desc_v1 *);
+
+struct flcn_bl_dmem_desc_v2 {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+ u32 argc;
+ u32 argv;
+} __packed;
+
+void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *,
+ const struct flcn_bl_dmem_desc_v2 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/fw.h b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
new file mode 100644
index 000000000000..a7cf1188c9d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FW_H__
+#define __NVFW_FW_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_bin_hdr {
+ u32 bin_magic;
+ u32 bin_ver;
+ u32 bin_size;
+ u32 header_offset;
+ u32 data_offset;
+ u32 data_size;
+};
+
+const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *);
+
+struct nvfw_bl_desc {
+ u32 start_tag;
+ u32 dmem_load_off;
+ u32 code_off;
+ u32 code_size;
+ u32 data_off;
+ u32 data_size;
+};
+
+const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *);
+#endif
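
(Editor's note, not part of the patch: nvfw_bin_hdr describes the wrapper around the firmware blobs nvkm loads; header_offset/data_offset are byte offsets into the image. Below is a minimal, self-contained sketch that mirrors this layout to locate the payload in a raw blob. The struct field order is taken from nvfw/fw.h above; the bounds checking and helper name are assumptions for illustration only, not driver code.)

/* Illustrative only: walk the nvfw_bin_hdr layout in a raw firmware blob. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct bin_hdr {                /* same field order as struct nvfw_bin_hdr */
	uint32_t bin_magic;
	uint32_t bin_ver;
	uint32_t bin_size;
	uint32_t header_offset; /* byte offset of the per-engine header */
	uint32_t data_offset;   /* byte offset of the ucode payload */
	uint32_t data_size;     /* payload size in bytes */
};

static const void *
bin_payload(const void *blob, size_t len, uint32_t *size)
{
	struct bin_hdr hdr;

	if (len < sizeof(hdr))
		return NULL;
	memcpy(&hdr, blob, sizeof(hdr));
	if (hdr.data_offset + (uint64_t)hdr.data_size > len)
		return NULL;    /* truncated image */
	*size = hdr.data_size;
	return (const char *)blob + hdr.data_offset;
}
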
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
new file mode 100644
index 000000000000..64d0d32200c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_HS_H__
+#define __NVFW_HS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_hs_header {
+ u32 sig_dbg_offset;
+ u32 sig_dbg_size;
+ u32 sig_prod_offset;
+ u32 sig_prod_size;
+ u32 patch_loc;
+ u32 patch_sig;
+ u32 hdr_offset;
+ u32 hdr_size;
+};
+
+const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *);
+
+struct nvfw_hs_load_header {
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 num_apps;
+ u32 apps[0];
+};
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/ls.h b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
new file mode 100644
index 000000000000..f63692a2a16c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_LS_H__
+#define __NVFW_LS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_ls_desc_head {
+ u32 descriptor_size;
+ u32 image_size;
+ u32 tools_version;
+ u32 app_version;
+ char date[64];
+ u32 bootloader_start_offset;
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+ u32 bootloader_entry_point;
+ u32 app_start_offset;
+ u32 app_size;
+ u32 app_imem_offset;
+ u32 app_imem_entry;
+ u32 app_dmem_offset;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+};
+
+struct nvfw_ls_desc {
+ struct nvfw_ls_desc_head head;
+ u32 nb_overlays;
+ struct {
+ u32 start;
+ u32 size;
+ } load_ovl[64];
+ u32 compressed;
+};
+
+const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *);
+
+struct nvfw_ls_desc_v1 {
+ struct nvfw_ls_desc_head head;
+ u32 nb_imem_overlays;
+ u32 nb_dmem_overlays;
+ struct {
+ u32 start;
+ u32 size;
+ } load_ovl[64];
+ u32 compressed;
+};
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
new file mode 100644
index 000000000000..452ed7d03827
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
@@ -0,0 +1,98 @@
+#ifndef __NVFW_PMU_H__
+#define __NVFW_PMU_H__
+
+struct nv_pmu_args {
+ u32 reserved;
+ u32 freq_hz;
+ u32 trace_size;
+ u32 trace_dma_base;
+ u16 trace_dma_base1;
+ u8 trace_dma_offset;
+ u32 trace_dma_idx;
+ bool secure_mode;
+ bool raise_priv_sec;
+ struct {
+ u32 dma_base;
+ u16 dma_base1;
+ u8 dma_offset;
+ u16 fb_size;
+ u8 dma_idx;
+ } gc6_ctx;
+ u8 pad;
+};
+
+#define NV_PMU_UNIT_INIT 0x07
+#define NV_PMU_UNIT_ACR 0x0a
+
+struct nv_pmu_init_msg {
+ struct nv_falcon_msg hdr;
+#define NV_PMU_INIT_MSG_INIT 0x00
+ u8 msg_type;
+
+ u8 pad;
+ u16 os_debug_entry_point;
+
+ struct {
+ u16 size;
+ u16 offset;
+ u8 index;
+ u8 pad;
+ } queue_info[5];
+
+ u16 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+};
+
+struct nv_pmu_acr_cmd {
+ struct nv_falcon_cmd hdr;
+#define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00
+#define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01
+#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03
+ u8 cmd_type;
+};
+
+struct nv_pmu_acr_msg {
+ struct nv_falcon_cmd hdr;
+ u8 msg_type;
+};
+
+struct nv_pmu_acr_init_wpr_region_cmd {
+ struct nv_pmu_acr_cmd cmd;
+ u32 region_id;
+ u32 wpr_offset;
+};
+
+struct nv_pmu_acr_init_wpr_region_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 error_code;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_cmd {
+ struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_cmd {
+ struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES 0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_mask;
+ u32 use_va_mask;
+ u32 wpr_lo;
+ u32 wpr_hi;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 falcon_mask;
+};
+#endif
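
(Editor's note, not part of the patch: these structures are the wire format for commands sent to the PMU falcon's ACR unit over its command queue; the header type nv_falcon_cmd and nvkm_falcon_cmdq_send() are declared in core/falcon.h further down, and the falcon id enum in subdev/acr.h. The sketch below shows how a BOOTSTRAP_FALCON command could be populated; the helper name, callback, and timeout are assumptions, and it requires driver context, so it is not standalone-runnable.)

/* Illustrative sketch (assumes kernel/driver context from this patch). */
static int
example_pmu_bootstrap(struct nvkm_falcon_cmdq *cmdq,
		      nvkm_falcon_qmgr_callback done, void *priv)
{
	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = NVKM_ACR_LSF_FECS, /* example falcon id */
	};

	/* Timeout is an arbitrary example value. */
	return nvkm_falcon_cmdq_send(cmdq, &cmd.cmd.hdr, done, priv,
				     msecs_to_jiffies(1000));
}
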
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
new file mode 100644
index 000000000000..03496558b775
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
@@ -0,0 +1,60 @@
+#ifndef __NVFW_SEC2_H__
+#define __NVFW_SEC2_H__
+
+struct nv_sec2_args {
+ u32 freq_hz;
+ u32 falc_trace_size;
+ u32 falc_trace_dma_base;
+ u32 falc_trace_dma_idx;
+ bool secure_mode;
+};
+
+#define NV_SEC2_UNIT_INIT 0x01
+#define NV_SEC2_UNIT_ACR 0x08
+
+struct nv_sec2_init_msg {
+ struct nv_falcon_msg hdr;
+#define NV_SEC2_INIT_MSG_INIT 0x00
+ u8 msg_type;
+
+ u8 num_queues;
+ u16 os_debug_entry_point;
+
+ struct {
+ u32 offset;
+ u16 size;
+ u8 index;
+#define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ 0x00
+#define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ 0x01
+ u8 id;
+ } queue_info[2];
+
+ u32 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+};
+
+struct nv_sec2_acr_cmd {
+ struct nv_falcon_cmd hdr;
+#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00
+ u8 cmd_type;
+};
+
+struct nv_sec2_acr_msg {
+ struct nv_falcon_cmd hdr;
+ u8 msg_type;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_cmd {
+ struct nv_sec2_acr_cmd cmd;
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_id;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_msg {
+ struct nv_sec2_acr_msg msg;
+ u32 error_code;
+ u32 falcon_id;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index f704ae600e94..30659747ffe8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -166,6 +166,8 @@
#define VOLTA_A /* cl9097.h */ 0x0000c397
+#define TURING_A /* cl9097.h */ 0x0000c597
+
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
@@ -207,6 +209,7 @@
#define PASCAL_COMPUTE_A 0x0000c0c0
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
+#define TURING_COMPUTE_A 0x0000c5c0
#define NV74_CIPHER 0x000074c1
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
index 8450127420f5..c21d09f04f1d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0008.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 {
struct nvif_mmu_kind_v0 {
__u8 version;
- __u8 pad01[1];
+ __u8 kind_inv;
__u16 count;
__u8 data[];
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index 747ecf67e403..cec1e88a0a05 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -7,6 +7,7 @@ struct nvif_mmu {
u8 dmabits;
u8 heap_nr;
u8 type_nr;
+ u8 kind_inv;
u16 kind_nr;
s32 mem;
@@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *);
static inline bool
nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
{
- const u8 invalid = mmu->kind_nr - 1;
if (kind) {
- if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+ if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)
return false;
}
return true;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 6d55cd0476aa..5c007ce62fc3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -23,13 +23,13 @@ enum nvkm_devidx {
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_FAULT,
+ NVKM_SUBDEV_ACR,
NVKM_SUBDEV_PMU,
NVKM_SUBDEV_VOLT,
NVKM_SUBDEV_ICCSENSE,
NVKM_SUBDEV_THERM,
NVKM_SUBDEV_CLK,
NVKM_SUBDEV_GSP,
- NVKM_SUBDEV_SECBOOT,
NVKM_ENGINE_BSP,
@@ -129,6 +129,7 @@ struct nvkm_device {
struct notifier_block nb;
} acpi;
+ struct nvkm_acr *acr;
struct nvkm_bar *bar;
struct nvkm_bios *bios;
struct nvkm_bus *bus;
@@ -149,7 +150,6 @@ struct nvkm_device {
struct nvkm_subdev *mxm;
struct nvkm_pci *pci;
struct nvkm_pmu *pmu;
- struct nvkm_secboot *secboot;
struct nvkm_therm *therm;
struct nvkm_timer *timer;
struct nvkm_top *top;
@@ -169,7 +169,7 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
- struct nvkm_engine *nvenc[3];
+ struct nvkm_nvenc *nvenc[3];
struct nvkm_nvdec *nvdec[3];
struct nvkm_pm *pm;
struct nvkm_engine *sec;
@@ -202,6 +202,7 @@ struct nvkm_device_quirk {
struct nvkm_device_chip {
const char *name;
+ int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **);
int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
@@ -222,7 +223,6 @@ struct nvkm_device_chip {
int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
- int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);
@@ -242,7 +242,7 @@ struct nvkm_device_chip {
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
new file mode 100644
index 000000000000..daa8e4bfb6bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -0,0 +1,77 @@
+#ifndef __NVKM_FALCON_H__
+#define __NVKM_FALCON_H__
+#include <engine/falcon.h>
+
+int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner,
+ const char *name, u32 addr, struct nvkm_falcon *);
+void nvkm_falcon_dtor(struct nvkm_falcon *);
+
+void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
+ void *, u32, u32, u16, u8, bool);
+void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
+void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
+void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32);
+int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32);
+void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr);
+void nvkm_falcon_v1_start(struct nvkm_falcon *);
+int nvkm_falcon_v1_enable(struct nvkm_falcon *);
+void nvkm_falcon_v1_disable(struct nvkm_falcon *);
+
+void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int gp102_sec2_flcn_enable(struct nvkm_falcon *);
+
+#define FLCN_PRINTK(t,f,fmt,a...) do { \
+ if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \
+ nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
+ else \
+ nvkm_##t((f)->owner, fmt"\n", ##a); \
+} while(0)
+#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
+#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
+
+/**
+ * struct nv_falcon_msg - header for all messages
+ *
+ * @unit_id: id of firmware process that sent the message
+ * @size: total size of message
+ * @ctrl_flags: control flags
+ * @seq_id: used to match a message to its corresponding command
+ */
+struct nv_falcon_msg {
+ u8 unit_id;
+ u8 size;
+ u8 ctrl_flags;
+ u8 seq_id;
+};
+
+#define nv_falcon_cmd nv_falcon_msg
+#define NV_FALCON_CMD_UNIT_ID_REWIND 0x00
+
+struct nvkm_falcon_qmgr;
+int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **);
+void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **);
+
+typedef int
+(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *);
+
+struct nvkm_falcon_cmdq;
+int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name,
+ struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *,
+ u32 index, u32 offset, u32 size);
+void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *);
+int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *,
+ nvkm_falcon_qmgr_callback, void *priv,
+ unsigned long timeout_jiffies);
+
+struct nvkm_falcon_msgq;
+int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name,
+ struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *,
+ u32 index, u32 offset, u32 size);
+int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size);
+void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *);
+#endif
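
(Editor's note, not part of the patch: the qmgr/cmdq/msgq interfaces above replace the old nvkm_msgqueue code. The expected ordering is to create one queue manager per falcon, then create and initialise the command and message queues from the geometry reported in the falcon's INIT message. Below is a hedged setup sketch using only the declarations above; the function name is hypothetical and the index/offset/size values are placeholders.)

/* Illustrative ordering only; real queue geometry comes from the falcon's
 * INIT message rather than the constants used here. */
static int
example_queue_setup(struct nvkm_falcon *falcon,
		    struct nvkm_falcon_qmgr **qmgr,
		    struct nvkm_falcon_cmdq **cmdq,
		    struct nvkm_falcon_msgq **msgq)
{
	int ret;

	ret = nvkm_falcon_qmgr_new(falcon, qmgr);
	if (ret)
		return ret;

	ret = nvkm_falcon_cmdq_new(*qmgr, "cmdq", cmdq);
	if (ret == 0)
		ret = nvkm_falcon_msgq_new(*qmgr, "msgq", msgq);
	if (ret)
		return ret;

	/* Placeholder geometry (index, DMEM offset, size). */
	nvkm_falcon_cmdq_init(*cmdq, 0, 0x000, 0x100);
	nvkm_falcon_msgq_init(*msgq, 1, 0x100, 0x100);
	return 0;
}
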
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index 383370c32428..d14b7fb07368 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,55 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
+#include <core/option.h>
#include <core/subdev.h>
-int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
- int min_version, int max_version,
- const struct firmware **);
-int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver,
const struct firmware **);
void nvkm_firmware_put(const struct firmware *);
+
+int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path,
+ const char *name, int ver, struct nvkm_blob *);
+int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path,
+ const char *name, int ver,
+ const struct firmware **);
+
+#define nvkm_firmware_load(s,l,o,p...) ({ \
+ struct nvkm_subdev *_s = (s); \
+ const char *_opts = (o); \
+ char _option[32]; \
+ typeof(l[0]) *_list = (l), *_next, *_fwif = NULL; \
+ int _ver, _fwv, _ret = 0; \
+ \
+ snprintf(_option, sizeof(_option), "Nv%sFw", _opts); \
+ _ver = nvkm_longopt(_s->device->cfgopt, _option, -2); \
+ if (_ver >= -1) { \
+ for (_next = _list; !_fwif && _next->load; _next++) { \
+ if (_next->version == _ver) \
+ _fwif = _next; \
+ } \
+ _ret = _fwif ? 0 : -EINVAL; \
+ } \
+ \
+ if (_ret == 0) { \
+ snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts); \
+ _fwv = _fwif ? _fwif->version : -1; \
+ _ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv); \
+ for (_next = _fwif ? _fwif : _list; _next->load; _next++) { \
+ _fwv = (_ver >= 0) ? _ver : _next->version; \
+ _ret = _next->load(p, _fwv, _next); \
+ if (_ret == 0 || _ver >= 0) { \
+ _fwif = _next; \
+ break; \
+ } \
+ } \
+ } \
+ \
+ if (_ret) { \
+ nvkm_error(_s, "failed to load firmware\n"); \
+ _fwif = ERR_PTR(_ret); \
+ } \
+ \
+ _fwif; \
+})
#endif
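
(Editor's note, not part of the patch: nvkm_firmware_load() picks a firmware interface ("fwif") entry from a list; an Nv<name>Fw config option can force a specific interface version, Nv<name>FwVer can force a firmware file version, and otherwise the list is walked in order until one entry's load() callback succeeds. Below is the default selection path restated as a simplified, self-contained sketch; the fwif struct and names here are illustrative assumptions, not the driver's types.)

/* Illustrative restatement of the fallback loop in nvkm_firmware_load()
 * (no forced versions, simplified types). */
#include <stddef.h>

struct fwif {
	int version;                 /* interface version for this entry */
	int (*load)(int fw_version); /* returns 0 on success */
};

static const struct fwif *
select_fwif(const struct fwif *list)
{
	const struct fwif *fwif;

	for (fwif = list; fwif->load; fwif++) {
		/* Try each interface with its default firmware version. */
		if (fwif->load(fwif->version) == 0)
			return fwif;
	}
	return NULL; /* nothing loaded; the driver logs an error here */
}
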
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index b23bf6109f2d..74d3f1a809d7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -84,6 +84,22 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \
} while(0)
+#define nvkm_robj(o,a,p,s) do { \
+ u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \
+ while (_size--) { \
+ *(_data++) = nvkm_ro32((o), _addr); \
+ _addr += 4; \
+ } \
+} while(0)
+
+#define nvkm_wobj(o,a,p,s) do { \
+ u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \
+ while (_size--) { \
+ nvkm_wo32((o), _addr, *(_data++)); \
+ _addr += 4; \
+ } \
+} while(0)
+
#define nvkm_fill(t,s,o,a,d,c) do { \
u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \
u##t __iomem *_m = nvkm_kmap(o); \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 029a416197db..d7ba3205207f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -21,4 +21,17 @@
iowrite32_native(lower_32_bits(_v), &_p[0]); \
iowrite32_native(upper_32_bits(_v), &_p[1]); \
} while(0)
+
+struct nvkm_blob {
+ void *data;
+ u32 size;
+};
+
+static inline void
+nvkm_blob_dtor(struct nvkm_blob *blob)
+{
+ kfree(blob->data);
+ blob->data = NULL;
+ blob->size = 0;
+}
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 23b582d696c6..27c1f868552c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_FALCON_H__
-#define __NVKM_FALCON_H__
+#ifndef __NVKM_FLCNEN_H__
+#define __NVKM_FLCNEN_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
@@ -23,12 +23,13 @@ struct nvkm_falcon {
struct mutex mutex;
struct mutex dmem_mutex;
+ bool oneinit;
+
const struct nvkm_subdev *user;
u8 version;
u8 secret;
bool debug;
- bool has_emem;
struct nvkm_memory *core;
bool external;
@@ -76,9 +77,14 @@ struct nvkm_falcon_func {
} data;
void (*init)(struct nvkm_falcon *);
void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+
+ u32 debug;
+ u32 fbif;
+
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
+ u32 emem_addr;
void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
@@ -86,6 +92,13 @@ struct nvkm_falcon_func {
void (*start)(struct nvkm_falcon *);
int (*enable)(struct nvkm_falcon *falcon);
void (*disable)(struct nvkm_falcon *falcon);
+ int (*reset)(struct nvkm_falcon *);
+
+ struct {
+ u32 head;
+ u32 tail;
+ u32 stride;
+ } cmdq, msgq;
struct nvkm_sclass sclass[];
};
@@ -122,5 +135,4 @@ int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
int nvkm_falcon_enable(struct nvkm_falcon *);
void nvkm_falcon_disable(struct nvkm_falcon *);
int nvkm_falcon_reset(struct nvkm_falcon *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 2cde36f3c064..1530c81f86a2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -50,6 +50,8 @@ int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 7c7d7f0abfcc..1b3183e31606 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -3,13 +3,13 @@
#define __NVKM_NVDEC_H__
#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
#include <core/engine.h>
+#include <core/falcon.h>
struct nvkm_nvdec {
+ const struct nvkm_nvdec_func *func;
struct nvkm_engine engine;
- u32 addr;
-
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
};
-int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 21624046d0a1..33e6ba8adc8d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,5 +1,15 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVENC_H__
#define __NVKM_NVENC_H__
+#define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine)
#include <core/engine.h>
+#include <core/falcon.h>
+
+struct nvkm_nvenc {
+ const struct nvkm_nvenc_func *func;
+ struct nvkm_engine engine;
+ struct nvkm_falcon falcon;
+};
+
+int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 33078f86c779..34dc765648d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,17 +1,24 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
+#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
#include <core/engine.h>
+#include <core/falcon.h>
struct nvkm_sec2 {
+ const struct nvkm_sec2_func *func;
struct nvkm_engine engine;
- u32 addr;
+ struct nvkm_falcon falcon;
+
+ struct nvkm_falcon_qmgr *qmgr;
+ struct nvkm_falcon_cmdq *cmdq;
+ struct nvkm_falcon_msgq *msgq;
- struct nvkm_falcon *falcon;
- struct nvkm_msgqueue *queue;
struct work_struct work;
+ bool initmsg_received;
};
int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
new file mode 100644
index 000000000000..5d9c3a966de6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_ACR_H__
+#define __NVKM_ACR_H__
+#define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev)
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+enum nvkm_acr_lsf_id {
+ NVKM_ACR_LSF_PMU = 0,
+ NVKM_ACR_LSF_GSPLITE = 1,
+ NVKM_ACR_LSF_FECS = 2,
+ NVKM_ACR_LSF_GPCCS = 3,
+ NVKM_ACR_LSF_NVDEC = 4,
+ NVKM_ACR_LSF_SEC2 = 7,
+ NVKM_ACR_LSF_MINION = 10,
+ NVKM_ACR_LSF_NUM
+};
+
+static inline const char *
+nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id)
+{
+ switch (id) {
+ case NVKM_ACR_LSF_PMU : return "pmu";
+ case NVKM_ACR_LSF_GSPLITE: return "gsplite";
+ case NVKM_ACR_LSF_FECS : return "fecs";
+ case NVKM_ACR_LSF_GPCCS : return "gpccs";
+ case NVKM_ACR_LSF_NVDEC : return "nvdec";
+ case NVKM_ACR_LSF_SEC2 : return "sec2";
+ case NVKM_ACR_LSF_MINION : return "minion";
+ default:
+ return "unknown";
+ }
+}
+
+struct nvkm_acr {
+ const struct nvkm_acr_func *func;
+ struct nvkm_subdev subdev;
+
+ struct list_head hsfw, hsf;
+ struct list_head lsfw, lsf;
+
+ struct nvkm_memory *wpr;
+ u64 wpr_start;
+ u64 wpr_end;
+ u64 shadow_start;
+
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+
+ bool done;
+
+ const struct firmware *wpr_fw;
+ bool wpr_comp;
+ u64 wpr_prev;
+};
+
+bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
+int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
+
+int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+
+struct nvkm_acr_lsfw {
+ const struct nvkm_acr_lsf_func *func;
+ struct nvkm_falcon *falcon;
+ enum nvkm_acr_lsf_id id;
+
+ struct list_head head;
+
+ struct nvkm_blob img;
+
+ const struct firmware *sig;
+
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+
+ u32 app_size;
+ u32 app_start_offset;
+ u32 app_imem_entry;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+
+ u32 ucode_size;
+ u32 data_size;
+
+ struct {
+ u32 lsb;
+ u32 img;
+ u32 bld;
+ } offset;
+ u32 bl_data_size;
+};
+
+struct nvkm_acr_lsf_func {
+/* These (currently) map directly to LSB header flags. */
+#define NVKM_ACR_LSF_LOAD_CODE_AT_0 0x00000001
+#define NVKM_ACR_LSF_DMACTL_REQ_CTX 0x00000004
+#define NVKM_ACR_LSF_FORCE_PRIV_LOAD 0x00000008
+ u32 flags;
+ u32 bld_size;
+ void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *);
+ void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust);
+ int (*boot)(struct nvkm_falcon *);
+ int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+ int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask);
+};
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+#endif
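
(Editor's note, not part of the patch: nvkm_acr_bootstrap_falcons() takes a bitmask of nvkm_acr_lsf_id values, so callers compose the mask with BIT(). The sketch below shows a caller re-bootstrapping FECS and GPCCS after checking they are ACR-managed; the function name is hypothetical and a valid struct nvkm_device is assumed.)

/* Illustrative caller sketch using the declarations above. */
static int
example_reset_gr_falcons(struct nvkm_device *device)
{
	unsigned long mask = BIT(NVKM_ACR_LSF_FECS) | BIT(NVKM_ACR_LSF_GPCCS);

	if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS) ||
	    !nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS))
		return 0; /* nothing to do when ACR doesn't manage them */

	return nvkm_acr_bootstrap_falcons(device, mask);
}
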
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index f29f2d8da142..9ac3dda4b44f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -26,4 +26,6 @@ nvbios_extdev_parse(struct nvkm_bios *, int, struct nvbios_extdev_func *);
int
nvbios_extdev_find(struct nvkm_bios *, enum nvbios_extdev_type,
struct nvbios_extdev_func *);
+
+bool nvbios_extdev_skip_probe(struct nvkm_bios *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index 7c4f00366e71..3f785f29dfac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -3,10 +3,13 @@
#define __NVBIOS_GPIO_H__
enum dcb_gpio_func_name {
DCB_GPIO_PANEL_POWER = 0x01,
+ DCB_GPIO_FAN = 0x09,
DCB_GPIO_TVDAC0 = 0x0c,
+ DCB_GPIO_THERM_EXT_POWER_EVENT = 0x10,
DCB_GPIO_TVDAC1 = 0x2d,
- DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_POWER_ALERT = 0x4c,
+ DCB_GPIO_EXT_POWER_LOW = 0x79,
DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 97322f95b3ee..a513c16ab105 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -31,6 +31,7 @@ struct nvkm_fault_data {
};
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 239ad222b95a..34b56b10218a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -33,6 +33,8 @@ struct nvkm_fb {
const struct nvkm_fb_func *func;
struct nvkm_subdev subdev;
+ struct nvkm_blob vpr_scrubber;
+
struct nvkm_ram *ram;
struct nvkm_mm tags;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 4c672a5c4cd5..06db67610a50 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -2,12 +2,11 @@
#define __NVKM_GSP_H__
#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
#include <core/subdev.h>
+#include <core/falcon.h>
struct nvkm_gsp {
struct nvkm_subdev subdev;
- u32 addr;
-
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
};
int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 644d527c3b96..d76f60d7d29a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -40,4 +40,5 @@ int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 24fbcccd93eb..da553089d2d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -2,13 +2,20 @@
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
-#include <engine/falcon.h>
+#include <core/falcon.h>
struct nvkm_pmu {
const struct nvkm_pmu_func *func;
struct nvkm_subdev subdev;
- struct nvkm_falcon *falcon;
- struct nvkm_msgqueue *queue;
+ struct nvkm_falcon falcon;
+
+ struct nvkm_falcon_qmgr *qmgr;
+ struct nvkm_falcon_cmdq *hpq;
+ struct nvkm_falcon_cmdq *lpq;
+ struct nvkm_falcon_msgq *msgq;
+ bool initmsg_received;
+
+ struct completion wpr_ready;
struct {
u32 base;
@@ -30,6 +37,7 @@ struct nvkm_pmu {
int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
u32 message, u32 data0, u32 data1);
void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
+bool nvkm_pmu_fan_controlled(struct nvkm_device *);
int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
@@ -42,6 +50,7 @@ int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index c3fd5dd39ed9..e2bae1424502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_put_unlocked(&chan->ntfy->gem);
+ drm_gem_object_put_unlocked(&chan->ntfy->bo.base);
}
if (chan->heap.block_size)
@@ -245,12 +245,6 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
}
int
-nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
-{
- return -EINVAL;
-}
-
-int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_alloc *init = data;
@@ -345,7 +339,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
}
- ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
+ ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
&init->notifier_handle);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 195546719bfe..70f6aa5c9dd1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -6,7 +6,6 @@
struct drm_device *dev, void *data, struct drm_file *file_priv
int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
-int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 66bf2aff4a3e..d204ea8a5618 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,8 +22,6 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
@@ -935,7 +933,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
tmdstableptr = ROM16(bios->data[bitentry->offset]);
if (!tmdstableptr) {
- NV_ERROR(drm, "Pointer to TMDS table invalid\n");
+ NV_INFO(drm, "Pointer to TMDS table not found\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 34a998012bf6..1b62ccc57aef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -136,10 +136,16 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem.filp))
- DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+ /*
+ * If nouveau_bo_new() allocated this buffer, the GEM object was never
+ * initialized, so don't attempt to release it.
+ */
+ if (bo->base.dev)
+ drm_gem_object_release(&bo->base);
+
kfree(nvbo);
}
@@ -185,31 +191,24 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
*size = roundup_64(*size, PAGE_SIZE);
}
-int
-nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
- struct sg_table *sg, struct reservation_object *robj,
- struct nouveau_bo **pnvbo)
+struct nouveau_bo *
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+ u32 tile_mode, u32 tile_flags)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
- size_t acc_size;
- int type = ttm_bo_type_device;
- int ret, i, pi = -1;
+ int i, pi = -1;
- if (!size) {
- NV_WARN(drm, "skipped size %016llx\n", size);
- return -EINVAL;
+ if (!*size) {
+ NV_WARN(drm, "skipped size %016llx\n", *size);
+ return ERR_PTR(-EINVAL);
}
- if (sg)
- type = ttm_bo_type_sg;
-
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
@@ -231,7 +230,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
@@ -241,7 +240,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
nvbo->comp = (tile_flags & 0x00030000) >> 16;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
} else {
nvbo->zeta = (tile_flags & 0x00000007);
@@ -273,12 +272,12 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
pi = i;
/* Stop once the buffer is larger than the current page size. */
- if (size >= 1ULL << vmm->page[i].shift)
+ if (*size >= 1ULL << vmm->page[i].shift)
break;
}
if (WARN_ON(pi < 0))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
@@ -288,22 +287,53 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
nvbo->page = vmm->page[pi].shift;
- nouveau_bo_fixup_align(nvbo, flags, &align, &size);
+ nouveau_bo_fixup_align(nvbo, flags, align, size);
+
+ return nvbo;
+}
+
+int
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+ struct sg_table *sg, struct dma_resv *robj)
+{
+ int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
+ size_t acc_size;
+ int ret;
+
+ acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
+
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
- sizeof(struct nouveau_bo));
-
- ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
- type, &nvbo->placement,
- align >> PAGE_SHIFT, false, acc_size, sg,
- robj, nouveau_bo_del_ttm);
+ ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
+ &nvbo->placement, align >> PAGE_SHIFT, false,
+ acc_size, sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ return 0;
+}
+
+int
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+ uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct sg_table *sg, struct dma_resv *robj,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ tile_flags);
+ if (IS_ERR(nvbo))
+ return PTR_ERR(nvbo);
+
+ ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ if (ret)
+ return ret;
+
*pnvbo = nvbo;
return 0;
}
@@ -1132,7 +1162,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
- static const struct {
+ static const struct _method_table {
const char *name;
int engine;
s32 oclass;
@@ -1162,7 +1192,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
{},
{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
- }, *mthd = _methods;
+ };
+ const struct _method_table *mthd = _methods;
const char *name = "CPU";
int ret;
@@ -1323,7 +1354,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1400,7 +1431,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+ return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
filp->private_data);
}
@@ -1654,12 +1685,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
- struct reservation_object *resv = nvbo->bo.resv;
+ struct dma_resv *resv = nvbo->bo.base.resv;
if (exclusive)
- reservation_object_add_excl_fence(resv, &fence->base);
+ dma_resv_add_excl_fence(resv, &fence->base);
else if (fence)
- reservation_object_add_shared_fence(resv, &fence->base);
+ dma_resv_add_shared_fence(resv, &fence->base);
}
struct ttm_bo_driver nouveau_bo_driver = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 383ac36d5869..38f9d8350963 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -35,11 +35,6 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
- /* Only valid if allocated via nouveau_gem_new() and iff you hold a
- * gem reference to it! For debugging, use gem.filp != NULL to test
- * whether it is valid. */
- struct drm_gem_object gem;
-
/* protect by the ttm reservation lock */
int pin_refcnt;
@@ -76,9 +71,13 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
extern struct ttm_bo_driver nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
+struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
+ u32 flags, u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+ struct sg_table *sg, struct dma_resv *robj);
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
- struct reservation_object *robj,
+ struct dma_resv *robj,
struct nouveau_bo **);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 282fd90b65e1..d9381a053169 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -55,6 +55,8 @@ nouveau_channel_killed(struct nvif_notify *ntfy)
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
atomic_set(&chan->killed, 1);
+ if (chan->fence)
+ nouveau_fence_context_kill(chan->fence, -ENODEV);
return NVIF_NOTIFY_DROP;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8f15281faa79..9a9a7f5003d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -29,7 +29,6 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -246,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
void
nouveau_conn_reset(struct drm_connector *connector)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_conn_atom *asyc;
- if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
- return;
+ if (drm_drv_uses_atomic_modeset(connector->dev)) {
+ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+ return;
+
+ if (connector->state)
+ nouveau_conn_atomic_destroy_state(connector,
+ connector->state);
+
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ } else {
+ asyc = &nv_connector->properties_state;
+ }
- if (connector->state)
- nouveau_conn_atomic_destroy_state(connector, connector->state);
- __drm_atomic_helper_connector_reset(connector, &asyc->state);
asyc->dither.mode = DITHERING_MODE_AUTO;
asyc->dither.depth = DITHERING_DEPTH_AUTO;
asyc->scaler.mode = DRM_MODE_SCALE_NONE;
@@ -277,8 +284,14 @@ void
nouveau_conn_attach_properties(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_conn_atom *armc;
+
+ if (drm_drv_uses_atomic_modeset(connector->dev))
+ armc = nouveau_conn_atom(connector->state);
+ else
+ armc = &nv_connector->properties_state;
/* Init DVI-I specific properties. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
@@ -366,9 +379,8 @@ find_encoder(struct drm_connector *connector, int type)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i;
- drm_connector_for_each_possible_encoder(connector, enc, i) {
+ drm_connector_for_each_possible_encoder(connector, enc) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -415,10 +427,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
- int i, ret;
+ int ret;
bool switcheroo_ddc = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
nv_encoder = nouveau_encoder(encoder);
switch (nv_encoder->dcb->type) {
@@ -750,9 +762,9 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct nouveau_conn_atom *asyc = &nv_connector->properties_state;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret;
@@ -1132,6 +1144,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
int ret;
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
+
+ return NVIF_NOTIFY_KEEP;
+ }
ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) {
@@ -1152,25 +1174,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
return NVIF_NOTIFY_DROP;
}
- if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
- NV_DEBUG(drm, "service %s\n", name);
- drm_dp_cec_irq(&nv_connector->aux);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
- nv50_mstm_service(nv_encoder->dp.mstm);
- } else {
- bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
-
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
- drm_dp_cec_unset_edid(&nv_connector->aux);
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
- if (!plugged)
- nv50_mstm_remove(nv_encoder->dp.mstm);
- }
-
- drm_helper_hpd_irq_event(connector->dev);
+ nv50_mstm_remove(nv_encoder->dp.mstm);
}
+ drm_helper_hpd_irq_event(connector->dev);
+
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
@@ -1349,7 +1362,7 @@ nouveau_connector_create(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- nv_connector->aux.dev = dev->dev;
+ nv_connector->aux.dev = connector->kdev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
@@ -1416,8 +1429,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- drm_dp_cec_register_connector(&nv_connector->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index f43a8d63aef8..de84fb4708c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -29,6 +29,7 @@
#include <nvif/notify.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
@@ -44,6 +45,60 @@ struct dcb_output;
struct nouveau_backlight;
#endif
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
+
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -63,6 +118,12 @@ struct nouveau_connector {
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight *backlight;
#endif
+ /*
+ * Our connector property code expects a nouveau_conn_atom struct
+ * even on pre-nv50 where we do not support atomic. This embedded
+ * version gets used in the non-atomic modeset case.
+ */
+ struct nouveau_conn_atom properties_state;
};
static inline struct nouveau_connector *nouveau_connector(
@@ -121,61 +182,6 @@ extern int nouveau_ignorelid;
extern int nouveau_duallink;
extern int nouveau_hdmimhz;
-#include <drm/drm_crtc.h>
-#define nouveau_conn_atom(p) \
- container_of((p), struct nouveau_conn_atom, state)
-
-struct nouveau_conn_atom {
- struct drm_connector_state state;
-
- struct {
- /* The enum values specifically defined here match nv50/gf119
- * hw values, and the code relies on this.
- */
- enum {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
- } mode;
- enum {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
- } depth;
- } dither;
-
- struct {
- int mode; /* DRM_MODE_SCALE_* */
- struct {
- enum {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
- } mode;
- u32 hborder;
- u32 vborder;
- } underscan;
- bool full;
- } scaler;
-
- struct {
- int color_vibrance;
- int vibrant_hue;
- } procamp;
-
- union {
- struct {
- bool dither:1;
- bool scaler:1;
- bool procamp:1;
- };
- u8 mask;
- } set;
-};
-
void nouveau_conn_attach_properties(struct drm_connector *);
void nouveau_conn_reset(struct drm_connector *);
struct drm_connector_state *
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 366acb928f57..7f63be2ec35d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -27,6 +27,8 @@
#ifndef __NOUVEAU_CRTC_H__
#define __NOUVEAU_CRTC_H__
+#include <drm/drm_crtc.h>
+
#include <nvif/notify.h>
struct nouveau_crtc {
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 9420a6aca138..8909c010e8ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -2,7 +2,7 @@
#ifndef __NOUVEAU_DEBUGFS_H__
#define __NOUVEAU_DEBUGFS_H__
-#include <drm/drmP.h>
+#include <drm/drm_debugfs.h>
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 832da8e0020d..53f9bceaf17a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -25,12 +25,14 @@
*/
#include <acpi/video.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_fbcon.h"
#include "nouveau_crtc.h"
@@ -201,7 +203,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_put_unlocked(&fb->nvbo->gem);
+ drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -214,7 +216,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
+ return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -405,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_connector_list_iter conn_iter;
int ret;
+ /*
+ * Enable hotplug interrupts (done as early as possible, since we need
+ * them for MST)
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nvif_notify_get(&conn->hpd);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
@@ -414,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
*/
drm_kms_helper_poll_enable(dev);
- /* enable hotplug interrupts */
- drm_connector_list_iter_begin(dev, &conn_iter);
- nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nvif_notify_get(&conn->hpd);
- }
- drm_connector_list_iter_end(&conn_iter);
-
return ret;
}
@@ -660,8 +665,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
- drm_gem_object_put_unlocked(&bo->gem);
+ ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->bo.base);
return ret;
}
@@ -675,7 +680,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
- *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
+ *poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
drm_gem_object_put_unlocked(gem);
return 0;
}
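
The hunk above reorders nouveau_display_init() so the connector HPD notifiers are armed before disp->init() runs, because MST probing during init depends on hotplug events. A condensed sketch of the resulting order (error handling and the later poll enable as in the code above):

    drm_connector_list_iter_begin(dev, &conn_iter);
    nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
            struct nouveau_connector *conn = nouveau_connector(connector);

            nvif_notify_get(&conn->hpd);    /* arm the HPD interrupt */
    }
    drm_connector_list_iter_end(&conn_iter);

    ret = disp->init(dev, resume, runtime); /* MST probe now sees HPD events */
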
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 9185f01e2d9b..6e8e66882e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
+
#include "nouveau_drv.h"
+
#include <nvif/disp.h>
+#include <drm/drm_framebuffer.h>
+
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 078f65d849ce..3c430a550a51 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -118,7 +118,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
}
if ((++cnt & 0xff) == 0) {
- DRM_UDELAY(1);
+ udelay(1);
if (cnt > 100000)
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1333220787a1..0ad5d87b5a8e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -44,8 +44,6 @@
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
-struct nouveau_migrate;
-
enum nouveau_aper {
NOUVEAU_APER_VIRT,
NOUVEAU_APER_VRAM,
@@ -86,21 +84,13 @@ static inline struct nouveau_dmem *page_to_dmem(struct page *page)
return container_of(page->pgmap, struct nouveau_dmem, pagemap);
}
-struct nouveau_dmem_fault {
- struct nouveau_drm *drm;
- struct nouveau_fence *fence;
- dma_addr_t *dma;
- unsigned long npages;
-};
+static unsigned long nouveau_dmem_page_addr(struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk = page->zone_device_data;
+ unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
-struct nouveau_migrate {
- struct vm_area_struct *vma;
- struct nouveau_drm *drm;
- struct nouveau_fence *fence;
- unsigned long npages;
- dma_addr_t *dma;
- unsigned long dma_nr;
-};
+ return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+}
static void nouveau_dmem_page_free(struct page *page)
{
@@ -125,165 +115,90 @@ static void nouveau_dmem_page_free(struct page *page)
spin_unlock(&chunk->lock);
}
-static void
-nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
- const unsigned long *src_pfns,
- unsigned long *dst_pfns,
- unsigned long start,
- unsigned long end,
- void *private)
+static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
- struct nouveau_dmem_fault *fault = private;
- struct nouveau_drm *drm = fault->drm;
- struct device *dev = drm->dev->dev;
- unsigned long addr, i, npages = 0;
- nouveau_migrate_copy_t copy;
- int ret;
-
-
- /* First allocate new memory */
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct page *dpage, *spage;
-
- dst_pfns[i] = 0;
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
- if (!dpage) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- continue;
- }
- lock_page(dpage);
-
- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
- MIGRATE_PFN_LOCKED;
- npages++;
- }
-
- /* Allocate storage for DMA addresses, so we can unmap later. */
- fault->dma = kmalloc(sizeof(*fault->dma) * npages, GFP_KERNEL);
- if (!fault->dma)
- goto error;
-
- /* Copy things over */
- copy = drm->dmem->migrate.copy_func;
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct nouveau_dmem_chunk *chunk;
- struct page *spage, *dpage;
- u64 src_addr, dst_addr;
-
- dpage = migrate_pfn_to_page(dst_pfns[i]);
- if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
- continue;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page(dpage);
- continue;
- }
-
- fault->dma[fault->npages] =
- dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(dev, fault->dma[fault->npages])) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page(dpage);
- continue;
- }
-
- dst_addr = fault->dma[fault->npages++];
-
- chunk = spage->zone_device_data;
- src_addr = page_to_pfn(spage) - chunk->pfn_first;
- src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
-
- ret = copy(drm, 1, NOUVEAU_APER_HOST, dst_addr,
- NOUVEAU_APER_VRAM, src_addr);
- if (ret) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page(dpage);
- continue;
- }
+ if (fence) {
+ nouveau_fence_wait(*fence, true, false);
+ nouveau_fence_unref(fence);
+ } else {
+ /*
+ * FIXME wait for the channel to be IDLE before finalizing
+ * the hmem object.
+ */
}
+}
- nouveau_fence_new(drm->dmem->migrate.chan, false, &fault->fence);
-
- return;
-
-error:
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
- struct page *page;
+static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
+ struct vm_fault *vmf, struct migrate_vma *args,
+ dma_addr_t *dma_addr)
+{
+ struct device *dev = drm->dev->dev;
+ struct page *dpage, *spage;
- if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
- continue;
+ spage = migrate_pfn_to_page(args->src[0]);
+ if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
+ return 0;
- page = migrate_pfn_to_page(dst_pfns[i]);
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- if (page == NULL)
- continue;
+ dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ if (!dpage)
+ return VM_FAULT_SIGBUS;
+ lock_page(dpage);
- __free_page(page);
- }
-}
+ *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr))
+ goto error_free_page;
-void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
- const unsigned long *src_pfns,
- const unsigned long *dst_pfns,
- unsigned long start,
- unsigned long end,
- void *private)
-{
- struct nouveau_dmem_fault *fault = private;
- struct nouveau_drm *drm = fault->drm;
+ if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
+ NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
+ goto error_dma_unmap;
- if (fault->fence) {
- nouveau_fence_wait(fault->fence, true, false);
- nouveau_fence_unref(&fault->fence);
- } else {
- /*
- * FIXME wait for channel to be IDLE before calling finalizing
- * the hmem object below (nouveau_migrate_hmem_fini()).
- */
- }
+ args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ return 0;
- while (fault->npages--) {
- dma_unmap_page(drm->dev->dev, fault->dma[fault->npages],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- kfree(fault->dma);
+error_dma_unmap:
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+error_free_page:
+ __free_page(dpage);
+ return VM_FAULT_SIGBUS;
}
-static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
- .alloc_and_copy = nouveau_dmem_fault_alloc_and_copy,
- .finalize_and_map = nouveau_dmem_fault_finalize_and_map,
-};
-
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
- unsigned long src[1] = {0}, dst[1] = {0};
- struct nouveau_dmem_fault fault = { .drm = dmem->drm };
- int ret;
+ struct nouveau_drm *drm = dmem->drm;
+ struct nouveau_fence *fence;
+ unsigned long src = 0, dst = 0;
+ dma_addr_t dma_addr = 0;
+ vm_fault_t ret;
+ struct migrate_vma args = {
+ .vma = vmf->vma,
+ .start = vmf->address,
+ .end = vmf->address + PAGE_SIZE,
+ .src = &src,
+ .dst = &dst,
+ };
/*
* FIXME what we really want is to find some heuristic to migrate more
* than just one page on CPU fault. When such a fault happens it is very
* likely that more surrounding pages will CPU fault too.
*/
- ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
- vmf->address, vmf->address + PAGE_SIZE,
- src, dst, &fault);
- if (ret)
+ if (migrate_vma_setup(&args) < 0)
return VM_FAULT_SIGBUS;
+ if (!args.cpages)
+ return 0;
- if (dst[0] == MIGRATE_PFN_ERROR)
- return VM_FAULT_SIGBUS;
+ ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
+ if (ret || dst == 0)
+ goto done;
- return 0;
+ nouveau_fence_new(dmem->migrate.chan, false, &fence);
+ migrate_vma_pages(&args);
+ nouveau_dmem_fence_done(&fence);
+ dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+done:
+ migrate_vma_finalize(&args);
+ return ret;
}
static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
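
The CPU-fault path above is converted from the removed migrate_vma_ops callbacks to driving the core migrate_vma API directly: set up the (single-page) window, copy if a page was collected, then publish and finalize. A minimal sketch of that flow with a placeholder function name; the allocation/DMA-map/copy step is elided and error handling trimmed (requires <linux/migrate.h>):

    static vm_fault_t demo_migrate_one_to_ram(struct vm_fault *vmf)
    {
            unsigned long src = 0, dst = 0;
            struct migrate_vma args = {
                    .vma   = vmf->vma,
                    .start = vmf->address,
                    .end   = vmf->address + PAGE_SIZE,
                    .src   = &src,
                    .dst   = &dst,
            };

            if (migrate_vma_setup(&args) < 0)       /* collect and isolate */
                    return VM_FAULT_SIGBUS;
            if (!args.cpages)                       /* nothing to migrate */
                    return 0;

            /* ...allocate a system page, DMA-map it, copy VRAM -> host,
             * and store the new entry in dst (see the code above)... */

            migrate_vma_pages(&args);               /* install the new PTE */
            migrate_vma_finalize(&args);            /* drop isolation */
            return 0;
    }
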
@@ -642,188 +557,115 @@ out_free:
drm->dmem = NULL;
}
-static void
-nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
- const unsigned long *src_pfns,
- unsigned long *dst_pfns,
- unsigned long start,
- unsigned long end,
- void *private)
+static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
+ unsigned long src, dma_addr_t *dma_addr)
{
- struct nouveau_migrate *migrate = private;
- struct nouveau_drm *drm = migrate->drm;
struct device *dev = drm->dev->dev;
- unsigned long addr, i, npages = 0;
- nouveau_migrate_copy_t copy;
- int ret;
-
- /* First allocate new memory */
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct page *dpage, *spage;
-
- dst_pfns[i] = 0;
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- dpage = nouveau_dmem_page_alloc_locked(drm);
- if (!dpage)
- continue;
-
- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
- MIGRATE_PFN_LOCKED |
- MIGRATE_PFN_DEVICE;
- npages++;
- }
-
- if (!npages)
- return;
-
- /* Allocate storage for DMA addresses, so we can unmap later. */
- migrate->dma = kmalloc(sizeof(*migrate->dma) * npages, GFP_KERNEL);
- if (!migrate->dma)
- goto error;
-
- /* Copy things over */
- copy = drm->dmem->migrate.copy_func;
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct nouveau_dmem_chunk *chunk;
- struct page *spage, *dpage;
- u64 src_addr, dst_addr;
-
- dpage = migrate_pfn_to_page(dst_pfns[i]);
- if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
- continue;
-
- chunk = dpage->zone_device_data;
- dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
- dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
- nouveau_dmem_page_free_locked(drm, dpage);
- dst_pfns[i] = 0;
- continue;
- }
-
- migrate->dma[migrate->dma_nr] =
- dma_map_page_attrs(dev, spage, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(dev, migrate->dma[migrate->dma_nr])) {
- nouveau_dmem_page_free_locked(drm, dpage);
- dst_pfns[i] = 0;
- continue;
- }
-
- src_addr = migrate->dma[migrate->dma_nr++];
+ struct page *dpage, *spage;
- ret = copy(drm, 1, NOUVEAU_APER_VRAM, dst_addr,
- NOUVEAU_APER_HOST, src_addr);
- if (ret) {
- nouveau_dmem_page_free_locked(drm, dpage);
- dst_pfns[i] = 0;
- continue;
- }
- }
-
- nouveau_fence_new(drm->dmem->migrate.chan, false, &migrate->fence);
+ spage = migrate_pfn_to_page(src);
+ if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ goto out;
- return;
+ dpage = nouveau_dmem_page_alloc_locked(drm);
+ if (!dpage)
+ return 0;
-error:
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
- struct page *page;
+ *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr))
+ goto out_free_page;
- if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
- continue;
+ if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
+ nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
+ *dma_addr))
+ goto out_dma_unmap;
- page = migrate_pfn_to_page(dst_pfns[i]);
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- if (page == NULL)
- continue;
+ return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
- __free_page(page);
- }
+out_dma_unmap:
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_page:
+ nouveau_dmem_page_free_locked(drm, dpage);
+out:
+ return 0;
}
-void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
- const unsigned long *src_pfns,
- const unsigned long *dst_pfns,
- unsigned long start,
- unsigned long end,
- void *private)
+static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
+ struct migrate_vma *args, dma_addr_t *dma_addrs)
{
- struct nouveau_migrate *migrate = private;
- struct nouveau_drm *drm = migrate->drm;
-
- if (migrate->fence) {
- nouveau_fence_wait(migrate->fence, true, false);
- nouveau_fence_unref(&migrate->fence);
- } else {
- /*
- * FIXME wait for channel to be IDLE before finalizing
- * the hmem object below (nouveau_migrate_hmem_fini()) ?
- */
+ struct nouveau_fence *fence;
+ unsigned long addr = args->start, nr_dma = 0, i;
+
+ for (i = 0; addr < args->end; i++) {
+ args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
+ dma_addrs + nr_dma);
+ if (args->dst[i])
+ nr_dma++;
+ addr += PAGE_SIZE;
}
- while (migrate->dma_nr--) {
- dma_unmap_page(drm->dev->dev, migrate->dma[migrate->dma_nr],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- kfree(migrate->dma);
+ nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
+ migrate_vma_pages(args);
+ nouveau_dmem_fence_done(&fence);
+ while (nr_dma--) {
+ dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
/*
- * FIXME optimization: update GPU page table to point to newly
- * migrated memory.
+ * FIXME optimization: update GPU page table to point to newly migrated
+ * memory.
*/
+ migrate_vma_finalize(args);
}
-static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
- .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
- .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
-};
-
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
- unsigned long *src_pfns, *dst_pfns, npages;
- struct nouveau_migrate migrate = {0};
- unsigned long i, c, max;
- int ret = 0;
-
- npages = (end - start) >> PAGE_SHIFT;
- max = min(SG_MAX_SINGLE_ALLOC, npages);
- src_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
- if (src_pfns == NULL)
- return -ENOMEM;
- dst_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
- if (dst_pfns == NULL) {
- kfree(src_pfns);
- return -ENOMEM;
- }
+ unsigned long npages = (end - start) >> PAGE_SHIFT;
+ unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
+ dma_addr_t *dma_addrs;
+ struct migrate_vma args = {
+ .vma = vma,
+ .start = start,
+ };
+ unsigned long c, i;
+ int ret = -ENOMEM;
+
+ args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
+ if (!args.src)
+ goto out;
+ args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
+ if (!args.dst)
+ goto out_free_src;
- migrate.drm = drm;
- migrate.vma = vma;
- migrate.npages = npages;
- for (i = 0; i < npages; i += c) {
- unsigned long next;
+ dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
+ if (!dma_addrs)
+ goto out_free_dst;
+ for (i = 0; i < npages; i += c) {
c = min(SG_MAX_SINGLE_ALLOC, npages);
- next = start + (c << PAGE_SHIFT);
- ret = migrate_vma(&nouveau_dmem_migrate_ops, vma, start,
- next, src_pfns, dst_pfns, &migrate);
+ args.end = start + (c << PAGE_SHIFT);
+ ret = migrate_vma_setup(&args);
if (ret)
- goto out;
- start = next;
+ goto out_free_dma;
+
+ if (args.cpages)
+ nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
+ args.start = args.end;
}
+ ret = 0;
+out_free_dma:
+ kfree(dma_addrs);
+out_free_dst:
+ kfree(args.dst);
+out_free_src:
+ kfree(args.src);
out:
- kfree(dst_pfns);
- kfree(src_pfns);
return ret;
}
@@ -841,11 +683,10 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
npages = (range->end - range->start) >> PAGE_SHIFT;
for (i = 0; i < npages; ++i) {
- struct nouveau_dmem_chunk *chunk;
struct page *page;
uint64_t addr;
- page = hmm_pfn_to_page(range, range->pfns[i]);
+ page = hmm_device_entry_to_page(range, range->pfns[i]);
if (page == NULL)
continue;
@@ -859,10 +700,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
continue;
}
- chunk = page->zone_device_data;
- addr = page_to_pfn(page) - chunk->pfn_first;
- addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
-
+ addr = nouveau_dmem_page_addr(page);
range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 9d97d756fb7d..92394be5d649 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -45,16 +45,5 @@ static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_suspend(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_resume(struct nouveau_drm *drm) {}
-
-static inline int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range) {}
#endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 0d052e1660f8..2674f1587457 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7c2fcaba42d6..b65ae817eabf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -28,9 +28,11 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
+#include <linux/mmu_notifier.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_vblank.h>
#include <core/gpuobj.h>
#include <core/option.h>
@@ -713,7 +715,6 @@ fail_nvkm:
void
nouveau_drm_device_remove(struct drm_device *dev)
{
- struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_client *client;
struct nvkm_device *device;
@@ -725,7 +726,6 @@ nouveau_drm_device_remove(struct drm_device *dev)
device = nvkm_device_find(client->device);
nouveau_drm_device_fini(dev);
- pci_disable_device(pdev);
drm_dev_put(dev);
nvkm_device_del(&device);
}
@@ -736,6 +736,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
+ pci_disable_device(pdev);
}
static int
@@ -1046,20 +1047,20 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
};
long
@@ -1105,7 +1106,7 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
| DRIVER_KMS_LEGACY_CONTEXT
#endif
@@ -1130,10 +1131,7 @@ driver_stub = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = nouveau_gem_prime_pin,
- .gem_prime_res_obj = nouveau_gem_prime_res_obj,
.gem_prime_unpin = nouveau_gem_prime_unpin,
.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
@@ -1292,6 +1290,8 @@ nouveau_drm_exit(void)
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_unregister(&nouveau_platform_driver);
#endif
+ if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
+ mmu_notifier_synchronize();
}
module_init(nouveau_drm_init);
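
With the SVM code now freeing its notifier through mmu_notifier_put(), module unload has to wait for the deferred callbacks that run .free_notifier before the module text disappears; that is what the mmu_notifier_synchronize() call added to nouveau_drm_exit() guarantees. A sketch of the pattern in isolation (demo_exit is a placeholder name):

    static void __exit demo_exit(void)
    {
            /* ...unregister the PCI/platform drivers first... */

            /* Wait for deferred mmu_notifier free_notifier() callbacks so
             * they cannot run after the module is gone. */
            if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
                    mmu_notifier_synchronize();
    }
    module_exit(demo_exit);
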
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index aae035816383..c2c332fbde97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,7 +46,10 @@
#include <nvif/mmu.h>
#include <nvif/vmm.h>
-#include <drm/drmP.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -55,6 +58,8 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/drm_audio_component.h>
+
#include "uapi/drm/nouveau_drm.h"
struct nouveau_channel;
@@ -127,7 +132,6 @@ nouveau_cli(struct drm_file *fpriv)
}
#include <nvif/object.h>
-#include <nvif/device.h>
struct nouveau_drm {
struct nouveau_cli master;
@@ -204,14 +208,16 @@ struct nouveau_drm {
/* led management */
struct nouveau_led *led;
- /* display power reference */
- bool have_disp_power_ref;
-
struct dev_pm_domain vga_pm_domain;
struct nouveau_svm *svm;
struct nouveau_dmem *dmem;
+
+ struct {
+ struct drm_audio_component *component;
+ bool component_registered;
+ } audio;
};
static inline struct nouveau_drm *
@@ -249,11 +255,11 @@ void nouveau_drm_device_remove(struct drm_device *dev);
#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
#define NV_DEBUG(drm,f,a...) do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
#define NV_ATOMIC(drm,f,a...) do { \
- if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ if (drm_debug_enabled(DRM_UT_ATOMIC)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
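
The NV_DEBUG/NV_ATOMIC macros above switch from testing the drm_debug mask by hand to the drm_debug_enabled() helper. An equivalent caller-side sketch (message text and "value" are placeholders):

    if (drm_debug_enabled(DRM_UT_DRIVER))
            NV_PRINTK(info, &drm->client, "expensive state dump: %d\n", value);
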
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 73cc3217068a..0c5cdda3c336 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -37,10 +37,10 @@
#include <linux/vga_switcheroo.h>
#include <linux/console.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_atomic.h>
#include "nouveau_drv.h"
@@ -203,7 +203,7 @@ nouveau_fbcon_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops nouveau_fbcon_ops = {
+static const struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
@@ -214,7 +214,7 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_sync = nouveau_fbcon_sync,
};
-static struct fb_ops nouveau_fbcon_sw_ops = {
+static const struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index d4964f3397a1..666f2090d92b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -24,10 +24,9 @@
*
*/
-#include <drm/drmP.h>
-
#include <linux/ktime.h>
#include <linux/hrtimer.h>
+#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
@@ -88,7 +87,7 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
}
void
-nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
struct nouveau_fence *fence;
@@ -96,11 +95,19 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ if (error)
+ dma_fence_set_error(&fence->base, error);
+
if (nouveau_fence_signal(fence))
nvif_notify_put(&fctx->notify);
}
spin_unlock_irq(&fctx->lock);
+}
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+ nouveau_fence_context_kill(fctx, 0);
nvif_notify_fini(&fctx->notify);
fctx->dead = 1;
@@ -157,7 +164,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(fence->channel, fctx))
+ if (nouveau_fence_update(chan, fctx))
ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
@@ -335,20 +342,20 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
{
struct nouveau_fence_chan *fctx = chan->fence;
struct dma_fence *fence;
- struct reservation_object *resv = nvbo->bo.resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *resv = nvbo->bo.base.resv;
+ struct dma_resv_list *fobj;
struct nouveau_fence *f;
int ret = 0, i;
if (!exclusive) {
- ret = reservation_object_reserve_shared(resv, 1);
+ ret = dma_resv_reserve_shared(resv, 1);
if (ret)
return ret;
}
- fobj = reservation_object_get_list(resv);
- fence = reservation_object_get_excl(resv);
+ fobj = dma_resv_get_list(resv);
+ fence = dma_resv_get_excl(resv);
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
@@ -377,7 +384,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
bool must_wait = true;
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
f = nouveau_local_fence(fence, chan->drm);
if (f) {
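
nouveau_fence_sync() above is converted from the old reservation_object names to the dma_resv API: reserve a shared slot if needed, then look at the exclusive fence and walk the shared-fence list under the held reservation lock. A condensed sketch of that walk (the actual waiting/pipelining decisions are elided):

    struct dma_resv *resv = nvbo->bo.base.resv;
    struct dma_resv_list *fobj = dma_resv_get_list(resv);
    struct dma_fence *excl = dma_resv_get_excl(resv);
    int i;

    if (excl) {
            /* ...sync against the exclusive fence... */
    }

    for (i = 0; fobj && i < fobj->shared_count; i++) {
            struct dma_fence *fence =
                    rcu_dereference_protected(fobj->shared[i],
                                              dma_resv_held(resv));
            /* ...sync against each shared fence... */
    }
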
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c9e24baaaa4f..4887caa69c65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -63,6 +63,7 @@ struct nouveau_fence_priv {
void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
void nouveau_fence_context_del(struct nouveau_fence_chan *);
void nouveau_fence_context_free(struct nouveau_fence_chan *);
+void nouveau_fence_context_kill(struct nouveau_fence_chan *, int error);
int nv04_fence_create(struct nouveau_drm *);
int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b4bda716564d..f5ece1f94973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -51,10 +51,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
- drm_gem_object_release(gem);
-
- /* reset filp so nouveau_bo_del_ttm() can test for it */
- gem->filp = NULL;
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
@@ -188,11 +184,24 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
flags |= TTM_PL_FLAG_UNCACHED;
- ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
- tile_flags, NULL, NULL, pnvbo);
- if (ret)
+ nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ tile_flags);
+ if (IS_ERR(nvbo))
+ return PTR_ERR(nvbo);
+
+ /* Initialize the embedded gem-object. We return a single gem-reference
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
+ if (ret) {
+ nouveau_bo_ref(NULL, &nvbo);
return ret;
- nvbo = *pnvbo;
+ }
+
+ ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+ if (ret) {
+ nouveau_bo_ref(NULL, &nvbo);
+ return ret;
+ }
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. not possibly on
@@ -203,15 +212,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
- /* Initialize the embedded gem-object. We return a single gem-reference
- * to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
- if (ret) {
- nouveau_bo_ref(NULL, pnvbo);
- return -ENOMEM;
- }
-
- nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
+ nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
+ *pnvbo = nvbo;
return 0;
}
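
nouveau_gem_new() above now builds the object in three explicit steps: nouveau_bo_alloc() creates the nouveau_bo with an uninitialised GEM object embedded at nvbo->bo.base, drm_gem_object_init() initialises that GEM object, and nouveau_bo_init() finally places the buffer with TTM. A condensed sketch of the sequence, with unwinding via nouveau_bo_ref(NULL, &nvbo) as in the code above:

    nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, tile_flags);
    if (IS_ERR(nvbo))
            return PTR_ERR(nvbo);

    ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size); /* embedded GEM */
    if (ret == 0)
            ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
    if (ret) {
            nouveau_bo_ref(NULL, &nvbo);    /* drops the allocation */
            return ret;
    }
    *pnvbo = nvbo;
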
@@ -240,7 +242,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
@@ -268,15 +270,16 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
+ ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
+ &req->info.handle);
if (ret == 0) {
- ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
+ ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->bo.base);
return ret;
}
@@ -355,7 +358,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_put_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->bo.base);
}
}
@@ -481,19 +484,16 @@ retry:
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
- struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
- uint64_t user_pbbo_ptr)
+ struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
struct nouveau_drm *drm = chan->drm;
- struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
- (void __force __user *)(uintptr_t)user_pbbo_ptr;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
+ ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
@@ -530,10 +530,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->presumed.offset = nvbo->bo.offset;
b->presumed.valid = 0;
relocs++;
-
- if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
- &b->presumed, sizeof(b->presumed)))
- return -EFAULT;
}
}
@@ -544,8 +540,8 @@ static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
- uint64_t user_buffers, int nr_buffers,
- struct validate_op *op, int *apply_relocs)
+ int nr_buffers,
+ struct validate_op *op, bool *apply_relocs)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret;
@@ -562,7 +558,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return ret;
}
- ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->list, pbbo);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
@@ -602,16 +598,12 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_reloc *reloc,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
- struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
- reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
- if (IS_ERR(reloc))
- return PTR_ERR(reloc);
-
for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
@@ -690,11 +682,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
struct drm_nouveau_gem_pushbuf_push *push;
+ struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan = NULL;
struct validate_op op;
struct nouveau_fence *fence = NULL;
- int i, j, ret = 0, do_reloc = 0;
+ int i, j, ret = 0;
+ bool do_reloc = false, sync = false;
if (unlikely(!abi16))
return -ENOMEM;
@@ -708,6 +702,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
+ if (unlikely(atomic_read(&chan->killed)))
+ return nouveau_abi16_put(abi16, -ENODEV);
+
+ sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
req->vram_available = drm->gem.vram_available;
req->gart_available = drm->gem.gart_available;
@@ -752,7 +750,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
/* Validate buffer list */
- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+revalidate:
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
@@ -762,7 +761,18 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Apply any relocations that are required */
if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
+ if (!reloc) {
+ validate_fini(&op, chan, NULL, bo);
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
+ if (IS_ERR(reloc)) {
+ ret = PTR_ERR(reloc);
+ goto out_prevalid;
+ }
+
+ goto revalidate;
+ }
+
+ ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
if (ret) {
NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
goto out;
@@ -844,10 +854,33 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out;
}
+ if (sync) {
+ if (!(ret = nouveau_fence_wait(fence, false, false))) {
+ if ((ret = dma_fence_get_status(&fence->base)) == 1)
+ ret = 0;
+ }
+ }
+
out:
validate_fini(&op, chan, fence, bo);
nouveau_fence_unref(&fence);
+ if (do_reloc) {
+ struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+ u64_to_user_ptr(req->buffers);
+
+ for (i = 0; i < req->nr_buffers; i++) {
+ if (bo[i].presumed.valid)
+ continue;
+
+ if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
+ sizeof(bo[i].presumed))) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ u_free(reloc);
+ }
out_prevalid:
u_free(bo);
u_free(push);
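
The pushbuf ioctl gains an opt-in synchronous mode: userspace passes NOUVEAU_GEM_PUSHBUF_SYNC in the vram_available field (normally output-only) and the kernel waits for the submission's fence before returning, folding any fence error into the ioctl result. A sketch of that wait:

    if (sync) {
            ret = nouveau_fence_wait(fence, false, false);
            if (ret == 0) {
                    /* 1 means "signalled, no error recorded" */
                    ret = dma_fence_get_status(&fence->base);
                    if (ret == 1)
                            ret = 0;
            }
    }
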
@@ -886,7 +919,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+ lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 03371204a47c..978e07591990 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -2,15 +2,13 @@
#ifndef __NOUVEAU_GEM_H__
#define __NOUVEAU_GEM_H__
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_bo.h"
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
- return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
+ return gem ? container_of(gem, struct nouveau_bo, bo.base) : NULL;
}
/* nouveau_gem.c */
@@ -33,7 +31,6 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_prime_pin(struct drm_gem_object *);
-struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 6af2d299c3f9..1c3104d20571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -29,8 +29,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_hwmon.h"
@@ -743,7 +741,7 @@ nouveau_hwmon_init(struct drm_device *dev)
special_groups[i++] = &pwm_fan_sensor_group;
}
- special_groups[i] = 0;
+ special_groups[i] = NULL;
hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev,
&nouveau_chip_info,
special_groups);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 462679a8fec5..adf01ca9e035 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -33,7 +33,8 @@
#include <linux/compat.h>
-#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_ioctl.h>
#include "nouveau_ioctl.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 1fefc93af1d7..bae6a3eccee0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,7 +22,6 @@
* Authors: Dave Airlie
*/
-#include <drm/drmP.h>
#include <linux/dma-buf.h>
#include "nouveau_drv.h"
@@ -61,31 +60,46 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sg)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_gem_object *obj;
struct nouveau_bo *nvbo;
- struct reservation_object *robj = attach->dmabuf->resv;
+ struct dma_resv *robj = attach->dmabuf->resv;
+ u64 size = attach->dmabuf->size;
u32 flags = 0;
+ int align = 0;
int ret;
flags = TTM_PL_FLAG_TT;
- ww_mutex_lock(&robj->lock, NULL);
- ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
- sg, robj, &nvbo);
- ww_mutex_unlock(&robj->lock);
- if (ret)
- return ERR_PTR(ret);
+ dma_resv_lock(robj, NULL);
+ nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
+ if (IS_ERR(nvbo)) {
+ obj = ERR_CAST(nvbo);
+ goto unlock;
+ }
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
+ if (ret) {
+ nouveau_bo_ref(NULL, &nvbo);
+ obj = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
- return ERR_PTR(-ENOMEM);
+ obj = ERR_PTR(ret);
+ goto unlock;
}
- return &nvbo->gem;
+ obj = &nvbo->bo.base;
+
+unlock:
+ dma_resv_unlock(robj);
+ return obj;
}
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
@@ -107,10 +121,3 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
nouveau_bo_unpin(nvbo);
}
-
-struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
- return nvbo->bo.resv;
-}
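
nouveau_gem_prime_import_sg_table() above now takes the exporter's reservation lock through the dma_resv helpers instead of the embedded ww_mutex, and holds it across the whole alloc/GEM-init/TTM-init sequence. A sketch of the locking shape, with the setup steps elided:

    struct dma_resv *robj = attach->dmabuf->resv;

    dma_resv_lock(robj, NULL);      /* no ww_acquire_ctx needed here */
    /* ...nouveau_bo_alloc(), drm_gem_object_init(),
     *    nouveau_bo_init(..., sg, robj)... */
    dma_resv_unlock(robj);
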
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a835cebb6d90..df9bf1fd1bc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
}
struct nouveau_svmm {
+ struct mmu_notifier notifier;
struct nouveau_vmm *vmm;
struct {
unsigned long start;
@@ -95,9 +96,6 @@ struct nouveau_svmm {
} unmanaged;
struct mutex mutex;
-
- struct mm_struct *mm;
- struct hmm_mirror mirror;
};
#define SVMM_DBG(s,f,a...) \
@@ -251,19 +249,23 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
}
static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
- const struct hmm_update *update)
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *update)
{
- struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+ struct nouveau_svmm *svmm =
+ container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
- if (!update->blockable)
+ if (!mmu_notifier_range_blockable(update))
return -EAGAIN;
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
+ if (unlikely(!svmm->vmm))
+ goto out;
+
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
@@ -273,19 +275,20 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
}
nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
mutex_unlock(&svmm->mutex);
return 0;
}
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
+ kfree(container_of(mn, struct nouveau_svmm, notifier));
}
-static const struct hmm_mirror_ops
-nouveau_svmm = {
- .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
- .release = nouveau_svmm_release,
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+ .invalidate_range_start = nouveau_svmm_invalidate_range_start,
+ .free_notifier = nouveau_svmm_free_notifier,
};
void
@@ -293,8 +296,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
- hmm_mirror_unregister(&svmm->mirror);
- kfree(*psvmm);
+ mutex_lock(&svmm->mutex);
+ svmm->vmm = NULL;
+ mutex_unlock(&svmm->mutex);
+ mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
@@ -320,7 +325,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
- goto done;
+ goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
@@ -335,24 +340,26 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
- goto done;
-
- /* Enable HMM mirroring of CPU address-space to VMM. */
- svmm->mm = get_task_mm(current);
- down_write(&svmm->mm->mmap_sem);
- svmm->mirror.ops = &nouveau_svmm;
- ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
- if (ret == 0) {
- cli->svm.svmm = svmm;
- cli->svm.cli = cli;
- }
- up_write(&svmm->mm->mmap_sem);
- mmput(svmm->mm);
+ goto out_free;
-done:
+ down_write(&current->mm->mmap_sem);
+ svmm->notifier.ops = &nouveau_mn_ops;
+ ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
- nouveau_svmm_fini(&svmm);
+ goto out_mm_unlock;
+ /* Note, ownership of svmm transfers to mmu_notifier */
+
+ cli->svm.svmm = svmm;
+ cli->svm.cli = cli;
+ up_write(&current->mm->mmap_sem);
mutex_unlock(&cli->mutex);
+ return 0;
+
+out_mm_unlock:
+ up_write(&current->mm->mmap_sem);
+out_free:
+ mutex_unlock(&cli->mutex);
+ kfree(svmm);
return ret;
}
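
nouveau_svmm_init() above replaces HMM mirror registration with a plain mmu_notifier whose ownership passes to the MM layer: __mmu_notifier_register() must be called with mmap_sem held for write, and the structure is later released through mmu_notifier_put(), which ends up in the .free_notifier callback once all notifier readers have drained. A condensed sketch of that ownership hand-off:

    svmm->notifier.ops = &nouveau_mn_ops;

    down_write(&current->mm->mmap_sem);
    ret = __mmu_notifier_register(&svmm->notifier, current->mm);
    up_write(&current->mm->mmap_sem);
    if (ret) {
            kfree(svmm);    /* never registered: still owned by the driver */
            return ret;
    }

    /* ...teardown: clear svmm->vmm under the mutex, then... */
    mmu_notifier_put(&svmm->notifier);  /* kfree happens in free_notifier() */
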
@@ -475,45 +482,90 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-static inline bool
-nouveau_range_done(struct hmm_range *range)
+struct svm_notifier {
+ struct mmu_interval_notifier notifier;
+ struct nouveau_svmm *svmm;
+};
+
+static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- bool ret = hmm_range_valid(range);
+ struct svm_notifier *sn =
+ container_of(mni, struct svm_notifier, notifier);
- hmm_range_unregister(range);
- return ret;
+ /*
+ * serializes the update to mni->invalidate_seq done by caller and
+ * prevents invalidation of the PTE from progressing while HW is being
+ * programmed. This is very hacky and only works because the normal
+ * notifier that does invalidation is always called after the range
+ * notifier.
+ */
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&sn->svmm->mutex);
+ else if (!mutex_trylock(&sn->svmm->mutex))
+ return false;
+ mmu_interval_set_seq(mni, cur_seq);
+ mutex_unlock(&sn->svmm->mutex);
+ return true;
}
-static int
-nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
+ .invalidate = nouveau_svm_range_invalidate,
+};
+
+static int nouveau_range_fault(struct nouveau_svmm *svmm,
+ struct nouveau_drm *drm, void *data, u32 size,
+ u64 *pfns, struct svm_notifier *notifier)
{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ /* Have HMM fault pages within the fault window to the GPU. */
+ struct hmm_range range = {
+ .notifier = &notifier->notifier,
+ .start = notifier->notifier.interval_tree.start,
+ .end = notifier->notifier.interval_tree.last + 1,
+ .pfns = pfns,
+ .flags = nouveau_svm_pfn_flags,
+ .values = nouveau_svm_pfn_values,
+ .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ };
+ struct mm_struct *mm = notifier->notifier.mm;
long ret;
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
+ while (true) {
+ if (time_after(jiffies, timeout))
+ return -EBUSY;
+
+ range.notifier_seq = mmu_interval_read_begin(range.notifier);
+ range.default_flags = 0;
+ range.pfn_flags_mask = -1UL;
+ down_read(&mm->mmap_sem);
+ ret = hmm_range_fault(&range, 0);
+ up_read(&mm->mmap_sem);
+ if (ret <= 0) {
+ if (ret == 0 || ret == -EBUSY)
+ continue;
+ return ret;
+ }
- ret = hmm_range_register(range, mirror,
- range->start, range->end,
- PAGE_SHIFT);
- if (ret) {
- up_read(&range->vma->vm_mm->mmap_sem);
- return (int)ret;
+ mutex_lock(&svmm->mutex);
+ if (mmu_interval_read_retry(range.notifier,
+ range.notifier_seq)) {
+ mutex_unlock(&svmm->mutex);
+ continue;
+ }
+ break;
}
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- up_read(&range->vma->vm_mm->mmap_sem);
- return -EAGAIN;
- }
+ nouveau_dmem_convert_pfn(drm, &range);
- ret = hmm_range_fault(range, true);
- if (ret <= 0) {
- if (ret == 0)
- ret = -EBUSY;
- up_read(&range->vma->vm_mm->mmap_sem);
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
+ svmm->vmm->vmm.object.client->super = false;
+ mutex_unlock(&svmm->mutex);
+
+ return ret;
}
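
nouveau_range_fault() above implements the mmu_interval_notifier sequence protocol: sample the interval's sequence, fault the range with hmm_range_fault() under mmap_sem, then take the driver lock and retry if the interval was invalidated in the meantime; only a result that survives the retry check is programmed into the GPU page tables. A condensed sketch of the loop (timeout handling and the ret == 0 / -EBUSY retry cases are simplified here):

    while (true) {
            range.notifier_seq = mmu_interval_read_begin(range.notifier);

            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(&range, 0);
            up_read(&mm->mmap_sem);
            if (ret <= 0)
                    return ret ? ret : -EBUSY;      /* simplified */

            mutex_lock(&svmm->mutex);
            if (!mmu_interval_read_retry(range.notifier,
                                         range.notifier_seq))
                    break;                  /* still valid: keep the lock */
            mutex_unlock(&svmm->mutex);     /* invalidated: fault again */
    }
    /* svmm->mutex is held here: program the GPU PTEs, then unlock. */
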
static int
@@ -533,7 +585,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
- struct hmm_range range;
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -589,6 +640,9 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+ struct svm_notifier notifier;
+ struct mm_struct *mm;
+
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
@@ -608,24 +662,32 @@ nouveau_svm_fault(struct nvif_notify *notify)
start = max_t(u64, start, svmm->unmanaged.limit);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
+ mm = svmm->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
+ continue;
+ }
+
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&svmm->mm->mmap_sem);
- vma = find_vma_intersection(svmm->mm, start, limit);
+ down_read(&mm->mmap_sem);
+ vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
+ up_read(&mm->mmap_sem);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
- up_read(&svmm->mm->mmap_sem);
+ mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
@@ -681,34 +743,19 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.addr,
args.i.p.addr + args.i.p.size, fn - fi);
- /* Have HMM fault pages within the fault window to the GPU. */
- range.vma = vma;
- range.start = args.i.p.addr;
- range.end = args.i.p.addr + args.i.p.size;
- range.pfns = args.phys;
- range.flags = nouveau_svm_pfn_flags;
- range.values = nouveau_svm_pfn_values;
- range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
-again:
- ret = nouveau_range_fault(&svmm->mirror, &range);
- if (ret == 0) {
- mutex_lock(&svmm->mutex);
- if (!nouveau_range_done(&range)) {
- mutex_unlock(&svmm->mutex);
- goto again;
- }
-
- nouveau_dmem_convert_pfn(svm->drm, &range);
-
- svmm->vmm->vmm.object.client->super = true;
- ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
- &args, sizeof(args.i) +
- pi * sizeof(args.phys[0]),
- NULL);
- svmm->vmm->vmm.object.client->super = false;
- mutex_unlock(&svmm->mutex);
- up_read(&svmm->mm->mmap_sem);
+ notifier.svmm = svmm;
+ ret = mmu_interval_notifier_insert(&notifier.notifier,
+ svmm->notifier.mm,
+ args.i.p.addr, args.i.p.size,
+ &nouveau_svm_mni_ops);
+ if (!ret) {
+ ret = nouveau_range_fault(
+ svmm, svm->drm, &args,
+ sizeof(args.i) + pi * sizeof(args.phys[0]),
+ args.phys, &notifier);
+ mmu_interval_notifier_remove(&notifier.notifier);
}
+ mmput(mm);
/* Cancel any faults in the window whose pages didn't manage
* to keep their valid bit, or stay writeable when required.
@@ -717,10 +764,10 @@ again:
*/
while (fi < fn) {
struct nouveau_svm_fault *fault = buffer->fault[fi++];
- pi = (fault->addr - range.start) >> PAGE_SHIFT;
+ pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT;
if (ret ||
- !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
- (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
+ !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) ||
+ (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) &&
fault->access != 0 && fault->access != 3)) {
nouveau_svm_fault_cancel_fault(svm, fault);
continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f0daf958e03a..7ca0a2498532 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_mem *mem;
int ret;
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
if (ret)
return ret;
@@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_mem *mem;
int ret;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
if (ret)
return ret;
@@ -236,6 +232,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 8f4b12a8092c..d865d8aeac3c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -2,7 +2,6 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 77061182a1cf..b28c7dc13ad6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -69,8 +69,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
}
list_del(&vma->head);
kfree(*pvma);
- *pvma = NULL;
}
+ *pvma = NULL;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index ae08a1ca8044..47efc408efa6 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -110,7 +110,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
if (mmu->kind_nr) {
struct nvif_mmu_kind_v0 *kind;
- u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
+ size_t argc = struct_size(kind, data, mmu->kind_nr);
if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
goto done;
@@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
kind, argc);
if (ret == 0)
memcpy(mmu->kind, kind->data, kind->count);
+ mmu->kind_inv = kind->kind_inv;
kfree(kind);
}
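
The kind-query allocation above switches to struct_size(), which accounts for the struct header plus the trailing flexible data[] array and saturates on overflow rather than wrapping. A sketch of the idiom:

    struct nvif_mmu_kind_v0 *kind;
    size_t argc = struct_size(kind, data, mmu->kind_nr); /* header + N entries */

    kind = kmalloc(argc, GFP_KERNEL);
    if (!kind)
            return -ENOMEM;
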
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index b53de9ba8c73..db3ade125fa9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/nvfw/Kbuild
include $(src)/nvkm/falcon/Kbuild
include $(src)/nvkm/subdev/Kbuild
include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 092acdec2c39..8b25367917ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -22,6 +22,40 @@
#include <core/device.h>
#include <core/firmware.h>
+int
+nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base,
+ const char *name, int ver, const struct firmware **pfw)
+{
+ char path[64];
+ int ret;
+
+ snprintf(path, sizeof(path), "%s%s", base, name);
+ ret = nvkm_firmware_get(subdev, path, ver, pfw);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int
+nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base,
+ const char *name, int ver, struct nvkm_blob *blob)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw);
+ if (ret == 0) {
+ blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ blob->size = fw->size;
+ nvkm_firmware_put(fw);
+ if (!blob->data)
+ return -ENOMEM;
+ }
+
+ return ret;
+}
+
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
* @subdev subdevice that will use that firmware
@@ -32,9 +66,8 @@
* Firmware files released by NVIDIA will always follow this format.
*/
int
-nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
- int min_version, int max_version,
- const struct firmware **fw)
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
+ const struct firmware **fw)
{
struct nvkm_device *device = subdev->device;
char f[64];
@@ -50,31 +83,21 @@ nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
cname[i] = tolower(cname[i]);
}
- for (i = max_version; i >= min_version; i--) {
- if (i != 0)
- snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
- else
- snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-
- if (!firmware_request_nowarn(fw, f, device->dev)) {
- nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
- return i;
- }
+ if (ver != 0)
+ snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
+ else
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+ if (!firmware_request_nowarn(fw, f, device->dev)) {
+ nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n",
+ f, (*fw)->size);
+ return 0;
}
- nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+ nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
return -ENOENT;
}
-int
-nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
- const struct firmware **fw)
-{
- return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
-}
-
/**
* nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
*/
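
Illustration, not part of the patch: the doc comment and rewritten nvkm_firmware_get() above describe the "nvidia/<chip>/<name>[-<ver>].bin" naming scheme (chipset name lowercased, version suffix only when non-zero). A minimal userspace sketch of that path construction; the chip and firmware names passed in main() are illustrative values only.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Build the "nvidia/<chip>/<name>[-<ver>].bin" path requested above. */
static void fw_path(char *out, size_t len, const char *chip,
		    const char *name, int ver)
{
	char cname[16];
	size_t i;

	snprintf(cname, sizeof(cname), "%s", chip);
	for (i = 0; cname[i]; i++)
		cname[i] = tolower((unsigned char)cname[i]);

	if (ver != 0)
		snprintf(out, len, "nvidia/%s/%s-%d.bin", cname, name, ver);
	else
		snprintf(out, len, "nvidia/%s/%s.bin", cname, name);
}

int main(void)
{
	char path[64];

	fw_path(path, sizeof(path), "GM204", "gr/fecs_inst", 0);
	printf("%s\n", path);	/* nvidia/gm204/gr/fecs_inst.bin */

	fw_path(path, sizeof(path), "GM204", "acr/ucode_load", 1);
	printf("%s\n", path);	/* nvidia/gm204/acr/ucode_load-1.bin */
	return 0;
}
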
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index e85a08ecd9da..4cc186262d34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
}
refcount_set(&tags->refcount, 1);
+ *ptags = memory->tags = tags;
mutex_unlock(&fb->subdev.mutex);
- *ptags = tags;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 245990de1e90..79a8f9d305c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -30,6 +30,7 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
const char *
nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+ [NVKM_SUBDEV_ACR ] = "acr",
[NVKM_SUBDEV_BAR ] = "bar",
[NVKM_SUBDEV_VBIOS ] = "bios",
[NVKM_SUBDEV_BUS ] = "bus",
@@ -50,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_SUBDEV_MXM ] = "mxm",
[NVKM_SUBDEV_PCI ] = "pci",
[NVKM_SUBDEV_PMU ] = "pmu",
- [NVKM_SUBDEV_SECBOOT ] = "secboot",
[NVKM_SUBDEV_THERM ] = "therm",
[NVKM_SUBDEV_TIMER ] = "tmr",
[NVKM_SUBDEV_TOP ] = "top",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c3c7159f3411..c7d700916eae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1987,6 +1987,8 @@ nv117_chipset = {
.dma = gf119_dma_new,
.fifo = gm107_fifo_new,
.gr = gm107_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
@@ -2027,6 +2029,7 @@ nv118_chipset = {
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2045,7 +2048,6 @@ nv120_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2056,12 +2058,16 @@ nv120_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2080,7 +2086,6 @@ nv124_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2091,12 +2096,16 @@ nv124_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2115,7 +2124,6 @@ nv126_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2126,12 +2134,15 @@ nv126_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
+ .acr = gm20b_acr_new,
.bar = gm20b_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
@@ -2143,7 +2154,6 @@ nv12b_chipset = {
.mc = gk20a_mc_new,
.mmu = gm20b_mmu_new,
.pmu = gm20b_pmu_new,
- .secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[2] = gm200_ce_new,
@@ -2157,6 +2167,7 @@ nv12b_chipset = {
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2172,7 +2183,6 @@ nv130_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gm200_secboot_new,
.pci = gp100_pci_new,
.pmu = gp100_pmu_new,
.timer = gk20a_timer_new,
@@ -2187,12 +2197,17 @@ nv130_chipset = {
.disp = gp100_disp_new,
.fifo = gp100_fifo_new,
.gr = gp100_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
+ .nvenc[2] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2208,7 +2223,6 @@ nv132_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2221,7 +2235,9 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2229,6 +2245,7 @@ nv132_chipset = {
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2244,7 +2261,6 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2257,7 +2273,9 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2265,6 +2283,7 @@ nv134_chipset = {
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2280,7 +2299,6 @@ nv136_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2293,7 +2311,8 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2301,6 +2320,7 @@ nv136_chipset = {
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2316,7 +2336,6 @@ nv137_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2329,7 +2348,9 @@ nv137_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2337,6 +2358,7 @@ nv137_chipset = {
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
+ .acr = gp108_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2352,7 +2374,6 @@ nv138_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp108_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2364,30 +2385,30 @@ nv138_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
- .gr = gp107_gr_new,
- .nvdec[0] = gp102_nvdec_new,
- .sec2 = gp102_sec2_new,
+ .gr = gp108_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .sec2 = gp108_sec2_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
+ .acr = gp10b_acr_new,
.bar = gm20b_bar_new,
.bus = gf100_bus_new,
- .fault = gp100_fault_new,
+ .fault = gp10b_fault_new,
.fb = gp10b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gp10b_ibus_new,
.imem = gk20a_instmem_new,
- .ltc = gp102_ltc_new,
+ .ltc = gp10b_ltc_new,
.mc = gp10b_mc_new,
.mmu = gp10b_mmu_new,
- .secboot = gp10b_secboot_new,
- .pmu = gm20b_pmu_new,
+ .pmu = gp10b_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[2] = gp102_ce_new,
+ .ce[0] = gp100_ce_new,
.dma = gf119_dma_new,
.fifo = gp10b_fifo_new,
.gr = gp10b_gr_new,
@@ -2397,6 +2418,7 @@ nv13b_chipset = {
static const struct nvkm_device_chip
nv140_chipset = {
.name = "GV100",
+ .acr = gp108_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2414,7 +2436,6 @@ nv140_chipset = {
.mmu = gv100_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
- .secboot = gp108_secboot_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2431,13 +2452,17 @@ nv140_chipset = {
.dma = gv100_dma_new,
.fifo = gv100_fifo_new,
.gr = gv100_gr_new,
- .nvdec[0] = gp102_nvdec_new,
- .sec2 = gp102_sec2_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
+ .nvenc[2] = gm107_nvenc_new,
+ .sec2 = gp108_sec2_new,
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2466,13 +2491,16 @@ nv162_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2501,13 +2529,17 @@ nv164_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvdec[1] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2536,7 +2568,11 @@ nv166_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvdec[1] = gm107_nvdec_new,
+ .nvdec[2] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2571,7 +2607,8 @@ nv167_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2606,7 +2643,8 @@ nv168_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2638,6 +2676,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
switch (index) {
#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+ _(ACR , device->acr , &device->acr->subdev);
_(BAR , device->bar , &device->bar->subdev);
_(VBIOS , device->bios , &device->bios->subdev);
_(BUS , device->bus , &device->bus->subdev);
@@ -2658,7 +2697,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
_(MXM , device->mxm , device->mxm);
_(PCI , device->pci , &device->pci->subdev);
_(PMU , device->pmu , &device->pmu->subdev);
- _(SECBOOT , device->secboot , &device->secboot->subdev);
_(THERM , device->therm , &device->therm->subdev);
_(TIMER , device->timer , &device->timer->subdev);
_(TOP , device->top , &device->top->subdev);
@@ -2703,9 +2741,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(MSPDEC , device->mspdec , device->mspdec);
_(MSPPP , device->msppp , device->msppp);
_(MSVLD , device->msvld , device->msvld);
- _(NVENC0 , device->nvenc[0], device->nvenc[0]);
- _(NVENC1 , device->nvenc[1], device->nvenc[1]);
- _(NVENC2 , device->nvenc[2], device->nvenc[2]);
+ _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
+ _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
+ _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
_(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
_(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
_(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
@@ -3144,6 +3182,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
} \
break
switch (i) {
+ _(NVKM_SUBDEV_ACR , acr);
_(NVKM_SUBDEV_BAR , bar);
_(NVKM_SUBDEV_VBIOS , bios);
_(NVKM_SUBDEV_BUS , bus);
@@ -3164,7 +3203,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_SUBDEV_MXM , mxm);
_(NVKM_SUBDEV_PCI , pci);
_(NVKM_SUBDEV_PMU , pmu);
- _(NVKM_SUBDEV_SECBOOT , secboot);
_(NVKM_SUBDEV_THERM , therm);
_(NVKM_SUBDEV_TIMER , timer);
_(NVKM_SUBDEV_TOP , top);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index d8be2f77ac66..54eab5e04230 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -3,6 +3,7 @@
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
+#include <subdev/acr.h>
#include <subdev/bar.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
@@ -27,7 +28,6 @@
#include <subdev/timer.h>
#include <subdev/top.h>
#include <subdev/volt.h>
-#include <subdev/secboot.h>
#include <engine/bsp.h>
#include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 0e372a190d3f..d0d52c1d4aee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -52,18 +52,18 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
clk_set_rate(tdev->clk_pwr, 204000000);
udelay(10);
- reset_control_assert(tdev->rst);
- udelay(10);
-
if (!tdev->pdev->dev.pm_domain) {
+ reset_control_assert(tdev->rst);
+ udelay(10);
+
ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
if (ret)
goto err_clamp;
udelay(10);
- }
- reset_control_deassert(tdev->rst);
- udelay(10);
+ reset_control_deassert(tdev->rst);
+ udelay(10);
+ }
return 0;
@@ -279,6 +279,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
+ unsigned long rate;
int ret;
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
@@ -307,6 +308,17 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto free;
}
+ rate = clk_get_rate(tdev->clk);
+ if (rate == 0) {
+ ret = clk_set_rate(tdev->clk, ULONG_MAX);
+ if (ret < 0)
+ goto free;
+
+ rate = clk_get_rate(tdev->clk);
+
+ dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
+ }
+
if (func->require_ref_clk)
tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(tdev->clk_ref)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index bcf32d92ee5a..50e3539f33d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (debug > subdev->debug)
return;
+ if (!mthd)
+ return;
for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
u32 base = chan->head * mthd->addr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 818d21bd28d3..3800aeb507d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,7 +365,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
/* Try to respect sink limits too when selecting
* lowest link configuration.
*/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 892be6c9b76c..c1032527f791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -101,15 +101,26 @@ gv100_disp_exception(struct nv50_disp *disp, int chid)
u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
u32 type = (stat & 0x00007000) >> 12;
u32 mthd = (stat & 0x00000fff) << 2;
- u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
- u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
const struct nvkm_enum *reason =
nvkm_enum_find(nv50_disp_intr_error_type, type);
- nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
- "data %08x code %08x\n",
- chid, stat, type, reason ? reason->name : "",
- mthd, data, code);
+ /*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
+ * don't support those currently.
+ *
+ * CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
+ */
+ if (chid <= 32) {
+ u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
+ u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+ "mthd %04x data %08x code %08x\n",
+ chid, stat, type, reason ? reason->name : "",
+ mthd, data, code);
+ } else {
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+ "mthd %04x\n",
+ chid, stat, type, reason ? reason->name : "", mthd);
+ }
if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
switch (mthd) {
@@ -144,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
if (stat & 0x00000008)
stat &= ~0x00000008;
+ if (stat & 0x00000080) {
+ u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
+ nvkm_warn(subdev, "error %08x\n", error);
+ stat &= ~0x00000080;
+ }
+
if (stat & 0x00000100) {
unsigned long wndws = nvkm_rd32(device, 0x611858);
unsigned long other = nvkm_rd32(device, 0x61185c);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 10a2e7039a75..5a39e51d42d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -28,6 +28,7 @@
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <subdev/fault.h>
#include <engine/sw.h>
#include <nvif/class.h>
@@ -194,68 +195,6 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
}
static const struct nvkm_enum
-gf100_fifo_sched_reason[] = {
- { 0x0a, "CTXSW_TIMEOUT" },
- {}
-};
-
-static void
-gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
-{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- struct nvkm_engine *engine;
- struct gf100_fifo_chan *chan;
- unsigned long flags;
- u32 engn;
-
- spin_lock_irqsave(&fifo->base.lock, flags);
- for (engn = 0; engn < 6; engn++) {
- u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
- u32 busy = (stat & 0x80000000);
- u32 save = (stat & 0x00100000); /* maybe? */
- u32 unk0 = (stat & 0x00040000);
- u32 unk1 = (stat & 0x00001000);
- u32 chid = (stat & 0x0000007f);
- (void)save;
-
- if (busy && unk0 && unk1) {
- list_for_each_entry(chan, &fifo->chan, head) {
- if (chan->base.chid == chid) {
- engine = gf100_fifo_engine(fifo, engn);
- if (!engine)
- break;
- gf100_fifo_recover(fifo, engine, chan);
- break;
- }
- }
- }
- }
- spin_unlock_irqrestore(&fifo->base.lock, flags);
-}
-
-static void
-gf100_fifo_intr_sched(struct gf100_fifo *fifo)
-{
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- u32 intr = nvkm_rd32(device, 0x00254c);
- u32 code = intr & 0x000000ff;
- const struct nvkm_enum *en;
-
- en = nvkm_enum_find(gf100_fifo_sched_reason, code);
-
- nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
-
- switch (code) {
- case 0x0a:
- gf100_fifo_intr_sched_ctxsw(fifo);
- break;
- default:
- break;
- }
-}
-
-static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
@@ -315,32 +254,24 @@ gf100_fifo_fault_gpcclient[] = {
};
static void
-gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
+gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
+ struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
- u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
- u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
- u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
- u32 gpc = (stat & 0x1f000000) >> 24;
- u32 client = (stat & 0x00001f00) >> 8;
- u32 write = (stat & 0x00000080);
- u32 hub = (stat & 0x00000040);
- u32 reason = (stat & 0x0000000f);
const struct nvkm_enum *er, *eu, *ec;
struct nvkm_engine *engine = NULL;
struct nvkm_fifo_chan *chan;
unsigned long flags;
char gpcid[8] = "";
- er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
- eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
- if (hub) {
- ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
+ er = nvkm_enum_find(gf100_fifo_fault_reason, info->reason);
+ eu = nvkm_enum_find(gf100_fifo_fault_engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(gf100_fifo_fault_hubclient, info->client);
} else {
- ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
- snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
+ ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, info->client);
+ snprintf(gpcid, sizeof(gpcid), "GPC%d/", info->gpc);
}
if (eu && eu->data2) {
@@ -360,22 +291,108 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
}
}
- chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+ chan = nvkm_fifo_chan_inst(&fifo->base, info->inst, &flags);
nvkm_error(subdev,
"%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
"reason %02x [%s] on channel %d [%010llx %s]\n",
- write ? "write" : "read", (u64)vahi << 32 | valo,
- unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
- reason, er ? er->name : "", chan ? chan->chid : -1,
- (u64)inst << 12,
- chan ? chan->object.client->name : "unknown");
+ info->access ? "write" : "read", info->addr,
+ info->engine, eu ? eu->name : "",
+ info->client, gpcid, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
if (engine && chan)
gf100_fifo_recover(fifo, engine, (void *)chan);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
+static const struct nvkm_enum
+gf100_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static void
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ struct gf100_fifo_chan *chan;
+ unsigned long flags;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ for (engn = 0; engn < 6; engn++) {
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
+ u32 busy = (stat & 0x80000000);
+ u32 save = (stat & 0x00100000); /* maybe? */
+ u32 unk0 = (stat & 0x00040000);
+ u32 unk1 = (stat & 0x00001000);
+ u32 chid = (stat & 0x0000007f);
+ (void)save;
+
+ if (busy && unk0 && unk1) {
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->base.chid == chid) {
+ engine = gf100_fifo_engine(fifo, engn);
+ if (!engine)
+ break;
+ gf100_fifo_recover(fifo, engine, chan);
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en;
+
+ en = nvkm_enum_find(gf100_fifo_sched_reason, code);
+
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
+
+ switch (code) {
+ case 0x0a:
+ gf100_fifo_intr_sched_ctxsw(fifo);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+gf100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00001f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/* { 0x00008000, "" } seen with null ib push */
@@ -518,7 +535,7 @@ gf100_fifo_intr(struct nvkm_fifo *base)
u32 mask = nvkm_rd32(device, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- gf100_fifo_intr_fault(fifo, unit);
+ gf100_fifo_intr_fault(&fifo->base, unit);
nvkm_wr32(device, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
@@ -655,6 +672,7 @@ gf100_fifo = {
.init = gf100_fifo_init,
.fini = gf100_fifo_fini,
.intr = gf100_fifo_intr,
+ .fault = gf100_fifo_fault,
.uevent_init = gf100_fifo_uevent_init,
.uevent_fini = gf100_fifo_uevent_fini,
.chan = {
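
Illustration, not part of the patch: the new gf100_fifo_intr_fault() above packs the raw MMU fault status word into nvkm_fault_data before handing it to nvkm_fifo_fault(). A standalone sketch of the same mask-and-shift decode; the field layout is taken directly from the masks in the hunk, while the sample value in main() is invented purely to exercise it.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the fields gf100_fifo_intr_fault() extracts from 0x00280c. */
struct fault_info {
	uint32_t gpc, client, access, hub, reason;
};

static struct fault_info decode_fault(uint32_t type)
{
	struct fault_info info;

	info.gpc    = (type & 0x1f000000) >> 24;
	info.client = (type & 0x00001f00) >> 8;
	info.access = (type & 0x00000080) >> 7;	/* 1 = write, 0 = read */
	info.hub    = (type & 0x00000040) >> 6;	/* hub vs. GPC client */
	info.reason = (type & 0x0000000f);
	return info;
}

int main(void)
{
	/* Invented sample value, only to demonstrate the decode. */
	struct fault_info info = decode_fault(0x03000142);

	printf("gpc %u client %u %s %s reason %u\n",
	       info.gpc, info.client,
	       info.access ? "write" : "read",
	       info.hub ? "hub" : "gpc",
	       info.reason);
	return 0;
}
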
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 1053fe796466..5d4b695cab8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -646,31 +646,6 @@ gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
-static void
-gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
-{
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
- u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
- u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
- u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
- struct nvkm_fault_data info;
-
- info.inst = (u64)inst << 12;
- info.addr = ((u64)vahi << 32) | valo;
- info.time = 0;
- info.engine = unit;
- info.valid = 1;
- info.gpc = (type & 0x1f000000) >> 24;
- info.client = (type & 0x00001f00) >> 8;
- info.access = (type & 0x00000080) >> 7;
- info.hub = (type & 0x00000040) >> 6;
- info.reason = (type & 0x000000ff);
-
- nvkm_fifo_fault(&fifo->base, &info);
-}
-
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
@@ -849,7 +824,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
u32 mask = nvkm_rd32(device, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- gk104_fifo_intr_fault(fifo, unit);
+ fifo->func->intr.fault(&fifo->base, unit);
nvkm_wr32(device, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
@@ -1204,6 +1179,7 @@ gk104_fifo_fault_gpcclient[] = {
static const struct gk104_fifo_func
gk104_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index c33f4593cbc6..6407a4a174cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -45,6 +45,10 @@ struct gk104_fifo {
};
struct gk104_fifo_func {
+ struct {
+ void (*fault)(struct nvkm_fifo *, int unit);
+ } intr;
+
const struct gk104_fifo_pbdma_func {
int (*nr)(struct gk104_fifo *);
void (*init)(struct gk104_fifo *);
@@ -110,12 +114,14 @@ void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
+void gm107_fifo_intr_fault(struct nvkm_fifo *, int);
extern const struct nvkm_enum gm107_fifo_fault_engine[];
extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+void gp100_fifo_intr_fault(struct nvkm_fifo *, int);
extern const struct nvkm_enum gp100_fifo_fault_engine[];
extern const struct nvkm_enum gv100_fifo_fault_access[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index 8adfa6b182cb..f820969e4405 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -48,6 +48,7 @@ gk110_fifo_runlist = {
static const struct gk104_fifo_func
gk110_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 9553fb4af601..2f54787b5fd0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -45,6 +45,7 @@ gk208_fifo_pbdma = {
static const struct gk104_fifo_func
gk208_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index a4c6ac3cd6c7..a814c4e0ed3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -26,6 +26,7 @@
static const struct gk104_fifo_func
gk20a_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index acf230764cb0..c2a2e4572f6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -25,6 +25,7 @@
#include "changk104.h"
#include <core/gpuobj.h>
+#include <subdev/fault.h>
#include <nvif/class.h>
@@ -67,8 +68,33 @@ gm107_fifo_fault_engine[] = {
{}
};
+void
+gm107_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00003f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
static const struct gk104_fifo_func
gm107_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
.pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b96c1c5d6577..b8cfe3b28c4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -42,6 +42,7 @@ gm200_fifo_pbdma = {
static const struct gk104_fifo_func
gm200_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index a49539b9e4ec..70b4feebc1fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -26,6 +26,7 @@
static const struct gk104_fifo_func
gm20b_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 54377e0f6a88..2c7a0176b3c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -24,6 +24,8 @@
#include "gk104.h"
#include "changk104.h"
+#include <subdev/fault.h>
+
#include <nvif/class.h>
const struct nvkm_enum
@@ -50,8 +52,33 @@ gp100_fifo_fault_engine[] = {
{}
};
+void
+gp100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.hub = (type & 0x00100000) >> 20;
+ info.access = (type & 0x00070000) >> 16;
+ info.client = (type & 0x00007f00) >> 8;
+ info.reason = (type & 0x0000001f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
static const struct gk104_fifo_func
gp100_fifo = {
+ .intr.fault = gp100_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 778ba7e46fb3..8c65ad4feedb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -26,6 +26,7 @@
static const struct gk104_fifo_func
gp10b_fifo = {
+ .intr.fault = gp100_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index c66f5370b21f..0ef8baab513e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -37,4 +37,6 @@ struct nvkm_fifo_func {
void nv04_fifo_intr(struct nvkm_fifo *);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void gf100_fifo_intr_fault(struct nvkm_fifo *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 73724a8cb861..558c86fd8e82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -36,8 +36,10 @@ nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/gp102.o
nvkm-y += nvkm/engine/gr/gp104.o
nvkm-y += nvkm/engine/gr/gp107.o
+nvkm-y += nvkm/engine/gr/gp108.o
nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
+nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -60,3 +62,4 @@ nvkm-y += nvkm/engine/gr/ctxgp102.o
nvkm-y += nvkm/engine/gr/ctxgp104.o
nvkm-y += nvkm/engine/gr/ctxgp107.o
nvkm-y += nvkm/engine/gr/ctxgv100.o
+nvkm-y += nvkm/engine/gr/ctxtu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 85f2d1e950e8..297915719bf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1324,10 +1324,8 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
void
gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
{
- struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *func = gr->func->grctx;
- int gpc, sm, i, j;
- u32 data;
+ int sm;
for (sm = 0; sm < gr->sm_nr; sm++) {
func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);
@@ -1335,12 +1333,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
func->tpc_nr(gr, gr->sm[sm].gpc);
}
- for (gpc = 0, i = 0; i < 4; i++) {
- for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
- data |= gr->tpc_nr[gpc] << (j * 4);
- nvkm_wr32(device, 0x406028 + (i * 4), data);
- nvkm_wr32(device, 0x405870 + (i * 4), data);
- }
+ gf100_gr_init_num_tpc_per_gpc(gr, false, true);
+ if (!func->skip_pd_num_tpc_per_gpc)
+ gf100_gr_init_num_tpc_per_gpc(gr, true, false);
if (func->r4060a8)
func->r4060a8(gr);
@@ -1374,7 +1369,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mc_unk260(device, 0);
- if (!gr->fuc_sw_ctx) {
+ if (!gr->sw_ctx) {
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc_0);
gf100_gr_mmio(gr, grctx->zcull);
@@ -1382,7 +1377,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
} else {
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
}
gf100_gr_wait_idle(gr);
@@ -1401,8 +1396,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
if (grctx->r400088) grctx->r400088(gr, false);
- if (gr->fuc_bundle)
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ if (gr->bundle)
+ gf100_gr_icmd(gr, gr->bundle);
else
gf100_gr_icmd(gr, grctx->icmd);
if (grctx->sw_veid_bundle_init)
@@ -1411,8 +1406,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
- if (gr->fuc_method)
- gf100_gr_mthd(gr, gr->fuc_method);
+ if (gr->method)
+ gf100_gr_mthd(gr, gr->method);
else
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc_unk260(device, 1);
@@ -1431,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
grctx->r419a3c(gr);
if (grctx->r408840)
grctx->r408840(gr);
+ if (grctx->r419c0c)
+ grctx->r419c0c(gr);
}
#define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 478b4723d0f9..32bbddc0993e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -57,6 +57,7 @@ struct gf100_grctx_func {
/* floorsweeping */
void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
void (*tpc_nr)(struct gf100_gr *, int gpc);
+ bool skip_pd_num_tpc_per_gpc;
void (*r4060a8)(struct gf100_gr *);
void (*rop_mapping)(struct gf100_gr *);
void (*alpha_beta_tables)(struct gf100_gr *);
@@ -76,6 +77,7 @@ struct gf100_grctx_func {
void (*r418e94)(struct gf100_gr *);
void (*r419a3c)(struct gf100_gr *);
void (*r408840)(struct gf100_gr *);
+ void (*r419c0c)(struct gf100_gr *);
};
extern const struct gf100_grctx_func gf100_grctx;
@@ -153,6 +155,14 @@ extern const struct gf100_grctx_func gp107_grctx;
extern const struct gf100_grctx_func gv100_grctx;
+extern const struct gf100_grctx_func tu102_grctx;
+void gv100_grctx_unkn88c(struct gf100_gr *, bool);
+void gv100_grctx_generate_unkn(struct gf100_gr *);
+extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[];
+void gv100_grctx_generate_attrib(struct gf100_grctx *);
+void gv100_grctx_generate_rop_mapping(struct gf100_gr *);
+void gv100_grctx_generate_r400088(struct gf100_gr *, bool);
+
/* context init value lists */
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 896d473dcc0f..c0d36bc601f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -32,7 +32,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
gf100_gr_wait_idle(gr);
@@ -56,10 +56,10 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
- gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_mthd(gr, gr->method);
gf100_gr_wait_idle(gr);
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ gf100_gr_icmd(gr, gr->bundle);
grctx->pagepool(info);
grctx->bundle(info);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index a1d9e114ebeb..6b92f8aa18a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -29,7 +29,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i, tmp;
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
gf100_gr_wait_idle(gr);
@@ -59,10 +59,10 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
- gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_mthd(gr, gr->method);
gf100_gr_wait_idle(gr);
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ gf100_gr_icmd(gr, gr->bundle);
grctx->pagepool(info);
grctx->bundle(info);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
index 0990765ef191..39553d55d3f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
@@ -25,7 +25,7 @@
* PGRAPH context implementation
******************************************************************************/
-static const struct gf100_gr_init
+const struct gf100_gr_init
gv100_grctx_init_sw_veid_bundle_init_0[] = {
{ 0x00001000, 64, 0x00100000, 0x00000008 },
{ 0x00000941, 64, 0x00100000, 0x00000000 },
@@ -58,7 +58,7 @@ gv100_grctx_pack_sw_veid_bundle_init[] = {
{}
};
-static void
+void
gv100_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
@@ -67,14 +67,14 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 gfxp = grctx->gfxp_nr;
const int s = 12;
- const int max_batches = 0xffff;
u32 size = grctx->alpha_nr_max * gr->tpc_total;
u32 ao = 0;
u32 bo = ao + size;
int gpc, ppc, b, n = 0;
- size += grctx->gfxp_nr * gr->tpc_total;
- size = ((size * 0x20) + 128) & ~127;
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+ size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
+ size = ((size * 0x20) + 127) & ~127;
b = mmio_vram(info, size, (1 << s), false);
mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -84,13 +84,12 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
mmio_wr32(info, 0x405830, attrib);
mmio_wr32(info, 0x40585c, alpha);
- mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
- const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
- const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_max;
+ const u32 gs = gfxp * gr->ppc_tpc_max;
const u32 u = 0x418ea0 + (n * 0x04);
const u32 o = PPC_UNIT(gpc, ppc, 0);
if (!(gr->ppc_mask[gpc] & (1 << ppc)))
@@ -110,7 +109,7 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x41befc, 0x00000100);
}
-static void
+void
gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -147,7 +146,7 @@ gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
gr->screen_tile_row_offset);
}
-static void
+void
gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -163,7 +162,7 @@ gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
}
-static void
+void
gv100_grctx_generate_unkn(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -174,7 +173,7 @@ gv100_grctx_generate_unkn(struct gf100_gr *gr)
nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
-static void
+void
gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
new file mode 100644
index 000000000000..2299ca07d04a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+tu102_grctx_generate_r419c0c(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000);
+ nvkm_mask(device, 0x400080, 0x00000000, 0x00000000);
+}
+
+static void
+tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
+}
+
+static const struct gf100_gr_init
+tu102_grctx_init_unknown_bundle_init_0[] = {
+ { 0x00001000, 1, 0x00000001, 0x00000004 },
+ { 0x00002020, 64, 0x00000001, 0x00000000 },
+ { 0x0001e100, 1, 0x00000001, 0x00000001 },
+ {}
+};
+
+static const struct gf100_gr_pack
+tu102_grctx_pack_sw_veid_bundle_init[] = {
+ { gv100_grctx_init_sw_veid_bundle_init_0 },
+ { tu102_grctx_init_unknown_bundle_init_0 },
+ {}
+};
+
+static void
+tu102_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ const u64 size = 0x80000; /*XXX: educated guess */
+ const int s = 8;
+ const int b = mmio_vram(info, size, (1 << s), true);
+
+ gv100_grctx_generate_attrib(info);
+
+ mmio_refn(info, 0x408070, 0x00000000, s, b);
+ mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */
+ mmio_refn(info, 0x419034, 0x00000000, s, b);
+ mmio_wr32(info, 0x408078, 0x00000000);
+}
+
+const struct gf100_grctx_func
+tu102_grctx = {
+ .unkn88c = gv100_grctx_unkn88c,
+ .main = gf100_grctx_generate_main,
+ .unkn = gv100_grctx_generate_unkn,
+ .sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0xa80,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = tu102_grctx_generate_attrib,
+ .attrib_nr_max = 0x800,
+ .attrib_nr = 0x700,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+ .gfxp_nr = 0xfa8,
+ .sm_id = tu102_grctx_generate_sm_id,
+ .skip_pd_num_tpc_per_gpc = true,
+ .rop_mapping = gv100_grctx_generate_rop_mapping,
+ .r406500 = gm200_grctx_generate_r406500,
+ .r400088 = gv100_grctx_generate_r400088,
+ .r419c0c = tu102_grctx_generate_r419c0c,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index c24f35ad56a6..ae2d5b6891cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gk208_grhub_code[] = {
0x020014fe,
0x12004002,
0xbd0002f6,
- 0x05c94104,
+ 0x05ca4104,
0xbd0010fe,
0x07004024,
0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gk208_grhub_code[] = {
0x01039204,
0x03090080,
0xbd0003f6,
- 0x87044204,
- 0xf6040040,
- 0x04bd0002,
- 0x00400402,
- 0x0002f603,
- 0x31f404bd,
- 0x96048e10,
- 0x00657e40,
- 0xc7feb200,
- 0x01b590f1,
- 0x1ff4f003,
- 0x01020fb5,
- 0x041fbb01,
- 0x800112b6,
- 0xf6010300,
- 0x04bd0001,
- 0x01040080,
+ 0x87048204,
+ 0x04004000,
+ 0xbd0002f6,
+ 0x40040204,
+ 0x02f60300,
+ 0xf404bd00,
+ 0x048e1031,
+ 0x657e4096,
+ 0xfeb20000,
+ 0xb590f1c7,
+ 0xf4f00301,
+ 0x020fb51f,
+ 0x1fbb0101,
+ 0x0112b604,
+ 0x01030080,
0xbd0001f6,
- 0x01004104,
- 0xac7e020f,
- 0xbb7e0006,
- 0x100f0006,
- 0x0006fd7e,
- 0x98000e98,
- 0x207e010f,
- 0x14950001,
- 0xc0008008,
- 0x0004f601,
- 0x008004bd,
- 0x04f601c1,
- 0xb704bd00,
- 0xbb130030,
- 0xf5b6001f,
- 0xd3008002,
- 0x000ff601,
- 0x15b604bd,
- 0x0110b608,
- 0xb20814b6,
- 0x02687e1f,
- 0x001fbb00,
- 0x84020398,
-/* 0x041f: init_gpc */
- 0xb8502000,
- 0x0008044e,
- 0x8f7e1fb2,
+ 0x04008004,
+ 0x0001f601,
+ 0x004104bd,
+ 0x7e020f01,
+ 0x7e0006ad,
+ 0x0f0006bc,
+ 0x06fe7e10,
+ 0x000e9800,
+ 0x7e010f98,
+ 0x95000120,
+ 0x00800814,
+ 0x04f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0004,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x008002f5,
+ 0x0ff601d3,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0x687e1fb2,
+ 0x1fbb0002,
+ 0x02039800,
+ 0x50200084,
+/* 0x0420: init_gpc */
+ 0x08044eb8,
+ 0x7e1fb200,
+ 0xb800008f,
+ 0x00010c4e,
+ 0x8f7ef4bd,
0x4eb80000,
- 0xbd00010c,
- 0x008f7ef4,
- 0x044eb800,
- 0x8f7e0001,
+ 0x7e000104,
+ 0xb800008f,
+ 0x0001004e,
+ 0x8f7e020f,
0x4eb80000,
- 0x0f000100,
- 0x008f7e02,
- 0x004eb800,
-/* 0x044e: init_gpc_wait */
+/* 0x044f: init_gpc_wait */
+ 0x7e000800,
+ 0xc8000065,
+ 0x0bf41fff,
+ 0x044eb8f9,
0x657e0008,
- 0xffc80000,
- 0xf90bf41f,
- 0x08044eb8,
- 0x00657e00,
- 0x001fbb00,
- 0x800040b7,
- 0xf40132b6,
- 0x000fb41b,
- 0x0006fd7e,
- 0xac7e000f,
- 0x00800006,
- 0x01f60201,
- 0xbd04bd00,
- 0x1f19f014,
- 0x02300080,
- 0xbd0001f6,
-/* 0x0491: wait */
- 0x0028f404,
-/* 0x0497: main */
- 0x0d0031f4,
- 0x00377e10,
- 0xf401f400,
- 0x4001e4b1,
- 0x00c71bf5,
- 0x99f094bd,
- 0x37008004,
- 0x0009f602,
- 0x008104bd,
- 0x11cf02c0,
- 0xc1008200,
- 0x0022cf02,
- 0xf41f13c8,
- 0x23c8770b,
- 0x550bf41f,
- 0x12b220f9,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0231f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x20fc04bd,
- 0x99f094bd,
- 0x37008006,
- 0x0009f602,
- 0x31f404bd,
- 0x08807e01,
+ 0x1fbb0000,
+ 0x0040b700,
+ 0x0132b680,
+ 0x0fb41bf4,
+ 0x06fe7e00,
+ 0x7e000f00,
+ 0x800006ad,
+ 0xf6020100,
+ 0x04bd0001,
+ 0x19f014bd,
+ 0x3000801f,
+ 0x0001f602,
+/* 0x0492: wait */
+ 0x28f404bd,
+ 0x0031f400,
+/* 0x0498: main */
+ 0x377e100d,
+ 0x01f40000,
+ 0x01e4b1f4,
+ 0xc71bf540,
0xf094bd00,
- 0x00800699,
+ 0x00800499,
+ 0x09f60237,
+ 0x8104bd00,
+ 0xcf02c000,
+ 0x00820011,
+ 0x22cf02c1,
+ 0x1f13c800,
+ 0xc8770bf4,
+ 0x0bf41f23,
+ 0xb220f955,
+ 0xf094bd12,
+ 0x00800799,
+ 0x09f60237,
+ 0xf404bd00,
+ 0x31f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
+ 0xfc04bd00,
+ 0xf094bd20,
+ 0x00800699,
+ 0x09f60237,
0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
- 0x20f92f0e,
- 0x32f412b2,
- 0x0232f401,
- 0x0008807e,
- 0x008020fc,
- 0x02f602c0,
+ 0x817e0131,
+ 0x94bd0008,
+ 0x800699f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+ 0xf92f0ef4,
+ 0xf412b220,
+ 0x32f40132,
+ 0x08817e02,
+ 0x8020fc00,
+ 0xf602c000,
+ 0x04bd0002,
+/* 0x053f: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0x7e0232f4,
+/* 0x054f: chsw_done */
+ 0x02000881,
+ 0xc3008001,
+ 0x0002f602,
+ 0x94bd04bd,
+ 0x800499f0,
+ 0xf6021700,
+ 0x04bd0009,
+ 0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b20c1b,
+ 0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+ 0xb0400ef4,
+ 0x1bf402e4,
+ 0xf094bd2c,
+ 0x00800799,
+ 0x09f60237,
0xf404bd00,
-/* 0x053e: chsw_no_prev */
- 0x23c8130e,
- 0x0d0bf41f,
- 0xf40131f4,
- 0x807e0232,
-/* 0x054e: chsw_done */
- 0x01020008,
- 0x02c30080,
- 0xbd0002f6,
- 0xf094bd04,
- 0x00800499,
+ 0x32f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
- 0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
- 0xb0ff300e,
- 0x1bf401e4,
- 0x7ef2b20c,
- 0xf4000820,
-/* 0x057a: main_not_ctx_chan */
- 0xe4b0400e,
- 0x2c1bf402,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0232f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
- 0x10ef9411,
- 0x7e01f5f0,
- 0xf50002f8,
-/* 0x05b7: main_done */
- 0xbdfee40e,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
- 0xd20ef504,
-/* 0x05c9: ih */
- 0xf900f9fe,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0xcf02004a,
- 0xabc400aa,
- 0x230bf404,
- 0x004e100d,
- 0x00eecf1a,
- 0xcf19004f,
- 0x047e00ff,
- 0xb0b70000,
- 0x010e0400,
- 0xf61d0040,
- 0x04bd000e,
-/* 0x060c: ih_no_fifo */
- 0x0100abe4,
- 0x0d0c0bf4,
- 0x40014e10,
- 0x0000047e,
-/* 0x061c: ih_no_ctxsw */
- 0x0400abe4,
- 0x8e560bf4,
- 0x7e400708,
+ 0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+ 0xef94110e,
+ 0x01f5f010,
+ 0x0002f87e,
+ 0xfee40ef5,
+/* 0x05b8: main_done */
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+ 0x0ef504bd,
+/* 0x05ca: ih */
+ 0x00f9fed2,
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e100d23,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0xb7000004,
+ 0x0e0400b0,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+ 0x00abe404,
+ 0x0c0bf401,
+ 0x014e100d,
+ 0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+ 0x00abe400,
+ 0x560bf404,
+ 0x4007088e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60204,
+ 0x8e04bd00,
+ 0x7e400704,
0xb2000065,
- 0x040080ff,
+ 0x030080ff,
0x000ff602,
- 0x048e04bd,
- 0x657e4007,
- 0xffb20000,
- 0x02030080,
- 0xbd000ff6,
- 0x50fec704,
- 0x8f02ee94,
- 0xbb400700,
- 0x657e00ef,
- 0x00800000,
- 0x0ff60202,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x4007008f,
+ 0x7e00efbb,
+ 0x80000065,
+ 0xf6020200,
+ 0x04bd000f,
+ 0xf87e030f,
+ 0x004b0002,
+ 0x8ebfb201,
+ 0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+ 0x4b00008f,
+ 0xb0bd0504,
+ 0xf4b4abff,
+ 0x00800c0b,
+ 0x0bf60307,
+/* 0x068b: ih_no_other */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x06ad: ctx_4170s */
+ 0x10f5f001,
+ 0x708effb2,
+ 0x8f7e4041,
+ 0x00f80000,
+/* 0x06bc: ctx_4170w */
+ 0x4041708e,
+ 0x0000657e,
+ 0xf4f0ffb2,
+ 0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+ 0x004e00f8,
+ 0x40e5f002,
+ 0xf020e5f0,
+ 0x008010e5,
+ 0x0ef60185,
0x0f04bd00,
- 0x02f87e03,
- 0x01004b00,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd05,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x068a: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0xf400fc80,
- 0x01f80032,
-/* 0x06ac: ctx_4170s */
- 0xb210f5f0,
- 0x41708eff,
+/* 0x06e5: ctx_redswitch_delay */
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xf10400e5,
+ 0x800100e5,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x06fe: ctx_86c */
+ 0x008000f8,
+ 0x0ff60223,
+ 0xb204bd00,
+ 0x8a148eff,
0x008f7e40,
-/* 0x06bb: ctx_4170w */
- 0x8e00f800,
- 0x7e404170,
- 0xb2000065,
- 0x10f4f0ff,
- 0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
- 0x02004e00,
- 0xf040e5f0,
- 0xe5f020e5,
- 0x85008010,
- 0x000ef601,
- 0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xe5f1fd1b,
- 0xe5f10400,
- 0x00800100,
- 0x0ef60185,
- 0xf804bd00,
-/* 0x06fd: ctx_86c */
- 0x23008000,
+ 0x8effb200,
+ 0x7e41a88c,
+ 0xf800008f,
+/* 0x071d: ctx_mem */
+ 0x84008000,
0x000ff602,
- 0xffb204bd,
- 0x408a148e,
- 0x00008f7e,
- 0x8c8effb2,
- 0x8f7e41a8,
- 0x00f80000,
-/* 0x071c: ctx_mem */
- 0x02840080,
- 0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
- 0x84008f04,
- 0x00ffcf02,
- 0xf405fffd,
- 0x00f8f61b,
-/* 0x0734: ctx_load */
- 0x99f094bd,
- 0x37008005,
- 0x0009f602,
- 0x0c0a04bd,
- 0x0000b87e,
- 0x0080f4bd,
- 0x0ff60289,
- 0x8004bd00,
- 0xf602c100,
- 0x04bd0002,
- 0x02830080,
+/* 0x0726: ctx_mem_wait */
+ 0x008f04bd,
+ 0xffcf0284,
+ 0x05fffd00,
+ 0xf8f61bf4,
+/* 0x0735: ctx_load */
+ 0xf094bd00,
+ 0x00800599,
+ 0x09f60237,
+ 0x0a04bd00,
+ 0x00b87e0c,
+ 0x80f4bd00,
+ 0xf6028900,
+ 0x04bd000f,
+ 0x02c10080,
0xbd0002f6,
- 0x7e070f04,
- 0x8000071c,
- 0xf602c000,
- 0x04bd0002,
- 0xf0000bfe,
- 0x24b61f2a,
- 0x0220b604,
- 0x99f094bd,
- 0x37008008,
- 0x0009f602,
- 0x008004bd,
- 0x02f60281,
- 0xd204bd00,
- 0x80000000,
- 0x800225f0,
- 0xf6028800,
- 0x04bd0002,
- 0x00421001,
- 0x0223f002,
- 0xf80512fa,
- 0xf094bd03,
+ 0x83008004,
+ 0x0002f602,
+ 0x070f04bd,
+ 0x00071d7e,
+ 0x02c00080,
+ 0xbd0002f6,
+ 0x000bfe04,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0xf094bd02,
0x00800899,
- 0x09f60217,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01b50512,
- 0xf094bd16,
- 0x00800999,
0x09f60237,
0x8004bd00,
0xf6028100,
- 0x04bd0001,
- 0x00800102,
- 0x02f60288,
- 0x4104bd00,
- 0x13f00100,
- 0x0501fa06,
+ 0x04bd0002,
+ 0x000000d2,
+ 0x0225f080,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x42100104,
+ 0x23f00200,
+ 0x0512fa02,
0x94bd03f8,
- 0x800999f0,
+ 0x800899f0,
0xf6021700,
0x04bd0009,
- 0x99f094bd,
- 0x17008005,
- 0x0009f602,
- 0x00f804bd,
-/* 0x0820: ctx_chan */
- 0x0007347e,
- 0xb87e0c0a,
- 0x050f0000,
- 0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
- 0x039800f8,
- 0x81008041,
- 0x0003f602,
- 0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
- 0xf4ff34c4,
- 0x00450e1b,
- 0x0653f002,
- 0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
- 0x804e9803,
- 0x7e814f98,
- 0xb600008f,
- 0x12b60830,
- 0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
- 0x80160398,
- 0xf6028100,
- 0x04bd0003,
- 0x414000b5,
- 0x13f00100,
- 0x0601fa06,
- 0x00f803f8,
-/* 0x0880: ctx_xfer */
- 0x0080040e,
- 0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
- 0x8e04bd00,
- 0xcf030000,
- 0xe4f100ee,
- 0x1bf42000,
- 0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
- 0x0f0c02f4,
- 0x06fd7e10,
- 0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
- 0xac7e020f,
- 0xbb7e0006,
- 0xcd7e0006,
- 0xf4bd0006,
- 0x0006ac7e,
- 0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
- 0xbd160198,
- 0x05008024,
- 0x0002f601,
- 0x1fb204bd,
- 0x41a5008e,
- 0x00008f7e,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0x048effb2,
- 0x8f7e41a5,
- 0x167e0000,
- 0x24bd0002,
- 0x0247fc80,
- 0xbd0002f6,
- 0x012cf004,
- 0x800320b6,
- 0xf6024afc,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0xb50512fd,
+ 0x94bd1601,
+ 0x800999f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0001f6,
+ 0x80010204,
+ 0xf6028800,
0x04bd0002,
- 0xf001acf0,
- 0x000b06a5,
- 0x98000c98,
- 0x000e010d,
- 0x00013d7e,
- 0xec7e080a,
- 0x0a7e0000,
- 0x01f40002,
- 0x7e0c0a12,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xbd03f805,
+ 0x0999f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf094bd04,
+ 0x00800599,
+ 0x09f60217,
+ 0xf804bd00,
+/* 0x0821: ctx_chan */
+ 0x07357e00,
+ 0x7e0c0a00,
0x0f0000b8,
- 0x071c7e05,
- 0x2d02f400,
-/* 0x093c: ctx_xfer_post */
- 0xac7e020f,
- 0xf4bd0006,
- 0x0006fd7e,
- 0x0002277e,
- 0x0006bb7e,
- 0xac7ef4bd,
+ 0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+ 0x9800f800,
+ 0x00804103,
+ 0x03f60281,
+ 0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+ 0xff34c434,
+ 0x450e1bf4,
+ 0x53f00200,
+ 0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0x814f9880,
+ 0x00008f7e,
+ 0xb60830b6,
+ 0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+ 0x160398df,
+ 0x02810080,
+ 0xbd0003f6,
+ 0x4000b504,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xf803f806,
+/* 0x0881: ctx_xfer */
+ 0x80040e00,
+ 0xf6030200,
+ 0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+ 0x0300008e,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f51b,
+ 0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+ 0xfe7e100f,
0x11f40006,
- 0x40019810,
- 0xf40511fd,
- 0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
- 0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+ 0x7e020f1b,
+ 0x7e0006ad,
+ 0x7e0006bc,
+ 0xbd0006ce,
+ 0x06ad7ef4,
+ 0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+ 0x16019800,
+ 0x008024bd,
+ 0x02f60105,
+ 0xb204bd00,
+ 0xa5008e1f,
+ 0x008f7e41,
+ 0x01fcf000,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8effb205,
+ 0x7e41a504,
+ 0x7e00008f,
+ 0xbd000216,
+ 0x47fc8024,
+ 0x0002f602,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x024afc80,
+ 0xbd0002f6,
+ 0x01acf004,
+ 0x0b06a5f0,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x7e080a00,
+ 0x7e0000ec,
+ 0xf400020a,
+ 0x0c0a1201,
+ 0x0000b87e,
+ 0x1d7e050f,
+ 0x02f40007,
+/* 0x093d: ctx_xfer_post */
+ 0x7e020f2d,
+ 0xbd0006ad,
+ 0x06fe7ef4,
+ 0x02277e00,
+ 0x06bc7e00,
+ 0x7ef4bd00,
+ 0xf40006ad,
+ 0x01981011,
+ 0x0511fd40,
+ 0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+ 0xf8000833,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 649a442b4390..449dae753203 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gm107_grhub_code[] = {
0x020014fe,
0x12004002,
0xbd0002f6,
- 0x05c94104,
+ 0x05ca4104,
0xbd0010fe,
0x07004024,
0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gm107_grhub_code[] = {
0x01039204,
0x03090080,
0xbd0003f6,
- 0x87044204,
- 0xf6040040,
- 0x04bd0002,
- 0x00400402,
- 0x0002f603,
- 0x31f404bd,
- 0x96048e10,
- 0x00657e40,
- 0xc7feb200,
- 0x01b590f1,
- 0x1ff4f003,
- 0x01020fb5,
- 0x041fbb01,
- 0x800112b6,
- 0xf6010300,
- 0x04bd0001,
- 0x01040080,
+ 0x87048204,
+ 0x04004000,
+ 0xbd0002f6,
+ 0x40040204,
+ 0x02f60300,
+ 0xf404bd00,
+ 0x048e1031,
+ 0x657e4096,
+ 0xfeb20000,
+ 0xb590f1c7,
+ 0xf4f00301,
+ 0x020fb51f,
+ 0x1fbb0101,
+ 0x0112b604,
+ 0x01030080,
0xbd0001f6,
- 0x01004104,
- 0xac7e020f,
- 0xbb7e0006,
- 0x100f0006,
- 0x0006fd7e,
- 0x98000e98,
- 0x207e010f,
- 0x14950001,
- 0xc0008008,
- 0x0004f601,
- 0x008004bd,
- 0x04f601c1,
- 0xb704bd00,
- 0xbb130030,
- 0xf5b6001f,
- 0xd3008002,
- 0x000ff601,
- 0x15b604bd,
- 0x0110b608,
- 0xb20814b6,
- 0x02687e1f,
- 0x001fbb00,
- 0x84020398,
-/* 0x041f: init_gpc */
- 0xb8502000,
- 0x0008044e,
- 0x8f7e1fb2,
+ 0x04008004,
+ 0x0001f601,
+ 0x004104bd,
+ 0x7e020f01,
+ 0x7e0006ad,
+ 0x0f0006bc,
+ 0x06fe7e10,
+ 0x000e9800,
+ 0x7e010f98,
+ 0x95000120,
+ 0x00800814,
+ 0x04f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0004,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x008002f5,
+ 0x0ff601d3,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0x687e1fb2,
+ 0x1fbb0002,
+ 0x02039800,
+ 0x50200084,
+/* 0x0420: init_gpc */
+ 0x08044eb8,
+ 0x7e1fb200,
+ 0xb800008f,
+ 0x00010c4e,
+ 0x8f7ef4bd,
0x4eb80000,
- 0xbd00010c,
- 0x008f7ef4,
- 0x044eb800,
- 0x8f7e0001,
+ 0x7e000104,
+ 0xb800008f,
+ 0x0001004e,
+ 0x8f7e020f,
0x4eb80000,
- 0x0f000100,
- 0x008f7e02,
- 0x004eb800,
-/* 0x044e: init_gpc_wait */
+/* 0x044f: init_gpc_wait */
+ 0x7e000800,
+ 0xc8000065,
+ 0x0bf41fff,
+ 0x044eb8f9,
0x657e0008,
- 0xffc80000,
- 0xf90bf41f,
- 0x08044eb8,
- 0x00657e00,
- 0x001fbb00,
- 0x800040b7,
- 0xf40132b6,
- 0x000fb41b,
- 0x0006fd7e,
- 0xac7e000f,
- 0x00800006,
- 0x01f60201,
- 0xbd04bd00,
- 0x1f19f014,
- 0x02300080,
- 0xbd0001f6,
-/* 0x0491: wait */
- 0x0028f404,
-/* 0x0497: main */
- 0x0d0031f4,
- 0x00377e10,
- 0xf401f400,
- 0x4001e4b1,
- 0x00c71bf5,
- 0x99f094bd,
- 0x37008004,
- 0x0009f602,
- 0x008104bd,
- 0x11cf02c0,
- 0xc1008200,
- 0x0022cf02,
- 0xf41f13c8,
- 0x23c8770b,
- 0x550bf41f,
- 0x12b220f9,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0231f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x20fc04bd,
- 0x99f094bd,
- 0x37008006,
- 0x0009f602,
- 0x31f404bd,
- 0x08807e01,
+ 0x1fbb0000,
+ 0x0040b700,
+ 0x0132b680,
+ 0x0fb41bf4,
+ 0x06fe7e00,
+ 0x7e000f00,
+ 0x800006ad,
+ 0xf6020100,
+ 0x04bd0001,
+ 0x19f014bd,
+ 0x3000801f,
+ 0x0001f602,
+/* 0x0492: wait */
+ 0x28f404bd,
+ 0x0031f400,
+/* 0x0498: main */
+ 0x377e100d,
+ 0x01f40000,
+ 0x01e4b1f4,
+ 0xc71bf540,
0xf094bd00,
- 0x00800699,
+ 0x00800499,
+ 0x09f60237,
+ 0x8104bd00,
+ 0xcf02c000,
+ 0x00820011,
+ 0x22cf02c1,
+ 0x1f13c800,
+ 0xc8770bf4,
+ 0x0bf41f23,
+ 0xb220f955,
+ 0xf094bd12,
+ 0x00800799,
+ 0x09f60237,
+ 0xf404bd00,
+ 0x31f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
+ 0xfc04bd00,
+ 0xf094bd20,
+ 0x00800699,
+ 0x09f60237,
0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
- 0x20f92f0e,
- 0x32f412b2,
- 0x0232f401,
- 0x0008807e,
- 0x008020fc,
- 0x02f602c0,
+ 0x817e0131,
+ 0x94bd0008,
+ 0x800699f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+ 0xf92f0ef4,
+ 0xf412b220,
+ 0x32f40132,
+ 0x08817e02,
+ 0x8020fc00,
+ 0xf602c000,
+ 0x04bd0002,
+/* 0x053f: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0x7e0232f4,
+/* 0x054f: chsw_done */
+ 0x02000881,
+ 0xc3008001,
+ 0x0002f602,
+ 0x94bd04bd,
+ 0x800499f0,
+ 0xf6021700,
+ 0x04bd0009,
+ 0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b20c1b,
+ 0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+ 0xb0400ef4,
+ 0x1bf402e4,
+ 0xf094bd2c,
+ 0x00800799,
+ 0x09f60237,
0xf404bd00,
-/* 0x053e: chsw_no_prev */
- 0x23c8130e,
- 0x0d0bf41f,
- 0xf40131f4,
- 0x807e0232,
-/* 0x054e: chsw_done */
- 0x01020008,
- 0x02c30080,
- 0xbd0002f6,
- 0xf094bd04,
- 0x00800499,
+ 0x32f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
- 0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
- 0xb0ff300e,
- 0x1bf401e4,
- 0x7ef2b20c,
- 0xf4000820,
-/* 0x057a: main_not_ctx_chan */
- 0xe4b0400e,
- 0x2c1bf402,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0232f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
- 0x10ef9411,
- 0x7e01f5f0,
- 0xf50002f8,
-/* 0x05b7: main_done */
- 0xbdfee40e,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
- 0xd20ef504,
-/* 0x05c9: ih */
- 0xf900f9fe,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0xcf02004a,
- 0xabc400aa,
- 0x230bf404,
- 0x004e100d,
- 0x00eecf1a,
- 0xcf19004f,
- 0x047e00ff,
- 0xb0b70000,
- 0x010e0400,
- 0xf61d0040,
- 0x04bd000e,
-/* 0x060c: ih_no_fifo */
- 0x0100abe4,
- 0x0d0c0bf4,
- 0x40014e10,
- 0x0000047e,
-/* 0x061c: ih_no_ctxsw */
- 0x0400abe4,
- 0x8e560bf4,
- 0x7e400708,
+ 0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+ 0xef94110e,
+ 0x01f5f010,
+ 0x0002f87e,
+ 0xfee40ef5,
+/* 0x05b8: main_done */
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+ 0x0ef504bd,
+/* 0x05ca: ih */
+ 0x00f9fed2,
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e100d23,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0xb7000004,
+ 0x0e0400b0,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+ 0x00abe404,
+ 0x0c0bf401,
+ 0x014e100d,
+ 0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+ 0x00abe400,
+ 0x560bf404,
+ 0x4007088e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60204,
+ 0x8e04bd00,
+ 0x7e400704,
0xb2000065,
- 0x040080ff,
+ 0x030080ff,
0x000ff602,
- 0x048e04bd,
- 0x657e4007,
- 0xffb20000,
- 0x02030080,
- 0xbd000ff6,
- 0x50fec704,
- 0x8f02ee94,
- 0xbb400700,
- 0x657e00ef,
- 0x00800000,
- 0x0ff60202,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x4007008f,
+ 0x7e00efbb,
+ 0x80000065,
+ 0xf6020200,
+ 0x04bd000f,
+ 0xf87e030f,
+ 0x004b0002,
+ 0x8ebfb201,
+ 0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+ 0x4b00008f,
+ 0xb0bd0504,
+ 0xf4b4abff,
+ 0x00800c0b,
+ 0x0bf60307,
+/* 0x068b: ih_no_other */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x06ad: ctx_4170s */
+ 0x10f5f001,
+ 0x708effb2,
+ 0x8f7e4041,
+ 0x00f80000,
+/* 0x06bc: ctx_4170w */
+ 0x4041708e,
+ 0x0000657e,
+ 0xf4f0ffb2,
+ 0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+ 0x004e00f8,
+ 0x40e5f002,
+ 0xf020e5f0,
+ 0x008010e5,
+ 0x0ef60185,
0x0f04bd00,
- 0x02f87e03,
- 0x01004b00,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd05,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x068a: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0xf400fc80,
- 0x01f80032,
-/* 0x06ac: ctx_4170s */
- 0xb210f5f0,
- 0x41708eff,
+/* 0x06e5: ctx_redswitch_delay */
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xf10400e5,
+ 0x800100e5,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x06fe: ctx_86c */
+ 0x008000f8,
+ 0x0ff60223,
+ 0xb204bd00,
+ 0x8a148eff,
0x008f7e40,
-/* 0x06bb: ctx_4170w */
- 0x8e00f800,
- 0x7e404170,
- 0xb2000065,
- 0x10f4f0ff,
- 0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
- 0x02004e00,
- 0xf040e5f0,
- 0xe5f020e5,
- 0x85008010,
- 0x000ef601,
- 0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xe5f1fd1b,
- 0xe5f10400,
- 0x00800100,
- 0x0ef60185,
- 0xf804bd00,
-/* 0x06fd: ctx_86c */
- 0x23008000,
+ 0x8effb200,
+ 0x7e41a88c,
+ 0xf800008f,
+/* 0x071d: ctx_mem */
+ 0x84008000,
0x000ff602,
- 0xffb204bd,
- 0x408a148e,
- 0x00008f7e,
- 0x8c8effb2,
- 0x8f7e41a8,
- 0x00f80000,
-/* 0x071c: ctx_mem */
- 0x02840080,
- 0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
- 0x84008f04,
- 0x00ffcf02,
- 0xf405fffd,
- 0x00f8f61b,
-/* 0x0734: ctx_load */
- 0x99f094bd,
- 0x37008005,
- 0x0009f602,
- 0x0c0a04bd,
- 0x0000b87e,
- 0x0080f4bd,
- 0x0ff60289,
- 0x8004bd00,
- 0xf602c100,
- 0x04bd0002,
- 0x02830080,
+/* 0x0726: ctx_mem_wait */
+ 0x008f04bd,
+ 0xffcf0284,
+ 0x05fffd00,
+ 0xf8f61bf4,
+/* 0x0735: ctx_load */
+ 0xf094bd00,
+ 0x00800599,
+ 0x09f60237,
+ 0x0a04bd00,
+ 0x00b87e0c,
+ 0x80f4bd00,
+ 0xf6028900,
+ 0x04bd000f,
+ 0x02c10080,
0xbd0002f6,
- 0x7e070f04,
- 0x8000071c,
- 0xf602c000,
- 0x04bd0002,
- 0xf0000bfe,
- 0x24b61f2a,
- 0x0220b604,
- 0x99f094bd,
- 0x37008008,
- 0x0009f602,
- 0x008004bd,
- 0x02f60281,
- 0xd204bd00,
- 0x80000000,
- 0x800225f0,
- 0xf6028800,
- 0x04bd0002,
- 0x00421001,
- 0x0223f002,
- 0xf80512fa,
- 0xf094bd03,
+ 0x83008004,
+ 0x0002f602,
+ 0x070f04bd,
+ 0x00071d7e,
+ 0x02c00080,
+ 0xbd0002f6,
+ 0x000bfe04,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0xf094bd02,
0x00800899,
- 0x09f60217,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01b50512,
- 0xf094bd16,
- 0x00800999,
0x09f60237,
0x8004bd00,
0xf6028100,
- 0x04bd0001,
- 0x00800102,
- 0x02f60288,
- 0x4104bd00,
- 0x13f00100,
- 0x0501fa06,
+ 0x04bd0002,
+ 0x000000d2,
+ 0x0225f080,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x42100104,
+ 0x23f00200,
+ 0x0512fa02,
0x94bd03f8,
- 0x800999f0,
+ 0x800899f0,
0xf6021700,
0x04bd0009,
- 0x99f094bd,
- 0x17008005,
- 0x0009f602,
- 0x00f804bd,
-/* 0x0820: ctx_chan */
- 0x0007347e,
- 0xb87e0c0a,
- 0x050f0000,
- 0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
- 0x039800f8,
- 0x81008041,
- 0x0003f602,
- 0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
- 0xf4ff34c4,
- 0x00450e1b,
- 0x0653f002,
- 0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
- 0x804e9803,
- 0x7e814f98,
- 0xb600008f,
- 0x12b60830,
- 0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
- 0x80160398,
- 0xf6028100,
- 0x04bd0003,
- 0x414000b5,
- 0x13f00100,
- 0x0601fa06,
- 0x00f803f8,
-/* 0x0880: ctx_xfer */
- 0x0080040e,
- 0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
- 0x8e04bd00,
- 0xcf030000,
- 0xe4f100ee,
- 0x1bf42000,
- 0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
- 0x0f0c02f4,
- 0x06fd7e10,
- 0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
- 0xac7e020f,
- 0xbb7e0006,
- 0xcd7e0006,
- 0xf4bd0006,
- 0x0006ac7e,
- 0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
- 0xbd160198,
- 0x05008024,
- 0x0002f601,
- 0x1fb204bd,
- 0x41a5008e,
- 0x00008f7e,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0x048effb2,
- 0x8f7e41a5,
- 0x167e0000,
- 0x24bd0002,
- 0x0247fc80,
- 0xbd0002f6,
- 0x012cf004,
- 0x800320b6,
- 0xf6024afc,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0xb50512fd,
+ 0x94bd1601,
+ 0x800999f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0001f6,
+ 0x80010204,
+ 0xf6028800,
0x04bd0002,
- 0xf001acf0,
- 0x000b06a5,
- 0x98000c98,
- 0x000e010d,
- 0x00013d7e,
- 0xec7e080a,
- 0x0a7e0000,
- 0x01f40002,
- 0x7e0c0a12,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xbd03f805,
+ 0x0999f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf094bd04,
+ 0x00800599,
+ 0x09f60217,
+ 0xf804bd00,
+/* 0x0821: ctx_chan */
+ 0x07357e00,
+ 0x7e0c0a00,
0x0f0000b8,
- 0x071c7e05,
- 0x2d02f400,
-/* 0x093c: ctx_xfer_post */
- 0xac7e020f,
- 0xf4bd0006,
- 0x0006fd7e,
- 0x0002277e,
- 0x0006bb7e,
- 0xac7ef4bd,
+ 0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+ 0x9800f800,
+ 0x00804103,
+ 0x03f60281,
+ 0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+ 0xff34c434,
+ 0x450e1bf4,
+ 0x53f00200,
+ 0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0x814f9880,
+ 0x00008f7e,
+ 0xb60830b6,
+ 0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+ 0x160398df,
+ 0x02810080,
+ 0xbd0003f6,
+ 0x4000b504,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xf803f806,
+/* 0x0881: ctx_xfer */
+ 0x80040e00,
+ 0xf6030200,
+ 0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+ 0x0300008e,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f51b,
+ 0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+ 0xfe7e100f,
0x11f40006,
- 0x40019810,
- 0xf40511fd,
- 0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
- 0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+ 0x7e020f1b,
+ 0x7e0006ad,
+ 0x7e0006bc,
+ 0xbd0006ce,
+ 0x06ad7ef4,
+ 0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+ 0x16019800,
+ 0x008024bd,
+ 0x02f60105,
+ 0xb204bd00,
+ 0xa5008e1f,
+ 0x008f7e41,
+ 0x01fcf000,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8effb205,
+ 0x7e41a504,
+ 0x7e00008f,
+ 0xbd000216,
+ 0x47fc8024,
+ 0x0002f602,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x024afc80,
+ 0xbd0002f6,
+ 0x01acf004,
+ 0x0b06a5f0,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x7e080a00,
+ 0x7e0000ec,
+ 0xf400020a,
+ 0x0c0a1201,
+ 0x0000b87e,
+ 0x1d7e050f,
+ 0x02f40007,
+/* 0x093d: ctx_xfer_post */
+ 0x7e020f2d,
+ 0xbd0006ad,
+ 0x06fe7ef4,
+ 0x02277e00,
+ 0x06bc7e00,
+ 0x7ef4bd00,
+ 0xf40006ad,
+ 0x01981011,
+ 0x0511fd40,
+ 0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+ 0xf8000833,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c578deb5867a..dd8f85b8b3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,9 +26,9 @@
#include "fuc/os.h"
#include <core/client.h>
-#include <core/option.h>
#include <core/firmware.h>
-#include <subdev/secboot.h>
+#include <core/option.h>
+#include <subdev/acr.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/pmu.h>
@@ -1636,7 +1636,7 @@ gf100_gr_intr(struct nvkm_gr *base)
static void
gf100_gr_init_fw(struct nvkm_falcon *falcon,
- struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
+ struct nvkm_blob *code, struct nvkm_blob *data)
{
nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
@@ -1690,26 +1690,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
{
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_secboot *sb = device->secboot;
- u32 secboot_mask = 0;
+ u32 lsf_mask = 0;
int ret;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
- else
- gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d);
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
- else
- gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad);
+ if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
+ gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst,
+ &gr->fecs.data);
+ } else {
+ lsf_mask |= BIT(NVKM_ACR_LSF_FECS);
+ }
- if (secboot_mask != 0) {
- int ret = nvkm_secboot_reset(sb, secboot_mask);
+ if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) {
+ gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst,
+ &gr->gpccs.data);
+ } else {
+ lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS);
+ }
+
+ if (lsf_mask) {
+ ret = nvkm_acr_bootstrap_falcons(device, lsf_mask);
if (ret)
return ret;
}
@@ -1721,8 +1725,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a10c, 0x00000000);
nvkm_wr32(device, 0x40910c, 0x00000000);
- nvkm_falcon_start(gr->gpccs.falcon);
- nvkm_falcon_start(gr->fecs.falcon);
+ nvkm_falcon_start(&gr->gpccs.falcon);
+ nvkm_falcon_start(&gr->fecs.falcon);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1784,18 +1788,18 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
/* load HUB microcode */
nvkm_mc_unk260(device, 0);
- nvkm_falcon_load_dmem(gr->fecs.falcon,
+ nvkm_falcon_load_dmem(&gr->fecs.falcon,
gr->func->fecs.ucode->data.data, 0x0,
gr->func->fecs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->fecs.falcon,
+ nvkm_falcon_load_imem(&gr->fecs.falcon,
gr->func->fecs.ucode->code.data, 0x0,
gr->func->fecs.ucode->code.size, 0, 0, false);
/* load GPC microcode */
- nvkm_falcon_load_dmem(gr->gpccs.falcon,
+ nvkm_falcon_load_dmem(&gr->gpccs.falcon,
gr->func->gpccs.ucode->data.data, 0x0,
gr->func->gpccs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->gpccs.falcon,
+ nvkm_falcon_load_imem(&gr->gpccs.falcon,
gr->func->gpccs.ucode->code.data, 0x0,
gr->func->gpccs.ucode->code.size, 0, 0, false);
nvkm_mc_unk260(device, 1);
@@ -1941,17 +1945,6 @@ gf100_gr_oneinit(struct nvkm_gr *base)
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
int i, j;
- int ret;
-
- ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon);
- if (ret)
- return ret;
-
- mutex_init(&gr->fecs.mutex);
-
- ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon);
- if (ret)
- return ret;
nvkm_pmu_pgob(device->pmu, false);
@@ -1992,11 +1985,11 @@ gf100_gr_init_(struct nvkm_gr *base)
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
- ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
+ ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
if (ret)
return ret;
- ret = nvkm_falcon_get(gr->gpccs.falcon, subdev);
+ ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev);
if (ret)
return ret;
@@ -2004,49 +1997,34 @@ gf100_gr_init_(struct nvkm_gr *base)
}
static int
-gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
+gf100_gr_fini(struct nvkm_gr *base, bool suspend)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- nvkm_falcon_put(gr->gpccs.falcon, subdev);
- nvkm_falcon_put(gr->fecs.falcon, subdev);
+ nvkm_falcon_put(&gr->gpccs.falcon, subdev);
+ nvkm_falcon_put(&gr->fecs.falcon, subdev);
return 0;
}
-void
-gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
-{
- kfree(fuc->data);
- fuc->data = NULL;
-}
-
-static void
-gf100_gr_dtor_init(struct gf100_gr_pack *pack)
-{
- vfree(pack);
-}
-
void *
gf100_gr_dtor(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
- if (gr->func->dtor)
- gr->func->dtor(gr);
kfree(gr->data);
- nvkm_falcon_del(&gr->gpccs.falcon);
- nvkm_falcon_del(&gr->fecs.falcon);
+ nvkm_falcon_dtor(&gr->gpccs.falcon);
+ nvkm_falcon_dtor(&gr->fecs.falcon);
- gf100_gr_dtor_fw(&gr->fuc409c);
- gf100_gr_dtor_fw(&gr->fuc409d);
- gf100_gr_dtor_fw(&gr->fuc41ac);
- gf100_gr_dtor_fw(&gr->fuc41ad);
+ nvkm_blob_dtor(&gr->fecs.inst);
+ nvkm_blob_dtor(&gr->fecs.data);
+ nvkm_blob_dtor(&gr->gpccs.inst);
+ nvkm_blob_dtor(&gr->gpccs.data);
- gf100_gr_dtor_init(gr->fuc_bundle);
- gf100_gr_dtor_init(gr->fuc_method);
- gf100_gr_dtor_init(gr->fuc_sw_ctx);
- gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+ vfree(gr->bundle);
+ vfree(gr->method);
+ vfree(gr->sw_ctx);
+ vfree(gr->sw_nonctx);
return gr;
}
@@ -2056,7 +2034,7 @@ gf100_gr_ = {
.dtor = gf100_gr_dtor,
.oneinit = gf100_gr_oneinit,
.init = gf100_gr_init_,
- .fini = gf100_gr_fini_,
+ .fini = gf100_gr_fini,
.intr = gf100_gr_intr,
.units = gf100_gr_units,
.chan_new = gf100_gr_chan_new,
@@ -2067,87 +2045,24 @@ gf100_gr_ = {
.ctxsw.inst = gf100_gr_ctxsw_inst,
};
-int
-gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc, int ret)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- const struct firmware *fw;
- char f[32];
-
- /* see if this firmware has a legacy path */
- if (!strcmp(fwname, "fecs_inst"))
- fwname = "fuc409c";
- else if (!strcmp(fwname, "fecs_data"))
- fwname = "fuc409d";
- else if (!strcmp(fwname, "gpccs_inst"))
- fwname = "fuc41ac";
- else if (!strcmp(fwname, "gpccs_data"))
- fwname = "fuc41ad";
- else {
- /* nope, let's just return the error we got */
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
-
- /* yes, try to load from the legacy path */
- nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
- ret = request_firmware(&fw, f, device->dev);
- if (ret) {
- snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, device->dev);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc)
-{
- const struct firmware *fw;
- int ret;
-
- ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
- if (ret) {
- ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
- if (ret)
- return -ENODEV;
- return 0;
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct gf100_gr *gr)
-{
- gr->func = func;
- gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
- func->fecs.ucode == NULL);
-
- return nvkm_gr_ctor(&gf100_gr_, device, index,
- gr->firmware || func->fecs.ucode != NULL,
- &gr->base);
-}
+static const struct nvkm_falcon_func
+gf100_gr_flcn = {
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
int
-gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif,
+ struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
struct gf100_gr *gr;
int ret;
@@ -2156,22 +2071,49 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- ret = gf100_gr_ctor(func, device, index, gr);
+ ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
if (ret)
return ret;
- if (gr->firmware) {
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
- }
+ fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ gr->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+ "fecs", 0x409000, &gr->fecs.falcon);
+ if (ret)
+ return ret;
+
+ mutex_init(&gr->fecs.mutex);
+
+ ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+ "gpccs", 0x41a000, &gr->gpccs.falcon);
+ if (ret)
+ return ret;
return 0;
}
void
+gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int gpc, i, j;
+ u32 data;
+
+ for (gpc = 0, i = 0; i < 4; i++) {
+ for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
+ data |= gr->tpc_nr[gpc] << (j * 4);
+ if (pd)
+ nvkm_wr32(device, 0x406028 + (i * 4), data);
+ if (ds)
+ nvkm_wr32(device, 0x405870 + (i * 4), data);
+ }
+}
+
+void
gf100_gr_init_400054(struct gf100_gr *gr)
{
nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
@@ -2295,8 +2237,8 @@ gf100_gr_init(struct gf100_gr *gr)
gr->func->init_gpc_mmu(gr);
- if (gr->fuc_sw_nonctx)
- gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+ if (gr->sw_nonctx)
+ gf100_gr_mmio(gr, gr->sw_nonctx);
else
gf100_gr_mmio(gr, gr->func->mmio);
@@ -2320,6 +2262,8 @@ gf100_gr_init(struct gf100_gr *gr)
gr->func->init_bios_2(gr);
if (gr->func->init_swdx_pes_mask)
gr->func->init_swdx_pes_mask(gr);
+ if (gr->func->init_fs)
+ gr->func->init_fs(gr);
nvkm_wr32(device, 0x400500, 0x00010001);
@@ -2338,8 +2282,8 @@ gf100_gr_init(struct gf100_gr *gr)
if (gr->func->init_40601c)
gr->func->init_40601c(gr);
- nvkm_wr32(device, 0x404490, 0xc0000000);
nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
if (gr->func->init_sked_hww_esr)
gr->func->init_sked_hww_esr(gr);
@@ -2454,7 +2398,66 @@ gf100_gr = {
};
int
+gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ gr->firmware = false;
+ return 0;
+}
+
+static int
+gf100_gr_load_fw(struct gf100_gr *gr, const char *name,
+ struct nvkm_blob *blob)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", name);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", name);
+ return ret;
+ }
+ }
+
+ blob->size = fw->size;
+ blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (blob->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
+gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false))
+ return -EINVAL;
+
+ if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) ||
+ gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) ||
+ gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) ||
+ gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data))
+ return -ENOENT;
+
+ gr->firmware = true;
+ return 0;
+}
+
+static const struct gf100_gr_fwif
+gf100_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf100_gr },
+ { -1, gf100_gr_nofw, &gf100_gr },
+ {}
+};
+
+int
gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf100_gr, device, index, pgr);
+ return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index fafdd0bbea9b..88bcb57c2e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -31,6 +31,8 @@
#include <subdev/mmu.h>
#include <engine/falcon.h>
+struct nvkm_acr_lsfw;
+
#define GPC_MAX 32
#define TPC_MAX_PER_GPC 8
#define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC)
@@ -55,11 +57,6 @@ struct gf100_gr_mmio {
int buffer;
};
-struct gf100_gr_fuc {
- u32 *data;
- u32 size;
-};
-
struct gf100_gr_zbc_color {
u32 format;
u32 ds[4];
@@ -83,29 +80,30 @@ struct gf100_gr {
struct nvkm_gr base;
struct {
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
+ struct nvkm_blob inst;
+ struct nvkm_blob data;
+
struct mutex mutex;
u32 disable;
} fecs;
struct {
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
+ struct nvkm_blob inst;
+ struct nvkm_blob data;
} gpccs;
- struct gf100_gr_fuc fuc409c;
- struct gf100_gr_fuc fuc409d;
- struct gf100_gr_fuc fuc41ac;
- struct gf100_gr_fuc fuc41ad;
bool firmware;
/*
* Used if the register packs are loaded from NVIDIA fw instead of
* using hardcoded arrays. To be allocated with vzalloc().
*/
- struct gf100_gr_pack *fuc_sw_nonctx;
- struct gf100_gr_pack *fuc_sw_ctx;
- struct gf100_gr_pack *fuc_bundle;
- struct gf100_gr_pack *fuc_method;
+ struct gf100_gr_pack *sw_nonctx;
+ struct gf100_gr_pack *sw_ctx;
+ struct gf100_gr_pack *bundle;
+ struct gf100_gr_pack *method;
struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
@@ -140,12 +138,6 @@ struct gf100_gr {
u32 size_pm;
};
-int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct gf100_gr *);
-int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct nvkm_gr **);
-void *gf100_gr_dtor(struct nvkm_gr *);
-
int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst);
struct gf100_gr_func_zbc {
@@ -157,7 +149,6 @@ struct gf100_gr_func_zbc {
};
struct gf100_gr_func {
- void (*dtor)(struct gf100_gr *);
void (*oneinit_tiles)(struct gf100_gr *);
void (*oneinit_sm_id)(struct gf100_gr *);
int (*init)(struct gf100_gr *);
@@ -171,6 +162,7 @@ struct gf100_gr_func {
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_bios_2)(struct gf100_gr *);
void (*init_swdx_pes_mask)(struct gf100_gr *);
+ void (*init_fs)(struct gf100_gr *);
void (*init_fecs_exceptions)(struct gf100_gr *);
void (*init_ds_hww_esr_2)(struct gf100_gr *);
void (*init_40601c)(struct gf100_gr *);
@@ -217,6 +209,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *);
void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
void gf100_gr_init_400054(struct gf100_gr *);
+void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool);
extern const struct gf100_gr_func_zbc gf100_gr_zbc;
void gf117_gr_init_zcull(struct gf100_gr *);
@@ -245,10 +238,18 @@ void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
+extern const struct gf100_gr_func_zbc gp100_gr_zbc;
void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
extern const struct gf100_gr_func_zbc gp102_gr_zbc;
+extern const struct gf100_gr_func gp107_gr;
+
+void gv100_gr_init_419bd8(struct gf100_gr *);
+void gv100_gr_init_504430(struct gf100_gr *, int, int);
+void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+void gv100_gr_trap_mp(struct gf100_gr *, int, int);
+
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
#include <core/object.h>
@@ -269,9 +270,6 @@ struct gf100_gr_chan {
void gf100_gr_ctxctl_debug(struct gf100_gr *);
-void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
-int gf100_gr_ctor_fw(struct gf100_gr *, const char *,
- struct gf100_gr_fuc *);
u64 gf100_gr_units(struct nvkm_gr *);
void gf100_gr_zbc_init(struct gf100_gr *);
@@ -294,8 +292,8 @@ struct gf100_gr_pack {
for (init = pack->init; init && init->count; init++)
struct gf100_gr_ucode {
- struct gf100_gr_fuc code;
- struct gf100_gr_fuc data;
+ struct nvkm_blob code;
+ struct nvkm_blob data;
};
extern struct gf100_gr_ucode gf100_gr_fecs_ucode;
@@ -310,17 +308,6 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
int gf100_gr_init_ctxctl(struct gf100_gr *);
-/* external bundles loading functions */
-int gk20a_gr_av_to_init(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-int gk20a_gr_av_to_method(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-
-int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int,
- struct nvkm_gr **);
-
/* register init value lists */
extern const struct gf100_gr_init gf100_gr_init_main_0[];
@@ -403,4 +390,31 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
void gm200_gr_init_gpc_mmu(struct gf100_gr *);
+
+struct gf100_gr_fwif {
+ int version;
+ int (*load)(struct gf100_gr *, int ver, const struct gf100_gr_fwif *);
+ const struct gf100_gr_func *func;
+ const struct nvkm_acr_lsf_func *fecs;
+ const struct nvkm_acr_lsf_func *gpccs;
+};
+
+int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+
+int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver);
+
+int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr;
+
+extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr;
+void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
+
+extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
+
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+ struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 42c2fd9fc04e..0536fe8b2b92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -144,8 +144,15 @@ gf104_gr = {
}
};
+static const struct gf100_gr_fwif
+gf104_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf104_gr },
+ { -1, gf100_gr_nofw, &gf104_gr },
+ {}
+};
+
int
gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf104_gr, device, index, pgr);
+ return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 4731a460adc7..14284b06112f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -143,8 +143,15 @@ gf108_gr = {
}
};
+const struct gf100_gr_fwif
+gf108_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf108_gr },
+ { -1, gf100_gr_nofw, &gf108_gr },
+ {}
+};
+
int
gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf108_gr, device, index, pgr);
+ return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index cdf759c8cd7f..280752551a3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -119,8 +119,15 @@ gf110_gr = {
}
};
+static const struct gf100_gr_fwif
+gf110_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf110_gr },
+ { -1, gf100_gr_nofw, &gf110_gr },
+ {}
+};
+
int
gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf110_gr, device, index, pgr);
+ return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index a4158f84c649..235c3fbe4b95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -184,8 +184,15 @@ gf117_gr = {
}
};
+static const struct gf100_gr_fwif
+gf117_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf117_gr },
+ { -1, gf100_gr_nofw, &gf117_gr },
+ {}
+};
+
int
gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf117_gr, device, index, pgr);
+ return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 4197844870b3..7eac385ece97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -210,8 +210,15 @@ gf119_gr = {
}
};
+static const struct gf100_gr_fwif
+gf119_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf119_gr },
+ { -1, gf100_gr_nofw, &gf119_gr },
+ {}
+};
+
int
gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf119_gr, device, index, pgr);
+ return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 477fee3e3715..89f51d76082b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -489,8 +489,15 @@ gk104_gr = {
}
};
+static const struct gf100_gr_fwif
+gk104_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk104_gr },
+ { -1, gf100_gr_nofw, &gk104_gr },
+ {}
+};
+
int
gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk104_gr, device, index, pgr);
+ return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 7cd628c84e07..735f05e54d62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -385,8 +385,15 @@ gk110_gr = {
}
};
+static const struct gf100_gr_fwif
+gk110_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk110_gr },
+ { -1, gf100_gr_nofw, &gk110_gr },
+ {}
+};
+
int
gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk110_gr, device, index, pgr);
+ return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index a38faa215635..adc971be8f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -136,8 +136,15 @@ gk110b_gr = {
}
};
+static const struct gf100_gr_fwif
+gk110b_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk110b_gr },
+ { -1, gf100_gr_nofw, &gk110b_gr },
+ {}
+};
+
int
gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+ return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 58456660e603..aa0eff6795ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -194,8 +194,15 @@ gk208_gr = {
}
};
+static const struct gf100_gr_fwif
+gk208_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk208_gr },
+ { -1, gf100_gr_nofw, &gk208_gr },
+ {}
+};
+
int
gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk208_gr, device, index, pgr);
+ return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 500cb08dd608..4209b24a46d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -22,6 +22,7 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
#include <subdev/timer.h>
#include <nvif/class.h>
@@ -33,21 +34,22 @@ struct gk20a_fw_av
};
int
-gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
int nent;
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ nent = (blob.size / sizeof(struct gk20a_fw_av));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
if (!pack) {
ret = -ENOMEM;
@@ -59,7 +61,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -70,7 +72,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
@@ -82,21 +84,22 @@ struct gk20a_fw_aiv
};
int
-gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
int nent;
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
+ nent = (blob.size / sizeof(struct gk20a_fw_aiv));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
if (!pack) {
ret = -ENOMEM;
@@ -108,7 +111,7 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];
+ struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -119,15 +122,16 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
int
-gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
/* We don't suppose we will initialize more than 16 classes here... */
@@ -137,29 +141,30 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ nent = (blob.size / sizeof(struct gk20a_fw_av));
- pack = vzalloc((sizeof(*pack) * max_classes) +
- (sizeof(*init) * (nent + 1)));
+ pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
+ (sizeof(*init) * (nent + max_classes + 1)));
if (!pack) {
ret = -ENOMEM;
goto end;
}
- init = (void *)(pack + max_classes);
+ init = (void *)(pack + max_classes + 1);
- for (i = 0; i < nent; i++) {
- struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ for (i = 0; i < nent; i++, init++) {
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
u32 class = av->addr & 0xffff;
u32 addr = (av->addr & 0xffff0000) >> 14;
if (prevclass != class) {
- pack[classidx].init = ent;
+ if (prevclass) /* Add terminator to the method list. */
+ init++;
+ pack[classidx].init = init;
pack[classidx].type = class;
prevclass = class;
if (++classidx >= max_classes) {
@@ -169,16 +174,16 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
}
}
- ent->addr = addr;
- ent->data = av->data;
- ent->count = 1;
- ent->pitch = 1;
+ init->addr = addr;
+ init->data = av->data;
+ init->count = 1;
+ init->pitch = 1;
}
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
@@ -224,7 +229,7 @@ gk20a_gr_init(struct gf100_gr *gr)
/* Clear SCC RAM */
nvkm_wr32(device, 0x40802c, 0x1);
- gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+ gf100_gr_mmio(gr, gr->sw_nonctx);
ret = gk20a_gr_wait_mem_scrubbing(gr);
if (ret)
@@ -303,40 +308,45 @@ gk20a_gr = {
};
int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
{
- struct gf100_gr *gr;
- int ret;
+ if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) ||
+ gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) ||
+ gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) ||
+ gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method))
+ return -ENOENT;
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
- return -ENOMEM;
- *pgr = &gr->base;
-
- ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
- if (ret)
- return ret;
+ return 0;
+}
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
+static int
+gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
- if (ret)
- return ret;
+ if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver,
+ &gr->fecs.inst) ||
+ nvkm_firmware_load_blob(subdev, "", "fecs_data", ver,
+ &gr->fecs.data) ||
+ nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver,
+ &gr->gpccs.inst) ||
+ nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver,
+ &gr->gpccs.data))
+ return -ENOENT;
- ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
- if (ret)
- return ret;
+ gr->firmware = true;
- ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
- if (ret)
- return ret;
+ return gk20a_gr_load_sw(gr, "", ver);
+}
- ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
- if (ret)
- return ret;
+static const struct gf100_gr_fwif
+gk20a_gr_fwif[] = {
+ { -1, gk20a_gr_load, &gk20a_gr },
+ {}
+};
- return 0;
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 92e31d397207..09bb78ba9d00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -429,8 +429,15 @@ gm107_gr = {
}
};
+static const struct gf100_gr_fwif
+gm107_gr_fwif[] = {
+ { -1, gf100_gr_load, &gm107_gr },
+ { -1, gf100_gr_nofw, &gm107_gr },
+ {}
+};
+
int
gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gm107_gr, device, index, pgr);
+ return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index eff30662b984..3d67cfb08395 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -24,14 +24,64 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
+#include <subdev/acr.h>
#include <subdev/secboot.h>
+#include <nvfw/flcn.h>
+
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
+static void
+gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v1 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = base + lsfw->app_resident_code_offset;
+ const u64 data = base + lsfw->app_resident_data_offset;
+ const struct flcn_bl_dmem_desc_v1 hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = code,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = data,
+ .data_size = lsfw->app_resident_data_size,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm200_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+ .bld_write = gm200_gr_acr_bld_write,
+ .bld_patch = gm200_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gm200_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+ .bld_write = gm200_gr_acr_bld_write,
+ .bld_patch = gm200_gr_acr_bld_patch,
+};
+
int
gm200_gr_rops(struct gf100_gr *gr)
{
@@ -124,44 +174,6 @@ gm200_gr_oneinit_tiles(struct gf100_gr *gr)
}
}
-int
-gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
-{
- struct gf100_gr *gr;
- int ret;
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
- return -ENOMEM;
- *pgr = &gr->base;
-
- ret = gf100_gr_ctor(func, device, index, gr);
- if (ret)
- return ret;
-
- /* Load firmwares for non-secure falcons */
- if (!nvkm_secboot_is_managed(device->secboot,
- NVKM_SECBOOT_FALCON_FECS)) {
- if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
- (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
- return ret;
- }
- if (!nvkm_secboot_is_managed(device->secboot,
- NVKM_SECBOOT_FALCON_GPCCS)) {
- if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
- (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
- return ret;
- }
-
- if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
- (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
- (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
- (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
- return ret;
-
- return 0;
-}
-
static const struct gf100_gr_func
gm200_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -198,7 +210,77 @@ gm200_gr = {
};
int
+gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ int ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+ &gr->fecs.falcon,
+ NVKM_ACR_LSF_FECS,
+ "gr/fecs_", ver, fwif->fecs);
+ if (ret)
+ return ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+ &gr->gpccs.falcon,
+ NVKM_ACR_LSF_GPCCS,
+ "gr/gpccs_", ver,
+ fwif->gpccs);
+ if (ret)
+ return ret;
+
+ gr->firmware = true;
+
+ return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gm200_gr_fwif[] = {
+ { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
+int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gm200_gr, device, index, pgr);
+ return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
}
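
The constructor above no longer loads firmware itself; it hands a gf100_gr_fwif table to gf100_gr_new_(), where each entry pairs a firmware revision with a loader callback plus the gf100_gr_func and ACR descriptors to use when that revision is picked. As a rough illustration of the pattern only (the real gf100_gr_new_()/nvkm_firmware_load() selection logic is outside this hunk, so every name below is a stand-in), such a table is walked in order until a loader succeeds, with the empty terminator, or a version of -1 paired with a no-op loader acting as a no-firmware fallback for some engines later in this series, ending the search:

#include <stdio.h>

/* Stand-ins for the nvkm types; field order mirrors the
 * { version, load, func, ... } initialisers above. */
struct demo_fwif {
	int version;           /* firmware revision to try, -1 = no firmware */
	int (*load)(void *gr, int ver, const struct demo_fwif *fwif);
	const char *func;      /* per-chipset gf100_gr_func equivalent */
};

static int
demo_load_v0(void *gr, int ver, const struct demo_fwif *fwif)
{
	(void)gr;
	printf("loading firmware set for %s, version %d\n", fwif->func, ver);
	return 0;              /* pretend the files were found */
}

/* Walk the table until a loader succeeds; this mirrors the intent of
 * the fwif arrays, not their exact in-kernel handling. */
static const struct demo_fwif *
demo_firmware_select(void *gr, const struct demo_fwif *fwif)
{
	for (; fwif->load; fwif++) {
		if (fwif->load(gr, fwif->version, fwif) == 0)
			return fwif;
	}
	return NULL;
}

int
main(void)
{
	static const struct demo_fwif demo_table[] = {
		{ 0, demo_load_v0, "gm200_gr" },
		{}
	};
	const struct demo_fwif *hit = demo_firmware_select(NULL, demo_table);

	printf("selected: %s\n", hit ? hit->func : "none");
	return 0;
}
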
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index a667770ce3cb..09d8c5d5b000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -22,10 +22,61 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
+#include <subdev/acr.h>
#include <subdev/timer.h>
+#include <nvfw/flcn.h>
+
#include <nvif/class.h>
+void
+gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc hdr;
+ u64 addr;
+
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+ flcn_bl_dmem_desc_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+ const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+ const struct flcn_bl_dmem_desc hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = lower_32_bits(code),
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lower_32_bits(data),
+ .data_size = lsfw->app_resident_data_size,
+ .code_dma_base1 = upper_32_bits(code),
+ .data_dma_base1 = upper_32_bits(data),
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm20b_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc),
+ .bld_write = gm20b_gr_acr_bld_write,
+ .bld_patch = gm20b_gr_acr_bld_patch,
+};
+
static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
{
@@ -33,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
u32 val;
/* Bypass MMU check for non-secure boot */
- if (!device->secboot) {
+ if (!device->acr) {
nvkm_wr32(device, 0x100ce4, 0xffffffff);
if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
@@ -85,8 +136,51 @@ gm20b_gr = {
}
};
+static int
+gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ int ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon,
+ NVKM_ACR_LSF_FECS,
+ "gr/fecs_", ver, fwif->fecs);
+ if (ret)
+ return ret;
+
+
+ if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver,
+ &gr->gpccs.inst) ||
+ nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver,
+ &gr->gpccs.data))
+ return -ENOENT;
+
+ gr->firmware = true;
+
+ return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gm20b_gr_fwif[] = {
+ { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+ {}
+};
+
int
gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gm20b_gr, device, index, pgr);
+ return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
}
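
gm20b_gr_acr_bld_patch() above relocates a v1 boot-loader descriptor whose DMA addresses are stored 256-byte aligned and split across two fields: bits 8..39 of the byte address in code_dma_base/data_dma_base, the remaining high bits in the *_dma_base1 fields. A standalone sketch of just that pack/adjust/unpack arithmetic, with invented names and values (the real layout is struct flcn_bl_dmem_desc from <nvfw/flcn.h>):

#include <stdint.h>
#include <stdio.h>

/* Rebuild the byte address from the split fields, apply the WPR
 * relocation delta, then split it back the same way the patch helper
 * above does. */
static void
demo_patch_base(uint32_t *lo, uint32_t *hi, int64_t adjust)
{
	uint64_t addr = ((uint64_t)*hi << 40) | ((uint64_t)*lo << 8);

	addr += adjust;
	*lo = (uint32_t)(addr >> 8);         /* lower_32_bits(addr >> 8) */
	*hi = (uint32_t)((addr >> 8) >> 32); /* upper_32_bits(addr >> 8) */
}

int
main(void)
{
	uint32_t lo = 0x00123400 >> 8, hi = 0x1; /* example packed address */

	demo_patch_base(&lo, &hi, 0x10000);      /* pretend WPR moved by 64KiB */
	printf("patched: hi=%#x lo=%#x\n", hi, lo);
	return 0;
}
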
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 9d0521ce309a..33c8634ae567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -62,7 +62,7 @@ gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
gr->zbc_depth[zbc].format << ((znum % 4) * 7));
}
-static const struct gf100_gr_func_zbc
+const struct gf100_gr_func_zbc
gp100_gr_zbc = {
.clear_color = gp100_gr_zbc_clear_color,
.clear_depth = gp100_gr_zbc_clear_depth,
@@ -135,8 +135,27 @@ gp100_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp100_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp100_gr, device, index, pgr);
+ return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 37f7d739bf80..7baf67f743f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -131,8 +131,27 @@ gp102_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp102_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp102_gr, device, index, pgr);
+ return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 4573c914c021..d9b8ef875f8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -59,8 +59,40 @@ gp104_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp104_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp104_gr, device, index, pgr);
+ return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 812aba91653f..2b1ad5522184 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-static const struct gf100_gr_func
+const struct gf100_gr_func
gp107_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
.oneinit_sm_id = gm200_gr_oneinit_sm_id,
@@ -61,8 +61,27 @@ gp107_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp107_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp107_gr, device, index, pgr);
+ return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
new file mode 100644
index 000000000000..113e4c1ba9e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+
+static void
+gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v2 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = base + lsfw->app_resident_code_offset;
+ const u64 data = base + lsfw->app_resident_data_offset;
+ const struct flcn_bl_dmem_desc_v2 hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = code,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = data,
+ .data_size = lsfw->app_resident_data_size,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp108_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp108_gr_acr_bld_write,
+ .bld_patch = gp108_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gp108_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp108_gr_acr_bld_write,
+ .bld_patch = gp108_gr_acr_bld_patch,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp108_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
+int
+gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+}
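
gp108 switches to flcn_bl_dmem_desc_v2, whose code_dma_base/data_dma_base fields hold full 64-bit byte addresses, so gp108_gr_acr_bld_patch() can relocate with a plain add instead of the shift-and-split used for the v1 descriptor on gm20b. A minimal sketch of the v2 case, with stand-in names:

#include <stdint.h>
#include <stdio.h>

struct demo_bl_desc_v2 {          /* stand-in for flcn_bl_dmem_desc_v2 */
	uint64_t code_dma_base;   /* full byte addresses, no packing */
	uint64_t data_dma_base;
};

static void
demo_patch_v2(struct demo_bl_desc_v2 *hdr, int64_t adjust)
{
	/* v2 relocation is a plain add; compare with the v1 shift/split
	 * arithmetic sketched after the gm20b diff above. */
	hdr->code_dma_base += adjust;
	hdr->data_dma_base += adjust;
}

int
main(void)
{
	struct demo_bl_desc_v2 hdr = { 0x10000000, 0x10008000 };

	demo_patch_v2(&hdr, 0x10000);
	printf("code=%#llx data=%#llx\n",
	       (unsigned long long)hdr.code_dma_base,
	       (unsigned long long)hdr.data_dma_base);
	return 0;
}
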
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 303dceddd4a8..eaf913eb5aa3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -23,8 +23,20 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <subdev/acr.h>
+
#include <nvif/class.h>
+#include <nvfw/flcn.h>
+
+static const struct nvkm_acr_lsf_func
+gp10b_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc),
+ .bld_write = gm20b_gr_acr_bld_write,
+ .bld_patch = gm20b_gr_acr_bld_patch,
+};
+
static const struct gf100_gr_func
gp10b_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -48,8 +60,8 @@ gp10b_gr = {
.gpc_nr = 1,
.tpc_nr = 2,
.ppc_nr = 1,
- .grctx = &gp102_grctx,
- .zbc = &gp102_gr_zbc,
+ .grctx = &gp100_grctx,
+ .zbc = &gp100_gr_zbc,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
@@ -59,8 +71,29 @@ gp10b_gr = {
}
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gp10b_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+ {}
+};
+
int
gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp10b_gr, device, index, pgr);
+ return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 3b3327789ae7..70639d88b8e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -45,7 +45,7 @@ gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
}
-static void
+void
gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
gv100_gr_trap_sm(gr, gpc, tpc, 0);
@@ -59,7 +59,7 @@ gv100_gr_init_4188a4(struct gf100_gr *gr)
nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
}
-static void
+void
gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -71,14 +71,14 @@ gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
}
}
-static void
+void
gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
}
-static void
+void
gv100_gr_init_419bd8(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -120,8 +120,27 @@ gv100_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gv100_gr_fwif[] = {
+ { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
int
gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gv100_gr, device, index, pgr);
+ return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
new file mode 100644
index 000000000000..454668b1cf54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+tu102_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+ nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002);
+}
+
+static void
+tu102_gr_init_fs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int sm;
+
+ gp100_grctx_generate_smid_config(gr);
+ gk104_grctx_generate_gpc_tpc_nr(gr);
+
+ for (sm = 0; sm < gr->sm_nr; sm++) {
+ nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 +
+ gr->sm[sm].tpc * 4), sm);
+ }
+
+ gm200_grctx_generate_dist_skip_table(gr);
+ gf100_gr_init_num_tpc_per_gpc(gr, true, true);
+}
+
+static void
+tu102_gr_init_zcull(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ const u8 tile_nr = ALIGN(gr->tpc_total, 64);
+ u8 bank[GPC_MAX] = {}, gpc, i, j;
+ u32 data;
+
+ for (i = 0; i < tile_nr; i += 8) {
+ for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+ data |= bank[gr->tile[i + j]] << (j * 4);
+ bank[gr->tile[i + j]]++;
+ }
+ nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
+ }
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+}
+
+static void
+tu102_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+
+ nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+ nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+}
+
+static const struct gf100_gr_func
+tu102_gr = {
+ .oneinit_tiles = gm200_gr_oneinit_tiles,
+ .oneinit_sm_id = gm200_gr_oneinit_sm_id,
+ .init = gf100_gr_init,
+ .init_419bd8 = gv100_gr_init_419bd8,
+ .init_gpc_mmu = tu102_gr_init_gpc_mmu,
+ .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+ .init_zcull = tu102_gr_init_zcull,
+ .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_fs = tu102_gr_init_fs,
+ .init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
+ .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+ .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_504430 = gv100_gr_init_504430,
+ .init_shader_exceptions = gv100_gr_init_shader_exceptions,
+ .trap_mp = gv100_gr_trap_mp,
+ .rops = gm200_gr_rops,
+ .gpc_nr = 6,
+ .tpc_nr = 5,
+ .ppc_nr = 3,
+ .grctx = &tu102_grctx,
+ .zbc = &gp102_gr_zbc,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, TURING_A, &gf100_fermi },
+ { -1, -1, TURING_COMPUTE_A },
+ {}
+ }
+};
+
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+tu102_gr_fwif[] = {
+ { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
+int
+tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+}
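
tu102_gr_init_zcull() above packs a running per-GPC bank counter into the 0x0980 broadcast registers, eight 4-bit entries per 32-bit word, walking the gr->tile[] screen-tile table. The standalone sketch below reproduces only that packing loop over a made-up tile[] array so the bit layout is visible; the register writes and the GPC/TPC bookkeeping are left out.

#include <stdint.h>
#include <stdio.h>

#define DEMO_GPC_MAX 6

int
main(void)
{
	/* tile[i] = GPC owning screen tile i; values are made up. */
	const uint8_t tile[] = { 0, 1, 2, 3, 4, 5, 0, 1, 2, 3 };
	const int tpc_total = sizeof(tile);
	const int tile_nr = (tpc_total + 63) & ~63;   /* ALIGN(tpc_total, 64) */
	uint8_t bank[DEMO_GPC_MAX] = { 0 };
	int i, j;

	for (i = 0; i < tile_nr; i += 8) {
		uint32_t data = 0;

		/* Eight 4-bit bank indices per register word. */
		for (j = 0; j < 8 && i + j < tpc_total; j++) {
			data |= bank[tile[i + j]] << (j * 4);
			bank[tile[i + j]]++;
		}
		printf("word %d: %08x\n", i / 8, data);
	}
	return 0;
}
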
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index cdf631822282..9a0fd9812750 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/nvdec/base.o
-nvkm-y += nvkm/engine/nvdec/gp102.o
+nvkm-y += nvkm/engine/nvdec/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 4a63581bdd5e..9b23c1b70ebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -20,48 +20,42 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
-
-#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static int
-nvkm_nvdec_oneinit(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- struct nvkm_subdev *subdev = &nvdec->engine.subdev;
-
- nvdec->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (!nvdec->addr)
- return -EINVAL;
-
- /*XXX: fix naming of this when adding support for multiple-NVDEC */
- return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr,
- &nvdec->falcon);
-}
+#include <core/firmware.h>
static void *
nvkm_nvdec_dtor(struct nvkm_engine *engine)
{
struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- nvkm_falcon_del(&nvdec->falcon);
+ nvkm_falcon_dtor(&nvdec->falcon);
return nvdec;
}
static const struct nvkm_engine_func
nvkm_nvdec = {
.dtor = nvkm_nvdec_dtor,
- .oneinit = nvkm_nvdec_oneinit,
};
int
-nvkm_nvdec_new_(struct nvkm_device *device, int index,
- struct nvkm_nvdec **pnvdec)
+nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_nvdec **pnvdec)
{
struct nvkm_nvdec *nvdec;
+ int ret;
if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
- &nvdec->engine);
+ ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+ &nvdec->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ nvdec->func = fwif->func;
+
+ return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
+ nvkm_subdev_name[index], 0, &nvdec->falcon);
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index bf3e532665fb..0ab27ab4d8ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -19,25 +19,45 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include "priv.h"
-#ifndef __NVKM_CORE_MSGQUEUE_H
-#define __NVKM_CORE_MSGQUEUE_H
-#include <subdev/secboot.h>
-struct nvkm_msgqueue;
+static const struct nvkm_falcon_func
+gm107_nvdec_flcn = {
+ .debug = 0xd00,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
-/* Hopefully we will never have firmware arguments larger than that... */
-#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
+static const struct nvkm_nvdec_func
+gm107_nvdec = {
+ .flcn = &gm107_nvdec_flcn,
+};
-int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-void nvkm_msgqueue_del(struct nvkm_msgqueue **);
-void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
-int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
+static int
+gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
+ const struct nvkm_nvdec_fwif *fwif)
+{
+ return 0;
+}
-/* useful if we run a NVIDIA-signed firmware */
-void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
+static const struct nvkm_nvdec_fwif
+gm107_nvdec_fwif[] = {
+ { -1, gm107_nvdec_nofw, &gm107_nvdec },
+ {}
+};
-/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
-int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
-
-#endif
+int
+gm107_nvdec_new(struct nvkm_device *device, int index,
+ struct nvkm_nvdec **pnvdec)
+{
+ return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 57bfa3aa1835..e14da8b000d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -3,5 +3,17 @@
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
-int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
+struct nvkm_nvdec_func {
+ const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvdec_fwif {
+ int version;
+ int (*load)(struct nvkm_nvdec *, int ver,
+ const struct nvkm_nvdec_fwif *);
+ const struct nvkm_nvdec_func *func;
+};
+
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
+ struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index f316de8d45a8..75bf4436bf3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: MIT
-#nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
new file mode 100644
index 000000000000..484100e15668
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include "priv.h"
+#include <core/firmware.h>
+
+static void *
+nvkm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+ nvkm_falcon_dtor(&nvenc->falcon);
+ return nvenc;
+}
+
+static const struct nvkm_engine_func
+nvkm_nvenc = {
+ .dtor = nvkm_nvenc_dtor,
+};
+
+int
+nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_nvenc **pnvenc)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+ &nvenc->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ nvenc->func = fwif->func;
+
+ return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
+ nvkm_subdev_name[index], 0, &nvenc->falcon);
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
new file mode 100644
index 000000000000..d249c8ffb2d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gm107_nvenc_flcn = {
+ .fbif = 0x800,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
+
+static const struct nvkm_nvenc_func
+gm107_nvenc = {
+ .flcn = &gm107_nvenc_flcn,
+};
+
+static int
+gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
+ const struct nvkm_nvenc_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_nvenc_fwif
+gm107_nvenc_fwif[] = {
+ { -1, gm107_nvenc_nofw, &gm107_nvenc },
+ {}
+};
+
+int
+gm107_nvenc_new(struct nvkm_device *device, int index,
+ struct nvkm_nvenc **pnvenc)
+{
+ return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
new file mode 100644
index 000000000000..100fa5ebbeef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVENC_PRIV_H__
+#define __NVKM_NVENC_PRIV_H__
+#include <engine/nvenc.h>
+
+struct nvkm_nvenc_func {
+ const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvenc_fwif {
+ int version;
+ int (*load)(struct nvkm_nvenc *, int ver,
+ const struct nvkm_nvenc_fwif *);
+ const struct nvkm_nvenc_func *func;
+};
+
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+ int, struct nvkm_nvenc **pnvenc);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 97c4696171f0..63cd2be3de08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/sec2/base.o
nvkm-y += nvkm/engine/sec2/gp102.o
+nvkm-y += nvkm/engine/sec2/gp108.o
nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 1b49e5b6717f..41318aa0d481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -21,97 +21,98 @@
*/
#include "priv.h"
-#include <core/msgqueue.h>
+#include <core/firmware.h>
#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static void *
-nvkm_sec2_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- nvkm_msgqueue_del(&sec2->queue);
- nvkm_falcon_del(&sec2->falcon);
- return sec2;
-}
static void
-nvkm_sec2_intr(struct nvkm_engine *engine)
+nvkm_sec2_recv(struct work_struct *work)
{
- struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- struct nvkm_subdev *subdev = &engine->subdev;
- struct nvkm_device *device = subdev->device;
- u32 disp = nvkm_rd32(device, sec2->addr + 0x01c);
- u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000040) {
- schedule_work(&sec2->work);
- nvkm_wr32(device, sec2->addr + 0x004, 0x00000040);
- intr &= ~0x00000040;
- }
+ struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
- if (intr) {
- nvkm_error(subdev, "unhandled intr %08x\n", intr);
- nvkm_wr32(device, sec2->addr + 0x004, intr);
+ if (!sec2->initmsg_received) {
+ int ret = sec2->func->initmsg(sec2);
+ if (ret) {
+ nvkm_error(&sec2->engine.subdev,
+ "error parsing init message: %d\n", ret);
+ return;
+ }
+ sec2->initmsg_received = true;
}
+
+ nvkm_falcon_msgq_recv(sec2->msgq);
}
static void
-nvkm_sec2_recv(struct work_struct *work)
+nvkm_sec2_intr(struct nvkm_engine *engine)
{
- struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
-
- if (!sec2->queue) {
- nvkm_warn(&sec2->engine.subdev,
- "recv function called while no firmware set!\n");
- return;
- }
-
- nvkm_msgqueue_recv(sec2->queue);
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ sec2->func->intr(sec2);
}
-
static int
-nvkm_sec2_oneinit(struct nvkm_engine *engine)
+nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- struct nvkm_subdev *subdev = &sec2->engine.subdev;
- if (!sec2->addr) {
- sec2->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (WARN_ON(!sec2->addr))
- return -EINVAL;
+ flush_work(&sec2->work);
+
+ if (suspend) {
+ nvkm_falcon_cmdq_fini(sec2->cmdq);
+ sec2->initmsg_received = false;
}
- return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon);
+ return 0;
}
-static int
-nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
+static void *
+nvkm_sec2_dtor(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- flush_work(&sec2->work);
- return 0;
+ nvkm_falcon_msgq_del(&sec2->msgq);
+ nvkm_falcon_cmdq_del(&sec2->cmdq);
+ nvkm_falcon_qmgr_del(&sec2->qmgr);
+ nvkm_falcon_dtor(&sec2->falcon);
+ return sec2;
}
static const struct nvkm_engine_func
nvkm_sec2 = {
.dtor = nvkm_sec2_dtor,
- .oneinit = nvkm_sec2_oneinit,
.fini = nvkm_sec2_fini,
.intr = nvkm_sec2_intr,
};
int
-nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr,
- struct nvkm_sec2 **psec2)
+nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
+ int index, u32 addr, struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
+ int ret;
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
- sec2->addr = addr;
- INIT_WORK(&sec2->work, nvkm_sec2_recv);
- return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ sec2->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
+ nvkm_subdev_name[index], addr, &sec2->falcon);
+ if (ret)
+ return ret;
+
+ if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) ||
+ (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||
+ (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq)))
+ return ret;
+
+ INIT_WORK(&sec2->work, nvkm_sec2_recv);
+ return 0;
};
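
The reworked nvkm_sec2_recv() above defers queue setup to the first work invocation: the initial message from the SEC2 RTOS is an init message describing where its command and message queues live, and only once that has been parsed does normal message processing start (fini clears initmsg_received so the handshake repeats after suspend). A toy, self-contained model of that state machine, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

struct demo_sec2 {
	bool initmsg_received;
};

static int
demo_parse_initmsg(struct demo_sec2 *sec2)
{
	(void)sec2;
	printf("binding cmdq/msgq from init message\n");
	return 0;
}

/* Mirrors the shape of nvkm_sec2_recv(): handshake once, then drain. */
static void
demo_recv_work(struct demo_sec2 *sec2)
{
	if (!sec2->initmsg_received) {
		if (demo_parse_initmsg(sec2)) {
			fprintf(stderr, "error parsing init message\n");
			return;
		}
		sec2->initmsg_received = true;
	}
	printf("processing message queue\n");
}

int
main(void)
{
	struct demo_sec2 sec2 = { false };

	demo_recv_work(&sec2);   /* first interrupt: handshake + drain */
	demo_recv_work(&sec2);   /* later interrupts: just drain */
	return 0;
}
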
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 858cf27fa010..368f2a0042ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -19,12 +19,316 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+#include <core/memory.h>
+#include <subdev/acr.h>
+#include <subdev/timer.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/sec2.h>
+
+static int
+gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_sec2_acr_bootstrap_falcon_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ struct nvkm_subdev *subdev = priv;
+ const char *name = nvkm_acr_lsf_id(msg->falcon_id);
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
+ "falcon %d [%s]: %08x\n",
+ msg->falcon_id, name, msg->error_code);
+ return -EINVAL;
+ }
+
+ nvkm_debug(subdev, "%s booted\n", name);
+ return 0;
+}
+
+static int
+gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
+ struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
+ .cmd.hdr.unit_id = sec2->func->unit_acr,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
+ .flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+ .falcon_id = id,
+ };
+
+ return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
+ gp102_sec2_acr_bootstrap_falcon_callback,
+ &sec2->engine.subdev,
+ msecs_to_jiffies(1000));
+}
+
+static int
+gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
+{
+ struct nv_sec2_args args = {};
+ nvkm_falcon_load_dmem(falcon, &args,
+ falcon->func->emem_addr, sizeof(args), 0);
+ nvkm_falcon_start(falcon);
+ return 0;
+}
+
+static void
+gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct loader_config_v1 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ loader_config_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const struct loader_config_v1 hdr = {
+ .dma_idx = FALCON_SEC2_DMAIDX_UCODE,
+ .code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .code_size_total = lsfw->app_size,
+ .code_size_to_load = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset,
+ .data_size = lsfw->app_resident_data_size,
+ .overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .argc = 1,
+ .argv = lsfw->falcon->func->emem_addr,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gp102_sec2_acr_0 = {
+ .bld_size = sizeof(struct loader_config_v1),
+ .bld_write = gp102_sec2_acr_bld_write,
+ .bld_patch = gp102_sec2_acr_bld_patch,
+ .boot = gp102_sec2_acr_boot,
+ .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
+{
+ struct nv_sec2_init_msg msg;
+ int ret, i;
+
+ ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
+ msg.msg_type != NV_SEC2_INIT_MSG_INIT)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
+ if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
+ nvkm_falcon_msgq_init(sec2->msgq,
+ msg.queue_info[i].index,
+ msg.queue_info[i].offset,
+ msg.queue_info[i].size);
+ } else {
+ nvkm_falcon_cmdq_init(sec2->cmdq,
+ msg.queue_info[i].index,
+ msg.queue_info[i].offset,
+ msg.queue_info[i].size);
+ }
+ }
+
+ return 0;
+}
+
+void
+gp102_sec2_intr(struct nvkm_sec2 *sec2)
+{
+ struct nvkm_subdev *subdev = &sec2->engine.subdev;
+ struct nvkm_falcon *falcon = &sec2->falcon;
+ u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
+ u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000040) {
+ schedule_work(&sec2->work);
+ nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "unhandled intr %08x\n", intr);
+ nvkm_falcon_wr32(falcon, 0x004, intr);
+ }
+}
+
+int
+gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
+ udelay(10);
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
+ return nvkm_falcon_v1_enable(falcon);
+}
+
+void
+gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
+ struct nvkm_memory *ctx)
+{
+ struct nvkm_device *device = falcon->owner->device;
+
+ nvkm_falcon_v1_bind_context(falcon, ctx);
+ if (!ctx)
+ return;
+
+ /* Not sure if this is a WAR for a HW issue, or some additional
+ * programming sequence that's needed to properly complete the
+ * context switch we trigger above.
+ *
+ * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
+ * particularly when resuming from suspend.
+ *
+ * Also removes the need for an odd workaround where we needed
+ * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
+ * the SEC2 RTOS would begin executing.
+ */
+ nvkm_msec(device, 10,
+ u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((irqstat & 0x00000008) &&
+ (flcn0dc & 0x00007000) == 0x00005000)
+ break;
+ );
+
+ nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
+ nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
+
+ nvkm_msec(device, 10,
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((flcn0dc & 0x00007000) == 0x00000000)
+ break;
+ );
+}
+
+static const struct nvkm_falcon_func
+gp102_sec2_flcn = {
+ .debug = 0x408,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .emem_addr = 0x01000000,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = gp102_sec2_flcn_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0xa00, 0xa04, 8 },
+ .msgq = { 0xa30, 0xa34, 8 },
+};
+
+const struct nvkm_sec2_func
+gp102_sec2 = {
+ .flcn = &gp102_sec2_flcn,
+ .unit_acr = NV_SEC2_UNIT_ACR,
+ .intr = gp102_sec2_intr,
+ .initmsg = gp102_sec2_initmsg,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+
+static void
+gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v2 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const struct flcn_bl_dmem_desc_v2 hdr = {
+ .ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
+ .code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset,
+ .data_size = lsfw->app_resident_data_size,
+ .argc = 1,
+ .argv = lsfw->falcon->func->emem_addr,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp102_sec2_acr_1 = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp102_sec2_acr_bld_write_1,
+ .bld_patch = gp102_sec2_acr_bld_patch_1,
+ .boot = gp102_sec2_acr_boot,
+ .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
+ &sec2->falcon,
+ NVKM_ACR_LSF_SEC2, "sec2/",
+ ver, fwif->acr);
+}
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
+
+static const struct nvkm_sec2_fwif
+gp102_sec2_fwif[] = {
+ { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+ {}
+};
+
int
-gp102_sec2_new(struct nvkm_device *device, int index,
- struct nvkm_sec2 **psec2)
+gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(device, index, 0, psec2);
+ return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
}
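
gp102_sec2_acr_bootstrap_falcon() above shows the new command path: build an ACR-unit command, submit it with nvkm_falcon_cmdq_send() together with a callback, a private pointer and a timeout, and let the callback check error_code when the reply comes back over the message queue. The sketch below is a deliberately simplified, self-contained model of that one-outstanding-command-plus-callback shape; it is not the nvkm_falcon_cmdq/msgq implementation (which lives outside this hunk), and every name in it is a stand-in.

#include <stdio.h>

/* Minimal reply, standing in for nv_sec2_acr_bootstrap_falcon_msg. */
struct demo_msg {
	int unit_id;
	int error_code;
	int falcon_id;
};

typedef int (*demo_callback)(void *priv, struct demo_msg *msg);

/* A toy queue that remembers a single pending callback. */
struct demo_cmdq {
	demo_callback cb;
	void *priv;
};

static int
demo_cmdq_send(struct demo_cmdq *q, demo_callback cb, void *priv)
{
	q->cb = cb;       /* real code also copies the command into DMEM */
	q->priv = priv;
	return 0;
}

static int
demo_bootstrap_done(void *priv, struct demo_msg *msg)
{
	(void)priv;
	if (msg->error_code) {
		fprintf(stderr, "bootstrap of falcon %d failed: %08x\n",
			msg->falcon_id, msg->error_code);
		return -1;
	}
	printf("falcon %d booted\n", msg->falcon_id);
	return 0;
}

int
main(void)
{
	struct demo_cmdq q = { 0 };
	struct demo_msg reply = { .unit_id = 1, .error_code = 0, .falcon_id = 7 };

	demo_cmdq_send(&q, demo_bootstrap_done, NULL);
	/* Later, the "message queue" delivers the RTOS reply and the
	 * stored callback is dispatched with it. */
	return q.cb(q.priv, &reply);
}
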
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
new file mode 100644
index 000000000000..232a9d7c51e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <subdev/acr.h>
+
+MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+
+static const struct nvkm_sec2_fwif
+gp108_sec2_fwif[] = {
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ {}
+};
+
+int
+gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+{
+ return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index b331b00517e6..bb88117e018a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -3,7 +3,27 @@
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
-#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
+struct nvkm_sec2_func {
+ const struct nvkm_falcon_func *flcn;
+ u8 unit_acr;
+ void (*intr)(struct nvkm_sec2 *);
+ int (*initmsg)(struct nvkm_sec2 *);
+};
-int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **);
+void gp102_sec2_intr(struct nvkm_sec2 *);
+int gp102_sec2_initmsg(struct nvkm_sec2 *);
+
+struct nvkm_sec2_fwif {
+ int version;
+ int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *);
+ const struct nvkm_sec2_func *func;
+ const struct nvkm_acr_lsf_func *acr;
+};
+
+int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
+extern const struct nvkm_sec2_func gp102_sec2;
+extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
+
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+ int, u32 addr, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index d655576164b1..b6ebd95c9ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -19,15 +19,54 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+#include <subdev/acr.h>
+
+static const struct nvkm_falcon_func
+tu102_sec2_flcn = {
+ .debug = 0x408,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .emem_addr = 0x01000000,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0xc00, 0xc04, 8 },
+ .msgq = { 0xc80, 0xc84, 8 },
+};
+
+static const struct nvkm_sec2_func
+tu102_sec2 = {
+ .flcn = &tu102_sec2_flcn,
+ .unit_acr = 0x07,
+ .intr = gp102_sec2_intr,
+ .initmsg = gp102_sec2_initmsg,
+};
+
+static int
+tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_sec2_fwif
+tu102_sec2_fwif[] = {
+ { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
+ { -1, tu102_sec2_nofw, &tu102_sec2 }
+};
int
-tu102_sec2_new(struct nvkm_device *device, int index,
- struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(device, index, 0x840000, psec2);
+ return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index b5665ada850a..d79d783904ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
nvkm-y += nvkm/falcon/v1.o
-nvkm-y += nvkm/falcon/msgqueue.o
-nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
-nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 366c87de6e72..c6a3448180d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <subdev/mc.h>
+#include <subdev/top.h>
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
@@ -134,6 +135,37 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
return falcon->func->clear_interrupt(falcon, mask);
}

+static int
+nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ const struct nvkm_subdev *subdev = falcon->owner;
+ u32 reg;
+
+ if (!falcon->addr) {
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+ if (WARN_ON(!falcon->addr))
+ return -ENODEV;
+ }
+
+ reg = nvkm_falcon_rd32(falcon, 0x12c);
+ falcon->version = reg & 0xf;
+ falcon->secret = (reg >> 4) & 0x3;
+ falcon->code.ports = (reg >> 8) & 0xf;
+ falcon->data.ports = (reg >> 12) & 0xf;
+
+ reg = nvkm_falcon_rd32(falcon, 0x108);
+ falcon->code.limit = (reg & 0x1ff) << 8;
+ falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+ if (func->debug) {
+ u32 val = nvkm_falcon_rd32(falcon, func->debug);
+ falcon->debug = (val >> 20) & 0x1;
+ }
+
+ return 0;
+}
+
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
@@ -151,6 +183,8 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
+ int ret = 0;
+
mutex_lock(&falcon->mutex);
if (falcon->user) {
nvkm_error(user, "%s falcon already acquired by %s!\n",
@@ -160,70 +194,37 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
}
nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+ if (!falcon->oneinit)
+ ret = nvkm_falcon_oneinit(falcon);
falcon->user = user;
mutex_unlock(&falcon->mutex);
- return 0;
+ return ret;
}

void
+nvkm_falcon_dtor(struct nvkm_falcon *falcon)
+{
+}
+
+int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
struct nvkm_subdev *subdev, const char *name, u32 addr,
struct nvkm_falcon *falcon)
{
- u32 debug_reg;
- u32 reg;
-
falcon->func = func;
falcon->owner = subdev;
falcon->name = name;
falcon->addr = addr;
mutex_init(&falcon->mutex);
mutex_init(&falcon->dmem_mutex);
-
- reg = nvkm_falcon_rd32(falcon, 0x12c);
- falcon->version = reg & 0xf;
- falcon->secret = (reg >> 4) & 0x3;
- falcon->code.ports = (reg >> 8) & 0xf;
- falcon->data.ports = (reg >> 12) & 0xf;
-
- reg = nvkm_falcon_rd32(falcon, 0x108);
- falcon->code.limit = (reg & 0x1ff) << 8;
- falcon->data.limit = (reg & 0x3fe00) >> 1;
-
- switch (subdev->index) {
- case NVKM_ENGINE_GR:
- debug_reg = 0x0;
- break;
- case NVKM_SUBDEV_PMU:
- debug_reg = 0xc08;
- break;
- case NVKM_ENGINE_NVDEC0:
- debug_reg = 0xd00;
- break;
- case NVKM_ENGINE_SEC2:
- debug_reg = 0x408;
- falcon->has_emem = true;
- break;
- case NVKM_SUBDEV_GSP:
- debug_reg = 0x0; /*XXX*/
- break;
- default:
- nvkm_warn(subdev, "unsupported falcon %s!\n",
- nvkm_subdev_name[subdev->index]);
- debug_reg = 0;
- break;
- }
-
- if (debug_reg) {
- u32 val = nvkm_falcon_rd32(falcon, debug_reg);
- falcon->debug = (val >> 20) & 0x1;
- }
+ return 0;
}

void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
if (*pfalcon) {
+ nvkm_falcon_dtor(*pfalcon);
kfree(*pfalcon);
*pfalcon = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 000000000000..40e3f3fc83ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static bool
+nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
+{
+ u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
+ u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
+ u32 free;
+
+ size = ALIGN(size, QUEUE_ALIGNMENT);
+
+ if (head >= tail) {
+ free = cmdq->offset + cmdq->size - head;
+ free -= HDR_SIZE;
+
+ if (size > free) {
+ *rewind = true;
+ head = cmdq->offset;
+ }
+ }
+
+ if (head < tail)
+ free = tail - head - 1;
+
+ return size <= free;
+}
+
+static void
+nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
+ cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
+}
+
+static void
+nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
+{
+ struct nv_falcon_cmd cmd;
+
+ cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
+ cmd.size = sizeof(cmd);
+ nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
+
+ cmdq->position = cmdq->offset;
+}
+
+static int
+nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ bool rewind = false;
+
+ mutex_lock(&cmdq->mutex);
+
+ if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
+ FLCNQ_DBG(cmdq, "queue full");
+ mutex_unlock(&cmdq->mutex);
+ return -EAGAIN;
+ }
+
+ cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
+
+ if (rewind)
+ nvkm_falcon_cmdq_rewind(cmdq);
+
+ return 0;
+}
+
+static void
+nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
+{
+ nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
+ mutex_unlock(&cmdq->mutex);
+}
+
+static int
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
+{
+ static unsigned timeout = 2000;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+ int ret = -EAGAIN;
+
+ while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+ ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
+ if (ret) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue space");
+ return ret;
+ }
+
+ nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
+ nvkm_falcon_cmdq_close(cmdq);
+ return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
+ nvkm_falcon_qmgr_callback cb, void *priv,
+ unsigned long timeout)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+ int ret;
+
+ if (!wait_for_completion_timeout(&cmdq->ready,
+ msecs_to_jiffies(1000))) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
+ return -ETIMEDOUT;
+ }
+
+ seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
+ if (IS_ERR(seq))
+ return PTR_ERR(seq);
+
+ cmd->seq_id = seq->id;
+ cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+ seq->state = SEQ_STATE_USED;
+ seq->async = !timeout;
+ seq->callback = cb;
+ seq->priv = priv;
+
+ ret = nvkm_falcon_cmdq_write(cmdq, cmd);
+ if (ret) {
+ seq->state = SEQ_STATE_PENDING;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ return ret;
+ }
+
+ if (!seq->async) {
+ if (!wait_for_completion_timeout(&seq->done, timeout)) {
+ FLCNQ_ERR(cmdq, "timeout waiting for reply");
+ return -ETIMEDOUT;
+ }
+ ret = seq->result;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ }
+
+ return ret;
+}
+
+void
+nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
+{
+ reinit_completion(&cmdq->ready);
+}
+
+void
+nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
+
+ cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
+ cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
+ cmdq->offset = offset;
+ cmdq->size = size;
+ complete_all(&cmdq->ready);
+
+ FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, cmdq->offset, cmdq->size);
+}
+
+void
+nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+ if (cmdq) {
+ kfree(*pcmdq);
+ *pcmdq = NULL;
+ }
+}
+
+int
+nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+
+ if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ cmdq->qmgr = qmgr;
+ cmdq->name = name;
+ mutex_init(&cmdq->mutex);
+ init_completion(&cmdq->ready);
+ return 0;
+}
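
The cmdq API above replaces the old nvkm_msgqueue_post() path: a caller builds a command whose first member is a struct nv_falcon_cmd header, and nvkm_falcon_cmdq_send() assigns the sequence ID, sets CMD_FLAGS_STATUS/INTR, and either returns immediately (timeout == 0) or waits for the matching reply. A minimal caller might look like the sketch below; the unit ID, payload layout and my_unit_* names are hypothetical, and the callback signature is inferred from how msgq.c invokes seq->callback(seq->priv, hdr), assuming the declarations exported via <core/falcon.h>.

/*
 * Editorial sketch of posting a command through the new cmdq API; only the
 * nvkm_falcon_cmdq_send() call and the nv_falcon_cmd header fields come
 * from this patch, everything named my_unit_* is hypothetical.
 */
struct my_unit_cmd {
	struct nv_falcon_cmd hdr;	/* seq_id/ctrl_flags filled by send() */
	u8 cmd_type;			/* hypothetical payload                */
};

static int
my_unit_reply(void *priv, struct nv_falcon_msg *hdr)
{
	/* runs when the firmware reply carrying the matching seq_id arrives */
	return 0;
}

static int
my_unit_post(struct nvkm_falcon_cmdq *cmdq)
{
	struct my_unit_cmd cmd = {};

	cmd.hdr.unit_id = 0x07;		/* hypothetical unit ID            */
	cmd.hdr.size = sizeof(cmd);	/* total size, header included     */
	cmd.cmd_type = 0x00;

	/* non-zero timeout => wait synchronously for the reply callback */
	return nvkm_falcon_cmdq_send(cmdq, &cmd.hdr, my_unit_reply, NULL,
				     msecs_to_jiffies(1000));
}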
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 000000000000..cbfe09a561a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static void
+nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
+{
+ mutex_lock(&msgq->mutex);
+ msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+}
+
+static void
+nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
+
+ mutex_unlock(&msgq->mutex);
+}
+
+static bool
+nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
+{
+ u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
+ u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+ return head == tail;
+}
+
+static int
+nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ u32 head, tail, available;
+
+ head = nvkm_falcon_rd32(falcon, msgq->head_reg);
+ /* has the buffer looped? */
+ if (head < msgq->position)
+ msgq->position = msgq->offset;
+
+ tail = msgq->position;
+
+ available = head - tail;
+ if (size > available) {
+ FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
+ size, available);
+ return -EINVAL;
+ }
+
+ nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
+ msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
+ return 0;
+}
+
+static int
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+ int ret = 0;
+
+ nvkm_falcon_msgq_open(msgq);
+
+ if (nvkm_falcon_msgq_empty(msgq))
+ goto close;
+
+ ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message header");
+ goto close;
+ }
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
+ ret = -ENOSPC;
+ goto close;
+ }
+
+ if (hdr->size > HDR_SIZE) {
+ u32 read_size = hdr->size - HDR_SIZE;
+
+ ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message data");
+ goto close;
+ }
+ }
+
+ ret = 1;
+close:
+ nvkm_falcon_msgq_close(msgq, (ret >= 0));
+ return ret;
+}
+
+static int
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+
+ seq = &msgq->qmgr->seq.id[hdr->seq_id];
+ if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+ FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
+ return -EINVAL;
+ }
+
+ if (seq->state == SEQ_STATE_USED) {
+ if (seq->callback)
+ seq->result = seq->callback(seq->priv, hdr);
+ }
+
+ if (seq->async) {
+ nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
+ return 0;
+ }
+
+ complete_all(&seq->done);
+ return 0;
+}
+
+void
+nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
+{
+ /*
+ * We are invoked from a worker thread, so normally we have plenty of
+ * stack space to work with.
+ */
+ u8 msg_buffer[MSG_BUF_SIZE];
+ struct nv_falcon_msg *hdr = (void *)msg_buffer;
+
+ while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
+ nvkm_falcon_msgq_exec(msgq, hdr);
+}
+
+int
+nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
+ void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ struct nv_falcon_msg *hdr = data;
+ int ret;
+
+ msgq->head_reg = falcon->func->msgq.head;
+ msgq->tail_reg = falcon->func->msgq.tail;
+ msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
+
+ nvkm_falcon_msgq_open(msgq);
+ ret = nvkm_falcon_msgq_pop(msgq, data, size);
+ if (ret == 0 && hdr->size != size) {
+ FLCN_ERR(falcon, "unexpected init message size %d vs %d",
+ hdr->size, size);
+ ret = -EINVAL;
+ }
+ nvkm_falcon_msgq_close(msgq, ret == 0);
+ return ret;
+}
+
+void
+nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
+
+ msgq->head_reg = func->msgq.head + index * func->msgq.stride;
+ msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
+ msgq->offset = offset;
+
+ FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, msgq->offset, size);
+}
+
+void
+nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+ if (msgq) {
+ kfree(*pmsgq);
+ *pmsgq = NULL;
+ }
+}
+
+int
+nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+
+ if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ msgq->qmgr = qmgr;
+ msgq->name = name;
+ mutex_init(&msgq->mutex);
+ return 0;
+}
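
msgq bring-up is two-phase: nvkm_falcon_msgq_recv_initmsg() pulls the firmware's very first message straight from the tail register before any queue geometry is known, and the offsets/sizes that message reports are then fed to nvkm_falcon_cmdq_init()/nvkm_falcon_msgq_init(); after that the interrupt or worker path simply calls nvkm_falcon_msgq_recv(). The sketch below only illustrates that ordering; the init-message layout (struct my_init_msg) and the two-queue split are assumptions, not taken from this patch.

/*
 * Editorial sketch of the expected bootstrap order for the new queue
 * objects.  The init-message layout here is hypothetical; only the
 * nvkm_falcon_msgq_* / nvkm_falcon_cmdq_* calls come from this patch.
 */
struct my_init_msg {
	struct nv_falcon_msg hdr;
	struct {			/* hypothetical per-queue info     */
		u32 offset;
		u16 size;
		u8 index;
	} queue[2];
};

static int
my_initmsg(struct nvkm_falcon_msgq *msgq, struct nvkm_falcon_cmdq *cmdq)
{
	struct my_init_msg msg;
	int ret;

	/* the first message is read before the queues are configured */
	ret = nvkm_falcon_msgq_recv_initmsg(msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	/* point both queues at the DMEM regions the firmware reported */
	nvkm_falcon_cmdq_init(cmdq, msg.queue[0].index,
			      msg.queue[0].offset, msg.queue[0].size);
	nvkm_falcon_msgq_init(msgq, msg.queue[1].index,
			      msg.queue[1].offset, msg.queue[1].size);

	/* from here on, the IRQ/worker path just calls nvkm_falcon_msgq_recv() */
	return 0;
}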
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
deleted file mode 100644
index a8bee1e046aa..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-
-#include <subdev/secboot.h>
-
-
-#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
-#define QUEUE_ALIGNMENT 4
-/* max size of the messages we can receive */
-#define MSG_BUF_SIZE 128
-
-static int
-msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- mutex_lock(&queue->mutex);
-
- queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- return 0;
-}
-
-static void
-msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- bool commit)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- if (commit)
- nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
-
- mutex_unlock(&queue->mutex);
-}
-
-static bool
-msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- u32 head, tail;
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- return head == tail;
-}
-
-static int
-msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- void *data, u32 size)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 head, tail, available;
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- /* has the buffer looped? */
- if (head < queue->position)
- queue->position = queue->offset;
-
- tail = queue->position;
-
- available = head - tail;
-
- if (available == 0) {
- nvkm_warn(subdev, "no message data available\n");
- return 0;
- }
-
- if (size > available) {
- nvkm_warn(subdev, "message data smaller than read request\n");
- size = available;
- }
-
- nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
- return size;
-}
-
-static int
-msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- struct nvkm_msgqueue_hdr *hdr)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- int err;
-
- err = msg_queue_open(priv, queue);
- if (err) {
- nvkm_error(subdev, "fail to open queue %d\n", queue->index);
- return err;
- }
-
- if (msg_queue_empty(priv, queue)) {
- err = 0;
- goto close;
- }
-
- err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
- if (err >= 0 && err != HDR_SIZE)
- err = -EINVAL;
- if (err < 0) {
- nvkm_error(subdev, "failed to read message header: %d\n", err);
- goto close;
- }
-
- if (hdr->size > MSG_BUF_SIZE) {
- nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
- err = -ENOSPC;
- goto close;
- }
-
- if (hdr->size > HDR_SIZE) {
- u32 read_size = hdr->size - HDR_SIZE;
-
- err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
- if (err >= 0 && err != read_size)
- err = -EINVAL;
- if (err < 0) {
- nvkm_error(subdev, "failed to read message: %d\n", err);
- goto close;
- }
- }
-
-close:
- msg_queue_close(priv, queue, (err >= 0));
-
- return err;
-}
-
-static bool
-cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- u32 size, bool *rewind)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- u32 head, tail, free;
-
- size = ALIGN(size, QUEUE_ALIGNMENT);
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- if (head >= tail) {
- free = queue->offset + queue->size - head;
- free -= HDR_SIZE;
-
- if (size > free) {
- *rewind = true;
- head = queue->offset;
- }
- }
-
- if (head < tail)
- free = tail - head - 1;
-
- return size <= free;
-}
-
-static int
-cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- void *data, u32 size)
-{
- nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
- return 0;
-}
-
-/* REWIND unit is always 0x00 */
-#define MSGQUEUE_UNIT_REWIND 0x00
-
-static void
-cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_hdr cmd;
- int err;
-
- cmd.unit_id = MSGQUEUE_UNIT_REWIND;
- cmd.size = sizeof(cmd);
- err = cmd_queue_push(priv, queue, &cmd, cmd.size);
- if (err)
- nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
- else
- nvkm_error(subdev, "queue %d rewinded\n", queue->index);
-
- queue->position = queue->offset;
-}
-
-static int
-cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- u32 size)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- bool rewind = false;
-
- mutex_lock(&queue->mutex);
-
- if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
- nvkm_error(subdev, "queue full\n");
- mutex_unlock(&queue->mutex);
- return -EAGAIN;
- }
-
- queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
-
- if (rewind)
- cmd_queue_rewind(priv, queue);
-
- return 0;
-}
-
-static void
-cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- bool commit)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- if (commit)
- nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
-
- mutex_unlock(&queue->mutex);
-}
-
-static int
-cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
- struct nvkm_msgqueue_queue *queue)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- static unsigned timeout = 2000;
- unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
- int ret = -EAGAIN;
- bool commit = true;
-
- while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
- ret = cmd_queue_open(priv, queue, cmd->size);
- if (ret) {
- nvkm_error(subdev, "pmu_queue_open_write failed\n");
- return ret;
- }
-
- ret = cmd_queue_push(priv, queue, cmd, cmd->size);
- if (ret) {
- nvkm_error(subdev, "pmu_queue_push failed\n");
- commit = false;
- }
-
- cmd_queue_close(priv, queue, commit);
-
- return ret;
-}
-
-static struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
- u32 index;
-
- mutex_lock(&priv->seq_lock);
-
- index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
-
- if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
- nvkm_error(subdev, "no free sequence available\n");
- mutex_unlock(&priv->seq_lock);
- return ERR_PTR(-EAGAIN);
- }
-
- set_bit(index, priv->seq_tbl);
-
- mutex_unlock(&priv->seq_lock);
-
- seq = &priv->seq[index];
- seq->state = SEQ_STATE_PENDING;
-
- return seq;
-}
-
-static void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
-{
- /* no need to acquire seq_lock since clear_bit is atomic */
- seq->state = SEQ_STATE_FREE;
- seq->callback = NULL;
- seq->completion = NULL;
- clear_bit(seq->id, priv->seq_tbl);
-}
-
-/* specifies that we want to know the command status in the answer message */
-#define CMD_FLAGS_STATUS BIT(0)
-/* specifies that we want an interrupt when the answer message is queued */
-#define CMD_FLAGS_INTR BIT(1)
-
-int
-nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
- struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
- struct completion *completion, bool wait_init)
-{
- struct nvkm_msgqueue_seq *seq;
- struct nvkm_msgqueue_queue *queue;
- int ret;
-
- if (wait_init && !wait_for_completion_timeout(&priv->init_done,
- msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- queue = priv->func->cmd_queue(priv, prio);
- if (IS_ERR(queue))
- return PTR_ERR(queue);
-
- seq = msgqueue_seq_acquire(priv);
- if (IS_ERR(seq))
- return PTR_ERR(seq);
-
- cmd->seq_id = seq->id;
- cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
-
- seq->callback = cb;
- seq->state = SEQ_STATE_USED;
- seq->completion = completion;
-
- ret = cmd_write(priv, cmd, queue);
- if (ret) {
- seq->state = SEQ_STATE_PENDING;
- msgqueue_seq_release(priv, seq);
- }
-
- return ret;
-}
-
-static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
-
- seq = &priv->seq[hdr->seq_id];
- if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
- nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
- return -EINVAL;
- }
-
- if (seq->state == SEQ_STATE_USED) {
- if (seq->callback)
- seq->callback(priv, hdr);
- }
-
- if (seq->completion)
- complete(seq->completion);
-
- msgqueue_seq_release(priv, seq);
-
- return 0;
-}
-
-static int
-msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = falcon->owner;
- u32 tail;
- u32 tail_reg;
- int ret;
-
- /*
- * Of course the message queue registers vary depending on the falcon
- * used...
- */
- switch (falcon->owner->index) {
- case NVKM_SUBDEV_PMU:
- tail_reg = 0x4cc;
- break;
- case NVKM_ENGINE_SEC2:
- tail_reg = 0xa34;
- break;
- default:
- nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
- nvkm_subdev_name[falcon->owner->index]);
- return -EINVAL;
- }
-
- /*
- * Read the message - queues are not initialized yet so we cannot rely
- * on msg_queue_read()
- */
- tail = nvkm_falcon_rd32(falcon, tail_reg);
- nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
-
- if (hdr->size > MSG_BUF_SIZE) {
- nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
- return -ENOSPC;
- }
-
- nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
- (hdr + 1));
-
- tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
- nvkm_falcon_wr32(falcon, tail_reg, tail);
-
- ret = priv->func->init_func->init_callback(priv, hdr);
- if (ret)
- return ret;
-
- return 0;
-}
-
-void
-nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_queue *queue)
-{
- /*
- * We are invoked from a worker thread, so normally we have plenty of
- * stack space to work with.
- */
- u8 msg_buffer[MSG_BUF_SIZE];
- struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
- int ret;
-
- /* the first message we receive must be the init message */
- if ((!priv->init_msg_received)) {
- ret = msgqueue_handle_init_msg(priv, hdr);
- if (!ret)
- priv->init_msg_received = true;
- } else {
- while (msg_queue_read(priv, queue, hdr) > 0)
- msgqueue_msg_handle(priv, hdr);
- }
-}
-
-void
-nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- if (!queue || !queue->func || !queue->func->init_func)
- return;
-
- queue->func->init_func->gen_cmdline(queue, buf);
-}
-
-int
-nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
- unsigned long falcon_mask)
-{
- unsigned long falcon;
-
- if (!queue || !queue->func->acr_func)
- return -ENODEV;
-
- /* Does the firmware support booting multiple falcons? */
- if (queue->func->acr_func->boot_multiple_falcons)
- return queue->func->acr_func->boot_multiple_falcons(queue,
- falcon_mask);
-
- /* Else boot all requested falcons individually */
- if (!queue->func->acr_func->boot_falcon)
- return -ENODEV;
-
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- int ret = queue->func->acr_func->boot_falcon(queue, falcon);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int
-nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
- const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
-{
- const struct nvkm_subdev *subdev = falcon->owner;
- int ret = -EINVAL;
-
- switch (version) {
- case 0x0137c63d:
- ret = msgqueue_0137c63d_new(falcon, sb, queue);
- break;
- case 0x0137bca5:
- ret = msgqueue_0137bca5_new(falcon, sb, queue);
- break;
- case 0x0148cdec:
- case 0x015ccf3e:
- case 0x0167d263:
- ret = msgqueue_0148cdec_new(falcon, sb, queue);
- break;
- default:
- nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
- version);
- break;
- }
-
- if (ret == 0) {
- nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
- (*queue)->fw_version = version;
- }
-
- return ret;
-}
-
-void
-nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
-{
- if (*queue) {
- (*queue)->func->dtor(*queue);
- *queue = NULL;
- }
-}
-
-void
-nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
-{
- if (!queue->func || !queue->func->recv) {
- const struct nvkm_subdev *subdev = queue->falcon->owner;
-
- nvkm_warn(subdev, "missing msgqueue recv function\n");
- return;
- }
-
- queue->func->recv(queue);
-}
-
-int
-nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
-{
- /* firmware not set yet... */
- if (!queue)
- return 0;
-
- queue->init_msg_received = false;
- reinit_completion(&queue->init_done);
-
- return 0;
-}
-
-void
-nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
- struct nvkm_falcon *falcon,
- struct nvkm_msgqueue *queue)
-{
- int i;
-
- queue->func = func;
- queue->falcon = falcon;
- mutex_init(&queue->seq_lock);
- for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
- queue->seq[i].id = i;
-
- init_completion(&queue->init_done);
-
-
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
deleted file mode 100644
index 13b54f8d8e04..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
-#define __NVKM_CORE_FALCON_MSGQUEUE_H
-
-#include <core/msgqueue.h>
-
-/*
- * The struct nvkm_msgqueue (named so for lack of better candidate) manages
- * a firmware (typically, NVIDIA signed firmware) running under a given falcon.
- *
- * Such firmwares expect to receive commands (through one or several command
- * queues) and will reply to such command by sending messages (using one
- * message queue).
- *
- * Each firmware can support one or several units - ACR for managing secure
- * falcons, PMU for power management, etc. A unit can be seen as a class to
- * which command can be sent.
- *
- * One usage example would be to send a command to the SEC falcon to ask it to
- * reset a secure falcon. The SEC falcon will receive the command, process it,
- * and send a message to signal success or failure. Only when the corresponding
- * message is received can the requester assume the request has been processed.
- *
- * Since we expect many variations between the firmwares NVIDIA will release
- * across GPU generations, this library is built in a very modular way. Message
- * formats and queues details (such as number of usage) are left to
- * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
- * take care of posting commands and processing messages in a fashion that is
- * universal.
- *
- */
-
-enum msgqueue_msg_priority {
- MSGQUEUE_MSG_PRIORITY_HIGH,
- MSGQUEUE_MSG_PRIORITY_LOW,
-};
-
-/**
- * struct nvkm_msgqueue_hdr - header for all commands/messages
- * @unit_id: id of firmware using receiving the command/sending the message
- * @size: total size of command/message
- * @ctrl_flags: type of command/message
- * @seq_id: used to match a message from its corresponding command
- */
-struct nvkm_msgqueue_hdr {
- u8 unit_id;
- u8 size;
- u8 ctrl_flags;
- u8 seq_id;
-};
-
-/**
- * struct nvkm_msgqueue_msg - base message.
- *
- * This is just a header and a message (or command) type. Useful when
- * building command-specific structures.
- */
-struct nvkm_msgqueue_msg {
- struct nvkm_msgqueue_hdr hdr;
- u8 msg_type;
-};
-
-struct nvkm_msgqueue;
-typedef void
-(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-
-/**
- * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
- *
- * @gen_cmdline: build the commandline into a pre-allocated buffer
- * @init_callback: called to process the init message
- */
-struct nvkm_msgqueue_init_func {
- void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
- int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-};
-
-/**
- * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
- *
- * @boot_falcon: build and send the command to reset a given falcon
- * @boot_multiple_falcons: build and send the command to reset several falcons
- */
-struct nvkm_msgqueue_acr_func {
- int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
- int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
-};
-
-struct nvkm_msgqueue_func {
- const struct nvkm_msgqueue_init_func *init_func;
- const struct nvkm_msgqueue_acr_func *acr_func;
- void (*dtor)(struct nvkm_msgqueue *);
- struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
- enum msgqueue_msg_priority);
- void (*recv)(struct nvkm_msgqueue *queue);
-};
-
-/**
- * struct nvkm_msgqueue_queue - information about a command or message queue
- *
- * The number of queues is firmware-dependent. All queues must have their
- * information filled by the init message handler.
- *
- * @mutex_lock: to be acquired when the queue is being used
- * @index: physical queue index
- * @offset: DMEM offset where this queue begins
- * @size: size allocated to this queue in DMEM (in bytes)
- * @position: current write position
- * @head_reg: address of the HEAD register for this queue
- * @tail_reg: address of the TAIL register for this queue
- */
-struct nvkm_msgqueue_queue {
- struct mutex mutex;
- u32 index;
- u32 offset;
- u32 size;
- u32 position;
-
- u32 head_reg;
- u32 tail_reg;
-};
-
-/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
- *
- * Every time a command is sent, a sequence is assigned to it so the
- * corresponding message can be matched. Upon receiving the message, a callback
- * can be called and/or a completion signaled.
- *
- * @id: sequence ID
- * @state: current state
- * @callback: callback to call upon receiving matching message
- * @completion: completion to signal after callback is called
- */
-struct nvkm_msgqueue_seq {
- u16 id;
- enum {
- SEQ_STATE_FREE = 0,
- SEQ_STATE_PENDING,
- SEQ_STATE_USED,
- SEQ_STATE_CANCELLED
- } state;
- nvkm_msgqueue_callback callback;
- struct completion *completion;
-};
-
-/*
- * We can have an arbitrary number of sequences, but realistically we will
- * probably not use that much simultaneously.
- */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
-
-/**
- * struct nvkm_msgqueue - manage a command/message based FW on a falcon
- *
- * @falcon: falcon to be managed
- * @func: implementation of the firmware to use
- * @init_msg_received: whether the init message has already been received
- * @init_done: whether all init is complete and commands can be processed
- * @seq_lock: protects seq and seq_tbl
- * @seq: sequences to match commands and messages
- * @seq_tbl: bitmap of sequences currently in use
- */
-struct nvkm_msgqueue {
- struct nvkm_falcon *falcon;
- const struct nvkm_msgqueue_func *func;
- u32 fw_version;
- bool init_msg_received;
- struct completion init_done;
-
- struct mutex seq_lock;
- struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
- unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
-};
-
-void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
- struct nvkm_msgqueue *);
-int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
- struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
- struct completion *, bool);
-void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
- struct nvkm_msgqueue_queue *);
-
-int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
deleted file mode 100644
index fec0273158f6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/* Queues identifiers */
-enum {
- /* High Priority Command Queue for Host -> PMU communication */
- MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
- /* Low Priority Command Queue for Host -> PMU communication */
- MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
- /* Message queue for PMU -> Host communication */
- MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
- MSGQUEUE_0137C63D_NUM_QUEUES = 5,
-};
-
-struct msgqueue_0137c63d {
- struct nvkm_msgqueue base;
-
- struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
-};
-#define msgqueue_0137c63d(q) \
- container_of(q, struct msgqueue_0137c63d, base)
-
-struct msgqueue_0137bca5 {
- struct msgqueue_0137c63d base;
-
- u64 wpr_addr;
-};
-#define msgqueue_0137bca5(q) \
- container_of(container_of(q, struct msgqueue_0137c63d, base), \
- struct msgqueue_0137bca5, base);
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
- enum msgqueue_msg_priority priority)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
- const struct nvkm_subdev *subdev = priv->base.falcon->owner;
-
- switch (priority) {
- case MSGQUEUE_MSG_PRIORITY_HIGH:
- return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
- case MSGQUEUE_MSG_PRIORITY_LOW:
- return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
- default:
- nvkm_error(subdev, "invalid command queue!\n");
- return ERR_PTR(-EINVAL);
- }
-}
-
-static void
-msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
- struct nvkm_msgqueue_queue *q_queue =
- &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
-
- nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-/* Init unit */
-#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
-
-enum {
- INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- struct {
- u32 reserved;
- u32 freq_hz;
- u32 trace_size;
- u32 trace_dma_base;
- u16 trace_dma_base1;
- u8 trace_dma_offset;
- u32 trace_dma_idx;
- bool secure_mode;
- bool raise_priv_sec;
- struct {
- u32 dma_base;
- u16 dma_base1;
- u8 dma_offset;
- u16 fb_size;
- u8 dma_idx;
- } gc6_ctx;
- u8 pad;
- } *args = buf;
-
- args->secure_mode = 1;
-}
-
-/* forward declaration */
-static int acr_init_wpr(struct nvkm_msgqueue *queue);
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
- struct {
- struct nvkm_msgqueue_msg base;
-
- u8 pad;
- u16 os_debug_entry_point;
-
- struct {
- u16 size;
- u16 offset;
- u8 index;
- u8 pad;
- } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
-
- u16 sw_managed_area_offset;
- u16 sw_managed_area_size;
- } *init = (void *)hdr;
- const struct nvkm_subdev *subdev = _queue->falcon->owner;
- int i;
-
- if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
- nvkm_error(subdev, "expected message from init unit\n");
- return -EINVAL;
- }
-
- if (init->base.msg_type != INIT_MSG_INIT) {
- nvkm_error(subdev, "expected PMU init msg\n");
- return -EINVAL;
- }
-
- for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
- struct nvkm_msgqueue_queue *queue = &priv->queue[i];
-
- mutex_init(&queue->mutex);
-
- queue->index = init->queue_info[i].index;
- queue->offset = init->queue_info[i].offset;
- queue->size = init->queue_info[i].size;
-
- if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
- queue->head_reg = 0x4a0 + (queue->index * 4);
- queue->tail_reg = 0x4b0 + (queue->index * 4);
- } else {
- queue->head_reg = 0x4c8;
- queue->tail_reg = 0x4cc;
- }
-
- nvkm_debug(subdev,
- "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
- i, queue->index, queue->offset, queue->size);
- }
-
- /* Complete initialization by initializing WPR region */
- return acr_init_wpr(&priv->base);
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0137c63d_init_func = {
- .gen_cmdline = init_gen_cmdline,
- .init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
-
-enum {
- ACR_CMD_INIT_WPR_REGION = 0x00,
- ACR_CMD_BOOTSTRAP_FALCON = 0x01,
- ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
-};
-
-static void
-acr_init_wpr_callback(struct nvkm_msgqueue *queue,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct {
- struct nvkm_msgqueue_msg base;
- u32 error_code;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = queue->falcon->owner;
-
- if (msg->error_code) {
- nvkm_error(subdev, "ACR WPR init failure: %d\n",
- msg->error_code);
- return;
- }
-
- nvkm_debug(subdev, "ACR WPR init complete\n");
- complete_all(&queue->init_done);
-}
-
-static int
-acr_init_wpr(struct nvkm_msgqueue *queue)
-{
- /*
- * region_id: region ID in WPR region
- * wpr_offset: offset in WPR region
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 region_id;
- u32 wpr_offset;
- } cmd;
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
- cmd.region_id = 0x01;
- cmd.wpr_offset = 0x00;
-
- nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_init_wpr_callback, NULL, false);
-
- return 0;
-}
-
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 falcon_id;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 falcon_id = msg->falcon_id;
-
- if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
- return;
- }
- nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_id;
- } cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_id = falcon;
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_falcon_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static void
-acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 falcon_mask;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- unsigned long falcon_mask = msg->falcon_mask;
- u32 falcon_id, falcon_treated = 0;
-
- for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- nvkm_debug(subdev, "%s booted\n",
- nvkm_secboot_falcon_name[falcon_id]);
- falcon_treated |= BIT(falcon_id);
- }
-
- if (falcon_treated != msg->falcon_mask) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon mask 0x%x\n",
- msg->falcon_mask);
- return;
- }
-}
-
-static int
-acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_mask;
- u32 use_va_mask;
- u32 wpr_lo;
- u32 wpr_hi;
- } cmd;
- struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_mask = falcon_mask;
- cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
- cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_multiple_falcons_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137c63d_acr_func = {
- .boot_falcon = acr_boot_falcon,
-};
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137bca5_acr_func = {
- .boot_falcon = acr_boot_falcon,
- .boot_multiple_falcons = acr_boot_multiple_falcons,
-};
-
-static void
-msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
-{
- kfree(msgqueue_0137c63d(queue));
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137c63d_func = {
- .init_func = &msgqueue_0137c63d_init_func,
- .acr_func = &msgqueue_0137c63d_acr_func,
- .cmd_queue = msgqueue_0137c63d_cmd_queue,
- .recv = msgqueue_0137c63d_process_msgs,
- .dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0137c63d *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base;
-
- nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137bca5_func = {
- .init_func = &msgqueue_0137c63d_init_func,
- .acr_func = &msgqueue_0137bca5_acr_func,
- .cmd_queue = msgqueue_0137c63d_cmd_queue,
- .recv = msgqueue_0137c63d_process_msgs,
- .dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0137bca5 *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base.base;
-
- /*
- * FIXME this must be set to the address of a *GPU* mapping within the
- * ACR address space!
- */
- /* ret->wpr_addr = sb->wpr_addr; */
-
- nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
deleted file mode 100644
index 9424803b9ef4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/*
- * This firmware runs on the SEC falcon. It only has one command and one
- * message queue, and uses a different command line and init message.
- */
-
-enum {
- MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
- MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
- MSGQUEUE_0148CDEC_NUM_QUEUES,
-};
-
-struct msgqueue_0148cdec {
- struct nvkm_msgqueue base;
-
- struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
-};
-#define msgqueue_0148cdec(q) \
- container_of(q, struct msgqueue_0148cdec, base)
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
- enum msgqueue_msg_priority priority)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
-
- return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
-}
-
-static void
-msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
- struct nvkm_msgqueue_queue *q_queue =
- &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
-
- nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-
-/* Init unit */
-#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
-
-enum {
- INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- struct {
- u32 freq_hz;
- u32 falc_trace_size;
- u32 falc_trace_dma_base;
- u32 falc_trace_dma_idx;
- bool secure_mode;
- } *args = buf;
-
- args->secure_mode = false;
-}
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
- struct {
- struct nvkm_msgqueue_msg base;
-
- u8 num_queues;
- u16 os_debug_entry_point;
-
- struct {
- u32 offset;
- u16 size;
- u8 index;
- u8 id;
- } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
-
- u16 sw_managed_area_offset;
- u16 sw_managed_area_size;
- } *init = (void *)hdr;
- const struct nvkm_subdev *subdev = _queue->falcon->owner;
- int i;
-
- if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
- nvkm_error(subdev, "expected message from init unit\n");
- return -EINVAL;
- }
-
- if (init->base.msg_type != INIT_MSG_INIT) {
- nvkm_error(subdev, "expected SEC init msg\n");
- return -EINVAL;
- }
-
- for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
- u8 id = init->queue_info[i].id;
- struct nvkm_msgqueue_queue *queue = &priv->queue[id];
-
- mutex_init(&queue->mutex);
-
- queue->index = init->queue_info[i].index;
- queue->offset = init->queue_info[i].offset;
- queue->size = init->queue_info[i].size;
-
- if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
- queue->head_reg = 0xa30 + (queue->index * 8);
- queue->tail_reg = 0xa34 + (queue->index * 8);
- } else {
- queue->head_reg = 0xa00 + (queue->index * 8);
- queue->tail_reg = 0xa04 + (queue->index * 8);
- }
-
- nvkm_debug(subdev,
- "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
- id, queue->index, queue->offset, queue->size);
- }
-
- complete_all(&_queue->init_done);
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0148cdec_init_func = {
- .gen_cmdline = init_gen_cmdline,
- .init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON = 0x00,
-};
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 error_code;
- u32 falcon_id;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 falcon_id = msg->falcon_id;
-
- if (msg->error_code) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "expected error code 0x%x\n",
- msg->error_code);
- return;
- }
-
- if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
- return;
- }
-
- nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_id;
- } cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_id = falcon;
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_falcon_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-const struct nvkm_msgqueue_acr_func
-msgqueue_0148cdec_acr_func = {
- .boot_falcon = acr_boot_falcon,
-};
-
-static void
-msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
-{
- kfree(msgqueue_0148cdec(queue));
-}
-
-const struct nvkm_msgqueue_func
-msgqueue_0148cdec_func = {
- .init_func = &msgqueue_0148cdec_init_func,
- .acr_func = &msgqueue_0148cdec_acr_func,
- .cmd_queue = msgqueue_0148cdec_cmd_queue,
- .recv = msgqueue_0148cdec_process_msgs,
- .dtor = msgqueue_0148cdec_dtor,
-};
-
-int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0148cdec *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base;
-
- nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 900fe1d37b4d..466188752eb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,9 +1,5 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FALCON_PRIV_H__
#define __NVKM_FALCON_PRIV_H__
-#include <engine/falcon.h>
-
-void
-nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
- const char *, u32, struct nvkm_falcon *);
+#include <core/falcon.h>
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 000000000000..a453de341a75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+ const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+ struct nvkm_falcon_qmgr_seq *seq;
+ u32 index;
+
+ mutex_lock(&qmgr->seq.mutex);
+ index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+ if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+ nvkm_error(subdev, "no free sequence available\n");
+ mutex_unlock(&qmgr->seq.mutex);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ set_bit(index, qmgr->seq.tbl);
+ mutex_unlock(&qmgr->seq.mutex);
+
+ seq = &qmgr->seq.id[index];
+ seq->state = SEQ_STATE_PENDING;
+ return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+ struct nvkm_falcon_qmgr_seq *seq)
+{
+ /* no need to acquire seq.mutex since clear_bit is atomic */
+ seq->state = SEQ_STATE_FREE;
+ seq->callback = NULL;
+ reinit_completion(&seq->done);
+ clear_bit(seq->id, qmgr->seq.tbl);
+}
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+ if (qmgr) {
+ kfree(*pqmgr);
+ *pqmgr = NULL;
+ }
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+ struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr;
+ int i;
+
+ if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ qmgr->falcon = falcon;
+ mutex_init(&qmgr->seq.mutex);
+ for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+ qmgr->seq.id[i].id = i;
+ init_completion(&qmgr->seq.id[i].done);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 000000000000..a45cd705e4f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @callback: callback to call upon receiving matching message
+ * @done: completion to signal after the callback is called
+ */
+struct nvkm_falcon_qmgr_seq {
+ u16 id;
+ enum {
+ SEQ_STATE_FREE = 0,
+ SEQ_STATE_PENDING,
+ SEQ_STATE_USED,
+ SEQ_STATE_CANCELLED
+ } state;
+ bool async;
+ nvkm_falcon_qmgr_callback callback;
+ void *priv;
+ struct completion done;
+ int result;
+};
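+
+/*
+ * Expected flow, inferred from the fields above (a sketch; the command/
+ * message queue code that drives this lives alongside the qmgr): acquire a
+ * seq, record the caller's callback, priv pointer and completion, carry
+ * seq->id in the outgoing command so the falcon's reply can be matched, and
+ * release the seq once that reply has been dispatched.
+ */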
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that many simultaneously.
+ */
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
+
+struct nvkm_falcon_qmgr {
+ struct nvkm_falcon *falcon;
+
+ struct {
+ struct mutex mutex;
+ struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+ unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+ } seq;
+};
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+ struct nvkm_falcon_qmgr_seq *);
+
+struct nvkm_falcon_cmdq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+ struct completion ready;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+ u32 size;
+
+ u32 position;
+};
+
+struct nvkm_falcon_msgq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+
+ u32 position;
+};
+
+#define FLCNQ_PRINTK(t,q,f,a...) \
+ FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
+#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a)
+#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 6d978feebbd7..1ff9b9c2e651 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -25,7 +25,7 @@
#include <core/memory.h>
#include <subdev/timer.h>
-static void
+void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u16 tag, u8 port, bool secure)
{
@@ -89,18 +89,17 @@ nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
}
}
-static const u32 EMEM_START_ADDR = 0x1000000;
-
-static void
+void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
- u32 size, u8 port)
+ u32 size, u8 port)
{
+ const struct nvkm_falcon_func *func = falcon->func;
u8 rem = size % 4;
int i;
- if (start >= EMEM_START_ADDR && falcon->has_emem)
+ if (func->emem_addr && start >= func->emem_addr)
return nvkm_falcon_v1_load_emem(falcon, data,
- start - EMEM_START_ADDR, size,
+ start - func->emem_addr, size,
port);
size -= rem;
@@ -148,15 +147,16 @@ nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
}
-static void
+void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
u8 port, void *data)
{
+ const struct nvkm_falcon_func *func = falcon->func;
u8 rem = size % 4;
int i;
- if (start >= EMEM_START_ADDR && falcon->has_emem)
- return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
+ if (func->emem_addr && start >= func->emem_addr)
+ return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
size, port, data);
size -= rem;
@@ -179,12 +179,11 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
}
-static void
+void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
- struct nvkm_device *device = falcon->owner->device;
+ const u32 fbif = falcon->func->fbif;
u32 inst_loc;
- u32 fbif;
/* disable instance block binding */
if (ctx == NULL) {
@@ -192,20 +191,6 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
return;
}
- switch (falcon->owner->index) {
- case NVKM_ENGINE_NVENC0:
- case NVKM_ENGINE_NVENC1:
- case NVKM_ENGINE_NVENC2:
- fbif = 0x800;
- break;
- case NVKM_SUBDEV_PMU:
- fbif = 0xe00;
- break;
- default:
- fbif = 0x600;
- break;
- }
-
nvkm_falcon_wr32(falcon, 0x10c, 0x1);
/* setup apertures - virtual */
@@ -234,50 +219,15 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
-
- /* Not sure if this is a WAR for a HW issue, or some additional
- * programming sequence that's needed to properly complete the
- * context switch we trigger above.
- *
- * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
- * particularly when resuming from suspend.
- *
- * Also removes the need for an odd workaround where we needed
- * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
- * the SEC2 RTOS would begin executing.
- */
- switch (falcon->owner->index) {
- case NVKM_SUBDEV_GSP:
- case NVKM_ENGINE_SEC2:
- nvkm_msec(device, 10,
- u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
- u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
- if ((irqstat & 0x00000008) &&
- (flcn0dc & 0x00007000) == 0x00005000)
- break;
- );
-
- nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
- nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
-
- nvkm_msec(device, 10,
- u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
- if ((flcn0dc & 0x00007000) == 0x00000000)
- break;
- );
- break;
- default:
- break;
- }
}
-static void
+void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
nvkm_falcon_wr32(falcon, 0x104, start_addr);
}
-static void
+void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
u32 reg = nvkm_falcon_rd32(falcon, 0x100);
@@ -288,7 +238,7 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
nvkm_falcon_wr32(falcon, 0x100, 0x2);
}
-static int
+int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
struct nvkm_device *device = falcon->owner->device;
@@ -301,7 +251,7 @@ nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
return 0;
}
-static int
+int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
struct nvkm_device *device = falcon->owner->device;
@@ -330,7 +280,7 @@ falcon_v1_wait_idle(struct nvkm_falcon *falcon)
return 0;
}
-static int
+int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
@@ -352,7 +302,7 @@ nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
return 0;
}
-static void
+void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
/* disable IRQs and wait for any previous code to complete */
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
new file mode 100644
index 000000000000..41d75f98e603
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/nvfw/fw.o
+nvkm-y += nvkm/nvfw/hs.o
+nvkm-y += nvkm/nvfw/ls.o
+
+nvkm-y += nvkm/nvfw/acr.o
+nvkm-y += nvkm/nvfw/flcn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
new file mode 100644
index 000000000000..0d063b8317f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/acr.h>
+
+void
+wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr)
+{
+ nvkm_debug(subdev, "wprHeader\n");
+ nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+ nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+ nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+ nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+ nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
+{
+ nvkm_debug(subdev, "wprHeader\n");
+ nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+ nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+ nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+ nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+ nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version);
+ nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+lsb_header_tail_dump(struct nvkm_subdev *subdev,
+ struct lsb_header_tail *hdr)
+{
+ nvkm_debug(subdev, "lsbHeader\n");
+ nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
+ nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size);
+ nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off);
+ nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off);
+ nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size);
+ nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off);
+ nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size);
+ nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off);
+ nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size);
+ nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags);
+}
+
+void
+lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr)
+{
+ lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr)
+{
+ lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr)
+{
+ int i;
+
+ nvkm_debug(subdev, "acrDesc\n");
+ nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+ nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+ nvkm_debug(subdev, "\tmmuMemRange : 0x%x\n",
+ hdr->mmu_mem_range);
+ nvkm_debug(subdev, "\tnoRegions : %d\n",
+ hdr->regions.no_regions);
+
+ for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+ nvkm_debug(subdev, "\tregion[%d] :\n", i);
+ nvkm_debug(subdev, "\t startAddr : 0x%x\n",
+ hdr->regions.region_props[i].start_addr);
+ nvkm_debug(subdev, "\t endAddr : 0x%x\n",
+ hdr->regions.region_props[i].end_addr);
+ nvkm_debug(subdev, "\t regionId : %d\n",
+ hdr->regions.region_props[i].region_id);
+ nvkm_debug(subdev, "\t readMask : 0x%x\n",
+ hdr->regions.region_props[i].read_mask);
+ nvkm_debug(subdev, "\t writeMask : 0x%x\n",
+ hdr->regions.region_props[i].write_mask);
+ nvkm_debug(subdev, "\t clientMask : 0x%x\n",
+ hdr->regions.region_props[i].client_mask);
+ }
+
+ nvkm_debug(subdev, "\tucodeBlobSize: %d\n",
+ hdr->ucode_blob_size);
+ nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n",
+ hdr->ucode_blob_base);
+ nvkm_debug(subdev, "\tvprEnabled : %d\n",
+ hdr->vpr_desc.vpr_enabled);
+ nvkm_debug(subdev, "\tvprStart : 0x%x\n",
+ hdr->vpr_desc.vpr_start);
+ nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
+ hdr->vpr_desc.vpr_end);
+ nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+ hdr->vpr_desc.hdcp_policies);
+}
+
+void
+flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr)
+{
+ int i;
+
+ nvkm_debug(subdev, "acrDesc\n");
+ nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+ nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+ nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n",
+ hdr->mmu_memory_range);
+ nvkm_debug(subdev, "\tnoRegions : %d\n",
+ hdr->regions.no_regions);
+
+ for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+ nvkm_debug(subdev, "\tregion[%d] :\n", i);
+ nvkm_debug(subdev, "\t startAddr : 0x%x\n",
+ hdr->regions.region_props[i].start_addr);
+ nvkm_debug(subdev, "\t endAddr : 0x%x\n",
+ hdr->regions.region_props[i].end_addr);
+ nvkm_debug(subdev, "\t regionId : %d\n",
+ hdr->regions.region_props[i].region_id);
+ nvkm_debug(subdev, "\t readMask : 0x%x\n",
+ hdr->regions.region_props[i].read_mask);
+ nvkm_debug(subdev, "\t writeMask : 0x%x\n",
+ hdr->regions.region_props[i].write_mask);
+ nvkm_debug(subdev, "\t clientMask : 0x%x\n",
+ hdr->regions.region_props[i].client_mask);
+ nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n",
+ hdr->regions.region_props[i].shadow_mem_start_addr);
+ }
+
+ nvkm_debug(subdev, "\tucodeBlobSize : %d\n",
+ hdr->ucode_blob_size);
+ nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n",
+ hdr->ucode_blob_base);
+ nvkm_debug(subdev, "\tvprEnabled : %d\n",
+ hdr->vpr_desc.vpr_enabled);
+ nvkm_debug(subdev, "\tvprStart : 0x%x\n",
+ hdr->vpr_desc.vpr_start);
+ nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
+ hdr->vpr_desc.vpr_end);
+ nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+ hdr->vpr_desc.hdcp_policies);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
new file mode 100644
index 000000000000..00ec764e1aab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/flcn.h>
+
+void
+loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+	nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+ nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1);
+}
+
+void
+loader_config_v1_dump(struct nvkm_subdev *subdev,
+ const struct loader_config_v1 *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved);
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+	nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
+ nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+}
+
+void
+flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc_v1 *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
+ nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+}
+
+void
+flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc_v2 *hdr)
+{
+ flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
new file mode 100644
index 000000000000..746803bd5318
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/fw.h>
+
+const struct nvfw_bin_hdr *
+nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_bin_hdr *hdr = data;
+ nvkm_debug(subdev, "binHdr:\n");
+ nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic);
+ nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver);
+ nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size);
+ nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset);
+ nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ return hdr;
+}
+
+const struct nvfw_bl_desc *
+nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_bl_desc *hdr = data;
+ nvkm_debug(subdev, "blDesc\n");
+ nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag);
+ nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off);
+ nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off);
+ nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size);
+ nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
new file mode 100644
index 000000000000..04ed77cb2eba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/hs.h>
+
+const struct nvfw_hs_header *
+nvfw_hs_header(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_hs_header *hdr = data;
+ nvkm_debug(subdev, "hsHeader:\n");
+ nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset);
+ nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size);
+ nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset);
+ nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size);
+ nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc);
+ nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig);
+ nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset);
+ nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size);
+ return hdr;
+}
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_hs_load_header *hdr = data;
+ int i;
+
+ nvkm_debug(subdev, "hsLoadHeader:\n");
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n",
+ hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n",
+ hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps);
+ for (i = 0; i < hdr->num_apps; i++) {
+ nvkm_debug(subdev,
+ "\tApp[%d] : offset 0x%x size 0x%x\n", i,
+ hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]);
+ }
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
new file mode 100644
index 000000000000..b847f281ce97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/ls.h>
+
+static void
+nvfw_ls_desc_head(struct nvkm_subdev *subdev,
+ const struct nvfw_ls_desc_head *hdr)
+{
+ char *date;
+
+ nvkm_debug(subdev, "lsUcodeImgDesc:\n");
+ nvkm_debug(subdev, "\tdescriptorSize : %d\n",
+ hdr->descriptor_size);
+ nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size);
+ nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n",
+ hdr->tools_version);
+ nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version);
+
+ date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
+ nvkm_debug(subdev, "\tdate : %s\n", date);
+ kfree(date);
+
+ nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n",
+ hdr->bootloader_start_offset);
+ nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n",
+ hdr->bootloader_size);
+ nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n",
+ hdr->bootloader_imem_offset);
+ nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n",
+ hdr->bootloader_entry_point);
+
+ nvkm_debug(subdev, "\tappStartOffset : 0x%x\n",
+ hdr->app_start_offset);
+ nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size);
+ nvkm_debug(subdev, "\tappImemOffset : 0x%x\n",
+ hdr->app_imem_offset);
+ nvkm_debug(subdev, "\tappImemEntry : 0x%x\n",
+ hdr->app_imem_entry);
+ nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n",
+ hdr->app_dmem_offset);
+ nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n",
+ hdr->app_resident_code_offset);
+ nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n",
+ hdr->app_resident_code_size);
+ nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n",
+ hdr->app_resident_data_offset);
+ nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n",
+ hdr->app_resident_data_size);
+}
+
+const struct nvfw_ls_desc *
+nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc_v1 *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbImemOverlays : %d\n",
+ hdr->nb_imem_overlays);
+ nvkm_debug(subdev, "\tnbDmemOverlays : %d\n",
+		   hdr->nb_dmem_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4e136f3d7c28..fb4fff1222af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: MIT
+include $(src)/nvkm/subdev/acr/Kbuild
include $(src)/nvkm/subdev/bar/Kbuild
include $(src)/nvkm/subdev/bios/Kbuild
include $(src)/nvkm/subdev/bus/Kbuild
@@ -19,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
-include $(src)/nvkm/subdev/secboot/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
new file mode 100644
index 000000000000..5b9f64a8957f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/acr/base.o
+nvkm-y += nvkm/subdev/acr/hsfw.o
+nvkm-y += nvkm/subdev/acr/lsfw.o
+nvkm-y += nvkm/subdev/acr/gm200.o
+nvkm-y += nvkm/subdev/acr/gm20b.o
+nvkm-y += nvkm/subdev/acr/gp102.o
+nvkm-y += nvkm/subdev/acr/gp108.o
+nvkm-y += nvkm/subdev/acr/gp10b.o
+nvkm-y += nvkm/subdev/acr/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
new file mode 100644
index 000000000000..8eb2a930a9b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+static struct nvkm_acr_hsf *
+nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name)
+{
+ struct nvkm_acr_hsf *hsf;
+ list_for_each_entry(hsf, &acr->hsf, head) {
+ if (!strcmp(hsf->name, name))
+ return hsf;
+ }
+ return NULL;
+}
+
+int
+nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_hsf *hsf;
+ int ret;
+
+ hsf = nvkm_acr_hsf_find(acr, name);
+ if (!hsf)
+ return -EINVAL;
+
+ nvkm_debug(subdev, "executing %s binary\n", hsf->name);
+ ret = nvkm_falcon_get(hsf->falcon, subdev);
+ if (ret)
+ return ret;
+
+ ret = hsf->func->boot(acr, hsf);
+ nvkm_falcon_put(hsf->falcon, subdev);
+ if (ret) {
+ nvkm_error(subdev, "%s binary failed\n", hsf->name);
+ return ret;
+ }
+
+ nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name);
+ return 0;
+}
+
+static void
+nvkm_acr_unload(struct nvkm_acr *acr)
+{
+ if (acr->done) {
+ nvkm_acr_hsf_boot(acr, "unload");
+ acr->done = false;
+ }
+}
+
+static int
+nvkm_acr_load(struct nvkm_acr *acr)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_lsf *lsf;
+ u64 start, limit;
+ int ret;
+
+ if (list_empty(&acr->lsf)) {
+ nvkm_debug(subdev, "No LSF(s) present.\n");
+ return 0;
+ }
+
+ ret = acr->func->init(acr);
+ if (ret)
+ return ret;
+
+ acr->func->wpr_check(acr, &start, &limit);
+
+ if (start != acr->wpr_start || limit != acr->wpr_end) {
+ nvkm_error(subdev, "WPR not configured as expected: "
+ "%016llx-%016llx vs %016llx-%016llx\n",
+ acr->wpr_start, acr->wpr_end, start, limit);
+ return -EIO;
+ }
+
+ acr->done = true;
+
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->func->boot) {
+ ret = lsf->func->boot(lsf->falcon);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_acr_reload(struct nvkm_acr *acr)
+{
+ nvkm_acr_unload(acr);
+ return nvkm_acr_load(acr);
+}
+
+static struct nvkm_acr_lsf *
+nvkm_acr_falcon(struct nvkm_device *device)
+{
+ struct nvkm_acr *acr = device->acr;
+ struct nvkm_acr_lsf *lsf;
+
+ if (acr) {
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->func->bootstrap_falcon)
+ return lsf;
+ }
+ }
+
+ return NULL;
+}
+
+int
+nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
+{
+ struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device);
+ struct nvkm_acr *acr = device->acr;
+ unsigned long id;
+
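+	/* Without an LS falcon that can bootstrap others, the only way to
+	 * (re)boot the requested falcons is to reload the ACR as a whole,
+	 * which re-runs every managed falcon's boot hook.
+	 */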
+ if (!acrflcn) {
+ int ret = nvkm_acr_reload(acr);
+ if (ret)
+ return ret;
+
+ return acr->done ? 0 : -EINVAL;
+ }
+
+ if (acrflcn->func->bootstrap_multiple_falcons) {
+ return acrflcn->func->
+ bootstrap_multiple_falcons(acrflcn->falcon, mask);
+ }
+
+ for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
+ int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool
+nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr *acr = device->acr;
+ struct nvkm_acr_lsf *lsf;
+
+ if (acr) {
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->id == id)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int
+nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ nvkm_acr_unload(nvkm_acr(subdev));
+ return 0;
+}
+
+static int
+nvkm_acr_init(struct nvkm_subdev *subdev)
+{
+ if (!nvkm_acr_falcon(subdev->device))
+ return 0;
+
+ return nvkm_acr_load(nvkm_acr(subdev));
+}
+
+static void
+nvkm_acr_cleanup(struct nvkm_acr *acr)
+{
+ nvkm_acr_lsfw_del_all(acr);
+ nvkm_acr_hsfw_del_all(acr);
+ nvkm_firmware_put(acr->wpr_fw);
+ acr->wpr_fw = NULL;
+}
+
+static int
+nvkm_acr_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_acr *acr = nvkm_acr(subdev);
+ struct nvkm_acr_hsfw *hsfw;
+ struct nvkm_acr_lsfw *lsfw, *lsft;
+ struct nvkm_acr_lsf *lsf;
+ u32 wpr_size = 0;
+ int ret, i;
+
+ if (list_empty(&acr->hsfw)) {
+ nvkm_debug(subdev, "No HSFW(s)\n");
+ nvkm_acr_cleanup(acr);
+ return 0;
+ }
+
+ /* Determine layout/size of WPR image up-front, as we need to know
+ * it to allocate memory before we begin constructing it.
+ */
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+ /* Cull unknown falcons that are present in WPR image. */
+ if (acr->wpr_fw) {
+ if (!lsfw->func) {
+ nvkm_acr_lsfw_del(lsfw);
+ continue;
+ }
+
+ wpr_size = acr->wpr_fw->size;
+ }
+
+ /* Ensure we've fetched falcon configuration. */
+ ret = nvkm_falcon_get(lsfw->falcon, subdev);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_put(lsfw->falcon, subdev);
+
+ if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
+ return -ENOMEM;
+ lsf->func = lsfw->func;
+ lsf->falcon = lsfw->falcon;
+ lsf->id = lsfw->id;
+ list_add_tail(&lsf->head, &acr->lsf);
+ }
+
+ if (!acr->wpr_fw || acr->wpr_comp)
+ wpr_size = acr->func->wpr_layout(acr);
+
+ /* Allocate/Locate WPR + fill ucode blob pointer.
+ *
+ * dGPU: allocate WPR + shadow blob
+ * Tegra: locate WPR with regs, ensure size is sufficient,
+ * allocate ucode blob.
+ */
+ ret = acr->func->wpr_alloc(acr, wpr_size);
+ if (ret)
+ return ret;
+
+ nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
+ acr->wpr_start, acr->wpr_end, acr->shadow_start);
+
+ /* Write WPR to ucode blob. */
+ nvkm_kmap(acr->wpr);
+ if (acr->wpr_fw && !acr->wpr_comp)
+ nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);
+
+ if (!acr->wpr_fw || acr->wpr_comp)
+ acr->func->wpr_build(acr, nvkm_acr_falcon(device));
+ acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);
+
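+	/* Compare mode is a validation aid: diff the generated image against
+	 * the reference WPR firmware word by word, log any mismatches, and
+	 * bail out instead of booting.
+	 */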
+ if (acr->wpr_fw && acr->wpr_comp) {
+ nvkm_kmap(acr->wpr);
+ for (i = 0; i < acr->wpr_fw->size; i += 4) {
+ u32 us = nvkm_ro32(acr->wpr, i);
+ u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
+ if (fw != us) {
+ nvkm_warn(subdev, "%08x: %08x %08x\n",
+ i, us, fw);
+ }
+ }
+ return -EINVAL;
+ }
+ nvkm_done(acr->wpr);
+
+ /* Allocate instance block for ACR-related stuff. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+ &acr->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
+ if (ret)
+ return ret;
+
+ acr->vmm->debug = acr->subdev.debug;
+
+ ret = nvkm_vmm_join(acr->vmm, acr->inst);
+ if (ret)
+ return ret;
+
+ /* Load HS firmware blobs into ACR VMM. */
+ list_for_each_entry(hsfw, &acr->hsfw, head) {
+ nvkm_debug(subdev, "loading %s fw\n", hsfw->name);
+ ret = hsfw->func->load(acr, hsfw);
+ if (ret)
+ return ret;
+ }
+
+ /* Kill temporary data. */
+ nvkm_acr_cleanup(acr);
+ return 0;
+}
+
+static void *
+nvkm_acr_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_acr *acr = nvkm_acr(subdev);
+ struct nvkm_acr_hsf *hsf, *hst;
+ struct nvkm_acr_lsf *lsf, *lst;
+
+ list_for_each_entry_safe(hsf, hst, &acr->hsf, head) {
+ nvkm_vmm_put(acr->vmm, &hsf->vma);
+ nvkm_memory_unref(&hsf->ucode);
+ kfree(hsf->imem);
+ list_del(&hsf->head);
+ kfree(hsf);
+ }
+
+ nvkm_vmm_part(acr->vmm, acr->inst);
+ nvkm_vmm_unref(&acr->vmm);
+ nvkm_memory_unref(&acr->inst);
+
+ nvkm_memory_unref(&acr->wpr);
+
+ list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
+ list_del(&lsf->head);
+ kfree(lsf);
+ }
+
+ nvkm_acr_cleanup(acr);
+ return acr;
+}
+
+static const struct nvkm_subdev_func
+nvkm_acr = {
+ .dtor = nvkm_acr_dtor,
+ .oneinit = nvkm_acr_oneinit,
+ .init = nvkm_acr_init,
+ .fini = nvkm_acr_fini,
+};
+
+static int
+nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
+ if (ret < 0)
+ return ret;
+
+ /* Pre-add LSFs in the order they appear in the FW WPR image so that
+ * we're able to do a binary comparison with our own generator.
+ */
+ ret = acr->func->wpr_parse(acr);
+ if (ret)
+ return ret;
+
+ acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
+ acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
+ return 0;
+}
+
+int
+nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_acr **pacr)
+{
+ struct nvkm_acr *acr;
+ long wprfw;
+
+ if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+ INIT_LIST_HEAD(&acr->hsfw);
+ INIT_LIST_HEAD(&acr->lsfw);
+ INIT_LIST_HEAD(&acr->hsf);
+ INIT_LIST_HEAD(&acr->lsf);
+
+ fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ acr->func = fwif->func;
+
+ wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
+ if (wprfw >= 0) {
+ int ret = nvkm_acr_ctor_wpr(acr, wprfw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
new file mode 100644
index 000000000000..9a6394085cf0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+#include <subdev/timer.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm200_acr_init(struct nvkm_acr *acr)
+{
+ return nvkm_acr_hsf_boot(acr, "load");
+}
+
+void
+gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
+{
+ struct nvkm_device *device = acr->subdev.device;
+
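+	/* The register at 0x100cd4 reports the WPR bounds in 256-byte units:
+	 * writing 2 selects the region start and 3 its limit (hence the << 8
+	 * shifts and the 0x20000 added to the reported limit below).
+	 */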
+ nvkm_wr32(device, 0x100cd4, 2);
+ *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ nvkm_wr32(device, 0x100cd4, 3);
+ *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ *limit = *limit + 0x20000;
+}
+
+void
+gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct wpr_header hdr;
+ struct lsb_header lsb;
+	struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+
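+	/* Walk the WPR header table and have each known LSF re-point its
+	 * bootloader descriptor at the region's final placement.
+	 */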
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_dump(subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_dump(subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
+}
+
+void
+gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
+ struct lsb_header_tail *hdr)
+{
+ hdr->ucode_off = lsfw->offset.img;
+ hdr->ucode_size = lsfw->ucode_size;
+ hdr->data_size = lsfw->data_size;
+ hdr->bl_code_size = lsfw->bootloader_size;
+ hdr->bl_imem_off = lsfw->bootloader_imem_offset;
+ hdr->bl_data_off = lsfw->offset.bld;
+ hdr->bl_data_size = lsfw->bl_data_size;
+ hdr->app_code_off = lsfw->app_start_offset +
+ lsfw->app_resident_code_offset;
+ hdr->app_code_size = lsfw->app_resident_code_size;
+ hdr->app_data_off = lsfw->app_start_offset +
+ lsfw->app_resident_data_offset;
+ hdr->app_data_size = lsfw->app_resident_data_size;
+ hdr->flags = lsfw->func->flags;
+}
+
+static int
+gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct wpr_header hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_PMU,
+ .lazy_bootstrap = rtos && lsfw->id != rtos->id,
+ .status = WPR_HEADER_V0_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gm200_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
+ return 0;
+}
+
+static int
+gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+ ALIGN(wpr_size, 0x40000), 0x40000, true,
+ &acr->wpr);
+ if (ret)
+ return ret;
+
+ acr->wpr_start = nvkm_memory_addr(acr->wpr);
+ acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
+ return 0;
+}
+
+u32
+gm200_acr_wpr_layout(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 wpr = 0;
+
+ wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);
+
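+	/* Each LSF then contributes a 256-byte aligned LSB header, a
+	 * 4096-byte aligned ucode image, and 256-byte aligned bootloader
+	 * data, in that order.
+	 */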
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.lsb = wpr;
+ wpr += sizeof(struct lsb_header);
+
+ wpr = ALIGN(wpr, 4096);
+ lsfw->offset.img = wpr;
+ wpr += lsfw->img.size;
+
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.bld = wpr;
+ lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+ wpr += lsfw->bl_data_size;
+ }
+
+ return wpr;
+}
+
+int
+gm200_acr_wpr_parse(struct nvkm_acr *acr)
+{
+ const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+
+ while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
+ wpr_header_dump(&acr->subdev, hdr);
+ if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc_v1 hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = hsf->vma->addr + hsf->data_addr,
+ .data_size = hsf->data_size,
+ };
+
+ flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+int
+gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
+ u32 intr_clear, u32 mbox0_ok)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon *falcon = hsf->falcon;
+ u32 mbox0, mbox1;
+ int ret;
+
+ /* Reset falcon. */
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, acr->inst);
+
+ /* Load bootloader into IMEM. */
+ nvkm_falcon_load_imem(falcon, hsf->imem,
+ falcon->code.limit - hsf->imem_size,
+ hsf->imem_size,
+ hsf->imem_tag,
+ 0, false);
+
+ /* Load bootloader data into DMEM. */
+ hsf->func->bld(acr, hsf);
+
+ /* Boot the falcon. */
+ nvkm_mc_intr_mask(device, falcon->owner->index, false);
+
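+	/* Seed mailbox 0 with a sentinel, presumably so a HS binary that
+	 * never ran (and so never rewrote the mailbox) is reported as an
+	 * error by the check further down.
+	 */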
+ nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
+ nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
+ nvkm_falcon_start(falcon);
+ ret = nvkm_falcon_wait_for_halt(falcon, 100);
+ if (ret)
+ return ret;
+
+ /* Check for successful completion. */
+ mbox0 = nvkm_falcon_rd32(falcon, 0x040);
+ mbox1 = nvkm_falcon_rd32(falcon, 0x044);
+ nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1);
+ if (mbox0 && mbox0 != mbox0_ok)
+ return -EIO;
+
+ nvkm_falcon_clear_interrupt(falcon, intr_clear);
+ nvkm_mc_intr_mask(device, falcon->owner->index, true);
+ return ret;
+}
+
+int
+gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw,
+ struct nvkm_falcon *falcon)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_hsf *hsf;
+ int ret;
+
+ /* Patch the appropriate signature (production/debug) into the FW
+ * image, as determined by the mode the falcon is in.
+ */
+ ret = nvkm_falcon_get(falcon, subdev);
+ if (ret)
+ return ret;
+
+ if (hsfw->sig.patch_loc) {
+ if (!falcon->debug) {
+ nvkm_debug(subdev, "patching production signature\n");
+ memcpy(hsfw->image + hsfw->sig.patch_loc,
+ hsfw->sig.prod.data,
+ hsfw->sig.prod.size);
+ } else {
+ nvkm_debug(subdev, "patching debug signature\n");
+ memcpy(hsfw->image + hsfw->sig.patch_loc,
+ hsfw->sig.dbg.data,
+ hsfw->sig.dbg.size);
+ }
+ }
+
+ nvkm_falcon_put(falcon, subdev);
+
+ if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL)))
+ return -ENOMEM;
+ hsf->func = hsfw->func;
+ hsf->name = hsfw->name;
+ list_add_tail(&hsf->head, &acr->hsf);
+
+ hsf->imem_size = hsfw->imem_size;
+ hsf->imem_tag = hsfw->imem_tag;
+ hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL);
+ if (!hsf->imem)
+ return -ENOMEM;
+
+ hsf->non_sec_addr = hsfw->non_sec_addr;
+ hsf->non_sec_size = hsfw->non_sec_size;
+ hsf->sec_addr = hsfw->sec_addr;
+ hsf->sec_size = hsfw->sec_size;
+ hsf->data_addr = hsfw->data_addr;
+ hsf->data_size = hsfw->data_size;
+
+ /* Make the FW image accessible to the HS bootloader. */
+ ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+ hsfw->image_size, 0x1000, false, &hsf->ucode);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(hsf->ucode);
+ nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size);
+ nvkm_done(hsf->ucode);
+
+ ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode),
+ &hsf->vma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0);
+ if (ret)
+ return ret;
+
+ hsf->falcon = falcon;
+ return 0;
+}
+
+int
+gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d);
+}
+
+int
+gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm200_acr_unload_0 = {
+ .load = gm200_acr_unload_load,
+ .boot = gm200_acr_unload_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+ {}
+};
+
+int
+gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0);
+}
+
+static int
+gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
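+	/* Describe the WPR region to the HS "load" binary via the ACR
+	 * descriptor embedded in its data segment.
+	 */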
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ flcn_acr_desc_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gm200_acr_load_0 = {
+ .load = gm200_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gm200_acr = {
+ .load = gm200_acr_load_fwif,
+ .unload = gm200_acr_unload_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm200_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static int
+gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm200_acr_fwif[] = {
+ { 0, gm200_acr_load, &gm200_acr },
+ {}
+};
+
+int
+gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
new file mode 100644
index 000000000000..034a6ede70c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+
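+ /* Rather than allocating the WPR region itself, query the bounds of
+ * the pre-existing one and make sure the LS image fits inside it.
+ */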
+ acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end);
+
+ if ((acr->wpr_end - acr->wpr_start) < wpr_size) {
+ nvkm_error(subdev, "WPR image too big for WPR!\n");
+ return -ENOSPC;
+ }
+
+ return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+ wpr_size, 0, true, &acr->wpr);
+}
+
+static void
+gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr >> 8,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8,
+ .data_size = hsf->data_size,
+ };
+
+ flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+static int
+gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->ucode_blob_base = nvkm_memory_addr(acr->wpr);
+ desc->ucode_blob_size = nvkm_memory_size(acr->wpr);
+ flcn_acr_desc_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm20b_acr_load_0 = {
+ .load = gm20b_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm20b_acr_load_bld,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gm20b_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gm20b_acr = {
+ .load = gm20b_acr_load_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm20b_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+int
+gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm20b_acr_fwif[] = {
+ { 0, gm20b_acr_load, &gm20b_acr },
+ {}
+};
+
+int
+gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
new file mode 100644
index 000000000000..49e11c46d525
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+void
+gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct wpr_header_v1 hdr;
+ struct lsb_header_v1 lsb;
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_v1_dump(&acr->subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_v1_dump(&acr->subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
+}
+
+int
+gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header_v1 hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+ struct wpr_header_v1 hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_SEC2,
+ .lazy_bootstrap = rtos && lsfw->id != rtos->id,
+ .bin_version = sig->version,
+ .status = WPR_HEADER_V1_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+ return 0;
+}
+
+int
+gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
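+ /* Allocate twice the aligned WPR size: the lower half of the buffer
+ * serves as the shadow region, the upper half as the WPR proper.
+ */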
+ int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+ ALIGN(wpr_size, 0x40000) << 1, 0x40000, true,
+ &acr->wpr);
+ if (ret)
+ return ret;
+
+ acr->shadow_start = nvkm_memory_addr(acr->wpr);
+ acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1);
+ acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1);
+ return 0;
+}
+
+u32
+gp102_acr_wpr_layout(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 wpr = 0;
+
+ wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1);
+ wpr = ALIGN(wpr, 256);
+
+ wpr += 0x100; /* Shared sub-WPR headers. */
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.lsb = wpr;
+ wpr += sizeof(struct lsb_header_v1);
+
+ wpr = ALIGN(wpr, 4096);
+ lsfw->offset.img = wpr;
+ wpr += lsfw->img.size;
+
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.bld = wpr;
+ lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+ wpr += lsfw->bl_data_size;
+ }
+
+ return wpr;
+}
+
+int
+gp102_acr_wpr_parse(struct nvkm_acr *acr)
+{
+ const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+
+ while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
+ wpr_header_v1_dump(&acr->subdev, hdr);
+ if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+ {}
+};
+
+int
+gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ desc->regions.region_props[0].shadow_mem_start_addr =
+ acr->shadow_start >> 8;
+ flcn_acr_desc_v1_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw,
+ &acr->subdev.device->sec2->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gp102_acr_load_0 = {
+ .load = gp102_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp102_acr = {
+ .load = gp102_acr_load_fwif,
+ .unload = gp102_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_build = gp102_acr_wpr_build,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+int
+gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/unload_bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gp102_acr_fwif[] = {
+ { 0, gp102_acr_load, &gp102_acr },
+ {}
+};
+
+int
+gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
new file mode 100644
index 000000000000..f10dc9112678
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+#include <nvfw/flcn.h>
+
+void
+gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc_v2 hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = hsf->vma->addr + hsf->data_addr,
+ .data_size = hsf->data_size,
+ .argc = 0,
+ .argv = 0,
+ };
+
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+const struct nvkm_acr_hsf_func
+gp108_acr_unload_0 = {
+ .load = gm200_acr_unload_load,
+ .boot = gm200_acr_unload_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+ {}
+};
+
+static const struct nvkm_acr_hsf_func
+gp108_acr_load_0 = {
+ .load = gp102_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp108_acr = {
+ .load = gp108_acr_load_fwif,
+ .unload = gp108_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_build = gp102_acr_wpr_build,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp108_acr_fwif[] = {
+ { 0, gp102_acr_load, &gp108_acr },
+ {}
+};
+
+int
+gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
new file mode 100644
index 000000000000..39de64292a41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gp10b_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp10b_acr = {
+ .load = gp10b_acr_load_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm20b_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp10b_acr_fwif[] = {
+ { 0, gm20b_acr_load, &gp10b_acr },
+ {}
+};
+
+int
+gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
new file mode 100644
index 000000000000..aecce2dac558
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+static void
+nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw)
+{
+ list_del(&hsfw->head);
+ kfree(hsfw->imem);
+ kfree(hsfw->image);
+ kfree(hsfw->sig.prod.data);
+ kfree(hsfw->sig.dbg.data);
+ kfree(hsfw);
+}
+
+void
+nvkm_acr_hsfw_del_all(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_hsfw *hsfw, *hsft;
+ list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
+ nvkm_acr_hsfw_del(hsfw);
+ }
+}
+
+static int
+nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
+ struct nvkm_acr_hsfw *hsfw)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct firmware *fw;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header *fwhdr;
+ const struct nvfw_hs_load_header *lhdr;
+ u32 loc, sig;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, name, ver, &fw);
+ if (ret < 0)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, fw->data);
+ fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset);
+
+ /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
+ * standard format, and don't have the indirection seen in the 0x10de
+ * case.
+ */
+ switch (hdr->bin_magic) {
+ case 0x000010de:
+ loc = *(u32 *)(fw->data + fwhdr->patch_loc);
+ sig = *(u32 *)(fw->data + fwhdr->patch_sig);
+ break;
+ case 0x3b1d14f0:
+ loc = fwhdr->patch_loc;
+ sig = fwhdr->patch_sig;
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+
+ lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset);
+
+ if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size);
+ hsfw->image_size = hdr->data_size;
+ hsfw->non_sec_addr = lhdr->non_sec_code_off;
+ hsfw->non_sec_size = lhdr->non_sec_code_size;
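+ /* apps[] holds the offset of each application, followed by their
+ * sizes; only the first (secure) application is used here.
+ */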
+ hsfw->sec_addr = lhdr->apps[0];
+ hsfw->sec_size = lhdr->apps[lhdr->num_apps];
+ hsfw->data_addr = lhdr->data_dma_base;
+ hsfw->data_size = lhdr->data_size;
+
+ hsfw->sig.prod.size = fwhdr->sig_prod_size;
+ hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+ if (!hsfw->sig.prod.data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
+ hsfw->sig.prod.size);
+
+ hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
+ hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+ if (!hsfw->sig.dbg.data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
+ hsfw->sig.dbg.size);
+
+ hsfw->sig.patch_loc = loc;
+done:
+ nvkm_firmware_put(fw);
+ return ret;
+}
+
+static int
+nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
+ struct nvkm_acr_hsfw *hsfw)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *desc;
+ const struct firmware *fw;
+ u8 *data;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, name, ver, &fw);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, fw->data);
+ desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset);
+ data = (void *)fw->data + hdr->data_offset;
+
+ hsfw->imem_size = desc->code_size;
+ hsfw->imem_tag = desc->start_tag;
+ hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
+ memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
+
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *fwif)
+{
+ struct nvkm_acr_hsfw *hsfw;
+ int ret;
+
+ if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL)))
+ return -ENOMEM;
+
+ hsfw->func = fwif->func;
+ hsfw->name = name;
+ list_add_tail(&hsfw->head, &acr->hsfw);
+
+ ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw);
+ if (ret)
+ goto done;
+
+ ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw);
+done:
+ if (ret)
+ nvkm_acr_hsfw_del(hsfw);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
new file mode 100644
index 000000000000..07d1830126ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <nvfw/fw.h>
+#include <nvfw/ls.h>
+
+void
+nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
+{
+ nvkm_blob_dtor(&lsfw->img);
+ nvkm_firmware_put(lsfw->sig);
+ list_del(&lsfw->head);
+ kfree(lsfw);
+}
+
+void
+nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw, *lsft;
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+ nvkm_acr_lsfw_del(lsfw);
+ }
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id == id)
+ return lsfw;
+ }
+ return NULL;
+}
+
+struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
+ struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr_lsfw *lsfw;
+
+ if (!acr)
+ return ERR_PTR(-ENOSYS);
+
+ lsfw = nvkm_acr_lsfw_get(acr, id);
+ if (lsfw && lsfw->func) {
+ nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
+ return ERR_PTR(-EEXIST);
+ }
+
+ if (!lsfw) {
+ if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+
+ lsfw->id = id;
+ list_add_tail(&lsfw->head, &acr->lsfw);
+ }
+
+ lsfw->func = func;
+ lsfw->falcon = falcon;
+ return lsfw;
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func,
+ const struct firmware **pdesc)
+{
+ struct nvkm_acr *acr = subdev->device->acr;
+ struct nvkm_acr_lsfw *lsfw;
+ int ret;
+
+ if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+ return lsfw;
+
+ ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc);
+done:
+ if (ret) {
+ nvkm_acr_lsfw_del(lsfw);
+ return ERR_PTR(ret);
+ }
+
+ return lsfw;
+}
+
+static void
+nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
+ lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;
+
+ lsfw->app_size = ALIGN(desc->app_size, 256);
+ lsfw->app_start_offset = desc->app_start_offset;
+ lsfw->app_imem_entry = desc->app_imem_entry;
+ lsfw->app_resident_code_offset = desc->app_resident_code_offset;
+ lsfw->app_resident_code_size = desc->app_resident_code_size;
+ lsfw->app_resident_data_offset = desc->app_resident_data_offset;
+ lsfw->app_resident_data_size = desc->app_resident_data_size;
+
+ lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+ lsfw->bootloader_size;
+ lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+ lsfw->ucode_size;
+}
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ const struct firmware *fw;
+ struct nvkm_acr_lsfw *lsfw;
+
+ lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+ func, &fw);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
+
+ nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw);
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ const struct firmware *fw;
+ struct nvkm_acr_lsfw *lsfw;
+
+ lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+ func, &fw);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
+
+ nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw);
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ struct nvkm_acr *acr = subdev->device->acr;
+ struct nvkm_acr_lsfw *lsfw;
+ const struct firmware *bl = NULL, *inst = NULL, *data = NULL;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *desc;
+ u32 *bldata;
+ int ret;
+
+ if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+ return PTR_ERR(lsfw);
+
+ ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
+ if (ret)
+ goto done;
+
+ hdr = nvfw_bin_hdr(subdev, bl->data);
+ desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+ bldata = (void *)(bl->data + hdr->data_offset);
+
+ ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+ if (ret)
+ goto done;
+
+ lsfw->bootloader_size = ALIGN(desc->code_size, 256);
+ lsfw->bootloader_imem_offset = desc->start_tag << 8;
+
+ lsfw->app_start_offset = lsfw->bootloader_size;
+ lsfw->app_imem_entry = 0;
+ lsfw->app_resident_code_offset = 0;
+ lsfw->app_resident_code_size = ALIGN(inst->size, 256);
+ lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
+ lsfw->app_resident_data_size = ALIGN(data->size, 256);
+ lsfw->app_size = lsfw->app_resident_code_size +
+ lsfw->app_resident_data_size;
+
+ lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
+ if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
+ memcpy(lsfw->img.data + lsfw->app_start_offset +
+ lsfw->app_resident_code_offset, inst->data, inst->size);
+ memcpy(lsfw->img.data + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset, data->data, data->size);
+
+ lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+ lsfw->bootloader_size;
+ lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+ lsfw->ucode_size;
+
+done:
+ if (ret)
+ nvkm_acr_lsfw_del(lsfw);
+ nvkm_firmware_put(data);
+ nvkm_firmware_put(inst);
+ nvkm_firmware_put(bl);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
new file mode 100644
index 000000000000..d8ba72806d39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -0,0 +1,151 @@
+#ifndef __NVKM_ACR_PRIV_H__
+#define __NVKM_ACR_PRIV_H__
+#include <subdev/acr.h>
+struct lsb_header_tail;
+
+struct nvkm_acr_fwif {
+ int version;
+ int (*load)(struct nvkm_acr *, int version,
+ const struct nvkm_acr_fwif *);
+ const struct nvkm_acr_func *func;
+};
+
+int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+
+struct nvkm_acr_lsf;
+struct nvkm_acr_func {
+ const struct nvkm_acr_hsf_fwif *load;
+ const struct nvkm_acr_hsf_fwif *ahesasc;
+ const struct nvkm_acr_hsf_fwif *asb;
+ const struct nvkm_acr_hsf_fwif *unload;
+ int (*wpr_parse)(struct nvkm_acr *);
+ u32 (*wpr_layout)(struct nvkm_acr *);
+ int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size);
+ int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos);
+ void (*wpr_patch)(struct nvkm_acr *, s64 adjust);
+ void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
+ int (*init)(struct nvkm_acr *);
+ void (*fini)(struct nvkm_acr *);
+};
+
+int gm200_acr_wpr_parse(struct nvkm_acr *);
+u32 gm200_acr_wpr_layout(struct nvkm_acr *);
+int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+void gm200_acr_wpr_patch(struct nvkm_acr *, s64);
+void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *);
+void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *,
+ struct lsb_header_tail *);
+int gm200_acr_init(struct nvkm_acr *);
+
+int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+
+int gp102_acr_wpr_parse(struct nvkm_acr *);
+u32 gp102_acr_wpr_layout(struct nvkm_acr *);
+int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *);
+void gp102_acr_wpr_patch(struct nvkm_acr *, s64);
+
+struct nvkm_acr_hsfw {
+ const struct nvkm_acr_hsf_func *func;
+ const char *name;
+ struct list_head head;
+
+ u32 imem_size;
+ u32 imem_tag;
+ u32 *imem;
+
+ u8 *image;
+ u32 image_size;
+ u32 non_sec_addr;
+ u32 non_sec_size;
+ u32 sec_addr;
+ u32 sec_size;
+ u32 data_addr;
+ u32 data_size;
+
+ struct {
+ struct {
+ void *data;
+ u32 size;
+ } prod, dbg;
+ u32 patch_loc;
+ } sig;
+};
+
+struct nvkm_acr_hsf_fwif {
+ int version;
+ int (*load)(struct nvkm_acr *, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *);
+ const struct nvkm_acr_hsf_func *func;
+};
+
+int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *,
+ const char *, int, const struct nvkm_acr_hsf_fwif *);
+void nvkm_acr_hsfw_del_all(struct nvkm_acr *);
+
+struct nvkm_acr_hsf {
+ const struct nvkm_acr_hsf_func *func;
+ const char *name;
+ struct list_head head;
+
+ u32 imem_size;
+ u32 imem_tag;
+ u32 *imem;
+
+ u32 non_sec_addr;
+ u32 non_sec_size;
+ u32 sec_addr;
+ u32 sec_size;
+ u32 data_addr;
+ u32 data_size;
+
+ struct nvkm_memory *ucode;
+ struct nvkm_vma *vma;
+ struct nvkm_falcon *falcon;
+};
+
+struct nvkm_acr_hsf_func {
+ int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+ int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+ void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+};
+
+int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *,
+ struct nvkm_falcon *);
+int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *,
+ u32 clear_intr, u32 mbox0_ok);
+
+int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm200_acr_unload_0;
+int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm20b_acr_load_0;
+
+int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+
+extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
+void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
+ struct nvkm_acr **);
+int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
+
+struct nvkm_acr_lsf {
+ const struct nvkm_acr_lsf_func *func;
+ struct nvkm_falcon *falcon;
+ enum nvkm_acr_lsf_id id;
+ struct list_head head;
+};
+
+struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *,
+ struct nvkm_acr *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id);
+void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *);
+void nvkm_acr_lsfw_del_all(struct nvkm_acr *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
new file mode 100644
index 000000000000..7f4b89d82d32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/pmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+
+static int
+tu102_acr_init(struct nvkm_acr *acr)
+{
+ int ret = nvkm_acr_hsf_boot(acr, "AHESASC");
+ if (ret)
+ return ret;
+
+ return nvkm_acr_hsf_boot(acr, "ASB");
+}
+
+static int
+tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /*XXX: shared sub-WPR headers, fill terminator for now. */
+ nvkm_wo32(acr->wpr, 0x200, 0xffffffff);
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+ struct wpr_header_v1 hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_GSPLITE,
+ .lazy_bootstrap = 1,
+ .bin_version = sig->version,
+ .status = WPR_HEADER_V1_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+ return 0;
+}
+
+static int
+tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0, 0);
+}
+
+static int
+tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *fwif)
+{
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static int
+tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_asb_0 = {
+ .load = tu102_acr_asb_load,
+ .boot = tu102_acr_hsfw_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_asb_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_ahesasc_0 = {
+ .load = gp102_acr_load_load,
+ .boot = tu102_acr_hsfw_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_ahesasc_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static const struct nvkm_acr_func
+tu102_acr = {
+ .ahesasc = tu102_acr_ahesasc_fwif,
+ .asb = tu102_acr_asb_fwif,
+ .unload = tu102_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_build = tu102_acr_wpr_build,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = tu102_acr_init,
+};
+
+static int
+tu102_acr_load(struct nvkm_acr *acr, int version,
+ const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
+ acr, "acr/bl", "acr/ucode_ahesasc",
+ "AHESASC");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
+ acr, "acr/bl", "acr/ucode_asb", "ASB");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/unload_bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+tu102_acr_fwif[] = {
+ { 0, tu102_acr_load, &tu102_acr },
+ {}
+};
+
+int
+tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
index 950bff1955ad..1ed6170891c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -26,7 +26,6 @@ gm20b_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
.bar1.init = gf100_bar_bar1_init,
- .bar1.fini = gf100_bar_bar1_fini,
.bar1.wait = gm107_bar_bar1_wait,
.bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index b8578359e61b..118e33174cbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -46,6 +46,19 @@ extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return extdev + *hdr;
}
+bool
+nvbios_extdev_skip_probe(struct nvkm_bios *bios)
+{
+ u8 ver, hdr, len, cnt;
+ u16 data = extdev_table(bios, &ver, &hdr, &len, &cnt);
+ if (data && ver == 0x40 && hdr >= 5) {
+ u8 flags = nvbios_rd08(bios, data - hdr + 4);
+ if (flags & 1)
+ return true;
+ }
+ return false;
+}
+
static u16
nvbios_extdev_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index ec0e9f7224b5..9de74f41dcd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -834,7 +834,7 @@ init_generic_condition(struct nvbios_init *init)
init_exec_set(init, false);
break;
default:
- warn("INIT_GENERIC_CONDITON: unknown 0x%02x\n", cond);
+ warn("INIT_GENERIC_CONDITION: unknown 0x%02x\n", cond);
init->offset += size;
break;
}
@@ -1935,6 +1935,28 @@ init_ram_restrict_pll(struct nvbios_init *init)
}
/**
+ * INIT_RESET_BEGUN - opcode 0x8c
+ *
+ */
+static void
+init_reset_begun(struct nvbios_init *init)
+{
+ trace("RESET_BEGUN\n");
+ init->offset += 1;
+}
+
+/**
+ * INIT_RESET_END - opcode 0x8d
+ *
+ */
+static void
+init_reset_end(struct nvbios_init *init)
+{
+ trace("RESET_END\n");
+ init->offset += 1;
+}
+
+/**
* INIT_GPIO - opcode 0x8e
*
*/
@@ -2260,8 +2282,8 @@ static struct nvbios_init_opcode {
[0x79] = { init_pll },
[0x7a] = { init_zm_reg },
[0x87] = { init_ram_restrict_pll },
- [0x8c] = { init_reserved },
- [0x8d] = { init_reserved },
+ [0x8c] = { init_reset_begun },
+ [0x8d] = { init_reset_end },
[0x8e] = { init_gpio },
[0x8f] = { init_ram_restrict_zm_reg_group },
[0x90] = { init_copy_zm_reg },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 7143ea4611aa..33a9fb5ac558 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->min = min(info->base,
info->base + info->step * info->vidmask);
info->max = nvbios_rd32(bios, volt + 0x0e);
+ if (!info->max)
+ info->max = max(info->base, info->base + info->step * info->vidmask);
break;
case 0x50:
info->min = nvbios_rd32(bios, volt + 0x0a);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 53b9d638f2c8..d65ec719f153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -2,5 +2,6 @@
nvkm-y += nvkm/subdev/fault/base.o
nvkm-y += nvkm/subdev/fault/user.o
nvkm-y += nvkm/subdev/fault/gp100.o
+nvkm-y += nvkm/subdev/fault/gp10b.o
nvkm-y += nvkm/subdev/fault/gv100.o
nvkm-y += nvkm/subdev/fault/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index ca251560d3e0..f6dca97140d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
return ret;
/* Pin fault buffer in BAR2. */
- buffer->addr = nvkm_memory_bar2(buffer->mem);
+ buffer->addr = fault->func->buffer.pin(buffer);
if (buffer->addr == ~0ULL)
return -EFAULT;
@@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
struct nvkm_fault *fault = nvkm_fault(subdev);
int i;
+ nvkm_notify_fini(&fault->nrpfb);
nvkm_event_fini(&fault->event);
for (i = 0; i < fault->buffer_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 4f3c4e091117..f6b189cc4330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,25 +21,26 @@
*/
#include "priv.h"
+#include <core/memory.h>
#include <subdev/mc.h>
#include <nvif/class.h>
-static void
+void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
}
-static void
+void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
}
-static void
+void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
@@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}
-static void
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+ return nvkm_memory_bar2(buffer->mem);
+}
+
+void
gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
@@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
buffer->put = 0x002a80;
}
-static void
+void
gp100_fault_intr(struct nvkm_fault *fault)
{
nvkm_event_send(&fault->event, 1, 0, NULL, 0);
@@ -68,6 +74,7 @@ gp100_fault = {
.buffer.nr = 1,
.buffer.entry_size = 32,
.buffer.info = gp100_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = gp100_fault_buffer_init,
.buffer.fini = gp100_fault_buffer_fini,
.buffer.intr = gp100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
new file mode 100644
index 000000000000..9e66d1f7654d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <core/memory.h>
+
+#include <nvif/class.h>
+
+u64
+gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+ return nvkm_memory_addr(buffer->mem);
+}
+
+static const struct nvkm_fault_func
+gp10b_fault = {
+ .intr = gp100_fault_intr,
+ .buffer.nr = 1,
+ .buffer.entry_size = 32,
+ .buffer.info = gp100_fault_buffer_info,
+ .buffer.pin = gp10b_fault_buffer_pin,
+ .buffer.init = gp100_fault_buffer_init,
+ .buffer.fini = gp100_fault_buffer_fini,
+ .buffer.intr = gp100_fault_buffer_intr,
+ .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
+};
+
+int
+gp10b_fault_new(struct nvkm_device *device, int index,
+ struct nvkm_fault **pfault)
+{
+ return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 6747f09c2dc3..2707be4ffabc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -214,6 +214,7 @@ gv100_fault = {
.buffer.nr = 2,
.buffer.entry_size = 32,
.buffer.info = gv100_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = gv100_fault_buffer_init,
.buffer.fini = gv100_fault_buffer_fini,
.buffer.intr = gv100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 975e66ac6344..f6f1dd7eee1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -30,6 +30,7 @@ struct nvkm_fault_func {
int nr;
u32 entry_size;
void (*info)(struct nvkm_fault_buffer *);
+ u64 (*pin)(struct nvkm_fault_buffer *);
void (*init)(struct nvkm_fault_buffer *);
void (*fini)(struct nvkm_fault_buffer *);
void (*intr)(struct nvkm_fault_buffer *, bool enable);
@@ -40,6 +41,15 @@ struct nvkm_fault_func {
} user;
};
+void gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable);
+void gp100_fault_buffer_fini(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_init(struct nvkm_fault_buffer *);
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_info(struct nvkm_fault_buffer *);
+void gp100_fault_intr(struct nvkm_fault *);
+
+u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *);
+
int gv100_fault_oneinit(struct nvkm_fault *);
int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index fa1dfe5692b0..45a6a68b9f48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -154,6 +154,7 @@ tu102_fault = {
.buffer.nr = 2,
.buffer.entry_size = 32,
.buffer.info = tu102_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = tu102_fault_buffer_init,
.buffer.fini = tu102_fault_buffer_fini,
.buffer.intr = tu102_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index b2bb5a3ccb02..5940e0dea2f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -126,6 +126,34 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
}
static int
+nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ int ret;
+
+ nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
+
+ if (!fb->vpr_scrubber.size) {
+ nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
+ return 0;
+ }
+
+ ret = fb->func->vpr.scrub(fb);
+ if (ret) {
+ nvkm_error(subdev, "VPR scrubber binary failed\n");
+ return ret;
+ }
+
+ if (fb->func->vpr.scrub_required(fb)) {
+ nvkm_error(subdev, "VPR still locked after scrub!\n");
+ return -EIO;
+ }
+
+ nvkm_debug(subdev, "VPR scrubber binary successful\n");
+ return 0;
+}
+
+static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
@@ -154,6 +182,14 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
+
+ if (fb->func->vpr.scrub_required &&
+ fb->func->vpr.scrub_required(fb)) {
+ ret = nvkm_fb_init_scrub_vpr(fb);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -172,6 +208,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
nvkm_mm_fini(&fb->tags);
nvkm_ram_del(&fb->ram);
+ nvkm_blob_dtor(&fb->vpr_scrubber);
+
if (fb->func->dtor)
return fb->func->dtor(fb);
return fb;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b4d74e815674..fc8c93aa3da5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -24,7 +24,81 @@
#include "gf100.h"
#include "ram.h"
+#include <core/firmware.h>
#include <core/memory.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+#include <engine/nvdec.h>
+
+int
+gp102_fb_vpr_scrub(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon *falcon = &device->nvdec[0]->falcon;
+ struct nvkm_blob *blob = &fb->vpr_scrubber;
+ const struct nvfw_bin_hdr *hsbin_hdr;
+ const struct nvfw_hs_header *fw_hdr;
+ const struct nvfw_hs_load_header *lhdr;
+ void *scrub_data;
+ u32 patch_loc, patch_sig;
+ int ret;
+
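+ /* Patch the selected signature into the scrubber image, run it on
+ * the NVDEC falcon and wait for it to halt.
+ */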
+ nvkm_falcon_get(falcon, subdev);
+
+ hsbin_hdr = nvfw_bin_hdr(subdev, blob->data);
+ fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset);
+ lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset);
+ scrub_data = blob->data + hsbin_hdr->data_offset;
+
+ patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc);
+ patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig);
+ if (falcon->debug) {
+ memcpy(scrub_data + patch_loc,
+ blob->data + fw_hdr->sig_dbg_offset + patch_sig,
+ fw_hdr->sig_dbg_size);
+ } else {
+ memcpy(scrub_data + patch_loc,
+ blob->data + fw_hdr->sig_prod_offset + patch_sig,
+ fw_hdr->sig_prod_size);
+ }
+
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, NULL);
+
+ nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
+ lhdr->non_sec_code_size,
+ lhdr->non_sec_code_off >> 8, 0, false);
+ nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
+ ALIGN(lhdr->apps[0], 0x100),
+ lhdr->apps[1],
+ lhdr->apps[0] >> 8, 0, true);
+ nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
+ lhdr->data_size, 0);
+
+ nvkm_falcon_set_start_addr(falcon, 0x0);
+ nvkm_falcon_start(falcon);
+
+ ret = nvkm_falcon_wait_for_halt(falcon, 500);
+ if (ret < 0) {
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ /* put nvdec in clean state - without reset it will remain in HS mode */
+ nvkm_falcon_reset(falcon);
+end:
+ nvkm_falcon_put(falcon, subdev);
+ return ret;
+}
+
+bool
+gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100cd0, 0x2);
+ return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
+}
static const struct nvkm_fb_func
gp102_fb = {
@@ -33,11 +107,32 @@ gp102_fb = {
.init = gp100_fb_init,
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
+ .vpr.scrub_required = gp102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp100_ram_new,
};
int
+gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb **pfb)
+{
+ int ret = gf100_fb_new_(func, device, index, pfb);
+ if (ret)
+ return ret;
+
+ nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
+ &(*pfb)->vpr_scrubber);
+ return 0;
+}
+
+int
gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp102_fb, device, index, pfb);
+ return gp102_fb_new_(&gp102_fb, device, index, pfb);
}
+
+MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 3c5e02e9794a..389bad312bf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -35,6 +35,8 @@ gv100_fb = {
.init = gp100_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
+ .vpr.scrub_required = gp102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp100_ram_new,
.default_bigpage = 16,
};
@@ -42,5 +44,10 @@ gv100_fb = {
int
gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gv100_fb, device, index, pfb);
+ return gp102_fb_new_(&gv100_fb, device, index, pfb);
}
+
+MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index c4e9f55af283..5be9c563350d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -17,6 +17,11 @@ struct nvkm_fb_func {
void (*intr)(struct nvkm_fb *);
struct {
+ bool (*scrub_required)(struct nvkm_fb *);
+ int (*scrub)(struct nvkm_fb *);
+ } vpr;
+
+ struct {
int regions;
void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
@@ -72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *);
void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
+
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+ struct nvkm_fb **);
+bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
+int gp102_fb_vpr_scrub(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index ac87a3b6b7c9..ba43fe158b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func,
static const struct nvkm_ram_func
gf100_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 70a06e3cd55a..d97fa43efb91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
static const struct nvkm_ram_func
gf108_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 456aed1f2a02..d350d92852d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
static const struct nvkm_ram_func
gk104_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 27c68e3f9772..be91da854dca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
static const struct nvkm_ram_func
gm107_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index 6b0cac1fe7b4..8f91ea91ee25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
static const struct nvkm_ram_func
gm200_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index adb62a6beb63..378f6fb70990 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -79,7 +79,7 @@ gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
static const struct nvkm_ram_func
gp100_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 1399d923d446..914276410ef8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,6 +23,7 @@
*/
#include "priv.h"
+#include <core/option.h>
#include <core/notify.h>
static int
@@ -182,12 +183,43 @@ static const struct dmi_system_id gpio_reset_ids[] = {
{ }
};
+static enum dcb_gpio_func_name power_checks[] = {
+ DCB_GPIO_THERM_EXT_POWER_EVENT,
+ DCB_GPIO_POWER_ALERT,
+ DCB_GPIO_EXT_POWER_LOW,
+};
+
static int
nvkm_gpio_init(struct nvkm_subdev *subdev)
{
struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+ struct dcb_gpio_func func;
+ int ret;
+ int i;
+
if (dmi_check_system(gpio_reset_ids))
nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
+
+ if (nvkm_boolopt(subdev->device->cfgopt, "NvPowerChecks", true)) {
+ for (i = 0; i < ARRAY_SIZE(power_checks); ++i) {
+ ret = nvkm_gpio_find(gpio, 0, power_checks[i],
+ DCB_GPIO_UNUSED, &func);
+ if (ret)
+ continue;
+
+ ret = nvkm_gpio_get(gpio, 0, func.func, func.line);
+ if (!ret)
+ continue;
+
+ nvkm_error(&gpio->subdev,
+ "GPU is missing power, check its power "
+ "cables. Boot with "
+ "nouveau.config=NvPowerChecks=0 to "
+ "disable.\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index e7c4f068936e..67cc3b320169 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/base.o
nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
new file mode 100644
index 000000000000..5a32df0f9992
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <subdev/acr.h>
+#include <subdev/top.h>
+
+static void *
+nvkm_gsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+ nvkm_falcon_dtor(&gsp->falcon);
+ return gsp;
+}
+
+static const struct nvkm_subdev_func
+nvkm_gsp = {
+ .dtor = nvkm_gsp_dtor,
+};
+
+int
+nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_gsp **pgsp)
+{
+ struct nvkm_gsp *gsp;
+
+ if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+
+ fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
+ nvkm_subdev_name[gsp->subdev.index], 0,
+ &gsp->falcon);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index dccfaf1162e2..2114f9b00a28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -19,44 +19,37 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/gsp.h>
-#include <subdev/top.h>
-#include <engine/falcon.h>
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gv100_gsp_flcn = {
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = gp102_sec2_flcn_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
static int
-gv100_gsp_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_gsp *gsp = nvkm_gsp(subdev);
-
- gsp->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (!gsp->addr)
- return -EINVAL;
-
- return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon);
-}
-
-static void *
-gv100_gsp_dtor(struct nvkm_subdev *subdev)
+gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
- struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- nvkm_falcon_del(&gsp->falcon);
- return gsp;
+ return 0;
}
-static const struct nvkm_subdev_func
-gv100_gsp = {
- .dtor = gv100_gsp_dtor,
- .oneinit = gv100_gsp_oneinit,
+struct nvkm_gsp_fwif
+gv100_gsp[] = {
+ { -1, gv100_gsp_nofw, &gv100_gsp_flcn },
+ {}
};
int
gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
{
- struct nvkm_gsp *gsp;
-
- if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev);
- return 0;
+ return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
new file mode 100644
index 000000000000..92820fb997c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_GSP_PRIV_H__
+#define __NVKM_GSP_PRIV_H__
+#include <subdev/gsp.h>
+enum nvkm_acr_lsf_id;
+
+struct nvkm_gsp_fwif {
+ int version;
+ int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
+ const struct nvkm_falcon_func *flcn;
+};
+
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+ struct nvkm_gsp **);
+#endif
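The new nvkm_gsp_fwif table is how a chip describes the firmware variants it can run: nvkm_gsp_new_() hands the table to nvkm_firmware_load(), and the selected entry supplies the load() callback and falcon functions. gv100 currently registers only a "no firmware" entry; a hedged sketch of what a firmware-backed variant might look like (every "gxxxx" identifier is hypothetical, not part of this patch):

/* Hypothetical sketch -- all "gxxxx" identifiers are placeholders. */
static const struct nvkm_falcon_func
gxxxx_gsp_flcn = {
	/* same shape as gv100_gsp_flcn above; only a subset shown */
	.fbif = 0x600,
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.start = nvkm_falcon_v1_start,
};

static int
gxxxx_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
	/* fetch and stage the GSP firmware for version "ver" here */
	return 0;
}

static const struct nvkm_gsp_fwif
gxxxx_gsp_fwif[] = {
	{ 0, gxxxx_gsp_load, &gxxxx_gsp_flcn },
	{}
};

int
gxxxx_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
{
	return nvkm_gsp_new_(gxxxx_gsp_fwif, device, index, pgsp);
}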
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
u8 *ptr = msg->buf;
while (remaining) {
- u8 cnt = (remaining > 16) ? 16 : remaining;
- u8 cmd;
+ u8 cnt, retries, cmd;
if (msg->flags & I2C_M_RD)
cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
- if (ret < 0) {
- nvkm_i2c_aux_release(aux);
- return ret;
+ for (retries = 0, cnt = 0;
+ retries < 32 && !cnt;
+ retries++) {
+ cnt = min_t(u8, remaining, 16);
+ ret = aux->func->xfer(aux, true, cmd,
+ msg->addr, ptr, &cnt);
+ if (ret < 0)
+ goto out;
+ }
+ if (!cnt) {
+ AUX_TRACE(aux, "no data after 32 retries");
+ ret = -EIO;
+ goto out;
}
ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
msg++;
}
+ ret = num;
+out:
nvkm_i2c_aux_release(aux);
- return num;
+ return ret;
}
static u32
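For reference, the bounded-retry logic introduced above can be read in isolation as follows (a hedged sketch, not part of the patch: the helper name is hypothetical and error handling is simplified to returning the byte count or -EIO):

/* Hypothetical helper illustrating the retry shape used above: a transfer
 * may legally move zero bytes, so retry up to 32 times before giving up.
 */
static int
example_aux_xfer(struct nvkm_i2c_aux *aux, u8 cmd, u32 addr, u8 *buf,
		 u8 remaining)
{
	u8 cnt = 0, retries;
	int ret;

	for (retries = 0; retries < 32 && !cnt; retries++) {
		cnt = min_t(u8, remaining, 16); /* AUX moves at most 16 bytes */
		ret = aux->func->xfer(aux, true, cmd, addr, buf, &cnt);
		if (ret < 0)
			return ret;
	}

	return cnt ? cnt : -EIO;
}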
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 2b6d36ea7067..728d75010847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
nvkm-y += nvkm/subdev/ltc/gp100.o
nvkm-y += nvkm/subdev/ltc/gp102.o
+nvkm-y += nvkm/subdev/ltc/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
new file mode 100644
index 000000000000..c0063c7caa50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Thierry Reding
+ */
+
+#include "priv.h"
+
+static void
+gp10b_ltc_init(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ struct iommu_fwspec *spec;
+
+ nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+ nvkm_wr32(device, 0x100800, ltc->ltc_nr);
+
+ spec = dev_iommu_fwspec_get(device->dev);
+ if (spec) {
+ u32 sid = spec->ids[0] & 0xffff;
+
+ /* stream ID */
+ nvkm_wr32(device, 0x160000, sid << 2);
+ }
+}
+
+static const struct nvkm_ltc_func
+gp10b_ltc = {
+ .oneinit = gp100_ltc_oneinit,
+ .init = gp10b_ltc_init,
+ .intr = gp100_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+ .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
+ .invalidate = gf100_ltc_invalidate,
+ .flush = gf100_ltc_flush,
+};
+
+int
+gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 2fcf18e46ce3..eca5a711b1b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
int gp100_ltc_oneinit(struct nvkm_ltc *);
void gp100_ltc_init(struct nvkm_ltc *);
void gp100_ltc_intr(struct nvkm_ltc *);
+
+void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2d075246dc46..2cd5ec81c0d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -30,7 +30,7 @@
* The value 0xff represents an invalid storage type.
*/
const u8 *
-gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
@@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index dbf644ebac97..83990c83f9f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
const u8 *
-gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
@@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index db3dfbbb2aa0..c0083ddda65a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
const u8 *
-nv50_mmu_kind(struct nvkm_mmu *base, int *count)
+nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
{
/* 0x01: no bank swizzle
* 0x02: bank swizzled
@@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0x7f;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 07f2fcd18f3d..479b02344271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -35,17 +35,17 @@ struct nvkm_mmu_func {
u32 pd_offset;
} vmm;
- const u8 *(*kind)(struct nvkm_mmu *, int *count);
+ const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
bool kind_sys;
};
extern const struct nvkm_mmu_func nv04_mmu;
-const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
-const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
-const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
struct nvkm_mmu_pt {
union {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index c0db0ce10cba..b21e82eb0916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -1,5 +1,6 @@
/*
* Copyright 2018 Red Hat Inc.
+ * Copyright 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,13 +27,26 @@
#include <nvif/class.h>
+const u8 *
+tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+ static const u8
+ kind[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
+ 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
+ };
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0x07;
+ return kind;
+}
+
static const struct nvkm_mmu_func
tu102_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
- .kind = gm200_mmu_kind,
+ .kind = tu102_mmu_kind,
.kind_sys = true,
};
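The extra u8 *invalid out-parameter added to the kind() callbacks lets callers stop hard-coding the per-generation invalid marker (0xff on gf100/gm200, 0x7f on nv50, 0x07 on tu102). A minimal caller sketch (the function name is hypothetical; the vmm changes further down make the same substitution in place):

/* Hypothetical helper: validate a kind value against the MMU's kind map,
 * using the invalid marker reported by the callback itself.
 */
static bool
example_kind_valid(struct nvkm_mmu *mmu, u8 kind)
{
	int kindn = 0;
	u8 kind_inv = 0;
	const u8 *kindm;

	if (!mmu->func->kind)
		return false;

	kindm = mmu->func->kind(mmu, &kindn, &kind_inv);
	return kind < kindn && kindm[kind] != kind_inv;
}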
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 353f10f92b77..0e4b8941da37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
} *args = argv;
const u8 *kind = NULL;
int ret = -ENOSYS, count = 0;
+ u8 kind_inv = 0;
if (mmu->func->kind)
- kind = mmu->func->kind(mmu, &count);
+ kind = mmu->func->kind(mmu, &count, &kind_inv);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
if (argc != args->v0.count * sizeof(*args->v0.data))
return -EINVAL;
if (args->v0.count > count)
return -EINVAL;
+ args->v0.kind_inv = kind_inv;
memcpy(args->v0.data, kind, args->v0.count);
} else
return ret;
@@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_ummu *ummu;
int ret = -ENOSYS, kinds = 0;
+ u8 unused = 0;
if (mmu->func->kind)
- mmu->func->kind(mmu, &kinds);
+ mmu->func->kind(mmu, &kinds, &unused);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
args->v0.dmabits = mmu->dma_bits;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index ab6424faf84c..6a2d9eb8e1ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
- u8 kind, priv, ro, vol;
+ u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
if (WARN_ON(aper < 0))
return aper;
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0xff) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index b4f519768d5e..d86287565542 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
- u8 kind, priv, ro, vol;
+ u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -347,8 +347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
if (WARN_ON(aper < 0))
return aper;
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0xff) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index c98afe3134ee..2d89e27e8e9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_ram *ram = device->fb->ram;
struct nvkm_memory *memory = map->memory;
- u8 aper, kind, comp, priv, ro;
+ u8 aper, kind, kind_inv, comp, priv, ro;
int kindn, ret = -ENOSYS;
const u8 *kindm;
@@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0x7f) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index e37b6e45eaa2..a76c2a7bd696 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/pmu/gm107.o
nvkm-y += nvkm/subdev/pmu/gm20b.o
nvkm-y += nvkm/subdev/pmu/gp100.o
nvkm-y += nvkm/subdev/pmu/gp102.o
+nvkm-y += nvkm/subdev/pmu/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index ce70a193caa7..a0fe607c9c07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,9 +23,27 @@
*/
#include "priv.h"
-#include <core/msgqueue.h>
+#include <core/firmware.h>
#include <subdev/timer.h>
+bool
+nvkm_pmu_fan_controlled(struct nvkm_device *device)
+{
+ struct nvkm_pmu *pmu = device->pmu;
+
+ /* Internal PMU FW does not currently control fans in any way,
+ * so allow SW control of fans instead.
+ */
+ if (pmu && pmu->func->code.size)
+ return false;
+
+ /* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi
+ * and newer automatically controls the fan speed, which would
+ * interfere with SW control.
+ */
+ return (device->chipset >= 0xc0);
+}
+
void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
@@ -67,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
pmu->func->fini(pmu);
flush_work(&pmu->recv.work);
+
+ reinit_completion(&pmu->wpr_ready);
+
+ nvkm_falcon_cmdq_fini(pmu->lpq);
+ nvkm_falcon_cmdq_fini(pmu->hpq);
+ pmu->initmsg_received = false;
return 0;
}
@@ -115,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
return ret;
}
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
-}
-
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- nvkm_msgqueue_del(&pmu->queue);
- nvkm_falcon_del(&pmu->falcon);
+ nvkm_falcon_msgq_del(&pmu->msgq);
+ nvkm_falcon_cmdq_del(&pmu->lpq);
+ nvkm_falcon_cmdq_del(&pmu->hpq);
+ nvkm_falcon_qmgr_del(&pmu->qmgr);
+ nvkm_falcon_dtor(&pmu->falcon);
return nvkm_pmu(subdev);
}
@@ -135,29 +155,50 @@ static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
.preinit = nvkm_pmu_preinit,
- .oneinit = nvkm_pmu_oneinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
};
int
-nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int index, struct nvkm_pmu *pmu)
{
+ int ret;
+
nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
- pmu->func = func;
+
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
+
+ fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ pmu->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+ nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+ &pmu->falcon);
+ if (ret)
+ return ret;
+
+ if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
+ (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
+ (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
+ (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
+ return ret;
+
+ init_completion(&pmu->wpr_ready);
return 0;
}
int
-nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int index, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_pmu_ctor(func, device, index, *ppmu);
+ return nvkm_pmu_ctor(fwif, device, index, *ppmu);
}
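With this change the per-chip files no longer pass a single nvkm_pmu_func; they pass an nvkm_pmu_fwif table, and nvkm_firmware_load() (called from nvkm_pmu_ctor() above) picks the entry to use. The chip files below show the two common shapes; a hedged sketch combining them for an imaginary chip (all "gxxxx" names and the ACR linkage are placeholders):

/* Hypothetical table: a signed-firmware entry first, with the { -1, ... }
 * "no external firmware" entry as the fallback.  gxxxx_pmu/gxxxx_pmu_acr
 * stand in for the chip's nvkm_pmu_func/nvkm_acr_lsf_func.
 */
static const struct nvkm_pmu_fwif
gxxxx_pmu_fwif[] = {
	{  0, gm20b_pmu_load, &gxxxx_pmu, &gxxxx_pmu_acr },
	{ -1, gf100_pmu_nofw, &gxxxx_pmu },
	{}
};

int
gxxxx_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gxxxx_pmu_fwif, device, index, ppmu);
}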
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 0b458656e870..3ecb3d9cbcf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gf100_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
@@ -56,7 +57,19 @@ gf100_pmu = {
};
int
+gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_pmu_fwif
+gf100_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gf100_pmu },
+ {}
+};
+
+int
gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 3dfa79d4fb13..8dd0271aaaee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -26,6 +26,7 @@
static const struct nvkm_pmu_func
gf119_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gf119_pmu_code,
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
@@ -39,8 +40,14 @@ gf119_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gf119_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gf119_pmu },
+ {}
+};
+
int
gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8f7ec10fd2a4..8b70cc17a634 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
static const struct nvkm_pmu_func
gk104_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk104_pmu_code,
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
@@ -119,8 +120,14 @@ gk104_pmu = {
.pgob = gk104_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk104_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk104_pmu },
+ {}
+};
+
int
gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 345741d55a56..0081f2141b10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
static const struct nvkm_pmu_func
gk110_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk110_pmu_code,
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
@@ -98,8 +99,14 @@ gk110_pmu = {
.pgob = gk110_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk110_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk110_pmu },
+ {}
+};
+
int
gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index e4acf7876ea1..b227c701a5e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -26,6 +26,7 @@
static const struct nvkm_pmu_func
gk208_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk208_pmu_code,
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
@@ -40,8 +41,14 @@ gk208_pmu = {
.pgob = gk110_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk208_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk208_pmu },
+ {}
+};
+
int
gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 05e81855c367..26c1adf8f44c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -95,7 +95,7 @@ static void
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
struct gk20a_pmu_dvfs_dev_status *status)
{
- struct nvkm_falcon *falcon = pmu->base.falcon;
+ struct nvkm_falcon *falcon = &pmu->base.falcon;
status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
@@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
- struct nvkm_falcon *falcon = pmu->base.falcon;
+ struct nvkm_falcon *falcon = &pmu->base.falcon;
nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
@@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu)
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
- nvkm_falcon_put(pmu->falcon, &pmu->subdev);
+ nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
}
static int
@@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu)
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = pmu->subdev.device;
- struct nvkm_falcon *falcon = pmu->falcon;
+ struct nvkm_falcon *falcon = &pmu->falcon;
int ret;
ret = nvkm_falcon_get(falcon, subdev);
@@ -196,25 +196,34 @@ gk20a_dvfs_data= {
static const struct nvkm_pmu_func
gk20a_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
.reset = gf100_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gk20a_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk20a_pmu },
+ {}
+};
+
int
gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
struct gk20a_pmu *pmu;
+ int ret;
if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
*ppmu = &pmu->base;
- nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
+ ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+ if (ret)
+ return ret;
pmu->data = &gk20a_dvfs_data;
nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 459df1ef9e70..5afb55e58b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -28,6 +28,7 @@
static const struct nvkm_pmu_func
gm107_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gm107_pmu_code,
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
@@ -41,8 +42,14 @@ gm107_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gm107_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gm107_pmu },
+ {}
+};
+
int
gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 31c843145c7a..82571032a07d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -19,38 +19,224 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
#include "priv.h"
-static void
+#include <core/memory.h>
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_bootstrap_falcon_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ return msg->falcon_id;
+}
+
+int
+gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+ struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
+ .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+ .falcon_id = id,
+ };
+ int ret;
+
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gm20b_pmu_acr_bootstrap_falcon_cb,
+ &pmu->subdev, msecs_to_jiffies(1000));
+ if (ret >= 0) {
+ if (ret != cmd.falcon_id)
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int
+gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
+{
+ struct nv_pmu_args args = { .secure_mode = true };
+ const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
+ nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+ nvkm_falcon_start(falcon);
+ return 0;
+}
+
+void
+gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct loader_config hdr;
+ u64 addr;
+
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
+ hdr.overlay_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+ loader_config_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+ const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+ const struct loader_config hdr = {
+ .dma_idx = FALCON_DMAIDX_UCODE,
+ .code_dma_base = lower_32_bits(code),
+ .code_size_total = lsfw->app_size,
+ .code_size_to_load = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lower_32_bits(data),
+ .data_size = lsfw->app_resident_data_size,
+ .overlay_dma_base = lower_32_bits(code),
+ .argc = 1,
+ .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
+ .code_dma_base1 = upper_32_bits(code),
+ .data_dma_base1 = upper_32_bits(data),
+ .overlay_dma_base1 = upper_32_bits(code),
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gm20b_pmu_acr = {
+ .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+ .bld_size = sizeof(struct loader_config),
+ .bld_write = gm20b_pmu_acr_bld_write,
+ .bld_patch = gm20b_pmu_acr_bld_patch,
+ .boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+};
+
+static int
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_init_wpr_region_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ struct nvkm_pmu *pmu = priv;
+ struct nvkm_subdev *subdev = &pmu->subdev;
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR WPR init failure: %d\n",
+ msg->error_code);
+ return -EINVAL;
+ }
+
+ nvkm_debug(subdev, "ACR WPR init complete\n");
+ complete_all(&pmu->wpr_ready);
+ return 0;
+}
+
+static int
+gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
+{
+ struct nv_pmu_acr_init_wpr_region_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
+ .region_id = 1,
+ .wpr_offset = 0,
+ };
+
+ return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gm20b_pmu_acr_init_wpr_callback, pmu, 0);
+}
+
+int
+gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
+{
+ struct nv_pmu_init_msg msg;
+ int ret;
+
+ ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
+ msg.msg_type != NV_PMU_INIT_MSG_INIT)
+ return -EINVAL;
+
+ nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
+ msg.queue_info[0].offset,
+ msg.queue_info[0].size);
+ nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
+ msg.queue_info[1].offset,
+ msg.queue_info[1].size);
+ nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
+ msg.queue_info[4].offset,
+ msg.queue_info[4].size);
+ return gm20b_pmu_acr_init_wpr(pmu);
+}
+
+void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
- if (!pmu->queue) {
- nvkm_warn(&pmu->subdev,
- "recv function called while no firmware set!\n");
- return;
+ if (!pmu->initmsg_received) {
+ int ret = pmu->func->initmsg(pmu);
+ if (ret) {
+ nvkm_error(&pmu->subdev,
+ "error parsing init message: %d\n", ret);
+ return;
+ }
+
+ pmu->initmsg_received = true;
}
- nvkm_msgqueue_recv(pmu->queue);
+ nvkm_falcon_msgq_recv(pmu->msgq);
}
static const struct nvkm_pmu_func
gm20b_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
+ .initmsg = gm20b_pmu_initmsg,
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
+
int
-gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
- int ret;
+ return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
+ NVKM_ACR_LSF_PMU, "pmu/",
+ ver, fwif->acr);
+}
- ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
- if (ret)
- return ret;
+static const struct nvkm_pmu_fwif
+gm20b_pmu_fwif[] = {
+ { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+ {}
+};
- return 0;
+int
+gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
index e210cd6af816..09e05db21ff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -25,12 +25,19 @@
static const struct nvkm_pmu_func
gp100_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gp100_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gp100_pmu },
+ {}
+};
+
int
gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 98c7a2a8afc4..262b8a3dd507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gp102_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gp102_pmu_enabled,
.reset = gp102_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gp102_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gp102_pmu },
+ {}
+};
+
int
gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
new file mode 100644
index 000000000000..5b81c7320479
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
+ struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ return msg->falcon_mask;
+}
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+ struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS,
+ .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES,
+ .falcon_mask = mask,
+ .wpr_lo = 0, /*XXX*/
+ .wpr_hi = 0, /*XXX*/
+ };
+ int ret;
+
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gp10b_pmu_acr_bootstrap_multiple_falcons_cb,
+ &pmu->subdev, msecs_to_jiffies(1000));
+ if (ret >= 0) {
+ if (ret != cmd.falcon_mask)
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_acr_lsf_func
+gp10b_pmu_acr = {
+ .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+ .bld_size = sizeof(struct loader_config),
+ .bld_write = gm20b_pmu_acr_bld_write,
+ .bld_patch = gm20b_pmu_acr_bld_patch,
+ .boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+ .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+};
+
+static const struct nvkm_pmu_func
+gp10b_pmu = {
+ .flcn = &gt215_pmu_flcn,
+ .enabled = gf100_pmu_enabled,
+ .intr = gt215_pmu_intr,
+ .recv = gm20b_pmu_recv,
+ .initmsg = gm20b_pmu_initmsg,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
+
+static const struct nvkm_pmu_fwif
+gp10b_pmu_fwif[] = {
+ { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+ {}
+};
+
+int
+gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index e04216daea58..88b909913ff9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu)
return 0;
}
+const struct nvkm_falcon_func
+gt215_pmu_flcn = {
+ .debug = 0xc08,
+ .fbif = 0xe00,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0x4a0, 0x4b0, 4 },
+ .msgq = { 0x4c8, 0x4cc, 0 },
+};
+
static const struct nvkm_pmu_func
gt215_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
@@ -256,8 +275,14 @@ gt215_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gt215_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gt215_pmu },
+ {}
+};
+
int
gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 26d73f9cd6d3..f470859244de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -4,13 +4,12 @@
#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
#include <subdev/pmu.h>
#include <subdev/pmu/fuc/os.h>
-
-int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *,
- int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
- int index, struct nvkm_pmu **);
+enum nvkm_acr_lsf_id;
+struct nvkm_acr_lsfw;
struct nvkm_pmu_func {
+ const struct nvkm_falcon_func *flcn;
+
struct {
u32 *data;
u32 size;
@@ -29,9 +28,11 @@ struct nvkm_pmu_func {
int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
u32 message, u32 data0, u32 data1);
void (*recv)(struct nvkm_pmu *);
+ int (*initmsg)(struct nvkm_pmu *);
void (*pgob)(struct nvkm_pmu *, bool);
};
+extern const struct nvkm_falcon_func gt215_pmu_flcn;
int gt215_pmu_init(struct nvkm_pmu *);
void gt215_pmu_fini(struct nvkm_pmu *);
void gt215_pmu_intr(struct nvkm_pmu *);
@@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
+
+void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
+void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+int gm20b_pmu_acr_boot(struct nvkm_falcon *);
+int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+void gm20b_pmu_recv(struct nvkm_pmu *);
+int gm20b_pmu_initmsg(struct nvkm_pmu *);
+
+struct nvkm_pmu_fwif {
+ int version;
+ int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *);
+ const struct nvkm_pmu_func *func;
+ const struct nvkm_acr_lsf_func *acr;
+};
+
+int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+ int index, struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+ int index, struct nvkm_pmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
deleted file mode 100644
index f3dee2693c79..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/secboot/base.o
-nvkm-y += nvkm/subdev/secboot/hs_ucode.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
-nvkm-y += nvkm/subdev/secboot/acr.o
-nvkm-y += nvkm/subdev/secboot/acr_r352.o
-nvkm-y += nvkm/subdev/secboot/acr_r361.o
-nvkm-y += nvkm/subdev/secboot/acr_r364.o
-nvkm-y += nvkm/subdev/secboot/acr_r367.o
-nvkm-y += nvkm/subdev/secboot/acr_r370.o
-nvkm-y += nvkm/subdev/secboot/acr_r375.o
-nvkm-y += nvkm/subdev/secboot/gm200.o
-nvkm-y += nvkm/subdev/secboot/gm20b.o
-nvkm-y += nvkm/subdev/secboot/gp102.o
-nvkm-y += nvkm/subdev/secboot/gp108.o
-nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
deleted file mode 100644
index dc80985cf093..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-
-#include <core/firmware.h>
-
-/**
- * Convenience function to duplicate a firmware file in memory and check that
- * it has the required minimum size.
- */
-void *
-nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
- size_t min_size)
-{
- const struct firmware *fw;
- void *blob;
- int ret;
-
- ret = nvkm_firmware_get(subdev, name, &fw);
- if (ret)
- return ERR_PTR(ret);
- if (fw->size < min_size) {
- nvkm_error(subdev, "%s is smaller than expected size %zu\n",
- name, min_size);
- nvkm_firmware_put(fw);
- return ERR_PTR(-EINVAL);
- }
- blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- if (!blob)
- return ERR_PTR(-ENOMEM);
-
- return blob;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
deleted file mode 100644
index 73a2ac81ac69..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_H__
-#define __NVKM_SECBOOT_ACR_H__
-
-#include "priv.h"
-
-struct nvkm_acr;
-
-/**
- * struct nvkm_acr_func - properties and functions specific to an ACR
- *
- * @dtor: destroy the ACR instance
- * @oneinit: one-time initialization on the given secboot device
- * @fini: shut the ACR down; the bool indicates a suspend
- * @load: make the ACR ready to run on the given secboot device
- * @reset: reset the specified falcons
- */
-struct nvkm_acr_func {
- void (*dtor)(struct nvkm_acr *);
- int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
- int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
- int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
- struct nvkm_gpuobj *, u64);
- int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
-};
-
-/**
- * struct nvkm_acr - instance of an ACR
- *
- * @boot_falcon: ID of the falcon that will perform secure boot
- * @managed_falcons: bitfield of falcons managed by this ACR
- * @optional_falcons: bitfield of falcons we can live without
- */
-struct nvkm_acr {
- const struct nvkm_acr_func *func;
- const struct nvkm_subdev *subdev;
-
- enum nvkm_secboot_falcon boot_falcon;
- unsigned long managed_falcons;
- unsigned long optional_falcons;
-};
-
-void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
-
-struct nvkm_acr *acr_r352_new(unsigned long);
-struct nvkm_acr *acr_r361_new(unsigned long);
-struct nvkm_acr *acr_r364_new(unsigned long);
-struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
-
-#endif
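
The managed_falcons and optional_falcons members above are plain bitfields indexed by enum nvkm_secboot_falcon. A minimal sketch of how the removed implementations walked them, mirroring the for_each_set_bit() loop in acr_r352_prepare_ls_blob() below (error handling trimmed):

	unsigned long managed = acr->managed_falcons;
	struct ls_ucode_img *img;
	int falcon_id;

	for_each_set_bit(falcon_id, &managed, NVKM_SECBOOT_FALCON_END) {
		img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
		if (IS_ERR(img)) {
			/* Optional falcons are simply skipped on failure */
			if (acr->optional_falcons & BIT(falcon_id))
				continue;
			return PTR_ERR(img);
		}
		/* ... track img for inclusion in the LS blob ... */
	}
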
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
deleted file mode 100644
index 4fd4cfe459b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r352.h"
-#include "hs_ucode.h"
-
-#include <core/gpuobj.h>
-#include <core/firmware.h>
-#include <engine/falcon.h>
-#include <subdev/pmu.h>
-#include <core/msgqueue.h>
-#include <engine/sec2.h>
-
-/**
- * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- *                    located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size: the size of the non-secure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- *                located. The offset must be a multiple of 256 to help perf
- * @sec_code_size: the size of the secure code part.
- * @code_entry_point: code entry point which will be invoked by BL after
- *                    code is loaded.
- * @data_dma_base: 256B-aligned Physical FB Address where data is located.
- *                 (falcon's $xdbase register)
- * @data_size: size of data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r352_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- u32 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
- u32 code_dma_base1;
- u32 data_dma_base1;
-};
-
-/**
- * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
- */
-static void
-acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r352_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = (base + pdesc->app_resident_code_offset) >> 8;
- addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = lower_32_bits(addr_code);
- desc->code_dma_base1 = upper_32_bits(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = lower_32_bits(addr_data);
- desc->data_dma_base1 = upper_32_bits(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-
-/**
- * struct hsflcn_acr_desc - data section of the HS firmware
- *
- * This header is to be copied at the beginning of DMEM by the HS bootloader.
- *
- * @signature: signature of ACR ucode
- * @wpr_region_id: region ID holding the WPR header and its details
- * @wpr_offset: offset from the WPR region holding the wpr header
- * @regions: region descriptors
- * @ucode_blob_size: size of the LS blob
- * @ucode_blob_base: FB location of the LS blob
- */
-struct hsflcn_acr_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_mem_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-
-/*
- * Low-secure blob creation
- */
-
-/**
- * struct acr_r352_lsf_lsb_header - LS firmware header
- * @signature: signature to verify the firmware against
- * @ucode_off: offset of the ucode blob in the WPR region. The ucode
- * blob contains the bootloader, code and data of the
- * LS falcon
- * @ucode_size: size of the ucode blob, including bootloader
- * @data_size: size of the ucode blob data
- * @bl_code_size: size of the bootloader code
- * @bl_imem_off: offset in imem of the bootloader
- * @bl_data_off: offset of the bootloader data in WPR region
- * @bl_data_size: size of the bootloader data
- * @app_code_off: offset of the app code relative to ucode_off
- * @app_code_size: size of the app code
- * @app_data_off: offset of the app data relative to ucode_off
- * @app_data_size: size of the app data
- * @flags: flags for the secure bootloader
- *
- * This structure is written into the WPR region for each managed falcon. Each
- * instance is referenced by the lsb_offset member of the corresponding
- * lsf_wpr_header.
- */
-struct acr_r352_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
- * @dbg_keys: signature to use in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dbg_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-};
-
-/**
- * struct acr_r352_lsf_wpr_header - LS blob WPR Header
- * @falcon_id: LS falcon ID
- * @lsb_offset: offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
- * @lazy_bootstrap: skip bootstrapping by ACR
- * @status: bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an entry whose falcon_id
- * is LSF_FALCON_ID_INVALID.
- */
-struct acr_r352_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-};
-
-/**
- * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
- */
-struct ls_ucode_img_r352 {
- struct ls_ucode_img base;
-
- const struct acr_r352_lsf_func *func;
-
- struct acr_r352_lsf_wpr_header wpr_header;
- struct acr_r352_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
-
-/**
- * acr_r352_ls_ucode_img_load() - create an ls_ucode_img and load it
- */
-struct ls_ucode_img *
-acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
- const struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon_id)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
- struct ls_ucode_img_r352 *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- img->base.falcon_id = falcon_id;
-
- ret = func->load(sb, func->version_max, &img->base);
- if (ret < 0) {
- kfree(img->base.ucode_data);
- kfree(img->base.sig);
- kfree(img);
- return ERR_PTR(ret);
- }
-
- img->func = func->version[ret];
-
- /* Check that the signature size matches our expectations... */
- if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
- nvkm_error(subdev, "invalid signature size for %s falcon!\n",
- nvkm_secboot_falcon_name[falcon_id]);
- return ERR_PTR(-EINVAL);
- }
-
- /* Copy signature to the right place */
- memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
- /* not needed? the signature should already have the right value */
- img->lsb_header.signature.falcon_id = falcon_id;
-
- return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-/**
- * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
- * @acr: ACR to use
- * @img: image to generate for
- * @offset: offset in the WPR region where this image starts
- *
- * Allocate space in the WPR area from offset and write the WPR and LSB headers
- * accordingly.
- *
- * Return: offset at the end of this image.
- */
-static u32
-acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
- struct ls_ucode_img_r352 *img, u32 offset)
-{
- struct ls_ucode_img *_img = &img->base;
- struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
- struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_lsf_func *func = img->func;
-
- /* Fill WPR header */
- whdr->falcon_id = _img->falcon_id;
- whdr->bootstrap_owner = acr->base.boot_falcon;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
-	/* Skip bootstrapping falcons started by someone other than the ACR */
- if (acr->lazy_bootstrap & BIT(_img->falcon_id))
- whdr->lazy_bootstrap = 1;
-
- /* Align, save off, and include an LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(*lhdr);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- _img->ucode_off = lhdr->ucode_off = offset;
- offset += _img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
-	 * Though the BL is located at offset 0 of the image, the VA
-	 * is different to make sure that it doesn't collide with the
-	 * actual OS VA range
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = func->lhdr_flags;
- if (_img->falcon_id == acr->base.boot_falcon)
- lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
- /* Align and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
-
-/**
- * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
- */
-int
-acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
- struct ls_ucode_img_r352 *img;
- struct list_head *l;
- u32 count = 0;
- u32 offset;
-
- /* Count the number of images to manage */
- list_for_each(l, imgs)
- count++;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(img->wpr_header) * (count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, imgs, base.node) {
- offset = acr_r352_ls_img_fill_headers(acr, img, offset);
- }
-
- return offset;
-}
-
-/**
- * acr_r352_ls_write_wpr - write the WPR blob contents
- */
-int
-acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
- struct ls_ucode_img *_img;
- u32 pos = 0;
- u32 max_desc_size = 0;
- u8 *gdesc;
-
- /* Figure out how large we need gdesc to be. */
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
- }
-
- gdesc = kmalloc(max_desc_size, GFP_KERNEL);
- if (!gdesc)
- return -ENOMEM;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- memset(gdesc, 0, ls_func->bl_desc_size);
- ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
- gdesc, ls_func->bl_desc_size);
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- _img->ucode_data, _img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- kfree(gdesc);
-
- return 0;
-}
-
-/* Both size and address of WPR need to be 256K-aligned */
-#define WPR_ALIGNMENT 0x40000
-/**
- * acr_r352_prepare_ls_blob() - prepare the LS blob
- *
- * For each securely managed falcon, load the FW, signatures and bootloaders and
- * prepare a ucode blob. Then, compute the offsets in the WPR region for each
- * blob, and finally write the headers and ucode blobs into a GPU object that
- * will be copied into the WPR region by the HS firmware.
- */
-static int
-acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- struct list_head imgs;
- struct ls_ucode_img *img, *t;
- unsigned long managed_falcons = acr->base.managed_falcons;
- u64 wpr_addr = sb->wpr_addr;
- u32 wpr_size = sb->wpr_size;
- int managed_count = 0;
- u32 image_wpr_size, ls_blob_size;
- int falcon_id;
- int ret;
-
- INIT_LIST_HEAD(&imgs);
-
- /* Load all LS blobs */
- for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- struct ls_ucode_img *img;
-
- img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
- if (IS_ERR(img)) {
- if (acr->base.optional_falcons & BIT(falcon_id)) {
- managed_falcons &= ~BIT(falcon_id);
- nvkm_info(subdev, "skipping %s falcon...\n",
- nvkm_secboot_falcon_name[falcon_id]);
- continue;
- }
- ret = PTR_ERR(img);
- goto cleanup;
- }
-
- list_add_tail(&img->node, &imgs);
- managed_count++;
- }
-
- /* Commit the actual list of falcons we will manage from now on */
- acr->base.managed_falcons = managed_falcons;
-
- /*
-	 * If the boot falcon has a firmware, let it manage the bootstrap of other
- * falcons.
- */
- if (acr->func->ls_func[acr->base.boot_falcon] &&
- (managed_falcons & BIT(acr->base.boot_falcon))) {
- for_each_set_bit(falcon_id, &managed_falcons,
- NVKM_SECBOOT_FALCON_END) {
- if (falcon_id == acr->base.boot_falcon)
- continue;
-
- acr->lazy_bootstrap |= BIT(falcon_id);
- }
- }
-
- /*
- * Fill the WPR and LSF headers with the right offsets and compute
- * required WPR size
- */
- image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
- image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
-
- ls_blob_size = image_wpr_size;
-
- /*
- * If we need a shadow area, allocate twice the size and use the
- * upper half as WPR
- */
- if (wpr_size == 0 && acr->func->shadow_blob)
- ls_blob_size *= 2;
-
- /* Allocate GPU object that will contain the WPR region */
- ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
- false, NULL, &acr->ls_blob);
- if (ret)
- goto cleanup;
-
- nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
- managed_count, image_wpr_size);
-
- /* If WPR address and size are not fixed, set them to fit the LS blob */
- if (wpr_size == 0) {
- wpr_addr = acr->ls_blob->addr;
- if (acr->func->shadow_blob)
- wpr_addr += acr->ls_blob->size / 2;
-
- wpr_size = image_wpr_size;
- /*
-		 * But if the WPR region is set by the bootloader, it is illegal for
-		 * the LS blob to be larger than this region.
- */
- } else if (image_wpr_size > wpr_size) {
- nvkm_error(subdev, "WPR region too small for FW blob!\n");
- nvkm_error(subdev, "required: %dB\n", image_wpr_size);
- nvkm_error(subdev, "available: %dB\n", wpr_size);
- ret = -ENOSPC;
- goto cleanup;
- }
-
- /* Write LS blob */
- ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
- if (ret)
- nvkm_gpuobj_del(&acr->ls_blob);
-
-cleanup:
- list_for_each_entry_safe(img, t, &imgs, node) {
- kfree(img->ucode_data);
- kfree(img->sig);
- kfree(img);
- }
-
- return ret;
-}
-
-
-
-
-void
-acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct hsflcn_acr_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = wpr_start + ls_blob->size;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-static void
-acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
- u64 addr_code, addr_data;
-
- addr_code = offset >> 8;
- addr_data = (offset + hdr->data_dma_base) >> 8;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->code_dma_base = lower_32_bits(addr_code);
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->data_dma_base = lower_32_bits(addr_data);
- bl_desc->data_size = hdr->data_size;
-}
-
-/**
- * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
- *
- * @acr: ACR to prepare the blob for
- * @sb: secure boot instance to prepare for
- * @fw: name of the HS firmware to load
- * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
- * @load_header: pointer to the load header to fill for this firmware
- * @patch: whether we should patch the HS descriptor (only for HS loaders)
- */
-static int
-acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
- const char *fw, struct nvkm_gpuobj **blob,
- struct hsf_load_header *load_header, bool patch)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- void *acr_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- struct hsf_load_header *load_hdr;
- void *acr_data;
- int ret;
-
- acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
- if (IS_ERR(acr_image))
- return PTR_ERR(acr_image);
-
- hsbin_hdr = acr_image;
- fw_hdr = acr_image + hsbin_hdr->header_offset;
- load_hdr = acr_image + fw_hdr->hdr_offset;
- acr_data = acr_image + hsbin_hdr->data_offset;
-
- /* Patch descriptor with WPR information? */
- if (patch) {
- struct hsflcn_acr_desc *desc;
-
- desc = acr_data + load_hdr->data_dma_base;
- acr->func->fixup_hs_desc(acr, sb, desc);
- }
-
- if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
- nvkm_error(subdev, "more apps (%d) than supported (%d)!",
- load_hdr->num_apps, ACR_R352_MAX_APPS);
- ret = -EINVAL;
- goto cleanup;
- }
- memcpy(load_header, load_hdr, sizeof(*load_header) +
- (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
-
- /* Create ACR blob and copy HS data to it */
- ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
- 0x1000, false, NULL, blob);
- if (ret)
- goto cleanup;
-
- nvkm_kmap(*blob);
- nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
- nvkm_done(*blob);
-
-cleanup:
- kfree(acr_image);
-
- return ret;
-}
-
-/**
- * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
- *
- * This includes the LS blob, HS ucode loading blob, and HS bootloader.
- *
- * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
- */
-int
-acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- int ret;
-
- /* Firmware already loaded? */
- if (acr->firmware_ok)
- return 0;
-
- /* Load and prepare the managed falcon's firmwares */
- ret = acr_r352_prepare_ls_blob(acr, sb);
- if (ret)
- return ret;
-
- /* Load the HS firmware that will load the LS firmwares */
- if (!acr->load_blob) {
- ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
- &acr->load_blob,
- &acr->load_bl_header, true);
- if (ret)
- return ret;
- }
-
- /* If the ACR region is dynamically programmed, we need an unload FW */
- if (sb->wpr_size == 0) {
- ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
- &acr->unload_blob,
- &acr->unload_bl_header, false);
- if (ret)
- return ret;
- }
-
- /* Load the HS firmware bootloader */
- if (!acr->hsbl_blob) {
- acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
- if (IS_ERR(acr->hsbl_blob)) {
- ret = PTR_ERR(acr->hsbl_blob);
- acr->hsbl_blob = NULL;
- return ret;
- }
-
- if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
- acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
- "acr/unload_bl", 0);
- if (IS_ERR(acr->hsbl_unload_blob)) {
- ret = PTR_ERR(acr->hsbl_unload_blob);
- acr->hsbl_unload_blob = NULL;
- return ret;
- }
- } else {
- acr->hsbl_unload_blob = acr->hsbl_blob;
- }
- }
-
- acr->firmware_ok = true;
- nvkm_debug(&sb->subdev, "LS blob successfully created\n");
-
- return 0;
-}
-
-/**
- * acr_r352_load() - prepare the HS falcon to run the specified blob, mapped
- * at the given offset.
- *
- * Returns the start address to use, or a negative error value.
- */
-static int
-acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
- struct nvkm_gpuobj *blob, u64 offset)
-{
- struct acr_r352 *acr = acr_r352(_acr);
- const u32 bl_desc_size = acr->func->hs_bl_desc_size;
- const struct hsf_load_header *load_hdr;
- struct fw_bin_header *bl_hdr;
- struct fw_bl_desc *hsbl_desc;
- void *bl, *blob_data, *hsbl_code, *hsbl_data;
- u32 code_size;
- u8 *bl_desc;
-
- bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
- if (!bl_desc)
- return -ENOMEM;
-
- /* Find the bootloader descriptor for our blob and copy it */
- if (blob == acr->load_blob) {
- load_hdr = &acr->load_bl_header;
- bl = acr->hsbl_blob;
- } else if (blob == acr->unload_blob) {
- load_hdr = &acr->unload_bl_header;
- bl = acr->hsbl_unload_blob;
- } else {
- nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
- kfree(bl_desc);
- return -EINVAL;
- }
-
- bl_hdr = bl;
- hsbl_desc = bl + bl_hdr->header_offset;
- blob_data = bl + bl_hdr->data_offset;
- hsbl_code = blob_data + hsbl_desc->code_off;
- hsbl_data = blob_data + hsbl_desc->data_off;
- code_size = ALIGN(hsbl_desc->code_size, 256);
-
- /*
- * Copy HS bootloader data
- */
- nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
-
- /* Copy HS bootloader code to end of IMEM */
- nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
- code_size, hsbl_desc->start_tag, 0, false);
-
- /* Generate the BL header */
- acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
-
- /*
- * Copy HS BL header where the HS descriptor expects it to be
- */
- nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
- bl_desc_size, 0);
-
- kfree(bl_desc);
- return hsbl_desc->start_tag << 8;
-}
-
-static int
-acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- int i;
-
- /* Run the unload blob to unprotect the WPR region */
- if (acr->unload_blob && sb->wpr_set) {
- int ret;
-
- nvkm_debug(subdev, "running HS unload blob\n");
- ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
- if (ret < 0)
- return ret;
- /*
-		 * The unload blob will return this error code - it is not an error,
-		 * and this is the expected behavior on RM as well.
- */
- if (ret && ret != 0x1d) {
- nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
- return -EINVAL;
- }
- nvkm_debug(subdev, "HS unload blob completed\n");
- }
-
- for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
- acr->falcon_state[i] = NON_SECURE;
-
- sb->wpr_set = false;
-
- return 0;
-}
-
-/**
- * acr_r352_wpr_is_set() - check whether the WPR region has indeed been set
- * by the ACR firmware, and matches where it should be.
- */
-static bool
-acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- const struct nvkm_device *device = subdev->device;
- u64 wpr_lo, wpr_hi;
- u64 wpr_range_lo, wpr_range_hi;
-
- nvkm_wr32(device, 0x100cd4, 0x2);
- wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
- wpr_lo <<= 8;
- nvkm_wr32(device, 0x100cd4, 0x3);
- wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
- wpr_hi <<= 8;
-
- if (sb->wpr_size != 0) {
- wpr_range_lo = sb->wpr_addr;
- wpr_range_hi = wpr_range_lo + sb->wpr_size;
- } else {
- wpr_range_lo = acr->ls_blob->addr;
- wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
- }
-
- return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
- wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
-}
-
-static int
-acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- unsigned long managed_falcons = acr->base.managed_falcons;
- int falcon_id;
- int ret;
-
- if (sb->wpr_set)
- return 0;
-
- /* Make sure all blobs are ready */
- ret = acr_r352_load_blobs(acr, sb);
- if (ret)
- return ret;
-
- nvkm_debug(subdev, "running HS load blob\n");
- ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
- /* clear halt interrupt */
- nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
- sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
- if (ret < 0) {
- return ret;
- } else if (ret > 0) {
- nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
- return -EINVAL;
- }
- nvkm_debug(subdev, "HS load blob completed\n");
- /* WPR must be set at this point */
- if (!sb->wpr_set) {
- nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
- return -EINVAL;
- }
-
- /* Run LS firmwares post_run hooks */
- for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[falcon_id];
-
- if (func->post_run) {
- ret = func->post_run(&acr->base, sb);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/**
- * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
- *
- * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
- * disabled. This has the effect of making all managed falcons ready-to-run.
- */
-static int
-acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
- unsigned long falcon_mask)
-{
- int falcon;
- int ret;
-
- /*
- * Perform secure boot each time we are called on FECS. Since only FECS
- * and GPCCS are managed and started together, this ought to be safe.
- */
- if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
- goto end;
-
- ret = acr_r352_shutdown(acr, sb);
- if (ret)
- return ret;
-
- ret = acr_r352_bootstrap(acr, sb);
- if (ret)
- return ret;
-
-end:
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- acr->falcon_state[falcon] = RESET;
- }
- return 0;
-}
-
-/*
- * acr_r352_reset() - execute secure boot from the prepared state
- *
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
- */
-static int
-acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- unsigned long falcon_mask)
-{
- struct acr_r352 *acr = acr_r352(_acr);
- struct nvkm_msgqueue *queue;
- int falcon;
- bool wpr_already_set = sb->wpr_set;
- int ret;
-
- /* Make sure secure boot is performed */
- ret = acr_r352_bootstrap(acr, sb);
- if (ret)
- return ret;
-
- /* No PMU interface? */
- if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
- /* Redo secure boot entirely if it was already done */
- if (wpr_already_set)
- return acr_r352_reset_nopmu(acr, sb, falcon_mask);
-		/* Else return the result of the initial invocation */
- else
- return ret;
- }
-
- switch (_acr->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- queue = sb->subdev.device->pmu->queue;
- break;
- case NVKM_SECBOOT_FALCON_SEC2:
- queue = sb->subdev.device->sec2->queue;
- break;
- default:
- return -EINVAL;
- }
-
- /* Otherwise just ask the LS firmware to reset the falcon */
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "resetting %s falcon\n",
- nvkm_secboot_falcon_name[falcon]);
- ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
- if (ret) {
- nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
- return ret;
- }
- nvkm_debug(&sb->subdev, "falcon reset done\n");
-
- return 0;
-}
-
-static int
-acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
-{
- struct acr_r352 *acr = acr_r352(_acr);
-
- return acr_r352_shutdown(acr, sb);
-}
-
-static void
-acr_r352_dtor(struct nvkm_acr *_acr)
-{
- struct acr_r352 *acr = acr_r352(_acr);
-
- nvkm_gpuobj_del(&acr->unload_blob);
-
- if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
- kfree(acr->hsbl_unload_blob);
- kfree(acr->hsbl_blob);
- nvkm_gpuobj_del(&acr->load_blob);
- nvkm_gpuobj_del(&acr->ls_blob);
-
- kfree(acr);
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r352_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r352_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r352_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r352_ls_gpccs_func_0,
- }
-};
-
-
-
-/**
- * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
- * @dma_idx: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * @code_size_total: total size of the code part in the ucode
- * @code_size_to_load: size of the code part to load in PMU IMEM.
- * @code_entry_point: entry point in the code.
- * @data_dma_base: Physical FB address where data part of ucode is located
- * @data_size: Total size of the data portion.
- * @overlay_dma_base: Physical FB address for resident code present in ucode
- * @argc: Total number of args
- * @argv: offset where args are copied into PMU's DMEM.
- *
- * Structure used by the PMU bootloader to load the rest of the code
- */
-struct acr_r352_pmu_bl_desc {
- u32 dma_idx;
- u32 code_dma_base;
- u32 code_size_total;
- u32 code_size_to_load;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
- u32 overlay_dma_base;
- u32 argc;
- u32 argv;
- u16 code_dma_base1;
- u16 data_dma_base1;
- u16 overlay_dma_base1;
-};
-
-/**
- * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for a PMU LS image
- */
-static void
-acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r352_pmu_bl_desc *desc = _desc;
- u64 base;
- u64 addr_code;
- u64 addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = (base + pdesc->app_resident_code_offset) >> 8;
- addr_data = (base + pdesc->app_resident_data_offset) >> 8;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = lower_32_bits(addr_code);
- desc->code_dma_base1 = upper_32_bits(addr_code);
- desc->code_size_total = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = lower_32_bits(addr_data);
- desc->data_dma_base1 = upper_32_bits(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = lower_32_bits(addr_code);
- desc->overlay_dma_base1 = upper_32_bits(addr_code);
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r352_ls_pmu_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r352_func = {
- .fixup_hs_desc = acr_r352_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
- },
-};
-
-static const struct nvkm_acr_func
-acr_r352_base_func = {
- .dtor = acr_r352_dtor,
- .fini = acr_r352_fini,
- .load = acr_r352_load,
- .reset = acr_r352_reset,
-};
-
-struct nvkm_acr *
-acr_r352_new_(const struct acr_r352_func *func,
- enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- struct acr_r352 *acr;
- int i;
-
- /* Check that all requested falcons are supported */
- for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- if (!func->ls_func[i])
- return ERR_PTR(-ENOTSUPP);
- }
-
- acr = kzalloc(sizeof(*acr), GFP_KERNEL);
- if (!acr)
- return ERR_PTR(-ENOMEM);
-
- acr->base.boot_falcon = boot_falcon;
- acr->base.managed_falcons = managed_falcons;
- acr->base.func = &acr_r352_base_func;
- acr->func = func;
-
- return &acr->base;
-}
-
-struct nvkm_acr *
-acr_r352_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
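
The WPR layout produced by the removed acr_r352_ls_fill_headers()/acr_r352_ls_write_wpr() pair can be replayed with a small standalone program. The alignment values are the LSF_* macros from the file above; the header sizes are summed from the deleted structures (20 bytes for acr_r352_lsf_wpr_header, 0x7c for acr_r352_lsf_lsb_header), while the image sizes fed to it are made-up examples:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

	int main(void)
	{
		/* Two example LS images (sizes are illustrative only) */
		const uint32_t ucode_size[]   = { 0x8000, 0x6200 };
		const uint32_t bl_desc_size[] = { 0x28, 0x28 };
		const uint32_t count = 2;
		const uint32_t wpr_hdr_size = 5 * sizeof(uint32_t); /* lsf_wpr_header */
		const uint32_t lsb_hdr_size = 0x7c;                 /* lsf_lsb_header */
		uint32_t offset = wpr_hdr_size * (count + 1);	/* +1 = terminator */
		uint32_t i;

		for (i = 0; i < count; i++) {
			offset = ALIGN_UP(offset, 256);		/* LSF_LSB_HEADER_ALIGN */
			printf("img %u: lsb_offset  = 0x%x\n", i, offset);
			offset += lsb_hdr_size;

			offset = ALIGN_UP(offset, 4096);	/* LSF_UCODE_DATA_ALIGN */
			printf("img %u: ucode_off   = 0x%x\n", i, offset);
			offset += ucode_size[i];

			offset = ALIGN_UP(offset, 256);		/* LSF_BL_DATA_ALIGN */
			printf("img %u: bl_data_off = 0x%x\n", i, offset);
			offset += ALIGN_UP(bl_desc_size[i], 256); /* LSF_BL_DATA_SIZE_ALIGN */
		}
		printf("image_wpr_size before 256K alignment: 0x%x\n", offset);
		return 0;
	}

The array of WPR headers at the base is terminated by writing NVKM_SECBOOT_FALCON_INVALID as the falcon_id of the extra entry, exactly as acr_r352_ls_write_wpr() did above.
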
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
deleted file mode 100644
index e516cab849dd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_R352_H__
-#define __NVKM_SECBOOT_ACR_R352_H__
-
-#include "acr.h"
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-
-struct ls_ucode_img;
-
-#define ACR_R352_MAX_APPS 8
-
-#define LSF_FLAG_LOAD_CODE_AT_0 1
-#define LSF_FLAG_DMACTL_REQ_CTX 4
-#define LSF_FLAG_FORCE_PRIV_LOAD 8
-
-static inline u32
-hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
-{
- return hdr->apps[app];
-}
-
-static inline u32
-hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
-{
- return hdr->apps[hdr->num_apps + app];
-}
-
-/**
- * struct acr_r352_lsf_func - manages a specific LS firmware version
- *
- * @generate_bl_desc: function called on a block of bl_desc_size to generate the
- * proper bootloader descriptor for this LS firmware
- * @bl_desc_size: size of the bootloader descriptor
- * @lhdr_flags: LS flags
- */
-struct acr_r352_lsf_func {
- void (*generate_bl_desc)(const struct nvkm_acr *,
- const struct ls_ucode_img *, u64, void *);
- u32 bl_desc_size;
- u32 lhdr_flags;
-};
-
-/**
- * struct acr_r352_ls_func - manages a single LS falcon
- *
- * @load: load the external firmware into a ls_ucode_img
- * @post_run: hook called right after the ACR is executed
- */
-struct acr_r352_ls_func {
- int (*load)(const struct nvkm_secboot *, int maxver,
- struct ls_ucode_img *);
- int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
- int version_max;
- const struct acr_r352_lsf_func *version[];
-};
-
-struct acr_r352;
-
-/**
- * struct acr_r352_func - manages nuances between ACR versions
- *
- * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
- * the proper HS bootloader descriptor
- * @hs_bl_desc_size: size of the HS bootloader descriptor
- */
-struct acr_r352_func {
- void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
- u64);
- void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
- u32 hs_bl_desc_size;
- bool shadow_blob;
-
- struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
- int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
- int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-
- const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
-};
-
-/**
- * struct acr_r352 - ACR data for driver release 352 (and beyond)
- */
-struct acr_r352 {
- struct nvkm_acr base;
- const struct acr_r352_func *func;
-
- /*
-	 * HS FW - locks the WPR region (dGPU only) and loads the LS FWs.
-	 * On Tegra, the HS FW copies the LS blob into the fixed WPR instead.
- */
- struct nvkm_gpuobj *load_blob;
- struct {
- struct hsf_load_header load_bl_header;
- u32 __load_apps[ACR_R352_MAX_APPS * 2];
- };
-
- /* HS FW - unlock WPR region (dGPU only) */
- struct nvkm_gpuobj *unload_blob;
- struct {
- struct hsf_load_header unload_bl_header;
- u32 __unload_apps[ACR_R352_MAX_APPS * 2];
- };
-
- /* HS bootloader */
- void *hsbl_blob;
-
- /* HS bootloader for unload blob, if using a different falcon */
- void *hsbl_unload_blob;
-
- /* LS FWs, to be loaded by the HS ACR */
- struct nvkm_gpuobj *ls_blob;
-
- /* Firmware already loaded? */
- bool firmware_ok;
-
- /* Falcons to lazy-bootstrap */
- u32 lazy_bootstrap;
-
- /* To keep track of the state of all managed falcons */
- enum {
-		/* In non-secure state, no firmware loaded, no privileges */
- NON_SECURE = 0,
- /* In low-secure mode and ready to be started */
- RESET,
- /* In low-secure mode and running */
- RUNNING,
- } falcon_state[NVKM_SECBOOT_FALCON_END];
-};
-#define acr_r352(acr) container_of(acr, struct acr_r352, base)
-
-struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
- enum nvkm_secboot_falcon, unsigned long);
-
-struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
-int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-
-void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
-
-#endif
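
The hsf_load_header_app_off()/hsf_load_header_app_size() helpers above imply that the apps[] array packs all application offsets first, followed by all application sizes (hsf_load_header itself is defined outside this diff, so that layout is an inference from the helpers). A standalone illustration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* apps[0..num_apps-1] = offsets, apps[num_apps..2*num_apps-1] = sizes */
		const uint32_t num_apps = 2;
		const uint32_t apps[] = { 0x100, 0x900,		/* offsets */
					  0x800, 0x300 };	/* sizes */
		uint32_t i;

		for (i = 0; i < num_apps; i++)
			printf("app %u: off=0x%x size=0x%x\n", i, apps[i],
			       apps[num_apps + i]);
		return 0;
	}
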
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
deleted file mode 100644
index f6b2d20d7fc3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-static void
-acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r361_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-void
-acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r361_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r361_ls_gpccs_func_0,
- }
-};
-
-struct acr_r361_pmu_bl_desc {
- u32 reserved;
- u32 dma_idx;
- struct flcn_u64 code_dma_base;
- u32 total_code_size;
- u32 code_size_to_load;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- struct flcn_u64 overlay_dma_base;
- u32 argc;
- u32 argv;
-};
-
-static void
-acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r361_pmu_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->total_code_size = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = u64_to_flcn64(addr_code);
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r361_ls_pmu_func_0,
- }
-};
-
-static void
-acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
- struct acr_r361_pmu_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- /* For some reason we should not add app_resident_code_offset here */
- addr_code = base;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = sec->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->total_code_size = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = u64_to_flcn64(addr_code);
- desc->argc = 1;
- /* args are stored at the beginning of EMEM */
- desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r361_ls_sec2_func_0 = {
- .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 0,
- .version = {
- &acr_r361_ls_sec2_func_0,
- }
-};
-
-
-const struct acr_r352_func
-acr_r361_func = {
- .fixup_hs_desc = acr_r352_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
- },
-};
-
-struct nvkm_acr *
-acr_r361_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
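
The main difference from r352 visible above is that r361 descriptors carry full 64-bit DMA bases as struct flcn_u64 via u64_to_flcn64(), where r352 stored >>8-shifted addresses split across code_dma_base/code_dma_base1. flcn_u64 and u64_to_flcn64() are defined outside this diff, so the stand-ins below are an assumption (a plain lo/hi split) used only to illustrate the difference:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed stand-ins for the real nvkm flcn_u64/u64_to_flcn64() */
	struct flcn_u64_demo { uint32_t lo, hi; };

	static struct flcn_u64_demo u64_to_flcn64_demo(uint64_t v)
	{
		struct flcn_u64_demo r = { (uint32_t)v, (uint32_t)(v >> 32) };
		return r;
	}

	int main(void)
	{
		uint64_t addr_code = 0x123456789ULL;	/* example FB address */
		struct flcn_u64_demo r361 = u64_to_flcn64_demo(addr_code);
		uint32_t r352_lo = (uint32_t)(addr_code >> 8);	/* r352: 256B units */
		uint32_t r352_hi = (uint32_t)(addr_code >> 40);

		printf("r361: lo=0x%08x hi=0x%08x\n", r361.lo, r361.hi);
		printf("r352: code_dma_base=0x%08x code_dma_base1=0x%08x\n",
		       r352_lo, r352_hi);
		return 0;
	}
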
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
deleted file mode 100644
index 38dec93779c8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R361_H__
-#define __NVKM_SECBOOT_ACR_R361_H__
-
-#include "acr_r352.h"
-
-/**
- * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- *                    located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size: the size of the non-secure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- *                located. The offset must be a multiple of 256 to help perf
- * @sec_code_size: the size of the secure code part.
- * @code_entry_point: code entry point which will be invoked by BL after
- *                    code is loaded.
- * @data_dma_base: 256B-aligned Physical FB Address where data is located.
- *                 (falcon's $xdbase register)
- * @data_size: size of data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r361_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
-};
-
-void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-
-extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
deleted file mode 100644
index 30cf04109991..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
- * parameter.
- */
-
-struct acr_r364_hsflcn_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_memory_range;
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- u32 shadow_mem_start_addr;
- } region_props[2];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-static void
-acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct acr_r364_hsflcn_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = ls_blob->addr + ls_blob->size;
-
- if (acr->func->shadow_blob)
- wpr_start += ls_blob->size / 2;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- if (acr->func->shadow_blob)
- desc->regions.region_props[0].shadow_mem_start_addr =
- ls_blob->addr >> 8;
- else
- desc->regions.region_props[0].shadow_mem_start_addr = 0;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-const struct acr_r352_func
-acr_r364_func = {
- .fixup_hs_desc = acr_r364_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- },
-};
-
-
-struct nvkm_acr *
-acr_r364_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
deleted file mode 100644
index 472ced29da7e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r367.h"
-#include "acr_r361.h"
-#include "acr_r370.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r367 ACR: the new LS signature format requires a rewrite of the LS firmware
- * and blob creation functions. The hsflcn_desc layout has also changed slightly.
- */
-
-#define LSF_LSB_DEPMAP_SIZE 11
-
-/**
- * struct acr_r367_lsf_lsb_header - LS firmware header
- *
- * See also struct acr_r352_lsf_lsb_header for documentation.
- */
-struct acr_r367_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
- * @dbg_keys: signature to use in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dbg_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- u32 supports_versioning;
- u32 version;
- u32 depmap_count;
- u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
- u8 kdf[16];
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-};
-
-/**
- * struct acr_r367_lsf_wpr_header - LS blob WPR Header
- *
- * See also struct acr_r352_lsf_wpr_header for documentation.
- */
-struct acr_r367_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 bin_version;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
-};
-
-/**
- * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
- */
-struct ls_ucode_img_r367 {
- struct ls_ucode_img base;
-
- const struct acr_r352_lsf_func *func;
-
- struct acr_r367_lsf_wpr_header wpr_header;
- struct acr_r367_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
-
-struct ls_ucode_img *
-acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
- const struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon_id)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
- struct ls_ucode_img_r367 *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- img->base.falcon_id = falcon_id;
-
- ret = func->load(sb, func->version_max, &img->base);
- if (ret < 0) {
- kfree(img->base.ucode_data);
- kfree(img->base.sig);
- kfree(img);
- return ERR_PTR(ret);
- }
-
- img->func = func->version[ret];
-
- /* Check that the signature size matches our expectations... */
- if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
- nvkm_error(subdev, "invalid signature size for %s falcon!\n",
- nvkm_secboot_falcon_name[falcon_id]);
- return ERR_PTR(-EINVAL);
- }
-
- /* Copy signature to the right place */
- memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
- /* not needed? the signature should already have the right value */
- img->lsb_header.signature.falcon_id = falcon_id;
-
- return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-static u32
-acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
- struct ls_ucode_img_r367 *img, u32 offset)
-{
- struct ls_ucode_img *_img = &img->base;
- struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
- struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_lsf_func *func = img->func;
-
- /* Fill WPR header */
- whdr->falcon_id = _img->falcon_id;
- whdr->bootstrap_owner = acr->base.boot_falcon;
- whdr->bin_version = lhdr->signature.version;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
- /* Skip bootstrapping falcons started by something other than the ACR */
- if (acr->lazy_bootstrap & BIT(_img->falcon_id))
- whdr->lazy_bootstrap = 1;
-
- /* Align, save off, and include the LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(*lhdr);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- _img->ucode_off = lhdr->ucode_off = offset;
- offset += _img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
- * Although the BL is located at offset 0 of the image, its VA is
- * different to make sure that it doesn't collide with the actual
- * OS VA range
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = func->lhdr_flags;
- if (_img->falcon_id == acr->base.boot_falcon)
- lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
- /* Align and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
-
-int
-acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
- struct ls_ucode_img_r367 *img;
- struct list_head *l;
- u32 count = 0;
- u32 offset;
-
- /* Count the number of images to manage */
- list_for_each(l, imgs)
- count++;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(img->wpr_header) * (count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, imgs, base.node) {
- offset = acr_r367_ls_img_fill_headers(acr, img, offset);
- }
-
- return offset;
-}
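
As a worked example of the layout arithmetic above, the standalone program below reproduces the same ALIGN() sequence for two hypothetical images. All sizes are invented (the LSB header size in particular is a placeholder); only the 256 B and 4 KiB alignment steps and the (count + 1) header array mirror the function.

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (uint64_t)(a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	const uint64_t wpr_hdr_sz = 24;    /* six u32s per WPR header entry */
	const uint64_t lsb_hdr_sz = 208;   /* placeholder LSB header size */
	const uint64_t bl_data_sz = 256;   /* BL descriptor, 256B-aligned */
	const uint64_t ucode_sz[2] = { 0x5000, 0x9400 };  /* invented */
	uint64_t off = wpr_hdr_sz * (2 + 1);  /* header array plus terminator */

	for (int i = 0; i < 2; i++) {
		off = ALIGN(off, 256);            /* LSB header */
		uint64_t lsb_off = off;
		off += lsb_hdr_sz;
		off = ALIGN(off, 4096);           /* ucode image */
		uint64_t ucode_off = off;
		off += ucode_sz[i];
		off = ALIGN(off, 256);            /* BL descriptor data */
		uint64_t bl_off = off;
		off += bl_data_sz;
		printf("img%d: lsb=0x%llx ucode=0x%llx bl=0x%llx\n", i,
		       (unsigned long long)lsb_off,
		       (unsigned long long)ucode_off,
		       (unsigned long long)bl_off);
	}
	printf("end of blob: 0x%llx\n", (unsigned long long)off);
	return 0;
}
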
-
-int
-acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
- struct ls_ucode_img *_img;
- u32 pos = 0;
- u32 max_desc_size = 0;
- u8 *gdesc;
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
- }
-
- gdesc = kmalloc(max_desc_size, GFP_KERNEL);
- if (!gdesc)
- return -ENOMEM;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- memset(gdesc, 0, ls_func->bl_desc_size);
- ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
- gdesc, ls_func->bl_desc_size);
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- _img->ucode_data, _img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- kfree(gdesc);
-
- return 0;
-}
-
-struct acr_r367_hsflcn_desc {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_memory_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- u32 shadow_mem_start_addr;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-void
-acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct acr_r367_hsflcn_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = ls_blob->addr + ls_blob->size;
-
- if (acr->func->shadow_blob)
- wpr_start += ls_blob->size / 2;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- if (acr->func->shadow_blob)
- desc->regions.region_props[0].shadow_mem_start_addr =
- ls_blob->addr >> 8;
- else
- desc->regions.region_props[0].shadow_mem_start_addr = 0;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-static const struct acr_r352_ls_func
-acr_r367_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 1,
- .version = {
- &acr_r361_ls_sec2_func_0,
- &acr_r370_ls_sec2_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r367_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
- },
-};
-
-struct nvkm_acr *
-acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
deleted file mode 100644
index e821d0fd6217..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <engine/falcon.h>
-#include <engine/sec2.h>
-
-static void
-acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r370_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r370_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r370_ls_gpccs_func_0,
- }
-};
-
-static void
-acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
- struct acr_r370_flcn_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- /* For some reason we should not add app_resident_code_offset here */
- addr_code = base;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = sec->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->argc = 1;
- /* args are stored at the beginning of EMEM */
- desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r370_ls_sec2_func_0 = {
- .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 0,
- .version = {
- &acr_r370_ls_sec2_func_0,
- }
-};
-
-void
-acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
-
-const struct acr_r352_func
-acr_r370_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
- },
-};
-
-struct nvkm_acr *
-acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
deleted file mode 100644
index 2efed6f995ad..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R370_H__
-#define __NVKM_SECBOOT_ACR_R370_H__
-
-#include "priv.h"
-struct hsf_load_header;
-
-/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
-struct acr_r370_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- u32 argc;
- u32 argv;
-};
-
-void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
-extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
deleted file mode 100644
index 8f0647766038..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-
-static void
-acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r370_flcn_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r375_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r375_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r375_ls_pmu_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r375_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
- },
-};
-
-struct nvkm_acr *
-acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
deleted file mode 100644
index ee29c6c11afd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Secure boot is the process by which NVIDIA-signed firmware is loaded into
- * some of the falcons of a GPU. For production devices this is the only way
- * for the firmware to access useful (but sensitive) registers.
- *
- * A Falcon microprocessor supporting advanced security modes can run in one of
- * three modes:
- *
- * - Non-secure (NS). In this mode, functionality is similar to Falcon
- * architectures before security modes were introduced (pre-Maxwell), but
- * capability is restricted. In particular, certain registers may be
- * inaccessible for reads and/or writes, and physical memory access may be
- * disabled (on certain Falcon instances). This is the only possible mode that
- * can be used if you don't have microcode cryptographically signed by NVIDIA.
- *
- * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
- * not possible to read or write any Falcon internal state or Falcon registers
- * from outside the Falcon (for example, from the host system). The only way
- * to enable this mode is by loading microcode that has been signed by NVIDIA.
- * (The loading process involves tagging the IMEM block as secure, writing the
- * signature into a Falcon register, and starting execution. The hardware will
- * validate the signature, and if valid, grant HS privileges.)
- *
- * - Light Secure (LS). In this mode, the microprocessor has more privileges
- * than NS but fewer than HS. Some of the microprocessor state is visible to
- * host software to ease debugging. The only way to enable this mode is by HS
- * microcode enabling LS mode. Some privileges available to HS mode are not
- * available here. LS mode is introduced in GM20x.
- *
- * Secure boot consists of temporarily switching an HS-capable falcon
- * (typically the PMU) into HS mode in order to validate the LS firmwares of
- * managed falcons, load them, and switch the managed falcons into LS mode.
- * Once secure boot completes, no falcon remains in HS mode.
- *
- * Secure boot requires a write-protected memory region (WPR) which can only be
- * written by the secure falcon. On dGPU, the driver sets up the WPR region in
- * video memory. On Tegra, it is set up by the bootloader and its location and
- * size written into memory controller registers.
- *
- * The secure boot process takes place as follows:
- *
- * 1) A LS blob is constructed that contains all the LS firmwares we want to
- * load, along with their signatures and bootloaders.
- *
- * 2) A HS blob (also called ACR) is created that contains the signed HS
- * firmware in charge of loading the LS firmwares into their respective
- * falcons.
- *
- * 3) The HS blob is loaded (via its own bootloader) and executed on the
- * HS-capable falcon. It authenticates itself, switches the secure falcon to
- * HS mode, and sets up the WPR region around the LS blob (dGPU) or copies
- * the LS blob into the WPR region (Tegra).
- *
- * 4) The LS blob is now secure from all external tampering. The HS falcon
- * checks the signatures of the LS firmwares and, if valid, switches the
- * managed falcons to LS mode and makes them ready to run the LS firmware.
- *
- * 5) The managed falcons remain in LS mode and can be started.
- *
- */
-
-#include "priv.h"
-#include "acr.h"
-
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-const char *
-nvkm_secboot_falcon_name[] = {
- [NVKM_SECBOOT_FALCON_PMU] = "PMU",
- [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
- [NVKM_SECBOOT_FALCON_FECS] = "FECS",
- [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
- [NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
- [NVKM_SECBOOT_FALCON_END] = "<invalid>",
-};
-/**
- * nvkm_secboot_reset() - reset the falcons specified in @falcon_mask
- */
-int
-nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
-{
- /* Unmanaged falcon? */
- if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
- nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
- return -EINVAL;
- }
-
- return sb->acr->func->reset(sb->acr, sb, falcon_mask);
-}
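
The managed-falcon test above is a plain bitmask subset check. A minimal standalone sketch, using illustrative falcon IDs rather than the kernel's enum values:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Illustrative IDs only; the real values come from enum nvkm_secboot_falcon. */
enum { FID_PMU = 0, FID_FECS = 2, FID_GPCCS = 3, FID_SEC2 = 4 };

/* Same test as nvkm_secboot_reset(): OR-ing the requested mask into the
 * managed set must not introduce any new bits. */
static bool all_managed(unsigned long request, unsigned long managed)
{
	return (request | managed) == managed;
}

int main(void)
{
	unsigned long managed = BIT(FID_FECS) | BIT(FID_GPCCS);

	printf("%d\n", all_managed(BIT(FID_FECS), managed));                 /* 1 */
	printf("%d\n", all_managed(BIT(FID_SEC2) | BIT(FID_FECS), managed)); /* 0 */
	return 0;
}
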
-
-/**
- * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
- */
-bool
-nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
-{
- if (!sb)
- return false;
-
- return sb->acr->managed_falcons & BIT(fid);
-}
-
-static int
-nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- int ret = 0;
-
- switch (sb->acr->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
- break;
- case NVKM_SECBOOT_FALCON_SEC2:
- /* we must keep SEC2 alive forever since ACR will run on it */
- nvkm_engine_ref(&subdev->device->sec2->engine);
- sb->boot_falcon = subdev->device->sec2->falcon;
- sb->halt_falcon = subdev->device->pmu->falcon;
- break;
- default:
- nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
- nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
- return -EINVAL;
- }
- nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
-
- /* Call chip-specific init function */
- if (sb->func->oneinit)
- ret = sb->func->oneinit(sb);
- if (ret) {
- nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static int
-nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- int ret = 0;
-
- if (sb->func->fini)
- ret = sb->func->fini(sb, suspend);
-
- return ret;
-}
-
-static void *
-nvkm_secboot_dtor(struct nvkm_subdev *subdev)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- void *ret = NULL;
-
- if (sb->func->dtor)
- ret = sb->func->dtor(sb);
-
- return ret;
-}
-
-static const struct nvkm_subdev_func
-nvkm_secboot = {
- .oneinit = nvkm_secboot_oneinit,
- .fini = nvkm_secboot_fini,
- .dtor = nvkm_secboot_dtor,
-};
-
-int
-nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
- struct nvkm_device *device, int index,
- struct nvkm_secboot *sb)
-{
- unsigned long fid;
-
- nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
- sb->func = func;
- sb->acr = acr;
- acr->subdev = &sb->subdev;
-
- nvkm_debug(&sb->subdev, "securely managed falcons:\n");
- for_each_set_bit(fid, &sb->acr->managed_falcons,
- NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "- %s\n",
- nvkm_secboot_falcon_name[fid]);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
deleted file mode 100644
index 5e91b3f90065..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "acr.h"
-#include "gm200.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <engine/falcon.h>
-#include <subdev/mc.h>
-
-/**
- * gm200_secboot_run_blob() - run the given high-secure blob
- */
-int
-gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
- struct nvkm_falcon *falcon)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- struct nvkm_subdev *subdev = &gsb->base.subdev;
- struct nvkm_vma *vma = NULL;
- u32 start_address;
- int ret;
-
- ret = nvkm_falcon_get(falcon, subdev);
- if (ret)
- return ret;
-
- /* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
- if (ret) {
- nvkm_falcon_put(falcon, subdev);
- return ret;
- }
-
- ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
- if (ret)
- goto end;
-
- /* Reset and set the falcon up */
- ret = nvkm_falcon_reset(falcon);
- if (ret)
- goto end;
- nvkm_falcon_bind_context(falcon, gsb->inst);
-
- /* Load the HS bootloader into the falcon's IMEM/DMEM */
- ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
- if (ret < 0)
- goto end;
-
- start_address = ret;
-
- /* Disable interrupts as we will poll for the HALT bit */
- nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
-
- /* Set default error value in mailbox register */
- nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
-
- /* Start the HS bootloader */
- nvkm_falcon_set_start_addr(falcon, start_address);
- nvkm_falcon_start(falcon);
- ret = nvkm_falcon_wait_for_halt(falcon, 100);
- if (ret)
- goto end;
-
- /*
- * The mailbox register contains the (positive) error code - return this
- * to the caller
- */
- ret = nvkm_falcon_rd32(falcon, 0x040);
-
-end:
- /* Reenable interrupts */
- nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
-
- /* We don't need the ACR firmware anymore */
- nvkm_vmm_put(gsb->vmm, &vma);
- nvkm_falcon_put(falcon, subdev);
-
- return ret;
-}
-
-int
-gm200_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- struct nvkm_device *device = sb->subdev.device;
- int ret;
-
- /* Allocate instance block and VM */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
- &gsb->inst);
- if (ret)
- return ret;
-
- ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
- &gsb->vmm);
- if (ret)
- return ret;
-
- atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
- gsb->vmm->debug = gsb->base.subdev.debug;
-
- ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
- if (ret)
- return ret;
-
- if (sb->acr->func->oneinit) {
- ret = sb->acr->func->oneinit(sb->acr, sb);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int
-gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
-{
- int ret = 0;
-
- if (sb->acr->func->fini)
- ret = sb->acr->func->fini(sb->acr, sb, suspend);
-
- return ret;
-}
-
-void *
-gm200_secboot_dtor(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
-
- sb->acr->func->dtor(sb->acr);
-
- nvkm_vmm_part(gsb->vmm, gsb->inst);
- nvkm_vmm_unref(&gsb->vmm);
- nvkm_memory_unref(&gsb->inst);
-
- return gsb;
-}
-
-
-static const struct nvkm_secboot_func
-gm200_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm200_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm200_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-
-MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
deleted file mode 100644
index 62c5e162099a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_GM200_H__
-#define __NVKM_SECBOOT_GM200_H__
-
-#include "priv.h"
-
-struct gm200_secboot {
- struct nvkm_secboot base;
-
- /* Instance block & address space used for HS FW execution */
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
-};
-#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
-
-int gm200_secboot_oneinit(struct nvkm_secboot *);
-int gm200_secboot_fini(struct nvkm_secboot *, bool);
-void *gm200_secboot_dtor(struct nvkm_secboot *);
-int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
- struct nvkm_falcon *);
-
-/* Tegra-only */
-int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
deleted file mode 100644
index df8b919dcf09..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA210_MC_BASE 0x70019000
-
-#ifdef CONFIG_ARCH_TEGRA
-#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
-#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
-#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
-#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
-#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
-/**
- * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
- *
- * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
- * is reserved from system memory by the bootloader and irreversibly locked.
- * This function reads the address and size of the pre-configured WPR region.
- */
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
- struct nvkm_secboot *sb = &gsb->base;
- void __iomem *mc;
- u32 cfg;
-
- mc = ioremap(mc_base, 0xd00);
- if (!mc) {
- nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
- return -ENOMEM;
- }
- sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
- ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
- sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
- << 17;
- cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
- iounmap(mc);
-
- /* Check that WPR settings are valid */
- if (sb->wpr_size == 0) {
- nvkm_error(&sb->subdev, "WPR region is empty\n");
- return -EINVAL;
- }
-
- if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
- nvkm_error(&sb->subdev, "WPR region not locked\n");
- return -EINVAL;
- }
-
- return 0;
-}
-#else
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
- nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
- return -EINVAL;
-}
-#endif
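
To make the register decoding concrete, the hedged example below combines made-up raw values the way the function above does: the BOM lo/hi words form a 64-bit base address, the size register counts 128 KiB units (hence the shift by 17), and bit 1 of the CFG register is the lock flag.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bom_lo = 0xb0000000, bom_hi = 0x0;  /* hypothetical raw reads */
	uint32_t size_128k = 0x180;                  /* size in 128 KiB units */
	uint32_t cfg = 0x2;                          /* bit 1 = carveout locked */

	uint64_t wpr_addr = ((uint64_t)bom_hi << 32) | bom_lo;
	uint64_t wpr_size = (uint64_t)size_128k << 17; /* 128 KiB granularity */

	printf("WPR: addr=0x%llx size=%llu MiB locked=%d\n",
	       (unsigned long long)wpr_addr,
	       (unsigned long long)(wpr_size >> 20), !!(cfg & (1 << 1)));
	return 0;
}
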
-
-static int
-gm20b_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
-
- ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
- if (ret)
- return ret;
-
- return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gm20b_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm20b_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm20b_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_PMU));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
- /* Support the initial GM20B firmware release without PMU */
- acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
-MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
deleted file mode 100644
index 84a2f243ed9b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <engine/falcon.h>
-#include <engine/nvdec.h>
-
-static bool
-gp102_secboot_scrub_required(struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- u32 reg;
-
- nvkm_wr32(device, 0x100cd0, 0x2);
- reg = nvkm_rd32(device, 0x100cd0);
-
- return (reg & BIT(4));
-}
-
-static int
-gp102_run_secure_scrub(struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_engine *engine;
- struct nvkm_falcon *falcon;
- void *scrub_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- struct hsf_load_header *lhdr;
- void *scrub_data;
- int ret;
-
- nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
-
- engine = nvkm_engine_ref(&device->nvdec[0]->engine);
- if (IS_ERR(engine))
- return PTR_ERR(engine);
- falcon = device->nvdec[0]->falcon;
-
- nvkm_falcon_get(falcon, &sb->subdev);
-
- scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
- if (IS_ERR(scrub_image))
- return PTR_ERR(scrub_image);
-
- nvkm_falcon_reset(falcon);
- nvkm_falcon_bind_context(falcon, NULL);
-
- hsbin_hdr = scrub_image;
- fw_hdr = scrub_image + hsbin_hdr->header_offset;
- lhdr = scrub_image + fw_hdr->hdr_offset;
- scrub_data = scrub_image + hsbin_hdr->data_offset;
-
- nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
- lhdr->non_sec_code_size,
- lhdr->non_sec_code_off >> 8, 0, false);
- nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
- ALIGN(lhdr->apps[0], 0x100),
- lhdr->apps[1],
- lhdr->apps[0] >> 8, 0, true);
- nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
- lhdr->data_size, 0);
-
- kfree(scrub_image);
-
- nvkm_falcon_set_start_addr(falcon, 0x0);
- nvkm_falcon_start(falcon);
-
- ret = nvkm_falcon_wait_for_halt(falcon, 500);
- if (ret < 0) {
- nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
- ret = -ETIMEDOUT;
- goto end;
- }
-
- /* put nvdec in clean state - without reset it will remain in HS mode */
- nvkm_falcon_reset(falcon);
-
- if (gp102_secboot_scrub_required(sb)) {
- nvkm_error(subdev, "VPR scrubber binary failed!\n");
- ret = -EINVAL;
- goto end;
- }
-
- nvkm_debug(subdev, "VPR scrub successfully completed\n");
-
-end:
- nvkm_falcon_put(falcon, &sb->subdev);
- nvkm_engine_unref(&engine);
- return ret;
-}
-
-static int
-gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
- struct nvkm_falcon *falcon)
-{
- int ret;
-
- /* make sure the VPR region is unlocked */
- if (gp102_secboot_scrub_required(sb)) {
- ret = gp102_run_secure_scrub(sb);
- if (ret)
- return ret;
- }
-
- return gm200_secboot_run_blob(sb, blob, falcon);
-}
-
-const struct nvkm_secboot_func
-gp102_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm200_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gp102_secboot_run_blob,
-};
-
-int
-gp102_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
- BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_SEC2));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
deleted file mode 100644
index 737a8d50a1f2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2017 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gm200.h"
-#include "acr.h"
-
-int
-gp108_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
- BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_SEC2));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
- acr->func->dtor(acr);
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
-}
-
-MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
-
-MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
deleted file mode 100644
index 28ca29d0eeee..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA186_MC_BASE 0x02c10000
-
-static int
-gp10b_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
-
- ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
- if (ret)
- return ret;
-
- return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gp10b_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gp10b_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gp10b_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_PMU));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
-MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
deleted file mode 100644
index 6b33182ddc2f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "hs_ucode.h"
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <engine/falcon.h>
-
-/**
- * hs_ucode_patch_signature() - patch HS blob with correct signature for
- * specified falcon.
- */
-static void
-hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
- bool new_format)
-{
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- void *hs_data = acr_image + hsbin_hdr->data_offset;
- void *sig;
- u32 sig_size;
- u32 patch_loc, patch_sig;
-
- /*
- * I had the brilliant idea to "improve" the binary format by
-	 * removing this useless indirection. However, to make NVIDIA files
-	 * directly compatible, let's support both formats.
- */
- if (new_format) {
- patch_loc = fw_hdr->patch_loc;
- patch_sig = fw_hdr->patch_sig;
- } else {
- patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
- patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
- }
-
- /* Falcon in debug or production mode? */
- if (falcon->debug) {
- sig = acr_image + fw_hdr->sig_dbg_offset;
- sig_size = fw_hdr->sig_dbg_size;
- } else {
- sig = acr_image + fw_hdr->sig_prod_offset;
- sig_size = fw_hdr->sig_prod_size;
- }
-
- /* Patch signature */
- memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
-}
-
-void *
-hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
- const char *fw)
-{
- void *acr_image;
- bool new_format;
-
- acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
- if (IS_ERR(acr_image))
- return acr_image;
-
- /* detect the format to define how signature should be patched */
- switch (((u32 *)acr_image)[0]) {
- case 0x3b1d14f0:
- new_format = true;
- break;
- case 0x000010de:
- new_format = false;
- break;
- default:
- nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
- return ERR_PTR(-EINVAL);
- }
-
- hs_ucode_patch_signature(falcon, acr_image, new_format);
-
- return acr_image;
-}
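
A minimal caller sketch may help illustrate how hs_ucode_load_blob() from the file removed above is meant to be consumed. The wrapper function, the "acr/ucode_load" firmware name (echoing the MODULE_FIRMWARE paths listed earlier) and the kfree() cleanup are illustrative assumptions, not code from the removed sources.

static int
example_load_hs_blob(struct nvkm_subdev *subdev, struct nvkm_falcon *falcon)
{
	/* load and signature-patch the high-secure ACR load blob */
	void *blob = hs_ucode_load_blob(subdev, falcon, "acr/ucode_load");

	if (IS_ERR(blob))
		return PTR_ERR(blob);

	/* ... upload the patched image to the falcon here ... */

	kfree(blob);	/* assumed: the returned blob is a plain kmalloc allocation */
	return 0;
}
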
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
deleted file mode 100644
index d8cfc6f7752a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_HS_UCODE_H__
-#define __NVKM_SECBOOT_HS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-
-struct nvkm_falcon;
-
-/**
- * struct hsf_fw_header - HS firmware descriptor
- * @sig_dbg_offset: offset of the debug signature
- * @sig_dbg_size: size of the debug signature
- * @sig_prod_offset: offset of the production signature
- * @sig_prod_size: size of the production signature
- * @patch_loc: offset of the offset (sic) of where the signature is
- * @patch_sig: offset of the offset (sic) to add to sig_*_offset
- * @hdr_offset: offset of the load header (see struct hs_load_header)
- * @hdr_size: size of above header
- *
- * This structure is embedded in the HS firmware image at
- * hs_bin_hdr.header_offset.
- */
-struct hsf_fw_header {
- u32 sig_dbg_offset;
- u32 sig_dbg_size;
- u32 sig_prod_offset;
- u32 sig_prod_size;
- u32 patch_loc;
- u32 patch_sig;
- u32 hdr_offset;
- u32 hdr_size;
-};
-
-/**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 data_dma_base;
- u32 data_size;
- u32 num_apps;
- /*
- * Organized as follows:
- * - app0_code_off
- * - app1_code_off
- * - ...
- * - appn_code_off
- * - app0_code_size
- * - app1_code_size
- * - ...
- */
- u32 apps[0];
-};
-
-void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
- const char *);
-
-#endif
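
The apps[] layout documented in struct hsf_load_header above is easy to misread, so here is a purely illustrative sketch of how an entry would be indexed; the helper names are invented and were not part of the removed header.

/* Illustration of the apps[] layout: all code offsets first, then the matching sizes. */
static inline u32
example_hsf_app_code_off(const struct hsf_load_header *hdr, u32 app)
{
	return hdr->apps[app];			/* app0_code_off, app1_code_off, ... */
}

static inline u32
example_hsf_app_code_size(const struct hsf_load_header *hdr, u32 app)
{
	return hdr->apps[hdr->num_apps + app];	/* app0_code_size, app1_code_size, ... */
}
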
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
deleted file mode 100644
index d43f906da3a7..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_LS_UCODE_H__
-#define __NVKM_SECBOOT_LS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-#include <subdev/secboot.h>
-
-struct nvkm_acr;
-
-/**
- * struct ls_ucode_img_desc - descriptor of firmware image
- * @descriptor_size: size of this descriptor
- * @image_size: size of the whole image
- * @bootloader_start_offset: start offset of the bootloader in ucode image
- * @bootloader_size: size of the bootloader
- * @bootloader_imem_offset: start offset of the bootloader in IMEM
- * @bootloader_entry_point: entry point of the bootloader in IMEM
- * @app_start_offset: start offset of the LS firmware
- * @app_size: size of the LS firmware's code and data
- * @app_imem_offset: offset of the app in IMEM
- * @app_imem_entry: entry point of the app in IMEM
- * @app_dmem_offset: offset of the data in DMEM
- * @app_resident_code_offset: offset of app code from app_start_offset
- * @app_resident_code_size: size of the code
- * @app_resident_data_offset: offset of data from app_start_offset
- * @app_resident_data_size: size of data
- *
- * A firmware image contains the code, data, and bootloader of a given LS
- * falcon in a single blob. This structure describes where everything is.
- *
- * This can be generated from a (bootloader, code, data) set if they have
- * been loaded separately, or come directly from a file.
- */
-struct ls_ucode_img_desc {
- u32 descriptor_size;
- u32 image_size;
- u32 tools_version;
- u32 app_version;
- char date[64];
- u32 bootloader_start_offset;
- u32 bootloader_size;
- u32 bootloader_imem_offset;
- u32 bootloader_entry_point;
- u32 app_start_offset;
- u32 app_size;
- u32 app_imem_offset;
- u32 app_imem_entry;
- u32 app_dmem_offset;
- u32 app_resident_code_offset;
- u32 app_resident_code_size;
- u32 app_resident_data_offset;
- u32 app_resident_data_size;
- u32 nb_overlays;
- struct {u32 start; u32 size; } load_ovl[64];
- u32 compressed;
-};
-
-/**
- * struct ls_ucode_img - temporary storage for loaded LS firmwares
- * @node: to link within lsf_ucode_mgr
- * @falcon_id: ID of the falcon this LS firmware is for
- * @ucode_desc: loaded or generated map of ucode_data
- * @ucode_data: firmware payload (code and data)
- * @ucode_size: size in bytes of data in ucode_data
- * @ucode_off: offset of the ucode in ucode_data
- * @sig: signature for this firmware
- * @sig_size: size of the signature in bytes
- *
- * Preparing the WPR LS blob requires information about all the LS firmwares
- * (size, etc) to be known. This structure contains all the data of one LS
- * firmware.
- */
-struct ls_ucode_img {
- struct list_head node;
- enum nvkm_secboot_falcon falcon_id;
-
- struct ls_ucode_img_desc ucode_desc;
- u8 *ucode_data;
- u32 ucode_size;
- u32 ucode_off;
-
- u8 *sig;
- u32 sig_size;
-};
-
-/**
- * struct fw_bin_header - header of firmware files
- * @bin_magic: always 0x3b1d14f0
- * @bin_ver: version of the bin format
- * @bin_size: entire image size including this header
- * @header_offset: offset of the firmware/bootloader header in the file
- * @data_offset: offset of the firmware/bootloader payload in the file
- * @data_size: size of the payload
- *
- * This header is located at the beginning of the HS firmware and HS bootloader
- * files, to describe where the headers and data can be found.
- */
-struct fw_bin_header {
- u32 bin_magic;
- u32 bin_ver;
- u32 bin_size;
- u32 header_offset;
- u32 data_offset;
- u32 data_size;
-};
-
-/**
- * struct fw_bl_desc - firmware bootloader descriptor
- * @start_tag: starting tag of bootloader
- * @dmem_load_off: DMEM offset of flcn_bl_dmem_desc
- * @code_off: offset of code section
- * @code_size: size of code section
- * @data_off: offset of data section
- * @data_size: size of data section
- *
- * This structure is embedded in bootloader firmware files to describe the
- * IMEM and DMEM layout expected by the bootloader.
- */
-struct fw_bl_desc {
- u32 start_tag;
- u32 dmem_load_off;
- u32 code_off;
- u32 code_size;
- u32 data_off;
- u32 data_size;
-};
-
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-
-#endif
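
As a hedged illustration of how the fw_bin_header documented above locates the rest of a firmware file: the helper below is not from the removed sources, and the error handling is deliberately minimal.

/* Sketch only: find the bootloader descriptor behind a fw_bin_header. */
static const struct fw_bl_desc *
example_fw_bin_bl_desc(const void *blob)
{
	const struct fw_bin_header *hdr = blob;

	if (hdr->bin_magic != 0x3b1d14f0)	/* documented as the expected magic */
		return NULL;

	/* header_offset locates the descriptor; data_offset/data_size locate the payload */
	return (const struct fw_bl_desc *)((const u8 *)blob + hdr->header_offset);
}
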
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
deleted file mode 100644
index 821d3b2bdb1f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-
-#define BL_DESC_BLK_SIZE 256
-/**
- * Build a ucode image and descriptor from provided bootloader, code and data.
- *
- * @bl: bootloader image, including the 16-byte descriptor
- * @code: LS firmware code segment
- * @data: LS firmware data segment
- * @desc: ucode descriptor to be written
- *
- * Return: allocated ucode image with corresponding descriptor information. desc
- * is also updated to contain the right offsets within the returned image.
- */
-static void *
-ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
- const struct firmware *data, struct ls_ucode_img_desc *desc)
-{
- struct fw_bin_header *bin_hdr = (void *)bl->data;
- struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
- void *bl_data = (void *)bl->data + bin_hdr->data_offset;
- u32 pos = 0;
- void *image;
-
- desc->bootloader_start_offset = pos;
- desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
- desc->bootloader_imem_offset = bl_desc->start_tag * 256;
- desc->bootloader_entry_point = bl_desc->start_tag * 256;
-
- pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
- desc->app_start_offset = pos;
- desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
- ALIGN(data->size, BL_DESC_BLK_SIZE);
- desc->app_imem_offset = 0;
- desc->app_imem_entry = 0;
- desc->app_dmem_offset = 0;
- desc->app_resident_code_offset = 0;
- desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
-
- pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
- desc->app_resident_data_offset = pos - desc->app_start_offset;
- desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
-
- desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
- desc->app_size;
-
- image = kzalloc(desc->image_size, GFP_KERNEL);
- if (!image)
- return ERR_PTR(-ENOMEM);
-
- memcpy(image + desc->bootloader_start_offset, bl_data,
- bl_desc->code_size);
- memcpy(image + desc->app_start_offset, code->data, code->size);
- memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
- data->data, data->size);
-
- return image;
-}
-
-/**
- * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
- *
- * Load the LS microcode, bootloader and signature and pack them into a single
- * blob. Also generate the corresponding ucode descriptor.
- */
-static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
- struct ls_ucode_img *img, const char *falcon_name)
-{
- const struct firmware *bl, *code, *data, *sig;
- char f[64];
- int ret;
-
- snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &bl);
- if (ret)
- goto error;
-
- snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &code);
- if (ret)
- goto free_bl;
-
- snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &data);
- if (ret)
- goto free_inst;
-
- snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &sig);
- if (ret)
- goto free_data;
-
- img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
- if (!img->sig) {
- ret = -ENOMEM;
- goto free_sig;
- }
- img->sig_size = sig->size;
-
- img->ucode_data = ls_ucode_img_build(bl, code, data,
- &img->ucode_desc);
- if (IS_ERR(img->ucode_data)) {
- kfree(img->sig);
- ret = PTR_ERR(img->ucode_data);
- goto free_sig;
- }
- img->ucode_size = img->ucode_desc.image_size;
-
-free_sig:
- nvkm_firmware_put(sig);
-free_data:
- nvkm_firmware_put(data);
-free_inst:
- nvkm_firmware_put(code);
-free_bl:
- nvkm_firmware_put(bl);
-error:
- return ret;
-}
-
-int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
-}
-
-int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
-}
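
A worked layout example for ls_ucode_img_build() above; the sizes are invented purely for illustration.

/*
 * Assume a 0x340-byte bootloader, 0x5000 bytes of LS code and 0x1200
 * bytes of LS data (BL_DESC_BLK_SIZE = 256 = 0x100):
 *
 *   0x0000  bootloader      bootloader_start_offset
 *   0x0400  LS code         app_start_offset (0x340 rounded up to 0x100)
 *   0x5400  LS data         app_resident_data_offset = 0x5000 from app_start_offset
 *   0x6600  end of image    image_size = 0x400 + app_size (0x6200)
 */
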
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
deleted file mode 100644
index a84a999445bb..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-
-/**
- * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
- *
- * Load the LS microcode, desc and signature and pack them into a single
- * blob.
- */
-static int
-acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
- int maxver, struct ls_ucode_img *img)
-{
- const struct firmware *image, *desc, *sig;
- char f[64];
- int ver, ret;
-
- snprintf(f, sizeof(f), "%s/image", name);
- ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
- if (ver < 0)
- return ver;
- img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
- nvkm_firmware_put(image);
- if (!img->ucode_data)
- return -ENOMEM;
-
- snprintf(f, sizeof(f), "%s/desc", name);
- ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
- if (ret < 0)
- return ret;
- memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
- img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
- nvkm_firmware_put(desc);
-
- snprintf(f, sizeof(f), "%s/sig", name);
- ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
- if (ret < 0)
- return ret;
- img->sig_size = sig->size;
- img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
- nvkm_firmware_put(sig);
- if (!img->sig)
- return -ENOMEM;
-
- return ver;
-}
-
-static int
-acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
- struct nvkm_falcon *falcon, u32 addr_args)
-{
- struct nvkm_device *device = falcon->owner->device;
- u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
-
- memset(buf, 0, sizeof(buf));
- nvkm_msgqueue_write_cmdline(queue, buf);
- nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
- /* rearm the queue so it will wait for the init message */
- nvkm_msgqueue_reinit(queue);
-
- /* Enable interrupts */
- nvkm_falcon_wr32(falcon, 0x10, 0xff);
- nvkm_mc_intr_mask(device, falcon->owner->index, true);
-
- /* Start LS firmware on boot falcon */
- nvkm_falcon_start(falcon);
-
- return 0;
-}
-
-int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- struct nvkm_pmu *pmu = sb->subdev.device->pmu;
- int ret;
-
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
- if (ret)
- return ret;
-
- /* Allocate the PMU queue corresponding to the FW version */
- ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
- sb, &pmu->queue);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int
-acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
- struct nvkm_device *device = sb->subdev.device;
- struct nvkm_pmu *pmu = device->pmu;
- u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
- int ret;
-
- ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
- if (ret)
- return ret;
-
- nvkm_debug(&sb->subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
-
- return 0;
-}
-
-int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- struct nvkm_sec2 *sec = sb->subdev.device->sec2;
- int ver, ret;
-
- ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
- if (ver < 0)
- return ver;
-
-	/* Allocate the SEC2 queue corresponding to the FW version */
- ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
- sb, &sec->queue);
- if (ret)
- return ret;
-
- return ver;
-}
-
-int
-acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_sec2 *sec = device->sec2;
-	/* on SEC2, arguments are always at the beginning of EMEM */
- const u32 addr_args = 0x01000000;
- int ret;
-
- ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
- if (ret)
- return ret;
-
- nvkm_debug(&sb->subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
-
- return 0;
-}
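
For clarity, the two boot-falcon cases above place their command-line buffer as follows (restated from the removed code, not new information):

/*
 * PMU:  addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE
 *       (the command line sits at the very end of the PMU's DMEM).
 * SEC2: addr_args = 0x01000000
 *       (per the comment above, SEC2 arguments always start at the
 *        beginning of EMEM).
 */
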
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
deleted file mode 100644
index 959a7b2dbdc9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_PRIV_H__
-#define __NVKM_SECBOOT_PRIV_H__
-
-#include <subdev/secboot.h>
-#include <subdev/mmu.h>
-struct nvkm_gpuobj;
-
-struct nvkm_secboot_func {
- int (*oneinit)(struct nvkm_secboot *);
- int (*fini)(struct nvkm_secboot *, bool suspend);
- void *(*dtor)(struct nvkm_secboot *);
- int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
- struct nvkm_falcon *);
-};
-
-int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
- struct nvkm_device *, int, struct nvkm_secboot *);
-int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
-int nvkm_secboot_falcon_run(struct nvkm_secboot *);
-
-extern const struct nvkm_secboot_func gp102_secboot;
-
-struct flcn_u64 {
- u32 lo;
- u32 hi;
-};
-
-static inline u64 flcn64_to_u64(const struct flcn_u64 f)
-{
- return ((u64)f.hi) << 32 | f.lo;
-}
-
-static inline struct flcn_u64 u64_to_flcn64(u64 u)
-{
- struct flcn_u64 ret;
-
- ret.hi = upper_32_bits(u);
- ret.lo = lower_32_bits(u);
-
- return ret;
-}
-
-#endif
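
A tiny usage sketch for the flcn_u64 helpers above; the address value is arbitrary and the function is illustrative only.

/* Illustration: round-tripping a 64-bit address through flcn_u64. */
static void
example_flcn_u64_roundtrip(void)
{
	u64 addr = 0x0000001234567000ULL;		/* arbitrary example */
	struct flcn_u64 f = u64_to_flcn64(addr);	/* f.hi = 0x12, f.lo = 0x34567000 */

	WARN_ON(flcn64_to_u64(f) != addr);		/* round trip is lossless */
}
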
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 07914e36939e..4a4d1e224126 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -21,9 +21,11 @@
*
* Authors: Martin Peres
*/
-#include <nvkm/core/option.h>
#include "priv.h"
+#include <core/option.h>
+#include <subdev/pmu.h>
+
int
nvkm_therm_temp_get(struct nvkm_therm *therm)
{
@@ -192,8 +194,7 @@ nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
/* The default PPWR ucode on fermi interferes with fan management */
if ((mode >= ARRAY_SIZE(name)) ||
- (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
- !device->pmu))
+ (mode != NVKM_THERM_CTRL_NONE && nvkm_pmu_fan_controlled(device)))
return -EINVAL;
/* do not allow automatic fan management if the thermal sensor is
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 6e0ddc1bb583..03b355dabab3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -116,6 +116,9 @@ nvkm_therm_ic_ctor(struct nvkm_therm *therm)
return;
}
+ if (nvbios_extdev_skip_probe(bios))
+ return;
+
 	/* The vbios doesn't provide the address of an existing monitoring
device. Let's try our static list.
*/
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c2566da32ac4..b562a8cd61bf 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -8,18 +8,18 @@ config DRM_OMAP_ENCODER_OPA362
through a GPIO.
config DRM_OMAP_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
+ tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
config DRM_OMAP_CONNECTOR_HDMI
- tristate "HDMI Connector"
+ tristate "HDMI Connector"
help
Driver for a generic HDMI connector.
config DRM_OMAP_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
+ tristate "Analog TV Connector"
help
Driver for a generic analog TV connector.
@@ -29,42 +29,4 @@ config DRM_OMAP_PANEL_DSI_CM
help
Driver for generic DSI command mode panels.
-config DRM_OMAP_PANEL_SONY_ACX565AKM
- tristate "ACX565AKM Panel"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- help
- This is the LCD panel used on Nokia N900
-
-config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
- tristate "LG.Philips LB035Q02 LCD Panel"
- depends on SPI
- help
- LCD Panel used on the Gumstix Overo Palo35
-
-config DRM_OMAP_PANEL_SHARP_LS037V7DW01
- tristate "Sharp LS037V7DW01 LCD Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- LCD Panel used in TI's SDP3430 and EVM boards
-
-config DRM_OMAP_PANEL_TPO_TD028TTEC1
- tristate "TPO TD028TTEC1 LCD Panel"
- depends on SPI
- help
- LCD panel used in Openmoko.
-
-config DRM_OMAP_PANEL_TPO_TD043MTEA1
- tristate "TPO TD043MTEA1 LCD Panel"
- depends on SPI
- help
- LCD Panel used in OMAP3 Pandora
-
-config DRM_OMAP_PANEL_NEC_NL8048HL11
- tristate "NEC NL8048HL11 Panel"
- depends on SPI
- depends on BACKLIGHT_CLASS_DEVICE
- help
-	  This NEC NL8048HL11 panel is a TFT LCD used in the
-	  Zoom2/3/3630 SDP boards.
-
endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 1db34d4fed64..cb76859dc574 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -4,9 +4,3 @@ obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
-obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
-obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
-obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
deleted file mode 100644
index 1fd0d84e6e38..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ /dev/null
@@ -1,251 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LG.Philips LB035Q02 LCD Panel driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- * Based on a driver by: Steve Sakoman <steve@sakoman.com>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-
-#include "../dss/omapdss.h"
-
-static const struct videomode lb035q02_vm = {
- .hactive = 320,
- .vactive = 240,
-
- .pixelclock = 6500000,
-
- .hsync_len = 2,
- .hfront_porch = 20,
- .hback_porch = 68,
-
- .vsync_len = 2,
- .vfront_porch = 4,
- .vback_porch = 18,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct spi_device *spi;
-
- struct videomode vm;
-
- struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
-{
- struct spi_message msg;
- struct spi_transfer index_xfer = {
- .len = 3,
- .cs_change = 1,
- };
- struct spi_transfer value_xfer = {
- .len = 3,
- };
- u8 buffer[16];
-
- spi_message_init(&msg);
-
- /* register index */
- buffer[0] = 0x70;
- buffer[1] = 0x00;
- buffer[2] = reg & 0x7f;
- index_xfer.tx_buf = buffer;
- spi_message_add_tail(&index_xfer, &msg);
-
- /* register value */
- buffer[4] = 0x72;
- buffer[5] = val >> 8;
- buffer[6] = val;
- value_xfer.tx_buf = buffer + 4;
- spi_message_add_tail(&value_xfer, &msg);
-
- return spi_sync(spi, &msg);
-}
-
-static void init_lb035q02_panel(struct spi_device *spi)
-{
- /* Init sequence from page 28 of the lb035q02 spec */
- lb035q02_write_reg(spi, 0x01, 0x6300);
- lb035q02_write_reg(spi, 0x02, 0x0200);
- lb035q02_write_reg(spi, 0x03, 0x0177);
- lb035q02_write_reg(spi, 0x04, 0x04c7);
- lb035q02_write_reg(spi, 0x05, 0xffc0);
- lb035q02_write_reg(spi, 0x06, 0xe806);
- lb035q02_write_reg(spi, 0x0a, 0x4008);
- lb035q02_write_reg(spi, 0x0b, 0x0000);
- lb035q02_write_reg(spi, 0x0d, 0x0030);
- lb035q02_write_reg(spi, 0x0e, 0x2800);
- lb035q02_write_reg(spi, 0x0f, 0x0000);
- lb035q02_write_reg(spi, 0x16, 0x9f80);
- lb035q02_write_reg(spi, 0x17, 0x0a0f);
- lb035q02_write_reg(spi, 0x1e, 0x00c1);
- lb035q02_write_reg(spi, 0x30, 0x0300);
- lb035q02_write_reg(spi, 0x31, 0x0007);
- lb035q02_write_reg(spi, 0x32, 0x0000);
- lb035q02_write_reg(spi, 0x33, 0x0000);
- lb035q02_write_reg(spi, 0x34, 0x0707);
- lb035q02_write_reg(spi, 0x35, 0x0004);
- lb035q02_write_reg(spi, 0x36, 0x0302);
- lb035q02_write_reg(spi, 0x37, 0x0202);
- lb035q02_write_reg(spi, 0x3a, 0x0a0d);
- lb035q02_write_reg(spi, 0x3b, 0x0806);
-}
-
-static int lb035q02_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- init_lb035q02_panel(ddata->spi);
-
- return 0;
-}
-
-static void lb035q02_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void lb035q02_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void lb035q02_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static int lb035q02_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops lb035q02_ops = {
- .connect = lb035q02_connect,
- .disconnect = lb035q02_disconnect,
-
- .enable = lb035q02_enable,
- .disable = lb035q02_disable,
-
- .get_modes = lb035q02_get_modes,
-};
-
-static int lb035q02_probe_of(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct gpio_desc *gpio;
-
- gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->enable_gpio = gpio;
-
- return 0;
-}
-
-static int lb035q02_panel_spi_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- r = lb035q02_probe_of(spi);
- if (r)
- return r;
-
- ddata->vm = lb035q02_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &lb035q02_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * DE is active LOW
- * DATA needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int lb035q02_panel_spi_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- lb035q02_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id lb035q02_of_match[] = {
- { .compatible = "omapdss,lgphilips,lb035q02", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, lb035q02_of_match);
-
-static struct spi_driver lb035q02_spi_driver = {
- .probe = lb035q02_panel_spi_probe,
- .remove = lb035q02_panel_spi_remove,
- .driver = {
- .name = "panel_lgphilips_lb035q02",
- .of_match_table = lb035q02_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_spi_driver(lb035q02_spi_driver);
-
-MODULE_ALIAS("spi:lgphilips,lb035q02");
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
deleted file mode 100644
index eba5bd1d702f..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ /dev/null
@@ -1,271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * NEC NL8048HL11 Panel driver
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Erik Gilling <konkers@android.com>
- * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct gpio_desc *res_gpio;
-
- struct spi_device *spi;
-};
-
-#define LCD_XRES 800
-#define LCD_YRES 480
-/*
- * NEC PIX Clock Ratings
- * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
- */
-#define LCD_PIXEL_CLOCK 23800000
-
-static const struct {
- unsigned char addr;
- unsigned char dat;
-} nec_8048_init_seq[] = {
- { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
- { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
- { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 },
- { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
- { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F },
- { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F },
- { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F },
- { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
- { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 },
- { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C },
- { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
- { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
- { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
- { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
- { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
- { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
- { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
- { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
-};
-
-static const struct videomode nec_8048_panel_vm = {
- .hactive = LCD_XRES,
- .vactive = LCD_YRES,
- .pixelclock = LCD_PIXEL_CLOCK,
- .hfront_porch = 6,
- .hsync_len = 1,
- .hback_porch = 4,
- .vfront_porch = 3,
- .vsync_len = 1,
- .vback_porch = 4,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
- unsigned char reg_data)
-{
- int ret = 0;
- unsigned int cmd = 0, data = 0;
-
- cmd = 0x0000 | reg_addr; /* register address write */
- data = 0x0100 | reg_data; /* register data write */
- data = (cmd << 16) | data;
-
- ret = spi_write(spi, (unsigned char *)&data, 4);
- if (ret)
- pr_err("error in spi_write %x\n", data);
-
- return ret;
-}
-
-static int init_nec_8048_wvga_lcd(struct spi_device *spi)
-{
- unsigned int i;
- /* Initialization Sequence */
- /* nec_8048_spi_send(spi, REG, VAL) */
- for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
- nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
- nec_8048_init_seq[i].dat);
- udelay(20);
- nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
- nec_8048_init_seq[i].dat);
- return 0;
-}
-
-static int nec_8048_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void nec_8048_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void nec_8048_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- gpiod_set_value_cansleep(ddata->res_gpio, 1);
-}
-
-static void nec_8048_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- gpiod_set_value_cansleep(ddata->res_gpio, 0);
-}
-
-static int nec_8048_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops nec_8048_ops = {
- .connect = nec_8048_connect,
- .disconnect = nec_8048_disconnect,
-
- .enable = nec_8048_enable,
- .disable = nec_8048_disable,
-
- .get_modes = nec_8048_get_modes,
-};
-
-static int nec_8048_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->mode = SPI_MODE_0;
- spi->bits_per_word = 32;
-
- r = spi_setup(spi);
- if (r < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", r);
- return r;
- }
-
- init_nec_8048_wvga_lcd(spi);
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to get reset gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->res_gpio = gpio;
-
- ddata->vm = nec_8048_panel_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &nec_8048_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int nec_8048_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- omapdss_device_unregister(dssdev);
-
- nec_8048_disable(dssdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int nec_8048_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- nec_8048_spi_send(spi, 2, 0x01);
- mdelay(40);
-
- return 0;
-}
-
-static int nec_8048_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- /* reinitialize the panel */
- spi_setup(spi);
- nec_8048_spi_send(spi, 2, 0x00);
- init_nec_8048_wvga_lcd(spi);
-
- return 0;
-}
-static SIMPLE_DEV_PM_OPS(nec_8048_pm_ops, nec_8048_suspend,
- nec_8048_resume);
-#define NEC_8048_PM_OPS (&nec_8048_pm_ops)
-#else
-#define NEC_8048_PM_OPS NULL
-#endif
-
-static const struct of_device_id nec_8048_of_match[] = {
- { .compatible = "omapdss,nec,nl8048hl11", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, nec_8048_of_match);
-
-static struct spi_driver nec_8048_driver = {
- .driver = {
- .name = "panel-nec-nl8048hl11",
- .pm = NEC_8048_PM_OPS,
- .of_match_table = nec_8048_of_match,
- .suppress_bind_attrs = true,
- },
- .probe = nec_8048_probe,
- .remove = nec_8048_remove,
-};
-
-module_spi_driver(nec_8048_driver);
-
-MODULE_ALIAS("spi:nec,nl8048hl11");
-MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
-MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
deleted file mode 100644
index 3ab50fd1f3f2..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LCD panel driver for Sharp LS037V7DW01
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- struct regulator *vcc;
-
- struct videomode vm;
-
- struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
- struct gpio_desc *ini_gpio; /* high = power on */
- struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */
- struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */
- struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
-};
-
-static const struct videomode sharp_ls_vm = {
- .hactive = 480,
- .vactive = 640,
-
- .pixelclock = 19200000,
-
- .hsync_len = 2,
- .hfront_porch = 1,
- .hback_porch = 28,
-
- .vsync_len = 1,
- .vfront_porch = 1,
- .vback_porch = 1,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int sharp_ls_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void sharp_ls_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void sharp_ls_pre_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- if (ddata->vcc) {
- r = regulator_enable(ddata->vcc);
- if (r)
- dev_err(dssdev->dev, "%s: failed to enable regulator\n",
- __func__);
- }
-}
-
-static void sharp_ls_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- /* wait couple of vsyncs until enabling the LCD */
- msleep(50);
-
- if (ddata->resb_gpio)
- gpiod_set_value_cansleep(ddata->resb_gpio, 1);
-
- if (ddata->ini_gpio)
- gpiod_set_value_cansleep(ddata->ini_gpio, 1);
-}
-
-static void sharp_ls_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->ini_gpio)
- gpiod_set_value_cansleep(ddata->ini_gpio, 0);
-
- if (ddata->resb_gpio)
- gpiod_set_value_cansleep(ddata->resb_gpio, 0);
-
- /* wait at least 5 vsyncs after disabling the LCD */
- msleep(100);
-}
-
-static void sharp_ls_post_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->vcc)
- regulator_disable(ddata->vcc);
-}
-
-static int sharp_ls_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops sharp_ls_ops = {
- .connect = sharp_ls_connect,
- .disconnect = sharp_ls_disconnect,
-
- .pre_enable = sharp_ls_pre_enable,
- .enable = sharp_ls_enable,
- .disable = sharp_ls_disable,
- .post_disable = sharp_ls_post_disable,
-
- .get_modes = sharp_ls_get_modes,
-};
-
-static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
- const char *desc, struct gpio_desc **gpiod)
-{
- struct gpio_desc *gd;
-
- *gpiod = NULL;
-
- gd = devm_gpiod_get_index(dev, desc, index, GPIOD_OUT_LOW);
- if (IS_ERR(gd))
- return PTR_ERR(gd);
-
- *gpiod = gd;
- return 0;
-}
-
-static int sharp_ls_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- int r;
-
- ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
- if (IS_ERR(ddata->vcc)) {
- dev_err(&pdev->dev, "failed to get regulator\n");
- return PTR_ERR(ddata->vcc);
- }
-
- /* lcd INI */
- r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio);
- if (r)
- return r;
-
- /* lcd RESB */
- r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "reset", &ddata->resb_gpio);
- if (r)
- return r;
-
- /* lcd MO */
- r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "mode", &ddata->mo_gpio);
- if (r)
- return r;
-
- /* lcd LR */
- r = sharp_ls_get_gpio_of(&pdev->dev, 1, 1, "mode", &ddata->lr_gpio);
- if (r)
- return r;
-
- /* lcd UD */
- r = sharp_ls_get_gpio_of(&pdev->dev, 2, 1, "mode", &ddata->ud_gpio);
- if (r)
- return r;
-
- return 0;
-}
-
-static int sharp_ls_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = sharp_ls_probe_of(pdev);
- if (r)
- return r;
-
- ddata->vm = sharp_ls_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &pdev->dev;
- dssdev->ops = &sharp_ls_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * DATA needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit sharp_ls_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev)) {
- sharp_ls_disable(dssdev);
- sharp_ls_post_disable(dssdev);
- }
-
- return 0;
-}
-
-static const struct of_device_id sharp_ls_of_match[] = {
- { .compatible = "omapdss,sharp,ls037v7dw01", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, sharp_ls_of_match);
-
-static struct platform_driver sharp_ls_driver = {
- .probe = sharp_ls_probe,
- .remove = __exit_p(sharp_ls_remove),
- .driver = {
- .name = "panel-sharp-ls037v7dw01",
- .of_match_table = sharp_ls_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(sharp_ls_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
deleted file mode 100644
index 588a1a6bbcc3..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ /dev/null
@@ -1,755 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Sony ACX565AKM LCD Panel driver
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Original Driver Author: Imre Deak <imre.deak@nokia.com>
- * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com>
- * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-#define MIPID_CMD_READ_DISP_ID 0x04
-#define MIPID_CMD_READ_RED 0x06
-#define MIPID_CMD_READ_GREEN 0x07
-#define MIPID_CMD_READ_BLUE 0x08
-#define MIPID_CMD_READ_DISP_STATUS 0x09
-#define MIPID_CMD_RDDSDR 0x0F
-#define MIPID_CMD_SLEEP_IN 0x10
-#define MIPID_CMD_SLEEP_OUT 0x11
-#define MIPID_CMD_DISP_OFF 0x28
-#define MIPID_CMD_DISP_ON 0x29
-#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
-#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
-#define MIPID_CMD_WRITE_CTRL_DISP 0x53
-
-#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
-#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
-#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
-#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
-
-#define MIPID_CMD_READ_CTRL_DISP 0x54
-#define MIPID_CMD_WRITE_CABC 0x55
-#define MIPID_CMD_READ_CABC 0x56
-
-#define MIPID_VER_LPH8923 3
-#define MIPID_VER_LS041Y3 4
-#define MIPID_VER_L4F00311 8
-#define MIPID_VER_ACX565AKM 9
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *reset_gpio;
-
- struct videomode vm;
-
- char *name;
- int enabled;
- int model;
- int revision;
- u8 display_id[3];
- unsigned has_bc:1;
- unsigned has_cabc:1;
- unsigned cabc_mode;
- unsigned long hw_guard_end; /* next value of jiffies
- when we can issue the
- next sleep in/out command */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- struct spi_device *spi;
- struct mutex mutex;
-
- struct backlight_device *bl_dev;
-};
-
-static const struct videomode acx565akm_panel_vm = {
- .hactive = 800,
- .vactive = 480,
- .pixelclock = 24000000,
- .hfront_porch = 28,
- .hsync_len = 4,
- .hback_porch = 24,
- .vfront_porch = 3,
- .vsync_len = 3,
- .vback_porch = 4,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static void acx565akm_transfer(struct panel_drv_data *ddata, int cmd,
- const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
-{
- struct spi_message m;
- struct spi_transfer *x, xfer[5];
- int r;
-
- BUG_ON(ddata->spi == NULL);
-
- spi_message_init(&m);
-
- memset(xfer, 0, sizeof(xfer));
- x = &xfer[0];
-
- cmd &= 0xff;
- x->tx_buf = &cmd;
- x->bits_per_word = 9;
- x->len = 2;
-
- if (rlen > 1 && wlen == 0) {
- /*
- * Between the command and the response data there is a
- * dummy clock cycle. Add an extra bit after the command
- * word to account for this.
- */
- x->bits_per_word = 10;
- cmd <<= 1;
- }
- spi_message_add_tail(x, &m);
-
- if (wlen) {
- x++;
- x->tx_buf = wbuf;
- x->len = wlen;
- x->bits_per_word = 9;
- spi_message_add_tail(x, &m);
- }
-
- if (rlen) {
- x++;
- x->rx_buf = rbuf;
- x->len = rlen;
- spi_message_add_tail(x, &m);
- }
-
- r = spi_sync(ddata->spi, &m);
- if (r < 0)
- dev_dbg(&ddata->spi->dev, "spi_sync %d\n", r);
-}
-
-static inline void acx565akm_cmd(struct panel_drv_data *ddata, int cmd)
-{
- acx565akm_transfer(ddata, cmd, NULL, 0, NULL, 0);
-}
-
-static inline void acx565akm_write(struct panel_drv_data *ddata,
- int reg, const u8 *buf, int len)
-{
- acx565akm_transfer(ddata, reg, buf, len, NULL, 0);
-}
-
-static inline void acx565akm_read(struct panel_drv_data *ddata,
- int reg, u8 *buf, int len)
-{
- acx565akm_transfer(ddata, reg, NULL, 0, buf, len);
-}
-
-static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
-{
- ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
- ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct panel_drv_data *ddata)
-{
- unsigned long wait = ddata->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
-
-static void set_sleep_mode(struct panel_drv_data *ddata, int on)
-{
- int cmd;
-
- if (on)
- cmd = MIPID_CMD_SLEEP_IN;
- else
- cmd = MIPID_CMD_SLEEP_OUT;
- /*
- * We have to keep 120msec between sleep in/out commands.
- * (8.2.15, 8.2.16).
- */
- hw_guard_wait(ddata);
- acx565akm_cmd(ddata, cmd);
- hw_guard_start(ddata, 120);
-}
-
-static void set_display_state(struct panel_drv_data *ddata, int enabled)
-{
- int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
-
- acx565akm_cmd(ddata, cmd);
-}
-
-static int panel_enabled(struct panel_drv_data *ddata)
-{
- __be32 v;
- u32 disp_status;
- int enabled;
-
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&v, 4);
- disp_status = __be32_to_cpu(v);
- enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
- dev_dbg(&ddata->spi->dev,
- "LCD panel %senabled by bootloader (status 0x%04x)\n",
- enabled ? "" : "not ", disp_status);
- return enabled;
-}
-
-static int panel_detect(struct panel_drv_data *ddata)
-{
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_ID, ddata->display_id, 3);
- dev_dbg(&ddata->spi->dev, "MIPI display ID: %02x%02x%02x\n",
- ddata->display_id[0],
- ddata->display_id[1],
- ddata->display_id[2]);
-
- switch (ddata->display_id[0]) {
- case 0x10:
- ddata->model = MIPID_VER_ACX565AKM;
- ddata->name = "acx565akm";
- ddata->has_bc = 1;
- ddata->has_cabc = 1;
- break;
- case 0x29:
- ddata->model = MIPID_VER_L4F00311;
- ddata->name = "l4f00311";
- break;
- case 0x45:
- ddata->model = MIPID_VER_LPH8923;
- ddata->name = "lph8923";
- break;
- case 0x83:
- ddata->model = MIPID_VER_LS041Y3;
- ddata->name = "ls041y3";
- break;
- default:
- ddata->name = "unknown";
- dev_err(&ddata->spi->dev, "invalid display ID\n");
- return -ENODEV;
- }
-
- ddata->revision = ddata->display_id[1];
-
- dev_info(&ddata->spi->dev, "omapfb: %s rev %02x LCD detected\n",
- ddata->name, ddata->revision);
-
- return 0;
-}
-
-/*----------------------Backlight Control-------------------------*/
-
-static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable)
-{
- u16 ctrl;
-
- acx565akm_read(ddata, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
- if (enable) {
- ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
- CTRL_DISP_BACKLIGHT_ON;
- } else {
- ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
- CTRL_DISP_BACKLIGHT_ON);
- }
-
- ctrl |= 1 << 8;
- acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
-}
-
-static void set_cabc_mode(struct panel_drv_data *ddata, unsigned int mode)
-{
- u16 cabc_ctrl;
-
- ddata->cabc_mode = mode;
- if (!ddata->enabled)
- return;
- cabc_ctrl = 0;
- acx565akm_read(ddata, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
- cabc_ctrl &= ~3;
- cabc_ctrl |= (1 << 8) | (mode & 3);
- acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
-}
-
-static unsigned int get_cabc_mode(struct panel_drv_data *ddata)
-{
- return ddata->cabc_mode;
-}
-
-static unsigned int get_hw_cabc_mode(struct panel_drv_data *ddata)
-{
- u8 cabc_ctrl;
-
- acx565akm_read(ddata, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
- return cabc_ctrl & 3;
-}
-
-static void acx565akm_set_brightness(struct panel_drv_data *ddata, int level)
-{
- int bv;
-
- bv = level | (1 << 8);
- acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
-
- if (level)
- enable_backlight_ctrl(ddata, 1);
- else
- enable_backlight_ctrl(ddata, 0);
-}
-
-static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata)
-{
- u8 bv;
-
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
-
- return bv;
-}
-
-
-static int acx565akm_bl_update_status(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int level;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- if (ddata->has_bc)
- acx565akm_set_brightness(ddata, level);
- else
- return -ENODEV;
-
- return 0;
-}
-
-static int acx565akm_bl_get_intensity(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
-
- dev_dbg(&dev->dev, "%s\n", __func__);
-
- if (!ddata->has_bc)
- return -ENODEV;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK) {
- if (ddata->has_bc)
- return acx565akm_get_actual_brightness(ddata);
- else
- return dev->props.brightness;
- }
-
- return 0;
-}
-
-static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int r;
-
- mutex_lock(&ddata->mutex);
- r = acx565akm_bl_update_status(dev);
- mutex_unlock(&ddata->mutex);
-
- return r;
-}
-
-static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int r;
-
- mutex_lock(&ddata->mutex);
- r = acx565akm_bl_get_intensity(dev);
- mutex_unlock(&ddata->mutex);
-
- return r;
-}
-
-static const struct backlight_ops acx565akm_bl_ops = {
- .get_brightness = acx565akm_bl_get_intensity_locked,
- .update_status = acx565akm_bl_update_status_locked,
-};
-
-/*--------------------Auto Brightness control via Sysfs---------------------*/
-
-static const char * const cabc_modes[] = {
- "off", /* always used when CABC is not supported */
- "ui",
- "still-image",
- "moving-image",
-};
-
-static ssize_t show_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- const char *mode_str;
- int mode;
- int len;
-
- if (!ddata->has_cabc)
- mode = 0;
- else
- mode = get_cabc_mode(ddata);
- mode_str = "unknown";
- if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
- mode_str = cabc_modes[mode];
- len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
-
- return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
-}
-
-static ssize_t store_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
- const char *mode_str = cabc_modes[i];
- int cmp_len = strlen(mode_str);
-
- if (count > 0 && buf[count - 1] == '\n')
- count--;
- if (count != cmp_len)
- continue;
-
- if (strncmp(buf, mode_str, cmp_len) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(cabc_modes))
- return -EINVAL;
-
- if (!ddata->has_cabc && i != 0)
- return -EINVAL;
-
- mutex_lock(&ddata->mutex);
- set_cabc_mode(ddata, i);
- mutex_unlock(&ddata->mutex);
-
- return count;
-}
-
-static ssize_t show_cabc_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int len;
- int i;
-
- if (!ddata->has_cabc)
- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
-
- for (i = 0, len = 0;
- len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
- i ? " " : "", cabc_modes[i],
- i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
-
- return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
-}
-
-static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
- show_cabc_mode, store_cabc_mode);
-static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
- show_cabc_available_modes, NULL);
-
-static struct attribute *bldev_attrs[] = {
- &dev_attr_cabc_mode.attr,
- &dev_attr_cabc_available_modes.attr,
- NULL,
-};
-
-static const struct attribute_group bldev_attr_group = {
- .attrs = bldev_attrs,
-};
-
-static int acx565akm_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void acx565akm_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- /*FIXME tweak me */
- msleep(50);
-
- if (ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 1);
-
- if (ddata->enabled) {
- dev_dbg(&ddata->spi->dev, "panel already enabled\n");
- return 0;
- }
-
- /*
- * We have to meet all the following delay requirements:
- * 1. tRW: reset pulse width 10usec (7.12.1)
- * 2. tRT: reset cancel time 5msec (7.12.1)
- * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
- * case (7.6.2)
- * 4. 120msec before the sleep out command (7.12.1)
- */
- msleep(120);
-
- set_sleep_mode(ddata, 0);
- ddata->enabled = 1;
-
- /* 5msec between sleep out and the next command. (8.2.16) */
- usleep_range(5000, 10000);
- set_display_state(ddata, 1);
- set_cabc_mode(ddata, ddata->cabc_mode);
-
- return acx565akm_bl_update_status(ddata->bl_dev);
-}
-
-static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!ddata->enabled)
- return;
-
- set_display_state(ddata, 0);
- set_sleep_mode(ddata, 1);
- ddata->enabled = 0;
- /*
- * We have to provide PCLK,HS,VS signals for 2 frames (worst case
- * ~50msec) after sending the sleep in command and asserting the
- * reset signal. We probably could assert the reset w/o the delay
- * but we still delay to avoid possible artifacts. (7.6.1)
- */
- msleep(50);
-
- if (ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 0);
-
- /* FIXME need to tweak this delay */
- msleep(100);
-}
-
-static void acx565akm_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->mutex);
- acx565akm_panel_power_on(dssdev);
- mutex_unlock(&ddata->mutex);
-}
-
-static void acx565akm_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->mutex);
- acx565akm_panel_power_off(dssdev);
- mutex_unlock(&ddata->mutex);
-}
-
-static int acx565akm_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops acx565akm_ops = {
- .connect = acx565akm_connect,
- .disconnect = acx565akm_disconnect,
-
- .enable = acx565akm_enable,
- .disable = acx565akm_disable,
-
- .get_modes = acx565akm_get_modes,
-};
-
-static int acx565akm_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct backlight_device *bldev;
- int max_brightness, brightness;
- struct backlight_properties props;
- struct gpio_desc *gpio;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->mode = SPI_MODE_3;
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- mutex_init(&ddata->mutex);
-
- gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to parse reset gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->reset_gpio = gpio;
-
- if (ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 1);
-
- /*
- * After reset we have to wait 5 msec before the first
- * command can be sent.
- */
- usleep_range(5000, 10000);
-
- ddata->enabled = panel_enabled(ddata);
-
- r = panel_detect(ddata);
-
- if (!ddata->enabled && ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 0);
-
- if (r) {
- dev_err(&spi->dev, "%s panel detect error\n", __func__);
- return r;
- }
-
- memset(&props, 0, sizeof(props));
- props.fb_blank = FB_BLANK_UNBLANK;
- props.power = FB_BLANK_UNBLANK;
- props.type = BACKLIGHT_RAW;
-
- bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
- ddata, &acx565akm_bl_ops, &props);
- if (IS_ERR(bldev))
- return PTR_ERR(bldev);
- ddata->bl_dev = bldev;
- if (ddata->has_cabc) {
- r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
- if (r) {
- dev_err(&bldev->dev,
- "%s failed to create sysfs files\n", __func__);
- goto err_backlight_unregister;
- }
- ddata->cabc_mode = get_hw_cabc_mode(ddata);
- }
-
- max_brightness = 255;
-
- if (ddata->has_bc)
- brightness = acx565akm_get_actual_brightness(ddata);
- else
- brightness = 0;
-
- bldev->props.max_brightness = max_brightness;
- bldev->props.brightness = brightness;
-
- acx565akm_bl_update_status(bldev);
-
-
- ddata->vm = acx565akm_panel_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &acx565akm_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_SDI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-
-err_backlight_unregister:
- backlight_device_unregister(bldev);
- return r;
-}
-
-static int acx565akm_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group);
- backlight_device_unregister(ddata->bl_dev);
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- acx565akm_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id acx565akm_of_match[] = {
- { .compatible = "omapdss,sony,acx565akm", },
- {},
-};
-MODULE_DEVICE_TABLE(of, acx565akm_of_match);
-
-static struct spi_driver acx565akm_driver = {
- .driver = {
- .name = "acx565akm",
- .of_match_table = acx565akm_of_match,
- .suppress_bind_attrs = true,
- },
- .probe = acx565akm_probe,
- .remove = acx565akm_remove,
-};
-
-module_spi_driver(acx565akm_driver);
-
-MODULE_ALIAS("spi:sony,acx565akm");
-MODULE_AUTHOR("Nokia Corporation");
-MODULE_DESCRIPTION("acx565akm LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
deleted file mode 100644
index c885018ac6ce..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ /dev/null
@@ -1,390 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Toppoly TD028TTEC1 panel support
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Neo 1973 code (jbt6k74.c):
- * Copyright (C) 2006-2007 by OpenMoko, Inc.
- * Author: Harald Welte <laforge@openmoko.org>
- *
- * Ported and adapted from Neo 1973 U-Boot by:
- * H. Nikolaus Schaller <hns@goldelico.com>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct backlight_device *backlight;
-
- struct spi_device *spi_dev;
-};
-
-static const struct videomode td028ttec1_panel_vm = {
- .hactive = 480,
- .vactive = 640,
- .pixelclock = 22153000,
- .hfront_porch = 24,
- .hsync_len = 8,
- .hback_porch = 8,
- .vfront_porch = 4,
- .vsync_len = 2,
- .vback_porch = 2,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define JBT_COMMAND 0x000
-#define JBT_DATA 0x100
-
-static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg)
-{
- int rc;
- u16 tx_buf = JBT_COMMAND | reg;
-
- rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf,
- 1*sizeof(u16));
- if (rc != 0)
- dev_err(&ddata->spi_dev->dev,
- "jbt_ret_write_0 spi_write ret %d\n", rc);
-
- return rc;
-}
-
-static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data)
-{
- int rc;
- u16 tx_buf[2];
-
- tx_buf[0] = JBT_COMMAND | reg;
- tx_buf[1] = JBT_DATA | data;
- rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
- 2*sizeof(u16));
- if (rc != 0)
- dev_err(&ddata->spi_dev->dev,
- "jbt_reg_write_1 spi_write ret %d\n", rc);
-
- return rc;
-}
-
-static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data)
-{
- int rc;
- u16 tx_buf[3];
-
- tx_buf[0] = JBT_COMMAND | reg;
- tx_buf[1] = JBT_DATA | (data >> 8);
- tx_buf[2] = JBT_DATA | (data & 0xff);
-
- rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
- 3*sizeof(u16));
-
- if (rc != 0)
- dev_err(&ddata->spi_dev->dev,
- "jbt_reg_write_2 spi_write ret %d\n", rc);
-
- return rc;
-}
-
-enum jbt_register {
- JBT_REG_SLEEP_IN = 0x10,
- JBT_REG_SLEEP_OUT = 0x11,
-
- JBT_REG_DISPLAY_OFF = 0x28,
- JBT_REG_DISPLAY_ON = 0x29,
-
- JBT_REG_RGB_FORMAT = 0x3a,
- JBT_REG_QUAD_RATE = 0x3b,
-
- JBT_REG_POWER_ON_OFF = 0xb0,
- JBT_REG_BOOSTER_OP = 0xb1,
- JBT_REG_BOOSTER_MODE = 0xb2,
- JBT_REG_BOOSTER_FREQ = 0xb3,
- JBT_REG_OPAMP_SYSCLK = 0xb4,
- JBT_REG_VSC_VOLTAGE = 0xb5,
- JBT_REG_VCOM_VOLTAGE = 0xb6,
- JBT_REG_EXT_DISPL = 0xb7,
- JBT_REG_OUTPUT_CONTROL = 0xb8,
- JBT_REG_DCCLK_DCEV = 0xb9,
- JBT_REG_DISPLAY_MODE1 = 0xba,
- JBT_REG_DISPLAY_MODE2 = 0xbb,
- JBT_REG_DISPLAY_MODE = 0xbc,
- JBT_REG_ASW_SLEW = 0xbd,
- JBT_REG_DUMMY_DISPLAY = 0xbe,
- JBT_REG_DRIVE_SYSTEM = 0xbf,
-
- JBT_REG_SLEEP_OUT_FR_A = 0xc0,
- JBT_REG_SLEEP_OUT_FR_B = 0xc1,
- JBT_REG_SLEEP_OUT_FR_C = 0xc2,
- JBT_REG_SLEEP_IN_LCCNT_D = 0xc3,
- JBT_REG_SLEEP_IN_LCCNT_E = 0xc4,
- JBT_REG_SLEEP_IN_LCCNT_F = 0xc5,
- JBT_REG_SLEEP_IN_LCCNT_G = 0xc6,
-
- JBT_REG_GAMMA1_FINE_1 = 0xc7,
- JBT_REG_GAMMA1_FINE_2 = 0xc8,
- JBT_REG_GAMMA1_INCLINATION = 0xc9,
- JBT_REG_GAMMA1_BLUE_OFFSET = 0xca,
-
- JBT_REG_BLANK_CONTROL = 0xcf,
- JBT_REG_BLANK_TH_TV = 0xd0,
- JBT_REG_CKV_ON_OFF = 0xd1,
- JBT_REG_CKV_1_2 = 0xd2,
- JBT_REG_OEV_TIMING = 0xd3,
- JBT_REG_ASW_TIMING_1 = 0xd4,
- JBT_REG_ASW_TIMING_2 = 0xd5,
-
- JBT_REG_HCLOCK_VGA = 0xec,
- JBT_REG_HCLOCK_QVGA = 0xed,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int td028ttec1_panel_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void td028ttec1_panel_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r = 0;
-
- dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state);
-
- /* three times command zero */
- r |= jbt_ret_write_0(ddata, 0x00);
- usleep_range(1000, 2000);
- r |= jbt_ret_write_0(ddata, 0x00);
- usleep_range(1000, 2000);
- r |= jbt_ret_write_0(ddata, 0x00);
- usleep_range(1000, 2000);
-
- if (r) {
- dev_warn(dssdev->dev, "%s: transfer error\n", __func__);
- return;
- }
-
- /* deep standby out */
- r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17);
-
- /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
- r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80);
-
- /* Quad mode off */
- r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00);
-
- /* AVDD on, XVDD on */
- r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16);
-
- /* Output control */
- r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9);
-
- /* Sleep mode off */
- r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT);
-
- /* at this point we have like 50% grey */
-
- /* initialize register set */
- r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01);
- r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00);
- r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60);
- r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02);
- r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b);
- r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40);
- r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03);
- r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04);
- /*
- * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement
- * to avoid red / blue flicker
- */
- r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04);
- r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00);
-
- r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0);
-
- r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533);
- r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00);
- r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00);
- r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00);
-
- r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0);
- r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02);
- r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804);
-
- r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01);
- r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000);
-
- r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e);
- r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4);
- r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e);
-
- r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
-
- if (r)
- dev_err(dssdev->dev, "%s: write error\n", __func__);
-
- backlight_enable(ddata->backlight);
-}
-
-static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- backlight_disable(ddata->backlight);
-
- dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
-
- jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF);
- jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
- jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
- jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
-}
-
-static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops td028ttec1_ops = {
- .connect = td028ttec1_panel_connect,
- .disconnect = td028ttec1_panel_disconnect,
-
- .enable = td028ttec1_panel_enable,
- .disable = td028ttec1_panel_disable,
-
- .get_modes = td028ttec1_panel_get_modes,
-};
-
-static int td028ttec1_panel_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->bits_per_word = 9;
- spi->mode = SPI_MODE_3;
-
- r = spi_setup(spi);
- if (r < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", r);
- return r;
- }
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- ddata->backlight = devm_of_find_backlight(&spi->dev);
- if (IS_ERR(ddata->backlight))
- return PTR_ERR(ddata->backlight);
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi_dev = spi;
-
- ddata->vm = td028ttec1_panel_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &td028ttec1_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int td028ttec1_panel_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
-
- omapdss_device_unregister(dssdev);
-
- td028ttec1_panel_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id td028ttec1_of_match[] = {
- { .compatible = "omapdss,tpo,td028ttec1", },
- /* keep to not break older DTB */
- { .compatible = "omapdss,toppoly,td028ttec1", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
-
-static const struct spi_device_id td028ttec1_ids[] = {
- { "toppoly,td028ttec1", 0 },
- { "tpo,td028ttec1", 0},
- { /* sentinel */ }
-};
-
-MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
-
-
-static struct spi_driver td028ttec1_spi_driver = {
- .probe = td028ttec1_panel_probe,
- .remove = td028ttec1_panel_remove,
- .id_table = td028ttec1_ids,
-
- .driver = {
- .name = "panel-tpo-td028ttec1",
- .of_match_table = td028ttec1_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_spi_driver(td028ttec1_spi_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
-MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
deleted file mode 100644
index ce09217da597..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ /dev/null
@@ -1,513 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * TPO TD043MTEA1 Panel driver
- *
- * Author: Gražvydas Ignotas <notasas@gmail.com>
- * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-#define TPO_R02_MODE(x) ((x) & 7)
-#define TPO_R02_MODE_800x480 7
-#define TPO_R02_NCLK_RISING BIT(3)
-#define TPO_R02_HSYNC_HIGH BIT(4)
-#define TPO_R02_VSYNC_HIGH BIT(5)
-
-#define TPO_R03_NSTANDBY BIT(0)
-#define TPO_R03_EN_CP_CLK BIT(1)
-#define TPO_R03_EN_VGL_PUMP BIT(2)
-#define TPO_R03_EN_PWM BIT(3)
-#define TPO_R03_DRIVING_CAP_100 BIT(4)
-#define TPO_R03_EN_PRE_CHARGE BIT(6)
-#define TPO_R03_SOFTWARE_CTL BIT(7)
-
-#define TPO_R04_NFLIP_H BIT(0)
-#define TPO_R04_NFLIP_V BIT(1)
-#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
-#define TPO_R04_VGL_FREQ_1H BIT(4)
-
-#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \
- TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \
- TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
- TPO_R03_SOFTWARE_CTL)
-
-#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \
- TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
-
-static const u16 tpo_td043_def_gamma[12] = {
- 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
-};
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct spi_device *spi;
- struct regulator *vcc_reg;
- struct gpio_desc *reset_gpio;
- u16 gamma[12];
- u32 mode;
- u32 vmirror:1;
- u32 powered_on:1;
- u32 spi_suspended:1;
- u32 power_on_resume:1;
-};
-
-static const struct videomode tpo_td043_vm = {
- .hactive = 800,
- .vactive = 480,
-
- .pixelclock = 36000000,
-
- .hsync_len = 1,
- .hfront_porch = 68,
- .hback_porch = 214,
-
- .vsync_len = 1,
- .vfront_porch = 39,
- .vback_porch = 34,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
-{
- struct spi_message m;
- struct spi_transfer xfer;
- u16 w;
- int r;
-
- spi_message_init(&m);
-
- memset(&xfer, 0, sizeof(xfer));
-
- w = ((u16)addr << 10) | (1 << 8) | data;
- xfer.tx_buf = &w;
- xfer.bits_per_word = 16;
- xfer.len = 2;
- spi_message_add_tail(&xfer, &m);
-
- r = spi_sync(spi, &m);
- if (r < 0)
- dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r);
- return r;
-}
-
-static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12])
-{
- u8 i, val;
-
- /* gamma bits [9:8] */
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x11, val);
-
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x12, val);
-
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x13, val);
-
- /* gamma bits [7:0] */
- for (val = i = 0; i < 12; i++)
- tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff);
-}
-
-static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
-{
- u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
- TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
- if (h)
- reg4 &= ~TPO_R04_NFLIP_H;
- if (v)
- reg4 &= ~TPO_R04_NFLIP_V;
-
- return tpo_td043_write(spi, 4, reg4);
-}
-
-static ssize_t tpo_td043_vmirror_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
-}
-
-static ssize_t tpo_td043_vmirror_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int val;
- int ret;
-
- ret = kstrtoint(buf, 0, &val);
- if (ret < 0)
- return ret;
-
- val = !!val;
-
- ret = tpo_td043_write_mirror(ddata->spi, false, val);
- if (ret < 0)
- return ret;
-
- ddata->vmirror = val;
-
- return count;
-}
-
-static ssize_t tpo_td043_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
-}
-
-static ssize_t tpo_td043_mode_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- long val;
- int ret;
-
- ret = kstrtol(buf, 0, &val);
- if (ret != 0 || val & ~7)
- return -EINVAL;
-
- ddata->mode = val;
-
- val |= TPO_R02_NCLK_RISING;
- tpo_td043_write(ddata->spi, 2, val);
-
- return count;
-}
-
-static ssize_t tpo_td043_gamma_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- ssize_t len = 0;
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
- ddata->gamma[i]);
- if (ret < 0)
- return ret;
- len += ret;
- }
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t tpo_td043_gamma_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int g[12];
- int ret;
- int i;
-
- ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
- &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
- &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
-
- if (ret != 12)
- return -EINVAL;
-
- for (i = 0; i < 12; i++)
- ddata->gamma[i] = g[i];
-
- tpo_td043_write_gamma(ddata->spi, ddata->gamma);
-
- return count;
-}
-
-static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR,
- tpo_td043_vmirror_show, tpo_td043_vmirror_store);
-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- tpo_td043_mode_show, tpo_td043_mode_store);
-static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR,
- tpo_td043_gamma_show, tpo_td043_gamma_store);
-
-static struct attribute *tpo_td043_attrs[] = {
- &dev_attr_vmirror.attr,
- &dev_attr_mode.attr,
- &dev_attr_gamma.attr,
- NULL,
-};
-
-static const struct attribute_group tpo_td043_attr_group = {
- .attrs = tpo_td043_attrs,
-};
-
-static int tpo_td043_power_on(struct panel_drv_data *ddata)
-{
- int r;
-
- if (ddata->powered_on)
- return 0;
-
- r = regulator_enable(ddata->vcc_reg);
- if (r != 0)
- return r;
-
- /* wait for panel to stabilize */
- msleep(160);
-
- gpiod_set_value(ddata->reset_gpio, 0);
-
- tpo_td043_write(ddata->spi, 2,
- TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING);
- tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL);
- tpo_td043_write(ddata->spi, 0x20, 0xf0);
- tpo_td043_write(ddata->spi, 0x21, 0xf0);
- tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror);
- tpo_td043_write_gamma(ddata->spi, ddata->gamma);
-
- ddata->powered_on = 1;
- return 0;
-}
-
-static void tpo_td043_power_off(struct panel_drv_data *ddata)
-{
- if (!ddata->powered_on)
- return;
-
- tpo_td043_write(ddata->spi, 3,
- TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
-
- gpiod_set_value(ddata->reset_gpio, 1);
-
- /* wait for at least 2 vsyncs before cutting off power */
- msleep(50);
-
- tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY);
-
- regulator_disable(ddata->vcc_reg);
-
- ddata->powered_on = 0;
-}
-
-static int tpo_td043_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void tpo_td043_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void tpo_td043_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- /*
- * If we are resuming from system suspend, SPI clocks might not be
- * enabled yet, so we'll program the LCD from SPI PM resume callback.
- */
- if (!ddata->spi_suspended) {
- r = tpo_td043_power_on(ddata);
- if (r) {
- dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n",
- __func__, r);
- return;
- }
- }
-}
-
-static void tpo_td043_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (!ddata->spi_suspended)
- tpo_td043_power_off(ddata);
-}
-
-static int tpo_td043_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops tpo_td043_ops = {
- .connect = tpo_td043_connect,
- .disconnect = tpo_td043_disconnect,
-
- .enable = tpo_td043_enable,
- .disable = tpo_td043_disable,
-
- .get_modes = tpo_td043_get_modes,
-};
-
-static int tpo_td043_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->bits_per_word = 16;
- spi->mode = SPI_MODE_0;
-
- r = spi_setup(spi);
- if (r < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", r);
- return r;
- }
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- ddata->mode = TPO_R02_MODE_800x480;
- memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
-
- ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
- if (IS_ERR(ddata->vcc_reg)) {
- dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
- return PTR_ERR(ddata->vcc_reg);
- }
-
- gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to get reset gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->reset_gpio = gpio;
-
- r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group);
- if (r) {
- dev_err(&spi->dev, "failed to create sysfs files\n");
- return r;
- }
-
- ddata->vm = tpo_td043_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &tpo_td043_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int tpo_td043_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- tpo_td043_disable(dssdev);
-
- sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tpo_td043_spi_suspend(struct device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
-
- dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", ddata);
-
- ddata->power_on_resume = ddata->powered_on;
- tpo_td043_power_off(ddata);
- ddata->spi_suspended = 1;
-
- return 0;
-}
-
-static int tpo_td043_spi_resume(struct device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int ret;
-
- dev_dbg(dev, "tpo_td043_spi_resume\n");
-
- if (ddata->power_on_resume) {
- ret = tpo_td043_power_on(ddata);
- if (ret)
- return ret;
- }
- ddata->spi_suspended = 0;
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm,
- tpo_td043_spi_suspend, tpo_td043_spi_resume);
-
-static const struct of_device_id tpo_td043_of_match[] = {
- { .compatible = "omapdss,tpo,td043mtea1", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tpo_td043_of_match);
-
-static struct spi_driver tpo_td043_spi_driver = {
- .driver = {
- .name = "panel-tpo-td043mtea1",
- .pm = &tpo_td043_spi_pm,
- .of_match_table = tpo_td043_of_match,
- .suppress_bind_attrs = true,
- },
- .probe = tpo_td043_probe,
- .remove = tpo_td043_remove,
-};
-
-module_spi_driver(tpo_td043_spi_driver);
-
-MODULE_ALIAS("spi:tpo,td043mtea1");
-MODULE_AUTHOR("GraĹľvydas Ignotas <notasas@gmail.com>");
-MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 956f23e1452d..72ae79c0c9b4 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -6,12 +6,12 @@ config OMAP_DSS_BASE
tristate
menuconfig OMAP2_DSS
- tristate "OMAP2+ Display Subsystem support"
+ tristate "OMAP2+ Display Subsystem support"
select OMAP_DSS_BASE
select VIDEOMODE_HELPERS
select OMAP2_DSS_INIT
select HDMI
- help
+ help
OMAP2+ Display Subsystem support.
if OMAP2_DSS
@@ -52,7 +52,7 @@ config OMAP2_DSS_DPI
config OMAP2_DSS_VENC
bool "VENC support"
- default y
+ default y
help
OMAP Video Encoder support for S-Video and composite TV-out.
@@ -61,7 +61,7 @@ config OMAP2_DSS_HDMI_COMMON
config OMAP4_DSS_HDMI
bool "HDMI support for OMAP4"
- default y
+ default y
select OMAP2_DSS_HDMI_COMMON
help
HDMI support for OMAP4 based SoCs.
@@ -85,7 +85,7 @@ config OMAP5_DSS_HDMI
config OMAP2_DSS_SDI
bool "SDI support"
- default n
+ default n
help
SDI (Serial Display Interface) support.
@@ -94,7 +94,7 @@ config OMAP2_DSS_SDI
config OMAP2_DSS_DSI
bool "DSI support"
- default n
+ default n
help
MIPI DSI (Display Serial Interface) support.
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 904101c5e79d..5950c3f52c2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -6,7 +6,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
-omapdss-y := core.o dss.o dispc.o dispc_coefs.o \
+omapdss-y := dss.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
deleted file mode 100644
index 6ac497b63711..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "CORE"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "omapdss.h"
-#include "dss.h"
-
-/* INIT */
-static struct platform_driver * const omap_dss_drivers[] = {
- &omap_dsshw_driver,
- &omap_dispchw_driver,
-#ifdef CONFIG_OMAP2_DSS_DSI
- &omap_dsihw_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- &omap_venchw_driver,
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- &omapdss_hdmi4hw_driver,
-#endif
-#ifdef CONFIG_OMAP5_DSS_HDMI
- &omapdss_hdmi5hw_driver,
-#endif
-};
-
-static int __init omap_dss_init(void)
-{
- return platform_register_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-static void __exit omap_dss_exit(void)
-{
- platform_unregister_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 785c5546067a..dbb90f2d2ccd 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -114,6 +114,7 @@ struct dispc_features {
const unsigned int num_reg_fields;
const enum omap_overlay_caps *overlay_caps;
const u32 **supported_color_modes;
+ const u32 *supported_scaler_color_modes;
unsigned int num_mgrs;
unsigned int num_ovls;
unsigned int buffer_size_unit;
@@ -184,9 +185,6 @@ struct dispc_device {
struct regmap *syscon_pol;
u32 syscon_pol_offset;
-
- /* DISPC_CONTROL & DISPC_CONFIG lock*/
- spinlock_t control_lock;
};
enum omap_color_component {
@@ -368,25 +366,17 @@ static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- return REG_GET(dispc, rfld.reg, rfld.high, rfld.low);
+ return REG_GET(dispc, rfld->reg, rfld->high, rfld->low);
}
static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld, int val)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
- const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
- unsigned long flags;
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- if (need_lock) {
- spin_lock_irqsave(&dispc->control_lock, flags);
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- spin_unlock_irqrestore(&dispc->control_lock, flags);
- } else {
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- }
+ REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
static int dispc_get_num_ovls(struct dispc_device *dispc)
@@ -403,8 +393,7 @@ static void dispc_get_reg_field(struct dispc_device *dispc,
enum dispc_feat_reg_field id,
u8 *start, u8 *end)
{
- if (id >= dispc->feat->num_reg_fields)
- BUG();
+ BUG_ON(id >= dispc->feat->num_reg_fields);
*start = dispc->feat->reg_fields[id].start;
*end = dispc->feat->reg_fields[id].end;
@@ -2510,6 +2499,19 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
if (width == out_width && height == out_height)
return 0;
+ if (dispc->feat->supported_scaler_color_modes) {
+ const u32 *modes = dispc->feat->supported_scaler_color_modes;
+ unsigned int i;
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ break;
+ }
+
+ if (modes[i] == 0)
+ return -EINVAL;
+ }
+
if (plane == OMAP_DSS_WB) {
switch (fourcc) {
case DRM_FORMAT_NV12:
@@ -4225,6 +4227,12 @@ static const u32 *omap4_dispc_supported_color_modes[] = {
DRM_FORMAT_RGBX8888),
};
+static const u32 omap3_dispc_supported_scaler_color_modes[] = {
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ 0,
+};
+
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
@@ -4253,6 +4261,7 @@ static const struct dispc_features omap24xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
.overlay_caps = omap2_dispc_overlay_caps,
.supported_color_modes = omap2_dispc_supported_color_modes,
+ .supported_scaler_color_modes = COLOR_ARRAY(DRM_FORMAT_XRGB8888),
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4287,6 +4296,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4321,6 +4331,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4355,6 +4366,7 @@ static const struct dispc_features omap36xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3630_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4389,6 +4401,7 @@ static const struct dispc_features am43xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 1,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4609,11 +4622,10 @@ static int dispc_errata_i734_wa_init(struct dispc_device *dispc)
i734_buf.size = i734.ovli.width * i734.ovli.height *
color_mode_to_bpp(i734.ovli.fourcc) / 8;
- i734_buf.vaddr = dma_alloc_writecombine(&dispc->pdev->dev,
- i734_buf.size, &i734_buf.paddr,
- GFP_KERNEL);
+ i734_buf.vaddr = dma_alloc_wc(&dispc->pdev->dev, i734_buf.size,
+ &i734_buf.paddr, GFP_KERNEL);
if (!i734_buf.vaddr) {
- dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed\n",
+ dev_err(&dispc->pdev->dev, "%s: dma_alloc_wc failed\n",
__func__);
return -ENOMEM;
}
@@ -4626,8 +4638,8 @@ static void dispc_errata_i734_wa_fini(struct dispc_device *dispc)
if (!dispc->feat->has_gamma_i734_bug)
return;
- dma_free_writecombine(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
- i734_buf.paddr);
+ dma_free_wc(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
+ i734_buf.paddr);
}
static void dispc_errata_i734_wa(struct dispc_device *dispc)
@@ -4769,8 +4781,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, dispc);
dispc->dss = dss;
- spin_lock_init(&dispc->control_lock);
-
/*
* The OMAP3-based models can't be told apart using the compatible
* string, use SoC device matching.
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b30fcaa2d0f5..da16ea095f13 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
static void dsi_proto_timings(struct dsi_data *dsi)
{
- unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tlpx, tclk_zero, tclk_prepare;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned int ths_trail, ths_exit;
@@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi)
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 5711b7a720e6..225ec808b01a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -923,7 +923,6 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
void *data)
{
struct dss_debugfs_entry *entry;
- struct dentry *d;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -931,15 +930,9 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
entry->show_fn = show_fn;
entry->data = data;
+ entry->dentry = debugfs_create_file(name, 0444, dss->debugfs.root,
+ entry, &dss_debug_fops);
- d = debugfs_create_file(name, 0444, dss->debugfs.root, entry,
- &dss_debug_fops);
- if (IS_ERR(d)) {
- kfree(entry);
- return ERR_CAST(d);
- }
-
- entry->dentry = d;
return entry;
}
@@ -1090,7 +1083,7 @@ static const struct dss_features omap34xx_dss_feats = {
static const struct dss_features omap3630_dss_feats = {
.model = DSS_MODEL_OMAP3,
- .fck_div_max = 32,
+ .fck_div_max = 31,
.fck_freq_max = 173000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
@@ -1605,3 +1598,40 @@ struct platform_driver omap_dsshw_driver = {
.suppress_bind_attrs = true,
},
};
+
+/* INIT */
+static struct platform_driver * const omap_dss_drivers[] = {
+ &omap_dsshw_driver,
+ &omap_dispchw_driver,
+#ifdef CONFIG_OMAP2_DSS_DSI
+ &omap_dsihw_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ &omap_venchw_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ &omapdss_hdmi4hw_driver,
+#endif
+#ifdef CONFIG_OMAP5_DSS_HDMI
+ &omapdss_hdmi5hw_driver,
+#endif
+};
+
+static int __init omap_dss_init(void)
+{
+ return platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+static void __exit omap_dss_exit(void)
+{
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+module_init(omap_dss_init);
+module_exit(omap_dss_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 5d5d5588ebc1..ea5d5c228534 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -542,8 +542,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core,
}
/* Set ACR clock divisor */
- REG_FLD_MOD(av_base,
- HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+ if (cfg->use_mclk)
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
+ cfg->mclk_mode, 2, 0);
r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
/*
@@ -675,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -737,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 7400fb99d453..ff4d35c8771f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,24 +23,12 @@
#include "hdmi5_core.h"
-/* only 24 bit color depth used for now */
-static const struct csc_table csc_table_deepcolor[] = {
- /* HDMI_DEEP_COLOR_24BIT */
- [0] = { 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32, },
- /* HDMI_DEEP_COLOR_30BIT */
- [1] = { 7015, 0, 0, 128, 0, 7015, 0, 128, 0, 0, 7015, 128, },
- /* HDMI_DEEP_COLOR_36BIT */
- [2] = { 7010, 0, 0, 512, 0, 7010, 0, 512, 0, 0, 7010, 512, },
- /* FULL RANGE */
- [3] = { 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0, },
-};
-
static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned int ss_scl_high = 4600; /* ns */
- const unsigned int ss_scl_low = 5400; /* ns */
+ const unsigned int ss_scl_high = 4700; /* ns */
+ const unsigned int ss_scl_low = 5500; /* ns */
const unsigned int fs_scl_high = 600; /* ns */
const unsigned int fs_scl_low = 1300; /* ns */
const unsigned int sda_hold = 1000; /* ns */
@@ -397,14 +385,6 @@ static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 2, 1, 0);
}
-static void hdmi_core_config_csc(struct hdmi_core_data *core)
-{
- int clr_depth = 0; /* 24 bit color depth */
-
- /* CSC_COLORDEPTH */
- REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, clr_depth, 7, 4);
-}
-
static void hdmi_core_config_video_sampler(struct hdmi_core_data *core)
{
int video_mapping = 1; /* for 24 bit color depth */
@@ -469,47 +449,67 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0);
}
-static void hdmi_core_csc_config(struct hdmi_core_data *core,
- struct csc_table csc_coeff)
+static void hdmi_core_write_csc(struct hdmi_core_data *core,
+ const struct csc_table *csc_coeff)
{
void __iomem *base = core->base;
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff.a1 >> 8 , 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff.a1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff.a2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff.a2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff.a3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff.a3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff.a4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff.a4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff.b1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff.b1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff.b2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff.b2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff.b3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff.b3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff.b4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff.b4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff.c1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff.c1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff.c2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff.c2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff.c3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff.c3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff.c4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff.c4, 7, 0);
-
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff->a1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff->a1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff->a2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff->a2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff->a3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff->a3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff->a4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff->a4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff->b1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff->b1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff->b2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff->b2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff->b3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff->b3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff->b4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff->b4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff->c1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff->c1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff->c2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff->c2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff->c3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff->c3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff->c4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff->c4, 7, 0);
+
+ /* enable CSC */
REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0);
}
-static void hdmi_core_configure_range(struct hdmi_core_data *core)
+static void hdmi_core_configure_range(struct hdmi_core_data *core,
+ enum hdmi_quantization_range range)
{
- struct csc_table csc_coeff = { 0 };
+ static const struct csc_table csc_limited_range = {
+ 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32
+ };
+ static const struct csc_table csc_full_range = {
+ 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0
+ };
+ const struct csc_table *csc_coeff;
+
+ /* CSC_COLORDEPTH = 24 bits*/
+ REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, 0, 7, 4);
+
+ switch (range) {
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ csc_coeff = &csc_full_range;
+ break;
- /* support limited range with 24 bit color depth for now */
- csc_coeff = csc_table_deepcolor[0];
+ case HDMI_QUANTIZATION_RANGE_DEFAULT:
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ default:
+ csc_coeff = &csc_limited_range;
+ break;
+ }
- hdmi_core_csc_config(core, csc_coeff);
+ hdmi_core_write_csc(core, csc_coeff);
}
static void hdmi_core_enable_video_path(struct hdmi_core_data *core)
@@ -600,9 +600,20 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
+ enum hdmi_quantization_range range;
hdmi_core_mask_interrupts(core);
+ if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
+ char vic = cfg->infoframe.video_code;
+
+ /* All CEA modes other than VIC 1 use limited quantization range. */
+ range = vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ range = HDMI_QUANTIZATION_RANGE_FULL;
+ }
+
hdmi_core_init(&v_core_cfg, cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
@@ -616,9 +627,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_wp_video_config_interface(wp, &vm);
- /* support limited range with 24 bit color depth for now */
- hdmi_core_configure_range(core);
- cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ hdmi_core_configure_range(core, range);
+ cfg->infoframe.quantization_range = range;
/*
* configure core video part, set software reset in the core
@@ -628,7 +638,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_video_config(core, &v_core_cfg);
hdmi_core_config_video_packetizer(core);
- hdmi_core_config_csc(core);
hdmi_core_config_video_sampler(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI)
@@ -798,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -841,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index a140de79c50e..31502857f013 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -176,17 +176,10 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
{ .compatible = "composite-video-connector" },
{ .compatible = "hdmi-connector" },
- { .compatible = "lgphilips,lb035q02" },
- { .compatible = "nec,nl8048hl11" },
{ .compatible = "panel-dsi-cm" },
- { .compatible = "sharp,ls037v7dw01" },
- { .compatible = "sony,acx565akm" },
{ .compatible = "svideo-connector" },
{ .compatible = "ti,opa362" },
{ .compatible = "ti,tpd12s015" },
- { .compatible = "toppoly,td028ttec1" },
- { .compatible = "tpo,td028ttec1" },
- { .compatible = "tpo,td043mtea1" },
{},
};
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index de0f882f0f7b..0693d34fca1b 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,6 +4,7 @@
* Author: Archit Taneja <archit@ti.com>
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -11,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include "dss.h"
@@ -20,7 +22,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
{
struct device_node *remote_node;
- remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+ remote_node = of_graph_get_remote_node(out->dev->of_node,
+ ffs(out->of_ports) - 1, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
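The output.c change above stops hardcoding port 0 and instead resolves the remote node on the lowest port the output actually declares in its of_ports bitmask. A hedged sketch of the ffs()-based mapping (the helper name is hypothetical):

#include <linux/bitops.h>

/* ffs() is 1-based: ffs(BIT(2)) == 3, so the first declared port is 2.
 * Callers are assumed to pass a non-empty mask, as omapdss outputs do. */
static unsigned int example_first_of_port(unsigned int of_ports)
{
	return ffs(of_ports) - 1;
}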
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 5b8799c69f68..94cded387174 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -229,7 +229,8 @@ static int omap_connector_get_modes(struct drm_connector *connector)
* operation to the panel API.
*/
if (omap_connector->output->panel)
- return drm_panel_get_modes(omap_connector->output->panel);
+ return drm_panel_get_modes(omap_connector->output->panel,
+ connector);
/*
* We can't retrieve modes, which can happen for instance for a DVI or
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index f9ac9afc5641..3c5ddbf30e97 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -4,12 +4,14 @@
* Author: Rob Clark <rob@ti.com>
*/
+#include <linux/math64.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
-#include <linux/math64.h>
+#include <drm/drm_vblank.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2b283f68fab7..34dfb33145b4 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -7,6 +7,8 @@
#include <linux/seq_file.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include <drm/drm_fb_helper.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 835e6654fa82..43c1d096b021 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(u32 flags)
{
- switch (flags & OMAP_BO_TILED) {
+ switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
return TILFMT_8BIT;
case OMAP_BO_TILED_16:
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..d2750f60f519 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -4,15 +4,22 @@
* Author: Rob Clark <rob@ti.com>
*/
-#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_probe_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_panel.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -210,8 +217,8 @@ static int omap_display_id(struct omap_dss_device *output)
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
- while (bridge->next)
- bridge = bridge->next;
+ while (drm_bridge_get_next_bridge(bridge))
+ bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
} else if (output->panel) {
@@ -466,19 +473,19 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, drm_invalid_op,
DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
};
/*
@@ -513,7 +520,7 @@ static const struct file_operations omapdriver_fops = {
};
static struct drm_driver omap_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
.open = dev_open,
.lastclose = drm_fb_helper_lastclose,
@@ -669,7 +676,7 @@ static int pdev_probe(struct platform_device *pdev)
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
return ret;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 025bd57081d5..7c4b66efcaa7 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -11,12 +11,11 @@
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <drm/drmP.h>
+#include "dss/omapdss.h"
+
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
-#include "dss/omapdss.h"
-
#include "omap_connector.h"
#include "omap_crtc.h"
#include "omap_encoder.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6fe14111cd95..4f2165a37795 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
@@ -125,7 +126,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
for (dssdev = output; dssdev; dssdev = dssdev->next)
omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
- for (bridge = output->bridge; bridge; bridge = bridge->next) {
+ for (bridge = output->bridge; bridge;
+ bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
continue;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 7e89e5cb4068..9aeab81dfb90 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -4,10 +4,10 @@
* Author: Rob Clark <rob@ti.com>
*/
-#include <linux/seq_file.h>
+#include <linux/dma-mapping.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
@@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb,
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
- struct plane *plane = &omap_fb->planes[0];
u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
info->screen_width /= format->cpp[0];
if (fb->format->format == DRM_FORMAT_NV12) {
- plane = &omap_fb->planes[1];
-
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 561c4812545b..b06e5cbfd03a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -7,6 +7,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include "omap_drv.h"
@@ -68,7 +70,7 @@ fallback:
return drm_fb_helper_pan_display(var, fbi);
}
-static struct fb_ops omap_fb_ops = {
+static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -76,8 +78,6 @@ static struct fb_ops omap_fb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
.fb_ioctl = drm_fb_helper_ioctl,
.fb_read = drm_fb_helper_sys_read,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 37378dbc50d0..d08ae95ecc0a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <rob.clark@linaro.org>
*/
+#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
+#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
@@ -65,7 +67,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- u32 dma_addr_cnt;
+ refcount_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -194,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
@@ -322,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
@@ -511,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
@@ -771,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
- if (omap_obj->dma_addr_cnt == 0) {
+ if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
+ refcount_set(&omap_obj->dma_addr_cnt, 1);
+
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
@@ -811,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->block = block;
DBG("got dma address: %pad", &omap_obj->dma_addr);
+ } else {
+ refcount_inc(&omap_obj->dma_addr_cnt);
}
- omap_obj->dma_addr_cnt++;
-
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else if (omap_gem_is_contiguous(omap_obj)) {
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -830,38 +836,50 @@ fail:
}
/**
- * omap_gem_unpin() - Unpin a GEM object from memory
+ * omap_gem_unpin_locked() - Unpin a GEM object from memory
* @obj: the GEM object
*
- * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
- * reference-counted, the actualy unpin will only be performed when the number
- * of calls to this function matches the number of calls to omap_gem_pin().
+ * omap_gem_unpin() without locking.
*/
-void omap_gem_unpin(struct drm_gem_object *obj)
+static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
+ struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
- mutex_lock(&omap_obj->lock);
+ if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm)
+ return;
- if (omap_obj->dma_addr_cnt > 0) {
- omap_obj->dma_addr_cnt--;
- if (omap_obj->dma_addr_cnt == 0) {
- ret = tiler_unpin(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not unpin pages: %d\n", ret);
- }
- ret = tiler_release(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not release unmap: %d\n", ret);
- }
- omap_obj->dma_addr = 0;
- omap_obj->block = NULL;
+ if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
}
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->dma_addr = 0;
+ omap_obj->block = NULL;
}
+}
+/**
+ * omap_gem_unpin() - Unpin a GEM object from memory
+ * @obj: the GEM object
+ *
+ * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
+ * reference-counted, the actual unpin will only be performed when the number
+ * of calls to this function matches the number of calls to omap_gem_pin().
+ */
+void omap_gem_unpin(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ mutex_lock(&omap_obj->lock);
+ omap_gem_unpin_locked(obj);
mutex_unlock(&omap_obj->lock);
}
@@ -877,8 +895,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock);
- if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
@@ -893,7 +911,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
@@ -1028,10 +1046,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
- off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
+ off, &omap_obj->dma_addr,
+ refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
@@ -1091,7 +1110,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */
- WARN_ON(omap_obj->dma_addr_cnt > 0);
+ WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1118,6 +1137,38 @@ void omap_gem_free_object(struct drm_gem_object *obj)
kfree(omap_obj);
}
+static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ switch (flags & OMAP_BO_CACHE_MASK) {
+ case OMAP_BO_CACHED:
+ case OMAP_BO_WC:
+ case OMAP_BO_CACHE_MASK:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (flags & OMAP_BO_TILED_MASK) {
+ if (!priv->usergart)
+ return false;
+
+ switch (flags & OMAP_BO_TILED_MASK) {
+ case OMAP_BO_TILED_8:
+ case OMAP_BO_TILED_16:
+ case OMAP_BO_TILED_32:
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
@@ -1129,18 +1180,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size_t size;
int ret;
- /* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
- if (!priv->usergart) {
- dev_err(dev->dev, "Tiled buffers require DMM\n");
- return NULL;
- }
+ if (!omap_gem_validate_flags(dev, flags))
+ return NULL;
+ /* Validate the flags and compute the memory and cache flags. */
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* Tiled buffers are always shmem paged backed. When they are
* scanned out, they are remapped into DMM/TILER.
*/
- flags &= ~OMAP_BO_SCANOUT;
flags |= OMAP_BO_MEM_SHMEM;
/*
@@ -1151,9 +1199,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
flags |= tiler_get_cpu_cache_flags();
} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
/*
- * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
- * tiled. However, to lower the pressure on memory allocation,
- * use contiguous memory only if no TILER is available.
+ * If we don't have DMM, we must allocate scanout buffers
+ * from contiguous DMA memory.
*/
flags |= OMAP_BO_MEM_DMA_API;
} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
@@ -1172,7 +1219,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
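The omap_gem.c conversion above replaces the open-coded u32 pin counter with a refcount_t, which warns on underflow and saturates instead of overflowing. A minimal sketch of the set/inc/dec_and_test pattern under an assumed external lock (struct and helper names are hypothetical):

#include <linux/refcount.h>

struct example_bo {
	refcount_t pin_cnt;	/* starts at 0, i.e. unpinned */
};

/* caller is assumed to hold the object lock, as omap_gem does */
static void example_pin(struct example_bo *bo)
{
	if (refcount_read(&bo->pin_cnt) == 0) {
		/* first pin: acquire the backing store, then publish count 1 */
		refcount_set(&bo->pin_cnt, 1);
	} else {
		/* later pins only bump the count */
		refcount_inc(&bo->pin_cnt);
	}
}

static void example_unpin(struct example_bo *bo)
{
	if (refcount_dec_and_test(&bo->pin_cnt)) {
		/* last unpin: release the backing store here */
	}
}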
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index 31cf345bf8ae..729b7812a815 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -65,8 +65,7 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
/* PRIME Interface */
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags);
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 07c0b1b486f7..b319fe7f2371 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -5,6 +5,9 @@
*/
#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+
+#include <drm/drm_prime.h>
#include "omap_drv.h"
@@ -64,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
- if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
@@ -82,25 +85,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
return 0;
}
-static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
- unsigned long page_num)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync_page(obj, page_num);
- return kmap(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
- unsigned long page_num, void *addr)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- kunmap(pages[page_num]);
-}
-
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct vm_area_struct *vma)
{
@@ -120,13 +104,10 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map = omap_gem_dmabuf_kmap,
- .unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
};
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -135,7 +116,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
exp_info.flags = flags;
exp_info.priv = obj;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 726a013e7988..382bcdc72ac0 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <rob.clark@linaro.org>
*/
+#include <drm/drm_vblank.h>
+
#include "omap_drv.h"
struct omap_irq_wait {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 84e1be981cfe..73ec99819a3d 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -53,8 +53,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
memset(&info, 0, sizeof(info));
info.rotation_type = OMAP_DSS_ROT_NONE;
info.rotation = DRM_MODE_ROTATE_0;
- info.global_alpha = 0xff;
+ info.global_alpha = state->alpha >> 8;
info.zorder = state->normalized_zpos;
+ if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ info.pre_mult_alpha = 1;
+ else
+ info.pre_mult_alpha = 0;
/* update scanout: */
omap_framebuffer_update_scanout(state->fb, state, &info);
@@ -285,6 +289,9 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane_install_properties(plane, &plane->base);
drm_plane_create_zpos_property(plane, 0, 0, num_planes - 1);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
return plane;
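The omap_plane.c hunks above wire the generic DRM plane "alpha" and "pixel blend mode" properties into the DSS overlay info. DRM stores plane alpha as a 16-bit value (0x0000 to 0xffff), while DSS global_alpha is 8 bits wide, hence the ">> 8". A small sketch of that mapping (the function name is illustrative only):

#include <linux/types.h>

static u8 example_drm_alpha_to_dss(u16 drm_alpha)
{
	/* 0xffff (DRM fully opaque) -> 0xff (DSS fully opaque) */
	return drm_alpha >> 8;
}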
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d9d931aa6e26..ae44ac2ec106 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,17 @@ config DRM_PANEL_ARM_VERSATILE
reference designs. The panel is detected using special registers
in the Versatile family syscon registers.
+config DRM_PANEL_BOE_HIMAX8279D
+ tristate "Boe Himax8279d panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Boe Himax8279d
+ TFT-LCD modules. The panel has a 1200x1920 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -98,11 +109,30 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_LEADTEK_LTK500HD1829
+ tristate "Leadtek LTK500HD1829 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Leadtek LTK500HD1829
+ TFT-LCD modules. The panel has a 720x1280 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_SAMSUNG_LD9040
tristate "Samsung LD9040 RGB/SPI panel"
depends on OF && SPI
select VIDEOMODE_HELPERS
+config DRM_PANEL_LG_LB035Q02
+ tristate "LG LB035Q024573 RGB panel"
+ depends on GPIOLIB && OF && SPI
+ help
+ Say Y here if you want to enable support for the LB035Q02 RGB panel
+ (found on the Gumstix Overo Palo35 board). To compile this driver as
+ a module, choose M here.
+
config DRM_PANEL_LG_LG4573
tristate "LG4573 RGB/SPI panel"
depends on OF && SPI
@@ -111,6 +141,23 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_NEC_NL8048HL11
+ tristate "NEC NL8048HL11 RGB panel"
+ depends on GPIOLIB && OF && SPI
+ help
+ Say Y here if you want to enable support for the NEC NL8048HL11 RGB
+ panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
+ as a module, choose M here.
+
+config DRM_PANEL_NOVATEK_NT39016
+ tristate "Novatek NT39016 RGB/SPI panel"
+ depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select REGMAP_SPI
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT39016 display controller.
+
config DRM_PANEL_OLIMEX_LCD_OLINUXINO
tristate "Olimex LCD-OLinuXino panel"
depends on OF
@@ -159,6 +206,15 @@ config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
Pi 7" Touchscreen. To compile this driver as a module,
choose M here.
+config DRM_PANEL_RAYDIUM_RM67191
+ tristate "Raydium RM67191 FHD 1080x1920 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Raydium RM67191 FHD
+ (1080x1920) DSI panel.
+
config DRM_PANEL_RAYDIUM_RM68200
tristate "Raydium RM68200 720x1280 DSI video mode panel"
depends on OF
@@ -248,6 +304,13 @@ config DRM_PANEL_SHARP_LQ101R1SX01
To compile this driver as a module, choose M here: the module
will be called panel-sharp-lq101r1sx01.
+config DRM_PANEL_SHARP_LS037V7DW01
+ tristate "Sharp LS037V7DW01 VGA LCD panel"
+ depends on GPIOLIB && OF && REGULATOR
+ help
+ Say Y here if you want to enable support for Sharp LS037V7DW01 VGA
+ (480x640) LCD panel (found on the TI SDP3430 board).
+
config DRM_PANEL_SHARP_LS043T1LE01
tristate "Sharp LS043T1LE01 qHD video mode panel"
depends on OF
@@ -275,6 +338,40 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_SONY_ACX424AKP
+ tristate "Sony ACX424AKP DSI command mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable the Sony ACX424 display
+ panel. This panel supports DSI in both command and video
+ mode.
+
+config DRM_PANEL_SONY_ACX565AKM
+ tristate "Sony ACX565AKM panel"
+ depends on GPIOLIB && OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Sony ACX565AKM
+ 800x480 3.5" panel (found on the Nokia N900).
+
+config DRM_PANEL_TPO_TD028TTEC1
+ tristate "Toppoly (TPO) TD028TTEC1 panel driver"
+ depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for TPO TD028TTEC1 480x640
+ 2.8" panel (found on the OpenMoko Neo FreeRunner and Neo 1973).
+
+config DRM_PANEL_TPO_TD043MTEA1
+ tristate "Toppoly (TPO) TD043MTEA1 panel driver"
+ depends on GPIOLIB && OF && REGULATOR && SPI
+ help
+ Say Y here if you want to enable support for TPO TD043MTEA1 800x480
+ 4.3" panel (found on the OMAP3 Pandora board).
+
config DRM_PANEL_TPO_TPG110
tristate "TPO TPG 800x400 panel"
depends on OF && SPI && GPIOLIB
@@ -291,4 +388,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
help
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+
+config DRM_PANEL_XINPENG_XPP055C272
+ tristate "Xinpeng XPP055C272 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Xinpeng
+ XPP055C272 controller for 720x1280 LCD panels with MIPI/RGB/SPI
+ system interfaces.
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index fb0cb3aaa9e6..7c4d3c581fd4 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
@@ -8,12 +9,17 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
+obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_ROCKTECH_JH057N00900) += panel-rocktech-jh057n00900.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
@@ -25,8 +31,14 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 5f72c922a04b..41444a73c980 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -260,9 +260,9 @@ static int versatile_panel_enable(struct drm_panel *panel)
return 0;
}
-static int versatile_panel_get_modes(struct drm_panel *panel)
+static int versatile_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct versatile_panel *vpanel = to_versatile_panel(panel);
struct drm_display_mode *mode;
@@ -270,7 +270,7 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
connector->display_info.height_mm = vpanel->panel_type->height_mm;
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
- mode = drm_mode_duplicate(panel->drm, &vpanel->panel_type->mode);
+ mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
@@ -350,9 +350,8 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel);
- vpanel->panel.dev = dev;
- vpanel->panel.funcs = &versatile_panel_drm_funcs;
+ drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&vpanel->panel);
}
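The panel conversions in this series, including the versatile panel above and the new BOE driver below, follow the same two API changes: drm_panel_funcs::get_modes() now receives the connector explicitly instead of reading panel->connector, and drm_panel_init() takes the device, funcs and connector type in a single call. A hedged sketch of the new get_modes() shape, using placeholder VGA timings rather than any real panel's values:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* placeholder 640x480@60 timings, not taken from any real panel */
static const struct drm_display_mode example_panel_mode = {
	.clock = 25175,
	.hdisplay = 640, .hsync_start = 656, .hsync_end = 752, .htotal = 800,
	.vdisplay = 480, .vsync_start = 490, .vsync_end = 492, .vtotal = 525,
	.vrefresh = 60,
};

static int example_panel_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	/* duplicate against the connector's device, not the old panel->drm */
	mode = drm_mode_duplicate(connector->dev, &example_panel_mode);
	if (!mode)
		return 0;

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added */
}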
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
new file mode 100644
index 000000000000..74d58ee7d04c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Huaqin Telecom Technology Co., Ltd
+ *
+ * Author: Jerry Han <jerry.han.hq@gmail.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+struct panel_cmd {
+ char cmd;
+ char data;
+};
+
+struct panel_desc {
+ const struct drm_display_mode *display_mode;
+ unsigned int bpc;
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+ const struct panel_cmd *on_cmds;
+ unsigned int on_cmds_num;
+};
+
+struct panel_info {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+ const struct panel_desc *desc;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *pp33_gpio;
+ struct gpio_desc *pp18_gpio;
+
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct panel_info *to_panel_info(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_info, base);
+}
+
+static void disable_gpios(struct panel_info *pinfo)
+{
+ gpiod_set_value(pinfo->enable_gpio, 0);
+ gpiod_set_value(pinfo->pp33_gpio, 0);
+ gpiod_set_value(pinfo->pp18_gpio, 0);
+}
+
+static int send_mipi_cmds(struct drm_panel *panel, const struct panel_cmd *cmds)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ unsigned int i = 0;
+ int err;
+
+ for (i = 0; i < pinfo->desc->on_cmds_num; i++) {
+ err = mipi_dsi_dcs_write_buffer(pinfo->link, &cmds[i],
+ sizeof(struct panel_cmd));
+
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int boe_panel_disable(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int err;
+
+ if (!pinfo->enabled)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(pinfo->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ err);
+ return err;
+ }
+
+ pinfo->enabled = false;
+
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int err;
+
+ if (!pinfo->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(pinfo->link);
+ if (err < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ err);
+
+ err = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
+ if (err < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ err);
+
+ /* sleep_mode_delay: 1ms - 2ms */
+ usleep_range(1000, 2000);
+
+ disable_gpios(pinfo);
+
+ pinfo->prepared = false;
+
+ return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int err;
+
+ if (pinfo->prepared)
+ return 0;
+
+ gpiod_set_value(pinfo->pp18_gpio, 1);
+ /* T1: 5ms - 6ms */
+ usleep_range(5000, 6000);
+ gpiod_set_value(pinfo->pp33_gpio, 1);
+
+ /* reset sequence */
+ /* T2: 14ms - 15ms */
+ usleep_range(14000, 15000);
+ gpiod_set_value(pinfo->enable_gpio, 1);
+
+ /* T3: 1ms - 2ms */
+ usleep_range(1000, 2000);
+ gpiod_set_value(pinfo->enable_gpio, 0);
+
+ /* T4: 1ms - 2ms */
+ usleep_range(1000, 2000);
+ gpiod_set_value(pinfo->enable_gpio, 1);
+
+ /* T5: 5ms - 6ms */
+ usleep_range(5000, 6000);
+
+ /* send init code */
+ err = send_mipi_cmds(panel, pinfo->desc->on_cmds);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to send DCS Init Code: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ /* T6: 120ms - 121ms */
+ usleep_range(120000, 121000);
+
+ err = mipi_dsi_dcs_set_display_on(pinfo->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ /* T7: 20ms - 21ms */
+ usleep_range(20000, 21000);
+
+ pinfo->prepared = true;
+
+ return 0;
+
+poweroff:
+ disable_gpios(pinfo);
+ return err;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int ret;
+
+ if (pinfo->enabled)
+ return 0;
+
+ usleep_range(120000, 121000);
+
+ ret = mipi_dsi_dcs_set_display_on(pinfo->link);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ ret);
+ return ret;
+ }
+
+ pinfo->enabled = true;
+
+ return 0;
+}
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ const struct drm_display_mode *m = pinfo->desc->display_mode;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ DRM_DEV_ERROR(pinfo->base.dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = pinfo->desc->width_mm;
+ connector->display_info.height_mm = pinfo->desc->height_mm;
+ connector->display_info.bpc = pinfo->desc->bpc;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs panel_funcs = {
+ .disable = boe_panel_disable,
+ .unprepare = boe_panel_unprepare,
+ .prepare = boe_panel_prepare,
+ .enable = boe_panel_enable,
+ .get_modes = boe_panel_get_modes,
+};
+
+static const struct drm_display_mode default_display_mode = {
+ .clock = 159420,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 60,
+ .htotal = 1200 + 80 + 60 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .vrefresh = 60,
+};
+
+/* 8 inch */
+static const struct panel_cmd boe_himax8279d8p_on_cmds[] = {
+ { 0xB0, 0x05 },
+ { 0xB1, 0xE5 },
+ { 0xB3, 0x52 },
+ { 0xC0, 0x00 },
+ { 0xC2, 0x57 },
+ { 0xD9, 0x85 },
+ { 0xB0, 0x01 },
+ { 0xC8, 0x00 },
+ { 0xC9, 0x00 },
+ { 0xCC, 0x26 },
+ { 0xCD, 0x26 },
+ { 0xDC, 0x00 },
+ { 0xDD, 0x00 },
+ { 0xE0, 0x26 },
+ { 0xE1, 0x26 },
+ { 0xB0, 0x03 },
+ { 0xC3, 0x2A },
+ { 0xE7, 0x2A },
+ { 0xC5, 0x2A },
+ { 0xDE, 0x2A },
+ { 0xBC, 0x02 },
+ { 0xCB, 0x02 },
+ { 0xB0, 0x00 },
+ { 0xB6, 0x03 },
+ { 0xBA, 0x8B },
+ { 0xBF, 0x15 },
+ { 0xC0, 0x18 },
+ { 0xC2, 0x14 },
+ { 0xC3, 0x02 },
+ { 0xC4, 0x14 },
+ { 0xC5, 0x02 },
+ { 0xCC, 0x0A },
+ { 0xB0, 0x06 },
+ { 0xC0, 0xA5 },
+ { 0xD5, 0x20 },
+ { 0xC0, 0x00 },
+ { 0xB0, 0x02 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x02 },
+ { 0xC2, 0x06 },
+ { 0xC3, 0x16 },
+ { 0xC4, 0x0E },
+ { 0xC5, 0x18 },
+ { 0xC6, 0x26 },
+ { 0xC7, 0x32 },
+ { 0xC8, 0x3F },
+ { 0xC9, 0x3F },
+ { 0xCA, 0x3F },
+ { 0xCB, 0x3F },
+ { 0xCC, 0x3D },
+ { 0xCD, 0x2F },
+ { 0xCE, 0x2F },
+ { 0xCF, 0x2F },
+ { 0xD0, 0x07 },
+ { 0xD2, 0x00 },
+ { 0xD3, 0x02 },
+ { 0xD4, 0x06 },
+ { 0xD5, 0x12 },
+ { 0xD6, 0x0A },
+ { 0xD7, 0x14 },
+ { 0xD8, 0x22 },
+ { 0xD9, 0x2E },
+ { 0xDA, 0x3D },
+ { 0xDB, 0x3F },
+ { 0xDC, 0x3F },
+ { 0xDD, 0x3F },
+ { 0xDE, 0x3D },
+ { 0xDF, 0x2F },
+ { 0xE0, 0x2F },
+ { 0xE1, 0x2F },
+ { 0xE2, 0x07 },
+ { 0xB0, 0x07 },
+ { 0xB1, 0x18 },
+ { 0xB2, 0x19 },
+ { 0xB3, 0x2E },
+ { 0xB4, 0x52 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8C },
+ { 0xB7, 0xBD },
+ { 0xB8, 0xEB },
+ { 0xB9, 0x47 },
+ { 0xBA, 0x96 },
+ { 0xBB, 0x1E },
+ { 0xBC, 0x90 },
+ { 0xBD, 0x93 },
+ { 0xBE, 0xFA },
+ { 0xBF, 0x56 },
+ { 0xC0, 0x8C },
+ { 0xC1, 0xB7 },
+ { 0xC2, 0xCC },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x08 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x15 },
+ { 0xB3, 0x2D },
+ { 0xB4, 0x51 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8D },
+ { 0xB7, 0xBE },
+ { 0xB8, 0xED },
+ { 0xB9, 0x4A },
+ { 0xBA, 0x9A },
+ { 0xBB, 0x23 },
+ { 0xBC, 0x95 },
+ { 0xBD, 0x98 },
+ { 0xBE, 0xFF },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x09 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x2C },
+ { 0xB3, 0x36 },
+ { 0xB4, 0x53 },
+ { 0xB5, 0x73 },
+ { 0xB6, 0x8E },
+ { 0xB7, 0xC0 },
+ { 0xB8, 0xEF },
+ { 0xB9, 0x4C },
+ { 0xBA, 0x9D },
+ { 0xBB, 0x25 },
+ { 0xBC, 0x96 },
+ { 0xBD, 0x9A },
+ { 0xBE, 0x01 },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xBF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0A },
+ { 0xB1, 0x18 },
+ { 0xB2, 0x19 },
+ { 0xB3, 0x2E },
+ { 0xB4, 0x52 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8C },
+ { 0xB7, 0xBD },
+ { 0xB8, 0xEB },
+ { 0xB9, 0x47 },
+ { 0xBA, 0x96 },
+ { 0xBB, 0x1E },
+ { 0xBC, 0x90 },
+ { 0xBD, 0x93 },
+ { 0xBE, 0xFA },
+ { 0xBF, 0x56 },
+ { 0xC0, 0x8C },
+ { 0xC1, 0xB7 },
+ { 0xC2, 0xCC },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0B },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x15 },
+ { 0xB3, 0x2D },
+ { 0xB4, 0x51 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8D },
+ { 0xB7, 0xBE },
+ { 0xB8, 0xED },
+ { 0xB9, 0x4A },
+ { 0xBA, 0x9A },
+ { 0xBB, 0x23 },
+ { 0xBC, 0x95 },
+ { 0xBD, 0x98 },
+ { 0xBE, 0xFF },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0C },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x2C },
+ { 0xB3, 0x36 },
+ { 0xB4, 0x53 },
+ { 0xB5, 0x73 },
+ { 0xB6, 0x8E },
+ { 0xB7, 0xC0 },
+ { 0xB8, 0xEF },
+ { 0xB9, 0x4C },
+ { 0xBA, 0x9D },
+ { 0xBB, 0x25 },
+ { 0xBC, 0x96 },
+ { 0xBD, 0x9A },
+ { 0xBE, 0x01 },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xBF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x04 },
+ { 0xB5, 0x02 },
+ { 0xB6, 0x01 },
+};
+
+static const struct panel_desc boe_himax8279d8p_panel_desc = {
+ .display_mode = &default_display_mode,
+ .bpc = 8,
+ .width_mm = 107,
+ .height_mm = 172,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .on_cmds = boe_himax8279d8p_on_cmds,
+ .on_cmds_num = 260,
+};
+
+/* 10 inch */
+static const struct panel_cmd boe_himax8279d10p_on_cmds[] = {
+ { 0xB0, 0x05 },
+ { 0xB1, 0xE5 },
+ { 0xB3, 0x52 },
+ { 0xB0, 0x00 },
+ { 0xB6, 0x03 },
+ { 0xBA, 0x8B },
+ { 0xBF, 0x1A },
+ { 0xC0, 0x0F },
+ { 0xC2, 0x0C },
+ { 0xC3, 0x02 },
+ { 0xC4, 0x0C },
+ { 0xC5, 0x02 },
+ { 0xB0, 0x01 },
+ { 0xE0, 0x26 },
+ { 0xE1, 0x26 },
+ { 0xDC, 0x00 },
+ { 0xDD, 0x00 },
+ { 0xCC, 0x26 },
+ { 0xCD, 0x26 },
+ { 0xC8, 0x00 },
+ { 0xC9, 0x00 },
+ { 0xD2, 0x03 },
+ { 0xD3, 0x03 },
+ { 0xE6, 0x04 },
+ { 0xE7, 0x04 },
+ { 0xC4, 0x09 },
+ { 0xC5, 0x09 },
+ { 0xD8, 0x0A },
+ { 0xD9, 0x0A },
+ { 0xC2, 0x0B },
+ { 0xC3, 0x0B },
+ { 0xD6, 0x0C },
+ { 0xD7, 0x0C },
+ { 0xC0, 0x05 },
+ { 0xC1, 0x05 },
+ { 0xD4, 0x06 },
+ { 0xD5, 0x06 },
+ { 0xCA, 0x07 },
+ { 0xCB, 0x07 },
+ { 0xDE, 0x08 },
+ { 0xDF, 0x08 },
+ { 0xB0, 0x02 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x0D },
+ { 0xC2, 0x17 },
+ { 0xC3, 0x26 },
+ { 0xC4, 0x31 },
+ { 0xC5, 0x1C },
+ { 0xC6, 0x2C },
+ { 0xC7, 0x33 },
+ { 0xC8, 0x31 },
+ { 0xC9, 0x37 },
+ { 0xCA, 0x37 },
+ { 0xCB, 0x37 },
+ { 0xCC, 0x39 },
+ { 0xCD, 0x2E },
+ { 0xCE, 0x2F },
+ { 0xCF, 0x2F },
+ { 0xD0, 0x07 },
+ { 0xD2, 0x00 },
+ { 0xD3, 0x0D },
+ { 0xD4, 0x17 },
+ { 0xD5, 0x26 },
+ { 0xD6, 0x31 },
+ { 0xD7, 0x3F },
+ { 0xD8, 0x3F },
+ { 0xD9, 0x3F },
+ { 0xDA, 0x3F },
+ { 0xDB, 0x37 },
+ { 0xDC, 0x37 },
+ { 0xDD, 0x37 },
+ { 0xDE, 0x39 },
+ { 0xDF, 0x2E },
+ { 0xE0, 0x2F },
+ { 0xE1, 0x2F },
+ { 0xE2, 0x07 },
+ { 0xB0, 0x03 },
+ { 0xC8, 0x0B },
+ { 0xC9, 0x07 },
+ { 0xC3, 0x00 },
+ { 0xE7, 0x00 },
+ { 0xC5, 0x2A },
+ { 0xDE, 0x2A },
+ { 0xCA, 0x43 },
+ { 0xC9, 0x07 },
+ { 0xE4, 0xC0 },
+ { 0xE5, 0x0D },
+ { 0xCB, 0x01 },
+ { 0xBC, 0x01 },
+ { 0xB0, 0x06 },
+ { 0xB8, 0xA5 },
+ { 0xC0, 0xA5 },
+ { 0xC7, 0x0F },
+ { 0xD5, 0x32 },
+ { 0xB8, 0x00 },
+ { 0xC0, 0x00 },
+ { 0xBC, 0x00 },
+ { 0xB0, 0x07 },
+ { 0xB1, 0x00 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x10 },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x36 },
+ { 0xB6, 0x4A },
+ { 0xB7, 0x6C },
+ { 0xB8, 0x9A },
+ { 0xB9, 0xD7 },
+ { 0xBA, 0x17 },
+ { 0xBB, 0x92 },
+ { 0xBC, 0x15 },
+ { 0xBD, 0x18 },
+ { 0xBE, 0x8C },
+ { 0xBF, 0x00 },
+ { 0xC0, 0x3A },
+ { 0xC1, 0x72 },
+ { 0xC2, 0x8C },
+ { 0xC3, 0xA5 },
+ { 0xC4, 0xB1 },
+ { 0xC5, 0xBE },
+ { 0xC6, 0xCA },
+ { 0xC7, 0xD1 },
+ { 0xC8, 0xD4 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x08 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x11 },
+ { 0xB4, 0x24 },
+ { 0xB5, 0x39 },
+ { 0xB6, 0x4E },
+ { 0xB7, 0x72 },
+ { 0xB8, 0xA3 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x25 },
+ { 0xBB, 0xA8 },
+ { 0xBC, 0x2E },
+ { 0xBD, 0x32 },
+ { 0xBE, 0xAD },
+ { 0xBF, 0x28 },
+ { 0xC0, 0x63 },
+ { 0xC1, 0x9B },
+ { 0xC2, 0xB5 },
+ { 0xC3, 0xCF },
+ { 0xC4, 0xDB },
+ { 0xC5, 0xE8 },
+ { 0xC6, 0xF5 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x09 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x04 },
+ { 0xB3, 0x0F },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x37 },
+ { 0xB6, 0x4D },
+ { 0xB7, 0x71 },
+ { 0xB8, 0xA2 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x26 },
+ { 0xBB, 0xA9 },
+ { 0xBC, 0x2F },
+ { 0xBD, 0x33 },
+ { 0xBE, 0xAC },
+ { 0xBF, 0x24 },
+ { 0xC0, 0x5D },
+ { 0xC1, 0x94 },
+ { 0xC2, 0xAC },
+ { 0xC3, 0xC5 },
+ { 0xC4, 0xD1 },
+ { 0xC5, 0xDC },
+ { 0xC6, 0xE8 },
+ { 0xC7, 0xED },
+ { 0xC8, 0xF0 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0A },
+ { 0xB1, 0x00 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x10 },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x36 },
+ { 0xB6, 0x4A },
+ { 0xB7, 0x6C },
+ { 0xB8, 0x9A },
+ { 0xB9, 0xD7 },
+ { 0xBA, 0x17 },
+ { 0xBB, 0x92 },
+ { 0xBC, 0x15 },
+ { 0xBD, 0x18 },
+ { 0xBE, 0x8C },
+ { 0xBF, 0x00 },
+ { 0xC0, 0x3A },
+ { 0xC1, 0x72 },
+ { 0xC2, 0x8C },
+ { 0xC3, 0xA5 },
+ { 0xC4, 0xB1 },
+ { 0xC5, 0xBE },
+ { 0xC6, 0xCA },
+ { 0xC7, 0xD1 },
+ { 0xC8, 0xD4 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0B },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x11 },
+ { 0xB4, 0x24 },
+ { 0xB5, 0x39 },
+ { 0xB6, 0x4E },
+ { 0xB7, 0x72 },
+ { 0xB8, 0xA3 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x25 },
+ { 0xBB, 0xA8 },
+ { 0xBC, 0x2E },
+ { 0xBD, 0x32 },
+ { 0xBE, 0xAD },
+ { 0xBF, 0x28 },
+ { 0xC0, 0x63 },
+ { 0xC1, 0x9B },
+ { 0xC2, 0xB5 },
+ { 0xC3, 0xCF },
+ { 0xC4, 0xDB },
+ { 0xC5, 0xE8 },
+ { 0xC6, 0xF5 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0C },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x04 },
+ { 0xB3, 0x0F },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x37 },
+ { 0xB6, 0x4D },
+ { 0xB7, 0x71 },
+ { 0xB8, 0xA2 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x26 },
+ { 0xBB, 0xA9 },
+ { 0xBC, 0x2F },
+ { 0xBD, 0x33 },
+ { 0xBE, 0xAC },
+ { 0xBF, 0x24 },
+ { 0xC0, 0x5D },
+ { 0xC1, 0x94 },
+ { 0xC2, 0xAC },
+ { 0xC3, 0xC5 },
+ { 0xC4, 0xD1 },
+ { 0xC5, 0xDC },
+ { 0xC6, 0xE8 },
+ { 0xC7, 0xED },
+ { 0xC8, 0xF0 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+};
+
+static const struct panel_desc boe_himax8279d10p_panel_desc = {
+ .display_mode = &default_display_mode,
+ .bpc = 8,
+ .width_mm = 135,
+ .height_mm = 216,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .on_cmds = boe_himax8279d10p_on_cmds,
+ .on_cmds_num = 283,
+};
+
+static const struct of_device_id panel_of_match[] = {
+ {
+ .compatible = "boe,himax8279d8p",
+ .data = &boe_himax8279d8p_panel_desc,
+ },
+ {
+ .compatible = "boe,himax8279d10p",
+ .data = &boe_himax8279d10p_panel_desc,
+ },
+ {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+static int panel_add(struct panel_info *pinfo)
+{
+ struct device *dev = &pinfo->link->dev;
+ int ret;
+
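+ /* pp18/pp33 gate the panel's 1.8V and 3.3V power supplies. */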
+ pinfo->pp18_gpio = devm_gpiod_get(dev, "pp18", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->pp18_gpio)) {
+ ret = PTR_ERR(pinfo->pp18_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get pp18 gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ pinfo->pp33_gpio = devm_gpiod_get(dev, "pp33", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->pp33_gpio)) {
+ ret = PTR_ERR(pinfo->pp33_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get pp33 gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ pinfo->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->enable_gpio)) {
+ ret = PTR_ERR(pinfo->enable_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get enable gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&pinfo->base, dev, &panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&pinfo->base);
+ if (ret)
+ return ret;
+
+ return drm_panel_add(&pinfo->base);
+}
+
+static int panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo;
+ const struct panel_desc *desc;
+ int err;
+
+ pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->mode_flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+ pinfo->desc = desc;
+
+ pinfo->link = dsi;
+ mipi_dsi_set_drvdata(dsi, pinfo);
+
+ err = panel_add(pinfo);
+ if (err < 0)
+ return err;
+
+ err = mipi_dsi_attach(dsi);
+ if (err < 0)
+ drm_panel_remove(&pinfo->base);
+
+ return err;
+}
+
+static int panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = boe_panel_disable(&pinfo->base);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n",
+ err);
+
+ err = boe_panel_unprepare(&pinfo->base);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
+ err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+ err);
+
+ drm_panel_remove(&pinfo->base);
+
+ return 0;
+}
+
+static void panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+
+ boe_panel_disable(&pinfo->base);
+ boe_panel_unprepare(&pinfo->base);
+}
+
+static struct mipi_dsi_driver panel_driver = {
+ .driver = {
+ .name = "panel-boe-himax8279d",
+ .of_match_table = panel_of_match,
+ },
+ .probe = panel_probe,
+ .remove = panel_remove,
+ .shutdown = panel_shutdown,
+};
+module_mipi_dsi_driver(panel_driver);
+
+MODULE_AUTHOR("Jerry Han <jerry.han.hq@gmail.com>");
+MODULE_DESCRIPTION("Boe Himax8279d driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index dabf59e0f56f..95b789ab9d29 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -9,7 +9,6 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -22,7 +21,6 @@ struct feiyang {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *dvdd;
struct regulator *avdd;
struct gpio_desc *reset;
@@ -102,7 +100,6 @@ static int feiyang_enable(struct drm_panel *panel)
msleep(200);
mipi_dsi_dcs_set_display_on(ctx->dsi);
- backlight_enable(ctx->backlight);
return 0;
}
@@ -111,7 +108,6 @@ static int feiyang_disable(struct drm_panel *panel)
{
struct feiyang *ctx = panel_to_feiyang(panel);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_set_display_off(ctx->dsi);
}
@@ -162,13 +158,13 @@ static const struct drm_display_mode feiyang_default_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
-static int feiyang_get_modes(struct drm_panel *panel)
+static int feiyang_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct feiyang *ctx = panel_to_feiyang(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode);
+ mode = drm_mode_duplicate(connector->dev, &feiyang_default_mode);
if (!mode) {
DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
feiyang_default_mode.hdisplay,
@@ -204,9 +200,8 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &feiyang_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd)) {
@@ -226,9 +221,9 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset);
}
- ctx->backlight = devm_of_find_backlight(&dsi->dev);
- if (IS_ERR(ctx->backlight))
- return PTR_ERR(ctx->backlight);
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 53dd1e128795..f394d53a7da4 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -349,7 +349,6 @@ static const struct regmap_config ili9322_regmap_config = {
static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
{
- struct drm_connector *connector = panel->connector;
u8 reg;
int ret;
int i;
@@ -407,23 +406,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
* Polarity and inverted color order for RGB input.
* None of this applies in the BT.656 mode.
*/
- if (ili->conf->dclk_active_high) {
+ reg = 0;
+ if (ili->conf->dclk_active_high)
reg = ILI9322_POL_DCLK;
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
- } else {
- reg = 0;
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
- }
- if (ili->conf->de_active_high) {
+ if (ili->conf->de_active_high)
reg |= ILI9322_POL_DE;
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_DE_HIGH;
- } else {
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_DE_LOW;
- }
if (ili->conf->hsync_active_high)
reg |= ILI9322_POL_HSYNC;
if (ili->conf->vsync_active_high)
@@ -654,37 +641,49 @@ static const struct drm_display_mode itu_r_bt_656_720_mode = {
.flags = 0,
};
-static int ili9322_get_modes(struct drm_panel *panel)
+static int ili9322_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct ili9322 *ili = panel_to_ili9322(panel);
+ struct drm_device *drm = connector->dev;
struct drm_display_mode *mode;
+ struct drm_display_info *info;
- connector->display_info.width_mm = ili->conf->width_mm;
- connector->display_info.height_mm = ili->conf->height_mm;
+ info = &connector->display_info;
+ info->width_mm = ili->conf->width_mm;
+ info->height_mm = ili->conf->height_mm;
+ if (ili->conf->dclk_active_high)
+ info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+ else
+ info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+
+ if (ili->conf->de_active_high)
+ info->bus_flags |= DRM_BUS_FLAG_DE_HIGH;
+ else
+ info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
switch (ili->input) {
case ILI9322_INPUT_SRGB_DUMMY_320X240:
- mode = drm_mode_duplicate(panel->drm, &srgb_320x240_mode);
+ mode = drm_mode_duplicate(drm, &srgb_320x240_mode);
break;
case ILI9322_INPUT_SRGB_DUMMY_360X240:
- mode = drm_mode_duplicate(panel->drm, &srgb_360x240_mode);
+ mode = drm_mode_duplicate(drm, &srgb_360x240_mode);
break;
case ILI9322_INPUT_PRGB_THROUGH:
case ILI9322_INPUT_PRGB_ALIGNED:
- mode = drm_mode_duplicate(panel->drm, &prgb_320x240_mode);
+ mode = drm_mode_duplicate(drm, &prgb_320x240_mode);
break;
case ILI9322_INPUT_YUV_640X320_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &yuv_640x320_mode);
+ mode = drm_mode_duplicate(drm, &yuv_640x320_mode);
break;
case ILI9322_INPUT_YUV_720X360_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &yuv_720x360_mode);
+ mode = drm_mode_duplicate(drm, &yuv_720x360_mode);
break;
case ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_720_mode);
+ mode = drm_mode_duplicate(drm, &itu_r_bt_656_720_mode);
break;
case ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_640_mode);
+ mode = drm_mode_duplicate(drm, &itu_r_bt_656_640_mode);
break;
default:
mode = NULL;
@@ -897,9 +896,8 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel);
- ili->panel.dev = dev;
- ili->panel.funcs = &ili9322_drm_funcs;
+ drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ili->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 3ad4a46c4e94..f54077c216a3 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017-2018, Bootlin
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,7 +24,6 @@ struct ili9881c {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *power;
struct gpio_desc *reset;
};
@@ -348,7 +346,6 @@ static int ili9881c_enable(struct drm_panel *panel)
msleep(120);
mipi_dsi_dcs_set_display_on(ctx->dsi);
- backlight_enable(ctx->backlight);
return 0;
}
@@ -357,7 +354,6 @@ static int ili9881c_disable(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_set_display_off(ctx->dsi);
}
@@ -387,13 +383,13 @@ static const struct drm_display_mode bananapi_default_mode = {
.vtotal = 1280 + 10 + 10 + 20,
};
-static int ili9881c_get_modes(struct drm_panel *panel)
+static int ili9881c_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct ili9881c *ctx = panel_to_ili9881c(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+ mode = drm_mode_duplicate(connector->dev, &bananapi_default_mode);
if (!mode) {
dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
bananapi_default_mode.hdisplay,
@@ -407,8 +403,8 @@ static int ili9881c_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 62;
- panel->connector->display_info.height_mm = 110;
+ connector->display_info.width_mm = 62;
+ connector->display_info.height_mm = 110;
return 1;
}
@@ -423,7 +419,6 @@ static const struct drm_panel_funcs ili9881c_funcs = {
static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
{
- struct device_node *np;
struct ili9881c *ctx;
int ret;
@@ -433,9 +428,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &ili9881c_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power)) {
@@ -449,14 +443,9 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset);
}
- np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
- if (np) {
- ctx->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!ctx->backlight)
- return -EPROBE_DEFER;
- }
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
@@ -476,9 +465,6 @@ static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
- if (ctx->backlight)
- put_device(&ctx->backlight->dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d92d1c98878c..7419f1f0acee 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -3,7 +3,6 @@
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -52,7 +51,6 @@ struct innolux_panel {
struct mipi_dsi_device *link;
const struct panel_desc *desc;
- struct backlight_device *backlight;
struct regulator_bulk_data *supplies;
struct gpio_desc *enable_gpio;
@@ -72,8 +70,6 @@ static int innolux_panel_disable(struct drm_panel *panel)
if (!innolux->enabled)
return 0;
- backlight_disable(innolux->backlight);
-
innolux->enabled = false;
return 0;
@@ -204,18 +200,10 @@ poweroff:
static int innolux_panel_enable(struct drm_panel *panel)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
- int ret;
if (innolux->enabled)
return 0;
- ret = backlight_enable(innolux->backlight);
- if (ret) {
- DRM_DEV_ERROR(panel->drm->dev,
- "Failed to enable backlight %d\n", ret);
- return ret;
- }
-
innolux->enabled = true;
return 0;
@@ -403,28 +391,27 @@ static const struct panel_desc innolux_p097pfg_panel_desc = {
.sleep_mode_delay = 100, /* T15 */
};
-static int innolux_panel_get_modes(struct drm_panel *panel)
+static int innolux_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
const struct drm_display_mode *m = innolux->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
return -ENOMEM;
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm =
- innolux->desc->size.width;
- panel->connector->display_info.height_mm =
- innolux->desc->size.height;
- panel->connector->display_info.bpc = innolux->desc->bpc;
+ connector->display_info.width_mm = innolux->desc->size.width;
+ connector->display_info.height_mm = innolux->desc->size.height;
+ connector->display_info.bpc = innolux->desc->bpc;
return 1;
}
@@ -483,13 +470,12 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
innolux->enable_gpio = NULL;
}
- innolux->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(innolux->backlight))
- return PTR_ERR(innolux->backlight);
+ drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
- drm_panel_init(&innolux->base);
- innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = dev;
+ err = drm_panel_of_backlight(&innolux->base);
+ if (err)
+ return err;
err = drm_panel_add(&innolux->base);
if (err < 0)
@@ -528,12 +514,12 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
int err;
- err = innolux_panel_unprepare(&innolux->base);
+ err = drm_panel_unprepare(&innolux->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
err);
- err = innolux_panel_disable(&innolux->base);
+ err = drm_panel_disable(&innolux->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
@@ -551,8 +537,8 @@ static void innolux_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
- innolux_panel_unprepare(&innolux->base);
- innolux_panel_disable(&innolux->base);
+ drm_panel_unprepare(&innolux->base);
+ drm_panel_disable(&innolux->base);
}
static struct mipi_dsi_driver innolux_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index ff3e89e61e3f..4bfd8c877c8e 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -300,13 +300,14 @@ static const struct drm_display_mode default_mode = {
.flags = 0,
};
-static int jdi_panel_get_modes(struct drm_panel *panel)
+static int jdi_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
struct jdi_panel *jdi = to_jdi_panel(panel);
struct device *dev = &jdi->dsi->dev;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
dev_err(dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -316,10 +317,10 @@ static int jdi_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 95;
- panel->connector->display_info.height_mm = 151;
+ connector->display_info.width_mm = 95;
+ connector->display_info.height_mm = 151;
return 1;
}
@@ -437,9 +438,8 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return ret;
}
- drm_panel_init(&jdi->base);
- jdi->base.funcs = &jdi_panel_funcs;
- jdi->base.dev = &jdi->dsi->dev;
+ drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&jdi->base);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 3ac04eb8d0fe..bac1a2a06c92 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -3,7 +3,6 @@
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -23,7 +22,6 @@ struct kingdisplay_panel {
struct drm_panel base;
struct mipi_dsi_device *link;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *enable_gpio;
@@ -191,8 +189,6 @@ static int kingdisplay_panel_disable(struct drm_panel *panel)
if (!kingdisplay->enabled)
return 0;
- backlight_disable(kingdisplay->backlight);
-
err = mipi_dsi_dcs_set_display_off(kingdisplay->link);
if (err < 0)
DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
@@ -303,18 +299,10 @@ poweroff:
static int kingdisplay_panel_enable(struct drm_panel *panel)
{
struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
- int ret;
if (kingdisplay->enabled)
return 0;
- ret = backlight_enable(kingdisplay->backlight);
- if (ret) {
- DRM_DEV_ERROR(panel->drm->dev,
- "Failed to enable backlight %d\n", ret);
- return ret;
- }
-
kingdisplay->enabled = true;
return 0;
@@ -333,13 +321,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int kingdisplay_panel_get_modes(struct drm_panel *panel)
+static int kingdisplay_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -347,11 +336,11 @@ static int kingdisplay_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 147;
- panel->connector->display_info.height_mm = 196;
- panel->connector->display_info.bpc = 8;
+ connector->display_info.width_mm = 147;
+ connector->display_info.height_mm = 196;
+ connector->display_info.bpc = 8;
return 1;
}
@@ -387,13 +376,12 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
kingdisplay->enable_gpio = NULL;
}
- kingdisplay->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(kingdisplay->backlight))
- return PTR_ERR(kingdisplay->backlight);
+ drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
+ &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
- drm_panel_init(&kingdisplay->base);
- kingdisplay->base.funcs = &kingdisplay_panel_funcs;
- kingdisplay->base.dev = &kingdisplay->link->dev;
+ err = drm_panel_of_backlight(&kingdisplay->base);
+ if (err)
+ return err;
return drm_panel_add(&kingdisplay->base);
}
@@ -432,12 +420,12 @@ static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
int err;
- err = kingdisplay_panel_unprepare(&kingdisplay->base);
+ err = drm_panel_unprepare(&kingdisplay->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
err);
- err = kingdisplay_panel_disable(&kingdisplay->base);
+ err = drm_panel_disable(&kingdisplay->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
@@ -455,8 +443,8 @@ static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
- kingdisplay_panel_unprepare(&kingdisplay->base);
- kingdisplay_panel_disable(&kingdisplay->base);
+ drm_panel_unprepare(&kingdisplay->base);
+ drm_panel_disable(&kingdisplay->base);
}
static struct mipi_dsi_driver kingdisplay_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
new file mode 100644
index 000000000000..76ecf2de9c44
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on panel-kingdisplay-kd097d04.c
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk500hd1829 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vcc;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+struct ltk500hd1829_cmd {
+ char cmd;
+ char data;
+};
+
+/*
+ * There is no description in the Reference Manual about these commands.
+ * We received them from the vendor, so just use them as is.
+ */
+static const struct ltk500hd1829_cmd init_code[] = {
+ { 0xE0, 0x00 },
+ { 0xE1, 0x93 },
+ { 0xE2, 0x65 },
+ { 0xE3, 0xF8 },
+ { 0x80, 0x03 },
+ { 0xE0, 0x04 },
+ { 0x2D, 0x03 },
+ { 0xE0, 0x01 },
+ { 0x00, 0x00 },
+ { 0x01, 0xB6 },
+ { 0x03, 0x00 },
+ { 0x04, 0xC5 },
+ { 0x17, 0x00 },
+ { 0x18, 0xBF },
+ { 0x19, 0x01 },
+ { 0x1A, 0x00 },
+ { 0x1B, 0xBF },
+ { 0x1C, 0x01 },
+ { 0x1F, 0x7C },
+ { 0x20, 0x26 },
+ { 0x21, 0x26 },
+ { 0x22, 0x4E },
+ { 0x37, 0x09 },
+ { 0x38, 0x04 },
+ { 0x39, 0x08 },
+ { 0x3A, 0x1F },
+ { 0x3B, 0x1F },
+ { 0x3C, 0x78 },
+ { 0x3D, 0xFF },
+ { 0x3E, 0xFF },
+ { 0x3F, 0x00 },
+ { 0x40, 0x04 },
+ { 0x41, 0xA0 },
+ { 0x43, 0x0F },
+ { 0x44, 0x0A },
+ { 0x45, 0x24 },
+ { 0x55, 0x01 },
+ { 0x56, 0x01 },
+ { 0x57, 0xA5 },
+ { 0x58, 0x0A },
+ { 0x59, 0x4A },
+ { 0x5A, 0x38 },
+ { 0x5B, 0x10 },
+ { 0x5C, 0x19 },
+ { 0x5D, 0x7C },
+ { 0x5E, 0x64 },
+ { 0x5F, 0x54 },
+ { 0x60, 0x48 },
+ { 0x61, 0x44 },
+ { 0x62, 0x35 },
+ { 0x63, 0x3A },
+ { 0x64, 0x24 },
+ { 0x65, 0x3B },
+ { 0x66, 0x39 },
+ { 0x67, 0x37 },
+ { 0x68, 0x56 },
+ { 0x69, 0x41 },
+ { 0x6A, 0x47 },
+ { 0x6B, 0x2F },
+ { 0x6C, 0x23 },
+ { 0x6D, 0x13 },
+ { 0x6E, 0x02 },
+ { 0x6F, 0x08 },
+ { 0x70, 0x7C },
+ { 0x71, 0x64 },
+ { 0x72, 0x54 },
+ { 0x73, 0x48 },
+ { 0x74, 0x44 },
+ { 0x75, 0x35 },
+ { 0x76, 0x3A },
+ { 0x77, 0x22 },
+ { 0x78, 0x3B },
+ { 0x79, 0x39 },
+ { 0x7A, 0x38 },
+ { 0x7B, 0x52 },
+ { 0x7C, 0x41 },
+ { 0x7D, 0x47 },
+ { 0x7E, 0x2F },
+ { 0x7F, 0x23 },
+ { 0x80, 0x13 },
+ { 0x81, 0x02 },
+ { 0x82, 0x08 },
+ { 0xE0, 0x02 },
+ { 0x00, 0x57 },
+ { 0x01, 0x77 },
+ { 0x02, 0x44 },
+ { 0x03, 0x46 },
+ { 0x04, 0x48 },
+ { 0x05, 0x4A },
+ { 0x06, 0x4C },
+ { 0x07, 0x4E },
+ { 0x08, 0x50 },
+ { 0x09, 0x55 },
+ { 0x0A, 0x52 },
+ { 0x0B, 0x55 },
+ { 0x0C, 0x55 },
+ { 0x0D, 0x55 },
+ { 0x0E, 0x55 },
+ { 0x0F, 0x55 },
+ { 0x10, 0x55 },
+ { 0x11, 0x55 },
+ { 0x12, 0x55 },
+ { 0x13, 0x40 },
+ { 0x14, 0x55 },
+ { 0x15, 0x55 },
+ { 0x16, 0x57 },
+ { 0x17, 0x77 },
+ { 0x18, 0x45 },
+ { 0x19, 0x47 },
+ { 0x1A, 0x49 },
+ { 0x1B, 0x4B },
+ { 0x1C, 0x4D },
+ { 0x1D, 0x4F },
+ { 0x1E, 0x51 },
+ { 0x1F, 0x55 },
+ { 0x20, 0x53 },
+ { 0x21, 0x55 },
+ { 0x22, 0x55 },
+ { 0x23, 0x55 },
+ { 0x24, 0x55 },
+ { 0x25, 0x55 },
+ { 0x26, 0x55 },
+ { 0x27, 0x55 },
+ { 0x28, 0x55 },
+ { 0x29, 0x41 },
+ { 0x2A, 0x55 },
+ { 0x2B, 0x55 },
+ { 0x2C, 0x57 },
+ { 0x2D, 0x77 },
+ { 0x2E, 0x4F },
+ { 0x2F, 0x4D },
+ { 0x30, 0x4B },
+ { 0x31, 0x49 },
+ { 0x32, 0x47 },
+ { 0x33, 0x45 },
+ { 0x34, 0x41 },
+ { 0x35, 0x55 },
+ { 0x36, 0x53 },
+ { 0x37, 0x55 },
+ { 0x38, 0x55 },
+ { 0x39, 0x55 },
+ { 0x3A, 0x55 },
+ { 0x3B, 0x55 },
+ { 0x3C, 0x55 },
+ { 0x3D, 0x55 },
+ { 0x3E, 0x55 },
+ { 0x3F, 0x51 },
+ { 0x40, 0x55 },
+ { 0x41, 0x55 },
+ { 0x42, 0x57 },
+ { 0x43, 0x77 },
+ { 0x44, 0x4E },
+ { 0x45, 0x4C },
+ { 0x46, 0x4A },
+ { 0x47, 0x48 },
+ { 0x48, 0x46 },
+ { 0x49, 0x44 },
+ { 0x4A, 0x40 },
+ { 0x4B, 0x55 },
+ { 0x4C, 0x52 },
+ { 0x4D, 0x55 },
+ { 0x4E, 0x55 },
+ { 0x4F, 0x55 },
+ { 0x50, 0x55 },
+ { 0x51, 0x55 },
+ { 0x52, 0x55 },
+ { 0x53, 0x55 },
+ { 0x54, 0x55 },
+ { 0x55, 0x50 },
+ { 0x56, 0x55 },
+ { 0x57, 0x55 },
+ { 0x58, 0x40 },
+ { 0x59, 0x00 },
+ { 0x5A, 0x00 },
+ { 0x5B, 0x10 },
+ { 0x5C, 0x09 },
+ { 0x5D, 0x30 },
+ { 0x5E, 0x01 },
+ { 0x5F, 0x02 },
+ { 0x60, 0x30 },
+ { 0x61, 0x03 },
+ { 0x62, 0x04 },
+ { 0x63, 0x06 },
+ { 0x64, 0x6A },
+ { 0x65, 0x75 },
+ { 0x66, 0x0F },
+ { 0x67, 0xB3 },
+ { 0x68, 0x0B },
+ { 0x69, 0x06 },
+ { 0x6A, 0x6A },
+ { 0x6B, 0x10 },
+ { 0x6C, 0x00 },
+ { 0x6D, 0x04 },
+ { 0x6E, 0x04 },
+ { 0x6F, 0x88 },
+ { 0x70, 0x00 },
+ { 0x71, 0x00 },
+ { 0x72, 0x06 },
+ { 0x73, 0x7B },
+ { 0x74, 0x00 },
+ { 0x75, 0xBC },
+ { 0x76, 0x00 },
+ { 0x77, 0x05 },
+ { 0x78, 0x2E },
+ { 0x79, 0x00 },
+ { 0x7A, 0x00 },
+ { 0x7B, 0x00 },
+ { 0x7C, 0x00 },
+ { 0x7D, 0x03 },
+ { 0x7E, 0x7B },
+ { 0xE0, 0x04 },
+ { 0x09, 0x10 },
+ { 0x2B, 0x2B },
+ { 0x2E, 0x44 },
+ { 0xE0, 0x00 },
+ { 0xE6, 0x02 },
+ { 0xE7, 0x02 },
+ { 0x35, 0x00 },
+};
+
+static inline
+struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel)
+{
+ return container_of(panel, struct ltk500hd1829, panel);
+}
+
+static int ltk500hd1829_unprepare(struct drm_panel *panel)
+{
+ struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ }
+
+ /* 120ms to enter sleep mode */
+ msleep(120);
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vcc);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int ltk500hd1829_prepare(struct drm_panel *panel)
+{
+ struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ unsigned int i;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vcc;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ /* tRW: 10us */
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ /* tRT: >= 5ms */
+ usleep_range(5000, 6000);
+
+ for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ ret = mipi_dsi_generic_write(dsi, &init_code[i],
+ sizeof(struct ltk500hd1829_cmd));
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev,
+ "failed to write init cmds: %d\n", ret);
+ goto disable_iovcc;
+ }
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ /* 120ms to exit sleep mode */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vcc:
+ regulator_disable(ctx->vcc);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 50,
+ .hsync_end = 720 + 50 + 50,
+ .htotal = 720 + 50 + 50 + 50,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 12,
+ .vrefresh = 60,
+ .clock = 41600,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static int ltk500hd1829_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ltk500hd1829_funcs = {
+ .unprepare = ltk500hd1829_unprepare,
+ .prepare = ltk500hd1829_prepare,
+ .get_modes = ltk500hd1829_get_modes,
+};
+
+static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
+{
+ struct ltk500hd1829 *ctx;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc)) {
+ ret = PTR_ERR(ctx->vcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
+{
+ struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ltk500hd1829_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ltk500hd1829_of_match[] = {
+ { .compatible = "leadtek,ltk500hd1829", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match);
+
+static struct mipi_dsi_driver ltk500hd1829_driver = {
+ .driver = {
+ .name = "panel-leadtek-ltk500hd1829",
+ .of_match_table = ltk500hd1829_of_match,
+ },
+ .probe = ltk500hd1829_probe,
+ .remove = ltk500hd1829_remove,
+ .shutdown = ltk500hd1829_shutdown,
+};
+module_mipi_dsi_driver(ltk500hd1829_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("Leadtek LTK500HD1829 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
new file mode 100644
index 000000000000..e90efeaba4ad
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LG.Philips LB035Q02 LCD Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-lgphilips-lb035q02 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * Based on a driver by: Steve Sakoman <steve@sakoman.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct lb035q02_device {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct gpio_desc *enable_gpio;
+};
+
+#define to_lb035q02_device(p) container_of(p, struct lb035q02_device, panel)
+
+static int lb035q02_write(struct lb035q02_device *lcd, u16 reg, u16 val)
+{
+ struct spi_message msg;
+ struct spi_transfer index_xfer = {
+ .len = 3,
+ .cs_change = 1,
+ };
+ struct spi_transfer value_xfer = {
+ .len = 3,
+ };
+ u8 buffer[16];
+
+ spi_message_init(&msg);
+
+ /* register index */
+ buffer[0] = 0x70;
+ buffer[1] = 0x00;
+ buffer[2] = reg & 0x7f;
+ index_xfer.tx_buf = buffer;
+ spi_message_add_tail(&index_xfer, &msg);
+
+ /* register value */
+ buffer[4] = 0x72;
+ buffer[5] = val >> 8;
+ buffer[6] = val;
+ value_xfer.tx_buf = buffer + 4;
+ spi_message_add_tail(&value_xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
+
+static int lb035q02_init(struct lb035q02_device *lcd)
+{
+ /* Init sequence from page 28 of the lb035q02 spec. */
+ static const struct {
+ u16 index;
+ u16 value;
+ } init_data[] = {
+ { 0x01, 0x6300 },
+ { 0x02, 0x0200 },
+ { 0x03, 0x0177 },
+ { 0x04, 0x04c7 },
+ { 0x05, 0xffc0 },
+ { 0x06, 0xe806 },
+ { 0x0a, 0x4008 },
+ { 0x0b, 0x0000 },
+ { 0x0d, 0x0030 },
+ { 0x0e, 0x2800 },
+ { 0x0f, 0x0000 },
+ { 0x16, 0x9f80 },
+ { 0x17, 0x0a0f },
+ { 0x1e, 0x00c1 },
+ { 0x30, 0x0300 },
+ { 0x31, 0x0007 },
+ { 0x32, 0x0000 },
+ { 0x33, 0x0000 },
+ { 0x34, 0x0707 },
+ { 0x35, 0x0004 },
+ { 0x36, 0x0302 },
+ { 0x37, 0x0202 },
+ { 0x3a, 0x0a0d },
+ { 0x3b, 0x0806 },
+ };
+
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(init_data); ++i) {
+ ret = lb035q02_write(lcd, init_data[i].index,
+ init_data[i].value);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lb035q02_disable(struct drm_panel *panel)
+{
+ struct lb035q02_device *lcd = to_lb035q02_device(panel);
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 0);
+
+ return 0;
+}
+
+static int lb035q02_enable(struct drm_panel *panel)
+{
+ struct lb035q02_device *lcd = to_lb035q02_device(panel);
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode lb035q02_mode = {
+ .clock = 6500,
+ .hdisplay = 320,
+ .hsync_start = 320 + 20,
+ .hsync_end = 320 + 20 + 2,
+ .htotal = 320 + 20 + 2 + 68,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 2,
+ .vtotal = 240 + 4 + 2 + 18,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 70,
+ .height_mm = 53,
+};
+
+static int lb035q02_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &lb035q02_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = lb035q02_mode.width_mm;
+ connector->display_info.height_mm = lb035q02_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet pixel data is sampled on the
+ * rising edge of the clock, but the code running on the Gumstix Overo
+ * Palo35 indicates sampling on the negative edge. This should be
+ * tested on a real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs lb035q02_funcs = {
+ .disable = lb035q02_disable,
+ .enable = lb035q02_enable,
+ .get_modes = lb035q02_get_modes,
+};
+
+static int lb035q02_probe(struct spi_device *spi)
+{
+ struct lb035q02_device *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+
+ lcd->enable_gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->enable_gpio)) {
+ dev_err(&spi->dev, "failed to parse enable gpio\n");
+ return PTR_ERR(lcd->enable_gpio);
+ }
+
+ ret = lb035q02_init(lcd);
+ if (ret < 0)
+ return ret;
+
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int lb035q02_remove(struct spi_device *spi)
+{
+ struct lb035q02_device *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id lb035q02_of_match[] = {
+ { .compatible = "lgphilips,lb035q02", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, lb035q02_of_match);
+
+static const struct spi_device_id lb035q02_ids[] = {
+ { "lb035q02", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, lb035q02_ids);
+
+static struct spi_driver lb035q02_driver = {
+ .probe = lb035q02_probe,
+ .remove = lb035q02_remove,
+ .id_table = lb035q02_ids,
+ .driver = {
+ .name = "panel-lg-lb035q02",
+ .of_match_table = lb035q02_of_match,
+ },
+};
+
+module_spi_driver(lb035q02_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 41bf02d122a1..b262b53dbd85 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -42,7 +42,7 @@ static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
struct spi_transfer xfer = {
.len = 2,
};
- u16 temp = cpu_to_be16(data);
+ __be16 temp = cpu_to_be16(data);
struct spi_message msg;
dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
@@ -209,14 +209,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int lg4573_get_modes(struct drm_panel *panel)
+static int lg4573_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -227,8 +227,8 @@ static int lg4573_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 61;
- panel->connector->display_info.height_mm = 103;
+ connector->display_info.width_mm = 61;
+ connector->display_info.height_mm = 103;
return 1;
}
@@ -259,9 +259,8 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &lg4573_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 1ec57d0806a8..5ce3f4a2b7a1 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -8,7 +8,6 @@
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -34,7 +33,6 @@ struct panel_lvds {
unsigned int bus_format;
bool data_mirror;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *enable_gpio;
@@ -46,19 +44,6 @@ static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
return container_of(panel, struct panel_lvds, panel);
}
-static int panel_lvds_disable(struct drm_panel *panel)
-{
- struct panel_lvds *lvds = to_panel_lvds(panel);
-
- if (lvds->backlight) {
- lvds->backlight->props.power = FB_BLANK_POWERDOWN;
- lvds->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(lvds->backlight);
- }
-
- return 0;
-}
-
static int panel_lvds_unprepare(struct drm_panel *panel)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
@@ -93,26 +78,13 @@ static int panel_lvds_prepare(struct drm_panel *panel)
return 0;
}
-static int panel_lvds_enable(struct drm_panel *panel)
+static int panel_lvds_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
-
- if (lvds->backlight) {
- lvds->backlight->props.state &= ~BL_CORE_FBBLANK;
- lvds->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(lvds->backlight);
- }
-
- return 0;
-}
-
-static int panel_lvds_get_modes(struct drm_panel *panel)
-{
- struct panel_lvds *lvds = to_panel_lvds(panel);
- struct drm_connector *connector = lvds->panel.connector;
struct drm_display_mode *mode;
- mode = drm_mode_create(lvds->panel.drm);
+ mode = drm_mode_create(connector->dev);
if (!mode)
return 0;
@@ -132,10 +104,8 @@ static int panel_lvds_get_modes(struct drm_panel *panel)
}
static const struct drm_panel_funcs panel_lvds_funcs = {
- .disable = panel_lvds_disable,
.unprepare = panel_lvds_unprepare,
.prepare = panel_lvds_prepare,
- .enable = panel_lvds_enable,
.get_modes = panel_lvds_get_modes,
};
@@ -147,8 +117,11 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
int ret;
ret = of_get_display_timing(np, "panel-timing", &timing);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n",
+ np, ret);
return ret;
+ }
videomode_from_timing(&timing, &lvds->video_mode);
@@ -194,7 +167,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
static int panel_lvds_probe(struct platform_device *pdev)
{
struct panel_lvds *lvds;
- struct device_node *np;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -240,15 +212,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
return ret;
}
- np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
- if (np) {
- lvds->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!lvds->backlight)
- return -EPROBE_DEFER;
- }
-
/*
* TODO: Handle all power supplies specified in the DT node in a generic
* way for panels that don't care about power supply ordering. LVDS
@@ -257,20 +220,19 @@ static int panel_lvds_probe(struct platform_device *pdev)
*/
/* Register the panel. */
- drm_panel_init(&lvds->panel);
- lvds->panel.dev = lvds->dev;
- lvds->panel.funcs = &panel_lvds_funcs;
+ drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ ret = drm_panel_of_backlight(&lvds->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
- goto error;
+ return ret;
dev_set_drvdata(lvds->dev, lvds);
return 0;
-
-error:
- put_device(&lvds->backlight->dev);
- return ret;
}
static int panel_lvds_remove(struct platform_device *pdev)
@@ -279,10 +241,7 @@ static int panel_lvds_remove(struct platform_device *pdev)
drm_panel_remove(&lvds->panel);
- panel_lvds_disable(&lvds->panel);
-
- if (lvds->backlight)
- put_device(&lvds->backlight->dev);
+ drm_panel_disable(&lvds->panel);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
new file mode 100644
index 000000000000..c4f83f6384e1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NEC NL8048HL11 Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-nec-nl8048hl11 driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated
+ * Author: Erik Gilling <konkers@android.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct nl8048_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset_gpio;
+};
+
+#define to_nl8048_device(p) container_of(p, struct nl8048_panel, panel)
+
+static int nl8048_write(struct nl8048_panel *lcd, unsigned char addr,
+ unsigned char value)
+{
+ u8 data[4] = { value, 0x01, addr, 0x00 };
+ int ret;
+
+ ret = spi_write(lcd->spi, data, sizeof(data));
+ if (ret)
+ dev_err(&lcd->spi->dev, "SPI write to %u failed: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+static int nl8048_init(struct nl8048_panel *lcd)
+{
+ static const struct {
+ unsigned char addr;
+ unsigned char data;
+ } nl8048_init_seq[] = {
+ { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 },
+ { 5, 0x14 }, { 6, 0x24 }, { 16, 0xd7 }, { 17, 0x00 },
+ { 18, 0x00 }, { 19, 0x55 }, { 20, 0x01 }, { 21, 0x70 },
+ { 22, 0x1e }, { 23, 0x25 }, { 24, 0x25 }, { 25, 0x02 },
+ { 26, 0x02 }, { 27, 0xa0 }, { 32, 0x2f }, { 33, 0x0f },
+ { 34, 0x0f }, { 35, 0x0f }, { 36, 0x0f }, { 37, 0x0f },
+ { 38, 0x0f }, { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 },
+ { 42, 0x02 }, { 43, 0x0f }, { 44, 0x0f }, { 45, 0x0f },
+ { 46, 0x0f }, { 47, 0x0f }, { 48, 0x0f }, { 49, 0x0f },
+ { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
+ { 80, 0x0c }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 },
+ { 86, 0x14 }, { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 },
+ { 92, 0x02 }, { 93, 0x0c }, { 94, 0x1c }, { 95, 0x27 },
+ { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, { 103, 0x27 },
+ { 112, 0x01 }, { 113, 0x0e }, { 114, 0x02 }, { 115, 0x0c },
+ { 118, 0x0c }, { 121, 0x30 }, { 130, 0x00 }, { 131, 0x00 },
+ { 132, 0xfc }, { 134, 0x00 }, { 136, 0x00 }, { 138, 0x00 },
+ { 139, 0x00 }, { 140, 0x00 }, { 141, 0xfc }, { 143, 0x00 },
+ { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, { 149, 0x00 },
+ { 150, 0xfc }, { 152, 0x00 }, { 154, 0x00 }, { 156, 0x00 },
+ { 157, 0x00 },
+ };
+
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(nl8048_init_seq); ++i) {
+ ret = nl8048_write(lcd, nl8048_init_seq[i].addr,
+ nl8048_init_seq[i].data);
+ if (ret < 0)
+ return ret;
+ }
+
+ udelay(20);
+
+ return nl8048_write(lcd, 2, 0x00);
+}
+
+static int nl8048_disable(struct drm_panel *panel)
+{
+ struct nl8048_panel *lcd = to_nl8048_device(panel);
+
+ gpiod_set_value_cansleep(lcd->reset_gpio, 0);
+
+ return 0;
+}
+
+static int nl8048_enable(struct drm_panel *panel)
+{
+ struct nl8048_panel *lcd = to_nl8048_device(panel);
+
+ gpiod_set_value_cansleep(lcd->reset_gpio, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode nl8048_mode = {
+ /* NEC PIX Clock Ratings MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz */
+ .clock = 23800,
+ .hdisplay = 800,
+ .hsync_start = 800 + 6,
+ .hsync_end = 800 + 6 + 1,
+ .htotal = 800 + 6 + 1 + 4,
+ .vdisplay = 480,
+ .vsync_start = 480 + 3,
+ .vsync_end = 480 + 3 + 1,
+ .vtotal = 480 + 3 + 1 + 4,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 89,
+ .height_mm = 53,
+};
+
+static int nl8048_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &nl8048_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = nl8048_mode.width_mm;
+ connector->display_info.height_mm = nl8048_mode.height_mm;
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs nl8048_funcs = {
+ .disable = nl8048_disable,
+ .enable = nl8048_enable,
+ .get_modes = nl8048_get_modes,
+};
+
+static int __maybe_unused nl8048_suspend(struct device *dev)
+{
+ struct nl8048_panel *lcd = dev_get_drvdata(dev);
+
+ nl8048_write(lcd, 2, 0x01);
+ msleep(40);
+
+ return 0;
+}
+
+static int __maybe_unused nl8048_resume(struct device *dev)
+{
+ struct nl8048_panel *lcd = dev_get_drvdata(dev);
+
+ /* Reinitialize the panel. */
+ spi_setup(lcd->spi);
+ nl8048_write(lcd, 2, 0x00);
+ nl8048_init(lcd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(nl8048_pm_ops, nl8048_suspend, nl8048_resume);
+
+static int nl8048_probe(struct spi_device *spi)
+{
+ struct nl8048_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->reset_gpio)) {
+ dev_err(&spi->dev, "failed to parse reset gpio\n");
+ return PTR_ERR(lcd->reset_gpio);
+ }
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 32;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
+ return ret;
+ }
+
+ ret = nl8048_init(lcd);
+ if (ret < 0)
+ return ret;
+
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int nl8048_remove(struct spi_device *spi)
+{
+ struct nl8048_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id nl8048_of_match[] = {
+ { .compatible = "nec,nl8048hl11", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, nl8048_of_match);
+
+static const struct spi_device_id nl8048_ids[] = {
+ { "nl8048hl11", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, nl8048_ids);
+
+static struct spi_driver nl8048_driver = {
+ .probe = nl8048_probe,
+ .remove = nl8048_remove,
+ .id_table = nl8048_ids,
+ .driver = {
+ .name = "panel-nec-nl8048hl11",
+ .pm = &nl8048_pm_ops,
+ .of_match_table = nl8048_of_match,
+ },
+};
+
+module_spi_driver(nl8048_driver);
+
+MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
+MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
new file mode 100644
index 000000000000..a470810f7dbe
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Novatek NT39016 TFT LCD panel driver
+ *
+ * Copyright (C) 2017, Maarten ter Huurne <maarten@treewalker.org>
+ * Copyright (C) 2019, Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+enum nt39016_regs {
+ NT39016_REG_SYSTEM,
+ NT39016_REG_TIMING,
+ NT39016_REG_OP,
+ NT39016_REG_DATA_IN,
+ NT39016_REG_SRC_TIMING_DELAY,
+ NT39016_REG_GATE_TIMING_DELAY,
+ NT39016_REG_RESERVED,
+ NT39016_REG_INITIAL_FUNC,
+ NT39016_REG_CONTRAST,
+ NT39016_REG_BRIGHTNESS,
+ NT39016_REG_HUE_SATURATION,
+ NT39016_REG_RB_SUBCONTRAST,
+ NT39016_REG_R_SUBBRIGHTNESS,
+ NT39016_REG_B_SUBBRIGHTNESS,
+ NT39016_REG_VCOMDC,
+ NT39016_REG_VCOMAC,
+ NT39016_REG_VGAM2,
+ NT39016_REG_VGAM34,
+ NT39016_REG_VGAM56,
+ NT39016_REG_VCOMDC_TRIM = 0x1e,
+ NT39016_REG_DISPLAY_MODE = 0x20,
+};
+
+#define NT39016_SYSTEM_RESET_N BIT(0)
+#define NT39016_SYSTEM_STANDBY BIT(1)
+
+struct nt39016_panel_info {
+ struct drm_display_mode display_mode;
+ u16 width_mm, height_mm;
+ u32 bus_format, bus_flags;
+};
+
+struct nt39016 {
+ struct drm_panel drm_panel;
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *supply;
+ const struct nt39016_panel_info *panel_info;
+
+ struct gpio_desc *reset_gpio;
+
+ struct backlight_device *backlight;
+};
+
+static inline struct nt39016 *to_nt39016(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt39016, drm_panel);
+}
+
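+/*
+ * Initial register values written during prepare(); regmap waits 2 us
+ * after each write, as encoded in the RV() entries below.
+ */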
+#define RV(REG, VAL) { .reg = (REG), .def = (VAL), .delay_us = 2 }
+static const struct reg_sequence nt39016_panel_regs[] = {
+ RV(NT39016_REG_SYSTEM, 0x00),
+ RV(NT39016_REG_TIMING, 0x00),
+ RV(NT39016_REG_OP, 0x03),
+ RV(NT39016_REG_DATA_IN, 0xCC),
+ RV(NT39016_REG_SRC_TIMING_DELAY, 0x46),
+ RV(NT39016_REG_GATE_TIMING_DELAY, 0x05),
+ RV(NT39016_REG_RESERVED, 0x00),
+ RV(NT39016_REG_INITIAL_FUNC, 0x00),
+ RV(NT39016_REG_CONTRAST, 0x08),
+ RV(NT39016_REG_BRIGHTNESS, 0x40),
+ RV(NT39016_REG_HUE_SATURATION, 0x88),
+ RV(NT39016_REG_RB_SUBCONTRAST, 0x88),
+ RV(NT39016_REG_R_SUBBRIGHTNESS, 0x20),
+ RV(NT39016_REG_B_SUBBRIGHTNESS, 0x20),
+ RV(NT39016_REG_VCOMDC, 0x67),
+ RV(NT39016_REG_VCOMAC, 0xA4),
+ RV(NT39016_REG_VGAM2, 0x04),
+ RV(NT39016_REG_VGAM34, 0x24),
+ RV(NT39016_REG_VGAM56, 0x24),
+ RV(NT39016_REG_DISPLAY_MODE, 0x00),
+};
+
+#undef RV
+
+static const struct regmap_range nt39016_regmap_no_ranges[] = {
+ regmap_reg_range(0x13, 0x1D),
+ regmap_reg_range(0x1F, 0x1F),
+};
+
+static const struct regmap_access_table nt39016_regmap_access_table = {
+ .no_ranges = nt39016_regmap_no_ranges,
+ .n_no_ranges = ARRAY_SIZE(nt39016_regmap_no_ranges),
+};
+
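+/*
+ * Each SPI frame is 16 bits: a 6-bit register address plus two pad bits
+ * in the first byte (bit 1 doubles as the write flag), followed by an
+ * 8-bit register value.
+ */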
+static const struct regmap_config nt39016_regmap_config = {
+ .reg_bits = 6,
+ .pad_bits = 2,
+ .val_bits = 8,
+
+ .max_register = NT39016_REG_DISPLAY_MODE,
+ .wr_table = &nt39016_regmap_access_table,
+ .write_flag_mask = 0x02,
+
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int nt39016_prepare(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ int err;
+
+ err = regulator_enable(panel->supply);
+ if (err) {
+ dev_err(panel->dev, "Failed to enable power supply: %d", err);
+ return err;
+ }
+
+ /*
+ * Reset the NT39016.
+ * The documentation says the reset pulse should be at least 40 us to
+ * pass the glitch filter, but in testing a 70 us delay still produced
+ * occasional reset failures, so use 100 us instead.
+ */
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+ usleep_range(100, 1000);
+ gpiod_set_value_cansleep(panel->reset_gpio, 0);
+ udelay(2);
+
+ /* Init all registers. */
+ err = regmap_multi_reg_write(panel->map, nt39016_panel_regs,
+ ARRAY_SIZE(nt39016_panel_regs));
+ if (err) {
+ dev_err(panel->dev, "Failed to init registers: %d", err);
+ goto err_disable_regulator;
+ }
+
+ return 0;
+
+err_disable_regulator:
+ regulator_disable(panel->supply);
+ return err;
+}
+
+static int nt39016_unprepare(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+
+ regulator_disable(panel->supply);
+
+ return 0;
+}
+
+static int nt39016_enable(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ int ret;
+
+ ret = regmap_write(panel->map, NT39016_REG_SYSTEM,
+ NT39016_SYSTEM_RESET_N | NT39016_SYSTEM_STANDBY);
+ if (ret) {
+ dev_err(panel->dev, "Unable to enable panel: %d", ret);
+ return ret;
+ }
+
+ if (panel->backlight) {
+ /* Wait for the picture to be ready before enabling backlight */
+ msleep(150);
+
+ ret = backlight_enable(panel->backlight);
+ }
+
+ return ret;
+}
+
+static int nt39016_disable(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ int err;
+
+ backlight_disable(panel->backlight);
+
+ err = regmap_write(panel->map, NT39016_REG_SYSTEM,
+ NT39016_SYSTEM_RESET_N);
+ if (err) {
+ dev_err(panel->dev, "Unable to disable panel: %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int nt39016_get_modes(struct drm_panel *drm_panel,
+ struct drm_connector *connector)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ const struct nt39016_panel_info *panel_info = panel->panel_info;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel_info->bus_format, 1);
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs nt39016_funcs = {
+ .prepare = nt39016_prepare,
+ .unprepare = nt39016_unprepare,
+ .enable = nt39016_enable,
+ .disable = nt39016_disable,
+ .get_modes = nt39016_get_modes,
+};
+
+static int nt39016_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct nt39016 *panel;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->dev = dev;
+ spi_set_drvdata(spi, panel);
+
+ panel->panel_info = of_device_get_match_data(dev);
+ if (!panel->panel_info)
+ return -EINVAL;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply)) {
+ dev_err(dev, "Failed to get power supply");
+ return PTR_ERR(panel->supply);
+ }
+
+ panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset_gpio)) {
+ dev_err(dev, "Failed to get reset GPIO");
+ return PTR_ERR(panel->reset_gpio);
+ }
+
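+ /* The controller is driven over a half-duplex 3-wire SPI link in mode 3. */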
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_3 | SPI_3WIRE;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "Failed to setup SPI");
+ return err;
+ }
+
+ panel->map = devm_regmap_init_spi(spi, &nt39016_regmap_config);
+ if (IS_ERR(panel->map)) {
+ dev_err(dev, "Failed to init regmap");
+ return PTR_ERR(panel->map);
+ }
+
+ panel->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(panel->backlight)) {
+ err = PTR_ERR(panel->backlight);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get backlight handle");
+ return err;
+ }
+
+ drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ err = drm_panel_add(&panel->drm_panel);
+ if (err < 0) {
+ dev_err(dev, "Failed to register panel");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nt39016_remove(struct spi_device *spi)
+{
+ struct nt39016 *panel = spi_get_drvdata(spi);
+
+ drm_panel_remove(&panel->drm_panel);
+
+ nt39016_disable(&panel->drm_panel);
+ nt39016_unprepare(&panel->drm_panel);
+
+ return 0;
+}
+
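+/* 320x240 KD035G6 panel: 6 MHz pixel clock / (400 x 250 total) = 60 Hz. */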
+static const struct nt39016_panel_info kd035g6_info = {
+ .display_mode = {
+ .clock = 6000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 10,
+ .hsync_end = 320 + 10 + 50,
+ .htotal = 320 + 10 + 50 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 5,
+ .vsync_end = 240 + 5 + 1,
+ .vtotal = 240 + 5 + 1 + 4,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ .width_mm = 71,
+ .height_mm = 53,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct of_device_id nt39016_of_match[] = {
+ { .compatible = "kingdisplay,kd035g6-54nt", .data = &kd035g6_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nt39016_of_match);
+
+static struct spi_driver nt39016_driver = {
+ .driver = {
+ .name = "nt39016",
+ .of_match_table = nt39016_of_match,
+ },
+ .probe = nt39016_probe,
+ .remove = nt39016_remove,
+};
+
+module_spi_driver(nt39016_driver);
+
+MODULE_AUTHOR("Maarten ter Huurne <maarten@treewalker.org>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 2bae1db3ff34..09deb99981a4 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -6,7 +6,6 @@
* Author: Stefan Mavrodiev <stefan@olimex.com>
*/
-#include <linux/backlight.h>
#include <linux/crc32.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -68,7 +67,6 @@ struct lcd_olinuxino {
bool prepared;
bool enabled;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *enable_gpio;
@@ -87,8 +85,6 @@ static int lcd_olinuxino_disable(struct drm_panel *panel)
if (!lcd->enabled)
return 0;
- backlight_disable(lcd->backlight);
-
lcd->enabled = false;
return 0;
@@ -134,19 +130,16 @@ static int lcd_olinuxino_enable(struct drm_panel *panel)
if (lcd->enabled)
return 0;
- backlight_enable(lcd->backlight);
-
lcd->enabled = true;
return 0;
}
-static int lcd_olinuxino_get_modes(struct drm_panel *panel)
+static int lcd_olinuxino_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
- struct drm_connector *connector = lcd->panel.connector;
struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
- struct drm_device *drm = lcd->panel.drm;
struct lcd_olinuxino_mode *lcd_mode;
struct drm_display_mode *mode;
u32 i, num = 0;
@@ -155,13 +148,13 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
lcd_mode = (struct lcd_olinuxino_mode *)
&lcd->eeprom.reserved[i * sizeof(*lcd_mode)];
- mode = drm_mode_create(drm);
+ mode = drm_mode_create(connector->dev);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
lcd_mode->hactive,
lcd_mode->vactive,
lcd_mode->refresh);
- continue;
+ continue;
}
mode->clock = lcd_mode->pixelclock;
@@ -284,13 +277,12 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (IS_ERR(lcd->enable_gpio))
return PTR_ERR(lcd->enable_gpio);
- lcd->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(lcd->backlight))
- return PTR_ERR(lcd->backlight);
+ drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = dev;
- lcd->panel.funcs = &lcd_olinuxino_funcs;
+ ret = drm_panel_of_backlight(&lcd->panel);
+ if (ret)
+ return ret;
return drm_panel_add(&lcd->panel);
}
@@ -301,8 +293,8 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
drm_panel_remove(&panel->panel);
- lcd_olinuxino_disable(&panel->panel);
- lcd_olinuxino_unprepare(&panel->panel);
+ drm_panel_disable(&panel->panel);
+ drm_panel_unprepare(&panel->panel);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c7b48df8869a..bb0c992171e8 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -349,11 +349,12 @@ static int otm8009a_enable(struct drm_panel *panel)
return 0;
}
-static int otm8009a_get_modes(struct drm_panel *panel)
+static int otm8009a_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -364,10 +365,10 @@ static int otm8009a_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
return 1;
}
@@ -455,9 +456,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &otm8009a_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dsi->host->dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index e0e20ecff916..3a0229d60095 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -4,7 +4,6 @@
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
-#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
@@ -20,7 +19,6 @@ struct osd101t2587_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
bool prepared;
@@ -42,8 +40,6 @@ static int osd101t2587_panel_disable(struct drm_panel *panel)
if (!osd101t2587->enabled)
return 0;
- backlight_disable(osd101t2587->backlight);
-
ret = mipi_dsi_shutdown_peripheral(osd101t2587->dsi);
osd101t2587->enabled = false;
@@ -91,8 +87,6 @@ static int osd101t2587_panel_enable(struct drm_panel *panel)
if (ret)
return ret;
- backlight_enable(osd101t2587->backlight);
-
osd101t2587->enabled = true;
return ret;
@@ -112,14 +106,15 @@ static const struct drm_display_mode default_mode_osd101t2587 = {
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
-static int osd101t2587_panel_get_modes(struct drm_panel *panel)
+static int osd101t2587_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, osd101t2587->default_mode);
+ mode = drm_mode_duplicate(connector->dev, osd101t2587->default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
osd101t2587->default_mode->hdisplay,
osd101t2587->default_mode->vdisplay,
osd101t2587->default_mode->vrefresh);
@@ -128,10 +123,10 @@ static int osd101t2587_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 217;
- panel->connector->display_info.height_mm = 136;
+ connector->display_info.width_mm = 217;
+ connector->display_info.height_mm = 136;
return 1;
}
@@ -157,18 +152,18 @@ MODULE_DEVICE_TABLE(of, osd101t2587_of_match);
static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
{
struct device *dev = &osd101t2587->dsi->dev;
+ int ret;
osd101t2587->supply = devm_regulator_get(dev, "power");
if (IS_ERR(osd101t2587->supply))
return PTR_ERR(osd101t2587->supply);
- osd101t2587->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(osd101t2587->backlight))
- return PTR_ERR(osd101t2587->backlight);
+ drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
+ &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
- drm_panel_init(&osd101t2587->base);
- osd101t2587->base.funcs = &osd101t2587_panel_funcs;
- osd101t2587->base.dev = &osd101t2587->dsi->dev;
+ ret = drm_panel_of_backlight(&osd101t2587->base);
+ if (ret)
+ return ret;
return drm_panel_add(&osd101t2587->base);
}
@@ -215,12 +210,11 @@ static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = osd101t2587_panel_disable(&osd101t2587->base);
+ ret = drm_panel_disable(&osd101t2587->base);
if (ret < 0)
dev_warn(&dsi->dev, "failed to disable panel: %d\n", ret);
- osd101t2587_panel_unprepare(&osd101t2587->base);
-
+ drm_panel_unprepare(&osd101t2587->base);
drm_panel_remove(&osd101t2587->base);
ret = mipi_dsi_detach(dsi);
@@ -234,8 +228,8 @@ static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
- osd101t2587_panel_disable(&osd101t2587->base);
- osd101t2587_panel_unprepare(&osd101t2587->base);
+ drm_panel_disable(&osd101t2587->base);
+ drm_panel_unprepare(&osd101t2587->base);
}
static struct mipi_dsi_driver osd101t2587_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3dff0b3f73c2..69693451462e 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -7,7 +7,6 @@
* Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -31,7 +30,6 @@ struct wuxga_nt_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
bool prepared;
@@ -62,12 +60,6 @@ static int wuxga_nt_panel_disable(struct drm_panel *panel)
mipi_ret = mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
- if (wuxga_nt->backlight) {
- wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
- wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
- bl_ret = backlight_update_status(wuxga_nt->backlight);
- }
-
wuxga_nt->enabled = false;
return mipi_ret ? mipi_ret : bl_ret;
@@ -142,12 +134,6 @@ static int wuxga_nt_panel_enable(struct drm_panel *panel)
if (wuxga_nt->enabled)
return 0;
- if (wuxga_nt->backlight) {
- wuxga_nt->backlight->props.power = FB_BLANK_UNBLANK;
- wuxga_nt->backlight->props.state &= ~BL_CORE_FBBLANK;
- backlight_update_status(wuxga_nt->backlight);
- }
-
wuxga_nt->enabled = true;
return 0;
@@ -166,24 +152,25 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int wuxga_nt_panel_get_modes(struct drm_panel *panel)
+static int wuxga_nt_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
return -ENOMEM;
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 217;
- panel->connector->display_info.height_mm = 136;
+ connector->display_info.width_mm = 217;
+ connector->display_info.height_mm = 136;
return 1;
}
@@ -205,7 +192,6 @@ MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
{
struct device *dev = &wuxga_nt->dsi->dev;
- struct device_node *np;
int ret;
wuxga_nt->mode = &default_mode;
@@ -214,39 +200,20 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
if (IS_ERR(wuxga_nt->supply))
return PTR_ERR(wuxga_nt->supply);
- np = of_parse_phandle(dev->of_node, "backlight", 0);
- if (np) {
- wuxga_nt->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!wuxga_nt->backlight)
- return -EPROBE_DEFER;
- }
-
- drm_panel_init(&wuxga_nt->base);
- wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
- wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+ drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
+ &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&wuxga_nt->base);
- if (ret < 0)
- goto put_backlight;
-
- return 0;
-
-put_backlight:
- if (wuxga_nt->backlight)
- put_device(&wuxga_nt->backlight->dev);
+ ret = drm_panel_of_backlight(&wuxga_nt->base);
+ if (ret)
+ return ret;
- return ret;
+ return drm_panel_add(&wuxga_nt->base);
}
static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
{
if (wuxga_nt->base.dev)
drm_panel_remove(&wuxga_nt->base);
-
- if (wuxga_nt->backlight)
- put_device(&wuxga_nt->backlight->dev);
}
static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
@@ -281,7 +248,7 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = wuxga_nt_panel_disable(&wuxga_nt->base);
+ ret = drm_panel_disable(&wuxga_nt->base);
if (ret < 0)
dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
@@ -298,7 +265,7 @@ static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
- wuxga_nt_panel_disable(&wuxga_nt->base);
+ drm_panel_disable(&wuxga_nt->base);
}
static struct mipi_dsi_driver wuxga_nt_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 28c0620dfe0f..8f078b7dd89e 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -44,8 +44,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -311,10 +309,9 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
return 0;
}
-static int rpi_touchscreen_get_modes(struct drm_panel *panel)
+static int rpi_touchscreen_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
- struct drm_device *drm = panel->drm;
unsigned int i, num = 0;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
@@ -322,9 +319,9 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
continue;
}
@@ -399,7 +396,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
/* Look up the DSI host. It needs to probe before we do. */
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!endpoint)
+ return -ENODEV;
+
dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!dsi_host_node)
+ goto error;
+
host = of_find_mipi_dsi_host_by_node(dsi_host_node);
of_node_put(dsi_host_node);
if (!host) {
@@ -408,6 +411,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
}
info.node = of_graph_get_remote_port(endpoint);
+ if (!info.node)
+ goto error;
+
of_node_put(endpoint);
ts->dsi = mipi_dsi_device_register_full(host, &info);
@@ -417,8 +423,8 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return PTR_ERR(ts->dsi);
}
- ts->base.dev = dev;
- ts->base.funcs = &rpi_touchscreen_funcs;
+ drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
@@ -428,6 +434,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return ret;
return 0;
+
+error:
+ of_node_put(endpoint);
+ return -ENODEV;
}
static int rpi_touchscreen_remove(struct i2c_client *i2c)
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
new file mode 100644
index 000000000000..313637d53d28
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Raydium RM67191 MIPI-DSI panel driver
+ *
+ * Copyright 2019 NXP
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Panel specific color-format bits */
+#define COL_FMT_16BPP 0x55
+#define COL_FMT_18BPP 0x66
+#define COL_FMT_24BPP 0x77
+
+/* Write Manufacture Command Set Control */
+#define WRMAUCCTR 0xFE
+
+/* Manufacturer Command Set pages (CMD2) */
+struct cmd_set_entry {
+ u8 cmd;
+ u8 param;
+};
+
+/*
+ * These commands are not described in the Reference Manual. They were
+ * provided by the vendor, so use them as-is.
+ */
+static const struct cmd_set_entry manufacturer_cmd_set[] = {
+ {0xFE, 0x0B},
+ {0x28, 0x40},
+ {0x29, 0x4F},
+ {0xFE, 0x0E},
+ {0x4B, 0x00},
+ {0x4C, 0x0F},
+ {0x4D, 0x20},
+ {0x4E, 0x40},
+ {0x4F, 0x60},
+ {0x50, 0xA0},
+ {0x51, 0xC0},
+ {0x52, 0xE0},
+ {0x53, 0xFF},
+ {0xFE, 0x0D},
+ {0x18, 0x08},
+ {0x42, 0x00},
+ {0x08, 0x41},
+ {0x46, 0x02},
+ {0x72, 0x09},
+ {0xFE, 0x0A},
+ {0x24, 0x17},
+ {0x04, 0x07},
+ {0x1A, 0x0C},
+ {0x0F, 0x44},
+ {0xFE, 0x04},
+ {0x00, 0x0C},
+ {0x05, 0x08},
+ {0x06, 0x08},
+ {0x08, 0x08},
+ {0x09, 0x08},
+ {0x0A, 0xE6},
+ {0x0B, 0x8C},
+ {0x1A, 0x12},
+ {0x1E, 0xE0},
+ {0x29, 0x93},
+ {0x2A, 0x93},
+ {0x2F, 0x02},
+ {0x31, 0x02},
+ {0x33, 0x05},
+ {0x37, 0x2D},
+ {0x38, 0x2D},
+ {0x3A, 0x1E},
+ {0x3B, 0x1E},
+ {0x3D, 0x27},
+ {0x3F, 0x80},
+ {0x40, 0x40},
+ {0x41, 0xE0},
+ {0x4F, 0x2F},
+ {0x50, 0x1E},
+ {0xFE, 0x06},
+ {0x00, 0xCC},
+ {0x05, 0x05},
+ {0x07, 0xA2},
+ {0x08, 0xCC},
+ {0x0D, 0x03},
+ {0x0F, 0xA2},
+ {0x32, 0xCC},
+ {0x37, 0x05},
+ {0x39, 0x83},
+ {0x3A, 0xCC},
+ {0x41, 0x04},
+ {0x43, 0x83},
+ {0x44, 0xCC},
+ {0x49, 0x05},
+ {0x4B, 0xA2},
+ {0x4C, 0xCC},
+ {0x51, 0x03},
+ {0x53, 0xA2},
+ {0x75, 0xCC},
+ {0x7A, 0x03},
+ {0x7C, 0x83},
+ {0x7D, 0xCC},
+ {0x82, 0x02},
+ {0x84, 0x83},
+ {0x85, 0xEC},
+ {0x86, 0x0F},
+ {0x87, 0xFF},
+ {0x88, 0x00},
+ {0x8A, 0x02},
+ {0x8C, 0xA2},
+ {0x8D, 0xEA},
+ {0x8E, 0x01},
+ {0x8F, 0xE8},
+ {0xFE, 0x06},
+ {0x90, 0x0A},
+ {0x92, 0x06},
+ {0x93, 0xA0},
+ {0x94, 0xA8},
+ {0x95, 0xEC},
+ {0x96, 0x0F},
+ {0x97, 0xFF},
+ {0x98, 0x00},
+ {0x9A, 0x02},
+ {0x9C, 0xA2},
+ {0xAC, 0x04},
+ {0xFE, 0x06},
+ {0xB1, 0x12},
+ {0xB2, 0x17},
+ {0xB3, 0x17},
+ {0xB4, 0x17},
+ {0xB5, 0x17},
+ {0xB6, 0x11},
+ {0xB7, 0x08},
+ {0xB8, 0x09},
+ {0xB9, 0x06},
+ {0xBA, 0x07},
+ {0xBB, 0x17},
+ {0xBC, 0x17},
+ {0xBD, 0x17},
+ {0xBE, 0x17},
+ {0xBF, 0x17},
+ {0xC0, 0x17},
+ {0xC1, 0x17},
+ {0xC2, 0x17},
+ {0xC3, 0x17},
+ {0xC4, 0x0F},
+ {0xC5, 0x0E},
+ {0xC6, 0x00},
+ {0xC7, 0x01},
+ {0xC8, 0x10},
+ {0xFE, 0x06},
+ {0x95, 0xEC},
+ {0x8D, 0xEE},
+ {0x44, 0xEC},
+ {0x4C, 0xEC},
+ {0x32, 0xEC},
+ {0x3A, 0xEC},
+ {0x7D, 0xEC},
+ {0x75, 0xEC},
+ {0x00, 0xEC},
+ {0x08, 0xEC},
+ {0x85, 0xEC},
+ {0xA6, 0x21},
+ {0xA7, 0x05},
+ {0xA9, 0x06},
+ {0x82, 0x06},
+ {0x41, 0x06},
+ {0x7A, 0x07},
+ {0x37, 0x07},
+ {0x05, 0x06},
+ {0x49, 0x06},
+ {0x0D, 0x04},
+ {0x51, 0x04},
+};
+
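+/* Bus formats advertised via display_info; each maps to a COL_FMT_* setting. */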
+static const u32 rad_bus_formats[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static const u32 rad_bus_flags = DRM_BUS_FLAG_DE_LOW |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+struct rad_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct gpio_desc *reset;
+ struct backlight_device *backlight;
+
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
+
+ bool prepared;
+ bool enabled;
+};
+
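+/* 1080x1920 panel timing: 132 MHz / (1136 x 1936 total) is roughly 60 Hz. */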
+static const struct drm_display_mode default_mode = {
+ .clock = 132000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 20,
+ .hsync_end = 1080 + 20 + 2,
+ .htotal = 1080 + 20 + 2 + 34,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 2,
+ .vtotal = 1920 + 10 + 2 + 4,
+ .vrefresh = 60,
+ .width_mm = 68,
+ .height_mm = 121,
+ .flags = DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
+};
+
+static inline struct rad_panel *to_rad_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rad_panel, panel);
+}
+
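+/* Send each {cmd, param} pair of the MCS table as a two-byte generic write. */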
+static int rad_panel_push_cmd_list(struct mipi_dsi_device *dsi)
+{
+ size_t i;
+ size_t count = ARRAY_SIZE(manufacturer_cmd_set);
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ const struct cmd_set_entry *entry = &manufacturer_cmd_set[i];
+ u8 buffer[2] = { entry->cmd, entry->param };
+
+ ret = mipi_dsi_generic_write(dsi, &buffer, sizeof(buffer));
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+};
+
+static int color_format_from_dsi_format(enum mipi_dsi_pixel_format format)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB565:
+ return COL_FMT_16BPP;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return COL_FMT_18BPP;
+ case MIPI_DSI_FMT_RGB888:
+ return COL_FMT_24BPP;
+ default:
+ return COL_FMT_24BPP; /* for backward compatibility */
+ }
+};
+
+static int rad_panel_prepare(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ int ret;
+
+ if (rad->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(rad->num_supplies, rad->supplies);
+ if (ret)
+ return ret;
+
+ if (rad->reset) {
+ gpiod_set_value_cansleep(rad->reset, 1);
+ usleep_range(3000, 5000);
+ gpiod_set_value_cansleep(rad->reset, 0);
+ usleep_range(18000, 20000);
+ }
+
+ rad->prepared = true;
+
+ return 0;
+}
+
+static int rad_panel_unprepare(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ int ret;
+
+ if (!rad->prepared)
+ return 0;
+
+ /*
+ * Assert the reset only briefly and then release it again, so that the
+ * touch driver keeps an active connection to the touch controller even
+ * after the display has been turned off.
+ */
+ if (rad->reset) {
+ gpiod_set_value_cansleep(rad->reset, 1);
+ usleep_range(15000, 17000);
+ gpiod_set_value_cansleep(rad->reset, 0);
+ }
+
+ ret = regulator_bulk_disable(rad->num_supplies, rad->supplies);
+ if (ret)
+ return ret;
+
+ rad->prepared = false;
+
+ return 0;
+}
+
+static int rad_panel_enable(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ struct mipi_dsi_device *dsi = rad->dsi;
+ struct device *dev = &dsi->dev;
+ int color_format = color_format_from_dsi_format(dsi->format);
+ int ret;
+
+ if (rad->enabled)
+ return 0;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = rad_panel_push_cmd_list(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to send MCS (%d)\n", ret);
+ goto fail;
+ }
+
+ /* Select User Command Set table (CMD1) */
+ ret = mipi_dsi_generic_write(dsi, (u8[]){ WRMAUCCTR, 0x00 }, 2);
+ if (ret < 0)
+ goto fail;
+
+ /* Software reset */
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to do Software Reset (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(15000, 17000);
+
+ /* Set DSI mode */
+ ret = mipi_dsi_generic_write(dsi, (u8[]){ 0xC2, 0x0B }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set DSI mode (%d)\n", ret);
+ goto fail;
+ }
+ /* Set tear ON */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set tear ON (%d)\n", ret);
+ goto fail;
+ }
+ /* Set tear scanline */
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x380);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set tear scanline (%d)\n", ret);
+ goto fail;
+ }
+ /* Set pixel format */
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, color_format);
+ DRM_DEV_DEBUG_DRIVER(dev, "Interface color format set to 0x%x\n",
+ color_format);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set pixel format (%d)\n", ret);
+ goto fail;
+ }
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(5000, 7000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set display ON (%d)\n", ret);
+ goto fail;
+ }
+
+ backlight_enable(rad->backlight);
+
+ rad->enabled = true;
+
+ return 0;
+
+fail:
+ gpiod_set_value_cansleep(rad->reset, 1);
+
+ return ret;
+}
+
+static int rad_panel_disable(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ struct mipi_dsi_device *dsi = rad->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (!rad->enabled)
+ return 0;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ backlight_disable(rad->backlight);
+
+ usleep_range(10000, 12000);
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set display OFF (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ rad->enabled = false;
+
+ return 0;
+}
+
+static int rad_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bus_flags = rad_bus_flags;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ rad_bus_formats,
+ ARRAY_SIZE(rad_bus_formats));
+ return 1;
+}
+
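+/*
+ * Backlight control goes through DCS get/set display brightness commands,
+ * sent in high-speed mode once the panel has been prepared.
+ */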
+static int rad_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+ u16 brightness;
+ int ret;
+
+ if (!rad->prepared)
+ return 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ bl->props.brightness = brightness;
+
+ return brightness & 0xff;
+}
+
+static int rad_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+ int ret = 0;
+
+ if (!rad->prepared)
+ return 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops rad_bl_ops = {
+ .update_status = rad_bl_update_status,
+ .get_brightness = rad_bl_get_brightness,
+};
+
+static const struct drm_panel_funcs rad_panel_funcs = {
+ .prepare = rad_panel_prepare,
+ .unprepare = rad_panel_unprepare,
+ .enable = rad_panel_enable,
+ .disable = rad_panel_disable,
+ .get_modes = rad_panel_get_modes,
+};
+
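+/* Names of the panel's 3.3 V and 1.8 V supply rails. */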
+static const char * const rad_supply_names[] = {
+ "v3p3",
+ "v1p8",
+};
+
+static int rad_init_regulators(struct rad_panel *rad)
+{
+ struct device *dev = &rad->dsi->dev;
+ int i;
+
+ rad->num_supplies = ARRAY_SIZE(rad_supply_names);
+ rad->supplies = devm_kcalloc(dev, rad->num_supplies,
+ sizeof(*rad->supplies), GFP_KERNEL);
+ if (!rad->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < rad->num_supplies; i++)
+ rad->supplies[i].supply = rad_supply_names[i];
+
+ return devm_regulator_bulk_get(dev, rad->num_supplies, rad->supplies);
+};
+
+static int rad_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *np = dev->of_node;
+ struct rad_panel *panel;
+ struct backlight_properties bl_props;
+ int ret;
+ u32 video_mode;
+
+ panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, panel);
+
+ panel->dsi = dsi;
+
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = of_property_read_u32(np, "video-mode", &video_mode);
+ if (!ret) {
+ switch (video_mode) {
+ case 0:
+ /* burst mode */
+ dsi->mode_flags |= MIPI_DSI_MODE_VIDEO_BURST;
+ break;
+ case 1:
+ /* non-burst mode with sync event */
+ break;
+ case 2:
+ /* non-burst mode with sync pulse */
+ dsi->mode_flags |= MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ break;
+ default:
+ dev_warn(dev, "invalid video mode %d\n", video_mode);
+ break;
+ }
+ }
+
+ ret = of_property_read_u32(np, "dsi-lanes", &dsi->lanes);
+ if (ret) {
+ dev_err(dev, "Failed to get dsi-lanes property (%d)\n", ret);
+ return ret;
+ }
+
+ panel->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(panel->reset))
+ return PTR_ERR(panel->reset);
+
+ memset(&bl_props, 0, sizeof(bl_props));
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.brightness = 255;
+ bl_props.max_brightness = 255;
+
+ panel->backlight = devm_backlight_device_register(dev, dev_name(dev),
+ dev, dsi, &rad_bl_ops,
+ &bl_props);
+ if (IS_ERR(panel->backlight)) {
+ ret = PTR_ERR(panel->backlight);
+ dev_err(dev, "Failed to register backlight (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rad_init_regulators(panel);
+ if (ret)
+ return ret;
+
+ drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ dev_set_drvdata(dev, panel);
+
+ ret = drm_panel_add(&panel->panel);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&panel->panel);
+
+ return ret;
+}
+
+static int rad_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ DRM_DEV_ERROR(dev, "Failed to detach from host (%d)\n",
+ ret);
+
+ drm_panel_remove(&rad->panel);
+
+ return 0;
+}
+
+static void rad_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+
+ rad_panel_disable(&rad->panel);
+ rad_panel_unprepare(&rad->panel);
+}
+
+static const struct of_device_id rad_of_match[] = {
+ { .compatible = "raydium,rm67191", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rad_of_match);
+
+static struct mipi_dsi_driver rad_panel_driver = {
+ .driver = {
+ .name = "panel-raydium-rm67191",
+ .of_match_table = rad_of_match,
+ },
+ .probe = rad_panel_probe,
+ .remove = rad_panel_remove,
+ .shutdown = rad_panel_shutdown,
+};
+module_mipi_dsi_driver(rad_panel_driver);
+
+MODULE_AUTHOR("Robert Chiras <robert.chiras@nxp.com>");
+MODULE_DESCRIPTION("DRM Driver for Raydium RM67191 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index ba889625ad43..e8982948e0ea 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -6,9 +6,9 @@
* Yannick Fertre <yannick.fertre@st.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -78,7 +78,6 @@ struct rm68200 {
struct drm_panel panel;
struct gpio_desc *reset_gpio;
struct regulator *supply;
- struct backlight_device *backlight;
bool prepared;
bool enabled;
};
@@ -242,8 +241,6 @@ static int rm68200_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0;
- backlight_disable(ctx->backlight);
-
ctx->enabled = false;
return 0;
@@ -328,18 +325,17 @@ static int rm68200_enable(struct drm_panel *panel)
if (ctx->enabled)
return 0;
- backlight_enable(ctx->backlight);
-
ctx->enabled = true;
return 0;
}
-static int rm68200_get_modes(struct drm_panel *panel)
+static int rm68200_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -350,10 +346,10 @@ static int rm68200_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
return 1;
}
@@ -391,10 +387,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
return ret;
}
- ctx->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(ctx->backlight))
- return PTR_ERR(ctx->backlight);
-
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
@@ -404,9 +396,12 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &rm68200_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index 6dcb692c4701..38ff742bc120 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -5,19 +5,22 @@
* Copyright (C) Purism SPC 2019
*/
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
#include <video/display_timing.h>
#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define DRV_NAME "panel-rocktech-jh057n00900"
/* Manufacturer specific Commands send via DSI */
@@ -33,6 +36,7 @@
#define ST7703_CMD_SETEXTC 0xB9
#define ST7703_CMD_SETMIPI 0xBA
#define ST7703_CMD_SETVDC 0xBC
+#define ST7703_CMD_UNKNOWN0 0xBF
#define ST7703_CMD_SETSCR 0xC0
#define ST7703_CMD_SETPOWER 0xC1
#define ST7703_CMD_SETPANEL 0xCC
@@ -45,7 +49,8 @@ struct jh057n {
struct device *dev;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
- struct backlight_device *backlight;
+ struct regulator *vcc;
+ struct regulator *iovcc;
bool prepared;
struct dentry *debugfs;
@@ -94,7 +99,7 @@ static int jh057n_init_sequence(struct jh057n *ctx)
msleep(20);
dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
- dsi_generic_write_seq(dsi, 0xBF, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN0, 0x02, 0x11, 0x00);
dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
@@ -123,7 +128,7 @@ static int jh057n_init_sequence(struct jh057n *ctx)
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to exit sleep mode\n");
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode: %d\n", ret);
return ret;
}
/* Panel is operational 120 msec after reset */
@@ -139,26 +144,35 @@ static int jh057n_init_sequence(struct jh057n *ctx)
static int jh057n_enable(struct drm_panel *panel)
{
struct jh057n *ctx = panel_to_jh057n(panel);
+ int ret;
- return backlight_enable(ctx->backlight);
+ ret = jh057n_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
}
static int jh057n_disable(struct drm_panel *panel)
{
struct jh057n *ctx = panel_to_jh057n(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- return backlight_disable(ctx->backlight);
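+ /* Blank the display here; the supplies are cut later in unprepare(). */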
+ return mipi_dsi_dcs_set_display_off(dsi);
}
static int jh057n_unprepare(struct drm_panel *panel)
{
struct jh057n *ctx = panel_to_jh057n(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
if (!ctx->prepared)
return 0;
- mipi_dsi_dcs_set_display_off(dsi);
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vcc);
ctx->prepared = false;
return 0;
@@ -173,21 +187,31 @@ static int jh057n_prepare(struct drm_panel *panel)
return 0;
DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vcc supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vcc;
+ }
+
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
usleep_range(20, 40);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
msleep(20);
- ret = jh057n_init_sequence(ctx);
- if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
- ret);
- return ret;
- }
-
ctx->prepared = true;
return 0;
+
+disable_vcc:
+ regulator_disable(ctx->vcc);
+ return ret;
}
static const struct drm_display_mode default_mode = {
@@ -206,12 +230,13 @@ static const struct drm_display_mode default_mode = {
.height_mm = 130,
};
-static int jh057n_get_modes(struct drm_panel *panel)
+static int jh057n_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct jh057n *ctx = panel_to_jh057n(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -222,9 +247,9 @@ static int jh057n_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
- drm_mode_probed_add(panel->connector, mode);
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
return 1;
}
@@ -296,19 +321,39 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
- ctx->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(ctx->backlight))
- return PTR_ERR(ctx->backlight);
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc)) {
+ ret = PTR_ERR(ctx->vcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &jh057n_drm_funcs;
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ DRM_DEV_ERROR(dev,
+ "mipi_dsi_attach failed (%d). Is host ready?\n",
+ ret);
drm_panel_remove(&ctx->panel);
return ret;
}
@@ -327,12 +372,12 @@ static void jh057n_shutdown(struct mipi_dsi_device *dsi)
struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = jh057n_unprepare(&ctx->panel);
+ ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
ret);
- ret = jh057n_disable(&ctx->panel);
+ ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
ret);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 3c15764f0c03..ef18559e237e 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -7,7 +7,6 @@
* This file based on panel-ilitek-ili9881c.c
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -29,7 +28,6 @@
struct rb070d30_panel {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
struct {
@@ -84,22 +82,13 @@ static int rb070d30_panel_enable(struct drm_panel *panel)
if (ret)
return ret;
- ret = backlight_enable(ctx->backlight);
- if (ret)
- goto out;
-
return 0;
-
-out:
- mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
- return ret;
}
static int rb070d30_panel_disable(struct drm_panel *panel)
{
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
}
@@ -120,14 +109,14 @@ static const struct drm_display_mode default_mode = {
.height_mm = 85,
};
-static int rb070d30_panel_get_modes(struct drm_panel *panel)
+static int rb070d30_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
struct drm_display_mode *mode;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_DEV_ERROR(&ctx->dsi->dev,
"Failed to add mode " DRM_MODE_FMT "\n",
@@ -140,9 +129,9 @@ static int rb070d30_panel_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.bpc = 8;
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
@@ -173,9 +162,8 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &rb070d30_panel_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
@@ -209,11 +197,9 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->gpios.shlr);
}
- ctx->backlight = devm_of_find_backlight(&dsi->dev);
- if (IS_ERR(ctx->backlight)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
- return PTR_ERR(ctx->backlight);
- }
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3be902dcedc0..3c52f15f7a1c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -261,9 +261,9 @@ static int ld9040_enable(struct drm_panel *panel)
return 0;
}
-static int ld9040_get_modes(struct drm_panel *panel)
+static int ld9040_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct ld9040 *ctx = panel_to_ld9040(panel);
struct drm_display_mode *mode;
@@ -351,9 +351,8 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &ld9040_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index f75bef24e050..2150043dcf6b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -143,12 +143,12 @@ static int s6d16d0_disable(struct drm_panel *panel)
return 0;
}
-static int s6d16d0_get_modes(struct drm_panel *panel)
+static int s6d16d0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
+ mode = drm_mode_duplicate(connector->dev, &samsung_s6d16d0_mode);
if (!mode) {
DRM_ERROR("bad mode or failed to add mode\n");
return -EINVAL;
@@ -215,9 +215,8 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel);
- s6->panel.dev = dev;
- s6->panel.funcs = &s6d16d0_drm_funcs;
+ drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&s6->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index b923de23ed65..36ebd5a4ac7b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -645,13 +645,13 @@ static const struct s6e3ha2_panel_desc samsung_s6e3hf2 = {
.type = HF2_TYPE,
};
-static int s6e3ha2_get_modes(struct drm_panel *panel)
+static int s6e3ha2_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, ctx->desc->mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
@@ -732,9 +732,8 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e3ha2_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index cd90fa700c49..a3570e0a90a8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -400,12 +400,12 @@ static int s6e63j0x03_enable(struct drm_panel *panel)
return 0;
}
-static int s6e63j0x03_get_modes(struct drm_panel *panel)
+static int s6e63j0x03_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -466,9 +466,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63j0x03_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 142d395ea512..a5f76eb4fa25 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -362,12 +362,12 @@ static int s6e63m0_enable(struct drm_panel *panel)
return 0;
}
-static int s6e63m0_get_modes(struct drm_panel *panel)
+static int s6e63m0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -473,9 +473,8 @@ static int s6e63m0_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63m0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = s6e63m0_backlight_register(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 81858267723a..8a028d2bd0d6 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -920,9 +920,9 @@ static int s6e8aa0_enable(struct drm_panel *panel)
return 0;
}
-static int s6e8aa0_get_modes(struct drm_panel *panel)
+static int s6e8aa0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
struct drm_display_mode *mode;
@@ -1017,9 +1017,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e8aa0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 18b22b1294fb..40fcbbbacb2c 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -6,7 +6,6 @@
* Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -46,7 +45,6 @@ struct seiko_panel {
bool prepared;
bool enabled;
const struct seiko_panel_desc *desc;
- struct backlight_device *backlight;
struct regulator *dvdd;
struct regulator *avdd;
};
@@ -56,10 +54,9 @@ static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
return container_of(panel, struct seiko_panel, base);
}
-static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
+static int seiko_panel_get_fixed_modes(struct seiko_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->base.connector;
- struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
unsigned int i, num = 0;
@@ -71,9 +68,9 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
struct videomode vm;
videomode_from_timing(dt, &vm);
- mode = drm_mode_create(drm);
+ mode = drm_mode_create(connector->dev);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
dt->hactive.typ, dt->vactive.typ);
continue;
}
@@ -92,9 +89,9 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
- mode = drm_mode_duplicate(drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
continue;
}
@@ -128,12 +125,6 @@ static int seiko_panel_disable(struct drm_panel *panel)
if (!p->enabled)
return 0;
- if (p->backlight) {
- p->backlight->props.power = FB_BLANK_POWERDOWN;
- p->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(p->backlight);
- }
-
p->enabled = false;
return 0;
@@ -197,23 +188,18 @@ static int seiko_panel_enable(struct drm_panel *panel)
if (p->enabled)
return 0;
- if (p->backlight) {
- p->backlight->props.state &= ~BL_CORE_FBBLANK;
- p->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(p->backlight);
- }
-
p->enabled = true;
return 0;
}
-static int seiko_panel_get_modes(struct drm_panel *panel)
+static int seiko_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct seiko_panel *p = to_seiko_panel(panel);
/* add hard-coded panel modes */
- return seiko_panel_get_fixed_modes(p);
+ return seiko_panel_get_fixed_modes(p, connector);
}
static int seiko_panel_get_timings(struct drm_panel *panel,
@@ -245,7 +231,6 @@ static const struct drm_panel_funcs seiko_panel_funcs = {
static int seiko_panel_probe(struct device *dev,
const struct seiko_panel_desc *desc)
{
- struct device_node *backlight;
struct seiko_panel *panel;
int err;
@@ -265,18 +250,12 @@ static int seiko_panel_probe(struct device *dev,
if (IS_ERR(panel->avdd))
return PTR_ERR(panel->avdd);
- backlight = of_parse_phandle(dev->of_node, "backlight", 0);
- if (backlight) {
- panel->backlight = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
+ drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
- if (!panel->backlight)
- return -EPROBE_DEFER;
- }
-
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &seiko_panel_funcs;
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ return err;
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -292,11 +271,7 @@ static int seiko_panel_remove(struct platform_device *pdev)
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
drm_panel_remove(&panel->base);
-
- seiko_panel_disable(&panel->base);
-
- if (panel->backlight)
- put_device(&panel->backlight->dev);
+ drm_panel_disable(&panel->base);
return 0;
}
@@ -305,7 +280,7 @@ static void seiko_panel_shutdown(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
- seiko_panel_disable(&panel->base);
+ drm_panel_disable(&panel->base);
}
static const struct display_timing seiko_43wvf1g_timing = {
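The seiko conversion above is representative of the probe-side pattern applied throughout this series: drm_panel_init() now takes the device, the panel funcs and a connector type, and the open-coded backlight phandle lookup is replaced by drm_panel_of_backlight(), which must run after drm_panel_init() (it needs panel->dev) and propagates -EPROBE_DEFER while the backlight device is not yet bound. The drivers can then drop their own backlight handling in enable/disable, since the drm_panel core toggles the stored backlight from drm_panel_enable()/drm_panel_disable(). A minimal sketch of the resulting probe path, with placeholder foo_* names rather than any one driver's:

static int foo_panel_probe(struct device *dev, struct foo_panel *p)
{
	int err;

	drm_panel_init(&p->base, dev, &foo_panel_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	/* Looks up the optional "backlight" phandle and stores it in p->base. */
	err = drm_panel_of_backlight(&p->base);
	if (err)
		return err;

	return drm_panel_add(&p->base);
}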
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index e910b4ad1310..b5d1977221a7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -3,7 +3,6 @@
* Copyright (C) 2014 NVIDIA Corporation
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -23,7 +22,6 @@ struct sharp_panel {
struct mipi_dsi_device *link1;
struct mipi_dsi_device *link2;
- struct backlight_device *backlight;
struct regulator *supply;
bool prepared;
@@ -94,8 +92,6 @@ static int sharp_panel_disable(struct drm_panel *panel)
if (!sharp->enabled)
return 0;
- backlight_disable(sharp->backlight);
-
sharp->enabled = false;
return 0;
@@ -258,8 +254,6 @@ static int sharp_panel_enable(struct drm_panel *panel)
if (sharp->enabled)
return 0;
- backlight_enable(sharp->backlight);
-
sharp->enabled = true;
return 0;
@@ -278,13 +272,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int sharp_panel_get_modes(struct drm_panel *panel)
+static int sharp_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -292,10 +287,10 @@ static int sharp_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 217;
- panel->connector->display_info.height_mm = 136;
+ connector->display_info.width_mm = 217;
+ connector->display_info.height_mm = 136;
return 1;
}
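The .get_modes change in this hunk has the same shape everywhere in the series: the connector is now passed in explicitly, modes are duplicated against connector->dev instead of the removed panel->drm, and display_info is filled through the supplied connector. A minimal sketch of the new callback, with foo_mode standing in for a driver's static mode definition:

static int foo_panel_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &foo_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = foo_mode.width_mm;
	connector->display_info.height_mm = foo_mode.height_mm;

	return 1; /* number of modes added */
}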
@@ -316,7 +311,7 @@ MODULE_DEVICE_TABLE(of, sharp_of_match);
static int sharp_panel_add(struct sharp_panel *sharp)
{
- struct device *dev = &sharp->link1->dev;
+ int ret;
sharp->mode = &default_mode;
@@ -324,14 +319,12 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->supply))
return PTR_ERR(sharp->supply);
- sharp->backlight = devm_of_find_backlight(dev);
-
- if (IS_ERR(sharp->backlight))
- return PTR_ERR(sharp->backlight);
+ drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
- drm_panel_init(&sharp->base);
- sharp->base.funcs = &sharp_panel_funcs;
- sharp->base.dev = &sharp->link1->dev;
+ ret = drm_panel_of_backlight(&sharp->base);
+ if (ret)
+ return ret;
return drm_panel_add(&sharp->base);
}
@@ -408,7 +401,7 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
return 0;
}
- err = sharp_panel_disable(&sharp->base);
+ err = drm_panel_disable(&sharp->base);
if (err < 0)
dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
@@ -429,7 +422,7 @@ static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
if (!sharp)
return;
- sharp_panel_disable(&sharp->base);
+ drm_panel_disable(&sharp->base);
}
static struct mipi_dsi_driver sharp_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
new file mode 100644
index 000000000000..1cf3f02435c1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sharp LS037V7DW01 LCD Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-sharp-ls037v7dw01 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct ls037v7dw01_panel {
+ struct drm_panel panel;
+ struct platform_device *pdev;
+
+ struct regulator *vdd;
+ struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
+ struct gpio_desc *ini_gpio; /* high = power on */
+ struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */
+ struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */
+ struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
+};
+
+#define to_ls037v7dw01_device(p) \
+ container_of(p, struct ls037v7dw01_panel, panel)
+
+static int ls037v7dw01_disable(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+
+ gpiod_set_value_cansleep(lcd->ini_gpio, 0);
+ gpiod_set_value_cansleep(lcd->resb_gpio, 0);
+
+ /* Wait at least 5 vsyncs after disabling the LCD. */
+ msleep(100);
+
+ return 0;
+}
+
+static int ls037v7dw01_unprepare(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+
+ regulator_disable(lcd->vdd);
+ return 0;
+}
+
+static int ls037v7dw01_prepare(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+ int ret;
+
+ ret = regulator_enable(lcd->vdd);
+ if (ret < 0)
+ dev_err(&lcd->pdev->dev, "%s: failed to enable regulator\n",
+ __func__);
+
+ return ret;
+}
+
+static int ls037v7dw01_enable(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+
+ /* Wait a couple of vsyncs before enabling the LCD. */
+ msleep(50);
+
+ gpiod_set_value_cansleep(lcd->resb_gpio, 1);
+ gpiod_set_value_cansleep(lcd->ini_gpio, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode ls037v7dw01_mode = {
+ .clock = 19200,
+ .hdisplay = 480,
+ .hsync_start = 480 + 1,
+ .hsync_end = 480 + 1 + 2,
+ .htotal = 480 + 1 + 2 + 28,
+ .vdisplay = 640,
+ .vsync_start = 640 + 1,
+ .vsync_end = 640 + 1 + 1,
+ .vtotal = 640 + 1 + 1 + 1,
+ .vrefresh = 58,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 56,
+ .height_mm = 75,
+};
+
+static int ls037v7dw01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &ls037v7dw01_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = ls037v7dw01_mode.width_mm;
+ connector->display_info.height_mm = ls037v7dw01_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet pixel data is sampled on the
+ * rising edge of the clock, but the code running on the SDP3430
+ * indicates sampling on the negative edge. This should be tested on a
+ * real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ls037v7dw01_funcs = {
+ .disable = ls037v7dw01_disable,
+ .unprepare = ls037v7dw01_unprepare,
+ .prepare = ls037v7dw01_prepare,
+ .enable = ls037v7dw01_enable,
+ .get_modes = ls037v7dw01_get_modes,
+};
+
+static int ls037v7dw01_probe(struct platform_device *pdev)
+{
+ struct ls037v7dw01_panel *lcd;
+
+ lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lcd);
+ lcd->pdev = pdev;
+
+ lcd->vdd = devm_regulator_get(&pdev->dev, "envdd");
+ if (IS_ERR(lcd->vdd)) {
+ dev_err(&pdev->dev, "failed to get regulator\n");
+ return PTR_ERR(lcd->vdd);
+ }
+
+ lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->ini_gpio)) {
+ dev_err(&pdev->dev, "failed to get enable gpio\n");
+ return PTR_ERR(lcd->ini_gpio);
+ }
+
+ lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->resb_gpio)) {
+ dev_err(&pdev->dev, "failed to get reset gpio\n");
+ return PTR_ERR(lcd->resb_gpio);
+ }
+
+ lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->mo_gpio)) {
+ dev_err(&pdev->dev, "failed to get mode[0] gpio\n");
+ return PTR_ERR(lcd->mo_gpio);
+ }
+
+ lcd->lr_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->lr_gpio)) {
+ dev_err(&pdev->dev, "failed to get mode[1] gpio\n");
+ return PTR_ERR(lcd->lr_gpio);
+ }
+
+ lcd->ud_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 2,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->ud_gpio)) {
+ dev_err(&pdev->dev, "failed to get mode[2] gpio\n");
+ return PTR_ERR(lcd->ud_gpio);
+ }
+
+ drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int ls037v7dw01_remove(struct platform_device *pdev)
+{
+ struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ls037v7dw01_of_match[] = {
+ { .compatible = "sharp,ls037v7dw01", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
+
+static struct platform_driver ls037v7dw01_driver = {
+ .probe = ls037v7dw01_probe,
+ .remove = ls037v7dw01_remove,
+ .driver = {
+ .name = "panel-sharp-ls037v7dw01",
+ .of_match_table = ls037v7dw01_of_match,
+ },
+};
+
+module_platform_driver(ls037v7dw01_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index c39abde9f9f1..ce586c6d70c7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -7,7 +7,6 @@
* Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -25,7 +24,6 @@ struct sharp_nt_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *reset_gpio;
@@ -107,8 +105,6 @@ static int sharp_nt_panel_disable(struct drm_panel *panel)
if (!sharp_nt->enabled)
return 0;
- backlight_disable(sharp_nt->backlight);
-
sharp_nt->enabled = false;
return 0;
@@ -190,8 +186,6 @@ static int sharp_nt_panel_enable(struct drm_panel *panel)
if (sharp_nt->enabled)
return 0;
- backlight_enable(sharp_nt->backlight);
-
sharp_nt->enabled = true;
return 0;
@@ -210,24 +204,25 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int sharp_nt_panel_get_modes(struct drm_panel *panel)
+static int sharp_nt_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
return -ENOMEM;
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 54;
- panel->connector->display_info.height_mm = 95;
+ connector->display_info.width_mm = 54;
+ connector->display_info.height_mm = 95;
return 1;
}
@@ -243,6 +238,7 @@ static const struct drm_panel_funcs sharp_nt_panel_funcs = {
static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
{
struct device *dev = &sharp_nt->dsi->dev;
+ int ret;
sharp_nt->mode = &default_mode;
@@ -259,14 +255,12 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
gpiod_set_value(sharp_nt->reset_gpio, 0);
}
- sharp_nt->backlight = devm_of_find_backlight(dev);
+ drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
+ &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
- if (IS_ERR(sharp_nt->backlight))
- return PTR_ERR(sharp_nt->backlight);
-
- drm_panel_init(&sharp_nt->base);
- sharp_nt->base.funcs = &sharp_nt_panel_funcs;
- sharp_nt->base.dev = &sharp_nt->dsi->dev;
+ ret = drm_panel_of_backlight(&sharp_nt->base);
+ if (ret)
+ return ret;
return drm_panel_add(&sharp_nt->base);
}
@@ -309,7 +303,7 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = sharp_nt_panel_disable(&sharp_nt->base);
+ ret = drm_panel_disable(&sharp_nt->base);
if (ret < 0)
dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
@@ -326,7 +320,7 @@ static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
- sharp_nt_panel_disable(&sharp_nt->base);
+ drm_panel_disable(&sharp_nt->base);
}
static const struct of_device_id sharp_nt_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5a93c4edf1e4..e14c14ac62b5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -21,7 +21,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -30,6 +29,7 @@
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
+#include <video/of_display_timing.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
@@ -37,6 +37,22 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+/**
+ * @modes: Pointer to array of fixed modes appropriate for this panel. If
+ * only one mode then this can just be the address of the mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ * @num_modes: Number of elements in modes array.
+ * @timings: Pointer to array of display timings. NOTE: cannot be used with
+ * "modes" and also these will be used to validate a device tree
+ * override if one is present.
+ * @num_timings: Number of elements in timings array.
+ * @bpc: Bits per color.
+ * @size: Structure containing the physical size of this panel.
+ * @delay: Structure containing various delay values for this panel.
+ * @bus_format: See MEDIA_BUS_FMT_... defines.
+ * @bus_flags: See DRM_BUS_FLAG_... defines.
+ */
struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
@@ -77,6 +93,7 @@ struct panel_desc {
u32 bus_format;
u32 bus_flags;
+ int connector_type;
};
struct panel_simple {
@@ -87,11 +104,12 @@ struct panel_simple {
const struct panel_desc *desc;
- struct backlight_device *backlight;
struct regulator *supply;
struct i2c_adapter *ddc;
struct gpio_desc *enable_gpio;
+
+ struct drm_display_mode override_mode;
};
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
@@ -99,24 +117,20 @@ static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
return container_of(panel, struct panel_simple, base);
}
-static int panel_simple_get_fixed_modes(struct panel_simple *panel)
+static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->base.connector;
- struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
unsigned int i, num = 0;
- if (!panel->desc)
- return 0;
-
for (i = 0; i < panel->desc->num_timings; i++) {
const struct display_timing *dt = &panel->desc->timings[i];
struct videomode vm;
videomode_from_timing(dt, &vm);
- mode = drm_mode_create(drm);
+ mode = drm_mode_create(connector->dev);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
dt->hactive.typ, dt->vactive.typ);
continue;
}
@@ -132,12 +146,21 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ return num;
+}
+
+static unsigned int panel_simple_get_display_modes(struct panel_simple *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
- mode = drm_mode_duplicate(drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
continue;
}
@@ -153,6 +176,44 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ return num;
+}
+
+static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ bool has_override = panel->override_mode.type;
+ unsigned int num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ if (has_override) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel->override_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num = 1;
+ } else {
+ dev_err(panel->base.dev, "failed to add override mode\n");
+ }
+ }
+
+ /* Only add timings if override was not there or failed to validate */
+ if (num == 0 && panel->desc->num_timings)
+ num = panel_simple_get_timings_modes(panel, connector);
+
+ /*
+ * Only add fixed modes if timings/override added no mode.
+ *
+ * We should only ever have either the display timings specified
+ * or a fixed mode. Anything else is rather bogus.
+ */
+ WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
+ if (num == 0)
+ num = panel_simple_get_display_modes(panel, connector);
+
connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
@@ -171,12 +232,6 @@ static int panel_simple_disable(struct drm_panel *panel)
if (!p->enabled)
return 0;
- if (p->backlight) {
- p->backlight->props.power = FB_BLANK_POWERDOWN;
- p->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(p->backlight);
- }
-
if (p->desc->delay.disable)
msleep(p->desc->delay.disable);
@@ -242,34 +297,30 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
- if (p->backlight) {
- p->backlight->props.state &= ~BL_CORE_FBBLANK;
- p->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(p->backlight);
- }
-
p->enabled = true;
return 0;
}
-static int panel_simple_get_modes(struct drm_panel *panel)
+static int panel_simple_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct panel_simple *p = to_panel_simple(panel);
int num = 0;
/* probe EDID if a DDC bus is available */
if (p->ddc) {
- struct edid *edid = drm_get_edid(panel->connector, p->ddc);
- drm_connector_update_edid_property(panel->connector, edid);
+ struct edid *edid = drm_get_edid(connector, p->ddc);
+
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
- num += drm_add_edid_modes(panel->connector, edid);
+ num += drm_add_edid_modes(connector, edid);
kfree(edid);
}
}
/* add hard-coded panel modes */
- num += panel_simple_get_fixed_modes(p);
+ num += panel_simple_get_non_edid_modes(p, connector);
return num;
}
@@ -300,10 +351,58 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
+ (to_check->field.typ >= bounds->field.min && \
+ to_check->field.typ <= bounds->field.max)
+static void panel_simple_parse_panel_timing_node(struct device *dev,
+ struct panel_simple *panel,
+ const struct display_timing *ot)
+{
+ const struct panel_desc *desc = panel->desc;
+ struct videomode vm;
+ unsigned int i;
+
+ if (WARN_ON(desc->num_modes)) {
+ dev_err(dev, "Reject override mode: panel has a fixed mode\n");
+ return;
+ }
+ if (WARN_ON(!desc->num_timings)) {
+ dev_err(dev, "Reject override mode: no timings specified\n");
+ return;
+ }
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+
+ if (!PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hactive) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hfront_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hback_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hsync_len) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vactive) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vfront_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vback_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vsync_len))
+ continue;
+
+ if (ot->flags != dt->flags)
+ continue;
+
+ videomode_from_timing(ot, &vm);
+ drm_display_mode_from_videomode(&vm, &panel->override_mode);
+ panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
+ DRM_MODE_TYPE_PREFERRED;
+ break;
+ }
+
+ if (WARN_ON(!panel->override_mode.type))
+ dev_err(dev, "Reject override mode: No display_timing found\n");
+}
+
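To make the validation above concrete, take the auo_b101ean01 timing introduced further down in this patch, where hactive must be exactly 1280 and the horizontal front porch may range from 18 to 119: a hypothetical "panel-timing" override asking for hactive = 1280 and hfront-porch = 60, with the remaining checked fields inside their ranges and identical flags, passes every PANEL_SIMPLE_BOUNDS_CHECK, is converted via videomode_from_timing() and drm_display_mode_from_videomode(), and ends up marked DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED. An hfront-porch of 150 would fall outside the window and hit the "Reject override mode: No display_timing found" warning instead. Note that the macro covers the eight geometry fields only, so the override's pixel clock is carried over as-is rather than range-checked.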
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
- struct device_node *backlight, *ddc;
struct panel_simple *panel;
+ struct display_timing dt;
+ struct device_node *ddc;
int err;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -329,29 +428,24 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return err;
}
- backlight = of_parse_phandle(dev->of_node, "backlight", 0);
- if (backlight) {
- panel->backlight = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
-
- if (!panel->backlight)
- return -EPROBE_DEFER;
- }
-
ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
if (ddc) {
panel->ddc = of_find_i2c_adapter_by_node(ddc);
of_node_put(ddc);
- if (!panel->ddc) {
- err = -EPROBE_DEFER;
- goto free_backlight;
- }
+ if (!panel->ddc)
+ return -EPROBE_DEFER;
}
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &panel_simple_funcs;
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+
+ drm_panel_init(&panel->base, dev, &panel_simple_funcs,
+ desc->connector_type);
+
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ goto free_ddc;
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -364,9 +458,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
free_ddc:
if (panel->ddc)
put_device(&panel->ddc->dev);
-free_backlight:
- if (panel->backlight)
- put_device(&panel->backlight->dev);
return err;
}
@@ -376,16 +467,12 @@ static int panel_simple_remove(struct device *dev)
struct panel_simple *panel = dev_get_drvdata(dev);
drm_panel_remove(&panel->base);
-
- panel_simple_disable(&panel->base);
- panel_simple_unprepare(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
if (panel->ddc)
put_device(&panel->ddc->dev);
- if (panel->backlight)
- put_device(&panel->backlight->dev);
-
return 0;
}
@@ -393,8 +480,8 @@ static void panel_simple_shutdown(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- panel_simple_disable(&panel->base);
- panel_simple_unprepare(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
}
static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
@@ -496,22 +583,21 @@ static const struct panel_desc auo_b101aw03 = {
},
};
-static const struct drm_display_mode auo_b101ean01_mode = {
- .clock = 72500,
- .hdisplay = 1280,
- .hsync_start = 1280 + 119,
- .hsync_end = 1280 + 119 + 32,
- .htotal = 1280 + 119 + 32 + 21,
- .vdisplay = 800,
- .vsync_start = 800 + 4,
- .vsync_end = 800 + 4 + 20,
- .vtotal = 800 + 4 + 20 + 8,
- .vrefresh = 60,
+static const struct display_timing auo_b101ean01_timing = {
+ .pixelclock = { 65300000, 72500000, 75000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 119, 119 },
+ .hback_porch = { 21, 21, 21 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 8, 8, 8 },
+ .vsync_len = { 18, 20, 20 },
};
static const struct panel_desc auo_b101ean01 = {
- .modes = &auo_b101ean01_mode,
- .num_modes = 1,
+ .timings = &auo_b101ean01_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 217,
@@ -543,6 +629,35 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xak01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 10,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 6,
+ .vtotal = 768 + 4 + 6 + 15,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+ .modes = &auo_b116xak01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode auo_b116xw03_mode = {
.clock = 70589,
.hdisplay = 1366,
@@ -719,14 +834,15 @@ static const struct panel_desc auo_g133han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_g185han01_timings = {
.pixelclock = { 120000000, 144000000, 175000000 },
.hactive = { 1920, 1920, 1920 },
- .hfront_porch = { 18, 60, 74 },
- .hback_porch = { 12, 44, 54 },
- .hsync_len = { 10, 24, 32 },
+ .hfront_porch = { 36, 120, 148 },
+ .hback_porch = { 24, 88, 108 },
+ .hsync_len = { 20, 48, 64 },
.vactive = { 1080, 1080, 1080 },
.vfront_porch = { 6, 10, 40 },
.vback_porch = { 2, 5, 20 },
@@ -748,6 +864,7 @@ static const struct panel_desc auo_g185han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_p320hvn03_timings = {
@@ -776,6 +893,7 @@ static const struct panel_desc auo_p320hvn03 = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -919,6 +1037,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+ {
+ .clock = 148500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 2200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1125,
+ .vrefresh = 60,
+ },
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+ .modes = boe_nv140fhmn49_modes,
+ .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+ .bpc = 6,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ .prepare = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1091,6 +1241,7 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing dlc_dlc1010gig_timing = {
@@ -1121,6 +1272,7 @@ static const struct panel_desc dlc_dlc1010gig = {
.unprepare = 60,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode edt_et035012dm6_mode = {
@@ -1335,6 +1487,31 @@ static const struct panel_desc giantplus_gpg482739qs5 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing giantplus_gpm940b0_timing = {
+ .pixelclock = { 13500000, 27000000, 27500000 },
+ .hactive = { 320, 320, 320 },
+ .hfront_porch = { 14, 686, 718 },
+ .hback_porch = { 50, 70, 255 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 240, 240, 240 },
+ .vfront_porch = { 1, 1, 179 },
+ .vback_porch = { 1, 21, 31 },
+ .vsync_len = { 1, 1, 6 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc giantplus_gpm940b0 = {
+ .timings = &giantplus_gpm940b0_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 60,
+ .height = 45,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_3X8,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
static const struct display_timing hannstar_hsd070pww1_timing = {
.pixelclock = { 64300000, 71100000, 82000000 },
.hactive = { 1280, 1280, 1280 },
@@ -1362,6 +1539,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
.height = 94,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -1386,6 +1564,7 @@ static const struct panel_desc hannstar_hsd100pxn1 = {
.height = 152,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
@@ -1492,6 +1671,7 @@ static const struct panel_desc innolux_g070y2_l01 = {
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g101ice_l01_timing = {
@@ -1520,6 +1700,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g121i1_l01_timing = {
@@ -1547,6 +1728,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.disable = 20,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1578,23 +1760,32 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
-static const struct drm_display_mode innolux_n116bge_mode = {
- .clock = 76420,
- .hdisplay = 1366,
- .hsync_start = 1366 + 136,
- .hsync_end = 1366 + 136 + 30,
- .htotal = 1366 + 136 + 30 + 60,
- .vdisplay = 768,
- .vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 12,
- .vtotal = 768 + 8 + 12 + 12,
- .vrefresh = 60,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+/*
+ * Datasheet specifies that at 60 Hz refresh rate:
+ * - total horizontal time: { 1506, 1592, 1716 }
+ * - total vertical time: { 788, 800, 868 }
+ *
+ * ...but doesn't go into exactly how that should be split into a front
+ * porch, back porch, or sync length. For now we'll leave a single setting
+ * here which allows a bit of tweaking of the pixel clock at the expense of
+ * refresh rate.
+ */
+static const struct display_timing innolux_n116bge_timing = {
+ .pixelclock = { 72600000, 76420000, 80240000 },
+ .hactive = { 1366, 1366, 1366 },
+ .hfront_porch = { 136, 136, 136 },
+ .hback_porch = { 60, 60, 60 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 8, 8, 8 },
+ .vback_porch = { 12, 12, 12 },
+ .vsync_len = { 12, 12, 12 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
};
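A quick check on the split chosen above: 1366 + 136 + 30 + 60 = 1592 clocks per line and 768 + 8 + 12 + 12 = 800 lines per frame, exactly the typical totals quoted from the datasheet, and 76.42 MHz / (1592 × 800) ≈ 60.0 Hz; the 72.6–80.24 MHz pixel-clock window then corresponds to roughly 57–63 Hz.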
static const struct panel_desc innolux_n116bge = {
- .modes = &innolux_n116bge_mode,
- .num_modes = 1,
+ .timings = &innolux_n116bge_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 256,
@@ -1721,6 +1912,7 @@ static const struct panel_desc koe_tx31d200vm0baa = {
.height = 109,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing kyo_tcg121xglp_timing = {
@@ -1745,6 +1937,7 @@ static const struct panel_desc kyo_tcg121xglp = {
.height = 184,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
@@ -1793,6 +1986,7 @@ static const struct panel_desc lg_lb070wv8 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
@@ -1900,6 +2094,40 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
+static const struct drm_display_mode logicpd_type_28_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 3,
+ .hsync_end = 480 + 3 + 42,
+ .htotal = 480 + 3 + 42 + 2,
+
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 11,
+ .vtotal = 272 + 2 + 11 + 3,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc logicpd_type_28 = {
+ .modes = &logicpd_type_28_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 200,
+ .unprepare = 200,
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+};
+
static const struct panel_desc mitsubishi_aa070mc01 = {
.modes = &mitsubishi_aa070mc01_mode,
.num_modes = 1,
@@ -1915,6 +2143,7 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.disable = 400,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
@@ -1943,6 +2172,7 @@ static const struct panel_desc nec_nl12880bc20_05 = {
.disable = 50,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
@@ -2045,6 +2275,7 @@ static const struct panel_desc nlt_nl192108ac18_02d = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nvd_9128_mode = {
@@ -2068,6 +2299,7 @@ static const struct panel_desc nvd_9128 = {
.height = 88,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -2157,6 +2389,33 @@ static const struct panel_desc ontat_yx700wv03 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode ortustech_com37h3m_mode = {
+ .clock = 22153,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 10,
+ .htotal = 480 + 8 + 10 + 10,
+ .vdisplay = 640,
+ .vsync_start = 640 + 4,
+ .vsync_end = 640 + 4 + 3,
+ .vtotal = 640 + 4 + 3 + 4,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc ortustech_com37h3m = {
+ .modes = &ortustech_com37h3m_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 56, /* 56.16mm */
+ .height = 75, /* 74.88mm */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+};
+
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
.clock = 25000,
.hdisplay = 480,
@@ -2206,6 +2465,7 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2354,6 +2614,83 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct display_timing satoz_sat050at40h12r2_timing = {
+ .pixelclock = {33300000, 33300000, 50000000},
+ .hactive = {800, 800, 800},
+ .hfront_porch = {16, 210, 354},
+ .hback_porch = {46, 46, 46},
+ .hsync_len = {1, 1, 40},
+ .vactive = {480, 480, 480},
+ .vfront_porch = {7, 22, 147},
+ .vback_porch = {23, 23, 23},
+ .vsync_len = {1, 1, 20},
+};
+
+static const struct panel_desc satoz_sat050at40h12r2 = {
+ .timings = &satoz_sat050at40h12r2_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
+ .clock = 168480,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 3,
+ .vsync_end = 1280 + 3 + 10,
+ .vtotal = 1280 + 3 + 10 + 57,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc sharp_ld_d5116z01b = {
+ .modes = &sharp_ld_d5116z01b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 260,
+ .height = 120,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+};
+
+static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 64,
+ .hsync_end = 800 + 64 + 128,
+ .htotal = 800 + 64 + 128 + 64,
+ .vdisplay = 480,
+ .vsync_start = 480 + 8,
+ .vsync_end = 480 + 8 + 2,
+ .vtotal = 480 + 8 + 2 + 35,
+ .vrefresh = 60,
+ .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc sharp_lq070y3dg3b = {
+ .modes = &sharp_lq070y3dg3b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152, /* 152.4mm */
+ .height = 91, /* 91.4mm */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+};
+
static const struct drm_display_mode sharp_lq035q7db03_mode = {
.clock = 5500,
.hdisplay = 240,
@@ -2400,6 +2737,7 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.height = 136,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing sharp_lq123p1jx31_timing = {
@@ -2454,6 +2792,33 @@ static const struct panel_desc sharp_lq150x1lg11 = {
.bus_format = MEDIA_BUS_FMT_RGB565_1X16,
};
+static const struct display_timing sharp_ls020b1dd01d_timing = {
+ .pixelclock = { 2000000, 4200000, 5000000 },
+ .hactive = { 240, 240, 240 },
+ .hfront_porch = { 66, 66, 66 },
+ .hback_porch = { 1, 1, 1 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 160, 160, 160 },
+ .vfront_porch = { 52, 52, 52 },
+ .vback_porch = { 6, 6, 6 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc sharp_ls020b1dd01d = {
+ .timings = &sharp_ls020b1dd01d_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 42,
+ .height = 28,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_PIXDATA_NEGEDGE
+ | DRM_BUS_FLAG_SHARP_SIGNALS,
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
@@ -2552,6 +2917,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.height = 95,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
@@ -2576,6 +2942,65 @@ static const struct panel_desc tianma_tm070rvhg71 = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
+ {
+ .clock = 10000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 50,
+ .hsync_end = 320 + 50 + 6,
+ .htotal = 320 + 50 + 6 + 38,
+ .vdisplay = 240,
+ .vsync_start = 240 + 3,
+ .vsync_end = 240 + 3 + 1,
+ .vtotal = 240 + 3 + 1 + 17,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+};
+
+static const struct panel_desc ti_nspire_cx_lcd_panel = {
+ .modes = ti_nspire_cx_lcd_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 65,
+ .height = 49,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = {
+ {
+ .clock = 10000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 6,
+ .hsync_end = 320 + 6 + 6,
+ .htotal = 320 + 6 + 6 + 6,
+ .vdisplay = 240,
+ .vsync_start = 240 + 0,
+ .vsync_end = 240 + 0 + 1,
+ .vtotal = 240 + 0 + 1 + 0,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ },
+};
+
+static const struct panel_desc ti_nspire_classic_lcd_panel = {
+ .modes = ti_nspire_classic_lcd_mode,
+ .num_modes = 1,
+ /* The grayscale panel has 8 bits for the color, Y (black) */
+ .bpc = 8,
+ .size = {
+ .width = 71,
+ .height = 53,
+ },
+ /* This is the grayscale bus format */
+ .bus_format = MEDIA_BUS_FMT_Y8_1X8,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
@@ -2600,6 +3025,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2670,6 +3096,7 @@ static const struct panel_desc urt_umsh_8596md_lvds = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct panel_desc urt_umsh_8596md_parallel = {
@@ -2784,6 +3211,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xa01",
+ .data = &auo_b116xak01,
+ }, {
.compatible = "auo,b116xw03",
.data = &auo_b116xw03,
}, {
@@ -2826,6 +3256,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv140fhmn49",
+ .data = &boe_nv140fhmn49,
+ }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -2883,6 +3316,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "giantplus,gpg482739qs5",
.data = &giantplus_gpg482739qs5
}, {
+ .compatible = "giantplus,gpm940b0",
+ .data = &giantplus_gpm940b0,
+ }, {
.compatible = "hannstar,hsd070pww1",
.data = &hannstar_hsd070pww1,
}, {
@@ -2949,6 +3385,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "logicpd,type28",
+ .data = &logicpd_type_28,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -2979,6 +3418,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ontat,yx700wv03",
.data = &ontat_yx700wv03,
}, {
+ .compatible = "ortustech,com37h3m05dtc",
+ .data = &ortustech_com37h3m,
+ }, {
+ .compatible = "ortustech,com37h3m99dtc",
+ .data = &ortustech_com37h3m,
+ }, {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
@@ -3003,9 +3448,18 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "satoz,sat050at40h12r2",
+ .data = &satoz_sat050at40h12r2,
+ }, {
+ .compatible = "sharp,ld-d5116z01b",
+ .data = &sharp_ld_d5116z01b,
+ }, {
.compatible = "sharp,lq035q7db03",
.data = &sharp_lq035q7db03,
}, {
+ .compatible = "sharp,lq070y3dg3b",
+ .data = &sharp_lq070y3dg3b,
+ }, {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
@@ -3015,6 +3469,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq150x1lg11",
.data = &sharp_lq150x1lg11,
}, {
+ .compatible = "sharp,ls020b1dd01d",
+ .data = &sharp_ls020b1dd01d,
+ }, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
@@ -3030,6 +3487,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070rvhg71",
.data = &tianma_tm070rvhg71,
}, {
+ .compatible = "ti,nspire-cx-lcd-panel",
+ .data = &ti_nspire_cx_lcd_panel,
+ }, {
+ .compatible = "ti,nspire-classic-lcd-panel",
+ .data = &ti_nspire_classic_lcd_panel,
+ }, {
.compatible = "toshiba,lt089ac29000",
.data = &toshiba_lt089ac29000,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 09c5d9a6f9fa..4b4f2558e3b4 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -9,7 +9,6 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -103,7 +102,6 @@ struct st7701 {
struct mipi_dsi_device *dsi;
const struct st7701_panel_desc *desc;
- struct backlight_device *backlight;
struct regulator_bulk_data *supplies;
struct gpio_desc *reset;
unsigned int sleep_delay;
@@ -223,7 +221,6 @@ static int st7701_enable(struct drm_panel *panel)
struct st7701 *st7701 = panel_to_st7701(panel);
ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
- backlight_enable(st7701->backlight);
return 0;
}
@@ -232,7 +229,6 @@ static int st7701_disable(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- backlight_disable(st7701->backlight);
ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
return 0;
@@ -264,13 +260,14 @@ static int st7701_unprepare(struct drm_panel *panel)
return 0;
}
-static int st7701_get_modes(struct drm_panel *panel)
+static int st7701_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct st7701 *st7701 = panel_to_st7701(panel);
const struct drm_display_mode *desc_mode = st7701->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, desc_mode);
+ mode = drm_mode_duplicate(connector->dev, desc_mode);
if (!mode) {
DRM_DEV_ERROR(&st7701->dsi->dev,
"failed to add mode %ux%ux@%u\n",
@@ -280,10 +277,10 @@ static int st7701_get_modes(struct drm_panel *panel)
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = desc_mode->width_mm;
- panel->connector->display_info.height_mm = desc_mode->height_mm;
+ connector->display_info.width_mm = desc_mode->width_mm;
+ connector->display_info.height_mm = desc_mode->height_mm;
return 1;
}
@@ -365,11 +362,8 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(st7701->reset);
}
- st7701->backlight = devm_of_find_backlight(&dsi->dev);
- if (IS_ERR(st7701->backlight))
- return PTR_ERR(st7701->backlight);
-
- drm_panel_init(&st7701->panel);
+ drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
@@ -381,8 +375,10 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
* ts8550b and there is no valid documentation for that.
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
- st7701->panel.funcs = &st7701_funcs;
- st7701->panel.dev = &dsi->dev;
+
+ ret = drm_panel_of_backlight(&st7701->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&st7701->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 5e3e92ea9ea6..cc02c54c1b2e 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 Free Electrons
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -116,7 +115,6 @@ struct st7789v {
struct drm_panel panel;
struct spi_device *spi;
struct gpio_desc *reset;
- struct backlight_device *backlight;
struct regulator *power;
};
@@ -170,14 +168,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int st7789v_get_modes(struct drm_panel *panel)
+static int st7789v_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -188,8 +186,8 @@ static int st7789v_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 61;
- panel->connector->display_info.height_mm = 103;
+ connector->display_info.width_mm = 61;
+ connector->display_info.height_mm = 103;
return 1;
}
@@ -323,12 +321,6 @@ static int st7789v_enable(struct drm_panel *panel)
{
struct st7789v *ctx = panel_to_st7789v(panel);
- if (ctx->backlight) {
- ctx->backlight->props.state &= ~BL_CORE_FBBLANK;
- ctx->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ctx->backlight);
- }
-
return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON);
}
@@ -339,12 +331,6 @@ static int st7789v_disable(struct drm_panel *panel)
ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF));
- if (ctx->backlight) {
- ctx->backlight->props.power = FB_BLANK_POWERDOWN;
- ctx->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(ctx->backlight);
- }
-
return 0;
}
@@ -370,7 +356,6 @@ static const struct drm_panel_funcs st7789v_drm_funcs = {
static int st7789v_probe(struct spi_device *spi)
{
- struct device_node *backlight;
struct st7789v *ctx;
int ret;
@@ -381,8 +366,8 @@ static int st7789v_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &st7789v_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ctx->power = devm_regulator_get(&spi->dev, "power");
if (IS_ERR(ctx->power))
@@ -394,26 +379,15 @@ static int st7789v_probe(struct spi_device *spi)
return PTR_ERR(ctx->reset);
}
- backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0);
- if (backlight) {
- ctx->backlight = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
-
- if (!ctx->backlight)
- return -EPROBE_DEFER;
- }
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
- goto err_free_backlight;
+ return ret;
return 0;
-
-err_free_backlight:
- if (ctx->backlight)
- put_device(&ctx->backlight->dev);
-
- return ret;
}
static int st7789v_remove(struct spi_device *spi)
@@ -422,9 +396,6 @@ static int st7789v_remove(struct spi_device *spi)
drm_panel_remove(&ctx->panel);
- if (ctx->backlight)
- put_device(&ctx->backlight->dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
new file mode 100644
index 000000000000..de0abf76ae6f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Sony ACX424AKP panel driver. This is a 480x864
+ * AMOLED panel with a command-only DSI interface.
+ *
+ * Copyright (C) Linaro Ltd. 2019
+ * Author: Linus Walleij
+ * Based on code and know-how from Marcus Lorentzon
+ * Copyright (C) ST-Ericsson SA 2010
+ */
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define ACX424_DCS_READ_ID1 0xDA
+#define ACX424_DCS_READ_ID2 0xDB
+#define ACX424_DCS_READ_ID3 0xDC
+#define ACX424_DCS_SET_MDDI 0xAE
+
+/*
+ * Sony seems to use vendor ID 0x81
+ */
+#define DISPLAY_SONY_ACX424AKP_ID1 0x811b
+#define DISPLAY_SONY_ACX424AKP_ID2 0x811a
+/*
+ * The third ID looks like a bug, vendor IDs begin at 0x80
+ * and panel 00 ... seems like default values.
+ */
+#define DISPLAY_SONY_ACX424AKP_ID3 0x8000
+
+struct acx424akp {
+ struct drm_panel panel;
+ struct device *dev;
+ struct backlight_device *bl;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ bool video_mode;
+};
+
+static const struct drm_display_mode sony_acx424akp_vid_mode = {
+ .clock = 330000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 15,
+ .hsync_end = 480 + 15 + 0,
+ .htotal = 480 + 15 + 0 + 15,
+ .vdisplay = 864,
+ .vsync_start = 864 + 14,
+ .vsync_end = 864 + 14 + 1,
+ .vtotal = 864 + 14 + 1 + 11,
+ .vrefresh = 60,
+ .width_mm = 48,
+ .height_mm = 84,
+ .flags = DRM_MODE_FLAG_PVSYNC,
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode using the maximum HS frequency.
+ */
+static const struct drm_display_mode sony_acx424akp_cmd_mode = {
+ .clock = 420160,
+ .hdisplay = 480,
+ .hsync_start = 480 + 154,
+ .hsync_end = 480 + 154 + 16,
+ .htotal = 480 + 154 + 16 + 32,
+ .vdisplay = 864,
+ .vsync_start = 864 + 1,
+ .vsync_end = 864 + 1 + 1,
+ .vtotal = 864 + 1 + 1 + 1,
+ /*
+	 * Some desired refresh rate; experiments at the maximum "pixel"
+	 * clock speed (HS clock 420 MHz) yield around 117 Hz.
+ */
+ .vrefresh = 60,
+ .width_mm = 48,
+ .height_mm = 84,
+};
+
+static inline struct acx424akp *panel_to_acx424akp(struct drm_panel *panel)
+{
+ return container_of(panel, struct acx424akp, panel);
+}
+
+#define FOSC			20 /* 20 MHz */
+#define SCALE_FACTOR_NS_DIV_MHZ 1000
+
+static int acx424akp_set_brightness(struct backlight_device *bl)
+{
+ struct acx424akp *acx = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ int period_ns = 1023;
+ int duty_ns = bl->props.brightness;
+ u8 pwm_ratio;
+ u8 pwm_div;
+ u8 par;
+ int ret;
+
+ /* Calculate the PWM duty cycle in n/256's */
+ pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1);
+ pwm_div = max(1,
+ ((FOSC * period_ns) / 256) /
+ SCALE_FACTOR_NS_DIV_MHZ);
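+	/*
+	 * Example with the probe defaults: brightness 512 and a 1023 ns
+	 * period give pwm_ratio = (512 * 256) / 1023 - 1 = 127 (roughly
+	 * 50% duty cycle) and pwm_div = max(1, (20 * 1023 / 256) / 1000) = 1.
+	 */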
+
+	/* Set up the PWM duty cycle as ONE byte (differs from the standard) */
+ DRM_DEV_DEBUG(acx->dev, "calculated duty cycle %02x\n", pwm_ratio);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &pwm_ratio, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to set display PWM ratio (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Sequence to write PWMDIV:
+ * address data
+ * 0xF3 0xAA CMD2 Unlock
+ * 0x00 0x01 Enter CMD2 page 0
+	 * 0x7D	0x01	No reload MTP of CMD2 P1
+ * 0x22 PWMDIV
+ * 0x7F 0xAA CMD2 page 1 lock
+ */
+ par = 0xaa;
+ ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to unlock CMD 2 (%d)\n",
+ ret);
+ return ret;
+ }
+ par = 0x01;
+ ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to enter page 1 (%d)\n",
+ ret);
+ return ret;
+ }
+ par = 0x01;
+ ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to disable MTP reload (%d)\n",
+ ret);
+ return ret;
+ }
+ ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to set PWM divisor (%d)\n",
+ ret);
+ return ret;
+ }
+ par = 0xaa;
+ ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to lock CMD 2 (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Enable backlight */
+ par = 0x24;
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to enable display backlight (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops acx424akp_bl_ops = {
+ .update_status = acx424akp_set_brightness,
+};
+
+static int acx424akp_read_id(struct acx424akp *acx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ u8 vendor, version, panel;
+ u16 val;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID1, &vendor, 1);
+ if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev, "could not read vendor ID byte\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID2, &version, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "could not read device version byte\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID3, &panel, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "could not read panel ID byte\n");
+ return ret;
+ }
+
+ if (vendor == 0x00) {
+ DRM_DEV_ERROR(acx->dev, "device vendor ID is zero\n");
+ return -ENODEV;
+ }
+
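+	/* Combine vendor and panel bytes to match the DISPLAY_SONY_*_ID values */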
+ val = (vendor << 8) | panel;
+ switch (val) {
+ case DISPLAY_SONY_ACX424AKP_ID1:
+ case DISPLAY_SONY_ACX424AKP_ID2:
+ case DISPLAY_SONY_ACX424AKP_ID3:
+ DRM_DEV_INFO(acx->dev,
+ "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
+ break;
+ default:
+ DRM_DEV_INFO(acx->dev,
+ "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
+ break;
+ }
+
+ return 0;
+}
+
+static int acx424akp_power_on(struct acx424akp *acx)
+{
+ int ret;
+
+ ret = regulator_enable(acx->supply);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to enable supply (%d)\n", ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(acx->reset_gpio, 1);
+ udelay(20);
+ /* De-assert RESET */
+ gpiod_set_value_cansleep(acx->reset_gpio, 0);
+ usleep_range(11000, 20000);
+
+ return 0;
+}
+
+static void acx424akp_power_off(struct acx424akp *acx)
+{
+ /* Assert RESET */
+ gpiod_set_value_cansleep(acx->reset_gpio, 1);
+ usleep_range(11000, 20000);
+
+ regulator_disable(acx->supply);
+}
+
+static int acx424akp_prepare(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ const u8 mddi = 3;
+ int ret;
+
+ ret = acx424akp_power_on(acx);
+ if (ret)
+ return ret;
+
+ ret = acx424akp_read_id(acx);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to read panel ID (%d)\n", ret);
+ goto err_power_off;
+ }
+
+	/* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ ret = mipi_dsi_dcs_set_tear_on(dsi,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to enable vblank TE (%d)\n",
+ ret);
+ goto err_power_off;
+ }
+
+ /*
+ * Set MDDI
+ *
+ * This presumably deactivates the Qualcomm MDDI interface and
+ * selects DSI, similar code is found in other drivers such as the
+ * Sharp LS043T1LE01 which makes us suspect that this panel may be
+ * using a Novatek NT35565 or similar display driver chip that shares
+ * this command. Due to the lack of documentation we cannot know for
+ * sure.
+ */
+ ret = mipi_dsi_dcs_write(dsi, ACX424_DCS_SET_MDDI,
+ &mddi, sizeof(mddi));
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "failed to set MDDI (%d)\n", ret);
+ goto err_power_off;
+ }
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ goto err_power_off;
+ }
+ msleep(140);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to turn display on (%d)\n",
+ ret);
+ goto err_power_off;
+ }
+ if (acx->video_mode) {
+ /* In video mode turn peripheral on */
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret) {
+ dev_err(acx->dev, "failed to turn on peripheral\n");
+ goto err_power_off;
+ }
+ }
+
+ acx->bl->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+
+err_power_off:
+ acx424akp_power_off(acx);
+ return ret;
+}
+
+static int acx424akp_unprepare(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ u8 par;
+ int ret;
+
+ /* Disable backlight */
+ par = 0x00;
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &par, 1);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to disable display backlight (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+ msleep(85);
+
+ acx424akp_power_off(acx);
+ acx->bl->props.power = FB_BLANK_POWERDOWN;
+
+ return 0;
+}
+
+static int acx424akp_enable(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+
+ /*
+	 * The backlight is on as long as the display is on,
+	 * so there is no need to call backlight_enable() here.
+ */
+ acx->bl->props.power = FB_BLANK_UNBLANK;
+
+ return 0;
+}
+
+static int acx424akp_disable(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+
+ /*
+	 * The backlight is on as long as the display is on,
+	 * so there is no need to call backlight_disable() here.
+ */
+ acx->bl->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+}
+
+static int acx424akp_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+ struct drm_display_mode *mode;
+
+ if (acx->video_mode)
+ mode = drm_mode_duplicate(connector->dev,
+ &sony_acx424akp_vid_mode);
+ else
+ mode = drm_mode_duplicate(connector->dev,
+ &sony_acx424akp_cmd_mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs acx424akp_drm_funcs = {
+ .disable = acx424akp_disable,
+ .unprepare = acx424akp_unprepare,
+ .prepare = acx424akp_prepare,
+ .enable = acx424akp_enable,
+ .get_modes = acx424akp_get_modes,
+};
+
+static int acx424akp_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct acx424akp *acx;
+ int ret;
+
+ acx = devm_kzalloc(dev, sizeof(struct acx424akp), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+ acx->video_mode = of_property_read_bool(dev->of_node,
+ "enforce-video-mode");
+
+ mipi_dsi_set_drvdata(dsi, acx);
+ acx->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * FIXME: these come from the ST-Ericsson vendor driver for the
+	 * HREF520 and seem to reflect limitations in the PLLs on that
+	 * platform; if you have the datasheet, please cross-check the
+ * actual max rates.
+ */
+ dsi->lp_rate = 19200000;
+ dsi->hs_rate = 420160000;
+
+ if (acx->video_mode)
+ /* Burst mode using event for sync */
+ dsi->mode_flags =
+ MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST;
+ else
+ dsi->mode_flags =
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ acx->supply = devm_regulator_get(dev, "vddi");
+ if (IS_ERR(acx->supply))
+ return PTR_ERR(acx->supply);
+
+ /* This asserts RESET by default */
+ acx->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(acx->reset_gpio)) {
+ ret = PTR_ERR(acx->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&acx->panel, dev, &acx424akp_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ acx->bl = devm_backlight_device_register(dev, "acx424akp", dev, acx,
+ &acx424akp_bl_ops, NULL);
+ if (IS_ERR(acx->bl)) {
+ DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ return PTR_ERR(acx->bl);
+ }
+ acx->bl->props.max_brightness = 1023;
+ acx->bl->props.brightness = 512;
+ acx->bl->props.power = FB_BLANK_POWERDOWN;
+
+ ret = drm_panel_add(&acx->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&acx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int acx424akp_remove(struct mipi_dsi_device *dsi)
+{
+ struct acx424akp *acx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&acx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id acx424akp_of_match[] = {
+ { .compatible = "sony,acx424akp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, acx424akp_of_match);
+
+static struct mipi_dsi_driver acx424akp_driver = {
+ .probe = acx424akp_probe,
+ .remove = acx424akp_remove,
+ .driver = {
+ .name = "panel-sony-acx424akp",
+ .of_match_table = acx424akp_of_match,
+ },
+};
+module_mipi_dsi_driver(acx424akp_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI Sony acx424akp Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
new file mode 100644
index 000000000000..5c4b6f6e5c2d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony ACX565AKM LCD Panel driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-sony-acx565akm driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ */
+
+/*
+ * TODO (to be addressed with hardware access to test the changes):
+ *
+ * - Update backlight support to use backlight_update_status() etc.
+ * - Use prepare/unprepare for the basic power on/off of the backlight
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON BIT(5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON BIT(4)
+#define CTRL_DISP_BACKLIGHT_ON BIT(2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON BIT(1)
+
+#define MIPID_CMD_WRITE_CABC 0x55
+#define MIPID_CMD_READ_CABC 0x56
+
+#define MIPID_VER_LPH8923 3
+#define MIPID_VER_LS041Y3 4
+#define MIPID_VER_L4F00311 8
+#define MIPID_VER_ACX565AKM 9
+
+struct acx565akm_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset_gpio;
+ struct backlight_device *backlight;
+
+ struct mutex mutex;
+
+ const char *name;
+ u8 display_id[3];
+ int model;
+ int revision;
+ bool has_bc;
+ bool has_cabc;
+
+ bool enabled;
+ unsigned int cabc_mode;
+ /*
+ * Next value of jiffies when we can issue the next sleep in/out
+ * command.
+ */
+ unsigned long hw_guard_end;
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+};
+
+#define to_acx565akm_device(p) container_of(p, struct acx565akm_panel, panel)
+
+static void acx565akm_transfer(struct acx565akm_panel *lcd, int cmd,
+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[5];
+ int ret;
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ if (rlen > 1 && wlen == 0) {
+ /*
+ * Between the command and the response data there is a
+ * dummy clock cycle. Add an extra bit after the command
+ * word to account for this.
+ */
+ x->bits_per_word = 10;
+ cmd <<= 1;
+ }
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
+ x++;
+ x->rx_buf = rbuf;
+ x->len = rlen;
+ spi_message_add_tail(x, &m);
+ }
+
+ ret = spi_sync(lcd->spi, &m);
+ if (ret < 0)
+ dev_dbg(&lcd->spi->dev, "spi_sync %d\n", ret);
+}
+
+static inline void acx565akm_cmd(struct acx565akm_panel *lcd, int cmd)
+{
+ acx565akm_transfer(lcd, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct acx565akm_panel *lcd,
+ int reg, const u8 *buf, int len)
+{
+ acx565akm_transfer(lcd, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct acx565akm_panel *lcd,
+ int reg, u8 *buf, int len)
+{
+ acx565akm_transfer(lcd, reg, NULL, 0, buf, len);
+}
+
+/* -----------------------------------------------------------------------------
+ * Auto Brightness Control Via sysfs
+ */
+
+static unsigned int acx565akm_get_cabc_mode(struct acx565akm_panel *lcd)
+{
+ return lcd->cabc_mode;
+}
+
+static void acx565akm_set_cabc_mode(struct acx565akm_panel *lcd,
+ unsigned int mode)
+{
+ u16 cabc_ctrl;
+
+ lcd->cabc_mode = mode;
+ if (!lcd->enabled)
+ return;
+ cabc_ctrl = 0;
+ acx565akm_read(lcd, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+ cabc_ctrl &= ~3;
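+	/* Bit 8 marks the 9-bit SPI word as parameter data, not a command */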
+ cabc_ctrl |= (1 << 8) | (mode & 3);
+ acx565akm_write(lcd, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned int acx565akm_get_hw_cabc_mode(struct acx565akm_panel *lcd)
+{
+ u8 cabc_ctrl;
+
+ acx565akm_read(lcd, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+ return cabc_ctrl & 3;
+}
+
+static const char * const acx565akm_cabc_modes[] = {
+ "off", /* always used when CABC is not supported */
+ "ui",
+ "still-image",
+ "moving-image",
+};
+
+static ssize_t cabc_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(dev);
+ const char *mode_str;
+ int mode;
+
+ if (!lcd->has_cabc)
+ mode = 0;
+ else
+ mode = acx565akm_get_cabc_mode(lcd);
+
+ mode_str = "unknown";
+ if (mode >= 0 && mode < ARRAY_SIZE(acx565akm_cabc_modes))
+ mode_str = acx565akm_cabc_modes[mode];
+
+ return sprintf(buf, "%s\n", mode_str);
+}
+
+static ssize_t cabc_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) {
+ const char *mode_str = acx565akm_cabc_modes[i];
+ int cmp_len = strlen(mode_str);
+
+ if (count > 0 && buf[count - 1] == '\n')
+ count--;
+ if (count != cmp_len)
+ continue;
+
+ if (strncmp(buf, mode_str, cmp_len) == 0)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(acx565akm_cabc_modes))
+ return -EINVAL;
+
+ if (!lcd->has_cabc && i != 0)
+ return -EINVAL;
+
+ mutex_lock(&lcd->mutex);
+ acx565akm_set_cabc_mode(lcd, i);
+ mutex_unlock(&lcd->mutex);
+
+ return count;
+}
+
+static ssize_t cabc_available_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(dev);
+ unsigned int i;
+ size_t len = 0;
+
+ if (!lcd->has_cabc)
+ return sprintf(buf, "%s\n", acx565akm_cabc_modes[0]);
+
+ for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++)
+ len += sprintf(&buf[len], "%s%s", i ? " " : "",
+ acx565akm_cabc_modes[i]);
+
+ buf[len++] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR_RW(cabc_mode);
+static DEVICE_ATTR_RO(cabc_available_modes);
+
+static struct attribute *acx565akm_cabc_attrs[] = {
+ &dev_attr_cabc_mode.attr,
+ &dev_attr_cabc_available_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group acx565akm_cabc_attr_group = {
+ .attrs = acx565akm_cabc_attrs,
+};
+
+/* -----------------------------------------------------------------------------
+ * Backlight Device
+ */
+
+static int acx565akm_get_actual_brightness(struct acx565akm_panel *lcd)
+{
+ u8 bv;
+
+ acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1);
+
+ return bv;
+}
+
+static void acx565akm_set_brightness(struct acx565akm_panel *lcd, int level)
+{
+ u16 ctrl;
+ int bv;
+
+ bv = level | (1 << 8);
+ acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2);
+
+ acx565akm_read(lcd, MIPI_DCS_GET_CONTROL_DISPLAY, (u8 *)&ctrl, 1);
+ if (level)
+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON;
+ else
+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON);
+
+ ctrl |= 1 << 8;
+ acx565akm_write(lcd, MIPI_DCS_WRITE_CONTROL_DISPLAY, (u8 *)&ctrl, 2);
+}
+
+static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ acx565akm_set_brightness(lcd, level);
+
+ return 0;
+}
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
+ int ret;
+
+ mutex_lock(&lcd->mutex);
+ ret = acx565akm_bl_update_status_locked(dev);
+ mutex_unlock(&lcd->mutex);
+
+ return ret;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
+ unsigned int intensity;
+
+ mutex_lock(&lcd->mutex);
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ intensity = acx565akm_get_actual_brightness(lcd);
+ else
+ intensity = 0;
+
+ mutex_unlock(&lcd->mutex);
+
+ return intensity;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+ .get_brightness = acx565akm_bl_get_intensity,
+ .update_status = acx565akm_bl_update_status,
+};
+
+static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
+{
+ struct backlight_properties props = {
+ .fb_blank = FB_BLANK_UNBLANK,
+ .power = FB_BLANK_UNBLANK,
+ .type = BACKLIGHT_RAW,
+ };
+ int ret;
+
+ lcd->backlight = backlight_device_register(lcd->name, &lcd->spi->dev,
+ lcd, &acx565akm_bl_ops,
+ &props);
+ if (IS_ERR(lcd->backlight)) {
+ ret = PTR_ERR(lcd->backlight);
+ lcd->backlight = NULL;
+ return ret;
+ }
+
+ if (lcd->has_cabc) {
+ ret = sysfs_create_group(&lcd->backlight->dev.kobj,
+ &acx565akm_cabc_attr_group);
+ if (ret < 0) {
+ dev_err(&lcd->spi->dev,
+ "%s failed to create sysfs files\n", __func__);
+ backlight_device_unregister(lcd->backlight);
+ return ret;
+ }
+
+ lcd->cabc_mode = acx565akm_get_hw_cabc_mode(lcd);
+ }
+
+ lcd->backlight->props.max_brightness = 255;
+ lcd->backlight->props.brightness = acx565akm_get_actual_brightness(lcd);
+
+ acx565akm_bl_update_status_locked(lcd->backlight);
+
+ return 0;
+}
+
+static void acx565akm_backlight_cleanup(struct acx565akm_panel *lcd)
+{
+ if (lcd->has_cabc)
+ sysfs_remove_group(&lcd->backlight->dev.kobj,
+ &acx565akm_cabc_attr_group);
+
+ backlight_device_unregister(lcd->backlight);
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Panel Operations
+ */
+
+static void acx565akm_set_sleep_mode(struct acx565akm_panel *lcd, int on)
+{
+ int cmd = on ? MIPI_DCS_ENTER_SLEEP_MODE : MIPI_DCS_EXIT_SLEEP_MODE;
+ unsigned long wait;
+
+ /*
+ * We have to keep 120msec between sleep in/out commands.
+ * (8.2.15, 8.2.16).
+ */
+ wait = lcd->hw_guard_end - jiffies;
+ if ((long)wait > 0 && wait <= lcd->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+
+ acx565akm_cmd(lcd, cmd);
+
+ lcd->hw_guard_wait = msecs_to_jiffies(120);
+ lcd->hw_guard_end = jiffies + lcd->hw_guard_wait;
+}
+
+static void acx565akm_set_display_state(struct acx565akm_panel *lcd,
+ int enabled)
+{
+ int cmd = enabled ? MIPI_DCS_SET_DISPLAY_ON : MIPI_DCS_SET_DISPLAY_OFF;
+
+ acx565akm_cmd(lcd, cmd);
+}
+
+static int acx565akm_power_on(struct acx565akm_panel *lcd)
+{
+	/* FIXME: tweak this delay */
+ msleep(50);
+
+ gpiod_set_value(lcd->reset_gpio, 1);
+
+ if (lcd->enabled) {
+ dev_dbg(&lcd->spi->dev, "panel already enabled\n");
+ return 0;
+ }
+
+ /*
+ * We have to meet all the following delay requirements:
+ * 1. tRW: reset pulse width 10usec (7.12.1)
+ * 2. tRT: reset cancel time 5msec (7.12.1)
+ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+ * case (7.6.2)
+ * 4. 120msec before the sleep out command (7.12.1)
+ */
+ msleep(120);
+
+ acx565akm_set_sleep_mode(lcd, 0);
+ lcd->enabled = true;
+
+ /* 5msec between sleep out and the next command. (8.2.16) */
+ usleep_range(5000, 10000);
+ acx565akm_set_display_state(lcd, 1);
+ acx565akm_set_cabc_mode(lcd, lcd->cabc_mode);
+
+ return acx565akm_bl_update_status_locked(lcd->backlight);
+}
+
+static void acx565akm_power_off(struct acx565akm_panel *lcd)
+{
+ if (!lcd->enabled)
+ return;
+
+ acx565akm_set_display_state(lcd, 0);
+ acx565akm_set_sleep_mode(lcd, 1);
+ lcd->enabled = false;
+ /*
+ * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+ * ~50msec) after sending the sleep in command and asserting the
+ * reset signal. We probably could assert the reset w/o the delay
+ * but we still delay to avoid possible artifacts. (7.6.1)
+ */
+ msleep(50);
+
+ gpiod_set_value(lcd->reset_gpio, 0);
+
+ /* FIXME need to tweak this delay */
+ msleep(100);
+}
+
+static int acx565akm_disable(struct drm_panel *panel)
+{
+ struct acx565akm_panel *lcd = to_acx565akm_device(panel);
+
+ mutex_lock(&lcd->mutex);
+ acx565akm_power_off(lcd);
+ mutex_unlock(&lcd->mutex);
+
+ return 0;
+}
+
+static int acx565akm_enable(struct drm_panel *panel)
+{
+ struct acx565akm_panel *lcd = to_acx565akm_device(panel);
+
+ mutex_lock(&lcd->mutex);
+ acx565akm_power_on(lcd);
+ mutex_unlock(&lcd->mutex);
+
+ return 0;
+}
+
+static const struct drm_display_mode acx565akm_mode = {
+ .clock = 24000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 28,
+ .hsync_end = 800 + 28 + 4,
+ .htotal = 800 + 28 + 4 + 24,
+ .vdisplay = 480,
+ .vsync_start = 480 + 3,
+ .vsync_end = 480 + 3 + 3,
+ .vtotal = 480 + 3 + 3 + 4,
+ .vrefresh = 57,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 77,
+ .height_mm = 46,
+};
+
+static int acx565akm_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &acx565akm_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = acx565akm_mode.width_mm;
+ connector->display_info.height_mm = acx565akm_mode.height_mm;
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs acx565akm_funcs = {
+ .disable = acx565akm_disable,
+ .enable = acx565akm_enable,
+ .get_modes = acx565akm_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe, Detect and Remove
+ */
+
+static int acx565akm_detect(struct acx565akm_panel *lcd)
+{
+ __be32 value;
+ u32 status;
+ int ret = 0;
+
+ /*
+ * After being taken out of reset the panel needs 5ms before the first
+ * command can be sent.
+ */
+ gpiod_set_value(lcd->reset_gpio, 1);
+ usleep_range(5000, 10000);
+
+ acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_STATUS, (u8 *)&value, 4);
+ status = __be32_to_cpu(value);
+ lcd->enabled = (status & (1 << 17)) && (status & (1 << 10));
+
+ dev_dbg(&lcd->spi->dev,
+ "LCD panel %s by bootloader (status 0x%04x)\n",
+		lcd->enabled ? "enabled" : "disabled", status);
+
+ acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3);
+ dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]);
+
+ switch (lcd->display_id[0]) {
+ case 0x10:
+ lcd->model = MIPID_VER_ACX565AKM;
+ lcd->name = "acx565akm";
+ lcd->has_bc = 1;
+ lcd->has_cabc = 1;
+ break;
+ case 0x29:
+ lcd->model = MIPID_VER_L4F00311;
+ lcd->name = "l4f00311";
+ break;
+ case 0x45:
+ lcd->model = MIPID_VER_LPH8923;
+ lcd->name = "lph8923";
+ break;
+ case 0x83:
+ lcd->model = MIPID_VER_LS041Y3;
+ lcd->name = "ls041y3";
+ break;
+ default:
+ lcd->name = "unknown";
+ dev_err(&lcd->spi->dev, "unknown display ID\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ lcd->revision = lcd->display_id[1];
+
+ dev_info(&lcd->spi->dev, "%s rev %02x panel detected\n",
+ lcd->name, lcd->revision);
+
+done:
+ if (!lcd->enabled)
+ gpiod_set_value(lcd->reset_gpio, 0);
+
+ return ret;
+}
+
+static int acx565akm_probe(struct spi_device *spi)
+{
+ struct acx565akm_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ spi->mode = SPI_MODE_3;
+
+ lcd->spi = spi;
+ mutex_init(&lcd->mutex);
+
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->reset_gpio)) {
+ dev_err(&spi->dev, "failed to get reset GPIO\n");
+ return PTR_ERR(lcd->reset_gpio);
+ }
+
+ ret = acx565akm_detect(lcd);
+ if (ret < 0) {
+ dev_err(&spi->dev, "panel detection failed\n");
+ return ret;
+ }
+
+ if (lcd->has_bc) {
+ ret = acx565akm_backlight_init(lcd);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_add(&lcd->panel);
+ if (ret < 0) {
+ if (lcd->has_bc)
+ acx565akm_backlight_cleanup(lcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int acx565akm_remove(struct spi_device *spi)
+{
+ struct acx565akm_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+
+ if (lcd->has_bc)
+ acx565akm_backlight_cleanup(lcd);
+
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id acx565akm_of_match[] = {
+ { .compatible = "sony,acx565akm", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
+
+static const struct spi_device_id acx565akm_ids[] = {
+ { "acx565akm", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, acx565akm_ids);
+
+static struct spi_driver acx565akm_driver = {
+ .probe = acx565akm_probe,
+ .remove = acx565akm_remove,
+ .id_table = acx565akm_ids,
+ .driver = {
+ .name = "panel-sony-acx565akm",
+ .of_match_table = acx565akm_of_match,
+ },
+};
+
+module_spi_driver(acx565akm_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
new file mode 100644
index 000000000000..cf29405a2dbe
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Toppoly TD028TTEC1 Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-tpo-td028ttec1 driver
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * Neo 1973 code (jbt6k74.c):
+ * Copyright (C) 2006-2007 OpenMoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ *
+ * Ported and adapted from Neo 1973 U-Boot by:
+ * H. Nikolaus Schaller <hns@goldelico.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
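+/*
+ * Bit 8 of each 9-bit SPI word is the data/command flag: JBT_COMMAND
+ * (0) marks a command byte, JBT_DATA (0x100) marks a parameter byte.
+ */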
+#define JBT_COMMAND 0x000
+#define JBT_DATA 0x100
+
+#define JBT_REG_SLEEP_IN 0x10
+#define JBT_REG_SLEEP_OUT 0x11
+
+#define JBT_REG_DISPLAY_OFF 0x28
+#define JBT_REG_DISPLAY_ON 0x29
+
+#define JBT_REG_RGB_FORMAT 0x3a
+#define JBT_REG_QUAD_RATE 0x3b
+
+#define JBT_REG_POWER_ON_OFF 0xb0
+#define JBT_REG_BOOSTER_OP 0xb1
+#define JBT_REG_BOOSTER_MODE 0xb2
+#define JBT_REG_BOOSTER_FREQ 0xb3
+#define JBT_REG_OPAMP_SYSCLK 0xb4
+#define JBT_REG_VSC_VOLTAGE 0xb5
+#define JBT_REG_VCOM_VOLTAGE 0xb6
+#define JBT_REG_EXT_DISPL 0xb7
+#define JBT_REG_OUTPUT_CONTROL 0xb8
+#define JBT_REG_DCCLK_DCEV 0xb9
+#define JBT_REG_DISPLAY_MODE1 0xba
+#define JBT_REG_DISPLAY_MODE2 0xbb
+#define JBT_REG_DISPLAY_MODE 0xbc
+#define JBT_REG_ASW_SLEW 0xbd
+#define JBT_REG_DUMMY_DISPLAY 0xbe
+#define JBT_REG_DRIVE_SYSTEM 0xbf
+
+#define JBT_REG_SLEEP_OUT_FR_A 0xc0
+#define JBT_REG_SLEEP_OUT_FR_B 0xc1
+#define JBT_REG_SLEEP_OUT_FR_C 0xc2
+#define JBT_REG_SLEEP_IN_LCCNT_D 0xc3
+#define JBT_REG_SLEEP_IN_LCCNT_E 0xc4
+#define JBT_REG_SLEEP_IN_LCCNT_F 0xc5
+#define JBT_REG_SLEEP_IN_LCCNT_G 0xc6
+
+#define JBT_REG_GAMMA1_FINE_1 0xc7
+#define JBT_REG_GAMMA1_FINE_2 0xc8
+#define JBT_REG_GAMMA1_INCLINATION 0xc9
+#define JBT_REG_GAMMA1_BLUE_OFFSET 0xca
+
+#define JBT_REG_BLANK_CONTROL 0xcf
+#define JBT_REG_BLANK_TH_TV 0xd0
+#define JBT_REG_CKV_ON_OFF 0xd1
+#define JBT_REG_CKV_1_2 0xd2
+#define JBT_REG_OEV_TIMING 0xd3
+#define JBT_REG_ASW_TIMING_1 0xd4
+#define JBT_REG_ASW_TIMING_2 0xd5
+
+#define JBT_REG_HCLOCK_VGA 0xec
+#define JBT_REG_HCLOCK_QVGA 0xed
+
+struct td028ttec1_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+};
+
+#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
+
+static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+{
+ struct spi_device *spi = lcd->spi;
+ u16 tx_buf = JBT_COMMAND | reg;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ ret = spi_write(spi, (u8 *)&tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
+ if (err)
+ *err = ret;
+ }
+
+ return ret;
+}
+
+static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
+ u8 reg, u8 data, int *err)
+{
+ struct spi_device *spi = lcd->spi;
+ u16 tx_buf[2];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | data;
+
+ ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
+ if (err)
+ *err = ret;
+ }
+
+ return ret;
+}
+
+static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
+ u8 reg, u16 data, int *err)
+{
+ struct spi_device *spi = lcd->spi;
+ u16 tx_buf[3];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | (data >> 8);
+ tx_buf[2] = JBT_DATA | (data & 0xff);
+
+ ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
+ if (err)
+ *err = ret;
+ }
+
+ return ret;
+}
+
+static int td028ttec1_prepare(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+ unsigned int i;
+ int ret = 0;
+
+ /* Three times command zero */
+ for (i = 0; i < 3; ++i) {
+ jbt_ret_write_0(lcd, 0x00, &ret);
+ usleep_range(1000, 2000);
+ }
+
+ /* deep standby out */
+ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x17, &ret);
+
+ /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
+ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE, 0x80, &ret);
+
+ /* Quad mode off */
+ jbt_reg_write_1(lcd, JBT_REG_QUAD_RATE, 0x00, &ret);
+
+ /* AVDD on, XVDD on */
+ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x16, &ret);
+
+ /* Output control */
+ jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0xfff9, &ret);
+
+ /* Sleep mode off */
+ jbt_ret_write_0(lcd, JBT_REG_SLEEP_OUT, &ret);
+
+	/* at this point the display shows roughly 50% grey */
+
+ /* initialize register set */
+ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE1, 0x01, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE2, 0x00, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_RGB_FORMAT, 0x60, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DRIVE_SYSTEM, 0x10, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_OP, 0x56, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_MODE, 0x33, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_OPAMP_SYSCLK, 0x02, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_VSC_VOLTAGE, 0x2b, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_VCOM_VOLTAGE, 0x40, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_EXT_DISPL, 0x03, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DCCLK_DCEV, 0x04, &ret);
+ /*
+	 * the default of 0x02 in JBT_REG_ASW_SLEW is responsible for the
+	 * 72 Hz requirement to avoid red/blue flicker
+ */
+ jbt_reg_write_1(lcd, JBT_REG_ASW_SLEW, 0x04, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DUMMY_DISPLAY, 0x00, &ret);
+
+ jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_A, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_B, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_C, 0x11, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0, &ret);
+
+ jbt_reg_write_2(lcd, JBT_REG_GAMMA1_FINE_1, 0x5533, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_GAMMA1_FINE_2, 0x00, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_GAMMA1_INCLINATION, 0x00, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00, &ret);
+
+ jbt_reg_write_2(lcd, JBT_REG_HCLOCK_VGA, 0x1f0, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BLANK_CONTROL, 0x02, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_BLANK_TH_TV, 0x0804, &ret);
+
+ jbt_reg_write_1(lcd, JBT_REG_CKV_ON_OFF, 0x01, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_CKV_1_2, 0x0000, &ret);
+
+ jbt_reg_write_2(lcd, JBT_REG_OEV_TIMING, 0x0d0e, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_ASW_TIMING_1, 0x11a4, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_ASW_TIMING_2, 0x0e, &ret);
+
+ return ret;
+}
+
+static int td028ttec1_enable(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+ int ret;
+
+ ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int td028ttec1_disable(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+
+ jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL);
+
+ return 0;
+}
+
+static int td028ttec1_unprepare(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+
+ jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0x8002, NULL);
+ jbt_ret_write_0(lcd, JBT_REG_SLEEP_IN, NULL);
+ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x00, NULL);
+
+ return 0;
+}
+
+static const struct drm_display_mode td028ttec1_mode = {
+ .clock = 22153,
+ .hdisplay = 480,
+ .hsync_start = 480 + 24,
+ .hsync_end = 480 + 24 + 8,
+ .htotal = 480 + 24 + 8 + 8,
+ .vdisplay = 640,
+ .vsync_start = 640 + 4,
+ .vsync_end = 640 + 4 + 2,
+ .vtotal = 640 + 4 + 2 + 2,
+ .vrefresh = 66,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 43,
+ .height_mm = 58,
+};
+
+static int td028ttec1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &td028ttec1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = td028ttec1_mode.width_mm;
+ connector->display_info.height_mm = td028ttec1_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet sync signals are sampled on the
+ * rising edge of the clock, but the code running on the OpenMoko Neo
+ * FreeRunner and Neo 1973 indicates sampling on the falling edge. This
+ * should be tested on a real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs td028ttec1_funcs = {
+ .prepare = td028ttec1_prepare,
+ .enable = td028ttec1_enable,
+ .disable = td028ttec1_disable,
+ .unprepare = td028ttec1_unprepare,
+ .get_modes = td028ttec1_get_modes,
+};
+
+static int td028ttec1_probe(struct spi_device *spi)
+{
+ struct td028ttec1_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
+ return ret;
+ }
+
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_of_backlight(&lcd->panel);
+ if (ret)
+ return ret;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int td028ttec1_remove(struct spi_device *spi)
+{
+ struct td028ttec1_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id td028ttec1_of_match[] = {
+ { .compatible = "tpo,td028ttec1", },
+ /* DT backward compatibility. */
+ { .compatible = "toppoly,td028ttec1", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
+
+static const struct spi_device_id td028ttec1_ids[] = {
+ { "td028ttec1", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
+
+static struct spi_driver td028ttec1_driver = {
+ .probe = td028ttec1_probe,
+ .remove = td028ttec1_remove,
+ .id_table = td028ttec1_ids,
+ .driver = {
+ .name = "panel-tpo-td028ttec1",
+ .of_match_table = td028ttec1_of_match,
+ },
+};
+
+module_spi_driver(td028ttec1_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
new file mode 100644
index 000000000000..75f1f1f1b6de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Toppoly TD043MTEA1 Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-tpo-td043mtea1 driver
+ *
+ * Author: Gražvydas Ignotas <notasas@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define TPO_R02_MODE(x) ((x) & 7)
+#define TPO_R02_MODE_800x480 7
+#define TPO_R02_NCLK_RISING BIT(3)
+#define TPO_R02_HSYNC_HIGH BIT(4)
+#define TPO_R02_VSYNC_HIGH BIT(5)
+
+#define TPO_R03_NSTANDBY BIT(0)
+#define TPO_R03_EN_CP_CLK BIT(1)
+#define TPO_R03_EN_VGL_PUMP BIT(2)
+#define TPO_R03_EN_PWM BIT(3)
+#define TPO_R03_DRIVING_CAP_100 BIT(4)
+#define TPO_R03_EN_PRE_CHARGE BIT(6)
+#define TPO_R03_SOFTWARE_CTL BIT(7)
+
+#define TPO_R04_NFLIP_H BIT(0)
+#define TPO_R04_NFLIP_V BIT(1)
+#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
+#define TPO_R04_VGL_FREQ_1H BIT(4)
+
+#define TPO_R03_VAL_NORMAL \
+ (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | TPO_R03_EN_VGL_PUMP | \
+ TPO_R03_EN_PWM | TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
+ TPO_R03_SOFTWARE_CTL)
+
+#define TPO_R03_VAL_STANDBY \
+ (TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
+ TPO_R03_SOFTWARE_CTL)
+
+static const u16 td043mtea1_def_gamma[12] = {
+ 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
+};
+
+struct td043mtea1_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct regulator *vcc_reg;
+ struct gpio_desc *reset_gpio;
+
+ unsigned int mode;
+ u16 gamma[12];
+ bool vmirror;
+ bool powered_on;
+ bool spi_suspended;
+ bool power_on_resume;
+};
+
+#define to_td043mtea1_device(p) container_of(p, struct td043mtea1_panel, panel)
+
+/* -----------------------------------------------------------------------------
+ * Hardware Access
+ */
+
+static int td043mtea1_write(struct td043mtea1_panel *lcd, u8 addr, u8 value)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer;
+ u16 data;
+ int ret;
+
+ spi_message_init(&msg);
+
+ memset(&xfer, 0, sizeof(xfer));
+
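+	/*
+	 * One 16-bit SPI word: register address in bits [15:10], bit 8
+	 * set, register value in bits [7:0].
+	 */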
+ data = ((u16)addr << 10) | (1 << 8) | value;
+ xfer.tx_buf = &data;
+ xfer.bits_per_word = 16;
+ xfer.len = 2;
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(lcd->spi, &msg);
+ if (ret < 0)
+ dev_warn(&lcd->spi->dev, "failed to write to LCD reg (%d)\n",
+ ret);
+
+ return ret;
+}
+
+static void td043mtea1_write_gamma(struct td043mtea1_panel *lcd)
+{
+ const u16 *gamma = lcd->gamma;
+ unsigned int i;
+ u8 val;
+
+ /* gamma bits [9:8] */
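+	/* registers 0x11-0x13 each pack the two MSBs of four gamma values */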
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
+ td043mtea1_write(lcd, 0x11, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i + 4] & 0x300) >> ((i + 1) * 2);
+ td043mtea1_write(lcd, 0x12, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i + 8] & 0x300) >> ((i + 1) * 2);
+ td043mtea1_write(lcd, 0x13, val);
+
+ /* gamma bits [7:0] */
+ for (i = 0; i < 12; i++)
+ td043mtea1_write(lcd, 0x14 + i, gamma[i] & 0xff);
+}
+
+static int td043mtea1_write_mirror(struct td043mtea1_panel *lcd)
+{
+ u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
+ TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
+ if (lcd->vmirror)
+ reg4 &= ~TPO_R04_NFLIP_V;
+
+ return td043mtea1_write(lcd, 4, reg4);
+}
+
+static int td043mtea1_power_on(struct td043mtea1_panel *lcd)
+{
+ int ret;
+
+ if (lcd->powered_on)
+ return 0;
+
+ ret = regulator_enable(lcd->vcc_reg);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the panel to stabilize. */
+ msleep(160);
+
+ gpiod_set_value(lcd->reset_gpio, 0);
+
+ td043mtea1_write(lcd, 2, TPO_R02_MODE(lcd->mode) | TPO_R02_NCLK_RISING);
+ td043mtea1_write(lcd, 3, TPO_R03_VAL_NORMAL);
+ td043mtea1_write(lcd, 0x20, 0xf0);
+ td043mtea1_write(lcd, 0x21, 0xf0);
+ td043mtea1_write_mirror(lcd);
+ td043mtea1_write_gamma(lcd);
+
+ lcd->powered_on = true;
+
+ return 0;
+}
+
+static void td043mtea1_power_off(struct td043mtea1_panel *lcd)
+{
+ if (!lcd->powered_on)
+ return;
+
+ td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
+
+ gpiod_set_value(lcd->reset_gpio, 1);
+
+ /* wait for at least 2 vsyncs before cutting off power */
+ msleep(50);
+
+ td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY);
+
+ regulator_disable(lcd->vcc_reg);
+
+ lcd->powered_on = false;
+}
+
+/* -----------------------------------------------------------------------------
+ * sysfs
+ */
+
+static ssize_t vmirror_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", lcd->vmirror);
+}
+
+static ssize_t vmirror_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ lcd->vmirror = !!val;
+
+ ret = td043mtea1_write_mirror(lcd);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", lcd->mode);
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+ if (ret != 0 || val & ~7)
+ return -EINVAL;
+
+ lcd->mode = val;
+
+ val |= TPO_R02_NCLK_RISING;
+ td043mtea1_write(lcd, 2, val);
+
+ return count;
+}
+
+static ssize_t gamma_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ ssize_t len = 0;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(lcd->gamma); i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
+ lcd->gamma[i]);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ }
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t gamma_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ unsigned int g[12];
+ unsigned int i;
+ int ret;
+
+ ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
+ &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
+ &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
+ if (ret != 12)
+ return -EINVAL;
+
+ for (i = 0; i < 12; i++)
+ lcd->gamma[i] = g[i];
+
+ td043mtea1_write_gamma(lcd);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(vmirror);
+static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RW(gamma);
+
+static struct attribute *td043mtea1_attrs[] = {
+ &dev_attr_vmirror.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_gamma.attr,
+ NULL,
+};
+
+static const struct attribute_group td043mtea1_attr_group = {
+ .attrs = td043mtea1_attrs,
+};
+
+/* -----------------------------------------------------------------------------
+ * Panel Operations
+ */
+
+static int td043mtea1_unprepare(struct drm_panel *panel)
+{
+ struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
+
+ if (!lcd->spi_suspended)
+ td043mtea1_power_off(lcd);
+
+ return 0;
+}
+
+static int td043mtea1_prepare(struct drm_panel *panel)
+{
+ struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
+ int ret;
+
+ /*
+ * If we are resuming from system suspend, SPI might not be enabled
+ * yet, so we'll program the LCD from SPI PM resume callback.
+ */
+ if (lcd->spi_suspended)
+ return 0;
+
+ ret = td043mtea1_power_on(lcd);
+ if (ret) {
+ dev_err(&lcd->spi->dev, "%s: power on failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_display_mode td043mtea1_mode = {
+ .clock = 36000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 68,
+ .hsync_end = 800 + 68 + 1,
+ .htotal = 800 + 68 + 1 + 214,
+ .vdisplay = 480,
+ .vsync_start = 480 + 39,
+ .vsync_end = 480 + 39 + 1,
+ .vtotal = 480 + 39 + 1 + 34,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 94,
+ .height_mm = 56,
+};
+
+static int td043mtea1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &td043mtea1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = td043mtea1_mode.width_mm;
+ connector->display_info.height_mm = td043mtea1_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet sync signals are sampled on the
+ * rising edge of the clock, but the code running on the OMAP3 Pandora
+ * indicates sampling on the falling edge. This should be tested on a
+ * real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs td043mtea1_funcs = {
+ .unprepare = td043mtea1_unprepare,
+ .prepare = td043mtea1_prepare,
+ .get_modes = td043mtea1_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management, Probe and Remove
+ */
+
+static int __maybe_unused td043mtea1_suspend(struct device *dev)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+
+ if (lcd->powered_on) {
+ td043mtea1_power_off(lcd);
+ lcd->powered_on = true;
+ }
+
+ lcd->spi_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused td043mtea1_resume(struct device *dev)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ int ret;
+
+ lcd->spi_suspended = false;
+
+ if (lcd->powered_on) {
+ lcd->powered_on = false;
+ ret = td043mtea1_power_on(lcd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(td043mtea1_pm_ops, td043mtea1_suspend,
+ td043mtea1_resume);
+
+static int td043mtea1_probe(struct spi_device *spi)
+{
+ struct td043mtea1_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (lcd == NULL)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+ lcd->mode = TPO_R02_MODE_800x480;
+ memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma));
+
+ lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
+ if (IS_ERR(lcd->vcc_reg)) {
+ dev_err(&spi->dev, "failed to get VCC regulator\n");
+ return PTR_ERR(lcd->vcc_reg);
+ }
+
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(lcd->reset_gpio)) {
+ dev_err(&spi->dev, "failed to get reset GPIO\n");
+ return PTR_ERR(lcd->reset_gpio);
+ }
+
+ spi->bits_per_word = 16;
+ spi->mode = SPI_MODE_0;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
+ return ret;
+ }
+
+ ret = sysfs_create_group(&spi->dev.kobj, &td043mtea1_attr_group);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to create sysfs files\n");
+ return ret;
+ }
+
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_add(&lcd->panel);
+ if (ret < 0) {
+ sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int td043mtea1_remove(struct spi_device *spi)
+{
+ struct td043mtea1_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
+
+ return 0;
+}
+
+static const struct of_device_id td043mtea1_of_match[] = {
+ { .compatible = "tpo,td043mtea1", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
+
+static const struct spi_device_id td043mtea1_ids[] = {
+ { "td043mtea1", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
+
+static struct spi_driver td043mtea1_driver = {
+ .probe = td043mtea1_probe,
+ .remove = td043mtea1_remove,
+ .id_table = td043mtea1_ids,
+ .driver = {
+ .name = "panel-tpo-td043mtea1",
+ .pm = &td043mtea1_pm_ops,
+ .of_match_table = td043mtea1_of_match,
+ },
+};
+
+module_spi_driver(td043mtea1_driver);
+
+MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
+MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 71591e5f5938..8472d018c16f 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -14,13 +14,13 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -77,10 +77,6 @@ struct tpg110 {
*/
struct drm_panel panel;
/**
- * @backlight: backlight for this panel
- */
- struct backlight_device *backlight;
- /**
* @panel_type: the panel mode as detected
*/
const struct tpg110_panel_mode *panel_mode;
@@ -356,8 +352,6 @@ static int tpg110_disable(struct drm_panel *panel)
val &= ~TPG110_CTRL2_PM;
tpg110_write_reg(tpg, TPG110_CTRL2_PM, val);
- backlight_disable(tpg->backlight);
-
return 0;
}
@@ -366,8 +360,6 @@ static int tpg110_enable(struct drm_panel *panel)
struct tpg110 *tpg = to_tpg110(panel);
u8 val;
- backlight_enable(tpg->backlight);
-
/* Take chip out of standby */
val = tpg110_read_reg(tpg, TPG110_CTRL2_PM);
val |= TPG110_CTRL2_PM;
@@ -384,9 +376,9 @@ static int tpg110_enable(struct drm_panel *panel)
* presents the mode that is configured for the system under use,
* and which is detected by reading the registers of the display.
*/
-static int tpg110_get_modes(struct drm_panel *panel)
+static int tpg110_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct tpg110 *tpg = to_tpg110(panel);
struct drm_display_mode *mode;
@@ -394,7 +386,7 @@ static int tpg110_get_modes(struct drm_panel *panel)
connector->display_info.height_mm = tpg->height;
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
- mode = drm_mode_duplicate(panel->drm, &tpg->panel_mode->mode);
+ mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
@@ -432,11 +424,6 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
DRM_DEV_ERROR(dev, "no panel height specified\n");
- /* Look for some optional backlight */
- tpg->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(tpg->backlight))
- return PTR_ERR(tpg->backlight);
-
/* This asserts the GRESTB signal, putting the display into reset */
tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH);
if (IS_ERR(tpg->grestb)) {
@@ -457,9 +444,13 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel);
- tpg->panel.dev = dev;
- tpg->panel.funcs = &tpg110_drm_funcs;
+ drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_of_backlight(&tpg->panel);
+ if (ret)
+ return ret;
+
spi_set_drvdata(spi, tpg);
return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 77e1311b7c69..012ca62bf30e 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -454,9 +454,9 @@ static int truly_nt35597_enable(struct drm_panel *panel)
return 0;
}
-static int truly_nt35597_get_modes(struct drm_panel *panel)
+static int truly_nt35597_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct truly_nt35597 *ctx = panel_to_ctx(panel);
struct drm_display_mode *mode;
const struct nt35597_config *config;
@@ -518,9 +518,8 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
/* dual port */
gpiod_set_value(ctx->mode_gpio, 0);
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
new file mode 100644
index 000000000000..1645aceab597
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xinpeng xpp055c272 5.5" MIPI-DSI panel driver
+ * Copyright (C) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+/* Manufacturer-specific commands sent via DSI */
+#define XPP055C272_CMD_ALL_PIXEL_OFF 0x22
+#define XPP055C272_CMD_ALL_PIXEL_ON 0x23
+#define XPP055C272_CMD_SETDISP 0xb2
+#define XPP055C272_CMD_SETRGBIF 0xb3
+#define XPP055C272_CMD_SETCYC 0xb4
+#define XPP055C272_CMD_SETBGP 0xb5
+#define XPP055C272_CMD_SETVCOM 0xb6
+#define XPP055C272_CMD_SETOTP 0xb7
+#define XPP055C272_CMD_SETPOWER_EXT 0xb8
+#define XPP055C272_CMD_SETEXTC 0xb9
+#define XPP055C272_CMD_SETMIPI		0xba
+#define XPP055C272_CMD_SETVDC 0xbc
+#define XPP055C272_CMD_SETPCR 0xbf
+#define XPP055C272_CMD_SETSCR 0xc0
+#define XPP055C272_CMD_SETPOWER 0xc1
+#define XPP055C272_CMD_SETECO 0xc6
+#define XPP055C272_CMD_SETPANEL 0xcc
+#define XPP055C272_CMD_SETGAMMA 0xe0
+#define XPP055C272_CMD_SETEQ 0xe3
+#define XPP055C272_CMD_SETGIP1 0xe9
+#define XPP055C272_CMD_SETGIP2 0xea
+
+struct xpp055c272 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vci;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
+{
+ return container_of(panel, struct xpp055c272, panel);
+}
+
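+/*
+ * Note that on a failed write this macro returns from the *calling*
+ * function, so it may only be used in functions that return an int.
+ */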
+#define dsi_generic_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETMIPI,
+ 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+ 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+ 0x00, 0x00, 0x37);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
+ 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+ 0x00, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER,
+ 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+ 0x67, 0x77, 0x33, 0x33);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0xff);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+ msleep(20);
+
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP1,
+ 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+ 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+ 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+ 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+ 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+ 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP2,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+ 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+ 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+ 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+ 0xa0, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
+ 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+ 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+ 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+ 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+ 0x11, 0x18);
+
+ msleep(60);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int xpp055c272_unprepare(struct drm_panel *panel)
+{
+ struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vci);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int xpp055c272_prepare(struct drm_panel *panel)
+{
+ struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vci);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vci;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ /* T6: 10us */
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ /* T8: 20ms */
+ msleep(20);
+
+ ret = xpp055c272_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ /* T9: 120ms */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vci:
+ regulator_disable(ctx->vci);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 10,
+ .htotal = 720 + 40 + 10 + 40,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 22,
+ .vsync_end = 1280 + 22 + 4,
+ .vtotal = 1280 + 22 + 4 + 11,
+ .vrefresh = 60,
+ .clock = 64000,
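+	/* 64 MHz pixel clock / (810 * 1317) ~= 60 Hz */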
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
+static int xpp055c272_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs xpp055c272_funcs = {
+ .unprepare = xpp055c272_unprepare,
+ .prepare = xpp055c272_prepare,
+ .get_modes = xpp055c272_get_modes,
+};
+
+static int xpp055c272_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct xpp055c272 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vci = devm_regulator_get(dev, "vci");
+ if (IS_ERR(ctx->vci)) {
+ ret = PTR_ERR(ctx->vci);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vci regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int xpp055c272_remove(struct mipi_dsi_device *dsi)
+{
+ struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ xpp055c272_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id xpp055c272_of_match[] = {
+ { .compatible = "xinpeng,xpp055c272" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, xpp055c272_of_match);
+
+static struct mipi_dsi_driver xpp055c272_driver = {
+ .driver = {
+ .name = "panel-xinpeng-xpp055c272",
+ .of_match_table = xpp055c272_of_match,
+ },
+ .probe = xpp055c272_probe,
+ .remove = xpp055c272_remove,
+ .shutdown = xpp055c272_shutdown,
+};
+module_mipi_dsi_driver(xpp055c272_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Xinpeng xpp055c272 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index ecf0864cb515..b71935862417 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -5,6 +5,7 @@ panfrost-y := \
panfrost_device.o \
panfrost_devfreq.o \
panfrost_gem.o \
+ panfrost_gem_shrinker.o \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o \
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index c2e44add37d8..8c811a9e683b 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -6,22 +6,9 @@
- Bifrost specific feature and issue handling
- Coherent DMA support
-- Support for 2MB pages. The io-pgtable code already supports this. Finishing
- support involves either copying or adapting the iommu API to handle passing
- aligned addresses and sizes to the io-pgtable code.
-
-- Per FD address space support. The h/w supports multiple addresses spaces.
- The hard part is handling when more address spaces are needed than what
- the h/w provides.
-
-- Support pinning pages on demand (GPU page faults).
-
- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
-- Support for madvise and a shrinker.
-
- Compute job support. So-called 'compute only' jobs need to be plumbed up to
userspace.
-- Performance counter support. (Boris)
-
+- Support core dump on job failure
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index db798532b0b6..413987038fbf 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */
#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/clk.h>
@@ -13,97 +14,45 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
struct dev_pm_opp *opp;
- unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
- unsigned long target_volt, target_rate;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
-
- target_rate = dev_pm_opp_get_freq(opp);
- target_volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
- if (old_clk_rate == target_rate)
- return 0;
-
- /*
- * If frequency scaling from low to high, adjust voltage first.
- * If frequency scaling from high to low, adjust frequency first.
- */
- if (old_clk_rate < target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err) {
- dev_err(dev, "Cannot set voltage %lu uV\n",
- target_volt);
- return err;
- }
- }
-
- err = clk_set_rate(pfdev->clock, target_rate);
- if (err) {
- dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
- err);
- regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
return err;
- }
-
- if (old_clk_rate > target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err)
- dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
- }
-
- pfdev->devfreq.cur_freq = target_rate;
- pfdev->devfreq.cur_volt = target_volt;
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
- ktime_t now = ktime_get();
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- pfdev->devfreq.slot[i].busy_time = 0;
- pfdev->devfreq.slot[i].idle_time = 0;
- pfdev->devfreq.slot[i].time_last_update = now;
- }
+ pfdev->devfreq.busy_time = 0;
+ pfdev->devfreq.idle_time = 0;
+ pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- int i;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- panfrost_devfreq_update_utilization(pfdev, i);
- }
+ panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
- pfdev->devfreq.slot[0].idle_time));
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
+ pfdev->devfreq.idle_time));
- status->busy_time = 0;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
- }
-
- /* We're scheduling only to one core atm, so don't divide for now */
- /* status->busy_time /= NUM_JOB_SLOTS; */
+ status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
@@ -115,31 +64,22 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
return 0;
}
-static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
-
- *freq = pfdev->devfreq.cur_freq;
-
- return 0;
-}
-
static struct devfreq_dev_profile panfrost_devfreq_profile = {
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
- .get_cur_freq = panfrost_devfreq_get_cur_freq,
};
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
+ unsigned long cur_freq;
+ struct device *dev = &pfdev->pdev->dev;
+ struct devfreq *devfreq;
+ struct thermal_cooling_device *cooling;
- if (!pfdev->regulator)
- return 0;
-
- ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
+ ret = dev_pm_opp_of_add_table(dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
return 0;
else if (ret)
@@ -147,37 +87,46 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
- pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+ cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
- panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
- pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
- &panfrost_devfreq_profile, "simple_ondemand", NULL);
- if (IS_ERR(pfdev->devfreq.devfreq)) {
- DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
- ret = PTR_ERR(pfdev->devfreq.devfreq);
- pfdev->devfreq.devfreq = NULL;
- return ret;
+ devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ if (IS_ERR(devfreq)) {
+ DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_opp_of_remove_table(dev);
+ return PTR_ERR(devfreq);
}
+ pfdev->devfreq.devfreq = devfreq;
+
+ cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+ if (IS_ERR(cooling))
+ DRM_DEV_INFO(dev, "Failed to register cooling device\n");
+ else
+ pfdev->devfreq.cooling = cooling;
return 0;
}
-void panfrost_devfreq_resume(struct panfrost_device *pfdev)
+void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
- int i;
+ if (pfdev->devfreq.cooling)
+ devfreq_cooling_unregister(pfdev->devfreq.cooling);
+ dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
+}
+void panfrost_devfreq_resume(struct panfrost_device *pfdev)
+{
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@@ -190,9 +139,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@@ -200,22 +148,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
- last = pfdev->devfreq.slot[slot].time_last_update;
+ last = pfdev->devfreq.time_last_update;
- /* If we last recorded a transition to busy, we have been idle since */
- if (devfreq_slot->busy)
- pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ if (atomic_read(&pfdev->devfreq.busy_count) > 0)
+ pfdev->devfreq.busy_time += ktime_sub(now, last);
else
- pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+ pfdev->devfreq.idle_time += ktime_sub(now, last);
+
+ pfdev->devfreq.time_last_update = now;
+}
- pfdev->devfreq.slot[slot].time_last_update = now;
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
+{
+ panfrost_devfreq_update_utilization(pfdev);
+ atomic_inc(&pfdev->devfreq.busy_count);
}
-/* The job scheduler is expected to call this at every transition busy <-> idle */
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ int count;
- panfrost_devfreq_update_utilization(pfdev, slot);
- devfreq_slot->busy = !devfreq_slot->busy;
+ panfrost_devfreq_update_utilization(pfdev);
+ count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
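+	/* atomic_dec_if_positive() returns -1 without decrementing when the
+	 * counter is already zero, i.e. on an unbalanced record_idle() call.
+	 */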
+ WARN_ON(count < 0);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index eb999531ed90..0611beffc8d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -5,10 +5,12 @@
#define __PANFROST_DEVFREQ_H__
int panfrost_devfreq_init(struct panfrost_device *pfdev);
+void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 8a111d7c0200..238fb6d54df4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,7 +5,6 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -90,12 +89,9 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret;
- pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali");
+ pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
if (IS_ERR(pfdev->regulator)) {
ret = PTR_ERR(pfdev->regulator);
- pfdev->regulator = NULL;
- if (ret == -ENODEV)
- return 0;
dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
@@ -111,8 +107,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- if (pfdev->regulator)
- regulator_disable(pfdev->regulator);
+ regulator_disable(pfdev->regulator);
}
int panfrost_device_init(struct panfrost_device *pfdev)
@@ -123,8 +118,9 @@ int panfrost_device_init(struct panfrost_device *pfdev)
mutex_init(&pfdev->sched_lock);
mutex_init(&pfdev->reset_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
+ INIT_LIST_HEAD(&pfdev->as_lru_list);
- spin_lock_init(&pfdev->hwaccess_lock);
+ spin_lock_init(&pfdev->as_lock);
err = panfrost_clk_init(pfdev);
if (err) {
@@ -164,14 +160,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto err_out4;
- /* runtime PM will wake us up later */
- panfrost_gpu_power_off(pfdev);
-
- pm_runtime_set_active(pfdev->dev);
- pm_runtime_get_sync(pfdev->dev);
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_put_autosuspend(pfdev->dev);
-
err = panfrost_perfcnt_init(pfdev);
if (err)
goto err_out5;
@@ -254,18 +242,22 @@ const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception
return "UNKNOWN";
}
+void panfrost_device_reset(struct panfrost_device *pfdev)
+{
+ panfrost_gpu_soft_reset(pfdev);
+
+ panfrost_gpu_power_on(pfdev);
+ panfrost_mmu_reset(pfdev);
+ panfrost_job_enable_interrupts(pfdev);
+}
+
#ifdef CONFIG_PM
int panfrost_device_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
- panfrost_gpu_soft_reset(pfdev);
-
- /* TODO: Re-enable all other address spaces */
- panfrost_gpu_power_on(pfdev);
- panfrost_mmu_enable(pfdev, 0);
- panfrost_job_enable_interrupts(pfdev);
+ panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 83cc01cafde1..06713811b92c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -5,6 +5,8 @@
#ifndef __PANFROST_DEVICE_H__
#define __PANFROST_DEVICE_H__
+#include <linux/atomic.h>
+#include <linux/io-pgtable.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
@@ -43,28 +45,17 @@ struct panfrost_features {
u32 js_features[16];
u32 nr_core_groups;
+ u32 thread_tls_alloc;
unsigned long hw_features[64 / BITS_PER_LONG];
unsigned long hw_issues[64 / BITS_PER_LONG];
};
-struct panfrost_devfreq_slot {
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- bool busy;
-};
-
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
- spinlock_t hwaccess_lock;
-
- struct drm_mm mm;
- spinlock_t mm_lock;
-
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
@@ -73,7 +64,11 @@ struct panfrost_device {
struct panfrost_features features;
- struct panfrost_mmu *mmu;
+ spinlock_t as_lock;
+ unsigned long as_in_use_mask;
+ unsigned long as_alloc_mask;
+ struct list_head as_lru_list;
+
struct panfrost_job_slot *js;
struct panfrost_job *jobs[NUM_JOB_SLOTS];
@@ -84,19 +79,36 @@ struct panfrost_device {
struct mutex sched_lock;
struct mutex reset_lock;
+ struct mutex shrinker_lock;
+ struct list_head shrinker_list;
+ struct shrinker shrinker;
+
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
- unsigned long cur_freq;
- unsigned long cur_volt;
- struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ atomic_t busy_count;
} devfreq;
};
+struct panfrost_mmu {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable_ops *pgtbl_ops;
+ int as;
+ atomic_t as_count;
+ struct list_head list;
+};
+
struct panfrost_file_priv {
struct panfrost_device *pfdev;
struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+
+ struct panfrost_mmu mmu;
+ struct drm_mm mm;
+ spinlock_t mm_lock;
};
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
@@ -127,6 +139,7 @@ int panfrost_unstable_ioctl_check(void);
int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
+void panfrost_device_reset(struct panfrost_device *pfdev);
int panfrost_device_resume(struct device *dev);
int panfrost_device_suspend(struct device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 85b4b51b6a0d..b7a618db3ee2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -32,10 +32,42 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
if (param->pad != 0)
return -EINVAL;
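+/*
+ * Each PANFROST_FEATURE() entry maps a DRM_PANFROST_PARAM_* value onto a
+ * field of pfdev->features; the ARRAY variant covers a contiguous range of
+ * parameters indexing into an array field.
+ */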
+#define PANFROST_FEATURE(name, member) \
+ case DRM_PANFROST_PARAM_ ## name: \
+ param->value = pfdev->features.member; \
+ break
+#define PANFROST_FEATURE_ARRAY(name, member, max) \
+ case DRM_PANFROST_PARAM_ ## name ## 0 ... \
+ DRM_PANFROST_PARAM_ ## name ## max: \
+ param->value = pfdev->features.member[param->param - \
+ DRM_PANFROST_PARAM_ ## name ## 0]; \
+ break
+
switch (param->param) {
- case DRM_PANFROST_PARAM_GPU_PROD_ID:
- param->value = pfdev->features.id;
- break;
+ PANFROST_FEATURE(GPU_PROD_ID, id);
+ PANFROST_FEATURE(GPU_REVISION, revision);
+ PANFROST_FEATURE(SHADER_PRESENT, shader_present);
+ PANFROST_FEATURE(TILER_PRESENT, tiler_present);
+ PANFROST_FEATURE(L2_PRESENT, l2_present);
+ PANFROST_FEATURE(STACK_PRESENT, stack_present);
+ PANFROST_FEATURE(AS_PRESENT, as_present);
+ PANFROST_FEATURE(JS_PRESENT, js_present);
+ PANFROST_FEATURE(L2_FEATURES, l2_features);
+ PANFROST_FEATURE(CORE_FEATURES, core_features);
+ PANFROST_FEATURE(TILER_FEATURES, tiler_features);
+ PANFROST_FEATURE(MEM_FEATURES, mem_features);
+ PANFROST_FEATURE(MMU_FEATURES, mmu_features);
+ PANFROST_FEATURE(THREAD_FEATURES, thread_features);
+ PANFROST_FEATURE(MAX_THREADS, max_threads);
+ PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
+ thread_max_workgroup_sz);
+ PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
+ thread_max_barrier_sz);
+ PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
+ PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
+ PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
+ PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
+ PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
default:
return -EINVAL;
}
@@ -46,29 +78,35 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
- int ret;
- struct drm_gem_shmem_object *shmem;
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;
- if (!args->size || args->flags || args->pad)
+ if (!args->size || args->pad ||
+ (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
return -EINVAL;
- shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
- &args->handle);
- if (IS_ERR(shmem))
- return PTR_ERR(shmem);
+ /* Heaps should never be executable */
+ if ((args->flags & PANFROST_BO_HEAP) &&
+ !(args->flags & PANFROST_BO_NOEXEC))
+ return -EINVAL;
- ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
- if (ret)
- goto err_free;
+ bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
- args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
-
-err_free:
- drm_gem_handle_delete(file, args->handle);
- return ret;
}
/**
@@ -90,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
@@ -101,9 +144,33 @@ panfrost_lookup_bos(struct drm_device *dev,
if (!job->implicit_fences)
return -ENOMEM;
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ atomic_inc(&bo->gpu_usecount);
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}
/**
@@ -245,7 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+ ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -273,18 +340,27 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
return -ENOENT;
}
+ /* Don't allow mmapping of heap objects as pages are not pinned. */
+ if (to_panfrost_bo(gem_obj)->is_heap) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret == 0)
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_put_unlocked(gem_obj);
+out:
+ drm_gem_object_put_unlocked(gem_obj);
return ret;
}
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
@@ -295,12 +371,77 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
}
bo = to_panfrost_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
+static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct drm_panfrost_madvise *args = data;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ bo = to_panfrost_bo(gem_obj);
+
+ mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
+ args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+
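+	/* Purgeable BOs are kept on the device-wide shrinker list so the
+	 * shrinker can find and reclaim them under memory pressure.
+	 */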
+ if (args->retained) {
+ if (args->madv == PANFROST_MADV_DONTNEED)
+ list_add_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
+ else if (args->madv == PANFROST_MADV_WILLNEED)
+ list_del_init(&bo->base.madv_list);
+ }
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ drm_gem_object_put_unlocked(gem_obj);
+ return ret;
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -309,9 +450,36 @@ int panfrost_unstable_ioctl_check(void)
return 0;
}
+#define PFN_4G (SZ_4G >> PAGE_SHIFT)
+#define PFN_4G_MASK (PFN_4G - 1)
+#define PFN_16M (SZ_16M >> PAGE_SHIFT)
+
+static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start, u64 *end)
+{
+ /* Executable buffers can't start or end on a 4GB boundary */
+ if (!(color & PANFROST_BO_NOEXEC)) {
+ u64 next_seg;
+
+ if ((*start & PFN_4G_MASK) == 0)
+ (*start)++;
+
+ if ((*end & PFN_4G_MASK) == 0)
+ (*end)--;
+
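+		/* If fewer than 16MB remain before the next 4GB boundary,
+		 * start past the boundary instead, and clamp the end so the
+		 * node never crosses it.
+		 */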
+ next_seg = ALIGN(*start, PFN_4G);
+ if (next_seg - *start <= PFN_16M)
+ *start = next_seg + 1;
+
+ *end = min(*end, ALIGN(*start, PFN_4G) - 1);
+ }
+}
+
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
+ int ret;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_file_priv *panfrost_priv;
@@ -322,7 +490,28 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
panfrost_priv->pfdev = pfdev;
file->driver_priv = panfrost_priv;
- return panfrost_job_open(panfrost_priv);
+ spin_lock_init(&panfrost_priv->mm_lock);
+
+	/* 4G of VA space is enough for now; this can be extended to 48-bit. */
+ drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+ panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
+
+ ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
+ if (ret)
+ goto err_pgtable;
+
+ ret = panfrost_job_open(panfrost_priv);
+ if (ret)
+ goto err_job;
+
+ return 0;
+
+err_job:
+ panfrost_mmu_pgtable_free(panfrost_priv);
+err_pgtable:
+ drm_mm_takedown(&panfrost_priv->mm);
+ kfree(panfrost_priv);
+ return ret;
}
static void
@@ -330,21 +519,19 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
- panfrost_perfcnt_close(panfrost_priv);
+ panfrost_perfcnt_close(file);
panfrost_job_close(panfrost_priv);
+ panfrost_mmu_pgtable_free(panfrost_priv);
+ drm_mm_takedown(&panfrost_priv->mm);
kfree(panfrost_priv);
}
-/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
- * address space. Note that render nodes would be able to submit jobs that
- * could access BOs from clients authenticated with the master node.
- */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
- PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH),
+ PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
@@ -352,13 +539,18 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
+/*
+ * Panfrost driver version:
+ * - 1.0 - initial interface
+ * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ */
static struct drm_driver panfrost_drm_driver = {
- .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = panfrost_open,
.postclose = panfrost_postclose,
.ioctls = panfrost_drm_driver_ioctls,
@@ -368,7 +560,7 @@ static struct drm_driver panfrost_drm_driver = {
.desc = "panfrost DRM",
.date = "20180908",
.major = 1,
- .minor = 0,
+ .minor = 1,
.gem_create_object = panfrost_gem_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -400,14 +592,8 @@ static int panfrost_probe(struct platform_device *pdev)
ddev->dev_private = pfdev;
pfdev->ddev = ddev;
- spin_lock_init(&pfdev->mm_lock);
-
- /* 4G enough for now. can be 48-bit */
- drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
-
- pm_runtime_use_autosuspend(pfdev->dev);
- pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
- pm_runtime_enable(pfdev->dev);
+ mutex_init(&pfdev->shrinker_lock);
+ INIT_LIST_HEAD(&pfdev->shrinker_list);
err = panfrost_device_init(pfdev);
if (err) {
@@ -423,20 +609,30 @@ static int panfrost_probe(struct platform_device *pdev)
goto err_out1;
}
+ pm_runtime_set_active(pfdev->dev);
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_enable(pfdev->dev);
+ pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
+ pm_runtime_use_autosuspend(pfdev->dev);
+
/*
* Register the DRM device with the core and the connectors with
* sysfs
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
- goto err_out1;
+ goto err_out2;
+
+ panfrost_gem_shrinker_init(ddev);
return 0;
+err_out2:
+ pm_runtime_disable(pfdev->dev);
+ panfrost_devfreq_fini(pfdev);
err_out1:
panfrost_device_fini(pfdev);
err_out0:
- pm_runtime_disable(pfdev->dev);
drm_dev_put(ddev);
return err;
}
@@ -447,10 +643,14 @@ static int panfrost_remove(struct platform_device *pdev)
struct drm_device *ddev = pfdev->ddev;
drm_dev_unregister(ddev);
+ panfrost_gem_shrinker_cleanup(ddev);
+
pm_runtime_get_sync(pfdev->dev);
- pm_runtime_put_sync_autosuspend(pfdev->dev);
- pm_runtime_disable(pfdev->dev);
+ panfrost_devfreq_fini(pfdev);
panfrost_device_fini(pfdev);
+ pm_runtime_put_sync_suspend(pfdev->dev);
+ pm_runtime_disable(pfdev->dev);
+
drm_dev_put(ddev);
return 0;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index b46416be5a54..17b654e1eb94 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,25 +19,195 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ /*
+ * Make sure the BO is no longer inserted in the shrinker list before
+ * taking care of the destruction itself. If we don't do that we have a
+ * race condition between this function and what's done in
+ * panfrost_gem_shrinker_scan().
+ */
+ mutex_lock(&pfdev->shrinker_lock);
+ list_del_init(&bo->base.madv_list);
+ mutex_unlock(&pfdev->shrinker_lock);
- spin_lock(&pfdev->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&pfdev->mm_lock);
+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
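+	/* sgts holds one sg_table per 2MB chunk of the BO; unmap and free
+	 * any that were populated.
+	 */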
+ if (bo->sgts) {
+ int i;
+ int n_sgt = bo->base.base.size / SZ_2M;
+
+ for (i = 0; i < n_sgt; i++) {
+ if (bo->sgts[i].sgl) {
+ dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
+ bo->sgts[i].nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&bo->sgts[i]);
+ }
+ }
+ kfree(bo->sgts);
+ }
drm_gem_shmem_free_object(obj);
}
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
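+	/* Find the mapping that belongs to this file's MMU context, if any,
+	 * and take a reference on it.
+	 */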
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
+
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ int ret;
+ size_t size = obj->size;
+ u64 align;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;
+
+ /*
+ * Executable buffers cannot cross a 16MB boundary as the program
+	 * counter is 24 bits wide. We assume executable buffers will be less than
+ * 16MB and aligning executable buffers to their size will avoid
+ * crossing a 16MB boundary.
+ */
+ if (!bo->noexec)
+ align = size >> PAGE_SHIFT;
+ else
+ align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+
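+	/* The NOEXEC flag doubles as the drm_mm color so that the color_adjust
+	 * callback can keep executable ranges away from 4GB boundaries.
+	 */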
+ mapping->mmu = &priv->mmu;
+ spin_lock(&priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
+ size >> PAGE_SHIFT, align, color, 0);
+ spin_unlock(&priv->mm_lock);
+ if (ret)
+ goto err;
+
+ if (!bo->is_heap) {
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
+ }
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
+ return ret;
+}
+
+void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ panfrost_gem_mapping_put(mapping);
+}
+
+static int panfrost_gem_pin(struct drm_gem_object *obj)
+{
+ if (to_panfrost_bo(obj)->is_heap)
+ return -EINVAL;
+
+ return drm_gem_shmem_pin(obj);
+}
+
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.free = panfrost_gem_free_object,
+ .open = panfrost_gem_open,
+ .close = panfrost_gem_close,
.print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
+ .pin = panfrost_gem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
@@ -50,32 +220,52 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
- int ret;
- struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
- u64 align;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
- size = roundup(size, PAGE_SIZE);
- align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+ return &obj->base.base;
+}
- spin_lock(&pfdev->mm_lock);
- ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node,
- size >> PAGE_SHIFT, align, 0, 0);
- spin_unlock(&pfdev->mm_lock);
- if (ret)
- goto free_obj;
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ u32 flags,
+ uint32_t *handle)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct panfrost_gem_object *bo;
- return &obj->base.base;
+ /* Round up heap allocations to 2MB to keep fault handling simple */
+ if (flags & PANFROST_BO_HEAP)
+ size = roundup(size, SZ_2M);
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ bo = to_panfrost_bo(&shmem->base);
+ bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
+ bo->is_heap = !!(flags & PANFROST_BO_HEAP);
-free_obj:
- kfree(obj);
- return ERR_PTR(ret);
+ /*
+	 * Register the object with an id in the file's handle table; the
+	 * handle is what userspace uses to refer to the BO.
+ */
+ ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put_unlocked(&shmem->base);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return bo;
}
struct drm_gem_object *
@@ -84,17 +274,14 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct panfrost_gem_object *pobj;
+ struct panfrost_gem_object *bo;
obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return ERR_CAST(obj);
- pobj = to_panfrost_bo(obj);
-
- obj->resv = attach->dmabuf->resv;
-
- panfrost_mmu_map(pobj);
+ bo = to_panfrost_bo(obj);
+ bo->noexec = true;
return obj;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 6dbcaba020fc..b3517ff9630c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -7,11 +7,46 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
+struct panfrost_mmu;
+
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgts;
+
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
+ /*
+ * Count the number of jobs referencing this BO so we don't let the
+ * shrinker reclaim this object prematurely.
+ */
+ atomic_t gpu_usecount;
- struct drm_mm_node node;
- bool is_mapped;
+ bool noexec :1;
+ bool is_heap :1;
+};
+
+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
};
static inline
@@ -20,6 +55,12 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
+{
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
+}
+
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *
@@ -27,4 +68,23 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ u32 flags,
+ uint32_t *handle);
+
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void panfrost_gem_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
+void panfrost_gem_shrinker_init(struct drm_device *dev);
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
new file mode 100644
index 000000000000..288e46c40673
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Arm Ltd.
+ *
+ * Based on msm_gem_freedreno.c:
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/list.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+static unsigned long
+panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct panfrost_device *pfdev =
+ container_of(shrinker, struct panfrost_device, shrinker);
+ struct drm_gem_shmem_object *shmem;
+ unsigned long count = 0;
+
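+	/* Don't block memory reclaim on the driver lock; if it is contended,
+	 * simply report that nothing is purgeable right now.
+	 */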
+ if (!mutex_trylock(&pfdev->shrinker_lock))
+ return 0;
+
+ list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
+ if (drm_gem_shmem_is_purgeable(shmem))
+ count += shmem->base.size >> PAGE_SHIFT;
+ }
+
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ return count;
+}
+
+static bool panfrost_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
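+	/* Don't purge BOs that are still referenced by in-flight jobs */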
+ if (atomic_read(&bo->gpu_usecount))
+ return false;
+
+ if (!mutex_trylock(&shmem->pages_lock))
+ return false;
+
+ panfrost_gem_teardown_mappings(bo);
+ drm_gem_shmem_purge_locked(obj);
+
+ mutex_unlock(&shmem->pages_lock);
+ return true;
+}
+
+static unsigned long
+panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct panfrost_device *pfdev =
+ container_of(shrinker, struct panfrost_device, shrinker);
+ struct drm_gem_shmem_object *shmem, *tmp;
+ unsigned long freed = 0;
+
+ if (!mutex_trylock(&pfdev->shrinker_lock))
+ return SHRINK_STOP;
+
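+	/* Walk the madvised list and drop backing pages until nr_to_scan
+	 * pages' worth of purgeable objects have been freed.
+	 */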
+ list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
+ if (freed >= sc->nr_to_scan)
+ break;
+ if (drm_gem_shmem_is_purgeable(shmem) &&
+ panfrost_gem_purge(&shmem->base)) {
+ freed += shmem->base.size >> PAGE_SHIFT;
+ list_del_init(&shmem->madv_list);
+ }
+ }
+
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ if (freed > 0)
+ pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+ return freed;
+}
+
+/**
+ * panfrost_gem_shrinker_init - Initialize panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function registers and sets up the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_init(struct drm_device *dev)
+{
+	struct panfrost_device *pfdev = dev->dev_private;
+
+ pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
+ pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
+ pfdev->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&pfdev->shrinker));
+}
+
+/**
+ * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function unregisters the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+
+	if (pfdev->shrinker.nr_deferred)
+		unregister_shrinker(&pfdev->shrinker);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 20ab333fc925..8822ec13a0d6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+ pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
@@ -232,6 +235,8 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
+ pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);
+
gpu_id = gpu_read(pfdev, GPU_ID);
pfdev->features.revision = gpu_id & 0xffff;
pfdev->features.id = gpu_id >> 16;
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
index cec6dcdadb5c..8e59d765bf19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_issues.h
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -13,37 +13,118 @@
* to care about.
*/
enum panfrost_hw_issue {
+	/* Need a way to guarantee that all previously-translated memory
+	 * accesses are committed */
HW_ISSUE_6367,
+
+ /* On job complete with non-done the cache is not flushed */
HW_ISSUE_6787,
+
+	/* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes an
+	 * instrumentation dump if PRFCNT_TILER_EN is enabled */
HW_ISSUE_8186,
+
+ /* TIB: Reports faults from a vtile which has not yet been allocated */
HW_ISSUE_8245,
+
+ /* uTLB deadlock could occur when writing to an invalid page at the
+ * same time as access to a valid page in the same uTLB cache line ( ==
+ * 4 PTEs == 16K block of mapping) */
HW_ISSUE_8316,
+
+ /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is
+ * still executing */
HW_ISSUE_8394,
+
+ /* CSE: Sends a TERMINATED response for a task that should not be
+ * terminated */
HW_ISSUE_8401,
+
+ /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader,
+ * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */
HW_ISSUE_8408,
+
+ /* Disable the Pause Buffer in the LS pipe. */
HW_ISSUE_8443,
+
+	/* Change in RMUs in use causes problems related to the core's SDC */
HW_ISSUE_8987,
+
+ /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop
+ * won't complete until all 4 tasks have completed */
HW_ISSUE_9435,
+
+ /* HT: Tiler returns TERMINATED for non-terminated command */
HW_ISSUE_9510,
+
+ /* Occasionally the GPU will issue multiple page faults for the same
+ * address before the MMU page table has been read by the GPU */
HW_ISSUE_9630,
+
+ /* RA DCD load request to SDC returns invalid load ignore causing
+ * colour buffer mismatch */
HW_ISSUE_10327,
+
+ /* MMU TLB invalidation hazards */
HW_ISSUE_10649,
+
+ /* Missing cache flush in multi core-group configuration */
HW_ISSUE_10676,
+
+ /* Chicken bit on T72X for a hardware workaround in the compiler */
HW_ISSUE_10797,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */
HW_ISSUE_10817,
+
+ /* Intermittent missing interrupt on job completion */
HW_ISSUE_10883,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR
+ * (similar to issue 10817) and can use the #10817 workaround */
HW_ISSUE_10959,
+
+ /* Soft-stopped fragment shader job can restart with an out-of-bounds
+ * restart index */
HW_ISSUE_10969,
+
+ /* Race condition can cause tile list corruption */
HW_ISSUE_11020,
+
+ /* Write buffer can cause tile list corruption */
HW_ISSUE_11024,
+
+ /* Pause buffer can cause a fragment job hang */
HW_ISSUE_11035,
+
+ /* Dynamic Core Scaling not supported due to an erratum */
HW_ISSUE_11056,
+
+ /* Clear the encoder state of a hard-stopped, AFBC-encoded fragment
+ * job by soft-resetting the GPU. Only for T76X r0p0, r0p1 and
+ * r0p1_50rel0 */
HW_ISSUE_T76X_3542,
+
+ /* Keep tiler module clock on to prevent GPU stall */
HW_ISSUE_T76X_3953,
+
+ /* Must ensure L2 is not transitioning when we reset. Work around this
+ * with a busy wait until L2 completes the transition, with a maximum
+ * loop count in case the transition never completes. (On chips without
+ * this erratum, an L2 transition during reset is harmless.) */
HW_ISSUE_TMIX_8463,
+
+ /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */
GPUCORE_1619,
+
+ /* When a hard-stop follows shortly after a soft-stop, the completion
+ * code for the terminated job may be incorrectly set to STOPPED */
HW_ISSUE_TMIX_8438,
+
+ /* "Protected mode" is buggy on some Bifrost chips such as Mali-G31,
+ * so the kernel must fiddle with L2 caches to prevent data leakage */
HW_ISSUE_TGOX_R1_1234,
+
HW_ISSUE_END
};
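Editorial aside: several of these erratum entries (HW_ISSUE_TMIX_8463 in particular) boil down to "poll for a condition, but bound the loop so a stuck unit cannot hang the caller". A minimal standalone sketch of that bounded busy-wait idiom follows; it is not driver code, and demo_l2_is_transitioning() and the limits are invented stand-ins.

/* Bounded busy-wait sketch: poll until the (fake) L2 transition ends,
 * but give up after DEMO_MAX_POLLS iterations instead of spinning forever. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_POLLS 10000

static bool demo_l2_is_transitioning(void)
{
	static int countdown = 5;	/* pretend the transition ends after 5 polls */
	return countdown-- > 0;
}

static int demo_wait_for_l2_idle(void)
{
	for (int i = 0; i < DEMO_MAX_POLLS; i++) {
		if (!demo_l2_is_transitioning())
			return 0;	/* safe to reset now */
		/* a real driver would cpu_relax()/udelay() here */
	}
	return -1;	/* timed out: report the error rather than hang */
}

int main(void)
{
	printf("wait result: %d\n", demo_wait_for_l2_idle());
	return 0;
}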
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9bb9260d9181..7157dfd7dea3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -6,7 +6,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>
@@ -141,7 +141,6 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
struct panfrost_device *pfdev = job->pfdev;
- unsigned long flags;
u32 cfg;
u64 jc_head = job->jc;
int ret;
@@ -150,11 +149,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
if (ret < 0)
return;
- if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
- goto end;
+ if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
+ pm_runtime_put_sync_autosuspend(pfdev->dev);
+ return;
+ }
- panfrost_devfreq_record_transition(pfdev, js);
- spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+ cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
+ panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -163,8 +164,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
/* start MMU, medium priority, cache clean/flush on end, clean/flush on
* start */
- /* TODO: different address spaces */
- cfg = JS_CONFIG_THREAD_PRI(8) |
+ cfg |= JS_CONFIG_THREAD_PRI(8) |
JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
@@ -184,12 +184,6 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
job, js, jc_head);
job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
-
- spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
-
-end:
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_put_autosuspend(pfdev->dev);
}
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -199,7 +193,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+ implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -209,7 +203,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ dma_resv_add_excl_fence(bos[i]->resv, fence);
}
int panfrost_job_push(struct panfrost_job *job)
@@ -274,9 +268,25 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
+ if (job->mappings) {
+ for (i = 0; i < job->bo_count; i++) {
+ if (!job->mappings[i])
+ break;
+
+ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
+ panfrost_gem_mapping_put(job->mappings[i]);
+ }
+ kvfree(job->mappings);
+ }
+
if (job->bos) {
- for (i = 0; i < job->bo_count; i++)
+ struct panfrost_gem_object *bo;
+
+ for (i = 0; i < job->bo_count; i++) {
+ bo = to_panfrost_bo(job->bos[i]);
drm_gem_object_put_unlocked(job->bos[i]);
+ }
+
kvfree(job->bos);
}
@@ -368,6 +378,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
int js = panfrost_job_get_slot(job);
+ unsigned long flags;
int i;
/*
@@ -377,30 +388,39 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
if (dma_fence_is_signaled(job->done_fence))
return;
- dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
+ job_read(pfdev, JS_CONFIG(js)),
job_read(pfdev, JS_STATUS(js)),
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
- mutex_lock(&pfdev->reset_lock);
+ if (!mutex_trylock(&pfdev->reset_lock))
+ return;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
- if (sched_job)
- drm_sched_increase_karma(sched_job);
+ drm_sched_stop(sched, sched_job);
+ if (js != i)
+ /* Ensure any timeouts on other slots have finished */
+ cancel_delayed_work_sync(&sched->work_tdr);
+ }
- /* panfrost_core_dump(pfdev); */
+ drm_sched_increase_karma(sched_job);
- panfrost_devfreq_record_transition(pfdev, js);
- panfrost_gpu_soft_reset(pfdev);
+ spin_lock_irqsave(&pfdev->js->job_lock, flags);
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ if (pfdev->jobs[i]) {
+ pm_runtime_put_noidle(pfdev->dev);
+ pfdev->jobs[i] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- /* TODO: Re-enable all other address spaces */
- panfrost_mmu_enable(pfdev, 0);
- panfrost_gpu_power_on(pfdev);
- panfrost_job_enable_interrupts(pfdev);
+ panfrost_devfreq_record_idle(pfdev);
+ panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
@@ -453,8 +473,21 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
}
if (status & JOB_INT_MASK_DONE(j)) {
- panfrost_devfreq_record_transition(pfdev, j);
- dma_fence_signal(pfdev->jobs[j]->done_fence);
+ struct panfrost_job *job;
+
+ spin_lock(&pfdev->js->job_lock);
+ job = pfdev->jobs[j];
+ /* Only NULL if job timeout occurred */
+ if (job) {
+ pfdev->jobs[j] = NULL;
+
+ panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
+ panfrost_devfreq_record_idle(pfdev);
+
+ dma_fence_signal_locked(job->done_fence);
+ pm_runtime_put_autosuspend(pfdev->dev);
+ }
+ spin_unlock(&pfdev->js->job_lock);
}
status &= ~mask;
@@ -525,12 +558,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int ret, i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+ sched = &js->queue[i].sched;
+ ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
if (WARN_ON(ret))
return ret;
}
@@ -550,14 +585,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
+ /* Check whether the hardware is idle */
+ if (atomic_read(&pfdev->devfreq.busy_count))
+ return false;
+
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
-
- /* Check whether the hardware is idle */
- if (pfdev->devfreq.slot[i].busy)
- return false;
}
return true;
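Editorial aside: in the panfrost_job.c changes above, both the timeout path and the DONE interrupt clear pfdev->jobs[slot] under js->job_lock, so whichever side runs second sees NULL and skips the fence signal and runtime-PM put. Below is a standalone, non-driver sketch of that "claim the slot under a lock; NULL means the other path already reclaimed it" idiom; names are invented and a pthread mutex stands in for the spinlock.

#include <pthread.h>
#include <stdio.h>

#define NUM_SLOTS 3

static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *jobs[NUM_SLOTS];

static void reclaim_slot(int slot, const char *who)
{
	pthread_mutex_lock(&job_lock);
	const char *job = jobs[slot];
	if (job) {		/* NULL means the other path got here first */
		jobs[slot] = NULL;
		printf("%s reclaims '%s' from slot %d\n", who, job, slot);
		/* real code would signal the fence and drop the PM ref here */
	} else {
		printf("%s finds slot %d already reclaimed\n", who, slot);
	}
	pthread_mutex_unlock(&job_lock);
}

int main(void)
{
	jobs[0] = "frame-42";
	reclaim_slot(0, "irq handler");		/* wins the race */
	reclaim_slot(0, "timeout handler");	/* sees NULL, does nothing */
	return 0;
}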
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 62454128a792..bbd3ba97ff67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -32,6 +32,7 @@ struct panfrost_job {
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 92ac995dd9c6..763cfca886a7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -9,6 +11,7 @@
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/shmem_fs.h>
#include <linux/sizes.h>
#include "panfrost_device.h"
@@ -20,12 +23,6 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
-struct panfrost_mmu {
- struct io_pgtable_cfg pgtbl_cfg;
- struct io_pgtable_ops *pgtbl_ops;
- struct mutex lock;
-};
-
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -83,13 +80,11 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
}
-static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size, u32 op)
+static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
+ u64 iova, size_t size, u32 op)
{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+ if (as_nr < 0)
+ return 0;
if (op != AS_COMMAND_UNLOCK)
lock_region(pfdev, as_nr, iova, size);
@@ -98,21 +93,29 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
write_cmd(pfdev, as_nr, op);
/* Wait for the flush to complete */
- ret = wait_ready(pfdev, as_nr);
+ return wait_ready(pfdev, as_nr);
+}
- spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+static int mmu_hw_do_operation(struct panfrost_device *pfdev,
+ struct panfrost_mmu *mmu,
+ u64 iova, size_t size, u32 op)
+{
+ int ret;
+ spin_lock(&pfdev->as_lock);
+ ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
+ spin_unlock(&pfdev->as_lock);
return ret;
}
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
- struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
+ int as_nr = mmu->as;
+ struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_write(pfdev, MMU_INT_CLEAR, ~0);
- mmu_write(pfdev, MMU_INT_MASK, ~0);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -126,8 +129,10 @@ void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
-static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
@@ -137,6 +142,80 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+ int as;
+
+ spin_lock(&pfdev->as_lock);
+
+ as = mmu->as;
+ if (as >= 0) {
+ int en = atomic_inc_return(&mmu->as_count);
+ WARN_ON(en >= NUM_JOB_SLOTS);
+
+ list_move(&mmu->list, &pfdev->as_lru_list);
+ goto out;
+ }
+
+ /* Check for a free AS */
+ as = ffz(pfdev->as_alloc_mask);
+ if (!(BIT(as) & pfdev->features.as_present)) {
+ struct panfrost_mmu *lru_mmu;
+
+ list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
+ if (!atomic_read(&lru_mmu->as_count))
+ break;
+ }
+ WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
+
+ list_del_init(&lru_mmu->list);
+ as = lru_mmu->as;
+
+ WARN_ON(as < 0);
+ lru_mmu->as = -1;
+ }
+
+ /* Assign the free or reclaimed AS to the FD */
+ mmu->as = as;
+ set_bit(as, &pfdev->as_alloc_mask);
+ atomic_set(&mmu->as_count, 1);
+ list_add(&mmu->list, &pfdev->as_lru_list);
+
+ dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
+
+ panfrost_mmu_enable(pfdev, mmu);
+
+out:
+ spin_unlock(&pfdev->as_lock);
+ return as;
+}
+
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+ atomic_dec(&mmu->as_count);
+ WARN_ON(atomic_read(&mmu->as_count) < 0);
+}
+
+void panfrost_mmu_reset(struct panfrost_device *pfdev)
+{
+ struct panfrost_mmu *mmu, *mmu_tmp;
+
+ spin_lock(&pfdev->as_lock);
+
+ pfdev->as_alloc_mask = 0;
+
+ list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
+ mmu->as = -1;
+ atomic_set(&mmu->as_count, 0);
+ list_del_init(&mmu->list);
+ }
+
+ spin_unlock(&pfdev->as_lock);
+
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+}
+
static size_t get_pgsize(u64 addr, size_t size)
{
if (addr & (SZ_2M - 1) || size < SZ_2M)
@@ -145,110 +224,110 @@ static size_t get_pgsize(u64 addr, size_t size)
return SZ_2M;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+ struct panfrost_mmu *mmu,
+ u64 iova, size_t size)
{
- struct drm_gem_object *obj = &bo->base.base;
- struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- unsigned int count;
- struct scatterlist *sgl;
- struct sg_table *sgt;
- int ret;
+ if (mmu->as < 0)
+ return;
- if (WARN_ON(bo->is_mapped))
- return 0;
+ pm_runtime_get_noresume(pfdev->dev);
- sgt = drm_gem_shmem_get_pages_sgt(obj);
- if (WARN_ON(IS_ERR(sgt)))
- return PTR_ERR(sgt);
+ /* Flush the PTs only if we're already awake */
+ if (pm_runtime_active(pfdev->dev))
+ mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- ret = pm_runtime_get_sync(pfdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_put_sync_autosuspend(pfdev->dev);
+}
- mutex_lock(&pfdev->mmu->lock);
+static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+ u64 iova, int prot, struct sg_table *sgt)
+{
+ unsigned int count;
+ struct scatterlist *sgl;
+ struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ u64 start_iova = iova;
for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
- dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+ dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
while (len) {
size_t pgsize = get_pgsize(iova | paddr, len);
- ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+ ops->map(ops, iova, paddr, pgsize, prot);
iova += pgsize;
paddr += pgsize;
len -= pgsize;
}
}
- mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
- bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+ panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
+
+ return 0;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_gem_object *bo = mapping->obj;
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct sg_table *sgt;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (WARN_ON(mapping->active))
+ return 0;
- mutex_unlock(&pfdev->mmu->lock);
+ if (bo->noexec)
+ prot |= IOMMU_NOEXEC;
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_put_autosuspend(pfdev->dev);
- bo->is_mapped = true;
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;
return 0;
}
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
- int ret;
-
- if (WARN_ON(!bo->is_mapped))
- return;
-
- dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
- ret = pm_runtime_get_sync(pfdev->dev);
- if (ret < 0)
+ if (WARN_ON(!mapping->active))
return;
- mutex_lock(&pfdev->mmu->lock);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page;
size_t pgsize = get_pgsize(iova, len - unmapped_len);
- unmapped_page = ops->unmap(ops, iova, pgsize);
- if (!unmapped_page)
- break;
-
- iova += unmapped_page;
- unmapped_len += unmapped_page;
+ if (ops->iova_to_phys(ops, iova)) {
+ unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
+ WARN_ON(unmapped_page != pgsize);
+ }
+ iova += pgsize;
+ unmapped_len += pgsize;
}
- mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
- bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
-
- mutex_unlock(&pfdev->mmu->lock);
-
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_put_autosuspend(pfdev->dev);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
-{
- struct panfrost_device *pfdev = cookie;
-
- mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
-}
-
-static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
{}
static void mmu_tlb_sync_context(void *cookie)
@@ -257,12 +336,207 @@ static void mmu_tlb_sync_context(void *cookie)
// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
+ void *cookie)
+{
+ mmu_tlb_sync_context(cookie);
+}
+
+static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
+ void *cookie)
+{
+ mmu_tlb_sync_context(cookie);
+}
+
+static const struct iommu_flush_ops mmu_tlb_ops = {
.tlb_flush_all = mmu_tlb_inv_context_s1,
- .tlb_add_flush = mmu_tlb_inv_range_nosync,
- .tlb_sync = mmu_tlb_sync_context,
+ .tlb_flush_walk = mmu_tlb_flush_walk,
+ .tlb_flush_leaf = mmu_tlb_flush_leaf,
};
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
+{
+ struct panfrost_mmu *mmu = &priv->mmu;
+ struct panfrost_device *pfdev = priv->pfdev;
+
+ INIT_LIST_HEAD(&mmu->list);
+ mmu->as = -1;
+
+ mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
+ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = pfdev->dev,
+ };
+
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
+ priv);
+ if (!mmu->pgtbl_ops)
+ return -EINVAL;
+
+ return 0;
+}
+
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
+{
+ struct panfrost_device *pfdev = priv->pfdev;
+ struct panfrost_mmu *mmu = &priv->mmu;
+
+ spin_lock(&pfdev->as_lock);
+ if (mmu->as >= 0) {
+ pm_runtime_get_noresume(pfdev->dev);
+ if (pm_runtime_active(pfdev->dev))
+ panfrost_mmu_disable(pfdev, mmu->as);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ clear_bit(mmu->as, &pfdev->as_alloc_mask);
+ clear_bit(mmu->as, &pfdev->as_in_use_mask);
+ list_del(&mmu->list);
+ }
+ spin_unlock(&pfdev->as_lock);
+
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+}
+
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
+{
+ struct panfrost_gem_mapping *mapping = NULL;
+ struct panfrost_file_priv *priv;
+ struct drm_mm_node *node;
+ u64 offset = addr >> PAGE_SHIFT;
+ struct panfrost_mmu *mmu;
+
+ spin_lock(&pfdev->as_lock);
+ list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
+ if (as == mmu->as)
+ goto found_mmu;
+ }
+ goto out;
+
+found_mmu:
+ priv = container_of(mmu, struct panfrost_file_priv, mmu);
+
+ spin_lock(&priv->mm_lock);
+
+ drm_mm_for_each_node(node, &priv->mm) {
+ if (offset >= node->start &&
+ offset < (node->start + node->size)) {
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
+ break;
+ }
+ }
+
+ spin_unlock(&priv->mm_lock);
+out:
+ spin_unlock(&pfdev->as_lock);
+ return mapping;
+}
+
+#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ u64 addr)
+{
+ int ret, i;
+ struct panfrost_gem_mapping *bomapping;
+ struct panfrost_gem_object *bo;
+ struct address_space *mapping;
+ pgoff_t page_offset;
+ struct sg_table *sgt;
+ struct page **pages;
+
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
+ return -ENOENT;
+
+ bo = bomapping->obj;
+ if (!bo->is_heap) {
+ dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+ bomapping->mmnode.start << PAGE_SHIFT);
+ ret = -EINVAL;
+ goto err_bo;
+ }
+ WARN_ON(bomapping->mmu->as != as);
+
+ /* Assume 2MB alignment and size multiple */
+ addr &= ~((u64)SZ_2M - 1);
+ page_offset = addr >> PAGE_SHIFT;
+ page_offset -= bomapping->mmnode.start;
+
+ mutex_lock(&bo->base.pages_lock);
+
+ if (!bo->base.pages) {
+ bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
+ sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
+ if (!bo->sgts) {
+ mutex_unlock(&bo->base.pages_lock);
+ ret = -ENOMEM;
+ goto err_bo;
+ }
+
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+ kfree(bo->sgts);
+ bo->sgts = NULL;
+ mutex_unlock(&bo->base.pages_lock);
+ ret = -ENOMEM;
+ goto err_bo;
+ }
+ bo->base.pages = pages;
+ bo->base.pages_use_count = 1;
+ } else
+ pages = bo->base.pages;
+
+ mapping = bo->base.base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
+
+ for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ pages[i] = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(pages[i])) {
+ mutex_unlock(&bo->base.pages_lock);
+ ret = PTR_ERR(pages[i]);
+ goto err_pages;
+ }
+ }
+
+ mutex_unlock(&bo->base.pages_lock);
+
+ sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
+ ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+ NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+ if (ret)
+ goto err_pages;
+
+ if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+ ret = -EINVAL;
+ goto err_map;
+ }
+
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+
+ bomapping->active = true;
+
+ dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+
+ panfrost_gem_mapping_put(bomapping);
+
+ return 0;
+
+err_map:
+ sg_free_table(sgt);
+err_pages:
+ drm_gem_shmem_put_pages(&bo->base);
+err_bo:
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return ret;
+}
+
static const char *access_type_name(struct panfrost_device *pfdev,
u32 fault_status)
{
@@ -287,13 +561,19 @@ static const char *access_type_name(struct panfrost_device *pfdev,
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 status = mmu_read(pfdev, MMU_INT_STAT);
- int i;
- if (!status)
+ if (!mmu_read(pfdev, MMU_INT_STAT))
return IRQ_NONE;
- dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+ int i, ret;
for (i = 0; status; i++) {
u32 mask = BIT(i) | BIT(i + 16);
@@ -315,6 +595,18 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
+ /* Page fault only */
+ if ((status & mask) == BIT(i)) {
+ WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+
+ ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+ if (!ret) {
+ mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
+ status &= ~mask;
+ continue;
+ }
+ }
+
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
@@ -337,50 +629,26 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
status &= ~mask;
}
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
return IRQ_HANDLED;
};
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
- struct io_pgtable_ops *pgtbl_ops;
int err, irq;
- pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
- if (!pfdev->mmu)
- return -ENOMEM;
-
- mutex_init(&pfdev->mmu->lock);
-
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
if (irq <= 0)
return -ENODEV;
- err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
- IRQF_SHARED, "mmu", pfdev);
+ err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ panfrost_mmu_irq_handler_thread,
+ IRQF_SHARED, "mmu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
return err;
}
- mmu_write(pfdev, MMU_INT_CLEAR, ~0);
- mmu_write(pfdev, MMU_INT_MASK, ~0);
-
- pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
- .tlb = &mmu_tlb_ops,
- .iommu_dev = pfdev->dev,
- };
-
- pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
- pfdev);
- if (!pgtbl_ops)
- return -ENOMEM;
-
- pfdev->mmu->pgtbl_ops = pgtbl_ops;
-
- panfrost_mmu_enable(pfdev, 0);
return 0;
}
@@ -388,7 +656,4 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
- mmu_disable(pfdev, 0);
-
- free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}
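Editorial aside: the new heap fault handler above assumes 2 MB granularity; it rounds the faulting VA down to a 2 MB boundary, turns that into a page offset within the mapping, and keeps one sg_table per 2 MB chunk. The following worked example (not driver code) replays that arithmetic with an assumed 4 KiB page size and made-up addresses.

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define SZ_2M		(2ULL * 1024 * 1024)
#define NUM_FAULT_PAGES	(SZ_2M >> PAGE_SHIFT)	/* 512 pages per 2 MiB chunk */

int main(void)
{
	uint64_t mmnode_start = 0x100000;	/* mapping start, in pages */
	uint64_t fault_addr   = 0x100345678;	/* faulting GPU VA, in bytes */

	uint64_t chunk_addr  = fault_addr & ~(SZ_2M - 1);	/* round down to 2 MiB */
	uint64_t page_offset = (chunk_addr >> PAGE_SHIFT) - mmnode_start;
	uint64_t sgt_index   = page_offset / NUM_FAULT_PAGES;

	printf("chunk base VA : 0x%" PRIx64 "\n", chunk_addr);
	printf("page offset   : %" PRIu64 "\n", page_offset);
	printf("sg_table index: %" PRIu64 ", pages mapped per fault: %llu\n",
	       sgt_index, (unsigned long long)NUM_FAULT_PAGES);
	return 0;
}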
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index f5878d86a5ce..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,14 +4,21 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
+struct panfrost_file_priv;
+struct panfrost_mmu;
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
+void panfrost_mmu_reset(struct panfrost_device *pfdev);
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 83c57d325ca8..684820448be3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -16,6 +16,7 @@
#include "panfrost_issues.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"
#define COUNTERS_PER_BLOCK 64
@@ -24,7 +25,7 @@
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
@@ -48,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
@@ -66,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
}
static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
- struct panfrost_file_priv *user,
+ struct drm_file *file_priv,
unsigned int counterset)
{
+ struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
u32 cfg;
@@ -87,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (IS_ERR(bo))
return PTR_ERR(bo);
- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_mmu_map(perfcnt->bo);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_put_bo;
+ goto err_put_mapping;
}
/*
@@ -152,18 +159,26 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
+err_close_bo:
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
}
static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
- struct panfrost_file_priv *user)
+ struct drm_file *file_priv)
{
+ struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
if (user != perfcnt->user)
@@ -177,10 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -190,7 +206,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_enable *req = data;
@@ -206,10 +221,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
mutex_lock(&perfcnt->lock);
if (req->enable)
- ret = panfrost_perfcnt_enable_locked(pfdev, pfile,
+ ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
req->counterset);
else
- ret = panfrost_perfcnt_disable_locked(pfdev, pfile);
+ ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
return ret;
@@ -247,15 +262,16 @@ out:
return ret;
}
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile)
+void panfrost_perfcnt_close(struct drm_file *file_priv)
{
+ struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = pfile->pfdev;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
pm_runtime_get_sync(pfdev->dev);
mutex_lock(&perfcnt->lock);
if (perfcnt->user == pfile)
- panfrost_perfcnt_disable_locked(pfdev, pfile);
+ panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
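Editorial aside: the perfcnt enable path now hands the buffer's lifetime over to the mapping ("The BO ref is retained by the mapping"): it takes a mapping reference, then drops the GEM reference obtained at creation, and the disable path releases the mapping instead of the BO. A toy refcount model of that handoff follows; it is not driver code and all names are invented.

#include <stdio.h>

struct demo_bo      { int refs; };
struct demo_mapping { int refs; struct demo_bo *bo; };

static void bo_put(struct demo_bo *bo)
{
	if (--bo->refs == 0)
		printf("buffer freed\n");
}

static void mapping_put(struct demo_mapping *m)
{
	if (--m->refs == 0) {
		printf("mapping freed\n");
		bo_put(m->bo);		/* the mapping owned one buffer ref */
	}
}

int main(void)
{
	struct demo_bo bo = { .refs = 1 };		/* creation ref */
	struct demo_mapping map = { .refs = 1, .bo = &bo };

	bo.refs++;		/* mapping takes its own buffer ref */
	bo_put(&bo);		/* "enable" drops the creation ref */
	mapping_put(&map);	/* "disable"/close drops the mapping, then the buffer */
	return 0;
}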
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
index 13b8fdaa1b43..8bbcf5f5fb33 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
@@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
int panfrost_perfcnt_init(struct panfrost_device *pfdev);
void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile);
+void panfrost_perfcnt_close(struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 8d6a40469f0b..3c8e82016854 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -5,8 +5,10 @@
#include <linux/amba/clcd-regs.h>
#include <linux/seq_file.h>
+
#include <drm/drm_debugfs.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+
#include "pl111_drm.h"
#define REGDEF(reg) { reg, #reg }
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 15d2755fdba4..703ddc803c55 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -11,14 +11,16 @@
#include <linux/amba/clcd-regs.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_vblank.h>
#include "pl111_drm.h"
@@ -46,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data)
}
static enum drm_mode_status
-pl111_mode_valid(struct drm_crtc *crtc,
+pl111_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
- struct drm_device *drm = crtc->dev;
+ struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cpp = priv->variant->fb_bpp / 8;
u64 bw;
@@ -126,6 +128,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = plane->state->fb;
struct drm_connector *connector = priv->connector;
struct drm_bridge *bridge = priv->bridge;
+ bool grayscale = false;
u32 cntl;
u32 ppl, hsw, hfp, hbp;
u32 lpp, vsw, vfp, vbp;
@@ -185,6 +188,20 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
if (connector->display_info.bus_flags &
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
tim2 |= TIM2_IPC;
+
+ if (connector->display_info.num_bus_formats == 1 &&
+ connector->display_info.bus_formats[0] ==
+ MEDIA_BUS_FMT_Y8_1X8)
+ grayscale = true;
+
+ /*
+ * The AC pin bias frequency is set to max count when using
+ * grayscale so at least once in a while we will reverse
+ * polarity and get rid of any DC build-up that could
+ * damage the display.
+ */
+ if (grayscale)
+ tim2 |= TIM2_ACB_MASK;
}
if (bridge) {
@@ -216,8 +233,18 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
writel(0, priv->regs + CLCD_TIM3);
- /* Hard-code TFT panel */
- cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+ /*
+ * Detect grayscale bus format. We do not support a grayscale mode
+ * toward userspace; instead we expose an RGB24 buffer and then the
+ * hardware will activate its grayscaler to convert to the grayscale
+ * format.
+ */
+ if (grayscale)
+ cntl = CNTL_LCDEN | CNTL_LCDMONO8;
+ else
+ /* Else we assume TFT display */
+ cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+
/* On the ST Micro variant, assume all 24 bits are connected */
if (priv->variant->st_bitmux_control)
cntl |= CNTL_ST_CDWID_24;
@@ -546,25 +573,8 @@ pl111_init_clock_divider(struct drm_device *drm)
int pl111_display_init(struct drm_device *drm)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
- struct device *dev = drm->dev;
- struct device_node *endpoint;
- u32 tft_r0b0g0[3];
int ret;
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (!endpoint)
- return -ENODEV;
-
- if (of_property_read_u32_array(endpoint,
- "arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0,
- ARRAY_SIZE(tft_r0b0g0)) != 0) {
- dev_err(dev, "arm,pl11x,tft-r0g0b0-pads should be 3 ints\n");
- of_node_put(endpoint);
- return -ENOENT;
- }
- of_node_put(endpoint);
-
ret = pl111_init_clock_divider(drm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index b2c5e9f34051..77d2da9a8a7c 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -13,14 +13,15 @@
#ifndef _PL111_DRM_H_
#define _PL111_DRM_H_
-#include <drm/drm_gem.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
+
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_gem.h>
#include <drm/drm_panel.h>
-#include <drm/drm_bridge.h>
-#include <linux/clk-provider.h>
-#include <linux/interrupt.h>
+#include <drm/drm_simple_kms_helper.h>
#define CLCD_IRQ_NEXTBASE_UPDATE BIT(2)
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 01f8462aa2db..aa8aa8d9e405 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -48,18 +48,18 @@
#include <linux/amba/bus.h>
#include <linux/amba/clcd-regs.h>
-#include <linux/version.h>
-#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/version.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -67,6 +67,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "pl111_drm.h"
#include "pl111_versatile.h"
@@ -149,8 +150,8 @@ static int pl111_modeset_init(struct drm_device *dev)
return -EPROBE_DEFER;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_config;
@@ -165,7 +166,7 @@ static int pl111_modeset_init(struct drm_device *dev)
priv->bridge = bridge;
if (panel) {
priv->panel = panel;
- priv->connector = panel->connector;
+ priv->connector = drm_panel_bridge_connector(bridge);
}
ret = pl111_display_init(dev);
@@ -224,7 +225,7 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver pl111_drm_driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.ioctls = NULL,
.fops = &drm_fops,
.name = "pl111",
@@ -238,9 +239,7 @@ static struct drm_driver pl111_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.h b/drivers/gpu/drm/pl111/pl111_nomadik.h
index 19d663d46353..47ccf5c839fc 100644
--- a/drivers/gpu/drm/pl111/pl111_nomadik.h
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.h
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
-#include <linux/device.h>
#ifndef PL111_NOMADIK_H
#define PL111_NOMADIK_H
#endif
+struct device;
+
#ifdef CONFIG_ARCH_NOMADIK
void pl111_nomadik_init(struct device *dev);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 38f4ee05285e..09aeaffb7660 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
+
#include <linux/amba/clcd-regs.h>
+#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+
#include "pl111_versatile.h"
#include "pl111_vexpress.h"
#include "pl111_drm.h"
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.h b/drivers/gpu/drm/pl111/pl111_versatile.h
index 41aa6d969dc6..143877010042 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.h
+++ b/drivers/gpu/drm/pl111/pl111_versatile.h
@@ -4,6 +4,9 @@
#ifndef PL111_VERSATILE_H
#define PL111_VERSATILE_H
+struct device;
+struct pl111_drm_dev_private;
+
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv);
#endif
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 38c938c9adda..350570fe06b5 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -51,6 +51,7 @@ int pl111_vexpress_clcd_init(struct device *dev,
}
if (of_device_is_compatible(child, "arm,hdlcd")) {
has_coretile_hdlcd = true;
+ of_node_put(child);
break;
}
}
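Editorial aside: the one-line pl111_vexpress.c fix above follows the usual device-tree iteration rule: the child-node iterators hold a reference on the node they hand out, so breaking out of the loop early must drop that reference with of_node_put() or it leaks. Below is a standalone, non-kernel sketch of a refcounted iterator that shows the same rule; the types and names are invented.

#include <stdio.h>

struct demo_node { const char *name; int refs; };

static struct demo_node nodes[] = {
	{ "panel", 0 }, { "hdlcd", 0 }, { "serial", 0 },
};
#define N_NODES (sizeof(nodes) / sizeof(nodes[0]))

/* Iterator in the style of the OF child iterators: drops the ref on the
 * previous node and takes one on the node it returns. */
static struct demo_node *demo_next(struct demo_node *prev)
{
	struct demo_node *next = prev ? prev + 1 : &nodes[0];

	if (prev)
		prev->refs--;
	if (next >= nodes + N_NODES)
		return NULL;
	next->refs++;
	return next;
}

int main(void)
{
	for (struct demo_node *n = demo_next(NULL); n; n = demo_next(n)) {
		if (n->name[1] == 'd') {	/* "found the hdlcd node" */
			n->refs--;		/* the of_node_put() in the patch */
			break;			/* without it, this ref would leak */
		}
	}
	for (unsigned int i = 0; i < N_NODES; i++)
		printf("%-6s refs=%d\n", nodes[i].name, nodes[i].refs);
	return 0;
}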
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d0d691b31f4a..ca3f51c2a8fe 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -4,6 +4,7 @@ config DRM_QXL
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
+ select DRM_TTM_HELPER
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 0a2e51af1230..ef09dc6bc635 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -25,6 +25,8 @@
/* QXL cmd/ring handling */
+#include <linux/delay.h>
+
#include <drm/drm_util.h>
#include "qxl_drv.h"
@@ -375,7 +377,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
qdev->primary_bo->is_primary = false;
- drm_gem_object_put_unlocked(&qdev->primary_bo->gem_base);
+ drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
qdev->primary_bo = NULL;
}
@@ -402,7 +404,7 @@ void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
qdev->primary_bo = bo;
qdev->primary_bo->is_primary = true;
- drm_gem_object_get(&qdev->primary_bo->gem_base);
+ drm_gem_object_get(&qdev->primary_bo->tbo.base);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 118422549828..a4f4175bbdbe 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -28,9 +28,9 @@
* Alon Levy <alevy@redhat.com>
*/
-#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
-#include <drm/drmP.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -57,16 +57,16 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
int rel;
rcu_read_lock();
- fobj = rcu_dereference(bo->tbo.resv->fence);
+ fobj = rcu_dereference(bo->tbo.base.resv->fence);
rel = fobj ? fobj->shared_count : 0;
rcu_read_unlock();
seq_printf(m, "size %ld, pc %d, num releases %d\n",
- (unsigned long)bo->gem_base.size,
+ (unsigned long)bo->tbo.base.size,
bo->pin_count, rel);
}
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b319ebbb0fb..16d73b22f3f5 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,11 +24,14 @@
*/
#include <linux/crc32.h>
+#include <linux/delay.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -794,7 +797,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
qdev->dumb_shadow_bo->surf.height != surf.height) {
if (qdev->dumb_shadow_bo) {
drm_gem_object_put_unlocked
- (&qdev->dumb_shadow_bo->gem_base);
+ (&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
}
qxl_bo_create(qdev, surf.height * surf.stride,
@@ -804,10 +807,10 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
if (user_bo->shadow != qdev->dumb_shadow_bo) {
if (user_bo->shadow) {
drm_gem_object_put_unlocked
- (&user_bo->shadow->gem_base);
+ (&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
- drm_gem_object_get(&qdev->dumb_shadow_bo->gem_base);
+ drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
user_bo->shadow = qdev->dumb_shadow_bo;
}
}
@@ -838,7 +841,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
qxl_bo_unpin(user_bo);
if (old_state->fb != plane->state->fb && user_bo->shadow) {
- drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
+ drm_gem_object_put_unlocked(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 97c3f1a95a32..5bebf1ea1c5d 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -20,6 +20,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_fourcc.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..1d601f57a6ba 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -28,14 +28,18 @@
* Alon Levy <alevy@redhat.com>
*/
-#include <linux/module.h>
+#include "qxl_drv.h"
#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
-#include "qxl_drv.h"
+
#include "qxl_object.h"
static const struct pci_device_id pciidlist[] = {
@@ -59,6 +63,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
static struct drm_driver qxl_driver;
static struct pci_driver qxl_pci_driver;
+static bool is_vga(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
static int
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -79,13 +88,21 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto free_dev;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
goto disable_pci;
+ if (is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+ if (ret) {
+ DRM_ERROR("can't get legacy vga ioports\n");
+ goto disable_pci;
+ }
+ }
+
ret = qxl_device_init(qdev, &qxl_driver, pdev);
if (ret)
- goto disable_pci;
+ goto put_vga;
ret = qxl_modeset_init(qdev);
if (ret)
@@ -105,6 +122,9 @@ modeset_cleanup:
qxl_modeset_fini(qdev);
unload:
qxl_device_fini(qdev);
+put_vga:
+ if (is_vga(pdev))
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
free_dev:
@@ -122,21 +142,15 @@ qxl_pci_remove(struct pci_dev *pdev)
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
+ if (is_vga(pdev))
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
dev->dev_private = NULL;
kfree(qdev);
drm_dev_put(dev);
}
-static const struct file_operations qxl_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .mmap = qxl_mmap,
-};
+DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
@@ -206,16 +220,14 @@ static int qxl_pm_resume(struct device *dev)
static int qxl_pm_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return qxl_drm_resume(drm_dev, true);
}
static int qxl_pm_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return qxl_drm_freeze(drm_dev);
}
@@ -247,8 +259,7 @@ static struct pci_driver qxl_pci_driver = {
};
static struct drm_driver qxl_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
@@ -257,18 +268,8 @@ static struct drm_driver qxl_driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = qxl_gem_prime_pin,
- .gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
- .gem_prime_vmap = qxl_gem_prime_vmap,
- .gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object_unlocked = qxl_gem_object_free,
- .gem_open_object = qxl_gem_object_open,
- .gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.irq_handler = qxl_irq_handler,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 2896bb6fdbf4..27e45a2d6b52 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,22 +31,22 @@
*/
#include <linux/dma-fence.h>
-#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-/* just for ttm_validate_buffer */
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -72,12 +72,13 @@ extern int qxl_max_ioctls;
QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
struct qxl_bo {
+ struct ttm_buffer_object tbo;
+
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
struct ttm_place placements[3];
struct ttm_placement placement;
- struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
unsigned int pin_count;
void *kptr;
@@ -85,7 +86,6 @@ struct qxl_bo {
int type;
/* Constant after initialization */
- struct drm_gem_object gem_base;
unsigned int is_primary:1; /* is this now a primary surface */
unsigned int is_dumb:1;
struct qxl_bo *shadow;
@@ -94,7 +94,7 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_release *surf_create;
};
-#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
@@ -355,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/* qxl image */
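Editorial aside: with struct ttm_buffer_object now embedded at the start of struct qxl_bo and the GEM object living inside it as tbo.base, the reworked gem_to_qxl_bo() is simply container_of() over the nested member. A standalone sketch of that embedding/recovery pattern follows, using toy types rather than the real TTM/GEM structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_gem_object { size_t size; };
struct demo_ttm_bo     { struct demo_gem_object base; };

struct demo_qxl_bo {
	struct demo_ttm_bo tbo;		/* embedded, like qxl_bo::tbo */
	unsigned int pin_count;
};

#define gem_to_demo_bo(gobj) container_of(gobj, struct demo_qxl_bo, tbo.base)

int main(void)
{
	struct demo_qxl_bo bo = { .tbo.base.size = 4096, .pin_count = 1 };
	struct demo_gem_object *gobj = &bo.tbo.base;	/* what GEM code sees */

	struct demo_qxl_bo *back = gem_to_demo_bo(gobj);
	printf("recovered pin_count=%u size=%zu (same object: %d)\n",
	       back->pin_count, back->tbo.base.size, back == &bo);
	return 0;
}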
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 89606c819d82..69f37db1027a 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -23,7 +23,6 @@
* Alon Levy
*/
-#include <drm/drmP.h>
#include <drm/drm.h>
#include "qxl_drv.h"
@@ -64,7 +63,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
size, initial_domain, alignment, r);
return r;
}
- *obj = &qbo->gem_base;
+ *obj = &qbo->tbo.base;
mutex_lock(&qdev->gem.mutex);
list_add_tail(&qbo->list, &qdev->gem.objects);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index d410e2925162..8117a45b3610 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -23,6 +23,9 @@
* Alon Levy
*/
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 3bb31add6350..8435af108632 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -23,6 +23,10 @@
* Alon Levy
*/
+#include <linux/pci.h>
+
+#include <drm/drm_irq.h>
+
#include "qxl_drv.h"
irqreturn_t qxl_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bee61fa2c9bc..bfc1631093e9 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -23,11 +23,14 @@
* Alon Levy
*/
-#include "qxl_drv.h"
-#include "qxl_object.h"
+#include <linux/io-mapping.h>
+#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
-#include <linux/io-mapping.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
int qxl_log_level;
@@ -181,7 +184,7 @@ int qxl_device_init(struct qxl_device *qdev,
if (!qxl_check_device(qdev)) {
r = -ENODEV;
- goto surface_mapping_free;
+ goto rom_unmap;
}
r = qxl_bo_init(qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 4928fa602944..ab72dc3476e9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,14 +33,14 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
- qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
qxl_surface_evict(qdev, bo, false);
WARN_ON_ONCE(bo->map_count > 0);
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
- drm_gem_object_release(&bo->gem_base);
+ drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
@@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+ u32 pflag = 0;
unsigned int i;
+ if (pinned)
+ pflag |= TTM_PL_FLAG_NO_EVICT;
+ if (qbo->tbo.base.size <= PAGE_SIZE)
+ pflag |= TTM_PL_FLAG_TOPDOWN;
+
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
@@ -77,6 +82,19 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
}
}
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+ .free = qxl_gem_object_free,
+ .open = qxl_gem_object_open,
+ .close = qxl_gem_object_close,
+ .pin = qxl_gem_prime_pin,
+ .unpin = qxl_gem_prime_unpin,
+ .get_sg_table = qxl_gem_prime_get_sg_table,
+ .vmap = qxl_gem_prime_vmap,
+ .vunmap = qxl_gem_prime_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
@@ -95,11 +113,12 @@ int qxl_bo_create(struct qxl_device *qdev,
if (bo == NULL)
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
- r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
+ r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
+ bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
@@ -148,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
@@ -160,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
+ ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
@@ -193,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
(bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
-
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);
@@ -214,20 +224,20 @@ void qxl_bo_unref(struct qxl_bo **bo)
if ((*bo) == NULL)
return;
- drm_gem_object_put_unlocked(&(*bo)->gem_base);
+ drm_gem_object_put_unlocked(&(*bo)->tbo.base);
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
- drm_gem_object_get(&bo->gem_base);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
int r;
if (bo->pin_count) {
@@ -247,7 +257,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
int r, i;
if (!bo->pin_count) {
@@ -310,13 +320,13 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
+ &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+ *((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 255b914e2a7b..8ae54ba7857c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -34,7 +34,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
dev_err(ddev->dev, "%p reserve failed\n", bo);
}
@@ -60,7 +60,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
@@ -71,7 +71,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
dev_err(ddev->dev, "%p reserve failed for wait\n",
bo);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 49f9a9385393..2feca734c7b1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -19,9 +19,13 @@
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+
+#include <linux/delay.h>
+
+#include <trace/events/dma_fence.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -234,12 +238,12 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
- ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
+ ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
if (ret)
return ret;
/* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
if (ret)
return ret;
return 0;
@@ -256,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL, true);
+ !no_intr, NULL);
if (ret)
return ret;
@@ -425,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -447,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- glob = bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- reservation_object_add_shared_fence(bo->resv, &release->base);
- ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ dma_resv_add_shared_fence(bo->base.resv, &release->base);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0234f8556ada..16a5e903533d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -23,19 +23,21 @@
* Alon Levy
*/
+#include <linux/delay.h>
+
+#include <drm/drm.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/qxl_drm.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_placement.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <linux/delay.h>
-
static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
struct qxl_mman *mman;
@@ -46,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static struct vm_operations_struct qxl_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- vm_fault_t ret;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- ret = ttm_vm_ops->fault(vmf);
- return ret;
-}
-
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int r;
- struct drm_file *file_priv = filp->private_data;
- struct qxl_device *qdev = file_priv->minor->dev->dev_private;
-
- if (qdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- filp->private_data, vma->vm_pgoff);
-
- r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- qxl_ttm_vm_ops = *ttm_vm_ops;
- qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
- }
- vma->vm_ops = &qxl_ttm_vm_ops;
- return 0;
-}
-
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -149,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct qxl_bo *qbo = to_qxl_bo(bo);
-
- return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
- filp->private_data);
-}
-
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -295,7 +248,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo))
return;
qbo = to_qxl_bo(bo);
- qdev = qbo->gem_base.dev->dev_private;
+ qdev = qbo->tbo.base.dev->dev_private;
if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
@@ -308,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
- .verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
@@ -323,6 +275,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
+ qdev->ddev.vma_offset_manager,
false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -366,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct qxl_device *rdev = dev->dev_private;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
index ae8a1860c6b8..c07a069533ef 100644
--- a/drivers/gpu/drm/r128/Makefile
+++ b/drivers/gpu/drm/r128/Makefile
@@ -3,7 +3,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o ati_pcigart.o
r128-$(CONFIG_COMPAT) += r128_ioc32.o
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 2a413e291a60..9b4072f97215 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -33,12 +33,12 @@
#include <linux/export.h>
-#include <drm/ati_pcigart.h>
#include <drm/drm_device.h>
-#include <drm/drm_os_linux.h>
#include <drm/drm_pci.h>
#include <drm/drm_print.h>
+#include "ati_pcigart.h"
+
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
@@ -96,7 +96,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
return 1;
}
-EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
@@ -169,6 +168,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+ u32 offset;
u32 val;
switch(gart_info->gart_reg_if) {
@@ -184,10 +184,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
break;
}
if (gart_info->gart_table_location ==
- DRM_ATI_GART_MAIN)
+ DRM_ATI_GART_MAIN) {
pci_gart[gart_idx] = cpu_to_le32(val);
- else
- DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+ } else {
+ offset = gart_idx * sizeof(u32);
+ writel(val, (void __iomem *)map->handle + offset);
+ }
gart_idx++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
@@ -205,4 +207,3 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_info->bus_addr = bus_address;
return ret;
}
-EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/drivers/gpu/drm/r128/ati_pcigart.h b/drivers/gpu/drm/r128/ati_pcigart.h
new file mode 100644
index 000000000000..a728a1364e66
--- /dev/null
+++ b/drivers/gpu/drm/r128/ati_pcigart.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DRM_ATI_PCIGART_H
+#define DRM_ATI_PCIGART_H
+
+#include <drm/drm_legacy.h>
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+ int gart_table_location;
+ int gart_reg_if;
+ void *addr;
+ dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ struct drm_dma_handle *table_handle;
+ struct drm_local_map mapping;
+ int table_size;
+};
+
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+
+#endif
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index fd74f744604f..b7a5f162ebae 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -30,10 +30,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_vblank.h>
#include <drm/r128_drm.h>
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index ba8c30ed91d1..8b256123cf2b 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -39,11 +39,12 @@
#include <linux/io.h>
#include <linux/irqreturn.h>
-#include <drm/ati_pcigart.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
#include <drm/r128_drm.h>
+#include "ati_pcigart.h"
+
/* General customization:
*/
#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 6589f9e0310e..6ac71755c22d 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -29,10 +29,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
+
#include <linux/compat.h>
-#include <drm/drmP.h>
#include <drm/r128_drm.h>
+
#include "r128_drv.h"
typedef struct drm_r128_init32 {
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 9730f4918944..d84e9c96e20a 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -30,8 +30,11 @@
* Eric Anholt <anholt@FreeBSD.org>
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include <drm/r128_drm.h>
+
#include "r128_drv.h"
u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 92ccd7aed0d4..c693b2ca0329 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/gpu/drm/amd/include
-hostprogs-y := mkregtable
+hostprogs := mkregtable
clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 364b895e7ebb..1bf06c91cd95 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -25,6 +25,7 @@
#ifndef ATOM_H
#define ATOM_H
+#include <linux/mutex.h>
#include <linux/types.h>
#define ATOM_BIOS_MAGIC 0xAA55
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index da2c9e295408..be583695427a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -244,9 +244,8 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (ASIC_IS_DCE8(rdev)) {
+ if (ASIC_IS_DCE8(rdev))
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
- }
}
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6f38375c77c8..15b00a347560 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -412,7 +412,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@@ -423,8 +422,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
if (!radeon_connector->con_priv)
return panel_mode;
- dig_connector = radeon_connector->con_priv;
-
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
@@ -816,9 +813,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
dp_info.use_dpencoder = true;
index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
- if (crev > 1) {
+ if (crev > 1)
dp_info.use_dpencoder = false;
- }
}
dp_info.enc_id = 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index cc8f32a1b03c..cc5ee1b3af84 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -26,10 +26,10 @@
#include <linux/backlight.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
@@ -1885,11 +1885,10 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
if (ASIC_IS_AVIVO(rdev))
args.v1.ucCRTC = radeon_crtc->crtc_id;
else {
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1)
args.v1.ucCRTC = radeon_crtc->crtc_id;
- } else {
+ else
args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
- }
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
@@ -2234,9 +2233,9 @@ assigned:
DRM_ERROR("Got encoder index incorrect - returning 0\n");
return 0;
}
- if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+ if (rdev->mode_info.active_encoders & (1 << enc_idx))
DRM_ERROR("chosen encoder in use %d\n", enc_idx);
- }
+
rdev->mode_info.active_encoders |= (1 << enc_idx);
return enc_idx;
}
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index a570ce40af19..ab4d21072191 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -68,11 +68,6 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
- }
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index ce37de020b91..d1d8aaf8323c 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -22,10 +22,9 @@
* Authors: Alex Deucher
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "btc_dpm.h"
#include "btcd.h"
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index c6fd123f60b5..a9257bed3484 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -22,10 +22,9 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "ci_dpm.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 40f4d29edfe2..5c42877fd6fb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -23,10 +23,10 @@
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include "atom.h"
@@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev)
else
actual_temp = temp & 0x1ff;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/* get temperature in millidegrees */
@@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev)
else
actual_temp = 0;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/*
@@ -3659,7 +3655,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
@@ -6969,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* XXX this should actually be a bus address, not an MC address. same on older asics */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -8141,7 +8137,7 @@ static void cik_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -8213,7 +8209,7 @@ static void cik_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
@@ -9504,7 +9500,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -9546,12 +9541,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -9561,14 +9551,17 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9586,15 +9579,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -9607,26 +9608,45 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -9640,15 +9660,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 589217a7e435..68403e77756d 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -333,7 +333,7 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
u32 me_cntl, reg_offset;
int i;
- if (enable == false) {
+ if (!enable) {
cik_sdma_gfx_stop(rdev);
cik_sdma_rlc_stop(rdev);
}
@@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device *rdev)
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 32ed60f1048b..35b177d77791 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "atom.h"
#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d978a3d9c82..14d90dc376e7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -23,9 +23,9 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -4945,7 +4945,7 @@ static void evergreen_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 5505a04ca402..a46ee6c2099d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 0d8d30b78f95..5e6086eb1807 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -21,10 +21,9 @@
*
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 410f626a39d4..02feb0801fd3 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -23,10 +23,10 @@
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
@@ -2017,7 +2017,7 @@ static void cayman_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -2085,7 +2085,7 @@ static void cayman_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index d9e62ca65ab8..b57c37ddd164 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -22,10 +22,9 @@
*/
#include <linux/math64.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "ni_dpm.h"
#include "nid.h"
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5c05193da520..24c8db673931 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,16 +26,16 @@
* Jerome Glisse
*/
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
@@ -1823,11 +1823,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXFORMAT_2:
i = (reg - RADEON_PP_TXFORMAT_0) / 24;
if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
- track->textures[i].use_pitch = 1;
+ track->textures[i].use_pitch = true;
} else {
- track->textures[i].use_pitch = 0;
- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ track->textures[i].use_pitch = false;
+ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
track->textures[i].tex_coord_type = 2;
@@ -2387,12 +2387,12 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
else
track->num_texture = 6;
track->maxy = 2048;
- track->separate_cube = 1;
+ track->separate_cube = true;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
- track->separate_cube = 0;
+ track->separate_cube = false;
track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -2815,7 +2815,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
uint32_t temp;
temp = RREG32(RADEON_CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~RADEON_CFG_VGA_RAM_EN;
temp |= RADEON_CFG_VGA_IO_DIS;
} else {
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 9ce6dd83d284..f5f2ffea5ab2 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
track->textures[i].lookup_disable = true;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 44856e3a7108..3b7ead5be5bf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -34,7 +35,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "r100_track.h"
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 83282ee2bde0..1d4c04e0a449 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,13 +26,13 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include "atom.h"
#include "r100d.h"
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7d175a9e8330..d9a33ca768f3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -26,14 +26,14 @@
* Jerome Glisse
*/
-#include <linux/slab.h>
-#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
@@ -3053,7 +3053,7 @@ static void r600_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -3191,7 +3191,7 @@ void r600_vga_set_state(struct radeon_device *rdev, bool state)
uint32_t temp;
temp = RREG32(CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
@@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d6c28a5d77ab..49e8266461f8 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -350,7 +350,7 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 slice_tile_max, size, tmp;
+ u32 slice_tile_max, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
@@ -360,7 +360,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
/* When resolve is used, the second colorbuffer has always 1 sample. */
unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
- size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
@@ -517,7 +516,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
struct r600_cs_track *track = p->track;
- u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+ u32 nviews, bpe, ntiles, slice_tile_max, tmp;
u32 height_align, pitch_align, depth_align;
u32 pitch = 8192;
u32 height = 8192;
@@ -564,7 +563,6 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
} else {
- size = radeon_bo_size(track->db_bo);
/* pitch in pixels */
pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
@@ -2342,7 +2340,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc)
{
- struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
@@ -2350,7 +2347,6 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- relocs_chunk = p->chunk_relocs;
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 35d92ef8a0d4..af6c0da45f28 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32808e50be12..30e32adc1fc6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -68,6 +68,10 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
+
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -505,15 +509,15 @@ struct radeon_bo {
struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
- struct radeon_mn *mn;
- struct list_head mn_list;
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
};
-#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
int radeon_gem_debugfs_init(struct radeon_device *rdev);
@@ -620,7 +624,7 @@ void radeon_sync_fence(struct radeon_sync *sync,
struct radeon_fence *fence);
int radeon_sync_resv(struct radeon_device *rdev,
struct radeon_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
bool shared);
int radeon_sync_rings(struct radeon_device *rdev,
struct radeon_sync *sync,
@@ -1913,20 +1917,20 @@ struct radeon_asic {
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
u32 blit_ring_index;
struct radeon_fence *(*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
u32 dma_ring_index;
/* method used for bo copy */
struct radeon_fence *(*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
@@ -2387,7 +2391,6 @@ struct radeon_device {
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool shutdown;
- bool need_dma32;
bool need_swiotlb;
bool accel_working;
bool fastfb_working; /* IGP feature*/
@@ -2451,9 +2454,6 @@ struct radeon_device {
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
-
- struct mutex mn_lock;
- DECLARE_HASHTABLE(mn_hash, 7);
};
bool radeon_is_px(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 4de16f3badb4..0aca7bdf54c7 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -25,9 +25,10 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
+#include <linux/pci.h>
+
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index dc3c2227e06a..495700d16fc9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -27,10 +27,10 @@
*/
#include <linux/console.h>
+#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e3f036c20d64..a74fa18cd27b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device *rdev);
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
@@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
int evergreen_get_temp(struct radeon_device *rdev);
int evergreen_get_allowed_info_register(struct radeon_device *rdev,
u32 reg, u32 *val);
@@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
@@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 226a7bf0eb7a..848ef68d9086 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -24,8 +24,9 @@
* Alex Deucher
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -569,7 +570,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
+ uint8_t con_obj_id, con_obj_num;
con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
@@ -577,9 +578,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj_num =
(le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
>> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* TODO CV support */
if (le16_to_cpu(path->usDeviceTag) ==
@@ -647,15 +645,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b9aea5776d3d..8c63ccb8b623 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -288,7 +288,7 @@ static void radeon_audio_interface_init(struct radeon_device *rdev)
} else {
rdev->audio.funcs = &r600_funcs;
rdev->audio.hdmi_funcs = &r600_hdmi_funcs;
- rdev->audio.dp_funcs = 0;
+ rdev->audio.dp_funcs = NULL;
}
}
@@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
return;
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 7ce5064a59f6..ac9a5ec481c3 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -35,7 +35,7 @@
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
int flag, int n,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
unsigned long start_jiffies;
unsigned long end_jiffies;
@@ -122,7 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.dma) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n,
- dobj->tbo.resv);
+ dobj->tbo.base.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
@@ -133,7 +133,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_BLIT, n,
- dobj->tbo.resv);
+ dobj->tbo.base.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 4d1490fbb075..c42f73fad3e3 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -26,11 +26,11 @@
* Jerome Glisse
*/
-#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include "atom.h"
#include "radeon.h"
@@ -664,17 +664,17 @@ bool radeon_get_bios(struct radeon_device *rdev)
uint16_t tmp;
r = radeon_atrm_get_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_acpi_vfct_bios(rdev);
- if (r == false)
+ if (!r)
r = igp_read_bios_from_vram(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_disabled_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_platform_bios(rdev);
- if (r == false || rdev->bios == NULL) {
+ if (!r || rdev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
rdev->bios = NULL;
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 9057b32f4498..c594ca68e3a7 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -26,8 +26,9 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c18ae15189f3..c3e49c973812 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -25,8 +25,9 @@
* Alex Deucher
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -2638,7 +2639,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
u16 offset, misc, misc2 = 0;
- u8 rev, blocks, tmp;
+ u8 rev, tmp;
int state_index = 0;
struct radeon_i2c_bus_rec i2c_bus;
@@ -2731,7 +2732,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
if (offset) {
rev = RBIOS8(offset);
- blocks = RBIOS8(offset + 0x2);
/* power mode 0 tends to be the only valid one */
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c60d1a44d22a..fe12d9d91d7a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -249,11 +249,10 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -269,9 +268,8 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -380,10 +378,9 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -428,14 +425,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
struct drm_encoder *enc;
- int i;
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- drm_connector_for_each_possible_encoder(conflict, enc, i) {
+ drm_connector_for_each_possible_encoder(conflict, enc) {
/* if the IDs match */
if (enc == encoder) {
if (conflict->status != connector_status_connected)
@@ -444,7 +440,7 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
if (radeon_conflict->use_digital)
continue;
- if (priority == true) {
+ if (priority) {
DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
conflict->name);
DRM_DEBUG_KMS("in favor of %s\n",
@@ -704,9 +700,9 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
else
ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
}
- if (val == 1 || ret == false) {
+ if (val == 1 || !ret)
radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
- }
+
radeon_property_change_mode(&radeon_encoder->base);
}
@@ -752,7 +748,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_encoder->output_csc = val;
- if (connector->encoder->crtc) {
+ if (connector->encoder && connector->encoder->crtc) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -1363,9 +1359,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- int i;
-
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1443,9 +1437,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1460,7 +1453,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1603,9 +1596,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1624,10 +1616,9 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1870,6 +1861,7 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
@@ -1947,17 +1939,21 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->con_priv = radeon_dig_connector;
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &radeon_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
- drm_connector_init(dev, &radeon_connector->base,
- &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true;
@@ -1979,8 +1975,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_init(dev, &radeon_connector->base,
- &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
@@ -2027,8 +2025,10 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_init(dev, &radeon_connector->base,
- &radeon_lvds_bridge_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_lvds_bridge_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
@@ -2042,13 +2042,18 @@ radeon_add_atom_connector(struct drm_device *dev,
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2067,13 +2072,18 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2098,13 +2108,18 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dvi_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
@@ -2155,13 +2170,18 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dvi_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
@@ -2205,15 +2225,20 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &radeon_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
@@ -2255,15 +2280,20 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &radeon_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_edp_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -2274,7 +2304,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_tv_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -2294,13 +2327,18 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_lvds_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -2344,6 +2382,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -2378,13 +2417,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2395,13 +2439,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2413,13 +2462,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dvi_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -2436,7 +2490,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_tv_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
@@ -2458,13 +2515,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
- drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_lvds_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
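
Every connector path in the hunks above now resolves the DDC i2c adapter before the connector object is initialised and hands it to drm_connector_init_with_ddc(). A minimal sketch of that ordering, with the foo_* names standing in for the driver's own structures (they are not from the patch):

#include <linux/i2c.h>
#include <drm/drm_connector.h>

/* stand-in for the driver's real drm_connector_funcs table */
static const struct drm_connector_funcs foo_connector_funcs;

static int foo_add_connector(struct drm_device *dev,
			     struct drm_connector *connector,
			     struct i2c_adapter *ddc, int type)
{
	/* ddc may be NULL when no usable i2c bus was found; the core
	 * then simply leaves connector->ddc unset */
	return drm_connector_init_with_ddc(dev, connector,
					   &foo_connector_funcs, type, ddc);
}

Doing the lookup first is what allows the adapter to be registered together with the connector instead of being patched in afterwards.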
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cef0e697a2ea..0d0ab8e0ff3b 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -26,11 +26,11 @@
*/
#include <linux/list_sort.h>
+#include <linux/pci.h>
#include <linux/uaccess.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -255,9 +255,9 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
int r;
list_for_each_entry(reloc, &p->validated, tv.head) {
- struct reservation_object *resv;
+ struct dma_resv *resv;
- resv = reloc->robj->tbo.resv;
+ resv = reloc->robj->tbo.base.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
reloc->tv.num_shared);
if (r)
@@ -443,7 +443,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (bo == NULL)
continue;
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}
kfree(parser->track);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index dceb554e5674..a522e092038b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -28,6 +28,7 @@
#include <linux/console.h>
#include <linux/efi.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -38,7 +39,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>
@@ -1325,8 +1325,6 @@ int radeon_device_init(struct radeon_device *rdev,
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
- mutex_init(&rdev->mn_lock);
- hash_init(rdev->mn_hash);
r = radeon_gem_init(rdev);
if (r)
return r;
@@ -1365,34 +1363,27 @@ int radeon_device_init(struct radeon_device *rdev,
else
rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
- /* set DMA mask + need_dma32 flags.
+ /* set DMA mask.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
* AGP - generally dma32 is safest
* PCI - dma32 for legacy pci gart, 40 bits on newer asics
*/
- rdev->need_dma32 = false;
+ dma_bits = 40;
if (rdev->flags & RADEON_IS_AGP)
- rdev->need_dma32 = true;
+ dma_bits = 32;
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
- rdev->need_dma32 = true;
+ dma_bits = 32;
#ifdef CONFIG_PPC64
if (rdev->family == CHIP_CEDAR)
- rdev->need_dma32 = true;
+ dma_bits = 32;
#endif
- dma_bits = rdev->need_dma32 ? 32 : 40;
- r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
if (r) {
- rdev->need_dma32 = true;
- dma_bits = 32;
pr_warn("radeon: No suitable DMA available\n");
- }
- r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
- pr_warn("radeon: No coherent DMA available\n");
+ return r;
}
rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
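
The two-step pci_set_dma_mask()/pci_set_consistent_dma_mask() sequence, with its retry at 32 bits, collapses into a single dma_set_mask_and_coherent() call that either succeeds for the chosen width or fails device init. A minimal sketch of the resulting pattern (the foo_* helper name is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_set_dma_mask(struct pci_dev *pdev, bool is_agp)
{
	int bits = is_agp ? 32 : 40;	/* AGP is only safe at 32 bits */

	/* programs both the streaming and the coherent mask in one call */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bits));
}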
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bd52f15e6330..d07c7db0c815 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -24,6 +24,7 @@
* Alex Deucher
*/
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/gcd.h>
@@ -36,7 +37,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_pci.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+ msleep(10);
+
WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
(NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
@@ -275,7 +277,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
kfree(work);
}
@@ -533,7 +535,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -607,7 +609,7 @@ pflip_cleanup:
radeon_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
dma_fence_put(work->fence);
kfree(work);
return r;
@@ -672,7 +674,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc;
- int i;
radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (radeon_crtc == NULL)
@@ -701,12 +702,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->mode_set.num_connectors = 0;
#endif
- for (i = 0; i < 256; i++) {
- radeon_crtc->lut_r[i] = i << 2;
- radeon_crtc->lut_g[i] = i << 2;
- radeon_crtc->lut_b[i] = i << 2;
- }
-
if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
radeon_atombios_init_crtc(dev, radeon_crtc);
else
@@ -847,11 +842,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (rdev->bios) {
if (rdev->is_atom_bios) {
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- if (ret == false)
+ if (!ret)
ret = radeon_get_atom_connector_info_from_object_table(dev);
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
- if (ret == false)
+ if (!ret)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
} else {
@@ -1687,7 +1682,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_encoder *radeon_encoder;
struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
bool first = true;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
@@ -1700,7 +1694,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
continue;
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- radeon_connector = to_radeon_connector(connector);
if (first) {
/* set scaling */
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2994f07fbad9..28eef9282874 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
return &radeon_connector->mst_encoder->base;
}
+static int
+radeon_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ radeon_connector->port);
+}
+
static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
.get_modes = radeon_dp_mst_get_modes,
.mode_valid = radeon_dp_mst_mode_valid,
.best_encoder = radeon_mst_best_encoder,
+ .detect_ctx = radeon_dp_mst_detect,
};
-static enum drm_connector_status
-radeon_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
-}
-
static void
radeon_dp_mst_connector_destroy(struct drm_connector *connector)
{
@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
};
@@ -514,7 +518,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
mst_enc = radeon_encoder->enc_priv;
- mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+ mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a6cbe11f79c6..fd74e2611185 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
+#include <linux/mmu_notifier.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -130,8 +131,7 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
int flags);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
unsigned int flags, int *vpos, int *hpos,
@@ -153,7 +153,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sg);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
@@ -325,13 +324,44 @@ bool radeon_device_is_virtual(void);
static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ unsigned long flags = 0;
int ret;
+ if (!ent)
+ return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
+
+ flags = ent->driver_data;
+
+ if (!radeon_si_support) {
+ switch (flags & RADEON_FAMILY_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ dev_info(&pdev->dev,
+ "SI support disabled by module param\n");
+ return -ENODEV;
+ }
+ }
+ if (!radeon_cik_support) {
+ switch (flags & RADEON_FAMILY_MASK) {
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dev_info(&pdev->dev,
+ "CIK support disabled by module param\n");
+ return -ENODEV;
+ }
+ }
+
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb");
if (ret)
return ret;
@@ -354,19 +384,28 @@ radeon_pci_shutdown(struct pci_dev *pdev)
*/
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
+
+#ifdef CONFIG_PPC64
+ /*
+ * Some adapters need to be suspended before a
+ * shutdown occurs in order to prevent an error
+ * during kexec.
+ * Make this power specific because it breaks
+ * some non-power boards.
+ */
+ radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
+#endif
}
static int radeon_pmops_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return radeon_suspend_kms(drm_dev, true, true, false);
}
static int radeon_pmops_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
/* GPU comes up enabled by the bios on resume */
if (radeon_is_px(drm_dev)) {
@@ -380,15 +419,13 @@ static int radeon_pmops_resume(struct device *dev)
static int radeon_pmops_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return radeon_suspend_kms(drm_dev, false, true, true);
}
static int radeon_pmops_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return radeon_resume_kms(drm_dev, false, true);
}
@@ -447,8 +484,7 @@ static int radeon_pmops_runtime_resume(struct device *dev)
static int radeon_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
struct drm_crtc *crtc;
if (!radeon_is_px(drm_dev)) {
@@ -539,7 +575,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
@@ -565,10 +601,8 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = radeon_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = radeon_gem_prime_pin,
.gem_prime_unpin = radeon_gem_prime_unpin,
- .gem_prime_res_obj = radeon_gem_prime_res_obj,
.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
.gem_prime_vmap = radeon_gem_prime_vmap,
@@ -624,6 +658,7 @@ static void __exit radeon_exit(void)
{
pci_unregister_driver(pdriver);
radeon_unregister_atpx_handler();
+ mmu_notifier_synchronize();
}
module_init(radeon_init);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index a0c99087034a..ced022fae19d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -24,9 +24,10 @@
* Alex Deucher
*/
+#include <linux/pci.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 2c564f4f3468..ec0b7d6c994d 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -25,6 +25,7 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -33,7 +34,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -73,7 +73,7 @@ radeonfb_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops radeonfb_ops = {
+static const struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d4d3778d0a98..f178ba321715 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -26,9 +26,9 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d8bc5d2dfd61..068c3e5da173 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -26,10 +26,11 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -83,7 +84,7 @@ retry:
}
return r;
}
- *obj = &robj->gem_base;
+ *obj = &robj->tbo.base;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
@@ -114,7 +115,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -296,6 +297,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
@@ -449,7 +452,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
@@ -478,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
@@ -564,7 +567,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index d465a3de7732..545e31e6cc3a 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -25,10 +25,10 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index d9613638f9cc..b86bc88ad430 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,12 +26,12 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 07f7ace42c4b..d24f23a81656 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -34,7 +35,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -100,31 +100,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
struct radeon_device *rdev;
int r, acpi_status;
- if (!radeon_si_support) {
- switch (flags & RADEON_FAMILY_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(dev->dev,
- "SI support disabled by module param\n");
- return -ENODEV;
- }
- }
- if (!radeon_cik_support) {
- switch (flags & RADEON_FAMILY_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(dev->dev,
- "CIK support disabled by module param\n");
- return -ENODEV;
- }
- }
-
rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
if (rdev == NULL) {
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index ef100b790463..44d060f75318 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -25,11 +25,11 @@
*/
#include <linux/backlight.h>
+#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_util.h>
#include <drm/radeon_drm.h>
@@ -1712,7 +1712,7 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
else
ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
+ if (!ret)
radeon_legacy_get_tmds_info_from_table(encoder, tmds);
return tmds;
@@ -1735,7 +1735,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
+ if (!ret)
radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
return tmds;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index f132eec737ad..d9df7f311e76 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -537,7 +537,7 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
- uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+ uint32_t tv_pll_cntl, tv_ftotal;
uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
uint32_t m, n, p;
const uint16_t *hor_timing;
@@ -709,12 +709,6 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
(((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
- tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
- ((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
- ((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
- RADEON_TVCLK_SRC_SEL_TVPLL |
- RADEON_TVPLL_TEST_DIS);
-
tv_dac->tv.tv_uv_adr = 0xc8;
if (tv_dac->tv_std == TV_STD_NTSC ||
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 8c3871ed23a9..f93829f08a4d 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -36,212 +36,53 @@
#include "radeon.h"
-struct radeon_mn {
- /* constant after initialisation */
- struct radeon_device *rdev;
- struct mm_struct *mm;
- struct mmu_notifier mn;
-
- /* only used on destruction */
- struct work_struct work;
-
- /* protected by rdev->mn_lock */
- struct hlist_node node;
-
- /* objects protected by lock */
- struct mutex lock;
- struct rb_root_cached objects;
-};
-
-struct radeon_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
/**
- * radeon_mn_destroy - destroy the rmn
- *
- * @work: previously sheduled work item
- *
- * Lazy destroys the notifier from a work item
- */
-static void radeon_mn_destroy(struct work_struct *work)
-{
- struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
- struct radeon_device *rdev = rmn->rdev;
- struct radeon_mn_node *node, *next_node;
- struct radeon_bo *bo, *next_bo;
-
- mutex_lock(&rdev->mn_lock);
- mutex_lock(&rmn->lock);
- hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node,
- &rmn->objects.rb_root, it.rb) {
-
- interval_tree_remove(&node->it, &rmn->objects);
- list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
- }
- kfree(node);
- }
- mutex_unlock(&rmn->lock);
- mutex_unlock(&rdev->mn_lock);
- mmu_notifier_unregister(&rmn->mn, rmn->mm);
- kfree(rmn);
-}
-
-/**
- * radeon_mn_release - callback to notify about mm destruction
+ * radeon_mn_invalidate - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
- *
- * Shedule a work item to lazy destroy our notifier.
- */
-static void radeon_mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
- INIT_WORK(&rmn->work, radeon_mn_destroy);
- schedule_work(&rmn->work);
-}
-
-/**
- * radeon_mn_invalidate_range_start - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: the VMA under invalidation
*
* We block for all BOs between start and end to be idle and
* unmap them by moving them into the system domain again.
*/
-static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
+static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
struct ttm_operation_ctx ctx = { false, false };
- struct interval_tree_node *it;
- unsigned long end;
- int ret = 0;
-
- /* notification is exclusive, but interval is inclusive */
- end = range->end - 1;
-
- /* TODO we should be able to split locking for interval tree and
- * the tear down.
- */
- if (mmu_notifier_range_blockable(range))
- mutex_lock(&rmn->lock);
- else if (!mutex_trylock(&rmn->lock))
- return -EAGAIN;
+ long r;
- it = interval_tree_iter_first(&rmn->objects, range->start, end);
- while (it) {
- struct radeon_mn_node *node;
- struct radeon_bo *bo;
- long r;
+ if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ return true;
- if (!mmu_notifier_range_blockable(range)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- node = container_of(it, struct radeon_mn_node, it);
- it = interval_tree_iter_next(it, range->start, end);
-
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
- continue;
-
- r = radeon_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- radeon_bo_unreserve(bo);
- }
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ return true;
}
-
-out_unlock:
- mutex_unlock(&rmn->lock);
-
- return ret;
-}
-
-static const struct mmu_notifier_ops radeon_mn_ops = {
- .release = radeon_mn_release,
- .invalidate_range_start = radeon_mn_invalidate_range_start,
-};
-
-/**
- * radeon_mn_get - create notifier context
- *
- * @rdev: radeon device pointer
- *
- * Creates a notifier context for current->mm.
- */
-static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
-{
- struct mm_struct *mm = current->mm;
- struct radeon_mn *rmn;
- int r;
-
- if (down_write_killable(&mm->mmap_sem))
- return ERR_PTR(-EINTR);
- mutex_lock(&rdev->mn_lock);
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
- if (rmn->mm == mm)
- goto release_locks;
-
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn) {
- rmn = ERR_PTR(-ENOMEM);
- goto release_locks;
- }
-
- rmn->rdev = rdev;
- rmn->mm = mm;
- rmn->mn.ops = &radeon_mn_ops;
- mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT_CACHED;
-
- r = __mmu_notifier_register(&rmn->mn, mm);
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
- goto free_rmn;
-
- hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);
-
-release_locks:
- mutex_unlock(&rdev->mn_lock);
- up_write(&mm->mmap_sem);
-
- return rmn;
-
-free_rmn:
- mutex_unlock(&rdev->mn_lock);
- up_write(&mm->mmap_sem);
- kfree(rmn);
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
- return ERR_PTR(r);
+ radeon_bo_unreserve(bo);
+ return true;
}
+static const struct mmu_interval_notifier_ops radeon_mn_ops = {
+ .invalidate = radeon_mn_invalidate,
+};
+
/**
* radeon_mn_register - register a BO for notifier updates
*
@@ -253,50 +94,20 @@ free_rmn:
*/
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
- unsigned long end = addr + radeon_bo_size(bo) - 1;
- struct radeon_device *rdev = bo->rdev;
- struct radeon_mn *rmn;
- struct radeon_mn_node *node = NULL;
- struct list_head bos;
- struct interval_tree_node *it;
-
- rmn = radeon_mn_get(rdev);
- if (IS_ERR(rmn))
- return PTR_ERR(rmn);
-
- INIT_LIST_HEAD(&bos);
-
- mutex_lock(&rmn->lock);
-
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct radeon_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node) {
- node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
- if (!node) {
- mutex_unlock(&rmn->lock);
- return -ENOMEM;
- }
- }
-
- bo->mn = rmn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &rmn->objects);
-
- mutex_unlock(&rmn->lock);
-
+ int ret;
+
+ ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ radeon_bo_size(bo), &radeon_mn_ops);
+ if (ret)
+ return ret;
+
+ /*
+ * FIXME: radeon appears to allow get_user_pages to run during
+ * invalidate_range_start/end, which is not a safe way to read the
+ * PTEs. It should use the mmu_interval_read_begin() scheme around the
+ * get_user_pages to ensure that the PTEs are read properly
+ */
+ mmu_interval_read_begin(&bo->notifier);
return 0;
}
@@ -309,31 +120,8 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
*/
void radeon_mn_unregister(struct radeon_bo *bo)
{
- struct radeon_device *rdev = bo->rdev;
- struct radeon_mn *rmn;
- struct list_head *head;
-
- mutex_lock(&rdev->mn_lock);
- rmn = bo->mn;
- if (rmn == NULL) {
- mutex_unlock(&rdev->mn_lock);
+ if (!bo->notifier.mm)
return;
- }
-
- mutex_lock(&rmn->lock);
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- bo->mn = NULL;
- list_del(&bo->mn_list);
-
- if (list_empty(head)) {
- struct radeon_mn_node *node;
- node = container_of(head, struct radeon_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
- kfree(node);
- }
-
- mutex_unlock(&rmn->lock);
- mutex_unlock(&rdev->mn_lock);
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
}
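
The FIXME added in radeon_mn_register() above refers to the usual sequence-retry pairing around the page lookup; a minimal sketch of what that would look like, with the lookup itself left as a placeholder and the foo_* name being hypothetical:

#include <linux/mmu_notifier.h>

static void foo_refresh_user_pages(struct mmu_interval_notifier *notifier)
{
	unsigned long seq;

	do {
		/* snapshot the invalidation sequence before touching PTEs */
		seq = mmu_interval_read_begin(notifier);
		/* re-read the PTEs here, e.g. via get_user_pages_fast() */
	} while (mmu_interval_read_retry(notifier, seq));
}

If an invalidate callback fires while the pages are being looked up, mmu_interval_read_retry() reports the stale sequence and the lookup is simply repeated.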
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index fd470d6bf3f4..96565171d13e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -327,7 +327,6 @@ enum radeon_flip_status {
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
- u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
bool cursor_out_of_bounds;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 21f73fc86f38..140d94cc080d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -85,9 +85,9 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
WARN_ON_ONCE(!list_empty(&bo->va));
- if (bo->gem_base.import_attach)
- drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
- drm_gem_object_release(&bo->gem_base);
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
@@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel,
u32 domain, u32 flags, struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
@@ -209,7 +209,7 @@ int radeon_bo_create(struct radeon_device *rdev,
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+ drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
@@ -442,13 +442,13 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
+ &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+ *((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (!bo->tiling_flags)
return 0;
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
if (!force_drop)
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
if (shared)
- reservation_object_add_shared_fence(resv, &fence->base);
+ dma_resv_add_shared_fence(resv, &fence->base);
else
- reservation_object_add_excl_fence(resv, &fence->base);
+ dma_resv_add_excl_fence(resv, &fence->base);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9ffd8215d38a..d23f2ed4126e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -116,7 +116,7 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
@@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5d10e11a9225..8c5d6fda0d75 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,10 +23,10 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/pci.h>
#include <linux/power_supply.h>
#include <drm/drm_debugfs.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include "atom.h"
@@ -1789,7 +1789,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
u32 stat_crtc = 0;
bool in_vbl = radeon_pm_in_vbl(rdev);
- if (in_vbl == false)
+ if (!in_vbl)
DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
finish ? "exit" : "entry");
return in_vbl;
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index d3a5bea9a2c5..b906e8fbd5f3 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
- struct reservation_object *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = attach->dmabuf->resv;
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
- ww_mutex_lock(&resv->lock, NULL);
+ dma_resv_lock(resv, NULL);
ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
- ww_mutex_unlock(&resv->lock);
+ dma_resv_unlock(resv);
if (ret)
return ERR_PTR(ret);
@@ -80,7 +80,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
mutex_unlock(&rdev->gem.mutex);
bo->prime_shared_count = 1;
- return &bo->gem_base;
+ return &bo->tbo.base;
}
int radeon_gem_prime_pin(struct drm_gem_object *obj)
@@ -117,19 +117,11 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
}
-struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
-
- return bo->tbo.resv;
-}
-
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct radeon_bo *bo = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ return drm_gem_prime_export(gobj, flags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 8c9780b5a884..55cc77a73c7b 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_sync *sync,
*/
int radeon_sync_resv(struct radeon_device *rdev,
struct radeon_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
bool shared)
{
- struct reservation_object_list *flist;
+ struct dma_resv_list *flist;
struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
/* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
+ f = dma_resv_get_excl(resv);
fence = f ? to_radeon_fence(f) : NULL;
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
r = dma_fence_wait(f, true);
- flist = reservation_object_get_list(resv);
+ flist = dma_resv_get_list(resv);
if (shared || !flist || r)
return r;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 0f6ba81a1669..a5e1d2139e80 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -120,11 +120,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
else
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
r = PTR_ERR(fence);
@@ -171,11 +171,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
else
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fb3696bc616d..3b92311d30b9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -32,6 +32,7 @@
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -41,7 +42,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
@@ -184,7 +184,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (radeon_ttm_tt_has_userptr(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+ return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
filp->private_data);
}
@@ -244,7 +244,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
- fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+ fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -443,7 +443,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.size);
else
mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
+ ioremap(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!mem->bus.addr)
return -ENOMEM;
@@ -794,7 +794,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
- rdev->need_dma32);
+ rdev->ddev->vma_offset_manager,
+ dma_addressing_limited(&rdev->pdev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -880,9 +881,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
man->size = size >> PAGE_SHIFT;
}
-static struct vm_operations_struct radeon_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
@@ -890,34 +888,36 @@ static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
vm_fault_t ret;
bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL) {
+ if (bo == NULL)
return VM_FAULT_NOPAGE;
- }
+
rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- ret = ttm_vm_ops->fault(vmf);
+ ret = ttm_bo_vm_fault(vmf);
up_read(&rdev->pm.mclk_lock);
return ret;
}
+static struct vm_operations_struct radeon_ttm_vm_ops = {
+ .fault = radeon_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
int r;
struct drm_file *file_priv = filp->private_data;
struct radeon_device *rdev = file_priv->minor->dev->dev_private;
- if (rdev == NULL) {
+ if (rdev == NULL)
return -EINVAL;
- }
+
r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
- if (unlikely(r != 0)) {
+ if (unlikely(r != 0))
return r;
- }
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- radeon_ttm_vm_ops = *ttm_vm_ops;
- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
- }
+
vma->vm_ops = &radeon_ttm_vm_ops;
return 0;
}
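
The mmap rework above drops the old trick of copying TTM's vm_ops at first fault and instead builds a static table from the exported ttm_bo_vm_*() helpers, wrapping only .fault to take a driver lock. A sketch of that pattern, with my_ prefixed names standing in for the driver specifics:

	#include <linux/mm.h>
	#include <linux/rwsem.h>
	#include <drm/ttm/ttm_bo_api.h>

	static DECLARE_RWSEM(my_clock_lock);	/* stands in for rdev->pm.mclk_lock */

	static vm_fault_t my_ttm_fault(struct vm_fault *vmf)
	{
		vm_fault_t ret;

		if (!vmf->vma->vm_private_data)
			return VM_FAULT_NOPAGE;

		down_read(&my_clock_lock);	/* driver-specific serialization */
		ret = ttm_bo_vm_fault(vmf);	/* generic TTM fault handling */
		up_read(&my_clock_lock);

		return ret;
	}

	static const struct vm_operations_struct my_ttm_vm_ops = {
		.fault	= my_ttm_fault,
		.open	= ttm_bo_vm_open,
		.close	= ttm_bo_vm_close,
		.access	= ttm_bo_vm_access,
	};
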
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index ff4f794d1c86..1ad5c3b86b64 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = reservation_object_get_excl(bo->tbo.resv);
+ f = dma_resv_get_excl(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 59db54ace428..5e8006444704 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -388,9 +388,9 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
ib.ptr[i] = cpu_to_le32(0x0);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
- if (r) {
+ if (r)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- }
+
if (fence)
*fence = radeon_fence_ref(ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 8512b02e9583..f60fae0aed11 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -296,9 +296,9 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ if (bo_va->vm == vm)
return bo_va;
- }
+
}
return NULL;
}
@@ -323,9 +323,9 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (bo_va == NULL) {
+ if (bo_va == NULL)
return NULL;
- }
+
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->it.start = 0;
@@ -702,7 +702,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+ radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
@@ -830,8 +830,8 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
uint64_t pte;
int r;
- radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.resv, 1);
+ radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
+ r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
if (r)
return r;
@@ -947,9 +947,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
if (mem) {
addr = (u64)mem->start << PAGE_SHIFT;
- if (mem->mem_type != TTM_PL_SYSTEM) {
+ if (mem->mem_type != TTM_PL_SYSTEM)
bo_va->flags |= RADEON_VM_PAGE_VALID;
- }
+
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
@@ -1233,9 +1233,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root))
dev_err(rdev->dev, "still active bo inside vm\n");
- }
+
rbtree_postorder_for_each_entry_safe(bo_va, tmp,
&vm->va.rb_root, it.rb) {
interval_tree_remove(&bo_va->it, &vm->va);
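
The dma_resv_reserve_shared() call added above pre-allocates a fence slot so that a later add cannot fail. A minimal sketch of that reserve-then-add sequence (add_read_fence() is an illustrative name; the reservation object must be locked):

	#include <linux/dma-resv.h>

	static int add_read_fence(struct dma_resv *resv, struct dma_fence *fence)
	{
		int r;

		/* Make room for one more shared fence while the resv is held. */
		r = dma_resv_reserve_shared(resv, 1);
		if (r)
			return r;

		dma_resv_add_shared_fence(resv, fence);
		return 0;
	}
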
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 2f8ff089f7b1..c88b4906f7bc 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,9 +37,9 @@
*/
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 267d8a9134c8..c296f94f9700 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -26,7 +26,7 @@
* Jerome Glisse
*/
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "atom.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 72dbf3251c53..17390074277a 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -22,10 +22,9 @@
* Authors: Alex Deucher
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "r600_dpm.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 7a6fc66d6a40..21f653ae1e1b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -27,10 +27,10 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
@@ -1703,7 +1703,7 @@ static void rv770_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 0866b38ef264..4c91614b5e70 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -42,7 +42,7 @@
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 05894d198a79..93dcab548a83 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -23,10 +23,10 @@
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
+ }
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
@@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -6472,7 +6472,7 @@ static void si_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -6539,7 +6539,7 @@ static void si_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
@@ -7087,7 +7087,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -7129,12 +7128,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -7144,14 +7138,17 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7169,15 +7166,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -7190,26 +7195,46 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -7223,15 +7248,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
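
The conversion running through this hunk swaps raw config-space accesses at a manually looked-up capability offset for the pcie_capability_*() helpers, which locate the PCIe capability internally. A short sketch of the new pattern (set_target_speed() is a made-up wrapper for illustration):

	#include <linux/pci.h>

	static void set_target_speed(struct pci_dev *pdev, enum pci_bus_speed cap)
	{
		u16 lnkctl2;

		if (!pci_is_pcie(pdev))			/* replaces the pci_pcie_cap() != 0 check */
			return;

		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &lnkctl2);
		lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;	/* named macro instead of ~0xf */
		if (cap == PCIE_SPEED_8_0GT)
			lnkctl2 |= PCI_EXP_LNKCTL2_TLS_8_0GT;
		else if (cap == PCIE_SPEED_5_0GT)
			lnkctl2 |= PCI_EXP_LNKCTL2_TLS_5_0GT;
		else
			lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL2, lnkctl2);
	}
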
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 4773bb7d947e..d2fa302a5be9 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 460fd98e40a7..05e8b4d0af3f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -22,10 +22,9 @@
*/
#include <linux/math64.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "r600_dpm.h"
#include "radeon.h"
@@ -1958,6 +1957,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x682C:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
break;
case 0x6825:
case 0x6827:
@@ -3639,14 +3639,13 @@ static int si_notify_smc_display_change(struct radeon_device *rdev,
static void si_program_response_times(struct radeon_device *rdev)
{
- u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 voltage_response_time, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
@@ -5899,7 +5898,7 @@ static int si_patch_single_dependency_table_based_on_leakage(struct radeon_devic
static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
{
- int ret = 0;
+ int ret;
ret = si_patch_single_dependency_table_based_on_leakage(rdev,
&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 65302f9d025e..4d93b84aa739 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -21,10 +21,9 @@
*
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 1529849e217e..0919f1f159a4 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
depends on DRM && OF
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
+ imply DRM_RCAR_CMM
imply DRM_RCAR_LVDS
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
@@ -13,6 +14,13 @@ config DRM_RCAR_DU
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
+config DRM_RCAR_CMM
+ tristate "R-Car DU Color Management Module (CMM) Support"
+ depends on DRM && OF
+ depends on DRM_RCAR_DU
+ help
+ Enable support for R-Car Color Management Module (CMM).
+
config DRM_RCAR_DW_HDMI
tristate "R-Car DU Gen3 HDMI Encoder Support"
depends on DRM && OF
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 6c2ed9c46467..4d1187ccc3e5 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -15,6 +15,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
+obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
new file mode 100644
index 000000000000..c578095b09a5
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rcar_cmm.c -- R-Car Display Unit Color Management Module
+ *
+ * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_color_mgmt.h>
+
+#include "rcar_cmm.h"
+
+#define CM2_LUT_CTRL 0x0000
+#define CM2_LUT_CTRL_LUT_EN BIT(0)
+#define CM2_LUT_TBL_BASE 0x0600
+#define CM2_LUT_TBL(__i) (CM2_LUT_TBL_BASE + (__i) * 4)
+
+struct rcar_cmm {
+ void __iomem *base;
+
+ /*
+ * @lut: 1D-LUT state
+ * @lut.enabled: 1D-LUT enabled flag
+ */
+ struct {
+ bool enabled;
+ } lut;
+};
+
+static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
+{
+ return ioread32(rcmm->base + reg);
+}
+
+static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
+{
+ iowrite32(data, rcmm->base + reg);
+}
+
+/*
+ * rcar_cmm_lut_write() - Scale the DRM LUT table entries to hardware precision
+ * and write to the CMM registers
+ * @rcmm: Pointer to the CMM device
+ * @drm_lut: Pointer to the DRM LUT table
+ */
+static void rcar_cmm_lut_write(struct rcar_cmm *rcmm,
+ const struct drm_color_lut *drm_lut)
+{
+ unsigned int i;
+
+ for (i = 0; i < CM2_LUT_SIZE; ++i) {
+ u32 entry = drm_color_lut_extract(drm_lut[i].red, 8) << 16
+ | drm_color_lut_extract(drm_lut[i].green, 8) << 8
+ | drm_color_lut_extract(drm_lut[i].blue, 8);
+
+ rcar_cmm_write(rcmm, CM2_LUT_TBL(i), entry);
+ }
+}
+
+/*
+ * rcar_cmm_setup() - Configure the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ * @config: The CMM unit configuration
+ *
+ * Configure the CMM unit with the given configuration. Currently enabling,
+ * disabling and programming of the 1-D LUT unit is supported.
+ *
+ * As rcar_cmm_setup() accesses the CMM registers the unit should be powered
+ * and its functional clock enabled. To guarantee this, before any call to
+ * this function is made, the CMM unit has to be enabled by calling
+ * rcar_cmm_enable() first.
+ *
+ * TODO: Add support for LUT double buffer operations to avoid updating the
+ * LUT table entries while a frame is being displayed.
+ */
+int rcar_cmm_setup(struct platform_device *pdev,
+ const struct rcar_cmm_config *config)
+{
+ struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+ /* Disable LUT if no table is provided. */
+ if (!config->lut.table) {
+ if (rcmm->lut.enabled) {
+ rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+ rcmm->lut.enabled = false;
+ }
+
+ return 0;
+ }
+
+ /* Enable LUT and program the new gamma table values. */
+ if (!rcmm->lut.enabled) {
+ rcar_cmm_write(rcmm, CM2_LUT_CTRL, CM2_LUT_CTRL_LUT_EN);
+ rcmm->lut.enabled = true;
+ }
+
+ rcar_cmm_lut_write(rcmm, config->lut.table);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_setup);
+
+/*
+ * rcar_cmm_enable() - Enable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * When the output of the corresponding DU channel is routed to the CMM unit,
+ * the unit shall be enabled before the DU channel is started, and remain
+ * enabled until the channel is stopped. The CMM unit shall be disabled with
+ * rcar_cmm_disable().
+ *
+ * Calls to rcar_cmm_enable() and rcar_cmm_disable() are not reference-counted.
+ * It is an error to attempt to enable an already enabled CMM unit, or to
+ * attempt to disable a disabled unit.
+ */
+int rcar_cmm_enable(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_enable);
+
+/*
+ * rcar_cmm_disable() - Disable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * See rcar_cmm_enable() for usage information.
+ *
+ * Disabling the CMM unit disables all the internal processing blocks. The CMM
+ * state shall thus be restored with rcar_cmm_setup() when re-enabling the CMM
+ * unit after the next rcar_cmm_enable() call.
+ */
+void rcar_cmm_disable(struct platform_device *pdev)
+{
+ struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+ rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+ rcmm->lut.enabled = false;
+
+ pm_runtime_put(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_disable);
+
+/*
+ * rcar_cmm_init() - Initialize the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * Return: 0 on success, -EPROBE_DEFER if the CMM is not available yet,
+ * -ENODEV if the DRM_RCAR_CMM config option is disabled
+ */
+int rcar_cmm_init(struct platform_device *pdev)
+{
+ struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+ if (!rcmm)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_init);
+
+static int rcar_cmm_probe(struct platform_device *pdev)
+{
+ struct rcar_cmm *rcmm;
+
+ rcmm = devm_kzalloc(&pdev->dev, sizeof(*rcmm), GFP_KERNEL);
+ if (!rcmm)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, rcmm);
+
+ rcmm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rcmm->base))
+ return PTR_ERR(rcmm->base);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+}
+
+static int rcar_cmm_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_cmm_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-cmm", },
+ { .compatible = "renesas,rcar-gen2-cmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
+
+static struct platform_driver rcar_cmm_platform_driver = {
+ .probe = rcar_cmm_probe,
+ .remove = rcar_cmm_remove,
+ .driver = {
+ .name = "rcar-cmm",
+ .of_match_table = rcar_cmm_of_table,
+ },
+};
+
+module_platform_driver(rcar_cmm_platform_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>");
+MODULE_DESCRIPTION("Renesas R-Car CMM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.h b/drivers/gpu/drm/rcar-du/rcar_cmm.h
new file mode 100644
index 000000000000..b5f7ec6db04a
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rcar_cmm.h -- R-Car Display Unit Color Management Module
+ *
+ * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#ifndef __RCAR_CMM_H__
+#define __RCAR_CMM_H__
+
+#define CM2_LUT_SIZE 256
+
+struct drm_color_lut;
+struct platform_device;
+
+/**
+ * struct rcar_cmm_config - CMM configuration
+ *
+ * @lut: 1D-LUT configuration
+ * @lut.table: 1D-LUT table entries. Disable LUT operations when NULL
+ */
+struct rcar_cmm_config {
+ struct {
+ struct drm_color_lut *table;
+ } lut;
+};
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_CMM)
+int rcar_cmm_init(struct platform_device *pdev);
+
+int rcar_cmm_enable(struct platform_device *pdev);
+void rcar_cmm_disable(struct platform_device *pdev);
+
+int rcar_cmm_setup(struct platform_device *pdev,
+ const struct rcar_cmm_config *config);
+#else
+static inline int rcar_cmm_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline int rcar_cmm_enable(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline void rcar_cmm_disable(struct platform_device *pdev)
+{
+}
+
+static inline int rcar_cmm_setup(struct platform_device *pdev,
+ const struct rcar_cmm_config *config)
+{
+ return 0;
+}
+#endif /* IS_ENABLED(CONFIG_DRM_RCAR_CMM) */
+
+#endif /* __RCAR_CMM_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 2da46e3dc4ae..d73e88ddecd0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
@@ -21,6 +22,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
+#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -475,6 +477,45 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
}
/* -----------------------------------------------------------------------------
+ * Color Management Module (CMM)
+ */
+
+static int rcar_du_cmm_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_property_blob *drm_lut = state->gamma_lut;
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct device *dev = rcrtc->dev->dev;
+
+ if (!drm_lut)
+ return 0;
+
+ /* We only accept fully populated LUT tables. */
+ if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
+ dev_err(dev, "invalid gamma lut size: %zu bytes\n",
+ drm_lut->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rcar_du_cmm_setup(struct drm_crtc *crtc)
+{
+ struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_cmm_config cmm_config = {};
+
+ if (!rcrtc->cmm)
+ return;
+
+ if (drm_lut)
+ cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
+
+ rcar_cmm_setup(rcrtc->cmm, &cmm_config);
+}
+
+/* -----------------------------------------------------------------------------
* Start/Stop and Suspend/Resume
*/
@@ -619,6 +660,9 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
+ if (rcrtc->cmm)
+ rcar_cmm_disable(rcrtc->cmm);
+
/*
* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
@@ -642,6 +686,11 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
{
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
struct drm_encoder *encoder;
+ int ret;
+
+ ret = rcar_du_cmm_check(crtc, state);
+ if (ret)
+ return ret;
/* Store the routes from the CRTC output to the DU outputs. */
rstate->outputs = 0;
@@ -667,6 +716,8 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
struct rcar_du_device *rcdu = rcrtc->dev;
+ if (rcrtc->cmm)
+ rcar_cmm_enable(rcrtc->cmm);
rcar_du_crtc_get(rcrtc);
/*
@@ -680,12 +731,20 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
+ struct drm_bridge *bridge;
- rcar_lvds_clk_enable(encoder->base.bridge,
- mode->clock * 1000);
+ bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ rcar_lvds_clk_enable(bridge, mode->clock * 1000);
}
rcar_du_crtc_start(rcrtc);
+
+ /*
+ * TODO: The chip manual indicates that CMM tables should be written
+ * after the DU channel has been activated. Investigate the impact
+ * of this restriction on the first displayed frame.
+ */
+ rcar_du_cmm_setup(crtc);
}
static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -702,12 +761,14 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
struct rcar_du_encoder *encoder =
rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+ struct drm_bridge *bridge;
/*
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- rcar_lvds_clk_disable(encoder->base.bridge);
+ bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ rcar_lvds_clk_disable(bridge);
}
spin_lock_irq(&crtc->dev->event_lock);
@@ -739,6 +800,10 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
*/
rcar_du_crtc_get(rcrtc);
+ /* If the active state changed, we let .atomic_enable handle CMM. */
+ if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
+ rcar_du_cmm_setup(crtc);
+
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@@ -1075,6 +1140,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.set_crc_source = rcar_du_crtc_set_crc_source,
.verify_crc_source = rcar_du_crtc_verify_crc_source,
.get_crc_sources = rcar_du_crtc_get_crc_sources,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/* -----------------------------------------------------------------------------
@@ -1194,6 +1260,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
if (ret < 0)
return ret;
+ /* CMM might be disabled for this CRTC. */
+ if (rcdu->cmms[swindex]) {
+ rcrtc->cmm = rcdu->cmms[swindex];
+ rgrp->cmms_mask |= BIT(hwindex % 2);
+
+ drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
+ }
+
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
/* Start with vertical blanking interrupt reporting disabled. */
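
The CRTC changes above hook the CMM into DRM color management through the stock helpers. A hedged sketch of that registration and of the size check mirrored from rcar_du_cmm_check(), with my_crtc_* names standing in for the driver functions:

	#include <drm/drm_color_mgmt.h>
	#include <drm/drm_crtc.h>

	#include "rcar_cmm.h"

	static void my_crtc_enable_gamma(struct drm_crtc *crtc)
	{
		/* Legacy gamma table size and atomic GAMMA_LUT property (no CTM). */
		drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
		drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
	}

	static int my_crtc_check_lut(const struct drm_crtc_state *state)
	{
		if (!state->gamma_lut)
			return 0;

		/* The hardware only accepts fully populated tables. */
		if (drm_color_lut_size(state->gamma_lut) != CM2_LUT_SIZE)
			return -EINVAL;

		return 0;
	}
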
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 3b7fc668996f..5f2940c42225 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -39,6 +39,7 @@ struct rcar_du_vsp;
* @vblank_wait: wait queue used to signal vertical blanking
* @vblank_count: number of vertical blanking interrupts to wait for
* @group: CRTC group this CRTC belongs to
+ * @cmm: CMM associated with this CRTC
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
* @writeback: the writeback connector
@@ -64,6 +65,7 @@ struct rcar_du_crtc {
unsigned int vblank_count;
struct rcar_du_group *group;
+ struct platform_device *cmm;
struct rcar_du_vsp *vsp;
unsigned int vsp_pipe;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 6df37c2a9678..654e2dd08146 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.dpll_mask = BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(3) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774B1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -370,7 +399,10 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(0),
.routes = {
- /* R8A77970 has one RGB output and one LVDS output. */
+ /*
+ * R8A77970 and R8A77980 have one RGB output and one LVDS
+ * output.
+ */
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
@@ -416,6 +448,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
+ { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
@@ -427,6 +460,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
{ .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
+ { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ }
@@ -441,14 +475,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static struct drm_driver rcar_du_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
- | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 1327cd0df90a..61504c54e2ec 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/wait.h>
+#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
#include "rcar_du_vsp.h"
@@ -85,6 +86,7 @@ struct rcar_du_device {
struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+ struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
struct {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 0f00bdfe2366..3cd83a030a04 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,6 +9,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
@@ -84,8 +85,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
goto done;
}
- bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto done;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 9eee47969e77..88a783ceb3e9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -135,6 +135,7 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
static void rcar_du_group_setup(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
+ u32 defr7 = DEFR7_CODE;
/* Enable extended features */
rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
@@ -147,6 +148,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
rcar_du_group_setup_pins(rgrp);
+ /*
+ * TODO: Handle routing of the DU output to CMM dynamically, as we
+ * should bypass CMM completely when no color management feature is
+ * used.
+ */
+ defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) |
+ (rgrp->cmms_mask & BIT(0) ? DEFR7_CMME0 : 0);
+ rcar_du_group_write(rgrp, DEFR7, defr7);
+
if (rcdu->info->gen >= 2) {
rcar_du_group_setup_defr8(rgrp);
rcar_du_group_setup_didsr(rgrp);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 87950c1f6a52..e9906609c635 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -22,6 +22,7 @@ struct rcar_du_device;
* @mmio_offset: registers offset in the device memory map
* @index: group index
* @channels_mask: bitmask of populated DU channels in this group
+ * @cmms_mask: bitmask of available CMMs in this group
* @num_crtcs: number of CRTCs in this group (1 or 2)
* @use_count: number of users of the group (rcar_du_group_(get|put))
* @used_crtcs: number of CRTCs currently in use
@@ -37,6 +38,7 @@ struct rcar_du_group {
unsigned int index;
unsigned int channels_mask;
+ unsigned int cmms_mask;
unsigned int num_crtcs;
unsigned int use_count;
unsigned int used_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 2dc9caee8767..fcfd916227d1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -17,7 +17,9 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <linux/device.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include <linux/wait.h>
#include "rcar_du_crtc.h"
@@ -542,6 +544,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
{
const struct device_node *np = rcdu->dev->of_node;
+ const char *vsps_prop_name = "renesas,vsps";
struct of_phandle_args args;
struct {
struct device_node *np;
@@ -557,15 +560,21 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
* entry contains a pointer to the VSP DT node and a bitmask of the
* connected DU CRTCs.
*/
- cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1;
+ ret = of_property_count_u32_elems(np, vsps_prop_name);
+ if (ret < 0) {
+ /* Backward compatibility with old DTBs. */
+ vsps_prop_name = "vsps";
+ ret = of_property_count_u32_elems(np, vsps_prop_name);
+ }
+ cells = ret / rcdu->num_crtcs - 1;
if (cells > 1)
return -EINVAL;
for (i = 0; i < rcdu->num_crtcs; ++i) {
unsigned int j;
- ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i,
- &args);
+ ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
+ cells, i, &args);
if (ret < 0)
goto error;
@@ -585,7 +594,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsps[j].crtcs_mask |= BIT(i);
- /* Store the VSP pointer and pipe index in the CRTC. */
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'renesas,vsps' specifier isn't present,
+ * default to 0 to remain compatible with older DT bindings.
+ */
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
@@ -614,6 +627,75 @@ error:
return ret;
}
+static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+{
+ const struct device_node *np = rcdu->dev->of_node;
+ unsigned int i;
+ int cells;
+
+ cells = of_property_count_u32_elems(np, "renesas,cmms");
+ if (cells == -EINVAL)
+ return 0;
+
+ if (cells > rcdu->num_crtcs) {
+ dev_err(rcdu->dev,
+ "Invalid number of entries in 'renesas,cmms'\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cells; ++i) {
+ struct platform_device *pdev;
+ struct device_link *link;
+ struct device_node *cmm;
+ int ret;
+
+ cmm = of_parse_phandle(np, "renesas,cmms", i);
+ if (IS_ERR(cmm)) {
+ dev_err(rcdu->dev,
+ "Failed to parse 'renesas,cmms' property\n");
+ return PTR_ERR(cmm);
+ }
+
+ if (!of_device_is_available(cmm)) {
+ /* It's fine to have a phandle to a non-enabled CMM. */
+ of_node_put(cmm);
+ continue;
+ }
+
+ pdev = of_find_device_by_node(cmm);
+ if (IS_ERR(pdev)) {
+ dev_err(rcdu->dev, "No device found for CMM%u\n", i);
+ of_node_put(cmm);
+ return PTR_ERR(pdev);
+ }
+
+ of_node_put(cmm);
+
+ /*
+ * -ENODEV is used to report that the CMM config option is
+ * disabled: return 0 and let the DU continue probing.
+ */
+ ret = rcar_cmm_init(pdev);
+ if (ret)
+ return ret == -ENODEV ? 0 : ret;
+
+ /*
+ * Enforce suspend/resume ordering by making the CMM a provider
+ * of the DU: CMM is suspended after and resumed before the DU.
+ */
+ link = device_link_add(rcdu->dev, &pdev->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(rcdu->dev,
+ "Failed to create device link to CMM%u\n", i);
+ return -EINVAL;
+ }
+
+ rcdu->cmms[i] = pdev;
+ }
+
+ return 0;
+}
+
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
@@ -704,6 +786,11 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
return ret;
}
+ /* Initialize the Color Management Modules. */
+ ret = rcar_du_cmm_init(rcdu);
+ if (ret)
+ return ret;
+
/* Create the CRTCs. */
for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
struct rcar_du_group *rgrp;
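
The rcar_du_vsps_init() change above keeps older DTBs working by falling back to the legacy property name when the vendor-prefixed one is absent. A small sketch of that lookup on its own (count_vsps() is a made-up helper for illustration):

	#include <linux/of.h>

	static int count_vsps(const struct device_node *np, const char **prop_name)
	{
		int ret;

		*prop_name = "renesas,vsps";
		ret = of_property_count_u32_elems(np, *prop_name);
		if (ret < 0) {
			/* Older DTBs used the unprefixed property name. */
			*prop_name = "vsps";
			ret = of_property_count_u32_elems(np, *prop_name);
		}

		return ret;
	}
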
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index bc87f080b170..fb9964949368 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -197,6 +197,11 @@
#define DEFR6_MLOS1 (1 << 2)
#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1)
+#define DEFR7 0x000ec
+#define DEFR7_CODE (0x7779 << 16)
+#define DEFR7_CMME1 BIT(6)
+#define DEFR7_CMME0 BIT(4)
+
/* -----------------------------------------------------------------------------
* R8A7790-only Control Registers
*/
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index ae07290bba6a..04efa78d70b6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
unsigned int i;
state = rcrtc->writeback.base.state;
- if (!state || !state->writeback_job || !state->writeback_job->fb)
+ if (!state || !state->writeback_job)
return;
fb = state->writeback_job->fb;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..8ffa4fbbdeb3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -16,10 +16,12 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -35,6 +37,12 @@ enum rcar_lvds_mode {
RCAR_LVDS_MODE_VESA = 4,
};
+enum rcar_lvds_link_type {
+ RCAR_LVDS_SINGLE_LINK = 0,
+ RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 1,
+ RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 2,
+};
+
#define RCAR_LVDS_QUIRK_LANES BIT(0) /* LVDS lanes 1 and 3 inverted */
#define RCAR_LVDS_QUIRK_GEN3_LVEN BIT(1) /* LVEN bit needs to be set on R8A77970/R8A7799x */
#define RCAR_LVDS_QUIRK_PWD BIT(2) /* PWD bit available (all of Gen3 but E3) */
@@ -64,18 +72,15 @@ struct rcar_lvds {
struct clk *dotclkin[2]; /* External DU clocks */
} clocks;
- struct drm_display_mode display_mode;
- enum rcar_lvds_mode mode;
-
struct drm_bridge *companion;
- bool dual_link;
+ enum rcar_lvds_link_type link_type;
};
-#define bridge_to_rcar_lvds(bridge) \
- container_of(bridge, struct rcar_lvds, bridge)
+#define bridge_to_rcar_lvds(b) \
+ container_of(b, struct rcar_lvds, bridge)
-#define connector_to_rcar_lvds(connector) \
- container_of(connector, struct rcar_lvds, connector)
+#define connector_to_rcar_lvds(c) \
+ container_of(c, struct rcar_lvds, connector)
static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data)
{
@@ -90,7 +95,7 @@ static int rcar_lvds_connector_get_modes(struct drm_connector *connector)
{
struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
- return drm_panel_get_modes(lvds->panel);
+ return drm_panel_get_modes(lvds->panel, connector);
}
static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
@@ -401,10 +406,53 @@ EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
* Bridge
*/
-static void rcar_lvds_enable(struct drm_bridge *bridge)
+static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
+ const struct drm_connector *connector)
+{
+ const struct drm_display_info *info;
+ enum rcar_lvds_mode mode;
+
+ /*
+ * There is no API yet to retrieve LVDS mode from a bridge, only panels
+ * are supported.
+ */
+ if (!lvds->panel)
+ return RCAR_LVDS_MODE_JEIDA;
+
+ info = &connector->display_info;
+ if (!info->num_bus_formats || !info->bus_formats) {
+ dev_warn(lvds->dev,
+ "no LVDS bus format reported, using JEIDA\n");
+ return RCAR_LVDS_MODE_JEIDA;
+ }
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ mode = RCAR_LVDS_MODE_JEIDA;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ mode = RCAR_LVDS_MODE_VESA;
+ break;
+ default:
+ dev_warn(lvds->dev,
+ "unsupported LVDS bus format 0x%04x, using JEIDA\n",
+ info->bus_formats[0]);
+ return RCAR_LVDS_MODE_JEIDA;
+ }
+
+ if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
+ mode |= RCAR_LVDS_MODE_MIRROR;
+
+ return mode;
+}
+
+static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- const struct drm_display_mode *mode = &lvds->display_mode;
u32 lvdhcr;
u32 lvdcr0;
int ret;
@@ -414,8 +462,9 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
return;
/* Enable the companion LVDS encoder in dual-link mode. */
- if (lvds->dual_link && lvds->companion)
- lvds->companion->funcs->enable(lvds->companion);
+ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
+ __rcar_lvds_atomic_enable(lvds->companion, state, crtc,
+ connector);
/*
* Hardcode the channels and control signals routing for now.
@@ -439,30 +488,51 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) {
- /*
- * Configure vertical stripe based on the mode of operation of
- * the connected device.
- */
- rcar_lvds_write(lvds, LVDSTRIPE,
- lvds->dual_link ? LVDSTRIPE_ST_ON : 0);
+ u32 lvdstripe = 0;
+
+ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK) {
+ /*
+ * By default we generate even pixels from the primary
+ * encoder and odd pixels from the companion encoder.
+ * Swap pixels around if the sink requires odd pixels
+ * from the primary encoder and even pixels from the
+ * companion encoder.
+ */
+ bool swap_pixels = lvds->link_type ==
+ RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+
+ /*
+ * Configure vertical stripe since we are dealing with
+ * an LVDS dual-link connection.
+ *
+ * ST_SWAP is reserved for the companion encoder; only
+ * set it in the primary encoder.
+ */
+ lvdstripe = LVDSTRIPE_ST_ON
+ | (lvds->companion && swap_pixels ?
+ LVDSTRIPE_ST_SWAP : 0);
+ }
+ rcar_lvds_write(lvds, LVDSTRIPE, lvdstripe);
}
/*
* PLL clock configuration on all instances but the companion in
* dual-link mode.
*/
- if (!lvds->dual_link || lvds->companion)
+ if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) {
+ const struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_display_mode *mode =
+ &crtc_state->adjusted_mode;
+
lvds->info->pll_setup(lvds, mode->clock * 1000);
+ }
/* Set the LVDS mode and select the input. */
- lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
+ lvdcr0 = rcar_lvds_get_lvds_mode(lvds, connector) << LVDCR0_LVMD_SHIFT;
if (lvds->bridge.encoder) {
- /*
- * FIXME: We should really retrieve the CRTC through the state,
- * but how do we get a state pointer?
- */
- if (drm_crtc_index(lvds->bridge.encoder->crtc) == 2)
+ if (drm_crtc_index(crtc) == 2)
lvdcr0 |= LVDCR0_DUSEL;
}
@@ -519,7 +589,21 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
}
}
-static void rcar_lvds_disable(struct drm_bridge *bridge)
+static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+
+ __rcar_lvds_atomic_enable(bridge, state, crtc, connector);
+}
+
+static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -533,8 +617,8 @@ static void rcar_lvds_disable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDPLLCR, 0);
/* Disable the companion LVDS encoder in dual-link mode. */
- if (lvds->dual_link && lvds->companion)
- lvds->companion->funcs->disable(lvds->companion);
+ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
+ lvds->companion->funcs->atomic_disable(lvds->companion, state);
clk_disable_unprepare(lvds->clocks.mod);
}
@@ -557,54 +641,6 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds)
-{
- struct drm_display_info *info = &lvds->connector.display_info;
- enum rcar_lvds_mode mode;
-
- /*
- * There is no API yet to retrieve LVDS mode from a bridge, only panels
- * are supported.
- */
- if (!lvds->panel)
- return;
-
- if (!info->num_bus_formats || !info->bus_formats) {
- dev_err(lvds->dev, "no LVDS bus format reported\n");
- return;
- }
-
- switch (info->bus_formats[0]) {
- case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
- case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
- mode = RCAR_LVDS_MODE_JEIDA;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
- mode = RCAR_LVDS_MODE_VESA;
- break;
- default:
- dev_err(lvds->dev, "unsupported LVDS bus format 0x%04x\n",
- info->bus_formats[0]);
- return;
- }
-
- if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
- mode |= RCAR_LVDS_MODE_MIRROR;
-
- lvds->mode = mode;
-}
-
-static void rcar_lvds_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-
- lvds->display_mode = *adjusted_mode;
-
- rcar_lvds_get_lvds_mode(lvds);
-}
-
static int rcar_lvds_attach(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -646,17 +682,16 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.detach = rcar_lvds_detach,
- .enable = rcar_lvds_enable,
- .disable = rcar_lvds_disable,
+ .atomic_enable = rcar_lvds_atomic_enable,
+ .atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
- .mode_set = rcar_lvds_mode_set,
};
bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- return lvds->dual_link;
+ return lvds->link_type != RCAR_LVDS_SINGLE_LINK;
}
EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
@@ -668,15 +703,16 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
{
const struct of_device_id *match;
struct device_node *companion;
+ struct device_node *port0, *port1;
+ struct rcar_lvds *companion_lvds;
struct device *dev = lvds->dev;
+ int dual_link;
int ret = 0;
/* Locate the companion LVDS encoder for dual-link operation, if any. */
companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
- if (!companion) {
- dev_err(dev, "Companion LVDS encoder not found\n");
- return -ENXIO;
- }
+ if (!companion)
+ return 0;
/*
* Sanity check: the companion encoder must have the same compatible
@@ -689,13 +725,68 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
goto done;
}
+ /*
+ * We need to work out if the sink is expecting us to function in
+ * dual-link mode. We do this by looking at the DT port nodes we are
+ * connected to, if they are marked as expecting even pixels and
+ * odd pixels than we need to enable vertical stripe output.
+ */
+ port0 = of_graph_get_port_by_id(dev->of_node, 1);
+ port1 = of_graph_get_port_by_id(companion, 1);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
+ of_node_put(port0);
+ of_node_put(port1);
+
+ switch (dual_link) {
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ lvds->link_type = RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+ break;
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
+ break;
+ default:
+ /*
+ * Early dual-link bridge specific implementations populate the
+ * timings field of drm_bridge. If the flag is set, we assume
+ * that we are expected to generate even pixels from the primary
+ * encoder, and odd pixels from the companion encoder.
+ */
+ if (lvds->next_bridge && lvds->next_bridge->timings &&
+ lvds->next_bridge->timings->dual_link)
+ lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
+ else
+ lvds->link_type = RCAR_LVDS_SINGLE_LINK;
+ }
+
+ if (lvds->link_type == RCAR_LVDS_SINGLE_LINK) {
+ dev_dbg(dev, "Single-link configuration detected\n");
+ goto done;
+ }
+
lvds->companion = of_drm_find_bridge(companion);
if (!lvds->companion) {
ret = -EPROBE_DEFER;
goto done;
}
- dev_dbg(dev, "Found companion encoder %pOF\n", companion);
+ dev_dbg(dev,
+ "Dual-link configuration detected (companion encoder %pOF)\n",
+ companion);
+
+ if (lvds->link_type == RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS)
+ dev_dbg(dev, "Data swapping required\n");
+
+ /*
+ * FIXME: We should not be messing with the companion encoder's private
+ * data from the primary encoder; we should rather let the companion
+ * encoder work things out on its own. However, the companion encoder
+ * doesn't hold a reference to the primary encoder, and
+ * drm_of_lvds_get_dual_link_pixel_order needs to be given references
+ * to the output ports of both encoders, so leave it like this for the
+ * time being.
+ */
+ companion_lvds = bridge_to_rcar_lvds(lvds->companion);
+ companion_lvds->link_type = lvds->link_type;
done:
of_node_put(companion);
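
For reference, a hedged sketch of what drm_of_lvds_get_dual_link_pixel_order() reports for the two port nodes compared above; the example_pixel_order() wrapper and its arguments are illustrative, and the return values are the ones handled by the switch in rcar_lvds_parse_dt_companion().

#include <linux/errno.h>
#include <linux/of.h>

#include <drm/drm_of.h>

/* Query which encoder drives odd pixels and which drives even pixels. */
static int example_pixel_order(const struct device_node *port_a,
                               const struct device_node *port_b)
{
        int order = drm_of_lvds_get_dual_link_pixel_order(port_a, port_b);

        switch (order) {
        case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
                /* port_a's encoder outputs odd pixels, port_b's even pixels */
                return order;
        case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
                /* port_a's encoder outputs even pixels, port_b's odd pixels */
                return order;
        default:
                /* The two ports do not describe a dual-link LVDS setup. */
                return -EINVAL;
        }
}
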
@@ -705,79 +796,17 @@ done:
static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
{
- struct device_node *local_output = NULL;
- struct device_node *remote_input = NULL;
- struct device_node *remote = NULL;
- struct device_node *node;
- bool is_bridge = false;
- int ret = 0;
-
- local_output = of_graph_get_endpoint_by_regs(lvds->dev->of_node, 1, 0);
- if (!local_output) {
- dev_dbg(lvds->dev, "unconnected port@1\n");
- ret = -ENODEV;
- goto done;
- }
-
- /*
- * Locate the connected entity and infer its type from the number of
- * endpoints.
- */
- remote = of_graph_get_remote_port_parent(local_output);
- if (!remote) {
- dev_dbg(lvds->dev, "unconnected endpoint %pOF\n", local_output);
- ret = -ENODEV;
- goto done;
- }
+ int ret;
- if (!of_device_is_available(remote)) {
- dev_dbg(lvds->dev, "connected entity %pOF is disabled\n",
- remote);
- ret = -ENODEV;
+ ret = drm_of_find_panel_or_bridge(lvds->dev->of_node, 1, 0,
+ &lvds->panel, &lvds->next_bridge);
+ if (ret)
goto done;
- }
- remote_input = of_graph_get_remote_endpoint(local_output);
-
- for_each_endpoint_of_node(remote, node) {
- if (node != remote_input) {
- /*
- * We've found one endpoint other than the input, this
- * must be a bridge.
- */
- is_bridge = true;
- of_node_put(node);
- break;
- }
- }
-
- if (is_bridge) {
- lvds->next_bridge = of_drm_find_bridge(remote);
- if (!lvds->next_bridge) {
- ret = -EPROBE_DEFER;
- goto done;
- }
-
- if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
- lvds->dual_link = lvds->next_bridge->timings
- ? lvds->next_bridge->timings->dual_link
- : false;
- } else {
- lvds->panel = of_drm_find_panel(remote);
- if (IS_ERR(lvds->panel)) {
- ret = PTR_ERR(lvds->panel);
- goto done;
- }
- }
-
- if (lvds->dual_link)
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
ret = rcar_lvds_parse_dt_companion(lvds);
done:
- of_node_put(local_output);
- of_node_put(remote_input);
- of_node_put(remote);
-
/*
* On D3/E3 the LVDS encoder provides a clock to the DU, which can be
* used for the DPAD output even when the LVDS output is not connected.
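
The simplified rcar_lvds_parse_dt() above delegates the port@1 lookup to drm_of_find_panel_or_bridge(). A minimal sketch of the usual call pattern follows; everything except the helper itself (the example_find_output() wrapper and its arguments) is illustrative.

#include <linux/of.h>

#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

/* Report what is connected at port@1, endpoint 0 of @np. */
static int example_find_output(const struct device_node *np,
                               struct drm_panel **panel,
                               struct drm_bridge **bridge)
{
        /*
         * On success exactly one of *panel / *bridge is set; -ENODEV means
         * nothing is connected, -EPROBE_DEFER means the connected device
         * has not been bound yet.
         */
        return drm_of_find_panel_or_bridge(np, 1, 0, panel, bridge);
}
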
@@ -844,8 +873,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
return 0;
}
+static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_LANES,
+ .pll_setup = rcar_lvds_pll_setup_gen2,
+};
+
+static const struct soc_device_attribute lvds_quirk_matches[] = {
+ {
+ .soc_id = "r8a7790", .revision = "ES1.*",
+ .data = &rcar_lvds_r8a7790es1_info,
+ },
+ { /* sentinel */ }
+};
+
static int rcar_lvds_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
struct resource *mem;
int ret;
@@ -859,6 +903,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
+ attr = soc_device_match(lvds_quirk_matches);
+ if (attr)
+ lvds->info = attr->data;
+
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
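
The probe path above lets soc_device_match() override the OF match data on r8a7790 ES1.x parts. A hedged sketch of that pattern with made-up names (example_quirks, example_pick_info, es1_info); only soc_device_match() and struct soc_device_attribute are taken from the code above.

#include <linux/sys_soc.h>

static const int es1_info;      /* stand-in for the ES1.x device info */

static const struct soc_device_attribute example_quirks[] = {
        { .soc_id = "r8a7790", .revision = "ES1.*", .data = &es1_info },
        { /* sentinel */ }
};

/* Revision-specific data, when it matches, overrides the OF match data. */
static const void *example_pick_info(const void *of_match_data)
{
        const struct soc_device_attribute *attr = soc_device_match(example_quirks);

        return attr ? attr->data : of_match_data;
}
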
@@ -895,12 +943,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.pll_setup = rcar_lvds_pll_setup_gen2,
};
-static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
- .gen = 2,
- .quirks = RCAR_LVDS_QUIRK_LANES,
- .pll_setup = rcar_lvds_pll_setup_gen2,
-};
-
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
@@ -931,8 +973,9 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
- { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
+ { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 6f4222f8beeb..310aa1546893 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -28,17 +28,17 @@ config ROCKCHIP_ANALOGIX_DP
on RK3288 or RK3399 based SoC, you should select this option.
config ROCKCHIP_CDN_DP
- bool "Rockchip cdn DP"
+ bool "Rockchip cdn DP"
depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
- help
+ help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable Dp on
RK3399 based SoC, you should select this
option.
config ROCKCHIP_DW_HDMI
- bool "Rockchip specific extensions for Synopsys DW HDMI"
- help
+ bool "Rockchip specific extensions for Synopsys DW HDMI"
+ help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
enable HDMI on RK3288 or RK3399 based SoC, you should select
@@ -46,6 +46,7 @@ config ROCKCHIP_DW_HDMI
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
+ select GENERIC_PHY_MIPI_DPHY
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 524684ba7f6a..17a9e7eb2130 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -4,8 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
- rockchip_drm_gem.o rockchip_drm_psr.o \
- rockchip_drm_vop.o rockchip_vop_reg.o
+ rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 9aae3d8e99ef..f38f5e113c6b 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -16,19 +16,18 @@
#include <linux/reset.h>
#include <linux/clk.h>
-#include <drm/drmP.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
-
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/bridge/analogix_dp.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#define RK3288_GRF_SOC_CON6 0x25c
@@ -73,29 +72,6 @@ struct rockchip_dp_device {
struct analogix_dp_plat_data plat_data;
};
-static int analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
-{
- struct rockchip_dp_device *dp = to_dp(encoder);
- int ret;
-
- if (!analogix_dp_psr_enabled(dp->adp))
- return 0;
-
- DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
-
- ret = rockchip_drm_wait_vact_end(dp->encoder.crtc,
- PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
- if (ret) {
- DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
- return -ETIMEDOUT;
- }
-
- if (enabled)
- return analogix_dp_enable_psr(dp->adp);
- else
- return analogix_dp_disable_psr(dp->adp);
-}
-
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
{
reset_control_assert(dp->rst);
@@ -126,21 +102,9 @@ static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
return ret;
}
-static int rockchip_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
-{
- struct rockchip_dp_device *dp = to_dp(plat_data);
-
- return rockchip_drm_psr_inhibit_put(&dp->encoder);
-}
-
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = to_dp(plat_data);
- int ret;
-
- ret = rockchip_drm_psr_inhibit_get(&dp->encoder);
- if (ret != 0)
- return ret;
clk_disable_unprepare(dp->pclk);
@@ -180,12 +144,42 @@ static void rockchip_dp_drm_encoder_mode_set(struct drm_encoder *encoder,
/* do nothing */
}
-static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
+static
+struct drm_crtc *rockchip_dp_drm_get_new_crtc(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct rockchip_dp_device *dp = to_dp(encoder);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int ret;
u32 val;
+ crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ /* Coming back from self refresh, nothing to do */
+ if (old_crtc_state && old_crtc_state->self_refresh_active)
+ return;
+
ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
if (ret < 0)
return;
@@ -210,9 +204,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
clk_disable_unprepare(dp->grfclk);
}
-static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
+static void rockchip_dp_drm_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- /* do nothing */
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
+ /* No crtc means we're doing a full shutdown */
+ if (!crtc)
+ return;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ /* If we're not entering self-refresh, no need to wait for vact */
+ if (!new_crtc_state || !new_crtc_state->self_refresh_active)
+ return;
+
+ ret = rockchip_drm_wait_vact_end(crtc, PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "line flag irq timed out\n");
}
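
The enable/disable paths above distinguish self-refresh (PSR) transitions from full modesets by inspecting the old and new CRTC states. A minimal sketch of that decision logic; the example_* helpers are made up, only the drm_atomic_get_*_crtc_state() accessors and the self_refresh_active flag come from the code above.

#include <drm/drm_atomic.h>

/* Coming back from self refresh rather than doing a full enable? */
static bool example_is_psr_exit(struct drm_atomic_state *state,
                                struct drm_crtc *crtc)
{
        struct drm_crtc_state *old_state =
                drm_atomic_get_old_crtc_state(state, crtc);

        return old_state && old_state->self_refresh_active;
}

/* Entering self refresh rather than doing a full shutdown? */
static bool example_is_psr_entry(struct drm_atomic_state *state,
                                 struct drm_crtc *crtc)
{
        struct drm_crtc_state *new_state =
                drm_atomic_get_new_crtc_state(state, crtc);

        return new_state && new_state->self_refresh_active;
}
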
static int
@@ -241,8 +253,8 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
.mode_set = rockchip_dp_drm_encoder_mode_set,
- .enable = rockchip_dp_drm_encoder_enable,
- .disable = rockchip_dp_drm_encoder_nop,
+ .atomic_enable = rockchip_dp_drm_encoder_enable,
+ .atomic_disable = rockchip_dp_drm_encoder_disable,
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
@@ -334,23 +346,16 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on_start = rockchip_dp_poweron_start;
- dp->plat_data.power_on_end = rockchip_dp_poweron_end;
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
- ret = rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
- if (ret < 0)
- goto err_cleanup_encoder;
-
dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
if (IS_ERR(dp->adp)) {
ret = PTR_ERR(dp->adp);
- goto err_unreg_psr;
+ goto err_cleanup_encoder;
}
return 0;
-err_unreg_psr:
- rockchip_drm_psr_unregister(&dp->encoder);
err_cleanup_encoder:
dp->encoder.funcs->destroy(&dp->encoder);
return ret;
@@ -362,7 +367,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
analogix_dp_unbind(dp->adp);
- rockchip_drm_psr_unregister(&dp->encoder);
dp->encoder.funcs->destroy(&dp->encoder);
dp->adp = ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 8c32c32be85c..eed594bd38d3 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -4,24 +4,23 @@
* Author: Chris Zhong <zyw@rock-chips.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
#include <sound/hdmi-codec.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"
@@ -478,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
cdn_dp_set_firmware_active(dp, false);
cdn_dp_clk_disable(dp);
dp->active = false;
- dp->link.rate = 0;
- dp->link.num_lanes = 0;
+ dp->max_lanes = 0;
+ dp->max_rate = 0;
if (!dp->connected) {
kfree(dp->edid);
dp->edid = NULL;
@@ -571,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
struct cdn_dp_port *port = cdn_dp_connected_port(dp);
u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
- if (!port || !dp->link.rate || !dp->link.num_lanes)
+ if (!port || !dp->max_rate || !dp->max_lanes)
return false;
if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
@@ -953,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Enabled and connected with a sink, re-train if requested */
} else if (!cdn_dp_check_link_status(dp)) {
- unsigned int rate = dp->link.rate;
- unsigned int lanes = dp->link.num_lanes;
+ unsigned int rate = dp->max_rate;
+ unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
@@ -967,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* If training result is changed, update the video config */
if (mode->clock &&
- (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+ (rate != dp->max_rate || lanes != dp->max_lanes)) {
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index f18a01e6cbc2..81ac9b658a70 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -7,10 +7,10 @@
#ifndef _CDN_DP_CORE_H
#define _CDN_DP_CORE_H
-#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+
#include "rockchip_drm_drv.h"
#define MAX_PHY 2
@@ -92,9 +92,10 @@ struct cdn_dp_device {
struct reset_control *core_rst;
struct audio_info audio_info;
struct video_info video_info;
- struct drm_dp_link link;
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
+ u8 max_lanes;
+ unsigned int max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 077c87021908..7361c07cb4a7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
if (ret)
goto err_get_training_status;
- dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]);
- dp->link.num_lanes = status[1];
+ dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
+ dp->max_lanes = status[1];
err_get_training_status:
if (ret)
@@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
return ret;
}
- DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
- dp->link.num_lanes);
+ DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
+ dp->max_lanes);
return ret;
}
@@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
(video->color_depth * 2) : (video->color_depth * 3);
- link_rate = dp->link.rate / 1000;
+ link_rate = dp->max_rate / 1000;
ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
if (ret)
@@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
do {
tu_size_reg += 2;
symbol = tu_size_reg * mode->clock * bit_per_pix;
- do_div(symbol, dp->link.num_lanes * link_rate * 8);
+ do_div(symbol, dp->max_lanes * link_rate * 8);
rem = do_div(symbol, 1000);
if (tu_size_reg > 64) {
ret = -EINVAL;
DRM_DEV_ERROR(dp->dev,
"tu error, clk:%d, lanes:%d, rate:%d\n",
- mode->clock, dp->link.num_lanes,
- link_rate);
+ mode->clock, dp->max_lanes, link_rate);
goto err_config_video;
}
} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
@@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
/* set the FIFO Buffer size */
val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
- val /= (dp->link.num_lanes * link_rate);
+ val /= (dp->max_lanes * link_rate);
val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
val += 2;
ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
@@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
u32 val;
if (audio->channels == 2) {
- if (dp->link.num_lanes == 1)
+ if (dp->max_lanes == 1)
sub_pckt_num = 2;
else
sub_pckt_num = 4;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index ef8486e5e2cd..6e1270e45f97 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -6,20 +6,22 @@
* Nickey Yang <nickey.yang@rock-chips.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/bridge/dw_mipi_dsi.h>
-#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+
#include <video/mipi_display.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -138,6 +140,12 @@
#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
+#define PX30_GRF_PD_VO_CON1 0x0438
+#define PX30_DSI_FORCETXSTOPMODE (0xf << 7)
+#define PX30_DSI_FORCERXMODE BIT(6)
+#define PX30_DSI_TURNDISABLE BIT(5)
+#define PX30_DSI_LCDC_SEL BIT(0)
+
#define RK3288_GRF_SOC_CON6 0x025c
#define RK3288_DSI0_LCDC_SEL BIT(6)
#define RK3288_DSI1_LCDC_SEL BIT(9)
@@ -222,6 +230,10 @@ struct dw_mipi_dsi_rockchip {
bool is_slave;
struct dw_mipi_dsi_rockchip *slave;
+ /* optional external dphy */
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+
unsigned int lane_mbps; /* per lane */
u16 input_div;
u16 feedback_div;
@@ -358,6 +370,9 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
struct dw_mipi_dsi_rockchip *dsi = priv_data;
int ret, i, vco;
+ if (dsi->phy)
+ return 0;
+
/*
* Get vco from frequency(lane_mbps)
* vco frequency table
@@ -466,6 +481,28 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+static void dw_mipi_dsi_phy_power_on(void *priv_data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ int ret;
+
+ ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "failed to set phy mode: %d\n", ret);
+ return;
+ }
+
+ phy_configure(dsi->phy, &dsi->phy_opts);
+ phy_power_on(dsi->phy);
+}
+
+static void dw_mipi_dsi_phy_power_off(void *priv_data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+
+ phy_power_off(dsi->phy);
+}
+
static int
dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
@@ -503,6 +540,17 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
"DPHY clock frequency is out of range\n");
}
+ /* for an external phy, only the mipi_dphy_config is necessary */
+ if (dsi->phy) {
+ phy_mipi_dphy_get_default_config(mode->clock * 1000 * 10 / 8,
+ bpp, lanes,
+ &dsi->phy_opts.mipi_dphy);
+ dsi->lane_mbps = target_mbps;
+ *lane_mbps = dsi->lane_mbps;
+
+ return 0;
+ }
+
fin = clk_get_rate(dsi->pllref_clk);
fout = target_mbps * USEC_PER_SEC;
@@ -558,9 +606,89 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
return 0;
}
+struct hstt {
+ unsigned int maxfreq;
+ struct dw_mipi_dsi_dphy_timing timing;
+};
+
+#define HSTT(_maxfreq, _c_lp2hs, _c_hs2lp, _d_lp2hs, _d_hs2lp) \
+{ \
+ .maxfreq = _maxfreq, \
+ .timing = { \
+ .clk_lp2hs = _c_lp2hs, \
+ .clk_hs2lp = _c_hs2lp, \
+ .data_lp2hs = _d_lp2hs, \
+ .data_hs2lp = _d_hs2lp, \
+ } \
+}
+
+/* Table A-3 High-Speed Transition Times */
+struct hstt hstt_table[] = {
+ HSTT( 90, 32, 20, 26, 13),
+ HSTT( 100, 35, 23, 28, 14),
+ HSTT( 110, 32, 22, 26, 13),
+ HSTT( 130, 31, 20, 27, 13),
+ HSTT( 140, 33, 22, 26, 14),
+ HSTT( 150, 33, 21, 26, 14),
+ HSTT( 170, 32, 20, 27, 13),
+ HSTT( 180, 36, 23, 30, 15),
+ HSTT( 200, 40, 22, 33, 15),
+ HSTT( 220, 40, 22, 33, 15),
+ HSTT( 240, 44, 24, 36, 16),
+ HSTT( 250, 48, 24, 38, 17),
+ HSTT( 270, 48, 24, 38, 17),
+ HSTT( 300, 50, 27, 41, 18),
+ HSTT( 330, 56, 28, 45, 18),
+ HSTT( 360, 59, 28, 48, 19),
+ HSTT( 400, 61, 30, 50, 20),
+ HSTT( 450, 67, 31, 55, 21),
+ HSTT( 500, 73, 31, 59, 22),
+ HSTT( 550, 79, 36, 63, 24),
+ HSTT( 600, 83, 37, 68, 25),
+ HSTT( 650, 90, 38, 73, 27),
+ HSTT( 700, 95, 40, 77, 28),
+ HSTT( 750, 102, 40, 84, 28),
+ HSTT( 800, 106, 42, 87, 30),
+ HSTT( 850, 113, 44, 93, 31),
+ HSTT( 900, 118, 47, 98, 32),
+ HSTT( 950, 124, 47, 102, 34),
+ HSTT(1000, 130, 49, 107, 35),
+ HSTT(1050, 135, 51, 111, 37),
+ HSTT(1100, 139, 51, 114, 38),
+ HSTT(1150, 146, 54, 120, 40),
+ HSTT(1200, 153, 57, 125, 41),
+ HSTT(1250, 158, 58, 130, 42),
+ HSTT(1300, 163, 58, 135, 44),
+ HSTT(1350, 168, 60, 140, 45),
+ HSTT(1400, 172, 64, 144, 47),
+ HSTT(1450, 176, 65, 148, 48),
+ HSTT(1500, 181, 66, 153, 50)
+};
+
+static int
+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi_dphy_timing *timing)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hstt_table); i++)
+ if (lane_mbps < hstt_table[i].maxfreq)
+ break;
+
+ if (i == ARRAY_SIZE(hstt_table))
+ i--;
+
+ *timing = hstt_table[i].timing;
+
+ return 0;
+}
+
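
To illustrate the lookup above: dw_mipi_dsi_phy_get_timing() selects the first row whose maxfreq is strictly greater than lane_mbps and falls back to the last row for higher rates. A small sketch (example_timing_lookup() is made up; the priv_data argument is unused by the function, so NULL suffices here):

static void example_timing_lookup(void)
{
        struct dw_mipi_dsi_dphy_timing t;

        /* 500 is not < 500, so the 550 MHz row is used: clk_lp2hs = 79. */
        dw_mipi_dsi_phy_get_timing(NULL, 500, &t);

        /* Rates past the end of the table fall back to the 1500 MHz row. */
        dw_mipi_dsi_phy_get_timing(NULL, 2000, &t);
}
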
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
.init = dw_mipi_dsi_phy_init,
+ .power_on = dw_mipi_dsi_phy_power_on,
+ .power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+ .get_timing = dw_mipi_dsi_phy_get_timing,
};
static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
@@ -915,16 +1043,33 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
}
if (!dsi->cdata) {
- dev_err(dev, "no dsi-config for %s node\n", np->name);
+ DRM_DEV_ERROR(dev, "no dsi-config for %s node\n", np->name);
return -EINVAL;
}
+ /* try to get a possible external dphy */
+ dsi->phy = devm_phy_optional_get(dev, "dphy");
+ if (IS_ERR(dsi->phy)) {
+ ret = PTR_ERR(dsi->phy);
+ DRM_DEV_ERROR(dev, "failed to get mipi dphy: %d\n", ret);
+ return ret;
+ }
+
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
- ret = PTR_ERR(dsi->pllref_clk);
- DRM_DEV_ERROR(dev,
- "Unable to get pll reference clock: %d\n", ret);
- return ret;
+ if (dsi->phy) {
+ /*
+ * If an external phy is present, the pll is
+ * generated there.
+ */
+ dsi->pllref_clk = NULL;
+ } else {
+ ret = PTR_ERR(dsi->pllref_clk);
+ DRM_DEV_ERROR(dev,
+ "Unable to get pll reference clock: %d\n",
+ ret);
+ return ret;
+ }
}
if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
@@ -988,6 +1133,24 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
return 0;
}
+static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
+ {
+ .reg = 0xff450000,
+ .lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
+ .lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
+ PX30_DSI_LCDC_SEL),
+
+ .lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
+ .lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
+ PX30_DSI_FORCERXMODE |
+ PX30_DSI_FORCETXSTOPMODE),
+
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
{
.reg = 0xff960000,
@@ -1056,6 +1219,9 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
{
+ .compatible = "rockchip,px30-mipi-dsi",
+ .data = &px30_chip_data,
+ }, {
.compatible = "rockchip,rk3288-mipi-dsi",
.data = &rk3288_chip_data,
}, {
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index cdc304d4cd02..7f56d8c3491d 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -10,11 +10,10 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/dw_hdmi.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -451,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
.phy_force_vendor = true,
+ .use_drm_infoframe = true,
};
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
@@ -465,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
.cur_ctr = rockchip_cur_ctr,
.phy_config = rockchip_phy_config,
.phy_data = &rk3399_chip_data,
+ .use_drm_infoframe = true,
};
static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8ca98d294d0..e5864e823020 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -15,10 +15,9 @@
#include <linux/mutex.h>
#include <linux/of_device.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
@@ -625,8 +624,10 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&inno_hdmi_connector_helper_funcs);
- drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &inno_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc);
drm_connector_attach_encoder(&hdmi->connector, encoder);
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 85fc5f01f761..fe203d38664e 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -564,9 +564,10 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&rk3066_hdmi_connector_helper_funcs);
- drm_connector_init(drm, &hdmi->connector,
- &rk3066_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &rk3066_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc);
drm_connector_attach_encoder(&hdmi->connector, encoder);
@@ -640,6 +641,9 @@ static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
if (msgs->addr == DDC_ADDR)
hdmi->i2c->ddc_addr = msgs->buf[0];
+ /* Set edid fifo first address. */
+ hdmi_writeb(hdmi, HDMI_EDID_FIFO_ADDR, 0x00);
+
/* Set edid word address 0x00/0x80. */
hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
@@ -743,7 +747,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct rk3066_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -753,12 +756,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 53d2c5bd61dc..20ecb1508a22 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -6,11 +6,6 @@
* based on exynos_drm_drv.c
*/
-#include <drm/drmP.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
#include <linux/pm_runtime.h>
@@ -21,6 +16,13 @@
#include <linux/console.h>
#include <linux/iommu.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_fbdev.h"
@@ -212,16 +214,13 @@ static const struct file_operations rockchip_drm_driver_fops = {
};
static struct drm_driver rockchip_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
.gem_prime_vmap = rockchip_gem_prime_vmap,
@@ -330,8 +329,7 @@ static struct component_match *rockchip_drm_match_add(struct device *dev)
struct device *p = NULL, *d;
do {
- d = bus_find_device(&platform_bus_type, p, &drv->driver,
- (void *)platform_bus_type.match);
+ d = platform_find_device_by_driver(p, &drv->driver);
put_device(p);
p = d;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 64ca87cf6d50..221e72e71432 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -5,18 +5,18 @@
*/
#include <linux/kernel.h>
+
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
-#include "rockchip_drm_psr.h"
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.destroy = drm_gem_fb_destroy,
@@ -53,87 +53,12 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
return fb;
}
-static struct drm_framebuffer *
-rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
- struct drm_framebuffer *fb;
- struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
- struct drm_gem_object *obj;
- int num_planes = min_t(int, info->num_planes, ROCKCHIP_MAX_FB_BUFFER);
- int ret;
- int i;
-
- for (i = 0; i < num_planes; i++) {
- unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
- unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
- unsigned int min_size;
-
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to lookup GEM object\n");
- ret = -ENXIO;
- goto err_gem_object_unreference;
- }
-
- min_size = (height - 1) * mode_cmd->pitches[i] +
- mode_cmd->offsets[i] +
- width * info->cpp[i];
-
- if (obj->size < min_size) {
- drm_gem_object_put_unlocked(obj);
- ret = -EINVAL;
- goto err_gem_object_unreference;
- }
- objs[i] = obj;
- }
-
- fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_gem_object_unreference;
- }
-
- return fb;
-
-err_gem_object_unreference:
- for (i--; i >= 0; i--)
- drm_gem_object_put_unlocked(objs[i]);
- return ERR_PTR(ret);
-}
-
-static void
-rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
-{
- struct drm_device *dev = old_state->dev;
-
- rockchip_drm_psr_inhibit_get_state(old_state);
-
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
-
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
-
- drm_atomic_helper_commit_planes(dev, old_state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- rockchip_drm_psr_inhibit_put_state(old_state);
-
- drm_atomic_helper_commit_hw_done(old_state);
-
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
-
- drm_atomic_helper_cleanup_planes(dev, old_state);
-}
-
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
- .atomic_commit_tail = rockchip_atomic_helper_commit_tail_rpm,
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
- .fb_create = rockchip_user_fb_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index bb8ac18298f6..521fe42ac5e2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -5,8 +5,8 @@
*/
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
@@ -27,7 +27,7 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
}
-static struct fb_ops rockchip_drm_fbdev_ops = {
+static const struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index ba9e77acbe16..7582d0e6a60a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -4,14 +4,14 @@
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
-#include <linux/dma-buf.h>
-#include <linux/iommu.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
@@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
kfree(rk_obj);
}
-struct rockchip_gem_object *
+static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
deleted file mode 100644
index b604747fe453..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author: Yakir Yang <ykk@rock-chips.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_probe_helper.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_psr.h"
-
-#define PSR_FLUSH_TIMEOUT_MS 100
-
-struct psr_drv {
- struct list_head list;
- struct drm_encoder *encoder;
-
- struct mutex lock;
- int inhibit_count;
- bool enabled;
-
- struct delayed_work flush_work;
-
- int (*set)(struct drm_encoder *encoder, bool enable);
-};
-
-static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder)
-{
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
- struct psr_drv *psr;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_for_each_entry(psr, &drm_drv->psr_list, list) {
- if (psr->encoder == encoder)
- goto out;
- }
- psr = ERR_PTR(-ENODEV);
-
-out:
- mutex_unlock(&drm_drv->psr_list_lock);
- return psr;
-}
-
-static int psr_set_state_locked(struct psr_drv *psr, bool enable)
-{
- int ret;
-
- if (psr->inhibit_count > 0)
- return -EINVAL;
-
- if (enable == psr->enabled)
- return 0;
-
- ret = psr->set(psr->encoder, enable);
- if (ret)
- return ret;
-
- psr->enabled = enable;
- return 0;
-}
-
-static void psr_flush_handler(struct work_struct *work)
-{
- struct psr_drv *psr = container_of(to_delayed_work(work),
- struct psr_drv, flush_work);
-
- mutex_lock(&psr->lock);
- psr_set_state_locked(psr, true);
- mutex_unlock(&psr->lock);
-}
-
-/**
- * rockchip_drm_psr_inhibit_put - release PSR inhibit on given encoder
- * @encoder: encoder to obtain the PSR encoder
- *
- * Decrements PSR inhibit count on given encoder. Should be called only
- * for a PSR inhibit count increment done before. If PSR inhibit counter
- * reaches zero, PSR flush work is scheduled to make the hardware enter
- * PSR mode in PSR_FLUSH_TIMEOUT_MS.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder)
-{
- struct psr_drv *psr = find_psr_by_encoder(encoder);
-
- if (IS_ERR(psr))
- return PTR_ERR(psr);
-
- mutex_lock(&psr->lock);
- --psr->inhibit_count;
- WARN_ON(psr->inhibit_count < 0);
- if (!psr->inhibit_count)
- mod_delayed_work(system_wq, &psr->flush_work,
- PSR_FLUSH_TIMEOUT_MS);
- mutex_unlock(&psr->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
-
-void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_encoder *encoder;
- u32 encoder_mask = 0;
- int i;
-
- for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
- encoder_mask |= crtc_state->encoder_mask;
- encoder_mask |= crtc->state->encoder_mask;
- }
-
- drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
- rockchip_drm_psr_inhibit_get(encoder);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get_state);
-
-void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_encoder *encoder;
- u32 encoder_mask = 0;
- int i;
-
- for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
- encoder_mask |= crtc_state->encoder_mask;
- encoder_mask |= crtc->state->encoder_mask;
- }
-
- drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
- rockchip_drm_psr_inhibit_put(encoder);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put_state);
-
-/**
- * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
- * @encoder: encoder to obtain the PSR encoder
- *
- * Increments PSR inhibit count on given encoder. This function guarantees
- * that after it returns PSR is turned off on given encoder and no PSR-related
- * hardware state change occurs at least until a matching call to
- * rockchip_drm_psr_inhibit_put() is done.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder)
-{
- struct psr_drv *psr = find_psr_by_encoder(encoder);
-
- if (IS_ERR(psr))
- return PTR_ERR(psr);
-
- mutex_lock(&psr->lock);
- psr_set_state_locked(psr, false);
- ++psr->inhibit_count;
- mutex_unlock(&psr->lock);
- cancel_delayed_work_sync(&psr->flush_work);
-
- return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get);
-
-static void rockchip_drm_do_flush(struct psr_drv *psr)
-{
- cancel_delayed_work_sync(&psr->flush_work);
-
- mutex_lock(&psr->lock);
- if (!psr_set_state_locked(psr, false))
- mod_delayed_work(system_wq, &psr->flush_work,
- PSR_FLUSH_TIMEOUT_MS);
- mutex_unlock(&psr->lock);
-}
-
-/**
- * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders
- * @dev: drm device
- *
- * Disable the PSR function for all registered encoders, and then enable the
- * PSR function back after PSR_FLUSH_TIMEOUT. If encoder PSR state have been
- * changed during flush time, then keep the state no change after flush
- * timeout.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-void rockchip_drm_psr_flush_all(struct drm_device *dev)
-{
- struct rockchip_drm_private *drm_drv = dev->dev_private;
- struct psr_drv *psr;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_for_each_entry(psr, &drm_drv->psr_list, list)
- rockchip_drm_do_flush(psr);
- mutex_unlock(&drm_drv->psr_list_lock);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
-
-/**
- * rockchip_drm_psr_register - register encoder to psr driver
- * @encoder: encoder that obtain the PSR function
- * @psr_set: call back to set PSR state
- *
- * The function returns with PSR inhibit counter initialized with one
- * and the caller (typically encoder driver) needs to call
- * rockchip_drm_psr_inhibit_put() when it becomes ready to accept PSR
- * enable request.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int rockchip_drm_psr_register(struct drm_encoder *encoder,
- int (*psr_set)(struct drm_encoder *, bool enable))
-{
- struct rockchip_drm_private *drm_drv;
- struct psr_drv *psr;
-
- if (!encoder || !psr_set)
- return -EINVAL;
-
- drm_drv = encoder->dev->dev_private;
-
- psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
- if (!psr)
- return -ENOMEM;
-
- INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
- mutex_init(&psr->lock);
-
- psr->inhibit_count = 1;
- psr->enabled = false;
- psr->encoder = encoder;
- psr->set = psr_set;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_add_tail(&psr->list, &drm_drv->psr_list);
- mutex_unlock(&drm_drv->psr_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_register);
-
-/**
- * rockchip_drm_psr_unregister - unregister encoder to psr driver
- * @encoder: encoder that obtain the PSR function
- * @psr_set: call back to set PSR state
- *
- * It is expected that the PSR inhibit counter is 1 when this function is
- * called, which corresponds to a state when related encoder has been
- * disconnected from any CRTCs and its driver called
- * rockchip_drm_psr_inhibit_get() to stop the PSR logic.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
-{
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
- struct psr_drv *psr, *n;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
- if (psr->encoder == encoder) {
- /*
- * Any other value would mean that the encoder
- * is still in use.
- */
- WARN_ON(psr->inhibit_count != 1);
-
- list_del(&psr->list);
- kfree(psr);
- }
- }
- mutex_unlock(&drm_drv->psr_list_lock);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_unregister);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
deleted file mode 100644
index 28a9c399114e..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author: Yakir Yang <ykk@rock-chips.com>
- */
-
-#ifndef __ROCKCHIP_DRM_PSR___
-#define __ROCKCHIP_DRM_PSR___
-
-void rockchip_drm_psr_flush_all(struct drm_device *dev);
-
-int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
-int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
-
-void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state);
-void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state);
-
-int rockchip_drm_psr_register(struct drm_encoder *encoder,
- int (*psr_set)(struct drm_encoder *, bool enable));
-void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
-
-#endif /* __ROCKCHIP_DRM_PSR__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 09a790c2f3a1..d04b3492bdac 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -4,37 +4,38 @@
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_self_refresh_helper.h>
+#include <drm/drm_vblank.h>
+
#ifdef CONFIG_DRM_ANALOGIX_DP
#include <drm/bridge/analogix_dp.h>
#endif
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/component.h>
-#include <linux/overflow.h>
-
-#include <linux/reset.h>
-#include <linux/delay.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
-#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
@@ -79,7 +80,7 @@
vop_get_intr_type(vop, &vop->data->intr->name, type)
#define VOP_WIN_GET(vop, win, name) \
- vop_read_reg(vop, win->offset, win->phy->name)
+ vop_read_reg(vop, win->base, &win->phy->name)
#define VOP_WIN_HAS_REG(win, name) \
(!!(win->phy->name.mask))
@@ -124,6 +125,7 @@ struct vop {
bool is_enabled;
struct completion dsp_hold_completion;
+ unsigned int win_enabled;
/* protected by dev->event_lock */
struct drm_pending_vblank_event *event;
@@ -137,6 +139,7 @@ struct vop {
uint32_t *regsbak;
void __iomem *regs;
+ void __iomem *lut_regs;
/* physical map length of vop register */
uint32_t len;
@@ -528,8 +531,10 @@ static void vop_core_clks_disable(struct vop *vop)
clk_disable(vop->hclk);
}
-static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+static void vop_win_disable(struct vop *vop, const struct vop_win *vop_win)
{
+ const struct vop_win_data *win = vop_win->data;
+
if (win->phy->scl && win->phy->scl->ext) {
VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
@@ -538,9 +543,10 @@ static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
}
VOP_WIN_SET(vop, win, enable, 0);
+ vop->win_enabled &= ~BIT(VOP_WIN_TO_INDEX(vop_win));
}
-static int vop_enable(struct drm_crtc *crtc)
+static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
{
struct vop *vop = to_vop(crtc);
int ret, i;
@@ -580,12 +586,17 @@ static int vop_enable(struct drm_crtc *crtc)
* We need to make sure that all windows are disabled before we
* enable the crtc. Otherwise we might try to scan from a destroyed
* buffer later.
+ *
+ * When enabling after PSR, we don't need to worry about this since
+ * the buffer is guaranteed to be valid, and disabling the windows
+ * would cause screen glitches on PSR exit.
*/
- for (i = 0; i < vop->data->win_size; i++) {
- struct vop_win *vop_win = &vop->win[i];
- const struct vop_win_data *win = vop_win->data;
+ if (!old_state || !old_state->self_refresh_active) {
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
- vop_win_disable(vop, win);
+ vop_win_disable(vop, vop_win);
+ }
}
spin_unlock(&vop->reg_lock);
@@ -615,6 +626,25 @@ err_put_pm_runtime:
return ret;
}
+static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled)
+{
+ struct vop *vop = to_vop(crtc);
+ int i;
+
+ spin_lock(&vop->reg_lock);
+
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ VOP_WIN_SET(vop, win, enable,
+ enabled && (vop->win_enabled & BIT(i)));
+ }
+ vop_cfg_done(vop);
+
+ spin_unlock(&vop->reg_lock);
+}
+
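
A hedged sketch of the win_enabled bookkeeping introduced above: the bitmask remembers which windows were enabled so they can be blanked for self refresh and restored on exit. example_track_window() is illustrative; only the BIT()-per-window idea comes from the driver.

#include <linux/bits.h>
#include <linux/types.h>

/* Remember a window's enable bit so it can be restored after self refresh. */
static void example_track_window(u32 *win_enabled, unsigned int index, bool on)
{
        if (on)
                *win_enabled |= BIT(index);
        else
                *win_enabled &= ~BIT(index);
}
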
static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -622,9 +652,16 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
+ if (crtc->state->self_refresh_active)
+ rockchip_drm_set_win_enabled(crtc, false);
+
mutex_lock(&vop->vop_lock);
+
drm_crtc_vblank_off(crtc);
+ if (crtc->state->self_refresh_active)
+ goto out;
+
/*
* Vop standby will take effect at end of current frame,
* if dsp hold valid irq happen, it means standby complete.
@@ -655,6 +692,8 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable(vop->dclk);
vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
+
+out:
mutex_unlock(&vop->vop_lock);
if (crtc->state->event && !crtc->state->active) {
@@ -726,7 +765,6 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vop_win *vop_win = to_vop_win(plane);
- const struct vop_win_data *win = vop_win->data;
struct vop *vop = to_vop(old_state->crtc);
if (!old_state->crtc)
@@ -734,7 +772,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
- vop_win_disable(vop, win);
+ vop_win_disable(vop, vop_win);
spin_unlock(&vop->reg_lock);
}
@@ -873,6 +911,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
VOP_WIN_SET(vop, win, enable, 1);
+ vop->win_enabled |= BIT(win_index);
spin_unlock(&vop->reg_lock);
}
@@ -924,12 +963,10 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
swap(plane->state->fb, new_state->fb);
if (vop->is_enabled) {
- rockchip_drm_psr_inhibit_get_state(new_state->state);
vop_plane_atomic_update(plane, plane->state);
spin_lock(&vop->reg_lock);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
- rockchip_drm_psr_inhibit_put_state(new_state->state);
/*
* A scanout can still be occurring, so we can't drop the
@@ -1004,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct vop *vop = to_vop(crtc);
+ unsigned long rate;
- adjusted_mode->clock =
- DIV_ROUND_UP(clk_round_rate(vop->dclk,
- adjusted_mode->clock * 1000), 1000);
+ /*
+ * Clock craziness.
+ *
+ * Key points:
+ *
+ * - DRM works in kHz.
+ * - Clock framework works in Hz.
+ * - Rockchip's clock driver picks the clock rate that is the
+ * same as _OR LOWER_ than the one requested.
+ *
+ * Action plan:
+ *
+ * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
+ * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
+ * make 60000 kHz then the clock framework will actually give us
+ * the right clock.
+ *
+ * NOTE: if the PLL (maybe through a divider) could actually make
+ * a clock rate 999 Hz higher instead of the one we want then this
+ * could be a problem. Unfortunately there's not much we can do
+ * since it's baked into DRM to use kHz. It shouldn't matter in
+ * practice since Rockchip PLLs are controlled by tables and
+ * even if there is a divider in the middle I wouldn't expect PLL
+ * rates in the table that are just a few kHz different.
+ *
+ * 2. Get the clock framework to round the rate for us to tell us
+ * what it will actually make.
+ *
+ * 3. Store the rounded up rate so that we don't need to worry about
+ * this in the actual clk_set_rate().
+ */
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
}
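
A hedged sketch of the kHz/Hz round-trip described in the comment above; example_adjust_clock_khz() and the 148500 kHz figures are illustrative, only clk_round_rate() and DIV_ROUND_UP() come from the code.

#include <linux/clk.h>
#include <linux/kernel.h>

/* Mirror the kHz -> Hz -> kHz round-trip; the rates are made up. */
static int example_adjust_clock_khz(struct clk *dclk, int requested_khz)
{
        /* e.g. 148500 kHz requested: ask for 148500999 Hz ... */
        long rate = clk_round_rate(dclk, requested_khz * 1000 + 999);

        /* ... the PLL rounds down to 148500000 Hz, reported as 148500 kHz. */
        return DIV_ROUND_UP(rate, 1000);
}
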
+static bool vop_dsp_lut_is_enabled(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
+}
+
+static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
+{
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ u32 word;
+
+ word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ writel(word, vop->lut_regs + i * 4);
+ }
+}
+
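
For clarity, a minimal sketch of the 30-bit word layout written to the LUT RAM above; example_pack_lut_word() is made up and takes already-extracted 10-bit values, whereas the driver obtains them from 16-bit LUT entries via drm_color_lut_extract().

#include <linux/types.h>

/* Pack one gamma entry: red in [29:20], green in [19:10], blue in [9:0]. */
static u32 example_pack_lut_word(u16 r10, u16 g10, u16 b10)
{
        return ((u32)(r10 & 0x3ff) << 20) |
               ((u32)(g10 & 0x3ff) << 10) |
                (b10 & 0x3ff);
}
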
+static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_crtc_state *state = crtc->state;
+ unsigned int idle;
+ int ret;
+
+ if (!vop->lut_regs)
+ return;
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+
+ spin_lock(&vop->reg_lock);
+ vop_crtc_write_gamma_lut(vop, crtc);
+ VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ /*
+ * Only update the gamma LUT if the 'active' flag is not changed;
+ * otherwise it is updated by .atomic_enable.
+ */
+ if (crtc->state->color_mgmt_changed &&
+ !crtc->state->active_changed)
+ vop_crtc_gamma_set(vop, crtc, old_crtc_state);
+}
+
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1033,19 +1174,31 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
int dither_bpc = s->output_bpc ? s->output_bpc : 10;
int ret;
+ if (old_state && old_state->self_refresh_active) {
+ drm_crtc_vblank_on(crtc);
+ rockchip_drm_set_win_enabled(crtc, true);
+ return;
+ }
+
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
+
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
- ret = vop_enable(crtc);
+ ret = vop_enable(crtc, old_state);
if (ret) {
mutex_unlock(&vop->vop_lock);
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
-
- pin_pol = BIT(DCLK_INVERT);
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
+ pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
BIT(HSYNC_POSITIVE) : 0;
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
@@ -1054,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
- VOP_REG_SET(vop, output, rgb_en, 1);
+ VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, rgb_en, 1);
break;
case DRM_MODE_CONNECTOR_eDP:
+ VOP_REG_SET(vop, output, edp_dclk_pol, 1);
VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
+ VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
VOP_REG_SET(vop, output, mipi_dual_channel_en,
!!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- pin_pol &= ~BIT(DCLK_INVERT);
+ VOP_REG_SET(vop, output, dp_dclk_pol, 0);
VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, dp_en, 1);
break;
@@ -1149,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop)
synchronize_irq(vop->irq);
}
+static int vop_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ if (vop->lut_regs && crtc_state->color_mgmt_changed &&
+ crtc_state->gamma_lut) {
+ unsigned int len;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -1201,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
.mode_fixup = vop_crtc_mode_fixup,
+ .atomic_check = vop_crtc_atomic_check,
+ .atomic_begin = vop_crtc_atomic_begin,
.atomic_flush = vop_crtc_atomic_flush,
.atomic_enable = vop_crtc_atomic_enable,
.atomic_disable = vop_crtc_atomic_disable,
@@ -1319,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1476,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+ if (vop->lut_regs) {
+ drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
+ }
/*
* Create drm_planes for overlay windows with possible_crtcs restricted
@@ -1519,6 +1703,12 @@ static int vop_create_crtc(struct vop *vop)
init_completion(&vop->line_flag_completion);
crtc->port = port;
+ ret = drm_self_refresh_helper_init(crtc);
+ if (ret)
+ DRM_DEV_DEBUG_KMS(vop->dev,
+ "Failed to init %s with SR helpers %d, ignoring\n",
+ crtc->name, ret);
+
return 0;
err_cleanup_crtc:
@@ -1536,6 +1726,8 @@ static void vop_destroy_crtc(struct vop *vop)
struct drm_device *drm_dev = vop->drm_dev;
struct drm_plane *plane, *tmp;
+ drm_self_refresh_helper_cleanup(crtc);
+
of_node_put(crtc->port);
/*
@@ -1560,7 +1752,6 @@ static void vop_destroy_crtc(struct vop *vop)
static int vop_initial(struct vop *vop)
{
- const struct vop_data *vop_data = vop->data;
struct reset_control *ahb_rst;
int i, ret;
@@ -1627,12 +1818,13 @@ static int vop_initial(struct vop *vop)
VOP_REG_SET(vop, misc, global_regdone_en, 1);
VOP_REG_SET(vop, common, dsp_blank, 0);
- for (i = 0; i < vop_data->win_size; i++) {
- const struct vop_win_data *win = &vop_data->win[i];
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
int channel = i * 2 + 1;
VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
- vop_win_disable(vop, win);
+ vop_win_disable(vop, vop_win);
VOP_WIN_SET(vop, win, gate, 1);
}
@@ -1772,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ if (!vop_data->lut_size) {
+ DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ return -EINVAL;
+ }
+ vop->lut_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->lut_regs))
+ return PTR_ERR(vop->lut_regs);
+ }
+
vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
if (!vop->regsbak)
return -ENOMEM;
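The gamma write above packs each drm_color_lut entry into a single 10-bits-per-channel word before it is written to the VOP's LUT RAM. As a minimal sketch (illustration only, not part of the patch), the packing performed by vop_crtc_write_gamma_lut() corresponds to:

#include <linux/types.h>
#include <drm/drm_color_mgmt.h>

/* Sketch: reduce a 16-bit-per-channel DRM LUT entry to the VOP's 10:10:10 word. */
static u32 example_pack_vop_lut_entry(const struct drm_color_lut *e)
{
	return (drm_color_lut_extract(e->red, 10) << 20) |
	       (drm_color_lut_extract(e->green, 10) << 10) |
	        drm_color_lut_extract(e->blue, 10);
}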
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 2149a889c29d..0b3d18c457b2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -46,10 +46,15 @@ struct vop_modeset {
struct vop_output {
struct vop_reg pin_pol;
struct vop_reg dp_pin_pol;
+ struct vop_reg dp_dclk_pol;
struct vop_reg edp_pin_pol;
+ struct vop_reg edp_dclk_pol;
struct vop_reg hdmi_pin_pol;
+ struct vop_reg hdmi_dclk_pol;
struct vop_reg mipi_pin_pol;
+ struct vop_reg mipi_dclk_pol;
struct vop_reg rgb_pin_pol;
+ struct vop_reg rgb_dclk_pol;
struct vop_reg dp_en;
struct vop_reg edp_en;
struct vop_reg hdmi_en;
@@ -67,6 +72,7 @@ struct vop_common {
struct vop_reg dither_down_mode;
struct vop_reg dither_down_en;
struct vop_reg dither_up;
+ struct vop_reg dsp_lut_en;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
@@ -170,6 +176,7 @@ struct vop_data {
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
+ unsigned int lut_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
#define VOP_FEATURE_INTERNAL_RGB BIT(1)
@@ -294,8 +301,7 @@ enum dither_down_mode_sel {
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
- DEN_NEGATIVE = 2,
- DCLK_INVERT = 3
+ DEN_NEGATIVE = 2
};
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 830858a809e5..f25a36743cbd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -6,21 +6,23 @@
* Sandy Huang <hjc@rock-chips.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-
-#include <linux/component.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
#include <linux/pinctrl/devinfo.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -30,6 +32,8 @@
#define DISPLAY_OUTPUT_LVDS 1
#define DISPLAY_OUTPUT_DUAL_LVDS 2
+struct rockchip_lvds;
+
#define connector_to_lvds(c) \
container_of(c, struct rockchip_lvds, connector)
@@ -38,16 +42,12 @@
/**
* rockchip_lvds_soc_data - rockchip lvds Soc private data
- * @ch1_offset: lvds channel 1 registe offset
- * grf_soc_con6: general registe offset for LVDS contrl
- * grf_soc_con7: general registe offset for LVDS contrl
- * has_vop_sel: to indicate whether need to choose from different VOP.
+ * @probe: LVDS platform probe function
+ * @helper_funcs: LVDS encoder helper functions
*/
struct rockchip_lvds_soc_data {
- u32 ch1_offset;
- int grf_soc_con6;
- int grf_soc_con7;
- bool has_vop_sel;
+ int (*probe)(struct platform_device *pdev, struct rockchip_lvds *lvds);
+ const struct drm_encoder_helper_funcs *helper_funcs;
};
struct rockchip_lvds {
@@ -55,6 +55,7 @@ struct rockchip_lvds {
void __iomem *regs;
struct regmap *grf;
struct clk *pclk;
+ struct phy *dphy;
const struct rockchip_lvds_soc_data *soc_data;
int output; /* rgb lvds or dual lvds output */
int format; /* vesa or jeida format */
@@ -66,15 +67,16 @@ struct rockchip_lvds {
struct dev_pin_info *pins;
};
-static inline void lvds_writel(struct rockchip_lvds *lvds, u32 offset, u32 val)
+static inline void rk3288_writel(struct rockchip_lvds *lvds, u32 offset,
+ u32 val)
{
writel_relaxed(val, lvds->regs + offset);
if (lvds->output == DISPLAY_OUTPUT_LVDS)
return;
- writel_relaxed(val, lvds->regs + offset + lvds->soc_data->ch1_offset);
+ writel_relaxed(val, lvds->regs + offset + RK3288_LVDS_CH1_OFFSET);
}
-static inline int lvds_name_to_format(const char *s)
+static inline int rockchip_lvds_name_to_format(const char *s)
{
if (strncmp(s, "jeida-18", 8) == 0)
return LVDS_JEIDA_18;
@@ -86,7 +88,7 @@ static inline int lvds_name_to_format(const char *s)
return -EINVAL;
}
-static inline int lvds_name_to_output(const char *s)
+static inline int rockchip_lvds_name_to_output(const char *s)
{
if (strncmp(s, "rgb", 3) == 0)
return DISPLAY_OUTPUT_RGB;
@@ -98,7 +100,41 @@ static inline int lvds_name_to_output(const char *s)
return -EINVAL;
}
-static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
+static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct rockchip_lvds *lvds = connector_to_lvds(connector);
+ struct drm_panel *panel = lvds->panel;
+
+ return drm_panel_get_modes(panel, connector);
+}
+
+static const
+struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
+ .get_modes = rockchip_lvds_connector_get_modes,
+};
+
+static int
+rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+ return 0;
+}
+
+static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
{
int ret;
u32 val;
@@ -120,66 +156,73 @@ static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
if (lvds->output == DISPLAY_OUTPUT_RGB) {
val |= RK3288_LVDS_CH0_REG0_TTL_EN |
RK3288_LVDS_CH0_REG0_LANECK_EN;
- lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
- RK3288_LVDS_PLL_FBDIV_REG2(0x46));
- lvds_writel(lvds, RK3288_LVDS_CH0_REG4,
- RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG5,
- RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG4,
+ RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG5,
+ RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
} else {
val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
RK3288_LVDS_CH0_REG0_LANECK_EN;
- lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG1,
- RK3288_LVDS_CH0_REG1_LANECK_BIAS |
- RK3288_LVDS_CH0_REG1_LANE4_BIAS |
- RK3288_LVDS_CH0_REG1_LANE3_BIAS |
- RK3288_LVDS_CH0_REG1_LANE2_BIAS |
- RK3288_LVDS_CH0_REG1_LANE1_BIAS |
- RK3288_LVDS_CH0_REG1_LANE0_BIAS);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
- RK3288_LVDS_CH0_REG2_RESERVE_ON |
- RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
- RK3288_LVDS_PLL_FBDIV_REG2(0x46));
- lvds_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG1,
+ RK3288_LVDS_CH0_REG1_LANECK_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE4_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE3_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE2_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE1_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE0_BIAS);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_CH0_REG2_RESERVE_ON |
+ RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
}
- lvds_writel(lvds, RK3288_LVDS_CH0_REG3, RK3288_LVDS_PLL_FBDIV_REG3(0x46));
- lvds_writel(lvds, RK3288_LVDS_CH0_REGD, RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
- lvds_writel(lvds, RK3288_LVDS_CH0_REG20, RK3288_LVDS_CH0_REG20_LSB);
-
- lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
- lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG3,
+ RK3288_LVDS_PLL_FBDIV_REG3(0x46));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REGD,
+ RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG20,
+ RK3288_LVDS_CH0_REG20_LSB);
+
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
+ RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
+ RK3288_LVDS_CFG_REG21_TX_ENABLE);
return 0;
}
-static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
+static void rk3288_lvds_poweroff(struct rockchip_lvds *lvds)
{
int ret;
u32 val;
- lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
- lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
+ RK3288_LVDS_CFG_REG21_TX_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
+ RK3288_LVDS_CFG_REGC_PLL_ENABLE);
val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
val |= val << 16;
- ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+ ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
if (ret != 0)
DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
@@ -187,29 +230,8 @@ static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
clk_disable(lvds->pclk);
}
-static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct rockchip_lvds *lvds = connector_to_lvds(connector);
- struct drm_panel *panel = lvds->panel;
-
- return drm_panel_get_modes(panel);
-}
-
-static const
-struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
- .get_modes = rockchip_lvds_connector_get_modes,
-};
-
-static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int rk3288_lvds_grf_config(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
{
struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
@@ -233,22 +255,19 @@ static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
val |= (pin_dclk << 8) | (pin_hsync << 9);
val |= (0xffff << 16);
- ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
- if (ret != 0) {
+ ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
+ if (ret)
DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
- return;
- }
+
+ return ret;
}
-static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
- struct drm_encoder *encoder)
+static int rk3288_lvds_set_vop_source(struct rockchip_lvds *lvds,
+ struct drm_encoder *encoder)
{
u32 val;
int ret;
- if (!lvds->soc_data->has_vop_sel)
- return 0;
-
ret = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
if (ret < 0)
return ret;
@@ -257,56 +276,162 @@ static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
if (ret)
val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;
- ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con6, val);
+ ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON6, val);
if (ret < 0)
return ret;
return 0;
}
-static int
-rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static void rk3288_lvds_encoder_enable(struct drm_encoder *encoder)
{
- struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ int ret;
- s->output_mode = ROCKCHIP_OUT_MODE_P888;
- s->output_type = DRM_MODE_CONNECTOR_LVDS;
+ drm_panel_prepare(lvds->panel);
- return 0;
+ ret = rk3288_lvds_poweron(lvds);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ ret = rk3288_lvds_grf_config(encoder, mode);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ ret = rk3288_lvds_set_vop_source(lvds, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ drm_panel_enable(lvds->panel);
+}
+
+static void rk3288_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+ drm_panel_disable(lvds->panel);
+ rk3288_lvds_poweroff(lvds);
+ drm_panel_unprepare(lvds->panel);
+}
+
+static int px30_lvds_poweron(struct rockchip_lvds *lvds)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(lvds->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable LVDS mode */
+ return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
+}
+
+static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
+{
+ regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+ PX30_LVDS_MODE_EN(0) | PX30_LVDS_P2S_EN(0));
+
+ pm_runtime_put(lvds->dev);
+}
+
+static int px30_lvds_grf_config(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+ if (lvds->output != DISPLAY_OUTPUT_LVDS) {
+ DRM_DEV_ERROR(lvds->dev, "Unsupported display output %d\n",
+ lvds->output);
+ return -EINVAL;
+ }
+
+ /* Set format */
+ return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_FORMAT(lvds->format),
+ PX30_LVDS_FORMAT(lvds->format));
+}
+
+static int px30_lvds_set_vop_source(struct rockchip_lvds *lvds,
+ struct drm_encoder *encoder)
+{
+ int vop;
+
+ vop = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
+ if (vop < 0)
+ return vop;
+
+ return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_VOP_SEL(1),
+ PX30_LVDS_VOP_SEL(vop));
}
-static void rockchip_lvds_encoder_enable(struct drm_encoder *encoder)
+static void px30_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
int ret;
drm_panel_prepare(lvds->panel);
- ret = rockchip_lvds_poweron(lvds);
- if (ret < 0) {
- DRM_DEV_ERROR(lvds->dev, "failed to power on lvds: %d\n", ret);
+
+ ret = px30_lvds_poweron(lvds);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ ret = px30_lvds_grf_config(encoder, mode);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
drm_panel_unprepare(lvds->panel);
+ return;
}
- rockchip_lvds_grf_config(encoder, mode);
- rockchip_lvds_set_vop_source(lvds, encoder);
+
+ ret = px30_lvds_set_vop_source(lvds, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
drm_panel_enable(lvds->panel);
}
-static void rockchip_lvds_encoder_disable(struct drm_encoder *encoder)
+static void px30_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
drm_panel_disable(lvds->panel);
- rockchip_lvds_poweroff(lvds);
+ px30_lvds_poweroff(lvds);
drm_panel_unprepare(lvds->panel);
}
static const
-struct drm_encoder_helper_funcs rockchip_lvds_encoder_helper_funcs = {
- .enable = rockchip_lvds_encoder_enable,
- .disable = rockchip_lvds_encoder_disable,
+struct drm_encoder_helper_funcs rk3288_lvds_encoder_helper_funcs = {
+ .enable = rk3288_lvds_encoder_enable,
+ .disable = rk3288_lvds_encoder_disable,
+ .atomic_check = rockchip_lvds_encoder_atomic_check,
+};
+
+static const
+struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
+ .enable = px30_lvds_encoder_enable,
+ .disable = px30_lvds_encoder_disable,
.atomic_check = rockchip_lvds_encoder_atomic_check,
};
@@ -314,11 +439,88 @@ static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static int rk3288_lvds_probe(struct platform_device *pdev,
+ struct rockchip_lvds *lvds)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lvds->regs = devm_ioremap_resource(lvds->dev, res);
+ if (IS_ERR(lvds->regs))
+ return PTR_ERR(lvds->regs);
+
+ lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
+ if (IS_ERR(lvds->pclk)) {
+ DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
+ return PTR_ERR(lvds->pclk);
+ }
+
+ lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
+ GFP_KERNEL);
+ if (!lvds->pins)
+ return -ENOMEM;
+
+ lvds->pins->p = devm_pinctrl_get(lvds->dev);
+ if (IS_ERR(lvds->pins->p)) {
+ DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ } else {
+ lvds->pins->default_state =
+ pinctrl_lookup_state(lvds->pins->p, "lcdc");
+ if (IS_ERR(lvds->pins->default_state)) {
+ DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ }
+ }
+
+ ret = clk_prepare(lvds->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int px30_lvds_probe(struct platform_device *pdev,
+ struct rockchip_lvds *lvds)
+{
+ int ret;
+
+ /* MSB */
+ ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MSBSEL(1),
+ PX30_LVDS_MSBSEL(1));
+ if (ret)
+ return ret;
+
+ /* PHY */
+ lvds->dphy = devm_phy_get(&pdev->dev, "dphy");
+ if (IS_ERR(lvds->dphy))
+ return PTR_ERR(lvds->dphy);
+
+ ret = phy_init(lvds->dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
+ if (ret)
+ return ret;
+
+ return phy_power_on(lvds->dphy);
+}
+
static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
- .ch1_offset = 0x100,
- .grf_soc_con6 = 0x025c,
- .grf_soc_con7 = 0x0260,
- .has_vop_sel = true,
+ .probe = rk3288_lvds_probe,
+ .helper_funcs = &rk3288_lvds_encoder_helper_funcs,
+};
+
+static const struct rockchip_lvds_soc_data px30_lvds_data = {
+ .probe = px30_lvds_probe,
+ .helper_funcs = &px30_lvds_encoder_helper_funcs,
};
static const struct of_device_id rockchip_lvds_dt_ids[] = {
@@ -326,6 +528,10 @@ static const struct of_device_id rockchip_lvds_dt_ids[] = {
.compatible = "rockchip,rk3288-lvds",
.data = &rk3288_lvds_data
},
+ {
+ .compatible = "rockchip,px30-lvds",
+ .data = &px30_lvds_data
+ },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
@@ -377,7 +583,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
/* default set it as output rgb */
lvds->output = DISPLAY_OUTPUT_RGB;
else
- lvds->output = lvds_name_to_output(name);
+ lvds->output = rockchip_lvds_name_to_output(name);
if (lvds->output < 0) {
DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
@@ -389,7 +595,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
/* default set it as format vesa 18 */
lvds->format = LVDS_VESA_18;
else
- lvds->format = lvds_name_to_format(name);
+ lvds->format = rockchip_lvds_name_to_format(name);
if (lvds->format < 0) {
DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
@@ -409,7 +615,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_put_remote;
}
- drm_encoder_helper_add(encoder, &rockchip_lvds_encoder_helper_funcs);
+ drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
if (lvds->panel) {
connector = &lvds->connector;
@@ -470,8 +676,10 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+ const struct drm_encoder_helper_funcs *encoder_funcs;
- rockchip_lvds_encoder_disable(&lvds->encoder);
+ encoder_funcs = lvds->soc_data->helper_funcs;
+ encoder_funcs->disable(&lvds->encoder);
if (lvds->panel)
drm_panel_detach(lvds->panel);
pm_runtime_disable(dev);
@@ -489,7 +697,6 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rockchip_lvds *lvds;
const struct of_device_id *match;
- struct resource *res;
int ret;
if (!dev->of_node)
@@ -505,37 +712,6 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
return -ENODEV;
lvds->soc_data = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(lvds->regs))
- return PTR_ERR(lvds->regs);
-
- lvds->pclk = devm_clk_get(&pdev->dev, "pclk_lvds");
- if (IS_ERR(lvds->pclk)) {
- DRM_DEV_ERROR(dev, "could not get pclk_lvds\n");
- return PTR_ERR(lvds->pclk);
- }
-
- lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
- GFP_KERNEL);
- if (!lvds->pins)
- return -ENOMEM;
-
- lvds->pins->p = devm_pinctrl_get(lvds->dev);
- if (IS_ERR(lvds->pins->p)) {
- DRM_DEV_ERROR(dev, "no pinctrl handle\n");
- devm_kfree(lvds->dev, lvds->pins);
- lvds->pins = NULL;
- } else {
- lvds->pins->default_state =
- pinctrl_lookup_state(lvds->pins->p, "lcdc");
- if (IS_ERR(lvds->pins->default_state)) {
- DRM_DEV_ERROR(dev, "no default pinctrl state\n");
- devm_kfree(lvds->dev, lvds->pins);
- lvds->pins = NULL;
- }
- }
-
lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
if (IS_ERR(lvds->grf)) {
@@ -543,13 +719,14 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
return PTR_ERR(lvds->grf);
}
- dev_set_drvdata(dev, lvds);
-
- ret = clk_prepare(lvds->pclk);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to prepare pclk_lvds\n");
+ ret = lvds->soc_data->probe(pdev, lvds);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Platform initialization failed\n");
return ret;
}
+
+ dev_set_drvdata(dev, lvds);
+
ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to add component\n");
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index 029bad8e1a14..4ce967d23813 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -70,7 +70,10 @@
#define RK3288_LVDS_CFG_REG21 0x84
#define RK3288_LVDS_CFG_REG21_TX_ENABLE 0x92
#define RK3288_LVDS_CFG_REG21_TX_DISABLE 0x00
-#define RK3288_LVDS_CH1_OFFSET 0x100
+#define RK3288_LVDS_CH1_OFFSET 0x100
+
+#define RK3288_LVDS_GRF_SOC_CON6 0x025C
+#define RK3288_LVDS_GRF_SOC_CON7 0x0260
/* fbdiv value is split over 2 registers, with bit8 in reg2 */
#define RK3288_LVDS_PLL_FBDIV_REG2(_fbd) \
@@ -103,4 +106,18 @@
#define LVDS_VESA_18 2
#define LVDS_JEIDA_18 3
+#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
+
+#define PX30_LVDS_GRF_PD_VO_CON0 0x434
+#define PX30_LVDS_TIE_CLKS(val) HIWORD_UPDATE(val, 8, 8)
+#define PX30_LVDS_INVERT_CLKS(val) HIWORD_UPDATE(val, 9, 9)
+#define PX30_LVDS_INVERT_DCLK(val) HIWORD_UPDATE(val, 5, 5)
+
+#define PX30_LVDS_GRF_PD_VO_CON1 0x438
+#define PX30_LVDS_FORMAT(val) HIWORD_UPDATE(val, 14, 13)
+#define PX30_LVDS_MODE_EN(val) HIWORD_UPDATE(val, 12, 12)
+#define PX30_LVDS_MSBSEL(val) HIWORD_UPDATE(val, 11, 11)
+#define PX30_LVDS_P2S_EN(val) HIWORD_UPDATE(val, 6, 6)
+#define PX30_LVDS_VOP_SEL(val) HIWORD_UPDATE(val, 1, 1)
+
#endif /* _ROCKCHIP_LVDS_ */
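As background for the HIWORD_UPDATE() definition above: Rockchip GRF registers treat the upper 16 bits of each write as a per-bit write-enable mask, so the macro emits the mask and the shifted value in one word. A worked example (illustration only):

/*
 * PX30_LVDS_MODE_EN(1) == HIWORD_UPDATE(1, 12, 12)
 *                      == (GENMASK(12, 12) << 16) | (1 << 12)
 *                      == 0x10001000
 *
 * Writing this value through regmap changes only bit 12 of the GRF
 * register; all other bits keep their current contents because their
 * write-enable bits in the high half-word are zero.
 */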
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ce4d82d293e4..ae730275a34f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -5,16 +5,16 @@
* Sandy Huang <hjc@rock-chips.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
-#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/of_graph.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -136,7 +136,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(bridge))
return ERR_CAST(bridge);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 7b9c74750f6d..7a9d979c8d5d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -4,13 +4,19 @@
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
-#include <drm/drmP.h>
-
-#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
+#include "rockchip_drm_drv.h"
#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
{ \
@@ -209,9 +215,11 @@ static const struct vop_modeset px30_modeset = {
};
static const struct vop_output px30_output = {
- .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
- .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
};
@@ -593,6 +601,7 @@ static const struct vop_common rk3288_common = {
.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
@@ -641,6 +650,7 @@ static const struct vop_data rk3288_vop = {
.output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
+ .lut_size = 1024,
};
static const int rk3368_vop_intrs[] = {
@@ -712,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = {
};
static const struct vop_output rk3368_output = {
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
@@ -759,11 +773,16 @@ static const struct vop_data rk3366_vop = {
};
static const struct vop_output rk3399_output = {
- .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
@@ -867,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = {
};
static const struct vop_output rk3328_output = {
+ .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
- .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28),
+ .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
};
static const struct vop_misc rk3328_misc = {
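The register tables above split the old 4-bit *_pin_pol fields into a 3-bit sync/den polarity field plus a dedicated 1-bit *_dclk_pol field, matching the per-output dclk handling in vop_crtc_atomic_enable(). As a reading aid, the RK3368 layout implied by the VOP_REG() masks and shifts is roughly:

/*
 * RK3368_DSP_CTRL1, RGB output (inferred from the masks/shifts above):
 *   bit 16  hsync polarity  \
 *   bit 17  vsync polarity   } rgb_pin_pol  (mask 0x7, shift 16)
 *   bit 18  den polarity    /
 *   bit 19  dclk polarity     rgb_dclk_pol  (mask 0x1, shift 19)
 * HDMI, eDP and MIPI repeat the same pattern at shifts 20/23, 24/27
 * and 28/31 respectively.
 */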
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 2966fcfd9548..799bd11adb9c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -24,10 +24,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include "savage_drv.h"
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 1626f3967130..d79086498aff 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -28,8 +28,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpu_scheduler
#define TRACE_INCLUDE_FILE gpu_scheduler_trace
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 671c90f34ede..63bccd201b97 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -22,6 +22,10 @@
*/
#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include "gpu_scheduler_trace.h"
@@ -34,44 +38,40 @@
* submit to HW ring.
*
* @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm schedulers on which jobs from this
* entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm schedulers in sched_list
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
- * Note: the rq_list should have atleast one element to schedule
+ * Note: the sched_list should have at least one element to schedule
* the entity
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
atomic_t *guilty)
{
- int i;
-
- if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+ if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = NULL;
entity->guilty = guilty;
- entity->num_rq_list = num_rq_list;
- entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
- GFP_KERNEL);
- if (!entity->rq_list)
- return -ENOMEM;
-
- for (i = 0; i < num_rq_list; ++i)
- entity->rq_list[i] = rq_list[i];
+ entity->num_sched_list = num_sched_list;
+ entity->priority = priority;
+ entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+ entity->last_scheduled = NULL;
- if (num_rq_list)
- entity->rq = rq_list[0];
+ if (num_sched_list)
+ entity->rq = &sched_list[0]->sched_rq[entity->priority];
- entity->last_scheduled = NULL;
+ init_completion(&entity->entity_idle);
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
@@ -130,21 +130,21 @@ static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
struct drm_sched_rq *rq = NULL;
- unsigned int min_jobs = UINT_MAX, num_jobs;
+ unsigned int min_score = UINT_MAX, num_score;
int i;
- for (i = 0; i < entity->num_rq_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+ for (i = 0; i < entity->num_sched_list; ++i) {
+ struct drm_gpu_scheduler *sched = entity->sched_list[i];
- if (!entity->rq_list[i]->sched->ready) {
+ if (!entity->sched_list[i]->ready) {
DRM_WARN("sched%s is not ready, skipping", sched->name);
continue;
}
- num_jobs = atomic_read(&sched->num_jobs);
- if (num_jobs < min_jobs) {
- min_jobs = num_jobs;
- rq = entity->rq_list[i];
+ num_score = atomic_read(&sched->score);
+ if (num_score < min_score) {
+ min_score = num_score;
+ rq = &entity->sched_list[i]->sched_rq[entity->priority];
}
}
@@ -283,11 +283,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
*/
if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
+ /*
+ * Wait for thread to idle to make sure it isn't processing
+ * this entity.
*/
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
+ wait_for_completion(&entity->entity_idle);
+
}
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency,
@@ -301,7 +302,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
- kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
@@ -347,15 +347,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
}
/**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
- enum drm_sched_priority priority)
-{
- *rq = &(*rq)->sched->sched_rq[priority];
-}
-
-/**
* drm_sched_entity_set_priority - Sets priority of the entity
*
* @entity: scheduler entity
@@ -366,19 +357,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- unsigned int i;
-
spin_lock(&entity->rq_lock);
-
- for (i = 0; i < entity->num_rq_list; ++i)
- drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
- if (entity->rq) {
- drm_sched_rq_remove_entity(entity->rq, entity);
- drm_sched_entity_set_rq_priority(&entity->rq, priority);
- drm_sched_rq_add_entity(entity->rq, entity);
- }
-
+ entity->priority = priority;
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -483,20 +463,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
struct dma_fence *fence;
struct drm_sched_rq *rq;
- if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+ if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
return;
fence = READ_ONCE(entity->last_scheduled);
if (fence && !dma_fence_is_signaled(fence))
return;
+ spin_lock(&entity->rq_lock);
rq = drm_sched_entity_get_free_sched(entity);
- if (rq == entity->rq)
- return;
+ if (rq != entity->rq) {
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ entity->rq = rq;
+ }
- spin_lock(&entity->rq_lock);
- drm_sched_rq_remove_entity(entity->rq, entity);
- entity->rq = rq;
spin_unlock(&entity->rq_lock);
}
@@ -518,7 +498,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->num_jobs);
+ atomic_inc(&entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
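For driver code following this interface change: entities are now initialised with a priority and an array of schedulers instead of an array of run queues. A minimal sketch of the new call, where struct example_ctx and struct example_ring are hypothetical driver-side types:

#include <drm/gpu_scheduler.h>

/* Sketch only: ctx and ring are hypothetical driver structures. */
static int example_entity_setup(struct example_ctx *ctx,
				struct example_ring *ring)
{
	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };

	return drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list),
				     &ctx->guilty);
}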
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index d8d2dff9ea2f..8b45c3a1b84e 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -22,9 +22,11 @@
*/
#include <linux/kthread.h>
-#include <linux/wait.h>
+#include <linux/module.h>
#include <linux/sched.h>
-#include <drm/drmP.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
#include <drm/gpu_scheduler.h>
static struct kmem_cache *sched_fence_slab;
@@ -126,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops drm_sched_fence_ops_finished = {
+static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c1058eece16b..71ce6215956f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -47,8 +47,10 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/completion.h>
#include <uapi/linux/sched/types.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -90,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -108,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -133,6 +137,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
list_for_each_entry_continue(entity, &rq->entities, list) {
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -143,6 +148,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -283,10 +289,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+ /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
if (job) {
+ /*
+ * Remove the bad job so it cannot be freed by a concurrent
+ * drm_sched_get_cleanup_job(). It will be reinserted after
+ * sched->thread is parked, at which point it is safe.
+ */
+ list_del_init(&job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
job->sched->ops->timedout_job(job);
/*
@@ -297,6 +314,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+ } else {
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
spin_lock_irqsave(&sched->job_list_lock, flags);
@@ -369,6 +388,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
kthread_park(sched->thread);
/*
+ * Reinsert the bad job here - now it's safe, as
+ * drm_sched_get_cleanup_job() cannot race against us and release the
+ * bad job: we parked the thread, so any in-progress (earlier) cleanups
+ * have completed, and drm_sched_get_cleanup_job() will not be called
+ * again until the scheduler thread is unparked.
+ */
+ if (bad && bad->sched == sched)
+ /*
+ * Add at the head of the queue to reflect it was the earliest
+ * job extracted.
+ */
+ list_add(&bad->node, &sched->ring_mirror_list);
+
+ /*
* Iterate the job list from later to earlier one and either deactive
* their HW callbacks or remove them from mirror list if they already
* signaled.
@@ -478,6 +511,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
struct drm_sched_job *s_job, *tmp;
uint64_t guilty_context;
bool found_guilty = false;
+ struct dma_fence *fence;
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -491,7 +525,18 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
dma_fence_put(s_job->s_fence->parent);
- s_job->s_fence->parent = sched->ops->run_job(s_job);
+ fence = sched->ops->run_job(s_job);
+
+ if (IS_ERR_OR_NULL(fence)) {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
+ s_job->s_fence->parent = NULL;
+ } else {
+ s_job->s_fence->parent = fence;
+ }
+
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -612,7 +657,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->num_jobs);
+ atomic_dec(&sched->score);
trace_drm_sched_process_job(s_fence);
@@ -621,43 +666,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready for it to be destroyed.
*/
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_job *job;
unsigned long flags;
- /* Don't destroy jobs while the timeout worker is running */
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr))
- return;
-
+ /*
+ * Don't destroy jobs while the timeout worker is running, or while
+ * the thread is being parked and hence assumed not to touch
+ * ring_mirror_list.
+ */
+ if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !cancel_delayed_work(&sched->work_tdr)) ||
+ __kthread_should_park(sched->thread))
+ return NULL;
- while (!list_empty(&sched->ring_mirror_list)) {
- struct drm_sched_job *job;
+ spin_lock_irqsave(&sched->job_list_lock, flags);
- job = list_first_entry(&sched->ring_mirror_list,
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- if (!dma_fence_is_signaled(&job->s_fence->finished))
- break;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(job);
+ } else {
+ job = NULL;
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
}
- /* queue timeout for next job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ return job;
}
/**
@@ -697,17 +744,27 @@ static int drm_sched_main(void *param)
struct drm_sched_fence *s_fence;
struct drm_sched_job *sched_job;
struct dma_fence *fence;
+ struct drm_sched_job *cleanup_job = NULL;
wait_event_interruptible(sched->wake_up_worker,
- (drm_sched_cleanup_jobs(sched),
+ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop()));
+ kthread_should_stop());
+
+ if (cleanup_job) {
+ sched->ops->free_job(cleanup_job);
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
+ }
if (!entity)
continue;
sched_job = drm_sched_entity_pop_job(entity);
+
+ complete(&entity->entity_idle);
+
if (!sched_job)
continue;
@@ -719,7 +776,7 @@ static int drm_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
- if (fence) {
+ if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_process_job);
@@ -729,8 +786,12 @@ static int drm_sched_main(void *param)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
- } else
+ } else {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
drm_sched_process_job(NULL, &sched_job->cb);
+ }
wake_up(&sched->job_scheduled);
}
@@ -771,7 +832,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->num_jobs, 0);
+ atomic_set(&sched->score, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a seperate kernel thread */
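One consequence of the IS_ERR_OR_NULL() handling above is that a backend's run_job() callback may now return an ERR_PTR() when submission fails; the scheduler then propagates that error to the job's finished fence through dma_fence_set_error() instead of treating it as a missing parent fence. A hedged sketch of a driver-side callback under this contract, where example_hw_submit() is a hypothetical submission helper:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
	struct dma_fence *fence;
	int err;

	err = example_hw_submit(sched_job, &fence);	/* hypothetical helper */
	if (err)
		return ERR_PTR(err);	/* ends up in dma_fence_set_error() */

	/* The scheduler takes its own reference for s_fence->parent. */
	return fence;
}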
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index aae88f8a016c..0856e4b12f70 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o
+ test-drm_damage_helper.o test-drm_dp_mst_helper.o \
+ test-drm_rect.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index b45824ec7c8f..ceac7af9a172 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -9,6 +9,13 @@
#define cmdline_test(test) selftest(test, test)
+cmdline_test(drm_cmdline_test_force_d_only)
+cmdline_test(drm_cmdline_test_force_D_only_dvi)
+cmdline_test(drm_cmdline_test_force_D_only_hdmi)
+cmdline_test(drm_cmdline_test_force_D_only_not_digital)
+cmdline_test(drm_cmdline_test_force_e_only)
+cmdline_test(drm_cmdline_test_margin_only)
+cmdline_test(drm_cmdline_test_interlace_only)
cmdline_test(drm_cmdline_test_res)
cmdline_test(drm_cmdline_test_res_missing_x)
cmdline_test(drm_cmdline_test_res_missing_y)
@@ -53,3 +60,8 @@ cmdline_test(drm_cmdline_test_vmirror)
cmdline_test(drm_cmdline_test_margin_options)
cmdline_test(drm_cmdline_test_multiple_options)
cmdline_test(drm_cmdline_test_invalid_option)
+cmdline_test(drm_cmdline_test_bpp_extra_and_option)
+cmdline_test(drm_cmdline_test_extra_and_option)
+cmdline_test(drm_cmdline_test_freestanding_options)
+cmdline_test(drm_cmdline_test_freestanding_force_e_and_options)
+cmdline_test(drm_cmdline_test_panel_orientation)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 464753746013..782e285ca383 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -6,6 +6,10 @@
*
* Tests are executed in order by igt/drm_selftests_helper
*/
+selftest(drm_rect_clip_scaled_div_by_zero, igt_drm_rect_clip_scaled_div_by_zero)
+selftest(drm_rect_clip_scaled_not_clipped, igt_drm_rect_clip_scaled_not_clipped)
+selftest(drm_rect_clip_scaled_clipped, igt_drm_rect_clip_scaled_clipped)
+selftest(drm_rect_clip_scaled_signed_vs_unsigned, igt_drm_rect_clip_scaled_signed_vs_unsigned)
selftest(check_plane_state, igt_check_plane_state)
selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
@@ -32,3 +36,5 @@ selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
+selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode)
+selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 14c96edb13df..520f3e66a384 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -17,6 +17,136 @@
static const struct drm_connector no_connector = {};
+static int drm_cmdline_test_force_e_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static const struct drm_connector connector_hdmi = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &connector_hdmi,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+ return 0;
+}
+
+static const struct drm_connector connector_dvi = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static int drm_cmdline_test_force_D_only_dvi(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &connector_dvi,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+ return 0;
+}
+
+static int drm_cmdline_test_force_d_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_OFF);
+
+ return 0;
+}
+
+static int drm_cmdline_test_margin_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("m",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
+static int drm_cmdline_test_interlace_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("i",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
static int drm_cmdline_test_res(void *ignored)
{
struct drm_cmdline_mode mode = { };
@@ -862,6 +992,128 @@ static int drm_cmdline_test_invalid_option(void *ignored)
return 0;
}
+static int drm_cmdline_test_bpp_extra_and_option(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24e,rotate=180",
+ &no_connector,
+ &mode));
+ FAIL_ON(!mode.specified);
+ FAIL_ON(mode.xres != 720);
+ FAIL_ON(mode.yres != 480);
+ FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
+
+ FAIL_ON(mode.refresh_specified);
+
+ FAIL_ON(!mode.bpp_specified);
+ FAIL_ON(mode.bpp != 24);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_extra_and_option(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480e,rotate=180",
+ &no_connector,
+ &mode));
+ FAIL_ON(!mode.specified);
+ FAIL_ON(mode.xres != 720);
+ FAIL_ON(mode.yres != 480);
+ FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
+
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_freestanding_options(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.tv_margins.right != 14);
+ FAIL_ON(mode.tv_margins.left != 24);
+ FAIL_ON(mode.tv_margins.bottom != 36);
+ FAIL_ON(mode.tv_margins.top != 42);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
+
+ return 0;
+}
+
+static int drm_cmdline_test_freestanding_force_e_and_options(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.tv_margins.right != 14);
+ FAIL_ON(mode.tv_margins.left != 24);
+ FAIL_ON(mode.tv_margins.bottom != 36);
+ FAIL_ON(mode.tv_margins.top != 42);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_panel_orientation(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("panel_orientation=upside_down",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.panel_orientation != DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
+
+ return 0;
+}
+
#include "drm_selftest.c"
static int __init test_drm_cmdline_init(void)
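For context, the strings fed to drm_mode_parse_command_line_for_connector() above are the per-connector option part of the video= kernel parameter; on a real command line they are prefixed with a connector name. Illustrative examples only (the connector names are hypothetical):

	video=HDMI-A-1:720x480-24e,rotate=180
	video=DVI-I-1:D
	video=eDP-1:panel_orientation=upside_down
	video=HDMI-A-1:e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42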
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
new file mode 100644
index 000000000000..bd990d178765
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for the DRM DP MST helpers
+ */
+
+#define PREFIX_STR "[drm_dp_mst_helper]"
+
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_print.h>
+
+#include "../drm_dp_mst_topology_internal.h"
+#include "test-drm_modeset_common.h"
+
+int igt_dp_mst_calc_pbn_mode(void *ignored)
+{
+ int pbn, i;
+ const struct {
+ int rate;
+ int bpp;
+ int expected;
+ bool dsc;
+ } test_params[] = {
+ { 154000, 30, 689, false },
+ { 234000, 30, 1047, false },
+ { 297000, 24, 1063, false },
+ { 332880, 24, 50, true },
+ { 324540, 24, 49, true },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_params); i++) {
+ pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
+ test_params[i].bpp,
+ test_params[i].dsc);
+ FAIL(pbn != test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
+ test_params[i].expected, test_params[i].rate,
+ test_params[i].bpp, pbn);
+ }
+
+ return 0;
+}
+
+static bool
+sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
+ const struct drm_dp_sideband_msg_req_body *out)
+{
+ const struct drm_dp_remote_i2c_read_tx *txin, *txout;
+ int i;
+
+ if (in->req_type != out->req_type)
+ return false;
+
+ switch (in->req_type) {
+ /*
+ * Compare struct members manually for request types which can't be
+ * compared simply using memcmp(). This is because said request types
+ * contain pointers to other allocated structs
+ */
+ case DP_REMOTE_I2C_READ:
+#define IN in->u.i2c_read
+#define OUT out->u.i2c_read
+ if (IN.num_bytes_read != OUT.num_bytes_read ||
+ IN.num_transactions != OUT.num_transactions ||
+ IN.port_number != OUT.port_number ||
+ IN.read_i2c_device_id != OUT.read_i2c_device_id)
+ return false;
+
+ for (i = 0; i < IN.num_transactions; i++) {
+ txin = &IN.transactions[i];
+ txout = &OUT.transactions[i];
+
+ if (txin->i2c_dev_id != txout->i2c_dev_id ||
+ txin->no_stop_bit != txout->no_stop_bit ||
+ txin->num_bytes != txout->num_bytes ||
+ txin->i2c_transaction_delay !=
+ txout->i2c_transaction_delay)
+ return false;
+
+ if (memcmp(txin->bytes, txout->bytes,
+ txin->num_bytes) != 0)
+ return false;
+ }
+ break;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_DPCD_WRITE:
+#define IN in->u.dpcd_write
+#define OUT out->u.dpcd_write
+ if (IN.dpcd_address != OUT.dpcd_address ||
+ IN.num_bytes != OUT.num_bytes ||
+ IN.port_number != OUT.port_number)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_I2C_WRITE:
+#define IN in->u.i2c_write
+#define OUT out->u.i2c_write
+ if (IN.port_number != OUT.port_number ||
+ IN.write_i2c_device_id != OUT.write_i2c_device_id ||
+ IN.num_bytes != OUT.num_bytes)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ default:
+ return memcmp(in, out, sizeof(*in)) == 0;
+ }
+
+ return true;
+}
+
+static bool
+sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
+{
+ struct drm_dp_sideband_msg_req_body out = {0};
+ struct drm_printer p = drm_err_printer(PREFIX_STR);
+ struct drm_dp_sideband_msg_tx txmsg;
+ int i, ret;
+
+ drm_dp_encode_sideband_req(in, &txmsg);
+ ret = drm_dp_decode_sideband_req(&txmsg, &out);
+ if (ret < 0) {
+ drm_printf(&p, "Failed to decode sideband request: %d\n",
+ ret);
+ return false;
+ }
+
+ if (!sideband_msg_req_equal(in, &out)) {
+ drm_printf(&p, "Encode/decode failed, expected:\n");
+ drm_dp_dump_sideband_msg_req_body(in, 1, &p);
+ drm_printf(&p, "Got:\n");
+ drm_dp_dump_sideband_msg_req_body(&out, 1, &p);
+ return false;
+ }
+
+ switch (in->req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(out.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < out.u.i2c_read.num_transactions; i++)
+ kfree(out.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(out.u.i2c_write.bytes);
+ break;
+ }
+
+ /* Clear everything but the req_type for the input */
+ memset(&in->u, 0, sizeof(in->u));
+
+ return true;
+}
+
+int igt_dp_mst_sideband_msg_req_decode(void *unused)
+{
+ struct drm_dp_sideband_msg_req_body in = { 0 };
+ u8 data[] = { 0xff, 0x0, 0xdd };
+ int i;
+
+#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in))
+
+ in.req_type = DP_ENUM_PATH_RESOURCES;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_UP_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_DOWN_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_ALLOCATE_PAYLOAD;
+ in.u.allocate_payload.number_sdp_streams = 3;
+ for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
+ in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
+ DO_TEST();
+ in.u.allocate_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.allocate_payload.vcpi = 0x7f;
+ DO_TEST();
+ in.u.allocate_payload.pbn = U16_MAX;
+ DO_TEST();
+
+ in.req_type = DP_QUERY_PAYLOAD;
+ in.u.query_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.query_payload.vcpi = 0x7f;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_READ;
+ in.u.dpcd_read.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_read.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_read.num_bytes = U8_MAX;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_WRITE;
+ in.u.dpcd_write.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_write.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
+ in.u.dpcd_write.bytes = data;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_READ;
+ in.u.i2c_read.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_read.read_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_read.num_transactions = 3;
+ in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
+ for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
+ in.u.i2c_read.transactions[i].bytes = data;
+ in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
+ in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
+ }
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_WRITE;
+ in.u.i2c_write.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_write.write_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_write.bytes = data;
+ DO_TEST();
+
+#undef DO_TEST
+ return 0;
+}
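For the non-DSC rows in igt_dp_mst_calc_pbn_mode(), the expected values follow from the usual DisplayPort PBN accounting: the stream bandwidth scaled by the 64/54 timeslot granularity plus a 0.6% margin. A worked example for the first entry, assuming that conventional formula (the DSC rows interpret bpp differently and are not covered by this sketch):

/* Sketch only: reproduces the 689 PBN expectation for 154000 kHz at 30 bpp. */
static int example_pbn_154mhz_30bpp(void)
{
	unsigned long long num = 154000ULL * 30 * 64 * 1006;	/* 297,454,080,000 */
	unsigned long long den = 8ULL * 54 * 1000 * 1000;	/* 432,000,000 */

	return (int)((num + den - 1) / den);	/* round up -> 689 */
}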
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index a04d02dacce2..2d29ea6f92e2 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -3,7 +3,12 @@
* Test cases for the drm_framebuffer functions
*/
-#include <drm/drmP.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_fourcc.h>
+
#include "../drm_crtc_internal.h"
#include "test-drm_modeset_common.h"
@@ -121,7 +126,7 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
.handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
}
},
-{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 388f9844f4ba..9aabe82dcd3a 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node before start: node=%llx+%llu, start=%llx\n",
node->start, node->size, start);
return false;
@@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node after end: node=%llx+%llu, end=%llx\n",
node->start, node->size, end);
return false;
@@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count)
struct drm_mm_node *next = list_next_entry(hole, node_list);
const char *node1 = NULL, *node2 = NULL;
- if (hole->allocated)
+ if (drm_mm_node_allocated(hole))
node1 = kasprintf(GFP_KERNEL,
"[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
- if (next->allocated)
+ if (drm_mm_node_allocated(next))
node2 = kasprintf(GFP_KERNEL,
", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
@@ -1900,18 +1900,18 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
++*start;
node = list_next_entry(node, node_list);
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
--*end;
}
static bool colors_abutt(const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
- list_next_entry(node, node_list)->allocated) {
+ drm_mm_node_allocated(list_next_entry(node, node_list))) {
pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index 8c76f09c12d1..cfb51d8da2bc 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -3,6 +3,9 @@
#ifndef __TEST_DRM_MODESET_COMMON_H__
#define __TEST_DRM_MODESET_COMMON_H__
+#include <linux/errno.h>
+#include <linux/printk.h>
+
#define FAIL(test, msg, ...) \
do { \
if (test) { \
@@ -13,6 +16,10 @@
#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+int igt_drm_rect_clip_scaled_div_by_zero(void *ignored);
+int igt_drm_rect_clip_scaled_not_clipped(void *ignored);
+int igt_drm_rect_clip_scaled_clipped(void *ignored);
+int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored);
int igt_check_plane_state(void *ignored);
int igt_check_drm_format_block_width(void *ignored);
int igt_check_drm_format_block_height(void *ignored);
@@ -39,5 +46,7 @@ int igt_damage_iter_damage_one_intersect(void *ignored);
int igt_damage_iter_damage_one_outside(void *ignored);
int igt_damage_iter_damage_src_moved(void *ignored);
int igt_damage_iter_damage_not_visible(void *ignored);
+int igt_dp_mst_calc_pbn_mode(void *ignored);
+int igt_dp_mst_sideband_msg_req_decode(void *ignored);
#endif
diff --git a/drivers/gpu/drm/selftests/test-drm_rect.c b/drivers/gpu/drm/selftests/test-drm_rect.c
new file mode 100644
index 000000000000..3a5ff38321f4
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_rect.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_rect functions
+ */
+
+#define pr_fmt(fmt) "drm_rect: " fmt
+
+#include <linux/limits.h>
+
+#include <drm/drm_rect.h>
+
+#include "test-drm_modeset_common.h"
+
+int igt_drm_rect_clip_scaled_div_by_zero(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * Make sure we don't divide by zero when dst
+ * width/height is zero and dst and clip do not intersect.
+ */
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 0, 0, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+ FAIL(visible, "Destination not be visible\n");
+ FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 3, 3, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+ FAIL(visible, "Destination not be visible\n");
+ FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+ return 0;
+}
+
+int igt_drm_rect_clip_scaled_not_clipped(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ return 0;
+}
+
+int igt_drm_rect_clip_scaled_clipped(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 1 || dst.x2 != 2 ||
+ dst.y1 != 1 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
+ src.y1 != 2 << 16 || src.y2 != 4 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 1 || dst.x2 != 2 ||
+ dst.y1 != 1 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 2, 2, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 2 || dst.x2 != 4 ||
+ dst.y1 != 2 || dst.y2 != 4,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ return 0;
+}
+
+int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * 'clip.x2 - dst.x1 >= dst width' could result in a negative
+ * src rectangle width which is no longer expected by the
+ * code as it's using unsigned types. This could lead to
+ * the clipped source rectangle appearing visible when it
+ * should have been fully clipped. Make sure both rectangles
+ * end up invisible.
+ */
+ drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 3, 3, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(visible, "Destination should not be visible\n");
+ FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+ return 0;
+}
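A reading aid for the shifts above: drm_rect_clip_scaled() takes the source rectangle in 16.16 fixed point and the destination/clip rectangles in integer pixels, so a 1x1 destination at 1:1 scaling pairs with a source that is 1 << 16 units wide. Minimal sketch, with a hypothetical helper name:

/* 1:1 scaling: one destination pixel spans 1 << 16 source units. */
static void example_init_1to1(struct drm_rect *src, struct drm_rect *dst)
{
	drm_rect_init(src, 0, 0, 1 << 16, 1 << 16);	/* 16.16 fixed point */
	drm_rect_init(dst, 0, 0, 1, 1);			/* integer pixels */
}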
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index b6988a6d698e..75a752d59ef1 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -10,13 +10,14 @@
#include <linux/backlight.h>
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h"
#include "shmob_drm_crtc.h"
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index 9ca6920641d8..21718843f46d 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -10,12 +10,14 @@
#ifndef __SHMOB_DRM_CRTC_H__
#define __SHMOB_DRM_CRTC_H__
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
struct backlight_device;
+struct drm_pending_vblank_event;
struct shmob_drm_device;
+struct shmob_drm_format_info;
struct shmob_drm_crtc {
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index cb821adfc321..b8c0930959c7 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -15,10 +15,12 @@
#include <linux/pm.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
@@ -127,15 +129,12 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
static struct drm_driver shmob_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET
- | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
.irq_handler = shmob_drm_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 2e08bc203bf9..c51197b6fd85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -7,7 +7,6 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 1d1ee5e51351..cbc464f006b4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -7,10 +7,10 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include "shmob_drm_drv.h"
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
index bae67cc8c628..e72b21a4288f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -10,6 +10,7 @@
#ifndef __SHMOB_DRM_PLANE_H__
#define __SHMOB_DRM_PLANE_H__
+struct drm_plane;
struct shmob_drm_device;
int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
index 9eb0b3d01df8..058533685c4c 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_regs.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -11,6 +11,9 @@
#define __SHMOB_DRM_REGS_H__
#include <linux/io.h>
+#include <linux/jiffies.h>
+
+#include "shmob_drm_drv.h"
/* Register definitions */
#define LDDCKPAT1R 0x400
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ee3801201ecc..2c54b33abb54 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -26,10 +26,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/sis_drm.h>
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 0bf7c332cf0b..ea64c1dcaf63 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -47,7 +47,7 @@ struct dma_pixmap {
void *base;
};
-/**
+/*
* STI Cursor structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index bb6ae6dd66c9..a39fc36f815b 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -23,7 +23,6 @@
#include "sti_crtc.h"
#include "sti_drv.h"
-#include "sti_drv.h"
#include "sti_plane.h"
#define DRIVER_NAME "sti"
@@ -141,8 +140,7 @@ static void sti_mode_config_init(struct drm_device *dev)
DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
static struct drm_driver sti_driver = {
- .driver_features = DRIVER_MODESET |
- DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -153,8 +151,6 @@ static struct drm_driver sti_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 9e6d5d8b7030..b2778ec1cdd7 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -65,7 +66,7 @@ static struct dvo_config rgb_24bit_de_cfg = {
.awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
};
-/**
+/*
* STI digital video output structure
*
* @dev: driver device
@@ -221,8 +222,7 @@ static void sti_dvo_disable(struct drm_bridge *bridge)
writel(0x00000000, dvo->regs + DVO_DOF_CFG);
- if (dvo->panel)
- dvo->panel->funcs->disable(dvo->panel);
+ drm_panel_disable(dvo->panel);
/* Disable/unprepare dvo clock */
clk_disable_unprepare(dvo->clk_pix);
@@ -262,8 +262,7 @@ static void sti_dvo_pre_enable(struct drm_bridge *bridge)
if (clk_prepare_enable(dvo->clk))
DRM_ERROR("Failed to prepare/enable dvo clk\n");
- if (dvo->panel)
- dvo->panel->funcs->enable(dvo->panel);
+ drm_panel_enable(dvo->panel);
/* Set LUT */
writel(config->lowbyte, dvo->regs + DVO_LUT_PROG_LOW);
@@ -340,7 +339,7 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
struct sti_dvo *dvo = dvo_connector->dvo;
if (dvo->panel)
- return dvo->panel->funcs->get_modes(dvo->panel);
+ return drm_panel_get_modes(dvo->panel, connector);
return 0;
}
@@ -535,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_ERROR("Invalid dvo resource\n");
return -ENOMEM;
}
- dvo->regs = devm_ioremap_nocache(dev, res->start,
+ dvo->regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!dvo->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 8e926cd6a1c8..11595c748844 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -103,7 +103,7 @@ struct sti_gdp_node_list {
dma_addr_t btm_field_paddr;
};
-/**
+/*
* STI GDP structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 94e404f13234..2bb32009d117 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -230,7 +231,7 @@ static const struct sti_hda_video_config hda_supported_modes[] = {
AWGi_720x480p_60, NN_720x480p_60, VID_ED}
};
-/**
+/*
* STI hd analog structure
*
* @dev: driver device
@@ -758,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_ERROR("Invalid hda resource\n");
return -ENOMEM;
}
- hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hda->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hda->regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
if (res) {
- hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
resource_size(res));
if (!hda->video_dacs_ctrl)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f03d617edc4c..64ed102033c8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -9,11 +9,12 @@
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -333,7 +334,6 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
* Helper to concatenate infoframe in 32 bits word
*
* @ptr: pointer on the hdmi internal structure
- * @data: infoframe to write
* @size: size to write
*/
static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
@@ -543,13 +543,14 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
return 0;
}
+#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
+
/**
* Software reset of the hdmi subsystem
*
* @hdmi: pointer on the hdmi internal structure
*
*/
-#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
static void hdmi_swreset(struct sti_hdmi *hdmi)
{
u32 val;
@@ -849,10 +850,13 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi)
switch (info->channels) {
case 8:
audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+ /* fall through */
case 6:
audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+ /* fall through */
case 4:
audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+ /* fall through */
case 2:
audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
break;
@@ -1253,6 +1257,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hdmi_connector *connector;
+ struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
@@ -1284,8 +1289,10 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
- drm_connector_init(drm_dev, drm_connector,
- &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm_dev, drm_connector,
+ &sti_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc_adapt);
drm_connector_helper_add(drm_connector,
&sti_hdmi_connector_helper_funcs);
@@ -1313,6 +1320,14 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ cec_fill_conn_info_from_drm(&conn_info, drm_connector);
+ hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL,
+ &conn_info);
+ if (!hdmi->notifier) {
+ hdmi->drm_connector = NULL;
+ return -ENOMEM;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
@@ -1326,6 +1341,9 @@ err_sysfs:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ cec_notifier_conn_unregister(hdmi->notifier);
}
static const struct component_ops sti_hdmi_ops = {
@@ -1375,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto release_adapter;
}
- hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hdmi->regs) {
ret = -ENOMEM;
goto release_adapter;
@@ -1431,10 +1449,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
- hdmi->notifier = cec_notifier_get(&pdev->dev);
- if (!hdmi->notifier)
- goto release_adapter;
-
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
@@ -1454,14 +1468,11 @@ static int sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
- cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
-
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
- cec_notifier_put(hdmi->notifier);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index e1b3c8cb7287..c36a8da373cb 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -157,9 +157,9 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
*
* @tvout: tvout structure
* @reg: register to set
- * @cr_r:
- * @y_g:
- * @cb_b:
+ * @cr_r: red chroma or red order
+ * @y_g: y or green order
+ * @cb_b: blue chroma or blue order
*/
static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
@@ -214,7 +214,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
* @tvout: tvout structure
* @reg: register to set
* @main_path: main or auxiliary path
- * @sel_input: selected_input (main/aux + conv)
+ * @video_out: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
int reg,
@@ -251,7 +251,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
*
* @tvout: tvout structure
* @reg: register to set
- * @in_vid_signed: used video input format
+ * @in_vid_fmt: used video input format
*/
static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
int reg, u32 in_vid_fmt)
@@ -669,10 +669,9 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
encoder->tvout = tvout;
- drm_encoder = (struct drm_encoder *)encoder;
+ drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
- drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS,
@@ -722,10 +721,9 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
encoder->tvout = tvout;
- drm_encoder = (struct drm_encoder *) encoder;
+ drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
- drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
@@ -771,10 +769,9 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
encoder->tvout = tvout;
- drm_encoder = (struct drm_encoder *) encoder;
+ drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
- drm_encoder->possible_clones = 1 << 1;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL);
@@ -790,6 +787,13 @@ static void sti_tvout_create_encoders(struct drm_device *dev,
tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
tvout->dvo = sti_tvout_create_dvo_encoder(dev, tvout);
+
+ tvout->hdmi->possible_clones = drm_encoder_mask(tvout->hdmi) |
+ drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
+ tvout->hda->possible_clones = drm_encoder_mask(tvout->hdmi) |
+ drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
+ tvout->dvo->possible_clones = drm_encoder_mask(tvout->hdmi) |
+ drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
}
static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
@@ -856,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev)
DRM_ERROR("Invalid glue resource\n");
return -ENOMEM;
}
- tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!tvout->regs)
return -ENOMEM;
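The possible_clones change above replaces the hand-rolled bit values with drm_encoder_mask(), and each mask now includes the encoder's own bit as the cloning convention expects. The helper is just the encoder's index turned into a bitmask; roughly (see include/drm/drm_encoder.h for the real definition):

static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
{
	return 1 << drm_encoder_index(encoder);
}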
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index ef4009f11396..5e5f82b6a5d9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -121,7 +121,7 @@ struct sti_vtg_sync_params {
u32 vsync_off_bot;
};
-/**
+/*
* STI VTG structure
*
* @regs: register mapping
@@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev)
DRM_ERROR("Get memory resource failed\n");
return -ENOMEM;
}
- vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vtg->regs) {
DRM_ERROR("failed to remap I/O memory\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 9dee4e430de5..5a9f9aca8bc2 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -54,8 +54,7 @@ static int stm_gem_cma_dumb_create(struct drm_file *file,
DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
static struct drm_driver drv_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
@@ -68,8 +67,6 @@ static struct drm_driver drv_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 0ab32fee6c1b..4b165635b2d4 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -8,13 +8,17 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/bridge/dw_mipi_dsi.h>
+
#include <video/mipi_display.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+
#define HWVER_130 0x31333000 /* IP version 1.30 */
#define HWVER_131 0x31333100 /* IP version 1.31 */
@@ -256,8 +260,11 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
/* Compute requested pll out */
bpp = mipi_dsi_pixel_format_to_bpp(format);
pll_out_khz = mode->clock * bpp / lanes;
+
/* Add 20% to pll out to be higher than pixel bw (burst mode only) */
- pll_out_khz = (pll_out_khz * 12) / 10;
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ pll_out_khz = (pll_out_khz * 12) / 10;
+
if (pll_out_khz > dsi->lane_max_kbps) {
pll_out_khz = dsi->lane_max_kbps;
DRM_WARN("Warning max phy mbps is used\n");
@@ -302,11 +309,24 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
return 0;
}
+static int
+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi_dphy_timing *timing)
+{
+ timing->clk_hs2lp = 0x40;
+ timing->clk_lp2hs = 0x40;
+ timing->data_hs2lp = 0x40;
+ timing->data_lp2hs = 0x40;
+
+ return 0;
+}
+
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
.init = dw_mipi_dsi_phy_init,
.power_on = dw_mipi_dsi_phy_power_on,
.power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+ .get_timing = dw_mipi_dsi_phy_get_timing,
};
static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
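The lane-rate hunk above now applies the 20% PLL headroom only when the peripheral runs in burst mode. With illustrative numbers (a 148500 kHz pixel clock, 24 bpp, 4 lanes; none taken from the driver), the arithmetic works out as:

/* Illustrative values only; mirrors the pll_out_khz computation above. */
static unsigned int example_pll_out_khz(void)
{
	unsigned int clock_khz = 148500;	/* e.g. 1080p60 pixel clock */
	unsigned int bpp = 24, lanes = 4;
	unsigned int pll_out_khz = clock_khz * bpp / lanes;	/* 891000 kHz */

	/* burst mode only: 20% above the per-lane pixel bandwidth */
	pll_out_khz = pll_out_khz * 12 / 10;	/* 1069200 kHz */

	return pll_out_khz;
}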
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 2fe6c4a8d915..c2815e8ae1da 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -26,6 +27,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -435,9 +437,6 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
/* Commit shadow registers = update planes at next vblank */
reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
- /* Enable LTDC */
- reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
-
drm_crtc_vblank_on(crtc);
}
@@ -451,9 +450,6 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- /* disable LTDC */
- reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
-
/* disable IRQ */
reg_clear(ldev->regs, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
@@ -922,6 +918,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
};
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = ltdc_plane_atomic_check,
.atomic_update = ltdc_plane_atomic_update,
.atomic_disable = ltdc_plane_atomic_disable,
@@ -1038,6 +1035,54 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static void ltdc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable LTDC */
+ reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+
+ /* Put the pinctrl in sleep state regardless of the encoder type */
+ pinctrl_pm_select_sleep_state(ddev->dev);
+}
+
+static void ltdc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Enable LTDC */
+ reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+}
+
+static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /*
+ * Set the pinctrl to its default state only for the DPI type.
+ * Other types, like DSI, don't need pinctrl because of the
+ * internal bridge (the signals do not come out of the chipset).
+ */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPI)
+ pinctrl_pm_select_default_state(ddev->dev);
+}
+
+static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
+ .disable = ltdc_encoder_disable,
+ .enable = ltdc_encoder_enable,
+ .mode_set = ltdc_encoder_mode_set,
+};
+
static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct drm_encoder *encoder;
@@ -1053,6 +1098,8 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
+ drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
+
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
@@ -1234,8 +1281,8 @@ int ltdc_load(struct drm_device *ddev)
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
- bridge[i] = drm_panel_bridge_add(panel[i],
- DRM_MODE_CONNECTOR_DPI);
+ bridge[i] = drm_panel_bridge_add_typed(panel[i],
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge[i])) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge[i]);
@@ -1278,6 +1325,8 @@ int ltdc_load(struct drm_device *ddev)
clk_disable_unprepare(ldev->pixel_clk);
+ pinctrl_pm_select_sleep_state(ddev->dev);
+
pm_runtime_enable(ddev->dev);
return 0;
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 37e90e42943f..5755f0432e77 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -17,18 +17,18 @@ config DRM_SUN4I
if DRM_SUN4I
config DRM_SUN4I_HDMI
- tristate "Allwinner A10 HDMI Controller Support"
- default DRM_SUN4I
- help
+ tristate "Allwinner A10 HDMI Controller Support"
+ default DRM_SUN4I
+ help
Choose this option if you have an Allwinner SoC with an HDMI
controller.
config DRM_SUN4I_HDMI_CEC
- bool "Allwinner A10 HDMI CEC Support"
- depends on DRM_SUN4I_HDMI
- select CEC_CORE
- select CEC_PIN
- help
+ bool "Allwinner A10 HDMI CEC Support"
+ depends on DRM_SUN4I_HDMI
+ select CEC_CORE
+ select CEC_PIN
+ help
Choose this option if you have an Allwinner SoC with an HDMI
controller and want to use CEC.
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 78d8c3afe825..072ea113e6be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -6,21 +6,23 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/list.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/reset.h>
-
#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
@@ -854,6 +856,13 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
ret = PTR_ERR(backend->mod_clk);
goto err_disable_bus_clk;
}
+
+ ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev, "Couldn't set the module clock frequency\n");
+ goto err_disable_bus_clk;
+ }
+
clk_prepare_enable(backend->mod_clk);
backend->ram_clk = devm_clk_get(dev, "ram");
@@ -930,6 +939,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
err_disable_ram_clk:
clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
+ clk_rate_exclusive_put(backend->mod_clk);
clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(backend->bus_clk);
@@ -950,6 +960,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
sun4i_backend_free_sat(dev);
clk_disable_unprepare(backend->ram_clk);
+ clk_rate_exclusive_put(backend->mod_clk);
clk_disable_unprepare(backend->mod_clk);
clk_disable_unprepare(backend->bus_clk);
reset_control_assert(backend->reset);
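The new clk_set_rate_exclusive() call in bind pins the module clock at 300 MHz, and every path that took that exclusive rate now drops it with clk_rate_exclusive_put(), which is why both the error unwind and unbind gained a call. A condensed sketch of the pairing, assuming a struct clk obtained elsewhere:

#include <linux/clk.h>

/* Sketch: pin a clock rate exclusively and balance it on teardown. */
static int example_pin_mod_clk(struct clk *mod_clk)
{
	int ret;

	ret = clk_set_rate_exclusive(mod_clk, 300000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(mod_clk);
	if (ret) {
		clk_rate_exclusive_put(mod_clk);
		return ret;
	}
	return 0;
}

static void example_unpin_mod_clk(struct clk *mod_clk)
{
	clk_rate_exclusive_put(mod_clk);
	clk_disable_unprepare(mod_clk);
}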
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 9d8504f813a4..3a153648b369 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -6,12 +6,6 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_probe_helper.h>
-
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
@@ -21,6 +15,13 @@
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 1a1b52e6f73e..328272ff77d8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -8,16 +8,19 @@
#include <linux/component.h>
#include <linux/kfifo.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
@@ -38,7 +41,7 @@ static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* Generic Operations */
.fops = &sun4i_drv_fops,
@@ -82,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
}
drm_mode_config_init(drm);
- drm->mode_config.allow_fb_modifiers = true;
ret = component_bind_all(drm->dev, drm);
if (ret) {
@@ -343,6 +345,27 @@ static int sun4i_drv_add_endpoints(struct device *dev,
return count;
}
+#ifdef CONFIG_PM_SLEEP
+static int sun4i_drv_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(drm);
+}
+
+static int sun4i_drv_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+#endif
+
+static const struct dev_pm_ops sun4i_drv_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sun4i_drv_drm_sys_suspend,
+ sun4i_drv_drm_sys_resume)
+};
+
static int sun4i_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -415,6 +438,7 @@ static struct platform_driver sun4i_drv_platform_driver = {
.driver = {
.name = "sun4i-drm",
.of_match_table = sun4i_drv_of_table,
+ .pm = &sun4i_drv_drm_pm_ops,
},
};
module_platform_driver(sun4i_drv_platform_driver);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 35c040716680..1568f68f9a9e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drmP.h>
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 346c8071bd38..ec2a032e07b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -3,9 +3,6 @@
* Copyright (C) 2017 Free Electrons
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
@@ -16,6 +13,13 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane.h>
+
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9c3f99339b82..68d4644ac2dc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -5,23 +5,24 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -489,6 +490,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
struct resource *res;
@@ -628,8 +630,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
- hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
+ hdmi, "sun4i", CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_cleanup_connector;
@@ -639,14 +640,17 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
drm_connector_helper_add(&hdmi->connector,
&sun4i_hdmi_connector_helper_funcs);
- ret = drm_connector_init(drm, &hdmi->connector,
- &sun4i_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &sun4i_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc_i2c);
if (ret) {
dev_err(dev,
"Couldn't initialise the HDMI connector\n");
goto err_cleanup_connector;
}
+ cec_fill_conn_info_from_drm(&conn_info, &hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
/* There is no HPD interrupt, so we need to poll the controller */
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
@@ -681,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
cec_unregister_adapter(hdmi->cec_adap);
- drm_connector_cleanup(&hdmi->connector);
- drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index e72dd4de90ce..acfbfd4463a1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -7,9 +7,8 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "sun4i_backend.h"
#include "sun4i_frontend.h"
@@ -251,11 +250,11 @@ struct drm_plane **sun4i_layers_init(struct drm_device *drm,
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
- };
+ }
layer->id = i;
planes[i] = &layer->plane;
- };
+ }
return planes;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 3a3ba99fed22..65b7a8739666 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -6,10 +6,11 @@
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
@@ -42,7 +43,7 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
- return drm_panel_get_modes(lvds->panel);
+ return drm_panel_get_modes(lvds->panel, connector);
}
static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index a901ec689b62..b27f16af50f5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -8,10 +8,11 @@
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
@@ -46,7 +47,7 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- return drm_panel_get_modes(rgb->panel);
+ return drm_panel_get_modes(rgb->panel, connector);
}
/*
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..c81cdce6ed55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -6,26 +6,29 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <uapi/drm/drm_mode.h>
-#include <linux/component.h>
-#include <linux/ioport.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-
#include "sun4i_crtc.h"
#include "sun4i_dotclock.h"
#include "sun4i_drv.h"
@@ -314,6 +317,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
/* R and B components are only 5 bits deep */
val |= SUN4I_TCON0_FRM_CTL_MODE_R;
val |= SUN4I_TCON0_FRM_CTL_MODE_B;
+ /* Fall through */
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
/* Fall through: enable dithering */
@@ -478,14 +482,14 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
- struct drm_display_info display_info = connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 6;
+ tcon->dclk_min_div = tcon->quirks->dclk_min_div;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -539,7 +543,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
- if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
/*
@@ -557,10 +561,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
@@ -1422,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun5i_a13_tcon_set_mux,
};
@@ -1436,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
.has_channel_1 = true,
.has_lvds_alt = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
.set_mux = sun6i_tcon_set_mux,
};
@@ -1443,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
@@ -1455,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1473,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
- .has_channel_0 = true,
- .needs_edp_reset = true,
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f9f1fe80b206..a62ec826ae71 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -224,6 +224,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index f998153c141f..39c15282e448 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -8,14 +8,16 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index f7ab72244796..4fbe9a6b5182 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -56,6 +56,13 @@ static int sun6i_drc_bind(struct device *dev, struct device *master,
ret = PTR_ERR(drc->mod_clk);
goto err_disable_bus_clk;
}
+
+ ret = clk_set_rate_exclusive(drc->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev, "Couldn't set the module clock frequency\n");
+ goto err_disable_bus_clk;
+ }
+
clk_prepare_enable(drc->mod_clk);
return 0;
@@ -72,6 +79,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
{
struct sun6i_drc *drc = dev_get_drvdata(dev);
+ clk_rate_exclusive_put(drc->mod_clk);
clk_disable_unprepare(drc->mod_clk);
clk_disable_unprepare(drc->bus_clk);
reset_control_assert(drc->reset);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..a75fcb113172 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -9,19 +9,21 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/crc-ccitt.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-mipi-dphy.h>
-
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
@@ -364,8 +366,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
- u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+ u16 delay = mode->vtotal - (mode->vsync_start - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
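As a worked example with hypothetical 480p-style timings (vtotal = 525, vdisplay = 480, vsync_start = 490), the start delay becomes 525 - (490 - 480) + 1 = 516 lines, well below vtotal, so the modulo guard does not kick in.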
@@ -436,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
- } else if ((mode->hsync_end - mode->hdisplay) > 20) {
+ } else if ((mode->hsync_start - mode->hdisplay) > 20) {
/* Maaaaaagic */
- u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
+ u16 drq = (mode->hsync_start - mode->hdisplay) - 20;
drq *= mipi_dsi_pixel_format_to_bpp(device->format);
drq /= 32;
@@ -568,11 +569,12 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
- * The frontporch is set using a blanking packet (4
- * bytes + payload + 2 bytes). Its minimal size is
- * therefore 6 bytes
+ * The frontporch is set using a sync event (4 bytes)
+ * and two blanking packets (each one is 4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore
+ * 16 bytes
*/
-#define HFP_PACKET_OVERHEAD 6
+#define HFP_PACKET_OVERHEAD 16
hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
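For instance, with a hypothetical 40-pixel horizontal front porch on an RGB888 panel (Bpp = 3), the budget is 40 * 3 - 16 = 104 bytes, above the 16-byte floor, so hfp ends up as 104; a very narrow front porch would instead be clamped to HFP_PACKET_OVERHEAD.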
@@ -793,7 +795,7 @@ static int sun6i_dsi_get_modes(struct drm_connector *connector)
{
struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
- return drm_panel_get_modes(dsi->panel);
+ return drm_panel_get_modes(dsi->panel, connector);
}
static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
@@ -830,8 +832,8 @@ static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
u32 pkt = msg->type;
if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
- pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
- pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
+ pkt |= ((msg->tx_len) & 0xffff) << 8;
+ pkt |= (((msg->tx_len) >> 8) & 0xffff) << 16;
} else {
pkt |= (((u8 *)msg->tx_buf)[0] << 8);
if (msg->tx_len > 1)
@@ -993,6 +995,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
ret = sun6i_dsi_dcs_read(dsi, msg);
break;
}
+ /* Else, fall through */
default:
ret = -EINVAL;
@@ -1078,6 +1081,7 @@ static const struct component_ops sun6i_dsi_ops = {
static int sun6i_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
struct resource *res;
void __iomem *base;
@@ -1091,6 +1095,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &sun6i_dsi_host_ops;
dsi->host.dev = dev;
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun6i-a31-mipi-dsi"))
+ bus_clk_name = "bus";
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
@@ -1098,11 +1106,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
- dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
- &sun6i_dsi_regmap_config);
- if (IS_ERR(dsi->regs)) {
- dev_err(dev, "Couldn't create the DSI encoder regmap\n");
- return PTR_ERR(dsi->regs);
+ dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
+ if (IS_ERR(dsi->regulator)) {
+ dev_err(dev, "Couldn't get VCC-DSI supply\n");
+ return PTR_ERR(dsi->regulator);
}
dsi->reset = devm_reset_control_get_shared(dev, NULL);
@@ -1111,10 +1118,30 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(dsi->reset);
}
- dsi->mod_clk = devm_clk_get(dev, "mod");
- if (IS_ERR(dsi->mod_clk)) {
- dev_err(dev, "Couldn't get the DSI mod clock\n");
- return PTR_ERR(dsi->mod_clk);
+ dsi->regs = devm_regmap_init_mmio(dev, base, &sun6i_dsi_regmap_config);
+ if (IS_ERR(dsi->regs)) {
+ dev_err(dev, "Couldn't init regmap\n");
+ return PTR_ERR(dsi->regs);
+ }
+
+ dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
+ if (IS_ERR(dsi->bus_clk)) {
+ dev_err(dev, "Couldn't get the DSI bus clock\n");
+ return PTR_ERR(dsi->bus_clk);
+ }
+
+ ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun6i-a31-mipi-dsi")) {
+ dsi->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(dsi->mod_clk)) {
+ dev_err(dev, "Couldn't get the DSI mod clock\n");
+ ret = PTR_ERR(dsi->mod_clk);
+ goto err_attach_clk;
+ }
}
/*
@@ -1152,6 +1179,9 @@ err_pm_disable:
pm_runtime_disable(dev);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
+err_attach_clk:
+ if (!IS_ERR(dsi->bus_clk))
+ regmap_mmio_detach_clk(dsi->regs);
return ret;
}
@@ -1165,12 +1195,22 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
clk_rate_exclusive_put(dsi->mod_clk);
+ if (!IS_ERR(dsi->bus_clk))
+ regmap_mmio_detach_clk(dsi->regs);
+
return 0;
}
static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->regulator);
+ if (err) {
+ dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+ return err;
+ }
reset_control_deassert(dsi->reset);
clk_prepare_enable(dsi->mod_clk);
@@ -1203,6 +1243,7 @@ static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
clk_disable_unprepare(dsi->mod_clk);
reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
return 0;
}
@@ -1215,6 +1256,7 @@ static const struct dev_pm_ops sun6i_dsi_pm_ops = {
static const struct of_device_id sun6i_dsi_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
+ { .compatible = "allwinner,sun50i-a64-mipi-dsi" },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 5c3ad5be0690..3f4846f581ef 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -23,6 +23,7 @@ struct sun6i_dsi {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
+ struct regulator *regulator;
struct reset_control *reset;
struct phy *dphy;
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index b8c059f1a118..781955dd4995 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -3,7 +3,7 @@
* Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
*/
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
#include "sun8i_csc.h"
#include "sun8i_mixer.h"
@@ -18,16 +18,59 @@ static const u32 ccsc_base[2][2] = {
* First three values in each line are multiplication factors and the last
* value is a constant, which is added at the end.
*/
-static const u32 yuv2rgb[] = {
- 0x000004A8, 0x00000000, 0x00000662, 0xFFFC845A,
- 0x000004A8, 0xFFFFFE6F, 0xFFFFFCBF, 0x00021DF4,
- 0x000004A8, 0x00000813, 0x00000000, 0xFFFBAC4A,
+
+static const u32 yuv2rgb[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x000004A8, 0x00000000, 0x00000662, 0xFFFC8451,
+ 0x000004A8, 0xFFFFFE6F, 0xFFFFFCC0, 0x00021E4D,
+ 0x000004A8, 0x00000811, 0x00000000, 0xFFFBACA9,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x000004A8, 0x00000000, 0x0000072B, 0xFFFC1F99,
+ 0x000004A8, 0xFFFFFF26, 0xFFFFFDDF, 0x00013383,
+ 0x000004A8, 0x00000873, 0x00000000, 0xFFFB7BEF,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00000400, 0x00000000, 0x0000059B, 0xFFFD322E,
+ 0x00000400, 0xFFFFFEA0, 0xFFFFFD25, 0x00021DD5,
+ 0x00000400, 0x00000716, 0x00000000, 0xFFFC74BD,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00000400, 0x00000000, 0x0000064C, 0xFFFCD9B4,
+ 0x00000400, 0xFFFFFF41, 0xFFFFFE21, 0x00014F96,
+ 0x00000400, 0x0000076C, 0x00000000, 0xFFFC49EF,
+ }
+ },
};
-static const u32 yvu2rgb[] = {
- 0x000004A8, 0x00000662, 0x00000000, 0xFFFC845A,
- 0x000004A8, 0xFFFFFCBF, 0xFFFFFE6F, 0x00021DF4,
- 0x000004A8, 0x00000000, 0x00000813, 0xFFFBAC4A,
+static const u32 yvu2rgb[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451,
+ 0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D,
+ 0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99,
+ 0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383,
+ 0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E,
+ 0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5,
+ 0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4,
+ 0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96,
+ 0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF,
+ }
+ },
};
/*
@@ -53,57 +96,98 @@ static const u32 yvu2rgb[] = {
* c20 c21 c22 [d2 const2]
*/
-static const u32 yuv2rgb_de3[] = {
- 0x0002542a, 0x00000000, 0x0003312a, 0xffc00000,
- 0x0002542a, 0xffff376b, 0xfffe5fc3, 0xfe000000,
- 0x0002542a, 0x000408d3, 0x00000000, 0xfe000000,
+static const u32 yuv2rgb_de3[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
+ 0x0002542A, 0xFFFF376B, 0xFFFE5FC3, 0xFE000000,
+ 0x0002542A, 0x000408D2, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
+ 0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
+ 0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00020000, 0x00000000, 0x0002CDD2, 0x00000000,
+ 0x00020000, 0xFFFF4FCE, 0xFFFE925D, 0xFE000000,
+ 0x00020000, 0x00038B43, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00020000, 0x00000000, 0x0003264C, 0x00000000,
+ 0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
+ 0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
+ }
+ },
};
-static const u32 yvu2rgb_de3[] = {
- 0x0002542a, 0x0003312a, 0x00000000, 0xffc00000,
- 0x0002542a, 0xfffe5fc3, 0xffff376b, 0xfe000000,
- 0x0002542a, 0x00000000, 0x000408d3, 0xfe000000,
+static const u32 yvu2rgb_de3[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000,
+ 0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000,
+ 0x0002542A, 0x00000000, 0x000408D2, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000,
+ 0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000,
+ 0x0002542A, 0x00000000, 0x0004398C, 0xFE000000,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00020000, 0x0002CDD2, 0x00000000, 0x00000000,
+ 0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000,
+ 0x00020000, 0x00000000, 0x00038B43, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00020000, 0x0003264C, 0x00000000, 0x00000000,
+ 0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000,
+ 0x00020000, 0x00000000, 0x0003B611, 0xFE000000,
+ }
+ },
};
static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
- enum sun8i_csc_mode mode)
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
const u32 *table;
- int i, data;
+ u32 base_reg;
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb;
+ table = yuv2rgb[range][encoding];
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb;
+ table = yvu2rgb[range][encoding];
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
- for (i = 0; i < 12; i++) {
- data = table[i];
- /* For some reason, 0x200 must be added to constant parts */
- if (((i + 1) & 3) == 0)
- data += 0x200;
- regmap_write(map, SUN8I_CSC_COEFF(base, i), data);
- }
+ base_reg = SUN8I_CSC_COEFF(base, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
- enum sun8i_csc_mode mode)
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
const u32 *table;
u32 base_reg;
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb_de3;
+ table = yuv2rgb_de3[range][encoding];
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb_de3;
+ table = yvu2rgb_de3[range][encoding];
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
@@ -142,19 +226,22 @@ static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
}
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
- enum sun8i_csc_mode mode)
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
u32 base;
if (mixer->cfg->is_de3) {
- sun8i_de3_ccsc_set_coefficients(mixer->engine.regs,
- layer, mode);
+ sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
+ mode, encoding, range);
return;
}
base = ccsc_base[mixer->cfg->ccsc][layer];
- sun8i_csc_set_coefficients(mixer->engine.regs, base, mode);
+ sun8i_csc_set_coefficients(mixer->engine.regs, base,
+ mode, encoding, range);
}
void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
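The coefficient tables above are easier to read with a rough model of the multiply-accumulate the CSC unit performs. The sketch below assumes the DE2 entries are signed fixed-point numbers with 10 fractional bits (0x400 == 1.0, so 0x4A8 ≈ 1.164 and 0x662 ≈ 1.596) and that the fourth entry of each row is a constant pre-scaled the same way; the DE3 tables use a wider format (0x20000 == 1.0). The helper name is illustrative and not part of the driver:

static int csc_row_apply_q10(const s32 row[4], int a, int b, int c)
{
	/* out = (c0 * a + c1 * b + c2 * c + d) >> 10, clamping omitted */
	s64 acc = (s64)row[0] * a + (s64)row[1] * b +
		  (s64)row[2] * c + row[3];

	return (int)(acc >> 10);
}

As a sanity check, feeding (Y, V) = (235, 128) through the BT.601 limited-range R row { 0x4A8, 0x0, 0x662, 0xFFFC8451 } (with the large values read as signed) gives roughly 255, i.e. full-scale output, modulo rounding.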
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index dce4c444bcd6..f42441b1b14d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -6,6 +6,8 @@
#ifndef _SUN8I_CSC_H_
#define _SUN8I_CSC_H_
+#include <drm/drm_color_mgmt.h>
+
struct sun8i_mixer;
/* VI channel CSC units offsets */
@@ -26,7 +28,9 @@ enum sun8i_csc_mode {
};
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
- enum sun8i_csc_mode mode);
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range);
void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 39d8509d96a0..e8a317d5ba19 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -8,9 +8,8 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "sun8i_dw_hdmi.h"
#include "sun8i_tcon_top.h"
@@ -98,10 +97,34 @@ crtcs_exit:
return crtcs;
}
+static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
+ struct platform_device **pdev_out)
+{
+ struct platform_device *pdev;
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return -ENODEV;
+
+ if (!of_device_is_compatible(remote, "hdmi-connector")) {
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(remote);
+ of_node_put(remote);
+ if (!pdev)
+ return -ENODEV;
+
+ *pdev_out = pdev;
+ return 0;
+}
+
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
struct dw_hdmi_plat_data *plat_data;
struct drm_device *drm = data;
struct device_node *phy_node;
@@ -151,16 +174,30 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->regulator);
}
+ ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
+ if (!ret) {
+ hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
+ "ddc-en", GPIOD_OUT_HIGH);
+ platform_device_put(connector_pdev);
+
+ if (IS_ERR(hdmi->ddc_en)) {
+ dev_err(dev, "Couldn't get ddc-en gpio\n");
+ return PTR_ERR(hdmi->ddc_en);
+ }
+ }
+
ret = regulator_enable(hdmi->regulator);
if (ret) {
dev_err(dev, "Failed to enable regulator\n");
- return ret;
+ goto err_unref_ddc_en;
}
+ gpiod_set_value(hdmi->ddc_en, 1);
+
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
- goto err_disable_regulator;
+ goto err_disable_ddc_en;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -189,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
plat_data->mode_valid = hdmi->quirks->mode_valid;
+ plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -213,8 +251,12 @@ err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
-err_disable_regulator:
+err_disable_ddc_en:
+ gpiod_set_value(hdmi->ddc_en, 0);
regulator_disable(hdmi->regulator);
+err_unref_ddc_en:
+ if (hdmi->ddc_en)
+ gpiod_put(hdmi->ddc_en);
return ret;
}
@@ -228,7 +270,11 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
+ gpiod_set_value(hdmi->ddc_en, 0);
regulator_disable(hdmi->regulator);
+
+ if (hdmi->ddc_en)
+ gpiod_put(hdmi->ddc_en);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
@@ -255,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+ .use_drm_infoframe = true,
};
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 720c5aa8adc1..8e64945167e9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -9,6 +9,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_encoder.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -178,6 +179,7 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
+ unsigned int use_drm_infoframe : 1;
};
struct sun8i_dw_hdmi {
@@ -190,6 +192,7 @@ struct sun8i_dw_hdmi {
struct regulator *regulator;
const struct sun8i_dw_hdmi_quirks *quirks;
struct reset_control *rst_ctrl;
+ struct gpio_desc *ddc_en;
};
static inline struct sun8i_dw_hdmi *
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index c2eedf58bf4b..7c24f8f832a5 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -7,7 +7,13 @@
* Copyright (C) 2015 NextThing Co
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
@@ -15,12 +21,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/reset.h>
-
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
@@ -286,10 +286,10 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
dev_err(drm->dev,
"Couldn't initialize overlay plane\n");
return ERR_CAST(layer);
- };
+ }
planes[i] = &layer->plane;
- };
+ }
for (i = 0; i < mixer->cfg->ui_num; i++) {
struct sun8i_ui_layer *layer;
@@ -299,10 +299,10 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
- };
+ }
planes[mixer->cfg->vi_num + i] = &layer->plane;
- };
+ }
return planes;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 3267d0f9b9b2..75d8e60c149d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -1,18 +1,18 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
-#include <drm/drmP.h>
-
-#include <dt-bindings/clock/sun8i-tcon-top.h>
#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
#include "sun8i_tcon_top.h"
struct sun8i_tcon_top_quirks {
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index dd2a1c851939..c87fd842918e 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -13,11 +13,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "sun8i_ui_layer.h"
#include "sun8i_mixer.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index bd0e6a52d1d8..42d445d23773 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -11,7 +11,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "sun8i_vi_layer.h"
#include "sun8i_mixer.h"
@@ -232,7 +231,9 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
- sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc);
+ sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+ state->color_encoding,
+ state->color_range);
sun8i_csc_enable_ccsc(mixer, channel, true);
} else {
sun8i_csc_enable_ccsc(mixer, channel, false);
@@ -441,6 +442,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
{
+ u32 supported_encodings, supported_ranges;
struct sun8i_vi_layer *layer;
unsigned int plane_cnt;
int ret;
@@ -469,6 +471,22 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
+ supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709);
+
+ supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE);
+
+ ret = drm_plane_create_color_properties(&layer->plane,
+ supported_encodings,
+ supported_ranges,
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't add encoding and range properties!\n");
+ return ERR_PTR(ret);
+ }
+
drm_plane_helper_add(&layer->plane, &sun8i_vi_layer_helper_funcs);
layer->mixer = mixer;
layer->channel = index;
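With these properties in place, whatever userspace selects ends up in the plane state consumed by sun8i_vi_layer_update_formats() above: a compositor setting COLOR_ENCODING to "ITU-R BT.601" and COLOR_RANGE to "YCbCr full range" (the usual DRM enum names) picks yuv2rgb[DRM_COLOR_YCBCR_FULL_RANGE][DRM_COLOR_YCBCR_BT601] in sun8i_csc.c, while planes that never touch the properties keep the BT.709, limited-range defaults registered here.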
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 3a1476818c65..ab699bf0ac5c 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -31,12 +31,15 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
-#include "tdfx_drv.h"
-
-#include <drm/drm_pciids.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_pciids.h>
+
+#include "tdfx_drv.h"
static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1d1269fde3c1..5043dcaf1cf9 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,7 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 33c463e8d49f..d6cf202414f0 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -5,6 +5,7 @@ tegra-drm-y := \
drm.o \
gem.o \
fb.o \
+ dp.o \
hub.o \
plane.o \
dc.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 4a75d149e368..7c70fd31a4c2 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -6,23 +6,28 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/iommu.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "hub.h"
#include "plane.h"
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -710,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.swap = state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
-
- window.base[i] = bo->paddr + fb->offsets[i];
+ window.base[i] = state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
@@ -727,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
@@ -832,16 +837,15 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
static void tegra_cursor_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0);
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
- struct drm_plane_state *state = plane->state;
u32 value = CURSOR_CLIP_DISPLAY;
/* rien ne va plus */
if (!plane->state->crtc || !plane->state->fb)
return;
- switch (state->crtc_w) {
+ switch (plane->state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
break;
@@ -859,16 +863,16 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
break;
default:
- WARN(1, "cursor size %ux%u not supported\n", state->crtc_w,
- state->crtc_h);
+ WARN(1, "cursor size %ux%u not supported\n",
+ plane->state->crtc_w, plane->state->crtc_h);
return;
}
- value |= (bo->paddr >> 10) & 0x3fffff;
+ value |= (state->iova[0] >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (bo->paddr >> 32) & 0x3;
+ value = (state->iova[0] >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
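The cursor start-address registers take the buffer address in 1 KiB units, hence the shift by 10: a hypothetical cursor IOVA of 0x80004000, for instance, is written as 0x200010, with bits 32 and above going into the _HI register on 64-bit DMA configurations.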
@@ -887,7 +891,8 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
/* position the cursor */
- value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
+ value = (plane->state->crtc_y & 0x3fff) << 16 |
+ (plane->state->crtc_x & 0x3fff);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
@@ -909,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
@@ -1720,6 +1727,7 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ int err;
if (!tegra_dc_idle(dc)) {
tegra_dc_stop(dc);
@@ -1766,7 +1774,9 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
- pm_runtime_put_sync(dc->dev);
+ err = host1x_client_suspend(&dc->client);
+ if (err < 0)
+ dev_err(dc->dev, "failed to suspend: %d\n", err);
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -1776,8 +1786,13 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ int err;
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* initialize display controller */
if (dc->syncpt) {
@@ -1989,7 +2004,7 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
static int tegra_dc_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
@@ -2005,13 +2020,21 @@ static int tegra_dc_init(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /*
+ * Set the display hub as the host1x client parent for the display
+ * controller. This is needed for the runtime reference counting that
+ * ensures the display hub is always powered when any of the display
+ * controllers are.
+ */
+ if (dc->soc->has_nvdisplay)
+ client->parent = &tegra->hub->client;
+
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
- dc->group = host1x_client_iommu_attach(client, true);
- if (IS_ERR(dc->group)) {
- err = PTR_ERR(dc->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
@@ -2069,6 +2092,12 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
return 0;
cleanup:
@@ -2078,7 +2107,7 @@ cleanup:
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return err;
@@ -2092,6 +2121,9 @@ static int tegra_dc_exit(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
@@ -2100,15 +2132,80 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return 0;
}
+static int tegra_dc_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate)
+ tegra_powergate_power_off(dc->powergate);
+
+ clk_disable_unprepare(dc->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dc_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ goto put_rpm;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(dc->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops dc_client_ops = {
.init = tegra_dc_init,
.exit = tegra_dc_exit,
+ .suspend = tegra_dc_runtime_suspend,
+ .resume = tegra_dc_runtime_resume,
};
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
@@ -2520,65 +2617,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_dc_suspend(struct device *dev)
-{
- struct tegra_dc *dc = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- if (dc->soc->has_powergate)
- tegra_powergate_power_off(dc->powergate);
-
- clk_disable_unprepare(dc->clk);
-
- return 0;
-}
-
-static int tegra_dc_resume(struct device *dev)
-{
- struct tegra_dc *dc = dev_get_drvdata(dev);
- int err;
-
- if (dc->soc->has_powergate) {
- err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
- dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to power partition: %d\n", err);
- return err;
- }
- } else {
- err = clk_prepare_enable(dc->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- return err;
- }
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dc_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
-};
-
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
- .pm = &tegra_dc_pm_ops,
},
.probe = tegra_dc_probe,
.remove = tegra_dc_remove,
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 0c4d17851f47..3d8ddccd758f 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -90,8 +90,6 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
-
- struct iommu_group *group;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
new file mode 100644
index 000000000000..70dfb7d1dec5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp.h"
+
+static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
+
+static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+ caps->alternate_scrambler_reset = false;
+}
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src)
+{
+ dest->enhanced_framing = src->enhanced_framing;
+ dest->tps3_supported = src->tps3_supported;
+ dest->fast_training = src->fast_training;
+ dest->channel_coding = src->channel_coding;
+ dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
+}
+
+static void drm_dp_link_reset(struct drm_dp_link *link)
+{
+ unsigned int i;
+
+ if (!link)
+ return;
+
+ link->revision = 0;
+ link->max_rate = 0;
+ link->max_lanes = 0;
+
+ drm_dp_link_caps_reset(&link->caps);
+ link->aux_rd_interval.cr = 0;
+ link->aux_rd_interval.ce = 0;
+ link->edp = 0;
+
+ link->rate = 0;
+ link->lanes = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = 0;
+}
+
+/**
+ * drm_dp_link_add_rate() - add a rate to the list of supported rates
+ * @link: the link to add the rate to
+ * @rate: the rate to add
+ *
+ * Add a link rate to the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXIST if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i, pivot;
+
+ if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+ return -ENOSPC;
+
+ for (pivot = 0; pivot < link->num_rates; pivot++)
+ if (rate <= link->rates[pivot])
+ break;
+
+ if (pivot != link->num_rates && rate == link->rates[pivot])
+ return -EEXIST;
+
+ for (i = link->num_rates; i > pivot; i--)
+ link->rates[i] = link->rates[i - 1];
+
+ link->rates[pivot] = rate;
+ link->num_rates++;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < link->num_rates; i++)
+ if (rate == link->rates[i])
+ break;
+
+ if (i == link->num_rates)
+ return -EINVAL;
+
+ link->num_rates--;
+
+ while (i < link->num_rates) {
+ link->rates[i] = link->rates[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
+ *
+ * See also:
+ * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
+ */
+void drm_dp_link_update_rates(struct drm_dp_link *link)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < link->num_rates; i++) {
+ if (link->rates[i] != 0)
+ link->rates[count++] = link->rates[i];
+ }
+
+ for (i = count; i < link->num_rates; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = count;
+}
+
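For example, calling drm_dp_link_add_rate() with 270000 and then 162000 leaves the array sorted ascending as { 162000, 270000 } with num_rates == 2; a subsequent drm_dp_link_remove_rate(link, 162000) shifts the remaining entry down and drops the count back to 1.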
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
+ unsigned int rd_interval;
+ int err;
+
+ drm_dp_link_reset(link);
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
+ if (err < 0)
+ return err;
+
+ link->revision = dpcd[DP_DPCD_REV];
+ link->max_rate = drm_dp_max_link_rate(dpcd);
+ link->max_lanes = drm_dp_max_lane_count(dpcd);
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ link->caps.alternate_scrambler_reset = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
+ if (err < 0)
+ return err;
+
+ if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
+ DRM_ERROR("unsupported eDP version: %02x\n", value);
+ else
+ link->edp = drm_dp_edp_revisions[value];
+ }
+
+ /*
+ * The DPCD stores the AUX read interval in units of 4 ms. There are
+ * two special cases:
+ *
+ * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
+ * and channel equalization should use 100 us or 400 us AUX read
+ * intervals, respectively
+ *
+ * 2) for DP v1.4 and above, clock recovery should always use 100 us
+ * AUX read intervals
+ */
+ rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4) {
+ DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
+ rd_interval);
+ rd_interval = 4;
+ }
+
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
+ link->aux_rd_interval.cr = 100;
+
+ if (rd_interval == 0)
+ link->aux_rd_interval.ce = 400;
+
+ link->rate = link->max_rate;
+ link->lanes = link->max_lanes;
+
+ /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
+ if (link->edp >= 0x14) {
+ u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
+ unsigned int i;
+ u16 rate;
+
+ err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
+ supported_rates,
+ sizeof(supported_rates));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
+ rate = supported_rates[i * 2 + 1] << 8 |
+ supported_rates[i * 2 + 0];
+
+ drm_dp_link_add_rate(link, rate * 200);
+ }
+ }
+
+ return 0;
+}
+
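To make the conversions concrete: a raw TRAINING_AUX_RD_INTERVAL of 2 scales to 2 * 4 ms = 8000 us, whereas a value of 0 falls back to 100 us for clock recovery and 400 us for channel equalization (and DPCD 1.4+ sinks always get the 100 us clock-recovery interval). Likewise, since SUPPORTED_LINK_RATES entries are in 200 kHz units, a raw value of 1350 (0x0546) becomes the 270000 kHz HBR rate after the "rate * 200" scaling.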
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2], value;
+ int err;
+
+ if (link->ops && link->ops->configure) {
+ err = link->ops->configure(link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+ }
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->lanes;
+
+ if (link->caps.enhanced_framing)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ if (link->caps.channel_coding)
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (err < 0)
+ return err;
+
+ if (link->caps.alternate_scrambler_reset) {
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
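Putting the helpers above together, a bring-up path would look roughly like the sketch below. This only illustrates the calling sequence suggested by the kernel-doc; the function name is made up and error handling is minimal:

static int example_dp_link_bring_up(struct drm_dp_aux *aux,
				    struct drm_dp_link *link)
{
	int err;

	/* read DPCD capabilities and reset the cached link state */
	err = drm_dp_link_probe(aux, link);
	if (err < 0)
		return err;

	/* move the sink to D0 (only applies to DPCD 1.1 and later) */
	err = drm_dp_link_power_up(aux, link);
	if (err < 0)
		return err;

	/* program link rate, lane count and channel coding */
	return drm_dp_link_configure(aux, link);
}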
+/**
+ * drm_dp_link_choose() - choose the lowest possible configuration for a mode
+ * @link: DRM DP link object
+ * @mode: DRM display mode
+ * @info: DRM display information
+ *
+ * According to the eDP specification, a source should select a configuration
+ * with the lowest number of lanes and the lowest possible link rate that can
+ * match the bitrate requirements of a video mode. However it must ensure not
+ * to exceed the capabilities of the sink.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ /* available link symbol clock rates */
+ static const unsigned int rates[3] = { 162000, 270000, 540000 };
+ /* available number of lanes */
+ static const unsigned int lanes[3] = { 1, 2, 4 };
+ unsigned long requirement, capacity;
+ unsigned int rate = link->max_rate;
+ unsigned int i, j;
+
+ /* bandwidth requirement */
+ requirement = mode->clock * info->bpc * 3;
+
+ for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
+ for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
+ /*
+ * Capacity for this combination of lanes and rate,
+ * factoring in the ANSI 8B/10B encoding.
+ *
+ * Link rates in the DRM DP helpers are really link
+ * symbol frequencies, so a tenth of the actual rate
+ * of the link.
+ */
+ capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
+
+ if (capacity >= requirement) {
+ DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
+ lanes[i], rates[j], requirement,
+ capacity);
+ link->lanes = lanes[i];
+ link->rate = rates[j];
+ return 0;
+ }
+ }
+ }
+
+ return -ERANGE;
+}
+
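As a concrete example, a 1080p mode with a 148500 kHz pixel clock at 8 bpc needs 148500 * 8 * 3 = 3,564,000 kbps. Against a sink limited to HBR (270000) and 4 lanes, the loops reject 1 lane at 162000 and 270000 as well as 2 lanes at 162000, and settle on 2 lanes at 270000, whose capacity of 2 * 2,700,000 * 8 / 10 = 4,320,000 kbps covers the requirement.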
+/**
+ * DOC: Link training
+ *
+ * These functions contain common logic and helpers to implement DisplayPort
+ * link training.
+ */
+
+/**
+ * drm_dp_link_train_init() - initialize DisplayPort link training state
+ * @train: DisplayPort link training state
+ */
+void drm_dp_link_train_init(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = 0;
+ adjust->voltage_swing[i] = 0;
+
+ request->pre_emphasis[i] = 0;
+ adjust->pre_emphasis[i] = 0;
+
+ request->post_cursor[i] = 0;
+ adjust->post_cursor[i] = 0;
+ }
+
+ train->pattern = DP_TRAINING_PATTERN_DISABLE;
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int drm_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct drm_dp_link_train_set *request = &link->train.request;
+ unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
+ struct drm_dp_aux *aux = link->aux;
+ u8 values[4], pattern = 0;
+ int err;
+
+ err = link->ops->apply_training(link);
+ if (err < 0) {
+ DRM_ERROR("failed to apply link training: %d\n", err);
+ return err;
+ }
+
+ vs = request->voltage_swing;
+ pe = request->pre_emphasis;
+ pc = request->post_cursor;
+
+ /* write currently selected voltage-swing and pre-emphasis levels */
+ for (i = 0; i < lanes; i++)
+ values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
+ DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
+ if (err < 0) {
+ DRM_ERROR("failed to set training parameters: %d\n", err);
+ return err;
+ }
+
+ /* write currently selected post-cursor level (if supported) */
+ if (link->revision >= 0x12 && link->rate == 540000) {
+ values[0] = values[1] = 0;
+
+ for (i = 0; i < lanes; i++)
+ values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+ DIV_ROUND_UP(lanes, 2));
+ if (err < 0) {
+ DRM_ERROR("failed to set post-cursor: %d\n", err);
+ return err;
+ }
+ }
+
+ /* write link pattern */
+ if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
+ pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ pattern |= link->train.pattern;
+
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0) {
+ DRM_ERROR("failed to set training pattern: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
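Editorial sketch (not part of the patch), assuming the kernel's u8 type and the DP_LANE_POST_CURSOR() macro defined in dp.h below: it shows why only DIV_ROUND_UP(lanes, 2) bytes are written to DP_TRAINING_LANE0_1_SET2, since each byte carries the 2-bit post-cursor values of two lanes.

/* Illustrative only: even lanes land in the low nibble, odd lanes in the high nibble. */
static void example_pack_post_cursor(u8 values[2])
{
	unsigned int pc[4] = { 1, 2, 0, 3 };	/* assumed per-lane levels */
	unsigned int i;

	values[0] = values[1] = 0;

	for (i = 0; i < 4; i++)
		values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);

	/* values[0] == 0x21 (lane 1 << 4 | lane 0), values[1] == 0x30 */
}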
+
+static void drm_dp_link_train_wait(struct drm_dp_link *link)
+{
+ unsigned long min = 0;
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_1:
+ min = link->aux_rd_interval.cr;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ min = link->aux_rd_interval.ce;
+ break;
+
+ default:
+ break;
+ }
+
+ if (min > 0)
+ usleep_range(min, 2 * min);
+}
+
+static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct drm_dp_link_train_set *adjust = &link->train.adjust;
+ unsigned int i;
+
+ for (i = 0; i < link->lanes; i++) {
+ adjust->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(status, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ adjust->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(status, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ adjust->post_cursor[i] =
+ drm_dp_get_adjust_request_post_cursor(status, i);
+ }
+}
+
+static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ if (request->voltage_swing[i] != adjust->voltage_swing[i])
+ request->voltage_swing[i] = adjust->voltage_swing[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
+ request->pre_emphasis[i] = adjust->pre_emphasis[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->post_cursor[i] != adjust->post_cursor[i])
+ request->post_cursor[i] = adjust->post_cursor[i];
+}
+
+static int drm_dp_link_recover_clock(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.clock_recovered = true;
+
+ return 0;
+}
+
+static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start clock recovery using training pattern 1 */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_recover_clock(link);
+ if (err < 0) {
+ DRM_ERROR("failed to recover clock: %d\n", err);
+ return err;
+ }
+
+ if (link->train.clock_recovered)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
+{
+ struct drm_dp_aux *aux = link->aux;
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ return 0;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.channel_equalized = true;
+
+ return 0;
+}
+
+static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start channel equalization using pattern 2 or 3 */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_equalize_channel(link);
+ if (err < 0) {
+ DRM_ERROR("failed to equalize channel: %d\n", err);
+ return err;
+ }
+
+ if (link->train.channel_equalized)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_downgrade(struct drm_dp_link *link)
+{
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+
+ case 270000:
+ link->rate = 162000;
+ break;
+
+ case 540000:
+ link->rate = 270000;
+ return 0;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_disable(struct drm_dp_link *link)
+{
+ int err;
+
+ link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ DRM_ERROR("failed to disable link training: %d\n", err);
+}
+
+static int drm_dp_link_train_full(struct drm_dp_link *link)
+{
+ int err;
+
+retry:
+ DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ err = drm_dp_link_clock_recovery(link);
+ if (err < 0) {
+ DRM_ERROR("clock recovery failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ DRM_ERROR("clock recovery failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("clock recovery succeeded\n");
+
+ err = drm_dp_link_channel_equalization(link);
+ if (err < 0) {
+ DRM_ERROR("channel equalization failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ DRM_ERROR("channel equalization failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("channel equalization succeeded\n");
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+static int drm_dp_link_train_fast(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ /* transmit training pattern 1 for 500 microseconds */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ /* transmit training pattern 2 or 3 for 500 microseconds */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery failed\n");
+ err = -EIO;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ DRM_ERROR("channel equalization failed\n");
+ err = -EIO;
+ }
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+/**
+ * drm_dp_link_train() - perform DisplayPort link training
+ * @link: a DP link object
+ *
+ * Uses the context stored in the DP link object to perform link training. It
+ * is expected that drivers will call drm_dp_link_probe() to obtain the link
+ * capabilities before performing link training.
+ *
+ * If the sink supports fast link training (no AUX CH handshake) and valid
+ * training settings are available, this function will try to perform fast
+ * link training and fall back to full link training on failure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_train(struct drm_dp_link *link)
+{
+ int err;
+
+ drm_dp_link_train_init(&link->train);
+
+ if (link->caps.fast_training) {
+ if (drm_dp_link_train_valid(&link->train)) {
+ err = drm_dp_link_train_fast(link);
+ if (err < 0)
+ DRM_ERROR("fast link training failed: %d\n",
+ err);
+ else
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("training parameters not available\n");
+ }
+ } else {
+ DRM_DEBUG_KMS("fast link training not supported\n");
+ }
+
+ err = drm_dp_link_train_full(link);
+ if (err < 0)
+ DRM_ERROR("full link training failed: %d\n", err);
+
+ return err;
+}
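Minimal usage sketch (editorial illustration, not part of the patch): a driver is expected to probe the link, pick a configuration for the mode, power the sink up and then run training. The my_sor_* names are hypothetical driver hooks, error handling is abbreviated, and drm_dp_link_probe() may fill in some of the fields assigned explicitly here.

/* Hypothetical driver callbacks implementing the drm_dp_link_ops hooks. */
static int my_sor_apply_training(struct drm_dp_link *link);
static int my_sor_configure(struct drm_dp_link *link);

static const struct drm_dp_link_ops my_sor_link_ops = {
	.apply_training = my_sor_apply_training,
	.configure = my_sor_configure,
};

static int my_sor_enable_link(struct drm_dp_aux *aux, struct drm_dp_link *link,
			      const struct drm_display_mode *mode,
			      const struct drm_display_info *info)
{
	int err;

	link->ops = &my_sor_link_ops;
	link->aux = aux;

	err = drm_dp_link_probe(aux, link);
	if (err < 0)
		return err;

	err = drm_dp_link_choose(link, mode, info);
	if (err < 0)
		return err;

	err = drm_dp_link_power_up(aux, link);
	if (err < 0)
		return err;

	return drm_dp_link_train(link);
}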
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
new file mode 100644
index 000000000000..cb12ed0c54e7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation.
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#ifndef DRM_TEGRA_DP_H
+#define DRM_TEGRA_DP_H 1
+
+#include <linux/types.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_dp_aux;
+struct drm_dp_link;
+
+/**
+ * struct drm_dp_link_caps - DP link capabilities
+ */
+struct drm_dp_link_caps {
+ /**
+ * @enhanced_framing:
+ *
+ * enhanced framing capability (mandatory as of DP 1.2)
+ */
+ bool enhanced_framing;
+
+ /**
+ * @tps3_supported:
+ *
+ * training pattern sequence 3 supported for equalization
+ */
+ bool tps3_supported;
+
+ /**
+ * @fast_training:
+ *
+ * AUX CH handshake not required for link training
+ */
+ bool fast_training;
+
+ /**
+ * @channel_coding:
+ *
+ * ANSI 8B/10B channel coding capability
+ */
+ bool channel_coding;
+
+ /**
+ * @alternate_scrambler_reset:
+ *
+ * eDP alternate scrambler reset capability
+ */
+ bool alternate_scrambler_reset;
+};
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src);
+
+/**
+ * struct drm_dp_link_ops - DP link operations
+ */
+struct drm_dp_link_ops {
+ /**
+ * @apply_training:
+ */
+ int (*apply_training)(struct drm_dp_link *link);
+
+ /**
+ * @configure:
+ */
+ int (*configure)(struct drm_dp_link *link);
+};
+
+#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
+#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
+#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
+
+/**
+ * struct drm_dp_link_train_set - link training settings
+ * @voltage_swing: per-lane voltage swing
+ * @pre_emphasis: per-lane pre-emphasis
+ * @post_cursor: per-lane post-cursor
+ */
+struct drm_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ unsigned int post_cursor[4];
+};
+
+/**
+ * struct drm_dp_link_train - link training state information
+ * @request: currently requested settings
+ * @adjust: adjustments requested by sink
+ * @pattern: currently requested training pattern
+ * @clock_recovered: flag to track if clock recovery has completed
+ * @channel_equalized: flag to track if channel equalization has completed
+ */
+struct drm_dp_link_train {
+ struct drm_dp_link_train_set request;
+ struct drm_dp_link_train_set adjust;
+
+ unsigned int pattern;
+
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+/**
+ * struct drm_dp_link - DP link capabilities and configuration
+ * @revision: DP specification revision supported on the link
+ * @max_rate: maximum clock rate supported on the link
+ * @max_lanes: maximum number of lanes supported on the link
+ * @caps: capabilities supported on the link (see &drm_dp_link_caps)
+ * @aux_rd_interval: AUX read interval to use for training (in microseconds)
+ * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate
+ * @lanes: currently configured number of lanes
+ * @rates: additional supported link rates in kHz (eDP 1.4)
+ * @num_rates: number of additional supported link rates (eDP 1.4)
+ */
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int max_rate;
+ unsigned int max_lanes;
+
+ struct drm_dp_link_caps caps;
+
+ /**
+ * @cr: clock recovery read interval
+ * @ce: channel equalization read interval
+ */
+ struct {
+ unsigned int cr;
+ unsigned int ce;
+ } aux_rd_interval;
+
+ unsigned char edp;
+
+ unsigned int rate;
+ unsigned int lanes;
+
+ unsigned long rates[DP_MAX_SUPPORTED_RATES];
+ unsigned int num_rates;
+
+ /**
+ * @ops: DP link operations
+ */
+ const struct drm_dp_link_ops *ops;
+
+ /**
+ * @aux: DP AUX channel
+ */
+ struct drm_dp_aux *aux;
+
+ /**
+ * @train: DP link training state
+ */
+ struct drm_dp_link_train train;
+};
+
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
+void drm_dp_link_update_rates(struct drm_dp_link *link);
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+
+void drm_dp_link_train_init(struct drm_dp_link_train *train);
+int drm_dp_link_train(struct drm_dp_link *link);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2d94da225e51..7dfb50f65067 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -8,19 +8,22 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/workqueue.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
+#include "dp.h"
#include "dpaux.h"
#include "drm.h"
#include "trace.h"
@@ -28,10 +31,18 @@
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
+struct tegra_dpaux_soc {
+ unsigned int cmh;
+ unsigned int drvz;
+ unsigned int drvi;
+};
+
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
+ const struct tegra_dpaux_soc *soc;
+
void __iomem *regs;
int irq;
@@ -119,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
+ u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
@@ -213,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
- msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
- msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
- msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
- msg->reply = DP_AUX_I2C_REPLY_NACK;
+ reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
- msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
@@ -237,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
- if (WARN_ON(count != msg->size))
- count = min_t(size_t, count, msg->size);
+ /*
+ * There might be a smarter way to do this, but since
+ * the DP helpers will already retry transactions for
+ * an -EBUSY return value, simply reuse that instead.
+ */
+ if (count != msg->size) {
+ ret = -EBUSY;
+ goto out;
+ }
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
+ msg->reply = reply;
+
+out:
return ret;
}
@@ -309,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
@@ -319,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
@@ -435,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
+ dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
@@ -492,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->vdd);
}
+
+ dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
@@ -563,7 +588,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
/* make sure pads are powered down when not in use */
tegra_dpaux_pad_power_down(dpaux);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
drm_dp_aux_unregister(&dpaux->aux);
@@ -640,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
+static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x18,
+};
+
+static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x30,
+};
+
+static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x2c,
+};
+
static const struct of_device_id tegra_dpaux_of_match[] = {
- { .compatible = "nvidia,tegra194-dpaux", },
- { .compatible = "nvidia,tegra186-dpaux", },
- { .compatible = "nvidia,tegra210-dpaux", },
- { .compatible = "nvidia,tegra124-dpaux", },
+ { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
+ { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
@@ -685,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
- err = regulator_enable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (output->panel) {
+ enum drm_connector_status status;
+
+ if (dpaux->vdd) {
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- timeout = jiffies + msecs_to_jiffies(250);
+ timeout = jiffies + msecs_to_jiffies(250);
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_connected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_connected) {
- enable_irq(dpaux->irq);
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_connected)
+ return -ETIMEDOUT;
}
- return -ETIMEDOUT;
+ enable_irq(dpaux->irq);
+ return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
@@ -714,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
disable_irq(dpaux->irq);
- err = regulator_disable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (dpaux->output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_disconnected) {
- dpaux->output = NULL;
- return 0;
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_disconnected)
+ break;
+
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_disconnected)
+ return -ETIMEDOUT;
+
+ dpaux->output = NULL;
}
- return -ETIMEDOUT;
+ return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
@@ -763,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
return 0;
}
-
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
-{
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern)
-{
- u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
- u8 status[DP_LINK_STATUS_SIZE], values[4];
- unsigned int i;
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
- if (err < 0)
- return err;
-
- if (tp == DP_TRAINING_PATTERN_DISABLE)
- return 0;
-
- for (i = 0; i < link->num_lanes; i++)
- values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
- DP_TRAIN_PRE_EMPH_LEVEL_0 |
- DP_TRAIN_MAX_SWING_REACHED |
- DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
- err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
- link->num_lanes);
- if (err < 0)
- return err;
-
- usleep_range(500, 1000);
-
- err = drm_dp_dpcd_read_link_status(aux, status);
- if (err < 0)
- return err;
-
- switch (tp) {
- case DP_TRAINING_PATTERN_1:
- if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- case DP_TRAINING_PATTERN_2:
- if (!drm_dp_channel_eq_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- default:
- dev_err(aux->dev, "unsupported training pattern %u\n", tp);
- return -EINVAL;
- }
-
- err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ddb802bce0a3..bd268028fb3d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -8,13 +8,17 @@
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_vblank.h>
#include "drm.h"
#include "gem.h"
@@ -78,168 +82,6 @@ tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra;
- int err;
-
- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- if (iommu_present(&platform_bus_type)) {
- tegra->domain = iommu_domain_alloc(&platform_bus_type);
- if (!tegra->domain) {
- err = -ENOMEM;
- goto free;
- }
-
- err = iova_cache_get();
- if (err < 0)
- goto domain;
- }
-
- mutex_init(&tegra->clients_lock);
- INIT_LIST_HEAD(&tegra->clients);
-
- drm->dev_private = tegra;
- tegra->drm = drm;
-
- drm_mode_config_init(drm);
-
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.allow_fb_modifiers = true;
-
- drm->mode_config.normalize_zpos = true;
-
- drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
- drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
-
- err = tegra_drm_fb_prepare(drm);
- if (err < 0)
- goto config;
-
- drm_kms_helper_poll_init(drm);
-
- err = host1x_device_init(device);
- if (err < 0)
- goto fbdev;
-
- if (tegra->domain) {
- u64 carveout_start, carveout_end, gem_start, gem_end;
- u64 dma_mask = dma_get_mask(&device->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- start = tegra->domain->geometry.aperture_start & dma_mask;
- end = tegra->domain->geometry.aperture_end & dma_mask;
-
- gem_start = start;
- gem_end = end - CARVEOUT_SZ;
- carveout_start = gem_end + 1;
- carveout_end = end;
-
- order = __ffs(tegra->domain->pgsize_bitmap);
- init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order);
-
- tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
- tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
-
- drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
- mutex_init(&tegra->mm_lock);
-
- DRM_DEBUG("IOMMU apertures:\n");
- DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
- DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
- carveout_end);
- }
-
- if (tegra->hub) {
- err = tegra_display_hub_prepare(tegra->hub);
- if (err < 0)
- goto device;
- }
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = true;
-
- /* syncpoints are used for full 32-bit hardware VBLANK counters */
- drm->max_vblank_count = 0xffffffff;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- goto hub;
-
- drm_mode_config_reset(drm);
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- goto hub;
-
- return 0;
-
-hub:
- if (tegra->hub)
- tegra_display_hub_cleanup(tegra->hub);
-device:
- host1x_device_exit(device);
-fbdev:
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_free(drm);
-config:
- drm_mode_config_cleanup(drm);
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- }
-domain:
- if (tegra->domain)
- iommu_domain_free(tegra->domain);
-free:
- kfree(tegra);
- return err;
-}
-
-static void tegra_drm_unload(struct drm_device *drm)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra = drm->dev_private;
- int err;
-
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
-
- err = host1x_device_exit(device);
- if (err < 0)
- return;
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- iommu_domain_free(tegra->domain);
- }
-
- kfree(tegra);
-}
-
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
@@ -303,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
+ dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
+
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
@@ -888,33 +732,33 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
#endif
};
@@ -1004,10 +848,8 @@ static int tegra_debugfs_init(struct drm_minor *minor)
#endif
static struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
@@ -1060,57 +902,61 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared)
+int host1x_client_iommu_attach(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
+ /*
+ * If the host1x client is already attached to an IOMMU domain that is
+ * not the shared IOMMU domain, don't try to attach it to a different
+ * domain. This allows using the IOMMU-backed DMA API.
+ */
+ if (domain && domain != tegra->domain)
+ return 0;
+
if (tegra->domain) {
group = iommu_group_get(client->dev);
- if (!group) {
- dev_err(client->dev, "failed to get IOMMU group\n");
- return ERR_PTR(-ENODEV);
- }
+ if (!group)
+ return -ENODEV;
- if (!shared || (shared && (group != tegra->group))) {
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (client->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(client->dev);
- arm_iommu_detach_device(client->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
+ if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
- return ERR_PTR(err);
+ return err;
}
-
- if (shared && !tegra->group)
- tegra->group = group;
}
+
+ tegra->use_explicit_iommu = true;
}
- return group;
+ client->group = group;
+
+ return 0;
}
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group)
+void host1x_client_iommu_detach(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_domain *domain;
- if (group) {
- if (group == tegra->group) {
- iommu_detach_group(tegra->domain, group);
- tegra->group = NULL;
- }
+ if (client->group) {
+ /*
+ * Devices that are part of the same group may no longer be
+ * attached to a domain at this point because their group may
+ * have been detached by an earlier client.
+ */
+ domain = iommu_get_domain_for_dev(client->dev);
+ if (domain)
+ iommu_detach_group(tegra->domain, client->group);
- iommu_group_put(group);
+ iommu_group_put(client->group);
+ client->group = NULL;
}
}
@@ -1191,9 +1037,55 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
free_pages((unsigned long)virt, get_order(size));
}
+static bool host1x_drm_wants_iommu(struct host1x_device *dev)
+{
+ struct iommu_domain *domain;
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+ * likely to be allocated beyond the 32-bit boundary if sufficient
+ * system memory is available. This is problematic on earlier Tegra
+ * generations where host1x supports a maximum of 32 address bits in
+ * the GATHER opcode. In this case, unless host1x is behind an IOMMU
+ * as well it won't be able to process buffers allocated beyond the
+ * 32-bit boundary.
+ *
+ * The DMA API will use bounce buffers in this case, so that could
+ * perhaps still be made to work, even if less efficient, but there
+ * is another catch: in order to perform cache maintenance on pages
+ * allocated for discontiguous buffers we need to map and unmap the
+ * SG table representing these buffers. This is fine for something
+ * small like a push buffer, but it exhausts the bounce buffer pool
+ * (typically on the order of a few MiB) for framebuffers (many MiB
+ * for any modern resolution).
+ *
+ * Work around this by making sure that Tegra DRM clients only use
+ * an IOMMU if the parent host1x also uses an IOMMU.
+ *
+ * Note that there's still a small gap here that we don't cover: if
+ * the DMA API is backed by an IOMMU there's no way to control which
+ * device is attached to an IOMMU and which isn't, except via wiring
+ * up the device tree appropriately. This is considered a problem
+ * of integration, so care must be taken for the DT to be consistent.
+ */
+ domain = iommu_get_domain_for_dev(dev->dev.parent);
+
+ /*
+ * Tegra20 and Tegra30 don't support addressing memory beyond the
+ * 32-bit boundary, so the regular GATHER opcodes will always be
+ * sufficient and whether or not the host1x is attached to an IOMMU
+ * doesn't matter.
+ */
+ if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+ return true;
+
+ return domain != NULL;
+}
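Editorial summary of the decision above (illustrative only), written as a C comment so it can sit next to the helper:

/*
 * host1x parent behind an IOMMU | parent DMA mask | clients want IOMMU?
 * ------------------------------+-----------------+--------------------
 * yes                           | any             | yes
 * no                            | <= 32 bits      | yes (harmless, GATHER
 *                               |                 | can address everything)
 * no                            | >  32 bits      | no
 */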
+
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
+ struct tegra_drm *tegra;
struct drm_device *drm;
int err;
@@ -1201,18 +1093,151 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (IS_ERR(drm))
return PTR_ERR(drm);
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto domain;
+ }
+
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+
dev_set_drvdata(&dev->dev, drm);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.normalize_zpos = true;
+
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
- err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ err = tegra_drm_fb_prepare(drm);
if (err < 0)
- goto put;
+ goto config;
+
+ drm_kms_helper_poll_init(drm);
+
+ err = host1x_device_init(dev);
+ if (err < 0)
+ goto fbdev;
+
+ if (tegra->use_explicit_iommu) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
+ u64 dma_mask = dma_get_mask(&dev->dev);
+ dma_addr_t start, end;
+ unsigned long order;
+
+ start = tegra->domain->geometry.aperture_start & dma_mask;
+ end = tegra->domain->geometry.aperture_end & dma_mask;
+
+ gem_start = start;
+ gem_end = end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG_DRIVER("IOMMU apertures:\n");
+ DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
+ } else if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ tegra->domain = NULL;
+ iova_cache_put();
+ }
+
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ goto hub;
+
+ drm_mode_config_reset(drm);
+
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
+ false);
+ if (err < 0)
+ goto hub;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
- goto put;
+ goto fb;
return 0;
+fb:
+ tegra_drm_fb_exit(drm);
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+device:
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
+
+ host1x_device_exit(dev);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+free:
+ kfree(tegra);
put:
drm_dev_put(drm);
return err;
@@ -1221,8 +1246,32 @@ put:
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_mode_config_cleanup(drm);
+
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+
+ err = host1x_device_exit(dev);
+ if (err < 0)
+ dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
+
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ iommu_domain_free(tegra->domain);
+ }
+
+ kfree(tegra);
drm_dev_put(drm);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 86daa19fcf24..ed99b67deb29 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -7,18 +7,17 @@
#ifndef HOST1X_DRM_H
#define HOST1X_DRM_H 1
-#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
#include <linux/iova.h>
#include <linux/of_gpio.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
+#include <uapi/drm/tegra_drm.h>
#include "gem.h"
#include "hub.h"
@@ -37,7 +36,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
- struct iommu_group *group;
+ bool use_explicit_iommu;
struct mutex mm_lock;
struct drm_mm mm;
@@ -101,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared);
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group);
+int host1x_client_iommu_attach(struct host1x_client *client);
+void host1x_client_iommu_detach(struct host1x_client *client);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
@@ -147,6 +144,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
void tegra_output_exit(struct tegra_output *output);
void tegra_output_find_possible_crtcs(struct tegra_output *output,
struct drm_device *drm);
+int tegra_output_suspend(struct tegra_output *output);
+int tegra_output_resume(struct tegra_output *output);
int tegra_output_connector_get_modes(struct drm_connector *connector);
enum drm_connector_status
@@ -156,17 +155,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
-struct drm_dp_link;
-
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
int drm_dp_aux_detach(struct drm_dp_aux *aux);
int drm_dp_aux_enable(struct drm_dp_aux *aux);
int drm_dp_aux_disable(struct drm_dp_aux *aux);
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 2fbfefe9cb42..88b9d64c77bf 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -5,22 +5,24 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
-#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <video/mipi_display.h>
-
#include "dc.h"
#include "drm.h"
#include "dsi.h"
@@ -838,7 +840,9 @@ static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
err);
- pm_runtime_put(dsi->dev);
+ err = host1x_client_suspend(&dsi->client);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to suspend: %d\n", err);
}
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
@@ -880,11 +884,15 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
tegra_dsi_unprepare(dsi);
}
-static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+static int tegra_dsi_prepare(struct tegra_dsi *dsi)
{
int err;
- pm_runtime_get_sync(dsi->dev);
+ err = host1x_client_resume(&dsi->client);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to resume: %d\n", err);
+ return err;
+ }
err = tegra_mipi_enable(dsi->mipi);
if (err < 0)
@@ -897,6 +905,8 @@ static void tegra_dsi_prepare(struct tegra_dsi *dsi)
if (dsi->slave)
tegra_dsi_prepare(dsi->slave);
+
+ return 0;
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -907,8 +917,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
+ int err;
- tegra_dsi_prepare(dsi);
+ err = tegra_dsi_prepare(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to prepare: %d\n", err);
+ return;
+ }
state = tegra_dsi_get_state(dsi);
@@ -1028,7 +1043,7 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
static int tegra_dsi_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
@@ -1073,9 +1088,89 @@ static int tegra_dsi_exit(struct host1x_client *client)
return 0;
}
+static int tegra_dsi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(dsi->clk_lp);
+ clk_disable_unprepare(dsi->clk);
+
+ regulator_disable(dsi->vdd);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dsi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
+
+ return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops dsi_client_ops = {
.init = tegra_dsi_init,
.exit = tegra_dsi_exit,
+ .suspend = tegra_dsi_runtime_suspend,
+ .resume = tegra_dsi_runtime_resume,
};
static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
@@ -1594,79 +1689,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_dsi_suspend(struct device *dev)
-{
- struct tegra_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- if (dsi->rst) {
- err = reset_control_assert(dsi->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(dsi->clk_lp);
- clk_disable_unprepare(dsi->clk);
-
- regulator_disable(dsi->vdd);
-
- return 0;
-}
-
-static int tegra_dsi_resume(struct device *dev)
-{
- struct tegra_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->vdd);
- if (err < 0) {
- dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(dsi->clk);
- if (err < 0) {
- dev_err(dev, "cannot enable DSI clock: %d\n", err);
- goto disable_vdd;
- }
-
- err = clk_prepare_enable(dsi->clk_lp);
- if (err < 0) {
- dev_err(dev, "cannot enable low-power clock: %d\n", err);
- goto disable_clk;
- }
-
- usleep_range(1000, 2000);
-
- if (dsi->rst) {
- err = reset_control_deassert(dsi->rst);
- if (err < 0) {
- dev_err(dev, "cannot assert reset: %d\n", err);
- goto disable_clk_lp;
- }
- }
-
- return 0;
-
-disable_clk_lp:
- clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
- clk_disable_unprepare(dsi->clk);
-disable_vdd:
- regulator_disable(dsi->vdd);
- return err;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
-};
-
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
@@ -1680,7 +1702,6 @@ struct platform_driver tegra_dsi_driver = {
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
- .pm = &tegra_dsi_pm_ops,
},
.probe = tegra_dsi_probe,
.remove = tegra_dsi_remove,
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f49ad36e24db..56edef06c48e 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
- u32 *firmware_vaddr = falcon->firmware.vaddr;
- dma_addr_t daddr;
+ u32 *virt = falcon->firmware.virt;
size_t i;
- int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
-
- /* ensure that caches are flushed and falcon can see the firmware */
- daddr = dma_map_single(falcon->dev, firmware_vaddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- err = dma_mapping_error(falcon->dev, daddr);
- if (err) {
- dev_err(falcon->dev, "failed to map firmware: %d\n", err);
- return;
- }
- dma_sync_single_for_device(falcon->dev, daddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
- DMA_TO_DEVICE);
+ virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
- struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
@@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
return -EINVAL;
}
- os = falcon->firmware.vaddr + bin->os_header_offset;
+ os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
+ falcon->firmware.size = falcon->firmware.firmware->size;
+
return 0;
}
@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
- falcon->firmware.size = firmware->size;
-
- /* allocate iova space for the firmware */
- falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
- &falcon->firmware.paddr);
- if (IS_ERR(falcon->firmware.vaddr)) {
- dev_err(falcon->dev, "DMA memory mapping failed\n");
- return PTR_ERR(falcon->firmware.vaddr);
- }
-
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
@@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
- goto err_setup_firmware_image;
+ return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
-
-err_setup_firmware_image:
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr, falcon->firmware.vaddr);
-
- return err;
}
int falcon_init(struct falcon *falcon)
{
- /* check mandatory ops */
- if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
- return -EINVAL;
-
- falcon->firmware.vaddr = NULL;
+ falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
- if (falcon->firmware.firmware) {
+ if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
- falcon->firmware.firmware = NULL;
- }
-
- if (falcon->firmware.vaddr) {
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr,
- falcon->firmware.vaddr);
- falcon->firmware.vaddr = NULL;
- }
}
int falcon_boot(struct falcon *falcon)
@@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
u32 value;
int err;
- if (!falcon->firmware.vaddr)
+ if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
@@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
- falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 3d1243217410..c56ee32d92ee 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
-struct falcon;
-
-struct falcon_ops {
- void *(*alloc)(struct falcon *falcon, size_t size,
- dma_addr_t *paddr);
- void (*free)(struct falcon *falcon, size_t size,
- dma_addr_t paddr, void *vaddr);
-};
-
struct falcon_firmware_section {
unsigned long offset;
size_t size;
@@ -93,8 +84,9 @@ struct falcon_firmware {
const struct firmware *firmware;
/* Raw firmware data */
- dma_addr_t paddr;
- void *vaddr;
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
size_t size;
/* Parsed firmware information */
@@ -107,8 +99,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
- const struct falcon_ops *ops;
- void *data;
struct falcon_firmware firmware;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 888ed0d74ccd..84f0e01e3428 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -9,11 +9,13 @@
#include <linux/console.h>
-#include "drm.h"
-#include "gem.h"
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include "drm.h"
+#include "gem.h"
+
#ifdef CONFIG_DRM_FBDEV_EMULATION
static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
{
@@ -190,7 +192,7 @@ static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return __tegra_gem_mmap(&bo->gem, vma);
}
-static struct fb_ops tegra_fb_ops = {
+static const struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -267,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+ drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
- info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+ info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index df53a46285a3..623768100c6a 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -12,6 +12,9 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>
#include "drm.h"
@@ -24,17 +27,106 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+/* XXX move this into lib/scatterlist.c? */
+static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
+ unsigned int nents, gfp_t gfp_mask)
+{
+ struct scatterlist *dst;
+ unsigned int i;
+ int err;
+
+ err = sg_alloc_table(sgt, nents, gfp_mask);
+ if (err < 0)
+ return err;
+
+ dst = sgt->sgl;
+
+ for (i = 0; i < nents; i++) {
+ sg_set_page(dst, sg_page(sg), sg->length, 0);
+ dst = sg_next(dst);
+ sg = sg_next(sg);
+ }
+
+ return 0;
+}
+
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct sg_table *sgt;
+ int err;
- *sgt = obj->sgt;
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make
+ * sure to return the IOVA address of our mapping.
+ *
+ * Similarly, for buffers that have been allocated by the DMA API the
+ * physical address can be used for devices that are not attached to
+ * an IOMMU. For these devices, callers must pass a valid pointer via
+ * the @phys argument.
+ *
+ * Imported buffers were also already mapped at import time, so the
+ * existing mapping can be reused.
+ */
+ if (phys) {
+ *phys = obj->iova;
+ return NULL;
+ }
+
+ /*
+ * If we don't have a mapping for this buffer yet, return an SG table
+ * so that host1x can do the mapping for us via the DMA API.
+ */
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- return obj->paddr;
+ if (obj->pages) {
+ /*
+ * If the buffer object was allocated from the explicit IOMMU
+ * API code paths, construct an SG table from the pages.
+ */
+ err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+ 0, obj->gem.size, GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else if (obj->sgt) {
+ /*
+ * If the buffer object already has an SG table but no pages
+ * were allocated for it, it means the buffer was imported and
+ * the SG table needs to be copied so that mapping the copy does
+ * not overwrite the original table, which other users may still rely on.
+ */
+ err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
+ GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else {
+ /*
+ * If the buffer object had no pages allocated and if it was
+ * not imported, it had to be allocated with the DMA API, so
+ * the DMA API helper can be used.
+ */
+ err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
+ obj->gem.size);
+ if (err < 0)
+ goto free;
+ }
+
+ return sgt;
+
+free:
+ kfree(sgt);
+ return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -62,32 +154,6 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
vunmap(addr);
}
-static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
-{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
- if (obj->vaddr)
- return obj->vaddr + page * PAGE_SIZE;
- else if (obj->gem.import_attach)
- return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
- else
- return vmap(obj->pages + page, 1, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
-}
-
-static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
- void *addr)
-{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
- if (obj->vaddr)
- return;
- else if (obj->gem.import_attach)
- dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
- else
- vunmap(addr);
-}
-
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
@@ -104,8 +170,6 @@ static const struct host1x_bo_ops tegra_bo_ops = {
.unpin = tegra_bo_unpin,
.mmap = tegra_bo_mmap,
.munmap = tegra_bo_munmap,
- .kmap = tegra_bo_kmap,
- .kunmap = tegra_bo_kunmap,
};
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
@@ -130,9 +194,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
goto unlock;
}
- bo->paddr = bo->mm->start;
+ bo->iova = bo->mm->start;
- bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
bo->sgt->nents, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
@@ -158,7 +222,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
mutex_lock(&tegra->mm_lock);
- iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
@@ -206,7 +270,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
@@ -261,7 +325,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
@@ -356,13 +420,6 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0)
goto detach;
- } else {
- if (bo->sgt->nents > 1) {
- err = -EINVAL;
- goto detach;
- }
-
- bo->paddr = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
@@ -458,7 +515,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
@@ -505,25 +562,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return NULL;
if (bo->pages) {
- struct scatterlist *sg;
- unsigned int i;
-
- if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
- goto free;
-
- for_each_sg(sgt->sgl, sg, bo->num_pages, i)
- sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+ 0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
- if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+ gem->size) < 0)
goto free;
-
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
}
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+
return sgt;
free:
@@ -579,16 +629,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
-static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
- void *addr)
-{
-}
-
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
struct drm_gem_object *gem = buf->priv;
@@ -619,27 +659,24 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map = tegra_gem_prime_kmap,
- .unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
.vmap = tegra_gem_prime_vmap,
.vunmap = tegra_gem_prime_vunmap,
};
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
- struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.exp_name = KBUILD_MODNAME;
- exp_info.owner = drm->driver->fops->owner;
+ exp_info.owner = gem->dev->driver->fops->owner;
exp_info.ops = &tegra_gem_prime_dmabuf_ops;
exp_info.size = gem->size;
exp_info.flags = flags;
exp_info.priv = gem;
- return drm_gem_dmabuf_export(drm, &exp_info);
+ return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
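The reworked pin path above no longer hands back a raw DMA address: host1x_bo_pin() either fills in an address through the optional phys pointer or returns an SG table that the caller is expected to map with the DMA API. Below is a minimal sketch of a consumer following that contract for a device that sits behind the DMA API; example_map() is a hypothetical helper, not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/scatterlist.h>

/* Pin @bo for DMA by @dev and return the address of the resulting mapping. */
static dma_addr_t example_map(struct device *dev, struct host1x_bo *bo,
			      struct sg_table **sgtp)
{
	struct sg_table *sgt;

	/* passing NULL for @phys requests an SG table to map via the DMA API */
	sgt = host1x_bo_pin(dev, bo, NULL);
	if (IS_ERR(sgt))
		return DMA_MAPPING_ERROR;

	if (dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
		host1x_bo_unpin(dev, bo, sgt);
		return DMA_MAPPING_ERROR;
	}

	*sgtp = sgt;
	return sg_dma_address(sgt->sgl);
}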
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 413eae83ad81..fafb5724499b 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -11,7 +11,6 @@
#include <linux/host1x.h>
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
#define TEGRA_BO_BOTTOM_UP (1 << 0)
@@ -32,7 +31,7 @@ struct tegra_bo {
struct host1x_bo base;
unsigned long flags;
struct sg_table *sgt;
- dma_addr_t paddr;
+ dma_addr_t iova;
void *vaddr;
struct drm_mm_node *mm;
@@ -70,8 +69,7 @@ extern const struct vm_operations_struct tegra_bo_vm_ops;
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
- struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
int flags);
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
struct dma_buf *buf);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 8dbfb30344e7..48363f744bb9 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/iommu.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include "drm.h"
@@ -16,7 +17,6 @@ struct gr2d_soc {
};
struct gr2d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
@@ -34,12 +34,12 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
static int gr2d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr2d *gr2d = to_gr2d(drm);
int err;
- gr2d->channel = host1x_channel_request(client->dev);
+ gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
@@ -50,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
goto put;
}
- gr2d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr2d->group)) {
- err = PTR_ERR(gr2d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -66,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -77,7 +76,7 @@ put:
static int gr2d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct gr2d *gr2d = to_gr2d(drm);
int err;
@@ -86,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 8b9a35b1cbb3..c0a528be0369 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -23,7 +23,6 @@ struct gr3d_soc {
};
struct gr3d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
@@ -44,12 +43,12 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
static int gr3d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr3d *gr3d = to_gr3d(drm);
int err;
- gr3d->channel = host1x_channel_request(client->dev);
+ gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
@@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
goto put;
}
- gr3d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr3d->group)) {
- err = PTR_ERR(gr3d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -87,7 +85,7 @@ put:
static int gr3d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct gr3d *gr3d = to_gr3d(drm);
int err;
@@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 334c4d7d238b..6f117628f257 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -6,9 +6,11 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
#include <linux/math64.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -16,6 +18,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include "hda.h"
@@ -1141,6 +1146,7 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
+ int err;
/*
* The following accesses registers of the display controller, so make
@@ -1166,7 +1172,9 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
- pm_runtime_put(hdmi->dev);
+ err = host1x_client_suspend(&hdmi->client);
+ if (err < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", err);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -1181,7 +1189,11 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;
- pm_runtime_get_sync(hdmi->dev);
+ err = host1x_client_resume(&hdmi->client);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to resume: %d\n", err);
+ return;
+ }
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
@@ -1419,15 +1431,16 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
static int tegra_hdmi_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct drm_device *drm = dev_get_drvdata(client->host);
int err;
hdmi->output.dev = client->dev;
- drm_connector_init(drm, &hdmi->output.connector,
- &tegra_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->output.connector,
+ &tegra_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->output.ddc);
drm_connector_helper_add(&hdmi->output.connector,
&tegra_hdmi_connector_helper_funcs);
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
@@ -1484,9 +1497,66 @@ static int tegra_hdmi_exit(struct host1x_client *client)
return 0;
}
+static int tegra_hdmi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(hdmi->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(hdmi->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops hdmi_client_ops = {
.init = tegra_hdmi_init,
.exit = tegra_hdmi_exit,
+ .suspend = tegra_hdmi_runtime_suspend,
+ .resume = tegra_hdmi_runtime_resume,
};
static const struct tegra_hdmi_config tegra20_hdmi_config = {
@@ -1694,58 +1764,10 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_hdmi_suspend(struct device *dev)
-{
- struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(hdmi->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(hdmi->clk);
-
- return 0;
-}
-
-static int tegra_hdmi_resume(struct device *dev)
-{
- struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
- int err;
-
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- usleep_range(1000, 2000);
-
- err = reset_control_deassert(hdmi->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- clk_disable_unprepare(hdmi->clk);
- return err;
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
-};
-
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
.of_match_table = tegra_hdmi_of_match,
- .pm = &tegra_hdmi_pm_ops,
},
.probe = tegra_hdmi_probe,
.remove = tegra_hdmi_remove,
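With runtime PM folded into the host1x client ops above, code that touches the hardware brackets the access with host1x_client_resume() and host1x_client_suspend() and checks the resume result, rather than assuming pm_runtime_get_sync() cannot fail. A minimal sketch of that calling pattern follows; example_modeset() is hypothetical.

#include <linux/device.h>
#include <linux/host1x.h>

static void example_modeset(struct host1x_client *client)
{
	int err;

	err = host1x_client_resume(client);
	if (err < 0) {
		dev_err(client->dev, "failed to resume: %d\n", err);
		return;
	}

	/* ... program hardware registers here ... */

	host1x_client_suspend(client);
}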
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 92f202ec0577..8183e617bf6b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -13,9 +14,9 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include "drm.h"
@@ -94,17 +95,25 @@ static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
+ int err = 0;
+
mutex_lock(&wgrp->lock);
if (wgrp->usecount == 0) {
- pm_runtime_get_sync(wgrp->parent);
+ err = host1x_client_resume(wgrp->parent);
+ if (err < 0) {
+ dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
+ goto unlock;
+ }
+
reset_control_deassert(wgrp->rst);
}
wgrp->usecount++;
- mutex_unlock(&wgrp->lock);
- return 0;
+unlock:
+ mutex_unlock(&wgrp->lock);
+ return err;
}
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
@@ -120,7 +129,7 @@ static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
wgrp->index);
}
- pm_runtime_put(wgrp->parent);
+ host1x_client_suspend(wgrp->parent);
}
wgrp->usecount--;
@@ -378,6 +387,7 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc;
u32 value;
+ int err;
/* rien ne va plus */
if (!old_state || !old_state->crtc)
@@ -385,6 +395,12 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
dc = to_tegra_dc(old_state->crtc);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fallback to the
@@ -393,15 +409,13 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
if (WARN_ON(p->dc == NULL))
p->dc = dc;
- pm_runtime_get_sync(dc->dev);
-
value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
tegra_dc_remove_shared_plane(dc, p);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
@@ -412,9 +426,9 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = plane->state->normalized_zpos;
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_bo *bo;
dma_addr_t base;
u32 value;
+ int err;
/* rien ne va plus */
if (!plane->state->crtc || !plane->state->fb)
@@ -425,7 +439,11 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
return;
}
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
tegra_dc_assign_shared_plane(dc, p);
@@ -455,8 +473,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- bo = tegra_fb_get_plane(fb, 0);
- base = bo->paddr;
+ base = state->iova[0] + fb->offsets[0];
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -516,10 +533,12 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value &= ~CONTROL_CSC_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
@@ -550,7 +569,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
plane->base.index = index;
plane->wgrp = &hub->wgrps[wgrp];
- plane->wgrp->parent = dc->dev;
+ plane->wgrp->parent = &dc->client;
p = &plane->base.base;
@@ -604,11 +623,8 @@ static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
struct drm_atomic_state *state)
{
- struct drm_device *drm = dev_get_drvdata(hub->client.parent);
struct drm_private_state *priv;
- WARN_ON(!drm_modeset_is_locked(&drm->mode_config.connection_mutex));
-
priv = drm_atomic_get_private_obj_state(state, &hub->base);
if (IS_ERR(priv))
return ERR_CAST(priv);
@@ -658,8 +674,13 @@ int tegra_display_hub_atomic_check(struct drm_device *drm,
static void tegra_display_hub_update(struct tegra_dc *dc)
{
u32 value;
+ int err;
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
value &= ~LATENCY_EVENT;
@@ -674,7 +695,7 @@ static void tegra_display_hub_update(struct tegra_dc *dc)
tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
void tegra_display_hub_atomic_commit(struct drm_device *drm,
@@ -707,7 +728,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm,
static int tegra_display_hub_init(struct host1x_client *client)
{
struct tegra_display_hub *hub = to_tegra_display_hub(client);
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub_state *state;
@@ -725,7 +746,7 @@ static int tegra_display_hub_init(struct host1x_client *client)
static int tegra_display_hub_exit(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
drm_atomic_private_obj_fini(&tegra->hub->base);
@@ -734,9 +755,85 @@ static int tegra_display_hub_exit(struct host1x_client *client)
return 0;
}
+static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i = hub->num_heads;
+ int err;
+
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+ clk_disable_unprepare(hub->clk_dsc);
+ clk_disable_unprepare(hub->clk_disp);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hub->clk_disp);
+ if (err < 0)
+ goto put_rpm;
+
+ err = clk_prepare_enable(hub->clk_dsc);
+ if (err < 0)
+ goto disable_disp;
+
+ err = clk_prepare_enable(hub->clk_hub);
+ if (err < 0)
+ goto disable_dsc;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
+ err = reset_control_deassert(hub->rst);
+ if (err < 0)
+ goto disable_heads;
+
+ return 0;
+
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+disable_dsc:
+ clk_disable_unprepare(hub->clk_dsc);
+disable_disp:
+ clk_disable_unprepare(hub->clk_disp);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops tegra_display_hub_ops = {
.init = tegra_display_hub_init,
.exit = tegra_display_hub_exit,
+ .suspend = tegra_display_hub_runtime_suspend,
+ .resume = tegra_display_hub_runtime_resume,
};
static int tegra_display_hub_probe(struct platform_device *pdev)
@@ -853,6 +950,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
static int tegra_display_hub_remove(struct platform_device *pdev)
{
struct tegra_display_hub *hub = platform_get_drvdata(pdev);
+ unsigned int i;
int err;
err = host1x_client_unregister(&hub->client);
@@ -861,78 +959,17 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
err);
}
- pm_runtime_disable(&pdev->dev);
-
- return err;
-}
-
-static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
-{
- struct tegra_display_hub *hub = dev_get_drvdata(dev);
- unsigned int i = hub->num_heads;
- int err;
-
- err = reset_control_assert(hub->rst);
- if (err < 0)
- return err;
-
- while (i--)
- clk_disable_unprepare(hub->clk_heads[i]);
-
- clk_disable_unprepare(hub->clk_hub);
- clk_disable_unprepare(hub->clk_dsc);
- clk_disable_unprepare(hub->clk_disp);
-
- return 0;
-}
-
-static int __maybe_unused tegra_display_hub_resume(struct device *dev)
-{
- struct tegra_display_hub *hub = dev_get_drvdata(dev);
- unsigned int i;
- int err;
-
- err = clk_prepare_enable(hub->clk_disp);
- if (err < 0)
- return err;
-
- err = clk_prepare_enable(hub->clk_dsc);
- if (err < 0)
- goto disable_disp;
-
- err = clk_prepare_enable(hub->clk_hub);
- if (err < 0)
- goto disable_dsc;
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- for (i = 0; i < hub->num_heads; i++) {
- err = clk_prepare_enable(hub->clk_heads[i]);
- if (err < 0)
- goto disable_heads;
+ mutex_destroy(&wgrp->lock);
}
- err = reset_control_deassert(hub->rst);
- if (err < 0)
- goto disable_heads;
-
- return 0;
-
-disable_heads:
- while (i--)
- clk_disable_unprepare(hub->clk_heads[i]);
+ pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(hub->clk_hub);
-disable_dsc:
- clk_disable_unprepare(hub->clk_dsc);
-disable_disp:
- clk_disable_unprepare(hub->clk_disp);
return err;
}
-static const struct dev_pm_ops tegra_display_hub_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_display_hub_suspend,
- tegra_display_hub_resume, NULL)
-};
-
static const struct tegra_display_hub_soc tegra186_display_hub = {
.num_wgrps = 6,
.supports_dsc = true,
@@ -960,7 +997,6 @@ struct platform_driver tegra_display_hub_driver = {
.driver = {
.name = "tegra-display-hub",
.of_match_table = tegra_display_hub_of_match,
- .pm = &tegra_display_hub_pm_ops,
},
.probe = tegra_display_hub_probe,
.remove = tegra_display_hub_remove,
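The new ->resume() callbacks in this file and in hdmi.c follow the usual goto-unwind ladder: acquire power, clock and reset in order, releasing them in reverse if any step fails. Here is a minimal, generic sketch of the idiom with hypothetical names (example_resume, a single clock and reset line); it is not the hub code itself.

#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

static int example_resume(struct device *dev, struct clk *clk,
			  struct reset_control *rst)
{
	int err;

	err = pm_runtime_get_sync(dev);		/* power up the domain */
	if (err < 0)
		return err;

	err = clk_prepare_enable(clk);		/* clock before releasing reset */
	if (err < 0)
		goto put_rpm;

	err = reset_control_deassert(rst);
	if (err < 0)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(clk);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}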
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 41541e261c91..3efa1be07ff8 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -6,7 +6,6 @@
#ifndef TEGRA_HUB_H
#define TEGRA_HUB_H 1
-#include <drm/drmP.h>
#include <drm/drm_plane.h>
#include "plane.h"
@@ -18,7 +17,7 @@ struct tegra_windowgroup {
struct mutex lock;
unsigned int index;
- struct device *parent;
+ struct host1x_client *parent;
struct reset_control *rst;
};
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index bdcaa4c7168c..a264259b97a2 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -23,7 +23,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
* ignore any other means of obtaining a mode.
*/
if (output->panel) {
- err = output->panel->funcs->get_modes(output->panel);
+ err = drm_panel_get_modes(output->panel, connector);
if (err > 0)
return err;
}
@@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
void tegra_output_connector_destroy(struct drm_connector *connector)
{
+ struct tegra_output *output = connector_to_output(connector);
+
+ if (output->cec)
+ cec_notifier_conn_unregister(output->cec);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
disable_irq(output->hpd_irq);
}
- output->cec = cec_notifier_get(output->dev);
- if (!output->cec)
- return -ENOMEM;
-
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
- if (output->cec)
- cec_notifier_put(output->cec);
-
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
@@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
+ int connector_type;
int err;
if (output->panel) {
@@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
+ connector_type = output->connector.connector_type;
+ /*
+ * Create a CEC notifier for HDMI connectors.
+ */
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ struct cec_connector_info conn_info;
+
+ cec_fill_conn_info_from_drm(&conn_info, &output->connector);
+ output->cec = cec_notifier_conn_register(output->dev, NULL,
+ &conn_info);
+ if (!output->cec)
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -236,3 +250,19 @@ void tegra_output_find_possible_crtcs(struct tegra_output *output,
output->encoder.possible_crtcs = mask;
}
+
+int tegra_output_suspend(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ disable_irq(output->hpd_irq);
+
+ return 0;
+}
+
+int tegra_output_resume(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ enable_irq(output->hpd_irq);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index df80ca07e46e..9ccfb56e9b01 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -3,8 +3,12 @@
* Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/iommu.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
@@ -22,6 +26,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
+ unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
@@ -35,6 +40,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
+
+ for (i = 0; i < 3; i++)
+ state->iova[i] = DMA_MAPPING_ERROR;
}
}
@@ -59,6 +67,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
+ for (i = 0; i < 3; i++) {
+ copy->iova[i] = DMA_MAPPING_ERROR;
+ copy->sgt[i] = NULL;
+ }
+
return &copy->base;
}
@@ -94,6 +107,115 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.format_mod_supported = tegra_plane_format_mod_supported,
};
+static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ dma_addr_t phys_addr, *phys;
+ struct sg_table *sgt;
+
+ if (!domain || dc->client.group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ if (sgt) {
+ err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (err == 0) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ /*
+ * The display controller needs contiguous memory, so
+ * fail if the buffer is discontiguous and we fail to
+ * map its SG table to a single contiguous chunk of
+ * I/O virtual memory.
+ */
+ if (err > 1) {
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ state->iova[i] = sg_dma_address(sgt->sgl);
+ state->sgt[i] = sgt;
+ } else {
+ state->iova[i] = phys_addr;
+ }
+ }
+
+ return 0;
+
+unpin:
+ dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
+
+ while (i--) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+ if (sgt)
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+ if (sgt)
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+}
+
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (!state->fb)
+ return 0;
+
+ drm_gem_fb_prepare_fb(plane, state);
+
+ return tegra_dc_pin(dc, to_tegra_plane_state(state));
+}
+
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (dc)
+ tegra_dc_unpin(dc, to_tegra_plane_state(state));
+}
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
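The key constraint behind tegra_dc_pin() above is spelled out in its comment: the display controller cannot scatter-gather, so after dma_map_sg() the buffer must have collapsed into exactly one IOVA segment. The following sketch shows that check in isolation; example_map_contiguous() is hypothetical and, unlike the patch, also unmaps on the multi-segment error path.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_map_contiguous(struct device *dev, struct sg_table *sgt,
				  dma_addr_t *iova)
{
	int count;

	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;		/* nothing was mapped */

	if (count > 1) {
		/* the IOMMU did not merge the chunks into one IOVA range */
		dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
		return -EINVAL;
	}

	*iova = sg_dma_address(sgt->sgl);
	return 0;
}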
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 510c394e6d9a..a158a915109a 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
+ struct sg_table *sgt[3];
+ dma_addr_t iova[3];
+
struct tegra_bo_tiling tiling;
u32 format;
u32 swap;
@@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
extern const struct drm_plane_funcs tegra_plane_funcs;
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 4ffe3794e6d3..81226a4953c1 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -17,11 +18,14 @@
#include <soc/tegra/pmc.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_scdc_helper.h>
#include "dc.h"
+#include "dp.h"
#include "drm.h"
#include "hda.h"
#include "sor.h"
@@ -367,10 +371,11 @@ struct tegra_sor_regs {
};
struct tegra_sor_soc {
- bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
+ bool supports_audio;
+ bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
@@ -379,6 +384,12 @@ struct tegra_sor_soc {
unsigned int num_settings;
const u8 *xbar_cfg;
+ const u8 *lane_map;
+
+ const u8 (*voltage_swing)[4][4];
+ const u8 (*pre_emphasis)[4][4];
+ const u8 (*post_cursor)[4][4];
+ const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
@@ -387,6 +398,8 @@ struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
int (*remove)(struct tegra_sor *sor);
+ void (*audio_enable)(struct tegra_sor *sor);
+ void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
@@ -409,6 +422,7 @@ struct tegra_sor {
u8 xbar_cfg[5];
+ struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
@@ -511,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_pad_parents[] = {
- "pll_d2_out0", "pll_dp"
+static const char * const tegra_clk_sor_pad_parents[2][2] = {
+ { "pll_d_out0", "pll_dp" },
+ { "pll_d2_out0", "pll_dp" },
};
+/*
+ * Implementing ->set_parent() here isn't really required because the parent
+ * will be explicitly selected in the driver code via the DP_CLK_SEL mux in
+ * the SOR_CLK_CNTRL register. This is primarily for compatibility with the
+ * Tegra186 and later SoC generations where the BPMP implements this clock
+ * and doesn't expose the mux via the common clock framework.
+ */
+
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
@@ -583,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_pad_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.parent_names = tegra_clk_sor_pad_parents[sor->index];
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
@@ -594,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
return clk;
}
-static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
- struct drm_dp_link *link)
+static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
+ struct drm_dp_link *link = &sor->link;
unsigned int i;
- u8 pattern;
+
+ /* Tegra only supports RBR, HBR and HBR2 */
+ for (i = 0; i < link->num_rates; i++) {
+ switch (link->rates[i]) {
+ case 1620000:
+ case 2700000:
+ case 5400000:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
+ link->rates[i]);
+ link->rates[i] = 0;
+ break;
+ }
+ }
+
+ drm_dp_link_update_rates(link);
+}
+
+static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
+{
+ unsigned long timeout;
u32 value;
- int err;
- /* setup lane parameters */
- value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+ /*
+ * Clear or set the PD_TXD bit corresponding to each lane, depending
+ * on whether it is used or not.
+ */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
- value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
- SOR_LANE_POSTCURSOR_LANE2(0x00) |
- SOR_LANE_POSTCURSOR_LANE1(0x00) |
- SOR_LANE_POSTCURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
- /* disable LVDS mode */
- tegra_sor_writel(sor, 0, SOR_LVDS);
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_TX_PU_ENABLE;
- value &= ~SOR_DP_PADCTL_TX_PU_MASK;
- value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
+{
+ u32 value;
+
+ /* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
- SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- usleep_range(10, 100);
+ usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
- if (err < 0)
- return err;
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN1;
- value = (value << 8) | lane;
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- pattern = DP_TRAINING_PATTERN_1;
+static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
+ const struct tegra_sor_soc *soc = sor->soc;
+ u32 pattern = 0, tx_pu = 0, value;
+ unsigned int i;
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+ for (value = 0, i = 0; i < link->lanes; i++) {
+ u8 vs = link->train.request.voltage_swing[i];
+ u8 pe = link->train.request.pre_emphasis[i];
+ u8 pc = link->train.request.post_cursor[i];
+ u8 shift = sor->soc->lane_map[i] << 3;
- value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- value |= SOR_DP_SPARE_SEQ_ENABLE;
- value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
- value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+ voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
+ pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
+ post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
+
+ if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
+ tx_pu = sor->soc->tx_pu[pc][vs][pe];
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ value = SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ break;
+
+ case DP_TRAINING_PATTERN_1:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ break;
+
+ case DP_TRAINING_PATTERN_3:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN2;
- value = (value << 8) | lane;
+ if (link->caps.channel_coding)
+ value |= SOR_DP_TPG_CHANNEL_CODING;
+
+ pattern = pattern << 8 | value;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
+ tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
- pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
+ if (link->caps.tps3_supported)
+ tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+ tegra_sor_writel(sor, pattern, SOR_DP_TPG);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(tx_pu);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
+static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ unsigned int rate, lanes;
+ u32 value;
+ int err;
+
+ rate = drm_dp_link_rate_to_bw_code(link->rate);
+ lanes = link->lanes;
+
+ /* configure link speed and lane count */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link->caps.enhanced_framing)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ usleep_range(400, 1000);
+
+ /* configure load pulse position adjustment */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+
+ switch (rate) {
+ case DP_LINK_BW_1_62:
+ value |= SOR_PLL1_LOADADJ(0x3);
+ break;
+
+ case DP_LINK_BW_2_7:
+ value |= SOR_PLL1_LOADADJ(0x4);
+ break;
+
+ case DP_LINK_BW_5_4:
+ value |= SOR_PLL1_LOADADJ(0x6);
+ break;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_DISABLE;
+ /* use alternate scrambler reset for eDP */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
+ if (link->edp == 0)
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_power_down_lanes(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down lanes: %d\n", err);
+ return err;
+ }
+
+ /* power up and pre-charge lanes */
+ err = tegra_sor_power_up_lanes(sor, lanes);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
+ lanes, (lanes != 1) ? "s" : "", err);
return err;
+ }
+
+ tegra_sor_dp_precharge(sor, lanes);
return 0;
}
+static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
+ .apply_training = tegra_sor_dp_link_apply_training,
+ .configure = tegra_sor_dp_link_configure,
+};
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
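Both lane sequencer helpers added above wait for the TRIGGER bit to clear using a bounded, jiffies-based poll. A generic sketch of that idiom follows, with a hypothetical example_wait_clear() and a read callback standing in for the SOR register accessor; the final re-read avoids reporting a timeout when the bit happened to clear during the last sleep.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int example_wait_clear(u32 (*read)(void *ctx), void *ctx, u32 mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(250);
	u32 value;

	while (time_before(jiffies, timeout)) {
		value = read(ctx);
		if ((value & mask) == 0)
			return 0;

		usleep_range(250, 1000);
	}

	/* re-read once so a completion during the last sleep is not lost */
	value = read(ctx);
	return (value & mask) ? -ETIMEDOUT : 0;
}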
@@ -909,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
u32 num_syms_per_line;
unsigned int i;
- if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
+ if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
- output = link_rate * 8 * link->num_lanes;
input = pclk * config->bits_per_pixel;
+ output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
@@ -956,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
- (link->num_lanes * 8);
+ (link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
@@ -973,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
- config->hblank_symbols -= 12 / link->num_lanes;
+ config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
- config->vblank_symbols -= 36 / link->num_lanes + 4;
+ config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
@@ -1197,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
- SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
- return -ETIMEDOUT;
-
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
@@ -1581,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_edp_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- err = tegra_sor_detach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-
- tegra_sor_writel(sor, 0, SOR_STATE1);
- tegra_sor_update(sor);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_sor_power_down(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-
- if (sor->aux) {
- err = drm_dp_aux_disable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- }
-
- err = tegra_io_pad_power_disable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- pm_runtime_put(sor->dev);
-}
-
-#if 0
-static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
- unsigned int *value)
-{
- unsigned int hfp, hsw, hbp, a = 0, b;
-
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
-
- b = hfp - 1;
-
- pr_info("a: %u, b: %u\n", a, b);
- pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
-
- if (a + hsw + hbp <= 11) {
- a = 1 + 11 - hsw - hbp;
- pr_info("a: %u\n", a);
- }
-
- if (a > b)
- return -EINVAL;
-
- if (hsw < 1)
- return -EINVAL;
-
- if (mode->hdisplay < 16)
- return -EINVAL;
-
- if (value) {
- if (b > a && a % 2)
- *value = a + 1;
- else
- *value = a;
- }
-
- return 0;
-}
-#endif
-
-static void tegra_sor_edp_enable(struct drm_encoder *encoder)
-{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- struct tegra_sor_config config;
- struct tegra_sor_state *state;
- struct drm_dp_link link;
- u8 rate, lanes;
- unsigned int i;
- int err = 0;
- u32 value;
-
- state = to_sor_state(output->connector.state);
-
- pm_runtime_get_sync(sor->dev);
-
- if (output->panel)
- drm_panel_prepare(output->panel);
-
- err = drm_dp_aux_enable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DP: %d\n", err);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0) {
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
- return;
- }
-
- /* switch to safe parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
- dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
-
- memset(&config, 0, sizeof(config));
- config.bits_per_pixel = state->bpc * 3;
-
- err = tegra_sor_compute_config(sor, mode, &config, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to compute configuration: %d\n", err);
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
- value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
- usleep_range(20, 100);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll3);
- value |= SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, sor->soc->regs->pll3);
-
- value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
- SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, sor->soc->regs->pll1);
-
- while (true) {
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /*
- * power up
- */
-
- /* set safe link bandwidth (1.62 Gbps) */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- /* step 1 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
- SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* step 2 */
- err = tegra_io_pad_power_enable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
-
- usleep_range(5, 100);
-
- /* step 3 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(20, 100);
-
- /* step 4 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value &= ~SOR_PLL0_VCOPD;
- value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(200, 1000);
-
- /* step 5 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /* XXX not in TRM */
- for (value = 0, i = 0; i < 5; i++)
- value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
- SOR_XBAR_CTRL_LINK1_XSEL(i, i);
-
- tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
-
- /* switch to DP parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
- if (err < 0)
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
-
- /* power DP lanes */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
-
- if (link.num_lanes <= 2)
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
- else
- value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
-
- if (link.num_lanes <= 1)
- value &= ~SOR_DP_PADCTL_PD_TXD_1;
- else
- value |= SOR_DP_PADCTL_PD_TXD_1;
-
- if (link.num_lanes == 0)
- value &= ~SOR_DP_PADCTL_PD_TXD_0;
- else
- value |= SOR_DP_PADCTL_PD_TXD_0;
-
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* start lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
- SOR_LANE_SEQ_CTL_POWER_STATE_UP;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- while (true) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- /* set link bandwidth */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- tegra_sor_apply_config(sor, &config);
-
- /* enable link */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value |= SOR_DP_LINKCTL_ENABLE;
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- for (i = 0, value = 0; i < 4; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- /* enable pad calibration logic */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
-
- err = drm_dp_link_power_up(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
-
- err = drm_dp_link_configure(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
-
- rate = drm_dp_link_rate_to_bw_code(link.rate);
- lanes = link.num_lanes;
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
-
- if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* disable training pattern generator */
-
- for (i = 0; i < link.num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- err = tegra_sor_dp_train_fast(sor, &link);
- if (err < 0)
- dev_err(sor->dev, "DP fast link training failed: %d\n", err);
-
- dev_dbg(sor->dev, "fast link training succeeded\n");
-
- err = tegra_sor_power_up(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-
- /* CSTM (LVDS, link A/B, upper) */
- value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
- SOR_CSTM_UPPER;
- tegra_sor_writel(sor, value, SOR_CSTM);
-
- /* use DP-A protocol */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
- value |= SOR_STATE_ASY_PROTOCOL_DP_A;
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- tegra_sor_mode_set(sor, mode, state);
-
- /* PWM setup */
- err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-
- tegra_sor_update(sor);
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
-
- err = tegra_sor_attach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-
- err = tegra_sor_wakeup(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DC: %d\n", err);
-
- if (output->panel)
- drm_panel_enable(output->panel);
-}
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -2027,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
- .disable = tegra_sor_edp_disable,
- .enable = tegra_sor_edp_enable,
- .atomic_check = tegra_sor_encoder_atomic_check,
-};
-
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
@@ -2157,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
tegra_sor_write_eld(sor);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
@@ -2166,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+}
+
+static void tegra_sor_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->format.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
@@ -2203,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
- value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
-
- /* select HDA audio input */
- value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
- value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
-
- /* inject null samples */
- if (sor->format.channels != 2)
- value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
- else
- value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
-
- value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
-
- tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
-
- /* enable advertising HBR capability */
- tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+ tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
@@ -2396,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
- else
- value &= ~SOR_ENABLE(sor->index);
+ value &= ~SOR1_TIMING_CYA;
+
+ value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2412,7 +2255,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
- pm_runtime_put(sor->dev);
+ host1x_client_suspend(&sor->client);
}
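The disable path above now ends in host1x_client_suspend() instead of a direct pm_runtime_put(), and the matching enable path below starts with host1x_client_resume(). As a rough sketch of that pairing (not taken from this patch; the structure and names below are placeholders), the pattern looks like this:

#include <linux/device.h>
#include <linux/host1x.h>

struct example_output {
	struct host1x_client client;
	struct device *dev;
};

/* enable: power the block up through the host1x client helpers */
static void example_output_enable(struct example_output *out)
{
	int err;

	err = host1x_client_resume(&out->client);
	if (err < 0) {
		dev_err(out->dev, "failed to resume: %d\n", err);
		return;
	}

	/* ... program clocks and registers here ... */
}

/* disable: tear down and balance the resume from the enable path */
static void example_output_disable(struct example_output *out)
{
	/* ... stop scanout, power down pads ... */

	host1x_client_suspend(&out->client);
}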
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -2433,7 +2276,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
mode = &encoder->crtc->state->adjusted_mode;
pclk = mode->clock * 1000;
- pm_runtime_get_sync(sor->dev);
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -2556,16 +2403,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- /* switch to parent clock */
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
return;
}
+#endif
+ /* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
- dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
return;
}
@@ -2771,9 +2636,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
- else
- value |= SOR_ENABLE(sor->index);
+ value |= SOR1_TIMING_CYA;
+
+ value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2800,18 +2665,411 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_dp_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ /*
+ * Do not attempt to power down a DP link if we're not connected since
+ * the AUX transactions would just be timing out.
+ */
+ if (output->connector.status != connector_status_disconnected) {
+ err = drm_dp_link_power_down(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down link: %d\n",
+ err);
+ }
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_commit(dc);
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe clock: %d\n", err);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ err = drm_dp_aux_disable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed disable DPAUX: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ host1x_client_suspend(&sor->client);
+}
+
+static void tegra_sor_dp_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
+
+ err = drm_dp_link_probe(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe DP link: %d\n", err);
+
+ tegra_sor_filter_rates(sor);
+
+ err = drm_dp_link_choose(&sor->link, mode, info);
+ if (err < 0)
+ dev_err(sor->dev, "failed to choose link: %d\n", err);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 40);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (output->panel)
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ else
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ /* XXX not in TRM */
+ if (output->panel)
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ /* XXX not in TRM */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(0x1);
+ value |= SOR_PLL0_VCOCAP(0x3);
+ value |= SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* enable port */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ err = drm_dp_link_train(&sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "link training failed: %d\n", err);
+ else
+ dev_dbg(sor->dev, "link training succeeded\n");
+
+ err = drm_dp_link_power_up(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up DP link: %d\n", err);
+
+ /* compute configuration */
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = state->bpc * 3;
+
+ err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
+
+ tegra_sor_apply_config(sor, &config);
+ tegra_sor_mode_set(sor, mode, state);
+
+ if (output->panel) {
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ }
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* attach and wake up */
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
+ .disable = tegra_sor_dp_disable,
+ .enable = tegra_sor_dp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+ .audio_enable = tegra_sor_hdmi_audio_enable,
+ .audio_disable = tegra_sor_hdmi_audio_disable,
+};
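The probe/remove pair above fetches and enables the three supplies one at a time with per-supply error messages. As an illustrative alternative only (not what this driver does; the supply names are copied from the code above, everything else is made up), the bulk regulator API would collapse that to:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_sor_supplies[] = {
	{ .supply = "avdd-io" },
	{ .supply = "vdd-pll" },
	{ .supply = "hdmi" },
};

/* grab and enable all supplies in one shot; devm handles the release */
static int example_sor_get_supplies(struct device *dev)
{
	int err;

	err = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_sor_supplies),
				      example_sor_supplies);
	if (err < 0)
		return err;

	return regulator_bulk_enable(ARRAY_SIZE(example_sor_supplies),
				     example_sor_supplies);
}

/* the remove path then reduces to a single call */
static void example_sor_put_supplies(void)
{
	regulator_bulk_disable(ARRAY_SIZE(example_sor_supplies),
			       example_sor_supplies);
}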
+
+static int tegra_sor_dp_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply))
+ return PTR_ERR(sor->avdd_io_supply);
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0)
+ return err;
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply))
+ return PTR_ERR(sor->vdd_pll_supply);
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_sor_dp_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_dp_ops = {
+ .name = "DP",
+ .probe = tegra_sor_dp_probe,
+ .remove = tegra_sor_dp_remove,
+};
+
static int tegra_sor_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
const struct drm_encoder_helper_funcs *helpers = NULL;
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
- u32 value;
int err;
if (!sor->aux) {
- if (sor->soc->supports_hdmi) {
+ if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
@@ -2820,21 +3078,26 @@ static int tegra_sor_init(struct host1x_client *client)
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
- if (sor->soc->supports_edp) {
+ if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
- helpers = &tegra_sor_edp_helpers;
- } else if (sor->soc->supports_dp) {
+ helpers = &tegra_sor_dp_helpers;
+ } else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
}
+
+ sor->link.ops = &tegra_sor_dp_link_ops;
+ sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
- drm_connector_init(drm, &sor->output.connector,
- &tegra_sor_connector_funcs,
- connector);
+ drm_connector_init_with_ddc(drm, &sor->output.connector,
+ &tegra_sor_connector_funcs,
+ connector,
+ sor->output.ddc);
drm_connector_helper_add(&sor->output.connector,
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
@@ -2910,15 +3173,6 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
- /*
- * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
- * is used for interoperability between the HDA codec driver and the
- * HDMI/DP driver.
- */
- value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
- tegra_sor_writel(sor, value, SOR_INT_ENABLE);
- tegra_sor_writel(sor, value, SOR_INT_MASK);
-
return 0;
}
@@ -2927,9 +3181,6 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
- tegra_sor_writel(sor, 0, SOR_INT_MASK);
- tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
-
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -2947,78 +3198,80 @@ static int tegra_sor_exit(struct host1x_client *client)
return 0;
}
-static const struct host1x_client_ops sor_client_ops = {
- .init = tegra_sor_init,
- .exit = tegra_sor_exit,
-};
-
-static const struct tegra_sor_ops tegra_sor_edp_ops = {
- .name = "eDP",
-};
-
-static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+static int tegra_sor_runtime_suspend(struct host1x_client *client)
{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
int err;
- sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
- if (IS_ERR(sor->avdd_io_supply)) {
- dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
- PTR_ERR(sor->avdd_io_supply));
- return PTR_ERR(sor->avdd_io_supply);
- }
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
- err = regulator_enable(sor->avdd_io_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
- err);
- return err;
+ reset_control_release(sor->rst);
}
- sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
- if (IS_ERR(sor->vdd_pll_supply)) {
- dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
- PTR_ERR(sor->vdd_pll_supply));
- return PTR_ERR(sor->vdd_pll_supply);
- }
+ usleep_range(1000, 2000);
- err = regulator_enable(sor->vdd_pll_supply);
+ clk_disable_unprepare(sor->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_sor_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
if (err < 0) {
- dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
- err);
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
}
- sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
- if (IS_ERR(sor->hdmi_supply)) {
- dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
- PTR_ERR(sor->hdmi_supply));
- return PTR_ERR(sor->hdmi_supply);
- }
-
- err = regulator_enable(sor->hdmi_supply);
+ err = clk_prepare_enable(sor->clk);
if (err < 0) {
- dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
- return err;
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
}
- INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+ usleep_range(1000, 2000);
- return 0;
-}
+ if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ goto disable_clk;
+ }
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto release_reset;
+ }
+ }
return 0;
+
+release_reset:
+ reset_control_release(sor->rst);
+disable_clk:
+ clk_disable_unprepare(sor->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
}
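The runtime PM callbacks above rely on the acquire/release flavour of the reset API, so the reset line is only owned while the SOR is powered. A minimal sketch of that lifecycle with generic names (not code from this patch):

#include <linux/reset.h>

/* power on: take ownership of the reset line, then deassert it */
static int example_power_on(struct reset_control *rst)
{
	int err;

	err = reset_control_acquire(rst);
	if (err < 0)
		return err;

	err = reset_control_deassert(rst);
	if (err < 0) {
		reset_control_release(rst);
		return err;
	}

	return 0;
}

/* power off: put the block back into reset and give up ownership */
static int example_power_off(struct reset_control *rst)
{
	int err;

	err = reset_control_assert(rst);
	if (err < 0)
		return err;

	reset_control_release(rst);
	return 0;
}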
-static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
- .name = "HDMI",
- .probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
+static const struct host1x_client_ops sor_client_ops = {
+ .init = tegra_sor_init,
+ .exit = tegra_sor_exit,
+ .suspend = tegra_sor_runtime_suspend,
+ .resume = tegra_sor_runtime_resume,
};
static const u8 tegra124_sor_xbar_cfg[5] = {
@@ -3040,14 +3293,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = {
.dp_padctl2 = 0x73,
};
+/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
+static const u8 tegra124_sor_lane_map[4] = {
+ 2, 1, 0, 3,
+};
+
+static const u8 tegra124_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x3c, },
+ }, {
+ { 0x12, 0x17, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x39, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x36, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x09, 0x13, 0x25 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ },
+};
+
+static const u8 tegra124_sor_post_cursor[4][4][4] = {
+ {
+ { 0x00, 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0x00, },
+ { 0x00, 0x00, },
+ { 0x00, },
+ }, {
+ { 0x02, 0x02, 0x04, 0x05 },
+ { 0x02, 0x04, 0x05, },
+ { 0x04, 0x05, },
+ { 0x05, },
+ }, {
+ { 0x04, 0x05, 0x08, 0x0b },
+ { 0x05, 0x09, 0x0b, },
+ { 0x08, 0x0a, },
+ { 0x0b, },
+ }, {
+ { 0x05, 0x09, 0x0b, 0x12 },
+ { 0x09, 0x0d, 0x12, },
+ { 0x0b, 0x0f, },
+ { 0x12, },
+ },
+};
+
+static const u8 tegra124_sor_tx_pu[4][4][4] = {
+ {
+ { 0x20, 0x30, 0x40, 0x60 },
+ { 0x30, 0x40, 0x60, },
+ { 0x40, 0x60, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x50 },
+ { 0x30, 0x40, 0x50, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x20, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x40, },
+ { 0x60, },
+ },
+};
+
static const struct tegra_sor_soc tegra124_sor = {
- .supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra132_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra132_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
@@ -3065,33 +3465,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = {
.dp_padctl2 = 0x73,
};
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
+};
+
+static const u8 tegra210_sor_lane_map[4] = {
+ 0, 1, 2, 3,
+};
+
static const struct tegra_sor_soc tegra210_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
- .xbar_cfg = tegra124_sor_xbar_cfg,
-};
-static const u8 tegra210_sor_xbar_cfg[5] = {
- 2, 1, 0, 3, 4
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
- .supports_edp = false,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
-
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
@@ -3109,31 +3526,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = {
.dp_padctl2 = 0x16a,
};
-static const struct tegra_sor_soc tegra186_sor = {
- .supports_edp = false,
- .supports_lvds = false,
- .supports_hdmi = false,
- .supports_dp = true,
-
- .regs = &tegra186_sor_regs,
- .has_nvdisplay = true,
+static const u8 tegra186_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x39, },
+ }, {
+ { 0x12, 0x16, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x37, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x35, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
- .xbar_cfg = tegra124_sor_xbar_cfg,
+static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x14, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
};
-static const struct tegra_sor_soc tegra186_sor1 = {
- .supports_edp = false,
+static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
-
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
@@ -3152,10 +3610,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = {
};
static const struct tegra_sor_soc tegra194_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
@@ -3164,14 +3623,19 @@ static const struct tegra_sor_soc tegra194_sor = {
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
- { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
@@ -3197,6 +3661,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ } else {
+ if (!sor->soc->supports_audio)
+ sor->index = 0;
+ else
+ sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
@@ -3231,9 +3700,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
tegra_hda_parse_format(format, &sor->format);
- tegra_sor_hdmi_audio_enable(sor);
+ if (sor->ops->audio_enable)
+ sor->ops->audio_enable(sor);
} else {
- tegra_sor_hdmi_audio_disable(sor);
+ if (sor->ops->audio_disable)
+ sor->ops->audio_disable(sor);
}
}
@@ -3270,6 +3741,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
+
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3284,16 +3757,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -ENODEV;
}
} else {
- if (sor->soc->supports_edp) {
- sor->ops = &tegra_sor_edp_ops;
- sor->pad = TEGRA_IO_PAD_LVDS;
- } else if (sor->soc->supports_dp) {
- dev_err(&pdev->dev, "DisplayPort not supported yet\n");
- return -ENODEV;
- } else {
- dev_err(&pdev->dev, "unknown (DP) support\n");
- return -ENODEV;
- }
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
+ /*
+ * No need to keep this around since we only use it as a check
+ * to see if a panel is connected (eDP) or not (DP).
+ */
+ of_node_put(np);
+
+ sor->ops = &tegra_sor_dp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
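The probe hunk above distinguishes eDP from DP purely by whether an "nvidia,panel" phandle exists, dropping the node reference immediately. A sketch of that presence-check idiom (the helper name is illustrative, not from the driver):

#include <linux/of.h>

/* true if the node references a panel; the reference itself is not kept */
static bool example_has_panel(const struct device_node *np)
{
	struct device_node *panel;

	panel = of_parse_phandle(np, "nvidia,panel", 0);
	of_node_put(panel);	/* only existence matters here */

	return panel != NULL;
}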
@@ -3443,43 +3915,54 @@ static int tegra_sor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
+ INIT_LIST_HEAD(&sor->client.list);
+ sor->client.ops = &sor_client_ops;
+ sor->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto rpm_disable;
+ }
+
/*
* On Tegra210 and earlier, provide our own implementation for the
* pad output clock.
*/
if (!sor->clk_pad) {
- err = pm_runtime_get_sync(&pdev->dev);
+ char *name;
+
+ name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout",
+ sor->index);
+ if (!name) {
+ err = -ENOMEM;
+ goto unregister;
+ }
+
+ err = host1x_client_resume(&sor->client);
if (err < 0) {
- dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
- err);
- goto remove;
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ goto unregister;
}
- sor->clk_pad = tegra_clk_sor_pad_register(sor,
- "sor1_pad_clkout");
- pm_runtime_put(&pdev->dev);
+ sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
+ host1x_client_suspend(&sor->client);
}
if (IS_ERR(sor->clk_pad)) {
err = PTR_ERR(sor->clk_pad);
- dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
- err);
- goto remove;
- }
-
- INIT_LIST_HEAD(&sor->client.list);
- sor->client.ops = &sor_client_ops;
- sor->client.dev = &pdev->dev;
-
- err = host1x_client_register(&sor->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
err);
- goto remove;
+ goto unregister;
}
return 0;
+unregister:
+ host1x_client_unregister(&sor->client);
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
remove:
if (sor->ops && sor->ops->remove)
sor->ops->remove(sor);
@@ -3493,8 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev)
struct tegra_sor *sor = platform_get_drvdata(pdev);
int err;
- pm_runtime_disable(&pdev->dev);
-
err = host1x_client_unregister(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -3502,6 +3983,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
return err;
}
+ pm_runtime_disable(&pdev->dev);
+
if (sor->ops && sor->ops->remove) {
err = sor->ops->remove(sor);
if (err < 0)
@@ -3513,65 +3996,54 @@ static int tegra_sor_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_sor_suspend(struct device *dev)
+static int __maybe_unused tegra_sor_suspend(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
- if (sor->rst) {
- err = reset_control_assert(sor->rst);
+ err = tegra_output_suspend(&sor->output);
+ if (err < 0) {
+ dev_err(dev, "failed to suspend output: %d\n", err);
+ return err;
+ }
+
+ if (sor->hdmi_supply) {
+ err = regulator_disable(sor->hdmi_supply);
if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
+ tegra_output_resume(&sor->output);
return err;
}
-
- reset_control_release(sor->rst);
}
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(sor->clk);
-
return 0;
}
-static int tegra_sor_resume(struct device *dev)
+static int __maybe_unused tegra_sor_resume(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(sor->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
+ if (sor->hdmi_supply) {
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0)
+ return err;
}
- usleep_range(1000, 2000);
+ err = tegra_output_resume(&sor->output);
+ if (err < 0) {
+ dev_err(dev, "failed to resume output: %d\n", err);
- if (sor->rst) {
- err = reset_control_acquire(sor->rst);
- if (err < 0) {
- dev_err(dev, "failed to acquire reset: %d\n", err);
- clk_disable_unprepare(sor->clk);
- return err;
- }
+ if (sor->hdmi_supply)
+ regulator_disable(sor->hdmi_supply);
- err = reset_control_deassert(sor->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- reset_control_release(sor->rst);
- clk_disable_unprepare(sor->clk);
- return err;
- }
+ return err;
}
return 0;
}
-#endif
static const struct dev_pm_ops tegra_sor_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
};
struct platform_driver tegra_sor_driver = {
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f8efd8be4b7c..00e09d5dca30 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -39,6 +39,7 @@
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
@@ -283,10 +284,12 @@
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
#define SOR_DP_PADCTL1 0x5d
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 958548ef69e7..ade56b860cf9 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -33,7 +34,6 @@ struct vic {
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
@@ -96,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
static int vic_boot(struct vic *vic)
{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+#endif
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
@@ -104,15 +107,14 @@ static int vic_boot(struct vic *vic)
return 0;
#ifdef CONFIG_IOMMU_API
- if (vic->config->supports_sid) {
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+ if (vic->config->supports_sid && spec) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
- if (spec && spec->num_ids > 0) {
+ if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
vic_writel(vic, value, VIC_THI_STREAMID0);
@@ -131,9 +133,9 @@ static int vic_boot(struct vic *vic)
if (err < 0)
return err;
- hdr = vic->falcon.firmware.vaddr;
+ hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.vaddr +
+ hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
@@ -141,7 +143,7 @@ static int vic_boot(struct vic *vic)
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ (vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
@@ -156,48 +158,21 @@ static int vic_boot(struct vic *vic)
return 0;
}
-static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
- dma_addr_t *iova)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_alloc(tegra, size, iova);
-}
-
-static void vic_falcon_free(struct falcon *falcon, size_t size,
- dma_addr_t iova, void *va)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_free(tegra, size, va, iova);
-}
-
-static const struct falcon_ops vic_falcon_ops = {
- .alloc = vic_falcon_alloc,
- .free = vic_falcon_free
-};
-
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
- if (group && tegra->domain) {
- err = iommu_attach_group(tegra->domain, group);
- if (err < 0) {
- dev_err(vic->dev, "failed to attach to domain: %d\n",
- err);
- return err;
- }
-
- vic->domain = tegra->domain;
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n", err);
+ return err;
}
- vic->channel = host1x_channel_request(client->dev);
+ vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
@@ -213,6 +188,12 @@ static int vic_init(struct host1x_client *client)
if (err < 0)
goto free_syncpt;
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
return 0;
free_syncpt:
@@ -220,8 +201,7 @@ free_syncpt:
free_channel:
host1x_channel_put(vic->channel);
detach:
- if (group && tegra->domain)
- iommu_detach_group(tegra->domain, group);
+ host1x_client_iommu_detach(client);
return err;
}
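The vic_init() change above borrows dma_parms from the parent host1x device so that segment-size limits apply to the client as well. For comparison, a sketch of how a driver typically provides its own DMA parameters when there is no parent to inherit from (names are placeholders):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

struct example_priv {
	struct device_dma_parameters dma_parms;
};

static int example_setup_dma(struct device *dev, struct example_priv *priv)
{
	/* point the device at storage we own, then set the segment limit */
	dev->dma_parms = &priv->dma_parms;

	return dma_set_max_seg_size(dev, UINT_MAX);
}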
@@ -229,22 +209,32 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(vic->channel);
-
- if (vic->domain) {
- iommu_detach_group(vic->domain, group);
- vic->domain = NULL;
+ host1x_client_iommu_detach(client);
+
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(vic->dev, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
}
return 0;
@@ -257,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
+ struct host1x_client *client = &vic->client.base;
+ struct tegra_drm *tegra = vic->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
int err;
- if (vic->falcon.data)
+ if (vic->falcon.firmware.virt)
return 0;
- vic->falcon.data = vic->client.drm;
-
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
- goto cleanup;
+ return err;
+
+ size = vic->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
+
+ err = dma_mapping_error(vic->dev, iova);
+ if (err < 0)
+ return err;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ }
+
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(vic->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ vic->falcon.firmware.phys = phys;
+ }
+
return 0;
cleanup:
- vic->falcon.data = NULL;
+ if (!client->group)
+ dma_free_coherent(vic->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
return err;
}
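The comment in vic_load_firmware() above notes that a buffer handed out by the shared IOMMU domain still needs a streaming DMA mapping so the DMA API can perform cache maintenance on behalf of the device. A generic sketch of that streaming pattern (assumed names, not the driver's code):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* map a CPU buffer for a device read, then unmap once the device is done */
static int example_stream_to_device(struct device *dev, void *virt, size_t size)
{
	dma_addr_t phys;

	phys = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, phys))
		return -ENOMEM;

	/* ... hand 'phys' to the hardware and wait for it to finish ... */

	dma_unmap_single(dev, phys, size, DMA_TO_DEVICE);
	return 0;
}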
@@ -357,13 +386,14 @@ static const struct vic_config vic_t194_config = {
.supports_sid = true,
};
-static const struct of_device_id vic_match[] = {
+static const struct of_device_id tegra_vic_of_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_vic_of_match);
static int vic_probe(struct platform_device *pdev)
{
@@ -373,6 +403,13 @@ static int vic_probe(struct platform_device *pdev)
struct vic *vic;
int err;
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
@@ -409,7 +446,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
- vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
@@ -481,7 +517,7 @@ static const struct dev_pm_ops vic_pm_ops = {
struct platform_driver tegra_vic_driver = {
.driver = {
.name = "tegra-vic",
- .of_match_table = vic_match,
+ .of_match_table = tegra_vic_of_match,
.pm = &vic_pm_ops
},
.probe = vic_probe,
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 87f9480e43b0..662bf3a348c9 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -6,7 +6,6 @@ endif
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
- tilcdc_tfp410.o \
tilcdc_panel.o \
tilcdc_external.o \
tilcdc_drv.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 650d162e374b..e9dd5e5cb4e7 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -4,16 +4,20 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_flip_work.h>
-#include <drm/drm_plane_helper.h>
-#include <linux/workqueue.h>
-#include <linux/completion.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_graph.h>
-#include <linux/math64.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -646,9 +650,6 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct drm_display_mode *mode = &state->mode;
- int ret;
-
/* If we are not active we don't care */
if (!state->active)
return 0;
@@ -660,12 +661,6 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
- ret = tilcdc_crtc_mode_valid(crtc, mode);
- if (ret) {
- dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
- return -EINVAL;
- }
-
return 0;
}
@@ -717,13 +712,6 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
.disable_vblank = tilcdc_crtc_disable_vblank,
};
-static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
- .mode_fixup = tilcdc_crtc_mode_fixup,
- .atomic_check = tilcdc_crtc_atomic_check,
- .atomic_enable = tilcdc_crtc_atomic_enable,
- .atomic_disable = tilcdc_crtc_atomic_disable,
-};
-
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -738,7 +726,9 @@ int tilcdc_crtc_max_width(struct drm_crtc *crtc)
return max_width;
}
-int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static enum drm_mode_status
+tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
unsigned int bandwidth;
@@ -826,6 +816,14 @@ int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
return MODE_OK;
}
+static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
+ .mode_valid = tilcdc_crtc_mode_valid,
+ .mode_fixup = tilcdc_crtc_mode_fixup,
+ .atomic_check = tilcdc_crtc_atomic_check,
+ .atomic_enable = tilcdc_crtc_atomic_enable,
+ .atomic_disable = tilcdc_crtc_atomic_disable,
+};
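With the hunk above, mode validation moves from an exported function called out of atomic_check into the CRTC helper's .mode_valid hook. A minimal sketch of such a hook (the width limit is invented purely for illustration):

#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper_vtables.h>

static enum drm_mode_status
example_crtc_mode_valid(struct drm_crtc *crtc,
			const struct drm_display_mode *mode)
{
	/* reject modes wider than the (hypothetical) scanout limit */
	if (mode->hdisplay > 2048)
		return MODE_BAD_HVALUE;

	return MODE_OK;
}

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.mode_valid = example_crtc_mode_valid,
};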
+
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
const struct tilcdc_panel_info *info)
{
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 7339bab3a0a1..0791a0200cc3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -7,19 +7,29 @@
/* LCDC DRM driver, based on da8xx-fb */
#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/suspend.h>
-#include <drm/drm_atomic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_mm.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "tilcdc_drv.h"
-#include "tilcdc_regs.h"
-#include "tilcdc_tfp410.h"
-#include "tilcdc_panel.h"
#include "tilcdc_external.h"
+#include "tilcdc_panel.h"
+#include "tilcdc_regs.h"
static LIST_HEAD(module_list);
@@ -53,12 +63,6 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
static struct of_device_id tilcdc_of_match[];
-static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-
static int tilcdc_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -129,7 +133,7 @@ static int tilcdc_commit(struct drm_device *dev,
}
static const struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = tilcdc_fb_create,
+ .fb_create = drm_gem_fb_create,
.atomic_check = tilcdc_atomic_check,
.atomic_commit = tilcdc_commit,
};
@@ -188,7 +192,6 @@ static void tilcdc_fini(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
- tilcdc_remove_external_device(dev);
if (priv->clk)
clk_put(priv->clk);
@@ -246,7 +249,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ priv->mmio = ioremap(res->start, resource_size(res));
if (!priv->mmio) {
dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
@@ -501,8 +504,7 @@ static int tilcdc_debugfs_init(struct drm_minor *minor)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
- .driver_features = (DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC),
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = tilcdc_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
@@ -511,8 +513,6 @@ static struct drm_driver tilcdc_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -642,7 +642,6 @@ static struct platform_driver tilcdc_platform_driver = {
static int __init tilcdc_drm_init(void)
{
DBG("init");
- tilcdc_tfp410_init();
tilcdc_panel_init();
return platform_driver_register(&tilcdc_platform_driver);
}
@@ -652,7 +651,6 @@ static void __exit tilcdc_drm_fini(void)
DBG("fini");
platform_driver_unregister(&tilcdc_platform_driver);
tilcdc_panel_fini();
- tilcdc_tfp410_fini();
}
module_init(tilcdc_drm_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 99432296c0ff..18815e75ca4f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -7,21 +7,24 @@
#ifndef __TILCDC_DRV_H__
#define __TILCDC_DRV_H__
-#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/list.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <linux/irqreturn.h>
+
+#include <drm/drm_print.h>
+
+struct clk;
+struct workqueue_struct;
+
+struct drm_connector;
+struct drm_connector_helper_funcs;
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_framebuffer;
+struct drm_minor;
+struct drm_pending_vblank_event;
+struct drm_plane;
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
@@ -74,7 +77,6 @@ struct tilcdc_drm_private {
struct drm_encoder *external_encoder;
struct drm_connector *external_connector;
- const struct drm_connector_helper_funcs *connector_funcs;
bool is_registered;
bool is_componentized;
@@ -156,7 +158,6 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
const struct tilcdc_panel_info *info);
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
-int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
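
The header cleanup above works because a pointer member or a function prototype only needs a forward declaration, not the full type definition; the compilation units that actually dereference the type or call its API include the real header themselves. A generic sketch of the pattern (hypothetical foo names, not tilcdc code):

/* foo.h: only a pointer is stored, so a forward declaration is enough */
struct clk;

struct foo_priv {
	struct clk *clk;
};

Each .c file that actually calls clk_get_rate() and friends still includes <linux/clk.h> on its own, which keeps the header lightweight without breaking its users.
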
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 7050eb4cf152..51d034e095f4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -6,7 +6,9 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include "tilcdc_drv.h"
@@ -37,64 +39,6 @@ static const struct tilcdc_panel_info panel_info_default = {
.raster_order = 0,
};
-static int tilcdc_external_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tilcdc_drm_private *priv = connector->dev->dev_private;
- int ret;
-
- ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
- if (ret != MODE_OK)
- return ret;
-
- BUG_ON(priv->external_connector != connector);
- BUG_ON(!priv->connector_funcs);
-
- /* If the connector has its own mode_valid call it. */
- if (!IS_ERR(priv->connector_funcs) &&
- priv->connector_funcs->mode_valid)
- return priv->connector_funcs->mode_valid(connector, mode);
-
- return MODE_OK;
-}
-
-static int tilcdc_add_external_connector(struct drm_device *dev,
- struct drm_connector *connector)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_connector_helper_funcs *connector_funcs;
-
- /* There should never be more than one connector */
- if (WARN_ON(priv->external_connector))
- return -EINVAL;
-
- priv->external_connector = connector;
- connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
- GFP_KERNEL);
- if (!connector_funcs)
- return -ENOMEM;
-
- /* connector->helper_private contains always struct
- * connector_helper_funcs pointer. For tilcdc crtc to have a
- * say if a specific mode is Ok, we need to install our own
- * helper functions. In our helper functions we copy
- * everything else but use our own mode_valid() (above).
- */
- if (connector->helper_private) {
- priv->connector_funcs = connector->helper_private;
- *connector_funcs = *priv->connector_funcs;
- } else {
- priv->connector_funcs = ERR_PTR(-ENOENT);
- }
- connector_funcs->mode_valid = tilcdc_external_mode_valid;
- drm_connector_helper_add(connector, connector_funcs);
-
- dev_dbg(dev->dev, "External connector '%s' connected\n",
- connector->name);
-
- return 0;
-}
-
static
struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
struct drm_encoder *encoder)
@@ -115,7 +59,6 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
int tilcdc_add_component_encoder(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_connector *connector;
struct drm_encoder *encoder;
list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
@@ -127,28 +70,17 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
return -ENODEV;
}
- connector = tilcdc_encoder_find_connector(ddev, encoder);
+ priv->external_connector =
+ tilcdc_encoder_find_connector(ddev, encoder);
- if (!connector)
+ if (!priv->external_connector)
return -ENODEV;
/* Only tda998x is supported at the moment. */
tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
- return tilcdc_add_external_connector(ddev, connector);
-}
-
-void tilcdc_remove_external_device(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- /* Restore the original helper functions, if any. */
- if (IS_ERR(priv->connector_funcs))
- drm_connector_helper_add(priv->external_connector, NULL);
- else if (priv->connector_funcs)
- drm_connector_helper_add(priv->external_connector,
- priv->connector_funcs);
+ return 0;
}
static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
@@ -159,7 +91,6 @@ static
int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_connector *connector;
int ret;
priv->external_encoder->possible_crtcs = BIT(0);
@@ -172,13 +103,12 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default);
- connector = tilcdc_encoder_find_connector(ddev, priv->external_encoder);
- if (!connector)
+ priv->external_connector =
+ tilcdc_encoder_find_connector(ddev, priv->external_encoder);
+ if (!priv->external_connector)
return -ENODEV;
- ret = tilcdc_add_external_connector(ddev, connector);
-
- return ret;
+ return 0;
}
int tilcdc_attach_external_device(struct drm_device *ddev)
@@ -210,8 +140,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
}
if (panel) {
- bridge = devm_drm_panel_bridge_add(ddev->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(ddev->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto err_encoder_cleanup;
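
devm_drm_panel_bridge_add_typed() takes the connector type explicitly, whereas the plain devm_drm_panel_bridge_add() variant in this kernel generation derives it from the panel itself. For a DPI panel wired through a bridge the call reduces to the following (a sketch following the hunk above, with dev and panel assumed to be already looked up):

	struct drm_bridge *bridge;

	bridge = devm_drm_panel_bridge_add_typed(dev, panel,
						 DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* the bridge is device-managed; no explicit removal on teardown */
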
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index 7024b4877fdf..fb4476694cd8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -8,7 +8,6 @@
#define __TILCDC_EXTERNAL_H__
int tilcdc_add_component_encoder(struct drm_device *dev);
-void tilcdc_remove_external_device(struct drm_device *dev);
int tilcdc_get_external_components(struct device *dev,
struct component_match **match);
int tilcdc_attach_external_device(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 22b100d2e174..5584e656b857 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -4,14 +4,17 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include <drm/drm_atomic_helper.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include "tilcdc_drv.h"
@@ -160,14 +163,6 @@ static int panel_connector_get_modes(struct drm_connector *connector)
return i;
}
-static int panel_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tilcdc_drm_private *priv = connector->dev->dev_private;
- /* our only constraints are what the crtc can generate: */
- return tilcdc_crtc_mode_valid(priv->crtc, mode);
-}
-
static struct drm_encoder *panel_connector_best_encoder(
struct drm_connector *connector)
{
@@ -185,7 +180,6 @@ static const struct drm_connector_funcs panel_connector_funcs = {
static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
.get_modes = panel_connector_get_modes,
- .mode_valid = panel_connector_mode_valid,
.best_encoder = panel_connector_best_encoder,
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 8c2776acdf99..e2090020b3a0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -4,16 +4,14 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
-#include <drm/drmP.h>
-
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_fourcc.h>
#include "tilcdc_drv.h"
-static struct drm_plane_funcs tilcdc_plane_funcs = {
+static const struct drm_plane_funcs tilcdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
deleted file mode 100644
index 62d014c20988..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ /dev/null
@@ -1,385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <robdclark@gmail.com>
- */
-
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "tilcdc_drv.h"
-#include "tilcdc_tfp410.h"
-
-struct tfp410_module {
- struct tilcdc_module base;
- struct i2c_adapter *i2c;
- int gpio;
-};
-#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
-
-
-static const struct tilcdc_panel_info dvi_info = {
- .ac_bias = 255,
- .ac_bias_intrpt = 0,
- .dma_burst_sz = 16,
- .bpp = 16,
- .fdd = 0x80,
- .tft_alt_mode = 0,
- .sync_edge = 0,
- .sync_ctrl = 1,
- .raster_order = 0,
-};
-
-/*
- * Encoder:
- */
-
-struct tfp410_encoder {
- struct drm_encoder base;
- struct tfp410_module *mod;
- int dpms;
-};
-#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
-
-static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
-
- if (tfp410_encoder->dpms == mode)
- return;
-
- if (mode == DRM_MODE_DPMS_ON) {
- DBG("Power on");
- gpio_direction_output(tfp410_encoder->mod->gpio, 1);
- } else {
- DBG("Power off");
- gpio_direction_output(tfp410_encoder->mod->gpio, 0);
- }
-
- tfp410_encoder->dpms = mode;
-}
-
-static void tfp410_encoder_prepare(struct drm_encoder *encoder)
-{
- tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tfp410_encoder_commit(struct drm_encoder *encoder)
-{
- tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* nothing needed */
-}
-
-static const struct drm_encoder_funcs tfp410_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
- .dpms = tfp410_encoder_dpms,
- .prepare = tfp410_encoder_prepare,
- .commit = tfp410_encoder_commit,
- .mode_set = tfp410_encoder_mode_set,
-};
-
-static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
- struct tfp410_module *mod)
-{
- struct tfp410_encoder *tfp410_encoder;
- struct drm_encoder *encoder;
- int ret;
-
- tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder),
- GFP_KERNEL);
- if (!tfp410_encoder)
- return NULL;
-
- tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
- tfp410_encoder->mod = mod;
-
- encoder = &tfp410_encoder->base;
- encoder->possible_crtcs = 1;
-
- ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret < 0)
- goto fail;
-
- drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
-
- return encoder;
-
-fail:
- drm_encoder_cleanup(encoder);
- return NULL;
-}
-
-/*
- * Connector:
- */
-
-struct tfp410_connector {
- struct drm_connector base;
-
- struct drm_encoder *encoder; /* our connected encoder */
- struct tfp410_module *mod;
-};
-#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
-
-
-static void tfp410_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static enum drm_connector_status tfp410_connector_detect(
- struct drm_connector *connector,
- bool force)
-{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
-
- if (drm_probe_ddc(tfp410_connector->mod->i2c))
- return connector_status_connected;
-
- return connector_status_unknown;
-}
-
-static int tfp410_connector_get_modes(struct drm_connector *connector)
-{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
- struct edid *edid;
- int ret = 0;
-
- edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
-
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return ret;
-}
-
-static int tfp410_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tilcdc_drm_private *priv = connector->dev->dev_private;
- /* our only constraints are what the crtc can generate: */
- return tilcdc_crtc_mode_valid(priv->crtc, mode);
-}
-
-static struct drm_encoder *tfp410_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
- return tfp410_connector->encoder;
-}
-
-static const struct drm_connector_funcs tfp410_connector_funcs = {
- .destroy = tfp410_connector_destroy,
- .detect = tfp410_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
- .get_modes = tfp410_connector_get_modes,
- .mode_valid = tfp410_connector_mode_valid,
- .best_encoder = tfp410_connector_best_encoder,
-};
-
-static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
- struct tfp410_module *mod, struct drm_encoder *encoder)
-{
- struct tfp410_connector *tfp410_connector;
- struct drm_connector *connector;
- int ret;
-
- tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector),
- GFP_KERNEL);
- if (!tfp410_connector)
- return NULL;
-
- tfp410_connector->encoder = encoder;
- tfp410_connector->mod = mod;
-
- connector = &tfp410_connector->base;
-
- drm_connector_init(dev, connector, &tfp410_connector_funcs,
- DRM_MODE_CONNECTOR_DVID);
- drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- goto fail;
-
- return connector;
-
-fail:
- tfp410_connector_destroy(connector);
- return NULL;
-}
-
-/*
- * Module:
- */
-
-static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
-{
- struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
- encoder = tfp410_encoder_create(dev, tfp410_mod);
- if (!encoder)
- return -ENOMEM;
-
- connector = tfp410_connector_create(dev, tfp410_mod, encoder);
- if (!connector)
- return -ENOMEM;
-
- priv->encoders[priv->num_encoders++] = encoder;
- priv->connectors[priv->num_connectors++] = connector;
-
- tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info);
- return 0;
-}
-
-static const struct tilcdc_module_ops tfp410_module_ops = {
- .modeset_init = tfp410_modeset_init,
-};
-
-/*
- * Device:
- */
-
-static int tfp410_probe(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct device_node *i2c_node;
- struct tfp410_module *tfp410_mod;
- struct tilcdc_module *mod;
- struct pinctrl *pinctrl;
- uint32_t i2c_phandle;
- int ret = -EINVAL;
-
- /* bail out early if no DT data: */
- if (!node) {
- dev_err(&pdev->dev, "device-tree data is missing\n");
- return -ENXIO;
- }
-
- tfp410_mod = devm_kzalloc(&pdev->dev, sizeof(*tfp410_mod), GFP_KERNEL);
- if (!tfp410_mod)
- return -ENOMEM;
-
- mod = &tfp410_mod->base;
- pdev->dev.platform_data = mod;
-
- tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "pins are not configured\n");
-
- if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
- dev_err(&pdev->dev, "could not get i2c bus phandle\n");
- goto fail;
- }
-
- i2c_node = of_find_node_by_phandle(i2c_phandle);
- if (!i2c_node) {
- dev_err(&pdev->dev, "could not get i2c bus node\n");
- goto fail;
- }
-
- tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
- if (!tfp410_mod->i2c) {
- dev_err(&pdev->dev, "could not get i2c\n");
- of_node_put(i2c_node);
- goto fail;
- }
-
- of_node_put(i2c_node);
-
- tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
- 0, NULL);
- if (tfp410_mod->gpio < 0) {
- dev_warn(&pdev->dev, "No power down GPIO\n");
- } else {
- ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
- if (ret) {
- dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
- goto fail_adapter;
- }
- }
-
- return 0;
-
-fail_adapter:
- i2c_put_adapter(tfp410_mod->i2c);
-
-fail:
- tilcdc_module_cleanup(mod);
- return ret;
-}
-
-static int tfp410_remove(struct platform_device *pdev)
-{
- struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
- struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-
- i2c_put_adapter(tfp410_mod->i2c);
- gpio_free(tfp410_mod->gpio);
-
- tilcdc_module_cleanup(mod);
-
- return 0;
-}
-
-static const struct of_device_id tfp410_of_match[] = {
- { .compatible = "ti,tilcdc,tfp410", },
- { },
-};
-
-struct platform_driver tfp410_driver = {
- .probe = tfp410_probe,
- .remove = tfp410_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "tfp410",
- .of_match_table = tfp410_of_match,
- },
-};
-
-int __init tilcdc_tfp410_init(void)
-{
- return platform_driver_register(&tfp410_driver);
-}
-
-void __exit tilcdc_tfp410_fini(void)
-{
- platform_driver_unregister(&tfp410_driver);
-}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
deleted file mode 100644
index f9aaf6911ffc..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <robdclark@gmail.com>
- */
-
-#ifndef __TILCDC_TFP410_H__
-#define __TILCDC_TFP410_H__
-
-/* sub-module for tfp410 dvi adaptor */
-
-int tilcdc_tfp410_init(void);
-void tilcdc_tfp410_fini(void);
-
-#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 87819c82bcce..a46ac284dd5e 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,21 +1,21 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig DRM_TINYDRM
- tristate "Support for simple displays"
- depends on DRM
+
+config DRM_GM12U320
+ tristate "GM12U320 driver for USB projectors"
+ depends on DRM && USB
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_SHMEM_HELPER
help
- Choose this option if you have a tinydrm supported display.
- If M is selected the module will be called tinydrm.
-
-config TINYDRM_MIPI_DBI
- tristate
+ This is a KMS driver for projectors which use the GM12U320 chipset
+ for video transfer over USB2/3, such as the Acer C120 mini projector.
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for the following HX8357D panels:
* YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5")
@@ -24,8 +24,10 @@ config TINYDRM_HX8357D
config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
- depends on DRM_TINYDRM && SPI
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
* No-name 2.2" color screen module
@@ -34,9 +36,11 @@ config TINYDRM_ILI9225
config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for the following Ilitek ILI9341 panels:
* YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
@@ -45,17 +49,20 @@ config TINYDRM_ILI9341
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for the Multi-Inno MI0283QT display panel
If M is selected the module will be called mi0283qt.
config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
- depends on DRM_TINYDRM && SPI
- depends on THERMAL || !THERMAL
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
@@ -67,8 +74,10 @@ config TINYDRM_REPAPER
config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
- depends on DRM_TINYDRM && SPI
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
* LEGO MINDSTORMS EV3
@@ -77,9 +86,11 @@ config TINYDRM_ST7586
config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7735R display panels"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
 DRM driver for Sitronix ST7735R with one of the following LCDs:
* JD-T18003-T01 1.8" 128x160 TFT
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tiny/Makefile
index 48ec8ed9dc16..896cf31132d3 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,10 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_TINYDRM) += core/
-# Controllers
-obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
-
-# Displays
+obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
new file mode 100644
index 000000000000..94fb1f593564
--- /dev/null
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+static bool eco_mode;
+module_param(eco_mode, bool, 0644);
+MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
+
+#define DRIVER_NAME "gm12u320"
+#define DRIVER_DESC "Grain Media GM12U320 USB projector display"
+#define DRIVER_DATE "2019"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+/*
+ * The DLP has an actual width of 854 pixels, but that is not a multiple
+ * of 8, breaking things left and right, so we export a width of 848.
+ */
+#define GM12U320_USER_WIDTH 848
+#define GM12U320_REAL_WIDTH 854
+#define GM12U320_HEIGHT 480
+
+#define GM12U320_BLOCK_COUNT 20
+
+#define GM12U320_ERR(fmt, ...) \
+ DRM_DEV_ERROR(&gm12u320->udev->dev, fmt, ##__VA_ARGS__)
+
+#define MISC_RCV_EPT 1
+#define DATA_RCV_EPT 2
+#define DATA_SND_EPT 3
+#define MISC_SND_EPT 4
+
+#define DATA_BLOCK_HEADER_SIZE 84
+#define DATA_BLOCK_CONTENT_SIZE 64512
+#define DATA_BLOCK_FOOTER_SIZE 20
+#define DATA_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \
+ DATA_BLOCK_CONTENT_SIZE + \
+ DATA_BLOCK_FOOTER_SIZE)
+#define DATA_LAST_BLOCK_CONTENT_SIZE 4032
+#define DATA_LAST_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \
+ DATA_LAST_BLOCK_CONTENT_SIZE + \
+ DATA_BLOCK_FOOTER_SIZE)
+
+#define CMD_SIZE 31
+#define READ_STATUS_SIZE 13
+#define MISC_VALUE_SIZE 4
+
+#define CMD_TIMEOUT msecs_to_jiffies(200)
+#define DATA_TIMEOUT msecs_to_jiffies(1000)
+#define IDLE_TIMEOUT msecs_to_jiffies(2000)
+#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000)
+
+#define MISC_REQ_GET_SET_ECO_A 0xff
+#define MISC_REQ_GET_SET_ECO_B 0x35
+/* Windows driver does this once every second, with arg d = 1, other args 0 */
+#define MISC_REQ_UNKNOWN1_A 0xff
+#define MISC_REQ_UNKNOWN1_B 0x38
+/* Windows driver does this on init, with arg a, b = 0, c = 0xa0, d = 4 */
+#define MISC_REQ_UNKNOWN2_A 0xa5
+#define MISC_REQ_UNKNOWN2_B 0x00
+
+struct gm12u320_device {
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector conn;
+ struct usb_device *udev;
+ unsigned char *cmd_buf;
+ unsigned char *data_buf[GM12U320_BLOCK_COUNT];
+ bool pipe_enabled;
+ struct {
+ bool run;
+ struct workqueue_struct *workq;
+ struct work_struct work;
+ wait_queue_head_t waitq;
+ struct mutex lock;
+ struct drm_framebuffer *fb;
+ struct drm_rect rect;
+ } fb_update;
+};
+
+static const char cmd_data[CMD_SIZE] = {
+ 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
+ 0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char cmd_draw[CMD_SIZE] = {
+ 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0xfe,
+ 0x00, 0x00, 0x00, 0xc0, 0xd1, 0x05, 0x00, 0x40,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char cmd_misc[CMD_SIZE] = {
+ 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x80, 0x01, 0x10, 0xfd,
+ 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char data_block_header[DATA_BLOCK_HEADER_SIZE] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x15, 0x00, 0x00, 0xfc, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0xdb
+};
+
+static const char data_last_block_header[DATA_BLOCK_HEADER_SIZE] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2a, 0x00, 0x20, 0x00, 0xc0, 0x0f, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0xd7
+};
+
+static const char data_block_footer[DATA_BLOCK_FOOTER_SIZE] = {
+ 0xfb, 0x14, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x4f
+};
+
+static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
+{
+ int i, block_size;
+ const char *hdr;
+
+ gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+ if (!gm12u320->cmd_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < GM12U320_BLOCK_COUNT; i++) {
+ if (i == GM12U320_BLOCK_COUNT - 1) {
+ block_size = DATA_LAST_BLOCK_SIZE;
+ hdr = data_last_block_header;
+ } else {
+ block_size = DATA_BLOCK_SIZE;
+ hdr = data_block_header;
+ }
+
+ gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+ if (!gm12u320->data_buf[i])
+ return -ENOMEM;
+
+ memcpy(gm12u320->data_buf[i], hdr, DATA_BLOCK_HEADER_SIZE);
+ memcpy(gm12u320->data_buf[i] +
+ (block_size - DATA_BLOCK_FOOTER_SIZE),
+ data_block_footer, DATA_BLOCK_FOOTER_SIZE);
+ }
+
+ gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
+ if (!gm12u320->fb_update.workq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
+{
+ int i;
+
+ if (gm12u320->fb_update.workq)
+ destroy_workqueue(gm12u320->fb_update.workq);
+
+ for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
+ kfree(gm12u320->data_buf[i]);
+
+ kfree(gm12u320->cmd_buf);
+}
+
+static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
+ u8 req_a, u8 req_b,
+ u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
+{
+ int ret, len;
+
+ memcpy(gm12u320->cmd_buf, &cmd_misc, CMD_SIZE);
+ gm12u320->cmd_buf[20] = req_a;
+ gm12u320->cmd_buf[21] = req_b;
+ gm12u320->cmd_buf[22] = arg_a;
+ gm12u320->cmd_buf[23] = arg_b;
+ gm12u320->cmd_buf[24] = arg_c;
+ gm12u320->cmd_buf[25] = arg_d;
+
+ /* Send request */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE) {
+ GM12U320_ERR("Misc. req. error %d\n", ret);
+ return -EIO;
+ }
+
+ /* Read value */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT),
+ gm12u320->cmd_buf, MISC_VALUE_SIZE, &len,
+ DATA_TIMEOUT);
+ if (ret || len != MISC_VALUE_SIZE) {
+ GM12U320_ERR("Misc. value error %d\n", ret);
+ return -EIO;
+ }
+ /* cmd_buf[0] now contains the read value, which we don't use */
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ CMD_TIMEOUT);
+ if (ret || len != READ_STATUS_SIZE) {
+ GM12U320_ERR("Misc. status error %d\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void gm12u320_32bpp_to_24bpp_packed(u8 *dst, u8 *src, int len)
+{
+ while (len--) {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ src++;
+ }
+}
+
+static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
+{
+ int block, dst_offset, len, remain, ret, x1, x2, y1, y2;
+ struct drm_framebuffer *fb;
+ void *vaddr;
+ u8 *src;
+
+ mutex_lock(&gm12u320->fb_update.lock);
+
+ if (!gm12u320->fb_update.fb)
+ goto unlock;
+
+ fb = gm12u320->fb_update.fb;
+ x1 = gm12u320->fb_update.rect.x1;
+ x2 = gm12u320->fb_update.rect.x2;
+ y1 = gm12u320->fb_update.rect.y1;
+ y2 = gm12u320->fb_update.rect.y2;
+
+ vaddr = drm_gem_shmem_vmap(fb->obj[0]);
+ if (IS_ERR(vaddr)) {
+ GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr));
+ goto put_fb;
+ }
+
+ if (fb->obj[0]->import_attach) {
+ ret = dma_buf_begin_cpu_access(
+ fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE);
+ if (ret) {
+ GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret);
+ goto vunmap;
+ }
+ }
+
+ src = vaddr + y1 * fb->pitches[0] + x1 * 4;
+
+ x1 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2;
+ x2 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2;
+
+ for (; y1 < y2; y1++) {
+ remain = 0;
+ len = (x2 - x1) * 3;
+ dst_offset = (y1 * GM12U320_REAL_WIDTH + x1) * 3;
+ block = dst_offset / DATA_BLOCK_CONTENT_SIZE;
+ dst_offset %= DATA_BLOCK_CONTENT_SIZE;
+
+ if ((dst_offset + len) > DATA_BLOCK_CONTENT_SIZE) {
+ remain = dst_offset + len - DATA_BLOCK_CONTENT_SIZE;
+ len = DATA_BLOCK_CONTENT_SIZE - dst_offset;
+ }
+
+ dst_offset += DATA_BLOCK_HEADER_SIZE;
+ len /= 3;
+
+ gm12u320_32bpp_to_24bpp_packed(
+ gm12u320->data_buf[block] + dst_offset,
+ src, len);
+
+ if (remain) {
+ block++;
+ dst_offset = DATA_BLOCK_HEADER_SIZE;
+ gm12u320_32bpp_to_24bpp_packed(
+ gm12u320->data_buf[block] + dst_offset,
+ src + len * 4, remain / 3);
+ }
+ src += fb->pitches[0];
+ }
+
+ if (fb->obj[0]->import_attach) {
+ ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret);
+ }
+vunmap:
+ drm_gem_shmem_vunmap(fb->obj[0], vaddr);
+put_fb:
+ drm_framebuffer_put(fb);
+ gm12u320->fb_update.fb = NULL;
+unlock:
+ mutex_unlock(&gm12u320->fb_update.lock);
+}
+
+static void gm12u320_fb_update_work(struct work_struct *work)
+{
+ struct gm12u320_device *gm12u320 =
+ container_of(work, struct gm12u320_device, fb_update.work);
+ int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+ int block, block_size, len;
+ int frame = 0;
+ int ret = 0;
+
+ while (gm12u320->fb_update.run) {
+ gm12u320_copy_fb_to_blocks(gm12u320);
+
+ for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+ if (block == GM12U320_BLOCK_COUNT - 1)
+ block_size = DATA_LAST_BLOCK_SIZE;
+ else
+ block_size = DATA_BLOCK_SIZE;
+
+ /* Send data command to device */
+ memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+ gm12u320->cmd_buf[8] = block_size & 0xff;
+ gm12u320->cmd_buf[9] = block_size >> 8;
+ gm12u320->cmd_buf[20] = 0xfc - block * 4;
+ gm12u320->cmd_buf[21] = block | (frame << 7);
+
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len,
+ CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Send data block to device */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->data_buf[block], block_size,
+ &len, DATA_TIMEOUT);
+ if (ret || len != block_size)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ CMD_TIMEOUT);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+ }
+
+ /* Send draw command to device */
+ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ draw_status_timeout);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+
+ draw_status_timeout = CMD_TIMEOUT;
+ frame = !frame;
+
+ /*
+ * We must draw a frame every 2s; otherwise the projector
+ * switches back to showing its logo.
+ */
+ wait_event_timeout(gm12u320->fb_update.waitq,
+ !gm12u320->fb_update.run ||
+ gm12u320->fb_update.fb != NULL,
+ IDLE_TIMEOUT);
+ }
+ return;
+err:
+ /* Do not log errors caused by module unload or device unplug */
+ if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN)
+ GM12U320_ERR("Frame update error: %d\n", ret);
+}
+
+static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
+ struct drm_rect *dirty)
+{
+ struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+ struct drm_framebuffer *old_fb = NULL;
+ bool wakeup = false;
+
+ mutex_lock(&gm12u320->fb_update.lock);
+
+ if (gm12u320->fb_update.fb != fb) {
+ old_fb = gm12u320->fb_update.fb;
+ drm_framebuffer_get(fb);
+ gm12u320->fb_update.fb = fb;
+ gm12u320->fb_update.rect = *dirty;
+ wakeup = true;
+ } else {
+ struct drm_rect *rect = &gm12u320->fb_update.rect;
+
+ rect->x1 = min(rect->x1, dirty->x1);
+ rect->y1 = min(rect->y1, dirty->y1);
+ rect->x2 = max(rect->x2, dirty->x2);
+ rect->y2 = max(rect->y2, dirty->y2);
+ }
+
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ if (wakeup)
+ wake_up(&gm12u320->fb_update.waitq);
+
+ if (old_fb)
+ drm_framebuffer_put(old_fb);
+}
+
+static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
+{
+ mutex_lock(&gm12u320->fb_update.lock);
+ gm12u320->fb_update.run = true;
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
+}
+
+static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
+{
+ mutex_lock(&gm12u320->fb_update.lock);
+ gm12u320->fb_update.run = false;
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ wake_up(&gm12u320->fb_update.waitq);
+ cancel_work_sync(&gm12u320->fb_update.work);
+
+ mutex_lock(&gm12u320->fb_update.lock);
+ if (gm12u320->fb_update.fb) {
+ drm_framebuffer_put(gm12u320->fb_update.fb);
+ gm12u320->fb_update.fb = NULL;
+ }
+ mutex_unlock(&gm12u320->fb_update.lock);
+}
+
+static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
+{
+ return gm12u320_misc_request(gm12u320, MISC_REQ_GET_SET_ECO_A,
+ MISC_REQ_GET_SET_ECO_B, 0x01 /* set */,
+ eco_mode ? 0x01 : 0x00, 0x00, 0x01);
+}
+
+/* ------------------------------------------------------------------ */
+/* gm12u320 connector */
+
+/*
+ * We use fake EDID info so that userspace knows that it is dealing with
+ * an Acer projector, rather than listing this as an "unknown" monitor.
+ * Note this assumes this driver is only ever used with the Acer C120; if we
+ * add support for other devices, the vendor and model should be parameterized.
+ */
+static struct edid gm12u320_edid = {
+ .header = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 },
+ .mfg_id = { 0x04, 0x72 }, /* "ACR" */
+ .prod_code = { 0x20, 0xc1 }, /* C120h */
+ .serial = 0xaa55aa55,
+ .mfg_week = 1,
+ .mfg_year = 16,
+ .version = 1, /* EDID 1.3 */
+ .revision = 3, /* EDID 1.3 */
+ .input = 0x08, /* Analog input */
+ .features = 0x0a, /* Pref timing in DTD 1 */
+ .standard_timings = { { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
+ { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } },
+ .detailed_timings = { {
+ .pixel_clock = 3383,
+ /* hactive = 848, hblank = 256 */
+ .data.pixel_data.hactive_lo = 0x50,
+ .data.pixel_data.hblank_lo = 0x00,
+ .data.pixel_data.hactive_hblank_hi = 0x31,
+ /* vactive = 480, vblank = 28 */
+ .data.pixel_data.vactive_lo = 0xe0,
+ .data.pixel_data.vblank_lo = 0x1c,
+ .data.pixel_data.vactive_vblank_hi = 0x10,
+ /* hsync offset 40 pw 128, vsync offset 1 pw 4 */
+ .data.pixel_data.hsync_offset_lo = 0x28,
+ .data.pixel_data.hsync_pulse_width_lo = 0x80,
+ .data.pixel_data.vsync_offset_pulse_width_lo = 0x14,
+ .data.pixel_data.hsync_vsync_offset_pulse_width_hi = 0x00,
+ /* Digital separate syncs, hsync+, vsync+ */
+ .data.pixel_data.misc = 0x1e,
+ }, {
+ .pixel_clock = 0,
+ .data.other_data.type = 0xfd, /* Monitor ranges */
+ .data.other_data.data.range.min_vfreq = 59,
+ .data.other_data.data.range.max_vfreq = 61,
+ .data.other_data.data.range.min_hfreq_khz = 29,
+ .data.other_data.data.range.max_hfreq_khz = 32,
+ .data.other_data.data.range.pixel_clock_mhz = 4, /* 40 MHz */
+ .data.other_data.data.range.flags = 0,
+ .data.other_data.data.range.formula.cvt = {
+ 0xa0, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 },
+ }, {
+ .pixel_clock = 0,
+ .data.other_data.type = 0xfc, /* Model string */
+ .data.other_data.data.str.str = {
+ 'P', 'r', 'o', 'j', 'e', 'c', 't', 'o', 'r', '\n',
+ ' ', ' ', ' ' },
+ }, {
+ .pixel_clock = 0,
+ .data.other_data.type = 0xfe, /* Unspecified text / padding */
+ .data.other_data.data.str.str = {
+ '\n', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
+ ' ', ' ', ' ' },
+ } },
+ .checksum = 0x13,
+};
+
+static int gm12u320_conn_get_modes(struct drm_connector *connector)
+{
+ drm_connector_update_edid_property(connector, &gm12u320_edid);
+ return drm_add_edid_modes(connector, &gm12u320_edid);
+}
+
+static const struct drm_connector_helper_funcs gm12u320_conn_helper_funcs = {
+ .get_modes = gm12u320_conn_get_modes,
+};
+
+static const struct drm_connector_funcs gm12u320_conn_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int gm12u320_conn_init(struct gm12u320_device *gm12u320)
+{
+ drm_connector_helper_add(&gm12u320->conn, &gm12u320_conn_helper_funcs);
+ return drm_connector_init(&gm12u320->dev, &gm12u320->conn,
+ &gm12u320_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+}
+
+/* ------------------------------------------------------------------ */
+/* gm12u320 (simple) display pipe */
+
+static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+ struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+
+ gm12u320_fb_mark_dirty(plane_state->fb, &rect);
+ gm12u320_start_fb_update(gm12u320);
+ gm12u320->pipe_enabled = true;
+}
+
+static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+
+ gm12u320_stop_fb_update(gm12u320);
+ gm12u320->pipe_enabled = false;
+}
+
+static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_rect rect;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+ gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
+
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
+ .enable = gm12u320_pipe_enable,
+ .disable = gm12u320_pipe_disable,
+ .update = gm12u320_pipe_update,
+};
+
+static const uint32_t gm12u320_pipe_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t gm12u320_pipe_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void gm12u320_driver_release(struct drm_device *dev)
+{
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ gm12u320_usb_free(gm12u320);
+ drm_mode_config_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(gm12u320);
+}
+
+DEFINE_DRM_GEM_FOPS(gm12u320_fops);
+
+static struct drm_driver gm12u320_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .release = gm12u320_driver_release,
+ .fops = &gm12u320_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int gm12u320_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct gm12u320_device *gm12u320;
+ struct drm_device *dev;
+ int ret;
+
+ /*
+ * The gm12u320 presents itself to the system as 2 USB mass-storage
+ * interfaces; we only care about / need the first one.
+ */
+ if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+
+ gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
+ if (gm12u320 == NULL)
+ return -ENOMEM;
+
+ gm12u320->udev = interface_to_usbdev(interface);
+ INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+ mutex_init(&gm12u320->fb_update.lock);
+ init_waitqueue_head(&gm12u320->fb_update.waitq);
+
+ dev = &gm12u320->dev;
+ ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
+ if (ret) {
+ kfree(gm12u320);
+ return ret;
+ }
+ dev->dev_private = gm12u320;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = GM12U320_USER_WIDTH;
+ dev->mode_config.max_width = GM12U320_USER_WIDTH;
+ dev->mode_config.min_height = GM12U320_HEIGHT;
+ dev->mode_config.max_height = GM12U320_HEIGHT;
+ dev->mode_config.funcs = &gm12u320_mode_config_funcs;
+
+ ret = gm12u320_usb_alloc(gm12u320);
+ if (ret)
+ goto err_put;
+
+ ret = gm12u320_set_ecomode(gm12u320);
+ if (ret)
+ goto err_put;
+
+ ret = gm12u320_conn_init(gm12u320);
+ if (ret)
+ goto err_put;
+
+ ret = drm_simple_display_pipe_init(&gm12u320->dev,
+ &gm12u320->pipe,
+ &gm12u320_pipe_funcs,
+ gm12u320_pipe_formats,
+ ARRAY_SIZE(gm12u320_pipe_formats),
+ gm12u320_pipe_modifiers,
+ &gm12u320->conn);
+ if (ret)
+ goto err_put;
+
+ drm_mode_config_reset(dev);
+
+ usb_set_intfdata(interface, dev);
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_put;
+
+ drm_fbdev_generic_setup(dev, 0);
+
+ return 0;
+
+err_put:
+ drm_dev_put(dev);
+ return ret;
+}
+
+static void gm12u320_usb_disconnect(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ gm12u320_stop_fb_update(gm12u320);
+ drm_dev_unplug(dev);
+ drm_dev_put(dev);
+}
+
+static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ if (gm12u320->pipe_enabled)
+ gm12u320_stop_fb_update(gm12u320);
+
+ return 0;
+}
+
+static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ gm12u320_set_ecomode(gm12u320);
+ if (gm12u320->pipe_enabled)
+ gm12u320_start_fb_update(gm12u320);
+
+ return 0;
+}
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1de1, 0xc102) },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver gm12u320_usb_driver = {
+ .name = "gm12u320",
+ .probe = gm12u320_usb_probe,
+ .disconnect = gm12u320_usb_disconnect,
+ .id_table = id_table,
+#ifdef CONFIG_PM
+ .suspend = gm12u320_suspend,
+ .resume = gm12u320_resume,
+ .reset_resume = gm12u320_resume,
+#endif
+};
+
+module_usb_driver(gm12u320_usb_driver);
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
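
The data block geometry defined near the top of this file is sized so that nineteen full blocks plus the short final block carry exactly one 854x480 frame at 3 bytes per pixel, which is why gm12u320_copy_fb_to_blocks() can derive the destination block purely from the byte offset within the frame. A standalone arithmetic check (plain userspace C, not driver code):

#include <assert.h>

int main(void)
{
	/* 19 full blocks + 1 short block == one 854x480 24bpp frame */
	assert(19 * 64512 + 4032 == 854 * 480 * 3);	/* 1229760 bytes on each side */
	return 0;
}
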
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 5773d0fb6ca1..9af8ff84974f 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -21,9 +21,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
#define HX8357D_SETOSC 0xb0
@@ -48,7 +47,8 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
@@ -57,29 +57,29 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_conditional_reset(mipi);
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
/* setextc */
- mipi_dbi_command(mipi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
+ mipi_dbi_command(dbi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
msleep(150);
/* setRGB which also enables SDO */
- mipi_dbi_command(mipi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
+ mipi_dbi_command(dbi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
/* -1.52V */
- mipi_dbi_command(mipi, HX8357D_SETCOM, 0x25);
+ mipi_dbi_command(dbi, HX8357D_SETCOM, 0x25);
/* Normal mode 70Hz, Idle mode 55 Hz */
- mipi_dbi_command(mipi, HX8357D_SETOSC, 0x68);
+ mipi_dbi_command(dbi, HX8357D_SETOSC, 0x68);
/* Set Panel - BGR, Gate direction swapped */
- mipi_dbi_command(mipi, HX8357D_SETPANEL, 0x05);
+ mipi_dbi_command(dbi, HX8357D_SETPANEL, 0x05);
- mipi_dbi_command(mipi, HX8357D_SETPOWER,
+ mipi_dbi_command(dbi, HX8357D_SETPOWER,
0x00, /* Not deep standby */
0x15, /* BT */
0x1C, /* VSPR */
@@ -87,7 +87,7 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x83, /* AP */
0xAA); /* FS */
- mipi_dbi_command(mipi, HX8357D_SETSTBA,
+ mipi_dbi_command(dbi, HX8357D_SETSTBA,
0x50, /* OPON normal */
0x50, /* OPON idle */
0x01, /* STBA */
@@ -95,7 +95,7 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x1E, /* STBA */
0x08); /* GEN */
- mipi_dbi_command(mipi, HX8357D_SETCYC,
+ mipi_dbi_command(dbi, HX8357D_SETCYC,
0x02, /* NW 0x02 */
0x40, /* RTN */
0x00, /* DIV */
@@ -104,7 +104,7 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x0D, /* GDON */
0x78); /* GDOFF */
- mipi_dbi_command(mipi, HX8357D_SETGAMMA,
+ mipi_dbi_command(dbi, HX8357D_SETGAMMA,
0x02,
0x0A,
0x11,
@@ -141,25 +141,25 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x01);
/* 16 bit */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
/* TE off */
- mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_ON, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_TEAR_ON, 0x00);
/* tear line */
- mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
/* Exit Sleep */
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(150);
/* display on */
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
usleep_range(5000, 7000);
out_enable:
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
break;
@@ -173,8 +173,8 @@ out_enable:
addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
break;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -193,7 +193,7 @@ static const struct drm_display_mode yx350hv15_mode = {
DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -220,20 +220,20 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id);
static int hx8357d_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
@@ -245,17 +245,17 @@ static int hx8357d_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
if (ret)
return ret;
- ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
if (ret)
return ret;
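
In the drm_mipi_dbi.h layout used here, struct mipi_dbi holds the interface-level state (SPI device, D/C gpio, command transfer) while struct mipi_dbi_dev embeds a drm_device and a mipi_dbi plus the display-level state (rotation, backlight, tx_buf). The drm_to_mipi_dbi_dev() upcast seen throughout these drivers is most plausibly a container_of() over the embedded drm_device (a sketch; the authoritative definition lives in include/drm/drm_mipi_dbi.h):

static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm)
{
	/* assumes the embedded drm_device member is named "drm" */
	return container_of(drm, struct mipi_dbi_dev, drm);
}
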
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index ea69019f2f33..c66acc566c2b 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -24,10 +24,9 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
@@ -69,27 +68,28 @@
#define ILI9225_GAMMA_CONTROL_9 0x58
#define ILI9225_GAMMA_CONTROL_10 0x59
-static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
+static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
{
u8 par[2] = { data >> 8, data & 0xff };
- return mipi_dbi_command_buf(mipi, cmd, par, 2);
+ return mipi_dbi_command_buf(dbi, cmd, par, 2);
}
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
- bool swap = mipi->swap_bytes;
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ bool swap = dbi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
int idx, ret = 0;
bool full;
void *tr;
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
if (!drm_dev_enter(fb->dev, &idx))
@@ -99,17 +99,17 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- if (!mipi->dc || !full || swap ||
+ if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
- tr = mipi->tx_buf;
- ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, rect, swap);
+ tr = dbidev->tx_buf;
+ ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
if (ret)
goto err_msg;
} else {
tr = cma_obj->vaddr;
}
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
x1 = rect->x1;
x2 = rect->x2 - 1;
@@ -144,15 +144,15 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
break;
}
- ili9225_command(mipi, ILI9225_HORIZ_WINDOW_ADDR_1, x2);
- ili9225_command(mipi, ILI9225_HORIZ_WINDOW_ADDR_2, x1);
- ili9225_command(mipi, ILI9225_VERT_WINDOW_ADDR_1, y2);
- ili9225_command(mipi, ILI9225_VERT_WINDOW_ADDR_2, y1);
+ ili9225_command(dbi, ILI9225_HORIZ_WINDOW_ADDR_1, x2);
+ ili9225_command(dbi, ILI9225_HORIZ_WINDOW_ADDR_2, x1);
+ ili9225_command(dbi, ILI9225_VERT_WINDOW_ADDR_1, y2);
+ ili9225_command(dbi, ILI9225_VERT_WINDOW_ADDR_2, y1);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_1, x_start);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, y_start);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_1, x_start);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_2, y_start);
- ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
+ ret = mipi_dbi_command_buf(dbi, ILI9225_WRITE_DATA_TO_GRAM, tr,
width * height * 2);
err_msg:
if (ret)
@@ -183,9 +183,10 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
struct device *dev = pipe->crtc.dev->dev;
+ struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
@@ -200,7 +201,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- mipi_dbi_hw_reset(mipi);
+ mipi_dbi_hw_reset(dbi);
/*
* There don't seem to be two example init sequences that match, so
@@ -208,31 +209,31 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
* https://github.com/Nkawu/TFT_22_ILI9225/blob/master/src/TFT_22_ILI9225.cpp
*/
- ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
+ ret = ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0000);
if (ret) {
DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
goto out_exit;
}
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_4, 0x0000);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_5, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_3, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_4, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_5, 0x0000);
msleep(40);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0018);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x6121);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_4, 0x006f);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_5, 0x495f);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0800);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0018);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_3, 0x6121);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_4, 0x006f);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_5, 0x495f);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0800);
msleep(10);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x103b);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x103b);
msleep(50);
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
am_id = 0x30;
break;
@@ -246,43 +247,43 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
am_id = 0x28;
break;
}
- ili9225_command(mipi, ILI9225_DRIVER_OUTPUT_CONTROL, 0x011c);
- ili9225_command(mipi, ILI9225_LCD_AC_DRIVING_CONTROL, 0x0100);
- ili9225_command(mipi, ILI9225_ENTRY_MODE, 0x1000 | am_id);
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
- ili9225_command(mipi, ILI9225_BLANK_PERIOD_CONTROL_1, 0x0808);
- ili9225_command(mipi, ILI9225_FRAME_CYCLE_CONTROL, 0x1100);
- ili9225_command(mipi, ILI9225_INTERFACE_CONTROL, 0x0000);
- ili9225_command(mipi, ILI9225_OSCILLATION_CONTROL, 0x0d01);
- ili9225_command(mipi, ILI9225_VCI_RECYCLING, 0x0020);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_1, 0x0000);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, 0x0000);
-
- ili9225_command(mipi, ILI9225_GATE_SCAN_CONTROL, 0x0000);
- ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_1, 0x00db);
- ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_2, 0x0000);
- ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_3, 0x0000);
- ili9225_command(mipi, ILI9225_PARTIAL_DRIVING_POS_1, 0x00db);
- ili9225_command(mipi, ILI9225_PARTIAL_DRIVING_POS_2, 0x0000);
-
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_1, 0x0000);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_2, 0x0808);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_3, 0x080a);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_4, 0x000a);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_5, 0x0a08);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_6, 0x0808);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_7, 0x0000);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_8, 0x0a00);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_9, 0x0710);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_10, 0x0710);
-
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0012);
+ ili9225_command(dbi, ILI9225_DRIVER_OUTPUT_CONTROL, 0x011c);
+ ili9225_command(dbi, ILI9225_LCD_AC_DRIVING_CONTROL, 0x0100);
+ ili9225_command(dbi, ILI9225_ENTRY_MODE, 0x1000 | am_id);
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
+ ili9225_command(dbi, ILI9225_BLANK_PERIOD_CONTROL_1, 0x0808);
+ ili9225_command(dbi, ILI9225_FRAME_CYCLE_CONTROL, 0x1100);
+ ili9225_command(dbi, ILI9225_INTERFACE_CONTROL, 0x0000);
+ ili9225_command(dbi, ILI9225_OSCILLATION_CONTROL, 0x0d01);
+ ili9225_command(dbi, ILI9225_VCI_RECYCLING, 0x0020);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_1, 0x0000);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_2, 0x0000);
+
+ ili9225_command(dbi, ILI9225_GATE_SCAN_CONTROL, 0x0000);
+ ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_1, 0x00db);
+ ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_2, 0x0000);
+ ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_3, 0x0000);
+ ili9225_command(dbi, ILI9225_PARTIAL_DRIVING_POS_1, 0x00db);
+ ili9225_command(dbi, ILI9225_PARTIAL_DRIVING_POS_2, 0x0000);
+
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_1, 0x0000);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_2, 0x0808);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_3, 0x080a);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_4, 0x000a);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_5, 0x0a08);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_6, 0x0808);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_7, 0x0000);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_8, 0x0a00);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_9, 0x0710);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_10, 0x0710);
+
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0012);
msleep(50);
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
- mipi->enabled = true;
+ dbidev->enabled = true;
ili9225_fb_dirty(fb, &rect);
out_exit:
drm_dev_exit(idx);
@@ -290,7 +291,8 @@ out_exit:
static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
DRM_DEBUG_KMS("\n");
@@ -301,39 +303,39 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
* unplug.
*/
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
msleep(50);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0007);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0007);
msleep(50);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0a02);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0a02);
- mipi->enabled = false;
+ dbidev->enabled = false;
}
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
size_t num)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
unsigned int bpw = 8;
u32 speed_hz;
int ret;
- gpiod_set_value_cansleep(mipi->dc, 0);
+ gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
if (ret || !num)
return ret;
- if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !dbi->swap_bytes)
bpw = 16;
- gpiod_set_value_cansleep(mipi->dc, 1);
+ gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
- return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
+ return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
}
static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
@@ -350,8 +352,7 @@ static const struct drm_display_mode ili9225_mode = {
DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
static struct drm_driver ili9225_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -377,29 +378,31 @@ MODULE_DEVICE_TABLE(spi, ili9225_id);
static int ili9225_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *rs;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
rs = devm_gpiod_get(dev, "rs", GPIOD_OUT_LOW);
@@ -410,14 +413,14 @@ static int ili9225_probe(struct spi_device *spi)
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, rs);
+ ret = mipi_dbi_spi_init(spi, dbi, rs);
if (ret)
return ret;
/* override the command function set in mipi_dbi_spi_init() */
- mipi->command = ili9225_dbi_command;
+ dbi->command = ili9225_dbi_command;
- ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &ili9225_pipe_funcs, &ili9225_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 4ade9e4b924f..33b51dc7faa8 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -20,9 +20,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
@@ -54,7 +53,8 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
@@ -63,57 +63,57 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_conditional_reset(mipi);
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
- mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
- mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
- mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
- mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
- mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
- mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+ mipi_dbi_command(dbi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(dbi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLB, 0x00, 0x00);
/* Power Control */
- mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
- mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL1, 0x23);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL2, 0x10);
/* VCOM */
- mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
- mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL1, 0x3e, 0x28);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL2, 0x86);
/* Memory Access Control */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
/* Frame Rate */
- mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+ mipi_dbi_command(dbi, ILI9341_FRMCTR1, 0x00, 0x1b);
/* Gamma */
- mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
- mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
- mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_EN3GAM, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(dbi, ILI9341_PGAMCTRL,
0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
- mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_NGAMCTRL,
0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
/* DDRAM */
- mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+ mipi_dbi_command(dbi, ILI9341_ETMOD, 0x07);
/* Display */
- mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = ILI9341_MADCTL_MX;
break;
@@ -129,8 +129,8 @@ out_enable:
break;
}
addr_mode |= ILI9341_MADCTL_BGR;
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -149,7 +149,7 @@ static const struct drm_display_mode yx240qv29_mode = {
DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -176,29 +176,31 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
@@ -207,17 +209,17 @@ static int ili9341_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
- ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index fdefa53455d4..e2cfd9a17143 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -18,9 +18,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
@@ -52,7 +51,8 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
@@ -61,53 +61,53 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_conditional_reset(mipi);
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
- mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30);
- mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
- mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79);
- mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
- mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
- mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30);
+ mipi_dbi_command(dbi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(dbi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLB, 0x00, 0x00);
/* Power Control */
- mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x26);
- mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x11);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL1, 0x26);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL2, 0x11);
/* VCOM */
- mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x35, 0x3e);
- mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0xbe);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL1, 0x35, 0x3e);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL2, 0xbe);
/* Memory Access Control */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
/* Frame Rate */
- mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+ mipi_dbi_command(dbi, ILI9341_FRMCTR1, 0x00, 0x1b);
/* Gamma */
- mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x08);
- mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
- mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_EN3GAM, 0x08);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(dbi, ILI9341_PGAMCTRL,
0x1f, 0x1a, 0x18, 0x0a, 0x0f, 0x06, 0x45, 0x87,
0x32, 0x0a, 0x07, 0x02, 0x07, 0x05, 0x00);
- mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_NGAMCTRL,
0x00, 0x25, 0x27, 0x05, 0x10, 0x09, 0x3a, 0x78,
0x4d, 0x05, 0x18, 0x0d, 0x38, 0x3a, 0x1f);
/* DDRAM */
- mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+ mipi_dbi_command(dbi, ILI9341_ETMOD, 0x07);
/* Display */
- mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
@@ -117,7 +117,7 @@ out_enable:
* As a result, we need to always apply the rotation value
* regardless of the display "on/off" state.
*/
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
ILI9341_MADCTL_MX;
@@ -133,8 +133,8 @@ out_enable:
break;
}
addr_mode |= ILI9341_MADCTL_BGR;
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -153,8 +153,7 @@ static const struct drm_display_mode mi0283qt_mode = {
DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
static struct drm_driver mi0283qt_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -181,29 +180,31 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
@@ -212,21 +213,21 @@ static int mi0283qt_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->regulator = devm_regulator_get(dev, "power");
- if (IS_ERR(mipi->regulator))
- return PTR_ERR(mipi->regulator);
+ dbidev->regulator = devm_regulator_get(dev, "power");
+ if (IS_ERR(dbidev->regulator))
+ return PTR_ERR(dbidev->regulator);
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
- ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 97a874b40394..76d179200775 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -23,6 +23,7 @@
#include <linux/thermal.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
@@ -30,10 +31,11 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#define REPAPER_RID_G2_COG_ID 0x12
@@ -60,6 +62,8 @@ enum repaper_epd_border_byte {
struct repaper_epd {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
+ const struct drm_display_mode *mode;
+ struct drm_connector connector;
struct spi_device *spi;
struct gpio_desc *panel_on;
@@ -873,6 +877,39 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static int repaper_connector_get_modes(struct drm_connector *connector)
+{
+ struct repaper_epd *epd = drm_to_epd(connector->dev);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, epd->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to duplicate mode\n");
+ return 0;
+ }
+
+ drm_mode_set_name(mode);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs repaper_connector_hfuncs = {
+ .get_modes = repaper_connector_get_modes,
+};
+
+static const struct drm_connector_funcs repaper_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
@@ -925,8 +962,7 @@ static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
static struct drm_driver repaper_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
.release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -1096,6 +1132,7 @@ static int repaper_probe(struct spi_device *spi)
return -ENODEV;
}
+ epd->mode = mode;
epd->width = mode->hdisplay;
epd->height = mode->vdisplay;
epd->factored_stage_time = epd->stage_time;
@@ -1110,10 +1147,20 @@ static int repaper_probe(struct spi_device *spi)
if (!epd->current_frame)
return -ENOMEM;
- ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- repaper_formats,
- ARRAY_SIZE(repaper_formats), mode, 0);
+ drm->mode_config.min_width = mode->hdisplay;
+ drm->mode_config.max_width = mode->hdisplay;
+ drm->mode_config.min_height = mode->vdisplay;
+ drm->mode_config.max_height = mode->vdisplay;
+
+ drm_connector_helper_add(&epd->connector, &repaper_connector_hfuncs);
+ ret = drm_connector_init(drm, &epd->connector, &repaper_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ ret = drm_simple_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
+ repaper_formats, ARRAY_SIZE(repaper_formats),
+ NULL, &epd->connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 9ac626265152..060cc756194f 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -21,10 +21,9 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
@@ -115,10 +114,11 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
int start, end, idx, ret = 0;
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
if (!drm_dev_enter(fb->dev, &idx))
@@ -130,7 +130,7 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = st7586_buf_copy(mipi->tx_buf, fb, rect);
+ ret = st7586_buf_copy(dbidev->tx_buf, fb, rect);
if (ret)
goto err_msg;
@@ -138,15 +138,15 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
start = rect->x1 / 3;
end = rect->x2 / 3;
- mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
(start >> 8) & 0xFF, start & 0xFF,
(end >> 8) & 0xFF, (end - 1) & 0xFF);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
(rect->y1 >> 8) & 0xFF, rect->y1 & 0xFF,
(rect->y2 >> 8) & 0xFF, (rect->y2 - 1) & 0xFF);
- ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
- (u8 *)mipi->tx_buf,
+ ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)dbidev->tx_buf,
(end - start) * (rect->y2 - rect->y1));
err_msg:
if (ret)
@@ -177,8 +177,9 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
@@ -193,35 +194,35 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_reset(mipi);
+ ret = mipi_dbi_poweron_reset(dbidev);
if (ret)
goto out_exit;
- mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
- mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
+ mipi_dbi_command(dbi, ST7586_AUTO_READ_CTRL, 0x9f);
+ mipi_dbi_command(dbi, ST7586_OTP_RW_CTRL, 0x00);
msleep(10);
- mipi_dbi_command(mipi, ST7586_OTP_READ);
+ mipi_dbi_command(dbi, ST7586_OTP_READ);
msleep(20);
- mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dbi_command(dbi, ST7586_OTP_CTRL_OUT);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
msleep(50);
- mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00);
- mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00);
- mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02);
- mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04);
- mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d);
- mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00);
- mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY);
- mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02);
+ mipi_dbi_command(dbi, ST7586_SET_VOP_OFFSET, 0x00);
+ mipi_dbi_command(dbi, ST7586_SET_VOP, 0xe3, 0x00);
+ mipi_dbi_command(dbi, ST7586_SET_BIAS_SYSTEM, 0x02);
+ mipi_dbi_command(dbi, ST7586_SET_BOOST_LEVEL, 0x04);
+ mipi_dbi_command(dbi, ST7586_ENABLE_ANALOG, 0x1d);
+ mipi_dbi_command(dbi, ST7586_SET_NLINE_INV, 0x00);
+ mipi_dbi_command(dbi, ST7586_DISP_MODE_GRAY);
+ mipi_dbi_command(dbi, ST7586_ENABLE_DDRAM, 0x02);
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = 0x00;
break;
@@ -235,26 +236,26 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7586_DISP_CTRL_MX;
break;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f);
- mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
+ mipi_dbi_command(dbi, ST7586_SET_DISP_DUTY, 0x7f);
+ mipi_dbi_command(dbi, ST7586_SET_PART_DISP, 0xa0);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_ROWS, 0x00, 0x00, 0x00, 0x77);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
msleep(100);
- mipi->enabled = true;
+ dbidev->enabled = true;
st7586_fb_dirty(fb, &rect);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
drm_dev_exit(idx);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
/*
* This callback is not protected by drm_dev_enter/exit since we want to
@@ -265,11 +266,11 @@ static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
DRM_DEBUG_KMS("\n");
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
- mipi->enabled = false;
+ mipi_dbi_command(&dbidev->dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ dbidev->enabled = false;
}
static const u32 st7586_formats[] = {
@@ -283,12 +284,6 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
-static const struct drm_mode_config_funcs st7586_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
static const struct drm_display_mode st7586_mode = {
DRM_SIMPLE_MODE(178, 128, 37, 27),
};
@@ -296,8 +291,7 @@ static const struct drm_display_mode st7586_mode = {
DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
static struct drm_driver st7586_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -324,39 +318,34 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *a0;
u32 rotation = 0;
size_t bufsize;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7586_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- drm->mode_config.preferred_depth = 32;
- drm->mode_config.funcs = &st7586_mode_config_funcs;
-
- mutex_init(&mipi->cmdlock);
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
- return -ENOMEM;
- mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
@@ -366,14 +355,19 @@ static int st7586_probe(struct spi_device *spi)
}
device_property_read_u32(dev, "rotation", &rotation);
- mipi->rotation = rotation;
- ret = mipi_dbi_spi_init(spi, mipi, a0);
+ ret = mipi_dbi_spi_init(spi, dbi, a0);
if (ret)
return ret;
/* Cannot read from this controller via SPI */
- mipi->read_commands = NULL;
+ dbi->read_commands = NULL;
+
+ ret = mipi_dbi_dev_init_with_formats(dbidev, &st7586_pipe_funcs,
+ st7586_formats, ARRAY_SIZE(st7586_formats),
+ &st7586_mode, rotation, bufsize);
+ if (ret)
+ return ret;
/*
* we are using 8-bit data, so we are not actually swapping anything,
@@ -382,16 +376,7 @@ static int st7586_probe(struct spi_device *spi)
* bytes on little-endian systems and causes out of order data to be
* sent to the display).
*/
- mipi->swap_bytes = true;
-
- ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- st7586_formats, ARRAY_SIZE(st7586_formats),
- &st7586_mode, rotation);
- if (ret)
- return ret;
-
- drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
+ dbi->swap_bytes = true;
drm_mode_config_reset(drm);
@@ -401,9 +386,6 @@ static int st7586_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- drm->mode_config.preferred_depth, rotation);
-
drm_fbdev_generic_setup(drm, 0);
return 0;
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index ce9109e613e0..3f4487c71684 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -19,8 +19,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
+#include <drm/drm_mipi_dbi.h>
#define ST7735R_FRMCTR1 0xb1
#define ST7735R_FRMCTR2 0xb2
@@ -43,7 +42,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
@@ -52,28 +52,28 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_reset(mipi);
+ ret = mipi_dbi_poweron_reset(dbidev);
if (ret)
goto out_exit;
msleep(150);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(500);
- mipi_dbi_command(mipi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
- mipi_dbi_command(mipi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
- mipi_dbi_command(mipi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
+ mipi_dbi_command(dbi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(dbi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(dbi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
0x2d);
- mipi_dbi_command(mipi, ST7735R_INVCTR, 0x07);
- mipi_dbi_command(mipi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
- mipi_dbi_command(mipi, ST7735R_PWCTR2, 0xc5);
- mipi_dbi_command(mipi, ST7735R_PWCTR3, 0x0a, 0x00);
- mipi_dbi_command(mipi, ST7735R_PWCTR4, 0x8a, 0x2a);
- mipi_dbi_command(mipi, ST7735R_PWCTR5, 0x8a, 0xee);
- mipi_dbi_command(mipi, ST7735R_VMCTR1, 0x0e);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
- switch (mipi->rotation) {
+ mipi_dbi_command(dbi, ST7735R_INVCTR, 0x07);
+ mipi_dbi_command(dbi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
+ mipi_dbi_command(dbi, ST7735R_PWCTR2, 0xc5);
+ mipi_dbi_command(dbi, ST7735R_PWCTR3, 0x0a, 0x00);
+ mipi_dbi_command(dbi, ST7735R_PWCTR4, 0x8a, 0x2a);
+ mipi_dbi_command(dbi, ST7735R_PWCTR5, 0x8a, 0xee);
+ mipi_dbi_command(dbi, ST7735R_VMCTR1, 0x0e);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
+ switch (dbidev->rotation) {
default:
addr_mode = ST7735R_MX | ST7735R_MY;
break;
@@ -87,24 +87,24 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
- mipi_dbi_command(mipi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
+ mipi_dbi_command(dbi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
0x32, 0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01,
0x03, 0x10);
- mipi_dbi_command(mipi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
+ mipi_dbi_command(dbi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
0x2c, 0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00,
0x02, 0x10);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
- mipi_dbi_command(mipi, MIPI_DCS_ENTER_NORMAL_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_NORMAL_MODE);
msleep(20);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -123,8 +123,7 @@ static const struct drm_display_mode jd_t18003_t01_mode = {
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
static struct drm_driver st7735r_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -151,29 +150,31 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
@@ -182,20 +183,20 @@ static int st7735r_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
/* Cannot read from Adafruit 1.8" display via SPI */
- mipi->read_commands = NULL;
+ dbi->read_commands = NULL;
- ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
deleted file mode 100644
index 01065e920aea..000000000000
--- a/drivers/gpu/drm/tinydrm/core/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o
-
-obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
deleted file mode 100644
index dfeafac4c656..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Noralf Trønnes
- */
-
-#include <linux/backlight.h>
-#include <linux/dma-buf.h>
-#include <linux/module.h>
-#include <linux/pm.h>
-#include <linux/spi/spi.h>
-#include <linux/swab.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-#include <drm/drm_rect.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
-
-static unsigned int spi_max;
-module_param(spi_max, uint, 0400);
-MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
-
-#if IS_ENABLED(CONFIG_SPI)
-
-/**
- * tinydrm_spi_max_transfer_size - Determine max SPI transfer size
- * @spi: SPI device
- * @max_len: Maximum buffer size needed (optional)
- *
- * This function returns the maximum size to use for SPI transfers. It checks
- * the SPI master, the optional @max_len and the module parameter spi_max and
- * returns the smallest.
- *
- * Returns:
- * Maximum size for SPI transfers
- */
-size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len)
-{
- size_t ret;
-
- ret = min(spi_max_transfer_size(spi), spi->master->max_dma_len);
- if (max_len)
- ret = min(ret, max_len);
- if (spi_max)
- ret = min_t(size_t, ret, spi_max);
- ret &= ~0x3;
- if (ret < 4)
- ret = 4;
-
- return ret;
-}
-EXPORT_SYMBOL(tinydrm_spi_max_transfer_size);
-
-/**
- * tinydrm_spi_bpw_supported - Check if bits per word is supported
- * @spi: SPI device
- * @bpw: Bits per word
- *
- * This function checks to see if the SPI master driver supports @bpw.
- *
- * Returns:
- * True if @bpw is supported, false otherwise.
- */
-bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw)
-{
- u32 bpw_mask = spi->master->bits_per_word_mask;
-
- if (bpw == 8)
- return true;
-
- if (!bpw_mask) {
- dev_warn_once(&spi->dev,
- "bits_per_word_mask not set, assume 8-bit only\n");
- return false;
- }
-
- if (bpw_mask & SPI_BPW_MASK(bpw))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(tinydrm_spi_bpw_supported);
-
-static void
-tinydrm_dbg_spi_print(struct spi_device *spi, struct spi_transfer *tr,
- const void *buf, int idx, bool tx)
-{
- u32 speed_hz = tr->speed_hz ? tr->speed_hz : spi->max_speed_hz;
- char linebuf[3 * 32];
-
- hex_dump_to_buffer(buf, tr->len, 16,
- DIV_ROUND_UP(tr->bits_per_word, 8),
- linebuf, sizeof(linebuf), false);
-
- printk(KERN_DEBUG
- " tr(%i): speed=%u%s, bpw=%i, len=%u, %s_buf=[%s%s]\n", idx,
- speed_hz > 1000000 ? speed_hz / 1000000 : speed_hz / 1000,
- speed_hz > 1000000 ? "MHz" : "kHz", tr->bits_per_word, tr->len,
- tx ? "tx" : "rx", linebuf, tr->len > 16 ? " ..." : "");
-}
-
-/* called through tinydrm_dbg_spi_message() */
-void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m)
-{
- struct spi_transfer *tmp;
- int i = 0;
-
- list_for_each_entry(tmp, &m->transfers, transfer_list) {
-
- if (tmp->tx_buf)
- tinydrm_dbg_spi_print(spi, tmp, tmp->tx_buf, i, true);
- if (tmp->rx_buf)
- tinydrm_dbg_spi_print(spi, tmp, tmp->rx_buf, i, false);
- i++;
- }
-}
-EXPORT_SYMBOL(_tinydrm_dbg_spi_message);
-
-/**
- * tinydrm_spi_transfer - SPI transfer helper
- * @spi: SPI device
- * @speed_hz: Override speed (optional)
- * @header: Optional header transfer
- * @bpw: Bits per word
- * @buf: Buffer to transfer
- * @len: Buffer length
- *
- * This SPI transfer helper breaks up the transfer of @buf into chunks which
- * the SPI master driver can handle. If the machine is Little Endian and the
- * SPI master driver doesn't support 16 bits per word, it swaps the bytes and
- * does a 8-bit transfer.
- * If @header is set, it is prepended to each SPI message.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
- struct spi_transfer *header, u8 bpw, const void *buf,
- size_t len)
-{
- struct spi_transfer tr = {
- .bits_per_word = bpw,
- .speed_hz = speed_hz,
- };
- struct spi_message m;
- u16 *swap_buf = NULL;
- size_t max_chunk;
- size_t chunk;
- int ret = 0;
-
- if (WARN_ON_ONCE(bpw != 8 && bpw != 16))
- return -EINVAL;
-
- max_chunk = tinydrm_spi_max_transfer_size(spi, 0);
-
- if (drm_debug & DRM_UT_DRIVER)
- pr_debug("[drm:%s] bpw=%u, max_chunk=%zu, transfers:\n",
- __func__, bpw, max_chunk);
-
- if (bpw == 16 && !tinydrm_spi_bpw_supported(spi, 16)) {
- tr.bits_per_word = 8;
- if (tinydrm_machine_little_endian()) {
- swap_buf = kmalloc(min(len, max_chunk), GFP_KERNEL);
- if (!swap_buf)
- return -ENOMEM;
- }
- }
-
- spi_message_init(&m);
- if (header)
- spi_message_add_tail(header, &m);
- spi_message_add_tail(&tr, &m);
-
- while (len) {
- chunk = min(len, max_chunk);
-
- tr.tx_buf = buf;
- tr.len = chunk;
-
- if (swap_buf) {
- const u16 *buf16 = buf;
- unsigned int i;
-
- for (i = 0; i < chunk / 2; i++)
- swap_buf[i] = swab16(buf16[i]);
-
- tr.tx_buf = swap_buf;
- }
-
- buf += chunk;
- len -= chunk;
-
- tinydrm_dbg_spi_message(spi, &m);
- ret = spi_sync(spi, &m);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(tinydrm_spi_transfer);
-
-#endif /* CONFIG_SPI */
-
-MODULE_LICENSE("GPL");
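
tinydrm_spi_transfer() and its supporting helpers are deleted here because the converted command functions earlier in this diff now call mipi_dbi_spi_transfer() from drm_mipi_dbi instead. The only visible change at the call sites is that the optional header transfer argument is gone; condensed from the ili9225_dbi_command hunk above:

	/* before: tinydrm helper with an optional header spi_transfer */
	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);

	/* after: drm_mipi_dbi helper, no header parameter */
	ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);

(Whether the new helper keeps the same chunking and 16-bit byte-swap fallback is not shown in these hunks; the call sites above simply assume equivalent behaviour.)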
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
deleted file mode 100644
index ed798fd95152..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Noralf Trønnes
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_simple_kms_helper.h>
-
-struct tinydrm_connector {
- struct drm_connector base;
- struct drm_display_mode mode;
-};
-
-static inline struct tinydrm_connector *
-to_tinydrm_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct tinydrm_connector, base);
-}
-
-static int tinydrm_connector_get_modes(struct drm_connector *connector)
-{
- struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &tconn->mode);
- if (!mode) {
- DRM_ERROR("Failed to duplicate mode\n");
- return 0;
- }
-
- if (mode->name[0] == '\0')
- drm_mode_set_name(mode);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- if (mode->width_mm) {
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- }
-
- return 1;
-}
-
-static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
- .get_modes = tinydrm_connector_get_modes,
-};
-
-static enum drm_connector_status
-tinydrm_connector_detect(struct drm_connector *connector, bool force)
-{
- if (drm_dev_is_unplugged(connector->dev))
- return connector_status_disconnected;
-
- return connector->status;
-}
-
-static void tinydrm_connector_destroy(struct drm_connector *connector)
-{
- struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
-
- drm_connector_cleanup(connector);
- kfree(tconn);
-}
-
-static const struct drm_connector_funcs tinydrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = tinydrm_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = tinydrm_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-struct drm_connector *
-tinydrm_connector_create(struct drm_device *drm,
- const struct drm_display_mode *mode,
- int connector_type)
-{
- struct tinydrm_connector *tconn;
- struct drm_connector *connector;
- int ret;
-
- tconn = kzalloc(sizeof(*tconn), GFP_KERNEL);
- if (!tconn)
- return ERR_PTR(-ENOMEM);
-
- drm_mode_copy(&tconn->mode, mode);
- connector = &tconn->base;
-
- drm_connector_helper_add(connector, &tinydrm_connector_hfuncs);
- ret = drm_connector_init(drm, connector, &tinydrm_connector_funcs,
- connector_type);
- if (ret) {
- kfree(tconn);
- return ERR_PTR(ret);
- }
-
- connector->status = connector_status_connected;
-
- return connector;
-}
-
-static int tinydrm_rotate_mode(struct drm_display_mode *mode,
- unsigned int rotation)
-{
- if (rotation == 0 || rotation == 180) {
- return 0;
- } else if (rotation == 90 || rotation == 270) {
- swap(mode->hdisplay, mode->vdisplay);
- swap(mode->hsync_start, mode->vsync_start);
- swap(mode->hsync_end, mode->vsync_end);
- swap(mode->htotal, mode->vtotal);
- swap(mode->width_mm, mode->height_mm);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-/**
- * tinydrm_display_pipe_init - Initialize display pipe
- * @drm: DRM device
- * @pipe: Display pipe
- * @funcs: Display pipe functions
- * @connector_type: Connector type
- * @formats: Array of supported formats (DRM_FORMAT\_\*)
- * @format_count: Number of elements in @formats
- * @mode: Supported mode
- * @rotation: Initial @mode rotation in degrees Counter Clock Wise
- *
- * This function sets up a &drm_simple_display_pipe with a &drm_connector that
- * has one fixed &drm_display_mode which is rotated according to @rotation.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_display_pipe_init(struct drm_device *drm,
- struct drm_simple_display_pipe *pipe,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation)
-{
- struct drm_display_mode mode_copy;
- struct drm_connector *connector;
- int ret;
- static const uint64_t modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
- };
-
- drm_mode_copy(&mode_copy, mode);
- ret = tinydrm_rotate_mode(&mode_copy, rotation);
- if (ret) {
- DRM_ERROR("Illegal rotation value %u\n", rotation);
- return -EINVAL;
- }
-
- drm->mode_config.min_width = mode_copy.hdisplay;
- drm->mode_config.max_width = mode_copy.hdisplay;
- drm->mode_config.min_height = mode_copy.vdisplay;
- drm->mode_config.max_height = mode_copy.vdisplay;
-
- connector = tinydrm_connector_create(drm, &mode_copy, connector_type);
- if (IS_ERR(connector))
- return PTR_ERR(connector);
-
- return drm_simple_display_pipe_init(drm, pipe, funcs, formats,
- format_count, modifiers, connector);
-}
-EXPORT_SYMBOL(tinydrm_display_pipe_init);
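
tinydrm_display_pipe_init() is removed without a one-to-one replacement; a driver that needs a single fixed-mode connector now open-codes it, as the repaper conversion earlier in this diff does. Condensed from the + lines of that hunk (a sketch using repaper's names, not a drop-in helper):

	drm->mode_config.min_width = mode->hdisplay;
	drm->mode_config.max_width = mode->hdisplay;
	drm->mode_config.min_height = mode->vdisplay;
	drm->mode_config.max_height = mode->vdisplay;

	/* connector with one preferred mode; get_modes duplicates epd->mode */
	drm_connector_helper_add(&epd->connector, &repaper_connector_hfuncs);
	ret = drm_connector_init(drm, &epd->connector, &repaper_connector_funcs,
				 DRM_MODE_CONNECTOR_SPI);
	if (ret)
		return ret;

	ret = drm_simple_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
					   repaper_formats, ARRAY_SIZE(repaper_formats),
					   NULL, &epd->connector);

The rotation handling that tinydrm_rotate_mode() provided is not needed here (repaper passed rotation 0); the MIPI DBI drivers pass their rotation value to mipi_dbi_dev_init() instead.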
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 01fc670ce7a2..caea2a099496 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
- ttm_page_alloc_dma.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
+ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb8966..6050dc846894 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+ struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 58c403eda04e..5df596fb0280 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -41,7 +41,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -148,22 +149,20 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
- struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
- BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->bdev->glob->bo_count);
+ atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
- reservation_object_fini(&bo->ttm_resv);
- mutex_destroy(&bo->wu_mutex);
+ if (!ttm_bo_uses_embedded_gem_object(bo))
+ dma_resv_fini(&bo->base._resv);
bo->destroy(bo);
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -172,7 +171,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- reservation_object_assert_held(bo->resv);
+ dma_resv_assert_held(bo->base.resv);
if (!list_empty(&bo->lru))
return;
@@ -184,25 +183,20 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
+ list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -222,16 +216,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -243,10 +227,10 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk)
{
- reservation_object_assert_held(bo->resv);
+ dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -276,8 +260,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->resv);
- reservation_object_assert_held(pos->last->resv);
+ dma_resv_assert_held(pos->first->base.resv);
+ dma_resv_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_TT];
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -291,8 +275,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->resv);
- reservation_object_assert_held(pos->last->resv);
+ dma_resv_assert_held(pos->first->base.resv);
+ dma_resv_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_VRAM];
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -306,10 +290,10 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->resv);
- reservation_object_assert_held(pos->last->resv);
+ dma_resv_assert_held(pos->first->base.resv);
+ dma_resv_assert_held(pos->last->base.resv);
- lru = &pos->first->bdev->glob->swap_lru[i];
+ lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -438,32 +422,32 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
int r;
- if (bo->resv == &bo->ttm_resv)
+ if (bo->base.resv == &bo->base._resv)
return 0;
- BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
+ BUG_ON(!dma_resv_trylock(&bo->base._resv));
- r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
+ r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
if (r)
- reservation_object_unlock(&bo->ttm_resv);
+ dma_resv_unlock(&bo->base._resv);
return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
int i;
- fobj = reservation_object_get_list(&bo->ttm_resv);
- fence = reservation_object_get_excl(&bo->ttm_resv);
+ fobj = dma_resv_get_list(&bo->base._resv);
+ fence = dma_resv_get_excl(&bo->base._resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(bo->resv));
+ dma_resv_held(bo->base.resv));
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -473,7 +457,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@@ -481,23 +464,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- reservation_object_wait_timeout_rcu(bo->resv, true, false,
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}
- spin_lock(&glob->lru_lock);
- ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
- if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
+ if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ if (bo->base.resv != &bo->base._resv)
+ dma_resv_unlock(&bo->base._resv);
ttm_bo_cleanup_memtype_use(bo);
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
return;
}
@@ -510,18 +493,18 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
}
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
}
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
+ if (bo->base.resv != &bo->base._resv)
+ dma_resv_unlock(&bo->base._resv);
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -544,16 +527,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct ttm_bo_global *glob = bo->bdev->glob;
- struct reservation_object *resv;
+ struct dma_resv *resv;
int ret;
if (unlikely(list_empty(&bo->ddestroy)))
- resv = bo->resv;
+ resv = bo->base.resv;
else
- resv = &bo->ttm_resv;
+ resv = &bo->base._resv;
- if (reservation_object_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
ret = -EBUSY;
@@ -562,10 +544,10 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
long lret;
if (unlock_resv)
- reservation_object_unlock(bo->resv);
- spin_unlock(&glob->lru_lock);
+ dma_resv_unlock(bo->base.resv);
+ spin_unlock(&ttm_bo_glob.lru_lock);
- lret = reservation_object_wait_timeout_rcu(resv, true,
+ lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
30 * HZ);
@@ -574,8 +556,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
- spin_lock(&glob->lru_lock);
- if (unlock_resv && !reservation_object_trylock(bo->resv)) {
+ spin_lock(&ttm_bo_glob.lru_lock);
+ if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
* and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -584,7 +566,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@@ -592,8 +574,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
- reservation_object_unlock(bo->resv);
- spin_unlock(&glob->lru_lock);
+ dma_resv_unlock(bo->base.resv);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -601,11 +583,11 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
return 0;
}
@@ -616,7 +598,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;
@@ -631,14 +613,14 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
- if (remove_all || bo->resv != &bo->ttm_resv) {
+ if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
- reservation_object_lock(bo->resv, NULL);
+ dma_resv_lock(bo->base.resv, NULL);
spin_lock(&glob->lru_lock);
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- } else if (reservation_object_trylock(bo->resv)) {
+ } else if (dma_resv_trylock(bo->base.resv)) {
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else {
spin_unlock(&glob->lru_lock);
@@ -671,7 +653,10 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
+ if (bo->bdev->driver->release_notify)
+ bo->bdev->driver->release_notify(bo);
+
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -707,7 +692,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int ret = 0;
- reservation_object_assert_held(bo->resv);
+ dma_resv_assert_held(bo->base.resv);
placement.num_placement = 0;
placement.num_busy_placement = 0;
@@ -777,8 +762,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
- if (bo->resv == ctx->resv) {
- reservation_object_assert_held(bo->resv);
+ if (bo->base.resv == ctx->resv) {
+ dma_resv_assert_held(bo->base.resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
|| !list_empty(&bo->ddestroy))
ret = true;
@@ -786,7 +771,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (busy)
*busy = false;
} else {
- ret = reservation_object_trylock(bo->resv);
+ ret = dma_resv_trylock(bo->base.resv);
*locked = ret;
if (busy)
*busy = !ret;
@@ -814,10 +799,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return -EBUSY;
if (ctx->interruptible)
- r = reservation_object_lock_interruptible(busy_bo->resv,
+ r = dma_resv_lock_interruptible(busy_bo->base.resv,
ticket);
else
- r = reservation_object_lock(busy_bo->resv, ticket);
+ r = dma_resv_lock(busy_bo->base.resv, ticket);
/*
* TODO: It would be better to keep the BO locked until allocation is at
@@ -825,7 +810,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
* of TTM.
*/
if (!r)
- reservation_object_unlock(busy_bo->resv);
+ dma_resv_unlock(busy_bo->base.resv);
return r == -EDEADLK ? -EBUSY : r;
}
@@ -837,21 +822,20 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
&busy)) {
- if (busy && !busy_bo &&
- bo->resv->lock.ctx != ticket)
+ if (busy && !busy_bo && ticket !=
+ dma_resv_locking_ctx(bo->base.resv))
busy_bo = bo;
continue;
}
@@ -859,7 +843,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
if (locked)
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
continue;
}
break;
@@ -874,11 +858,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
- ttm_bo_get(busy_bo);
- spin_unlock(&glob->lru_lock);
+ kref_get(&busy_bo->list_kref);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- ttm_bo_put(busy_bo);
+ kref_put(&busy_bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -891,17 +875,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -921,7 +899,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
int ret;
@@ -930,19 +909,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
- if (fence) {
- reservation_object_add_shared_fence(bo->resv, fence);
+ if (!fence)
+ return 0;
- ret = reservation_object_reserve_shared(bo->resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ if (no_wait_gpu)
+ return -EBUSY;
- dma_fence_put(bo->moving);
- bo->moving = fence;
+ dma_resv_add_shared_fence(bo->base.resv, fence);
+
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
+ return ret;
}
+ dma_fence_put(bo->moving);
+ bo->moving = fence;
return 0;
}
@@ -957,8 +939,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ww_acquire_ctx *ticket;
int ret;
+ ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret != 0))
@@ -966,12 +950,12 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
if (mem->mm_node)
break;
ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
- bo->resv->lock.ctx);
+ ticket);
if (unlikely(ret != 0))
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem);
+ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1061,12 +1045,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@@ -1088,7 +1070,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
int i, ret;
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -1113,14 +1095,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- if (mem->mm_node) {
- ret = ttm_bo_add_move_fence(bo, man, mem);
- if (unlikely(ret)) {
- (*man->func->put_node)(man, mem);
- goto error;
- }
- return 0;
+ if (!mem->mm_node)
+ continue;
+
+ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ if (ret == -EBUSY)
+ continue;
+
+ goto error;
}
+ return 0;
}
for (i = 0; i < placement->num_busy_placement; ++i) {
@@ -1153,9 +1139,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
return ret;
@@ -1169,7 +1155,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
int ret = 0;
struct ttm_mem_reg mem;
- reservation_object_assert_held(bo->resv);
+ dma_resv_assert_held(bo->base.resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1239,7 +1225,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
int ret;
uint32_t new_flags;
- reservation_object_assert_held(bo->resv);
+ dma_resv_assert_held(bo->base.resv);
/*
* Check whether we need to move buffer.
*/
@@ -1276,12 +1262,12 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_operation_ctx *ctx,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1308,12 +1294,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
- atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
- mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->type = type;
bo->num_pages = num_pages;
@@ -1329,14 +1313,20 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->acc_size = acc_size;
bo->sg = sg;
if (resv) {
- bo->resv = resv;
- reservation_object_assert_held(bo->resv);
+ bo->base.resv = resv;
+ dma_resv_assert_held(bo->base.resv);
} else {
- bo->resv = &bo->ttm_resv;
+ bo->base.resv = &bo->base._resv;
+ }
+ if (!ttm_bo_uses_embedded_gem_object(bo)) {
+ /*
+ * bo.gem is not initialized, so we have to set up the
+ * struct elements we want to use regardless.
+ */
+ dma_resv_init(&bo->base._resv);
+ drm_vma_node_reset(&bo->base.vma_node);
}
- reservation_object_init(&bo->ttm_resv);
- atomic_inc(&bo->bdev->glob->bo_count);
- drm_vma_node_reset(&bo->vma_node);
+ atomic_inc(&ttm_bo_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1344,14 +1334,14 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
bo->mem.num_pages);
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = reservation_object_trylock(bo->resv);
+ locked = dma_resv_trylock(bo->base.resv);
WARN_ON(!locked);
}
@@ -1366,11 +1356,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -1385,7 +1373,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bool interruptible,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = { interruptible, false };
@@ -1468,7 +1456,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@@ -1637,8 +1625,6 @@ static int ttm_bo_global_init(void)
goto out;
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = &ttm_mem_glob;
- glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1662,10 +1648,10 @@ out:
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
- struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@@ -1695,8 +1681,6 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
- drm_vma_offset_manager_destroy(&bdev->vma_manager);
-
if (!ret)
ttm_bo_global_release();
@@ -1707,11 +1691,15 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
+ if (WARN_ON(vma_manager == NULL))
+ return -EINVAL;
+
ret = ttm_bo_global_init();
if (ret)
return ret;
@@ -1728,13 +1716,10 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager,
- DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1772,7 +1757,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
@@ -1795,13 +1780,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (reservation_object_test_signaled_rcu(bo->resv, true))
+ if (dma_resv_test_signaled_rcu(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+ timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
interruptible, timeout);
if (timeout < 0)
return timeout;
@@ -1809,36 +1794,11 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
if (timeout == 0)
return -EBUSY;
- reservation_object_add_excl_fence(bo->resv, NULL);
+ dma_resv_add_excl_fence(bo->base.resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
- int ret = 0;
-
- /*
- * Using ttm_bo_reserve makes sure the lru lists are updated.
- */
-
- ret = ttm_bo_reserve(bo, true, no_wait, NULL);
- if (unlikely(ret != 0))
- return ret;
- ret = ttm_bo_wait(bo, true, no_wait);
- if (likely(ret == 0))
- atomic_inc(&bo->cpu_writers);
- ttm_bo_unreserve(bo);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
- atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@@ -1925,7 +1885,7 @@ out:
* already swapped buffer.
*/
if (locked)
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1938,41 +1898,6 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};
- while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
- ;
+ while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
-
-/**
- * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
- * unreserved
- *
- * @bo: Pointer to buffer
- */
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
-{
- int ret;
-
- /*
- * In the absense of a wait_unlocked API,
- * Use the bo::wu_mutex to avoid triggering livelocks due to
- * concurrent use of this function. Note that this use of
- * bo::wu_mutex can go away if we change locking order to
- * mmap_sem -> bo::reserve.
- */
- ret = mutex_lock_interruptible(&bo->wu_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
- if (!ww_mutex_is_locked(&bo->resv->lock))
- goto out_unlock;
- ret = reservation_object_lock_interruptible(bo->resv, NULL);
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- if (unlikely(ret != 0))
- goto out_unlock;
- reservation_object_unlock(bo->resv);
-
-out_unlock:
- mutex_unlock(&bo->wu_mutex);
- return ret;
-}
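
Illustrative sketch, not part of this patch: with the vma_manager argument added to ttm_bo_device_init() above, a driver is expected to hand in the DRM core's offset manager rather than letting TTM create one of its own. The mydrv_* names below are hypothetical.

static int mydrv_ttm_init(struct mydrv_device *mdev)
{
	struct drm_device *drm = mdev->drm;

	/* Reuse the offset manager embedded in struct drm_device. */
	return ttm_bo_device_init(&mdev->bdev, &mydrv_bo_driver,
				  drm->anon_inode->i_mapping,
				  drm->vma_offset_manager,
				  false /* need_dma32 */);
}
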
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 9f918b992f7e..49ed55779128 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
struct ttm_transfer_obj {
struct ttm_buffer_object base;
@@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -153,7 +151,6 @@ retry:
}
return ret;
}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -222,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
@@ -503,23 +499,23 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
- mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
- drm_vma_node_reset(&fbo->base.vma_node);
- atomic_set(&fbo->base.cpu_writers, 0);
+ drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.resv = &fbo->base.ttm_resv;
- reservation_object_init(fbo->base.resv);
- ret = reservation_object_trylock(fbo->base.resv);
+ if (bo->base.resv == &bo->base._resv)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -568,7 +564,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
@@ -689,7 +685,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
int ret;
struct ttm_buffer_object *ghost_obj;
- reservation_object_add_excl_fence(bo->resv, fence);
+ dma_resv_add_excl_fence(bo->base.resv, fence);
if (evict) {
ret = ttm_bo_wait(bo, false, false);
if (ret)
@@ -716,7 +712,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -729,7 +725,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
@@ -752,7 +748,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
int ret;
- reservation_object_add_excl_fence(bo->resv, fence);
+ dma_resv_add_excl_fence(bo->base.resv, fence);
if (!evict) {
struct ttm_buffer_object *ghost_obj;
@@ -772,7 +768,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -785,7 +781,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -841,7 +837,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@@ -850,7 +846,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
- ttm_bo_unreserve(ghost);
+ dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6dacff49c1cc..389128b8c4dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,8 +42,6 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
-#define TTM_BO_VM_NUM_PREFAULT 16
-
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
@@ -71,7 +69,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
goto out_unlock;
}
@@ -106,59 +104,101 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space concurrent
+ * access to map() or unmap() completely unrelated buffer objects. TTM buffer
+ * object reservations sometimes wait for the GPU and should therefore be
+ * considered long waits. This function reserves the buffer object interruptibly,
+ * taking this into account. Starvation is avoided by the vm system not
+ * allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ * 0 on success and the bo was reserved.
+ * VM_FAULT_RETRY if blocking wait.
+ * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf)
{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct page *page;
- int err;
- int i;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
- struct vm_area_struct cvma;
-
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- if (unlikely(!reservation_object_trylock(bo->resv))) {
+ if (unlikely(!dma_resv_trylock(bo->base.resv))) {
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait_unreserved(bo);
+ if (!dma_resv_lock_interruptible(bo->base.resv,
+ NULL))
+ dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
}
- /*
- * If we'd want to change locking order to
- * mmap_sem -> bo::reserve, we'd use a blocking reserve here
- * instead of retrying the fault...
- */
- return VM_FAULT_NOPAGE;
+ if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+ return VM_FAULT_NOPAGE;
}
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvise settings and the size of the GPU object
+ * backed by the memory.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ * VM_FAULT_NOPAGE on success or pending signal
+ * VM_FAULT_SIGBUS on unspecified error
+ * VM_FAULT_OOM on out-of-memory
+ * VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int err;
+ pgoff_t i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address = vmf->address;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+
/*
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+ return VM_FAULT_SIGBUS;
if (bdev->driver->fault_reserve_notify) {
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -169,17 +209,15 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
break;
case -EBUSY:
case -ERESTARTSYS:
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
+ return VM_FAULT_NOPAGE;
default:
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
+ return VM_FAULT_SIGBUS;
}
if (bo->moving != moving) {
- spin_lock(&bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@@ -189,21 +227,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* move.
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
- if (unlikely(ret != 0)) {
- if (ret == VM_FAULT_RETRY &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* The BO has already been unreserved. */
- return ret;
- }
-
- goto out_unlock;
- }
+ if (unlikely(ret != 0))
+ return ret;
err = ttm_mem_io_lock(man, true);
- if (unlikely(err != 0)) {
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
- }
+ if (unlikely(err != 0))
+ return VM_FAULT_NOPAGE;
err = ttm_mem_io_reserve_vm(bo);
if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
@@ -211,27 +240,17 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
page_last = vma_pages(vma) + vma->vm_pgoff -
- drm_vma_node_start(&bo->vma_node);
+ drm_vma_node_start(&bo->base.vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_io_unlock;
}
- /*
- * Make a local vma copy to modify the page_prot member
- * and vm_flags if necessary. The vma parameter is protected
- * by mmap_sem in write mode.
- */
- cvma = *vma;
- cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
- if (bo->mem.bus.is_iomem) {
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
- } else {
+ prot = ttm_io_prot(bo->mem.placement, prot);
+ if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -240,24 +259,21 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
};
ttm = bo->ttm;
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm_tt_populate(ttm, &ctx)) {
+ if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
+ } else {
+ /* Iomem should not be marked encrypted */
+ prot = pgprot_decrypted(prot);
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
- for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+ for (i = 0; i < num_prefault; ++i) {
if (bo->mem.bus.is_iomem) {
- /* Iomem should not be marked encrypted */
- cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -267,26 +283,33 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->vma_node) +
+ page->index = drm_vma_node_start(&bo->base.vma_node) +
page_offset;
pfn = page_to_pfn(page);
}
- if (vma->vm_flags & VM_MIXEDMAP)
- ret = vmf_insert_mixed(&cvma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
- else
- ret = vmf_insert_pfn(&cvma, address, pfn);
-
/*
- * Somebody beat us to this PTE or prefaulting to
- * an already populated PTE, or prefaulting error.
+ * Note that the value of @prot at this point may differ from
+ * the value of @vma->vm_page_prot in the caching- and
+ * encryption bits. This is because the exact location of the
+ * data may not be known at mmap() time and may also change
+ * at arbitrary times while the data is mmap'ed.
+ * See vmf_insert_mixed_prot() for a discussion.
*/
+ if (vma->vm_flags & VM_MIXEDMAP)
+ ret = vmf_insert_mixed_prot(vma, address,
+ __pfn_to_pfn_t(pfn, PFN_DEV),
+ prot);
+ else
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
- break;
- else if (unlikely(ret & VM_FAULT_ERROR))
- goto out_io_unlock;
+ /* Never error on prefaulted PTEs */
+ if (unlikely((ret & VM_FAULT_ERROR))) {
+ if (i == 0)
+ goto out_io_unlock;
+ else
+ break;
+ }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
@@ -295,28 +318,50 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
-out_unlock:
- reservation_object_unlock(bo->resv);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t prot;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ prot = vma->vm_page_prot;
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault);
+
+void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
ttm_bo_get(bo);
}
+EXPORT_SYMBOL(ttm_bo_vm_open);
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
+EXPORT_SYMBOL(ttm_bo_vm_close);
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
unsigned long offset,
@@ -357,8 +402,8 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
return len;
}
-static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
{
unsigned long offset = (addr) - vma->vm_start;
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -394,6 +439,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
@@ -409,15 +455,16 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
struct drm_vma_offset_node *node;
struct ttm_buffer_object *bo = NULL;
- drm_vma_offset_lock_lookup(&bdev->vma_manager);
+ drm_vma_offset_lock_lookup(bdev->vma_manager);
- node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object, vma_node);
+ bo = container_of(node, struct ttm_buffer_object,
+ base.vma_node);
bo = ttm_bo_get_unless_zero(bo);
}
- drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+ drm_vma_offset_unlock_lookup(bdev->vma_manager);
if (!bo)
pr_err("Could not find buffer object to map\n");
@@ -425,6 +472,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &ttm_bo_vm_ops;
+
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = bo;
+
+ /*
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+ */
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -448,24 +517,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = &ttm_bo_vm_ops;
-
- /*
- * Note: We're transferring the bo reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = bo;
-
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
out_unref:
ttm_bo_put(bo);
@@ -473,17 +525,10 @@ out_unref:
}
EXPORT_SYMBOL(ttm_bo_mmap);
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
ttm_bo_get(bo);
-
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = bo;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
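
Illustrative sketch, not part of this patch: the newly exported ttm_bo_vm_reserve() and ttm_bo_vm_fault_reserved() are meant to be composed in driver-specific fault handlers, mirroring the generic ttm_bo_vm_fault() above. mydrv_* is hypothetical; a driver may substitute its own protection bits and prefault count.

static vm_fault_t mydrv_bo_vm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* A driver could derive prot and num_prefault from its own policy. */
	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}
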
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 957ec375a4ba..1797f04c0534 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -39,17 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- reservation_object_unlock(bo->resv);
- }
-}
-
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
+ dma_resv_unlock(bo->base.resv);
}
}
@@ -57,23 +47,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru)
+ struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- reservation_object_unlock(bo->resv);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
+ if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -130,7 +106,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv,
+ ret = dma_resv_reserve_shared(bo->base.resv,
entry->num_shared);
if (!ret)
continue;
@@ -144,16 +120,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (ret == -EDEADLK) {
if (intr) {
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
+ ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
} else {
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
+ dma_resv_lock_slow(bo->base.resv, ticket);
ret = 0;
}
}
if (!ret && entry->num_shared)
- ret = reservation_object_reserve_shared(bo->resv,
+ ret = dma_resv_reserve_shared(bo->base.resv,
entry->num_shared);
if (unlikely(ret != 0)) {
@@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (del_lru) {
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
+ struct ttm_buffer_object *bo = entry->bo;
+
if (entry->num_shared)
- reservation_object_add_shared_fence(bo->resv, fence);
- else
- reservation_object_add_excl_fence(bo->resv, fence);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
+ dma_resv_add_shared_fence(bo->base.resv, fence);
else
- ttm_bo_move_to_lru_tail(bo, NULL);
- reservation_object_unlock(bo->resv);
+ dma_resv_add_excl_fence(bo->base.resv, fence);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
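
Illustrative sketch, not part of this patch: with the del_lru parameter dropped from ttm_eu_reserve_buffers(), callers no longer manage LRU removal themselves; reserved buffers are moved to the LRU tail when they are fenced or backed off. The mydrv_* helper below is hypothetical.

static int mydrv_submit(struct ww_acquire_ctx *ticket,
			struct list_head *validate_list,
			struct list_head *duplicates,
			struct dma_fence *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, validate_list, true, duplicates);
	if (ret)
		return ret;

	/* ... validate placements and queue the command stream here ... */

	ttm_eu_fence_buffer_objects(ticket, validate_list, fence);
	return 0;
}
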
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6..acd63b70d814 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(glob->bo_glob, ctx);
+ ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0e..b40a4678c296 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ put_pages:
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..bf876faea592 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,7 +33,6 @@
* when freed).
*/
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
@@ -886,8 +885,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -991,8 +990,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
-
-#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e3a0691582ff..2ec448e1d663 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
struct ttm_bo_device *bdev = bo->bdev;
uint32_t page_flags = 0;
- reservation_object_assert_held(bo->resv);
+ dma_resv_assert_held(bo->base.resv);
if (bdev->need_dma32)
page_flags |= TTM_PAGE_FLAG_DMA32;
@@ -223,8 +223,9 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm->func->destroy(ttm);
}
-void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+static void ttm_tt_init_fields(struct ttm_tt *ttm,
+ struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
ttm->bdev = bo->bdev;
ttm->num_pages = bo->num_pages;
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 58fd31030834..d733bbc4ac0e 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -9,16 +9,18 @@
* Copyright (C) 2011 Texas Instruments
* Copyright (C) 2017 Eric Anholt
*/
+
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
#include "tve200_drm.h"
diff --git a/drivers/gpu/drm/tve200/tve200_drm.h b/drivers/gpu/drm/tve200/tve200_drm.h
index 62061b518397..5420b52ea16b 100644
--- a/drivers/gpu/drm/tve200/tve200_drm.h
+++ b/drivers/gpu/drm/tve200/tve200_drm.h
@@ -13,6 +13,18 @@
#ifndef _TVE200_DRM_H_
#define _TVE200_DRM_H_
+#include <linux/irqreturn.h>
+
+#include <drm/drm_simple_kms_helper.h>
+
+struct clk;
+struct drm_bridge;
+struct drm_connector;
+struct drm_device;
+struct drm_file;
+struct drm_mode_create_dumb;
+struct drm_panel;
+
/* Bits 2-31 are valid physical base addresses */
#define TVE200_Y_FRAME_BASE_ADDR 0x00
#define TVE200_U_FRAME_BASE_ADDR 0x04
@@ -89,9 +101,6 @@
#define TVE200_CTRL_4 0x24
#define TVE200_CTRL_4_RESET BIT(0) /* triggers reset of TVE200 */
-#include <drm/drm_gem.h>
-#include <drm/drm_simple_kms_helper.h>
-
struct tve200_drm_dev_private {
struct drm_device *drm;
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 6e695fbeb6bc..00ba9e5ce130 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -37,9 +37,9 @@
#include <linux/slab.h>
#include <linux/version.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -47,6 +47,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "tve200_drm.h"
@@ -79,8 +80,8 @@ static int tve200_modeset_init(struct drm_device *dev)
if (ret && ret != -ENODEV)
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_bridge;
@@ -109,7 +110,7 @@ static int tve200_modeset_init(struct drm_device *dev)
}
priv->panel = panel;
- priv->connector = panel->connector;
+ priv->connector = drm_panel_bridge_connector(bridge);
priv->bridge = bridge;
dev_info(dev->dev, "attached to panel %s\n",
@@ -137,8 +138,7 @@ finish:
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver tve200_drm_driver = {
- .driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.ioctls = NULL,
.fops = &drm_fops,
.name = "tve200",
@@ -153,8 +153,6 @@ static struct drm_driver tve200_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index b4d179b87f01..1f497d8f1ae5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -2,10 +2,10 @@
config DRM_UDL
tristate "DisplayLink"
depends on DRM
- depends on USB_SUPPORT
+ depends on USB
depends on USB_ARCH_HAS_HCD
- select USB
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
- Say M/Y to add support for these devices via drm/kms interfaces.
+ Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index e5bb6f757e11..b50179bb4de0 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o udl_gem.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 921561875d7f..e9671d38b4a0 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -7,11 +7,10 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
+
#include "udl_connector.h"
#include "udl_drv.h"
@@ -92,20 +91,6 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct drm_encoder*
-udl_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- return drm_encoder_find(connector->dev, NULL, enc_id);
-}
-
-static int udl_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
static void udl_connector_destroy(struct drm_connector *connector)
{
struct udl_drm_connector *udl_connector =
@@ -113,7 +98,6 @@ static void udl_connector_destroy(struct drm_connector *connector)
struct udl_drm_connector,
connector);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(udl_connector->edid);
kfree(connector);
@@ -122,35 +106,34 @@ static void udl_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
- .best_encoder = udl_best_single_encoder,
};
static const struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = udl_connector_destroy,
- .set_property = udl_connector_set_property,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct drm_connector *udl_connector_init(struct drm_device *dev)
{
struct udl_drm_connector *udl_connector;
struct drm_connector *connector;
udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
if (!udl_connector)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
connector = &udl_connector->connector;
drm_connector_init(dev, connector, &udl_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
- drm_connector_register(connector);
- drm_connector_attach_encoder(connector, encoder);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
+ return connector;
}
diff --git a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h
index 0fb0db5c4612..7f2d392df173 100644
--- a/drivers/gpu/drm/udl/udl_connector.h
+++ b/drivers/gpu/drm/udl/udl_connector.h
@@ -3,6 +3,8 @@
#include <drm/drm_crtc.h>
+struct edid;
+
struct udl_drm_connector {
struct drm_connector connector;
/* last udl_detect edid */
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
deleted file mode 100644
index a28892146f7c..000000000000
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * udl_dmabuf.c
- *
- * Copyright (c) 2014 The Chromium OS Authors
- */
-
-#include <drm/drmP.h>
-#include "udl_drv.h"
-#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
-
-struct udl_drm_dmabuf_attachment {
- struct sg_table sgt;
- enum dma_data_direction dir;
- bool is_mapped;
-};
-
-static int udl_attach_dma_buf(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct udl_drm_dmabuf_attachment *udl_attach;
-
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
- attach->dmabuf->size);
-
- udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
- if (!udl_attach)
- return -ENOMEM;
-
- udl_attach->dir = DMA_NONE;
- attach->priv = udl_attach;
-
- return 0;
-}
-
-static void udl_detach_dma_buf(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
- struct sg_table *sgt;
-
- if (!udl_attach)
- return;
-
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
- attach->dmabuf->size);
-
- sgt = &udl_attach->sgt;
-
- if (udl_attach->dir != DMA_NONE)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
- udl_attach->dir);
-
- sg_free_table(sgt);
- kfree(udl_attach);
- attach->priv = NULL;
-}
-
-static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
-{
- struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
- struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
- struct drm_device *dev = obj->base.dev;
- struct udl_device *udl = dev->dev_private;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt = NULL;
- unsigned int i;
- int page_count;
- int nents, ret;
-
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
- attach->dmabuf->size, dir);
-
- /* just return current sgt if already requested. */
- if (udl_attach->dir == dir && udl_attach->is_mapped)
- return &udl_attach->sgt;
-
- if (!obj->pages) {
- ret = udl_gem_get_pages(obj);
- if (ret) {
- DRM_ERROR("failed to map pages.\n");
- return ERR_PTR(ret);
- }
- }
-
- page_count = obj->base.size / PAGE_SIZE;
- obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
- if (IS_ERR(obj->sg)) {
- DRM_ERROR("failed to allocate sgt.\n");
- return ERR_CAST(obj->sg);
- }
-
- sgt = &udl_attach->sgt;
-
- ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("failed to alloc sgt.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mutex_lock(&udl->gem_lock);
-
- rd = obj->sg->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
-
- if (dir != DMA_NONE) {
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with iommu.\n");
- sg_free_table(sgt);
- sgt = ERR_PTR(-EIO);
- goto err_unlock;
- }
- }
-
- udl_attach->is_mapped = true;
- udl_attach->dir = dir;
- attach->priv = udl_attach;
-
-err_unlock:
- mutex_unlock(&udl->gem_lock);
- return sgt;
-}
-
-static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- /* Nothing to do. */
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
- attach->dmabuf->size, dir);
-}
-
-static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
-static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
- /* TODO */
-}
-
-static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- /* TODO */
-
- return -EINVAL;
-}
-
-static const struct dma_buf_ops udl_dmabuf_ops = {
- .attach = udl_attach_dma_buf,
- .detach = udl_detach_dma_buf,
- .map_dma_buf = udl_map_dma_buf,
- .unmap_dma_buf = udl_unmap_dma_buf,
- .map = udl_dmabuf_kmap,
- .unmap = udl_dmabuf_kunmap,
- .mmap = udl_dmabuf_mmap,
- .release = drm_gem_dmabuf_release,
-};
-
-struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
-{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &udl_dmabuf_ops;
- exp_info.size = obj->size;
- exp_info.flags = flags;
- exp_info.priv = obj;
-
- return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
-static int udl_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct udl_gem_object **obj_p)
-{
- struct udl_gem_object *obj;
- int npages;
-
- npages = size / PAGE_SIZE;
-
- *obj_p = NULL;
- obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
- if (!obj)
- return -ENOMEM;
-
- obj->sg = sg;
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (obj->pages == NULL) {
- DRM_ERROR("obj pages is NULL %d\n", npages);
- return -ENOMEM;
- }
-
- drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
- *obj_p = obj;
- return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct udl_gem_object *uobj;
- int ret;
-
- /* need to attach */
- get_device(dev->dev);
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach)) {
- put_device(dev->dev);
- return ERR_CAST(attach);
- }
-
- get_dma_buf(dma_buf);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
- if (ret)
- goto fail_unmap;
-
- uobj->base.import_attach = attach;
- uobj->flags = UDL_BO_WC;
-
- return &uobj->base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- dma_buf_put(dma_buf);
- put_device(dev->dev);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4a49facb608d..e6c1cd77d4d4 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -4,9 +4,16 @@
*/
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
+
#include "udl_drv.h"
static int udl_usb_suspend(struct usb_interface *interface,
@@ -14,36 +21,17 @@ static int udl_usb_suspend(struct usb_interface *interface,
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_disable(dev);
- return 0;
+ return drm_mode_config_helper_suspend(dev);
}
static int udl_usb_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_enable(dev);
- udl_modeset_restore(dev);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
-static const struct vm_operations_struct udl_gem_vm_ops = {
- .fault = udl_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct file_operations udl_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = udl_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static void udl_driver_release(struct drm_device *dev)
{
@@ -54,21 +42,14 @@ static void udl_driver_release(struct drm_device *dev)
}
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
.release = udl_driver_release,
/* gem hooks */
- .gem_free_object_unlocked = udl_gem_free_object,
- .gem_vm_ops = &udl_gem_vm_ops,
+ .gem_create_object = udl_driver_gem_create_object,
- .dumb_create = udl_dumb_create,
- .dumb_map_offset = udl_gem_mmap,
.fops = &udl_driver_fops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = udl_gem_prime_export,
- .gem_prime_import = udl_gem_prime_import,
+ DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -124,8 +105,14 @@ static int udl_usb_probe(struct usb_interface *interface,
DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
+ r = drm_fbdev_generic_setup(&udl->drm, 0);
+ if (r)
+ goto err_drm_dev_unregister;
+
return 0;
+err_drm_dev_unregister:
+ drm_dev_unregister(&udl->drm);
err_free:
drm_dev_put(&udl->drm);
return r;
@@ -136,7 +123,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
- udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
drm_dev_put(dev);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index a928801026c1..e67227c44cc4 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -11,9 +11,15 @@
#ifndef UDL_DRV_H
#define UDL_DRV_H
+#include <linux/mm_types.h>
#include <linux/usb.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
-#include <linux/mm_types.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct drm_mode_create_dumb;
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@@ -23,9 +29,6 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 1
-#define UDL_BO_CACHEABLE (1 << 0)
-#define UDL_BO_WC (1 << 1)
-
struct udl_device;
struct urb_node {
@@ -44,57 +47,29 @@ struct urb_list {
size_t size;
};
-struct udl_fbdev;
-
struct udl_device {
struct drm_device drm;
struct device *dev;
struct usb_device *udev;
- struct drm_crtc *crtc;
+
+ struct drm_simple_display_pipe display_pipe;
struct mutex gem_lock;
int sku_pixel_limit;
struct urb_list urbs;
- atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
- struct udl_fbdev *fbdev;
char mode_buf[1024];
uint32_t mode_buf_len;
- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
- atomic_t bytes_sent; /* to usb, after compression including overhead */
- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
};
#define to_udl(x) container_of(x, struct udl_device, drm)
-struct udl_gem_object {
- struct drm_gem_object base;
- struct page **pages;
- void *vmapping;
- struct sg_table *sg;
- unsigned int flags;
-};
-
-#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
-
-struct udl_framebuffer {
- struct drm_framebuffer base;
- struct udl_gem_object *obj;
- bool active_16; /* active on the 16-bit channel */
-};
-
-#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
-
/* modeset */
int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_restore(struct drm_device *dev);
void udl_modeset_cleanup(struct drm_device *dev);
-int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
-
-struct drm_encoder *udl_encoder_init(struct drm_device *dev);
+struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
@@ -104,42 +79,12 @@ void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
void udl_fini(struct drm_device *dev);
-int udl_fbdev_init(struct drm_device *dev);
-void udl_fbdev_cleanup(struct drm_device *dev);
-void udl_fbdev_unplug(struct drm_device *dev);
-struct drm_framebuffer *
-udl_fb_user_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
- u32 byte_offset, u32 device_byte_offset, u32 byte_width,
- int *ident_ptr, int *sent_ptr);
-
-int udl_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-
-void udl_gem_free_object(struct drm_gem_object *gem_obj);
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
- size_t size);
-struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-
-int udl_gem_get_pages(struct udl_gem_object *obj);
-void udl_gem_put_pages(struct udl_gem_object *obj);
-int udl_gem_vmap(struct udl_gem_object *obj);
-void udl_gem_vunmap(struct udl_gem_object *obj);
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t udl_gem_fault(struct vm_fault *vmf);
-
-int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
- int width, int height);
+ u32 byte_offset, u32 device_byte_offset, u32 byte_width);
+
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+ size_t size);
int udl_drop_usb(struct drm_device *dev);
@@ -153,4 +98,13 @@ int udl_drop_usb(struct drm_device *dev);
#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
+/* On/Off for driving the DisplayLink framebuffer to the display */
+#define UDL_REG_BLANK_MODE 0x1f
+
+#define UDL_BLANK_MODE_ON 0x00 /* hsync and vsync on, visible */
+#define UDL_BLANK_MODE_BLANKED 0x01 /* hsync and vsync on, blanked */
+#define UDL_BLANK_MODE_VSYNC_OFF 0x03 /* vsync off, blanked */
+#define UDL_BLANK_MODE_HSYNC_OFF 0x05 /* hsync off, blanked */
+#define UDL_BLANK_MODE_POWERDOWN 0x07 /* powered off; requires modeset */
+
#endif
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
deleted file mode 100644
index f87989e6ee51..000000000000
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include "udl_drv.h"
-
-/* dummy encoder */
-static void udl_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static void udl_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void
-udl_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static const struct drm_encoder_helper_funcs udl_helper_funcs = {
- .dpms = udl_encoder_dpms,
- .prepare = udl_encoder_prepare,
- .mode_set = udl_encoder_mode_set,
- .commit = udl_encoder_commit,
- .disable = udl_encoder_disable,
-};
-
-static const struct drm_encoder_funcs udl_enc_funcs = {
- .destroy = udl_enc_destroy,
-};
-
-struct drm_encoder *udl_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
-
- encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
- if (!encoder)
- return NULL;
-
- drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS,
- NULL);
- drm_encoder_helper_add(encoder, &udl_helper_funcs);
- encoder->possible_crtcs = 1;
- return encoder;
-}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
deleted file mode 100644
index e1116bf7b9d7..000000000000
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ /dev/null
@@ -1,528 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- *
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <linux/dma-buf.h>
-#include <linux/mem_encrypt.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include "udl_drv.h"
-
-#include <drm/drm_fb_helper.h>
-
-#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
-
-static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
-static int fb_bpp = 16;
-
-module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-
-struct udl_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct udl_framebuffer ufb;
- int fb_count;
-};
-
-#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
-
-/** Read the red component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
-
-/** Read the green component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
-
-/** Read the blue component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
-
-/** Return red/green component of a 16 bpp colour number. */
-#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
-
-/** Return green/blue component of a 16 bpp colour number. */
-#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
-
-/** Return 8 bpp colour number from red, green and blue components. */
-#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
-
-#if 0
-static uint8_t rgb8(uint32_t col)
-{
- uint8_t red = DLO_RGB_GETRED(col);
- uint8_t grn = DLO_RGB_GETGRN(col);
- uint8_t blu = DLO_RGB_GETBLU(col);
-
- return DLO_RGB8(red, grn, blu);
-}
-
-static uint16_t rgb16(uint32_t col)
-{
- uint8_t red = DLO_RGB_GETRED(col);
- uint8_t grn = DLO_RGB_GETGRN(col);
- uint8_t blu = DLO_RGB_GETBLU(col);
-
- return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
-}
-#endif
-
-int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
- int width, int height)
-{
- struct drm_device *dev = fb->base.dev;
- struct udl_device *udl = to_udl(dev);
- int i, ret;
- char *cmd;
- cycles_t start_cycles, end_cycles;
- int bytes_sent = 0;
- int bytes_identical = 0;
- struct urb *urb;
- int aligned_x;
- int log_bpp;
-
- BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
- log_bpp = __ffs(fb->base.format->cpp[0]);
-
- if (!fb->active_16)
- return 0;
-
- if (!fb->obj->vmapping) {
- ret = udl_gem_vmap(fb->obj);
- if (ret == -ENOMEM) {
- DRM_ERROR("failed to vmap fb\n");
- return 0;
- }
- if (!fb->obj->vmapping) {
- DRM_ERROR("failed to vmapping\n");
- return 0;
- }
- }
-
- aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
- width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
- x = aligned_x;
-
- if ((width <= 0) ||
- (x + width > fb->base.width) ||
- (y + height > fb->base.height))
- return -EINVAL;
-
- start_cycles = get_cycles();
-
- urb = udl_get_urb(dev);
- if (!urb)
- return 0;
- cmd = urb->transfer_buffer;
-
- for (i = y; i < y + height ; i++) {
- const int line_offset = fb->base.pitches[0] * i;
- const int byte_offset = line_offset + (x << log_bpp);
- const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
- if (udl_render_hline(dev, log_bpp, &urb,
- (char *) fb->obj->vmapping,
- &cmd, byte_offset, dev_byte_offset,
- width << log_bpp,
- &bytes_identical, &bytes_sent))
- goto error;
- }
-
- if (cmd > (char *) urb->transfer_buffer) {
- /* Send partial buffer remaining before exiting */
- int len;
- if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
- *cmd++ = 0xAF;
- len = cmd - (char *) urb->transfer_buffer;
- ret = udl_submit_urb(dev, urb, len);
- bytes_sent += len;
- } else
- udl_urb_completion(urb);
-
-error:
- atomic_add(bytes_sent, &udl->bytes_sent);
- atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
- end_cycles = get_cycles();
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
- >> 10)), /* Kcycles */
- &udl->cpu_kcycles_used);
-
- return 0;
-}
-
-static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- unsigned long start = vma->vm_start;
- unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset;
- unsigned long page, pos;
-
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
- return -EINVAL;
-
- offset = vma->vm_pgoff << PAGE_SHIFT;
-
- if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
- return -EINVAL;
-
- pos = (unsigned long)info->fix.smem_start + offset;
-
- pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
- pos, size);
-
- /* We don't want the framebuffer to be mapped encrypted */
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
- while (size > 0) {
- page = vmalloc_to_pfn((void *)pos);
- if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
- return -EAGAIN;
-
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
- }
-
- /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
- return 0;
-}
-
-/*
- * It's common for several clients to have framebuffer open simultaneously.
- * e.g. both fbcon and X. Makes things interesting.
- * Assumes caller is holding info->lock (for open and release at least)
- */
-static int udl_fb_open(struct fb_info *info, int user)
-{
- struct udl_fbdev *ufbdev = info->par;
- struct drm_device *dev = ufbdev->ufb.base.dev;
- struct udl_device *udl = to_udl(dev);
-
- /* If the USB device is gone, we don't accept new opens */
- if (drm_dev_is_unplugged(&udl->drm))
- return -ENODEV;
-
- ufbdev->fb_count++;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (fb_defio && (info->fbdefio == NULL)) {
- /* enable defio at last moment if not disabled by client */
-
- struct fb_deferred_io *fbdefio;
-
- fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
-
- if (fbdefio) {
- fbdefio->delay = DL_DEFIO_WRITE_DELAY;
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
- }
-
- info->fbdefio = fbdefio;
- fb_deferred_io_init(info);
- }
-#endif
-
- pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
- info->node, user, info, ufbdev->fb_count);
-
- return 0;
-}
-
-
-/*
- * Assumes caller is holding info->lock mutex (for open and release at least)
- */
-static int udl_fb_release(struct fb_info *info, int user)
-{
- struct udl_fbdev *ufbdev = info->par;
-
- ufbdev->fb_count--;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
- fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
- info->fbdefio = NULL;
- info->fbops->fb_mmap = udl_fb_mmap;
- }
-#endif
-
- pr_debug("released /dev/fb%d user=%d count=%d\n",
- info->node, user, ufbdev->fb_count);
-
- return 0;
-}
-
-static struct fb_ops udlfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_mmap = udl_fb_mmap,
- .fb_open = udl_fb_open,
- .fb_release = udl_fb_release,
-};
-
-static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct udl_framebuffer *ufb = to_udl_fb(fb);
- int i;
- int ret = 0;
-
- drm_modeset_lock_all(fb->dev);
-
- if (!ufb->active_16)
- goto unlock;
-
- if (ufb->obj->base.import_attach) {
- ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
- DMA_FROM_DEVICE);
- if (ret)
- goto unlock;
- }
-
- for (i = 0; i < num_clips; i++) {
- ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
- clips[i].x2 - clips[i].x1,
- clips[i].y2 - clips[i].y1);
- if (ret)
- break;
- }
-
- if (ufb->obj->base.import_attach) {
- ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
- DMA_FROM_DEVICE);
- }
-
- unlock:
- drm_modeset_unlock_all(fb->dev);
-
- return ret;
-}
-
-static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct udl_framebuffer *ufb = to_udl_fb(fb);
-
- if (ufb->obj)
- drm_gem_object_put_unlocked(&ufb->obj->base);
-
- drm_framebuffer_cleanup(fb);
- kfree(ufb);
-}
-
-static const struct drm_framebuffer_funcs udlfb_funcs = {
- .destroy = udl_user_framebuffer_destroy,
- .dirty = udl_user_framebuffer_dirty,
-};
-
-
-static int
-udl_framebuffer_init(struct drm_device *dev,
- struct udl_framebuffer *ufb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct udl_gem_object *obj)
-{
- int ret;
-
- ufb->obj = obj;
- drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
- return ret;
-}
-
-
-static int udlfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct udl_fbdev *ufbdev =
- container_of(helper, struct udl_fbdev, helper);
- struct drm_device *dev = ufbdev->helper.dev;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct udl_gem_object *obj;
- uint32_t size;
- int ret = 0;
-
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
-
- obj = udl_gem_alloc_object(dev, size);
- if (!obj)
- goto out;
-
- ret = udl_gem_vmap(obj);
- if (ret) {
- DRM_ERROR("failed to vmap fb\n");
- goto out_gfree;
- }
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out_gfree;
- }
-
- ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
- if (ret)
- goto out_gfree;
-
- fb = &ufbdev->ufb.base;
-
- ufbdev->helper.fb = fb;
-
- info->screen_base = ufbdev->ufb.obj->vmapping;
- info->fix.smem_len = size;
- info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
-
- info->fbops = &udlfb_ops;
- drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
-
- DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
- fb->width, fb->height,
- ufbdev->ufb.obj->vmapping);
-
- return ret;
-out_gfree:
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
-out:
- return ret;
-}
-
-static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
- .fb_probe = udlfb_create,
-};
-
-static void udl_fbdev_destroy(struct drm_device *dev,
- struct udl_fbdev *ufbdev)
-{
- drm_fb_helper_unregister_fbi(&ufbdev->helper);
- drm_fb_helper_fini(&ufbdev->helper);
- if (ufbdev->ufb.obj) {
- drm_framebuffer_unregister_private(&ufbdev->ufb.base);
- drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
- }
-}
-
-int udl_fbdev_init(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
- int bpp_sel = fb_bpp;
- struct udl_fbdev *ufbdev;
- int ret;
-
- ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
- if (!ufbdev)
- return -ENOMEM;
-
- udl->fbdev = ufbdev;
-
- drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, &ufbdev->helper, 1);
- if (ret)
- goto free;
-
- ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
- if (ret)
- goto fini;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&ufbdev->helper);
-free:
- kfree(ufbdev);
- return ret;
-}
-
-void udl_fbdev_cleanup(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
- if (!udl->fbdev)
- return;
-
- udl_fbdev_destroy(dev, udl->fbdev);
- kfree(udl->fbdev);
- udl->fbdev = NULL;
-}
-
-void udl_fbdev_unplug(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
- struct udl_fbdev *ufbdev;
- if (!udl->fbdev)
- return;
-
- ufbdev = udl->fbdev;
- drm_fb_helper_unlink_fbi(&ufbdev->helper);
-}
-
-struct drm_framebuffer *
-udl_fb_user_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct udl_framebuffer *ufb;
- int ret;
- uint32_t size;
-
- obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- size = ALIGN(size, PAGE_SIZE);
-
- if (size > obj->size) {
- DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
- return ERR_PTR(-ENOMEM);
- }
-
- ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
- if (ufb == NULL)
- return ERR_PTR(-ENOMEM);
-
- ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
- if (ret) {
- kfree(ufb);
- return ERR_PTR(-EINVAL);
- }
- return &ufb->base;
-}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c6ca2c09bc97..b6e26f98aa0a 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -3,226 +3,104 @@
* Copyright (C) 2012 Red Hat
*/
-#include <drm/drmP.h>
-#include "udl_drv.h"
-#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
- size_t size)
-{
- struct udl_gem_object *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (obj == NULL)
- return NULL;
-
- if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- kfree(obj);
- return NULL;
- }
-
- obj->flags = UDL_BO_CACHEABLE;
- return obj;
-}
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_prime.h>
-static int
-udl_gem_create(struct drm_file *file,
- struct drm_device *dev,
- uint64_t size,
- uint32_t *handle_p)
-{
- struct udl_gem_object *obj;
- int ret;
- u32 handle;
-
- size = roundup(size, PAGE_SIZE);
-
- obj = udl_gem_alloc_object(dev, size);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- if (ret) {
- drm_gem_object_release(&obj->base);
- kfree(obj);
- return ret;
- }
-
- drm_gem_object_put_unlocked(&obj->base);
- *handle_p = handle;
- return 0;
-}
-
-static void update_vm_cache_attr(struct udl_gem_object *obj,
- struct vm_area_struct *vma)
-{
- DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
-
- /* non-cacheable as default. */
- if (obj->flags & UDL_BO_CACHEABLE) {
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- } else if (obj->flags & UDL_BO_WC) {
- vma->vm_page_prot =
- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- } else {
- vma->vm_page_prot =
- pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- }
-}
+#include "udl_drv.h"
-int udl_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- args->size = args->pitch * args->height;
- return udl_gem_create(file, dev,
- args->size, &args->handle);
-}
+/*
+ * GEM object funcs
+ */
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int udl_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
int ret;
- ret = drm_gem_mmap(filp, vma);
+ ret = drm_gem_shmem_mmap(obj, vma);
if (ret)
return ret;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
-
- return ret;
-}
-
-vm_fault_t udl_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
- struct page *page;
- unsigned int page_offset;
-
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
- if (!obj->pages)
- return VM_FAULT_SIGBUS;
-
- page = obj->pages[page_offset];
- return vmf_insert_page(vma, vmf->address, page);
-}
-
-int udl_gem_get_pages(struct udl_gem_object *obj)
-{
- struct page **pages;
-
- if (obj->pages)
- return 0;
-
- pages = drm_gem_get_pages(&obj->base);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- obj->pages = pages;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ if (obj->import_attach)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
return 0;
}
-void udl_gem_put_pages(struct udl_gem_object *obj)
-{
- if (obj->base.import_attach) {
- kvfree(obj->pages);
- obj->pages = NULL;
- return;
- }
-
- drm_gem_put_pages(&obj->base, obj->pages, false, false);
- obj->pages = NULL;
-}
-
-int udl_gem_vmap(struct udl_gem_object *obj)
+static void *udl_gem_object_vmap(struct drm_gem_object *obj)
{
- int page_count = obj->base.size / PAGE_SIZE;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
int ret;
- if (obj->base.import_attach) {
- obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
- if (!obj->vmapping)
- return -ENOMEM;
- return 0;
- }
-
- ret = udl_gem_get_pages(obj);
+ ret = mutex_lock_interruptible(&shmem->vmap_lock);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
- if (!obj->vmapping)
- return -ENOMEM;
- return 0;
-}
+ if (shmem->vmap_use_count++ > 0)
+ goto out;
-void udl_gem_vunmap(struct udl_gem_object *obj)
-{
- if (obj->base.import_attach) {
- dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
- return;
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
+ if (obj->import_attach)
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ else
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, PAGE_KERNEL);
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+ ret = -ENOMEM;
+ goto err_put_pages;
}
- vunmap(obj->vmapping);
-
- udl_gem_put_pages(obj);
+out:
+ mutex_unlock(&shmem->vmap_lock);
+ return shmem->vaddr;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+ shmem->vmap_use_count = 0;
+ mutex_unlock(&shmem->vmap_lock);
+ return ERR_PTR(ret);
}
-void udl_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct udl_gem_object *obj = to_udl_bo(gem_obj);
+static const struct drm_gem_object_funcs udl_gem_object_funcs = {
+ .free = drm_gem_shmem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = udl_gem_object_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = udl_gem_object_mmap,
+};
- if (obj->vmapping)
- udl_gem_vunmap(obj);
-
- if (gem_obj->import_attach) {
- drm_prime_gem_destroy(gem_obj, obj->sg);
- put_device(gem_obj->dev->dev);
- }
-
- if (obj->pages)
- udl_gem_put_pages(obj);
-
- drm_gem_free_mmap_offset(gem_obj);
-}
+/*
+ * Helpers for struct drm_driver
+ */
-/* the dumb interface doesn't work with the GEM straight MMAP
- interface, it expects to do MMAP on the drm fd, like normal */
-int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+ size_t size)
{
- struct udl_gem_object *gobj;
+ struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- struct udl_device *udl = to_udl(dev);
- int ret = 0;
-
- mutex_lock(&udl->gem_lock);
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
- gobj = to_udl_bo(obj);
- ret = udl_gem_get_pages(gobj);
- if (ret)
- goto out;
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return NULL;
- *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
+ obj = &shmem->base;
+ obj->funcs = &udl_gem_object_funcs;
-out:
- drm_gem_object_put_unlocked(&gobj->base);
-unlock:
- mutex_unlock(&udl->gem_lock);
- return ret;
+ return obj;
}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 1a99c7647444..538718919916 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -7,9 +7,11 @@
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+
+#include <drm/drm.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
@@ -138,7 +140,6 @@ void udl_urb_completion(struct urb *urb)
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
- atomic_set(&udl->lost_pixels, 1);
}
}
@@ -269,7 +270,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
if (ret) {
- atomic_set(&udl->lost_pixels, 1);
DRM_INFO("wait for urb interrupted: %x available: %d\n",
ret, udl->urbs.available);
goto error;
@@ -302,7 +302,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
udl_urb_completion(urb); /* because no one else will */
- atomic_set(&udl->lost_pixels, 1);
DRM_ERROR("usb_submit_urb error %x\n", ret);
}
return ret;
@@ -336,10 +335,6 @@ int udl_init(struct udl_device *udl)
if (ret)
goto err;
- ret = udl_fbdev_init(dev);
- if (ret)
- goto err;
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -365,6 +360,4 @@ void udl_fini(struct drm_device *dev)
if (udl->urbs.count)
udl_free_urb_list(dev);
-
- udl_fbdev_cleanup(dev);
}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 793722d0c8cd..22af17959053 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -9,12 +9,21 @@
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
+#include <linux/dma-buf.h>
+
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_vblank.h>
+
#include "udl_drv.h"
+#define UDL_COLOR_DEPTH_16BPP 0
+
/*
* All DisplayLink bulk operations start with 0xAF, followed by specific code
* All operations are written to buffers which then later get sent to device
@@ -38,31 +47,9 @@ static char *udl_vidreg_unlock(char *buf)
return udl_set_register(buf, 0xFF, 0xFF);
}
-/*
- * On/Off for driving the DisplayLink framebuffer to the display
- * 0x00 H and V sync on
- * 0x01 H and V sync off (screen blank but powered)
- * 0x07 DPMS powerdown (requires modeset to come back)
- */
-static char *udl_set_blank(char *buf, int dpms_mode)
+static char *udl_set_blank_mode(char *buf, u8 mode)
{
- u8 reg;
- switch (dpms_mode) {
- case DRM_MODE_DPMS_OFF:
- reg = 0x07;
- break;
- case DRM_MODE_DPMS_STANDBY:
- reg = 0x05;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- reg = 0x01;
- break;
- case DRM_MODE_DPMS_ON:
- reg = 0x00;
- break;
- }
-
- return udl_set_register(buf, 0x1f, reg);
+ return udl_set_register(buf, UDL_REG_BLANK_MODE, mode);
}
static char *udl_set_color_depth(char *buf, u8 selection)
@@ -233,6 +220,11 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
char *buf;
int retval;
+ if (udl->mode_buf_len == 0) {
+ DRM_ERROR("No mode set\n");
+ return -EINVAL;
+ }
+
urb = udl_get_urb(dev);
if (!urb)
return -ENOMEM;
@@ -245,80 +237,152 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
return retval;
}
+static long udl_log_cpp(unsigned int cpp)
+{
+ if (WARN_ON(!is_power_of_2(cpp)))
+ return -EINVAL;
+ return __ffs(cpp);
+}
-static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
+static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
+ int width, int height)
{
- struct drm_device *dev = crtc->dev;
- struct udl_device *udl = dev->dev_private;
- int retval;
+ int x1, x2;
- if (mode == DRM_MODE_DPMS_OFF) {
- char *buf;
- struct urb *urb;
- urb = udl_get_urb(dev);
- if (!urb)
- return;
-
- buf = (char *)urb->transfer_buffer;
- buf = udl_vidreg_lock(buf);
- buf = udl_set_blank(buf, mode);
- buf = udl_vidreg_unlock(buf);
-
- buf = udl_dummy_render(buf);
- retval = udl_submit_urb(dev, urb, buf - (char *)
- urb->transfer_buffer);
- } else {
- if (udl->mode_buf_len == 0) {
- DRM_ERROR("Trying to enable DPMS with no mode\n");
- return;
- }
- udl_crtc_write_mode_to_hw(crtc);
- }
+ if (WARN_ON_ONCE(x < 0) ||
+ WARN_ON_ONCE(y < 0) ||
+ WARN_ON_ONCE(width < 0) ||
+ WARN_ON_ONCE(height < 0))
+ return -EINVAL;
-}
+ x1 = ALIGN_DOWN(x, sizeof(unsigned long));
+ x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;
+
+ clip->x1 = x1;
+ clip->y1 = y;
+ clip->x2 = x2;
+ clip->y2 = y + height;
-#if 0
-static int
-udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
return 0;
}
-static int
-udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+ int width, int height)
{
- return 0;
+ struct drm_device *dev = fb->dev;
+ struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
+ int i, ret, tmp_ret;
+ char *cmd;
+ struct urb *urb;
+ struct drm_rect clip;
+ int log_bpp;
+ void *vaddr;
+
+ ret = udl_log_cpp(fb->format->cpp[0]);
+ if (ret < 0)
+ return ret;
+ log_bpp = ret;
+
+ ret = udl_aligned_damage_clip(&clip, x, y, width, height);
+ if (ret)
+ return ret;
+ else if ((clip.x2 > fb->width) || (clip.y2 > fb->height))
+ return -EINVAL;
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+ }
+
+ vaddr = drm_gem_shmem_vmap(fb->obj[0]);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("failed to vmap fb\n");
+ goto out_dma_buf_end_cpu_access;
+ }
+
+ urb = udl_get_urb(dev);
+ if (!urb)
+ goto out_drm_gem_shmem_vunmap;
+ cmd = urb->transfer_buffer;
+
+ for (i = clip.y1; i < clip.y2; i++) {
+ const int line_offset = fb->pitches[0] * i;
+ const int byte_offset = line_offset + (clip.x1 << log_bpp);
+ const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp;
+ const int byte_width = (clip.x2 - clip.x1) << log_bpp;
+ ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+ &cmd, byte_offset, dev_byte_offset,
+ byte_width);
+ if (ret)
+ goto out_drm_gem_shmem_vunmap;
+ }
+
+ if (cmd > (char *)urb->transfer_buffer) {
+ /* Send partial buffer remaining before exiting */
+ int len;
+ if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
+ len = cmd - (char *)urb->transfer_buffer;
+ ret = udl_submit_urb(dev, urb, len);
+ } else {
+ udl_urb_completion(urb);
+ }
+
+ ret = 0;
+
+out_drm_gem_shmem_vunmap:
+ drm_gem_shmem_vunmap(fb->obj[0], vaddr);
+out_dma_buf_end_cpu_access:
+ if (import_attach) {
+ tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (tmp_ret && !ret)
+ ret = tmp_ret; /* only update ret if not set yet */
+ }
+
+ return ret;
}
-#endif
-static int udl_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+/*
+ * Simple display pipeline
+ */
+
+static const uint32_t udl_simple_display_pipe_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static enum drm_mode_status
+udl_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+static void
+udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
+ struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
- struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
+ struct drm_framebuffer *fb = plane_state->fb;
struct udl_device *udl = dev->dev_private;
+ struct drm_display_mode *mode = &crtc_state->mode;
char *buf;
char *wrptr;
- int color_depth = 0;
+ int color_depth = UDL_COLOR_DEPTH_16BPP;
- udl->crtc = crtc;
+ crtc_state->no_vblank = true;
buf = (char *)udl->mode_buf;
- /* for now we just clip 24 -> 16 - if we fix that fix this */
- /*if (crtc->fb->bits_per_pixel != 16)
- color_depth = 1; */
-
/* This first section has to do with setting the base address on the
- * controller * associated with the display. There are 2 base
- * pointers, currently, we only * use the 16 bpp segment.
- */
+ * controller associated with the display. There are 2 base
+ * pointers, currently, we only use the 16 bpp segment.
+ */
wrptr = udl_vidreg_lock(buf);
wrptr = udl_set_color_depth(wrptr, color_depth);
/* set base for 16bpp segment to 0 */
@@ -326,108 +390,95 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
/* set base for 8bpp segment to end of fb */
wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
- wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
- wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
+ wrptr = udl_set_vid_cmds(wrptr, mode);
+ wrptr = udl_set_blank_mode(wrptr, UDL_BLANK_MODE_ON);
wrptr = udl_vidreg_unlock(wrptr);
wrptr = udl_dummy_render(wrptr);
- if (old_fb) {
- struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
- uold_fb->active_16 = false;
- }
- ufb->active_16 = true;
udl->mode_buf_len = wrptr - buf;
- /* damage all of it */
- udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
- return 0;
-}
-
+ udl_handle_damage(fb, 0, 0, fb->width, fb->height);
-static void udl_crtc_disable(struct drm_crtc *crtc)
-{
- udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
+ if (!crtc_state->mode_changed)
+ return;
-static void udl_crtc_destroy(struct drm_crtc *crtc)
-{
- drm_crtc_cleanup(crtc);
- kfree(crtc);
+ /* enable display */
+ udl_crtc_write_mode_to_hw(crtc);
}
-static int udl_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
+static void
+udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct udl_framebuffer *ufb = to_udl_fb(fb);
+ struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
+ struct urb *urb;
+ char *buf;
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- if (old_fb) {
- struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
- uold_fb->active_16 = false;
- }
- ufb->active_16 = true;
-
- udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return;
- spin_lock_irq(&dev->event_lock);
- if (event)
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&dev->event_lock);
- crtc->primary->fb = fb;
+ buf = (char *)urb->transfer_buffer;
+ buf = udl_vidreg_lock(buf);
+ buf = udl_set_blank_mode(buf, UDL_BLANK_MODE_POWERDOWN);
+ buf = udl_vidreg_unlock(buf);
+ buf = udl_dummy_render(buf);
- return 0;
+ udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
}
-static void udl_crtc_prepare(struct drm_crtc *crtc)
+static int
+udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
{
+ return 0;
}
-static void udl_crtc_commit(struct drm_crtc *crtc)
+static void
+udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_plane_state)
{
- udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_crtc_helper_funcs udl_helper_funcs = {
- .dpms = udl_crtc_dpms,
- .mode_set = udl_crtc_mode_set,
- .prepare = udl_crtc_prepare,
- .commit = udl_crtc_commit,
- .disable = udl_crtc_disable,
-};
-
-static const struct drm_crtc_funcs udl_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .destroy = udl_crtc_destroy,
- .page_flip = udl_crtc_page_flip,
-};
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect rect;
-static int udl_crtc_init(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
+ if (!fb)
+ return;
- crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
- if (crtc == NULL)
- return -ENOMEM;
+ if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
+ udl_handle_damage(fb, rect.x1, rect.y1, rect.x2 - rect.x1,
+ rect.y2 - rect.y1);
+}
- drm_crtc_init(dev, crtc, &udl_crtc_funcs);
- drm_crtc_helper_add(crtc, &udl_helper_funcs);
+static const
+struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
+ .mode_valid = udl_simple_display_pipe_mode_valid,
+ .enable = udl_simple_display_pipe_enable,
+ .disable = udl_simple_display_pipe_disable,
+ .check = udl_simple_display_pipe_check,
+ .update = udl_simple_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
- return 0;
-}
+/*
+ * Modesetting
+ */
static const struct drm_mode_config_funcs udl_mode_funcs = {
- .fb_create = udl_fb_user_fb_create,
- .output_poll_changed = NULL,
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
int udl_modeset_init(struct drm_device *dev)
{
- struct drm_encoder *encoder;
+ size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
+ struct udl_device *udl = dev->dev_private;
+ struct drm_connector *connector;
+ int ret;
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 640;
@@ -437,29 +488,32 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
dev->mode_config.prefer_shadow = 0;
- dev->mode_config.preferred_depth = 24;
+ dev->mode_config.preferred_depth = 16;
dev->mode_config.funcs = &udl_mode_funcs;
- udl_crtc_init(dev);
+ connector = udl_connector_init(dev);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto err_drm_mode_config_cleanup;
+ }
- encoder = udl_encoder_init(dev);
+ format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
- udl_connector_init(dev, encoder);
+ ret = drm_simple_display_pipe_init(dev, &udl->display_pipe,
+ &udl_simple_display_pipe_funcs,
+ udl_simple_display_pipe_formats,
+ format_count, NULL, connector);
+ if (ret)
+ goto err_drm_mode_config_cleanup;
- return 0;
-}
+ drm_mode_config_reset(dev);
-void udl_modeset_restore(struct drm_device *dev)
-{
- struct udl_device *udl = dev->dev_private;
- struct udl_framebuffer *ufb;
+ return 0;
- if (!udl->crtc || !udl->crtc->primary->fb)
- return;
- udl_crtc_commit(udl->crtc);
- ufb = to_udl_fb(udl->crtc->primary->fb);
- udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+err_drm_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+ return ret;
}
void udl_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 6837f592f6ba..971927669d6b 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -7,12 +7,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
#include <asm/unaligned.h>
-#include <drm/drmP.h>
#include "udl_drv.h"
#define MAX_CMD_PIXELS 255
@@ -216,8 +212,7 @@ static void udl_compress_hline16(
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
- u32 byte_width,
- int *ident_ptr, int *sent_ptr)
+ u32 byte_width)
{
const u8 *line_start, *line_end, *next_pixel;
u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
@@ -239,12 +234,12 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
- if (udl_submit_urb(dev, urb, len))
- return 1; /* lost pixels is set */
- *sent_ptr += len;
+ int ret = udl_submit_urb(dev, urb, len);
+ if (ret)
+ return ret;
urb = udl_get_urb(dev);
if (!urb)
- return 1; /* lost_pixels is set */
+ return -EAGAIN;
*urb_ptr = urb;
cmd = urb->transfer_buffer;
cmd_end = &cmd[urb->transfer_buffer_length];
@@ -255,4 +250,3 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
return 0;
}
-
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a22b75a3a533..edd299ab53d8 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/* gem_create_object function for allocating a BO struct and doing
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 78a78938e81f..9e953ce64ef7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -6,7 +6,8 @@
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index fea597f4db8a..eaa8e9682373 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -14,16 +14,19 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/v3d_drm.h>
-#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
@@ -123,6 +126,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_CSD:
args->value = v3d_has_csd(v3d);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -134,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -144,8 +150,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+ sched = &v3d->queue[i].sched;
+ drm_sched_entity_init(&v3d_priv->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
}
file->driver_priv = v3d_priv;
@@ -166,7 +174,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
+DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
@@ -188,7 +196,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
static struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
- DRIVER_PRIME |
DRIVER_SYNCOBJ),
.open = v3d_open,
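The v3d_open() hunk above tracks the reworked scheduler-entity API, which now
takes a priority and an array of schedulers instead of run-queue pointers; a
hedged sketch of the per-file pairing (example_* names are hypothetical):

#include <drm/gpu_scheduler.h>

struct example_file_priv {
        struct drm_sched_entity entity;
};

/* one entity per open file, feeding a single scheduler */
static int example_open_entity(struct drm_gpu_scheduler *queue_sched,
                               struct example_file_priv *priv)
{
        struct drm_gpu_scheduler *sched_list[] = { queue_sched };

        return drm_sched_entity_init(&priv->entity,
                                     DRM_SCHED_PRIORITY_NORMAL,
                                     sched_list, ARRAY_SIZE(sched_list),
                                     NULL /* guilty */);
}

static void example_close_entity(struct example_file_priv *priv)
{
        drm_sched_entity_destroy(&priv->entity);
}
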
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9aad9da1eb11..9a35c555ec52 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,14 +1,23 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
-#include <linux/mm_types.h>
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
+
#include "uapi/drm/v3d_drm.h"
+struct clk;
+struct device;
+struct platform_device;
+struct reset_control;
+
#define GMP_GRANULARITY (128 * 1024)
/* Enum for each of the V3D queues. */
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 27e0f87075d9..549dde83408b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -1,17 +1,19 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */
-#include <drm/drmP.h>
-#include <drm/drm_syncobj.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/device.h>
-#include <linux/io.h>
#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_syncobj.h>
+#include <uapi/drm/v3d_drm.h>
-#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
@@ -407,7 +409,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
@@ -493,7 +495,7 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(job->bo[i]->resv,
+ dma_resv_add_excl_fence(job->bo[i]->resv,
job->done_fence);
}
@@ -528,13 +530,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_v3d_submit_cl *args = data;
struct v3d_bin_job *bin = NULL;
struct v3d_render_job *render;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0) {
- DRM_INFO("pad must be zero: %d\n", args->pad);
+ if (args->flags != 0 &&
+ args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
@@ -555,13 +560,17 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->bcl_start != args->bcl_end) {
bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
- if (!bin)
+ if (!bin) {
+ v3d_job_put(&render->base);
return -ENOMEM;
+ }
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
v3d_job_put(&render->base);
+ kfree(bin);
return ret;
}
@@ -573,12 +582,31 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
bin->render = render;
}
- ret = v3d_lookup_bos(dev, file_priv, &render->base,
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+ if (!clean_job) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ if (ret) {
+ kfree(clean_job);
+ clean_job = NULL;
+ goto fail;
+ }
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
if (ret)
goto fail;
@@ -597,28 +625,44 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ if (ret)
+ goto fail_unreserve;
+ ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail_unreserve;
+ }
+
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
- &render->base,
+ last_job,
&acquire_ctx,
args->out_sync,
- render->base.done_fence);
+ last_job->done_fence);
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(render->base.bo,
- render->base.bo_count, &acquire_ctx);
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
fail:
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return ret;
}
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 268d8a889ac5..662e67279a7b 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -13,6 +13,8 @@
* current job can make progress.
*/
+#include <linux/platform_device.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 56ba510f21a2..45fe135d6e43 100644
--- a/drivers/gpu/drm/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -4,6 +4,8 @@ config DRM_VBOXVIDEO
depends on DRM && X86 && PCI
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
select GENERIC_ALLOCATOR
help
This is a KMS driver for the virtual Graphics Card used in
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 1224f313af0c..f2e968b5ffa6 100644
--- a/drivers/gpu/drm/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
- vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
- vbox_mode.o vbox_prime.o vbox_ttm.o
+ vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_mode.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 02537ab9cc08..8512d970a09f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
- .fb_probe = vboxfb_create,
-};
-
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct vbox_private *vbox;
@@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
- &vbox_fb_helper_funcs, 32,
- vbox->num_crtcs);
+ ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
if (ret)
goto err_irq_fini;
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
- goto err_fbdev_fini;
+ goto err_irq_fini;
return 0;
-err_fbdev_fini:
- vbox_fbdev_fini(vbox);
err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
@@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
struct vbox_private *vbox = pci_get_drvdata(pdev);
drm_dev_unregister(&vbox->ddev);
- vbox_fbdev_fini(vbox);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
@@ -189,14 +181,11 @@ static struct pci_driver vbox_pci_driver = {
#endif
};
-static const struct file_operations vbox_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(vbox_fops);
static struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
@@ -210,17 +199,6 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
DRM_GEM_VRAM_DRIVER,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = vbox_gem_prime_pin,
- .gem_prime_unpin = vbox_gem_prime_unpin,
- .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
- .gem_prime_vmap = vbox_gem_prime_vmap,
- .gem_prime_vunmap = vbox_gem_prime_vunmap,
- .gem_prime_mmap = vbox_gem_prime_mmap,
};
static int __init vbox_init(void)
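The vbox_drv.c changes above swap the hand-rolled fbdev path for generic fbdev
emulation plus the stock GEM fops; for a VRAM-helper based driver the pairing
looks roughly like the sketch below (example_* names are hypothetical, and the
register/fbdev ordering follows the helper documentation rather than this
exact patch).

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_print.h>

DEFINE_DRM_GEM_FOPS(example_fops);

static struct drm_driver example_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
        .lastclose = drm_fb_helper_lastclose,
        .name = "example",
        .fops = &example_fops,
        DRM_GEM_VRAM_DRIVER,
};

/* tail of the probe path, once the hardware is fully set up */
static int example_register(struct drm_device *dev)
{
        int ret;

        ret = drm_dev_register(dev, 0);
        if (ret)
                return ret;

        /* generic fbdev emulation replaces the driver's own fbdev code */
        ret = drm_fbdev_generic_setup(dev, 32);
        if (ret)
                DRM_WARN("fbdev setup failed: %d\n", ret);

        return 0;
}
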
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 9028f946bc06..87421903816c 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -16,12 +16,9 @@
#include <linux/string.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
-
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_ch_setup.h"
@@ -48,16 +45,9 @@
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
-struct vbox_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct vbox_private {
/* Must be first; or we must define our own release callback */
struct drm_device ddev;
- struct drm_fb_helper fb_helper;
- struct vbox_framebuffer afb;
u8 __iomem *guest_heap;
u8 __iomem *vbva_buffers;
@@ -137,7 +127,6 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
-#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
@@ -148,37 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects);
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-void vbox_fbdev_fini(struct vbox_private *vbox);
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj);
-
-/* vbox_prime.c */
-int vbox_gem_prime_pin(struct drm_gem_object *obj);
-void vbox_gem_prime_unpin(struct drm_gem_object *obj);
-struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *vbox_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table);
-void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
-void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int vbox_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *area);
-
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
deleted file mode 100644
index 8f74bcffc034..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_fb.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2013-2017 Oracle Corporation
- * This file is based on ast_fb.c
- * Copyright 2012 Red Hat Inc.
- * Authors: Dave Airlie <airlied@redhat.com>
- * Michael Thayer <michael.thayer@oracle.com,
- */
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "vbox_drv.h"
-#include "vboxvideo.h"
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
-static struct fb_deferred_io vbox_defio = {
- .delay = HZ / 30,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-#endif
-
-static struct fb_ops vboxfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
-};
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct vbox_private *vbox =
- container_of(helper, struct vbox_private, fb_helper);
- struct pci_dev *pdev = vbox->ddev.pdev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- struct drm_gem_object *gobj;
- struct drm_gem_vram_object *gbo;
- int size, ret;
- s64 gpu_addr;
- u32 pitch;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- mode_cmd.pitches[0] = pitch;
-
- size = pitch * mode_cmd.height;
-
- ret = vbox_gem_create(vbox, size, true, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
- if (ret)
- return ret;
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- info->screen_size = size;
- info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(info->screen_base))
- return PTR_ERR(info->screen_base);
-
- fb = &vbox->afb.base;
- helper->fb = fb;
-
- info->fbops = &vboxfb_ops;
-
- /*
- * This seems to be done for safety checking that the framebuffer
- * is not registered twice by different drivers.
- */
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-
- drm_fb_helper_fill_info(info, helper, sizes);
-
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0)
- return (int)gpu_addr;
- info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
- info->fix.smem_len = vbox->available_vram_size - gpu_addr;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- info->fbdefio = &vbox_defio;
- fb_deferred_io_init(info);
-#endif
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
-
- return 0;
-}
-
-void vbox_fbdev_fini(struct vbox_private *vbox)
-{
- struct vbox_framebuffer *afb = &vbox->afb;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
- fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
-#endif
-
- drm_fb_helper_unregister_fbi(&vbox->fb_helper);
-
- if (afb->obj) {
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj);
-
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
-
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&vbox->fb_helper);
-
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 18693e2bf72a..9dcab115a261 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,22 +11,12 @@
#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
-static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
-
- if (vbox_fb->obj)
- drm_gem_object_put_unlocked(vbox_fb->obj);
-
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
void vbox_report_caps(struct vbox_private *vbox)
{
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
@@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox)
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
-/* Send information about dirty rectangles to VBVA. */
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- struct vbox_private *vbox = fb->dev->dev_private;
- struct drm_display_mode *mode;
- struct drm_crtc *crtc;
- int crtc_x, crtc_y;
- unsigned int i;
-
- mutex_lock(&vbox->hw_mutex);
- list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
- if (crtc->primary->state->fb != fb)
- continue;
-
- mode = &crtc->state->mode;
- crtc_x = crtc->primary->state->src_x >> 16;
- crtc_y = crtc->primary->state->src_y >> 16;
-
- for (i = 0; i < num_rects; ++i) {
- struct vbva_cmd_hdr cmd_hdr;
- unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
-
- if (rects[i].x1 > crtc_x + mode->hdisplay ||
- rects[i].y1 > crtc_y + mode->vdisplay ||
- rects[i].x2 < crtc_x ||
- rects[i].y2 < crtc_y)
- continue;
-
- cmd_hdr.x = (s16)rects[i].x1;
- cmd_hdr.y = (s16)rects[i].y1;
- cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
- cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
-
- if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
- vbox->guest_pool))
- continue;
-
- vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
- &cmd_hdr, sizeof(cmd_hdr));
- vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
- }
- }
- mutex_unlock(&vbox->hw_mutex);
-}
-
-static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs vbox_fb_funcs = {
- .destroy = vbox_user_framebuffer_destroy,
- .dirty = vbox_user_framebuffer_dirty,
-};
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
- vbox_fb->obj = obj;
- ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int vbox_accel_init(struct vbox_private *vbox)
{
struct vbva_buffer *vbva;
@@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox)
gen_pool_destroy(vbox->guest_pool);
pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
-
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
- size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
-
- *obj = &gbo->gem;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index e1e48ba919eb..19612132c8a3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -13,7 +13,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
if (!fb1) {
fb1 = fb;
- if (to_vbox_framebuffer(fb1) == &vbox->afb)
+ if (fb1 == vbox->ddev.fb_helper->fb)
break;
} else if (fb != fb1) {
single_framebuffer = false;
@@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips, i;
vbox_crtc_set_base_and_mode(crtc, fb,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
+
+ /* Send information about dirty rectangles to VBVA. */
+
+ clips = drm_plane_get_damage_clips(plane->state);
+ num_clips = drm_plane_get_damage_clips_count(plane->state);
+
+ if (!num_clips)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ for (i = 0; i < num_clips; ++i, ++clips) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
+ cmd_hdr.x = (s16)clips->x1;
+ cmd_hdr.y = (s16)clips->y1;
+ cmd_hdr.w = (u16)clips->x2 - clips->x1;
+ cmd_hdr.h = (u16)clips->y2 - clips->y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+
+ mutex_unlock(&vbox->hw_mutex);
}
static void vbox_primary_atomic_disable(struct drm_plane *plane,
@@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
old_state->src_y >> 16);
}
-static int vbox_primary_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
-
- return ret;
-}
-
-static void vbox_primary_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static int vbox_cursor_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
u32 width = plane->state->crtc_w;
u32 height = plane->state->crtc_h;
size_t data_size, mask_size;
@@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
mutex_unlock(&vbox->hw_mutex);
}
-static int vbox_cursor_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-}
-
-static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!plane->state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static const u32 vbox_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
- .prepare_fb = vbox_cursor_prepare_fb,
- .cleanup_fb = vbox_cursor_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
@@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
.atomic_check = vbox_primary_atomic_check,
.atomic_update = vbox_primary_atomic_update,
.atomic_disable = vbox_primary_atomic_disable,
- .prepare_fb = vbox_primary_prepare_fb,
- .cleanup_fb = vbox_primary_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
@@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *vbox_user_framebuffer_create(
- struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct vbox_private *vbox =
- container_of(dev, struct vbox_private, ddev);
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- int ret = -ENOMEM;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
- if (!vbox_fb)
- goto err_unref_obj;
-
- ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
- if (ret)
- goto err_free_vbox_fb;
-
- return &vbox_fb->base;
-
-err_free_vbox_fb:
- kfree(vbox_fb);
-err_unref_obj:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
static const struct drm_mode_config_funcs vbox_mode_funcs = {
- .fb_create = vbox_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
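The vbox_mode.c rework above moves dirty-rectangle reporting into
atomic_update via the damage helper; the general pattern is to create user
framebuffers with drm_gem_fb_create_with_dirty() and walk the per-plane clips,
roughly as in this sketch (example_* names are hypothetical, and the actual
flush to the device is only hinted at in a comment).

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>

static const struct drm_mode_config_funcs example_mode_funcs = {
        /* wires the dirtyfb ioctl up to plane damage for user framebuffers */
        .fb_create = drm_gem_fb_create_with_dirty,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static void example_plane_atomic_update(struct drm_plane *plane,
                                        struct drm_plane_state *old_state)
{
        struct drm_plane_state *state = plane->state;
        struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
        uint32_t num_clips = drm_plane_get_damage_clips_count(state);
        uint32_t i;

        /* an empty clip list usually means "flush the whole plane" */
        for (i = 0; i < num_clips; i++)
                /* a real driver would flush this rectangle to the device */
                DRM_DEBUG_KMS("damage %d,%d-%d,%d\n",
                              clips[i].x1, clips[i].y1,
                              clips[i].x2, clips[i].y2);
}
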
diff --git a/drivers/gpu/drm/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c
deleted file mode 100644
index 702b1aa53494..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_prime.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2017 Oracle Corporation
- * Copyright 2017 Canonical
- * Authors: Andreas Pokorny
- */
-
-#include "vbox_drv.h"
-
-/*
- * Based on qxl_prime.c:
- * Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with vboxvideo
- */
-
-int vbox_gem_prime_pin(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
-}
-
-void vbox_gem_prime_unpin(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
-}
-
-struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-struct drm_gem_object *vbox_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- WARN_ONCE(1, "not implemented");
-}
-
-int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
-{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index b82595a9ed0f..976423d0c3cc 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -17,8 +17,7 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- vbox->available_vram_size,
- &drm_gem_vram_mm_funcs);
+ vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 7c2317efd5b7..118e8a426b1a 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -22,9 +22,9 @@ config DRM_VC4
our display setup.
config DRM_VC4_HDMI_CEC
- bool "Broadcom VC4 HDMI CEC Support"
- depends on DRM_VC4
- select CEC_CORE
- help
+ bool "Broadcom VC4 HDMI CEC Support"
+ depends on DRM_VC4
+ select CEC_CORE
+ help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index a75a2f98b82f..72d30d90b856 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -655,8 +655,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct dma_buf *
-vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
+struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
struct dma_buf *dmabuf;
@@ -678,7 +677,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
return ERR_PTR(ret);
}
- dmabuf = drm_gem_prime_export(dev, obj, flags);
+ dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf))
vc4_bo_dec_usecnt(bo);
@@ -791,8 +790,6 @@ vc4_prime_import_sg_table(struct drm_device *dev,
if (IS_ERR(obj))
return obj;
- obj->resv = attach->dmabuf->resv;
-
return obj;
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 5ea8db74418a..b00e20f5ce05 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -29,15 +29,18 @@
* ones that set the clock.
*/
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <linux/component.h>
-#include <linux/of_device.h>
+#include <drm/drm_vblank.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -991,7 +994,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
- if (vc4_state->mm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->mm)) {
unsigned long flags;
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4829a00c16b0..b61b2d3407b5 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -7,7 +7,6 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 8a27a6acee61..c586325de2a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -249,7 +249,8 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
}
if (panel)
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
return drm_bridge_attach(dpi->encoder, bridge, NULL);
}
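The vc4_dpi.c hunk reflects the rename of the panel-bridge helpers to their
_typed variants once the connector type is passed explicitly; a hedged sketch
of the managed flavour (example_attach_panel() is hypothetical):

#include <linux/err.h>

#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>

static int example_attach_panel(struct device *dev,
                                struct drm_encoder *encoder,
                                struct drm_panel *panel)
{
        struct drm_bridge *bridge;

        /* devm_ variant: the bridge is torn down with the device */
        bridge = devm_drm_panel_bridge_add_typed(dev, panel,
                                                 DRM_MODE_CONNECTOR_DPI);
        if (IS_ERR(bridge))
                return PTR_ERR(bridge);

        return drm_bridge_attach(encoder, bridge, NULL);
}
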
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index bf11930e40e1..5e6fb6c2307f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -23,16 +23,21 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_vblank.h>
#include "uapi/drm/vc4_drm.h"
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -177,7 +182,6 @@ static struct drm_driver vc4_drm_driver = {
DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER |
- DRIVER_PRIME |
DRIVER_SYNCOBJ),
.open = vc4_open,
.postclose = vc4_close,
@@ -199,7 +203,6 @@ static struct drm_driver vc4_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_export = vc4_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
@@ -237,8 +240,7 @@ static void vc4_match_add_drivers(struct device *dev,
struct device_driver *drv = &drivers[i]->driver;
struct device *p = NULL, *d;
- while ((d = bus_find_device(&platform_bus_type, p, drv,
- (void *)platform_bus_type.match))) {
+ while ((d = platform_find_device_by_driver(p, drv))) {
put_device(p);
component_match_add(dev, match, compare_dev, d);
p = d;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 616c011bcb82..6627b20c99e9 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -3,16 +3,23 @@
* Copyright (C) 2015 Broadcom
*/
-#include <linux/mm_types.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
+#include <linux/delay.h>
+#include <linux/refcount.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_syncobj.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_modeset_lock.h>
#include "uapi/drm/vc4_drm.h"
+struct drm_device;
+struct drm_gem_object;
+
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
* this.
*/
@@ -705,8 +712,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 1db39b570cf4..fd8a2eb60505 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -18,22 +18,26 @@
* hopefully present.
*/
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/component.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -495,6 +499,7 @@ struct vc4_dsi {
struct mipi_dsi_host dsi_host;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
+ struct list_head bridge_chain;
void __iomem *regs;
@@ -748,10 +753,19 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
+ struct drm_bridge *iter;
+
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->disable)
+ iter->funcs->disable(iter);
+ }
- drm_bridge_disable(dsi->bridge);
vc4_dsi_ulps(dsi, true);
- drm_bridge_post_disable(dsi->bridge);
+
+ list_for_each_entry_from(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->post_disable)
+ iter->funcs->post_disable(iter);
+ }
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -819,6 +833,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
bool debug_dump_regs = false;
+ struct drm_bridge *iter;
unsigned long hs_clock;
u32 ui_ns;
/* Minimum LP state duration in escape clock cycles. */
@@ -1051,7 +1066,10 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
- drm_bridge_pre_enable(dsi->bridge);
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
+ }
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
DSI_PORT_WRITE(DISP0_CTRL,
@@ -1068,7 +1086,10 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP0_ENABLE);
}
- drm_bridge_enable(dsi->bridge);
+ list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->enable)
+ iter->funcs->enable(iter);
+ }
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
@@ -1456,6 +1477,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
GFP_KERNEL);
if (!vc4_dsi_encoder)
return -ENOMEM;
+
+ INIT_LIST_HEAD(&dsi->bridge_chain);
vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
vc4_dsi_encoder->dsi = dsi;
dsi->encoder = &vc4_dsi_encoder->base.base;
@@ -1572,8 +1595,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
}
if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
+ dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
}
@@ -1606,7 +1629,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
* from our driver, since we need to sequence them within the
* encoder's enable/disable paths.
*/
- dsi->encoder->bridge = NULL;
+ list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
if (dsi->port == 0)
vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
@@ -1628,6 +1651,11 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
if (dsi->bridge)
pm_runtime_disable(dev);
+ /*
+ * Restore the bridge_chain so the bridge detach procedure can happen
+ * normally.
+ */
+ list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
vc4_dsi_encoder_destroy(dsi->encoder);
if (dsi->port == 1)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 84795d928f20..e1cfc3ccd05a 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -29,6 +29,8 @@
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>
+#include <drm/drm_syncobj.h>
+
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -541,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -552,7 +554,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
}
}
@@ -566,7 +568,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
for (i = 0; i < exec->bo_count; i++) {
struct drm_gem_object *bo = &exec->bo[i]->base;
- ww_mutex_unlock(&bo->resv->lock);
+ dma_resv_unlock(bo->resv);
}
ww_acquire_fini(acquire_ctx);
@@ -593,8 +595,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
retry:
if (contended_lock != -1) {
bo = &exec->bo[contended_lock]->base;
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- acquire_ctx);
+ ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
return ret;
@@ -607,19 +608,19 @@ retry:
bo = &exec->bo[i]->base;
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+ ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
bo = &exec->bo[j]->base;
- ww_mutex_unlock(&bo->resv->lock);
+ dma_resv_unlock(bo->resv);
}
if (contended_lock != -1 && contended_lock >= i) {
bo = &exec->bo[contended_lock]->base;
- ww_mutex_unlock(&bo->resv->lock);
+ dma_resv_unlock(bo->resv);
}
if (ret == -EDEADLK) {
@@ -640,7 +641,7 @@ retry:
for (i = 0; i < exec->bo_count; i++) {
bo = &exec->bo[i]->base;
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = dma_resv_reserve_shared(bo->resv, 1);
if (ret) {
vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
return ret;
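The vc4_gem.c hunks above are part of the reservation_object to dma_resv
rename; the basic lock/attach sequence with the new names looks roughly like
this sketch (example_attach_fence() is hypothetical):

#include <linux/dma-resv.h>

#include <drm/drm_gem.h>

static int example_attach_fence(struct drm_gem_object *obj,
                                struct dma_fence *fence, bool write)
{
        int ret;

        ret = dma_resv_lock_interruptible(obj->resv, NULL);
        if (ret)
                return ret;

        if (write) {
                dma_resv_add_excl_fence(obj->resv, fence);
        } else {
                ret = dma_resv_reserve_shared(obj->resv, 1);
                if (!ret)
                        dma_resv_add_shared_fence(obj->resv, fence);
        }

        dma_resv_unlock(obj->resv);
        return ret;
}
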
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ee7d4e7b0ee3..cea18dc15f77 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -267,7 +267,8 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
};
static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
- struct drm_encoder *encoder)
+ struct drm_encoder *encoder,
+ struct i2c_adapter *ddc)
{
struct drm_connector *connector;
struct vc4_hdmi_connector *hdmi_connector;
@@ -281,8 +282,10 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
hdmi_connector->encoder = encoder;
- drm_connector_init(dev, connector, &vc4_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(dev, connector,
+ &vc4_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/* Create and attach TV margin props to this connector. */
@@ -398,10 +401,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- frame.avi.right_bar = cstate->tv.margins.right;
- frame.avi.left_bar = cstate->tv.margins.left;
- frame.avi.top_bar = cstate->tv.margins.top;
- frame.avi.bottom_bar = cstate->tv.margins.bottom;
+ drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
}
@@ -1285,6 +1285,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+ struct cec_connector_info conn_info;
+#endif
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
@@ -1395,7 +1398,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
- hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
+ hdmi->connector =
+ vc4_hdmi_connector_init(drm, hdmi->encoder, hdmi->ddc);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
goto err_destroy_encoder;
@@ -1403,13 +1407,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_DRM_VC4_HDMI_CEC
hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4, "vc4",
- CEC_CAP_TRANSMIT |
- CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH |
- CEC_CAP_RC, 1);
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_destroy_conn;
+
+ cec_fill_conn_info_from_drm(&conn_info, hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
+
HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 0f633bef6b9d..5a43659da319 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -19,8 +19,11 @@
* each CRTC.
*/
-#include <drm/drm_atomic_helper.h>
#include <linux/component.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic_helper.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -312,7 +315,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
- if (vc4->hvs->mitchell_netravali_filter.allocated)
+ if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
drm_mm_takedown(&vc4->hvs->dlist_mm);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 70d079b7b39f..78d4fb0499e3 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -11,12 +11,14 @@
* crtc, HDMI encoder).
*/
-#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 0a0207c350a5..4934127f0d76 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -17,11 +17,14 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_uapi.h>
#include "uapi/drm/vc4_drm.h"
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -175,7 +178,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane,
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
- if (vc4_state->lbm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
@@ -554,7 +557,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
/* Allocate the LBM memory that the HVS will use for temporary
* storage due to our scaling/format conversion.
*/
- if (!vc4_state->lbm.allocated) {
+ if (!drm_mm_node_allocated(&vc4_state->lbm)) {
int ret;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
@@ -1123,7 +1126,6 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
- struct dma_fence *fence;
int ret;
if (!state->fb)
@@ -1131,8 +1133,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
- fence = reservation_object_get_excl_rcu(bo->base.base.resv);
- drm_atomic_set_fence_for_plane(state, fence);
+ drm_gem_fb_prepare_fb(plane, state);
if (plane->state->fb == state->fb)
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 96f91c1b4b6e..bf720206727f 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -7,18 +7,20 @@
* Boris Brezillon <boris.brezillon@bootlin.com>
*/
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_writeback.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -229,7 +231,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
int i;
conn_state = drm_atomic_get_new_connector_state(state, conn);
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -269,8 +271,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
u32 ctrl;
int i;
- if (WARN_ON(!conn_state->writeback_job ||
- !conn_state->writeback_job->fb))
+ if (WARN_ON(!conn_state->writeback_job))
return;
mode = &conn_state->crtc->state->adjusted_mode;
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index fee4f90e71aa..cea77a21b205 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -7,7 +7,11 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_irq.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 11a8f99ba18c..909eba43664a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -30,10 +30,17 @@
* software renderer and the X server for efficient buffer sharing.
*/
+#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/ramfs.h>
+#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
#include "vgem_drv.h"
#define DRIVER_NAME "vgem"
@@ -189,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_put_unlocked(&obj->base);
- if (ret)
+ if (ret) {
+ drm_gem_object_put_unlocked(&obj->base);
return ERR_PTR(ret);
+ }
return &obj->base;
}
@@ -214,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+ drm_gem_object_put_unlocked(gem_object);
+
+ DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
@@ -246,8 +256,8 @@ unref:
}
static struct drm_ioctl_desc vgem_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -427,8 +437,7 @@ static void vgem_release(struct drm_device *dev)
}
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER,
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
.release = vgem_release,
.open = vgem_open,
.postclose = vgem_postclose,
@@ -446,7 +455,6 @@ static struct drm_driver vgem_driver = {
.gem_prime_pin = vgem_prime_pin,
.gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
.gem_prime_get_sg_table = vgem_prime_get_sg_table,
.gem_prime_vmap = vgem_prime_vmap,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 5c8f6d619ff3..0ed300317f87 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -29,7 +29,6 @@
#ifndef _VGEM_DRV_H_
#define _VGEM_DRV_H_
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index eb17c0cd3727..9268f6fc3f66 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -21,7 +21,9 @@
*/
#include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
+
+#include <drm/drm_file.h>
#include "vgem_drv.h"
@@ -100,22 +102,6 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
return &fence->base;
}
-static int attach_dmabuf(struct drm_device *dev,
- struct drm_gem_object *obj)
-{
- struct dma_buf *dmabuf;
-
- if (obj->dma_buf)
- return 0;
-
- dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- obj->dma_buf = dmabuf;
- return 0;
-}
-
/*
* vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
*
@@ -142,7 +128,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
{
struct drm_vgem_fence_attach *arg = data;
struct vgem_file *vfile = file->driver_priv;
- struct reservation_object *resv;
+ struct dma_resv *resv;
struct drm_gem_object *obj;
struct dma_fence *fence;
int ret;
@@ -157,10 +143,6 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
if (!obj)
return -ENOENT;
- ret = attach_dmabuf(dev, obj);
- if (ret)
- goto err;
-
fence = vgem_fence_create(vfile, arg->flags);
if (!fence) {
ret = -ENOMEM;
@@ -168,8 +150,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
/* Check for a conflicting fence */
- resv = obj->dma_buf->resv;
- if (!reservation_object_test_signaled_rcu(resv,
+ resv = obj->resv;
+ if (!dma_resv_test_signaled_rcu(resv,
arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
@@ -177,12 +159,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
ret = 0;
- reservation_object_lock(resv, NULL);
+ dma_resv_lock(resv, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
- reservation_object_add_excl_fence(resv, fence);
- else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
- reservation_object_add_shared_fence(resv, fence);
- reservation_object_unlock(resv);
+ dma_resv_add_excl_fence(resv, fence);
+ else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
+ dma_resv_add_shared_fence(resv, fence);
+ dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index d17d8f245c1a..1208445e341d 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -34,8 +34,15 @@
* Thomas Hellstrom.
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm.h>
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
#include "via_3d_reg.h"
@@ -430,14 +437,14 @@ static int via_hook_segment(drm_via_private_t *dev_priv,
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
while (diff == 0 && count--) {
- paused = (VIA_READ(0x41c) & 0x80000000);
+ paused = (via_read(dev_priv, 0x41c) & 0x80000000);
if (paused)
break;
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
}
- paused = VIA_READ(0x41c) & 0x80000000;
+ paused = via_read(dev_priv, 0x41c) & 0x80000000;
if (paused && !no_pci_fire) {
reader = *(dev_priv->hw_addr_ptr);
@@ -454,10 +461,10 @@ static int via_hook_segment(drm_via_private_t *dev_priv,
* doesn't make a difference.
*/
- VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
- VIA_READ(VIA_REG_TRANSPACE);
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
}
}
return paused;
@@ -467,10 +474,10 @@ static int via_wait_idle(drm_via_private_t *dev_priv)
{
int count = 10000000;
- while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+ while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
;
- while (count && (VIA_READ(VIA_REG_STATUS) &
+ while (count && (via_read(dev_priv, VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY)))
--count;
@@ -536,21 +543,21 @@ static void via_cmdbuf_start(drm_via_private_t *dev_priv)
via_flush_write_combine();
(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
- VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- VIA_WRITE(VIA_REG_TRANSPACE, command);
- VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
- VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, command);
+ via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
wmb();
- VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
- VIA_READ(VIA_REG_TRANSPACE);
+ via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
dev_priv->dma_diff = 0;
count = 10000000;
- while (!(VIA_READ(0x41c) & 0x80000000) && count--);
+ while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
reader = *(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 062067438f1d..551fa31629af 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -34,13 +34,16 @@
* the same DMA mappings?
*/
-#include <drm/drmP.h>
-#include <drm/via_drm.h>
-#include "via_drv.h"
-#include "via_dmablit.h"
-
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/via_drm.h>
+
+#include "via_dmablit.h"
+#include "via_drv.h"
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
@@ -171,7 +174,6 @@ via_map_blit_for_device(struct pci_dev *pdev,
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
- struct page *page;
int i;
switch (vsg->state) {
@@ -186,13 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
kfree(vsg->desc_pages);
/* fall through */
case dr_via_pages_locked:
- for (i = 0; i < vsg->num_pages; ++i) {
- if (NULL != (page = vsg->pages[i])) {
- if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
- SetPageDirty(page);
- put_page(page);
- }
- }
+ unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
/* fall through */
case dr_via_pages_alloc:
vfree(vsg->pages);
@@ -214,16 +211,16 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
- VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
- VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+ via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+ via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
wmb();
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+ via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}
/*
@@ -242,7 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
vsg->num_pages,
vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
@@ -291,7 +288,7 @@ via_abort_dmablit(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}
static void
@@ -299,7 +296,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@@ -330,7 +327,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
done_transfer = blitq->is_active &&
- ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
@@ -349,7 +346,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
* Clear transfer done flag.
*/
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
blitq->is_active = 0;
blitq->aborting = 0;
@@ -436,7 +433,7 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * HZ,
+ VIA_WAIT_ON(ret, *queue, 3 * HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
@@ -687,7 +684,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
while (blitq->num_free == 0) {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
+ VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
if (ret)
return (-EINTR == ret) ? -EAGAIN : ret;
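
The via_dmablit.c hunks above move the driver from get_user_pages_fast()/put_page() to the pin_user_pages API, where unpin_user_pages_dirty_lock() both dirties the pages (when the device wrote into them) and unpins them in one call. A stand-alone sketch of that pairing, with hypothetical helper names and partial-pin handling of my own:

/* Pin a user buffer for DMA and release it again; 'from_device' means the
 * device writes into the pages, so they need FOLL_WRITE on pin and must be
 * marked dirty on unpin.
 */
static int pin_user_buffer_sketch(unsigned long uaddr, unsigned long npages,
                                  bool from_device, struct page **pages)
{
        int ret;

        ret = pin_user_pages_fast(uaddr, npages,
                                  from_device ? FOLL_WRITE : 0, pages);
        if (ret < 0)
                return ret;
        if (ret != npages) {
                /* partial pin: drop what we got and fail the request */
                unpin_user_pages(pages, ret);
                return -EFAULT;
        }
        return 0;
}

static void unpin_user_buffer_sketch(struct page **pages,
                                     unsigned long npages, bool from_device)
{
        unpin_user_pages_dirty_lock(pages, npages, from_device);
}
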
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index af6a12d3c058..5da38082821f 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -23,12 +23,15 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pciids.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
-#include <drm/drm_pciids.h>
static int via_driver_open(struct drm_device *dev, struct drm_file *file)
{
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 6d1ae834484c..d5ad1b05bf77 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,8 +24,16 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#include <drm/drm_mm.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/wait.h>
+
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_mm.h>
+#include <drm/via_drm.h>
#define DRIVER_AUTHOR "Various"
@@ -113,12 +121,67 @@ enum via_family {
};
/* VIA MMIO register access */
-#define VIA_BASE ((dev_priv->mmio))
+static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
+{
+ return readl((void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8_mask(struct drm_via_private *dev_priv,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
+ tmp = (tmp & ~mask) | (val & mask);
+ writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
-#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
-#define VIA_WRITE(reg, val) DRM_WRITE32(VIA_BASE, reg, val)
-#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
-#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
+/*
+ * Poll in a loop waiting for 'condition' to become true.
+ * Note: a direct replacement with wait_event_interruptible_timeout()
+ * will not work unless the driver is updated to emit wake_up()
+ * in the relevant places that can affect the 'condition'.
+ *
+ * Returns:
+ * ret keeps its current value if 'condition' becomes true
+ * ret = -EBUSY if the timeout expires
+ * ret = -EINTR if a signal interrupted the waiting period
+ */
+#define VIA_WAIT_ON( ret, queue, timeout, condition ) \
+do { \
+ DECLARE_WAITQUEUE(entry, current); \
+ unsigned long end = jiffies + (timeout); \
+ add_wait_queue(&(queue), &entry); \
+ \
+ for (;;) { \
+ __set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (time_after_eq(jiffies, end)) { \
+ ret = -EBUSY; \
+ break; \
+ } \
+ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
+ if (signal_pending(current)) { \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&(queue), &entry); \
+} while (0)
extern const struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
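
As the comment above VIA_WAIT_ON notes, switching to wait_event_interruptible_timeout() only becomes possible once every place that changes the condition also issues a wake_up() on the queue. For illustration only, such a variant would map the return values roughly as below (hypothetical macro, not part of this patch):

/* Only valid once all writers of 'condition' call wake_up(&(queue)). */
#define VIA_WAIT_ON_EVENT(ret, queue, timeout, condition)               \
do {                                                                    \
        long __r = wait_event_interruptible_timeout((queue),            \
                                                    (condition),        \
                                                    (timeout));         \
        if (__r == 0)                                                   \
                (ret) = -EBUSY;         /* timed out */                 \
        else if (__r == -ERESTARTSYS)                                   \
                (ret) = -EINTR;         /* interrupted by a signal */   \
        /* __r > 0: condition became true, leave ret untouched */       \
} while (0)
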
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c96830ccc0ec..24cc445169e2 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -35,8 +35,10 @@
* The refresh rate is also calculated for video playback sync purposes.
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
+#include <drm/drm_vblank.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
#define VIA_REG_INTERRUPT 0x200
@@ -108,7 +110,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int i;
- status = VIA_READ(VIA_REG_INTERRUPT);
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev_priv->vbl_received);
if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
@@ -143,7 +145,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
}
/* Acknowledge interrupts */
- VIA_WRITE(VIA_REG_INTERRUPT, status);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status);
if (handled)
@@ -158,8 +160,8 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
if (dev_priv) {
/* Acknowledge interrupts */
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status |
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status |
dev_priv->irq_pending_mask);
}
}
@@ -174,11 +176,11 @@ int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
return -EINVAL;
}
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
return 0;
}
@@ -188,11 +190,11 @@ void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
drm_via_private_t *dev_priv = dev->dev_private;
u32 status;
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
if (pipe != 0)
DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
@@ -233,12 +235,12 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
cur_irq = dev_priv->via_irqs + real_irq;
if (masks[real_irq][2] && !force_sequence) {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
cur_irq_sequence = atomic_read(&cur_irq->irq_received);
} else {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
(((cur_irq_sequence =
atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
@@ -292,8 +294,8 @@ void via_driver_irq_preinstall(struct drm_device *dev)
dev_priv->last_vblank_valid = 0;
/* Clear VSync interrupt regs */
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status &
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
~(dev_priv->irq_enable_mask));
/* Clear bits if they're already high */
@@ -310,13 +312,13 @@ int via_driver_irq_postinstall(struct drm_device *dev)
if (!dev_priv)
return -EINVAL;
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
/* Some magic, oh for some data sheets ! */
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
return 0;
}
@@ -331,11 +333,11 @@ void via_driver_irq_uninstall(struct drm_device *dev)
/* Some more magic, oh for some data sheets ! */
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status &
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
}
}
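
The vblank and irq (un)install hunks above replace open-coded read-modify-write sequences on registers 0x83d4/0x83d5 with via_write8_mask(). Given the mask of 0x30, the old and new forms correspond as sketched here:

/* Equivalence of the old and new byte accesses, with mask = 0x30:
 *
 *   VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);   -> set both bits
 *   via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
 *
 *   VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);  -> clear both bits
 *   via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
 *
 * via_write8_mask() computes tmp = (tmp & ~mask) | (val & mask), so only the
 * masked bits change and the read-modify-write stays in one helper.
 */
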
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 2ad865870372..255c5066a939 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -21,8 +21,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_vblank.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 4217d66a5cc6..45cc9e900260 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -25,8 +25,13 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <drm/drmP.h>
+#include <linux/slab.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_irq.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
#define VIA_MM_ALIGN_SHIFT 4
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index fb2609434df7..8d8135f424ef 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -28,13 +28,13 @@
* be very slow.
*/
-#include "via_3d_reg.h"
-#include <drm/drmP.h>
-#include <drm/via_drm.h>
+#include <drm/drm_device.h>
#include <drm/drm_legacy.h>
-#include "via_verifier.h"
+#include <drm/via_drm.h>
+
+#include "via_3d_reg.h"
#include "via_drv.h"
-#include <linux/kernel.h>
+#include "via_verifier.h"
typedef enum {
state_command,
@@ -725,14 +725,14 @@ via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
next_fire = dev_priv->fire_offsets[*fire_count];
buf++;
cmd = (*buf & 0xFFFF0000) >> 16;
- VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
+ via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
switch (cmd) {
case HC_ParaType_CmdVdata:
while ((buf < buf_end) &&
(*fire_count < dev_priv->num_fire_offsets) &&
(*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
while (buf <= next_fire) {
- VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
(burst & 63), *buf++);
burst += 4;
}
@@ -753,7 +753,7 @@ via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
(*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
break;
- VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
(burst & 63), *buf++);
burst += 4;
}
@@ -843,7 +843,7 @@ via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
cmd = *buf;
if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
break;
- VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
+ via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
buf++;
}
*buffer = buf;
@@ -894,7 +894,7 @@ via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
i = count = *buf;
buf += 3;
while (i--)
- VIA_WRITE(addr, *buf++);
+ via_write(dev_priv, addr, *buf++);
if (count & 3)
buf += 4 - (count & 3);
*buffer = buf;
@@ -950,7 +950,7 @@ via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
buf += 3;
while (i--) {
addr = *buf++;
- VIA_WRITE(addr, *buf++);
+ via_write(dev_priv, addr, *buf++);
}
count <<= 1;
if (count & 3)
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index a9ffbad1cfdd..53b1f58f99b4 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -25,8 +25,9 @@
* Video and XvMC related functions.
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
void via_init_futex(drm_via_private_t *dev_priv)
@@ -82,7 +83,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
switch (fx->func) {
case VIA_FUTEX_WAIT:
- DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+ VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
(fx->ms / 10) * (HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ba36e933bb49..eff3047052d4 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,7 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 458e606a936f..92aa2b3d349d 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
- virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+ virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index ed0fcda713c3..5156e6b279db 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -23,8 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/debugfs.h>
-#include <drm/drmP.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include "virtgpu_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index ba16e8cb7124..0966208ec30d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -25,11 +25,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "virtgpu_drv.h"
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_damage_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "virtgpu_drv.h"
#define XRES_MIN 32
#define YRES_MIN 32
@@ -40,6 +43,9 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
+#define drm_connector_to_virtio_gpu_output(x) \
+ container_of(x, struct virtio_gpu_output, conn)
+
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
@@ -56,7 +62,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};
-int
+static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index c50868753132..8cf27af3ad53 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,10 +29,13 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include "virtgpu_drv.h"
+
static struct drm_driver driver;
static int virtio_gpu_modeset = -1;
@@ -53,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- 0,
"virtiodrmfb");
/*
@@ -135,7 +137,7 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_unregister(dev);
virtio_gpu_deinit(dev);
- drm_put_dev(dev);
+ drm_dev_put(dev);
}
static void virtio_gpu_config_changed(struct virtio_device *vdev)
@@ -182,20 +184,10 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-static const struct file_operations virtio_gpu_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = virtio_gpu_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
@@ -207,17 +199,10 @@ static struct drm_driver driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
- .gem_prime_vmap = virtgpu_gem_prime_vmap,
- .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
- .gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object_unlocked = virtio_gpu_gem_free_object,
- .gem_open_object = virtio_gpu_gem_object_open,
- .gem_close_object = virtio_gpu_gem_object_close,
+ .gem_create_object = virtio_gpu_create_object,
.fops = &virtio_gpu_driver_fops,
.ioctls = virtio_gpu_ioctls,
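
The open-coded file_operations table is gone in favour of DEFINE_DRM_GEM_FOPS(), which (roughly, per include/drm/drm_gem.h) supplies the same DRM defaults but with drm_gem_mmap() as the mmap handler, replacing the removed virtio_gpu_mmap():

/* Rough expansion of DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops): */
static const struct file_operations virtio_gpu_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl   = drm_compat_ioctl,
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
        .mmap           = drm_gem_mmap,         /* GEM-aware mmap */
};
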
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9e2d3062b01d..7e69c06e168e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -31,16 +31,14 @@
#include <linux/virtio_config.h>
#include <linux/virtio_gpu.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
+#include <drm/virtgpu_drm.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -68,21 +66,23 @@ struct virtio_gpu_object_params {
};
struct virtio_gpu_object {
- struct drm_gem_object gem_base;
+ struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
struct sg_table *pages;
uint32_t mapped;
- void *vmap;
bool dumb;
- struct ttm_place placement_code;
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
- container_of((gobj), struct virtio_gpu_object, gem_base)
+ container_of((gobj), struct virtio_gpu_object, base.base)
+
+struct virtio_gpu_object_array {
+ struct ww_acquire_ctx ticket;
+ struct list_head next;
+ u32 nents, total;
+ struct drm_gem_object *objs[];
+};
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;
@@ -103,8 +103,6 @@ struct virtio_gpu_fence {
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
-#define to_virtio_fence(x) \
- container_of(x, struct virtio_gpu_fence, f)
struct virtio_gpu_vbuffer {
char *buf;
@@ -115,9 +113,9 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
-
virtio_gpu_resp_cb resp_cb;
+ struct virtio_gpu_object_array *objs;
struct list_head list;
};
@@ -135,10 +133,6 @@ struct virtio_gpu_output {
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
-#define drm_connector_to_virtio_gpu_output(x) \
- container_of(x, struct virtio_gpu_output, conn)
-#define drm_encoder_to_virtio_gpu_output(x) \
- container_of(x, struct virtio_gpu_output, enc)
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
@@ -147,10 +141,6 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
-struct virtio_gpu_mman {
- struct ttm_bo_device bdev;
-};
-
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -179,8 +169,6 @@ struct virtio_gpu_device {
struct virtio_device *vdev;
- struct virtio_gpu_mman mman;
-
struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
uint32_t num_scanouts;
@@ -189,6 +177,9 @@ struct virtio_gpu_device {
struct kmem_cache *vbufs;
bool vqs_ready;
+ bool disable_notify;
+ bool pending_notify;
+
struct ida resource_ida;
wait_queue_head_t resp_wq;
@@ -205,6 +196,10 @@ struct virtio_gpu_device {
struct work_struct config_changed_work;
+ struct work_struct obj_free_work;
+ spinlock_t obj_free_lock;
+ struct list_head obj_free_list;
+
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
@@ -217,9 +212,6 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -240,10 +232,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -251,20 +239,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_work(struct work_struct *work);
+
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
@@ -295,28 +298,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence);
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
@@ -325,11 +332,10 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
+void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+
/* virtio_gpu_display.c */
-int virtio_gpu_framebuffer_init(struct drm_device *dev,
- struct virtio_gpu_framebuffer *vgfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -339,13 +345,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* virtio_gpu_fence.c */
-bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -355,73 +355,18 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
-
/* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
-static inline struct virtio_gpu_object*
-virtio_gpu_object_ref(struct virtio_gpu_object *bo)
-{
- ttm_bo_get(&bo->tbo);
- return bo;
-}
-
-static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
-
-static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
-{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
-}
-
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- struct virtio_gpu_device *qdev =
- bo->gem_base.dev->dev_private;
- dev_err(qdev->dev, "%p reserve failed\n", bo);
- }
- return r;
- }
- return 0;
-}
-
-static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
-{
- ttm_bo_unreserve(&bo->tbo);
-}
-
-/* virgl debufs */
+
+/* virgl debugfs */
int virtio_gpu_debugfs_init(struct drm_minor *minor);
#endif
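
With struct virtio_gpu_object now embedding a drm_gem_shmem_object instead of a bare gem_base plus TTM state, the cast helper goes through two levels of embedding. A small sketch of the layout the new gem_to_virtio_gpu_obj() definition relies on:

/* Embedding assumed by gem_to_virtio_gpu_obj():
 *
 *   struct virtio_gpu_object
 *     .base        : struct drm_gem_shmem_object
 *        .base      : struct drm_gem_object   <- what the DRM core hands around
 *
 * so the driver object is recovered with a single container_of():
 */
static inline struct virtio_gpu_object *
to_virtio_gpu_obj_sketch(struct drm_gem_object *gobj)
{
        return container_of(gobj, struct virtio_gpu_object, base.base);
}
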
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 70d6c4329778..5b2a4146c5bd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -23,10 +23,13 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <trace/events/dma_fence.h>
+
#include "virtgpu_drv.h"
+#define to_virtio_fence(x) \
+ container_of(x, struct virtio_gpu_fence, f)
+
static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
@@ -37,10 +40,14 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ if (WARN_ON_ONCE(fence->f.seqno == 0))
+ /* leaked fence outside driver before completing
+ * initialization with virtio_gpu_fence_emit */
+ return false;
if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
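
The seqno == 0 check added above guards against fences that escape the driver between allocation and emission. The lifecycle it assumes, using the helpers declared in virtgpu_drv.h (ordering inferred from the comment in the hunk):

/* Assumed fence lifecycle behind the WARN_ON_ONCE() above:
 *
 *   fence = virtio_gpu_fence_alloc(vgdev);        // f.seqno still 0
 *   ...queue a command that references the fence...
 *   virtio_gpu_fence_emit(vgdev, cmd_hdr, fence); // real seqno assigned
 *
 * A dma_fence callback that observes seqno == 0 therefore caught a fence
 * before emission; it is reported as not signaled (and warns once).
 */
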
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1e49e08dd545..0a2b62279647 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -23,32 +23,10 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
-
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
-
- if (obj)
- virtio_gpu_object_unref(&obj);
-}
-
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_object *obj;
- int ret;
-
- ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
- if (ret)
- return ERR_PTR(ret);
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
- return obj;
-}
+#include "virtgpu_drv.h"
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
@@ -56,24 +34,25 @@ int virtio_gpu_gem_create(struct drm_file *file,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, params, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ if (ret < 0)
+ return ret;
- ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+ ret = drm_gem_handle_create(file, &obj->base.base, &handle);
if (ret) {
- drm_gem_object_release(&obj->gem_base);
+ drm_gem_object_release(&obj->base.base);
return ret;
}
- *obj_p = &obj->gem_base;
+ *obj_p = &obj->base.base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->base.base);
*handle_p = handle;
return 0;
@@ -117,14 +96,12 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
- struct virtio_gpu_object *obj;
BUG_ON(!offset_p);
gobj = drm_gem_object_lookup(file_priv, handle);
if (gobj == NULL)
return -ENOENT;
- obj = gem_to_virtio_gpu_obj(gobj);
- *offset_p = virtio_gpu_object_mmap_offset(obj);
+ *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -134,19 +111,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return 0;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
- return r;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
return 0;
}
@@ -155,17 +131,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
return;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
+}
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
+
+ objs = kmalloc(size, GFP_KERNEL);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = nents;
+ return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+ kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ u32 i;
+
+ objs = virtio_gpu_array_alloc(nents);
+ if (!objs)
+ return NULL;
+
+ for (i = 0; i < nents; i++) {
+ objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+ if (!objs->objs[i]) {
+ objs->nents = i;
+ virtio_gpu_array_put_free(objs);
+ return NULL;
+ }
+ }
+ objs->nents = i;
+ return objs;
+}
+
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj)
+{
+ if (WARN_ON_ONCE(objs->nents == objs->total))
+ return;
+
+ drm_gem_object_get(obj);
+ objs->objs[objs->nents] = obj;
+ objs->nents++;
+}
+
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
+{
+ int ret;
+
+ if (objs->nents == 1) {
+ ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
+ } else {
+ ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+ return ret;
+}
+
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
+{
+ if (objs->nents == 1) {
+ dma_resv_unlock(objs->objs[0]->resv);
+ } else {
+ drm_gem_unlock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+}
+
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < objs->nents; i++)
+ dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+ u32 i;
+
+ for (i = 0; i < objs->nents; i++)
+ drm_gem_object_put_unlocked(objs->objs[i]);
+ virtio_gpu_array_free(objs);
+}
+
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ spin_lock(&vgdev->obj_free_lock);
+ list_add_tail(&objs->next, &vgdev->obj_free_list);
+ spin_unlock(&vgdev->obj_free_lock);
+ schedule_work(&vgdev->obj_free_work);
+}
+
+void virtio_gpu_array_put_free_work(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device, obj_free_work);
+ struct virtio_gpu_object_array *objs;
+
+ spin_lock(&vgdev->obj_free_lock);
+ while (!list_empty(&vgdev->obj_free_list)) {
+ objs = list_first_entry(&vgdev->obj_free_list,
+ struct virtio_gpu_object_array, next);
+ list_del(&objs->next);
+ spin_unlock(&vgdev->obj_free_lock);
+ virtio_gpu_array_put_free(objs);
+ spin_lock(&vgdev->obj_free_lock);
+ }
+ spin_unlock(&vgdev->obj_free_lock);
}
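
The helpers above replace TTM's validate/backoff lists with a plain array of GEM objects. A hedged sketch of the producer side, mirroring how the ioctl conversions later in this patch use them; the command-queueing step is a placeholder, since in the real driver the virtqueue code attaches the fence, unlocks the reservations and frees the array once the buffer is on the ring:

static int submit_with_objs_sketch(struct drm_file *file, u32 *handles,
                                   u32 nents, struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object_array *objs;
        int ret;

        objs = virtio_gpu_array_from_handles(file, handles, nents);
        if (!objs)                      /* a lookup failed, refs dropped */
                return -ENOENT;

        ret = virtio_gpu_array_lock_resv(objs);
        if (ret) {
                virtio_gpu_array_put_free(objs);
                return ret;
        }

        /* placeholder: a real caller queues a command here and lets the
         * virtqueue code run the three steps below after submission
         */
        virtio_gpu_array_add_fence(objs, &fence->f);
        virtio_gpu_array_unlock_resv(objs);
        virtio_gpu_array_put_free(objs);        /* or _put_free_delayed() */

        return 0;
}
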
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index ac60be9b5c19..205ec4abae2b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -25,23 +25,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/virtgpu_drm.h>
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/file.h>
#include <linux/sync_file.h>
-#include "virtgpu_drv.h"
+#include <drm/drm_file.h>
+#include <drm/virtgpu_drm.h>
-static void convert_to_hw_box(struct virtio_gpu_box *dst,
- const struct drm_virtgpu_3d_box *src)
-{
- dst->x = cpu_to_le32(src->x);
- dst->y = cpu_to_le32(src->y);
- dst->z = cpu_to_le32(src->z);
- dst->w = cpu_to_le32(src->w);
- dst->h = cpu_to_le32(src->h);
- dst->d = cpu_to_le32(src->d);
-}
+#include "virtgpu_drv.h"
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -54,45 +44,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
- int ret;
-
- ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
- if (ret != 0)
- return ret;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
- ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
- if (ret) {
- ttm_eu_backoff_reservation(ticket, head);
- return ret;
- }
- }
- return 0;
-}
-
-void virtio_gpu_unref_list(struct list_head *head)
-{
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
-
- drm_gem_object_put_unlocked(&qobj->gem_base);
- }
-}
-
/*
* Usage of execbuffer:
* Relocations need to take into account the full VIRTIO_GPUDrawable size.
@@ -105,16 +56,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
- struct drm_gem_object *gobj;
struct virtio_gpu_fence *out_fence;
- struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
void __user *user_bo_handles = NULL;
- struct list_head validate_list;
- struct ttm_validate_buffer *buflist = NULL;
- int i;
- struct ww_acquire_ctx ticket;
+ struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
@@ -155,15 +101,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
return out_fence_fd;
}
- INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
-
bo_handles = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(uint32_t), GFP_KERNEL);
- buflist = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(struct ttm_validate_buffer),
- GFP_KERNEL | __GFP_ZERO);
- if (!bo_handles || !buflist) {
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!bo_handles) {
ret = -ENOMEM;
goto out_unused_fd;
}
@@ -175,27 +116,23 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- for (i = 0; i < exbuf->num_bo_handles; i++) {
- gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
- if (!gobj) {
- ret = -ENOENT;
- goto out_unused_fd;
- }
-
- qobj = gem_to_virtio_gpu_obj(gobj);
- buflist[i].bo = &qobj->tbo;
-
- list_add(&buflist[i].head, &validate_list);
+ buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ exbuf->num_bo_handles);
+ if (!buflist) {
+ ret = -ENOENT;
+ goto out_unused_fd;
}
kvfree(bo_handles);
bo_handles = NULL;
}
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret)
- goto out_free;
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_unused_fd;
+ }
- buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
+ buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unresv;
@@ -220,24 +157,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, out_fence);
-
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
-
- /* fence the command bo */
- virtio_gpu_unref_list(&validate_list);
- kvfree(buflist);
+ vfpriv->ctx_id, buflist, out_fence);
return 0;
out_memdup:
- kfree(buf);
+ kvfree(buf);
out_unresv:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-out_free:
- virtio_gpu_unref_list(&validate_list);
+ if (buflist)
+ virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
kvfree(bo_handles);
- kvfree(buflist);
+ if (buflist)
+ virtio_gpu_array_put_free(buflist);
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
@@ -314,11 +245,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence)
return -ENOMEM;
- qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
dma_fence_put(&fence->f);
- if (IS_ERR(qobj))
- return PTR_ERR(qobj);
- obj = &qobj->gem_base;
+ if (ret < 0)
+ return ret;
+ obj = &qobj->base.base;
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
@@ -345,7 +276,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
qobj = gem_to_virtio_gpu_obj(gobj);
- ri->size = qobj->gem_base.size;
+ ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
drm_gem_object_put_unlocked(gobj);
return 0;
@@ -358,50 +289,37 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
u32 offset = args->offset;
- struct virtio_gpu_box box;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
-
- convert_to_hw_box(&box, &args->box);
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
- goto out_unres;
+ goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, qobj->hw_res_handle,
- vfpriv->ctx_id, offset, args->level,
- &box, fence);
- reservation_object_add_excl_fence(qobj->tbo.resv,
- &fence->f);
-
+ (vgdev, vfpriv->ctx_id, offset, args->level,
+ &args->box, objs, fence);
dma_fence_put(&fence->f);
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+ return 0;
+
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
@@ -411,75 +329,69 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
- struct virtio_gpu_box box;
int ret;
u32 offset = args->offset;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
-
- convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, qobj, offset,
- box.w, box.h, box.x, box.y, NULL);
+ (vgdev, offset,
+ args->box.w, args->box.h, args->box.x, args->box.y,
+ objs, NULL);
} else {
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
+
+ ret = -ENOMEM;
fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unres;
- }
+ if (!fence)
+ goto err_unlock;
+
virtio_gpu_cmd_transfer_to_host_3d
- (vgdev, qobj,
+ (vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, fence);
- reservation_object_add_excl_fence(qobj->tbo.resv,
- &fence->f);
+ args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
+ return 0;
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+ struct drm_file *file)
{
struct drm_virtgpu_3d_wait *args = data;
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct drm_gem_object *obj;
+ long timeout = 15 * HZ;
int ret;
- bool nowait = false;
- gobj = drm_gem_object_lookup(file, args->handle);
- if (gobj == NULL)
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (obj == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (args->flags & VIRTGPU_WAIT_NOWAIT)
- nowait = true;
- ret = virtio_gpu_object_wait(qobj, nowait);
+ if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+ ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ } else {
+ ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+ timeout);
+ }
+ if (ret == 0)
+ ret = -EBUSY;
+ else if (ret > 0)
+ ret = 0;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -553,34 +465,34 @@ copy_exit:
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
virtio_gpu_resource_create_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* make transfer async to the main ring? - not sure, can we
* thread these in the underlying GL
*/
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
virtio_gpu_transfer_from_host_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
virtio_gpu_transfer_to_host_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
};
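
The wait ioctl now queries the GEM object's reservation object directly instead of going through TTM. A sketch of the return-value mapping used above, assuming the *_rcu dma_resv names from this series:

/* Hypothetical helper matching the new virtio_gpu_wait_ioctl() logic. */
static int gem_wait_sketch(struct drm_gem_object *obj, bool nowait,
                           unsigned long timeout)
{
        long ret;

        if (nowait)
                /* true (1) when every fence on the object has signaled */
                ret = dma_resv_test_signaled_rcu(obj->resv, true);
        else
                /* remaining jiffies, 0 on timeout, <0 on error/signal */
                ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
                                                timeout);

        if (ret == 0)
                return -EBUSY;
        return ret < 0 ? ret : 0;
}
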
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 84b6a6bf00c6..2f5773e43557 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -25,7 +25,9 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_file.h>
+
#include "virtgpu_drv.h"
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
@@ -145,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev)
INIT_WORK(&vgdev->config_changed_work,
virtio_gpu_config_changed_work_func);
+ INIT_WORK(&vgdev->obj_free_work,
+ virtio_gpu_array_put_free_work);
+ INIT_LIST_HEAD(&vgdev->obj_free_list);
+ spin_lock_init(&vgdev->obj_free_lock);
+
#ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
- DRM_INFO("virgl 3d acceleration %s\n",
- vgdev->has_virgl_3d ? "enabled" : "not supported by host");
-#else
- DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
- DRM_INFO("EDID support available.\n");
}
+ DRM_INFO("features: %cvirgl %cedid\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
@@ -171,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev)
goto err_vbufs;
}
- ret = virtio_gpu_ttm_init(vgdev);
- if (ret) {
- DRM_ERROR("failed to init ttm %d\n", ret);
- goto err_ttm;
- }
-
/* get display info */
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
num_scanouts, &num_scanouts);
@@ -208,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev)
return 0;
err_scanouts:
- virtio_gpu_ttm_fini(vgdev);
-err_ttm:
virtio_gpu_free_vbufs(vgdev);
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
@@ -232,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
+ flush_work(&vgdev->obj_free_work);
vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
@@ -240,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev)
vgdev->vdev->config->del_vqs(vgdev->vdev);
virtio_gpu_modeset_fini(vgdev);
- virtio_gpu_ttm_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
kfree(vgdev->capsets);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..017a9e0fc3bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,73 +23,83 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
+static int virtio_gpu_virglrenderer_workaround = 1;
+module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
+
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
uint32_t *resid)
{
-#if 0
- int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
-#else
- static int handle;
-
- /*
- * FIXME: dirty hack to avoid re-using IDs, virglrenderer
- * can't deal with that. Needs fixing in virglrenderer, also
- * should figure a better way to handle that in the guest.
- */
- handle++;
-#endif
-
- *resid = handle + 1;
+ if (virtio_gpu_virglrenderer_workaround) {
+ /*
+ * Hack to avoid re-using resource IDs.
+ *
+ * virglrenderer versions up to (and including) 0.7.0
+ * can't deal with that. virglrenderer commit
+ * "f91a9dd35715 Fix unlinking resources from hash
+ * table." (Feb 2019) fixes the bug.
+ */
+ static int handle;
+ handle++;
+ *resid = handle + 1;
+ } else {
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+ if (handle < 0)
+ return handle;
+ *resid = handle + 1;
+ }
return 0;
}
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
-#if 0
- ida_free(&vgdev->resource_ida, id - 1);
-#endif
+ if (!virtio_gpu_virglrenderer_workaround) {
+ ida_free(&vgdev->resource_ida, id - 1);
+ }
}
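
The +1/-1 dance around the IDA exists because resource ID 0 is treated as "no resource" by the protocol, while ida_alloc() hands out IDs starting at 0. A minimal, stand-alone sketch of the same convention (names are illustrative):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_resource_ida);

/* ida_alloc() returns 0, 1, 2, ... so publish handle + 1 and translate
 * back with id - 1 on release; ID 0 stays reserved. */
static int example_resource_id_get(uint32_t *resid)
{
	int handle = ida_alloc(&example_resource_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
	*resid = handle + 1;
	return 0;
}

static void example_resource_id_put(uint32_t id)
{
	ida_free(&example_resource_ida, id - 1);
}
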
-static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ if (bo->pages)
+ virtio_gpu_object_detach(vgdev, bo);
if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
- if (bo->vmap)
- virtio_gpu_object_kunmap(bo);
- drm_gem_object_release(&bo->gem_base);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
+
+ drm_gem_shmem_free_object(obj);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
+static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+ .free = virtio_gpu_free_object,
+ .open = virtio_gpu_gem_object_open,
+ .close = virtio_gpu_gem_object_close,
+
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = &drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size)
{
- u32 c = 1;
-
- vgbo->placement.placement = &vgbo->placement_code;
- vgbo->placement.busy_placement = &vgbo->placement_code;
- vgbo->placement_code.fpfn = 0;
- vgbo->placement_code.lpfn = 0;
- vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_NO_EVICT;
- vgbo->placement.num_placement = c;
- vgbo->placement.num_busy_placement = c;
+ struct virtio_gpu_object *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ return &bo->base.base;
}
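
virtio_gpu_create_object() is the allocation hook that drm_gem_shmem_create() calls back into (via the driver's gem_create_object pointer, set elsewhere in this series), so the allocation covers the whole virtio_gpu_object rather than just the shmem base. gem_to_virtio_gpu_obj() is then presumably a container_of() cast back down; a sketch of the assumed shape (the real macro lives in virtgpu_drv.h and is not part of this hunk):

/* Assumed layering: virtio_gpu_object embeds drm_gem_shmem_object ("base"),
 * which embeds the plain drm_gem_object ("base.base"). */
#define gem_to_virtio_gpu_obj(gobj) \
	container_of((gobj), struct virtio_gpu_object, base.base)
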
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -97,151 +107,59 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object_array *objs = NULL;
+ struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- size_t acc_size;
int ret;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
- sizeof(struct virtio_gpu_object));
+ params->size = roundup(params->size, PAGE_SIZE);
+ shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+ bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
- bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
- if (bo == NULL)
- return -ENOMEM;
ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
- if (ret < 0) {
- kfree(bo);
- return ret;
- }
- params->size = roundup(params->size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
- if (ret != 0) {
- virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
- return ret;
- }
+ if (ret < 0)
+ goto err_free_gem;
+
bo->dumb = params->dumb;
+ if (fence) {
+ ret = -ENOMEM;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ goto err_put_id;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_objs;
+ }
+
if (params->virgl) {
- virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
} else {
- virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ virtio_gpu_cmd_create_resource(vgdev, bo, params,
+ objs, fence);
}
- virtio_gpu_init_ttm_placement(bo);
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
- ttm_bo_type_device, &bo->placement, 0,
- true, acc_size, NULL, NULL,
- &virtio_gpu_ttm_bo_destroy);
- /* ttm_bo_init failure will call the destroy */
- if (ret != 0)
+ ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
return ret;
-
- if (fence) {
- struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct ww_acquire_ctx ticket;
- unsigned long irq_flags;
- bool signaled;
-
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&bo->gem_base);
- mainbuf.bo = &bo->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret == 0) {
- spin_lock_irqsave(&drv->lock, irq_flags);
- signaled = virtio_fence_signaled(&fence->f);
- if (!signaled)
- /* virtio create command still in flight */
- ttm_eu_fence_buffer_objects(&ticket, &validate_list,
- &fence->f);
- spin_unlock_irqrestore(&drv->lock, irq_flags);
- if (signaled)
- /* virtio create command finished */
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- }
- virtio_gpu_unref_list(&validate_list);
}
*bo_ptr = bo;
return 0;
-}
-
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
-{
- bo->vmap = NULL;
- ttm_bo_kunmap(&bo->kmap);
-}
-
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
-{
- bool is_iomem;
- int r;
-
- WARN_ON(bo->vmap);
-
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
- if (r)
- return r;
- bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
- return 0;
-}
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo)
-{
- int ret;
- struct page **pages = bo->tbo.ttm->pages;
- int nr_pages = bo->tbo.num_pages;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
-
- /* wtf swapping */
- if (bo->pages)
- return 0;
-
- if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
- bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!bo->pages)
- goto out;
-
- ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
- if (ret)
- goto out;
- return 0;
-out:
- kfree(bo->pages);
- bo->pages = NULL;
- return -ENOMEM;
-}
-
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
-{
- sg_free_table(bo->pages);
- kfree(bo->pages);
- bo->pages = NULL;
-}
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0))
- return r;
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
+err_put_objs:
+ virtio_gpu_array_put_free(objs);
+err_put_id:
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ret;
}
-
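
The object array introduced here replaces the old TTM validate list: the submit path allocates it, takes the objects' reservation locks, and hands ownership to the command buffer; the dequeue worker in virtgpu_vq.c (further down) adds the fence, unlocks, and frees the array. A minimal caller-side sketch using the helpers referenced above (function name is illustrative):

/* Sketch: single-object array lifecycle on the submit side. */
static int sketch_submit_with_obj(struct virtio_gpu_device *vgdev,
				  struct drm_gem_object *gobj)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, gobj);

	ret = virtio_gpu_array_lock_resv(objs);	/* reserve before queueing */
	if (ret) {
		virtio_gpu_array_put_free(objs);
		return ret;
	}

	/* ... queue a fenced command that takes ownership of objs;
	 * the dequeue worker unlocks and releases it on completion ... */
	return 0;
}
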
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 024c2aa0c929..d1c3f5fbfee4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -23,9 +23,12 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "virtgpu_drv.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "virtgpu_drv.h"
static const uint32_t virtio_gpu_formats[] = {
DRM_FORMAT_HOST_XRGB8888,
@@ -82,7 +85,45 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- return 0;
+ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->fb || WARN_ON(!state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ is_cursor, true);
+ return ret;
+}
+
+static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
+ struct drm_plane_state *state,
+ struct drm_rect *rect)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(state->fb->obj[0]);
+ struct virtio_gpu_object_array *objs;
+ uint32_t w = rect->x2 - rect->x1;
+ uint32_t h = rect->y2 - rect->y1;
+ uint32_t x = rect->x1;
+ uint32_t y = rect->y1;
+ uint32_t off = x * state->fb->format->cpp[0] +
+ y * state->fb->pitches[0];
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ objs, NULL);
}
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
@@ -91,9 +132,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
- struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
- uint32_t handle;
+ struct drm_rect rect;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -102,40 +142,52 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (plane->state->fb && output->enabled) {
- vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- handle = bo->hw_res_handle;
- if (bo->dumb) {
- virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->src_w >> 16),
- cpu_to_le32(plane->state->src_h >> 16),
- cpu_to_le32(plane->state->src_x >> 16),
- cpu_to_le32(plane->state->src_y >> 16), NULL);
- }
- } else {
- handle = 0;
+ if (!plane->state->fb || !output->enabled) {
+ DRM_DEBUG("nofb\n");
+ virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ 0, 0);
+ return;
+ }
+
+ if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
+ return;
+
+ virtio_gpu_disable_notify(vgdev);
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+ if (bo->dumb)
+ virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
+
+ if (plane->state->fb != old_state->fb ||
+ plane->state->src_w != old_state->src_w ||
+ plane->state->src_h != old_state->src_h ||
+ plane->state->src_x != old_state->src_x ||
+ plane->state->src_y != old_state->src_y) {
+ DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
+ bo->hw_res_handle,
+ plane->state->crtc_w, plane->state->crtc_h,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ virtio_gpu_cmd_set_scanout(vgdev, output->index,
+ bo->hw_res_handle,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
}
- DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
- plane->state->crtc_w, plane->state->crtc_h,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
- virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
- if (handle)
- virtio_gpu_cmd_resource_flush(vgdev, handle,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16);
+ virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
+ rect.x1,
+ rect.y1,
+ rect.x2 - rect.x1,
+ rect.y2 - rect.y1);
+
+ virtio_gpu_enable_notify(vgdev);
}
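
The primary-plane path now uses the generic damage helper: merge all damage from the commit into one rectangle, skip the update entirely if nothing changed, and transfer/flush only that rectangle. A condensed sketch of the flow, assuming <drm/drm_damage_helper.h>:

#include <drm/drm_damage_helper.h>

/* Sketch: damage-clipped update skeleton, mirroring the hunk above. */
static void sketch_damage_clipped_update(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct drm_rect rect;

	/* No merged damage -> nothing to upload or flush this commit. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	/* 1. upload only 'rect' to the host (transfer_to_host_2d),
	 * 2. re-issue set_scanout only if the fb or src viewport changed,
	 * 3. flush only 'rect' (resource_flush). */
}
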
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -184,7 +236,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
- int ret = 0;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -203,20 +254,21 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
/* new cursor -- update & wait */
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- 0, 0, vgfb->fence);
- ret = virtio_gpu_object_reserve(bo, false);
- if (!ret) {
- reservation_object_add_excl_fence(bo->tbo.resv,
- &vgfb->fence->f);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
- virtio_gpu_object_unreserve(bo);
- virtio_gpu_object_wait(bo, false);
- }
+ (vgdev, 0,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ 0, 0, objs, vgfb->fence);
+ dma_fence_wait(&vgfb->fence->f, true);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 8fbf71bd0c5e..050d24c39a8f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -22,52 +22,17 @@
* Authors: Andreas Pokorny
*/
+#include <drm/drm_prime.h>
+
#include "virtgpu_drv.h"
/* Empty implementations, as there should not be any other driver for a virtual
* device that might share buffers with virtgpu
*/
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
- /* should not happen */
- return ERR_PTR(-EINVAL);
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
- bo->tbo.ttm->num_pages);
-}
-
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table)
{
return ERR_PTR(-ENODEV);
}
-
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- int ret;
-
- ret = virtio_gpu_object_kmap(bo);
- if (ret)
- return NULL;
- return bo->vmap;
-}
-
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
-}
-
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
- return drm_gem_prime_mmap(obj, vma);
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
deleted file mode 100644
index 300ef3a83538..000000000000
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Authors:
- * Dave Airlie
- * Alon Levy
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/virtgpu_drm.h>
-#include "virtgpu_drv.h"
-
-#include <linux/delay.h>
-
-static struct
-virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
-{
- struct virtio_gpu_mman *mman;
- struct virtio_gpu_device *vgdev;
-
- mman = container_of(bdev, struct virtio_gpu_mman, bdev);
- vgdev = container_of(mman, struct virtio_gpu_device, mman);
- return vgdev;
-}
-
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct virtio_gpu_device *vgdev;
- int r;
-
- file_priv = filp->private_data;
- vgdev = file_priv->minor->dev->dev_private;
- if (vgdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-
- return r;
-}
-
-static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
- uint32_t flags)
-{
- return 0;
-}
-
-static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)1;
- return 0;
-}
-
-static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)NULL;
-}
-
-static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
-{
- return 0;
-}
-
-static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
-static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
-static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
- .init = ttm_bo_man_init,
- .takedown = ttm_bo_man_takedown,
- .get_node = ttm_bo_man_get_node,
- .put_node = ttm_bo_man_put_node,
- .debug = ttm_bo_man_debug
-};
-
-static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- man->func = &virtio_gpu_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- static const struct ttm_place placements = {
- .fpfn = 0,
- .lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
- };
-
- placement->placement = &placements;
- placement->busy_placement = &placements;
- placement->num_placement = 1;
- placement->num_busy_placement = 1;
-}
-
-static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- return 0;
-}
-
-static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- case TTM_PL_TT:
- /* system memory */
- return 0;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-/*
- * TTM backend functions.
- */
-struct virtio_gpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct virtio_gpu_object *obj;
-};
-
-static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
- return 0;
-}
-
-static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_detach(vgdev, gtt->obj);
- return 0;
-}
-
-static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
-
- ttm_dma_tt_fini(&gtt->ttm);
- kfree(gtt);
-}
-
-static struct ttm_backend_func virtio_gpu_tt_func = {
- .bind = &virtio_gpu_ttm_tt_bind,
- .unbind = &virtio_gpu_ttm_tt_unbind,
- .destroy = &virtio_gpu_ttm_tt_destroy,
-};
-
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct virtio_gpu_device *vgdev;
- struct virtio_gpu_ttm_tt *gtt;
-
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
- return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_tt_func;
- gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
- return NULL;
- }
- return &gtt->ttm.ttm;
-}
-
-static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
-{
- struct virtio_gpu_object *bo;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
-
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
-}
-
-static struct ttm_bo_driver virtio_gpu_bo_driver = {
- .ttm_tt_create = &virtio_gpu_ttm_tt_create,
- .invalidate_caches = &virtio_gpu_invalidate_caches,
- .init_mem_type = &virtio_gpu_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = &virtio_gpu_evict_flags,
- .verify_access = &virtio_gpu_verify_access,
- .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
- .io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .swap_notify = &virtio_gpu_bo_swap_notify,
-};
-
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
-{
- int r;
-
- /* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&vgdev->mman.bdev,
- &virtio_gpu_bo_driver,
- vgdev->ddev->anon_inode->i_mapping,
- false);
- if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
- goto err_dev_init;
- }
-
- r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
- if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
- goto err_mm_init;
- }
- return 0;
-
-err_mm_init:
- ttm_bo_device_release(&vgdev->mman.bdev);
-err_dev_init:
- return r;
-}
-
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
-{
- ttm_bo_device_release(&vgdev->mman.bdev);
- DRM_INFO("virtio_gpu: ttm finalized\n");
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 981ee16e3ee9..5914e79d3429 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -26,19 +26,31 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
-#include "virtgpu_trace.h"
+#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
+
#define MAX_INLINE_CMD_SIZE 96
#define MAX_INLINE_RESP_SIZE 24
#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+ const struct drm_virtgpu_3d_box *src)
+{
+ dst->x = cpu_to_le32(src->x);
+ dst->y = cpu_to_le32(src->y);
+ dst->z = cpu_to_le32(src->z);
+ dst->w = cpu_to_le32(src->w);
+ dst->h = cpu_to_le32(src->h);
+ dst->d = cpu_to_le32(src->d);
+}
+
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
@@ -154,7 +166,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
{
if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
kfree(vbuf->resp_buf);
- kfree(vbuf->data_buf);
+ kvfree(vbuf->data_buf);
kmem_cache_free(vgdev->vbufs, vbuf);
}
@@ -191,7 +203,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
spin_unlock(&vgdev->ctrlq.qlock);
- list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +230,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
-
- list_del(&entry->list);
- free_vbuf(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
if (fence_id)
virtio_gpu_fence_event_process(vgdev, fence_id);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ if (entry->objs)
+ virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -251,26 +267,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
wake_up(&vgdev->cursorq.ack_queue);
}
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+/* Create sg_table from a vmalloc'd buffer. */
+static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
+{
+ int ret, s, i;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct page *pg;
+
+ if (WARN_ON(!PAGE_ALIGNED(data)))
+ return NULL;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return NULL;
+ }
+
+ for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+ pg = vmalloc_to_page(data);
+ if (!pg) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+ }
+
+ s = min_t(int, PAGE_SIZE, size);
+ sg_set_page(sg, pg, s, 0);
+
+ size -= s;
+ data += s;
+ }
+
+ return sgt;
+}
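
vmalloc memory is only virtually contiguous, so it cannot be described by a single sg_init_one() entry; the helper above builds a scatterlist page by page instead (which is also why free_vbuf() switched to kvfree()). A minimal caller sketch, assuming a page-aligned vmalloc'd buffer and access to the helper:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static void sketch_use_vmalloc_sgt(void *data, uint32_t size)
{
	struct sg_table *sgt;
	int ents;

	sgt = vmalloc_to_sgt(data, size, &ents);	/* data must be page aligned */
	if (!sgt)
		return;

	/* ... hand sgt->sgl (ents entries) to virtqueue_add_sgs() ... */

	sg_free_table(sgt);
	kfree(sgt);
}
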
+
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct scatterlist *vout)
__releases(&vgdev->ctrlq.qlock)
__acquires(&vgdev->ctrlq.qlock)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vout, vresp;
+ struct scatterlist *sgs[3], vcmd, vresp;
int outcnt = 0, incnt = 0;
+ bool notify = false;
int ret;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return notify;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
sgs[outcnt + incnt] = &vcmd;
outcnt++;
- if (vbuf->data_size) {
- sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
- sgs[outcnt + incnt] = &vout;
+ if (vout) {
+ sgs[outcnt + incnt] = vout;
outcnt++;
}
@@ -291,32 +348,35 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
-
- if (!ret)
- ret = vq->num_free;
- return ret;
-}
-
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
-{
- int rc;
-
- spin_lock(&vgdev->ctrlq.qlock);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
- spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ return notify;
}
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_ctrl_hdr *hdr,
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- int rc;
+ struct scatterlist *vout = NULL, sg;
+ struct sg_table *sgt = NULL;
+ bool notify;
+ int outcnt = 0;
+
+ if (vbuf->data_size) {
+ if (is_vmalloc_addr(vbuf->data_buf)) {
+ sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+ &outcnt);
+ if (!sgt)
+ return;
+ vout = sgt->sgl;
+ } else {
+ sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
+ vout = &sg;
+ outcnt = 1;
+ }
+ }
again:
spin_lock(&vgdev->ctrlq.qlock);
@@ -329,29 +389,66 @@ again:
* to wait for free space, which can result in fence ids being
* submitted out-of-order.
*/
- if (vq->num_free < 3) {
+ if (vq->num_free < 2 + outcnt) {
spin_unlock(&vgdev->ctrlq.qlock);
wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
goto again;
}
- if (fence)
+ if (hdr && fence) {
virtio_gpu_fence_emit(vgdev, hdr, fence);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
+ }
+ notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ if (notify) {
+ if (vgdev->disable_notify)
+ vgdev->pending_notify = true;
+ else
+ virtqueue_notify(vgdev->ctrlq.vq);
+ }
+
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
+void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+{
+ vgdev->disable_notify = true;
+}
+
+void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
+{
+ vgdev->disable_notify = false;
+
+ if (!vgdev->pending_notify)
+ return;
+ vgdev->pending_notify = false;
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
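
virtio_gpu_disable_notify()/virtio_gpu_enable_notify() let a caller batch several control commands into one doorbell: while notifications are disabled, queued commands only set pending_notify, and re-enabling rings the virtqueue once. The plane-update path above uses exactly this bracket; a sketch of the pattern (parameter names are illustrative):

/* Sketch: batch three control commands behind a single notify. */
static void sketch_batched_update(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object_array *objs,
				  uint32_t scanout, uint32_t handle,
				  uint32_t x, uint32_t y,
				  uint32_t w, uint32_t h, uint64_t off)
{
	virtio_gpu_disable_notify(vgdev);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y, objs, NULL);
	virtio_gpu_cmd_set_scanout(vgdev, scanout, handle, w, h, x, y);
	virtio_gpu_cmd_resource_flush(vgdev, handle, x, y, w, h);

	virtio_gpu_enable_notify(vgdev);	/* one doorbell for all of them */
}
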
+
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ bool notify;
int ret;
int outcnt;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return;
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -369,14 +466,13 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
spin_unlock(&vgdev->cursorq.qlock);
- if (!ret)
- ret = vq->num_free;
- return ret;
+ if (notify)
+ virtqueue_notify(vq);
}
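
Both queues now use the two-step kick: virtqueue_kick_prepare() is evaluated while still holding the queue lock, and the (potentially expensive) virtqueue_notify() happens only after the lock is dropped. A minimal sketch of that split, with illustrative names:

#include <linux/virtio.h>
#include <linux/spinlock.h>

static void sketch_kick_outside_lock(struct virtqueue *vq, spinlock_t *qlock)
{
	bool notify;

	spin_lock(qlock);
	/* ... virtqueue_add_sgs(vq, ...) goes here ... */
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(qlock);

	if (notify)
		virtqueue_notify(vq);	/* may trap to the host */
}
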
/* just create gem objects for userspace and long lived objects,
@@ -387,6 +483,7 @@ retry:
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
@@ -394,6 +491,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -480,12 +578,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -497,14 +596,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
- cmd_p->r.width = width;
- cmd_p->r.height = height;
- cmd_p->r.x = x;
- cmd_p->r.y = y;
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
@@ -825,34 +925,38 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -860,6 +964,7 @@ void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
@@ -867,6 +972,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -887,12 +993,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -905,10 +1012,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->box = *box;
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -916,21 +1025,25 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
- cmd_p->box = *box;
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -939,7 +1052,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence)
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -949,6 +1064,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
vbuf->data_buf = data;
vbuf->data_size = data_size;
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -964,17 +1080,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si, nents;
+ int si, nents, ret;
if (WARN_ON_ONCE(!obj->created))
return -EINVAL;
+ if (WARN_ON_ONCE(obj->pages))
+ return -EINVAL;
- if (!obj->pages) {
- int ret;
+ ret = drm_gem_shmem_pin(&obj->base.base);
+ if (ret < 0)
+ return -EINVAL;
- ret = virtio_gpu_object_get_sg_table(vgdev, obj);
- if (ret)
- return ret;
+ obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+ if (obj->pages == NULL) {
+ drm_gem_shmem_unpin(&obj->base.base);
+ return -EINVAL;
}
if (use_dma_api) {
@@ -1013,6 +1133,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ if (WARN_ON_ONCE(!obj->pages))
+ return;
+
if (use_dma_api && obj->mapped) {
struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host process it ... */
@@ -1028,6 +1151,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
} else {
virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
}
+
+ sg_free_table(obj->pages);
+ obj->pages = NULL;
+
+ drm_gem_shmem_unpin(&obj->base.base);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 89f09bec7b23..0b767d7efa24 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_crc.o
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_composer.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
new file mode 100644
index 000000000000..4af2f19480f4
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/crc32.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
+
+/**
+ * compute_crc - Compute CRC value on output frame
+ *
+ * @vaddr_out: address of the final framebuffer
+ * @composer: framebuffer's metadata
+ *
+ * Returns the CRC value computed with crc32 over the visible portion of
+ * the final framebuffer at vaddr_out
+ */
+static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
+{
+ int i, j, src_offset;
+ int x_src = composer->src.x1 >> 16;
+ int y_src = composer->src.y1 >> 16;
+ int h_src = drm_rect_height(&composer->src) >> 16;
+ int w_src = drm_rect_width(&composer->src) >> 16;
+ u32 crc = 0;
+
+ for (i = y_src; i < y_src + h_src; ++i) {
+ for (j = x_src; j < x_src + w_src; ++j) {
+ src_offset = composer->offset
+ + (i * composer->pitch)
+ + (j * composer->cpp);
+ /* XRGB format ignores Alpha channel */
+ memset(vaddr_out + src_offset + 24, 0, 8);
+ crc = crc32_le(crc, vaddr_out + src_offset,
+ sizeof(u32));
+ }
+ }
+
+ return crc;
+}
+
+/**
+ * blend - blend value at vaddr_src with value at vaddr_dst
+ * @vaddr_dst: destination address
+ * @vaddr_src: source address
+ * @dest_composer: destination framebuffer's metadata
+ * @src_composer: source framebuffer's metadata
+ *
+ * Blend value at vaddr_src with value at vaddr_dst.
+ * Currently, this function writes the value at vaddr_src over the
+ * value at vaddr_dst, using each buffer's metadata to locate the
+ * source values and their destination.
+ *
+ * TODO: Use the alpha value to blend vaddr_src with vaddr_dst
+ * instead of overwriting it.
+ */
+static void blend(void *vaddr_dst, void *vaddr_src,
+ struct vkms_composer *dest_composer,
+ struct vkms_composer *src_composer)
+{
+ int i, j, j_dst, i_dst;
+ int offset_src, offset_dst;
+
+ int x_src = src_composer->src.x1 >> 16;
+ int y_src = src_composer->src.y1 >> 16;
+
+ int x_dst = src_composer->dst.x1;
+ int y_dst = src_composer->dst.y1;
+ int h_dst = drm_rect_height(&src_composer->dst);
+ int w_dst = drm_rect_width(&src_composer->dst);
+
+ int y_limit = y_src + h_dst;
+ int x_limit = x_src + w_dst;
+
+ for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
+ for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
+ offset_dst = dest_composer->offset
+ + (i_dst * dest_composer->pitch)
+ + (j_dst++ * dest_composer->cpp);
+ offset_src = src_composer->offset
+ + (i * src_composer->pitch)
+ + (j * src_composer->cpp);
+
+ memcpy(vaddr_dst + offset_dst,
+ vaddr_src + offset_src, sizeof(u32));
+ }
+ i_dst++;
+ }
+}
+
+static void compose_cursor(struct vkms_composer *cursor_composer,
+ struct vkms_composer *primary_composer,
+ void *vaddr_out)
+{
+ struct drm_gem_object *cursor_obj;
+ struct vkms_gem_object *cursor_vkms_obj;
+
+ cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
+ cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
+
+ if (WARN_ON(!cursor_vkms_obj->vaddr))
+ return;
+
+ blend(vaddr_out, cursor_vkms_obj->vaddr,
+ primary_composer, cursor_composer);
+}
+
+static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer,
+ struct vkms_composer *cursor_composer)
+{
+ struct drm_framebuffer *fb = &primary_composer->fb;
+ struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
+ u32 crc = 0;
+
+ if (!vaddr_out) {
+ DRM_ERROR("Failed to allocate memory for output frame.");
+ return 0;
+ }
+
+ if (WARN_ON(!vkms_obj->vaddr)) {
+ kfree(vaddr_out);
+ return crc;
+ }
+
+ memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
+
+ if (cursor_composer)
+ compose_cursor(cursor_composer, primary_composer, vaddr_out);
+
+ crc = compute_crc(vaddr_out, primary_composer);
+
+ kfree(vaddr_out);
+
+ return crc;
+}
+
+/**
+ * vkms_composer_worker - ordered work_struct to compute CRC
+ *
+ * @work: work_struct
+ *
+ * Work handler for composing and computing CRCs. The work_struct runs on
+ * an ordered workqueue, is queued by _vblank_handle(), and is flushed at
+ * vkms_atomic_crtc_destroy_state().
+ */
+void vkms_composer_worker(struct work_struct *work)
+{
+ struct vkms_crtc_state *crtc_state = container_of(work,
+ struct vkms_crtc_state,
+ composer_work);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ struct vkms_composer *primary_composer = NULL;
+ struct vkms_composer *cursor_composer = NULL;
+ u32 crc32 = 0;
+ u64 frame_start, frame_end;
+ bool crc_pending;
+
+ spin_lock_irq(&out->composer_lock);
+ frame_start = crtc_state->frame_start;
+ frame_end = crtc_state->frame_end;
+ crc_pending = crtc_state->crc_pending;
+ crtc_state->frame_start = 0;
+ crtc_state->frame_end = 0;
+ crtc_state->crc_pending = false;
+ spin_unlock_irq(&out->composer_lock);
+
+ /*
+ * We raced with the vblank hrtimer and previous work already computed
+ * the crc, nothing to do.
+ */
+ if (!crc_pending)
+ return;
+
+ if (crtc_state->num_active_planes >= 1)
+ primary_composer = crtc_state->active_planes[0]->composer;
+
+ if (crtc_state->num_active_planes == 2)
+ cursor_composer = crtc_state->active_planes[1]->composer;
+
+ if (primary_composer)
+ crc32 = _vkms_get_crc(primary_composer, cursor_composer);
+
+ /*
+ * The worker can fall behind the vblank hrtimer; make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+}
+
+static const char * const pipe_crc_sources[] = {"auto"};
+
+const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+static int vkms_crc_parse_source(const char *src_name, bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (vkms_crc_parse_source(src_name, &enabled) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
+
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ bool enabled = false;
+ int ret = 0;
+
+ ret = vkms_crc_parse_source(src_name, &enabled);
+
+ spin_lock_irq(&out->lock);
+ out->composer_enabled = enabled;
+ spin_unlock_irq(&out->lock);
+
+ return ret;
+}
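
compute_crc() above hashes only the visible source rectangle, one 32-bit XRGB pixel at a time, and the resulting value is what userspace CRC consumers (e.g. IGT) read back through the CRC debugfs interface. A minimal sketch of the crc32_le() accumulation idiom over a linear buffer (names are illustrative):

#include <linux/crc32.h>

/* crc32_le() is a running accumulator: feed it chunk by chunk and carry
 * the intermediate value forward. */
static u32 sketch_frame_crc(const u8 *vaddr, int pitch, int cpp,
			    int width, int height)
{
	u32 crc = 0;
	int x, y;

	for (y = 0; y < height; y++)
		for (x = 0; x < width; x++)
			crc = crc32_le(crc, vaddr + y * pitch + x * cpp,
				       sizeof(u32));
	return crc;
}
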
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
deleted file mode 100644
index e66ff25c008e..000000000000
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ /dev/null
@@ -1,272 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-
-#include "vkms_drv.h"
-#include <linux/crc32.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-/**
- * compute_crc - Compute CRC value on output frame
- *
- * @vaddr_out: address to final framebuffer
- * @crc_out: framebuffer's metadata
- *
- * returns CRC value computed using crc32 on the visible portion of
- * the final framebuffer at vaddr_out
- */
-static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out)
-{
- int i, j, src_offset;
- int x_src = crc_out->src.x1 >> 16;
- int y_src = crc_out->src.y1 >> 16;
- int h_src = drm_rect_height(&crc_out->src) >> 16;
- int w_src = drm_rect_width(&crc_out->src) >> 16;
- u32 crc = 0;
-
- for (i = y_src; i < y_src + h_src; ++i) {
- for (j = x_src; j < x_src + w_src; ++j) {
- src_offset = crc_out->offset
- + (i * crc_out->pitch)
- + (j * crc_out->cpp);
- /* XRGB format ignores Alpha channel */
- memset(vaddr_out + src_offset + 24, 0, 8);
- crc = crc32_le(crc, vaddr_out + src_offset,
- sizeof(u32));
- }
- }
-
- return crc;
-}
-
-/**
- * blend - belnd value at vaddr_src with value at vaddr_dst
- * @vaddr_dst: destination address
- * @vaddr_src: source address
- * @crc_dst: destination framebuffer's metadata
- * @crc_src: source framebuffer's metadata
- *
- * Blend value at vaddr_src with value at vaddr_dst.
- * Currently, this function write value at vaddr_src on value
- * at vaddr_dst using buffer's metadata to locate the new values
- * from vaddr_src and their distenation at vaddr_dst.
- *
- * Todo: Use the alpha value to blend vaddr_src with vaddr_dst
- * instead of overwriting it.
- */
-static void blend(void *vaddr_dst, void *vaddr_src,
- struct vkms_crc_data *crc_dst,
- struct vkms_crc_data *crc_src)
-{
- int i, j, j_dst, i_dst;
- int offset_src, offset_dst;
-
- int x_src = crc_src->src.x1 >> 16;
- int y_src = crc_src->src.y1 >> 16;
-
- int x_dst = crc_src->dst.x1;
- int y_dst = crc_src->dst.y1;
- int h_dst = drm_rect_height(&crc_src->dst);
- int w_dst = drm_rect_width(&crc_src->dst);
-
- int y_limit = y_src + h_dst;
- int x_limit = x_src + w_dst;
-
- for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
- for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
- offset_dst = crc_dst->offset
- + (i_dst * crc_dst->pitch)
- + (j_dst++ * crc_dst->cpp);
- offset_src = crc_src->offset
- + (i * crc_src->pitch)
- + (j * crc_src->cpp);
-
- memcpy(vaddr_dst + offset_dst,
- vaddr_src + offset_src, sizeof(u32));
- }
- i_dst++;
- }
-}
-
-static void compose_cursor(struct vkms_crc_data *cursor_crc,
- struct vkms_crc_data *primary_crc, void *vaddr_out)
-{
- struct drm_gem_object *cursor_obj;
- struct vkms_gem_object *cursor_vkms_obj;
-
- cursor_obj = drm_gem_fb_get_obj(&cursor_crc->fb, 0);
- cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
-
- mutex_lock(&cursor_vkms_obj->pages_lock);
- if (!cursor_vkms_obj->vaddr) {
- DRM_WARN("cursor plane vaddr is NULL");
- goto out;
- }
-
- blend(vaddr_out, cursor_vkms_obj->vaddr, primary_crc, cursor_crc);
-
-out:
- mutex_unlock(&cursor_vkms_obj->pages_lock);
-}
-
-static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc,
- struct vkms_crc_data *cursor_crc)
-{
- struct drm_framebuffer *fb = &primary_crc->fb;
- struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
- void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
- u32 crc = 0;
-
- if (!vaddr_out) {
- DRM_ERROR("Failed to allocate memory for output frame.");
- return 0;
- }
-
- mutex_lock(&vkms_obj->pages_lock);
- if (WARN_ON(!vkms_obj->vaddr)) {
- mutex_unlock(&vkms_obj->pages_lock);
- kfree(vaddr_out);
- return crc;
- }
-
- memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
- mutex_unlock(&vkms_obj->pages_lock);
-
- if (cursor_crc)
- compose_cursor(cursor_crc, primary_crc, vaddr_out);
-
- crc = compute_crc(vaddr_out, primary_crc);
-
- kfree(vaddr_out);
-
- return crc;
-}
-
-/**
- * vkms_crc_work_handle - ordered work_struct to compute CRC
- *
- * @work: work_struct
- *
- * Work handler for computing CRCs. work_struct scheduled in
- * an ordered workqueue that's periodically scheduled to run by
- * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
- */
-void vkms_crc_work_handle(struct work_struct *work)
-{
- struct vkms_crtc_state *crtc_state = container_of(work,
- struct vkms_crtc_state,
- crc_work);
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- struct vkms_device *vdev = container_of(out, struct vkms_device,
- output);
- struct vkms_crc_data *primary_crc = NULL;
- struct vkms_crc_data *cursor_crc = NULL;
- struct drm_plane *plane;
- u32 crc32 = 0;
- u64 frame_start, frame_end;
- unsigned long flags;
-
- spin_lock_irqsave(&out->state_lock, flags);
- frame_start = crtc_state->frame_start;
- frame_end = crtc_state->frame_end;
- spin_unlock_irqrestore(&out->state_lock, flags);
-
- /* _vblank_handle() hasn't updated frame_start yet */
- if (!frame_start || frame_start == frame_end)
- goto out;
-
- drm_for_each_plane(plane, &vdev->drm) {
- struct vkms_plane_state *vplane_state;
- struct vkms_crc_data *crc_data;
-
- vplane_state = to_vkms_plane_state(plane->state);
- crc_data = vplane_state->crc_data;
-
- if (drm_framebuffer_read_refcount(&crc_data->fb) == 0)
- continue;
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- primary_crc = crc_data;
- else
- cursor_crc = crc_data;
- }
-
- if (primary_crc)
- crc32 = _vkms_get_crc(primary_crc, cursor_crc);
-
- frame_end = drm_crtc_accurate_vblank_count(crtc);
-
- /* queue_work can fail to schedule crc_work; add crc for
- * missing frames
- */
- while (frame_start <= frame_end)
- drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
-
-out:
- /* to avoid using the same value for frame number again */
- spin_lock_irqsave(&out->state_lock, flags);
- crtc_state->frame_end = frame_end;
- crtc_state->frame_start = 0;
- spin_unlock_irqrestore(&out->state_lock, flags);
-}
-
-static const char * const pipe_crc_sources[] = {"auto"};
-
-const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
- size_t *count)
-{
- *count = ARRAY_SIZE(pipe_crc_sources);
- return pipe_crc_sources;
-}
-
-static int vkms_crc_parse_source(const char *src_name, bool *enabled)
-{
- int ret = 0;
-
- if (!src_name) {
- *enabled = false;
- } else if (strcmp(src_name, "auto") == 0) {
- *enabled = true;
- } else {
- *enabled = false;
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
- size_t *values_cnt)
-{
- bool enabled;
-
- if (vkms_crc_parse_source(src_name, &enabled) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
- return -EINVAL;
- }
-
- *values_cnt = 1;
-
- return 0;
-}
-
-int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
-{
- struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- bool enabled = false;
- unsigned long flags;
- int ret = 0;
-
- ret = vkms_crc_parse_source(src_name, &enabled);
-
- /* make sure nothing is scheduled on crtc workq */
- flush_workqueue(out->crc_workq);
-
- spin_lock_irqsave(&out->lock, flags);
- out->crc_enabled = enabled;
- spin_unlock_irqrestore(&out->lock, flags);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 4d11292bc6f3..74f703b8d22a 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,46 +1,54 @@
// SPDX-License-Identifier: GPL-2.0+
-#include "vkms_drv.h"
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
struct vkms_output *output = container_of(timer, struct vkms_output,
vblank_hrtimer);
struct drm_crtc *crtc = &output->crtc;
- struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
+ struct vkms_crtc_state *state;
u64 ret_overrun;
bool ret;
- spin_lock(&output->lock);
-
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
- if (state && output->crc_enabled) {
+ state = output->composer_state;
+ spin_unlock(&output->lock);
+
+ if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
- /* update frame_start only if a queued vkms_crc_work_handle()
+ /* update frame_start only if a queued vkms_composer_worker()
* has read the data
*/
- spin_lock(&output->state_lock);
- if (!state->frame_start)
+ spin_lock(&output->composer_lock);
+ if (!state->crc_pending)
state->frame_start = frame;
- spin_unlock(&output->state_lock);
+ else
+ DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+ state->frame_start, frame);
+ state->frame_end = frame;
+ state->crc_pending = true;
+ spin_unlock(&output->composer_lock);
- ret = queue_work(output->crc_workq, &state->crc_work);
+ ret = queue_work(output->composer_workq, &state->composer_work);
if (!ret)
- DRM_WARN("failed to queue vkms_crc_work_handle");
+ DRM_DEBUG_DRIVER("Composer worker already queued\n");
}
- spin_unlock(&output->lock);
-
return HRTIMER_RESTART;
}
@@ -76,7 +84,7 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- *vblank_time = output->vblank_hrtimer.node.expires;
+ *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
return true;
@@ -107,7 +115,7 @@ vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
- INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+ INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
return &vkms_state->base;
}
@@ -119,10 +127,9 @@ static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
__drm_atomic_helper_crtc_destroy_state(state);
- if (vkms_state) {
- flush_work(&vkms_state->crc_work);
- kfree(vkms_state);
- }
+ WARN_ON(work_pending(&vkms_state->composer_work));
+ kfree(vkms_state->active_planes);
+ kfree(vkms_state);
}
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
@@ -135,7 +142,7 @@ static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
if (vkms_state)
- INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+ INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}
static const struct drm_crtc_funcs vkms_crtc_funcs = {
@@ -152,6 +159,52 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.verify_crc_source = vkms_verify_crc_source,
};
+static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i = 0, ret;
+
+ if (vkms_state->active_planes)
+ return 0;
+
+ ret = drm_atomic_add_affected_planes(state->state, crtc);
+ if (ret < 0)
+ return ret;
+
+ drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(state->state,
+ plane);
+ WARN_ON(!plane_state);
+
+ if (!plane_state->visible)
+ continue;
+
+ i++;
+ }
+
+ vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ if (!vkms_state->active_planes)
+ return -ENOMEM;
+ vkms_state->num_active_planes = i;
+
+ i = 0;
+ drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(state->state,
+ plane);
+
+ if (!plane_state->visible)
+ continue;
+
+ vkms_state->active_planes[i++] =
+ to_vkms_plane_state(plane_state);
+ }
+
+ return 0;
+}
+
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -170,7 +223,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
/* This lock is held across the atomic commit to block vblank timer
- * from scheduling vkms_crc_work_handle until the crc_data is updated
+ * from scheduling vkms_composer_worker until the composer is updated
*/
spin_lock_irq(&vkms_output->lock);
}
@@ -179,25 +232,27 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
- unsigned long flags;
if (crtc->state->event) {
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ spin_lock(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ spin_unlock(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
+ vkms_output->composer_state = to_vkms_crtc_state(crtc->state);
+
spin_unlock_irq(&vkms_output->lock);
}
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_check = vkms_crtc_atomic_check,
.atomic_begin = vkms_crtc_atomic_begin,
.atomic_flush = vkms_crtc_atomic_flush,
.atomic_enable = vkms_crtc_atomic_enable,
@@ -220,10 +275,10 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
spin_lock_init(&vkms_out->lock);
- spin_lock_init(&vkms_out->state_lock);
+ spin_lock_init(&vkms_out->composer_lock);
- vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
- if (!vkms_out->crc_workq)
+ vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
+ if (!vkms_out->composer_workq)
return -ENOMEM;
return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 738dd6206d85..25bd7519295f 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -3,18 +3,27 @@
/**
* DOC: vkms (Virtual Kernel Modesetting)
*
- * vkms is a software-only model of a kms driver that is useful for testing,
- * or for running X (or similar) on headless machines and be able to still
- * use the GPU. vkms aims to enable a virtual display without the need for
- * a hardware display capability.
+ * VKMS is a software-only model of a KMS driver that is useful for testing
+ * and for running X (or similar) on headless machines. VKMS aims to enable
+ * a virtual display without the need for a hardware display capability, so
+ * that DRM API tests do not have to occupy a real GPU.
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
#include <drm/drm_gem.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "vkms_drv.h"
#define DRIVER_NAME "vkms"
@@ -55,7 +64,36 @@ static void vkms_release(struct drm_device *dev)
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
drm_dev_fini(&vkms->drm);
- destroy_workqueue(vkms->output.crc_workq);
+ destroy_workqueue(vkms->output.composer_workq);
+}
+
+static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_fake_vblank(old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
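+ /*
+  * Wait for any queued composer work so it does not run against plane
+  * state that is about to be cleaned up.
+  */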
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct vkms_crtc_state *vkms_state =
+ to_vkms_crtc_state(old_crtc_state);
+
+ flush_work(&vkms_state->composer_work);
+ }
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
}
static struct drm_driver vkms_driver = {
@@ -66,6 +104,8 @@ static struct drm_driver vkms_driver = {
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = vkms_prime_import_sg_table,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -80,6 +120,10 @@ static const struct drm_mode_config_funcs vkms_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
+static const struct drm_mode_config_helper_funcs vkms_mode_config_helpers = {
+ .atomic_commit_tail = vkms_atomic_commit_tail,
+};
+
static int vkms_modeset_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
@@ -91,8 +135,9 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_width = XRES_MAX;
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.preferred_depth = 24;
+ dev->mode_config.helper_private = &vkms_mode_config_helpers;
- return vkms_output_init(vkmsdev);
+ return vkms_output_init(vkmsdev, 0);
}
static int __init vkms_init(void)
@@ -115,6 +160,14 @@ static int __init vkms_init(void)
if (ret)
goto out_unregister;
+ ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+ DMA_BIT_MASK(64));
+
+ if (ret) {
+ DRM_ERROR("Could not initialize DMA support\n");
+ goto out_fini;
+ }
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index b92c30c66a6f..7d52e24564db 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -3,11 +3,11 @@
#ifndef _VKMS_DRV_H_
#define _VKMS_DRV_H_
-#include <drm/drmP.h>
+#include <linux/hrtimer.h>
+
#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_encoder.h>
-#include <linux/hrtimer.h>
#define XRES_MIN 20
#define YRES_MIN 20
@@ -20,7 +20,7 @@
extern bool enable_cursor;
-struct vkms_crc_data {
+struct vkms_composer {
struct drm_framebuffer fb;
struct drm_rect src, dst;
unsigned int offset;
@@ -31,23 +31,30 @@ struct vkms_crc_data {
/**
* vkms_plane_state - Driver specific plane state
* @base: base plane state
- * @crc_data: data required for CRC computation
+ * @composer: data required for composing and CRC computation
*/
struct vkms_plane_state {
struct drm_plane_state base;
- struct vkms_crc_data *crc_data;
+ struct vkms_composer *composer;
};
/**
* vkms_crtc_state - Driver specific CRTC state
* @base: base CRTC state
- * @crc_work: work struct to compute and add CRC entries
+ * @composer_work: work struct to compose and add CRC entries
 * @frame_start: start frame number for computed CRC
 * @frame_end: end frame number for computed CRC
*/
struct vkms_crtc_state {
struct drm_crtc_state base;
- struct work_struct crc_work;
+ struct work_struct composer_work;
+
+ int num_active_planes;
+ /* stack of active planes for crc computation, should be in z order */
+ struct vkms_plane_state **active_planes;
+
+ /* below three are protected by vkms_output.composer_lock */
+ bool crc_pending;
u64 frame_start;
u64 frame_end;
};
@@ -59,13 +66,16 @@ struct vkms_output {
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
- bool crc_enabled;
- /* ordered wq for crc_work */
- struct workqueue_struct *crc_workq;
- /* protects concurrent access to crc_data */
+ /* ordered wq for composer_work */
+ struct workqueue_struct *composer_workq;
+ /* protects concurrent access to composer */
spinlock_t lock;
- /* protects concurrent access to crtc_state */
- spinlock_t state_lock;
+
+ /* protected by @lock */
+ bool composer_enabled;
+ struct vkms_crtc_state *composer_state;
+
+ spinlock_t composer_lock;
};
struct vkms_device {
@@ -105,10 +115,10 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
int *max_error, ktime_t *vblank_time,
bool in_vblank_irq);
-int vkms_output_init(struct vkms_device *vkmsdev);
+int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type);
+ enum drm_plane_type type, int index);
/* Gem stuff */
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
@@ -127,12 +137,20 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* Prime */
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
-void vkms_crc_work_handle(struct work_struct *work);
+
+/* Composer Support */
+void vkms_composer_worker(struct work_struct *work);
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 69048e73377d..2e01186fb943 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
+#include <drm/drm_prime.h>
#include "vkms_drv.h"
@@ -217,3 +220,28 @@ out:
mutex_unlock(&vkms_obj->pages_lock);
return ret;
}
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct vkms_gem_object *obj;
+ int npages;
+
+ obj = __vkms_gem_create(dev, attach->dmabuf->size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+ DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!obj->pages) {
+ vkms_gem_free_object(&obj->gem);
+ return ERR_PTR(-ENOMEM);
+ }
+
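+ /* Populate the page array from the imported sg table */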
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+ return &obj->gem;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 56fb5c2a2315..fb1941a6522c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -35,7 +35,7 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
.get_modes = vkms_conn_get_modes,
};
-int vkms_output_init(struct vkms_device *vkmsdev)
+int vkms_output_init(struct vkms_device *vkmsdev, int index)
{
struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
@@ -45,12 +45,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
struct drm_plane *primary, *cursor = NULL;
int ret;
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
if (enable_cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
goto err_cursor;
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 0fceb6258422..5fc8f85aaf3d 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
-#include "vkms_drv.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "vkms_drv.h"
static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
@@ -18,20 +20,20 @@ static struct drm_plane_state *
vkms_plane_duplicate_state(struct drm_plane *plane)
{
struct vkms_plane_state *vkms_state;
- struct vkms_crc_data *crc_data;
+ struct vkms_composer *composer;
vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
if (!vkms_state)
return NULL;
- crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
- if (!crc_data) {
- DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+ composer = kzalloc(sizeof(*composer), GFP_KERNEL);
+ if (!composer) {
+ DRM_DEBUG_KMS("Couldn't allocate composer\n");
kfree(vkms_state);
return NULL;
}
- vkms_state->crc_data = crc_data;
+ vkms_state->composer = composer;
__drm_atomic_helper_plane_duplicate_state(plane,
&vkms_state->base);
@@ -49,12 +51,12 @@ static void vkms_plane_destroy_state(struct drm_plane *plane,
/* dropping the reference we acquired in
* vkms_primary_plane_update()
*/
- if (drm_framebuffer_read_refcount(&vkms_state->crc_data->fb))
- drm_framebuffer_put(&vkms_state->crc_data->fb);
+ if (drm_framebuffer_read_refcount(&vkms_state->composer->fb))
+ drm_framebuffer_put(&vkms_state->composer->fb);
}
- kfree(vkms_state->crc_data);
- vkms_state->crc_data = NULL;
+ kfree(vkms_state->composer);
+ vkms_state->composer = NULL;
__drm_atomic_helper_plane_destroy_state(old_state);
kfree(vkms_state);
@@ -91,21 +93,21 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
{
struct vkms_plane_state *vkms_plane_state;
struct drm_framebuffer *fb = plane->state->fb;
- struct vkms_crc_data *crc_data;
+ struct vkms_composer *composer;
if (!plane->state->crtc || !fb)
return;
vkms_plane_state = to_vkms_plane_state(plane->state);
- crc_data = vkms_plane_state->crc_data;
- memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect));
- memcpy(&crc_data->dst, &plane->state->dst, sizeof(struct drm_rect));
- memcpy(&crc_data->fb, fb, sizeof(struct drm_framebuffer));
- drm_framebuffer_get(&crc_data->fb);
- crc_data->offset = fb->offsets[0];
- crc_data->pitch = fb->pitches[0];
- crc_data->cpp = fb->format->cpp[0];
+ composer = vkms_plane_state->composer;
+ memcpy(&composer->src, &plane->state->src, sizeof(struct drm_rect));
+ memcpy(&composer->dst, &plane->state->dst, sizeof(struct drm_rect));
+ memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer));
+ drm_framebuffer_get(&composer->fb);
+ composer->offset = fb->offsets[0];
+ composer->pitch = fb->pitches[0];
+ composer->cpp = fb->format->cpp[0];
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -176,7 +178,7 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
};
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type)
+ enum drm_plane_type type, int index)
{
struct drm_device *dev = &vkmsdev->drm;
const struct drm_plane_helper_funcs *funcs;
@@ -198,7 +200,7 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
funcs = &vkms_primary_helper_funcs;
}
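+ /* Use the possible_crtcs bitmask to bind this plane to the CRTC with the same index */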
- ret = drm_universal_plane_init(dev, plane, 0,
+ ret = drm_universal_plane_init(dev, plane, 1 << index,
&vkms_plane_funcs,
formats, nformats,
NULL, type, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6b28a326f8bb..15acdf2a7c0f 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -8,6 +8,7 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
select FB
+ select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 8841bd30e1e5..c877a21a0739 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
- vmwgfx_validation.o \
+ vmwgfx_validation.o vmwgfx_page_dirty.o \
ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index f2bfd3d80598..61414f105c67 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1280,7 +1280,6 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
return offset;
}
-
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
surf_size_struct baseLevelSize,
@@ -1375,4 +1374,236 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
return svga3dsurface_is_dx_screen_target_format(format);
}
+/**
+ * struct svga3dsurface_mip - Mipmap level information
+ * @bytes: Bytes required in the backing store of this mipmap level.
+ * @img_stride: Byte stride per image.
+ * @row_stride: Byte stride per block row.
+ * @size: The size of the mipmap.
+ */
+struct svga3dsurface_mip {
+ size_t bytes;
+ size_t img_stride;
+ size_t row_stride;
+ struct drm_vmw_size size;
+
+};
+
+/**
+ * struct svga3dsurface_cache - Cached surface information
+ * @desc: Pointer to the surface descriptor
+ * @mip: Array of mipmap level information. Valid size is @num_mip_levels.
+ * @mip_chain_bytes: Bytes required in the backing store for the whole chain
+ * of mip levels.
+ * @sheet_bytes: Bytes required in the backing store for a sheet
+ * representing a single sample.
+ * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in
+ * a chain.
+ * @num_layers: Number of slices in an array texture or number of faces in
+ * a cubemap texture.
+ */
+struct svga3dsurface_cache {
+ const struct svga3d_surface_desc *desc;
+ struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS];
+ size_t mip_chain_bytes;
+ size_t sheet_bytes;
+ u32 num_mip_levels;
+ u32 num_layers;
+};
+
+/**
+ * struct svga3dsurface_loc - Surface location
+ * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
+ * mip_level.
+ * @x: X coordinate.
+ * @y: Y coordinate.
+ * @z: Z coordinate.
+ */
+struct svga3dsurface_loc {
+ u32 sub_resource;
+ u32 x, y, z;
+};
+
+/**
+ * svga3dsurface_subres - Compute the subresource from layer and mipmap.
+ * @cache: Surface layout data.
+ * @mip_level: The mipmap level.
+ * @layer: The surface layer (face or array slice).
+ *
+ * Return: The subresource.
+ */
+static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache,
+ u32 mip_level, u32 layer)
+{
+ return cache->num_mip_levels * layer + mip_level;
+}
+
+/**
+ * svga3dsurface_setup_cache - Build a surface cache entry
+ * @size: The surface base level dimensions.
+ * @format: The surface format.
+ * @num_mip_levels: Number of mipmap levels.
+ * @num_layers: Number of layers.
+ * @cache: Pointer to a struct svga3dsurface_cach object to be filled in.
+ *
+ * Return: Zero on success, -EINVAL on invalid surface layout.
+ */
+static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size,
+ SVGA3dSurfaceFormat format,
+ u32 num_mip_levels,
+ u32 num_layers,
+ u32 num_samples,
+ struct svga3dsurface_cache *cache)
+{
+ const struct svga3d_surface_desc *desc;
+ u32 i;
+
+ memset(cache, 0, sizeof(*cache));
+ cache->desc = desc = svga3dsurface_get_desc(format);
+ cache->num_mip_levels = num_mip_levels;
+ cache->num_layers = num_layers;
+ for (i = 0; i < cache->num_mip_levels; i++) {
+ struct svga3dsurface_mip *mip = &cache->mip[i];
+
+ mip->size = svga3dsurface_get_mip_size(*size, i);
+ mip->bytes = svga3dsurface_get_image_buffer_size
+ (desc, &mip->size, 0);
+ mip->row_stride =
+ __KERNEL_DIV_ROUND_UP(mip->size.width,
+ desc->block_size.width) *
+ desc->bytes_per_block * num_samples;
+ if (!mip->row_stride)
+ goto invalid_dim;
+
+ mip->img_stride =
+ __KERNEL_DIV_ROUND_UP(mip->size.height,
+ desc->block_size.height) *
+ mip->row_stride;
+ if (!mip->img_stride)
+ goto invalid_dim;
+
+ cache->mip_chain_bytes += mip->bytes;
+ }
+ cache->sheet_bytes = cache->mip_chain_bytes * num_layers;
+ if (!cache->sheet_bytes)
+ goto invalid_dim;
+
+ return 0;
+
+invalid_dim:
+ VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n");
+ return -EINVAL;
+}
+
+/**
+ * svga3dsurface_get_loc - Get a surface location from an offset into the
+ * backing store
+ * @cache: Surface layout data.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ * @offset: Offset into the surface backing store.
+ */
+static inline void
+svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
+ struct svga3dsurface_loc *loc,
+ size_t offset)
+{
+ const struct svga3dsurface_mip *mip = &cache->mip[0];
+ const struct svga3d_surface_desc *desc = cache->desc;
+ u32 layer;
+ int i;
+
+ if (offset >= cache->sheet_bytes)
+ offset %= cache->sheet_bytes;
+
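+ /* Split the offset into a layer, a mip level and a remaining offset within that mip image */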
+ layer = offset / cache->mip_chain_bytes;
+ offset -= layer * cache->mip_chain_bytes;
+ for (i = 0; i < cache->num_mip_levels; ++i, ++mip) {
+ if (mip->bytes > offset)
+ break;
+ offset -= mip->bytes;
+ }
+
+ loc->sub_resource = svga3dsurface_subres(cache, i, layer);
+ loc->z = offset / mip->img_stride;
+ offset -= loc->z * mip->img_stride;
+ loc->z *= desc->block_size.depth;
+ loc->y = offset / mip->row_stride;
+ offset -= loc->y * mip->row_stride;
+ loc->y *= desc->block_size.height;
+ loc->x = offset / desc->bytes_per_block;
+ loc->x *= desc->block_size.width;
+}
+
+/**
+ * svga3dsurface_inc_loc - Clamp-increment a surface location by one block
+ * size in each dimension.
+ * @cache: Surface layout data.
+ * @loc: Pointer to a struct svga3dsurface_loc to be incremented.
+ *
+ * When computing the size of a range as size = end - start, the range does not
+ * include the end element. However, a location representing the last byte
+ * of a touched region in the backing store *is* included in the range.
+ * This function modifies such a location to match the end definition
+ * given as start + size, which is the one used in an SVGA3dBox.
+ */
+static inline void
+svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache,
+ struct svga3dsurface_loc *loc)
+{
+ const struct svga3d_surface_desc *desc = cache->desc;
+ u32 mip = loc->sub_resource % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+
+ loc->sub_resource++;
+ loc->x += desc->block_size.width;
+ if (loc->x > size->width)
+ loc->x = size->width;
+ loc->y += desc->block_size.height;
+ if (loc->y > size->height)
+ loc->y = size->height;
+ loc->z += desc->block_size.depth;
+ if (loc->z > size->depth)
+ loc->z = size->depth;
+}
+
+/**
+ * svga3dsurface_min_loc - The start location in a subresource
+ * @cache: Surface layout data.
+ * @sub_resource: The subresource.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ */
+static inline void
+svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
+ u32 sub_resource,
+ struct svga3dsurface_loc *loc)
+{
+ loc->sub_resource = sub_resource;
+ loc->x = loc->y = loc->z = 0;
+}
+
+/**
+ * svga3dsurface_max_loc - The end location in a subresource
+ * @cache: Surface layout data.
+ * @sub_resource: The subresource.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ *
+ * Following the end definition given in svga3dsurface_inc_loc(),
+ * compute the end location of a surface subresource.
+ */
+static inline void
+svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
+ u32 sub_resource,
+ struct svga3dsurface_loc *loc)
+{
+ const struct drm_vmw_size *size;
+ u32 mip;
+
+ loc->sub_resource = sub_resource + 1;
+ mip = sub_resource % cache->num_mip_levels;
+ size = &cache->mip[mip].size;
+ loc->x = size->width;
+ loc->y = size->height;
+ loc->z = size->depth;
+}
+
#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
index 16b2083cb9d4..5971c72e6d10 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -29,7 +29,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
@@ -49,8 +48,6 @@ void ttm_lock_init(struct ttm_lock *lock)
init_waitqueue_head(&lock->queue);
lock->rw = 0;
lock->flags = 0;
- lock->kill_takers = false;
- lock->signal = SIGKILL;
}
void ttm_read_unlock(struct ttm_lock *lock)
@@ -66,11 +63,6 @@ static bool __ttm_read_lock(struct ttm_lock *lock)
bool locked = false;
spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
locked = true;
@@ -98,11 +90,6 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
*locked = false;
spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
block = false;
@@ -147,11 +134,6 @@ static bool __ttm_write_lock(struct ttm_lock *lock)
bool locked = false;
spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
lock->rw = -1;
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
@@ -182,88 +164,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
return ret;
}
-static int __ttm_vt_unlock(struct ttm_lock *lock)
-{
- int ret = 0;
-
- spin_lock(&lock->lock);
- if (unlikely(!(lock->flags & TTM_VT_LOCK)))
- ret = -EINVAL;
- lock->flags &= ~TTM_VT_LOCK;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-
- return ret;
-}
-
-static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
- int ret;
-
- *p_base = NULL;
- ret = __ttm_vt_unlock(lock);
- BUG_ON(ret != 0);
-}
-
-static bool __ttm_vt_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0) {
- lock->flags &= ~TTM_VT_LOCK_PENDING;
- lock->flags |= TTM_VT_LOCK;
- locked = true;
- } else {
- lock->flags |= TTM_VT_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_vt_lock(struct ttm_lock *lock,
- bool interruptible,
- struct ttm_object_file *tfile)
-{
- int ret = 0;
-
- if (interruptible) {
- ret = wait_event_interruptible(lock->queue,
- __ttm_vt_lock(lock));
- if (unlikely(ret != 0)) {
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_VT_LOCK_PENDING;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
- return ret;
- }
- } else
- wait_event(lock->queue, __ttm_vt_lock(lock));
-
- /*
- * Add a base-object, the destructor of which will
- * make sure the lock is released if the client dies
- * while holding it.
- */
-
- ret = ttm_base_object_init(tfile, &lock->base, false,
- ttm_lock_type, &ttm_vt_lock_remove, NULL);
- if (ret)
- (void)__ttm_vt_unlock(lock);
- else
- lock->vt_holder = tfile;
-
- return ret;
-}
-
-int ttm_vt_unlock(struct ttm_lock *lock)
-{
- return ttm_ref_object_base_unref(lock->vt_holder,
- lock->base.handle, TTM_REF_USAGE);
-}
-
void ttm_suspend_unlock(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
index 0c3af9836863..af8b28ca546f 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -49,8 +49,8 @@
#ifndef _TTM_LOCK_H_
#define _TTM_LOCK_H_
-#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/wait.h>
#include "ttm_object.h"
@@ -63,8 +63,6 @@
* @lock: Spinlock protecting some lock members.
* @rw: Read-write lock counter. Protected by @lock.
* @flags: Lock state. Protected by @lock.
- * @kill_takers: Boolean whether to kill takers of the lock.
- * @signal: Signal to send when kill_takers is true.
*/
struct ttm_lock {
@@ -73,9 +71,6 @@ struct ttm_lock {
spinlock_t lock;
int32_t rw;
uint32_t flags;
- bool kill_takers;
- int signal;
- struct ttm_object_file *vt_holder;
};
@@ -220,29 +215,4 @@ extern void ttm_write_unlock(struct ttm_lock *lock);
*/
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-/**
- * ttm_lock_set_kill
- *
- * @lock: Pointer to a struct ttm_lock
- * @val: Boolean whether to kill processes taking the lock.
- * @signal: Signal to send to the process taking the lock.
- *
- * The kill-when-taking-lock functionality is used to kill processes that keep
- * on using the TTM functionality when its resources has been taken down, for
- * example when the X server exits. A typical sequence would look like this:
- * - X server takes lock in write mode.
- * - ttm_lock_set_kill() is called with @val set to true.
- * - As part of X server exit, TTM resources are taken down.
- * - X server releases the lock on file release.
- * - Another dri client wants to render, takes the lock and is killed.
- *
- */
-static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
- int signal)
-{
- lock->kill_takers = val;
- if (val)
- lock->signal = signal;
-}
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 50d26c7ff42d..ede26df87c93 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -37,11 +37,12 @@
#ifndef _TTM_OBJECT_H_
#define _TTM_OBJECT_H_
-#include <linux/list.h>
-#include <drm/drm_hashtab.h>
+#include <linux/dma-buf.h>
#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/rcupdate.h>
-#include <linux/dma-buf.h>
+
+#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_memory.h>
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index f6ab79d23923..cd9805c045cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -27,9 +27,10 @@
#ifndef _VMWGFX_BINDING_H_
#define _VMWGFX_BINDING_H_
-#include "device_include/svga3d_reg.h"
#include <linux/list.h>
+#include "device_include/svga3d_reg.h"
+
#define VMW_MAX_VIEW_BINDINGS 128
struct vmw_private;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index fc6673cde289..bb46ca0c458f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
- lockdep_assert_held(&dst->resv->lock.base);
+ dma_resv_assert_held(dst->base.resv);
if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
- lockdep_assert_held(&src->resv->lock.base);
+ dma_resv_assert_held(src->base.resv);
if (dst->ttm->state == tt_unpopulated) {
ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 5d5c2bce01f3..8b71bf6b58ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -28,7 +28,6 @@
#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "ttm_object.h"
@@ -342,7 +341,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
- lockdep_assert_held(&bo->resv->lock.base);
+ dma_resv_assert_held(bo->base.resv);
if (pin) {
if (vbo->pin_count++ > 0)
@@ -463,6 +462,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+ WARN_ON(vmw_bo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -476,8 +477,11 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+ struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
- vmw_bo_unmap(&vmw_user_bo->vbo);
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_unmap(vbo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -510,8 +514,9 @@ int vmw_bo_init(struct vmw_private *dev_priv,
acc_size = vmw_bo_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
-
- INIT_LIST_HEAD(&vmw_bo->res_list);
+ BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ vmw_bo->base.priority = 3;
+ vmw_bo->res_tree = RB_ROOT;
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
@@ -565,7 +570,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -681,16 +686,16 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- lret = reservation_object_wait_timeout_rcu
- (bo->resv, true, true,
+ lret = dma_resv_wait_timeout_rcu
+ (bo->base.resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
@@ -699,15 +704,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0;
}
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
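+ /* Open-coded synccpu grab: reserve the BO, wait for it to become idle, then take a cpu_writers reference */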
+ ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_wait(bo, true, nonblock);
+ if (likely(ret == 0))
+ atomic_inc(&user_bo->vbo.cpu_writers);
+
+ ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
return ret;
}
@@ -835,7 +847,7 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
goto out_no_bo;
rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
@@ -1007,10 +1019,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- reservation_object_add_excl_fence(bo->resv, &fence->base);
+ dma_resv_add_excl_fence(bo->base.resv, &fence->base);
dma_fence_put(&fence->base);
} else
- reservation_object_add_excl_fence(bo->resv, &fence->base);
+ dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
@@ -1077,7 +1089,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
if (ret != 0)
return -EINVAL;
- *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
vmw_bo_unreference(&out_buf);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 56979e412ca8..065015d2a8f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -25,6 +25,9 @@
*
**************************************************************************/
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+
#include <drm/ttm/ttm_bo_api.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 4ac55fc2bf97..44d858ce4ce7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ kfree(cres);
goto out_invalid_key;
+ }
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 63f111068a44..a56c9d802382 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -88,6 +88,8 @@ static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "guest backed contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_context_create,
@@ -100,6 +102,8 @@ static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "dx contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_context_create,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b4f6e1217c9d..3ca5cf375b01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -116,6 +116,8 @@ static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "context guest backed object tables",
.backup_placement = &vmw_mob_placement,
.create = vmw_cotable_create,
@@ -169,7 +171,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
} *cmd;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- lockdep_assert_held(&bo->resv->lock.base);
+ dma_resv_assert_held(bo->base.resv);
cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
@@ -307,11 +309,11 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
- if (list_empty(&res->mob_head))
+ if (!vmw_resource_mob_attached(res))
return 0;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- lockdep_assert_held(&bo->resv->lock.base);
+ dma_resv_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
if (!vcotbl->scrubbed)
@@ -453,6 +455,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
goto out_wait;
}
+ vmw_resource_mob_detach(res);
res->backup = buf;
res->backup_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
@@ -467,12 +470,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
res->backup = old_buf;
res->backup_size = old_size;
vcotbl->size_read_back = old_size_read_back;
+ vmw_resource_mob_attach(res);
goto out_wait;
}
+ vmw_resource_mob_attach(res);
/* Let go of the old mob. */
- list_del(&res->mob_head);
- list_add_tail(&res->mob_head, &buf->res_list);
vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
@@ -496,7 +499,7 @@ out_wait:
* is called before bind() in the validation sequence is instead used for two
* things.
* 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- * buffer, that is, if @res->mob_head is non-empty.
+ * buffer.
* 2) Resize the cotable if needed.
*/
static int vmw_cotable_create(struct vmw_resource *res)
@@ -512,7 +515,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
new_size *= 2;
if (likely(new_size <= res->backup_size)) {
- if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+ if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9506190a0300..827458f49112 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,17 +24,22 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/module.h>
+
#include <linux/console.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-#include "vmwgfx_binding.h"
-#include "ttm_object.h"
-#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_object.h"
+#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -145,6 +150,9 @@
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
union drm_vmw_gb_surface_reference_ext_arg)
+#define DRM_IOCTL_VMW_MSG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
+ struct drm_vmw_msg_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -160,9 +168,9 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
@@ -177,16 +185,16 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER),
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+ DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_RENDER_ALLOW),
@@ -196,9 +204,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* these allow direct access to the framebuffers mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -216,28 +224,31 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
vmw_gb_surface_define_ext_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
vmw_gb_surface_reference_ext_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_MSG,
+ vmw_msg_ioctl,
+ DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -254,7 +265,6 @@ static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
@@ -572,8 +582,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
else
dev_priv->map_mode = vmw_dma_map_populate;
- /* No TTM coherent page pool? FIXME: Ask TTM instead! */
- if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
(dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
@@ -641,7 +650,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
}
- mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
@@ -765,10 +773,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
- vmw_master_init(&dev_priv->fbdev_master);
- ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
- dev_priv->active_master = &dev_priv->fbdev_master;
-
dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
dev_priv->mmio_size, MEMREMAP_WB);
@@ -828,9 +832,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+ drm_vma_offset_manager_init(&dev_priv->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
+ &dev_priv->vma_manager,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -987,6 +995,7 @@ static void vmw_driver_unload(struct drm_device *dev)
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
(void) ttm_bo_device_release(&dev_priv->bdev);
+ drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1010,18 +1019,7 @@ static void vmw_driver_unload(struct drm_device *dev)
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_fpriv *vmw_fp;
-
- vmw_fp = vmw_fpriv(file_priv);
-
- if (vmw_fp->locked_master) {
- struct vmw_master *vmaster =
- vmw_master(vmw_fp->locked_master);
-
- ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
- ttm_vt_unlock(&vmaster->lock);
- drm_master_put(&vmw_fp->locked_master);
- }
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
@@ -1050,55 +1048,6 @@ out_no_tfile:
return ret;
}
-static struct vmw_master *vmw_master_check(struct drm_device *dev,
- struct drm_file *file_priv,
- unsigned int flags)
-{
- int ret;
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_master *vmaster;
-
- if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
- return NULL;
-
- ret = mutex_lock_interruptible(&dev->master_mutex);
- if (unlikely(ret != 0))
- return ERR_PTR(-ERESTARTSYS);
-
- if (drm_is_current_master(file_priv)) {
- mutex_unlock(&dev->master_mutex);
- return NULL;
- }
-
- /*
- * Check if we were previously master, but now dropped. In that
- * case, allow at least render node functionality.
- */
- if (vmw_fp->locked_master) {
- mutex_unlock(&dev->master_mutex);
-
- if (flags & DRM_RENDER_ALLOW)
- return NULL;
-
- DRM_ERROR("Dropped master trying to access ioctl that "
- "requires authentication.\n");
- return ERR_PTR(-EACCES);
- }
- mutex_unlock(&dev->master_mutex);
-
- /*
- * Take the TTM lock. Possibly sleep waiting for the authenticating
- * master to become master again, or for a SIGTERM if the
- * authenticating master exits.
- */
- vmaster = vmw_master(file_priv->master);
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- vmaster = ERR_PTR(ret);
-
- return vmaster;
-}
-
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg,
long (*ioctl_func)(struct file *, unsigned int,
@@ -1107,9 +1056,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd);
- struct vmw_master *vmaster;
unsigned int flags;
- long ret;
/*
* Do extra checking on driver private ioctls.
@@ -1121,15 +1068,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
&vmw_ioctls[nr - DRM_COMMAND_BASE];
if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
- ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
- goto out_io_encoding;
-
- return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
- _IOC_SIZE(cmd));
+ return ioctl_func(filp, cmd, arg);
} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
if (!drm_is_current_master(file_priv) &&
!capable(CAP_SYS_ADMIN))
@@ -1143,21 +1082,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
- vmaster = vmw_master_check(dev, file_priv, flags);
- if (IS_ERR(vmaster)) {
- ret = PTR_ERR(vmaster);
-
- if (ret != -ERESTARTSYS)
- DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
- nr, ret);
- return ret;
- }
-
- ret = ioctl_func(filp, cmd, arg);
- if (vmaster)
- ttm_read_unlock(&vmaster->lock);
-
- return ret;
+ return ioctl_func(filp, cmd, arg);
out_io_encoding:
DRM_ERROR("Invalid command format, ioctl %d\n",
@@ -1180,69 +1105,10 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
}
#endif
-static void vmw_lastclose(struct drm_device *dev)
-{
-}
-
-static void vmw_master_init(struct vmw_master *vmaster)
-{
- ttm_lock_init(&vmaster->lock);
-}
-
-static int vmw_master_create(struct drm_device *dev,
- struct drm_master *master)
-{
- struct vmw_master *vmaster;
-
- vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
- if (unlikely(!vmaster))
- return -ENOMEM;
-
- vmw_master_init(vmaster);
- ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
- master->driver_priv = vmaster;
-
- return 0;
-}
-
-static void vmw_master_destroy(struct drm_device *dev,
- struct drm_master *master)
-{
- struct vmw_master *vmaster = vmw_master(master);
-
- master->driver_priv = NULL;
- kfree(vmaster);
-}
-
static int vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_master *active = dev_priv->active_master;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret = 0;
-
- if (active) {
- BUG_ON(active != &dev_priv->fbdev_master);
- ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
- if (unlikely(ret != 0))
- return ret;
-
- ttm_lock_set_kill(&active->lock, true, SIGTERM);
- dev_priv->active_master = NULL;
- }
-
- ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
- if (!from_open) {
- ttm_vt_unlock(&vmaster->lock);
- BUG_ON(vmw_fp->locked_master != file_priv->master);
- drm_master_put(&vmw_fp->locked_master);
- }
-
- dev_priv->active_master = vmaster;
-
/*
* Inform a new master that the layout may have changed while
* it was gone.
@@ -1257,31 +1123,10 @@ static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
- /**
- * Make sure the master doesn't disappear while we have
- * it locked.
- */
- vmw_fp->locked_master = drm_master_get(file_priv->master);
- ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_kms_legacy_hotspot_clear(dev_priv);
- if (unlikely((ret != 0))) {
- DRM_ERROR("Unable to lock TTM at VT switch.\n");
- drm_master_put(&vmw_fp->locked_master);
- }
-
- ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-
if (!dev_priv->enable_fb)
vmw_svga_disable(dev_priv);
-
- dev_priv->active_master = &dev_priv->fbdev_master;
- ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
- ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}
/**
@@ -1372,8 +1217,10 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unregister(dev);
+ vmw_driver_unload(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
- drm_put_dev(dev);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1551,17 +1398,12 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .load = vmw_driver_load,
- .unload = vmw_driver_unload,
- .lastclose = vmw_lastclose,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
- .master_create = vmw_master_create,
- .master_destroy = vmw_master_destroy,
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
@@ -1595,7 +1437,39 @@ static struct pci_driver vmw_pci_driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_pci_dev(pdev, ent, &driver);
+ struct drm_device *dev;
+ int ret;
+
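+ /* Replaces drm_get_pci_dev(): enable the PCI device, then allocate, load and register the DRM device */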
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = vmw_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_vmw_driver_unload;
+
+ return 0;
+
+err_vmw_driver_unload:
+ vmw_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 366dcfc1f9bb..86b69397d166 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -28,25 +28,37 @@
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
-#include "vmwgfx_validation.h"
-#include "vmwgfx_reg.h"
-#include <drm/drmP.h>
-#include <drm/vmwgfx_drm.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_auth.h>
#include <linux/suspend.h>
+#include <linux/sync_file.h>
+
+#include <drm/drm_auth.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_hashtab.h>
+#include <drm/drm_rect.h>
+
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
-#include "vmwgfx_fence.h"
-#include "ttm_object.h"
+
#include "ttm_lock.h"
-#include <linux/sync_file.h>
+#include "ttm_object.h"
+
+#include "vmwgfx_fence.h"
+#include "vmwgfx_reg.h"
+#include "vmwgfx_validation.h"
+
+/*
+ * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
+ * uapi headers should not depend on header files outside uapi/.
+ */
+#include <drm/vmwgfx_drm.h>
+
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180704"
+#define VMWGFX_DRIVER_DATE "20200114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_MINOR 17
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -81,19 +93,33 @@
#define VMW_RES_SHADER ttm_driver_type4
struct vmw_fpriv {
- struct drm_master *locked_master;
struct ttm_object_file *tfile;
bool gb_aware; /* user-space is guest-backed aware */
};
+/**
+ * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
+ * @base: The TTM buffer object
+ * @res_tree: RB tree of resources using this buffer object as a backing MOB
+ * @pin_count: pin depth
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+ * @map: Kmap object for semi-persistent mappings
+ * @res_prios: Eviction priority counts for attached resources
+ * @dirty: structure for user-space dirty-tracking
+ */
struct vmw_buffer_object {
struct ttm_buffer_object base;
- struct list_head res_list;
+ struct rb_root res_tree;
s32 pin_count;
+ atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
+ u32 res_prios[TTM_MAX_BO_PRIORITY];
+ struct vmw_bo_dirty *dirty;
};
/**
@@ -124,7 +150,8 @@ struct vmw_res_func;
* @res_dirty: Resource contains data not yet in the backup buffer. Protected
* by resource reserved.
* @backup_dirty: Backup buffer contains data not yet in the HW resource.
- * Protecte by resource reserved.
+ * Protected by resource reserved.
+ * @coherent: Emulate coherency by tracking vm accesses.
* @backup: The backup buffer if any. Protected by resource reserved.
* @backup_offset: Offset into the backup buffer if any. Protected by resource
* reserved. Note that only a few resource types can have a @backup_offset
@@ -133,28 +160,32 @@ struct vmw_res_func;
* pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
+ * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
- * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
* @res_free: The resource destructor.
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
+struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
+ u32 used_prio;
unsigned long backup_size;
- bool res_dirty;
- bool backup_dirty;
+ u32 res_dirty : 1;
+ u32 backup_dirty : 1;
+ u32 coherent : 1;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
unsigned long pin_count;
const struct vmw_res_func *func;
+ struct rb_node mob_node;
struct list_head lru_head;
- struct list_head mob_head;
struct list_head binding_head;
+ struct vmw_resource_dirty *dirty;
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -376,10 +407,6 @@ struct vmw_sw_context{
struct vmw_legacy_display;
struct vmw_overlay;
-struct vmw_master {
- struct ttm_lock lock;
-};
-
struct vmw_vga_topology_state {
uint32_t width;
uint32_t height;
@@ -420,6 +447,7 @@ struct vmw_private {
struct vmw_fifo_state fifo;
struct drm_device *dev;
+ struct drm_vma_offset_manager vma_manager;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
@@ -484,11 +512,6 @@ struct vmw_private {
spinlock_t resource_lock;
struct idr res_idr[vmw_res_max];
- /*
- * Block lastclose from racing with firstopen.
- */
-
- struct mutex init_mutex;
/*
* A resource manager for kernel-only surfaces and
@@ -542,11 +565,8 @@ struct vmw_private {
spinlock_t svga_lock;
/**
- * Master management.
+ * PM management.
*/
-
- struct vmw_master *active_master;
- struct vmw_master fbdev_master;
struct notifier_block pm_nb;
bool refuse_hibernation;
bool suspend_locked;
@@ -612,11 +632,6 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
return (struct vmw_fpriv *)file_priv->driver_priv;
}
-static inline struct vmw_master *vmw_master(struct drm_master *master)
-{
- return (struct vmw_master *) master->driver_priv;
-}
-
/*
* The locking here is fine-grained, so that it is performed once
* for every read- and write operation. This is of course costly, but we
@@ -669,7 +684,8 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -709,6 +725,23 @@ extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+void vmw_resource_mob_attach(struct vmw_resource *res);
+void vmw_resource_mob_detach(struct vmw_resource *res);
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end);
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault);
+
+/**
+ * vmw_resource_mob_attached - Whether a resource currently has a mob attached
+ * @res: The resource
+ *
+ * Return: true if the resource has a mob attached, false otherwise.
+ */
+static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
+{
+ return !RB_EMPTY_NODE(&res->mob_node);
+}
/**
* vmw_user_resource_noref_release - release a user resource pointer looked up
@@ -787,6 +820,54 @@ static inline void vmw_user_bo_noref_release(void)
ttm_base_object_noref_release();
}
+/**
+ * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
+ * according to attached resources
+ * @vbo: The struct vmw_buffer_object
+ */
+static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
+{
+ int i = ARRAY_SIZE(vbo->res_prios);
+
+ while (i--) {
+ if (vbo->res_prios[i]) {
+ vbo->base.priority = i;
+ return;
+ }
+ }
+
+ vbo->base.priority = 3;
+}
+
+/**
+ * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
+ * eviction priority
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
+{
+ if (vbo->res_prios[prio]++ == 0)
+ vmw_bo_prio_adjust(vbo);
+}
+
+/**
+ * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
+ * priority being removed
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
+{
+ if (--vbo->res_prios[prio] == 0)
+ vmw_bo_prio_adjust(vbo);
+}
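The three inline helpers above keep one reference count per eviction priority and collapse them into a single TTM priority on the buffer object. A minimal sketch of an attach/detach round trip, using only the helpers defined above (the function name example_prio_round_trip is hypothetical, not part of the patch):

static void example_prio_round_trip(struct vmw_buffer_object *vbo)
{
        /* A resource with eviction priority 1 attaches to the mob. */
        vmw_bo_prio_add(vbo, 1);        /* vbo->base.priority becomes 1 */

        /* The resource detaches again; with every bucket back at zero,
         * the bo falls back to the default priority 3. */
        vmw_bo_prio_del(vbo, 1);
}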
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -915,8 +996,8 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
* Command submission - vmwgfx_execbuf.c
*/
-extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
- struct drm_file *file_priv, size_t size);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
@@ -1016,7 +1097,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
@@ -1323,6 +1403,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* VMW logging */
@@ -1338,6 +1420,25 @@ int vmw_host_log(const char *log);
#define VMW_DEBUG_USER(fmt, ...) \
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+/* Resource dirtying - vmwgfx_page_dirty.c */
+void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
+int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
+void vmw_bo_dirty_clear_res(struct vmw_resource *res);
+void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end);
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+
+/**
+ * VMW_DEBUG_KMS - Debug output for kernel mode-setting
+ *
+ * This macro is for debugging vmwgfx mode-setting code.
+ */
+#define VMW_DEBUG_KMS(fmt, ...) \
+ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33533d126277..73489a45decb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2377,9 +2377,12 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
- cmd->body.renderTargetViewId));
+ ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
}
/**
@@ -2396,9 +2399,12 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId);
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
- cmd->body.depthStencilViewId));
+ return PTR_ERR_OR_ZERO(ret);
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
@@ -2560,7 +2566,6 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
offsetof(typeof(*cmd), sid));
cmd = container_of(header, typeof(*cmd), header);
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, NULL);
@@ -2742,9 +2747,12 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId));
+ ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
}
/**
@@ -3995,54 +4003,40 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
-int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
- struct drm_file *file_priv, size_t size)
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg arg;
+ struct drm_vmw_execbuf_arg *arg = data;
int ret;
- static const size_t copy_offset[] = {
- offsetof(struct drm_vmw_execbuf_arg, context_handle),
- sizeof(struct drm_vmw_execbuf_arg)};
struct dma_fence *in_fence = NULL;
- if (unlikely(size < copy_offset[0])) {
- VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
- DRM_VMW_EXECBUF);
- return -EINVAL;
- }
-
- if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
- return -EFAULT;
-
/*
* Extend the ioctl argument while maintaining backwards compatibility:
- * We take different code paths depending on the value of arg.version.
+ * We take different code paths depending on the value of arg->version.
+ *
+ * Note: The ioctl argument is extended and zero-padded by core DRM.
*/
- if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
- arg.version == 0)) {
+ if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
+ arg->version == 0)) {
VMW_DEBUG_USER("Incorrect execbuf version.\n");
return -EINVAL;
}
- if (arg.version > 1 &&
- copy_from_user(&arg.context_handle,
- (void __user *) (data + copy_offset[0]),
- copy_offset[arg.version - 1] - copy_offset[0]) != 0)
- return -EFAULT;
-
- switch (arg.version) {
+ switch (arg->version) {
case 1:
- arg.context_handle = (uint32_t) -1;
+ /* For v1, core DRM has extended and zero-padded the data */
+ arg->context_handle = (uint32_t) -1;
break;
case 2:
default:
+ /* For v2 and later, core DRM has already copied the full argument */
break;
}
/* If imported a fence FD from elsewhere, then wait on it */
- if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
- in_fence = sync_file_get_fence(arg.imported_fence_fd);
+ if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+ in_fence = sync_file_get_fence(arg->imported_fence_fd);
if (!in_fence) {
VMW_DEBUG_USER("Cannot get imported fence\n");
@@ -4059,11 +4053,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
return ret;
ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg.commands,
- NULL, arg.command_size, arg.throttle_us,
- arg.context_handle,
- (void __user *)(unsigned long)arg.fence_rep,
- NULL, arg.flags);
+ (void __user *)(unsigned long)arg->commands,
+ NULL, arg->command_size, arg->throttle_us,
+ arg->context_handle,
+ (void __user *)(unsigned long)arg->fence_rep,
+ NULL, arg->flags);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
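For readers following the execbuf conversion above: the handler can drop its hand-rolled copy_from_user() because drm_ioctl() already copies the user argument into a kernel buffer sized for the current struct and zero-fills whatever tail an older caller did not supply. A simplified sketch of that behaviour, not the actual core-DRM code (helper name hypothetical):

static int example_copy_in(struct drm_vmw_execbuf_arg *karg,
                           const void __user *uarg, size_t user_size)
{
        size_t copy = min(user_size, sizeof(*karg));

        memset(karg, 0, sizeof(*karg));         /* zero-pad the tail */
        if (copy_from_user(karg, uarg, copy))
                return -EFAULT;

        return 0;       /* older callers see a zeroed context_handle etc. */
}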
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 972e8fda6d35..c59806d40e15 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -26,14 +26,14 @@
*
**************************************************************************/
-#include <linux/export.h>
+#include <linux/pci.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"
-#include <drm/ttm/ttm_placement.h>
-
#define VMW_DIRTY_DELAY (HZ / 30)
struct vmw_fb_par {
@@ -624,7 +624,7 @@ out_unlock:
}
-static struct fb_ops vmw_fb_ops = {
+static const struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,
.fb_set_par = vmw_fb_set_par,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 301260e23e52..178a6cd1a06f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -25,7 +25,8 @@
*
**************************************************************************/
-#include <drm/drmP.h>
+#include <linux/sched/signal.h>
+
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -184,6 +185,9 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
spin_lock(f->lock);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ goto out;
+
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index c9382933c2b9..50e9fdd7acf1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -32,8 +32,11 @@
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
-struct vmw_private;
+struct drm_device;
+struct drm_file;
+struct drm_pending_event;
+struct vmw_private;
struct vmw_fence_manager;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index ff3586cb6851..e5252ef3812f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -25,10 +25,12 @@
*
**************************************************************************/
-#include "vmwgfx_drv.h"
-#include <drm/drmP.h>
+#include <linux/sched/signal.h>
+
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_drv.h"
+
struct vmw_temp_set_context {
SVGA3dCmdHeader header;
SVGA3dCmdDXTempSetContext body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index ae7acc6f3dda..83c0d5a3e4fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -25,10 +25,10 @@
*
**************************************************************************/
-#include "vmwgfx_drv.h"
-#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include "vmwgfx_drv.h"
+
#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index c3ad4478266b..75f3efee21a4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -25,7 +25,8 @@
*
**************************************************************************/
-#include <drm/drmP.h>
+#include <linux/sched/signal.h>
+
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 24)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b97bc8e5944b..f47d5710cc95 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -25,12 +25,16 @@
*
**************************************************************************/
-#include "vmwgfx_kms.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_rect.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_sysfs.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -1462,7 +1466,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
if (dev_priv->active_display_unit == vmw_du_screen_target &&
(drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
- DRM_ERROR("Screen size not supported.\n");
+ VMW_DEBUG_KMS("Screen size not supported.\n");
return -EINVAL;
}
@@ -1486,7 +1490,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
* limit on primary bounding box
*/
if (pixel_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Combined output size too large.\n");
+ VMW_DEBUG_KMS("Combined output size too large.\n");
return -EINVAL;
}
@@ -1496,7 +1500,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
if (bb_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Topology is beyond supported limits.\n");
+ VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
return -EINVAL;
}
}
@@ -1645,6 +1649,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct vmw_connector_state *vmw_conn_state;
if (!du->pref_active && new_crtc_state->enable) {
+ VMW_DEBUG_KMS("Enabling a disabled display unit\n");
ret = -EINVAL;
goto clean;
}
@@ -1701,17 +1706,11 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
return ret;
ret = vmw_kms_check_implicit(dev, state);
- if (ret)
- return ret;
-
- if (!state->allow_modeset)
+ if (ret) {
+ VMW_DEBUG_KMS("Invalid implicit state\n");
return ret;
+ }
- /*
- * Legacy path do not set allow_modeset properly like
- * @drm_atomic_helper_update_plane, This will result in unnecessary call
- * to vmw_kms_check_topology. So extra set of check.
- */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
need_modeset = true;
@@ -2347,6 +2346,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (!arg->num_outputs) {
struct drm_rect def_rect = {0, 0, 800, 600};
+ VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
+ def_rect.x1, def_rect.y1,
+ def_rect.x2, def_rect.y2);
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
@@ -2367,6 +2369,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects = (struct drm_rect *)rects;
+ VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
for (i = 0; i < arg->num_outputs; i++) {
struct drm_vmw_rect curr_rect;
@@ -2383,6 +2386,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects[i].x2 = curr_rect.x + curr_rect.w;
drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+ VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
+ drm_rects[i].x1, drm_rects[i].y1,
+ drm_rects[i].x2, drm_rects[i].y2);
+
/*
* Currently this check is limiting the topology within
* mode_config->max (which actually is max texture size
@@ -2393,7 +2400,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
drm_rects[i].x2 > mode_config->max_width ||
drm_rects[i].y2 > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
+ VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
+ drm_rects[i].x1, drm_rects[i].y1,
+ drm_rects[i].x2, drm_rects[i].y2);
ret = -EINVAL;
goto out_free;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 535b03599e55..3ee03227607c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -28,9 +28,9 @@
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
-#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
+
#include "vmwgfx_drv.h"
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 25e6343bcf21..5702219ec38f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -25,11 +25,13 @@
*
**************************************************************************/
-#include "vmwgfx_kms.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+#include "vmwgfx_kms.h"
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 406edc8cef35..0a6bbac00896 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#include <linux/highmem.h>
+
#include "vmwgfx_drv.h"
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 59e9d05ab928..e9f448a5ebb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -24,17 +24,17 @@
*
*/
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/frame.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mem_encrypt.h>
+
#include <asm/hypervisor.h>
-#include <drm/drmP.h>
+
#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h"
-
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
#define MESSAGE_STATUS_CPT 0x0010
@@ -46,8 +46,6 @@
#define RETRIES 3
#define VMW_HYPERVISOR_MAGIC 0x564D5868
-#define VMW_HYPERVISOR_PORT 0x5658
-#define VMW_HYPERVISOR_HB_PORT 0x5659
#define VMW_PORT_CMD_MSG 30
#define VMW_PORT_CMD_HB_MSG 0
@@ -59,6 +57,8 @@
#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
+#define MAX_USER_MSG_LENGTH PAGE_SIZE
+
static u32 vmw_msg_enabled = 1;
enum rpc_msg_type {
@@ -93,7 +93,7 @@ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
(protocol | GUESTMSG_FLAG_COOKIE), si, di,
- VMW_HYPERVISOR_PORT,
+ 0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -126,7 +126,7 @@ static int vmw_close_channel(struct rpc_channel *channel)
VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
0, si, di,
- (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -151,7 +151,8 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
unsigned long si, di, eax, ebx, ecx, edx;
unsigned long msg_len = strlen(msg);
- if (hb) {
+ /* HB port can't access encrypted memory. */
+ if (hb && !mem_encrypt_active()) {
unsigned long bp = channel->cookie_high;
si = (uintptr_t) msg;
@@ -160,7 +161,8 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
VMW_PORT_HB_OUT(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
msg_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) |
+ VMWARE_HYPERVISOR_OUT,
VMW_HYPERVISOR_MAGIC, bp,
eax, ebx, ecx, edx, si, di);
@@ -181,7 +183,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
word, si, di,
- VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
@@ -204,7 +206,8 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
{
unsigned long si, di, eax, ebx, ecx, edx;
- if (hb) {
+ /* HB port can't access encrypted memory */
+ if (hb && !mem_encrypt_active()) {
unsigned long bp = channel->cookie_low;
si = channel->cookie_high;
@@ -213,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
VMW_PORT_HB_IN(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
reply_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMWARE_HYPERVISOR_HB | (channel->channel_id << 16),
VMW_HYPERVISOR_MAGIC, bp,
eax, ebx, ecx, edx, si, di);
@@ -230,7 +233,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
MESSAGE_STATUS_SUCCESS, si, di,
- VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -269,7 +272,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
VMW_PORT(VMW_PORT_CMD_SENDSIZE,
msg_len, si, di,
- VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -327,7 +330,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
VMW_PORT(VMW_PORT_CMD_RECVSIZE,
0, si, di,
- (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -353,7 +356,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -371,13 +374,13 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
MESSAGE_STATUS_SUCCESS, si, di,
- (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -389,10 +392,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}
- if (retries == RETRIES) {
- kfree(reply);
+ if (!reply)
return -EINVAL;
- }
*msg_len = reply_len;
*msg = reply;
@@ -518,3 +519,84 @@ out_open:
return -EINVAL;
}
+
+
+/**
+ * vmw_msg_ioctl - Send and receive a message to/from the host on behalf of user-space
+ *
+ * Sends a message from user-space to the host. Can also receive a result
+ * from the host and return it to user-space.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ */
+
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_msg_arg *arg =
+ (struct drm_vmw_msg_arg *) data;
+ struct rpc_channel channel;
+ char *msg;
+ int length;
+
+ msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
+ if (!msg) {
+ DRM_ERROR("Cannot allocate memory for log message.\n");
+ return -ENOMEM;
+ }
+
+ length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
+ MAX_USER_MSG_LENGTH);
+ if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
+ DRM_ERROR("Userspace message access failure.\n");
+ kfree(msg);
+ return -EINVAL;
+ }
+
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
+ DRM_ERROR("Failed to open channel.\n");
+ goto out_open;
+ }
+
+ if (vmw_send_msg(&channel, msg)) {
+ DRM_ERROR("Failed to send message to host.\n");
+ goto out_msg;
+ }
+
+ if (!arg->send_only) {
+ char *reply = NULL;
+ size_t reply_len = 0;
+
+ if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
+ DRM_ERROR("Failed to receive message from host.\n");
+ goto out_msg;
+ }
+ if (reply && reply_len > 0) {
+ if (copy_to_user((void __user *)((unsigned long)arg->receive),
+ reply, reply_len)) {
+ DRM_ERROR("Failed to copy message to userspace.\n");
+ kfree(reply);
+ goto out_msg;
+ }
+ arg->receive_len = (__u32)reply_len;
+ }
+ kfree(reply);
+ }
+
+ vmw_close_channel(&channel);
+ kfree(msg);
+
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+out_open:
+ kfree(msg);
+
+ return -EINVAL;
+}
+
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 4907e50fb20a..f685c7071dec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -32,6 +32,7 @@
#ifndef _VMWGFX_MSG_H
#define _VMWGFX_MSG_H
+#include <asm/vmware.h>
/**
* Hypervisor-specific bi-directional communication channel. Should never
@@ -44,7 +45,7 @@
* @in_ebx: [IN] Message Len, through EBX
* @in_si: [IN] Input argument through SI, set to 0 if not used
* @in_di: [IN] Input argument through DI, set to 0 if not used
- * @port_num: [IN] port number + [channel id]
+ * @flags: [IN] hypercall flags + [channel id]
* @magic: [IN] hypervisor magic value
* @eax: [OUT] value of EAX register
* @ebx: [OUT] e.g. status from an HB message status command
@@ -54,10 +55,10 @@
* @di: [OUT]
*/
#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
- port_num, magic, \
+ flags, magic, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("inl %%dx, %%eax;" : \
+ asm volatile (VMWARE_HYPERCALL : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@@ -67,7 +68,7 @@
"a"(magic), \
"b"(in_ebx), \
"c"(cmd), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di) : \
"memory"); \
@@ -85,7 +86,7 @@
* @in_ecx: [IN] Message Len, through ECX
* @in_si: [IN] Input argument through SI, set to 0 if not used
* @in_di: [IN] Input argument through DI, set to 0 if not used
- * @port_num: [IN] port number + [channel id]
+ * @flags: [IN] hypercall flags + [channel id]
* @magic: [IN] hypervisor magic value
* @bp: [IN]
* @eax: [OUT] value of EAX register
@@ -98,12 +99,12 @@
#ifdef __x86_64__
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
- "rep outsb;" \
+ VMWARE_HYPERCALL_HB_OUT \
"pop %%rbp;" : \
"=a"(eax), \
"=b"(ebx), \
@@ -114,7 +115,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
@@ -123,12 +124,12 @@
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
- "rep insb;" \
+ VMWARE_HYPERCALL_HB_IN \
"pop %%rbp" : \
"=a"(eax), \
"=b"(ebx), \
@@ -139,7 +140,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
@@ -157,13 +158,13 @@
* just pushed it.
*/
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
- "rep outsb;" \
+ VMWARE_HYPERCALL_HB_OUT \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
@@ -175,7 +176,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
@@ -184,13 +185,13 @@
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
- "rep insb;" \
+ VMWARE_HYPERCALL_HB_IN \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
@@ -202,7 +203,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index d5ef8cf802de..fdb52f6d29fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -25,15 +25,13 @@
*
**************************************************************************/
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
#include <drm/ttm/ttm_placement.h>
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"
+#include "vmwgfx_drv.h"
+
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
new file mode 100644
index 000000000000..f07aa857587c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include "vmwgfx_drv.h"
+
+/*
+ * Different methods for tracking dirty:
+ * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
+ * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
+ * accesses in the VM mkwrite() callback
+ */
+enum vmw_bo_dirty_method {
+ VMW_BO_DIRTY_PAGETABLE,
+ VMW_BO_DIRTY_MKWRITE,
+};
+
+/*
+ * A scan that finds no dirty pages counts as a trigger for a transition to
+ * the _MKWRITE method; similarly, a scan that finds more than a certain
+ * percentage of dirty pages counts as a trigger for a transition to the
+ * _PAGETABLE method. This is how many consecutive triggers we wait for
+ * before actually changing method.
+ */
+#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2
+
+/* Percentage to trigger a transition to the _PAGETABLE method */
+#define VMW_DIRTY_PERCENTAGE 10
+
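To make the two constants concrete: for a 1000-page buffer object, a scan that had to write-protect more than 100 pages (10%) counts as one trigger, and two consecutive triggers switch the tracking method. The test used by vmw_bo_dirty_scan_mkwrite() below reduces to the following (helper name hypothetical):

static bool example_scan_is_trigger(unsigned long num_marked,
                                    unsigned long bitmap_size)
{
        return 100UL * num_marked / bitmap_size > VMW_DIRTY_PERCENTAGE;
}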
+/**
+ * struct vmw_bo_dirty - Dirty information for buffer objects
+ * @start: First currently dirty bit
+ * @end: Last currently dirty bit + 1
+ * @method: The currently used dirty method
+ * @change_count: Number of consecutive method change triggers
+ * @ref_count: Reference count for this structure
+ * @bitmap_size: The size of the bitmap in bits. Typically equal to the
+ * number of pages in the bo.
+ * @size: The accounting size for this struct.
+ * @bitmap: A bitmap where each bit represents a page. A set bit means a
+ * dirty page.
+ */
+struct vmw_bo_dirty {
+ unsigned long start;
+ unsigned long end;
+ enum vmw_bo_dirty_method method;
+ unsigned int change_count;
+ unsigned int ref_count;
+ unsigned long bitmap_size;
+ size_t size;
+ unsigned long bitmap[0];
+};
+
+/**
+ * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
+ * @vbo: The buffer object to scan
+ *
+ * Scans the pagetable for dirty bits. Clears those bits and updates the
+ * dirty structure with the results. This function may change the
+ * dirty-tracking method.
+ */
+static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t num_marked;
+
+ num_marked = clean_record_shared_mapping_range
+ (mapping,
+ offset, dirty->bitmap_size,
+ offset, &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ if (num_marked == 0)
+ dirty->change_count++;
+ else
+ dirty->change_count = 0;
+
+ if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
+ dirty->change_count = 0;
+ dirty->method = VMW_BO_DIRTY_MKWRITE;
+ wp_shared_mapping_range(mapping,
+ offset, dirty->bitmap_size);
+ clean_record_shared_mapping_range(mapping,
+ offset, dirty->bitmap_size,
+ offset, &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ }
+}
+
+/**
+ * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
+ * @vbo: The buffer object to scan
+ *
+ * Write-protects the pages that were written to, so that subsequent write
+ * accesses trigger a new call to mkwrite.
+ *
+ * This function may change the dirty-tracking method.
+ */
+static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t num_marked;
+
+ if (dirty->end <= dirty->start)
+ return;
+
+ num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
+ dirty->start + offset,
+ dirty->end - dirty->start);
+
+ if (100UL * num_marked / dirty->bitmap_size >
+ VMW_DIRTY_PERCENTAGE) {
+ dirty->change_count++;
+ } else {
+ dirty->change_count = 0;
+ }
+
+ if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
+ pgoff_t start = 0;
+ pgoff_t end = dirty->bitmap_size;
+
+ dirty->method = VMW_BO_DIRTY_PAGETABLE;
+ clean_record_shared_mapping_range(mapping, offset, end, offset,
+ &dirty->bitmap[0],
+ &start, &end);
+ bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
+ if (dirty->start < dirty->end)
+ bitmap_set(&dirty->bitmap[0], dirty->start,
+ dirty->end - dirty->start);
+ dirty->change_count = 0;
+ }
+}
+
+/**
+ * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
+ * tracking structure
+ * @vbo: The buffer object to scan
+ *
+ * This function may change the dirty tracking method.
+ */
+void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
+ vmw_bo_dirty_scan_pagetable(vbo);
+ else
+ vmw_bo_dirty_scan_mkwrite(vbo);
+}
+
+/**
+ * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
+ * an unmap_mapping_range operation.
+ * @vbo: The buffer object,
+ * @start: First page of the range within the buffer object.
+ * @end: Last page of the range within the buffer object + 1.
+ *
+ * If we're using the _PAGETABLE scan method, we may leak dirty pages
+ * when calling unmap_mapping_range(). This function makes sure we pick
+ * up all dirty pages.
+ */
+static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+
+ if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
+ return;
+
+ wp_shared_mapping_range(mapping, start + offset, end - start);
+ clean_record_shared_mapping_range(mapping, start + offset,
+ end - start, offset,
+ &dirty->bitmap[0], &dirty->start,
+ &dirty->end);
+}
+
+/**
+ * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
+ * @vbo: The buffer object,
+ * @start: First page of the range within the buffer object.
+ * @end: Last page of the range within the buffer object + 1.
+ *
+ * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
+ */
+void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end)
+{
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+
+ vmw_bo_dirty_pre_unmap(vbo, start, end);
+ unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
+ (loff_t) (end - start) << PAGE_SHIFT);
+}
+
+/**
+ * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
+ * @vbo: The buffer object
+ *
+ * This function registers a dirty-tracking user to a buffer object.
+ * A user can be for example a resource or a vma in a special user-space
+ * mapping.
+ *
+ * Return: Zero on success, -ENOMEM on memory allocation failure.
+ */
+int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t num_pages = vbo->base.num_pages;
+ size_t size, acc_size;
+ int ret;
+ static struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
+ if (dirty) {
+ dirty->ref_count++;
+ return 0;
+ }
+
+ size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
+ acc_size = ttm_round_pot(size);
+ ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+ if (ret) {
+ VMW_DEBUG_USER("Out of graphics memory for buffer object "
+ "dirty tracker.\n");
+ return ret;
+ }
+ dirty = kvzalloc(size, GFP_KERNEL);
+ if (!dirty) {
+ ret = -ENOMEM;
+ goto out_no_dirty;
+ }
+
+ dirty->size = acc_size;
+ dirty->bitmap_size = num_pages;
+ dirty->start = dirty->bitmap_size;
+ dirty->end = 0;
+ dirty->ref_count = 1;
+ if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
+ dirty->method = VMW_BO_DIRTY_PAGETABLE;
+ } else {
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+
+ dirty->method = VMW_BO_DIRTY_MKWRITE;
+
+ /* Write-protect and then pick up already dirty bits */
+ wp_shared_mapping_range(mapping, offset, num_pages);
+ clean_record_shared_mapping_range(mapping, offset, num_pages,
+ offset,
+ &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ }
+
+ vbo->dirty = dirty;
+
+ return 0;
+
+out_no_dirty:
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+ return ret;
+}
+
+/**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+ *
+ * This function releases a dirty-tracking user from a buffer object.
+ * If the reference count reaches zero, then the dirty-tracking object is
+ * freed and the pointer to it cleared.
+ */
+void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ if (dirty && --dirty->ref_count == 0) {
+ size_t acc_size = dirty->size;
+
+ kvfree(dirty);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+ vbo->dirty = NULL;
+ }
+}
+
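vmw_bo_dirty_add() and vmw_bo_dirty_release() are reference counted, so every successful add must be balanced by exactly one release. A minimal pairing sketch for a hypothetical caller, using only functions declared in vmwgfx_drv.h above:

static int example_track_dirty(struct vmw_buffer_object *vbo)
{
        int ret = vmw_bo_dirty_add(vbo);

        if (ret)
                return ret;

        vmw_bo_dirty_scan(vbo);         /* consume vbo->dirty here */

        vmw_bo_dirty_release(vbo);      /* drop our reference again */
        return 0;
}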
+/**
+ * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
+ * its backing mob.
+ * @res: The resource
+ *
+ * This function will pick up all dirty ranges affecting the resource from
+ * it's backup mob, and call vmw_resource_dirty_update() once for each
+ * range. The transferred ranges will be cleared from the backing mob's
+ * dirty tracking.
+ */
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = res->backup_offset;
+ unsigned long res_end = res->backup_offset + res->backup_size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ vmw_resource_dirty_update(res, start, end);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
+/**
+ * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
+ * its backing mob.
+ * @res: The resource
+ *
+ * This function will clear all dirty ranges affecting the resource from
+ * its backup mob's dirty tracking.
+ */
+void vmw_bo_dirty_clear_res(struct vmw_resource *res)
+{
+ unsigned long res_start = res->backup_offset;
+ unsigned long res_end = res->backup_offset + res->backup_size;
+ struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ res_start = max(res_start, dirty->start);
+ res_end = min(res_end, dirty->end);
+ bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ vm_fault_t ret;
+ unsigned long page_offset;
+ unsigned int save_flags;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, typeof(*vbo), base);
+
+ /*
+ * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
+ * So make sure the TTM helpers are aware.
+ */
+ save_flags = vmf->flags;
+ vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ vmf->flags = save_flags;
+ if (ret)
+ return ret;
+
+ page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
+ if (unlikely(page_offset >= bo->num_pages)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
+ !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ __set_bit(page_offset, &dirty->bitmap[0]);
+ dirty->start = min(dirty->start, page_offset);
+ dirty->end = max(dirty->end, page_offset + 1);
+ }
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ pgoff_t num_prefault;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
+ TTM_BO_VM_NUM_PREFAULT;
+
+ if (vbo->dirty) {
+ pgoff_t allowed_prefault;
+ unsigned long page_offset;
+
+ page_offset = vmf->pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
+ if (page_offset >= bo->num_pages ||
+ vmw_resources_clean(vbo, page_offset,
+ page_offset + PAGE_SIZE,
+ &allowed_prefault)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ num_prefault = min(num_prefault, allowed_prefault);
+ }
+
+ /*
+ * If we don't track dirty using the MKWRITE method, make sure
+ * the page protection is write-enabled so we don't get
+ * a lot of unnecessary write faults.
+ */
+ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
+ prot = vma->vm_page_prot;
+ else
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index e420675e8db3..d9552a1efd13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -62,45 +62,12 @@ static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
{
}
-static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
-{
-}
-
-static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-
-static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
- return -ENOSYS;
-}
-
const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.attach = vmw_prime_map_attach,
.detach = vmw_prime_map_detach,
.map_dma_buf = vmw_prime_map_dma_buf,
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
- .map = vmw_prime_dmabuf_kmap,
- .unmap = vmw_prime_dmabuf_kunmap,
- .mmap = vmw_prime_dmabuf_mmap,
- .vmap = vmw_prime_dmabuf_vmap,
- .vunmap = vmw_prime_dmabuf_vunmap,
};
int vmw_prime_fd_to_handle(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1d38a8b2f2ec..c8441030637a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -25,15 +25,58 @@
*
**************************************************************************/
-#include "vmwgfx_drv.h"
-#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
+
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *backup = res->backup;
+ struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+
+ dma_resv_assert_held(res->backup->base.base.resv);
+ res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+ res->func->prio;
+
+ while (*new) {
+ struct vmw_resource *this =
+ container_of(*new, struct vmw_resource, mob_node);
+
+ parent = *new;
+ new = (res->backup_offset < this->backup_offset) ?
+ &((*new)->rb_left) : &((*new)->rb_right);
+ }
+
+ rb_link_node(&res->mob_node, parent, new);
+ rb_insert_color(&res->mob_node, &backup->res_tree);
+
+ vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *backup = res->backup;
+
+ dma_resv_assert_held(backup->base.base.resv);
+ if (vmw_resource_mob_attached(res)) {
+ rb_erase(&res->mob_node, &backup->res_tree);
+ RB_CLEAR_NODE(&res->mob_node);
+ vmw_bo_prio_del(backup, res->used_prio);
+ }
+}
+
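Because the tree is keyed on @backup_offset, an in-order walk of @res_tree visits the attached resources in increasing offset order, which keeps range queries over a mob cheap. A hedged sketch of such a walk, assuming <linux/rbtree.h> is reachable through the existing includes (helper name hypothetical):

static void example_walk_mob_resources(struct vmw_buffer_object *backup)
{
        struct rb_node *node;

        for (node = rb_first(&backup->res_tree); node; node = rb_next(node)) {
                struct vmw_resource *res =
                        container_of(node, struct vmw_resource, mob_node);

                /* res->backup_offset is non-decreasing across iterations. */
                (void)res;
        }
}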
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
@@ -80,7 +123,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_buffer_object *bo = &res->backup->base;
ttm_bo_reserve(bo, false, false, NULL);
- if (!list_empty(&res->mob_head) &&
+ if (vmw_resource_mob_attached(res) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
@@ -89,7 +132,11 @@ static void vmw_resource_release(struct kref *kref)
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&res->backup);
}
@@ -171,14 +218,17 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->res_free = res_free;
res->dev_priv = dev_priv;
res->func = func;
+ RB_CLEAR_NODE(&res->mob_node);
INIT_LIST_HEAD(&res->lru_head);
- INIT_LIST_HEAD(&res->mob_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
res->backup_dirty = false;
res->res_dirty = false;
+ res->coherent = false;
+ res->used_prio = 3;
+ res->dirty = NULL;
if (delay_id)
return 0;
else
@@ -343,7 +393,8 @@ out_no_bo:
* should be retried once resources have been freed up.
*/
static int vmw_resource_do_validate(struct vmw_resource *res,
- struct ttm_validate_buffer *val_buf)
+ struct ttm_validate_buffer *val_buf,
+ bool dirtying)
{
int ret = 0;
const struct vmw_res_func *func = res->func;
@@ -355,14 +406,47 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
}
if (func->bind &&
- ((func->needs_backup && list_empty(&res->mob_head) &&
+ ((func->needs_backup && !vmw_resource_mob_attached(res) &&
val_buf->bo != NULL) ||
(!func->needs_backup && val_buf->bo != NULL))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
if (func->needs_backup)
- list_add_tail(&res->mob_head, &res->backup->res_list);
+ vmw_resource_mob_attach(res);
+ }
+
+ /*
+ * Handle the case where the backup mob is marked coherent but
+ * the resource isn't.
+ */
+ if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
+ !res->coherent) {
+ if (res->backup->dirty && !res->dirty) {
+ ret = func->dirty_alloc(res);
+ if (ret)
+ return ret;
+ } else if (!res->backup->dirty && res->dirty) {
+ func->dirty_free(res);
+ }
+ }
+
+ /*
+ * Transfer the dirty regions to the resource and update
+ * the resource.
+ */
+ if (res->dirty) {
+ if (dirtying && !res->res_dirty) {
+ pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP
+ (res->backup_offset + res->backup_size,
+ PAGE_SIZE);
+
+ vmw_bo_dirty_unmap(res->backup, start, end);
+ }
+
+ vmw_bo_dirty_transfer_to_res(res);
+ return func->dirty_sync(res);
}
return 0;
@@ -402,19 +486,29 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup && new_backup != res->backup) {
if (res->backup) {
- lockdep_assert_held(&res->backup->base.resv->lock.base);
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
vmw_bo_unreference(&res->backup);
}
if (new_backup) {
res->backup = vmw_bo_reference(new_backup);
- lockdep_assert_held(&new_backup->base.resv->lock.base);
- list_add_tail(&res->mob_head, &new_backup->res_list);
+
+ /*
+ * The validation code should already have added a
+ * dirty tracker here.
+ */
+ WARN_ON(res->coherent && !new_backup->dirty);
+
+ vmw_resource_mob_attach(res);
} else {
res->backup = NULL;
}
+ } else if (switch_backup && res->coherent) {
+ vmw_bo_dirty_release(res->backup);
}
+
if (switch_backup)
res->backup_offset = new_backup_offset;
@@ -464,12 +558,11 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
- true);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
- if (res->func->needs_backup && list_empty(&res->mob_head))
+ if (res->func->needs_backup && !vmw_resource_mob_attached(res))
return 0;
backup_dirty = res->backup_dirty;
@@ -574,11 +667,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
return ret;
if (unlikely(func->unbind != NULL &&
- (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ (!func->needs_backup || vmw_resource_mob_attached(res)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
}
ret = func->destroy(res);
res->backup_dirty = true;
@@ -595,6 +688,7 @@ out_no_unbind:
* to the device.
* @res: The resource to make visible to the device.
* @intr: Perform waits interruptible if possible.
+ * @dirtying: Pending GPU operation will dirty the resource
*
* On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
@@ -604,7 +698,8 @@ out_no_unbind:
* Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
* on failure.
*/
-int vmw_resource_validate(struct vmw_resource *res, bool intr)
+int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying)
{
int ret;
struct vmw_resource *evict_res;
@@ -621,7 +716,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
if (res->backup)
val_buf.bo = &res->backup->base;
do {
- ret = vmw_resource_do_validate(res, &val_buf);
+ ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
break;
@@ -660,7 +755,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
if (unlikely(ret != 0))
goto out_no_validate;
else if (!res->func->needs_backup && res->backup) {
- list_del_init(&res->mob_head);
+ WARN_ON_ONCE(vmw_resource_mob_attached(res));
vmw_bo_unreference(&res->backup);
}
@@ -684,22 +779,23 @@ out_no_validate:
*/
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
-
- struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
.num_shared = 0
};
- lockdep_assert_held(&vbo->base.resv->lock.base);
- list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
- if (!res->func->unbind)
- continue;
+ dma_resv_assert_held(vbo->base.base.resv);
+ while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
+ struct rb_node *node = vbo->res_tree.rb_node;
+ struct vmw_resource *res =
+ container_of(node, struct vmw_resource, mob_node);
+
+ if (!WARN_ON_ONCE(!res->func->unbind))
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
- (void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
}
(void) ttm_bo_wait(&vbo->base, false, false);
@@ -920,7 +1016,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
- ret = vmw_resource_validate(res, interruptible);
+ ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
ttm_bo_unreserve(&vbo->base);
if (ret)
@@ -980,3 +1076,101 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
return res->func->res_type;
}
+
+/**
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
+ * sequential range of touched backing store memory.
+ * @res: The resource.
+ * @start: The first page touched.
+ * @end: The last page touched + 1.
+ */
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end)
+{
+ if (res->dirty)
+ res->func->dirty_range_add(res, start << PAGE_SHIFT,
+ end << PAGE_SHIFT);
+}
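A hypothetical caller would convert a touched byte range of the backing store into page numbers first; a sketch under that assumption (the wrapper name and rounding are illustrative only):

/* Illustrative only: mark a CPU-written byte range of the backup as dirty. */
static void example_mark_written(struct vmw_resource *res,
				 unsigned long byte_start, unsigned long byte_end)
{
	pgoff_t first = byte_start >> PAGE_SHIFT;
	pgoff_t last = (byte_end + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* exclusive */

	vmw_resource_dirty_update(res, first, last);
}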
+
+/**
+ * vmw_resources_clean - Clean resources intersecting a mob range
+ * @vbo: The mob buffer object
+ * @start: The mob page offset starting the range
+ * @end: The mob page offset ending the range
+ * @num_prefault: Returns how many pages including the first have been
+ * cleaned and are ok to prefault
+ */
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault)
+{
+ struct rb_node *cur = vbo->res_tree.rb_node;
+ struct vmw_resource *found = NULL;
+ unsigned long res_start = start << PAGE_SHIFT;
+ unsigned long res_end = end << PAGE_SHIFT;
+ unsigned long last_cleaned = 0;
+
+ /*
+ * Find the resource with the lowest backup_offset that intersects the
+ * range.
+ */
+ while (cur) {
+ struct vmw_resource *cur_res =
+ container_of(cur, struct vmw_resource, mob_node);
+
+ if (cur_res->backup_offset >= res_end) {
+ cur = cur->rb_left;
+ } else if (cur_res->backup_offset + cur_res->backup_size <=
+ res_start) {
+ cur = cur->rb_right;
+ } else {
+ found = cur_res;
+ cur = cur->rb_left;
+ /* Continue to look for resources with lower offsets */
+ }
+ }
+
+ /*
+ * In order of increasing backup_offset, clean dirty resources
+ * intersecting the range.
+ */
+ while (found) {
+ if (found->res_dirty) {
+ int ret;
+
+ if (!found->func->clean)
+ return -EINVAL;
+
+ ret = found->func->clean(found);
+ if (ret)
+ return ret;
+
+ found->res_dirty = false;
+ }
+ last_cleaned = found->backup_offset + found->backup_size;
+ cur = rb_next(&found->mob_node);
+ if (!cur)
+ break;
+
+ found = container_of(cur, struct vmw_resource, mob_node);
+ if (found->backup_offset >= res_end)
+ break;
+ }
+
+ /*
+ * Set the number of pages allowed for prefaulting and fence the buffer object
+ */
+ *num_prefault = 1;
+ if (last_cleaned > res_start) {
+ struct ttm_buffer_object *bo = &vbo->base;
+
+ *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
+ PAGE_SIZE);
+ vmw_bo_fence_single(bo, NULL);
+ if (bo->moving)
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get
+ (dma_resv_get_excl(bo->base.resv));
+ }
+
+ return 0;
+}
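A sketch of how a fault path might consume this helper; the one-page input range and the wrapper itself are assumptions for illustration, not part of this patch:

/* Hypothetical fault-path use: clean resources backing the faulting page. */
static int example_clean_for_fault(struct vmw_buffer_object *vbo,
				   pgoff_t page_offset)
{
	pgoff_t allowed_prefault;
	int ret;

	ret = vmw_resources_clean(vbo, page_offset, page_offset + 1,
				  &allowed_prefault);
	if (ret)
		return ret;

	/* allowed_prefault >= 1; pages beyond the first may be prefaulted. */
	return 0;
}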
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index 7e19eba0b0b8..3b7438b2d289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -71,6 +71,13 @@ struct vmw_user_resource_conv {
* @commit_notify: If the resource is a command buffer managed resource,
* callback to notify that a define or remove command
* has been committed to the device.
+ * @dirty_alloc: Allocate a dirty tracker. NULL if dirty-tracking is not
+ * supported.
+ * @dirty_free: Free the dirty tracker.
+ * @dirty_sync: Upload the dirty mob contents to the resource.
+ * @dirty_range_add: Add a sequential dirty range to the resource
+ * dirty tracker.
+ * @clean: Clean the resource.
*/
struct vmw_res_func {
enum vmw_res_type res_type;
@@ -78,6 +85,8 @@ struct vmw_res_func {
const char *type_name;
struct ttm_placement *backup_placement;
bool may_evict;
+ u32 prio;
+ u32 dirty_prio;
int (*create) (struct vmw_resource *res);
int (*destroy) (struct vmw_resource *res);
@@ -88,6 +97,12 @@ struct vmw_res_func {
struct ttm_validate_buffer *val_buf);
void (*commit_notify)(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
+ int (*dirty_alloc)(struct vmw_resource *res);
+ void (*dirty_free)(struct vmw_resource *res);
+ int (*dirty_sync)(struct vmw_resource *res);
+ void (*dirty_range_add)(struct vmw_resource *res, size_t start,
+ size_t end);
+ int (*clean)(struct vmw_resource *res);
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 9a2a3836d89a..e5a283263211 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -25,12 +25,14 @@
*
**************************************************************************/
-#include "vmwgfx_kms.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+#include "vmwgfx_kms.h"
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index d310d21f0d54..e139fdfd1635 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -95,6 +95,8 @@ static const struct vmw_res_func vmw_gb_shader_func = {
.res_type = vmw_res_shader,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "guest backed shaders",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_shader_create,
@@ -106,7 +108,9 @@ static const struct vmw_res_func vmw_gb_shader_func = {
static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader,
.needs_backup = true,
- .may_evict = false,
+ .may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "dx shaders",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_shader_create,
@@ -423,7 +427,7 @@ static int vmw_dx_shader_create(struct vmw_resource *res)
WARN_ON_ONCE(!shader->committed);
- if (!list_empty(&res->mob_head)) {
+ if (vmw_resource_mob_attached(res)) {
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f803bb5e782b..41a96fb49835 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -25,12 +25,15 @@
*
******************************************************************************/
-#include "vmwgfx_kms.h"
-#include "device_include/svga3d_surfacedefs.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 219471903bc1..3ce630aa4fde 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -68,6 +68,20 @@ struct vmw_surface_offset {
uint32_t bo_offset;
};
+/**
+ * vmw_surface_dirty - Surface dirty-tracker
+ * @cache: Cached layout information of the surface.
+ * @size: Accounting size for the struct vmw_surface_dirty.
+ * @num_subres: Number of subresources.
+ * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
+ */
+struct vmw_surface_dirty {
+ struct svga3dsurface_cache cache;
+ size_t size;
+ u32 num_subres;
+ SVGA3dBox boxes[0];
+};
+
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
@@ -96,6 +110,13 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_ref_ext_rep *rep,
struct drm_file *file_priv);
+static void vmw_surface_dirty_free(struct vmw_resource *res);
+static int vmw_surface_dirty_alloc(struct vmw_resource *res);
+static int vmw_surface_dirty_sync(struct vmw_resource *res);
+static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
+ size_t end);
+static int vmw_surface_clean(struct vmw_resource *res);
+
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
.base_obj_to_res = vmw_user_surface_base_to_res,
@@ -112,6 +133,8 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
.may_evict = true,
+ .prio = 1,
+ .dirty_prio = 1,
.type_name = "legacy surfaces",
.backup_placement = &vmw_srf_placement,
.create = &vmw_legacy_srf_create,
@@ -124,12 +147,19 @@ static const struct vmw_res_func vmw_gb_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = true,
.may_evict = true,
+ .prio = 1,
+ .dirty_prio = 2,
.type_name = "guest backed surfaces",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
- .unbind = vmw_gb_surface_unbind
+ .unbind = vmw_gb_surface_unbind,
+ .dirty_alloc = vmw_surface_dirty_alloc,
+ .dirty_free = vmw_surface_dirty_free,
+ .dirty_sync = vmw_surface_dirty_sync,
+ .dirty_range_add = vmw_surface_dirty_range_add,
+ .clean = vmw_surface_clean,
};
/**
@@ -332,7 +362,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
void *cmd;
if (res->func->destroy == vmw_gb_surface_destroy) {
@@ -356,7 +385,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = vmw_res_to_srf(res);
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
@@ -637,6 +665,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
+ WARN_ON_ONCE(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -905,22 +934,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
- bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv)))
- require_exist = true;
-
- if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
- DRM_ERROR("Locked master refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
-
handle = u_handle;
}
@@ -937,9 +956,18 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
}
if (handle_type != DRM_VMW_HANDLE_PRIME) {
+ bool require_exist = false;
+
user_srf = container_of(base, struct vmw_user_surface,
prime.base);
+ /* Error out if we are unauthenticated primary */
+ if (drm_is_primary_client(file_priv) &&
+ !file_priv->authenticated) {
+ ret = -EACCES;
+ goto out_bad_resource;
+ }
+
/*
* Make sure the surface creator has the same
* authenticating master, or is already registered with us.
@@ -948,6 +976,9 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
user_srf->master != file_priv->master)
require_exist = true;
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
require_exist);
if (unlikely(ret != 0)) {
@@ -1170,10 +1201,16 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
- res->backup_dirty = false;
}
vmw_fifo_commit(dev_priv, submit_size);
+ if (res->backup->dirty && res->backup_dirty) {
+ /* We've just made a full upload. Clear dirty regions. */
+ vmw_bo_dirty_clear_res(res);
+ }
+
+ res->backup_dirty = false;
+
return 0;
}
@@ -1638,7 +1675,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
}
} else if (req->base.drm_surface_flags &
- drm_vmw_surface_flag_create_buffer)
+ (drm_vmw_surface_flag_create_buffer |
+ drm_vmw_surface_flag_coherent))
ret = vmw_user_bo_alloc(dev_priv, tfile,
res->backup_size,
req->base.drm_surface_flags &
@@ -1652,6 +1690,26 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
goto out_unlock;
}
+ if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
+ struct vmw_buffer_object *backup = res->backup;
+
+ ttm_bo_reserve(&backup->base, false, false, NULL);
+ if (!res->func->dirty_alloc)
+ ret = -EINVAL;
+ if (!ret)
+ ret = vmw_bo_dirty_add(backup);
+ if (!ret) {
+ res->coherent = true;
+ ret = res->func->dirty_alloc(res);
+ }
+ ttm_bo_unreserve(&backup->base);
+ if (ret) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->base.drm_surface_flags &
@@ -1669,7 +1727,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
rep->buffer_handle = backup_handle;
} else {
@@ -1745,7 +1803,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
rep->creq.version = drm_vmw_gb_surface_v1;
@@ -1760,3 +1818,338 @@ out_bad_resource:
return ret;
}
+
+/**
+ * vmw_subres_dirty_add - Add a dirty region to a subresource
+ * @dirty: The surface's dirty tracker.
+ * @loc_start: The location corresponding to the start of the region.
+ * @loc_end: The location corresponding to the end of the region.
+ *
+ * As we are assuming that @loc_start and @loc_end represent a sequential
+ * range of backing store memory, if the region spans multiple lines then
+ * regardless of the x coordinate, the full lines are dirtied.
+ * Correspondingly if the region spans multiple z slices, then full rather
+ * than partial z slices are dirtied.
+ */
+static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
+ const struct svga3dsurface_loc *loc_start,
+ const struct svga3dsurface_loc *loc_end)
+{
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
+ u32 mip = loc_start->sub_resource % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+ u32 box_c2 = box->z + box->d;
+
+ if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
+ return;
+
+ if (box->d == 0 || box->z > loc_start->z)
+ box->z = loc_start->z;
+ if (box_c2 < loc_end->z)
+ box->d = loc_end->z - box->z;
+
+ if (loc_start->z + 1 == loc_end->z) {
+ box_c2 = box->y + box->h;
+ if (box->h == 0 || box->y > loc_start->y)
+ box->y = loc_start->y;
+ if (box_c2 < loc_end->y)
+ box->h = loc_end->y - box->y;
+
+ if (loc_start->y + 1 == loc_end->y) {
+ box_c2 = box->x + box->w;
+ if (box->w == 0 || box->x > loc_start->x)
+ box->x = loc_start->x;
+ if (box_c2 < loc_end->x)
+ box->w = loc_end->x - box->x;
+ } else {
+ box->x = 0;
+ box->w = size->width;
+ }
+ } else {
+ box->y = 0;
+ box->h = size->height;
+ box->x = 0;
+ box->w = size->width;
+ }
+}
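A worked example of the widening rule described above, using made-up coordinates on a hypothetical 64x64x1 mip level and assuming svga3dsurface_inc_loc() yields exclusive end coordinates:

/*
 * loc_start = {x=10, y=2, z=0}, loc_end = {x=5, y=4, z=1}
 * (exclusive, i.e. last touched texel at (4, 3, 0)): the range
 * crosses from line y=2 into y=3, so full lines are dirtied and
 * the resulting box is { x=0, w=64, y=2, h=2, z=0, d=1 }.
 */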
+
+/**
+ * vmw_subres_dirty_full - Mark a full subresource as dirty
+ * @dirty: The surface's dirty tracker.
+ * @subres: The subresource
+ */
+static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
+{
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ u32 mip = subres % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+ SVGA3dBox *box = &dirty->boxes[subres];
+
+ box->x = 0;
+ box->y = 0;
+ box->z = 0;
+ box->w = size->width;
+ box->h = size->height;
+ box->d = size->depth;
+}
+
+/*
+ * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
+ * surfaces.
+ */
+static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
+ size_t start, size_t end)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t backup_end = res->backup_offset + res->backup_size;
+ struct svga3dsurface_loc loc1, loc2;
+ const struct svga3dsurface_cache *cache;
+
+ start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
+ end = min(end, backup_end) - res->backup_offset;
+ cache = &dirty->cache;
+ svga3dsurface_get_loc(cache, &loc1, start);
+ svga3dsurface_get_loc(cache, &loc2, end - 1);
+ svga3dsurface_inc_loc(cache, &loc2);
+
+ if (loc1.sub_resource + 1 == loc2.sub_resource) {
+ /* Dirty range covers a single sub-resource */
+ vmw_subres_dirty_add(dirty, &loc1, &loc2);
+ } else {
+ /* Dirty range covers multiple sub-resources */
+ struct svga3dsurface_loc loc_min, loc_max;
+ u32 sub_res;
+
+ svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max);
+ vmw_subres_dirty_add(dirty, &loc1, &loc_max);
+ svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
+ vmw_subres_dirty_add(dirty, &loc_min, &loc2);
+ for (sub_res = loc1.sub_resource + 1;
+ sub_res < loc2.sub_resource - 1; ++sub_res)
+ vmw_subres_dirty_full(dirty, sub_res);
+ }
+}
+
+/*
+ * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
+ * surfaces.
+ */
+static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
+ size_t start, size_t end)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
+ SVGA3dBox *box = &dirty->boxes[0];
+ u32 box_c2;
+
+ box->h = box->d = 1;
+ start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
+ end = min(end, backup_end) - res->backup_offset;
+ box_c2 = box->x + box->w;
+ if (box->w == 0 || box->x > start)
+ box->x = start;
+ if (box_c2 < end)
+ box->w = end - box->x;
+}
+
+/*
+ * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
+ */
+static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
+ size_t end)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+
+ if (WARN_ON(end <= res->backup_offset ||
+ start >= res->backup_offset + res->backup_size))
+ return;
+
+ if (srf->format == SVGA3D_BUFFER)
+ vmw_surface_buf_dirty_range_add(res, start, end);
+ else
+ vmw_surface_tex_dirty_range_add(res, start, end);
+}
+
+/*
+ * vmw_surface_dirty_sync - The surface's dirty_sync callback.
+ */
+static int vmw_surface_dirty_sync(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ bool has_dx = 0;
+ u32 i, num_dirty;
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t alloc_size;
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXUpdateSubResource body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd2;
+ void *cmd;
+
+ num_dirty = 0;
+ for (i = 0; i < dirty->num_subres; ++i) {
+ const SVGA3dBox *box = &dirty->boxes[i];
+
+ if (box->d)
+ num_dirty++;
+ }
+
+ if (!num_dirty)
+ goto out;
+
+ alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
+ cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd1 = cmd;
+ cmd2 = cmd;
+
+ for (i = 0; i < dirty->num_subres; ++i) {
+ const SVGA3dBox *box = &dirty->boxes[i];
+
+ if (!box->d)
+ continue;
+
+ /*
+ * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
+ * UPDATE_GB_IMAGE is not.
+ */
+ if (has_dx) {
+ cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd1->body.subResource = i;
+ cmd1->body.box = *box;
+ cmd1++;
+ } else {
+ cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.image.sid = res->id;
+ cmd2->body.image.face = i / cache->num_mip_levels;
+ cmd2->body.image.mipmap = i -
+ (cache->num_mip_levels * cmd2->body.image.face);
+ cmd2->body.box = *box;
+ cmd2++;
+ }
+
+ }
+ vmw_fifo_commit(dev_priv, alloc_size);
+ out:
+ memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
+ dirty->num_subres);
+
+ return 0;
+}
+
+/*
+ * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
+ */
+static int vmw_surface_dirty_alloc(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_surface_dirty *dirty;
+ u32 num_layers = 1;
+ u32 num_mip;
+ u32 num_subres;
+ u32 num_samples;
+ size_t dirty_size, acc_size;
+ static struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (srf->array_size)
+ num_layers = srf->array_size;
+ else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers *= SVGA3D_MAX_SURFACE_FACES;
+
+ num_mip = srf->mip_levels[0];
+ if (!num_mip)
+ num_mip = 1;
+
+ num_subres = num_layers * num_mip;
+ dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
+ acc_size = ttm_round_pot(dirty_size);
+ ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
+ acc_size, &ctx);
+ if (ret) {
+ VMW_DEBUG_USER("Out of graphics memory for surface "
+ "dirty tracker.\n");
+ return ret;
+ }
+
+ dirty = kvzalloc(dirty_size, GFP_KERNEL);
+ if (!dirty) {
+ ret = -ENOMEM;
+ goto out_no_dirty;
+ }
+
+ num_samples = max_t(u32, 1, srf->multisample_count);
+ ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
+ num_layers, num_samples, &dirty->cache);
+ if (ret)
+ goto out_no_cache;
+
+ dirty->num_subres = num_subres;
+ dirty->size = acc_size;
+ res->dirty = (struct vmw_resource_dirty *) dirty;
+
+ return 0;
+
+out_no_cache:
+ kvfree(dirty);
+out_no_dirty:
+ ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
+ return ret;
+}
+
+/*
+ * vmw_surface_dirty_free - The surface's dirty_free callback
+ */
+static void vmw_surface_dirty_free(struct vmw_resource *res)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t acc_size = dirty->size;
+
+ kvfree(dirty);
+ ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
+ res->dirty = NULL;
+}
+
+/*
+ * vmw_surface_clean - The surface's clean callback
+ */
+static int vmw_surface_clean(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ size_t alloc_size;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd;
+
+ alloc_size = sizeof(*cmd);
+ cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = res->id;
+ vmw_fifo_commit(dev_priv, alloc_size);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 8bafa6eac5a8..aa7e50f63b94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -25,15 +25,31 @@
*
**************************************************************************/
-#include <drm/drmP.h>
#include "vmwgfx_drv.h"
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ static const struct vm_operations_struct vmw_vm_ops = {
+ .pfn_mkwrite = vmw_bo_vm_mkwrite,
+ .page_mkwrite = vmw_bo_vm_mkwrite,
+ .fault = vmw_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close
+ };
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+ int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
- return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &vmw_vm_ops;
+
+ /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
+ vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+
+ return 0;
}
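A purely illustrative restatement of the mapping-type predicate above, spelled out as a helper (not part of the patch):

/* Illustrative only: same test as the vm_flags check above. */
static bool example_use_pfnmap(unsigned long vm_flags)
{
	/*
	 * Private writable (COW) mappings have VM_MAYWRITE set but
	 * VM_SHARED clear and must stay VM_MIXEDMAP; shared mappings and
	 * non-writable private mappings can switch to VM_PFNMAP.
	 */
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE;
}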
/* struct vmw_validation_mem callback */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f611b2290a1b..e69bc373ae2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -33,6 +33,8 @@
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
* @hash: A hash entry used for the duplicate detection hash table.
+ * @coherent_count: If switching backup buffers, number of new coherent
+ * resources that will have this buffer as a backup buffer.
* @as_mob: Validate as mob.
* @cpu_blit: Validate for cpu blit access.
*
@@ -42,6 +44,7 @@
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
+ unsigned int coherent_count;
u32 as_mob : 1;
u32 cpu_blit : 1;
};
@@ -459,6 +462,19 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
if (ret)
goto out_unreserve;
}
+
+ if (val->switching_backup && val->new_backup &&
+ res->coherent) {
+ struct vmw_validation_bo_node *bo_node =
+ vmw_validation_find_bo_dup(ctx,
+ val->new_backup);
+
+ if (WARN_ON(!bo_node)) {
+ ret = -EINVAL;
+ goto out_unreserve;
+ }
+ bo_node->coherent_count++;
+ }
}
return 0;
@@ -521,6 +537,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
};
int ret;
+ if (atomic_read(&vbo->cpu_writers))
+ return -EBUSY;
+
if (vbo->pin_count > 0)
return 0;
@@ -562,6 +581,9 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ struct vmw_buffer_object *vbo =
+ container_of(entry->base.bo, typeof(*vbo), base);
+
if (entry->cpu_blit) {
struct ttm_operation_ctx ctx = {
.interruptible = intr,
@@ -576,6 +598,27 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
}
if (ret)
return ret;
+
+ /*
+ * Rather than having the resource code allocate the bo
+ * dirty tracker in resource_unreserve(), where we can't fail,
+ * do it here when validating the buffer object.
+ */
+ if (entry->coherent_count) {
+ unsigned int coherent_count = entry->coherent_count;
+
+ while (coherent_count) {
+ ret = vmw_bo_dirty_add(vbo);
+ if (ret)
+ return ret;
+
+ coherent_count--;
+ }
+ entry->coherent_count -= coherent_count;
+ }
+
+ if (vbo->dirty)
+ vmw_bo_dirty_scan(vbo);
}
return 0;
}
@@ -601,7 +644,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
struct vmw_resource *res = val->res;
struct vmw_buffer_object *backup = res->backup;
- ret = vmw_resource_validate(res, intr);
+ ret = vmw_resource_validate(res, intr, val->dirty_set &&
+ val->dirty);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
@@ -828,3 +872,34 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
ctx->mem_size_left += size;
return 0;
}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path
+ */
+void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+
+ /*
+ * Switching coherent resource backup buffers failed.
+ * Release corresponding buffer object dirty trackers.
+ */
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->coherent_count) {
+ unsigned int coherent_count = entry->coherent_count;
+ struct vmw_buffer_object *vbo =
+ container_of(entry->base.bo, typeof(*vbo),
+ base);
+
+ while (coherent_count--)
+ vmw_bo_dirty_release(vbo);
+ }
+ }
+
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
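A sketch of the reserve/validate/backoff pairing the new out-of-line helper is meant for; the wrapper below is hypothetical:

/* Hypothetical error-path pairing for a validation context. */
static int example_validate_bos(struct vmw_validation_context *ctx, bool intr)
{
	int ret;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		return ret;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		vmw_validation_bo_backoff(ctx);	/* also drops dirty trackers added above */

	return ret;
}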
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 1d2322ad6fd5..739906d1b3eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -28,9 +28,10 @@
#ifndef _VMWGFX_VALIDATION_H_
#define _VMWGFX_VALIDATION_H_
-#include <drm/drm_hashtab.h>
#include <linux/list.h>
#include <linux/ww_mutex.h>
+
+#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_execbuf_util.h>
#define VMW_RES_DIRTY_NONE 0
@@ -169,21 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL, true);
-}
-
-/**
- * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
- * validation context
- * @ctx: The validation context
- *
- * This function unreserves the buffer objects previously reserved using
- * vmw_validation_bo_reserve. It's typically used as part of an error path
- */
-static inline void
-vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
-{
- ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+ NULL);
}
/**
@@ -268,4 +255,6 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
void *val_private, u32 dirty);
+void vmw_validation_bo_backoff(struct vmw_validation_context *ctx);
+
#endif
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 84aa4d61dc42..4be49c1aef51 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -8,13 +8,18 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
-#include <linux/of_device.h>
-
#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -485,15 +490,12 @@ static const struct vm_operations_struct xen_drm_drv_vm_ops = {
};
static struct drm_driver xen_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.release = xen_drm_drv_release,
.gem_vm_ops = &xen_drm_drv_vm_ops,
.gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
@@ -716,17 +718,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
struct device *dev = &xb_dev->dev;
int ret;
- /*
- * The device is not spawn from a device tree, so arch_setup_dma_ops
- * is not called, thus leaving the device with dummy DMA ops.
- * This makes the device return error on PRIME buffer import, which
- * is not correct: to fix this call of_dma_configure() with a NULL
- * node to set default DMA ops.
- */
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- ret = of_dma_configure(dev, NULL, true);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret < 0) {
- DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+ DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
return ret;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 5693b4a4b02b..f92c258350ca 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -11,13 +11,18 @@
#ifndef __XEN_DRM_FRONT_H_
#define __XEN_DRM_FRONT_H_
-#include <drm/drmP.h>
-#include <drm/drm_simple_kms_helper.h>
-
#include <linux/scatterlist.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "xen_drm_front_cfg.h"
+struct drm_device;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_pending_vblank_event;
+
/**
* DOC: Driver modes of operation in terms of display buffers used
*
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
index 5baf2b9de93c..ec53b9cc9e0e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_cfg.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
@@ -8,10 +8,10 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include <drm/drmP.h>
-
#include <linux/device.h>
+#include <drm/drm_print.h>
+
#include <xen/interface/io/displif.h>
#include <xen/xenbus.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
index 9f5f31f77f1e..459702fa990e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -9,6 +9,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <video/videomode.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
index 39de7cf5adbe..3adacba9a23b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -11,11 +11,10 @@
#ifndef __XEN_DRM_FRONT_CONN_H_
#define __XEN_DRM_FRONT_CONN_H_
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
+#include <linux/types.h>
-#include <linux/wait.h>
+struct drm_connector;
+struct xen_drm_front_drm_info;
struct xen_drm_front_drm_info;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
index 945226a95e9b..e10d95dddb99 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -8,11 +8,11 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include <drm/drmP.h>
-
#include <linux/errno.h>
#include <linux/irq.h>
+#include <drm/drm_print.h>
+
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index a24548489dde..f0b85e094111 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -8,20 +8,19 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include "xen_drm_front_gem.h"
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/shmem_fs.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/scatterlist.h>
-#include <linux/shmem_fs.h>
-
#include <xen/balloon.h>
#include "xen_drm_front.h"
+#include "xen_drm_front_gem.h"
struct xen_gem_object {
struct drm_gem_object base;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index d5ab734fdafe..a39675fa31b2 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -11,7 +11,12 @@
#ifndef __XEN_DRM_FRONT_GEM_H
#define __XEN_DRM_FRONT_GEM_H
-#include <drm/drmP.h>
+struct dma_buf_attachment;
+struct drm_device;
+struct drm_gem_object;
+struct file;
+struct sg_table;
+struct vm_area_struct;
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
size_t size);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index c2955d375394..4f34c5208180 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -8,17 +8,18 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include "xen_drm_front_kms.h"
-
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
+#include "xen_drm_front_kms.h"
/*
* Timeout in ms to wait for frame done event from the backend:
@@ -45,7 +46,7 @@ static void fb_destroy(struct drm_framebuffer *fb)
drm_gem_fb_destroy(fb);
}
-static struct drm_framebuffer_funcs fb_funcs = {
+static const struct drm_framebuffer_funcs fb_funcs = {
.destroy = fb_destroy,
};
@@ -62,14 +63,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
if (IS_ERR_OR_NULL(fb))
return fb;
- gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!gem_obj) {
- DRM_ERROR("Failed to lookup GEM object\n");
- ret = -ENOENT;
- goto fail;
- }
-
- drm_gem_object_put_unlocked(gem_obj);
+ gem_obj = fb->obj[0];
ret = xen_drm_front_fb_attach(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(gem_obj),
@@ -269,11 +263,12 @@ static void display_update(struct drm_simple_display_pipe *pipe,
}
static enum drm_mode_status
-display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+display_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
{
struct xen_drm_front_drm_pipeline *pipeline =
- container_of(crtc, struct xen_drm_front_drm_pipeline,
- pipe.crtc);
+ container_of(pipe, struct xen_drm_front_drm_pipeline,
+ pipe);
if (mode->hdisplay != pipeline->width)
return MODE_ERROR;
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 520d7369f85a..1141c1ed1ed0 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -14,13 +14,14 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_vblank.h>
#include "zx_drm_drv.h"
#include "zx_vou.h"
@@ -34,15 +35,12 @@ static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
static struct drm_driver zx_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index bfe918b27c5c..b98a1420dcd3 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -19,7 +19,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
#include <sound/hdmi-codec.h>
@@ -319,8 +319,10 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
- drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &zx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &hdmi->ddc->adap);
drm_connector_helper_add(&hdmi->connector,
&zx_hdmi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 6b812aad411b..086c50fac689 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -7,10 +7,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drmP.h>
#include "zx_common_regs.h"
#include "zx_drm_drv.h"
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index a768c567b557..c598b7daf1f1 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -7,11 +7,13 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "zx_drm_drv.h"
#include "zx_tvenc_regs.h"
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 1634a08707fb..c4fa3bbaba78 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -7,11 +7,13 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "zx_drm_drv.h"
#include "zx_vga_regs.h"
@@ -163,8 +165,10 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
vga->connector.polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(drm, connector, &zx_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &zx_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &vga->ddc->adap);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret);
goto clean_encoder;
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 81b4cf107b75..5259ff2825f9 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -6,7 +6,10 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
@@ -17,7 +20,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_vblank.h>
#include "zx_common_regs.h"
#include "zx_drm_drv.h"